added new server
All checks were successful
ci/woodpecker/push/demo-workflow Pipeline was successful
- added new server
- added instruction regarding joining quorum
- use Terraform to provision VMs for the new server
README.md (20 lines changed)
@@ -108,6 +108,26 @@ qm set 103 -scsi2 /dev/disk/by-id/usb-WD_BLACK_SN770_1TB_012938055C4B-0:0

 grep 5C4B /etc/pve/qemu-server/103.conf
 ```

+#### Proxmox: How to Join a Cluster with Live VMs
+
+Proxmox does not allow a node to join a cluster while guest VMs exist on it.
+The workaround is to back up the existing guest configuration files, join the
+cluster, and then restore the files.
+
+```bash
+mkdir -p /root/guest-backup/qemu /root/guest-backup/lxc
+cp /etc/pve/qemu-server/*.conf /root/guest-backup/qemu/
+cp /etc/pve/lxc/*.conf /root/guest-backup/lxc/
+rm /etc/pve/qemu-server/*.conf
+rm /etc/pve/lxc/*.conf
+
+# After that, join the node from the GUI
+
+# Finally, restore the configuration files
+cp /root/guest-backup/qemu/*.conf /etc/pve/qemu-server/
+cp /root/guest-backup/lxc/*.conf /etc/pve/lxc/
+```
+
 > 📚 Reference: [Proxmox Disk Passthrough Guide](<https://pve.proxmox.com/wiki/Passthrough_Physical_Disk_to_Virtual_Machine_(VM)>)

 ### 2. Kubernetes Cluster Setup

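The README section above joins the cluster through the GUI. If a CLI path is preferred, the same step can likely be done with `pvecm`, and quorum can be verified afterwards. A minimal sketch, assuming the new node can reach an existing cluster member (`<existing-node-ip>` is a placeholder):

```bash
# On the new node, after backing up and removing the guest config files as above
pvecm add <existing-node-ip>

# On any cluster member, confirm the node joined and the cluster is quorate
pvecm status
pvecm nodes
```
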
infra/ansible/inventory/host_vars/proxmox3.yaml (new file, 29 lines)

@@ -0,0 +1,29 @@
+# VM related variables
+vm_list:
+  - id: 301
+    name: "vm10"
+    memory: 4096
+    cores: 2
+    disk_size: 30G
+    ip: "192.168.1.172/24"
+    gateway: "192.168.1.1"
+    nameserver1: "192.168.1.145"
+    nameserver2: "1.1.1.1"
+  - id: 302
+    name: "vm11"
+    memory: 4096
+    cores: 2
+    disk_size: 30G
+    ip: "192.168.1.173/24"
+    gateway: "192.168.1.1"
+    nameserver1: "192.168.1.145"
+    nameserver2: "1.1.1.1"
+
+# cloud-init template variables
+node: "homeserver3"
+net0: "virtio,bridge=vmbr0"
+# disk_name: "local:2000/vm-2000-disk-0.raw,discard=on"
+disk_path: "/var/lib/vz/images/3000"
+ide2: "local:cloudinit,format=qcow2"
+boot_order: "order=scsi0"
+scsi_hw: "virtio-scsi-pci"

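The new host_vars file defines a `vm_list` that a playbook presumably loops over; the repository's actual role is not part of this diff. A minimal sketch of how such a loop could consume these variables (play and task names are illustrative only):

```yaml
# Hypothetical snippet: iterate over vm_list from host_vars/proxmox3.yaml
- name: Inspect VMs defined for this hypervisor
  hosts: proxmox3
  gather_facts: false
  tasks:
    - name: Print one line per VM in vm_list
      ansible.builtin.debug:
        msg: "VM {{ item.id }} ({{ item.name }}): {{ item.cores }} cores, {{ item.memory }} MB RAM, {{ item.disk_size }} disk, IP {{ item.ip }}"
      loop: "{{ vm_list }}"
      loop_control:
        label: "{{ item.name }}"
```
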
@@ -7,19 +7,27 @@ hypervisors:
   children:
     server1:
     server2:
+    server3:

 server1:
   hosts:
     proxmox1:
       ansible_host: 192.168.1.121
-      ansible_user: "{{ ansible_proxmox_user }}"
+      ansible_user: "{{ ansible_proxmox_user }}"
       ansible_ssh_private_key_file: "{{ ansible_ssh_private_key_file }}"

 server2:
   hosts:
     proxmox2:
       ansible_host: 192.168.1.122
-      ansible_user: "{{ ansible_proxmox_user }}"
+      ansible_user: "{{ ansible_proxmox_user }}"
       ansible_ssh_private_key_file: "{{ ansible_ssh_private_key_file }}"

+server3:
+  hosts:
+    proxmox3:
+      ansible_host: 192.168.1.123
+      ansible_user: "{{ ansible_proxmox_user }}"
+      ansible_ssh_private_key_file: "{{ ansible_ssh_private_key_file }}"
+
 vms:

@@ -48,4 +56,3 @@ vm_group_2:
       ansible_host: 192.168.1.162
       ansible_user: "{{ ansible_vm_user }}"
       ansible_ssh_private_key_file: "{{ ansible_ssh_private_key_file }}"
-

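With the `server3`/`proxmox3` entries added to the inventory above, the new hypervisor can be targeted on its own. A sketch, assuming the inventory lives under `infra/ansible/inventory`; the playbook name (`site.yml`) is a placeholder:

```bash
# Quick reachability check for the new group
ansible -i infra/ansible/inventory server3 -m ping

# Run an existing playbook against only the new node
ansible-playbook -i infra/ansible/inventory site.yml --limit server3
```
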
@@ -11,42 +11,42 @@ vms = [
     gateway = "192.168.1.1"
     dns_servers = ["192.168.1.145", "1.1.1.1"]
     cores = 2
-    memory = 2048
+    memory = 4096
     disk_size = 20
   },
   {
     name = "vm7"
     node_name = "homeserver1"
     vm_id = 106
     ip_address = "192.168.1.152/24"
     gateway = "192.168.1.1"
     dns_servers = ["192.168.1.145", "1.1.1.1"]
     cores = 2
     memory = 2048
     disk_size = 20
   },
   {
     name = "vm8"
     node_name = "homeserver2"
     vm_id = 205
     ip_address = "192.168.1.161/24"
     gateway = "192.168.1.1"
     dns_servers = ["192.168.1.145", "1.1.1.1"]
     cores = 2
-    memory = 2048
+    memory = 4096
     disk_size = 20
   },
   {
-    name = "vm9"
-    node_name = "homeserver2"
-    vm_id = 206
-    ip_address = "192.168.1.162/24"
+    name = "vm8"
+    node_name = "homeserver3"
+    vm_id = 301
+    ip_address = "192.168.1.172/24"
     gateway = "192.168.1.1"
     dns_servers = ["192.168.1.145", "1.1.1.1"]
     cores = 2
-    memory = 2048
-    disk_size = 20
+    memory = 4096
+    disk_size = 50
   },
+  {
+    name = "vm9"
+    node_name = "homeserver3"
+    vm_id = 302
+    ip_address = "192.168.1.173/24"
+    gateway = "192.168.1.1"
+    dns_servers = ["192.168.1.145", "1.1.1.1"]
+    cores = 2
+    memory = 4096
+    disk_size = 50
+  }
 ]

-nodes = ["homeserver1", "homeserver2"]
+nodes = ["homeserver1", "homeserver2", "homeserver3"]

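Per the commit message, the new VMs are provisioned with Terraform. A sketch of the usual workflow, assuming it is run from the Terraform directory that holds these definitions and that the tfvars shown above are picked up automatically:

```bash
terraform init
terraform plan   # should show the homeserver3 VMs (vm_id 301 and 302) being created
terraform apply
```
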
@@ -66,7 +66,7 @@ variable "vms" {

 variable "nodes" {
   type = list(string)
-  default = ["homeserver1", "homeserver2"]
+  default = ["homeserver1", "homeserver2", "homeserver3"]
 }

 variable "vm_user_name" {

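The tfvars entries above imply a structured `vms` variable. Its real declaration is not shown in this diff; a plausible shape, with field names mirroring the tfvars keys, might look like:

```hcl
# Hypothetical declaration; the repository's actual variable "vms" block may differ.
variable "vms" {
  type = list(object({
    name        = string
    node_name   = string
    vm_id       = number
    ip_address  = string
    gateway     = string
    dns_servers = list(string)
    cores       = number
    memory      = number
    disk_size   = number
  }))
}
```
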
@@ -29,9 +29,9 @@ tenant:
   #
   #
   image:
-    repository: quay.io/minio/minio
-    tag: RELEASE.2025-04-08T15-41-24Z
-    pullPolicy: IfNotPresent
+    repository: docker.io/minio/minio
+    tag: RELEASE.2025-06-13T11-33-47Z-cpuv1
+    pullPolicy: Always
   ###
   #
   # An array of Kubernetes secrets to use for pulling images from a private ``image.repository``.

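The image change above (quay.io mirror swapped for docker.io, newer release tag, `pullPolicy: Always`) only takes effect once the chart is re-deployed. A sketch of rolling it out; the release name, chart reference, and namespace are placeholders to adjust for the actual deployment:

```bash
# Apply the updated values to the existing Helm release
helm upgrade <release-name> <chart> --namespace <namespace> -f values.yaml

# Verify the pods come back with the new image
kubectl --namespace <namespace> get pods -o wide
```
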