infra: introduce terraform/opentofu for proxmox management

- move ansible project within infra
- introduce terraform/opentofu for proxmox VM management
This commit is contained in:
2025-06-30 19:16:14 +03:00
parent a79de74a6a
commit 03c882f311
24 changed files with 882 additions and 0 deletions

View File

@ -0,0 +1,11 @@
---
# Role tasks: refresh the apt cache, then install the base package set.
# apt_packages (list of package names) is expected from role/group vars.

- name: Update apt cache
  ansible.builtin.apt:
    update_cache: true  # was 'yes'; use canonical YAML booleans (ansible-lint truthy)
  become: true

- name: Install necessary packages
  ansible.builtin.apt:
    name: "{{ apt_packages }}"
    state: present
  become: true

View File

@ -0,0 +1,97 @@
---
# Bootstrap a k0s cluster with k0sctl from the control host (localhost):
# generate the k0sctl config and MetalLB pool from templates, apply the
# cluster, save the kubeconfig, apply the pool, then clean up /tmp files.

- name: Remove known_hosts file if it exists
  delegate_to: localhost
  run_once: true
  ansible.builtin.file:
    # NOTE(review): hard-coded home dir; consider
    # "{{ lookup('env', 'HOME') }}/.ssh/known_hosts" — confirm run user.
    path: /home/taqi/.ssh/known_hosts
    state: absent

- name: Remove k0sctl lock file if it exists  # typo fix: was "k0ctl"
  ansible.builtin.file:
    path: /run/lock/k0sctl
    state: absent
  become: true

- name: Install k0sctl on host
  delegate_to: localhost
  run_once: true  # added: installing once per play is enough; avoids concurrent 'go install' runs
  ansible.builtin.command:
    cmd: "go install github.com/k0sproject/k0sctl@latest"

- name: Ensure k0sctl is installed on host
  delegate_to: localhost
  run_once: true
  ansible.builtin.command:
    cmd: "k0sctl version"
  register: k0sctl_version
  changed_when: false  # read-only probe

- name: Generate k0sctl configuration file
  delegate_to: localhost
  run_once: true
  ansible.builtin.template:
    src: k0sctl.yaml.j2
    dest: /tmp/k0sctl.yaml
  # NOTE(review): after the register above, k0sctl_version is always defined;
  # a stricter gate would be k0sctl_version.rc == 0. Kept as-is for compatibility.
  when: k0sctl_version is defined
  tags:
    - generate-k0sctl-config

- name: Generate MetalLB IP Address Pool configuration file
  delegate_to: localhost
  run_once: true
  ansible.builtin.template:
    src: ipAddressPool.yaml.j2
    dest: /tmp/ipAddressPool.yaml
  when: k0sctl_version is defined
  tags:
    - generatemetallb-ippool  # misspelled tag kept for backward compatibility
    - generate-metallb-ippool  # corrected spelling
    - metallb-ippool

- name: Create Cluster using k0sctl from host
  delegate_to: localhost
  run_once: true
  ansible.builtin.command:
    cmd: "k0sctl apply --config /tmp/k0sctl.yaml"
  when: k0sctl_version is defined

- name: Save kubeconfig file on host
  delegate_to: localhost
  run_once: true
  ansible.builtin.shell:  # shell is required here for the '>' redirection
    cmd: "cd /tmp && k0sctl kubeconfig > /home/taqi/.kube/k0s_config.yaml"
  register: kubeconfig_result
  retries: 3
  delay: 5
  until: kubeconfig_result.rc == 0
  when: k0sctl_version is defined
  tags:
    - generate-kubeconfig

- name: Apply IP Pool for MetalLB from host
  delegate_to: localhost
  run_once: true
  ansible.builtin.command:  # was shell; no shell features used, command is safer
    cmd: "kubectl apply -f /tmp/ipAddressPool.yaml --kubeconfig /home/taqi/.kube/k0s_config.yaml"
  register: metallb_ippool_result
  retries: 3
  delay: 5
  until: metallb_ippool_result.rc == 0
  when: k0sctl_version is defined
  tags:
    - metallb-ippool

- name: Cleanup temporary files
  delegate_to: localhost
  run_once: true
  when: k0sctl_version is defined
  tags:
    - cleanup
  block:
    - name: Remove k0sctl.yaml temporary file
      ansible.builtin.file:
        path: /tmp/k0sctl.yaml
        state: absent

    - name: Remove ipAddressPool.yaml temporary file
      # was a raw 'rm -f' shell command with redundant delegate_to/run_once
      # (the enclosing block already sets both); file module is idempotent
      # and matches the sibling task above.
      ansible.builtin.file:
        path: /tmp/ipAddressPool.yaml
        state: absent

View File

@ -0,0 +1,8 @@
# MetalLB address pool, rendered by Ansible (Jinja2).
# metallb_ip_range comes from inventory/group vars; quoting the templated
# value guards against empty or special-character expansion.
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
    - "{{ metallb_ip_range }}"

View File

@ -0,0 +1,70 @@
# k0sctl cluster definition, rendered by Ansible (Jinja2).
# All {{ ... }} values come from inventory/group vars; review the rendered
# file (/tmp/k0sctl.yaml), not just this template.
apiVersion: k0sctl.k0sproject.io/v1beta1
kind: Cluster
metadata:
  name: k0s-cluster
spec:
  hosts:
    # Two combined controller+worker nodes; noTaints lets regular
    # workloads schedule on them.
    - ssh:
        address: "{{ master1_ip }}"
        user: "{{ ansible_vm_user }}"
        keyPath: "{{ ansible_ssh_private_key_file }}"
      role: controller+worker
      hostname: "{{ master1_hostname }}"
      noTaints: true
    - ssh:
        address: "{{ master2_ip }}"
        user: "{{ ansible_vm_user }}"
        keyPath: "{{ ansible_ssh_private_key_file }}"
      role: controller+worker
      hostname: "{{ master2_hostname }}"
      noTaints: true
    # Dedicated worker nodes.
    - ssh:
        address: "{{ worker1_ip }}"
        user: "{{ ansible_vm_user }}"
        keyPath: "{{ ansible_ssh_private_key_file }}"
      role: worker
      hostname: "{{ worker1_hostname }}"
    - ssh:
        address: "{{ worker2_ip }}"
        user: "{{ ansible_vm_user }}"
        keyPath: "{{ ansible_ssh_private_key_file }}"
      role: worker
      hostname: "{{ worker2_hostname }}"
  k0s:
    version: "{{ k0s_version }}"
    config:
      spec:
        api:
          address: "{{ master1_ip }}"
          port: 6443
          k0sApiPort: 9443
          # Extra SANs so the API cert is valid for both controllers
          # and the local DNS names.
          sans:
            - "{{ master1_ip }}"
            - "{{ master2_ip }}"
            - k8s.local
            - api.k8s.local
        network:
          kubeProxy:
            mode: iptables
          kuberouter:
            disabled: false
          podCIDR: "{{ pod_CIDR }}"
          serviceCIDR: "{{ service_CIDR }}"
          provider: kuberouter
        extensions:
          # Helm charts installed by k0s itself at cluster bring-up.
          helm:
            concurrencyLevel: 5
            repositories:
              - name: metallb
                url: https://metallb.github.io/metallb
              - name: traefik
                url: https://traefik.github.io/charts
            charts:
              - name: metallb
                chartname: metallb/metallb
                version: "{{ metallb_version }}"
                namespace: metallb-system
              - name: traefik
                chartname: traefik/traefik
                version: "{{ traefik_version }}"
                namespace: traefik-system

View File

@ -0,0 +1,70 @@
---
# Provision Proxmox VMs: download a cloud image onto the Proxmox node,
# define the VMs via the API, import/attach/resize the disk with qm,
# then start the VMs and remove the downloaded image.

- name: Download cloud image
  ansible.builtin.get_url:  # FQCN (was bare get_url)
    url: "{{ image_url }}"
    dest: "{{ image_dest }}"
    use_netrc: true  # was 'yes'; canonical boolean

- name: Create VMs
  delegate_to: localhost
  vars:
    # NOTE(review): hard-coded venv path; the sibling delete-tasks file uses
    # "{{ ansible_venv }}" — unify once that var is available here.
    ansible_python_interpreter: /home/taqi/.venv/ansible/bin/python
  community.general.proxmox_kvm:
    api_host: "{{ proxmox_api_url }}"
    api_user: "{{ proxmox_user }}"
    api_token_id: "{{ proxmox_api_token_id }}"
    api_token_secret: "{{ proxmox_api_token }}"
    node: "{{ node }}"
    vmid: "{{ item.id }}"
    name: "{{ item.name }}"
    memory: "{{ item.memory }}"
    cores: "{{ item.cores }}"
    scsihw: "{{ scsi_hw }}"
    boot: "{{ boot_order }}"
    net:
      net0: "{{ net0 }}"
    # Cloud-init network config: static IP + gateway per VM.
    ipconfig:
      ipconfig0: "ip={{ item.ip }},gw={{ item.gateway }}"
    ide:
      ide2: "{{ ide2 }}"
    nameservers: "{{ item.nameserver1 }},{{ item.nameserver2 }}"
    ciuser: "{{ ciuser }}"
    cipassword: "{{ cipassword }}"
    # NOTE(review): hard-coded public-key path — parameterize if other
    # users/keys are expected.
    sshkeys: "{{ lookup('file', '/home/taqi/.ssh/homeserver.pub') }}"
  loop: "{{ vm_list }}"

# The qm commands below run on the Proxmox node (the play target),
# where the downloaded image lives.
- name: Import disk image
  ansible.builtin.command:  # was shell; no shell features needed
    cmd: qm importdisk "{{ item.id }}" "{{ image_dest }}" "{{ storage_name }}" --format "{{ image_format }}"
  loop: "{{ vm_list }}"

- name: Attach disk to VM
  ansible.builtin.command:  # was shell; no shell features needed
    cmd: qm set "{{ item.id }}" --scsi0 "{{ storage_name }}:{{ item.id }}/vm-{{ item.id }}-disk-0.{{ image_format }},discard=on"
  loop: "{{ vm_list }}"

- name: Resize disk
  ansible.builtin.command:  # was shell; no shell features needed
    cmd: qm resize {{ item.id }} scsi0 {{ item.disk_size }}
  loop: "{{ vm_list }}"

- name: Start VMs
  delegate_to: localhost
  vars:
    ansible_python_interpreter: /home/taqi/.venv/ansible/bin/python
  community.general.proxmox_kvm:
    api_host: "{{ proxmox_api_url }}"
    api_user: "{{ proxmox_user }}"
    api_token_id: "{{ proxmox_api_token_id }}"
    api_token_secret: "{{ proxmox_api_token }}"
    node: "{{ node }}"
    name: "{{ item.name }}"
    state: started
  loop: "{{ vm_list }}"
  tags:
    - start_vms

- name: Clean up downloaded image
  ansible.builtin.file:  # FQCN (was bare file)
    path: "{{ image_dest }}"
    state: absent

View File

@ -0,0 +1,72 @@
---
# Tear down Proxmox VMs listed in vm_list: query current state, report it,
# force-stop any VM that exists, then delete it. All API calls run from
# localhost; vm_state.results is indexed with vm_index across tasks.

- name: Get VM current state
  delegate_to: localhost
  vars:
    ansible_python_interpreter: "{{ ansible_venv }}"
  community.general.proxmox_kvm:
    api_host: "{{ proxmox_api_url }}"
    api_user: "{{ proxmox_user }}"
    api_token_id: "{{ proxmox_api_token_id }}"
    api_token_secret: "{{ proxmox_api_token }}"
    name: "{{ item.name }}"
    node: "{{ node }}"
    state: current
  register: vm_state
  ignore_errors: true  # was 'yes'; query fails for VMs that no longer exist
  loop: "{{ vm_list }}"
  loop_control:
    index_var: vm_index
  tags:
    - vm_delete

- name: Debug VM state
  ansible.builtin.debug:  # FQCN (was bare debug)
    msg: "VM {{ item.name }} state: {{ vm_state.results[vm_index].status }}"
  when: vm_state.results[vm_index] is defined and vm_state.results[vm_index] is succeeded
  loop: "{{ vm_list }}"
  loop_control:
    index_var: vm_index
  tags:
    - vm_delete  # added: every sibling task carries this tag; without it the
                 # report was skipped when running with --tags vm_delete

- name: Stop VM
  delegate_to: localhost
  vars:
    ansible_python_interpreter: "{{ ansible_venv }}"
  community.general.proxmox_kvm:
    api_host: "{{ proxmox_api_url }}"
    api_user: "{{ proxmox_user }}"
    api_token_id: "{{ proxmox_api_token_id }}"
    api_token_secret: "{{ proxmox_api_token }}"
    name: "{{ item.name }}"
    node: "{{ node }}"
    state: stopped
    force: true  # hard stop; VMs are about to be deleted anyway
  when: >
    vm_state.results[vm_index] is defined and
    vm_state.results[vm_index] is succeeded and
    vm_state.results[vm_index].status != 'absent'
  loop: "{{ vm_list }}"
  loop_control:
    index_var: vm_index
  tags:
    - vm_delete

- name: Delete VM
  delegate_to: localhost
  vars:
    ansible_python_interpreter: "{{ ansible_venv }}"
  community.general.proxmox_kvm:
    api_host: "{{ proxmox_api_url }}"
    api_user: "{{ proxmox_user }}"
    api_token_id: "{{ proxmox_api_token_id }}"
    api_token_secret: "{{ proxmox_api_token }}"
    name: "{{ item.name }}"
    node: "{{ node }}"
    state: absent
  when: >
    vm_state.results[vm_index] is defined and
    vm_state.results[vm_index] is succeeded and
    vm_state.results[vm_index].status != 'absent'
  loop: "{{ vm_list }}"
  loop_control:
    index_var: vm_index
  tags:
    - vm_delete