Compare commits

...

18 Commits

Author SHA1 Message Date
a68f3e0e70 swap proxmox backend from s3 to gitlab http
2026-01-15 00:54:59 +02:00
bbf6e5d871 allow sast failure 2026-01-11 15:18:05 +02:00
53875a1c91 override sast default rules 2026-01-11 15:15:28 +02:00
d1c7b4ad9f update conditional logic to the markdown linter 2026-01-11 15:12:18 +02:00
5b56fa83e3 update linting image 2026-01-11 15:10:57 +02:00
8e04b981c4 fix gitlab ci pipeline 2026-01-11 15:03:33 +02:00
f81bae773a updated gitlab ci script 2026-01-11 15:02:25 +02:00
a016ec6bf8 add gitlab CI config doc 2026-01-11 14:54:51 +02:00
c799649039 fix gitlab CI pipeline script 2026-01-11 14:50:43 +02:00
cc163c16f5 fix gitlab ci 2026-01-11 14:43:03 +02:00
3e6287876e add linting and security checking for gitlab ci 2026-01-11 14:38:10 +02:00
a2e4215dd3 Update .gitlab-ci.yml file 2026-01-11 12:22:30 +00:00
0ebb6e9d09 Delete .gitlab-ci.yaml 2026-01-11 07:43:41 +00:00
611cfb02bf Update .gitlab-ci.yml file 2026-01-11 07:42:57 +00:00
ca4a2f7051 add demo .gitlab-ci.yaml file 2026-01-11 07:41:14 +00:00
56c14df540 kubernetes: fix wireguard issue in qbittorrent-helm-chart
2026-01-09 17:00:11 +02:00
b9fcdfc6b7 infra: add haproxy to as central reverse proxy
- add haproxy to work as central reverse proxy
- based on the domain, it can route to either docker or k8s proxy
2026-01-09 17:00:01 +02:00
10f72b8b59 docker: update traefik reverse proxy dockerfile 2026-01-09 16:57:40 +02:00
20 changed files with 453 additions and 150 deletions

.gitlab-ci.yml (new file)

@@ -0,0 +1,35 @@
workflow:
  rules:
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
    - if: $CI_COMMIT_BRANCH

stages:
  - test
  - lint

include:
  - template: Security/SAST.gitlab-ci.yml

sast:
  allow_failure: true
  rules:
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
    - if: $CI_COMMIT_BRANCH

markdownlint:
  stage: lint
  image:
    name: registry.gitlab.com/06kellyjac/docker_markdownlint-cli:0.28.1-alpine
    entrypoint:
      - "/usr/bin/env"
      - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
  before_script:
    - markdownlint --version
  allow_failure: true
  rules:
    - changes:
        - "**/*.md"
  script:
    - markdownlint .

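The `markdownlint` job only runs when Markdown files change; the same check
can be reproduced locally with the image pinned above (a sketch; the
bind-mount path and the entrypoint override are assumptions):

```bash
# Lint the repository with the same pinned markdownlint image used in CI
docker run --rm -v "$PWD":/workdir -w /workdir \
  --entrypoint markdownlint \
  registry.gitlab.com/06kellyjac/docker_markdownlint-cli:0.28.1-alpine \
  .
```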
doc/gitlab_ci.md (new file)

@@ -0,0 +1 @@
# Gitlab CI Configuration

docker/.gitignore (vendored)

@@ -1 +1,2 @@
.env
.env
**/.env

docker/gitlab/README.md (new file)

@@ -0,0 +1,83 @@
# GitLab Docker Setup
This directory contains the necessary files to set up GitLab using Docker
and Docker Compose. The setup includes a `gitlab.yaml` file that defines the
GitLab service configuration.
## Adding a GitLab runner as a Docker container
To add a GitLab runner as a Docker container, follow these steps:
1. SSH into the target machine where you want to run the GitLab runner.
2. Make sure Docker and Docker Compose are installed on the machine.
3. Obtain the GitLab runner registration token from your GitLab instance.
You can find this token in the GitLab web interface under
`Settings > CI/CD > Runners > Create Instance Runner > Registration Token`.
4. Run the following commands to start and register the GitLab runner
container. Multiple runners can be commissioned the same way by changing the
container and volume names.
```bash
docker volume create gitlab-runner-config-2
docker run -d \
--name gitlab-runner-2 \
--restart always \
-v gitlab-runner-config-2:/etc/gitlab-runner \
-v /var/run/docker.sock:/var/run/docker.sock \
gitlab/gitlab-runner:latest
docker exec -it gitlab-runner-2 \
gitlab-runner register \
--non-interactive \
--url "https://<gitlab_instance_url>/" \
--token "<gitlab-runner-registration-token>" \
--executor "docker" \
--docker-image alpine:latest \
--description "docker-runner 2"
```
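After registration, the runner should appear on the instance runners page.
It can also be checked from the container itself; a quick sketch using the
container name from the command above:

```bash
# Confirm the runner is registered and can reach the GitLab instance
docker exec -it gitlab-runner-2 gitlab-runner verify
docker exec -it gitlab-runner-2 gitlab-runner list
```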
## Troubleshooting
- If the URL returns a 404 error, it is usually because the GitLab container
takes a long time to start. Wait a few minutes and try again. If the problem
persists, check the Traefik labels and access logs for more information.
- The initial root password is set in the `gitlab.yaml` file under the
`GITLAB_ROOT_PASSWORD` environment variable. Make sure to change it to a
secure password after the first login. If for some reason it does not work,
you can reset it via the following steps:
1. Access the GitLab container's shell:
```
docker exec -it <gitlab_container_name> /bin/bash
```
2. Open the GitLab Rails console:
```
gitlab-rails console
```
3. In the Rails console, execute the following commands:
```ruby
user = User.find_by_username('root')
user.password = 'NewSecurePassword123!'
user.password_confirmation = 'NewSecurePassword123!'
user.save!
```
4. Exit the Rails console and the container shell.
- If you get a server (500) error while disabling signup, follow the steps
below:
1. Access the GitLab container's shell:
```
docker exec -it <gitlab_container_name> /bin/bash
```
2. Run the following command to open the Rails console:
```
gitlab-rails console
```
3. In the Rails console, clear the stale runners registration token (a common
cause of this 500 error), then retry disabling signup from the web UI:
```ruby
settings = ApplicationSetting.last
settings.update_column(:runners_registration_token_encrypted, nil)
```
4. Exit the Rails console and the container shell.

docker/gitlab/gitlab.yaml (new file)

@@ -0,0 +1,42 @@
services:
  gitlab:
    image: gitlab/gitlab-ce:18.5.5-ce.0
    container_name: gitlab
    restart: unless-stopped
    env_file:
      - ./.env
    hostname: gitlab.${DOMAINNAME}
    ports:
      - "2424:22"
    volumes:
      - "$GITLAB_HOME/config:/etc/gitlab"
      - "$GITLAB_HOME/logs:/var/log/gitlab"
      - "$GITLAB_HOME/data:/var/opt/gitlab"
    shm_size: "256m"
    networks:
      - t3_proxy
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.gitlab-rtr.entrypoints=websecure"
      - "traefik.http.routers.gitlab-rtr.rule=Host(`gitlab.${DOMAINNAME}`)"
      - "traefik.http.routers.gitlab-rtr.tls=true"
      - "traefik.http.routers.gitlab-rtr.service=gitlab-svc"
      - "traefik.http.services.gitlab-svc.loadbalancer.server.port=80"
    environment:
      GITLAB_ROOT_PASSWORD: ${GITLAB_ROOT_PASSWORD}
      GITLAB_OMNIBUS_CONFIG: |
        external_url "https://gitlab.${DOMAINNAME}"
        gitlab_rails['gitlab_shell_ssh_port'] = 2424
        letsencrypt['enable'] = false
        nginx['listen_port'] = 80
        nginx['listen_https'] = false
        postgresql['shared_buffers'] = '256MB'
        sidekiq['max_concurrency'] = 4
        sidekiq['concurrency'] = 1
        puma['worker_timeout'] = 120
        puma['worker_processes'] = 1
        prometheus_monitoring['enable'] = false

networks:
  t3_proxy:
    external: true
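The compose file reads `DOMAINNAME`, `GITLAB_HOME`, and
`GITLAB_ROOT_PASSWORD` from the adjacent `.env` file. A minimal sketch,
assuming all values below are placeholders to be replaced:

```bash
# .env consumed by gitlab.yaml (placeholder values)
DOMAINNAME=example.com
GITLAB_HOME=/srv/gitlab
GITLAB_ROOT_PASSWORD=ChangeMeAfterFirstLogin
```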

(deleted file)

@@ -1,18 +0,0 @@
version: "3"
services:
jackett:
image: "linuxserver/jackett"
container_name: "jackett"
env_file:
./.env
volumes:
- ${DOCKERDIR}/appdata/jackett:/config
- ${DATADIR}/downloads:/downloads
- "/etc/localtime:/etc/localtime:ro"
ports:
- "9117:9117"
restart: unless-stopped
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}

(modified file)

@@ -1,9 +1,8 @@
version: "3"
services:
portainer:
image: portainer/portainer-ce:latest
ports:
- 9000:9000
# ports:
# - 9000:9000
volumes:
- /home/taqi/docker/portainer/data:/data
- /var/run/docker.sock:/var/run/docker.sock:ro
@@ -14,15 +13,12 @@ services:
- t3_proxy
labels:
- "traefik.enable=true"
# HTTP Routers
- "traefik.http.routers.portainer-rtr.entrypoints=websecure"
- "traefik.http.routers.portainer-rtr.rule=Host(`portainer.${DOMAINNAME}`)"
# HTTP Services
- "traefik.http.routers.portainer-rtr.tls=true"
- "traefik.http.routers.portainer-rtr.service=portainer-svc"
- "traefik.http.services.portainer-svc.loadbalancer.server.port=9000"
- "traefik.http.routers.traefik-rtr.middlewares=middlewares-rate-limit@file,middlewares-secure-headers@file"
command:
command:
--http-enabled
environment:
- TZ=${TZ}

(deleted file)

@@ -1,21 +0,0 @@
version: "3"
services:
radarr:
image: "linuxserver/radarr"
container_name: "radarr"
env_file:
./.env
volumes:
- ${DOCKERDIR}/appdata/radarr:/config
- ${DATADIR}/downloads:/downloads
- ${DATADIR}/movies:/movies
- "/etc/localtime:/etc/localtime:ro"
ports:
- "7878:7878"
restart: always
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
networks:
- bridge

(new file)

@@ -0,0 +1,35 @@
http:
  middlewares:
    # Rate Limiting Middleware
    middlewares-rate-limit:
      rateLimit:
        average: 100
        burst: 100
        period: 1m

    # Security Headers Middleware
    middlewares-secure-headers:
      headers:
        browserXssFilter: true
        contentTypeNosniff: true
        frameDeny: true
        permissionsPolicy: "GEOLOCATION 'none'; MICROPHONE 'none'; CAMERA 'none'"
        referrerPolicy: "strict-origin-when-cross-origin"
        stsIncludeSubdomains: true
        stsMaxAge: 63072000
        stsPreload: true
        customFrameOptionsValue: "SAMEORIGIN"
        customRequestHeaders:
          X-Forwarded-Proto: "https"

tls:
  options:
    default:
      minVersion: VersionTLS12
      cipherSuites:
        - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
        - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
        - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
        - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
        - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
        - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305

(modified file)

@@ -1,5 +1,3 @@
version: '3.8'
networks:
t3_proxy:
name: t3_proxy
@@ -8,16 +6,10 @@ networks:
config:
- subnet: 192.168.90.0/24
secrets:
basic_auth_credentials:
file: $DOCKERDIR/secrets/basic_auth_credentials
cf_dns_api_token:
file: $DOCKERDIR/secrets/cf_dns_api_token
services:
traefik:
container_name: traefik
image: traefik:3.0
image: traefik:3.6.6
restart: unless-stopped
env_file:
- ./.env
@@ -36,7 +28,6 @@ services:
- --api=true
- --api.dashboard=true
# - --api.insecure=true
- --entrypoints.websecure.forwardedHeaders.trustedIPs=$CLOUDFLARE_IPS,$LOCAL_IPS
- --log=true
- --log.filePath=/logs/traefik.log
- --log.level=DEBUG
@@ -52,6 +43,7 @@ services:
- --entrypoints.websecure.http.tls.domains[0].sans=*.$DOMAINNAME
- --providers.file.directory=/rules
- --providers.file.watch=true
- --certificatesresolvers.dns-cloudflare.acme.email=${CLOUDFLARE_EMAIL}
- --certificatesResolvers.dns-cloudflare.acme.storage=/acme.json
- --certificatesResolvers.dns-cloudflare.acme.dnsChallenge.provider=cloudflare
- --certificatesResolvers.dns-cloudflare.acme.dnsChallenge.resolvers=1.1.1.1:53,1.0.0.1:53
@@ -59,38 +51,22 @@ services:
# - 80:80
- 443:443
- 8080:8080
# - target: 80
# published: 80
# protocol: tcp
# mode: host
# - target: 443
# published: 443
# protocol: tcp
# mode: host
# - target: 8080
# published: 8585
# protocol: tcp
# mode: host
volumes:
- $DOCKERDIR/appdata/traefik3/rules/$HOSTNAME:/rules
- ./traefik-rules.yaml:/rules/traefik-rules.yaml
- /var/run/docker.sock:/var/run/docker.sock:ro
- $DOCKERDIR/appdata/traefik3/acme/acme.json:/acme.json
- $DOCKERDIR/logs/$HOSTNAME/traefik:/logs
- $DOCKERDIR/appdata/traefik/acme/acme.json:/acme.json
- $DOCKERDIR/logs/traefik:/logs
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=$TZ
- CF_DNS_API_TOKEN_FILE=/run/secrets/cf_dns_api_token
- HTPASSWD_FILE=/run/secrets/basic_auth_credentials
- CF_DNS_API_TOKEN=${CLOUDFLARE_TOKEN}
- DOMAINNAME=${DOMAINNAME}
secrets:
- cf_dns_api_token
- basic_auth_credentials
- CLOUDFLARE_EMAIL=${CLOUDFLARE_EMAIL}
labels:
- "traefik.enable=true"
- "traefik.http.routers.dashboard.tls=true"
- "traefik.http.routers.traefik-rtr.entrypoints=websecure"
- "traefik.http.routers.traefik-rtr.rule=Host(`traefik.${DOMAINNAME}`)"
- "traefik.http.routers.traefik-rtr.service=api@internal"
# Middlewares
- "traefik.http.routers.traefik-rtr.middlewares=middlewares-rate-limit@file,middlewares-secure-headers@file,middlewares-basic-auth@file"
- "traefik.http.routers.dashboard.tls=true"
- "traefik.http.routers.api.entrypoints=websecure"
- "traefik.http.routers.api.rule=Host(`traefik.${DOMAINNAME}`)"
- "traefik.http.routers.api.service=api@internal"

infra/haproxy/README.md (new file)

@@ -0,0 +1,93 @@
# HAProxy Configuration
## Rationale
This HAProxy instance serves as the primary entry point for the
homeserver infrastructure. It acts as a unified reverse proxy that
allows services from both Docker and Kubernetes environments to be
exposed behind a single, cohesive frontend. By using HAProxy with
SNI-based routing, the following can be achieved:
- **Expose all services behind a single TLS entry point** across multiple
backend environments
- **Route traffic dynamically** based on the requested domain to either
Docker or Kubernetes services
- **Maintain a single point of entry** for external clients while
distributing load across heterogeneous backends
- **Keep certificate handling with the backends**: TLS termination and
certificate management are not done by HAProxy in this setup; SSL/TLS
traffic is passed through unmodified to the backend proxies, which
terminate it themselves
## Overview
HAProxy is used as a reverse proxy and load balancer to route incoming
HTTPS traffic to the appropriate backend services in the homeserver
setup.
This HAProxy configuration implements SNI (Server Name Indication)
based routing to direct traffic to either the Kubernetes cluster or
Docker backend based on the requested domain.
## Global Settings
- **Logging**: Logs are written to syslog at `/dev/log` (local0) and
localhost (local2)
- **Admin Socket**: Accessible at `/run/haproxy/admin.sock` for
statistics and administration
- **Max Connections**: 10,000 concurrent connections
- **User/Group**: Runs as `haproxy` user and group
## Default Timeout Settings
- **Connect Timeout**: 5 seconds
- **Client Timeout**: 3600 seconds (1 hour)
- **Server Timeout**: 3600 seconds (1 hour)
## Frontend Configuration
The HAProxy frontend listens on port 443 (HTTPS) and TCP mode is used
for SSL/TLS traffic.
### SNI-Based Routing
Traffic is routed based on the SSL SNI (Server Name Indication)
hostname:
**Kubernetes Backend** (`k8s_backend`):
- Domains ending with `.mydomain.com`
**Docker Backend** (`docker_backend`):
- Domains ending with `.docker.mydomain.com`
## Backend Configuration
### Kubernetes Backend
- **Server**: `k8s-ingress` at `192.168.1.141:443`
- **Mode**: TCP
- **Health Checks**: Enabled (10s interval, 3 failures to mark
down, 2 successes to mark up)
### Docker Backend
- **Server**: `docker-proxy` at `192.168.1.135:443`
- **Mode**: TCP
- **Health Checks**: Enabled (10s interval, 3 failures to mark
down, 2 successes to mark up)
## Usage
The SSL hello packet is automatically inspected to determine the SNI
hostname, and the connection is routed to the appropriate backend
service.
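A quick way to verify the routing is to look at which certificate is served
for a given SNI name; a sketch, with `<haproxy_host>` as a placeholder for
the HAProxy address:

```bash
# The subject/issuer reveal which backend proxy answered for this SNI name
openssl s_client -connect <haproxy_host>:443 \
  -servername app.docker.mydomain.com </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -issuer
```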
## Notes
- TCP mode is used to preserve SSL/TLS encryption end-to-end
- The `mydomain.com` domain patterns are placeholders and should be
customized for the setup
- TCP routing can be monitored via journald for debugging and verification:
`journalctl -u haproxy -f`

infra/haproxy/haproxy.cfg (new file)

@@ -0,0 +1,42 @@
global
    log /dev/log local0
    log 127.0.0.1 local2
    stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
    stats timeout 30s
    user haproxy
    group haproxy
    daemon
    maxconn 10000

defaults
    log global
    mode tcp
    option dontlognull
    timeout connect 5s
    timeout client 3600s
    timeout server 3600s

frontend https-in
    bind *:443
    mode tcp
    option tcplog
    tcp-request inspect-delay 5s
    tcp-request content accept if { req_ssl_hello_type 1 }
    acl is_docker req_ssl_sni -i -m end .docker.mydomain.com
    acl is_k8s req_ssl_sni -i -m end .mydomain.com
    # More specific wins → put docker rule first
    use_backend docker_backend if is_docker
    use_backend k8s_backend if is_k8s
    default_backend k8s_backend

backend k8s_backend
    mode tcp
    server k8s-ingress 192.168.1.141:443 check inter 10s fall 3 rise 2

backend docker_backend
    mode tcp
    server docker-proxy 192.168.1.135:443 check inter 10s fall 3 rise 2
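Before (re)loading the service, the file can be syntax-checked; this assumes
the default installation path:

```bash
# Validate the configuration, then reload without dropping live connections
sudo haproxy -c -f /etc/haproxy/haproxy.cfg
sudo systemctl reload haproxy
```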

(modified file)

@@ -1,14 +1,11 @@
terraform {
backend "s3" {
bucket = "terraform-state" # Name of the MinIO bucket
key = "proxmox/terraform.tfstate" # Path to the state file in the bucket
endpoint = var.minio_endpoint # MinIO API endpoint
access_key = var.minio_access_key # MinIO access key
secret_key = var.minio_secret_key # MinIO secret key
region = "us-east-1" # Arbitrary region (MinIO ignores this)
skip_credentials_validation = true # Skip AWS-specific credential checks
skip_metadata_api_check = true # Skip AWS metadata API checks
skip_region_validation = true # Skip AWS region validation
use_path_style = true # Use path-style URLs (http://<host>/<bucket>)
backend "http" {
address = var.http_address
lock_address = var.http_lock_address
unlock_address = var.http_lock_address
lock_method = "POST"
unlock_method = "DELETE"
retry_wait_min = 5
}
}
}
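Terraform does not interpolate variables inside `backend` blocks, so values
like these are in practice supplied at `terraform init` time (or via the
`TF_HTTP_*` environment variables). A sketch assuming a GitLab-managed
Terraform state, with the project ID, state name, and credentials as
placeholders:

```bash
# Initialize the HTTP backend against GitLab's Terraform state API
terraform init \
  -backend-config="address=https://gitlab.example.com/api/v4/projects/<project_id>/terraform/state/proxmox" \
  -backend-config="lock_address=https://gitlab.example.com/api/v4/projects/<project_id>/terraform/state/proxmox/lock" \
  -backend-config="unlock_address=https://gitlab.example.com/api/v4/projects/<project_id>/terraform/state/proxmox/lock" \
  -backend-config="username=<gitlab_username>" \
  -backend-config="password=<gitlab_access_token>"
```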

(modified file)

@@ -3,28 +3,28 @@ pm_ssh_public_key_path = "/home/taqi/.ssh/homeserver.pub"
pm_ssh_private_key_path = "/home/taqi/.ssh/homeserver"
vms = [
{
name = "vm6"
node_name = "homeserver1"
vm_id = 105
ip_address = "192.168.1.151/24"
gateway = "192.168.1.1"
dns_servers = ["1.1.1.1"]
cores = 2
memory = 4096
disk_size = 20
},
{
name = "vm7"
node_name = "homeserver2"
vm_id = 205
ip_address = "192.168.1.161/24"
gateway = "192.168.1.1"
dns_servers = ["1.1.1.1"]
cores = 2
memory = 4096
disk_size = 20
},
# {
# name = "vm6"
# node_name = "homeserver1"
# vm_id = 105
# ip_address = "192.168.1.151/24"
# gateway = "192.168.1.1"
# dns_servers = ["1.1.1.1"]
# cores = 2
# memory = 4096
# disk_size = 20
# },
# {
# name = "vm7"
# node_name = "homeserver2"
# vm_id = 205
# ip_address = "192.168.1.161/24"
# gateway = "192.168.1.1"
# dns_servers = ["1.1.1.1"]
# cores = 2
# memory = 4096
# disk_size = 20
# },
{
name = "vm8"
node_name = "homeserver3"
@@ -57,6 +57,17 @@ vms = [
cores = 2
memory = 2048
disk_size = 20
},
{
name = "vm11"
node_name = "homeserver3"
vm_id = 304
ip_address = "192.168.1.175/24"
gateway = "192.168.1.1"
dns_servers = ["1.1.1.1"]
cores = 2
memory = 2048
disk_size = 20
}
]

(modified file)

@@ -1,16 +1,28 @@
# variables for minio backend configuration
variable "minio_access_key" {
description = "MinIO access key"
# variables for Terraform HTTP backend
variable "http_username" {
description = "Username for HTTP backend"
type = string
sensitive = true
}
variable "http_password" {
description = "Password for HTTP backend"
type = string
sensitive = true
}
variable "http_address" {
description = "HTTP backend address"
type = string
}
variable "minio_secret_key" {
description = "MinIO secret key"
variable "http_lock_address" {
description = "HTTP backend lock address"
type = string
}
variable "minio_endpoint" {
description = "MinIO API endpoint"
variable "http_unlock_address" {
description = "HTTP backend unlock address"
type = string
}
@@ -52,15 +64,15 @@ variable "pm_ssh_private_key_path" {
variable "vms" {
description = "List of VMs to create"
type = list(object({
name = string
node_name = string
vm_id = number
ip_address = string
name = string
node_name = string
vm_id = number
ip_address = string
dns_servers = list(string)
gateway = string
cores = number
memory = number
disk_size = number
gateway = string
cores = number
memory = number
disk_size = number
}))
}
@@ -78,4 +90,4 @@ variable "vm_user_password" {
description = "Password for the VM user"
type = string
sensitive = true
}
}

(modified file)

@@ -74,6 +74,7 @@ default values,
```bash
helm upgrade --install traefik traefik/traefik \
-f traefik/traefik-values.yaml \
-n kube-system \
--set ingressRoute.dashboard.enabled=true \
--set ingressRoute.dashboard.matchRule='Host(`dashboard.traefik`)' \
@@ -516,7 +517,14 @@ network via ingress. It is accessible locally via loadbalancer IP address.
source .env
helm upgrade --install \
qbittorrent qbittorrent-helm-chart/ \
-n media \
--set ingress.host=$QBITTORRENT_HOST \
--set wireguard.address=$WIREGUARD_IP \
--set wireguard.peerPublicKey=$WIREGUARD_PEER_PUBLIC_KEY \
--set wireguard.presharedKey=$WIREGUARD_PRESHARED_KEY \
--set wireguard.endpoint=$WIREGUARD_ENDPOINT \
--set wireguard.dns=$WIREGUARD_DNS \
--set wireguard.privateKey=$WIREGUARD_PRIVATE_KEY \
--atomic
```
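After the upgrade, the tunnel can be spot-checked from inside the pod; a
sketch that assumes the deployment is named after the `qbittorrent` release
(the `wg0` interface name comes from the chart templates):

```bash
# Confirm the WireGuard interface exists inside the running pod
kubectl -n media exec deploy/qbittorrent -- ip link show wg0
```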

(modified file)

@@ -16,7 +16,7 @@ gitea:
image:
repository: gitea
tag: 1.24.2
tag: 1.25.3
postgresql:
enabled: false

(modified file)

@@ -9,7 +9,6 @@ data:
Address = {{ .Values.wireguard.address }}
PrivateKey = {{ .Values.wireguard.privateKey }}
MTU = {{ .Values.wireguard.mtu }}
DNS = {{ .Values.wireguard.dns }}
ListenPort = {{ .Values.wireguard.listenPort }}
[Peer]
@@ -17,4 +16,4 @@ data:
PresharedKey = {{ .Values.wireguard.presharedKey }}
AllowedIPs = {{ .Values.wireguard.allowedIPs }}
Endpoint = {{ .Values.wireguard.endpoint }}
PersistentKeepalive = {{ .Values.wireguard.persistentKeepalive }}
PersistentKeepalive = {{ .Values.wireguard.persistentKeepalive }}

(modified file)

@@ -13,6 +13,14 @@ spec:
labels:
app: {{ .Release.Name }}
spec:
dnsPolicy: None
dnsConfig:
nameservers:
- 10.128.0.1
searches: []
options:
- name: ndots
value: "1"
initContainers:
- name: wireguard-init
image: {{ .Values.wireguardImage.repository }}:{{ .Values.wireguardImage.tag }}
@@ -29,21 +37,18 @@ spec:
- |
set -x
echo "Starting WireGuard initialization..."
mkdir -p /etc/wireguard
cp /config/wg_confs/wg0.conf /etc/wireguard/wg0.conf
chmod 600 /etc/wireguard/wg0.conf
mkdir -p /run/wireguard
cp /config/wg0.conf /run/wireguard/wg0.conf
chmod 600 /run/wireguard/wg0.conf
if ! lsmod | grep -q wireguard; then
modprobe wireguard || echo "Failed to load wireguard module"
fi
wg-quick up wg0 || echo "Failed to bring up WireGuard interface"
wg-quick up /run/wireguard/wg0.conf
ip link show wg0
wg show
volumeMounts:
- name: wireguard-config
mountPath: /config/wg_confs
mountPath: /config/
- name: modules
mountPath: /lib/modules
containers:
@@ -75,9 +80,7 @@ spec:
- -c
- |
while true; do
if ! ip link show wg0 > /dev/null 2>&1; then
wg-quick up wg0
fi
ip link show wg0 >/dev/null 2>&1 || exit 1
sleep 30
done
ports:
@@ -122,4 +125,4 @@ spec:
claimName: {{ .Values.persistence.downloads.existingClaim }}
- name: modules
hostPath:
path: /lib/modules
path: /lib/modules

(modified file)

@@ -6,8 +6,9 @@ deployment:
global:
systemDefaultRegistry: ""
image:
repository: rancher/mirrored-library-traefik
tag: 2.11.8
registry: docker.io
repository: traefik
tag: 3.6.6
priorityClassName: system-cluster-critical
providers:
kubernetesIngress:
@@ -24,3 +25,10 @@ tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
additionalArguments:
- "--entrypoints.web.transport.respondingtimeouts.readtimeout=600s"
- "--entrypoints.web.transport.respondingtimeouts.writetimeout=600s"
- "--entrypoints.web.transport.respondingtimeouts.idletimeout=600s"
- "--entrypoints.websecure.transport.respondingtimeouts.readtimeout=600s"
- "--entrypoints.websecure.transport.respondingtimeouts.writetimeout=600s"
- "--entrypoints.websecure.transport.respondingtimeouts.idletimeout=600s"