Compare commits

..

3 Commits

Author SHA1 Message Date
56c14df540 kubernetes: fix wireguard issue in qbittorrent-helm-chart
All checks were successful
ci/woodpecker/push/demo-workflow Pipeline was successful
2026-01-09 17:00:11 +02:00
b9fcdfc6b7 infra: add haproxy as central reverse proxy
- add haproxy to work as central reverse proxy
- based on the domain, it can route to either docker or k8s proxy
2026-01-09 17:00:01 +02:00
10f72b8b59 docker: update traefik reverse proxy dockerfile 2026-01-09 16:57:40 +02:00
14 changed files with 256 additions and 118 deletions

3
docker/.gitignore vendored
View File

@@ -1 +1,2 @@
.env
.env
**/.env

View File

@@ -1,18 +0,0 @@
version: "3"
services:
jackett:
image: "linuxserver/jackett"
container_name: "jackett"
env_file:
./.env
volumes:
- ${DOCKERDIR}/appdata/jackett:/config
- ${DATADIR}/downloads:/downloads
- "/etc/localtime:/etc/localtime:ro"
ports:
- "9117:9117"
restart: unless-stopped
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}

View File

@@ -1,9 +1,8 @@
version: "3"
services:
portainer:
image: portainer/portainer-ce:latest
ports:
- 9000:9000
# ports:
# - 9000:9000
volumes:
- /home/taqi/docker/portainer/data:/data
- /var/run/docker.sock:/var/run/docker.sock:ro
@@ -22,7 +21,7 @@ services:
- "traefik.http.routers.portainer-rtr.service=portainer-svc"
- "traefik.http.services.portainer-svc.loadbalancer.server.port=9000"
- "traefik.http.routers.traefik-rtr.middlewares=middlewares-rate-limit@file,middlewares-secure-headers@file"
command:
command:
--http-enabled
environment:
- TZ=${TZ}

View File

@@ -1,21 +0,0 @@
version: "3"
services:
radarr:
image: "linuxserver/radarr"
container_name: "radarr"
env_file:
./.env
volumes:
- ${DOCKERDIR}/appdata/radarr:/config
- ${DATADIR}/downloads:/downloads
- ${DATADIR}/movies:/movies
- "/etc/localtime:/etc/localtime:ro"
ports:
- "7878:7878"
restart: always
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
networks:
- bridge

View File

@@ -0,0 +1,36 @@
http:
middlewares:
# Rate Limiting Middleware
middlewares-rate-limit:
rateLimit:
average: 100
burst: 100
period: 1m
# Security Headers Middleware
middlewares-secure-headers:
headers:
browserXssFilter: true
contentTypeNosniff: true
frameDeny: true
permissionsPolicy: "GEOLOCATION 'none'; MICROPHONE 'none'; CAMERA 'none'"
referrerPolicy: "strict-origin-when-cross-origin"
stsIncludeSubdomains: true
stsMaxAge: 63072000
stsPreload: true
customFrameOptionsValue: "SAMEORIGIN"
customRequestHeaders:
X-Forwarded-Proto: "https"
tls:
options:
default:
#sniStrict: true # prevents leaking default cert; see https://doc.traefik.io/traefik/v2.2/https/tls/#strict-sni-checking
minVersion: VersionTLS12
cipherSuites:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305

View File

@@ -1,5 +1,3 @@
version: '3.8'
networks:
t3_proxy:
name: t3_proxy
@@ -8,16 +6,10 @@ networks:
config:
- subnet: 192.168.90.0/24
secrets:
basic_auth_credentials:
file: $DOCKERDIR/secrets/basic_auth_credentials
cf_dns_api_token:
file: $DOCKERDIR/secrets/cf_dns_api_token
services:
traefik:
container_name: traefik
image: traefik:3.0
image: traefik:3.6.6
restart: unless-stopped
env_file:
- ./.env
@@ -36,7 +28,6 @@ services:
- --api=true
- --api.dashboard=true
# - --api.insecure=true
- --entrypoints.websecure.forwardedHeaders.trustedIPs=$CLOUDFLARE_IPS,$LOCAL_IPS
- --log=true
- --log.filePath=/logs/traefik.log
- --log.level=DEBUG
@@ -52,6 +43,7 @@ services:
- --entrypoints.websecure.http.tls.domains[0].sans=*.$DOMAINNAME
- --providers.file.directory=/rules
- --providers.file.watch=true
- --certificatesresolvers.dns-cloudflare.acme.email=${CLOUDFLARE_EMAIL}
- --certificatesResolvers.dns-cloudflare.acme.storage=/acme.json
- --certificatesResolvers.dns-cloudflare.acme.dnsChallenge.provider=cloudflare
- --certificatesResolvers.dns-cloudflare.acme.dnsChallenge.resolvers=1.1.1.1:53,1.0.0.1:53
@@ -59,38 +51,23 @@ services:
# - 80:80
- 443:443
- 8080:8080
# - target: 80
# published: 80
# protocol: tcp
# mode: host
# - target: 443
# published: 443
# protocol: tcp
# mode: host
# - target: 8080
# published: 8585
# protocol: tcp
# mode: host
volumes:
- $DOCKERDIR/appdata/traefik3/rules/$HOSTNAME:/rules
- ./traefik-rules.yaml:/rules/traefik-rules.yaml
- /var/run/docker.sock:/var/run/docker.sock:ro
- $DOCKERDIR/appdata/traefik3/acme/acme.json:/acme.json
- $DOCKERDIR/logs/$HOSTNAME/traefik:/logs
- $DOCKERDIR/appdata/traefik/acme/acme.json:/acme.json
- $DOCKERDIR/logs/traefik:/logs
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=$TZ
- CF_DNS_API_TOKEN_FILE=/run/secrets/cf_dns_api_token
- HTPASSWD_FILE=/run/secrets/basic_auth_credentials
- CF_DNS_API_TOKEN=${CLOUDFLARE_TOKEN}
- DOMAINNAME=${DOMAINNAME}
secrets:
- cf_dns_api_token
- basic_auth_credentials
- CLOUDFLARE_EMAIL=${CLOUDFLARE_EMAIL}
labels:
- "traefik.enable=true"
- "traefik.http.routers.dashboard.tls=true"
- "traefik.http.routers.traefik-rtr.entrypoints=websecure"
- "traefik.http.routers.traefik-rtr.rule=Host(`traefik.${DOMAINNAME}`)"
- "traefik.http.routers.traefik-rtr.service=api@internal"
- "traefik.http.routers.dashboard.tls=true"
- "traefik.http.routers.api.entrypoints=websecure"
- "traefik.http.routers.api.rule=Host(`traefik.${DOMAINNAME}`)"
- "traefik.http.routers.api.service=api@internal"
# Middlewares
- "traefik.http.routers.traefik-rtr.middlewares=middlewares-rate-limit@file,middlewares-secure-headers@file,middlewares-basic-auth@file"
- "traefik.http.routers.api.middlewares=middlewares-rate-limit@file,middlewares-secure-headers@file"

93
infra/haproxy/README.md Normal file
View File

@@ -0,0 +1,93 @@
# HAProxy Configuration
## Rationale
This HAProxy instance serves as the primary entry point for the
homeserver infrastructure. It acts as a unified reverse proxy that
allows services from both Docker and Kubernetes environments to be
exposed behind a single, cohesive frontend. By using HAProxy with
SNI-based routing, the following can be achieved:
- **Centralize the external entry point** across multiple backend
environments
- **Route traffic dynamically** based on the requested domain to either
Docker or Kubernetes services
- **Maintain a single point of entry** for external clients while
distributing load across heterogeneous backends
- **Preserve TLS end-to-end** by passing encrypted traffic through to
the backend that owns the certificate
- **Note**: TLS termination and certificate management are not handled in this
setup; SSL/TLS traffic is passed through to backend services
## Overview
HAProxy is used as a reverse proxy and load balancer to route incoming
HTTPS traffic to the appropriate backend services in the homeserver
setup.
This HAProxy configuration implements SNI (Server Name Indication)
based routing to direct traffic to either the Kubernetes cluster or
Docker backend based on the requested domain.
## Global Settings
- **Logging**: Logs are written to syslog at `/dev/log` (local0) and
localhost (local2)
- **Admin Socket**: Accessible at `/run/haproxy/admin.sock` for
statistics and administration
- **Max Connections**: 10,000 concurrent connections
- **User/Group**: Runs as `haproxy` user and group
## Default Timeout Settings
- **Connect Timeout**: 5 seconds
- **Client Timeout**: 3600 seconds (1 hour)
- **Server Timeout**: 3600 seconds (1 hour)
## Frontend Configuration
The HAProxy frontend listens on port 443 (HTTPS) and TCP mode is used
for SSL/TLS traffic.
### SNI-Based Routing
Traffic is routed based on the SSL SNI (Server Name Indication)
hostname:
**Kubernetes Backend** (`k8s_backend`):
- Domains ending with `.mydomain.com`
**Docker Backend** (`docker_backend`):
- Domains ending with `.docker.mydomain.com`
## Backend Configuration
### Kubernetes Backend
- **Server**: `k8s-ingress` at `192.168.1.141:443`
- **Mode**: TCP
- **Health Checks**: Enabled (10s interval, 3 failures to mark
down, 2 successes to mark up)
### Docker Backend
- **Server**: `docker-proxy` at `192.168.1.135:443`
- **Mode**: TCP
- **Health Checks**: Enabled (10s interval, 3 failures to mark
down, 2 successes to mark up)
## Usage
The SSL hello packet is automatically inspected to determine the SNI
hostname, and the connection is routed to the appropriate backend
service.
## Notes
- TCP mode is used to preserve SSL/TLS encryption end-to-end
- The domain patterns (`.mydomain.com`, `.docker.mydomain.com`) are
placeholders and should be customized for the setup
- The TCP routing logs can be monitored via journald for debugging and
verification purposes. `journalctl -u haproxy -f`

42
infra/haproxy/haproxy.cfg Normal file
View File

@@ -0,0 +1,42 @@
global
log /dev/log local0
log 127.0.0.1 local2
stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
stats timeout 30s
user haproxy
group haproxy
daemon
maxconn 10000
defaults
log global
mode tcp
option dontlognull
timeout connect 5s
timeout client 3600s
timeout server 3600s
frontend https-in
bind *:443
mode tcp
option tcplog
tcp-request inspect-delay 5s
tcp-request content accept if { req_ssl_hello_type 1 }
acl is_docker req_ssl_sni -i -m end .docker.mydomain.com
acl is_k8s req_ssl_sni -i -m end .mydomain.com
# More specific wins → put docker rule first
use_backend docker_backend if is_docker
use_backend k8s_backend if is_k8s
default_backend k8s_backend
backend k8s_backend
mode tcp
server k8s-ingress 192.168.1.141:443 check inter 10s fall 3 rise 2
backend docker_backend
mode tcp
server docker-proxy 192.168.1.135:443 check inter 10s fall 3 rise 2

View File

@@ -3,28 +3,28 @@ pm_ssh_public_key_path = "/home/taqi/.ssh/homeserver.pub"
pm_ssh_private_key_path = "/home/taqi/.ssh/homeserver"
vms = [
{
name = "vm6"
node_name = "homeserver1"
vm_id = 105
ip_address = "192.168.1.151/24"
gateway = "192.168.1.1"
dns_servers = ["1.1.1.1"]
cores = 2
memory = 4096
disk_size = 20
},
{
name = "vm7"
node_name = "homeserver2"
vm_id = 205
ip_address = "192.168.1.161/24"
gateway = "192.168.1.1"
dns_servers = ["1.1.1.1"]
cores = 2
memory = 4096
disk_size = 20
},
# {
# name = "vm6"
# node_name = "homeserver1"
# vm_id = 105
# ip_address = "192.168.1.151/24"
# gateway = "192.168.1.1"
# dns_servers = ["1.1.1.1"]
# cores = 2
# memory = 4096
# disk_size = 20
# },
# {
# name = "vm7"
# node_name = "homeserver2"
# vm_id = 205
# ip_address = "192.168.1.161/24"
# gateway = "192.168.1.1"
# dns_servers = ["1.1.1.1"]
# cores = 2
# memory = 4096
# disk_size = 20
# },
{
name = "vm8"
node_name = "homeserver3"
@@ -57,6 +57,17 @@ vms = [
cores = 2
memory = 2048
disk_size = 20
},
{
name = "vm11"
node_name = "homeserver3"
vm_id = 304
ip_address = "192.168.1.175/24"
gateway = "192.168.1.1"
dns_servers = ["1.1.1.1"]
cores = 2
memory = 2048
disk_size = 20
}
]

View File

@@ -74,6 +74,7 @@ default values,
```bash
helm upgrade --install traefik traefik/traefik \
-f traefik/traefik-values.yaml \
-n kube-system \
--set ingressRoute.dashboard.enabled=true \
--set ingressRoute.dashboard.matchRule='Host(`dashboard.traefik`)' \
@@ -516,7 +517,14 @@ network via ingress. It is accessible locally via loadbalancer IP address.
source .env
helm upgrade --install \
qbittorrent qbittorrent-helm-chart/ \
-n media \
--set ingress.host=$QBITTORRENT_HOST \
--set wireguard.address=$WIREGUARD_IP \
--set wireguard.peerPublicKey=$WIREGUARD_PEER_PUBLIC_KEY \
--set wireguard.presharedKey=$WIREGUARD_PRESHARED_KEY \
--set wireguard.endpoint=$WIREGUARD_ENDPOINT \
--set wireguard.dns=$WIREGUARD_DNS \
--set wireguard.privateKey=$WIREGUARD_PRIVATE_KEY \
--atomic
```

View File

@@ -16,7 +16,7 @@ gitea:
image:
repository: gitea
tag: 1.24.2
tag: 1.25.3
postgresql:
enabled: false

View File

@@ -9,7 +9,6 @@ data:
Address = {{ .Values.wireguard.address }}
PrivateKey = {{ .Values.wireguard.privateKey }}
MTU = {{ .Values.wireguard.mtu }}
DNS = {{ .Values.wireguard.dns }}
ListenPort = {{ .Values.wireguard.listenPort }}
[Peer]
@@ -17,4 +16,4 @@ data:
PresharedKey = {{ .Values.wireguard.presharedKey }}
AllowedIPs = {{ .Values.wireguard.allowedIPs }}
Endpoint = {{ .Values.wireguard.endpoint }}
PersistentKeepalive = {{ .Values.wireguard.persistentKeepalive }}
PersistentKeepalive = {{ .Values.wireguard.persistentKeepalive }}

View File

@@ -13,6 +13,14 @@ spec:
labels:
app: {{ .Release.Name }}
spec:
dnsPolicy: None
dnsConfig:
nameservers:
- 10.128.0.1
searches: []
options:
- name: ndots
value: "1"
initContainers:
- name: wireguard-init
image: {{ .Values.wireguardImage.repository }}:{{ .Values.wireguardImage.tag }}
@@ -29,21 +37,18 @@ spec:
- |
set -x
echo "Starting WireGuard initialization..."
mkdir -p /etc/wireguard
cp /config/wg_confs/wg0.conf /etc/wireguard/wg0.conf
chmod 600 /etc/wireguard/wg0.conf
mkdir -p /run/wireguard
cp /config/wg0.conf /run/wireguard/wg0.conf
chmod 600 /run/wireguard/wg0.conf
if ! lsmod | grep -q wireguard; then
modprobe wireguard || echo "Failed to load wireguard module"
fi
wg-quick up wg0 || echo "Failed to bring up WireGuard interface"
wg-quick up /run/wireguard/wg0.conf
ip link show wg0
wg show
volumeMounts:
- name: wireguard-config
mountPath: /config/wg_confs
mountPath: /config/
- name: modules
mountPath: /lib/modules
containers:
@@ -75,9 +80,7 @@ spec:
- -c
- |
while true; do
if ! ip link show wg0 > /dev/null 2>&1; then
wg-quick up wg0
fi
ip link show wg0 >/dev/null 2>&1 || exit 1
sleep 30
done
ports:
@@ -122,4 +125,4 @@ spec:
claimName: {{ .Values.persistence.downloads.existingClaim }}
- name: modules
hostPath:
path: /lib/modules
path: /lib/modules

View File

@@ -6,8 +6,9 @@ deployment:
global:
systemDefaultRegistry: ""
image:
repository: rancher/mirrored-library-traefik
tag: 2.11.8
registry: docker.io
repository: traefik
tag: 3.6.6
priorityClassName: system-cluster-critical
providers:
kubernetesIngress:
@@ -24,3 +25,10 @@ tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
additionalArguments:
- "--entrypoints.web.transport.respondingtimeouts.readtimeout=600s"
- "--entrypoints.web.transport.respondingtimeouts.writetimeout=600s"
- "--entrypoints.web.transport.respondingtimeouts.idletimeout=600s"
- "--entrypoints.websecure.transport.respondingtimeouts.readtimeout=600s"
- "--entrypoints.websecure.transport.respondingtimeouts.writetimeout=600s"
- "--entrypoints.websecure.transport.respondingtimeouts.idletimeout=600s"