Kubernetes: added multiple microservices
- added minio object storage
- added immich photo viewer
- added cloudnative-pg postgres operator for db management
- added cronjobs to run different maintenance tasks
- updated readme
@@ -102,13 +102,9 @@ Next, deploy the docker registry with helm chart:

```bash
source .env
helm install registry docker-registry-helm-chart/ \
  --set host=$DOCKER_REGISTRY_HOST \
  --set ingress.tls.host=$REGISTRY_HOST \
  --atomic
```
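
As a quick smoke test (assuming the certificate is issued and `$DOCKER_REGISTRY_HOST` resolves), the registry's v2 HTTP API can be queried; a fresh install should return an empty repository list:

```bash
# Check the registry via the Docker Registry HTTP API v2
curl -s https://$DOCKER_REGISTRY_HOST/v2/_catalog
# Expected on a fresh registry: {"repositories":[]}
```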

@@ -289,7 +285,9 @@ helm repo add longhorn https://charts.longhorn.io
helm repo update

kubectl create namespace longhorn-system
helm install longhorn longhorn/longhorn \
  --namespace longhorn-system \
  -f values.yaml

kubectl -n longhorn-system get pods

@@ -332,6 +330,19 @@ kubectl edit configmap -n longhorn-system longhorn-storageclass
set the numberOfReplicas: "1"
```

## Multiple storage classes for different replica counts with Longhorn

To create multiple storage classes with different replica counts, create multiple storage class yaml files with different replica counts and apply them. The storage class name must be different for each storage class.

```bash
# Create a new storage class with 2 replicas
kubectl apply -n longhorn-system -f longhorn-storageclass-2-replica.yaml
# Create a new storage class with 3 replicas
kubectl apply -n longhorn-system -f longhorn-storageclass-3-replica.yaml
```
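
A workload opts into a given replica count simply by naming one of these storage classes in its claim. A minimal sketch (the claim name and size here are hypothetical):

```bash
# Hypothetical PVC using the 2-replica storage class defined above
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-2x-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-2x
  resources:
    requests:
      storage: 1Gi
EOF
```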

# Configure AdGuard Adblocker

AdGuard is deployed in the K3S cluster for network ad protection.

@@ -474,7 +485,7 @@ service is exposed via ingress and is accessible from the internet.
Configure a new user, database, and schema for Gitea in the postgres database.

```bash
CREATE ROLE gitea WITH LOGIN PASSWORD 'dummypassword';

CREATE DATABASE giteadb
    WITH OWNER gitea

@@ -599,3 +610,185 @@ helm install ldap \
  --atomic \
  -n ldap
```

# Minio Object Storage

MinIO is a High Performance Object Storage. It is compatible with Amazon S3. It is deployed in the k3s cluster using the helm chart.

The minio deployment is divided into two parts: the MinIO operator and the MinIO tenant. The MinIO operator is responsible for managing the MinIO deployment, and the MinIO tenant is responsible for managing the MinIO buckets and objects. The MinIO operator is deployed in the `minio-operator` namespace and the MinIO tenant is deployed in the `minio` namespace.

## Deploy MinIO Operator

For deploying the MinIO operator, the MinIO operator helm chart is used. The default values are sufficient for the operator deployment.

```bash
helm repo add minio https://operator.min.io/
helm repo update
helm install \
  --namespace minio-operator \
  --create-namespace \
  minio-operator minio/operator
```
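
The operator should be up before a tenant is created; a quick check:

```bash
# Expect the minio-operator pod(s) in Running state
kubectl get pods -n minio-operator
```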

## Deploy MinIO Tenant

The MinIO tenant is deployed in the `minio` namespace. The default values are overridden with the local values-tenant.yaml file.

```bash
source .env
kubectl create namespace minio
helm upgrade --install minio-tenant \
  minio/tenant \
  --namespace minio \
  -f minio/values-tenant.yaml \
  --set tenant.configSecret.accessKey=$MINIO_ROOT_USER \
  --set tenant.configSecret.secretKey=$MINIO_ROOT_PASSWORD \
  --set ingress.console.host=$MINIO_HOST \
  --set ingress.console.tls[0].hosts[0]=$MINIO_HOST \
  --atomic
```
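
The operator reconciles the tenant asynchronously, so the install returning is not the end of the story; the Tenant custom resource reports the provisioning state:

```bash
# The tenant should eventually report an Initialized/healthy state
kubectl get tenant -n minio
kubectl get pods -n minio
```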

# Deploy Database with CloudNativePG operator

Ref: https://cloudnative-pg.io/documentation/current/backup/#main-concepts

CloudNativePG is a Kubernetes operator that manages PostgreSQL clusters. First, deploy the operator in the `cnpg-system` namespace.

```bash
helm repo add cnpg https://cloudnative-pg.github.io/charts
helm upgrade --install cnpg \
  --namespace cnpg-system \
  --create-namespace \
  cnpg/cloudnative-pg
```

Next, deploy the PostgreSQL cluster in the `immich` namespace with backup configured towards the minio object storage.

```bash
source .env
kubectl create namespace immich

# First create the secret for minio access
envsubst < cloud-native-pg/secrets.yaml | kubectl apply -n immich -f -

# Then deploy the postgres cluster
envsubst < cloud-native-pg/cloudnative-pg.yaml | kubectl apply -n immich -f -

# Deploy the backup schedule
kubectl apply -f cloud-native-pg/backup.yaml -n immich
```
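
Once applied, the cluster and its backups are visible as custom resources, and the optional kubectl-cnpg plugin gives a fuller status report (cluster name `pg-backup` per the manifests below):

```bash
kubectl get clusters -n immich
kubectl get backups -n immich

# Richer view, assuming the kubectl-cnpg plugin is installed
kubectl cnpg status pg-backup -n immich
```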

## Recovery from Backup

Ref: https://cloudnative-pg.io/documentation/1.20/recovery/

There are two ways to recover the PostgreSQL cluster from a backup with cloudnative-pg:

1. Recovery from a volume snapshot - requires the cnpg plugin to take the snapshot with kubectl.
2. Recovery from a backup stored in object storage.

To recover from a backup stored in the object storage, apply the backup-recovery.yaml template with the desired values.

```bash
source .env
envsubst < cloud-native-pg/backup-recovery.yaml | kubectl apply -n immich -f -
```

## Create a new PostgreSQL cluster from existing Database

To create a new PostgreSQL cluster from an existing database, use the `create-cluster.yaml` template: it specifies the configuration needed to bootstrap the new cluster from the existing one, as sketched below.
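
A minimal, hypothetical sketch of such a bootstrap, using CloudNativePG's `initdb` import from an external cluster (cluster name, secret, and database here are illustrative; the actual `create-cluster.yaml` in this repo may differ):

```bash
# Hypothetical create-cluster sketch -- import one database from a running cluster
kubectl apply -n immich -f - <<'EOF'
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-imported
spec:
  instances: 1
  storage:
    storageClass: longhorn-2x
    size: 5Gi
  bootstrap:
    initdb:
      import:
        type: microservice
        databases:
          - immichdb
        source:
          externalCluster: pg-backup
  externalClusters:
    - name: pg-backup
      connectionParameters:
        host: pg-backup-rw.immich.svc.cluster.local
        user: postgres
        dbname: postgres
      password:
        name: pg-backup-superuser # secret created by the operator
        key: password
EOF
```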

# Immich Self-hosted Photo and Video Backup Solution

Immich is a self-hosted photo and video backup solution that is deployed in the k3s cluster. The Immich deployment uses the existing postgres database for data storage. The Immich service is exposed via ingress and is accessible from the internet.

To use the existing postgres database, first create a new user and database for Immich in the postgres database.

```bash
# Log into the postgres pod
kubectl exec -it -n immich pg-backup-1 -- psql -U postgres

# Then run the following commands in the psql shell
CREATE ROLE immich WITH LOGIN PASSWORD 'dummypassword';
ALTER ROLE immich WITH SUPERUSER;
CREATE DATABASE immichdb
    WITH OWNER immich
    TEMPLATE template0
    ENCODING UTF8
    LC_COLLATE 'en_US.UTF-8'
    LC_CTYPE 'en_US.UTF-8';

# Install pgvecto.rs extension
\c immichdb
CREATE EXTENSION vectors;
```

Next, create or verify the local disk for the immich backup:

```bash
ssh dockerhost

sudo mkdir -p /media/immich
# WARNING: mkfs.ext4 wipes the disk; only run this on a fresh disk
sudo mkfs.ext4 /dev/sdd
sudo mount /dev/sdd /media/immich
echo "/dev/sdd /media/immich ext4 defaults 0 2" | sudo tee -a /etc/fstab

echo "/media/immich 192.168.1.135/24(rw,sync,no_subtree_check,no_root_squash)" | sudo tee -a /etc/exports
sudo exportfs -a
```
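
Before wiring the share into Kubernetes, the export can be verified from another machine (hostname per this setup):

```bash
# Should list /media/immich among the exports
showmount -e dockerhost
```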

After that, create a PV and PVC for the immich backup storage.

```bash
source .env
envsubst < immich/persistence.yaml | kubectl apply -n immich -f -
```
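
The claim binds to the PV through the `app: local` label selector, so it is worth confirming both objects reach `Bound`:

```bash
kubectl get pv immich-library-pv
kubectl get pvc immich-library -n immich
```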

Finally, deploy the Immich helm chart with the following values:

```bash
source .env
helm upgrade --install \
  --namespace immich immich oci://ghcr.io/immich-app/immich-charts/immich \
  -f immich/values.yaml \
  --set env.DB_USERNAME=$IMMICH_DB_USER \
  --set env.DB_PASSWORD=$IMMICH_DB_PASSWORD \
  --set env.DB_DATABASE_NAME=$IMMICH_DB_NAME \
  --set server.ingress.main.hosts[0].host=$IMMICH_HOST \
  --set server.ingress.main.tls[0].hosts[0]=$IMMICH_HOST \
  --atomic
```
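
Once the release settles, the server rollout can be followed; the deployment name `immich-server` is the chart's usual naming and is an assumption here:

```bash
kubectl -n immich rollout status deployment/immich-server
```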

# Cron Jobs for Periodic Tasks

## Update DNS Record

This cronjob updates the current public IP address to the DNS record in Cloudflare. The script to update the DNS record is added to the cronjob as a configmap and then mounted as a volume in the cronjob pod. The script uses the Cloudflare API to update the DNS record with the current public IP address.

Currently the cronjob is scheduled to run every hour.

```bash
kubectl create namespace cronjobs --dry-run=client -o yaml | kubectl apply -f -
kubectl create secret generic cloudflare-secret \
  --from-literal=api-token=$CLOUDFLARE_TOKEN \
  -n cronjobs
kubectl apply -f cronjobs/update-dns/update_dns_configmap.yaml -n cronjobs
kubectl apply -f cronjobs/update-dns/update_dns_cronjob.yaml -n cronjobs
```
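
To exercise the job without waiting for the next scheduled run, a one-off job can be created from the CronJob:

```bash
# Trigger a manual run and follow its logs
kubectl create job --from=cronjob/update-dns-cronjob update-dns-manual -n cronjobs
kubectl logs -n cronjobs job/update-dns-manual -f
```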

kubernetes/cloud-native-pg/backup-recovery.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-restore
spec:
  instances: 1
  imageName: ghcr.io/tensorchord/cloudnative-pgvecto.rs:16.5-v0.3.0

  # superuserSecret:
  #   name: superuser-secret

  bootstrap:
    recovery:
      source: pg-backup # Name of the cluster to restore from

  externalClusters:
    - name: pg-backup # Name of the cluster to restore from
      barmanObjectStore:
        destinationPath: s3://immich/
        endpointURL: ${MINIO_ENDPOINT_URL}
        s3Credentials:
          accessKeyId:
            name: minio-creds
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: minio-creds
            key: ACCESS_SECRET_KEY
        wal:
          maxParallel: 4

  storage:
    storageClass: longhorn-2x
    size: 5Gi

kubernetes/cloud-native-pg/backup.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: immich-db-backup
spec:
  immediate: true
  schedule: "0 0 0 * * *" # At midnight every day
  backupOwnerReference: self
  cluster:
    name: pg-backup

kubernetes/cloud-native-pg/cloudnative-pg.yaml (new file, 48 lines)
@@ -0,0 +1,48 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: pg-backup
spec:
  instances: 1
  imageName: ghcr.io/tensorchord/cloudnative-pgvecto.rs:16.5-v0.3.0

  postgresql:
    shared_preload_libraries:
      - "vectors.so"

  managed:
    roles:
      - name: immich
        superuser: true
        login: true

  # Example of rolling update strategy:
  # - unsupervised: automated update of the primary once all
  #   replicas have been upgraded (default)
  # - supervised: requires manual supervision to perform
  #   the switchover of the primary
  primaryUpdateStrategy: unsupervised

  # Persistent storage configuration
  storage:
    storageClass: longhorn-2x
    size: 5Gi

  # Backup properties for MinIO
  backup:
    barmanObjectStore:
      destinationPath: s3://immich
      endpointURL: ${MINIO_ENDPOINT_URL}
      s3Credentials:
        accessKeyId:
          name: minio-creds
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: minio-creds
          key: ACCESS_SECRET_KEY
      wal:
        compression: gzip
      data:
        compression: gzip
    retentionPolicy: "15d"

kubernetes/cloud-native-pg/secrets.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
---
# Secret for MinIO credentials
apiVersion: v1
kind: Secret
metadata:
  name: minio-creds
type: Opaque
stringData:
  ACCESS_KEY_ID: ${MINIO_ACCESS_KEY_ID}
  ACCESS_SECRET_KEY: ${MINIO_ACCESS_SECRET_KEY}

kubernetes/cronjobs/update-dns/update_dns_configmap.yaml (new file, 107 lines)
@@ -0,0 +1,107 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: update-dns-script
data:
  update_dns.sh: |
    #! /usr/bin/env bash
    # This script updates a DNS record using the Cloudflare API.
    set -euo pipefail

    function get_my_ip() {
      curl -s https://api.ipify.org
    }

    function get_zone_id() {
      local zone_name="$1"
      local api_token="${CLOUDFLARE_API_TOKEN}"

      response=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=${zone_name}" \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer ${api_token}")

      if echo "$response" | grep -q '"success":true'; then
        echo "$response" | jq -r '.result[0].id'
      else
        echo "Failed to retrieve zone ID for ${zone_name}. Response: $response"
        exit 1
      fi
    }

    function get_dns_record_id() {
      local zone_id="$1"
      local api_token="${CLOUDFLARE_API_TOKEN}"

      response=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/${zone_id}/dns_records" \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer ${api_token}")

      if echo "${response}" | grep -q '"success":true'; then
        # Return the ID of the first A record in the zone
        for record in $(echo "${response}" | jq -r '.result[] | select(.type=="A") | .id'); do
          echo "${record}"
          return
        done
      else
        echo "Failed to retrieve DNS record ID for zone ${zone_id}. Response: $response"
        exit 1
      fi
    }

    function update_dns_record() {
      local zone_id="$1"
      local record_id="$2"
      local ip_address="$3"
      local api_token="${CLOUDFLARE_API_TOKEN}"

      local max_attempts=3
      local attempt=1
      local success=false

      while [ $attempt -le $max_attempts ] && [ "$success" = false ]; do
        if response=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/${zone_id}/dns_records/${record_id}" \
          -H "Content-Type: application/json" \
          -H "Authorization: Bearer ${api_token}" \
          -d '{
                "type": "A",
                "name": "tahmidcloud.com",
                "content": "'"${ip_address}"'",
                "ttl": 1,
                "proxied": false
              }'); then
          if echo "$response" | grep -q '"success":true'; then
            success=true
          else
            echo "Attempt $attempt failed. Response: $response"
          fi
        fi

        if [ "$success" = false ]; then
          if [ $attempt -lt $max_attempts ]; then
            echo "Retrying in 5 seconds..."
            sleep 5
          fi
          ((attempt++))
        fi
      done

      if [ "$success" = false ]; then
        echo "Failed to update DNS record after ${max_attempts} attempts"
        exit 1
      fi
      echo "DNS record updated successfully to IP address: ${ip_address}"
    }

    function main() {
      if [ -z "${CLOUDFLARE_API_TOKEN:-}" ]; then
        echo "CLOUDFLARE_API_TOKEN environment variable is not set."
        exit 1
      fi

      zone_id=$(get_zone_id "tahmidcloud.com")
      record_id=$(get_dns_record_id "${zone_id}")
      ip_address=$(get_my_ip)

      update_dns_record "${zone_id}" "${record_id}" "${ip_address}"
    }

    main
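
Since the script only needs bash, curl, and jq, it can be dry-tested outside the cluster before trusting the schedule (the token value is a placeholder):

```bash
export CLOUDFLARE_API_TOKEN='<your-api-token>'
bash update_dns.sh
```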

kubernetes/cronjobs/update-dns/update_dns_cronjob.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: update-dns-cronjob
spec:
  schedule: "15 * * * *"
  concurrencyPolicy: Replace
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: cron-container
              image: alpine/curl
              command: ["/bin/sh", "-c"]
              env:
                - name: CLOUDFLARE_API_TOKEN
                  valueFrom:
                    secretKeyRef:
                      name: cloudflare-secret
                      key: api-token
              args:
                - apk add --no-cache bash jq curl &&
                  /script/update_dns.sh
              volumeMounts:
                - name: script-volume
                  mountPath: /script
          volumes:
            - name: script-volume
              configMap:
                name: update-dns-script
                defaultMode: 0777
          restartPolicy: OnFailure

@@ -14,10 +14,11 @@ ingress:
  enabled: true
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    cert-manager.io/issuer: "letsencrypt-prod"
  tls:
    enabled: true
    host: "*.example.com"
    secretName: registry-tls

service:
  type: ClusterIP

kubernetes/immich/persistence.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: immich-library-pv
  labels:
    app: local
spec:
  capacity:
    storage: 900Gi
  accessModes:
    - ReadWriteMany
  nfs:
    path: /media/immich # Path to your NFS share
    server: "${NFS_SERVER}" # IP of your NFS server (replace with correct IP)
  persistentVolumeReclaimPolicy: Retain
  storageClassName: manual
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: immich-library
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 900Gi
  selector:
    matchLabels:
      app: local

kubernetes/immich/values.yaml (new file, 89 lines)
@@ -0,0 +1,89 @@
## This chart relies on the common library chart from bjw-s
## You can find it at https://github.com/bjw-s-labs/helm-charts/tree/923ef40a39520979c98f354ea23963ee54f54433/charts/library/common
## Refer there for more detail about the supported values

# These entries are shared between all the Immich components

env:
  # REDIS_HOSTNAME: '{{ printf "%s-redis-master" .Release.Name }}'
  DB_HOSTNAME: pg-backup-rw.immich.svc.cluster.local
  DB_USERNAME: placeholder
  DB_DATABASE_NAME: immich
  # -- You should provide your own secret outside of this helm-chart and use `postgresql.global.postgresql.auth.existingSecret` to provide credentials to the postgresql instance
  DB_PASSWORD: placeholder
  IMMICH_IGNORE_MOUNT_CHECK_ERRORS: "true"
  # IMMICH_MACHINE_LEARNING_URL: '{{ printf "http://%s-machine-learning:3003" .Release.Name }}'

image:
  tag: v1.119.0

immich:
  metrics:
    # Enabling this will create the service monitors needed to monitor immich with the prometheus operator
    enabled: false
  persistence:
    # Main data store for all photos shared between different components.
    library:
      # Automatically creating the library volume is not supported by this chart
      # You have to specify an existing PVC to use
      existingClaim: immich-library
  # configuration is immich-config.json converted to yaml
  # ref: https://immich.app/docs/install/config-file/
  #
  configuration:
    trash:
      enabled: true
      days: 30
    storageTemplate:
      enabled: true
      template: "{{y}}/{{y}}-{{MM}}-{{dd}}/{{filename}}"

# Dependencies

# DEPRECATED
# The postgres subchart is deprecated and will be removed in chart version 0.10.0
# See https://github.com/immich-app/immich-charts/issues/149 for more detail.
postgresql:
  enabled: false

redis:
  enabled: true
  architecture: standalone
  auth:
    enabled: false

# Immich components
server:
  enabled: true
  image:
    repository: ghcr.io/immich-app/immich-server
    pullPolicy: IfNotPresent
  ingress:
    main:
      enabled: true
      annotations:
        traefik.ingress.kubernetes.io/router.entrypoints: websecure
        cert-manager.io/issuer: "letsencrypt-prod"
      hosts:
        - host: placeholder.immich.app
          paths:
            - path: "/"
      tls:
        - secretName: immich-tls
          hosts:
            - placeholder.immich.app

machine-learning:
  enabled: false # disabled due to resource constraints
  image:
    repository: ghcr.io/immich-app/immich-machine-learning
    pullPolicy: IfNotPresent
  env:
    TRANSFORMERS_CACHE: /cache
  persistence:
    cache:
      enabled: true
      size: 10Gi
      # Optional: Set this to pvc to avoid downloading the ML models every start.
      type: pvc
      accessMode: ReadWriteMany

kubernetes/longhorn/longhorn-storageclass-2-replica.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
# longhorn-2x.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: longhorn-2x
provisioner: driver.longhorn.io
parameters:
  numberOfReplicas: "2"
  staleReplicaTimeout: "2880" # 48 hours in minutes (optional)
  fsType: "ext4"
allowVolumeExpansion: true

kubernetes/longhorn/longhorn-storageclass-3-replica.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
# longhorn-3x.yaml (High Availability)
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: longhorn-3x
provisioner: driver.longhorn.io
parameters:
  numberOfReplicas: "3"
  staleReplicaTimeout: "2880" # 48 hours in minutes (optional)
  fsType: "ext4"
allowVolumeExpansion: true

kubernetes/minio/values-tenant.yaml (new file, 523 lines)
@@ -0,0 +1,523 @@
# Root key for MinIO Tenant Chart
tenant:
  ###
  # The Tenant name
  #
  # Change this to match your preferred MinIO Tenant name.
  name: myminio
  ###
  # Specify the Operator container image to use for the deployment.
  # ``image.tag``
  # For example, the following sets the image to the ``quay.io/minio/operator`` repo and the v7.1.1 tag.
  # The container pulls the image if not already present:
  #
  # .. code-block:: yaml
  #
  #    image:
  #      repository: quay.io/minio/minio
  #      tag: RELEASE.2025-04-08T15-41-24Z
  #      pullPolicy: IfNotPresent
  #
  # The chart also supports specifying an image based on digest value:
  #
  # .. code-block:: yaml
  #
  #    image:
  #      repository: quay.io/minio/minio@sha256
  #      digest: 28c80b379c75242c6fe793dfbf212f43c602140a0de5ebe3d9c2a3a7b9f9f983
  #      pullPolicy: IfNotPresent
  #
  #
  image:
    repository: quay.io/minio/minio
    tag: RELEASE.2025-04-08T15-41-24Z
    pullPolicy: IfNotPresent
  ###
  #
  # An array of Kubernetes secrets to use for pulling images from a private ``image.repository``.
  # Only one array element is supported at this time.
  imagePullSecret: { }
  ###
  #
  # Specify `initContainers <https://kubernetes.io/docs/concepts/workloads/pods/init-containers/>`__ to perform setup or configuration tasks before the main Tenant pods starts.
  #
  # Example of init container which waits for identity provider to be reachable before starting MinIO Tenant:
  #
  # .. code-block:: yaml
  #
  #    initContainers:
  #      - name: wait-for-idp
  #        image: busybox
  #        command:
  #          - sh
  #          - -c
  #          - |
  #            URL="https://idp-url"
  #            echo "Checking IdP reachability (${URL})"
  #            until $(wget -q -O "/dev/null" ${URL}) ; do
  #              echo "IdP (${URL}) not reachable. Waiting to be reachable..."
  #              sleep 5
  #            done
  #            echo "IdP (${URL}) reachable. Starting MinIO..."
  #
  initContainers: [ ]
  ###
  # The Kubernetes `Scheduler <https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/>`__ to use for dispatching Tenant pods.
  #
  # Specify an empty dictionary ``{}`` to dispatch pods with the default scheduler.
  scheduler: { }
  ###
  # Root key for dynamically creating a secret for use with configuring root MinIO User
  # Specify the ``name`` and then a list of environment variables.
  #
  # .. important::
  #
  #    Do not use this in production environments.
  #    This field is intended for use with rapid development or testing only.
  #
  # For example:
  #
  # .. code-block:: yaml
  #
  #    name: myminio-env-configuration
  #    accessKey: minio
  #    secretKey: minio123
  #
  configSecret:
    name: myminio-env-configuration
    accessKey: minio
    secretKey: minio123
    #existingSecret: true

  ###
  # Metadata that will be added to the statefulset and pods of all pools
  poolsMetadata:
    ###
    # Specify `annotations <https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/>`__ to associate to Tenant pods.
    annotations: { }
    ###
    # Specify `labels <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__ to associate to Tenant pods.
    labels: { }

  ###
  # If this variable is set to true, then enable the usage of an existing Kubernetes secret to set environment variables for the Tenant.
  # The existing Kubernetes secret name must be placed under .tenant.configuration.name e.g. existing-minio-env-configuration
  # The secret must contain a key ``config.env``.
  # The values should be a series of export statements to set environment variables for the Tenant.
  # For example:
  #
  # .. code-block:: shell
  #
  #    stringData:
  #      config.env: |-
  #        export MINIO_ROOT_USER=ROOTUSERNAME
  #        export MINIO_ROOT_PASSWORD=ROOTUSERPASSWORD
  #
  # existingSecret: false
  ###
  # Top level key for configuring MinIO Pool(s) in this Tenant.
  #
  # See `Operator CRD: Pools <https://min.io/docs/minio/kubernetes/upstream/reference/operator-crd.html#pool>`__ for more information on all subfields.
  pools:
    ###
    # The number of MinIO Tenant Pods / Servers in this pool.
    # For standalone mode, supply 1. For distributed mode, supply 4 or more.
    # Note that the operator does not support upgrading from standalone to distributed mode.
    - servers: 2
      ###
      # Custom name for the pool
      name: pool-0
      ###
      # The number of volumes attached per MinIO Tenant Pod / Server.
      volumesPerServer: 2
      ###
      # The capacity per volume requested per MinIO Tenant Pod.
      size: 20Gi
      ###
      # The `storageClass <https://kubernetes.io/docs/concepts/storage/storage-classes/>`__ to associate with volumes generated for this pool.
      #
      # If using Amazon Elastic Block Store (EBS) CSI driver
      # Please make sure to set xfs for "csi.storage.k8s.io/fstype" parameter under StorageClass.parameters.
      # Docs: https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/parameters.md
      storageClassName: longhorn-2x
      ###
      # Specify `storageAnnotations <https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/>`__ to associate to PVCs.
      storageAnnotations: { }
      ###
      # Specify `storageLabels <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__ to associate to PVCs.
      storageLabels: { }
      ###
      # Specify `annotations <https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/>`__ to associate to Tenant pods.
      annotations: { }
      ###
      # Specify `labels <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__ to associate to Tenant pods.
      labels: { }
      ###
      #
      # An array of `Toleration labels <https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/>`__ to associate to Tenant pods.
      #
      # These settings determine the distribution of pods across worker nodes.
      tolerations: [ ]
      ###
      # Any `Node Selectors <https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/>`__ to apply to Tenant pods.
      #
      # The Kubernetes scheduler uses these selectors to determine which worker nodes onto which it can deploy Tenant pods.
      #
      # If no worker nodes match the specified selectors, the Tenant deployment will fail.
      nodeSelector: { }
      ###
      #
      # The `affinity <https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/>`__ or anti-affinity settings to apply to Tenant pods.
      #
      # These settings determine the distribution of pods across worker nodes and can help prevent or allow colocating pods onto the same worker nodes.
      affinity: { }
      ###
      #
      # The `Requests or Limits <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>`__ for resources to associate to Tenant pods.
      #
      # These settings can control the minimum and maximum resources requested for each pod.
      # If no worker nodes can meet the specified requests, the Operator may fail to deploy.
      resources: { }
      ###
      # The Kubernetes `SecurityContext <https://kubernetes.io/docs/tasks/configure-pod-container/security-context/>`__ to use for deploying Tenant resources.
      #
      # You may need to modify these values to meet your cluster's security and access settings.
      #
      # We recommend disabling recursive permission changes by setting ``fsGroupChangePolicy`` to ``OnRootMismatch`` as those operations can be expensive for certain workloads (e.g. large volumes with many small files).
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        fsGroup: 1000
        fsGroupChangePolicy: "OnRootMismatch"
        runAsNonRoot: true
      ###
      # The Kubernetes `SecurityContext <https://kubernetes.io/docs/tasks/configure-pod-container/security-context/>`__ to use for deploying Tenant containers.
      # You may need to modify these values to meet your cluster's security and access settings.
      containerSecurityContext:
        runAsUser: 1000
        runAsGroup: 1000
        runAsNonRoot: true
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL
        seccompProfile:
          type: RuntimeDefault
      ###
      #
      # An array of `Topology Spread Constraints <https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/>`__ to associate to Operator Console pods.
      #
      # These settings determine the distribution of pods across worker nodes.
      topologySpreadConstraints: [ ]
      ###
      #
      # The name of a custom `Container Runtime <https://kubernetes.io/docs/concepts/containers/runtime-class/>`__ to use for the Operator Console pods.
      # runtimeClassName: ""
  ###
  # The mount path where Persistent Volumes are mounted inside Tenant container(s).
  mountPath: /export
  ###
  # The Sub path inside Mount path where MinIO stores data.
  #
  # .. warning::
  #
  #    Treat the ``mountPath`` and ``subPath`` values as immutable once you deploy the Tenant.
  #    If you change these values post-deployment, then you may have different paths for new and pre-existing data.
  #    This can vastly increase operational complexity and may result in unpredictable data states.
  subPath: /data
  ###
  # Configures a Prometheus-compatible scraping endpoint at the specified port.
  metrics:
    enabled: false
    port: 9000
    protocol: http
  ###
  # Configures external certificate settings for the Tenant.
  certificate:
    ###
    # Specify an array of Kubernetes TLS secrets, where each entry corresponds to a secret the TLS private key and public certificate pair.
    #
    # This is used by MinIO to verify TLS connections from clients using those CAs
    # If you omit this and have clients using TLS certificates minted by an external CA, those connections may fail with warnings around certificate verification.
    # See `Operator CRD: TenantSpec <https://min.io/docs/minio/kubernetes/upstream/reference/operator-crd.html#tenantspec>`__.
    externalCaCertSecret: [ ]
    ###
    # Specify an array of Kubernetes secrets, where each entry corresponds to a secret contains the TLS private key and public certificate pair.
    #
    # Omit this to use only the MinIO Operator autogenerated certificates.
    #
    # If you omit this field *and* set ``requestAutoCert`` to false, the Tenant starts without TLS.
    #
    # See `Operator CRD: TenantSpec <https://min.io/docs/minio/kubernetes/upstream/reference/operator-crd.html#tenantspec>`__.
    #
    # .. important::
    #
    #    The MinIO Operator may output TLS connectivity errors if it cannot trust the Certificate Authority (CA) which minted the custom certificates.
    #
    #    You can pass the CA to the Operator to allow it to trust that cert.
    #    See `Self-Signed, Internal, and Private Certificates <https://min.io/docs/minio/kubernetes/upstream/operations/network-encryption.html#self-signed-internal-and-private-certificates>`__ for more information.
    #    This step may also be necessary for globally trusted CAs where you must provide intermediate certificates to the Operator to help build the full chain of trust.
    externalCertSecret: [ ]
    ###
    # Enable automatic Kubernetes based `certificate generation and signing <https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster>`__
    requestAutoCert: false
    ###
    # The minimum number of days to expiry before an alert for an expiring certificate is fired.
    # In the below example, if a given certificate will expire in 7 days then expiration events will only be triggered 1 day before expiry
    # certExpiryAlertThreshold: 1
    ###
    # This field is used only when ``requestAutoCert: true``.
    # Use this field to set CommonName for the auto-generated certificate.
    # MinIO defaults to using the internal Kubernetes DNS name for the pod
    # The default DNS name format is typically ``*.minio.default.svc.cluster.local``.
    #
    # See `Operator CRD: CertificateConfig <https://min.io/docs/minio/kubernetes/upstream/reference/operator-crd.html#certificateconfig>`__
    certConfig: { }
  ###
  # MinIO features to enable or disable in the MinIO Tenant
  # See `Operator CRD: Features <https://min.io/docs/minio/kubernetes/upstream/reference/operator-crd.html#features>`__.
  features:
    bucketDNS: false
    domains: { }
    enableSFTP: false
  ###
  # Array of objects describing one or more buckets to create during tenant provisioning.
  # Example:
  #
  # .. code-block:: yaml
  #
  #    - name: my-minio-bucket
  #      objectLock: false # optional
  #      region: us-east-1 # optional
  buckets: [ ]
  ###
  # Array of Kubernetes secrets from which the Operator generates MinIO users during tenant provisioning.
  #
  # Each secret should specify the ``CONSOLE_ACCESS_KEY`` and ``CONSOLE_SECRET_KEY`` as the access key and secret key for that user.
  users: [ ]
  ###
  # The `PodManagement <https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy>`__ policy for MinIO Tenant Pods.
  # Can be "OrderedReady" or "Parallel"
  podManagementPolicy: Parallel
  # The `Liveness Probe <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes>`__ for monitoring Tenant pod liveness.
  # Tenant pods will be restarted if the probe fails.
  liveness: { }
  ###
  # `Readiness Probe <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>`__ for monitoring Tenant container readiness.
  # Tenant pods will be removed from service endpoints if the probe fails.
  readiness: { }
  ###
  # `Startup Probe <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>`__ for monitoring container startup.
  # Tenant pods will be restarted if the probe fails.
  # Refer
  startup: { }
  ###
  # The `Lifecycle hooks <https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/>`__ for container.
  lifecycle: { }
  ###
  # Directs the Operator to deploy the MinIO S3 API and Console services as LoadBalancer objects.
  #
  # If the Kubernetes cluster has a configured LoadBalancer, it can attempt to route traffic to those services automatically.
  #
  # - Specify ``minio: true`` to expose the MinIO S3 API.
  # - Specify ``console: true`` to expose the Console.
  #
  # Both fields default to ``false``.
  exposeServices:
    minio: false
    console: false
  ###
  # The `Kubernetes Service Account <https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/>`__ associated with the Tenant.
  serviceAccountName: ""
  ###
  # Directs the Operator to add the Tenant's metric scrape configuration to an existing Kubernetes Prometheus deployment managed by the Prometheus Operator.
  prometheusOperator: false
  ###
  # Configure pod logging configuration for the MinIO Tenant.
  #
  # - Specify ``json`` for JSON-formatted logs.
  # - Specify ``anonymous`` for anonymized logs.
  # - Specify ``quiet`` to suppress logging.
  #
  # An example of JSON-formatted logs is as follows:
  #
  # .. code-block:: shell
  #
  #    $ k logs myminio-pool-0-0 -n default
  #    {"level":"INFO","errKind":"","time":"2022-04-07T21:49:33.740058549Z","message":"All MinIO sub-systems initialized successfully"}
  logging: { }
  ###
  # serviceMetadata allows passing additional labels and annotations to MinIO and Console specific
  # services created by the operator.
  serviceMetadata: { }
  ###
  # Add environment variables to be set in MinIO container (https://github.com/minio/minio/tree/master/docs/config)
  env: [ ]
  ###
  # PriorityClassName indicates the Pod priority and hence importance of a Pod relative to other Pods.
  # This is applied to MinIO pods only.
  # Refer Kubernetes documentation for details https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass/
  priorityClassName: ""
  ###
  # An array of `Volumes <https://kubernetes.io/docs/concepts/storage/volumes/>`__ which the Operator can mount to Tenant pods.
  #
  # The volumes must exist *and* be accessible to the Tenant pods.
  additionalVolumes: [ ]
  ###
  # An array of volume mount points associated to each Tenant container.
  #
  # Specify each item in the array as follows:
  #
  # .. code-block:: yaml
  #
  #    volumeMounts:
  #      - name: volumename
  #        mountPath: /path/to/mount
  #
  # The ``name`` field must correspond to an entry in the ``additionalVolumes`` array.
  additionalVolumeMounts: [ ]
  # Define configuration for KES (stateless and distributed key-management system)
  # Refer https://github.com/minio/kes
  #kes:
  #  ## Image field:
  #  # Image from tag (original behavior), for example:
  #  # image:
  #  #   repository: quay.io/minio/kes
  #  #   tag: 2025-03-12T09-35-18Z
  #  # Image from digest (added after original behavior), for example:
  #  # image:
  #  #   repository: quay.io/minio/kes@sha256
  #  #   digest: fb15af611149892f357a8a99d1bcd8bf5dae713bd64c15e6eb27fbdb88fc208b
  #  image:
  #    repository: quay.io/minio/kes
  #    tag: 2025-03-12T09-35-18Z
  #    pullPolicy: IfNotPresent
  #  env: [ ]
  #  replicas: 2
  #  configuration: |-
  #    address: :7373
  #    tls:
  #      key: /tmp/kes/server.key # Path to the TLS private key
  #      cert: /tmp/kes/server.crt # Path to the TLS certificate
  #    proxy:
  #      identities: []
  #      header:
  #        cert: X-Tls-Client-Cert
  #    admin:
  #      identity: ${MINIO_KES_IDENTITY}
  #    cache:
  #      expiry:
  #        any: 5m0s
  #        unused: 20s
  #    log:
  #      error: on
  #      audit: off
  #    keystore:
  #      # KES configured with fs (File System mode) doesn't work in Kubernetes environments and is not recommended
  #      # use a real KMS
  #      # fs:
  #      #   path: "./keys" # Path to directory. Keys will be stored as files. Not Recommended for Production.
  #      vault:
  #        endpoint: "http://vault.default.svc.cluster.local:8200" # The Vault endpoint
  #        namespace: "" # An optional Vault namespace. See: https://www.vaultproject.io/docs/enterprise/namespaces/index.html
  #        prefix: "my-minio" # An optional K/V prefix. The server will store keys under this prefix.
  #        approle: # AppRole credentials. See: https://www.vaultproject.io/docs/auth/approle.html
  #          id: "<YOUR APPROLE ID HERE>" # Your AppRole Role ID
  #          secret: "<YOUR APPROLE SECRET ID HERE>" # Your AppRole Secret ID
  #          retry: 15s # Duration until the server tries to re-authenticate after connection loss.
  #        tls: # The Vault client TLS configuration for mTLS authentication and certificate verification
  #          key: "" # Path to the TLS client private key for mTLS authentication to Vault
  #          cert: "" # Path to the TLS client certificate for mTLS authentication to Vault
  #          ca: "" # Path to one or multiple PEM root CA certificates
  #        status: # Vault status configuration. The server will periodically reach out to Vault to check its status.
  #          ping: 10s # Duration until the server checks Vault's status again.
  #      # aws:
  #      #   # The AWS SecretsManager key store. The server will store
  #      #   # secret keys at the AWS SecretsManager encrypted with
  #      #   # AWS-KMS. See: https://aws.amazon.com/secrets-manager
  #      #   secretsmanager:
  #      #     endpoint: "" # The AWS SecretsManager endpoint - e.g.: secretsmanager.us-east-2.amazonaws.com
  #      #     region: "" # The AWS region of the SecretsManager - e.g.: us-east-2
  #      #     kmskey: "" # The AWS-KMS key ID used to en/decrypt secrets at the SecretsManager. By default (if not set) the default AWS-KMS key will be used.
  #      #     credentials: # The AWS credentials for accessing secrets at the AWS SecretsManager.
  #      #       accesskey: "" # Your AWS Access Key
  #      #       secretkey: "" # Your AWS Secret Key
  #      #       token: "" # Your AWS session token (usually optional)
  #  imagePullPolicy: "IfNotPresent"
  #  externalCertSecret: null
  #  clientCertSecret: null
  #  # Key name to be created on the KMS, default is "my-minio-key"
  #  keyName: ""
  #  resources: { }
  #  nodeSelector: { }
  #  affinity:
  #    nodeAffinity: { }
  #    podAffinity: { }
  #    podAntiAffinity: { }
  #  tolerations: [ ]
  #  annotations: { }
  #  labels: { }
  #  serviceAccountName: ""
  #  securityContext:
  #    runAsUser: 1000
  #    runAsGroup: 1000
  #    runAsNonRoot: true
  #    fsGroup: 1000
  #  containerSecurityContext:
  #    runAsUser: 1000
  #    runAsGroup: 1000
  #    runAsNonRoot: true
  #    allowPrivilegeEscalation: false
  #    capabilities:
  #      drop:
  #        - ALL
  #    seccompProfile:
  #      type: RuntimeDefault

###
# Configures `Ingress <https://kubernetes.io/docs/concepts/services-networking/ingress/>`__ for the Tenant S3 API and Console.
#
# Set the keys to conform to the Ingress controller and configuration of your choice.
# Disabled due to security concerns.
ingress:
  api:
    enabled: false
    ingressClassName:
    labels: { }
    annotations:
    tls:
      - secretName: minio-tls
        hosts:
          - api.minio.local
    host: minio.local
    path: /api
    pathType: Prefix
  console:
    enabled: true
    ingressClassName: "traefik"
    labels: { }
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      cert-manager.io/issuer: "letsencrypt-prod"
      traefik.ingress.kubernetes.io/service.serversTransport: insecure-transport
      traefik.ingress.kubernetes.io/router.middlewares: kube-system-ip-whitelist@kubernetescrd
    tls:
      - secretName: minio-console-tls
        hosts:
          - minio.local
    host: minio.local
    path: /
    pathType: Prefix
# Use an extraResources template section to include additional Kubernetes resources
# with the Helm deployment.
#extraResources:
#  - |
#    apiVersion: v1
#    kind: Secret
#    type: Opaque
#    metadata:
#      name: {{ dig "tenant" "configSecret" "name" "" (.Values | merge (dict)) }}
#    stringData:
#      config.env: |-
#        export MINIO_ROOT_USER='minio'
#        export MINIO_ROOT_PASSWORD='minio123'

kubernetes/traefik/traefik-middleware/ip-whitelist.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: ip-whitelist
  namespace: minio
spec:
  ipWhiteList:
    sourceRange:
      - 192.168.1.0/24
      - 87.92.7.212/32
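
Traefik resolves Kubernetes CRD middleware references as `<namespace>-<name>@kubernetescrd`, so a Middleware defined in the `minio` namespace would be attached roughly as shown below (the ingress name is hypothetical; note that the tenant values above reference `kube-system-ip-whitelist@kubernetescrd`, which assumes a copy of the middleware in `kube-system`):

```bash
# Hypothetical: attach the minio-namespace middleware to an existing ingress
kubectl annotate ingress <ingress-name> -n minio \
  traefik.ingress.kubernetes.io/router.middlewares=minio-ip-whitelist@kubernetescrd
```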