updated README

2025-05-11 20:43:36 +03:00
parent a6df84d495
commit 1b8923afb1
4 changed files with 377 additions and 35 deletions


@@ -163,19 +163,11 @@ kubectl expose deployment nginx --port=80 --type=LoadBalancer -n nginx
kubectl delete namespace nginx
```
-## 🤝 Contributing
-Contributions welcome! Feel free to open issues or submit PRs.
-## 📝 License
-MIT License - feel free to use this as a template for your own homelab!
-# Upgrade K3s cluster
+## Upgrade K3s cluster
Ref: https://github.com/k3s-io/k3s-upgrade
-## Deploying the K3s Upgrade Controller
+### Deploying the K3s Upgrade Controller
First, deploy the K3s upgrade controller.
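A minimal sketch of that step, assuming the controller is installed straight from the upstream release manifest of `rancher/system-upgrade-controller` (the project the ref above builds on):

```bash
# Deploy the system-upgrade-controller from its latest release manifest
kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/latest/download/system-upgrade-controller.yaml
```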
@@ -193,7 +185,7 @@ kubectl create clusterrolebinding system-upgrade \
--serviceaccount=system-upgrade:system-upgrade
```
-## Create the upgrade plan
+### Label the nodes
First, label the selected node with the `k3s-upgrade=true` label; the
upgrade plan uses this label to select which node to upgrade.
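For example, using `vm4` (the node drained later in this guide):

```bash
# Mark vm4 as the next node to upgrade
kubectl label node vm4 k3s-upgrade=true
```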
@@ -206,7 +198,7 @@ It is best practice to upgrade nodes one at a time. That way the cluster
stays operational during the upgrade, and if any issues arise the upgrade
can be rolled back.
-## Create the upgrade plan
+### Create the upgrade plan
Then create the upgrade plan. The plan is created in the `system-upgrade`
namespace; you can change this with the `--namespace` flag.
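A sketch of such a plan, modeled on the upstream k3s-upgrade example; the plan name and target `version` are placeholders:

```bash
kubectl apply -f - <<'EOF'
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: k3s-upgrade-plan      # placeholder name
  namespace: system-upgrade
spec:
  concurrency: 1              # one node at a time, as recommended above
  version: v1.32.3+k3s1       # placeholder target K3s version
  nodeSelector:
    matchExpressions:
      - {key: k3s-upgrade, operator: In, values: ["true"]}
  serviceAccountName: system-upgrade
  upgrade:
    image: rancher/k3s-upgrade
EOF
```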
@@ -227,3 +219,11 @@ kubectl drain vm4 --ignore-daemonsets \
--delete-emptydir-data \
--pod-selector='app!=csi-attacher,app!=csi-provisioner'
```
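Once the upgraded node reports Ready again, re-enable scheduling and drop the selector label so the plan stops matching it (assumed follow-up steps):

```bash
kubectl uncordon vm4
kubectl label node vm4 k3s-upgrade-   # trailing '-' removes the label
```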
+## 🤝 Contributing
+Contributions welcome! Feel free to open issues or submit PRs.
+## 📝 License
+MIT License - feel free to use this as a template for your own homelab!


@@ -148,9 +148,9 @@ services running locally or remotely.
```bash
source .env
kubectl create namespace external-services
-kubectl get secret wildcard-cert-secret --namespace=cert -o yaml \
-| sed 's/namespace: cert/namespace: external-services/' | kubectl apply -f -
-envsubst < external-service/proxmox.yaml | \
+kubectl get secret wildcard-cert-secret --namespace=cert-manager -o yaml \
+| sed 's/namespace: cert-manager/namespace: external-services/' | kubectl apply -f -
+envsubst '${PROXMOX_IP} ${PROXMOX_HOST}' < external-service/proxmox.yaml | \
kubectl apply -n external-services -f -
```
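The two substituted variables come from the sourced `.env`; a hypothetical example of its contents (values are placeholders):

```bash
# .env -- exported so envsubst can see them
export PROXMOX_IP=192.168.1.10
export PROXMOX_HOST=proxmox.example.com
```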
@@ -282,6 +282,13 @@ sudo mkfs.ext4 /dev/sda4
sudo mkdir /mnt/longhorn
sudo mount /dev/sda4 /mnt/longhorn
# Add an entry to /etc/fstab so the mount persists across reboots
echo "/dev/sda4 /mnt/longhorn ext4 defaults 0 2" | sudo tee -a /etc/fstab
```
Deploy the Longhorn Helm chart.
Ref: https://github.com/longhorn/charts/tree/v1.8.x/charts/longhorn
```bash
helm repo add longhorn https://charts.longhorn.io
helm repo update
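# A typical install step with the values file added in this commit might be
# (sketch; release name and values file path are assumptions):
# helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace -f values.yaml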
@@ -299,6 +306,8 @@ kubectl -n longhorn-system edit svc longhorn-frontend
## If the /mnt/longhorn disk is not shown
Ref: https://longhorn.io/docs/1.8.1/nodes-and-volumes/nodes/default-disk-and-node-config/
kubectl -n longhorn-system get nodes.longhorn.io
kubectl -n longhorn-system edit nodes.longhorn.io <node-name>
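If the mount is missing from the node's `spec.disks`, it can be added there; a hedged sketch using `kubectl patch` instead of an interactive edit (the disk key and node name `vm4` are assumptions):

```bash
# Register /mnt/longhorn as a schedulable Longhorn disk on vm4
kubectl -n longhorn-system patch nodes.longhorn.io vm4 --type merge \
  -p '{"spec":{"disks":{"mnt-longhorn":{"path":"/mnt/longhorn","allowScheduling":true,"storageReserved":0}}}}'
```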


@@ -61,7 +61,7 @@ data:
}
}
---
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: proxmox-route


@@ -0,0 +1,333 @@
# Default values for longhorn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

global:
  cattle:
    systemDefaultRegistry: ""
    windowsCluster:
      # Enable this to allow Longhorn to run on the Rancher deployed Windows cluster
      enabled: false
      # Tolerate Linux node taint
      tolerations:
      - key: "cattle.io/os"
        value: "linux"
        effect: "NoSchedule"
        operator: "Equal"
      # Select Linux nodes
      nodeSelector:
        kubernetes.io/os: "linux"
      # Recognize toleration and node selector for Longhorn run-time created components
      defaultSetting:
        taintToleration: cattle.io/os=linux:NoSchedule
        systemManagedComponentsNodeSelector: kubernetes.io/os:linux

image:
  longhorn:
    engine:
      repository: longhornio/longhorn-engine
      tag: v1.4.0
    manager:
      repository: longhornio/longhorn-manager
      tag: v1.4.0
    ui:
      repository: longhornio/longhorn-ui
      tag: v1.4.0
    instanceManager:
      repository: longhornio/longhorn-instance-manager
      tag: v1.4.0
    shareManager:
      repository: longhornio/longhorn-share-manager
      tag: v1.4.0
    backingImageManager:
      repository: longhornio/backing-image-manager
      tag: v1.4.0
    supportBundleKit:
      repository: longhornio/support-bundle-kit
      tag: v0.0.17
  csi:
    attacher:
      repository: longhornio/csi-attacher
      tag: v3.4.0
    provisioner:
      repository: longhornio/csi-provisioner
      tag: v2.1.2
    nodeDriverRegistrar:
      repository: longhornio/csi-node-driver-registrar
      tag: v2.5.0
    resizer:
      repository: longhornio/csi-resizer
      tag: v1.3.0
    snapshotter:
      repository: longhornio/csi-snapshotter
      tag: v5.0.1
    livenessProbe:
      repository: longhornio/livenessprobe
      tag: v2.8.0
  pullPolicy: IfNotPresent

service:
  ui:
    type: ClusterIP
    nodePort: null
  manager:
    type: ClusterIP
    nodePort: ""
    loadBalancerIP: ""
    loadBalancerSourceRanges: ""

persistence:
  defaultClass: true
  defaultFsType: ext4
  defaultMkfsParams: ""
  defaultClassReplicaCount: 1
  defaultDataLocality: disabled # best-effort otherwise
  defaultReplicaAutoBalance: ignored # "disabled", "least-effort" or "best-effort" otherwise
  reclaimPolicy: Delete
  migratable: false
  recurringJobSelector:
    enable: false
    jobList: []
  backingImage:
    enable: false
    name: ~
    dataSourceType: ~
    dataSourceParameters: ~
    expectedChecksum: ~
  defaultNodeSelector:
    enable: false # disable by default
    selector: []
  removeSnapshotsDuringFilesystemTrim: ignored # "enabled" or "disabled" otherwise

csi:
  kubeletRootDir: ~
  attacherReplicaCount: ~
  provisionerReplicaCount: ~
  resizerReplicaCount: ~
  snapshotterReplicaCount: ~

defaultSettings:
  backupTarget: ~
  backupTargetCredentialSecret: ~
  allowRecurringJobWhileVolumeDetached: ~
  createDefaultDiskLabeledNodes: ~
  defaultDataPath: ~
  defaultDataLocality: ~
  replicaSoftAntiAffinity: ~
  replicaAutoBalance: ~
  storageOverProvisioningPercentage: ~
  storageMinimalAvailablePercentage: ~
  upgradeChecker: ~
  defaultReplicaCount: 1
  defaultLonghornStaticStorageClass: ~
  backupstorePollInterval: ~
  failedBackupTTL: ~
  restoreVolumeRecurringJobs: ~
  recurringSuccessfulJobsHistoryLimit: ~
  recurringFailedJobsHistoryLimit: ~
  supportBundleFailedHistoryLimit: ~
  taintToleration: ~
  systemManagedComponentsNodeSelector: ~
  priorityClass: ~
  autoSalvage: ~
  autoDeletePodWhenVolumeDetachedUnexpectedly: ~
  disableSchedulingOnCordonedNode: ~
  replicaZoneSoftAntiAffinity: ~
  nodeDownPodDeletionPolicy: ~
  allowNodeDrainWithLastHealthyReplica: ~
  mkfsExt4Parameters: ~
  disableReplicaRebuild: ~
  replicaReplenishmentWaitInterval: ~
  concurrentReplicaRebuildPerNodeLimit: ~
  concurrentVolumeBackupRestorePerNodeLimit: ~
  disableRevisionCounter: ~
  systemManagedPodsImagePullPolicy: ~
  allowVolumeCreationWithDegradedAvailability: ~
  autoCleanupSystemGeneratedSnapshot: ~
  concurrentAutomaticEngineUpgradePerNodeLimit: ~
  backingImageCleanupWaitInterval: ~
  backingImageRecoveryWaitInterval: ~
  guaranteedEngineManagerCPU: ~
  guaranteedReplicaManagerCPU: ~
  kubernetesClusterAutoscalerEnabled: ~
  orphanAutoDeletion: ~
  storageNetwork: ~
  deletingConfirmationFlag: ~
  engineReplicaTimeout: ~
  snapshotDataIntegrity: ~
  snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
  snapshotDataIntegrityCronjob: ~
  removeSnapshotsDuringFilesystemTrim: ~
  fastReplicaRebuildEnabled: ~
  replicaFileSyncHttpClientTimeout: ~

privateRegistry:
  createSecret: ~
  registryUrl: ~
  registryUser: ~
  registryPasswd: ~
  registrySecret: ~

longhornManager:
  log:
    ## Allowed values are `plain` or `json`.
    format: plain
  priorityClass: ~
  tolerations: []
  ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  nodeSelector: {}
  ## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"
  serviceAnnotations: {}
  ## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
  ## and uncomment this example block
  # annotation-key1: "annotation-value1"
  # annotation-key2: "annotation-value2"

longhornDriver:
  priorityClass: ~
  tolerations: []
  ## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  nodeSelector: {}
  ## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"

longhornUI:
  replicas: 1
  priorityClass: ~
  tolerations: []
  ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  nodeSelector: {}
  ## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"

longhornConversionWebhook:
  replicas: 1
  priorityClass: ~
  tolerations: []
  ## If you want to set tolerations for Longhorn conversion webhook Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  nodeSelector: {}
  ## If you want to set node selector for Longhorn conversion webhook Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"

longhornAdmissionWebhook:
  replicas: 1
  priorityClass: ~
  tolerations: []
  ## If you want to set tolerations for Longhorn admission webhook Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  nodeSelector: {}
  ## If you want to set node selector for Longhorn admission webhook Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"

longhornRecoveryBackend:
  replicas: 1
  priorityClass: ~
  tolerations: []
  ## If you want to set tolerations for Longhorn recovery backend Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  nodeSelector: {}
  ## If you want to set node selector for Longhorn recovery backend Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"

ingress:
  ## Set to true to enable ingress record generation
  enabled: false
  ## Add ingressClassName to the Ingress
  ## Can replace the kubernetes.io/ingress.class annotation on v1.18+
  ingressClassName: ~
  host: sslip.io
  ## Set this to true in order to enable TLS on the ingress record
  tls: false
  ## Enable this in order to enable that the backend service will be connected at port 443
  secureBackends: false
  ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
  tlsSecret: longhorn.local-tls
  ## If ingress is enabled you can set the default ingress path
  ## then you can access the UI by using the following full path {{host}}+{{path}}
  path: /
  ## Ingress annotations done as key:value pairs
  ## If you're using kube-lego, you will want to add:
  ## kubernetes.io/tls-acme: true
  ##
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
  ##
  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
  annotations:
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: true
  secrets:
  ## If you're providing your own certificates, please use this to add the certificates as secrets
  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
  ## -----BEGIN RSA PRIVATE KEY-----
  ##
  ## name should line up with a tlsSecret set further up
  ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
  ##
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  # - name: longhorn.local-tls
  #   key:
  #   certificate:

# For Kubernetes < v1.25, if your cluster enables Pod Security Policy admission controller,
# set this to `true` to ship longhorn-psp which allow privileged Longhorn pods to start
enablePSP: false

## Specify override namespace, specifically this is useful for using longhorn as sub-chart
## and its release namespace is not the `longhorn-system`
namespaceOverride: ""

# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional.
annotations: {}

serviceAccount:
  # Annotations to add to the service account
  annotations: {}