From 448a0a89b911d8d331e0154b1546f44b8cd8fe02 Mon Sep 17 00:00:00 2001
From: Taqi Tahmid
Date: Wed, 2 Jul 2025 22:49:27 +0300
Subject: [PATCH] kubernetes: added woodpecker-ci

- added woodpecker CI
- removed kubernetes infra terraform files
- added demo woodpecker pipeline
---
 .woodpecker/demo-workflow.yaml          |  14 +
 infra/terraform/README.md               |   4 +-
 infra/terraform/kubernetes/backend.tf   |  14 -
 infra/terraform/kubernetes/kube-vip.tf  |  16 --
 infra/terraform/kubernetes/main.tf      |  22 --
 infra/terraform/kubernetes/portfolio.tf |  50 ----
 infra/terraform/kubernetes/variables.tf |  46 ---
 kubernetes/README.md                    |  34 +++
 kubernetes/woodpecker-ci/values.yaml    | 361 ++++++++++++++++++++++++
 9 files changed, 411 insertions(+), 150 deletions(-)
 create mode 100644 .woodpecker/demo-workflow.yaml
 delete mode 100644 infra/terraform/kubernetes/backend.tf
 delete mode 100644 infra/terraform/kubernetes/kube-vip.tf
 delete mode 100644 infra/terraform/kubernetes/main.tf
 delete mode 100644 infra/terraform/kubernetes/portfolio.tf
 delete mode 100644 infra/terraform/kubernetes/variables.tf
 create mode 100644 kubernetes/woodpecker-ci/values.yaml

diff --git a/.woodpecker/demo-workflow.yaml b/.woodpecker/demo-workflow.yaml
new file mode 100644
index 0000000..8211d15
--- /dev/null
+++ b/.woodpecker/demo-workflow.yaml
@@ -0,0 +1,14 @@
+when:
+  - event: push
+
+steps:
+  - name: build
+    image: debian
+    commands:
+      - echo "This is the build step"
+      - echo "binary-data-123" > executable
+  - name: a-test-step
+    image: golang:1.16
+    commands:
+      - echo "Testing ..."
+      - ./executable
diff --git a/infra/terraform/README.md b/infra/terraform/README.md
index cfa176a..34fbfaa 100644
--- a/infra/terraform/README.md
+++ b/infra/terraform/README.md
@@ -11,8 +11,8 @@ Currently, only the Proxmox virtual machines are managed using Terraform.
 Kubernetes clusters are still created with Ansible, and Kubernetes resources
 are managed using Helm charts and kubectl. Previously, Proxmox was also managed
 with Ansible, but it has been moved to Terraform for improved consistency and state
-management. The goal is to eventually manage all infrastructure—including
-Kubernetes clusters—using Terraform.
+management. The goal is to eventually manage all infrastructure, including
+Kubernetes cluster creation, with Terraform, but this is still a work in progress.
 
 The terraform state files are stored in a remote backend, which allows for
 collaboration and state management across different environments. The backend
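A note on the demo workflow above: the build step writes `executable` as a plain text file, so the test step's `./executable` will normally fail with a permission error, because the file is created without the execute bit (and has no shebang). A minimal sketch of a variant that runs end to end, assuming the default shared workspace between steps; the script body and images are illustrative only:

```yaml
when:
  - event: push

steps:
  - name: build
    image: debian
    commands:
      - echo "This is the build step"
      # write a small shell script (with a shebang) and mark it executable
      - printf '#!/bin/sh\necho "binary-data-123"\n' > executable
      - chmod +x executable
  - name: a-test-step
    image: golang:1.16
    commands:
      - echo "Testing ..."
      # the workspace is shared between steps, so the script built above is available here
      - ./executable
```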
diff --git a/infra/terraform/kubernetes/backend.tf b/infra/terraform/kubernetes/backend.tf
deleted file mode 100644
index c579f9b..0000000
--- a/infra/terraform/kubernetes/backend.tf
+++ /dev/null
@@ -1,14 +0,0 @@
-terraform {
-  backend "s3" {
-    bucket     = "terraform-state"              # Name of the MinIO bucket
-    key        = "kubernetes/terraform.tfstate" # Path to the state file in the bucket
-    endpoint   = var.minio_endpoint             # MinIO API endpoint
-    access_key = var.minio_access_key           # MinIO access key
-    secret_key = var.minio_secret_key           # MinIO secret key
-    region     = "us-east-1"                    # Arbitrary region (MinIO ignores this)
-    skip_credentials_validation = true          # Skip AWS-specific credential checks
-    skip_metadata_api_check     = true          # Skip AWS metadata API checks
-    skip_region_validation      = true          # Skip AWS region validation
-    use_path_style              = true          # Use path-style URLs
-  }
-}
\ No newline at end of file
diff --git a/infra/terraform/kubernetes/kube-vip.tf b/infra/terraform/kubernetes/kube-vip.tf
deleted file mode 100644
index 3c18b71..0000000
--- a/infra/terraform/kubernetes/kube-vip.tf
+++ /dev/null
@@ -1,16 +0,0 @@
-# No new namespace is required since it is being deployed in kube-system namespace.
-resource "helm_release" "kube_vip" {
-  name       = "kube-vip"
-  repository = "https://kube-vip.github.io/helm-charts"
-  chart      = "kube-vip"
-  version    = "0.6.6"
-  atomic     = true
-
-  namespace = "kube-system"
-
-  values = [
-    templatefile("${var.kubernetes_project_path}/kube-vip/values.yaml", {
-      VIP_ADDRESS = var.vip_address
-    })
-  ]
-}
\ No newline at end of file
diff --git a/infra/terraform/kubernetes/main.tf b/infra/terraform/kubernetes/main.tf
deleted file mode 100644
index 9d6bc5e..0000000
--- a/infra/terraform/kubernetes/main.tf
+++ /dev/null
@@ -1,22 +0,0 @@
-terraform {
-  required_providers {
-    kubernetes = {
-      source  = "hashicorp/kubernetes"
-      version = "2.37.1"
-    }
-    helm = {
-      source  = "hashicorp/helm"
-      version = "3.0.2"
-    }
-  }
-}
-
-provider "kubernetes" {
-  config_path = "~/.kube/config"
-}
-
-provider "helm" {
-  kubernetes = {
-    config_path = "~/.kube/config"
-  }
-}
\ No newline at end of file
diff --git a/infra/terraform/kubernetes/portfolio.tf b/infra/terraform/kubernetes/portfolio.tf
deleted file mode 100644
index 42a8d2d..0000000
--- a/infra/terraform/kubernetes/portfolio.tf
+++ /dev/null
@@ -1,50 +0,0 @@
-resource "kubernetes_namespace" "portfolio" {
-  metadata {
-    name = "my-portfolio"
-  }
-}
-
-resource "kubernetes_secret" "docker_secret" {
-  metadata {
-    name      = "docker-registry-credentials"
-    namespace = "my-portfolio"
-  }
-
-  type = "kubernetes.io/dockerconfigjson"
-
-  data = {
-    ".dockerconfigjson" = jsonencode({
-      auths = {
-        "${var.docker_registry_host}" = {
-          username = var.docker_username
-          password = var.docker_password
-          auth     = base64encode("${var.docker_username}:${var.docker_password}")
-        }
-      }
-    })
-  }
-
-  depends_on = [kubernetes_namespace.portfolio]
-}
-
-locals {
-  # Read and process the YAML file with placeholders
-  manifest_content = templatefile("../../../kubernetes/my-portfolio/portfolioManifest.yaml", {
-    PORTFOLIO_HOST       = var.portfolio_host
-    DOCKER_REGISTRY_HOST = var.docker_registry_host
-  })
-  # Split into individual documents
-  manifest_documents = split("---", replace(local.manifest_content, "/\\n\\s*\\n/", "---"))
-}
-
-resource "kubernetes_manifest" "portfolio_manifest" {
-  for_each = { for i, doc in local.manifest_documents : i => doc if trimspace(doc) != "" }
-
-  manifest = yamldecode(each.value)
-
-  field_manager {
-    force_conflicts = true
-  }
-
-  depends_on = [kubernetes_namespace.portfolio]
-}
\ No newline at end of file
diff --git a/infra/terraform/kubernetes/variables.tf b/infra/terraform/kubernetes/variables.tf
deleted file mode 100644
index 7f9a510..0000000
--- a/infra/terraform/kubernetes/variables.tf
+++ /dev/null
@@ -1,46 +0,0 @@
-# variables for minio backend configuration
-variable "minio_access_key" {
-  description = "MinIO access key"
-  type        = string
-}
-
-variable "minio_secret_key" {
-  description = "MinIO secret key"
-  type        = string
-}
-
-variable "minio_endpoint" {
-  description = "MinIO API endpoint"
-  type        = string
-}
-
-variable "portfolio_host" {
-  description = "Host for the portfolio application"
-  type        = string
-}
-
-variable "docker_registry_host" {
-  description = "Host for the Docker registry"
-  type        = string
-}
-
-variable "docker_username" {
-  description = "Docker registry username"
-  type        = string
-}
-
-variable "docker_password" {
-  description = "Docker registry password"
-  type        = string
-}
-
-variable "kubernetes_project_path" {
-  description = "Path to the Kubernetes configuration files"
-  type        = string
-  default     = "../../../kubernetes"
-}
-
-variable "vip_address" {
-  description = "VIP address for kube-vip"
-  type        = string
-}
\ No newline at end of file
diff --git a/kubernetes/README.md b/kubernetes/README.md
index a5c217b..cac8453 100644
--- a/kubernetes/README.md
+++ b/kubernetes/README.md
@@ -914,3 +914,37 @@ kubectl create secret generic cloudflare-dns-token \
 kubectl apply -f cronjobs/update-dns/update_dns_config.yaml -n cronjobs
 kubectl apply -f cronjobs/update-dns/update_dns_cronjob.yaml -n cronjobs
 ```
+
+# Woodpecker CI
+
+Woodpecker is a lightweight CI/CD server that is deployed in the k3s cluster.
+
+Woodpecker uses OAuth2 for authentication, so an OAuth2 application has to be
+created in Gitea for Woodpecker to use.
+
+First, create a new application in Gitea for Woodpecker. The path to create the
+application is:
+`https://<GITEA_HOST>/user/settings/applications/`
+
+The application should have the following settings:
+
+- **Application Name**: Woodpecker
+- **Redirect URI**: `https://<WOODPECKER_HOST>/authorize`
+
+```bash
+source .env
+helm repo add woodpecker https://woodpecker-ci.org/
+helm repo update
+helm upgrade --install woodpecker woodpecker/woodpecker \
+  -f woodpecker-ci/values.yaml \
+  --version 3.2.0 \
+  --namespace woodpecker \
+  --create-namespace \
+  --set server.ingress.hosts[0].host=$WOODPECKER_HOST \
+  --set server.ingress.tls[0].hosts[0]=$WOODPECKER_HOST \
+  --set server.env.WOODPECKER_HOST=https://$WOODPECKER_HOST \
+  --set server.env.WOODPECKER_GITEA_URL=https://$GITEA_HOST \
+  --set server.env.WOODPECKER_GITEA_CLIENT=$WOODPECKER_CLIENT_ID \
+  --set server.env.WOODPECKER_GITEA_SECRET=$WOODPECKER_CLIENT_SECRET \
+  --atomic
+```
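The `source .env` line in the install command above assumes a local, uncommitted `.env` file that provides the hostnames and the OAuth2 client credentials generated by Gitea. A minimal sketch with placeholder values (the variable names match the ones referenced above; the hostnames are hypothetical):

```bash
# .env -- keep this file out of version control; all values below are placeholders
export WOODPECKER_HOST="woodpecker.example.com"   # public hostname of the Woodpecker UI
export GITEA_HOST="gitea.example.com"             # hostname of the Gitea instance
export WOODPECKER_CLIENT_ID="<client id from the Gitea OAuth2 application>"
export WOODPECKER_CLIENT_SECRET="<client secret from the Gitea OAuth2 application>"
```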
diff --git a/kubernetes/woodpecker-ci/values.yaml b/kubernetes/woodpecker-ci/values.yaml
new file mode 100644
index 0000000..75e8907
--- /dev/null
+++ b/kubernetes/woodpecker-ci/values.yaml
@@ -0,0 +1,361 @@
+# Default values for woodpecker.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# -- Overrides the name of the chart
+nameOverride: ""
+# -- Overrides the full name of the chart
+fullnameOverride: ""
+
+agent:
+  # -- Enable the agent component
+  enabled: true
+
+  # -- The number of replicas for the deployment
+  replicaCount: 2
+
+  image:
+    # -- The image registry
+    registry: docker.io
+    # -- The image repository
+    repository: woodpeckerci/woodpecker-agent
+    # -- The pull policy for the image
+    pullPolicy: IfNotPresent
+    # -- Overrides the image tag whose default is the chart appVersion.
+    tag: ""
+
+  env:
+    # -- Add the environment variables for the agent component
+    WOODPECKER_SERVER: "woodpecker-server.woodpecker.svc.cluster.local:9000"
+    WOODPECKER_BACKEND: kubernetes
+    WOODPECKER_BACKEND_K8S_NAMESPACE: woodpecker
+    WOODPECKER_BACKEND_K8S_STORAGE_CLASS: ""
+    WOODPECKER_BACKEND_K8S_VOLUME_SIZE: 10G
+    WOODPECKER_BACKEND_K8S_STORAGE_RWX: true
+    WOODPECKER_BACKEND_K8S_POD_LABELS: ""
+    WOODPECKER_BACKEND_K8S_POD_ANNOTATIONS: ""
+    WOODPECKER_CONNECT_RETRY_COUNT: "3"
+
+  # -- Add extra secrets that contain environment variables
+  extraSecretNamesForEnvFrom: []
+
+  persistence:
+    # -- Enable the creation of the persistent volume
+    enabled: true
+    # -- Defines an existing claim to use
+    existingClaim:
+    # -- Defines the size of the persistent volume
+    size: 1Gi
+    # -- Defines the path where the volume should be mounted
+    mountPath: "/etc/woodpecker"
+    # -- Defines the storageClass of the persistent volume
+    storageClass: ""
+    # -- Defines the access mode of the persistent volume
+    accessModes:
+      - ReadWriteOnce
+
+  # -- Additional volumes that can be mounted in containers
+  extraVolumes:
+    []
+    # - name: docker-config
+    #   configMap:
+    #     name: docker-config
+    # - name: data-volume
+    #   persistentVolumeClaim:
+    #     claimName: example
+
+  # -- Additional volumes that will be attached to the agent container
+  extraVolumeMounts:
+    []
+    # - name: ca-certs
+    #   mountPath: /etc/ssl/certs/ca-certificates.crt
+
+  # -- The image pull secrets
+  imagePullSecrets: []
+  # -- Overrides the name of the chart of the agent component
+  nameOverride: ""
+  # -- Overrides the full name of the chart of the agent component
+  fullnameOverride: ""
+
+  serviceAccount:
+    # -- Specifies whether a service account should be created (also see RBAC subsection)
+    create: true
+    # -- Annotations to add to the service account
+    annotations: {}
+    # -- The name of the service account to use.
+    # If not set and create is true, a name is generated using the fullname template
+    name: ""
+  rbac:
+    # -- If your cluster has RBAC enabled and you're using the Kubernetes agent
+    # backend, you'll need this (true for almost all production clusters).
+    # Only change this if you have a non-CNCF-compliant cluster that is missing the RBAC endpoints.
+    # The Role and RoleBinding are only created if serviceAccount.create is also true.
+    create: true
+    # Additional annotations and labels in role and roleBinding are only needed if you
+    # are using additional tooling to manage / verify roles or roleBindings (OPA, etc.)
+    role:
+      annotations: {}
+      labels: {}
+    roleBinding:
+      annotations: {}
+      labels: {}
+
+  # -- Add pod annotations for the agent component
+  podAnnotations: {}
+
+  # -- Add pod security context
+  podSecurityContext:
+    {}
+    # fsGroup: 2000
+
+  # -- Add security context
+  securityContext:
+    {}
+    # capabilities:
+    #   drop:
+    #     - ALL
+    # readOnlyRootFilesystem: true
+    # runAsNonRoot: true
+    # runAsUser: 1000
+
+  # -- Specifies the resources for the agent component
+  resources:
+    {}
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    # limits:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 128Mi
+
+  # -- Specifies the labels of the nodes that the agent component must be running on
+  nodeSelector: {}
+
+  # -- Specifies the tolerations
+  tolerations: []
+
+  # -- Specifies the affinity
+  affinity: {}
+
+  # -- Overrides the default DNS configuration
+  dnsConfig: {}
+
+  # -- Using topology spread constraints, you can ensure that there is at least one agent
+  # pod for each topology zone, e.g. one per arch for multi-architecture clusters
+  # or one for each region for geographically distributed cloud-hosted clusters.
+  # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+  topologySpreadConstraints: []
+    # - maxSkew: 1
+    #   topologyKey: "beta.kubernetes.io/arch"
+    #   whenUnsatisfiable: "DoNotSchedule"
+    #   labelSelector:
+    #     matchLabels:
+    #       "app.kubernetes.io/name": woodpecker-agent
+
+server:
+  # -- Enable the server component
+  enabled: true
+
+  statefulSet:
+    # -- Add annotations to the StatefulSet
+    annotations: {}
+    # -- Add labels to the StatefulSet
+    labels: {}
+    # -- Defines the number of replicas
+    replicaCount: 1
+    # -- The maximum number of revisions that will be maintained in the StatefulSet's revision history.
+    # Default is 10.
+    revisionHistoryLimit: 5
+
+  updateStrategy:
+    # -- Defines the update strategy of the StatefulSet
+    type: RollingUpdate
+
+  image:
+    # -- The image registry
+    registry: docker.io
+    # -- The image repository
+    repository: woodpeckerci/woodpecker-server
+    # -- The image pull policy
+    pullPolicy: IfNotPresent
+    # -- Overrides the image tag whose default is the chart appVersion.
+    tag: ""
+
+  # -- Add environment variables for the server component
+  env:
+    WOODPECKER_ADMIN: "taqi"
+    WOODPECKER_HOST: "placeholder.woodpecker.svc.cluster.local:9000"
+    WOODPECKER_GITEA: "true"
+    WOODPECKER_GITEA_URL: "${GITEA_HOST}"
+    WOODPECKER_GITEA_CLIENT: "${WOODPECKER_CLIENT_ID}"
+    WOODPECKER_GITEA_SECRET: "${WOODPECKER_CLIENT_SECRET}"
+
+    # WOODPECKER_GITHUB: "true"
+
+  # -- Add extra environment variables from the secrets list
+  extraSecretNamesForEnvFrom: []
+
+  # whether to create the default WOODPECKER_AGENT_SECRET in woodpecker-default-agent-secret
+  createAgentSecret: true
+  # -- Create a generic secret to store things in, e.g. env values
+  secrets: []
+    # - name: secret
+    #   data:
+    #     key: value
+
+  # -- Additional volumes that can be mounted in containers
+  extraVolumes:
+    []
+    # - name: docker-config
+    #   configMap:
+    #     name: docker-config
+    # - name: data-volume
+    #   persistentVolumeClaim:
+    #     claimName: example
+
+  # -- Additional volumes that will be attached to the server container
+  extraVolumeMounts:
+    []
+    # - name: ca-certs
+    #   mountPath: /etc/ssl/certs/ca-certificates.crt
+
+  # -- Add additional init containers to the pod (evaluated as a template)
+  initContainers: []
+
+  persistentVolume:
+    # -- Enable the creation of the persistent volume
+    enabled: true
+    # -- Defines the size of the persistent volume
+    size: 10Gi
+    # -- Defines the path where the volume should be mounted
+    mountPath: "/var/lib/woodpecker"
+    # -- Defines the storageClass of the persistent volume
+    storageClass: ""
+
+  # -- The image pull secrets
+  imagePullSecrets: []
+  # -- Overrides the name of the helm chart of the server component
+  nameOverride: ""
+  # -- Overrides the full name of the helm chart of the server component
+  fullnameOverride: ""
+
+  serviceAccount:
+    # -- Specifies whether a service account should be created
+    create: false
+    # -- Annotations to add to the service account
+    annotations: {}
+    # -- The name of the service account to use.
+    # If not set and create is true, a name is generated using the fullname template
+    name: ""
+
+  # -- Add pod annotations
+  podAnnotations:
+    {}
+    # prometheus.io/scrape: "true"
+
+  # -- Add pod security context
+  podSecurityContext:
+    {}
+    # fsGroup: 2000
+
+  # -- Add security context
+  securityContext:
+    {}
+    # capabilities:
+    #   drop:
+    #     - ALL
+    # readOnlyRootFilesystem: true
+    # runAsNonRoot: true
+    # runAsUser: 1000
+
+  service:
+    # -- The type of the service
+    type: ClusterIP
+    # -- The port of the service
+    port: &servicePort 80
+    # -- The cluster IP of the service (optional)
+    clusterIP:
+    # -- The loadbalancer IP of the service (optional)
+    loadBalancerIP:
+
+  ingress:
+    # -- Enable the ingress for the server component
+    enabled: true
+    # -- Add annotations to the ingress
+    annotations:
+      cert-manager.io/cluster-issuer: "acme-issuer"
+      # kubernetes.io/ingress.class: nginx
+      # kubernetes.io/tls-acme: "true"
+
+    # -- Defines which ingress controller will implement the resource
+    ingressClassName: "nginx"
+
+    hosts:
+      - host: example.com
+        paths:
+          - path: /
+            backend:
+              serviceName: chart-example.local
+              servicePort: *servicePort
+    tls:
+      - hosts:
+          - example.com
+        secretName: woodpecker-tls
+    # -- Defines the secret that contains the TLS certificate and key
+    # secretName: chart-example-tls
+    # - secretName: chart-example-tls
+    #   hosts:
+    #     - chart-example.local
+
+  # -- Specifies the resources for the server component
+  resources:
+    {}
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    # limits:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 128Mi
+
+  # -- Defines the labels of the node where the server component must be running
+  nodeSelector: {}
+
+  # -- Add tolerations rules
+  tolerations: []
+
+  # -- Add affinity
+  affinity: {}
+
+  # -- Overrides the default DNS configuration
+  dnsConfig: {}
+
+  # -- Configure probe options for container health checking
+  probes:
+    # -- Configure liveness probe options
+    liveness:
+      # -- Number of seconds after which the probe times out (default: 10)
+      timeoutSeconds: 10
+      # -- How often (in seconds) to perform the probe (default: 10)
+      periodSeconds: 10
+      # -- Minimum consecutive successes for the probe to be considered successful after having failed (default: 1)
+      successThreshold: 1
+      # -- When a probe fails, Kubernetes will try failureThreshold times before giving up (default: 3)
+      failureThreshold: 3
+    # -- Configure readiness probe options
+    readiness:
+      # -- Number of seconds after which the probe times out (default: 10)
+      timeoutSeconds: 10
+      # -- How often (in seconds) to perform the probe (default: 10)
+      periodSeconds: 10
+      # -- Minimum consecutive successes for the probe to be considered successful after having failed (default: 1)
+      successThreshold: 1
+      # -- When a probe fails, Kubernetes will try failureThreshold times before giving up (default: 3)
+      failureThreshold: 3
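Once the chart is installed with the command from kubernetes/README.md, a few quick checks confirm that the server is up and that the two agent replicas connected to the server address configured above (port 9000). This is a sketch: resource names assume the chart's default naming and the `woodpecker` namespace used in the install command.

```bash
# post-install checks; substitute the actual pod name reported by `kubectl get pods`
helm status woodpecker --namespace woodpecker
kubectl get pods --namespace woodpecker
kubectl get ingress --namespace woodpecker
# pick one of the agent pods listed above and verify it registered with the server
kubectl logs --namespace woodpecker <agent-pod-name> --tail=20
```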