kubernetes: added woodpecker-ci

- added Woodpecker CI
- removed Kubernetes infra Terraform files
- added demo Woodpecker pipeline
2025-07-02 22:49:27 +03:00
parent 523c190c7a
commit 448a0a89b9
9 changed files with 411 additions and 150 deletions


@@ -0,0 +1,14 @@
when:
  - event: push

steps:
  - name: build
    image: debian
    commands:
      - echo "This is the build step"
      # Write a small executable script into the workspace for the next step
      - echo 'echo "binary-data-123"' > executable
      - chmod +x executable
  - name: a-test-step
    image: golang:1.16
    commands:
      - echo "Testing ..."
      # Steps share the workspace, so the file from the build step is available here
      - ./executable


@@ -11,8 +11,8 @@ Currently, only the Proxmox virtual machines are managed using Terraform.
 Kubernetes clusters are still created with Ansible, and Kubernetes resources are
 managed using Helm charts and kubectl. Previously, Proxmox was also managed with
 Ansible, but it has been moved to Terraform for improved consistency and state
-management. The goal is to eventually manage all infrastructure—including
-Kubernetes clusters—using Terraform.
+management. The goal is to eventually manage all infrastructure including
+creating Kubernetes clusters with Terraform, but this is a work in progress.
 The terraform state files are stored in a remote backend, which allows for
 collaboration and state management across different environments. The backend


@@ -1,14 +0,0 @@
terraform {
  backend "s3" {
    bucket                      = "terraform-state"              # Name of the MinIO bucket
    key                         = "kubernetes/terraform.tfstate" # Path to the state file in the bucket
    endpoint                    = var.minio_endpoint             # MinIO API endpoint
    access_key                  = var.minio_access_key           # MinIO access key
    secret_key                  = var.minio_secret_key           # MinIO secret key
    region                      = "us-east-1"                    # Arbitrary region (MinIO ignores this)
    skip_credentials_validation = true                           # Skip AWS-specific credential checks
    skip_metadata_api_check     = true                           # Skip AWS metadata API checks
    skip_region_validation      = true                           # Skip AWS region validation
    use_path_style              = true                           # Use path-style URLs (http://<host>/<bucket>)
  }
}


@@ -1,16 +0,0 @@
# No new namespace is required since it is being deployed in the kube-system namespace.
resource "helm_release" "kube_vip" {
  name       = "kube-vip"
  repository = "https://kube-vip.github.io/helm-charts"
  chart      = "kube-vip"
  version    = "0.6.6"
  atomic     = true
  namespace  = "kube-system"

  values = [
    templatefile("${var.kubernetes_project_path}/kube-vip/values.yaml", {
      VIP_ADDRESS = var.vip_address
    })
  ]
}


@@ -1,22 +0,0 @@
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.37.1"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "3.0.2"
    }
  }
}

provider "kubernetes" {
  config_path = "~/.kube/config"
}

provider "helm" {
  kubernetes = {
    config_path = "~/.kube/config"
  }
}


@@ -1,50 +0,0 @@
resource "kubernetes_namespace" "portfolio" {
  metadata {
    name = "my-portfolio"
  }
}

resource "kubernetes_secret" "docker_secret" {
  metadata {
    name      = "docker-registry-credentials"
    namespace = "my-portfolio"
  }
  type = "kubernetes.io/dockerconfigjson"
  data = {
    ".dockerconfigjson" = jsonencode({
      auths = {
        "${var.docker_registry_host}" = {
          username = var.docker_username
          password = var.docker_password
          auth     = base64encode("${var.docker_username}:${var.docker_password}")
        }
      }
    })
  }
  depends_on = [kubernetes_namespace.portfolio]
}

locals {
  # Read and process the YAML file with placeholders
  manifest_content = templatefile("../../../kubernetes/my-portfolio/portfolioManifest.yaml", {
    PORTFOLIO_HOST       = var.portfolio_host
    DOCKER_REGISTRY_HOST = var.docker_registry_host
  })

  # Split into individual documents
  manifest_documents = split("---", replace(local.manifest_content, "/\\n\\s*\\n/", "---"))
}

resource "kubernetes_manifest" "portfolio_manifest" {
  for_each = { for i, doc in local.manifest_documents : i => doc if trimspace(doc) != "" }

  manifest = yamldecode(each.value)

  field_manager {
    force_conflicts = true
  }

  depends_on = [kubernetes_namespace.portfolio]
}


@@ -1,46 +0,0 @@
# Variables for MinIO backend configuration
variable "minio_access_key" {
  description = "MinIO access key"
  type        = string
}

variable "minio_secret_key" {
  description = "MinIO secret key"
  type        = string
}

variable "minio_endpoint" {
  description = "MinIO API endpoint"
  type        = string
}

variable "portfolio_host" {
  description = "Host for the portfolio application"
  type        = string
}

variable "docker_registry_host" {
  description = "Host for the Docker registry"
  type        = string
}

variable "docker_username" {
  description = "Docker registry username"
  type        = string
}

variable "docker_password" {
  description = "Docker registry password"
  type        = string
}

variable "kubernetes_project_path" {
  description = "Path to the Kubernetes configuration files"
  type        = string
  default     = "../../../kubernetes"
}

variable "vip_address" {
  description = "VIP address for kube-vip"
  type        = string
}


@@ -914,3 +914,37 @@ kubectl create secret generic cloudflare-dns-token \
kubectl apply -f cronjobs/update-dns/update_dns_config.yaml -n cronjobs
kubectl apply -f cronjobs/update-dns/update_dns_cronjob.yaml -n cronjobs
```
# Woodpecker CI

Woodpecker is a lightweight CI/CD server that is deployed in the k3s cluster.
Since Woodpecker uses OAuth2 for authentication, an application must be
registered in Gitea for Woodpecker to authenticate against.

First, create a new application in Gitea for Woodpecker. The path to create the
application is:

`https://<your-gitea-domain>/user/settings/applications/`

The application should have the following settings:

- **Application Name**: Woodpecker
- **Redirect URI**: `https://<your-woodpecker-domain>/authorize`
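
The `helm upgrade` command below sources its hostnames and OAuth2 credentials
from a local `.env` file. A minimal sketch of that file, with the variable
names taken from the command and purely illustrative placeholder values:

```bash
# .env file, sourced before running helm; all values below are placeholders
WOODPECKER_HOST="woodpecker.example.com"  # public hostname for the Woodpecker server
GITEA_HOST="gitea.example.com"            # hostname of the Gitea instance
WOODPECKER_CLIENT_ID="xxxxxxxx"           # OAuth2 client ID from the Gitea application
WOODPECKER_CLIENT_SECRET="xxxxxxxx"       # OAuth2 client secret from the Gitea application
```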
```bash
source .env
helm repo add woodpecker https://woodpecker-ci.org/
helm repo update
helm upgrade --install woodpecker woodpecker/woodpecker \
-f woodpecker-ci/values.yaml \
--version 3.2.0 \
--namespace woodpecker \
--create-namespace \
--set server.ingress.hosts[0].host=$WOODPECKER_HOST \
--set server.ingress.tls[0].hosts[0]=$WOODPECKER_HOST \
--set server.env.WOODPECKER_HOST=https://$WOODPECKER_HOST \
--set server.env.WOODPECKER_GITEA_URL=https://$GITEA_HOST \
--set server.env.WOODPECKER_GITEA_CLIENT=$WOODPECKER_CLIENT_ID \
--set server.env.WOODPECKER_GITEA_SECRET=$WOODPECKER_CLIENT_SECRET \
--atomic
```
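
After the release is installed, a quick sanity check (assuming the namespace
and release name used above) is to confirm that the server and agent pods are
running:

```bash
# List the Woodpecker pods; the server and both agent replicas should be Running
kubectl get pods -n woodpecker

# Tail the server logs if the Gitea OAuth2 login misbehaves
# (resource name assumes the default fullname for the "woodpecker" release)
kubectl logs -n woodpecker statefulset/woodpecker-server --tail=50
```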


@@ -0,0 +1,361 @@
# Default values for woodpecker.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# -- Overrides the name of the chart
nameOverride: ""
# -- Overrides the full name of the chart
fullnameOverride: ""

agent:
  # -- Enable the agent component
  enabled: true
  # -- The number of replicas for the deployment
  replicaCount: 2

  image:
    # -- The image registry
    registry: docker.io
    # -- The image repository
    repository: woodpeckerci/woodpecker-agent
    # -- The pull policy for the image
    pullPolicy: IfNotPresent
    # -- Overrides the image tag whose default is the chart appVersion.
    tag: ""

  env:
    # -- Add the environment variables for the agent component
    WOODPECKER_SERVER: "woodpecker-server.woodpecker.svc.cluster.local:9000"
    WOODPECKER_BACKEND: kubernetes
    WOODPECKER_BACKEND_K8S_NAMESPACE: woodpecker
    WOODPECKER_BACKEND_K8S_STORAGE_CLASS: ""
    WOODPECKER_BACKEND_K8S_VOLUME_SIZE: 10G
    WOODPECKER_BACKEND_K8S_STORAGE_RWX: true
    WOODPECKER_BACKEND_K8S_POD_LABELS: ""
    WOODPECKER_BACKEND_K8S_POD_ANNOTATIONS: ""
    WOODPECKER_CONNECT_RETRY_COUNT: "3"

  # -- Add extra secrets that contain environment variables
  extraSecretNamesForEnvFrom: []

  persistence:
    # -- Enable the creation of the persistent volume
    enabled: true
    # -- Defines an existing claim to use
    existingClaim:
    # -- Defines the size of the persistent volume
    size: 1Gi
    # -- Defines the path where the volume should be mounted
    mountPath: "/etc/woodpecker"
    # -- Defines the storageClass of the persistent volume
    storageClass: ""
    # -- Defines the access mode of the persistent volume
    accessModes:
      - ReadWriteOnce

  # -- Additional volumes that can be mounted in containers
  extraVolumes:
    []
    # - name: docker-config
    #   configMap:
    #     name: docker-config
    # - name: data-volume
    #   persistentVolumeClaim:
    #     claimName: example

  # -- Additional volumes that will be attached to the agent container
  extraVolumeMounts:
    []
    # - name: ca-certs
    #   mountPath: /etc/ssl/certs/ca-certificates.crt

  # -- The image pull secrets
  imagePullSecrets: []

  # -- Overrides the name of the chart of the agent component
  nameOverride: ""
  # -- Overrides the full name of the chart of the agent component
  fullnameOverride: ""

  serviceAccount:
    # -- Specifies whether a service account should be created (also see RBAC subsection)
    create: true
    # -- Annotations to add to the service account
    annotations: {}
    # -- The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""
    rbac:
      # -- If your cluster has RBAC enabled and you're using the Kubernetes agent
      # backend, you'll need this (true for almost all production clusters).
      # Only change this if you have a non-CNCF-compliant cluster that is missing
      # the RBAC endpoints. The Role and RoleBinding are only created if
      # serviceAccount.create is also true.
      create: true
      # Additional annotations and labels in role and roleBinding are only needed if you
      # are using additional tooling to manage / verify roles or roleBindings (OPA, etc.)
      role:
        annotations: {}
        labels: {}
      roleBinding:
        annotations: {}
        labels: {}

  # -- Add pod annotations for the agent component
  podAnnotations: {}

  # -- Add pod security context
  podSecurityContext:
    {}
    # fsGroup: 2000

  # -- Add security context
  securityContext:
    {}
    # capabilities:
    #   drop:
    #     - ALL
    # readOnlyRootFilesystem: true
    # runAsNonRoot: true
    # runAsUser: 1000

  # -- Specifies the resources for the agent component
  resources:
    {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi

  # -- Specifies the labels of the nodes that the agent component must run on
  nodeSelector: {}
  # -- Specifies the tolerations
  tolerations: []
  # -- Specifies the affinity
  affinity: {}
  # -- Overrides the default DNS configuration
  dnsConfig: {}

  # -- Using topology spread constraints, you can ensure that there is at least one agent
  # pod for each topology zone, e.g. one per arch for multi-architecture clusters
  # or one for each region for geographically distributed cloud-hosted clusters.
  # Ref: <https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/>
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: "beta.kubernetes.io/arch"
  #   whenUnsatisfiable: "DoNotSchedule"
  #   labelSelector:
  #     matchLabels:
  #       "app.kubernetes.io/name": woodpecker-agent
server:
  # -- Enable the server component
  enabled: true

  statefulSet:
    # -- Add annotations to the StatefulSet
    annotations: {}
    # -- Add labels to the StatefulSet
    labels: {}
    # -- Defines the number of replicas
    replicaCount: 1
    # -- The maximum number of revisions that will be maintained in the
    # StatefulSet's revision history. Defaults to 10.
    revisionHistoryLimit: 5
    updateStrategy:
      # -- Defines the update strategy of the StatefulSet
      type: RollingUpdate

  image:
    # -- The image registry
    registry: docker.io
    # -- The image repository
    repository: woodpeckerci/woodpecker-server
    # -- The image pull policy
    pullPolicy: IfNotPresent
    # -- Overrides the image tag whose default is the chart appVersion.
    tag: ""

  # -- Add environment variables for the server component
  env:
    WOODPECKER_ADMIN: "taqi"
    WOODPECKER_HOST: "placeholder.woodpecker.svc.cluster.local:9000"
    WOODPECKER_GITEA: "true"
    WOODPECKER_GITEA_URL: "${GITEA_HOST}"
    WOODPECKER_GITEA_CLIENT: "${WOODPECKER_CLIENT_ID}"
    WOODPECKER_GITEA_SECRET: "${WOODPECKER_CLIENT_SECRET}"
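    # NOTE: the ${...} placeholders and the placeholder WOODPECKER_HOST above are
    # not substituted by Helm itself; in this setup they are expected to be
    # overridden at install time via the --set server.env.* flags shown in the
    # helm upgrade command in the docs.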
    # WOODPECKER_GITHUB: "true"

  # -- Add extra environment variables from the secrets list
  extraSecretNamesForEnvFrom: []

  # -- Whether to create the default WOODPECKER_AGENT_SECRET in woodpecker-default-agent-secret
  createAgentSecret: true

  # -- Create a generic secret to store things in, e.g. env values
  secrets: []
    # - name: secret
    #   data:
    #     key: value

  # -- Additional volumes that can be mounted in containers
  extraVolumes:
    []
    # - name: docker-config
    #   configMap:
    #     name: docker-config
    # - name: data-volume
    #   persistentVolumeClaim:
    #     claimName: example

  # -- Additional volumes that will be attached to the server container
  extraVolumeMounts:
    []
    # - name: ca-certs
    #   mountPath: /etc/ssl/certs/ca-certificates.crt

  # -- Add additional init containers to the pod (evaluated as a template)
  initContainers: []

  persistentVolume:
    # -- Enable the creation of the persistent volume
    enabled: true
    # -- Defines the size of the persistent volume
    size: 10Gi
    # -- Defines the path where the volume should be mounted
    mountPath: "/var/lib/woodpecker"
    # -- Defines the storageClass of the persistent volume
    storageClass: ""

  # -- The image pull secrets
  imagePullSecrets: []

  # -- Overrides the name of the helm chart of the server component
  nameOverride: ""
  # -- Overrides the full name of the helm chart of the server component
  fullnameOverride: ""

  serviceAccount:
    # -- Specifies whether a service account should be created
    create: false
    # -- Annotations to add to the service account
    annotations: {}
    # -- The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""

  # -- Add pod annotations
  podAnnotations:
    {}
    # prometheus.io/scrape: "true"

  # -- Add pod security context
  podSecurityContext:
    {}
    # fsGroup: 2000

  # -- Add security context
  securityContext:
    {}
    # capabilities:
    #   drop:
    #     - ALL
    # readOnlyRootFilesystem: true
    # runAsNonRoot: true
    # runAsUser: 1000

  service:
    # -- The type of the service
    type: ClusterIP
    # -- The port of the service
    port: &servicePort 80
    # -- The cluster IP of the service (optional)
    clusterIP:
    # -- The loadbalancer IP of the service (optional)
    loadBalancerIP:

  ingress:
    # -- Enable the ingress for the server component
    enabled: true
    # -- Add annotations to the ingress
    annotations:
      cert-manager.io/cluster-issuer: "acme-issuer"
      # kubernetes.io/ingress.class: nginx
      # kubernetes.io/tls-acme: "true"
    # -- Defines which ingress controller will implement the resource
    ingressClassName: "nginx"
    hosts:
      - host: example.com
        paths:
          - path: /
            backend:
              serviceName: chart-example.local
              servicePort: *servicePort
    tls:
      - hosts:
          - example.com
        secretName: woodpecker-tls
    # -- Defines the secret that contains the TLS certificate and key
    # secretName: chart-example-tls
    # - secretName: chart-example-tls
    #   hosts:
    #     - chart-example.local

  # -- Specifies the resources for the server component
  resources:
    {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi

  # -- Defines the labels of the nodes that the server component must run on
  nodeSelector: {}
  # -- Add tolerations rules
  tolerations: []
  # -- Add affinity
  affinity: {}
  # -- Overrides the default DNS configuration
  dnsConfig: {}

  # -- Configure probe options for container health checking
  probes:
    # -- Configure liveness probe options
    liveness:
      # -- Number of seconds after which the probe times out (default: 10)
      timeoutSeconds: 10
      # -- How often (in seconds) to perform the probe (default: 10)
      periodSeconds: 10
      # -- Minimum consecutive successes for the probe to be considered successful after having failed (default: 1)
      successThreshold: 1
      # -- When a probe fails, Kubernetes will try failureThreshold times before giving up (default: 3)
      failureThreshold: 3
    # -- Configure readiness probe options
    readiness:
      # -- Number of seconds after which the probe times out (default: 10)
      timeoutSeconds: 10
      # -- How often (in seconds) to perform the probe (default: 10)
      periodSeconds: 10
      # -- Minimum consecutive successes for the probe to be considered successful after having failed (default: 1)
      successThreshold: 1
      # -- When a probe fails, Kubernetes will try failureThreshold times before giving up (default: 3)
      failureThreshold: 3