CI/CD Pipeline with GitLab and Kubernetes

Introduction

GitLab CI/CD with Kubernetes provides automated testing, building, and deployment pipelines. This guide walks through a complete setup: a Kubernetes-hosted runner, a staged pipeline with security scanning, and progressive delivery via blue-green and canary deployments.

Prerequisites

- A Kubernetes cluster with cluster-admin access and a configured kubectl
- Helm 3
- A GitLab project with Maintainer access (needed to register runners and set CI/CD variables)
- A container registry (the examples use the GitLab Container Registry)

Step 1: GitLab Runner Configuration

Install GitLab Runner on Kubernetes. The official gitlab-runner Helm chart is the usual route; the raw manifests below make the required RBAC and deployment explicit:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: gitlab-runner
  namespace: gitlab-runner
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gitlab-runner
rules:
- apiGroups: [""]
  resources: ["pods", "pods/log", "services", "configmaps", "secrets"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["apps"]
  resources: ["deployments", "replicasets"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: gitlab-runner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gitlab-runner
subjects:
- kind: ServiceAccount
  name: gitlab-runner
  namespace: gitlab-runner
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitlab-runner
  namespace: gitlab-runner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gitlab-runner
  template:
    metadata:
      labels:
        app: gitlab-runner
    spec:
      serviceAccountName: gitlab-runner
      containers:
      - name: gitlab-runner
        image: gitlab/gitlab-runner:v16.5.0
        command: ["/bin/bash"]
        args: ["-c", "gitlab-runner register --non-interactive --url=$GITLAB_URL --registration-token=$REGISTRATION_TOKEN --executor=kubernetes --kubernetes-namespace=gitlab-runner && gitlab-runner run"]
        env:
        - name: GITLAB_URL
          value: "https://gitlab.com"
        - name: REGISTRATION_TOKEN
          valueFrom:
            secretKeyRef:
              name: runner-secret
              key: registration-token
        volumeMounts:
        - name: config
          mountPath: /etc/gitlab-runner
      volumes:
      - name: config
        emptyDir: {}
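
The Deployment reads the registration token from a Secret named runner-secret. Create the namespace and the Secret first (the token value is a placeholder for your project's runner token; note that GitLab 16 deprecates registration tokens in favor of runner authentication tokens, which register with --token instead):

kubectl create namespace gitlab-runner
kubectl create secret generic runner-secret \
  --namespace gitlab-runner \
  --from-literal=registration-token=<YOUR_RUNNER_TOKEN>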

Step 2: Basic GitLab CI Configuration

Create .gitlab-ci.yml:

# Pipeline stages
stages:
  - validate
  - test
  - build
  - security
  - deploy-staging
  - deploy-production

# Global variables
variables:
  DOCKER_REGISTRY: registry.gitlab.com/$CI_PROJECT_PATH
  KUBERNETES_NAMESPACE_STAGING: staging
  KUBERNETES_NAMESPACE_PRODUCTION: production
  HELM_CHART_PATH: ./helm

# Docker image building template
.docker_build: &docker_build
  image: docker:20.10.16
  services:
    - docker:20.10.16-dind
  before_script:
    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $CI_REGISTRY

# Kubernetes deployment template (deploy jobs run helm, so the image must ship both helm and kubectl)
.deploy_template: &deploy_template
  image: dtzar/helm-kubectl:latest
  before_script:
    - kubectl config use-context $CI_PROJECT_PATH:$KUBE_CONTEXT

# Validation stage
validate:code:
  stage: validate
  image: node:18-alpine
  script:
    - npm ci
    - npm run lint
    - npm run type-check
  cache:
    key: $CI_COMMIT_REF_SLUG
    paths:
      - node_modules/

# Testing stages
test:unit:
  stage: test
  image: node:18-alpine
  script:
    - npm ci
    - npm run test:unit -- --coverage
  coverage: '/Coverage: \d+\.\d+/'
  artifacts:
    reports:
      coverage_report:
        coverage_format: cobertura
        path: coverage/cobertura-coverage.xml
    paths:
      - coverage/
    expire_in: 1 week

test:integration:
  stage: test
  image: node:18-alpine
  services:
    - postgres:13
    - redis:7-alpine
  variables:
    # The official postgres image refuses to start without POSTGRES_PASSWORD
    POSTGRES_PASSWORD: postgres
    POSTGRES_DB: test
    DATABASE_URL: "postgresql://postgres:postgres@postgres:5432/test"
    REDIS_URL: "redis://redis:6379"
  script:
    - npm ci
    - npm run test:integration
  artifacts:
    reports:
      junit: test-results.xml

test:e2e:
  stage: test
  # Cypress bundles its own browsers; a Selenium service is not used by Cypress
  image: cypress/browsers:latest
  script:
    - npm ci
    - npm run build
    - npm run test:e2e
  artifacts:
    when: always
    paths:
      - cypress/videos/
      - cypress/screenshots/
    expire_in: 1 week

# Build stage
build:docker:
  <<: *docker_build
  stage: build
  script:
    - docker build -t $DOCKER_REGISTRY:$CI_COMMIT_SHA -t $DOCKER_REGISTRY:latest .
    - docker push $DOCKER_REGISTRY:$CI_COMMIT_SHA
    - docker push $DOCKER_REGISTRY:latest
  only:
    - main
    - develop

# Security scanning
security:container-scan:
  stage: security
  image: aquasec/trivy:latest
  variables:
    # Let trivy pull from the private project registry
    TRIVY_USERNAME: $CI_REGISTRY_USER
    TRIVY_PASSWORD: $CI_REGISTRY_PASSWORD
  script:
    - trivy image --format template --template "@/contrib/gitlab.tpl" --output gl-container-scanning-report.json $DOCKER_REGISTRY:$CI_COMMIT_SHA
  artifacts:
    reports:
      container_scanning: gl-container-scanning-report.json

security:sast:
  stage: security
  image: node:18-alpine
  script:
    # npm audit inspects dependencies, not source code, and its JSON is not
    # GitLab's SAST schema; GitLab's maintained templates (below) are a better fit
    - npm audit --json > gl-sast-report.json || true
  artifacts:
    reports:
      sast: gl-sast-report.json

security:dependency-scan:
  stage: security
  image: owasp/dependency-check:latest
  script:
    - dependency-check.sh --project $CI_PROJECT_NAME --scan . --format JSON --out gl-dependency-scanning-report.json
  artifacts:
    reports:
      dependency_scanning: gl-dependency-scanning-report.json
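
For all three scan types, GitLab also ships maintained templates that emit the same report formats with less upkeep; including them can replace the hand-rolled jobs above:

include:
  - template: Security/SAST.gitlab-ci.yml
  - template: Security/Dependency-Scanning.gitlab-ci.yml
  - template: Security/Container-Scanning.gitlab-ci.yml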

# Staging deployment
deploy:staging:
  <<: *deploy_template
  stage: deploy-staging
  variables:
    KUBE_CONTEXT: staging-context
  script:
    - helm upgrade --install $CI_PROJECT_NAME $HELM_CHART_PATH
      --namespace $KUBERNETES_NAMESPACE_STAGING
      --set image.tag=$CI_COMMIT_SHA
      --set environment=staging
      --set ingress.host=staging.example.com
      --wait --timeout=300s
  environment:
    name: staging
    url: https://staging.example.com
  only:
    - develop

# Production deployment
deploy:production:
  <<: *deploy_template
  stage: deploy-production
  variables:
    KUBE_CONTEXT: production-context
  script:
    - helm upgrade --install $CI_PROJECT_NAME $HELM_CHART_PATH
      --namespace $KUBERNETES_NAMESPACE_PRODUCTION
      --set image.tag=$CI_COMMIT_SHA
      --set environment=production
      --set ingress.host=api.example.com
      --set resources.requests.cpu=500m
      --set resources.requests.memory=512Mi
      --set replicaCount=3
      --wait --timeout=600s
  environment:
    name: production
    url: https://api.example.com
  when: manual
  only:
    - main
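
If a production release misbehaves, helm can revert to the previous revision. A minimal manual rollback job (a suggested addition, not part of the original pipeline):

rollback:production:
  <<: *deploy_template
  stage: deploy-production
  variables:
    KUBE_CONTEXT: production-context
  script:
    # Omitting the revision rolls back to the previous release
    - helm rollback $CI_PROJECT_NAME --namespace $KUBERNETES_NAMESPACE_PRODUCTION
  when: manual
  only:
    - main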

# Database migrations
migrate:staging:
  stage: deploy-staging
  image: node:18-alpine
  script:
    - npm ci
    - npm run migrate
  environment:
    name: staging
  variables:
    DATABASE_URL: $STAGING_DATABASE_URL
  only:
    - develop

migrate:production:
  stage: deploy-production
  image: node:18-alpine
  script:
    - npm ci
    - npm run migrate
  environment:
    name: production
  variables:
    DATABASE_URL: $PRODUCTION_DATABASE_URL
  when: manual
  only:
    - main
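
The migration jobs assume STAGING_DATABASE_URL and PRODUCTION_DATABASE_URL are defined as masked, protected CI/CD variables under Settings > CI/CD > Variables; CI_REGISTRY_USER and CI_REGISTRY_PASSWORD, used in the build template, are predefined by GitLab.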

Step 3: Advanced Pipeline with Parallel Jobs

Create .gitlab-ci-advanced.yml (GitLab reads only .gitlab-ci.yml by default, so pull this file in with include:local or use it as the main configuration):

stages:
  - validate
  - test
  - build
  - security
  - deploy
  - verify
  - cleanup

# Parallel testing
test:unit:
  stage: test
  parallel:
    matrix:
      - NODE_VERSION: ["16", "18", "20"]
  image: node:$NODE_VERSION-alpine
  script:
    - npm ci
    - npm run test:unit

test:browsers:
  stage: test
  parallel:
    matrix:
      - BROWSER: ["chrome", "firefox", "edge"]
  image: cypress/browsers:latest
  script:
    - npm ci
    - npm run test:e2e -- --browser $BROWSER

# Multi-architecture builds
build:multi-arch:
  stage: build
  image: docker:20.10.16
  services:
    - docker:20.10.16-dind
  before_script:
    - docker buildx create --use --name multi-arch-builder
    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $CI_REGISTRY
  script:
    - docker buildx build --platform linux/amd64,linux/arm64 
      -t $DOCKER_REGISTRY:$CI_COMMIT_SHA 
      -t $DOCKER_REGISTRY:latest 
      --push .
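
Building the linux/arm64 image inside dind relies on QEMU emulation, which is not installed by default. On a privileged runner, a common approach (an addition here, not in the original job) is to install binfmt handlers at the top of before_script:

    - docker run --privileged --rm tonistiigi/binfmt --install arm64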

# Canary deployment
deploy:canary:
  stage: deploy
  image: dtzar/helm-kubectl:latest
  script:
    - helm upgrade --install $CI_PROJECT_NAME-canary $HELM_CHART_PATH
      --namespace production
      --set image.tag=$CI_COMMIT_SHA
      --set environment=production
      --set canary.enabled=true
      --set canary.weight=10
      --wait
  environment:
    name: production/canary
    url: https://api.example.com
  when: manual
  only:
    - main
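
For canary.weight to take effect, the chart must turn it into real traffic splitting. With the NGINX Ingress Controller (which the chart's ingress className in Step 4 already implies), the chart's canary ingress could carry annotations like these; a sketch, not part of the original chart:

  annotations:
    nginx.ingress.kubernetes.io/canary: "true"
    nginx.ingress.kubernetes.io/canary-weight: "{{ .Values.canary.weight }}"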

# Production deployment (blue-green)
deploy:blue-green:
  stage: deploy
  image: dtzar/helm-kubectl:latest
  script:
    - |
      # Determine current active slot
      ACTIVE_SLOT=$(kubectl get service $CI_PROJECT_NAME-service -n production -o jsonpath='{.spec.selector.slot}')
      NEW_SLOT=$([ "$ACTIVE_SLOT" = "blue" ] && echo "green" || echo "blue")
      
      echo "Deploying to $NEW_SLOT slot (currently $ACTIVE_SLOT is active)"
      
      # Deploy to the inactive slot (trailing backslashes are required inside this literal block)
      helm upgrade --install $CI_PROJECT_NAME-$NEW_SLOT $HELM_CHART_PATH \
        --namespace production \
        --set image.tag=$CI_COMMIT_SHA \
        --set slot=$NEW_SLOT \
        --wait
      
      # Switch traffic after verification
      kubectl patch service $CI_PROJECT_NAME-service -n production -p '{"spec":{"selector":{"slot":"'$NEW_SLOT'"}}}'
      
      echo "Traffic switched to $NEW_SLOT"
  environment:
    name: production
    url: https://api.example.com
  when: manual
  only:
    - main
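
The script above assumes a Service named $CI_PROJECT_NAME-service whose selector includes a slot label; a minimal sketch of such a Service (names chosen to match the script's assumptions):

apiVersion: v1
kind: Service
metadata:
  name: nodejs-app-service
  namespace: production
spec:
  selector:
    app: nodejs-app
    slot: blue          # the pipeline patches this to "green" on cutover
  ports:
    - port: 80
      targetPort: 3000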

# Verification jobs
verify:health-check:
  stage: verify
  image: curlimages/curl:latest
  script:
    - |
      for i in $(seq 1 30); do    # BusyBox sh has no {1..30} brace expansion
        if curl -f https://api.example.com/health; then
          echo "Health check passed"
          exit 0
        fi
        echo "Health check failed, retrying in 10s..."
        sleep 10
      done
      echo "Health check failed after 5 minutes"
      exit 1
  needs:
    - deploy:blue-green

verify:load-test:
  stage: verify
  image: grafana/k6:latest
  script:
    # --summary-export writes the JSON summary the artifact below expects
    - k6 run --vus 50 --duration 2m --summary-export=performance.json load-test.js
  artifacts:
    reports:
      performance: performance.json
  needs:
    - verify:health-check
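
The job runs load-test.js, which this post never shows. A minimal k6 script against the health endpoint might look like this (URL and thresholds are illustrative):

import http from 'k6/http';
import { check, sleep } from 'k6';

export const options = {
  thresholds: {
    http_req_failed: ['rate<0.01'],    // under 1% errors
    http_req_duration: ['p(95)<500'],  // 95% of requests under 500ms
  },
};

export default function () {
  const res = http.get('https://api.example.com/health');
  check(res, { 'status is 200': (r) => r.status === 200 });
  sleep(1);
}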

# Cleanup old deployments
cleanup:old-deployments:
  stage: cleanup
  image: dtzar/helm-kubectl:latest
  script:
    - |
      # Keep only the 3 most recent releases (sorted by date, newest first)
      helm list --namespace production --date --reverse --short |
      grep "^$CI_PROJECT_NAME" |
      tail -n +4 |
      xargs -r helm uninstall --namespace production
  when: always
  needs:
    - verify:load-test

Step 4: Helm Chart for Application

Create the Helm chart files. First, Chart.yaml:

apiVersion: v2
name: nodejs-app
description: Node.js application Helm chart
version: 0.1.0
appVersion: "1.0.0"

Then values.yaml:

replicaCount: 2

image:
  repository: registry.gitlab.com/company/nodejs-app
  pullPolicy: IfNotPresent
  tag: latest

environment: development

service:
  type: ClusterIP
  port: 80
  targetPort: 3000

ingress:
  enabled: true
  className: nginx
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
  host: api.example.com
  tls:
    enabled: true

resources:
  limits:
    cpu: 1000m
    memory: 1Gi
  requests:
    cpu: 500m
    memory: 512Mi

autoscaling:
  enabled: false
  minReplicas: 2
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80

canary:
  enabled: false
  weight: 0

Finally, templates/deployment.yaml:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "nodejs-app.fullname" . }}
  labels:
    {{- include "nodejs-app.labels" . | nindent 4 }}
    {{- if .Values.canary.enabled }}
    slot: canary
    {{- else if .Values.slot }}
    slot: {{ .Values.slot }}
    {{- end }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "nodejs-app.selectorLabels" . | nindent 6 }}
      {{- if .Values.canary.enabled }}
      slot: canary
      {{- else if .Values.slot }}
      slot: {{ .Values.slot }}
      {{- end }}
  template:
    metadata:
      labels:
        {{- include "nodejs-app.selectorLabels" . | nindent 8 }}
        {{- if .Values.canary.enabled }}
        slot: canary
        {{- else if .Values.slot }}
        slot: {{ .Values.slot }}
        {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.service.targetPort }}
              protocol: TCP
          env:
            - name: NODE_ENV
              value: {{ .Values.environment }}
          livenessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: http
            initialDelaySeconds: 5
            periodSeconds: 5
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
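
The Deployment is only part of the chart; a matching templates/service.yaml wires up the port settings from values.yaml (a sketch consistent with the helper templates used above; the shared slot-selecting Service for blue-green cutover was shown earlier and lives outside the chart):

apiVersion: v1
kind: Service
metadata:
  name: {{ include "nodejs-app.fullname" . }}
  labels:
    {{- include "nodejs-app.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  selector:
    {{- include "nodejs-app.selectorLabels" . | nindent 4 }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      targetPort: {{ .Values.service.targetPort }}
      protocol: TCP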

Summary

GitLab CI/CD with Kubernetes enables automated testing, security scanning, and deployment strategies like blue-green and canary deployments. Use Helm for templating, parallel jobs for performance, and comprehensive verification for reliable releases.

