基于 Helm + Argo CD 的 Kubernetes GitOps 多环境( dev/test/prod)自动化部署实践

1、整体架构

image

当前总体流水线

  • 提交dockerfile至gitlab -- CI 构建镜像 -- 推送至 Harbor -- CI 更新helm chart中使用harbor镜像的版本 -- ArgoCD 自动同步gitlab -- kustomize 管理多环境下的values.yaml -- K8s 自动完成部署

优化方向

  • 提交helm chart代码至gitlab -- CI 构建镜像 -- 推送至 Harbor -- ArgoCD 自动同步harbor -- image updater更新镜像标签
  • CI中使用buildkit构建镜像

2、Dockerfile仓库配置

image

Dockerfile

# Base image pulled from the local GitLab container registry mirror of nginx:alpine.
FROM  192.168.40.15:5005/mygroup/dockerproject/nginx:alpine
# Overwrite the default index page so each build's content is visibly distinct.
RUN echo "Hello Docker Build in GitLab CI 10" > /usr/share/nginx/html/index.html

.gitlab-ci.yaml

  • 在脚本里先 export IMAGE_FULL 拼接上 tag,Kaniko 使用 --destination 引用镜像名称
  • 不要在 Kaniko 命令里直接写 ${…},它不会被解析
  • 在之前的实践中使用docker:dind来构建镜像,但是 docker:dind需要 特权容器(--privileged)才能启动自己的 Docker daemon。使用kaniko,可以在没有特权的情况下构建并推送镜像
# GitLab CI pipeline: build the image with Kaniko, then bump the Helm
# values tag in the GitOps deploy repo so Argo CD rolls out the new image.
stages:
  - build
  - deploy_prod

variables:
  # Kaniko reads registry credentials from $DOCKER_CONFIG/config.json.
  DOCKER_CONFIG: /kaniko/.docker/

# Build stage: Kaniko builds and pushes the image without requiring a
# privileged (docker-in-docker) runner.
build:
  stage: build
  tags: [k8s]
  image: ${REGISTRY}/library/kaniko-executor:debug
  script:
    - mkdir -p /kaniko/.docker
    # Write Harbor credentials for Kaniko's registry push.
    - echo "{\"auths\":{\"${REGISTRY}\":{\"username\":\"${HARBOR_USERNAME}\",\"password\":\"${HARBOR_PASSWORD}\"}}}" > /kaniko/.docker/config.json
    # Compose the full image reference in the shell first; Kaniko does not
    # expand ${...} placeholders inside its own arguments.
    - export IMAGE_FULL="${REGISTRY}/${PROJECT}/${IMAGE_NAME}:${CI_COMMIT_SHORT_SHA}"
    - echo "Building image $IMAGE_FULL"
    - |
      /kaniko/executor \
      --context $CI_PROJECT_DIR \
      --dockerfile $CI_PROJECT_DIR/Dockerfile \
      --destination $IMAGE_FULL \
      --oci-layout-path $CI_PROJECT_DIR/output \
      --skip-tls-verify
  artifacts:
    paths:
      - $CI_PROJECT_DIR/output
    expire_in: 1h

# Deploy stage (prod): clone the deploy repo, rewrite the image tag in
# values-prod.yaml, and push it back; Argo CD then syncs the change.
# Only a changed file is committed, so re-runs on the same SHA are no-ops.
deploy_prod:
  stage: deploy_prod
  tags: [k8s]
  image: core.harbor.domain/library/alpine-git:2.36.2

  script:
    - |
      git clone -b  v1 http://${GITLAB_USER}:${GITLAB_TOKEN}@${GITLAB_ADDR}/k8s/k8s-deploy.git
      cd k8s-deploy/nginx/envs/prod

       # 更新 Helm values
      sed -i "s|tag: .*|tag: ${CI_COMMIT_SHORT_SHA}|g" values-prod.yaml

      git config --global user.email "ci@gitlab.com"
      git config --global user.name "gitlab-ci"

      git add .
      if git diff --cached --quiet; then
        echo "No changes"
      else
        git commit -m "ci(prod): update image to ${CI_COMMIT_SHORT_SHA}"
        git push origin HEAD:v1
      fi

  only:
    - v1

变量

# Global (group/instance-level) CI/CD variables.
REGISTRY: core.harbor.domain
PROJECT: myproject
IMAGE_NAME: nginx
IMAGE_TAG: ${CI_COMMIT_SHORT_SHA}
IMAGE_FULL: ${REGISTRY}/${PROJECT}/${IMAGE_NAME}:${IMAGE_TAG}
GITLAB_ADDR: 192.168.40.15

# Project-level variables. Store credentials as Masked (and Protected)
# CI/CD variables in GitLab — never commit or publish real values.
HARBOR_USERNAME: admin
HARBOR_PASSWORD: Harbor12345
GITLAB_USER: root
# NOTE(review): the original text published a real personal access token;
# it is redacted here to a placeholder and should be rotated in GitLab.
GITLAB_TOKEN: glpat-xxxxxxxxxxxxxxxxxxxx

runner配置

  • 注册runner时使用的executor为k8s
  • 需要提前创建命名空间gitlab-runner
# Kubernetes executor settings for the GitLab runner: where job pods run
# and how the runner authenticates to the cluster.
[runners.kubernetes]
  host = "https://192.168.40.11:6443"    # Kubernetes API server address
  bearer_token = "<TOKEN>"      # use either bearer_token or service_account, not both
  namespace = "gitlab-runner"   # namespace where CI job pods are created
  service_account = "gitlab-runner"
  image = "core.harbor.domain/library/alpine-git:2.36.2"    # default job image
  ca_file = "/etc/gitlab-runner/config/ca.crt"    # cluster CA certificate for TLS verification

快速获取runner配置内容

  • 获取token
kubectl create token gitlab-runner -n gitlab-runner --duration=315360000s
  • 获取 API Server。即runner配置中的host字段
kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'
  • 创建 ServiceAccount + rbac权限
kubectl create namespace gitlab-runner
kubectl create serviceaccount gitlab-runner -n gitlab-runner

# Namespaced RBAC Role granting the runner's ServiceAccount the permissions
# it needs to create and manage CI job pods and their supporting resources.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: gitlab-runner-role
  namespace: gitlab-runner
rules:
  # Core API group: job pods (including exec/attach/log subresources),
  # plus secrets, configmaps, services and PVCs used by CI jobs.
  - apiGroups: [""]
    resources: ["pods", "pods/exec", "pods/attach", "pods/log", "secrets", "configmaps", "services", "persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  # Workload controllers, in case jobs deploy or inspect them.
  - apiGroups: ["apps"]
    resources: ["deployments", "statefulsets", "replicasets"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  # Batch workloads created by CI jobs.
  - apiGroups: ["batch"]
    resources: ["jobs", "cronjobs"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]

kubectl create rolebinding gitlab-runner-binding \
  --role=gitlab-runner-role \
  --serviceaccount=gitlab-runner:gitlab-runner \
  -n gitlab-runner
  • 获取k8s集群ca
kubectl config view --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}' | base64 -d > ca.crt

3、helm仓库配置

image

创建 Helm Chart

helm create nginx-app
  • 目录结构
helm-repo/
├── apps/
│   └── nginx/
│       ├── Chart.yaml
│       ├── values.yaml
│       ├── templates/
│       │   ├── rollout.yaml
│       │   └── service.yaml
│       └── envs/
│           ├── dev/
│           │   └── values-dev.yaml
│           ├── test/
│           │   └── values-test.yaml
│           └── prod/
│               └── values-prod.yaml

image

Chart.yaml

# Helm chart metadata (apiVersion v2 = Helm 3 chart format).
apiVersion: v2
name: nginx
version: 0.1.0
description: Nginx chart

/helm-repo/apps/nginx/values.yaml

# Default values for the nginx chart; per-environment overrides live in
# envs/<env>/values-<env>.yaml.
replicaCount: 2

image:
  repository: core.harbor.domain/myproject/nginx
  tag: "latest"
  pullPolicy: IfNotPresent

service:
  type: ClusterIP
  port: 80
  targetPort: 80

resources:
  requests:
    cpu: "50m"
    memory: "64Mi"
  limits:
    cpu: "100m"
    memory: "128Mi"

probes:
  readiness:
    path: /
    initialDelaySeconds: 5
    periodSeconds: 10
  liveness:
    path: /
    initialDelaySeconds: 10
    periodSeconds: 10

strategy:
  maxSurge: 25%
  maxUnavailable: 25%

# Default (empty) canary steps so templates/rollout.yaml can render even
# when an environment values file does not define
# rollout.strategy.canary.steps; previously a bare `helm template` on the
# base values failed with a nil-pointer error on that path.
rollout:
  strategy:
    canary:
      steps: []

labels:
  app: demo-nginx

/helm-repo/apps/nginx/envs/prod/values-prod.yaml

  • 金丝雀配置
# Production overrides: three replicas, the "stable" image tag, and a
# progressive canary rollout (10% -> 30% -> 60%), pausing 60s per step.
replicaCount: 3

image:
  tag: stable

rollout:
  strategy:
    canary:
      steps:
        - setWeight: 10
        - pause:
            duration: 60s
        - setWeight: 30
        - pause:
            duration: 60s
        - setWeight: 60
        - pause:
            duration: 60s

/helm-repo/apps/nginx/templates/rollout.yaml

  • 用 Rollout.yaml 替换 Deployment.yaml
{{- /*
Argo Rollouts canary Rollout for the chart's workload (replaces a plain
Deployment). Canary steps come from .Values.rollout.strategy.canary.steps;
`dig` with an empty-list default keeps rendering from failing with a
nil-pointer error when an environment values file omits the rollout key.
*/}}
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: {{ .Release.Name }}
  labels:
    app: {{ .Values.labels.app }}
spec:
  replicas: {{ .Values.replicaCount }}
  strategy:
    canary:
      steps:
{{ toYaml (dig "strategy" "canary" "steps" (list) (.Values.rollout | default dict)) | indent 8 }}
  selector:
    matchLabels:
      app: {{ .Values.labels.app }}
  template:
    metadata:
      labels:
        app: {{ .Values.labels.app }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - containerPort: {{ .Values.service.targetPort }}

          resources:
            requests:
              cpu: {{ .Values.resources.requests.cpu }}
              memory: {{ .Values.resources.requests.memory }}
            limits:
              cpu: {{ .Values.resources.limits.cpu }}
              memory: {{ .Values.resources.limits.memory }}

          # Probes hit the container port directly on the configured paths.
          readinessProbe:
            httpGet:
              path: {{ .Values.probes.readiness.path }}
              port: {{ .Values.service.targetPort }}
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            periodSeconds: {{ .Values.probes.readiness.periodSeconds }}

          livenessProbe:
            httpGet:
              path: {{ .Values.probes.liveness.path }}
              port: {{ .Values.service.targetPort }}
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            periodSeconds: {{ .Values.probes.liveness.periodSeconds }}

/helm-repo/apps/nginx/templates/service.yaml

{{- /*
Service fronting the Rollout's pods: selects on the shared app label and
forwards service.port to the container's targetPort over TCP.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}
  labels:
    app: {{ .Values.labels.app }}
spec:
  type: {{ .Values.service.type }}
  selector:
    app: {{ .Values.labels.app }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      targetPort: {{ .Values.service.targetPort }}
      protocol: TCP

4、ArgoCD配置

  • 通过注解使用 Argo CD Image Updater 自动更新镜像
  • 添加仓库连接,使argocd可以直接访问helm repo

安装Rollouts

安装argo-rollouts

kubectl create namespace argo-rollouts
kubectl apply -n argo-rollouts -f https://github.com/argoproj/argo-rollouts/releases/latest/download/install.yaml
kubectl get all -n argo-rollouts
  • 查看安装情况

image

安装Kubectl 插件

wget https://github.com/argoproj/argo-rollouts/releases/download/v1.8.3/kubectl-argo-rollouts-linux-amd64
mv kubectl-argo-rollouts-linux-amd64 /usr/local/bin/kubectl-argo-rollouts
chmod u+x /usr/local/bin/kubectl-argo-rollouts
kubectl argo rollouts version

image

安装dashboard插件

wget https://github.com/argoproj/argo-rollouts/releases/download/v1.8.3/dashboard-install.yaml
kubectl apply -f dashboard-install.yaml -n argo-rollouts

创建ingress

# Write the Ingress manifest for the Argo Rollouts dashboard and apply it.
# Fix: the original heredoc was never terminated, so the trailing
# `kubectl apply` line would have been swallowed into the generated file.
cat <<EOF> argocd-dashboard-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argo-rollouts
  namespace: argo-rollouts
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - argo-rollouts.clx.cn
      secretName: ingress-tls
  rules:
    - host: argo-rollouts.clx.cn
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: argo-rollouts-dashboard
                port:
                  number: 3100
EOF

kubectl apply -f argocd-dashboard-ingress.yaml
  • 查看ingress
kubectl get ingress -A

image

配置hosts后访问验证

image

5、kustomize管理多套application

  • 用 Kustomize 管理 ArgoCD Application,实现三环境差异化配置复用
  • 目录结构
argocd-repo/
├── argocd-apps/
│   ├── base/
│   │   ├── application.yaml
│   │   └── kustomization.yaml
│   └── overlays/
│       ├── dev/
│       │   ├── kustomization.yaml
│       │   └── patch.yaml
│       ├── test/
│       │   ├── kustomization.yaml
│       │   └── patch.yaml
│       └── prod/
│           ├── kustomization.yaml
│           └── patch.yaml

image

base/application.yaml

# Base Argo CD Application: deploys the nginx Helm chart from the GitOps
# repo (branch v1) and auto-syncs it into the cluster.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: nginx
  namespace: argocd
  annotations:
    # Argo CD Image Updater: track this Harbor repository under the alias
    # "nginx", follow the most recently built tag ("latest" strategy), and
    # write tag updates back to git so desired state stays in the repo.
    argocd-image-updater.argoproj.io/image-list: nginx=core.harbor.domain/myproject/nginx
    argocd-image-updater.argoproj.io/nginx.update-strategy: latest
    argocd-image-updater.argoproj.io/write-back-method: git
spec:
  project: default
  source:
    repoURL: http://192.168.40.15/k8s/k8s-deploy.git
    targetRevision: v1
    path: nginx
  destination:
    server: https://kubernetes.default.svc
    namespace: default
  syncPolicy:
    # Automated sync: prune resources removed from git and revert manual drift.
    automated:
      prune: true
      selfHeal: true

base/kustomization.yaml

# Base kustomization: exposes the Application manifest for the
# per-environment overlays to patch.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - application.yaml

overlays/prod/patch.yaml

# Prod patch: retarget the Application to the prod namespace and inject
# production Helm values (replicas, pinned tag, canary steps).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: nginx
  namespace: argocd
  annotations:
    # NOTE(review): this restricts Image Updater to tags matching ^v.*,
    # but the CI pipeline pushes commit-SHA tags — confirm the intended
    # tagging scheme, otherwise the updater will never find a candidate.
    argocd-image-updater.argoproj.io/nginx.allow-tags: regexp:^v.*
spec:
  destination:
    namespace: prod
  source:
    helm:
      valueFiles:
        - envs/prod/values-prod.yaml
      # Inline values take precedence over valueFiles.
      values: |
        replicaCount: 3

        image:
          # Quoted so an all-digit short SHA is not parsed as a number.
          tag: "5b258db4"    # CI会把tag替换为构建过程中使用的tag

        rollout:
          strategy:
            canary:
              steps:
                - setWeight: 10
                - pause: { duration: 60s }
                - setWeight: 30
                - pause: { duration: 60s }
                - setWeight: 60
                - pause: { duration: 60s }

overlays/prod/kustomization.yaml

  • namePrefix字段区分多个环境下的application,避免资源覆盖
# Prod overlay: stamps the base Application for the prod environment.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - ../../base

namePrefix: prod-    # prefix the Application name so the three environments don't collide

patches:
  # Apply prod-specific fields onto the base Application.
  - path: patch.yaml
    target:
      kind: Application
      name: nginx
      namespace: argocd

使用kustomize部署chart

  • 分别部署三套
kubectl apply -k overlays/dev
kubectl apply -k overlays/test
kubectl apply -k overlays/prod
  • 查看rollouts状态
kubectl argo rollouts get rollout prod-nginx -n prod

image

  • 查看argocd web ui

image

6、测试金丝雀效果

测试前查看

  • 访问service,显示 Hello Docker Build in GitLab CI 10

image

  • kubectl argo rollouts get rollout prod-nginx -n prod

image

  • 修改dockerfile后推送至gitlab。
  • argocd web ui 点击同步
  • 再次部署 kubectl apply -k overlays/prod/
  • 可以看到创建了一个新的pod

image

  • 旧版本的pod会逐步减少,直到完全被新版本替代

image

image

  • 再次访问service,显示 Hello Docker Build in GitLab CI 20

image

  • 查看 rollout dashboard

image

posted @ 2026-04-10 15:43  立勋  阅读(2)  评论(0)    收藏  举报