k8s-nginx hands-on deployment (1)

YAML resource manifests

deploy.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  name: api-proxy-config-offline-map
data:
  api_proxy.conf: |
    server {
      listen 80;
      server_name offline-map;
      client_max_body_size 10m;
      # serve the front-end static files from this directory
      location / {
        index index.html index.htm;
        root /usr/share/nginx/html;
        try_files $uri $uri/ /index.html;
      }
    }
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: offline-map
  labels:
    k8s-app: offline-map
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: offline-map
  template:
    metadata:
      name: offline-map
      labels:
        k8s-app: offline-map
    spec:
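      # nodeName pins the Pod to the node named k8s-master, bypassing the scheduler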
      nodeName: k8s-master
      containers:
        - name: offline-map
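          # '{IMAGE}' is a placeholder; run_deploy.sh (below) substitutes the real image reference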
          image: '{IMAGE}'
          imagePullPolicy: IfNotPresent
          volumeMounts:
          - name: nginx-config
            mountPath: /etc/nginx/conf.d
      volumes:
        - name: nginx-config
          configMap:
            name: api-proxy-config-offline-map
---
apiVersion: v1
kind: Service
metadata:
  labels:
    name: offline-map
  name: offline-map
spec:
  ports:
  - name: port80
    # Map the Service port onto a port on every Node, so that any Node in the
    # cluster can serve as the Service's entry point, i.e. NodeIP:NodePort
    nodePort: 32001 # externally exposed port
    port: 80 # Service port
    protocol: TCP
    targetPort: 80 # Pod (container) port
  selector:
    k8s-app: offline-map
  sessionAffinity: None
  type: NodePort
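
Because the ConfigMap is mounted over /etc/nginx/conf.d, it shadows the image's default.conf and api_proxy.conf becomes the only active vhost. Note also that nodeName requires a node literally named k8s-master. Outside of CI, the manifest can be sanity-checked with a sketch like the following (the nginx:1.25 image and the default namespace are illustrative assumptions, not part of the pipeline):

# substitute a throwaway image for the {IMAGE} placeholder and apply
sed "s@{IMAGE}@nginx:1.25@g" deploy.yaml | kubectl apply -n default -f -

# wait for the rollout, then hit the NodePort from any cluster node
kubectl rollout status deployment/offline-map -n default
curl -I http://<node-ip>:32001/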

run_deploy.sh

#!/bin/bash
# @zcf: this script must live in the same directory as the matching k8s manifest, or it will not work!!!
path=$(cd "$(dirname "$0")"; pwd)
cd ${path}

res=0
# image name (first script argument)
image_name=$1
echo "image_name=${image_name}"
sed -i "s@{IMAGE}@$image_name@g" deploy.yaml
r=$?
res=$((res + r))
echo "$res"

# Abort if the image substitution failed; the actual kubectl apply
# happens later, in the CI deploy stage
if [ $res -ne 0 ]; then
    exit 1
fi
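
In the pipeline the script receives the full image reference as its first argument and rewrites the {IMAGE} placeholder in deploy.yaml in place. A made-up invocation (the tag shown is only an example of what ${TAG} expands to):

./run_deploy.sh 192.168.2.103:80/512cert/offlinemap:main-1a2b3c4d-build42-1.0.0
grep 'image:' deploy.yaml
#   image: '192.168.2.103:80/512cert/offlinemap:main-1a2b3c4d-build42-1.0.0'

Note the substitution is one-shot: once {IMAGE} is gone, re-running the script changes nothing. That is fine here, because CI checks out a fresh deploy.yaml on every build.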

.gitlab-ci.yml

variables:
  SOFT_VERSION: ${SOFT_VERSION}
  DOCK_HUB: 192.168.2.103:80
  DOCK_HUB_USER: admin
  DOCK_HUB_PASSWD: '123456'
  HUB_GROUP: 512cert
  HUB_GROUP_PASSWD: 1234QWERasdf
  LAY_OUT_URL: ${DOCK_HUB}/512cert
  TEST_INFRA_NAMESPACE: ${TEST_INFRA_NAMESPACE}
  PROD_INFRA_NAMESPACE: ${PROD_INFRA_NAMESPACE}

stages:
  - build
  - test-deploy
  - prod-deploy

before_script:
  # dependency fetch config for npm install (force https for git:// URLs)
  - git config --global url."https://".insteadOf git:// 
  - docker login -u ${DOCK_HUB_USER} -p${DOCK_HUB_PASSWD} http://${DOCK_HUB}
  # TAG: version string used as the image tag
  - export TM=`date +%y%m%d%H%M`
  - export TAG="${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA:0:8}-build${CI_PIPELINE_ID}-${SOFT_VERSION}" 

# build and push the image
build:
  stage: build
  tags:
    - baler # runner tag; the tag selects which server performs the build
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"
      when: on_success
    - if: $CI_PIPELINE_SOURCE != "schedule"
      when: manual
  artifacts:
    paths:
      - scripts/k8s/deploy.yaml
  script:
    - docker build -t ${LAY_OUT_URL}/offlinemap:${TAG} .
    - docker push ${LAY_OUT_URL}/offlinemap:${TAG}
    - scripts/k8s/run_deploy.sh ${LAY_OUT_URL}/offlinemap:${TAG}
    - docker rmi ${LAY_OUT_URL}/offlinemap:${TAG}

# deploy the service
test-deploy:
  stage: test-deploy
  tags:
    - test-env # tag of the runner on the k8s master node; the script below executes there
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"
      when: on_success
    - if: $CI_PIPELINE_SOURCE != "schedule"
      when: manual
  script:
    - kubectl apply -f scripts/k8s/deploy.yaml -n ${TEST_INFRA_NAMESPACE}
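
The build job uploads the rewritten scripts/k8s/deploy.yaml as an artifact, so test-deploy applies a file that already carries the concrete image tag. A prod-deploy stage is declared above but no job is defined for it; it would presumably mirror test-deploy against ${PROD_INFRA_NAMESPACE}. After the deploy job finishes, the rollout can be verified on the master node, for example:

kubectl -n ${TEST_INFRA_NAMESPACE} rollout status deployment/offline-map
kubectl -n ${TEST_INFRA_NAMESPACE} get pods -l k8s-app=offline-map -o wide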


The tags values in .gitlab-ci.yml must match the tags configured on the corresponding GitLab runners.
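
For reference, runner tags are assigned when the runner is registered; a hedged sketch (the URL and token are placeholders):

gitlab-runner register \
  --non-interactive \
  --url http://<gitlab-host>/ \
  --registration-token <registration-token> \
  --executor shell \
  --tag-list "baler"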

