28-K8S Storage Volumes

I. Common storage volumes: emptyDir hands-on

1. emptyDir overview

emptyDir is an "empty directory" (temporary) volume type that backs a specified path inside a container with storage on the node.

Its defining trait: the data shares the Pod's life cycle, so it is removed when the Pod is deleted.

Use cases (a memory-backed variant is sketched after this list):

1. Sharing data between containers within the same Pod;

2. Temporary (scratch) data storage;

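As a side note (not used in this lesson), an emptyDir can also be backed by memory (tmpfs) and capped in size; a minimal sketch of just the volume definition:

      volumes:
      - name: cache
        emptyDir:
          # Back the volume with RAM (tmpfs) instead of node disk
          medium: Memory
          # Cap usage; for a memory-backed volume this counts against the container's memory limit
          sizeLimit: 64Mi
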
2. Hands-on examples

2.1 Backing a specified container directory with an emptyDir volume

[root@master231 volumes]# cat 01-deploy-emptyDir.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-emptydir
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      # Define the volumes
      volumes:
        # Declare the volume type as emptyDir (an "empty directory")
      - emptyDir: {}
        # Name the volume
        name: data
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        # Define the volume mounts
        volumeMounts:
          # Name of the volume to mount (must match an entry under volumes)
        - name: data
          # Mount path inside the container; files already at this path are masked by the mount (they appear wiped)
          mountPath: /usr/share/nginx/html
          
          
[root@master231 volumes]# kubectl apply -f 01-deploy-emptyDir.yaml 
deployment.apps/deploy-xiuxian-emptydir created


2.2 Locating the emptyDir data on the host

[root@master231 volumes]# kubectl get pods -o wide 
NAME                                       READY   STATUS        RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-xiuxian-emptydir-745ffd54fc-cqd6d   1/1     Running       0          8s    10.100.140.74    worker233   <none>           <none>
deploy-xiuxian-emptydir-745ffd54fc-gzv92   1/1     Running       0          8s    10.100.203.162   worker232   <none>           <none>
deploy-xiuxian-emptydir-745ffd54fc-v9smc   1/1     Running       0          8s    10.100.140.75    worker233   <none>           <none>
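
On the node, each emptyDir lives under /var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~empty-dir/<volume-name>/. The Pod UID used in the path below can be looked up with jsonpath, e.g. for the first Pod on worker233 above:

[root@master231 volumes]# kubectl get pod deploy-xiuxian-emptydir-745ffd54fc-cqd6d -o jsonpath='{.metadata.uid}'
9536bfe4-bf5f-4837-8cc3-853e0e4fb054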

# On worker233, write data into the volume's host path
[root@worker233 ~]# echo 111111 >  /var/lib/kubelet/pods/9536bfe4-bf5f-4837-8cc3-853e0e4fb054/volumes/kubernetes.io~empty-dir/data/index.html
[root@worker233 ~]# ll /var/lib/kubelet/pods/9536bfe4-bf5f-4837-8cc3-853e0e4fb054/volumes/kubernetes.io~empty-dir/data/
total 12
drwxrwxrwx 2 root root 4096 Apr 15 15:30 ./
drwxr-xr-x 3 root root 4096 Apr 15 15:28 ../
-rw-r--r-- 1 root root    7 Apr 15 15:30 index.html

# Verify from master231
[root@master231 volumes]# curl 10.100.140.74
111111

3. emptyDir for data sharing between containers in one Pod

3.1 Write the manifest

[root@master231 volumes]# cat 02-deploy-emptyDir.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-emptydir-multiple
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      volumes:
      - emptyDir: {}
        name: data
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        ports:
        - containerPort: 80
        volumeMounts:
        - name: data
          mountPath: /usr/share/nginx/html
      - name: c2
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        command:
        - /bin/sh
        - -c
        - echo ${HOSTNAME} >> /dingzhiyan/index.html; tail -f /etc/hosts
        volumeMounts:
        - name: data
          mountPath: /dingzhiyan
[root@master231 volumes]# 
[root@master231 volumes]# kubectl apply -f 02-deploy-emptyDir.yaml 
deployment.apps/deploy-xiuxian-emptydir-multiple created

3.2 Verify

[root@master231 volumes]# kubectl get pods -o wide
NAME                                                READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-xiuxian-emptydir-multiple-769f9d954f-mxcts   2/2     Running   0          35s   10.100.203.167   worker232   <none>           <none>
deploy-xiuxian-emptydir-multiple-769f9d954f-ngbpf   2/2     Running   0          35s   10.100.203.166   worker232   <none>           <none>
deploy-xiuxian-emptydir-multiple-769f9d954f-x6nn9   2/2     Running   0          35s   10.100.140.102   worker233   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.203.167 
deploy-xiuxian-emptydir-multiple-769f9d954f-mxcts
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.203.166 
deploy-xiuxian-emptydir-multiple-769f9d954f-ngbpf
[root@master231 volumes]# 
[root@master231 volumes]# curl  10.100.140.102 
deploy-xiuxian-emptydir-multiple-769f9d954f-x6nn9

II. Common storage volumes: hostPath

1. hostPath overview

hostPath gives a Pod's containers access to an arbitrary path on the worker node's host filesystem.

Use cases (the optional type field is sketched after this list):

  • 1. Sharing host data on a worker node into a specified path in the Pod's containers;
  • 2. Syncing the host's time zone into containers;
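
hostPath also takes an optional type field that validates, or creates, the host path before mounting; a minimal sketch of just the volume definition (not used in the manifest below):

      volumes:
      - name: data
        hostPath:
          path: /linux96
          # Create the directory on the host if it is missing; other values include
          # Directory, File, FileOrCreate, Socket, CharDevice, BlockDevice
          type: DirectoryOrCreate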

2. Hands-on example

[root@master231 volumes]# cat 03-deploy-hostPath.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-hostpath
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      volumes:
      - emptyDir: {}
        name: data01
        # Declare a volume of type hostPath
      - hostPath:
          # Host path on the worker node exposed to the container; created automatically if it does not exist.
          path: /linux96
        name: data02
      - name: data03
        hostPath:
          path: /etc/localtime
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        ports:
        - containerPort: 80
        volumeMounts:
        - name: data01
          mountPath: /usr/share/nginx/html
        - name: data02
          mountPath: /dingzhiyan
        - name: data03
          mountPath: /etc/localtime

3. Verify

[root@master231 volumes]# kubectl apply -f 03-deploy-hostPath.yaml 
deployment.apps/deploy-xiuxian-hostpath created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide 
NAME                                       READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-xiuxian-hostpath-7d7c8c588d-4bqmj   1/1     Running   0          4s    10.100.140.76    worker233   <none>           <none>
deploy-xiuxian-hostpath-7d7c8c588d-8gs56   1/1     Running   0          4s    10.100.140.80    worker233   <none>           <none>
deploy-xiuxian-hostpath-7d7c8c588d-98blc   1/1     Running   0          4s    10.100.203.165   worker232   <none>           <none>



#### Write on worker232; the Pod running on worker232 sees the file:
[root@worker232 ~]# echo 1111 > /linux96/xixi.log
[root@master231 volumes]# kubectl exec -it deploy-xiuxian-hostpath-7d7c8c588d-98blc -- ls /dingzhiyan
xixi.log

#### Write on worker233; the Pods running on worker233 see the file:
[root@worker233 ~]# echo hahaha > /linux96/hahahahaha.log
[root@master231 volumes]# kubectl exec -it deploy-xiuxian-hostpath-7d7c8c588d-8gs56 -- ls /dingzhiyan
hahahahaha.log
[root@master231 volumes]# kubectl exec -it deploy-xiuxian-hostpath-7d7c8c588d-4bqmj  -- ls /dingzhiyan
hahahahaha.log

III. Common storage volumes: NFS

1. NFS overview

NFS (Network File System) has a client side and a server side; the server must be deployed separately.

To use NFS volumes, the K8S cluster nodes should have the NFS client packages installed.

NFS use cases:

  • 1. Sharing data between Pods across different nodes;
  • 2. Storing data off the node;

2. Quickly deploying an NFS server on Ubuntu

2.1 Install the NFS packages on all K8S nodes

apt -y install nfs-kernel-server
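
Strictly speaking only the NFS server (master231 here) needs nfs-kernel-server; the worker nodes just need the client package, e.g.:

apt -y install nfs-common

Installing nfs-kernel-server everywhere also works, since it pulls in the client bits as a dependency.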

2.2 Create the shared directory on the server

[root@master231 volumes]# mkdir -pv /dingzhiyan/data/nfs-server

2.3 Configure the export

[root@master231 volumes]# tail -1 /etc/exports 
/dingzhiyan/data/nfs-server         *(rw,no_root_squash)
[root@master231 volumes]# 

2.4 Restart the NFS service to apply the configuration

[root@master231 volumes]# systemctl restart nfs-server

2.5 Verify the export took effect

[root@master231 volumes]# exportfs 
/dingzhiyan/data/nfs-server
		<world>
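
The export can also be checked remotely from any node that has the NFS client installed; the output should look roughly like this:

[root@worker233 ~]# showmount -e 10.0.0.231
Export list for 10.0.0.231:
/dingzhiyan/data/nfs-server *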

2.6 Client-side verification

2.6.1 Mount and write data

[root@worker233 ~]# mount -t nfs 10.0.0.231:/dingzhiyan/data/nfs-server /mnt
[root@worker233 ~]# 
[root@worker233 ~]# df -h | grep mnt
10.0.0.231:/dingzhiyan/data/nfs-server   48G   16G   31G  34% /mnt
[root@worker233 ~]# 
[root@worker233 ~]# cp /etc/os-release /mnt/
[root@worker233 ~]# 
[root@worker233 ~]# ll /mnt/
total 12
drwxr-xr-x  2 root root 4096 Apr 15 16:37 ./
drwxr-xr-x 22 root root 4096 Apr 15 15:40 ../
-rw-r--r--  1 root root  427 Apr 15 16:37 os-release
[root@worker233 ~]# 
[root@worker233 ~]# umount /mnt/
[root@worker233 ~]# 
[root@worker233 ~]# df -h | grep mnt

2.6.2 Mount and read the data back

[root@worker232 ~]# mount -t nfs 10.0.0.231:/dingzhiyan/data/nfs-server /mnt
[root@worker232 ~]# 
[root@worker232 ~]# ll /mnt
total 12
drwxr-xr-x  2 root root 4096 Apr 15 16:37 ./
drwxr-xr-x 22 root root 4096 Apr 15 15:40 ../
-rw-r--r--  1 root root  427 Apr 15 16:37 os-release
[root@worker232 ~]# 
[root@worker232 ~]# df -h | grep mnt
10.0.0.231:/dingzhiyan/data/nfs-server   48G   16G   31G  34% /mnt
[root@worker232 ~]# 
[root@worker232 ~]# umount /mnt 
[root@worker232 ~]# 
[root@worker232 ~]# df -h | grep mnt

3. Hands-on: using an NFS volume

3.1 Write the manifest

[root@master231 volumes]# cat 04-deploy-nfs.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-nfs
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      volumes:
      - emptyDir: {}
        name: data01
      - name: data02
        hostPath:
          path: /etc/localtime
      - name: data03
        # Declare a volume of type nfs
        nfs:
          # NFS server address
          server: 10.0.0.231
          # Exported data path on the NFS server
          path: /dingzhiyan/data/nfs-server
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        ports:
        - containerPort: 80
        volumeMounts:
        - name: data03
          mountPath: /usr/share/nginx/html
        - name: data02
          mountPath: /etc/localtime
        - name: data01
          mountPath: /dingzhiyan-linux96
[root@master231 volumes]# 

3.2 Verify

[root@master231 volumes]# kubectl apply -f  04-deploy-nfs.yaml 
deployment.apps/deploy-xiuxian-nfs created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                                  READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-xiuxian-nfs-785494468d-2ljcg   1/1     Running   0          3s    10.100.203.169   worker232   <none>           <none>
deploy-xiuxian-nfs-785494468d-nndpb   1/1     Running   0          3s    10.100.140.106   worker233   <none>           <none>
deploy-xiuxian-nfs-785494468d-tr2wx   1/1     Running   0          3s    10.100.140.105   worker233   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec -it deploy-xiuxian-nfs-785494468d-2ljcg -- sh
/ # echo www.dingzhiyan.com > /usr/share/nginx/html/index.html
/ # 
/ # 
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.203.169
www.dingzhiyan.com
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.140.106
www.dingzhiyan.com
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.140.105
www.dingzhiyan.com

IV. Deploying WordPress to the K8S cluster (hostPath + NFS)

1. Requirements

- 1. One replica for the database;
- 2. Three replicas for WordPress;
- 3. The WordPress service is reachable from Windows and blog posts can be published normally;
- 4. After deleting all Pods, the data is recovered within seconds;

2. Workflow

- 1. Requirements analysis:
	- The database uses a Deployment controller
	- WordPress uses a Deployment controller
	- WordPress reaches the database through a ClusterIP svc
	- Windows reaches WordPress through a LoadBalancer svc
	- Data persistence:
		- The database stores its data via hostPath
		- WordPress shares its data via NFS
- 2. Architecture diagram:
	Omitted; see the video.
	
- 3. Implementation:
	See the notes below.

3. Writing the manifests

3.1 Create the shared data directory on the NFS server

[root@master231 case-demo]# mkdir -pv /dingzhiyan/data/nfs-server/case-demo/wp
mkdir: created directory '/dingzhiyan/data/nfs-server/case-demo'
mkdir: created directory '/dingzhiyan/data/nfs-server/case-demo/wp'
[root@master231 case-demo]# 
[root@master231 case-demo]# ll /dingzhiyan/data/nfs-server/case-demo/wp/
total 8
drwxr-xr-x 2 root root 4096 Apr 15 17:48 ./
drwxr-xr-x 3 root root 4096 Apr 15 17:48 ../

3.2 Write the manifests

[root@master231 case-demo]# ll
total 24
drwxr-xr-x 2 root root 4096 Apr 15 17:55 ./
drwxr-xr-x 3 root root 4096 Apr 15 17:42 ../
-rw-r--r-- 1 root root  944 Apr 15 17:45 01-deploy-db.yaml
-rw-r--r-- 1 root root  109 Apr 15 17:46 02-svc-db.yaml
-rw-r--r-- 1 root root 1122 Apr 15 17:52 03-deploy-wp.yaml
-rw-r--r-- 1 root root  131 Apr 15 17:55 04-svc-wp.yaml
[root@master231 case-demo]# 
[root@master231 case-demo]# 
[root@master231 case-demo]# cat 01-deploy-db.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-db
spec:
  replicas: 1
  selector:
    matchLabels:
      apps: db
  template:
    metadata:
      labels:
        apps: db
    spec:
      volumes:
      - name: data
        hostPath:
          path: /dingzhiyan/wordpress/db
      nodeSelector:
        kubernetes.io/hostname: worker232      
      containers:
      - image: harbor250.dingzhiyan.com/dingzhiyan-wp/mysql:8.0.36-oracle
        name: db
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
        env:
        - name: MYSQL_DATABASE
          value: wordpress
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "yes"
        - name: MYSQL_USER
          value: admin
        - name: MYSQL_PASSWORD
          value: yinzhengjie
        args:
        - --character-set-server=utf8 
        - --collation-server=utf8_bin
        - --default-authentication-plugin=mysql_native_password
[root@master231 case-demo]# 
[root@master231 case-demo]# 
[root@master231 case-demo]# cat 02-svc-db.yaml 
apiVersion: v1
kind: Service
metadata:
  name: svc-db
spec:
  ports:
  - port: 3306
  selector:
    apps: db
[root@master231 case-demo]# 
[root@master231 case-demo]# 
[root@master231 case-demo]# cat 03-deploy-wp.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-wp
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: wp
  template:
    metadata:
      labels:
        apps: wp
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                apps: wp
      volumes:
      - name: data
        nfs:
          server: 10.0.0.231
          path: /dingzhiyan/data/nfs-server/case-demo/wp/
      containers:
      - image: harbor250.dingzhiyan.com/dingzhiyan-wp/wordpress:6.7.1-php8.1-apache 
        volumeMounts:
        - name: data
          mountPath: /var/www/html
        name: wp
        env:
        - name: WORDPRESS_DB_HOST
          value: svc-db
        - name: WORDPRESS_DB_NAME
          value: wordpress
        - name: WORDPRESS_DB_USER
          value: admin
        - name: WORDPRESS_DB_PASSWORD
          value: yinzhengjie
[root@master231 case-demo]# 
[root@master231 case-demo]# 
[root@master231 case-demo]# cat 04-svc-wp.yaml 
apiVersion: v1
kind: Service
metadata:
  name: svc-wp-lb
spec:
  type: LoadBalancer
  ports:
  - port: 80
  selector:
    apps: wp
[root@master231 case-demo]# 

3.3 Create all resources

[root@master231 case-demo]# ll
total 24
drwxr-xr-x 2 root root 4096 Apr 15 17:55 ./
drwxr-xr-x 3 root root 4096 Apr 15 17:42 ../
-rw-r--r-- 1 root root  944 Apr 15 17:45 01-deploy-db.yaml
-rw-r--r-- 1 root root  109 Apr 15 17:46 02-svc-db.yaml
-rw-r--r-- 1 root root 1122 Apr 15 17:52 03-deploy-wp.yaml
-rw-r--r-- 1 root root  131 Apr 15 17:55 04-svc-wp.yaml
[root@master231 case-demo]# 
[root@master231 case-demo]# kubectl apply -f .
deployment.apps/deploy-db created
service/svc-db created
deployment.apps/deploy-wp created
service/svc-wp-lb created
[root@master231 case-demo]# 
[root@master231 case-demo]# 

4. Initialize WordPress and publish a test post

[root@master231 case-demo]# kubectl get -f .
NAME                        READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/deploy-db   1/1     1            1           2m24s

NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
service/svc-db   ClusterIP   10.200.4.1   <none>        3306/TCP   2m24s

NAME                        READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/deploy-wp   3/3     3            3           2m24s

NAME                TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)       AGE
service/svc-wp-lb   LoadBalancer   10.200.211.103   10.0.0.152    80:6244/TCP   2m24s
[root@master231 case-demo]# 

5. Verify the data survives Pod deletion

[root@master231 case-demo]# kubectl get pods -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-db-67f7c87c78-hssw7   1/1     Running   0          6s    10.100.203.174   worker232   <none>           <none>
deploy-wp-764599b68-kzxcb    1/1     Running   0          6s    10.100.160.177   master231   <none>           <none>
deploy-wp-764599b68-qm2l9    1/1     Running   0          6s    10.100.140.110   worker233   <none>           <none>
deploy-wp-764599b68-v8wqh    1/1     Running   0          6s    10.100.203.176   worker232   <none>           <none>
[root@master231 case-demo]# 
[root@master231 case-demo]# 
[root@master231 case-demo]# kubectl delete pods --all
pod "deploy-db-67f7c87c78-hssw7" deleted
pod "deploy-wp-764599b68-kzxcb" deleted
pod "deploy-wp-764599b68-qm2l9" deleted
pod "deploy-wp-764599b68-v8wqh" deleted
[root@master231 case-demo]# 
[root@master231 case-demo]# 
[root@master231 case-demo]# kubectl get pods -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-db-67f7c87c78-l8r68   1/1     Running   0          8s    10.100.203.173   worker232   <none>           <none>
deploy-wp-764599b68-2x4xf    1/1     Running   0          8s    10.100.160.176   master231   <none>           <none>
deploy-wp-764599b68-7n65x    1/1     Running   0          8s    10.100.140.108   worker233   <none>           <none>
deploy-wp-764599b68-fvbp4    1/1     Running   0          8s    10.100.203.175   worker232   <none>           <none>
[root@master231 case-demo]# 

6. Delete all resources

[root@master231 case-demo]# ll
total 24
drwxr-xr-x 2 root root 4096 Apr 15 17:55 ./
drwxr-xr-x 3 root root 4096 Apr 15 17:42 ../
-rw-r--r-- 1 root root  944 Apr 15 17:45 01-deploy-db.yaml
-rw-r--r-- 1 root root  109 Apr 15 17:46 02-svc-db.yaml
-rw-r--r-- 1 root root 1122 Apr 15 17:52 03-deploy-wp.yaml
-rw-r--r-- 1 root root  131 Apr 15 17:55 04-svc-wp.yaml
[root@master231 case-demo]# kubectl delete -f .
deployment.apps "deploy-db" deleted
service "svc-db" deleted
deployment.apps "deploy-wp" deleted
service "svc-wp-lb" deleted

V. Common storage volumes: cephFS

1. Install the Ceph packages on all K8S nodes

[root@master231 ~]# apt -y install ceph

[root@worker232 ~]# apt -y install ceph

[root@worker233 ~]# apt -y install ceph

2. Get the Ceph cluster admin user's key

[root@ceph141 ~]# ceph auth get-key client.admin | base64 
QVFBa1JlcG5sOFFIRGhBQWFqSy9hTUgxS2FDb1ZKV3Q1SDJOT1E9PQ==
[root@ceph141 ~]# 

3. Copy the keyring file

[root@ceph141 ~]# scp /etc/ceph/ceph.client.admin.keyring 10.0.0.231:/etc/ceph

[root@ceph141 ~]# scp /etc/ceph/ceph.client.admin.keyring 10.0.0.232:/etc/ceph

[root@ceph141 ~]# scp /etc/ceph/ceph.client.admin.keyring 10.0.0.233:/etc/ceph

4. Write the manifest

[root@master231 volumes]# cat 05-deploy-cephFS.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
stringData:
  # Note: this is the ceph admin key after base64 encoding, i.e. the output of 'ceph auth get-key client.admin | base64'
  key: QVFBa1JlcG5sOFFIRGhBQWFqSy9hTUgxS2FDb1ZKV3Q1SDJOT1E9PQ==


---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-cephfs
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      volumes:
      - name: data01
        hostPath:
          path: /etc/localtime
      - name: data02
        # Declare a volume of type cephfs
        cephfs:
          # Ceph monitor (mon) addresses
          monitors:
          - 10.0.0.141:6789
          - 10.0.0.142:6789
          - 10.0.0.143:6789
          # Path within cephFS to mount
          path: /
          # Ceph user for authentication
          user: admin
          # Path to the keyring file; commented out here because in practice it appears to have no effect
          # secretFile: /etc/ceph/ceph.client.admin.keyring
          # Reference the credentials from a Secret resource; make sure the name is spelled correctly!
          # Note: once secretRef is configured, the secretFile parameter is ignored, i.e. secretRef overrides it
          secretRef:
            # Name of the Secret
            name: ceph-admin-secret
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        ports:
        - containerPort: 80
        volumeMounts:
        - name: data02
          mountPath: /usr/share/nginx/html
        - name: data01
          mountPath: /etc/localtime
[root@master231 volumes]# 
[root@master231 volumes]# kubectl apply -f 05-deploy-cephFS.yaml 
secret/ceph-admin-secret created
deployment.apps/deploy-xiuxian-cephfs created

5. Verify

[root@master231 volumes]# kubectl get pods -o wide
NAME                                     READY   STATUS        RESTARTS   AGE     IP               NODE        NOMINATED NODE   READINESS GATES
deploy-xiuxian-cephfs-54dbd94849-htxd7   1/1     Running       0          4s      10.100.203.181   worker232   <none>           <none>
deploy-xiuxian-cephfs-54dbd94849-vrfhh   1/1     Running       0          4s      10.100.203.179   worker232   <none>           <none>
deploy-xiuxian-cephfs-54dbd94849-w4kvn   1/1     Running       0          4s      10.100.140.113   worker233   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec -it deploy-xiuxian-cephfs-54dbd94849-htxd7 -- sh
/ # ls /usr/share/nginx/html/
/ # 
/ # echo www.dingzhiyan.com > /usr/share/nginx/html/index.html
/ # 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                                     READY   STATUS        RESTARTS   AGE     IP               NODE        NOMINATED NODE   READINESS GATES
deploy-xiuxian-cephfs-54dbd94849-htxd7   1/1     Running       0          36s     10.100.203.181   worker232   <none>           <none>
deploy-xiuxian-cephfs-54dbd94849-vrfhh   1/1     Running       0          36s     10.100.203.179   worker232   <none>           <none>
deploy-xiuxian-cephfs-54dbd94849-w4kvn   1/1     Running       0          36s     10.100.140.113   worker233   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.203.181 
www.dingzhiyan.com
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.203.179 
www.dingzhiyan.com
[root@master231 volumes]# 
[root@master231 volumes]# curl  10.100.140.113 
www.dingzhiyan.com

6. Manually mounting cephFS to verify (optional)

[root@master231 volumes]# mkdir /data
[root@master231 volumes]# 
[root@master231 volumes]# mount -t ceph 10.0.0.143:6789:/ /data -o name=admin,secret=QVFBa1JlcG5sOFFIRGhBQWFqSy9hTUgxS2FDb1ZKV3Q1SDJOT1E9PQ==
did not load config file, using default settings.
2025-04-16T11:08:14.377+0800 7f06f87a2f40 -1 Errors while parsing config file!

2025-04-16T11:08:14.377+0800 7f06f87a2f40 -1 can't open ceph.conf: (2) No such file or directory

2025-04-16T11:08:14.377+0800 7f06f87a2f40 -1 Errors while parsing config file!

2025-04-16T11:08:14.377+0800 7f06f87a2f40 -1 can't open ceph.conf: (2) No such file or directory

unable to get monitor info from DNS SRV with service name: ceph-mon
2025-04-16T11:08:14.401+0800 7f06f87a2f40 -1 failed for service _ceph-mon._tcp

[root@master231 volumes]# 
[root@master231 volumes]# df -h | grep data
10.0.0.143:6789:/                  1.7T     0  1.7T   0% /data
[root@master231 volumes]# 
[root@master231 volumes]# ll /data/
total 5
drwxr-xr-x  2 root root    1 Apr 16 11:04 ./
drwxr-xr-x 23 root root 4096 Apr 16 11:08 ../
-rw-r--r--  1 root root   18 Apr 16 11:04 index.html
[root@master231 volumes]# 
[root@master231 volumes]# cat /data/index.html 
www.dingzhiyan.com
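
The "can't open ceph.conf" lines above are warnings only: the mount options already carry the mon address, user name, and secret, so the mount succeeds anyway. They go away if the cluster config file is also copied over, the same way the keyring was copied in step 3:

[root@ceph141 ~]# scp /etc/ceph/ceph.conf 10.0.0.231:/etc/ceph/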

VI. Common storage volumes: RBD

1. Add a test user

[root@ceph141 ~]# ceph auth add client.linux96 mon 'allow rwx' osd 'allow rwx pool=k8s'
added key for client.linux96
[root@ceph141 ~]# 
[root@ceph141 ~]# ceph auth get client.linux96
[client.linux96]
	key = AQAoJP9n5SsaDBAA6Hy7f9awCJzVycYMJ3IUkA==
	caps mon = "allow rwx"
	caps osd = "allow rwx pool=k8s"

2. Create the storage pool

[root@ceph141 ~]# ceph osd pool create k8s
pool 'k8s' created
[root@ceph141 ~]# 
[root@ceph141 ~]# rbd pool init k8s

3. Create the block device

[root@ceph141 ~]# rbd create -s 10G k8s/db
[root@ceph141 ~]# 
[root@ceph141 ~]# rbd ls -l k8s
NAME  SIZE    PARENT  FMT  PROT  LOCK
db    10 GiB            2            
[root@ceph141 ~]# 
[root@ceph141 ~]# 
[root@ceph141 ~]# rbd info k8s/db
rbd image 'db':
	size 10 GiB in 2560 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: d4caf25cf308
	block_name_prefix: rbd_data.d4caf25cf308
	format: 2
	features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
	op_features: 
	flags: 
	create_timestamp: Wed Apr 16 11:32:06 2025
	access_timestamp: Wed Apr 16 11:32:06 2025
	modify_timestamp: Wed Apr 16 11:32:06 2025
[root@ceph141 ~]# 
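
Note that an RBD image carrying a regular filesystem can only be safely mounted on one node at a time, which is one reason the database below runs with replicas: 1. For reference, the image can also be mapped by hand from any node with the Ceph packages installed; a rough sketch, assuming the admin keyring copied in section V step 3:

[root@worker233 ~]# rbd map k8s/db --id admin -m 10.0.0.141:6789
/dev/rbd0
[root@worker233 ~]# mount /dev/rbd0 /mnt    # only after a filesystem exists, e.g. once K8S has formatted it per fsType
[root@worker233 ~]# umount /mnt && rbd unmap /dev/rbd0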

4. Base64-encode the key

[root@ceph141 ~]# ceph auth get-key client.linux96 | base64 
QVFBb0pQOW41U3NhREJBQTZIeTdmOWF3Q0p6VnljWU1KM0lVa0E9PQ==

5. Write the manifest

[root@master231 volumes]# cat 06-deploy-rbd.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-linu96-secret
type: "kubernetes.io/rbd"
data:
  # Note: the base64-encoded key must be used here
  key: QVFBb0pQOW41U3NhREJBQTZIeTdmOWF3Q0p6VnljWU1KM0lVa0E9PQ==

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-db
spec:
  replicas: 1
  selector:
    matchLabels:
      apps: db
  template:
    metadata:
      labels:
        apps: db
    spec:
      volumes:
      - name: data
        # Declare a volume of type rbd
        rbd:
          # Ceph monitor (mon) addresses
          monitors:
          - 10.0.0.141:6789
          - 10.0.0.142:6789
          - 10.0.0.143:6789
          # Storage pool
          pool: k8s
          # RBD image (block device) name
          image: db
          # Filesystem type; valid values: "ext4" (default), "xfs", "ntfs"
          fsType: xfs
          # Ceph user for authentication; defaults to admin if omitted
          user: linux96
          # Reference the credentials from a Secret
          secretRef:
            # Name of the Secret
            name: ceph-linu96-secret
      containers:
      - image: harbor250.dingzhiyan.com/dingzhiyan-wp/mysql:8.0.36-oracle
        name: db
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
        env:
        - name: MYSQL_DATABASE
          value: wordpress
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "yes"
        - name: MYSQL_USER
          value: admin
        - name: MYSQL_PASSWORD
          value: yinzhengjie
        args:
        - --character-set-server=utf8 
        - --collation-server=utf8_bin
        - --default-authentication-plugin=mysql_native_password
[root@master231 volumes]# 

6. Write test data

[root@master231 volumes]# kubectl get pods -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-db-777fdf9987-7gpcf   1/1     Running   0          16s   10.100.203.180   worker232   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec -it deploy-db-777fdf9987-7gpcf -- mysql
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 8
Server version: 8.0.36 MySQL Community Server - GPL

Copyright (c) 2000, 2024, Oracle and/or its affiliates.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> SHOW DATABASES;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| sys                |
| wordpress          |
+--------------------+
5 rows in set (0.00 sec)

mysql> 
mysql> CREATE DATABASE dingzhiyan;
Query OK, 1 row affected (0.15 sec)

mysql> 
mysql> use dingzhiyan
Database changed
mysql> 
mysql> CREATE TABLE student(id INT AUTO_INCREMENT PRIMARY KEY,name VARCHAR(255) NOT NULL, hobby VARCHAR(255) NOT NULL);
Query OK, 0 rows affected (0.08 sec)

mysql> 
mysql> DESC student;
+-------+--------------+------+-----+---------+----------------+
| Field | Type         | Null | Key | Default | Extra          |
+-------+--------------+------+-----+---------+----------------+
| id    | int          | NO   | PRI | NULL    | auto_increment |
| name  | varchar(255) | NO   |     | NULL    |                |
| hobby | varchar(255) | NO   |     | NULL    |                |
+-------+--------------+------+-----+---------+----------------+
3 rows in set (0.00 sec)

mysql> 
mysql> INSERT INTO student(name,hobby) VALUE ('LiuTongTong','Sleep'),('TianMingLei','KouBiShi');
Query OK, 2 rows affected (0.03 sec)
Records: 2  Duplicates: 0  Warnings: 0

mysql> 
mysql> SELECT * FROM student;
+----+-------------+----------+
| id | name        | hobby    |
+----+-------------+----------+
|  1 | LiuTongTong | Sleep    |
|  2 | TianMingLei | KouBiShi |
+----+-------------+----------+
2 rows in set (0.00 sec)

mysql> 

7. Delete the Pod and check whether the data survives

[root@master231 volumes]# kubectl get pods -o wide
NAME                         READY   STATUS    RESTARTS   AGE    IP               NODE        NOMINATED NODE   READINESS GATES
deploy-db-777fdf9987-7gpcf   1/1     Running   0          4m4s   10.100.203.180   worker232   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# kubectl delete pod deploy-db-777fdf9987-7gpcf 
pod "deploy-db-777fdf9987-7gpcf" deleted
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide  # If the Pod is not scheduled to worker233, you can edit the manifest to pin it there
NAME                         READY   STATUS              RESTARTS   AGE   IP       NODE        NOMINATED NODE   READINESS GATES
deploy-db-777fdf9987-lq2vf   0/1     ContainerCreating   0          2s    <none>   worker233   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-db-777fdf9987-lq2vf   1/1     Running   0          28s   10.100.140.114   worker233   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec -it deploy-db-777fdf9987-lq2vf -- mysql dingzhiyan
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 8
Server version: 8.0.36 MySQL Community Server - GPL

Copyright (c) 2000, 2024, Oracle and/or its affiliates.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> SHOW TABLES;
+----------------------+
| Tables_in_dingzhiyan |
+----------------------+
| student              |
+----------------------+
1 row in set (0.01 sec)

mysql> 
mysql> 
mysql> DESC student;
+-------+--------------+------+-----+---------+----------------+
| Field | Type         | Null | Key | Default | Extra          |
+-------+--------------+------+-----+---------+----------------+
| id    | int          | NO   | PRI | NULL    | auto_increment |
| name  | varchar(255) | NO   |     | NULL    |                |
| hobby | varchar(255) | NO   |     | NULL    |                |
+-------+--------------+------+-----+---------+----------------+
3 rows in set (0.01 sec)

mysql> 
mysql> SELECT * FROM student;
+----+-------------+----------+
| id | name        | hobby    |
+----+-------------+----------+
|  1 | LiuTongTong | Sleep    |
|  2 | TianMingLei | KouBiShi |
+----+-------------+----------+
2 rows in set (0.00 sec)

mysql> 

VII. Deploying WordPress to the K8S cluster (RBD + cephFS)

1. Create the cephFS storage directory

[root@master231 ceph]# df -h | grep data
10.0.0.143:6789:/                  1.7T     0  1.7T   0% /data
[root@master231 ceph]# 
[root@master231 ceph]# mkdir /data/wordpress
[root@master231 ceph]# 
[root@master231 ceph]# ll /data/wordpress/
total 0
drwxr-xr-x 2 root root 0 Apr 16 14:39 ./
drwxr-xr-x 3 root root 2 Apr 16 14:39 ../

2. Write the manifests

[root@master231 case-demo-cephfs]# cat 01-deploy-db.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-linu96-secret
type: "kubernetes.io/rbd"
data:
  key: QVFBb0pQOW41U3NhREJBQTZIeTdmOWF3Q0p6VnljWU1KM0lVa0E9PQ==

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-db
spec:
  replicas: 1
  selector:
    matchLabels:
      apps: db
  template:
    metadata:
      labels:
        apps: db
    spec:
      volumes:
      - name: data
        rbd:
          monitors:
          - 10.0.0.141:6789
          - 10.0.0.142:6789
          - 10.0.0.143:6789
          pool: k8s
          image: db
          fsType: xfs
          user: linux96
          secretRef:
            name: ceph-linu96-secret
      containers:
      - image: harbor250.dingzhiyan.com/dingzhiyan-wp/mysql:8.0.36-oracle
        name: db
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
        env:
        - name: MYSQL_DATABASE
          value: wordpress
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "yes"
        - name: MYSQL_USER
          value: admin
        - name: MYSQL_PASSWORD
          value: yinzhengjie
        args:
        - --character-set-server=utf8 
        - --collation-server=utf8_bin
        - --default-authentication-plugin=mysql_native_password
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# cat 02-svc-db.yaml 
apiVersion: v1
kind: Service
metadata:
  name: svc-db
spec:
  ports:
  - port: 3306
  selector:
    apps: db
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# cat 03-deploy-wp.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
stringData:
  key: QVFBa1JlcG5sOFFIRGhBQWFqSy9hTUgxS2FDb1ZKV3Q1SDJOT1E9PQ==

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-wp
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: wp
  template:
    metadata:
      labels:
        apps: wp
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                apps: wp
      volumes:
      - name: data02
        cephfs:
          monitors:
          - 10.0.0.141:6789
          - 10.0.0.142:6789
          - 10.0.0.143:6789
          path: /wordpress
          user: admin
          secretRef:
            name: ceph-admin-secret
      containers:
      - image: harbor250.dingzhiyan.com/dingzhiyan-wp/wordpress:6.7.1-php8.1-apache 
        volumeMounts:
        - name: data02
          mountPath: /var/www/html
        name: wp
        env:
        - name: WORDPRESS_DB_HOST
          value: svc-db
        - name: WORDPRESS_DB_NAME
          value: wordpress
        - name: WORDPRESS_DB_USER
          value: admin
        - name: WORDPRESS_DB_PASSWORD
          value: yinzhengjie
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# cat 04-svc-wp.yaml 
apiVersion: v1
kind: Service
metadata:
  name: svc-wp-lb
spec:
  type: LoadBalancer
  ports:
  - port: 80
  selector:
    apps: wp
[root@master231 case-demo-cephfs]# 

3. Create the resources

[root@master231 case-demo-cephfs]# ll
total 24
drwxr-xr-x 2 root root 4096 Apr 16 14:42 ./
drwxr-xr-x 4 root root 4096 Apr 16 14:37 ../
-rw-r--r-- 1 root root 1246 Apr 16 14:38 01-deploy-db.yaml
-rw-r--r-- 1 root root  109 Apr 16 14:37 02-svc-db.yaml
-rw-r--r-- 1 root root 1418 Apr 16 14:42 03-deploy-wp.yaml
-rw-r--r-- 1 root root  131 Apr 16 14:37 04-svc-wp.yaml
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# kubectl apply -f .
secret/ceph-linu96-secret created
deployment.apps/deploy-db created
service/svc-db created
secret/ceph-admin-secret created
deployment.apps/deploy-wp created
service/svc-wp-lb created
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# kubectl get pods -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-db-777fdf9987-ptjv8   1/1     Running   0          8s    10.100.140.119   worker233   <none>           <none>
deploy-wp-8448b9b644-4swnx   1/1     Running   0          8s    10.100.140.115   worker233   <none>           <none>
deploy-wp-8448b9b644-dn47m   1/1     Running   0          8s    10.100.203.183   worker232   <none>           <none>
deploy-wp-8448b9b644-pvj75   1/1     Running   0          8s    10.100.160.179   master231   <none>           <none>
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# kubectl get -f .
NAME                        TYPE                DATA   AGE
secret/ceph-linu96-secret   kubernetes.io/rbd   1      39s

NAME                        READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/deploy-db   1/1     1            1           39s

NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
service/svc-db   ClusterIP   10.200.156.63   <none>        3306/TCP   39s

NAME                       TYPE     DATA   AGE
secret/ceph-admin-secret   Opaque   1      39s

NAME                        READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/deploy-wp   3/3     3            3           39s

NAME                TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
service/svc-wp-lb   LoadBalancer   10.200.91.169   10.0.0.152    80:10841/TCP   39s
[root@master231 case-demo-cephfs]# 

4. Access test

http://10.0.0.152/

5. Delete the Pods and verify the data still exists

[root@master231 case-demo-cephfs]# kubectl get pods -o wide
NAME                         READY   STATUS    RESTARTS   AGE    IP               NODE        NOMINATED NODE   READINESS GATES
deploy-db-777fdf9987-ptjv8   1/1     Running   0          4m3s   10.100.140.119   worker233   <none>           <none>
deploy-wp-8448b9b644-4swnx   1/1     Running   0          4m3s   10.100.140.115   worker233   <none>           <none>
deploy-wp-8448b9b644-dn47m   1/1     Running   0          4m3s   10.100.203.183   worker232   <none>           <none>
deploy-wp-8448b9b644-pvj75   1/1     Running   0          4m3s   10.100.160.179   master231   <none>           <none>
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# kubectl delete pods --all
pod "deploy-db-777fdf9987-ptjv8" deleted
pod "deploy-wp-8448b9b644-4swnx" deleted
pod "deploy-wp-8448b9b644-dn47m" deleted
pod "deploy-wp-8448b9b644-pvj75" deleted
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# 
[root@master231 case-demo-cephfs]# kubectl get pods -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-db-777fdf9987-nwc2h   1/1     Running   0          10s   10.100.203.185   worker232   <none>           <none>
deploy-wp-8448b9b644-7nns5   1/1     Running   0          10s   10.100.140.117   worker233   <none>           <none>
deploy-wp-8448b9b644-nbsb9   1/1     Running   0          10s   10.100.203.182   worker232   <none>           <none>
deploy-wp-8448b9b644-w82zg   1/1     Running   0          10s   10.100.160.180   master231   <none>           <none>
[root@master231 case-demo-cephfs]# 

6. Check again that no data was lost

7. Delete the resources

[root@master231 case-demo-cephfs]# kubectl delete -f .
secret "ceph-linu96-secret" deleted
deployment.apps "deploy-db" deleted
service "svc-db" deleted
secret "ceph-admin-secret" deleted
deployment.apps "deploy-wp" deleted
service "svc-wp-lb" deleted
[root@master231 case-demo-cephfs]# 

VIII. Hands-on: periodic etcd data backup

1. Approach

- 1. Back up with the etcdctl tool
- 2. Drive the backup with a CronJob (cj) controller
- 3. The backup requires the etcd client certificates

2. Hands-on

2.1 Upload the certificates to cephFS

[root@master231 case-demo-etcd-backup]# ll /etc/kubernetes/pki/etcd/
total 40
drwxr-xr-x 2 root root 4096 Apr  7 11:00 ./
drwxr-xr-x 4 root root 4096 Apr 14 10:06 ../
-rw-r--r-- 1 root root 1086 Apr  7 11:00 ca.crt
-rw------- 1 root root 1675 Apr  7 11:00 ca.key
-rw-r--r-- 1 root root 1159 Apr  7 11:00 healthcheck-client.crt
-rw------- 1 root root 1679 Apr  7 11:00 healthcheck-client.key
-rw-r--r-- 1 root root 1200 Apr  7 11:00 peer.crt
-rw------- 1 root root 1675 Apr  7 11:00 peer.key
-rw-r--r-- 1 root root 1200 Apr  7 11:00 server.crt
-rw------- 1 root root 1675 Apr  7 11:00 server.key
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# df -h | grep data
10.0.0.143:6789:/                  1.7T   76M  1.7T   1% /data
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# mkdir /data/etcd-certs  # Create a directory for the certificates
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# cp /etc/kubernetes/pki/etcd/{ca.crt,peer.crt,peer.key} /data/etcd-certs
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# ll /data/etcd-certs
total 5
drwxr-xr-x 2 root root    3 Apr 16 15:16 ./
drwxr-xr-x 4 root root    3 Apr 16 15:16 ../
-rw-r--r-- 1 root root 1086 Apr 16 15:16 ca.crt
-rw-r--r-- 1 root root 1200 Apr 16 15:16 peer.crt
-rw------- 1 root root 1675 Apr 16 15:16 peer.key
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# mkdir  /data/backup  # Create the backup directory
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# ll /data/backup
total 0
drwxr-xr-x 2 root root 0 Apr 16 15:29 ./
drwxr-xr-x 5 root root 4 Apr 16 15:29 ../
[root@master231 case-demo-etcd-backup]# 

2.2 Write the Dockerfile

[root@master231 case-demo-etcd-backup]# cat Dockerfile 
FROM  harbor250.dingzhiyan.com/dingzhiyan-xiuxian/apps:v1

MAINTAINER JasonYin

LABEL school=dingzhiyan \
      class=xxx

COPY etcdctl /usr/local/bin/ 

CMD ["/bin/sh","-c","etcdctl --endpoints=${ETCD_HOST}:${ETCD_PORT} --cacert=/certs/ca.crt --cert=/certs/peer.crt --key=/certs/peer.key snapshot save /backup/dingzhiyan-etcd-`date +%F-%T`.backup"]
[root@master231 case-demo-etcd-backup]# 
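
The COPY instruction assumes an etcdctl binary sits in the build context next to the Dockerfile. One way to obtain it is to extract it from the etcd image the cluster already uses; a sketch, where the image name and tag are assumptions to adjust to your cluster:

[root@master231 case-demo-etcd-backup]# docker create --name tmp-etcd registry.aliyuncs.com/google_containers/etcd:3.5.6-0
[root@master231 case-demo-etcd-backup]# docker cp tmp-etcd:/usr/local/bin/etcdctl .
[root@master231 case-demo-etcd-backup]# docker rm tmp-etcd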

2.3 Build the image and push it to the Harbor registry

[root@master231 case-demo-etcd-backup]# docker build -t harbor250.dingzhiyan.com/dingzhiyan-backup-etcd/tools:v0.1 .
Sending build context to Docker daemon  18.42MB
Step 1/5 : FROM  harbor250.dingzhiyan.com/dingzhiyan-xiuxian/apps:v1
# Executing 1 build trigger
 ---> Using cache
 ---> 3702ed09067c
Step 2/5 : MAINTAINER JasonYin
 ---> Using cache
 ---> e45c3391c338
Step 3/5 : LABEL school=dingzhiyan       class=xxx
 ---> Using cache
 ---> d713219b897c
Step 4/5 : COPY etcdctl /usr/local/bin/
 ---> Using cache
 ---> f7bdd47d599d
Step 5/5 : CMD ["/bin/sh","-c","etcdctl --endpoints=${ETCD_HOST}:${ETCD_PORT} --cacert=/certs/ca.crt --cert=/certs/peer.crt --key=/certs/peer.key snapshot save /backup/dingzhiyan-etcd-`date +%F-%T`.backup"]
 ---> Running in 1a16a58960ae
Removing intermediate container 1a16a58960ae
 ---> fecebd00f05a
Successfully built fecebd00f05a
Successfully tagged harbor250.dingzhiyan.com/dingzhiyan-backup-etcd/tools:v0.1
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# docker login -u admin -p 1 harbor250.dingzhiyan.com
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# docker push harbor250.dingzhiyan.com/dingzhiyan-backup-etcd/tools:v0.1
The push refers to repository [harbor250.dingzhiyan.com/dingzhiyan-backup-etcd/tools]
185474e5c018: Pushed 
8e2be8913e57: Mounted from dingzhiyan-xiuxian/apps 
9d5b000ce7c7: Mounted from dingzhiyan-xiuxian/apps 
b8dbe22b95f7: Mounted from dingzhiyan-xiuxian/apps 
c39c1c35e3e8: Mounted from dingzhiyan-xiuxian/apps 
5f66747c8a72: Mounted from dingzhiyan-xiuxian/apps 
15d7cdc64789: Mounted from dingzhiyan-xiuxian/apps 
7fcb75871b21: Mounted from dingzhiyan-xiuxian/apps 
v0.1: digest: sha256:a4c74c0162caa0504cc8e554d941bce7142c8c5a9342992ba7411ae77691c8d0 size: 1990

2.4 Write the manifest

[root@master231 case-demo-etcd-backup]# cat cj-backup-etc.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
stringData:
  key: QVFBa1JlcG5sOFFIRGhBQWFqSy9hTUgxS2FDb1ZKV3Q1SDJOT1E9PQ==

---

apiVersion: batch/v1
kind: CronJob
metadata:
  name: backup-etcd
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          volumes:
          - name: certs
            cephfs:
              monitors:
              - 10.0.0.141:6789
              - 10.0.0.142:6789
              - 10.0.0.143:6789
              path: /etcd-certs
              user: admin
              secretRef:
                name: ceph-admin-secret
              readOnly: true
          - name: bak
            cephfs:
              monitors:
              - 10.0.0.141:6789
              - 10.0.0.142:6789
              - 10.0.0.143:6789
              path: /backup
              user: admin
              secretRef:
                name: ceph-admin-secret
          containers:
          - name: etcd-backup
            image: harbor250.dingzhiyan.com/dingzhiyan-backup-etcd/tools:v0.1
            imagePullPolicy: IfNotPresent
            volumeMounts:
            - name: certs
              mountPath: /certs
            - name: bak
              mountPath: /backup
            env:
            - name: ETCD_HOST
              value: 10.0.0.231
            - name: ETCD_PORT
              value: "2379"
          restartPolicy: OnFailure
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# kubectl apply -f  cj-backup-etc.yaml 
secret/ceph-admin-secret configured
cronjob.batch/backup-etcd created

2.5 Verify

[root@master231 case-demo-etcd-backup]# kubectl get cj,jobs,pods -o wide
NAME                        SCHEDULE    SUSPEND   ACTIVE   LAST SCHEDULE   AGE     CONTAINERS    IMAGES                                                     SELECTOR
cronjob.batch/backup-etcd   * * * * *   False     0        4s              2m55s   etcd-backup   harbor250.dingzhiyan.com/dingzhiyan-backup-etcd/tools:v0.1   <none>

NAME                             COMPLETIONS   DURATION   AGE    CONTAINERS    IMAGES                                                     SELECTOR
job.batch/backup-etcd-29079815   1/1           7s         2m4s   etcd-backup   harbor250.dingzhiyan.com/dingzhiyan-backup-etcd/tools:v0.1   controller-uid=f395cc73-cfdd-4772-b32e-93690100e0c4
job.batch/backup-etcd-29079816   1/1           3s         64s    etcd-backup   harbor250.dingzhiyan.com/dingzhiyan-backup-etcd/tools:v0.1   controller-uid=7342dc84-165f-4a76-b974-1b5e78a50bbe
job.batch/backup-etcd-29079817   1/1           3s         4s     etcd-backup   harbor250.dingzhiyan.com/dingzhiyan-backup-etcd/tools:v0.1   controller-uid=db9b04df-29c0-4ecb-bc82-6c0b37065e15

NAME                             READY   STATUS      RESTARTS   AGE    IP               NODE        NOMINATED NODE   READINESS GATES
pod/backup-etcd-29079815-5pf5x   0/1     Completed   0          2m4s   10.100.140.118   worker233   <none>           <none>
pod/backup-etcd-29079816-8q475   0/1     Completed   0          64s    10.100.140.120   worker233   <none>           <none>
pod/backup-etcd-29079817-fg89q   0/1     Completed   0          4s     10.100.140.122   worker233   <none>           <none>
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# df -h | grep data
10.0.0.143:6789:/                  1.7T  104M  1.7T   1% /data
[root@master231 case-demo-etcd-backup]# 
[root@master231 case-demo-etcd-backup]# ll /data/backup -h
total 27M
drwxr-xr-x 2 root root    3 Apr 16 15:37 ./
drwxr-xr-x 5 root root    4 Apr 16 15:29 ../
-rw------- 1 root root 9.0M Apr 16 15:35 dingzhiyan-etcd-2025-04-16-07:35:06.backup
-rw------- 1 root root 9.0M Apr 16 15:36 dingzhiyan-etcd-2025-04-16-07:36:00.backup
-rw------- 1 root root 9.0M Apr 16 15:37 dingzhiyan-etcd-2025-04-16-07:37:00.backup
[root@master231 case-demo-etcd-backup]# 

IX. Common storage volumes: configMap

1. What is a ConfigMap

A ConfigMap is essentially a mechanism for mapping configuration information as key-value pairs; its data is stored in etcd.

A ConfigMap decouples your environment-specific configuration from container images, which makes application configuration easy to modify.

Official documentation:

https://kubernetes.io/zh-cn/docs/concepts/configuration/configmap/
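
Side note: a ConfigMap can be marked immutable (stable since K8S v1.21), which guards against accidental edits and reduces watch load on the apiserver; a minimal sketch:

apiVersion: v1
kind: ConfigMap
metadata:
  name: demo-immutable
data:
  school: dingzhiyan
# Once created, the data can no longer be modified; delete and recreate the cm to change it
immutable: true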

2. Creating a cm resource declaratively

2.1 Write the manifest

[root@master231 volumes]# cat 07-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: game-demo
# Define the configuration data
data:
  # Property-like keys; each key maps to a simple value
  player_initial_lives: "3"
  ui_properties_file_name: "user-interface.properties"
  # Custom key-value pairs
  school: dingzhiyan
  class: linux96

  # File-like keys
  game.properties: |
    enemy.types=aliens,monsters
    player.maximum-lives=5    

  user-interface.properties: |
    color.good=purple
    color.bad=yellow
    allow.textmode=true

  # MySQL configuration
  my.cnf: |
    datadir=/dingzhiyan/data/mysql80
    basedir=/dingzhiyan/softwares/mysql80
    port=3306
    socket=/tmp/mysql80.sock
[root@master231 volumes]# 

2.2 Create the cm resource

[root@master231 volumes]# kubectl apply -f  07-cm.yaml
configmap/game-demo created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get cm
NAME               DATA   AGE
game-demo          7      6s
kube-root-ca.crt   1      9d
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get cm game-demo 
NAME        DATA   AGE
game-demo   7      8s

3. Creating a cm resource imperatively

3.1 Create the cm resource imperatively

[root@master231 volumes]# kubectl create configmap dingzhiyan-cm --from-file=myhost=/etc/hosts --from-file=myos=/etc/os-release --from-literal=school=dingzhiyan --from-literal=class=xxx 
configmap/dingzhiyan-cm created

3.2 View the cm resource

[root@master231 volumes]# kubectl get cm dingzhiyan-cm 
NAME           DATA   AGE
dingzhiyan-cm   4      19s
[root@master231 volumes]# 

3.3 View the cm details

[root@master231 volumes]#  
[root@master231 volumes]# kubectl get cm dingzhiyan-cm -o yaml
apiVersion: v1
data:
  class: linux96
  myhost: |
    127.0.0.1 localhost
    127.0.1.1 yinzhengjie

    # The following lines are desirable for IPv6 capable hosts
    ::1     ip6-localhost ip6-loopback
    fe00::0 ip6-localnet
    ff00::0 ip6-mcastprefix
    ff02::1 ip6-allnodes
    ff02::2 ip6-allrouters
  myos: |
    PRETTY_NAME="Ubuntu 22.04.4 LTS"
    NAME="Ubuntu"
    VERSION_ID="22.04"
    VERSION="22.04.4 LTS (Jammy Jellyfish)"
    VERSION_CODENAME=jammy
    ID=ubuntu
    ID_LIKE=debian
    HOME_URL="https://www.ubuntu.com/"
    SUPPORT_URL="https://help.ubuntu.com/"
    BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
    PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
    UBUNTU_CODENAME=jammy
    BLOG=https://www.cnblogs.com/yinzhengjie
  school: dingzhiyan
kind: ConfigMap
metadata:
  creationTimestamp: "2025-04-16T08:08:18Z"
  name: dingzhiyan-cm
  namespace: default
  resourceVersion: "550585"
  uid: 2e9fed53-8323-47bd-a821-9ad53d7014c1
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl describe cm dingzhiyan-cm 
Name:         dingzhiyan-cm
Namespace:    default
Labels:       <none>
Annotations:  <none>

Data
====
class:
----
linux96
myhost:
----
127.0.0.1 localhost
127.0.1.1 yinzhengjie

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

myos:
----
PRETTY_NAME="Ubuntu 22.04.4 LTS"
NAME="Ubuntu"
VERSION_ID="22.04"
VERSION="22.04.4 LTS (Jammy Jellyfish)"
VERSION_CODENAME=jammy
ID=ubuntu
ID_LIKE=debian
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
UBUNTU_CODENAME=jammy
BLOG=https://www.cnblogs.com/yinzhengjie

school:
----
dingzhiyan

BinaryData
====

Events:  <none>

4. Referencing a cm from a Pod via environment variables

4.1 Write the manifest

[root@master231 volumes]# cat 08-deploy-env-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: game-demo
data:
  player_initial_lives: "3"
  ui_properties_file_name: "user-interface.properties"
  school: dingzhiyan
  class: linux96

  game.properties: |
    enemy.types=aliens,monsters
    player.maximum-lives=5    

  user-interface.properties: |
    color.good=purple
    color.bad=yellow
    allow.textmode=true

  my.cnf: |
    datadir=/dingzhiyan/data/mysql80
    basedir=/dingzhiyan/softwares/mysql80
    port=3306
    socket=/tmp/mysql80.sock


---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-cm-env
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        env:
        - name: linux96-SCHOOL
          # The value is referenced from another resource
          valueFrom:
            # The value comes from a cm resource
            configMapKeyRef:
              # Name of the cm
              name: game-demo
              # Key within the cm; it must exist!
              key: school
        - name: linux96-mysql-config
          valueFrom:
            configMapKeyRef:
              name: game-demo
              key: my.cnf
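
To import every key of a cm at once instead of picking keys one by one, the container can use envFrom instead of env; a minimal sketch (the CFG_ prefix is illustrative):

        envFrom:
        - prefix: CFG_
          configMapRef:
            name: game-demo
        # Keys that are not valid environment variable names are skipped
        # and reported in the Pod's events.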

4.2 Run and test

[root@master231 volumes]# kubectl apply -f 08-deploy-env-cm.yaml
configmap/game-demo created
deployment.apps/deploy-cm-env created
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide -l  apps=xiuxian
NAME                             READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-cm-env-5c565cc4bd-2bmcx   1/1     Running   0          25s   10.100.140.110   worker233   <none>           <none>
deploy-cm-env-5c565cc4bd-skm9s   1/1     Running   0          25s   10.100.203.184   worker232   <none>           <none>
deploy-cm-env-5c565cc4bd-xwq5l   1/1     Running   0          25s   10.100.140.112   worker233   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec -it deploy-cm-env-5c565cc4bd-2bmcx -- env
...
linux96-SCHOOL=dingzhiyan
linux96-mysql-config=datadir=/dingzhiyan/data/mysql80
basedir=/dingzhiyan/softwares/mysql80
port=3306
socket=/tmp/mysql80.sock

...

5. Referencing a cm from a Pod via a volume

5.1 Write the manifest

[root@master231 volumes]# cat 09-deploy-volume-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: game-demo
data:
  player_initial_lives: "3"
  ui_properties_file_name: "user-interface.properties"
  school: dingzhiyan
  class: linux96

  game.properties: |
    enemy.types=aliens,monsters
    player.maximum-lives=5    

  user-interface.properties: |
    color.good=purple
    color.bad=yellow
    allow.textmode=true

  my.cnf: |
    datadir=/dingzhiyan/data/mysql80
    basedir=/dingzhiyan/softwares/mysql80
    port=3306
    socket=/tmp/mysql80.sock


---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-cm-env-volume
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      volumes:
      - name: data
        # Volume of type configMap
        configMap:
          # Name of the cm
          name: game-demo
          # Project only specific keys; if this field is omitted, all keys are projected
          items:
            # Key in the cm to project
          - key: school
            # Roughly speaking: the file name this key will get once mounted in the container
            path: school.txt
          - key: my.cnf
            path: my.cnf
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /dingzhiyan
        env:
        - name: linux96-SCHOOL
          valueFrom:
            configMapKeyRef:
              name: game-demo
              key: school
        - name: linux96-mysql-config
          valueFrom:
            configMapKeyRef:
              name: game-demo
              key: my.cnf
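
ConfigMap volumes also accept POSIX permission bits for the projected files, and the projected files are refreshed automatically when the cm changes (after a kubelet sync delay), unlike subPath mounts covered in the next section. A minimal sketch of the same volume with modes set (values illustrative):

      volumes:
      - name: data
        configMap:
          name: game-demo
          # Default permission bits for every projected file
          defaultMode: 0444
          items:
          - key: my.cnf
            path: my.cnf
            # Per-file override
            mode: 0400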

5.2 Create and test

[root@master231 volumes]# kubectl apply -f  09-deploy-volume-cm.yaml
configmap/game-demo created
deployment.apps/deploy-cm-env-volume created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                                   READY   STATUS      RESTARTS   AGE     IP               NODE        NOMINATED NODE   READINESS GATES
...
deploy-cm-env-volume-b9b8ccb97-49qgw   1/1     Running     0          3s      10.100.140.117   worker233   <none>           <none>
deploy-cm-env-volume-b9b8ccb97-jmwtt   1/1     Running     0          3s      10.100.203.190   worker232   <none>           <none>
deploy-cm-env-volume-b9b8ccb97-pf4l9   1/1     Running     0          3s      10.100.203.191   worker232   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec deploy-cm-env-volume-b9b8ccb97-49qgw -- ls -l /dingzhiyan
total 0
lrwxrwxrwx    1 root     root            13 Apr 16 08:34 my.cnf -> ..data/my.cnf
lrwxrwxrwx    1 root     root            17 Apr 16 08:34 school.txt -> ..data/school.txt
[root@master231 volumes]# 

6. Mapping nginx config files from a cm: subPath example

6.1 Write the manifest

[root@master231 volumes]# cat 10-deploy-cm-nginx-subPath.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-all-config
data:
  nginx.conf: |
    user  nginx;
    worker_processes  auto;
    error_log  /var/log/nginx/error.log notice;
    pid        /var/run/nginx.pid;
    events {
        worker_connections  1024;
    }
    http {
        include       /etc/nginx/mime.types;
        default_type  application/octet-stream;
        log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                          '$status $body_bytes_sent "$http_referer" '
                          '"$http_user_agent" "$http_x_forwarded_for"';
        access_log  /var/log/nginx/access.log  main;
        sendfile        on;
        keepalive_timeout  65;
        include /etc/nginx/conf.d/*.conf;
    }


  default.conf: |
    server {
        listen       81;
        listen  [::]:81;
        server_name  localhost;
    
        location / {
            root   /usr/share/nginx/html;
            index  index.html index.htm;
        }
    
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   /usr/share/nginx/html;
        }
    
    }


---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-cm-nginx-subpath
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      volumes:
      - name: sub
        configMap:
          name: nginx-all-config
          items:
          - key: default.conf
            path: default.conf
      - name: main
        configMap:
          name: nginx-all-config
          items:
          - key: nginx.conf
            path: nginx.conf
      - name: tz
        hostPath:
          path: /etc/localtime
      containers:
      - name: c1
        image: harbor250.dingzhiyan.com/dingzhiyan-xiuxian/apps:v1
        # command: ["tail","-f","/etc/hosts"]
        volumeMounts:
        - name: sub
          mountPath: /etc/nginx/conf.d/
        - name: main
          #mountPath: /etc/nginx/
          mountPath: /etc/nginx/nginx.conf
          # When subPath matches the volume item's path, the mountPath becomes a single file rather than a directory.
          # In other words, it does not wipe out the other files in that directory.
          subPath: nginx.conf
        - name: tz
          mountPath: /etc/localtime
[root@master231 volumes]# 

6.2 Verify

[root@master231 volumes]# kubectl apply -f 10-deploy-cm-nginx-subPath.yaml 
configmap/nginx-all-config created
deployment.apps/deploy-cm-nginx-subpath created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                                       READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-cm-nginx-subpath-5569db7cfd-c9bnj   1/1     Running   0          5s    10.100.140.69    worker233   <none>           <none>
deploy-cm-nginx-subpath-5569db7cfd-p4hz2   1/1     Running   0          5s    10.100.203.143   worker232   <none>           <none>
deploy-cm-nginx-subpath-5569db7cfd-pptlr   1/1     Running   0          5s    10.100.140.64    worker233   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec -it deploy-cm-nginx-subpath-5569db7cfd-c9bnj -- ls -l /etc/nginx
total 32
drwxrwxrwx    3 root     root          4096 Apr 16 17:04 conf.d
-rw-r--r--    1 root     root          1077 May 25  2021 fastcgi.conf
-rw-r--r--    1 root     root          1007 May 25  2021 fastcgi_params
-rw-r--r--    1 root     root          5231 May 25  2021 mime.types
lrwxrwxrwx    1 root     root            22 Nov 13  2021 modules -> /usr/lib/nginx/modules
-rw-r--r--    1 root     root           597 Apr 16 17:04 nginx.conf
-rw-r--r--    1 root     root           636 May 25  2021 scgi_params
-rw-r--r--    1 root     root           664 May 25  2021 uwsgi_params
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.140.69:81
<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8"/>
    <title>yinzhengjie apps v1</title>
    <style>
       div img {
          width: 900px;
          height: 600px;
          margin: 0;
       }
    </style>
  </head>

  <body>
    <h1 style="color: green">凡人修仙传 v1 </h1>
    <div>
      <img src="1.jpg">
    <div>
  </body>

</html>
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec -it deploy-cm-nginx-subpath-5569db7cfd-c9bnj -- date -R
Wed, 16 Apr 2025 17:05:35 +0800
[root@master231 volumes]# 
[root@master231 volumes]# date -R
Wed, 16 Apr 2025 17:05:39 +0800
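For contrast: had the main volume been mounted with the commented-out variant below and no subPath, the configMap would shadow the whole /etc/nginx directory, and stock files such as mime.types and fastcgi.conf would disappear, breaking the include directives in nginx.conf:

        - name: main
          # mounting a configMap volume on a directory replaces ALL of its contents
          mountPath: /etc/nginx/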

十、Integrating the Elastic Stack with the K8S cluster for log collection

1. Three common K8S log-collection approaches

1.1 Sidecar mode

Inject a Filebeat container into every Pod to collect its logs.
The collected data is written to the ES cluster.

Pros:
	Easy to reason about: it is just different containers in the same Pod sharing one volume, e.g. an emptyDir (see the sketch below).
	
Cons:
	When there are many business containers, you end up with one Filebeat process per Pod, which wastes resources.
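A minimal sidecar sketch, assuming the app writes its logs to /var/log/nginx and reusing the Filebeat image that appears later in this chapter; the ConfigMap that would supply /etc/filebeat.yml is omitted:

apiVersion: v1
kind: Pod
metadata:
  name: xiuxian-sidecar-demo
spec:
  volumes:
  # Shared volume: the app writes logs into it, the sidecar reads them
  - name: applogs
    emptyDir: {}
  containers:
  - name: app
    image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
    volumeMounts:
    - name: applogs
      mountPath: /var/log/nginx
  - name: filebeat-sidecar
    image: docker.elastic.co/beats/filebeat:7.17.28
    args: ["-c", "/etc/filebeat.yml", "-e"]
    volumeMounts:
    - name: applogs
      mountPath: /var/log/nginx
      readOnly: true
    # plus a volumeMount providing /etc/filebeat.yml from a ConfigMap (not shown)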

1.2 DaemonSet controller mode

Exactly one collector Pod runs on each worker node.


Pros:
	More resource-friendly than the sidecar mode: only one process per node.
	
Cons:
	Requires learning the k8s RBAC concepts.

1.3 The business container itself does log collection

The developers build log shipping into the application itself.

Pros:
	No operations work is required; just hand the ES cluster address and credentials to the developers.
	
	
Cons:
	The developers may not have built this feature, and pushing for it can be difficult.

2. K8S log-collection case study

2.1 Check that the ES cluster is healthy

[root@master231 case-demo-elasticstack]# curl -k -u elastic:123456 https://10.0.0.91:9200/_cat/nodes
10.0.0.66 81 32 5 0.10 0.22 0.27 cdfhilmrstw - elk91
10.0.0.93 70 30 2 0.95 0.63 0.61 cdfhilmrstw - elk93
10.0.0.92 83 25 2 0.11 0.10 0.12 cdfhilmrstw * elk92

2.2 Check that Kibana is healthy

http://10.0.0.91:5601/app/management/data/index_management/indices

2.3 Map the external ES cluster to an in-cluster svc

[root@master231 case-demo-elasticstack]# cat 01-ep-svc.yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: dingzhiyan-es7
subsets:
- addresses:
  - ip: 10.0.0.91
  - ip: 10.0.0.92
  - ip: 10.0.0.93
  ports:
  - port: 9200

---

apiVersion: v1
kind: Service
metadata:
  name: dingzhiyan-es7
spec:
  type: ClusterIP
  ports:
  - port: 9200
[root@master231 case-demo-elasticstack]# 
[root@master231 case-demo-elasticstack]# kubectl apply -f 01-ep-svc.yaml 
endpoints/dingzhiyan-es7 created
service/dingzhiyan-es7 created
[root@master231 case-demo-elasticstack]# 
[root@master231 case-demo-elasticstack]# kubectl get svc dingzhiyan-es7 
NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
dingzhiyan-es7   ClusterIP   10.200.120.230   <none>        9200/TCP   5s
[root@master231 case-demo-elasticstack]# 
[root@master231 case-demo-elasticstack]# 
[root@master231 case-demo-elasticstack]# kubectl describe svc dingzhiyan-es7 
Name:              dingzhiyan-es7
Namespace:         default
Labels:            <none>
Annotations:       <none>
Selector:          <none>
Type:              ClusterIP
IP Family Policy:  SingleStack
IP Families:       IPv4
IP:                10.200.120.230
IPs:               10.200.120.230
Port:              <unset>  9200/TCP
TargetPort:        9200/TCP
Endpoints:         10.0.0.91:9200,10.0.0.92:9200,10.0.0.93:9200
Session Affinity:  None
Events:            <none>
[root@master231 case-demo-elasticstack]# 
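Because the Service has no selector, kube-proxy simply forwards its ClusterIP to the Endpoints defined above. A quick sanity check from any cluster node, using the ClusterIP allocated above (output omitted; it should match the _cat/nodes output from step 2.1):

[root@master231 case-demo-elasticstack]# curl -k -u elastic:123456 https://10.200.120.230:9200/_cat/nodes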

2.4 Create a service account and grant it permissions

[root@master231 case-demo-elasticstack]# cat 02-sa-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: default
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""]
  resources:
  - namespaces
  - pods
  - nodes
  verbs:
  - get
  - watch
  - list

[root@master231 case-demo-elasticstack]# 
[root@master231 case-demo-elasticstack]# 
[root@master231 case-demo-elasticstack]# kubectl apply -f  02-sa-rbac.yaml
serviceaccount/filebeat created
clusterrolebinding.rbac.authorization.k8s.io/filebeat created
clusterrole.rbac.authorization.k8s.io/filebeat created
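kubectl's built-in authorization check can confirm that the binding took effect; it should print yes for each verb granted above:

[root@master231 case-demo-elasticstack]# kubectl auth can-i list pods --as=system:serviceaccount:default:filebeat
yes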

2.5 Add the Filebeat configuration files

[root@master231 case-demo-elasticstack]# cat 03-cm-filebeat-k8s.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        path: ${path.config}/inputs.d/*.yml
        reload.enabled: true
      modules:
        path: ${path.config}/modules.d/*.yml
        reload.enabled: true

    output.elasticsearch:
      hosts: ['https://dingzhiyan-es7:9200']
      # hosts: ['dingzhiyan-es7.default.svc.dingzhiyan.com:9200']
      index: 'dingzhiyan-k8s-ds-%{+yyyy.MM.dd}'
      # Skip certificate verification; valid values: full (default), strict, certificate, none
      # Reference:
      # 	https://www.elastic.co/guide/en/beats/filebeat/7.17/configuration-ssl.html#client-verification-mode
      ssl.verification_mode: none
      username: "elastic"
      password: "123456"

    # Configure the index template
    setup.ilm.enabled: false
    setup.template.name: "dingzhiyan-k8s-ds"
    setup.template.pattern: "dingzhiyan-k8s-ds*"
    setup.template.overwrite: false
    setup.template.settings:
      index.number_of_shards: 5
      index.number_of_replicas: 0

---

apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
data:
  kubernetes.yml: |-
    - type: docker
      containers.ids:
      - "*"
      processors:
        - add_kubernetes_metadata:
            in_cluster: true

[root@master231 case-demo-elasticstack]# 
[root@master231 case-demo-elasticstack]# kubectl apply -f 03-cm-filebeat-k8s.yaml
configmap/filebeat-config created
configmap/filebeat-inputs created
[root@master231 case-demo-elasticstack]# 

2.6 Ship logs to the ES cluster with Filebeat

[root@master231 case-demo-elasticstack]# cat 04-ds-filebeat.yaml
apiVersion: apps/v1 
kind: DaemonSet
metadata:
  name: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
        operator: Exists
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      containers:
      - name: filebeat
        # image: docker.elastic.co/beats/filebeat:7.17.28
        image: harbor250.dingzhiyan.com/dingzhiyan-elasitcstack/filebeat:7.17.28
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
[root@master231 case-demo-elasticstack]# 
[root@master231 case-demo-elasticstack]# 
[root@master231 case-demo-elasticstack]# kubectl apply -f  04-ds-filebeat.yaml
daemonset.apps/filebeat created
[root@master231 case-demo-elasticstack]# 
[root@master231 case-demo-elasticstack]# kubectl get pods -o wide
NAME             READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
filebeat-bg5hs   1/1     Running   0          18s   10.100.203.144   worker232   <none>           <none>
filebeat-w2nq7   1/1     Running   0          19s   10.100.140.71    worker233   <none>           <none>
filebeat-xv2rw   1/1     Running   0          18s   10.100.160.181   master231   <none>           <none>

2.7 Verify the data in Kibana

For example, filter the data with a KQL query: kubernetes.namespace:"default" and  kubernetes.pod.name : "scheduler-nodeselector-774bf9875f-65cdv"

十一、Common volume types: secrets

1. What are secrets

	Similar in purpose to a cm, but a Secret is the k8s resource for storing sensitive data (passwords, tokens, certificates, keys).

	The data is base64-encoded, and keeping sensitive data in its own object reduces the risk of leaking it.

	Official docs:
https://kubernetes.io/zh-cn/docs/concepts/configuration/secret/
https://kubernetes.io/zh-cn/docs/concepts/configuration/secret/#secret-types

2. Two declarative ways to create a secret

2.1 Write the resource manifest

[root@master231 volumes]# cat 11-secrets-stringData.yaml
apiVersion: v1
kind: Secret
metadata:
  name: secrets-stringdata
# Values are base64-encoded automatically when the secret is created; write them here in plain text
stringData:
  username: admin
  password: dingzhiyan
  my.cnf: |
    datadir=/dingzhiyan/data/mysql80
    basedir=/dingzhiyan/softwares/mysql80
    port=3306
    socket=/tmp/mysql80.sock

---

apiVersion: v1
kind: Secret
metadata:
  name: secrets-data
# Stores data that is already base64-encoded
data:
  my.cnf: ZGF0YWRpcj0vb2xkYm95ZWR1L2RhdGEvbXlzcWw4MApiYXNlZGlyPS9vbGRib3llZHUvc29mdHdhcmVzL215c3FsODAKcG9ydD0zMzA2CnNvY2tldD0vdG1wL215c3FsODAuc29jawo=
  password: b2xkYm95ZWR1
  username: YWRtaW4=
  class: bGludXg5Ngo=

2.2 Create and test

[root@master231 volumes]# kubectl apply -f  11-secrets-stringData.yaml
secret/secrets-stringdata created
secret/secrets-data created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get secrets secrets-data secrets-stringdata 
NAME                 TYPE     DATA   AGE
secrets-data         Opaque   4      16s
secrets-stringdata   Opaque   3      16s
[root@master231 volumes]# 
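Since the data field must hold base64 text, it helps to be able to encode and decode by hand (note the -n flag so no trailing newline gets encoded); the username key of secrets-data above decodes back to admin:

[root@master231 volumes]# echo -n admin | base64
YWRtaW4=
[root@master231 volumes]# kubectl get secret secrets-data -o jsonpath='{.data.username}' | base64 -d ; echo
admin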

3. Create a secret imperatively

[root@master231 volumes]# kubectl create secret generic dingzhiyan-secrets --from-file=myhosts=/etc/hosts --from-file=myos=/etc/os-release --from-literal=school=dingzhiyan --from-literal=class=xxx --from-literal=office=https://www.dingzhiyan.com
secret/dingzhiyan-secrets created

[root@master231 volumes]# kubectl get secrets dingzhiyan-secrets 
NAME                TYPE     DATA   AGE
dingzhiyan-secrets   Opaque   5      8s
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl describe secrets dingzhiyan-secrets 
Name:         dingzhiyan-secrets
Namespace:    default
Labels:       <none>
Annotations:  <none>

Type:  Opaque

Data
====
office:   25 bytes
school:   9 bytes
class:    7 bytes
myhosts:  226 bytes
myos:     427 bytes
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get secrets dingzhiyan-secrets  -o yaml
apiVersion: v1
data:
  class: bGludXg5Ng==
  myhosts: MTI3LjAuMC4xIGxvY2FsaG9zdAoxMjcuMC4xLjEgeWluemhlbmdqaWUKCiMgVGhlIGZvbGxvd2luZyBsaW5lcyBhcmUgZGVzaXJhYmxlIGZvciBJUHY2IGNhcGFibGUgaG9zdHMKOjoxICAgICBpcDYtbG9jYWxob3N0IGlwNi1sb29wYmFjawpmZTAwOjowIGlwNi1sb2NhbG5ldApmZjAwOjowIGlwNi1tY2FzdHByZWZpeApmZjAyOjoxIGlwNi1hbGxub2RlcwpmZjAyOjoyIGlwNi1hbGxyb3V0ZXJzCg==
  myos: UFJFVFRZX05BTUU9IlVidW50dSAyMi4wNC40IExUUyIKTkFNRT0iVWJ1bnR1IgpWRVJTSU9OX0lEPSIyMi4wNCIKVkVSU0lPTj0iMjIuMDQuNCBMVFMgKEphbW15IEplbGx5ZmlzaCkiClZFUlNJT05fQ09ERU5BTUU9amFtbXkKSUQ9dWJ1bnR1CklEX0xJS0U9ZGViaWFuCkhPTUVfVVJMPSJodHRwczovL3d3dy51YnVudHUuY29tLyIKU1VQUE9SVF9VUkw9Imh0dHBzOi8vaGVscC51YnVudHUuY29tLyIKQlVHX1JFUE9SVF9VUkw9Imh0dHBzOi8vYnVncy5sYXVuY2hwYWQubmV0L3VidW50dS8iClBSSVZBQ1lfUE9MSUNZX1VSTD0iaHR0cHM6Ly93d3cudWJ1bnR1LmNvbS9sZWdhbC90ZXJtcy1hbmQtcG9saWNpZXMvcHJpdmFjeS1wb2xpY3kiClVCVU5UVV9DT0RFTkFNRT1qYW1teQpCTE9HPWh0dHBzOi8vd3d3LmNuYmxvZ3MuY29tL3lpbnpoZW5namllCg==
  office: aHR0cHM6Ly93d3cub2xkYm95ZWR1LmNvbQ==
  school: b2xkYm95ZWR1
kind: Secret
metadata:
  creationTimestamp: "2025-04-17T01:47:10Z"
  name: dingzhiyan-secrets
  namespace: default
  resourceVersion: "673079"
  uid: d5614e17-2d9f-46ca-a7a1-46468640a4ba
type: Opaque
[root@master231 volumes]# 

4. Two ways for a Pod to reference secrets

4.1 Pod referencing a secret via environment variables

[root@master231 volumes]# cat 12-deploy-env-secrets.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: secrets-stringdata
stringData:
  username: admin
  password: dingzhiyan
  my.cnf: |
    datadir=/dingzhiyan/data/mysql80
    basedir=/dingzhiyan/softwares/mysql80
    port=3306
    socket=/tmp/mysql80.sock

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-secrets-env
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        env:
        - name: linux96-USERNAME
          # The value is referenced from another resource
          valueFrom:
            # The value comes from a Secret resource
            secretKeyRef:
              # Name of the secret
              name: secrets-stringdata
              # A key inside the secret; this key must exist!
              key: username
        - name: linux96-mysql-CONFIG
          valueFrom:
            secretKeyRef:
              name: secrets-stringdata
              key: my.cnf
              
[root@master231 volumes]# kubectl apply -f 12-deploy-env-secrets.yaml
secret/secrets-stringdata configured
deployment.apps/deploy-secrets-env created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                                  READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-secrets-env-7968d46474-7j9dz   1/1     Running   0          36s   10.100.140.70    worker233   <none>           <none>
deploy-secrets-env-7968d46474-lnchw   1/1     Running   0          36s   10.100.140.74    worker233   <none>           <none>
deploy-secrets-env-7968d46474-p9nss   1/1     Running   0          36s   10.100.203.147   worker232   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec -it deploy-secrets-env-7968d46474-7j9dz -- env
...
linux96-USERNAME=admin
linux96-mysql-CONFIG=datadir=/dingzhiyan/data/mysql80
basedir=/dingzhiyan/softwares/mysql80
port=3306
socket=/tmp/mysql80.sock

...

[root@master231 volumes]# kubectl delete -f 12-deploy-env-secrets.yaml 
secret "secrets-stringdata" deleted
deployment.apps "deploy-secrets-env" deleted
[root@master231 volumes]# 

4.2 Pod referencing a secret via a volume

[root@master231 volumes]# cat 13-deploy-volume-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: secrets-stringdata
stringData:
  username: admin
  password: dingzhiyan
  my.cnf: |
    datadir=/dingzhiyan/data/mysql80
    basedir=/dingzhiyan/softwares/mysql80
    port=3306
    socket=/tmp/mysql80.sock

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-secrets-env-volumes
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      volumes:
      - name: data
        secret:
          secretName:  secrets-stringdata
          items:
          - key: username
            path: username.txt
          - key: password
            path: pwd.txt
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /dingzhiyan 
        env:
        - name: linux96-USERNAME
          valueFrom:
            secretKeyRef:
              name: secrets-stringdata
              key: username
        - name: linux96-mysql-CONFIG
          valueFrom:
            secretKeyRef:
              name: secrets-stringdata
              key: my.cnf

[root@master231 volumes]# 
[root@master231 volumes]# kubectl apply -f  13-deploy-volume-secrets.yaml
secret/secrets-stringdata created
deployment.apps/deploy-secrets-env-volumes created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                                          READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-secrets-env-volumes-5f64798676-662fq   1/1     Running   0          2s    10.100.140.76    worker233   <none>           <none>
deploy-secrets-env-volumes-5f64798676-9tpj5   1/1     Running   0          2s    10.100.203.131   worker232   <none>           <none>
deploy-secrets-env-volumes-5f64798676-mbqvk   1/1     Running   0          2s    10.100.140.72    worker233   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec  deploy-secrets-env-volumes-5f64798676-662fq -- ls -l /dingzhiyan
total 0
lrwxrwxrwx    1 root     root            14 Apr 17 01:56 pwd.txt -> ..data/pwd.txt
lrwxrwxrwx    1 root     root            19 Apr 17 01:56 username.txt -> ..data/username.txt
[root@master231 volumes]# 

5. Imperatively create docker registry credentials and pull from a private project

5.1 Set up a private harbor project

5.2 Imperatively create the harbor auth secret as the admin user

# For the declarative route, --dry-run=client -o yaml produces the manifest directly
[root@master231 volumes]# kubectl create secret docker-registry admin-harbor --docker-username=admin --docker-password=1 --docker-email=admin@dingzhiyan.com --docker-server=harbor250.dingzhiyan.com --dry-run=client -o yaml
apiVersion: v1
data:
  .dockerconfigjson: eyJhdXRocyI6eyJoYXJib3IyNTAub2xkYm95ZWR1LmNvbSI6eyJ1c2VybmFtZSI6ImFkbWluIiwicGFzc3dvcmQiOiIxIiwiZW1haWwiOiJhZG1pbkBvbGRib3llZHUuY29tIiwiYXV0aCI6IllXUnRhVzQ2TVE9PSJ9fX0=
kind: Secret
metadata:
  creationTimestamp: null
  name: admin-harbor
type: kubernetes.io/dockerconfigjson

# Imperative creation
[root@master231 volumes]# kubectl create secret docker-registry admin-harbor --docker-username=admin --docker-password=1 --docker-email=admin@dingzhiyan.com --docker-server=harbor250.dingzhiyan.com 
secret/admin-harbor created
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get secrets admin-harbor 
NAME           TYPE                             DATA   AGE
admin-harbor   kubernetes.io/dockerconfigjson   1      58s
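The secret is just a base64-encoded docker config file; decoding it shows the registry, username, password and auth fields. Note that the leading dot of the key has to be escaped in jsonpath:

[root@master231 volumes]# kubectl get secret admin-harbor -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d ; echo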

5.3 Reference the secret when pulling images

[root@master231 volumes]# cat 14-deploy-secrets-harbor.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-harbor
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      imagePullSecrets:
      - name: admin-harbor
      containers:
      - name: c1
        image: harbor250.dingzhiyan.com/dingzhiyan-xiuxian/apps:v1
        imagePullPolicy: Always
[root@master231 volumes]# 
[root@master231 volumes]# kubectl apply -f 14-deploy-secrets-harbor.yaml
deployment.apps/deploy-xiuxian-harbor created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                                     READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-xiuxian-harbor-657c89bf7c-8z8p5   1/1     Running   0          8s    10.100.203.148   worker232   <none>           <none>
deploy-xiuxian-harbor-657c89bf7c-f2lqb   1/1     Running   0          8s    10.100.140.77    worker233   <none>           <none>
deploy-xiuxian-harbor-657c89bf7c-f82sn   1/1     Running   0          8s    10.100.140.78    worker233   <none>           <none>

6. Declaratively authenticate to harbor via a service account

6.1 Create the harbor user account

Username: linux96
Password: Linux96@2025
Email:    linux96@dingzhiyan.com

6.2 Generate the secret manifest

[root@master231 volumes]# kubectl create secret docker-registry linux96-harbor --docker-username=linux96 --docker-password=Linux96@2025 --docker-email=linux96@dingzhiyan.com --docker-server=harbor250.dingzhiyan.com --dry-run=client -o yaml
apiVersion: v1
data:
  .dockerconfigjson: eyJhdXRocyI6eyJoYXJib3IyNTAub2xkYm95ZWR1LmNvbSI6eyJ1c2VybmFtZSI6ImxpbnV4OTYiLCJwYXNzd29yZCI6IkxpbnV4OTZAMjAyNSIsImVtYWlsIjoibGludXg5NkBvbGRib3llZHUuY29tIiwiYXV0aCI6ImJHbHVkWGc1TmpwTWFXNTFlRGsyUURJd01qVT0ifX19
kind: Secret
metadata:
  creationTimestamp: null
  name: linux96-harbor
type: kubernetes.io/dockerconfigjson
[root@master231 volumes]# 

6.3 Write the resource manifest

[root@master231 volumes]# cat 15-deploy-sa-harbor.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: linux96-harbor
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: eyJhdXRocyI6eyJoYXJib3IyNTAub2xkYm95ZWR1LmNvbSI6eyJ1c2VybmFtZSI6ImxpbnV4OTYiLCJwYXNzd29yZCI6IkxpbnV4OTZAMjAyNSIsImVtYWlsIjoibGludXg5NkBvbGRib3llZHUuY29tIiwiYXV0aCI6ImJHbHVkWGc1TmpwTWFXNTFlRGsyUURJd01qVT0ifX19

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: linux96-sa
imagePullSecrets:
- name: linux96-harbor


---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-harbor-sa
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      #imagePullSecrets:
      #- name: admin-harbor
      serviceAccountName: linux96-sa
      containers:
      - name: c1
        image: harbor250.dingzhiyan.com/dingzhiyan-xiuxian/apps:v1
        imagePullPolicy: Always

6.4 Verify

[root@master231 volumes]# kubectl apply -f  15-deploy-sa-harbor.yaml 
secret/linux96-harbor created
serviceaccount/linux96-sa created
deployment.apps/deploy-xiuxian-harbor-sa created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                                        READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-xiuxian-harbor-sa-866c568f77-gs6zk   1/1     Running   0          4s    10.100.140.82    worker233   <none>           <none>
deploy-xiuxian-harbor-sa-866c568f77-mv8zc   1/1     Running   0          4s    10.100.140.80    worker233   <none>           <none>
deploy-xiuxian-harbor-sa-866c568f77-xhglx   1/1     Running   0          4s    10.100.203.151   worker232   <none>           <none>
[root@master231 volumes]# 
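Because the service account carries imagePullSecrets, the ServiceAccount admission controller attaches the secret to every Pod that uses the account. This can be confirmed on any of the Pods above; it should print linux96-harbor:

[root@master231 volumes]# kubectl get pod deploy-xiuxian-harbor-sa-866c568f77-gs6zk -o jsonpath='{.spec.imagePullSecrets[*].name}' ; echo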

十二、Volumes: the DownwardAPI in practice [extended]

1. DownwardAPI

Unlike ConfigMap and Secret, the DownwardAPI is not a standalone API resource type.

It is simply a mechanism for injecting field values from a Pod's metadata, spec or status into the containers inside it.

The DownwardAPI offers two ways to inject Pod information into a container:
	- Environment variables:
		for single values; Pod and container information is injected directly into the container.
	- Volume mounts:
		Pod information is rendered into files that are mounted inside the container.

2. Using the DownwardAPI via environment variables

2.1 Parameter reference

Valid fieldRef values:
	- metadata.name
	- metadata.namespace
	- `metadata.labels['<KEY>']`
	- `metadata.annotations['<KEY>']`
	- spec.nodeName
	- spec.serviceAccountName
	- status.hostIP
	- status.podIP
	- status.podIPs
	 
Valid resourceFieldRef values:
	- limits.cpu
	- limits.memory
	- limits.ephemeral-storage
	- requests.cpu
	- requests.memory
	- requests.ephemeral-storage

2.2 Hands-on example

[root@master231 volumes]# cat 16-deploy-DownwardAPI-env.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-downwardapi-env
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      containers:
      - name: c1
        image: harbor250.dingzhiyan.com/dingzhiyan-xiuxian/apps:v1
        resources:
          requests:
            cpu: 0.2
            memory: 200Mi
          limits:
            cpu: 0.5
            memory: 500Mi
        imagePullPolicy: Always
        env:
        - name: dingzhiyan-PODNAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: dingzhiyan-IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: dingzhiyan-REQUESTS
          valueFrom:
            resourceFieldRef:
              resource: requests.cpu
        - name: dingzhiyan-LIMITS
          valueFrom:
            resourceFieldRef:
              resource: limits.memory
[root@master231 volumes]# 
[root@master231 volumes]# kubectl apply -f  16-deploy-DownwardAPI-env.yaml
deployment.apps/deploy-downwardapi-env created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                                      READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-downwardapi-env-77f7dcdb98-bh8m9   1/1     Running   0          5s    10.100.140.90    worker233   <none>           <none>
deploy-downwardapi-env-77f7dcdb98-gd46j   1/1     Running   0          5s    10.100.140.86    worker233   <none>           <none>
deploy-downwardapi-env-77f7dcdb98-wvd4p   1/1     Running   0          5s    10.100.203.135   worker232   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec deploy-downwardapi-env-77f7dcdb98-bh8m9 -- env | grep dingzhiyan
dingzhiyan-PODNAME=deploy-downwardapi-env-77f7dcdb98-bh8m9
dingzhiyan-IP=10.100.140.90
dingzhiyan-REQUESTS=1
dingzhiyan-LIMITS=524288000
[root@master231 volumes]# 

2.3 Verification (from an earlier run of the same spec, saved as 14-deploy-valueFrom.yaml)

[root@master231 volumes]# kubectl apply -f 14-deploy-valueFrom.yaml
deployment.apps/deploy-valuefrom created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                                   READY   STATUS    RESTARTS   AGE   IP             NODE        NOMINATED NODE   READINESS GATES
deploy-secret-harbor-dfdbdfbb5-2dzb8   1/1     Running   0          28m   10.100.1.137   worker232   <none>           <none>
deploy-secret-harbor-dfdbdfbb5-5fnpl   1/1     Running   0          28m   10.100.1.136   worker232   <none>           <none>
deploy-secret-harbor-dfdbdfbb5-6mp68   1/1     Running   0          28m   10.100.2.181   worker233   <none>           <none>
deploy-valuefrom-7f48549b-5v4rn        1/1     Running   0          3s    10.100.2.182   worker233   <none>           <none>
deploy-valuefrom-7f48549b-lkctz        1/1     Running   0          3s    10.100.1.138   worker232   <none>           <none>
deploy-valuefrom-7f48549b-wkf7s        1/1     Running   0          3s    10.100.1.139   worker232   <none>           <none>
[root@master231 volumes]# kubectl exec  deploy-valuefrom-7f48549b-5v4rn -- env
...
dingzhiyan-LIMITS=524288000
dingzhiyan-PODNAME=deploy-valuefrom-7f48549b-5v4rn
dingzhiyan-IP=10.100.2.182
# Note that requests.cpu did not come through as 0.2; the value is rounded up to the next whole core.
dingzhiyan-REQUESTS=1
...
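The rounding can be controlled with the optional divisor field of resourceFieldRef. A sketch of just the affected env entry, using a divisor of 1m so that 0.2 cores is exposed as 200 millicores:

        - name: dingzhiyan-REQUESTS
          valueFrom:
            resourceFieldRef:
              resource: requests.cpu
              # expose the value in millicores: 0.2 cores -> 200
              divisor: 1m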

3. Using the DownwardAPI via volumes

[root@master231 volumes]# cat 17-deploy-DownwardAPI-volume.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: downwardapi-volume
spec:
  replicas: 1
  selector:
    matchLabels:
      apps: v1
  template:
    metadata:
      labels:
        apps: v1
    spec:
      volumes:
      - name: data01
        # Specify the volume type
        downwardAPI:
          # Specify the items (key/value pairs)
          items:
          - path: pod-name
            # Only supports: annotations, labels, name and namespace
            fieldRef:
              fieldPath: "metadata.name"
      - name: data02
        downwardAPI:
          items:
          - path: pod-ns
            fieldRef:
              fieldPath: "metadata.namespace"
      - name: data03
        downwardAPI:
          items:
          - path: containers-limists-memory
            # Only supports: limits.cpu, limits.memory, requests.cpu and requests.memory
            resourceFieldRef:
              containerName: c1
              resource: "limits.memory"
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        resources:
          requests:
            cpu: 0.2
            memory: 300Mi
          limits:
            cpu: 0.5
            memory: 500Mi
        volumeMounts:
        - name: data01
          mountPath: /yinzhengjie-xixi
        - name: data02
          mountPath: /yinzhengjie-haha
        - name: data03
          mountPath: /yinzhengjie-hehe
      - name: c2
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v2
        volumeMounts:
        - name: data03
          mountPath: /yinzhengjie-hehe
        command:
        - tail
        args:
        - -f
        - /etc/hosts
        resources:
          limits:
            cpu: 1.5
            memory: 1.5Gi
[root@master231 volumes]# 
[root@master231 volumes]# kubectl apply -f  17-deploy-DownwardAPI-volume.yaml
deployment.apps/downwardapi-volume created
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get po -o wide
NAME                                      READY   STATUS    RESTARTS   AGE    IP               NODE        NOMINATED NODE   READINESS GATES
...
downwardapi-volume-7d978684fc-8tkd2       2/2     Running   0          8s     10.100.203.150   worker232   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec -it downwardapi-volume-7d978684fc-8tkd2 -c c1 -- sh
/ # 
/ # cat /yinzhengjie-xixi/pod-name;echo
downwardapi-volume-7d978684fc-8tkd2
/ # 
/ # cat /yinzhengjie-haha/pod-ns ;echo
default
/ # 
/ # cat /yinzhengjie-hehe/containers-limists-memory ;echo 
524288000
/ # 
/ # 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl exec -it downwardapi-volume-7d978684fc-8tkd2 -c c2 -- sh
/ # 
/ # cat /yinzhengjie-hehe/containers-limists-memory ;echo
524288000

十三、Init containers

1. What is an init container

	As the name suggests, init containers do initialization work on behalf of the business containers.

	The business containers start only after all init containers have run to completion; put simply, init containers and business containers never run at the same time.

2. Startup order of init containers and business containers

[root@master231 case-demo-initContainers]# cat 01-initContainers.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-initcontainers
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      volumes:
      - emptyDir: {}
        name: data
      initContainers:
      - name: i1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        command: ["sleep","10"]
      - name: i2
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        command: ["sleep","20"]
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /usr/share/nginx/html
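The startup order is easy to observe: while i1 and i2 run (10s, then 20s), the STATUS column moves through Init:0/2 and Init:1/2 before the business container starts. Watch it live after applying the manifest:

[root@master231 case-demo-initContainers]# kubectl apply -f 01-initContainers.yaml
deployment.apps/deploy-initcontainers created
[root@master231 case-demo-initContainers]# kubectl get pods -l apps=xiuxian -w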

3. Hands-on case

3.1 Requirements

	Deploy an nginx service whose home page shows the Pod's name and its IP address. How would you do it?

	Analysis:

  • 1. Use the downwardAPI to obtain the Pod name and IP address and pass them to an init container;
  • 2. The init container renders the home page and hands the data to the business container through a shared volume;
  • 3. Start the business container.

3.2 Write the resource manifest

[root@master231 case-demo-initContainers]# cat 02-initContainers-downwardAPI.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-initcontainers-env
spec:
  replicas: 3
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      volumes:
      - emptyDir: {}
        name: data
      initContainers:
      - name: i1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        env:
        - name: PODNAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: PODIP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        volumeMounts:
        - name: data
          mountPath: /dingzhiyan
        command: 
        - /bin/sh
        - -c
        - 'echo "<h1>$PODNAME: $PODIP</h1>"  > /dingzhiyan/index.html'
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /usr/share/nginx/html
[root@master231 case-demo-initContainers]# 
[root@master231 case-demo-initContainers]# kubectl apply -f 02-initContainers-downwardAPI.yaml 
deployment.apps/deploy-initcontainers-env created
[root@master231 case-demo-initContainers]# 
[root@master231 case-demo-initContainers]# kubectl get pods -o wide
NAME                                        READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-initcontainers-env-9886495b6-2jw9k   1/1     Running   0          6s    10.100.203.162   worker232   <none>           <none>
deploy-initcontainers-env-9886495b6-9fqhp   1/1     Running   0          6s    10.100.140.95    worker233   <none>           <none>
deploy-initcontainers-env-9886495b6-pslgk   1/1     Running   0          6s    10.100.140.87    worker233   <none>           <none>
[root@master231 case-demo-initContainers]# 
[root@master231 case-demo-initContainers]# curl 10.100.203.162 
<h1>deploy-initcontainers-env-9886495b6-2jw9k: 10.100.203.162</h1>
[root@master231 case-demo-initContainers]# 
[root@master231 case-demo-initContainers]# 
[root@master231 case-demo-initContainers]# curl 10.100.140.95
<h1>deploy-initcontainers-env-9886495b6-9fqhp: 10.100.140.95</h1>
[root@master231 case-demo-initContainers]# 
[root@master231 case-demo-initContainers]# curl 10.100.140.87 
<h1>deploy-initcontainers-env-9886495b6-pslgk: 10.100.140.87</h1>

十四、Advanced volumes: PV, PVC and SC

1. Roles

- pv
	A pv is the resource that fronts and binds to the backend storage.

- sc 
	An sc can create pv resources dynamically; it also points at backend storage.
	
- pvc 
	A pvc requests storage from a pv or an sc and obtains a specific allocation.
	
	A pod then only has to declare in its volumes which pvc it uses.

2. Hands-on: manually create a PV and a PVC and reference them from a Pod

2.1 Manually create PVs

2.1.1 Create the working directories

[root@master231 ~]# mkdir -pv /dingzhiyan/data/nfs-server/pv/linux/pv00{1,2,3}
mkdir: created directory '/dingzhiyan/data/nfs-server/pv'
mkdir: created directory '/dingzhiyan/data/nfs-server/pv/linux'
mkdir: created directory '/dingzhiyan/data/nfs-server/pv/linux/pv001'
mkdir: created directory '/dingzhiyan/data/nfs-server/pv/linux/pv002'
mkdir: created directory '/dingzhiyan/data/nfs-server/pv/linux/pv003'
[root@master231 ~]# 
[root@master231 ~]# tree /dingzhiyan/data/nfs-server/pv/linux/
/dingzhiyan/data/nfs-server/pv/linux/
├── pv001
├── pv002
└── pv003

3 directories, 0 files
[root@master231 ~]# 
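These directories must also be exported by the NFS server on 10.0.0.231. Assuming the export configured earlier in the course covers the parent directory, the /etc/exports entry would look roughly like this (followed by exportfs -r to reload):

/dingzhiyan/data/nfs-server *(rw,no_root_squash)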

2.1.2 Write the resource manifest

[root@master231 persistentvolumes]# cat  manual-pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: dingzhiyan-linux-pv01
  labels:
    school: dingzhiyan
spec:
   # Declare the PV's access modes; common values are "ReadWriteOnce", "ReadOnlyMany" and "ReadWriteMany":
   #   ReadWriteOnce ("RWO"):
   #      only a single worker node can mount the volume read-write, but multiple Pods on that node may access it at the same time.
   #   ReadOnlyMany ("ROX"):
   #      multiple worker nodes can mount the volume read-only.
   #   ReadWriteMany ("RWX"):
   #      multiple worker nodes can mount the volume read-write.
   #   ReadWriteOncePod ("RWOP"):
   #      the volume can be mounted read-write by a single Pod only.
   #      Use this mode to guarantee that only one Pod in the whole cluster can read or write the PVC.
   #      Only supported for CSI volumes on Kubernetes 1.22+.
   accessModes:
   - ReadWriteMany
   # Declare the volume type as nfs
   nfs:
     path: /dingzhiyan/data/nfs-server/pv/linux/pv001
     server: 10.0.0.231
   # Reclaim policy; common values are "Retain" and "Delete"
   #    Retain:
   #       allows manual reclamation. When the PersistentVolumeClaim is deleted,
   #       the PersistentVolume remains and is marked "Released".
   #       Other Pods cannot use it until an administrator reclaims it by hand.
   #    Delete:
   #       for volume plugins that support it, k8s deletes the PV together with the backing data.
   #    Recycle:
   #       deprecated upstream; dynamic provisioning is the recommended replacement.
   #       Where supported, it performs a basic scrub (rm -rf /thevolume/*) and makes the volume available for a new claim.
   persistentVolumeReclaimPolicy: Retain
   # Declare the capacity
   capacity:
     storage: 2Gi

---

apiVersion: v1
kind: PersistentVolume
metadata:
  name: dingzhiyan-linux-pv02
  labels:
    school: dingzhiyan
spec:
   accessModes:
   - ReadWriteMany
   nfs:
     path: /dingzhiyan/data/nfs-server/pv/linux/pv002
     server: 10.0.0.231
   persistentVolumeReclaimPolicy: Retain
   capacity:
     storage: 5Gi

---

apiVersion: v1
kind: PersistentVolume
metadata:
  name: dingzhiyan-linux-pv03
  labels:
    school: dingzhiyan
spec:
   accessModes:
   - ReadWriteMany
   nfs:
     path: /dingzhiyan/data/nfs-server/pv/linux/pv003
     server: 10.0.0.231
   persistentVolumeReclaimPolicy: Retain
   capacity:
     storage: 10Gi
[root@master231 persistentvolumes]# 

2.1.3 Create the PVs

[root@master231 persistentvolumes]# kubectl apply -f manual-pv.yaml 
persistentvolume/dingzhiyan-linux-pv01 created
persistentvolume/dingzhiyan-linux-pv02 created
persistentvolume/dingzhiyan-linux-pv03 created
[root@master231 persistentvolumes]# 
[root@master231 persistentvolumes]# kubectl get pv
NAME                   CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
dingzhiyan-linux-pv01   2Gi        RWX            Retain           Available                                   8s
dingzhiyan-linux-pv02   5Gi        RWX            Retain           Available                                   8s
dingzhiyan-linux-pv03   10Gi       RWX            Retain           Available                                   8s
Field reference:
		NAME : 
			name of the pv
		CAPACITY : 
			capacity of the pv
		ACCESS MODES: 
			access modes of the pv
		RECLAIM POLICY:
			reclaim policy of the pv.
		STATUS :
			current state of the pv.
		CLAIM:
			which pvc is using the pv.
		STORAGECLASS  
			name of the sc.
		REASON   
			reason shown when the pv is in an error state.
		AGE
			how long ago the pv was created.

2.2 Manually create a PVC

[root@master231 persistentvolumeclaims]# cat manual-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: dingzhiyan-linux-pvc
spec:
  # Pin the pv to use (optional)
  # volumeName: dingzhiyan-linux-pv03
  # Declare the access modes
  accessModes:
  - ReadWriteMany
  # Declare how much storage is requested
  resources:
    limits:
       storage: 4Gi
    requests:
       storage: 3Gi
[root@master231 persistentvolumeclaims]# 
[root@master231 persistentvolumeclaims]# kubectl apply -f  manual-pvc.yaml 
persistentvolumeclaim/dingzhiyan-linux-pvc created
[root@master231 persistentvolumeclaims]# 
[root@master231 persistentvolumeclaims]# kubectl get pv,pvc
NAME                                    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                         STORAGECLASS   REASON   AGE
persistentvolume/dingzhiyan-linux-pv01   2Gi        RWX            Retain           Available                                                         3m39s
persistentvolume/dingzhiyan-linux-pv02   5Gi        RWX            Retain           Bound       default/dingzhiyan-linux-pvc                           3m39s
persistentvolume/dingzhiyan-linux-pv03   10Gi       RWX            Retain           Available                                                         3m39s

NAME                                        STATUS   VOLUME                 CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/dingzhiyan-linux-pvc   Bound    dingzhiyan-linux-pv02   5Gi        RWX                           6s
[root@master231 persistentvolumeclaims]# 

Note which PV was bound: the claim requested 3Gi, so the binder picked dingzhiyan-linux-pv02, the smallest Available PV whose capacity (5Gi) satisfies the request.

2.3 Reference the PVC from a Pod

[root@master231 volumes]# cat 17-deploy-pvc.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-pvc-demo
spec:
  replicas: 1
  selector:
    matchLabels:
      apps: v1
  template:
    metadata:
      labels:
        apps: v1
    spec:
      volumes:
      - name: data
        # Declare the volume type as pvc
        persistentVolumeClaim:
          # Name of the pvc
          claimName: dingzhiyan-linux-pvc
      - name: dt
        hostPath:
         path: /etc/localtime
      initContainers:
      - name: init01
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /dingzhiyan
        - name: dt
          mountPath: /etc/localtime
        command:
        - /bin/sh
        - -c
        - date -R > /dingzhiyan/index.html ; echo www.dingzhiyan.com >> /dingzhiyan/index.html
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /usr/share/nginx/html
        - name: dt
          mountPath: /etc/localtime
[root@master231 volumes]# 
[root@master231 volumes]# 
[root@master231 volumes]# kubectl apply -f  17-deploy-pvc.yaml 
deployment.apps/deploy-pvc-demo created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                              READY   STATUS    RESTARTS   AGE   IP             NODE        NOMINATED NODE   READINESS GATES
deploy-pvc-demo-688b57bdd-dlkzd   1/1     Running   0          3s    10.100.1.142   worker232   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.1.142
Fri, 18 Apr 2025 09:38:12 +0800
www.dingzhiyan.com
[root@master231 volumes]# 

2.4 Trace from the Pod to the backing PV

2.4.1 Find the PVC name

[root@master231 volumes]# kubectl describe pod deploy-pvc-demo-688b57bdd-dlkzd 
Name:         deploy-pvc-demo-688b57bdd-dlkzd
Namespace:    default
...
Volumes:
  data:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  dingzhiyan-linux-pvc
...
	

2.4.2 Find the PV bound to the PVC

[root@master231 volumes]# kubectl get pvc dingzhiyan-linux-pvc 
NAME                  STATUS   VOLUME                 CAPACITY   ACCESS MODES   STORAGECLASS   AGE
dingzhiyan-linux-pvc   Bound    dingzhiyan-linux-pv02   5Gi        RWX                           12m
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pv dingzhiyan-linux-pv02
NAME                   CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                         STORAGECLASS   REASON   AGE
dingzhiyan-linux-pv02   5Gi        RWX            Retain           Bound    default/dingzhiyan-linux-pvc                           15m
[root@master231 volumes]# 

2.4.3 Inspect the PV details

[root@master231 volumes]# kubectl describe pv dingzhiyan-linux-pv02
Name:            dingzhiyan-linux-pv02
Labels:          school=dingzhiyan
...
Source:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    10.0.0.231
    Path:      /dingzhiyan/data/nfs-server/pv/linux/pv002
    ReadOnly:  false
...

2.4.4 Verify the data

[root@master231 volumes]# ll /dingzhiyan/data/nfs-server/pv/linux/pv002
total 12
drwxr-xr-x 2 root root 4096 Feb 14 16:46 ./
drwxr-xr-x 5 root root 4096 Feb 14 16:36 ../
-rw-r--r-- 1 root root   68 Feb 14 16:49 index.html
[root@master231 volumes]# 
[root@master231 volumes]# cat /dingzhiyan/data/nfs-server/pv/linux/pv002/index.html 
Fri, 18 Apr 2025 09:38:12 +0800
www.dingzhiyan.com
[root@master231 volumes]# 

3. Hands-on: a dynamic StorageClass with csi-driver-nfs v4.9.0

Recommended reading:
https://github.com/kubernetes-csi/csi-driver-nfs/blob/master/docs/install-csi-driver-v4.9.0.md

https://kubernetes.io/docs/concepts/storage/storage-classes/#nfs

3.1 Clone the code

[root@master231 nfs]# git clone https://github.com/kubernetes-csi/csi-driver-nfs.git

3.2 Install the NFS CSI driver

[root@master231 ~]# cd csi-driver-nfs-4.9.0/
[root@master231 csi-driver-nfs-4.9.0]# 
[root@master231 csi-driver-nfs-4.9.0]# ./deploy/install-driver.sh v4.9.0 local
use local deploy
Installing NFS CSI driver, version: v4.9.0 ...
serviceaccount/csi-nfs-controller-sa created
serviceaccount/csi-nfs-node-sa created
clusterrole.rbac.authorization.k8s.io/nfs-external-provisioner-role created
clusterrolebinding.rbac.authorization.k8s.io/nfs-csi-provisioner-binding created
csidriver.storage.k8s.io/nfs.csi.k8s.io created
deployment.apps/csi-nfs-controller created
daemonset.apps/csi-nfs-node created
NFS CSI driver installed successfully.
[root@master231 csi-driver-nfs-4.9.0]# 

3.3 Verify the installation

Pulling the images may require a proxy.

[root@master231 csi-driver-nfs]# kubectl -n kube-system get pod -o wide -l app
NAME                                 READY   STATUS    RESTARTS   AGE   IP           NODE        NOMINATED NODE   READINESS GATES
csi-nfs-controller-5c5c695fb-6psv8   4/4     Running   0          4s    10.0.0.232   worker232   <none>           <none>
csi-nfs-node-bsmr7                   3/3     Running   0          3s    10.0.0.232   worker232   <none>           <none>
csi-nfs-node-ghtvt                   3/3     Running   0          3s    10.0.0.231   master231   <none>           <none>
csi-nfs-node-s4dm5                   3/3     Running   0          3s    10.0.0.233   worker233   <none>           <none>

3.4 Create the StorageClass

[root@master231 csi-driver-nfs]# mkdir /dingzhiyan/data/nfs-server/sc/
[root@master231 csi-driver-nfs]#
[root@master231 csi-driver-nfs]# cat deploy/v4.9.0/storageclass.yaml 
...
parameters:
  server: 10.0.0.231
  share: /dingzhiyan/data/nfs-server/sc
  ...
[root@master231 csi-driver-nfs]# 
[root@master231 csi-driver-nfs]# kubectl apply -f deploy/v4.9.0/storageclass.yaml 
storageclass.storage.k8s.io/nfs-csi created
[root@master231 csi-driver-nfs]# 
[root@master231 csi-driver-nfs]# kubectl get sc
NAME      PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-csi   nfs.csi.k8s.io   Delete          Immediate           false                  3s
[root@master231 csi-driver-nfs]# 

3.5 Delete leftover PVs and PVCs so the environment is "clean"

3.6 Create a PVC for testing

[root@master231 volumes]# cat 22-pvc-sc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: dingzhiyan-linux-pvc-sc
spec:
  # Pin a specific pv (not needed when using a storage class)
  # volumeName: dingzhiyan-linux-pv03
  # Declare the storage class to use
  storageClassName: nfs-csi
  # Declare the access modes
  accessModes:
  - ReadWriteMany
  # Declare how much storage is requested
  resources:
    limits:
       storage: 2Mi
    requests:
       storage: 1Mi
       
[root@master231 volumes]# kubectl apply -f 22-pvc-sc.yaml
persistentvolumeclaim/dingzhiyan-linux-pvc-sc created

[root@master231 volumes]# kubectl get pvc,pv -o wide
NAME                                           STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
persistentvolumeclaim/dingzhiyan-linux-pvc-sc   Bound    pvc-c23ad1b2-3f0b-4f53-bc51-9dbb20e6c037   1Mi        RWX            nfs-csi        6s    Filesystem

NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                            STORAGECLASS   REASON   AGE   VOLUMEMODE
persistentvolume/pvc-c23ad1b2-3f0b-4f53-bc51-9dbb20e6c037   1Mi        RWX            Delete           Bound    default/dingzhiyan-linux-pvc-sc   nfs-csi                 6s    Filesystem

3.7 Reference the PVC from a Pod

[root@master231 volumes]# cat 21-deploy-pvc.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-pvc-demo
spec:
  replicas: 1
  selector:
    matchLabels:
      apps: v1
  template:
    metadata:
      labels:
        apps: v1
    spec:
      volumes:
      - name: data
        # Declare the volume type as pvc
        persistentVolumeClaim:
          # Name of the pvc
          # claimName: dingzhiyan-linux-pvc
          claimName: dingzhiyan-linux-pvc-sc
      - name: dt
        hostPath:
         path: /etc/localtime
      initContainers:
      - name: init01
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /dingzhiyan
        - name: dt
          mountPath: /etc/localtime
        command:
        - /bin/sh
        - -c
        - date -R > /dingzhiyan/index.html ; echo www.dingzhiyan.com >> /dingzhiyan/index.html
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /usr/share/nginx/html
        - name: dt
          mountPath: /etc/localtime
[root@master231 volumes]# 
[root@master231 volumes]# kubectl apply -f 21-deploy-pvc.yaml
deployment.apps/deploy-pvc-demo created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP              NODE        NOMINATED NODE   READINESS GATES
deploy-pvc-demo-65d4b9bf97-st2ch   1/1     Running   0          3s    10.100.140.99   worker233   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.140.99 
Fri, 18 Apr 2025 10:31:32 +0800
www.dingzhiyan.com

3.8 Verify the Pod's backend data

[root@master231 volumes]# kubectl describe pod deploy-pvc-demo-65d4b9bf97-st2ch | grep ClaimName
    ClaimName:  dingzhiyan-linux-pvc-sc
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pvc dingzhiyan-linux-pvc-sc 
NAME                      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
dingzhiyan-linux-pvc-sc   Bound    pvc-c23ad1b2-3f0b-4f53-bc51-9dbb20e6c037   1Mi        RWX            nfs-csi        2m29s
[root@master231 volumes]# 
[root@master231 volumes]# kubectl describe pv pvc-c23ad1b2-3f0b-4f53-bc51-9dbb20e6c037 | grep Source -A 5
Source:
    Type:              CSI (a Container Storage Interface (CSI) volume source)
    Driver:            nfs.csi.k8s.io
    FSType:            
    VolumeHandle:      10.0.0.231#dingzhiyan/data/nfs-server/sc#pvc-c23ad1b2-3f0b-4f53-bc51-9dbb20e6c037##
    ReadOnly:          false
[root@master231 volumes]# 
[root@master231 volumes]# ll /dingzhiyan/data/nfs-server/sc/pvc-c23ad1b2-3f0b-4f53-bc51-9dbb20e6c037/
total 12
drwxr-xr-x 2 root root 4096 Apr 18 10:31 ./
drwxr-xr-x 3 root root 4096 Apr 18 10:30 ../
-rw-r--r-- 1 root root   50 Apr 18 10:31 index.html
[root@master231 volumes]# 
[root@master231 volumes]# cat /dingzhiyan/data/nfs-server/sc/pvc-c23ad1b2-3f0b-4f53-bc51-9dbb20e6c037/index.html 
Fri, 18 Apr 2025 10:31:32 +0800
www.dingzhiyan.com
[root@master231 volumes]# 

4. Configure a default StorageClass and define multiple StorageClasses

The default class is toggled with this annotation patch (used in 4.1/4.2 below):
'{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

  • In Kubernetes, when a user creates a PersistentVolumeClaim (PVC) without specifying a storage class, the default storage class is used automatically to dynamically provision the PersistentVolume (PV).
  • Without a default storage class, the user must name one explicitly when creating the PVC; otherwise the claim cannot be provisioned and remains Pending.

4.1 Imperatively set the default StorageClass

[root@master231 nfs]# kubectl get sc
NAME      PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-csi   nfs.csi.k8s.io   Delete          Immediate           false                  165m
[root@master231 nfs]# 
[root@master231 nfs]# kubectl patch sc nfs-csi -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
storageclass.storage.k8s.io/nfs-csi patched
[root@master231 nfs]# 
[root@master231 nfs]# kubectl get sc
NAME                PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-csi (default)   nfs.csi.k8s.io   Delete          Immediate           false                  166m

4.2 Imperatively unset the default StorageClass

[root@master231 nfs]# kubectl get sc
NAME                PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-csi (default)   nfs.csi.k8s.io   Delete          Immediate           false                  168m
[root@master231 nfs]# 
[root@master231 nfs]# kubectl patch sc nfs-csi -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
storageclass.storage.k8s.io/nfs-csi patched
[root@master231 nfs]# 
[root@master231 nfs]# kubectl get sc
NAME      PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-csi   nfs.csi.k8s.io   Delete          Immediate           false                  168m

4.3 Declaratively define multiple StorageClasses

	The manifest below defines two NFS storage classes backed by different NFS shares. Their configuration is almost identical, but dingzhiyan-sc-haha is marked as the default, so Kubernetes will use it for any PVC that does not name a storage class.

[root@master231 volumes]# cat 23-sc-multiple.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: dingzhiyan-sc-xixi
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: nfs.csi.k8s.io
parameters:
  server: 10.0.0.231
  share: /dingzhiyan/data/nfs-server/sc-xixi
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
  - nfsvers=4.1


---

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: dingzhiyan-sc-haha
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: nfs.csi.k8s.io
parameters:
  server: 10.0.0.231
  share: /dingzhiyan/data/nfs-server/sc-haha
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
  - nfsvers=4.1
  
  
[root@master231 volumes]# kubectl apply -f  23-sc-multiple.yaml
storageclass.storage.k8s.io/dingzhiyan-sc-xixi created
storageclass.storage.k8s.io/dingzhiyan-sc-haha created
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get sc
NAME                          PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-csi                       nfs.csi.k8s.io   Delete          Immediate           false                  26m
dingzhiyan-sc-haha (default)   nfs.csi.k8s.io   Delete          Immediate           false                  2s
dingzhiyan-sc-xixi             nfs.csi.k8s.io   Delete          Immediate           false                  2s

4.4 Prepare the directories

[root@master231 storageclasses]# mkdir -pv /dingzhiyan/data/nfs-server/sc-{xixi,haha}

4.5 Reference the PVC from a Pod

[root@master231 volumes]# cat 24-pvc-po.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-xiuxian
spec:
  accessModes:
  - ReadWriteMany
  resources:
    limits:
       storage: 4Gi
    requests:
       storage: 3Gi

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-pvc-demo
spec:
  replicas: 1
  selector:
    matchLabels:
      apps: v1
  template:
    metadata:
      labels:
        apps: v1
    spec:
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: pvc-xiuxian
      - name: dt
        hostPath:
         path: /etc/localtime
      initContainers:
      - name: init01
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /dingzhiyan
        - name: dt
          mountPath: /etc/localtime
        command:
        - /bin/sh
        - -c
        - date -R > /dingzhiyan/index.html ; echo www.dingzhiyan.com >> /dingzhiyan/index.html
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /usr/share/nginx/html
        - name: dt
          mountPath: /etc/localtime

[root@master231 volumes]# 
[root@master231 volumes]# kubectl apply -f 24-pvc-po.yaml
persistentvolumeclaim/pvc-xiuxian created
deployment.apps/deploy-pvc-demo configured
[root@master231 volumes]# 
[root@master231 volumes]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
deploy-pvc-demo-587bcdb966-4qrvc   1/1     Running   0          5s    10.100.203.161   worker232   <none>           <none>
[root@master231 volumes]# 
[root@master231 volumes]# curl 10.100.203.161 
Fri, 18 Apr 2025 10:57:01 +0800
www.dingzhiyan.com

4.6 Verify the backend storage

4.6.1 Write a quick lookup script

[root@master231 ~]# cat /opt/get-pv.sh
#!/bin/bash
# Usage: /opt/get-pv.sh <pod-name>
# Resolves pod -> pvc -> pv and prints the pv's Source block.

POD_NAME=$1
# ClaimName field from the pod description
PVC_NAME=`kubectl describe pod $POD_NAME | grep ClaimName | awk '{print $2}'`
# The VOLUME column (3rd field) of the pvc listing is the pv name
PV_NAME=`kubectl get pvc ${PVC_NAME} | awk 'NR==2{print $3}'`
kubectl describe pv $PV_NAME  | grep Source -A 5
[root@master231 ~]# 
[root@master231 ~]# chmod +x /opt/get-pv.sh
[root@master231 ~]# 

4.6.2 Verify quickly with the script

[root@master231 ~]# kubectl get pods
NAME                               READY   STATUS    RESTARTS   AGE
deploy-pvc-demo-587bcdb966-4qrvc   1/1     Running   0          5m57s
[root@master231 ~]# 
[root@master231 ~]# /opt/get-pv.sh deploy-pvc-demo-587bcdb966-4qrvc 
Source:
    Type:              CSI (a Container Storage Interface (CSI) volume source)
    Driver:            nfs.csi.k8s.io
    FSType:            
    VolumeHandle:      10.0.0.231#dingzhiyan/data/nfs-server/sc-haha#pvc-9994c204-3cc7-41c1-be95-008a3117b13d##
    ReadOnly:          false
[root@master231 ~]# 
[root@master231 ~]# 
[root@master231 ~]# ll /dingzhiyan/data/nfs-server/sc-haha/pvc-9994c204-3cc7-41c1-be95-008a3117b13d/
total 12
drwxr-xr-x 2 root root 4096 Apr 18 10:57 ./
drwxr-xr-x 3 root root 4096 Apr 18 10:56 ../
-rw-r--r-- 1 root root   50 Apr 18 10:57 index.html
[root@master231 ~]# 
[root@master231 ~]# cat  /dingzhiyan/data/nfs-server/sc-haha/pvc-9994c204-3cc7-41c1-be95-008a3117b13d/index.html 
Fri, 18 Apr 2025 10:57:01 +0800
www.dingzhiyan.com
[root@master231 ~]# 

5.Hands-on: connecting K8S to a Ceph cluster with the CSI driver

Reference links:
https://kubernetes.io/zh-cn/docs/concepts/storage/storage-classes/#ceph-rbd
https://github.com/ceph/ceph-csi/tree/release-v3.7
https://github.com/ceph/ceph-csi/blob/release-v3.7/examples/csi-config-map-sample.yaml

5.1 Import the images before the experiment (recommended)

# Run inside the directory holding the exported image tarballs
for i in `ls -1` ;do  docker load -i $i;done

5.2 Download the resource manifests

[root@master231 case-demo-ceph]#  wget http://192.168.16.253/Resources/Kubernetes/sc/rbd/code/dingzhiyan-ceph-rbd-sc.tar.gz

[root@master231 case-demo-ceph]# tar xf dingzhiyan-ceph-rbd-sc.tar.gz 
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# ll
total 44
drwxr-xr-x 3 root root 4096 Apr 18 12:12 ./
drwxr-xr-x 8 root root 4096 Apr 18 12:12 ../
-rw-r--r-- 1 root root  402 Dec  9 14:57 ceph-config-map.yaml
-rw-r--r-- 1 root root  392 Dec  9 14:58 csi-config-map.yaml
-rw-r--r-- 1 root root  358 Dec  9 14:58 csi-kms-config-map.yaml
-rw-r--r-- 1 root root  370 Dec  9 14:58 csi-rbd-secret.yaml
drwxr-xr-x 3 root root 4096 Dec  9 14:50 deploy/
-rw-r--r-- 1 root root 5011 Dec  9 15:26 dingzhiyan-ceph-rbd-sc.tar.gz
-rw-r--r-- 1 root root  384 Dec  9 14:59 pvc.yaml
-rw-r--r-- 1 root root  837 Dec  9 15:14 storageclass.yaml
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# ll deploy/rbd/kubernetes/
total 44
drwxr-xr-x 2 root root 4096 Dec  9 14:50 ./
drwxr-xr-x 3 root root 4096 Dec  9 14:50 ../
-rw-r--r-- 1 root root  309 Dec  9 14:50 csi-config-map.yaml
-rw-r--r-- 1 root root  457 Dec  9 14:50 csidriver.yaml
-rw-r--r-- 1 root root 1193 Dec  9 14:50 csi-nodeplugin-rbac.yaml
-rw-r--r-- 1 root root 3347 Dec  9 14:50 csi-provisioner-rbac.yaml
-rw-r--r-- 1 root root 8490 Dec  9 14:50 csi-rbdplugin-provisioner.yaml
-rw-r--r-- 1 root root 7364 Dec  9 14:50 csi-rbdplugin.yaml
[root@master231 case-demo-ceph]# 

5.3 Check the relevant Ceph cluster configuration

[root@ceph141 ~]# cat /etc/ceph/ceph.conf
# minimal ceph.conf for 11e66474-0e02-11f0-82d6-4dcae3d59070
[global]
	fsid = 11e66474-0e02-11f0-82d6-4dcae3d59070
	mon_host = [v2:10.0.0.141:3300/0,v1:10.0.0.141:6789/0] [v2:10.0.0.142:3300/0,v1:10.0.0.142:6789/0] [v2:10.0.0.143:3300/0,v1:10.0.0.143:6789/0]
[root@ceph141 ~]# 
[root@ceph141 ~]# ceph -s
  cluster:
    id:     11e66474-0e02-11f0-82d6-4dcae3d59070
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph141,ceph142,ceph143 (age 2d)
    mgr: ceph141.mbakds(active, since 2d), standbys: ceph142.qgifwo
    mds: 1/1 daemons up, 1 standby
    osd: 9 osds: 9 up (since 2d), 9 in (since 2w)
 
  data:
    volumes: 1/1 healthy
    pools:   4 pools, 305 pgs
    objects: 3.61k objects, 888 MiB
    usage:   4.5 GiB used, 5.3 TiB / 5.3 TiB avail
    pgs:     305 active+clean
 
[root@ceph141 ~]# 
[root@ceph141 ~]# 
[root@ceph141 ~]# cat /etc/ceph/ceph.client.admin.keyring 
[client.admin]
	key = AQAkRepnl8QHDhAAajK/aMH1KaCoVJWt5H2NOQ==
	caps mds = "allow *"
	caps mgr = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"

5.4 Adjust the configuration files on the K8S side to match your own Ceph cluster's state

[root@master231 case-demo-ceph]# cat ceph-config-map.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-config
data:
  # The Ceph cluster's configuration file "/etc/ceph/ceph.conf"
  ceph.conf: |
    [global]
    	fsid = 11e66474-0e02-11f0-82d6-4dcae3d59070
    	mon_host = [v2:10.0.0.141:3300/0,v1:10.0.0.141:6789/0] [v2:10.0.0.142:3300/0,v1:10.0.0.142:6789/0] [v2:10.0.0.143:3300/0,v1:10.0.0.143:6789/0]

  # The "keyring" key must be present; its value stays empty
  keyring: |
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# cat csi-config-map.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-csi-config
data:
  # clusterID is the cluster ID reported by "ceph -s"
  # monitors are your own Ceph cluster's mon addresses
  config.json: |-
    [
      {
        "clusterID": "11e66474-0e02-11f0-82d6-4dcae3d59070",
        "monitors": [
          "10.0.0.141:6789",
          "10.0.0.142:6789",
          "10.0.0.143:6789"
        ]
      }
    ]
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# cat csi-rbd-secret.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
# Unlike the data field, stringData takes plain values with no base64 encoding; they are stored as given.
stringData:
  # The user name, admin here
  userID: admin
  # The admin user's key; a custom non-admin user works as well.
  userKey: AQAkRepnl8QHDhAAajK/aMH1KaCoVJWt5H2NOQ==
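
If you would rather use the data field than stringData, base64-encode the values first (echo -n avoids encoding a trailing newline):

echo -n 'admin' | base64      # YWRtaW4=
echo -n 'AQAkRepnl8QHDhAAajK/aMH1KaCoVJWt5H2NOQ==' | base64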

5.5 Create the configuration objects

[root@master231 case-demo-ceph]# kubectl apply -f ceph-config-map.yaml -f csi-config-map.yaml -f csi-kms-config-map.yaml -f csi-rbd-secret.yaml 
configmap/ceph-config created
configmap/ceph-csi-config created
configmap/ceph-csi-encryption-kms-config created
secret/csi-rbd-secret created
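
A quick check that all four configuration objects landed in the default namespace:

kubectl get cm ceph-config ceph-csi-config ceph-csi-encryption-kms-config
kubectl get secret csi-rbd-secret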

5.6 Apply the Ceph driver and inspect the csidrivers

[root@master231 case-demo-ceph]# kubectl apply -f  deploy/rbd/kubernetes/
configmap/ceph-csi-config configured
serviceaccount/rbd-csi-nodeplugin created
clusterrole.rbac.authorization.k8s.io/rbd-csi-nodeplugin created
clusterrolebinding.rbac.authorization.k8s.io/rbd-csi-nodeplugin created
serviceaccount/rbd-csi-provisioner created
clusterrole.rbac.authorization.k8s.io/rbd-external-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role created
role.rbac.authorization.k8s.io/rbd-external-provisioner-cfg created
rolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role-cfg created
service/csi-rbdplugin-provisioner created
deployment.apps/csi-rbdplugin-provisioner created
daemonset.apps/csi-rbdplugin created
service/csi-metrics-rbdplugin created
csidriver.storage.k8s.io/rbd.csi.ceph.com created
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl get pods -o wide
NAME                                         READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
csi-rbdplugin-lcd9x                          3/3     Running   0          15s   10.0.0.232       worker232   <none>           <none>
csi-rbdplugin-msbpj                          3/3     Running   0          14s   10.0.0.233       worker233   <none>           <none>
csi-rbdplugin-provisioner-5dfcf67885-n6ld6   7/7     Running   0          15s   10.100.140.98    worker233   <none>           <none>
csi-rbdplugin-provisioner-5dfcf67885-qqwnq   7/7     Running   0          15s   10.100.203.160   worker232   <none>           <none>
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl get csidrivers
NAME               ATTACHREQUIRED   PODINFOONMOUNT   STORAGECAPACITY   TOKENREQUESTS   REQUIRESREPUBLISH   MODES        AGE
csi.tigera.io      true             true             false             <unset>         false               Ephemeral    11d
nfs.csi.k8s.io     false            false            false             <unset>         false               Persistent   123m
rbd.csi.ceph.com   true             false            false             <unset>         false               Persistent   6m56s
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl get csidrivers rbd.csi.ceph.com 
NAME               ATTACHREQUIRED   PODINFOONMOUNT   STORAGECAPACITY   TOKENREQUESTS   REQUIRESREPUBLISH   MODES        AGE
rbd.csi.ceph.com   true             false            false             <unset>         false               Persistent   7m13s
[root@master231 case-demo-ceph]# 

6.Hands-on: a dynamic RBD StorageClass for K8S on Ceph

Reference links:
https://github.com/ceph/ceph-csi/blob/release-v3.7/examples/rbd/storageclass.yaml
https://github.com/ceph/ceph-csi/blob/release-v3.7/examples/README.md

6.1 Create the RBD pool for the storage class

[root@ceph141 ~]# ceph osd pool create ceph-sc-rbd
pool 'ceph-sc-rbd' created
[root@ceph141 ~]# 
[root@ceph141 ~]# rbd pool init ceph-sc-rbd
[root@ceph141 ~]# 
[root@ceph141 ~]# rbd ls ceph-sc-rbd
[root@ceph141 ~]# 
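
A quick sanity check that the pool exists and was tagged for RBD by "rbd pool init":

ceph osd pool ls detail | grep ceph-sc-rbd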

6.2 Edit the K8S RBD StorageClass

[root@master231 case-demo-ceph]# cat storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
   name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
   # The Ceph cluster ID (from "ceph -s")
   clusterID: "11e66474-0e02-11f0-82d6-4dcae3d59070"
   # The storage pool to carve RBD images from
   pool: ceph-sc-rbd
   # RBD image features to enable
   imageFeatures: layering
   csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
   csi.storage.k8s.io/provisioner-secret-namespace: default
   csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
   csi.storage.k8s.io/controller-expand-secret-namespace: default
   csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
   csi.storage.k8s.io/node-stage-secret-namespace: default
   # The filesystem type to format the image with
   csi.storage.k8s.io/fstype: xfs
# The reclaim policy
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
   - discard
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl apply -f  storageclass.yaml 
storageclass.storage.k8s.io/csi-rbd-sc created
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl get sc 
NAME                           PROVISIONER        RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
csi-rbd-sc                     rbd.csi.ceph.com   Delete          Immediate           true                   5s
nfs-csi                        nfs.csi.k8s.io     Delete          Immediate           false                  122m
dingzhiyan-sc-haha (default)   nfs.csi.k8s.io     Delete          Immediate           false                  95m
dingzhiyan-sc-xixi             nfs.csi.k8s.io     Delete          Immediate           false                  95m
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl get sc csi-rbd-sc 
NAME         PROVISIONER        RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
csi-rbd-sc   rbd.csi.ceph.com   Delete          Immediate           true                   13s
[root@master231 case-demo-ceph]# 

6.3 Create the PVCs

[root@master231 case-demo-ceph]#  cat pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc01
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
  storageClassName: csi-rbd-sc

---

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc02
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 4Gi
  storageClassName: csi-rbd-sc
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl apply -f pvc.yaml
persistentvolumeclaim/rbd-pvc01 created
persistentvolumeclaim/rbd-pvc02 created
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl get pvc
NAME                     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
rbd-pvc01                Bound    pvc-8ce0ac3c-586e-4722-ba00-2c9f463fa2e9   2Gi        RWO            csi-rbd-sc     11m
rbd-pvc02                Bound    pvc-98945661-f66c-4d55-b482-25d6e610c22c   4Gi        RWO            csi-rbd-sc     11m

6.4 Verify the backend data behind the PVs

[root@master231 case-demo-ceph]# kubectl describe pv pvc-8ce0ac3c-586e-4722-ba00-2c9f463fa2e9 | grep Source -A 5
Source:
    Type:              CSI (a Container Storage Interface (CSI) volume source)
    Driver:            rbd.csi.ceph.com
    FSType:            xfs
    VolumeHandle:      0001-0024-11e66474-0e02-11f0-82d6-4dcae3d59070-0000000000000006-c5e8dd5e-1c1d-11f0-813b-e255ba6ad5b4
    ReadOnly:          false
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl describe pv pvc-98945661-f66c-4d55-b482-25d6e610c22c | grep Source -A 5
Source:
    Type:              CSI (a Container Storage Interface (CSI) volume source)
    Driver:            rbd.csi.ceph.com
    FSType:            xfs
    VolumeHandle:      0001-0024-11e66474-0e02-11f0-82d6-4dcae3d59070-0000000000000006-c5e8cdbd-1c1d-11f0-813b-e255ba6ad5b4
    ReadOnly:          false
[root@master231 case-demo-ceph]# 

6.5 Verify the data exists on the RBD side

[root@ceph141 ~]# rbd ls -l ceph-sc-rbd
NAME                                          SIZE   PARENT  FMT  PROT  LOCK
csi-vol-c5e8cdbd-1c1d-11f0-813b-e255ba6ad5b4  4 GiB            2            
csi-vol-c5e8dd5e-1c1d-11f0-813b-e255ba6ad5b4  2 GiB            2  
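
To inspect one of the provisioned images, including the layering feature requested by the StorageClass, use rbd info with an image name from the listing above:

rbd info ceph-sc-rbd/csi-vol-c5e8dd5e-1c1d-11f0-813b-e255ba6ad5b4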

7.Install the RBD driver into a specified namespace

7.1 Modify the configuration
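
All the manifests need their namespace switched from default to kube-public before this run: the four top-level configuration manifests, everything under deploy/rbd/kubernetes/, and the three csi.storage.k8s.io/*-secret-namespace parameters in storageclass.yaml (the refreshed timestamps in the listing under 7.4 reflect those edits). A minimal sketch of the bulk edit, assuming the upstream manifests spell it as "namespace: default":

cd deploy/rbd/kubernetes/
# Point every namespaced object and RBAC subject at kube-public
sed -i 's/namespace: default/namespace: kube-public/g' *.yaml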

7.2 Uninstall the old driver first

[root@master231 case-demo-ceph]# kubectl delete -f deploy/rbd/kubernetes/

7.3 Install the configuration files first

[root@master231 case-demo-ceph]# kubectl apply -f ceph-config-map.yaml -f csi-config-map.yaml -f csi-kms-config-map.yaml -f csi-rbd-secret.yaml 

7.4 Install the driver (the driver depends on the configuration files)

[root@master231 case-demo-ceph]# ll
total 44
drwxr-xr-x 3 root root 4096 Apr 18 14:50 ./
drwxr-xr-x 8 root root 4096 Apr 18 12:12 ../
-rw-r--r-- 1 root root  427 Apr 18 14:46 ceph-config-map.yaml
-rw-r--r-- 1 root root  417 Apr 18 14:47 csi-config-map.yaml
-rw-r--r-- 1 root root  383 Apr 18 14:47 csi-kms-config-map.yaml
-rw-r--r-- 1 root root  374 Apr 18 14:48 csi-rbd-secret.yaml
drwxr-xr-x 3 root root 4096 Dec  9 14:50 deploy/
-rw-r--r-- 1 root root 4965 Apr 18 14:50 dingzhiyan-ceph-rbd-sc-custom-ns-kube-public.tar.gz
-rw-r--r-- 1 root root  384 Dec  9 14:59 pvc.yaml
-rw-r--r-- 1 root root  720 Apr 18 14:50 storageclass.yaml
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# ll deploy/rbd/kubernetes/
total 44
drwxr-xr-x 2 root root 4096 Apr 18 14:46 ./
drwxr-xr-x 3 root root 4096 Dec  9 14:50 ../
-rw-r--r-- 1 root root  334 Apr 18 14:42 csi-config-map.yaml
-rw-r--r-- 1 root root  482 Apr 18 14:43 csidriver.yaml
-rw-r--r-- 1 root root 1201 Apr 18 14:43 csi-nodeplugin-rbac.yaml
-rw-r--r-- 1 root root 3187 Apr 18 14:44 csi-provisioner-rbac.yaml
-rw-r--r-- 1 root root 8410 Apr 18 14:45 csi-rbdplugin-provisioner.yaml
-rw-r--r-- 1 root root 7328 Apr 18 14:46 csi-rbdplugin.yaml
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl apply -f deploy/rbd/kubernetes/
configmap/ceph-csi-config created
serviceaccount/rbd-csi-nodeplugin created
clusterrole.rbac.authorization.k8s.io/rbd-csi-nodeplugin created
clusterrolebinding.rbac.authorization.k8s.io/rbd-csi-nodeplugin created
serviceaccount/rbd-csi-provisioner created
clusterrole.rbac.authorization.k8s.io/rbd-external-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role created
role.rbac.authorization.k8s.io/rbd-external-provisioner-cfg created
rolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role-cfg created
service/csi-rbdplugin-provisioner created
deployment.apps/csi-rbdplugin-provisioner created
daemonset.apps/csi-rbdplugin created
service/csi-metrics-rbdplugin created
csidriver.storage.k8s.io/rbd.csi.ceph.com created
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl get csidrivers rbd.csi.ceph.com 
NAME               ATTACHREQUIRED   PODINFOONMOUNT   STORAGECAPACITY   TOKENREQUESTS   REQUIRESREPUBLISH   MODES        AGE
rbd.csi.ceph.com   true             false            false             <unset>         false               Persistent   101s
[root@master231 case-demo-ceph]#
[root@master231 case-demo-ceph]# kubectl get pods -n kube-public  -o wide
NAME                                         READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
csi-rbdplugin-5k5lv                          3/3     Running   0          25s   10.0.0.232       worker232   <none>           <none>
csi-rbdplugin-627df                          3/3     Running   0          25s   10.0.0.233       worker233   <none>           <none>
csi-rbdplugin-provisioner-5dfcf67885-gt2vx   7/7     Running   0          67s   10.100.203.166   worker232   <none>           <none>
csi-rbdplugin-provisioner-5dfcf67885-mpv4p   7/7     Running   0          67s   10.100.140.102   worker233   <none>           <none>

7.5 Install the StorageClass

[root@master231 case-demo-ceph]# kubectl apply -f storageclass.yaml 
storageclass.storage.k8s.io/csi-rbd-sc created

7.6 Create PVCs to test and verify

[root@master231 case-demo-ceph]# cat pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-sc-pvc01
  labels:
    class: linux96
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Mi
  storageClassName: csi-rbd-sc

---

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-sc-pvc02
  labels:
    class: linux97
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Mi
  storageClassName: csi-rbd-sc
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl apply -f  pvc.yaml 
persistentvolumeclaim/rbd-sc-pvc01 created
persistentvolumeclaim/rbd-sc-pvc02 created
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl get pvc -l class -o wide
NAME           STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
rbd-sc-pvc01   Bound    pvc-8d0539e3-4cf3-4aba-b727-818e74d50e4b   10Mi       RWO            csi-rbd-sc     8s    Filesystem
rbd-sc-pvc02   Bound    pvc-2ab14c5d-04bb-44af-9859-30f3f7788538   20Mi       RWO            csi-rbd-sc     8s    Filesystem
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl describe pv  pvc-8d0539e3-4cf3-4aba-b727-818e74d50e4b  | grep Source -A 5
Source:
    Type:              CSI (a Container Storage Interface (CSI) volume source)
    Driver:            rbd.csi.ceph.com
    FSType:            xfs
    VolumeHandle:      0001-0024-11e66474-0e02-11f0-82d6-4dcae3d59070-0000000000000006-8f1cf7b4-1c22-11f0-acc6-4a8411b1fe30
    ReadOnly:          false
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# 
[root@master231 case-demo-ceph]# kubectl describe pv  pvc-2ab14c5d-04bb-44af-9859-30f3f7788538  | grep Source -A 5
Source:
    Type:              CSI (a Container Storage Interface (CSI) volume source)
    Driver:            rbd.csi.ceph.com
    FSType:            xfs
    VolumeHandle:      0001-0024-11e66474-0e02-11f0-82d6-4dcae3d59070-0000000000000006-8f1f02d8-1c22-11f0-acc6-4a8411b1fe30
    ReadOnly:          false

7.7 Verify on the Ceph cluster's RBD side

[root@ceph141 ~]# rbd ls -l ceph-sc-rbd
NAME                                          SIZE    PARENT  FMT  PROT  LOCK
csi-vol-8f1cf7b4-1c22-11f0-acc6-4a8411b1fe30  10 MiB            2            
csi-vol-8f1f02d8-1c22-11f0-acc6-4a8411b1fe30  20 MiB            2            
csi-vol-c5e8cdbd-1c1d-11f0-813b-e255ba6ad5b4   4 GiB            2            
csi-vol-c5e8dd5e-1c1d-11f0-813b-e255ba6ad5b4   2 GiB            2            
[root@ceph141 ~]#
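
Since csi-rbd-sc sets allowVolumeExpansion: true, a bound claim can also be grown in place by raising its request; a sketch (the 30Mi target is just an example):

kubectl patch pvc rbd-sc-pvc01 -p '{"spec":{"resources":{"requests":{"storage":"30Mi"}}}}'
kubectl get pvc rbd-sc-pvc01    # CAPACITY shows the new size once the resize completes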