Implementing Kubernetes data persistence with Ceph block storage (RBD) and CephFS

  Use case: integrating Ceph with k8s

  Install ceph-common on the k8s nodes

  Install the ceph-common package on the k8s master and on each node.

  Download the Ceph repository key

root@master1:~/yaml# wget -q -O- 'https://download.ceph.com/keys/release.asc' | apt-key add -
root@node1:~# wget -q -O- 'https://download.ceph.com/keys/release.asc' | apt-key add -
root@node2:~# wget -q -O- 'https://download.ceph.com/keys/release.asc' | apt-key add -

 

  Verify the OS release on the k8s nodes

root@master1:~# lsb_release -a
No LSB modules are available.
Distributor ID:	Ubuntu
Description:	Ubuntu 20.04.3 LTS
Release:	20.04
Codename:	focal

 

  Configure the apt source on each master and node

root@node1:~# vim /etc/apt/sources.list
deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific/ focal main
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific/ focal main

# Update the package index
root@node1:~# apt update

 

  Check the available ceph-common versions

root@master1:~# apt-cache madison ceph-common
ceph-common | 16.2.10-1focal | https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific focal/main amd64 Packages
ceph-common | 15.2.17-0ubuntu0.20.04.1 | http://mirrors.tuna.tsinghua.edu.cn/ubuntu focal-updates/main amd64 Packages
ceph-common | 15.2.12-0ubuntu0.20.04.1 | http://mirrors.tuna.tsinghua.edu.cn/ubuntu focal-security/main amd64 Packages
ceph-common | 15.2.1-0ubuntu1 | http://mirrors.tuna.tsinghua.edu.cn/ubuntu focal/main amd64 Packages

 

  Install the same ceph-common version as the current Ceph cluster on each node

root@master1:~# apt install ceph-common=16.2.10-1focal
root@master1:~# ceph -v
ceph version 16.2.10 (45fa1a083152e41a408d15505f594ec5f1b4fe17) pacific (stable)

 

  Configure hostname resolution on the k8s nodes

  The ceph.conf configuration file refers to the Ceph hosts by hostname, so hostname resolution must be configured on every k8s master and node:

root@master1:~# cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 es1

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.100.15 harbor.cncf.net
192.168.100.16    easzlab.io.local
192.168.100.40 ceph-deploy.example.local ceph-deploy
192.168.100.31 ceph-node1.example.local ceph-node1
192.168.100.32 ceph-node2.example.local ceph-node2
192.168.100.33 ceph-node3.example.local ceph-node3
192.168.100.34 ceph-node4.example.local ceph-node4
192.168.100.41 ceph-node5.example.local ceph-node5
192.168.100.35 ceph-mon1.example.local ceph-mon1
192.168.100.36 ceph-mon2.example.local ceph-mon2
192.168.100.37 ceph-mon3.example.local ceph-mon3
192.168.100.38 ceph-mgr1.example.local ceph-mgr1
192.168.100.39 ceph-mgr2.example.local ceph-mgr2

 

  Using RBD with k8s for static and dynamic storage volumes

  For pods in k8s to use an RBD image in Ceph as a storage device, the RBD pool and image must be created in Ceph, and the k8s nodes must be able to authenticate against Ceph.

  When k8s uses Ceph for dynamic volume provisioning, the kube-controller-manager component needs access to Ceph, so the authentication files must be synchronized to every node, including the k8s masters and workers.

  Note: an RBD image can only be mounted and used by one service instance at a time.

 

  Create and manage the RBD pool

  Create the RBD pool

root@ceph-mon1:~# ceph osd pool create rbd-pool 32 32
pool 'rbd-pool' created

# Verify the storage pools
root@ceph-mon1:~# ceph osd pool ls
device_health_metrics
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
rbd-pool

 

  Enable the rbd application on the pool

root@ceph-mon1:~# ceph osd pool application enable rbd-pool rbd
enabled application 'rbd' on pool 'rbd-pool'

 

  Initialize the RBD pool

root@ceph-mon1:~# rbd pool init -p rbd-pool

 

  Create an image

  Create the image

root@ceph-mon1:~# rbd create img1 --size 3G --pool rbd-pool --image-format 2 --image-feature layering

 

  Verify the image

root@ceph-mon1:~# rbd ls --pool rbd-pool
img1

 

  Verify the image details

root@ceph-mon1:~# rbd --image img1 --pool rbd-pool info
rbd image 'img1':
	size 3 GiB in 768 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: f7a5246dc9401
	block_name_prefix: rbd_data.f7a5246dc9401
	format: 2
	features: layering
	op_features: 
	flags: 
	create_timestamp: Tue Jan  3 15:15:18 2023
	access_timestamp: Tue Jan  3 15:15:18 2023
	modify_timestamp: Tue Jan  3 15:15:18 2023

 

  Create a Ceph user and grant permissions

  Create a regular user scoped to the rbd-pool pool

cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth get-or-create client.lxh mon 'allow r' osd 'allow * pool=rbd-pool'
[client.lxh]
	key = AQCnFLljFWClJhAAd4KL0bCunpP8wMY04vc41Q==

 

  Verify the user's permissions

cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth get client.lxh
[client.lxh]
	key = AQCnFLljFWClJhAAd4KL0bCunpP8wMY04vc41Q==
	caps mon = "allow r"
	caps osd = "allow * pool=rbd-pool"
exported keyring for client.lxh

 

  Export the user credentials to a keyring file

cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth get client.lxh -o ceph.client.lxh.keyring

cephadmin@ceph-deploy:~/ceph-cluster$ cat ceph.client.lxh.keyring 
[client.lxh]
	key = AQCnFLljFWClJhAAd4KL0bCunpP8wMY04vc41Q==
	caps mon = "allow r"
	caps osd = "allow * pool=rbd-pool"

 

  Synchronize the authentication files to each k8s master and node

cephadmin@ceph-deploy:~/ceph-cluster$ scp ceph.conf ceph.client.lxh.keyring root@192.168.100.3:/etc/ceph
cephadmin@ceph-deploy:~/ceph-cluster$ scp ceph.conf ceph.client.lxh.keyring root@192.168.100.4:/etc/ceph
cephadmin@ceph-deploy:~/ceph-cluster$ scp ceph.conf ceph.client.lxh.keyring root@192.168.100.5:/etc/ceph

 

  Verify the user's permissions on the k8s nodes

root@master1:~# ceph --user lxh -s
root@node1:~# ceph --user lxh -s

 

 

  Verify that the k8s nodes can access the image

root@node1:~# rbd --id lxh ls --pool=rbd-pool
img1

 

  Mount RBD with a keyring file

  There are two ways for a pod to use a Ceph RBD image as a volume: one is mounting the RBD with the keyring file placed on the host, and the other is defining the key from the keyring as a k8s Secret and having the pod mount the RBD through that Secret.

  Create the namespace

root@master1:~/yaml# kubectl create ns ceph

 

  1. Create a pod that references the Ceph RBD (YAML)

root@master1:~/yaml# cat pod-ceph-rbd.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: ceph   # target namespace
spec:
  containers:
  - image: busybox 
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent 
    name: busybox
    volumeMounts:
    - name: rbd-data1     # volume name referenced below
      mountPath: /data    # mount path inside the pod
  volumes:
    - name: rbd-data1     # volume name, must match the volumeMounts entry above
      rbd:
        monitors:          # ceph mon addresses
        - '192.168.100.35:6789'
        - '192.168.100.36:6789'
        - '192.168.100.37:6789'
        pool: rbd-pool    # ceph rbd storage pool
        image: img1       # image created in the rbd pool
        fsType: xfs       # filesystem used to format the image once mapped
        readOnly: false   # mount read-write
        user: lxh         # ceph auth user name
        keyring: /etc/ceph/ceph.client.lxh.keyring   # keyring file of the regular ceph user
        
        
root@master1:~/yaml# kubectl apply -f pod-ceph-rbd.yaml 

 

  Verify that the pod mounts and uses the RBD

root@master1:~/yaml# kubectl config set-context context-cluster1 --namespace ceph

root@master1:~/yaml# kubectl get pods
NAME      READY   STATUS    RESTARTS   AGE
busybox   1/1     Running   0          48s

root@master1:~/yaml# kubectl exec -it busybox -- df -h
Filesystem                Size      Used Available Use% Mounted on
overlay                  49.2G     12.1G     37.1G  25% /
tmpfs                    64.0M         0     64.0M   0% /dev
tmpfs                     1.9G         0      1.9G   0% /sys/fs/cgroup
/dev/rbd0                 3.0G     53.9M      2.9G   2% /data
/dev/sda3                49.2G     12.1G     37.1G  25% /etc/hosts
/dev/sda3                49.2G     12.1G     37.1G  25% /dev/termination-log
/dev/sda3                49.2G     12.1G     37.1G  25% /etc/hostname
/dev/sda3                49.2G     12.1G     37.1G  25% /etc/resolv.conf
shm                      64.0M         0     64.0M   0% /dev/shm
tmpfs                     3.5G     12.0K      3.5G   0% /var/run/secrets/kubernetes.io/serviceaccount
tmpfs                     1.9G         0      1.9G   0% /proc/acpi
tmpfs                    64.0M         0     64.0M   0% /proc/kcore
tmpfs                    64.0M         0     64.0M   0% /proc/keys
tmpfs                    64.0M         0     64.0M   0% /proc/timer_list
tmpfs                    64.0M         0     64.0M   0% /proc/sched_debug
tmpfs                     1.9G         0      1.9G   0% /proc/scsi
tmpfs                     1.9G         0      1.9G   0% /sys/firmware

 

   Test writing data to the volume

root@master1:~/yaml# kubectl exec -it busybox -- cp -r /etc /data
root@master1:~/yaml# kubectl exec -it busybox -- ls -l /data
total 0
drwxr-xr-x    3 root     root           148 Jan  7 09:13 etc

 

  Verify the RBD usage on the k8s node

root@master1:~/yaml# kubectl get pods -o wide
NAME      READY   STATUS    RESTARTS   AGE   IP               NODE            NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          23m   10.200.166.164   192.168.100.4   <none>           <none>

 

  Check how the node mounts the RBD volume

root@node1:~# mount |grep rbd
/dev/rbd0 on /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/rbd-pool-image-img1 type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=64k,sunit=128,swidth=128,noquota)
/dev/rbd0 on /var/lib/kubelet/pods/d2fb7d63-3e9d-4105-b5c4-d234e287ca5b/volumes/kubernetes.io~rbd/rbd-data1 type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=64k,sunit=128,swidth=128,noquota)

root@node1:~# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
loop0    7:0    0 55.6M  1 loop /snap/core18/2566
loop1    7:1    0 55.6M  1 loop /snap/core18/2667
loop2    7:2    0 67.8M  1 loop /snap/lxd/22753
loop3    7:3    0 49.6M  1 loop /snap/snapd/17883
loop4    7:4    0 63.3M  1 loop /snap/core20/1778
loop5    7:5    0 63.2M  1 loop /snap/core20/1623
loop6    7:6    0 91.9M  1 loop /snap/lxd/24061
sda      8:0    0   50G  0 disk 
├─sda1   8:1    0    1M  0 part 
├─sda2   8:2    0  800M  0 part /boot
└─sda3   8:3    0 49.2G  0 part /
sr0     11:0    1  1.2G  0 rom  
rbd0   252:0    0    3G  0 disk /var/lib/kubelet/pods/8e608e5d-445c-4257-ae8e-9069756f182b/volumes/kubernetes.io~rbd/rbd-data1

root@node1:~# rbd showmapped
id  pool      namespace  image  snap  device   
0   rbd-pool             img1   -     /dev/rbd0

 

  2. Reference the RBD from a Deployment

  1. Deploy nginx
root@master1:~/yaml# cat nginx-rbd.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: 
      app: nginx-rbd
  template:
    metadata:
      labels:
        app: nginx-rbd
    spec:
      containers:
      - name: nginx-rbd
        image: nginx
        ports:
        - containerPort: 80
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: rbd-data1
          mountPath: /data
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '192.168.100.35:6789'
            - '192.168.100.36:6789'
            - '192.168.100.37:6789'
            pool: rbd-pool
            image: img1
            fsType: xfs
            readOnly: false
            user: lxh
            keyring: /etc/ceph/ceph.client.lxh.keyring
            
            
root@master1:~/yaml# kubectl apply -f nginx-rbd.yaml

 

  Check how the pod mounts and uses the RBD

root@master1:~/yaml# kubectl exec -it nginx-deployment-69466b449f-xbszn -- df -h /data
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0       3.0G   55M  3.0G   2% /data

root@master1:~/yaml# kubectl exec -it nginx-deployment-69466b449f-xbszn -- mount|grep rbd
/dev/rbd0 on /data type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=64k,sunit=128,swidth=128,noquota)

root@master1:~/yaml# kubectl exec -it nginx-deployment-69466b449f-xbszn -- ls -l /data
total 0
drwxr-xr-x 3 root root 148 Jan  7 09:13 etc

 

  2. Deploy MySQL
root@master1:~/yaml# cat mysql-rbd.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - name: mysql
        image: mysql:5.7.31
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: root123456
        ports:
        - containerPort: 3306

        volumeMounts:
        - name: rbd-data1
          mountPath: /var/lib/mysql
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '192.168.100.35:6789'
            - '192.168.100.36:6789'
            - '192.168.100.37:6789'
            pool: rbd-pool
            image: img1
            fsType: xfs
            readOnly: false
            user: lxh
            keyring: /etc/ceph/ceph.client.lxh.keyring

root@master1:~/yaml# kubectl apply -f mysql-rbd.yaml

 

  Verify that the MySQL Deployment mounts the RBD

  Note: the RBD image used by MySQL must contain no data at all; MySQL initialization requires an empty data directory.
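  Since img1 already holds the /etc copy written during the earlier busybox test, it has to be emptied first. A minimal cleanup sketch, assuming no pod currently has img1 mapped and that the node already has the lxh keyring under /etc/ceph (the mapped device is typically /dev/rbd0 on an otherwise idle node):

root@node1:~# rbd --id lxh map rbd-pool/img1          # map the image on a node
root@node1:~# mount /dev/rbd0 /mnt                    # mount the xfs filesystem
root@node1:~# rm -rf /mnt/*                           # remove leftover files so MySQL can initialize
root@node1:~# umount /mnt
root@node1:~# rbd --id lxh unmap rbd-pool/img1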

root@master1:~/yaml# kubectl get pods 
NAME                               READY   STATUS    RESTARTS   AGE
mysql-deployment-966ccf6c5-nddw8   1/1     Running   0          6m16s

root@master1:~/yaml# kubectl exec -it mysql-deployment-966ccf6c5-nddw8 -- df -h
Filesystem      Size  Used Avail Use% Mounted on
overlay          50G   13G   37G  27% /
tmpfs            64M     0   64M   0% /dev
tmpfs           2.0G     0  2.0G   0% /sys/fs/cgroup
shm              64M     0   64M   0% /dev/shm
/dev/sda3        50G   13G   37G  27% /etc/hosts
/dev/rbd0       3.0G  265M  2.8G   9% /var/lib/mysql
tmpfs           3.6G   12K  3.6G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs           2.0G     0  2.0G   0% /proc/acpi
tmpfs           2.0G     0  2.0G   0% /proc/scsi
tmpfs           2.0G     0  2.0G   0% /sys/firmware

root@master1:~/yaml# kubectl exec -it mysql-deployment-966ccf6c5-nddw8 -- mount |grep rbd
/dev/rbd0 on /var/lib/mysql type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=64k,sunit=128,swidth=128,noquota)

root@master1:~/yaml# kubectl exec -it mysql-deployment-966ccf6c5-nddw8 -- ls -l /var/lib/mysql
total 188484
-rw-r----- 1 mysql mysql       56 Jan  7 11:10 auto.cnf
-rw------- 1 mysql mysql     1680 Jan  7 11:10 ca-key.pem
-rw-r--r-- 1 mysql mysql     1112 Jan  7 11:10 ca.pem
-rw-r--r-- 1 mysql mysql     1112 Jan  7 11:10 client-cert.pem
-rw------- 1 mysql mysql     1676 Jan  7 11:10 client-key.pem
-rw-r----- 1 mysql mysql     1353 Jan  7 11:10 ib_buffer_pool
-rw-r----- 1 mysql mysql 50331648 Jan  7 11:10 ib_logfile0
-rw-r----- 1 mysql mysql 50331648 Jan  7 11:09 ib_logfile1
-rw-r----- 1 mysql mysql 79691776 Jan  7 11:10 ibdata1
-rw-r----- 1 mysql mysql 12582912 Jan  7 11:10 ibtmp1
drwxr-x--- 2 mysql mysql     4096 Jan  7 11:10 mysql
drwxr-x--- 2 mysql mysql     8192 Jan  7 11:10 performance_schema
-rw------- 1 mysql mysql     1676 Jan  7 11:10 private_key.pem
-rw-r--r-- 1 mysql mysql      452 Jan  7 11:10 public_key.pem
-rw-r--r-- 1 mysql mysql     1112 Jan  7 11:10 server-cert.pem
-rw------- 1 mysql mysql     1680 Jan  7 11:10 server-key.pem
drwxr-x--- 2 mysql mysql     8192 Jan  7 11:10 sys

 

  Mount RBD with a Secret

  Define the key from the Ceph keyring as a k8s Secret first and then mount it in the pod; the keyring file then no longer needs to be distributed to every k8s node.

 

  Create the Secret

  First create the Secret. The Secret mainly contains the key from the authorized user's keyring file in Ceph; the key must be base64-encoded before the Secret can be created.

   Encode the key

  Print the key

cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth print-key client.lxh
AQCnFLljFWClJhAAd4KL0bCunpP8wMY04vc41Q==

 

  Base64-encode the key

cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth print-key client.lxh|base64
QVFDbkZMbGpGV0NsSmhBQWQ0S0wwYkN1bnBQOHdNWTA0dmM0MVE9PQ==

 

  Create the Secret manifest, pasting the encoded Ceph key into the YAML

apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-lxh-keying
type: "kubernetes.io/rbd"
data:
  key: QVFDbkZMbGpGV0NsSmhBQWQ0S0wwYkN1bnBQOHdNWTA0dmM0MVE9PQ==

 

   Apply the Secret

root@master1:~/yaml# kubectl apply -f client-lxh-secret.yaml 
secret/ceph-secret-lxh-keying created

root@master1:~/yaml# kubectl get secrets 
NAME                     TYPE                DATA   AGE
ceph-secret-lxh-keying   kubernetes.io/rbd   1      6s

 

  Create the Deployment

  After the Secret is created, edit the pod YAML to use the Secret instead of the keyring file

root@master1:~/yaml# cat nginx-secret.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: 
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80

        volumeMounts:
        - name: rbd-data1
          mountPath: /usr/share/nginx/html/rbd   # path where the volume is mounted
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '192.168.100.35:6789'
            - '192.168.100.36:6789'
            - '192.168.100.37:6789'
            pool: rbd-pool
            image: img1
            fsType: xfs
            readOnly: false
            user: lxh       # ceph auth user name
            secretRef:
              name: ceph-secret-lxh-keying      # secret created from the ceph keyring
              
root@master1:~/yaml# kubectl apply -f nginx-secret.yaml               

 

  Verify that the pod mounts the RBD

root@master1:~/yaml# kubectl get pods
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-8554645cd6-z55zw   1/1     Running   0          4m34s

root@master1:~/yaml# kubectl exec -it nginx-deployment-8554645cd6-z55zw -- df -h
Filesystem      Size  Used Avail Use% Mounted on
overlay          50G   12G   38G  24% /
tmpfs            64M     0   64M   0% /dev
tmpfs           2.0G     0  2.0G   0% /sys/fs/cgroup
shm              64M     0   64M   0% /dev/shm
/dev/sda3        50G   12G   38G  24% /etc/hosts
tmpfs           3.6G   12K  3.6G   1% /run/secrets/kubernetes.io/serviceaccount
/dev/rbd0       3.0G  253M  2.8G   9% /usr/share/nginx/html/rbd
tmpfs           2.0G     0  2.0G   0% /proc/acpi
tmpfs           2.0G     0  2.0G   0% /proc/scsi
tmpfs           2.0G     0  2.0G   0% /sys/firmware

 

  Verify the mounted files

root@master1:~/yaml# kubectl exec -it nginx-deployment-8554645cd6-z55zw -- ls -l /usr/share/nginx/html/rbd

 

   Verify the mapping on the k8s node

root@master1:~/yaml# kubectl get pods -o wide

 

root@node2:~# rbd showmapped 

 

 

  Dynamic volume provisioning

  This requires the k8s cluster to be deployed from binaries rather than with kubeadm, because the in-tree rbd provisioner runs inside kube-controller-manager, which must be able to execute the rbd command (see the issue below).

  https://github.com/kubernetes/kubernetes/issues/38923

  Volumes can be created dynamically by the kube-controller-manager component, which suits stateful services that need many volumes.

  1. Define the Ceph admin user's key as a k8s Secret so that k8s can create volumes dynamically with admin privileges; images no longer need to be created in advance, because k8s asks Ceph to create them when they are needed.

  2. k8s then mounts the RBD using the regular user's Secret.

 

  Create the admin user Secret

  Get the Ceph admin user's key

cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth print-key client.admin |base64 
QVFBQW5GTmphM1FlQkJBQWo0ZXZJNXNuajJVRmpqdDFYK01zb0E9PQ==

 

  Edit the k8s Secret manifest for the Ceph admin user and create the Secret

root@master1:~/yaml# cat ceph-admin-secret.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
type: "kubernetes.io/rbd"
data:
  key: QVFBQW5GTmphM1FlQkJBQWo0ZXZJNXNuajJVRmpqdDFYK01zb0E9PQ==
  
root@master1:~/yaml# kubectl apply -f ceph-admin-secret.yaml

 

  Verify the Secrets

root@master1:~/yaml# kubectl get secrets 
NAME                     TYPE                DATA   AGE
ceph-admin-secret        kubernetes.io/rbd   1      6s
ceph-secret-lxh-keying   kubernetes.io/rbd   1      2m24s

 

  Create the regular user Secret

  Repeat the procedure used for the admin Secret above ("Create the Secret"); the ceph-secret-lxh-keying Secret created earlier is reused here.

 

  Create the StorageClass

  Create a dynamic StorageClass to provide dynamically provisioned PVCs for pods

root@master1:~/yaml# cat ceph-storage-class.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-storage-class
  namespace: ceph
  annotations:
    storageclass.kubernetes.io/is-default-class: "false" # do not make this the default StorageClass
provisioner: kubernetes.io/rbd
parameters:
  monitors: 192.168.100.35:6789,192.168.100.36:6789,192.168.100.37:6789   # ceph monitor addresses and ports
  adminId: admin     # ceph admin account
  adminSecretName: ceph-admin-secret       # secret holding the ceph admin key
  adminSecretNamespace: ceph       # namespace of the ceph admin secret
  pool: rbd-pool        # storage pool
  userId: lxh           # regular user used to map the rbd
  userSecretName: ceph-secret-lxh-keying   # secret of the regular user


root@master1:~/yaml# kubectl apply -f ceph-storage-class.yaml 

 

  Verify

root@master1:~/yaml# kubectl get sc
NAME                 PROVISIONER         RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
ceph-storage-class   kubernetes.io/rbd   Delete          Immediate           false                  10s

 

  Create a PVC backed by the StorageClass

root@master1:~/yaml# cat mysql-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pvc
  namespace: ceph
spec:
  accessModes:
    - ReadWriteOnce                        # RWO: only one pod at a time may mount the volume read-write
  storageClassName: ceph-storage-class     # the ceph StorageClass created above
  resources:
    requests:
      storage: '5Gi'

root@master1:~/yaml# kubectl apply -f mysql-pvc.yaml 
persistentvolumeclaim/mysql-pvc created

 

   Verify the PVC

root@master1:~/yaml# kubectl get pvc

root@master1:~/yaml# kubectl get pv

 

  Verify that Ceph created the image automatically

cephadmin@ceph-deploy:~/ceph-cluster$ rbd ls --pool rbd-pool

 

  Create an application pod to verify

  Create the MySQL Deployment

root@master1:~/yaml# cat mysql-sc-pvc.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: mysql:5.7.31
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: root123456
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-pvc 

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: mysql-service-label 
  name: mysql-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    protocol: TCP
    targetPort: 3306
    nodePort: 33306
  selector:
    app: mysql


root@master1:~/yaml# kubectl apply -f mysql-sc-pvc.yaml 
deployment.apps/mysql created
service/mysql-service created

 

   Verify the MySQL pod

  On the first deployment, the pod can fail because the data directory already contains files; the directory must be emptied. Locate the k8s node that runs the pod and clear the directory manually.

 

 

  Find the directory and empty it
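  A minimal sketch of that cleanup, assuming the dynamically provisioned image was formatted with the default ext4 (which creates a lost+found directory) and is mounted under the kubelet rbd plugin path; the exact directory name on your node will differ:

root@node1:~# mount | grep kubernetes.io/rbd                  # locate the rbd mount backing the mysql volume
root@node1:~# ls /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/<pool-image-dir>/
root@node1:~# rm -rf /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/<pool-image-dir>/lost+found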

 

  Delete the MySQL pod so that the Deployment recreates it

 

  Verify access to the MySQL pod; check the host port exposed by the Service

root@master1:~/yaml# kubectl get svc
NAME            TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
mysql-service   NodePort   10.100.56.161   <none>        3306:33306/TCP   7m19s

 

 

 

  Create a new database and insert some table data
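  For example, connecting through the NodePort shown above from any node IP (shown as a placeholder); the database and table names (testdb, t1) are only illustrative:

root@master1:~/yaml# mysql -h <node-ip> -P 33306 -uroot -proot123456
mysql> CREATE DATABASE testdb;
mysql> CREATE TABLE testdb.t1 (id INT PRIMARY KEY, name VARCHAR(32));
mysql> INSERT INTO testdb.t1 VALUES (1, 'ceph-rbd');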

 

 

 

  Delete the MySQL pod; the Deployment recreates a new pod, which verifies that the database is persisted on the Ceph RBD volume
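  A sketch of this test; the pod name is whatever kubectl get pods reports at the time:

root@master1:~/yaml# kubectl delete pod mysql-xxxxxxxxx-xxxxx   # delete the current mysql pod
root@master1:~/yaml# kubectl get pods -w                        # watch the Deployment create a replacement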

 

  Reconnect to MySQL and verify the data
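  Reconnecting through the same NodePort and querying the illustrative table created above:

root@master1:~/yaml# mysql -h <node-ip> -P 33306 -uroot -proot123456 -e 'SELECT * FROM testdb.t1;'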

 

 

  Using CephFS with k8s

  Pods in k8s mount Ceph's CephFS shared storage to achieve data sharing, persistence, high performance and high availability for the application. CephFS supports mounting and use by multiple service instances at the same time.

 

  Create the regular user Secret

   Refer to the steps used for the Ceph admin user Secret
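   The creation of the CephFS client user itself is not shown here; a minimal sketch of what that authorization might look like, assuming the CephFS data pool is named cephfs-data (adjust the caps and pool name to your cluster):

cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth get-or-create client.lxhfs mon 'allow r' mds 'allow rw' osd 'allow rwx pool=cephfs-data'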

   Base64-encode the created user's key

cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth print-key client.lxhfs|base64
QVFCaXBZNWo5WHdxQlJBQUIwOWUxd2JDcHpvSGJ0SkhNZkxKbEE9PQ==

 

  Create the Secret YAML

root@master1:~/yaml# vim ceph-cephfs-lxhfs-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-lxhfs-keying
type: "kubernetes.io/rbd"
data:
  key: QVFCaXBZNWo5WHdxQlJBQUIwOWUxd2JDcHpvSGJ0SkhNZkxKbEE9PQ==

 

  Create the Secret and verify it

root@master1:~/yaml# kubectl apply -f ceph-cephfs-lxhfs-secret.yaml 
secret/ceph-secret-lxhfs-keying created
root@master1:~/yaml# kubectl get secrets 
NAME                       TYPE                DATA   AGE
ceph-admin-secret          kubernetes.io/rbd   1      147m
ceph-secret-lxh-keying     kubernetes.io/rbd   1      145m
ceph-secret-lxhfs-keying   kubernetes.io/rbd   1      3s

 

  Create the application

  Edit the application YAML, adding a Deployment and a Service

root@master1:~/yaml# cat nginx-cephfs.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3     # three replicas
  selector:
    matchLabels: #rs or deployment
      app: nginx 
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: staticdata-cephfs 
          mountPath: /usr/share/nginx/html/cephfs
      volumes:
        - name: staticdata-cephfs
          cephfs:
            monitors:    # ceph monitor addresses and ports
            - '192.168.100.35:6789'
            - '192.168.100.36:6789'
            - '192.168.100.37:6789'
            path: /
            user: lxhfs       # cephfs auth user
            secretRef:
              name: ceph-secret-lxhfs-keying    # k8s secret created for the cephfs user

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: nginx-service
  name: nginx-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 33380
  selector:
    app: nginx

 

  Create the Deployment and verify the pods

root@master1:~/yaml# kubectl apply -f nginx-cephfs.yaml 
deployment.apps/nginx-deployment created
service/nginx-service created

root@master1:~/yaml# kubectl get pods
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-5db48bbc6c-44nr5   1/1     Running   0          4s
nginx-deployment-5db48bbc6c-bz7md   1/1     Running   0          4s
nginx-deployment-5db48bbc6c-vv7bp   1/1     Running   0          4s

root@master1:~/yaml# kubectl get svc
NAME            TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
nginx-service   NodePort   10.100.224.126   <none>        80:33380/TCP

 

  Verify that multiple pod replicas mount CephFS

root@master1:~/yaml# kubectl get pods
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-5db48bbc6c-44nr5   1/1     Running   0          5m44s
nginx-deployment-5db48bbc6c-bz7md   1/1     Running   0          5m44s
nginx-deployment-5db48bbc6c-vv7bp   1/1     Running   0          5m44s

root@master1:~/yaml# kubectl exec -it nginx-deployment-5db48bbc6c-44nr5 -- df -h|grep cephfs
192.168.100.35:6789,192.168.100.36:6789,192.168.100.37:6789:/  380G     0  380G   0% /usr/share/nginx/html/cephfs

root@master1:~/yaml# kubectl exec -it nginx-deployment-5db48bbc6c-bz7md -- df -h|grep cephfs
192.168.100.35:6789,192.168.100.36:6789,192.168.100.37:6789:/  380G     0  380G   0% /usr/share/nginx/html/cephfs

root@master1:~/yaml# kubectl exec -it nginx-deployment-5db48bbc6c-vv7bp -- df -h|grep cephfs
192.168.100.35:6789,192.168.100.36:6789,192.168.100.37:6789:/  380G     0  380G   0% /usr/share/nginx/html/cephfs

 

 

  Mount from a standalone client and add shared files

  For the single-node client mount steps, refer to: deployment and use of CephFS
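  A minimal kernel-client mount sketch, assuming the client.lxhfs key has been saved on the client as /etc/ceph/lxhfs.secret (the path and mount point are illustrative):

root@client:~# mkdir -p /mnt/cephfs
root@client:~# mount -t ceph 192.168.100.35:6789,192.168.100.36:6789,192.168.100.37:6789:/ /mnt/cephfs -o name=lxhfs,secretfile=/etc/ceph/lxhfs.secret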

 

  First verify access to nginx

  Verify access through the NodePort exposed by the Service created in k8s
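  For example, against any k8s node IP (shown as a placeholder):

root@master1:~/yaml# curl http://<node-ip>:33380/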

 

  Verify the client mount

 

   Create a static page from the client
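   A sketch of creating a test page on the client mount set up above (the page content is illustrative):

root@client:~# echo 'cephfs shared page' > /mnt/cephfs/index.html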

 

  Verify in the k8s pods that the file shows up
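  For example, checking one of the nginx replicas listed earlier:

root@master1:~/yaml# kubectl exec -it nginx-deployment-5db48bbc6c-44nr5 -- ls -l /usr/share/nginx/html/cephfs
root@master1:~/yaml# kubectl exec -it nginx-deployment-5db48bbc6c-44nr5 -- cat /usr/share/nginx/html/cephfs/index.html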

 

  Access the nginx page at URI: cephfs/index.html
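  Through the NodePort again:

root@master1:~/yaml# curl http://<node-ip>:33380/cephfs/index.html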

 

 
 