Ceph RBD结合K8S提供存储卷及动态存储卷使用案例【十七】
#让 k8s 中的 pod 可以访问 ceph 中 rbd 提供的镜像作为存储设备,需要在 ceph 创建 rbd并且让 k8s node 节点能够通过 ceph 的认证。
#k8s 在使用 ceph 作为动态存储卷的时候,需要 kube-controller-manager 组件能够访问ceph,因此需要在包括 k8s master 及 node 节点在内的每一个 node 同步认证文件。
初始化RBD & 创建image
##创建初始化rbd
root@ceph-deploy:~# ceph osd pool create dzzz-rbd-pool 32 32 #创建新的rbd
pool 'dzzz-rbd-pool' created
root@ceph-deploy:~# ceph osd pool ls
device_health_metrics
mypool
cephfs-metadata
cephfs-data
rbd-ibm
cephfs-metadatat
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
default.rgw.buckets.index
default.rgw.buckets.data
dzzz-rbd-pool #确认已创建
root@ceph-deploy:~# ceph osd pool application enable dzzz-rbd-pool rbd #存储池启用rbd
enabled application 'rbd' on pool 'dzzz-rbd-pool'
root@ceph-deploy:~# rbd pool init -p dzzz-rbd-pool #初始化rbd
##创建image
root@ceph-deploy:~# rbd create dzzz-image-img1 --size 5G --pool dzzz-rbd-pool --image-format 2 --image-feature layering #创建镜像
root@ceph-deploy:~# rbd ls --pool dzzz-rbd-pool #验证镜像
dzzz-image-img1
root@ceph-deploy:~# rbd --image dzzz-image-img1 --pool dzzz-rbd-pool info #验证信息
rbd image 'dzzz-image-img1':
size 5 GiB in 1280 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: fd107302be0e
block_name_prefix: rbd_data.fd107302be0e
format: 2
features: layering
op_features:
flags:
create_timestamp: Mon Mar 18 09:59:13 2024
access_timestamp: Mon Mar 18 09:59:13 2024
modify_timestamp: Mon Mar 18 09:59:13 2024
客户端安装ceph-common
##分别在k8s master 与 各个node节点安装 ceph-common组件包
#环境 Ubuntu22.04 K8S:1.28.2版本 Cilium网络插件
root@ubuntu-k8s-master01:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
ubuntu-k8s-master01 Ready control-plane 17d v1.28.2
ubuntu-k8s-node01 Ready <none> 17d v1.28.2
ubuntu-k8s-node02 Ready <none> 17d v1.28.2
#下载key文件
root@ubuntu-k8s-master01:~# wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
root@ubuntu-k8s-node01:~# wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
root@ubuntu-k8s-node02:~# wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
#各master和node节点配置apt源
#/etc/apt/sources.list 配置 但是安装时由于版本不对 实际安装了 ceph-common=17.2.6-0ubuntu0.22.04.3版本
#deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific/ focal main
root@ubuntu-k8s-master01:/etc# apt-cache madison ceph-common
ceph-common | 17.2.6-0ubuntu0.22.04.3 | http://mirrors.aliyun.com/ubuntu jammy-updates/main amd64 Packages
ceph-common | 17.2.6-0ubuntu0.22.04.3 | http://mirrors.aliyun.com/ubuntu jammy-security/main amd64 Packages
ceph-common | 17.1.0-0ubuntu3 | http://mirrors.aliyun.com/ubuntu jammy/main amd64 Packages
ceph-common | 16.2.15-1focal | https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific focal/main amd64 Packages
#如需指定该版本可执行: apt install -y ceph-common=16.2.15-1focal
root@ubuntu-k8s-master01:/etc# apt install -y ceph-common
创建ceph用户与授权
#创建用户和授权
root@ceph-deploy:~# ceph auth get-or-create client.dzzz-xks mon 'allow r' osd 'allow * pool=dzzz-rbd-pool'
[client.dzzz-xks]
key = AQDMo/dlW0M3GRAAPi2LqIqCFM1eBybAWRzhTg==
root@ceph-deploy:~# ceph auth get client.dzzz-xks
[client.dzzz-xks]
key = AQDMo/dlW0M3GRAAPi2LqIqCFM1eBybAWRzhTg==
caps mon = "allow r"
caps osd = "allow * pool=dzzz-rbd-pool"
exported keyring for client.dzzz-xks
root@ceph-deploy:~# ceph auth get client.dzzz-xks -o ceph.client.dzzz-xks.keyring
exported keyring for client.dzzz-xks
root@ceph-deploy:~# cat ceph.client.dzzz-xks.keyring
[client.dzzz-xks]
key = AQDMo/dlW0M3GRAAPi2LqIqCFM1eBybAWRzhTg==
caps mon = "allow r"
caps osd = "allow * pool=dzzz-rbd-pool"
#同步认证文件到k8s 所有服务器上
root@ceph-deploy:~# scp /home/cephadmin/ceph.conf ceph.client.dzzz-xks.keyring root@192.168.40.132:/etc/ceph/ 100% 129 214.0KB/s 00:00
root@ceph-deploy:~# scp /home/cephadmin/ceph.conf ceph.client.dzzz-xks.keyring root@192.168.40.133:/etc/ceph/ 100% 129 256.3KB/s 00:00
root@ceph-deploy:~# scp /home/cephadmin/ceph.conf ceph.client.dzzz-xks.keyring root@192.168.40.134:/etc/ceph/
#在k8s node节点上验证用户权限
root@ubuntu-k8s-node01:~# ceph --user dzzz-xks -s
cluster:
id: 0d8fb726-ee6d-4aaf-aeca-54c68e2584af
health: HEALTH_WARN
1 pool(s) do not have an application enabled
services:
mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 3d)
mgr: ceph-mgr2(active, since 2d), standbys: ceph-mgr1
mds: 2/2 daemons up, 2 standby
osd: 9 osds: 9 up (since 3d), 9 in (since 3d)
rgw: 2 daemons active (2 hosts, 1 zones)
data:
volumes: 1/1 healthy
pools: 13 pools, 449 pgs
objects: 402 objects, 170 MiB
usage: 3.1 GiB used, 267 GiB / 270 GiB avail
pgs: 449 active+clean
io:
client: 4.0 KiB/s rd, 0 B/s wr, 3 op/s rd, 2 op/s wr
#验证镜像访问权限
root@ubuntu-k8s-node01:~# rbd --id dzzz-xks ls --pool=dzzz-rbd-pool
dzzz-image-img1
##k8s节点配置主机名称解析
#在 ceph.conf 配置文件中包含 ceph 主机的主机名,因此需要在 k8s 各 master 及 node 节点配置主机名称解析
#192.168.40.132/133/134
root@ubuntu-k8s-master01:/etc# cat /etc/hosts
root@ubuntu-k8s-node01:/etc# cat /etc/hosts
root@ubuntu-k8s-node02:/etc# cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 ubuntu-k8s-master01
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.40.132 ubuntu-k8s-master01
192.168.40.133 ubuntu-k8s-node01
192.168.40.134 ubuntu-k8s-node02
192.168.40.151 ceph-mon1.sheca.com ceph-mon1
192.168.40.152 ceph-mon2.sheca.com ceph-mon2
192.168.40.153 ceph-mon3.sheca.com ceph-mon3
192.168.40.154 ceph-mgr1.sheca.com ceph-mgr1
192.168.40.155 ceph-mgr2.sheca.com ceph-mgr2
192.168.40.156 ceph-node1.sheca.com ceph-node1
192.168.40.157 ceph-node2.sheca.com ceph-node2
192.168.40.158 ceph-node3.sheca.com ceph-node3
192.168.40.159 ceph-deploy.sheca.com ceph-deploy
通过keyring文件挂载rbd
#基于 ceph 提供的 rbd 实现存储卷的动态提供,有两种实现方式:
#一是通过宿主机的 keyring文件挂载 rbd
#另外一个是通过将 keyring 中 key 定义为 k8s 中的 secret,然后 pod 通过secret 挂载 rbd.
1、通过 keyring 文件直接挂载-busybox
root@ubuntu-k8s-master01:~/ceph-file# cat case1-busybox-keyring.yaml
apiVersion: v1
kind: Pod
metadata:
name: busybox
namespace: default
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: Always
name: busybox
volumeMounts:
- name: rbd-data1
mountPath: /data
volumes:
- name: rbd-data1
rbd:
monitors:
- '192.168.40.151:6789'
- '192.168.40.152:6789'
- '192.168.40.153:6789'
pool: dzzz-rbd-pool
image: dzzz-image-img1
fsType: ext4
readOnly: false
user: dzzz-xks
keyring: /etc/ceph/ceph.client.dzzz-xks.keyring
root@ubuntu-k8s-master01:~/ceph-file# kubectl apply -f case1-busybox-keyring.yaml
root@ubuntu-k8s-master01:~/ceph-file# kubectl get pods
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 2m22s
demoapp-cilium-65c55ccffb-288mg 1/1 Running 1 (43m ago) 12d
demoapp-cilium-65c55ccffb-fgwn6 1/1 Running 1 (43m ago) 12d
demoapp-cilium-65c55ccffb-shnmd 1/1 Running 1 (43m ago) 12d
#进入容器内部验证
root@ubuntu-k8s-master01:~/ceph-file# kubectl exec -it busybox sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # df -h | grep /data
/dev/rbd0 4.8G 24.0K 4.8G 0% /data
/ # mount | grep data
/dev/rbd0 on /data type ext4 (rw,relatime,stripe=16)
2、通过 keyring 文件直接挂载-nginx
root@ubuntu-k8s-master01:~/ceph-file# kubectl delete -f case1-busybox-keyring.yaml
root@ubuntu-k8s-master01:~/ceph-file# cat case2-nginx-keyring.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 1
selector:
matchLabels: #rs or deployment
app: ng-deploy-80
template:
metadata:
labels:
app: ng-deploy-80
spec:
containers:
- name: ng-deploy-80
image: nginx
#image: mysql:5.6.46
#env:
# Use secret in real usage
#- name: MYSQL_ROOT_PASSWORD
#value: magedu123456
ports:
- containerPort: 80
volumeMounts:
- name: rbd-data1
#mountPath: /var/lib/mysql
mountPath: /usr/share/html/ceph
volumes:
- name: rbd-data1
rbd:
monitors:
- '192.168.40.151:6789'
- '192.168.40.152:6789'
- '192.168.40.153:6789'
pool: dzzz-rbd-pool
image: dzzz-image-img1
fsType: ext4
readOnly: false
user: dzzz-xks
keyring: /etc/ceph/ceph.client.dzzz-xks.keyring
root@ubuntu-k8s-master01:~/ceph-file# kubectl get pods
NAME READY STATUS RESTARTS AGE
demoapp-cilium-65c55ccffb-288mg 1/1 Running 1 (59m ago) 12d
demoapp-cilium-65c55ccffb-fgwn6 1/1 Running 1 (59m ago) 12d
demoapp-cilium-65c55ccffb-shnmd 1/1 Running 1 (59m ago) 12d
nginx-deployment-68ff98555c-fnjbr 1/1 Running 0 5m47s
root@ubuntu-k8s-master01:~/ceph-file# kubectl exec -it nginx-deployment-68ff98555c-fnjbr sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# df
Filesystem 1K-blocks Used Available Use% Mounted on
overlay 14339080 6972344 6616556 52% /
tmpfs 65536 0 65536 0% /dev
/dev/mapper/ubuntu--vg-ubuntu--lv 14339080 6972344 6616556 52% /etc/hosts
shm 65536 0 65536 0% /dev/shm
/dev/rbd0 5074592 24 5058184 1% /usr/share/html/ceph
tmpfs 3867076 12 3867064 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs 1984736 0 1984736 0% /proc/asound
tmpfs 1984736 0 1984736 0% /proc/acpi
tmpfs 1984736 0 1984736 0% /proc/scsi
tmpfs 1984736 0 1984736 0% /sys/firmware
#宿主机验证rbd
#rbd 在 pod 里面看是挂载到了 pod 内部,但是由于 pod 使用的是宿主机内核,因此实际是在宿主机上完成挂载的。
#到宿主机所在的 node 验证 rbd 挂载:
root@ubuntu-k8s-node02:/etc/ceph# rbd showmapped
id pool namespace image snap device
0 dzzz-rbd-pool dzzz-image-img1 - /dev/rbd0
root@ubuntu-k8s-node02:/etc/ceph# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 30G 0 disk
├─sda1 8:1 0 1M 0 part
├─sda2 8:2 0 2G 0 part /boot
└─sda3 8:3 0 28G 0 part
└─ubuntu--vg-ubuntu--lv 253:0 0 14G 0 lvm /
sr0 11:0 1 1.4G 0 rom
nbd0 43:0 0 0B 0 disk
nbd1 43:32 0 0B 0 disk
nbd2 43:64 0 0B 0 disk
nbd3 43:96 0 0B 0 disk
nbd4 43:128 0 0B 0 disk
nbd5 43:160 0 0B 0 disk
nbd6 43:192 0 0B 0 disk
nbd7 43:224 0 0B 0 disk
rbd0 252:0 0 5G 0 disk /var/lib/kubelet/pods/697fe035-3868-4c3e-9c2c-bc7c93522edd/volumes/kubernetes.io~rbd/rbd-data1
/var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/dzzz-rbd-pool-image-dzzz-image-img1
nbd8 43:256 0 0B 0 disk
nbd9 43:288 0 0B 0 disk
nbd10 43:320 0 0B 0 disk
nbd11 43:352 0 0B 0 disk
nbd12 43:384 0 0B 0 disk
nbd13 43:416 0 0B 0 disk
nbd14 43:448 0 0B 0 disk
nbd15 43:480 0 0B 0 disk
3、通过secret挂载rbd
#获取dzzz-xks 口令 并 base64编码
root@ceph-deploy:~# ceph auth print-key client.dzzz-xks
AQDMo/dlW0M3GRAAPi2LqIqCFM1eBybAWRzhTg==
root@ceph-deploy:~# ceph auth print-key client.dzzz-xks | base64
QVFETW8vZGxXME0zR1JBQVBpMkxxSXFDRk0xZUJ5YkFXUnpoVGc9PQ==
#创建secret 资源
root@ubuntu-k8s-master01:~/ceph-file# cat case3-secret-client-dzzz-xks.yaml
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret-dzzz-xks
type: "kubernetes.io/rbd"
data:
key: QVFETW8vZGxXME0zR1JBQVBpMkxxSXFDRk0xZUJ5YkFXUnpoVGc9PQ==
root@ubuntu-k8s-master01:~/ceph-file# kubectl apply -f case3-secret-client-dzzz-xks.yaml
secret/ceph-secret-dzzz-xks created
root@ubuntu-k8s-master01:~/ceph-file# kubectl get secret
NAME TYPE DATA AGE
ceph-secret-dzzz-xks kubernetes.io/rbd 1 54s
#创建pod
root@ubuntu-k8s-master01:~/ceph-file# kubectl delete -f case2-nginx-keyring.yaml
deployment.apps "nginx-deployment" deleted
root@ubuntu-k8s-master01:~/ceph-file# cat case4-nginx-secret.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 1
selector:
matchLabels: #rs or deployment
app: ng-deploy-80
template:
metadata:
labels:
app: ng-deploy-80
spec:
containers:
- name: ng-deploy-80
image: nginx
ports:
- containerPort: 80
volumeMounts:
- name: rbd-data1
mountPath: /usr/share/nginx/html/rbd
volumes:
- name: rbd-data1
rbd:
monitors:
- '192.168.40.151:6789'
- '192.168.40.152:6789'
- '192.168.40.153:6789'
pool: dzzz-rbd-pool
image: dzzz-image-img1
fsType: ext4
readOnly: false
user: dzzz-xks
secretRef:
name: ceph-secret-dzzz-xks
root@ubuntu-k8s-master01:~/ceph-file# kubectl apply -f case4-nginx-secret.yaml
Warning: spec.template.spec.volumes[0].rbd: deprecated in v1.28, non-functional in v1.31+
deployment.apps/nginx-deployment created
#验证pod
root@ubuntu-k8s-master01:~/ceph-file# kubectl get pods
NAME READY STATUS RESTARTS AGE
demoapp-cilium-65c55ccffb-288mg 1/1 Running 1 (66m ago) 12d
demoapp-cilium-65c55ccffb-fgwn6 1/1 Running 1 (66m ago) 12d
demoapp-cilium-65c55ccffb-shnmd 1/1 Running 1 (66m ago) 12d
nginx-deployment-77fcd7dd47-nkrwf 1/1 Running 0 56s
root@ubuntu-k8s-master01:~/ceph-file# kubectl exec -it nginx-deployment-77fcd7dd47-nkrwf sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# df
Filesystem 1K-blocks Used Available Use% Mounted on
overlay 14339080 6972360 6616540 52% /
tmpfs 65536 0 65536 0% /dev
/dev/mapper/ubuntu--vg-ubuntu--lv 14339080 6972360 6616540 52% /etc/hosts
shm 65536 0 65536 0% /dev/shm
/dev/rbd0 5074592 24 5058184 1% /usr/share/nginx/html/rbd #
tmpfs 3867076 12 3867064 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs 1984736 0 1984736 0% /proc/asound
tmpfs 1984736 0 1984736 0% /proc/acpi
tmpfs 1984736 0 1984736 0% /proc/scsi
tmpfs 1984736 0 1984736 0% /sys/firmware
root@ubuntu-k8s-node02:/etc/ceph# rbd showmapped
id pool namespace image snap device
0 dzzz-rbd-pool dzzz-image-img1 - /dev/rbd0
root@ubuntu-k8s-node02:/etc/ceph# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 30G 0 disk
├─sda1 8:1 0 1M 0 part
├─sda2 8:2 0 2G 0 part /boot
└─sda3 8:3 0 28G 0 part
└─ubuntu--vg-ubuntu--lv 253:0 0 14G 0 lvm /
sr0 11:0 1 1.4G 0 rom
nbd0 43:0 0 0B 0 disk
nbd1 43:32 0 0B 0 disk
nbd2 43:64 0 0B 0 disk
nbd3 43:96 0 0B 0 disk
nbd4 43:128 0 0B 0 disk
nbd5 43:160 0 0B 0 disk
nbd6 43:192 0 0B 0 disk
nbd7 43:224 0 0B 0 disk
rbd0 252:0 0 5G 0 disk /var/lib/kubelet/pods/1dbd7944-e806-4b52-9c59-91e9966f4f9d/volumes/kubernetes.io~rbd/rbd-data1
/var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/dzzz-rbd-pool-image-dzzz-image-img1
nbd8 43:256 0 0B 0 disk
nbd9 43:288 0 0B 0 disk
nbd10 43:320 0 0B 0 disk
nbd11 43:352 0 0B 0 disk
nbd12 43:384 0 0B 0 disk
nbd13 43:416 0 0B 0 disk
nbd14 43:448 0 0B 0 disk
nbd15 43:480 0 0B 0 disk
4、Ceph作为StorageClass - 实验未成功 因为实验用的是1.28.2
Ceph RBD (deprecated)- Kubernetes v1.28 [deprecated]
#存储卷可以通过 kube-controller-manager 组件动态创建,适用于有状态服务需要多个存储卷的场合。
#将 ceph admin 用户 key 文件定义为 k8s secret,用于 k8s 调用 ceph admin 权限动态创建存储卷,即不再需要提前创建好 image 而是 k8s 在需要使用的时候再调用 ceph 创建
#创建admin用户secret
root@ceph-deploy:~# ceph auth print-key client.admin | base64
QVFCLzArcGxGSTVESmhBQStURUNoSG1zR3M2MlJIaFRhTy9ZVWc9PQ==
#创建ceph admin用户secret 并验证
root@ubuntu-k8s-master01:~/ceph-file# cat case5-secret-admin.yaml
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
key: QVFCLzArcGxGSTVESmhBQStURUNoSG1zR3M2MlJIaFRhTy9ZVWc9PQ==
root@ubuntu-k8s-master01:~/ceph-file# kubectl apply -f case5-secret-admin.yaml
secret/ceph-secret-admin created
root@ubuntu-k8s-master01:~/ceph-file# kubectl get secret
NAME TYPE DATA AGE
ceph-secret-admin kubernetes.io/rbd 1 3s
ceph-secret-dzzz-xks kubernetes.io/rbd 1 12m
#创建普通用户secret
#获取dzzz-xks 口令 并 base64编码
root@ceph-deploy:~# ceph auth print-key client.dzzz-xks
AQDMo/dlW0M3GRAAPi2LqIqCFM1eBybAWRzhTg==
root@ceph-deploy:~# ceph auth print-key client.dzzz-xks | base64
QVFETW8vZGxXME0zR1JBQVBpMkxxSXFDRk0xZUJ5YkFXUnpoVGc9PQ==
#创建secret 资源
root@ubuntu-k8s-master01:~/ceph-file# cat case3-secret-client-dzzz-xks.yaml
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret-dzzz-xks
type: "kubernetes.io/rbd"
data:
key: QVFETW8vZGxXME0zR1JBQVBpMkxxSXFDRk0xZUJ5YkFXUnpoVGc9PQ==
root@ubuntu-k8s-master01:~/ceph-file# kubectl apply -f case3-secret-client-dzzz-xks.yaml
secret/ceph-secret-dzzz-xks created
root@ubuntu-k8s-master01:~/ceph-file# kubectl get secret
NAME TYPE DATA AGE
ceph-secret-dzzz-xks kubernetes.io/rbd 1 54s
##创建存储类
#创建动态存储类,为 pod 提供动态 pvc
root@ubuntu-k8s-master01:~/ceph-file# cat case6-ceph-storage-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ceph-storage-class-dzzz-xks
annotations:
storageclass.kubernetes.io/is-default-class: "false" #是否设为默认存储类,此处为 false 即不作为默认存储类
provisioner: kubernetes.io/rbd
parameters:
monitors: 192.168.40.151:6789,192.168.40.152:6789,192.168.40.153:6789
adminId: admin
adminSecretName: ceph-secret-admin
adminSecretNamespace: default
pool: dzzz-rbd-pool
userId: dzzz-xks
userSecretName: ceph-secret-dzzz-xks
root@ubuntu-k8s-master01:~/ceph-file# kubectl apply -f case6-ceph-storage-class.yaml
storageclass.storage.k8s.io/ceph-storage-class-dzzz-xks created
root@ubuntu-k8s-master01:~/ceph-file# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
ceph-storage-class-dzzz-xks kubernetes.io/rbd Delete Immediate false 2s
#创建基于存储类的PVC
root@ubuntu-k8s-master01:~/ceph-file# cat case7-mysql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mysql-data-pvc
spec:
accessModes:
- ReadWriteOnce
storageClassName: ceph-storage-class-dzzz-xks
resources:
requests:
storage: '2Gi'
root@ubuntu-k8s-master01:~/ceph-file# kubectl apply -f case7-mysql-pvc.yaml
5、CephFS 使用案例
#k8s 中的 pod 挂载 ceph 的 cephfs 共享存储,实现业务中数据共享、持久化、高性能、高可用的目的。
#创建Secret
case5-secret-admin.yaml
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
key: QVFCLzArcGxGSTVESmhBQStURUNoSG1zR3M2MlJIaFRhTy9ZVWc9PQ==
root@ubuntu-k8s-master01:~/ceph-file# cat case9-nginx-cephfs.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 3
selector:
matchLabels: #rs or deployment
app: ng-deploy-80
template:
metadata:
labels:
app: ng-deploy-80
spec:
containers:
- name: ng-deploy-80
image: nginx
ports:
- containerPort: 80
volumeMounts:
- name: dzzz-staticdata-cephfs
mountPath: /usr/share/nginx/html/cephfs
volumes:
- name: dzzz-staticdata-cephfs
cephfs:
monitors:
- '192.168.40.151:6789'
- '192.168.40.152:6789'
- '192.168.40.153:6789'
path: /
user: admin
secretRef:
name: ceph-secret-admin
---
kind: Service
apiVersion: v1
metadata:
labels:
app: ng-deploy-80-service-label
name: ng-deploy-80-service
spec:
type: NodePort
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
nodePort: 30080
selector:
app: ng-deploy-80
root@ubuntu-k8s-master01:~/ceph-file# kubectl apply -f case9-nginx-cephfs.yaml
Warning: spec.template.spec.volumes[0].cephfs: deprecated in v1.28, non-functional in v1.31+
deployment.apps/nginx-deployment configured
service/ng-deploy-80-service created
#在另一台主机挂载 这个共享目录 放index文件测试
#参考https://www.cnblogs.com/birkhoffxia/articles/18073140 - 内核空间挂载ceph-fs使用
[root@k8s-haproxy02 ~]# mount -t ceph 192.168.40.151:6789,192.168.40.152:6789,192.168.40.153:6789:/ /gali -o name=cadzzz,secret=AQAWq/Jlf+6ZLhAAr6ClV7deNoHCHxNfTZiOmA==
[root@k8s-haproxy02 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 979M 0 979M 0% /dev
tmpfs 991M 0 991M 0% /dev/shm
tmpfs 991M 9.6M 981M 1% /run
tmpfs 991M 0 991M 0% /sys/fs/cgroup
/dev/mapper/centos-root 17G 2.9G 15G 17% /
/dev/sda1 1014M 138M 877M 14% /boot
ceph-fuse 85G 0 85G 0% /tablo
tmpfs 199M 0 199M 0% /run/user/0
192.168.40.151:6789,192.168.40.152:6789,192.168.40.153:6789:/ 85G 0 85G 0% /gali
[root@k8s-haproxy02 ~]# cd /gali/
[root@k8s-haproxy02 gali]# ll
total 2
-rw-r--r-- 1 root root 1135 Mar 14 16:00 passwd
-rw-r--r-- 1 root root 16 Mar 15 09:43 sheca.txt
[root@k8s-haproxy02 gali]# cat index.html
2024-03-18 CephFS in K8S Nginx Index HTML
#在 K8s 容器内也能看到写入的文件
root@ubuntu-k8s-master01:~/ceph-file# kubectl get pods
NAME READY STATUS RESTARTS AGE
demoapp-cilium-65c55ccffb-288mg 1/1 Running 1 (4h49m ago) 12d
demoapp-cilium-65c55ccffb-fgwn6 1/1 Running 1 (4h49m ago) 12d
demoapp-cilium-65c55ccffb-shnmd 1/1 Running 1 (4h49m ago) 12d
nginx-deployment-cephfs-b5b6d78c6-2vbt5 1/1 Running 0 10m
nginx-deployment-cephfs-b5b6d78c6-4cgvv 1/1 Running 0 10m
nginx-deployment-cephfs-b5b6d78c6-dcjxl 1/1 Running 0 10m
root@ubuntu-k8s-master01:~/ceph-file# kubectl exec -it nginx-deployment-cephfs-b5b6d78c6-2vbt5 sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# ls /usr/share/nginx/html/cephfs
index.html passwd sheca.txt
##访问网站 开放端口30080 就是首页
http://192.168.40.132:30080/cephfs/index.html
#随便上传一个文件
[root@k8s-haproxy02 gali]# pwd
/gali
[root@k8s-haproxy02 gali]# ls
2023-10-14.png 2023-10-19.png 2024-01-05 (1).png index.html passwd sheca.txt
[root@k8s-haproxy02 gali]# ll
total 9240
-rw-r--r-- 1 root root 7560161 Mar 18 14:46 2023-10-14.png
-rw-r--r-- 1 root root 1420040 Mar 18 14:47 2023-10-19.png
-rw-r--r-- 1 root root 478476 Mar 18 14:46 2024-01-05 (1).png
-rw-r--r-- 1 root root 43 Mar 18 14:41 index.html
-rw-r--r-- 1 root root 1135 Mar 14 16:00 passwd
-rw-r--r-- 1 root root 16 Mar 15 09:43 sheca.txt
#访问这个文件
http://192.168.40.132:30080/cephfs/2023-10-19.png


浙公网安备 33010602011771号