Using Ceph RBD to Provide Storage Volumes for a Kubernetes Cluster

Prerequisite: install the ceph-common package on every node of the k8s cluster.

~]# rpm -ivh https://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch/ceph-release-1-1.el7.noarch.rpm
~]# yum install ceph-common
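As a quick sanity check, the client tools should now report the Ceph version pulled from the mirror above (mimic in this case):

~]# ceph --version
~]# rbd --version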

Preparing the RBD Volume

  1. Create an RBD storage pool and a test image, using rbddata and myimg as examples
ceph-cluster]$ ceph osd pool create rbddata 64 64
ceph-cluster]$ ceph osd pool application enable rbddata rbd
ceph-cluster]$ rbd pool init -p rbddata
ceph-cluster]$ rbd create rbddata/myimg --size 4Gi
ceph-cluster]$ rbd feature disable rbddata/myimg  exclusive-lock object-map fast-diff deep-flatten
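A quick check that the image looks as intended, using the pool and image names from above; after disabling the features listed above, only the layering feature should remain:

ceph-cluster]$ rbd info rbddata/myimg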
  2. Create a user account with access to the pool, using k8s as an example
ceph-cluster]$ ceph auth get-or-create client.k8s mon 'allow r' osd 'allow * pool=rbddata'
[client.k8s]
        key = AQCSsxdfiNOpCRAA1j1uAfD/NNnfH6EscOhXOQ==
  3. Save the k8s user's credentials to a single keyring file and copy it, together with the Ceph configuration file, to every node of the k8s cluster
ceph-cluster]$ ceph auth get client.k8s  -o ceph.client.k8s.keyring 
ceph-cluster]$ scp ceph.client.k8s.keyring /etc/ceph/ceph.conf root@k8s-master01:/etc/ceph
ceph-cluster]$ scp ceph.client.k8s.keyring /etc/ceph/ceph.conf root@k8s-master02:/etc/ceph
ceph-cluster]$ scp ceph.client.k8s.keyring /etc/ceph/ceph.conf root@k8s-master03:/etc/ceph
ceph-cluster]$ scp ceph.client.k8s.keyring /etc/ceph/ceph.conf root@k8s-node01:/etc/ceph
ceph-cluster]$ scp ceph.client.k8s.keyring /etc/ceph/ceph.conf root@k8s-node02:/etc/ceph
ceph-cluster]$ scp ceph.client.k8s.keyring /etc/ceph/ceph.conf root@k8s-node03:/etc/ceph
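The six scp commands can also be collapsed into a loop, as a sketch that assumes the same hostnames and passwordless root SSH:

ceph-cluster]$ for node in k8s-master0{1..3} k8s-node0{1..3}; do scp ceph.client.k8s.keyring /etc/ceph/ceph.conf root@${node}:/etc/ceph; done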
  4. Verify on a k8s cluster node
~]# rbd --user k8s -p rbddata ls
myimg

Accessing the Ceph Cluster with a keyring

  1. Create a test Pod that uses an RBD volume, accessing the Ceph cluster with the keyring
ceph]# vim  pod-keyring.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-rbd-vol
spec:
  containers:
  - name: busybox 
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh", "-c", "sleep 86400"]
    volumeMounts:
    - name: rbdpod
      mountPath: /data
  volumes:
    - name: rbdpod
      rbd:
        monitors:
        - '192.168.124.161:6789' # Ceph monitor addresses
        - '192.168.124.162:6789'
        - '192.168.124.163:6789'
        pool: rbddata
        image: myimg
        fsType: ext4
        readOnly: false
        user: k8s
        keyring: /etc/ceph/ceph.client.k8s.keyring 
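Create the Pod and check which node it was scheduled to; the -o wide output includes the node name:

ceph]# kubectl apply -f pod-keyring.yaml
ceph]# kubectl get pod pod-with-rbd-vol -o wide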
  2. Verify

Log in to the node where the Pod is running and run rbd showmapped:

[root@k8s-node01 ~]# rbd showmapped
id pool    image snap device    
0  rbddata myimg -    /dev/rbd0 
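The mount can also be checked from inside the Pod itself (run from any host with kubectl access; the mount point /data matches the volumeMounts entry above):

~]# kubectl exec pod-with-rbd-vol -- df -h /data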

Accessing the Ceph Cluster with a Secret

When a Secret is used to access the Ceph cluster, the keyring file no longer needs to be copied to /etc/ceph/ on the k8s cluster nodes.

  1. Create an image
ceph-cluster]$ rbd create rbddata/img2 --size 1Gi --image-feature layering
  2. Retrieve the ceph user's key and base64-encode it
ceph-cluster]$ ceph auth print-key client.k8s | base64
QVFDU3N4ZGZpTk9wQ1JBQTFqMXVBZkQvTk5uZkg2RXNjT2hYT1E9PQ==
  3. Create the Secret
secret]# cat ceph-secret.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: default
type: "kubernetes.io/rbd"
data:
  key: QVFDU3N4ZGZpTk9wQ1JBQTFqMXVBZkQvTk5uZkg2RXNjT2hYT1E9PQ==
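Apply the Secret and confirm it was created; the key field above is the base64 string from the previous step:

secret]# kubectl apply -f ceph-secret.yaml
secret]# kubectl get secret ceph-secret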
  4. Create a test Pod
secret]# cat pod-secret.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-rbd-vol2
spec:
  containers:
  - name: busybox 
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh", "-c", "sleep 86400"]
    volumeMounts:
    - name: rbdpod
      mountPath: /data
  volumes:
    - name: rbdpod
      rbd:
        monitors:
        - '192.168.124.161:6789'
        - '192.168.124.162:6789'
        - '192.168.124.163:6789'
        pool: rbddata
        image: img2
        fsType: ext4
        readOnly: false
        user: k8s
        secretRef:
          name: ceph-secret
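Create the Pod the same way as before and note the node it lands on:

secret]# kubectl apply -f pod-secret.yaml
secret]# kubectl get pod pod-with-rbd-vol2 -o wide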
  5. Verify
[root@k8s-node3 ~]# rbd showmapped
id pool    image snap device    
0  rbddata img2  -    /dev/rbd0
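A simple write test from inside the Pod confirms the volume is mounted read-write (a sketch; the file name test.txt is arbitrary, and the command should print back hello):

~]# kubectl exec pod-with-rbd-vol2 -- sh -c 'echo hello > /data/test.txt && cat /data/test.txt'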

posted on 2020-07-22 13:46 by imirsh