Dynamically provisioning PVs with a StorageClass for Pod persistent storage

# Create the ceph-secret Secret object; the k8s volume plugin uses it to access the Ceph cluster
# Get the client.admin keyring value and base64-encode it; run on ceph1
[root@ceph1 firstrbd]# ceph auth get-key client.admin | base64
QVFDZXFWZGovdnJrREJBQW1FRXFSOWI1d2MycE5mUlJPaEF6dWc9PQ==
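
As an optional sanity check, the encoded string can be decoded back to the raw key (the value below is the one printed above; the output should start with "AQ"):

[root@ceph1 firstrbd]# echo QVFDZXFWZGovdnJrREJBQW1FRXFSOWI1d2MycE5mUlJPaEF6dWc9PQ== | base64 -d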

# Create the pool; run on ceph1
[root@ceph1 ~]# ceph osd pool create k8spool 256
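On Ceph 15.2 (Octopus) a new pool should also be tagged with the application that will use it, otherwise the cluster reports a health warning; a minimal follow-up, assuming the pool created above:

# Tag the pool for RBD use and confirm it was created
[root@ceph1 ~]# ceph osd pool application enable k8spool rbd
[root@ceph1 ~]# ceph osd pool ls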
# Add the Ceph yum repo; run on every Kubernetes node
[root@test-k8s-master1 ~]# cat /etc/yum.repos.d/ceph.repo 
[ceph] 
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-15.2.17/el7/x86_64/ 
enabled=1 
gpgcheck=1 
type=rpm-md 
gpgkey=https://download.ceph.com/keys/release.asc
 
[ceph-noarch] 
name=Ceph noarch packages 
baseurl=https://mirrors.aliyun.com/ceph/rpm-15.2.17/el7/noarch/ 
enabled=1 
gpgcheck=1 
type=rpm-md 
gpgkey=https://download.ceph.com/keys/release.asc
 
[ceph-source] 
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-15.2.17/el7/SRPMS/ 
enabled=1 
gpgcheck=1 
type=rpm-md 
gpgkey=https://download.ceph.com/keys/release.asc
# Install ceph-common; run on every Kubernetes node
[root@test-k8s-master1 ~]# yum -y install ceph-common
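
A quick version check confirms the client really came from the 15.2.17 repo configured above:

[root@test-k8s-master1 ~]# ceph --version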
# Deploy the rbd-provisioner
[root@test-k8s-master1 ceph-storageclass]# cat rbd-provisioner.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "registry.cn-hangzhou.aliyuncs.com/ialiyun/rbd-provisioner:15.2.17"  # 要升级到高版本的ceph-common,这里已经升级到15.2.17
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccountName: persistent-volume-binder
[root@test-k8s-master1 ceph-storageclass]# kubectl apply -f rbd-provisioner.yaml 
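
Note that the Deployment runs under the persistent-volume-binder ServiceAccount, which this post never creates. If it does not already exist in your cluster, a minimal RBAC sketch, modeled on the upstream external-storage rbd-provisioner example (the ClusterRole name and exact rule set here are assumptions), looks like:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: persistent-volume-binder
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: rbd-provisioner
rules:
# Provision/delete PVs and watch PVCs and StorageClasses
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
# Record provisioning events and read the Ceph admin/user secrets
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
# Endpoints are used for leader election
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: persistent-volume-binder
  namespace: kube-system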

[root@test-k8s-master1 ceph-storageclass]# cat ceph-sc.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-storageclass-secret
  namespace: kube-system
data:
  key: QVFDZXFWZGovdnJrREJBQW1FRXFSOWI1d2MycE5mUlJPaEF6dWc9PQ==
type: kubernetes.io/rbd
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-sc
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: ceph.com/rbd
parameters:
  monitors: 192.168.1.20:6789,192.168.1.21:6789,192.168.1.22:6789
  adminId: admin
  adminSecretName: ceph-storageclass-secret
  adminSecretNamespace: kube-system
  pool: k8spool
  userId: admin
  userSecretName: ceph-storageclass-secret
  userSecretNamespace: kube-system
  imageFormat: "2"
  imageFeatures: "layering"
allowVolumeExpansion: true
[root@test-k8s-master1 ceph-storageclass]# kubectl apply -f ceph-sc.yaml 
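
Before creating the test Pod it is worth confirming that the StorageClass registered with the expected provisioner, and that the ops namespace used below exists (create it first if it does not):

[root@test-k8s-master1 ceph-storageclass]# kubectl get storageclass ceph-sc
[root@test-k8s-master1 ceph-storageclass]# kubectl create namespace ops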
[root@test-k8s-master1 ceph-storageclass]# cat test-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: ceph-pod1
  namespace: ops
spec:
  containers:
  - name: ceph-busybox
    image: busybox
    command: ["sleep", "60000"]
    volumeMounts:
    - name: ceph-vol1
      mountPath: /usr/share/busybox
      readOnly: false
  volumes:
  - name: ceph-vol1
    persistentVolumeClaim:
      claimName: ceph-pvc-test1

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc-test1
  namespace: ops
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: ceph-sc
[root@test-k8s-master1 ceph-storageclass]# kubectl apply -f test-pod.yaml
pod/ceph-pod1 created
persistentvolumeclaim/ceph-pvc-test1 created
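
If dynamic provisioning worked, the claim should report STATUS Bound within a few seconds and the Pod should start; a quick check:

[root@test-k8s-master1 ceph-storageclass]# kubectl get pvc ceph-pvc-test1 -n ops
[root@test-k8s-master1 ceph-storageclass]# kubectl get pod ceph-pod1 -n ops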
# List the RBD images in the pool; run on ceph1
[root@ceph1 ~]# rbd ls --pool k8spool
kubernetes-dynamic-pvc-af98f5ba-5668-11ed-8f1a-568633827378
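
To confirm the image matches the StorageClass parameters (1 GiB, format 2, layering feature only), it can be inspected with rbd info:

[root@ceph1 ~]# rbd info k8spool/kubernetes-dynamic-pvc-af98f5ba-5668-11ed-8f1a-568633827378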