k8s persistent storage (emptyDir, hostPath, NFS, PV, PVC)
Commonly used volume types include: emptyDir, hostPath, nfs, persistentVolumeClaim, glusterfs, cephfs, configMap, secret.
k8s persistent storage: emptyDir
An emptyDir volume is created when the Pod is assigned to a Node. Kubernetes automatically allocates a directory on that Node, so you do not need to specify a host directory. The directory starts out empty, and when the Pod is removed from the Node the data in the emptyDir is deleted permanently. emptyDir volumes are mainly used as temporary directories that an application does not need to keep, or as a directory shared between several containers in the same Pod.
vim emptydir.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-empty
spec:
  containers:
  - name: container-empty
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: /cache
      name: cache-volume
  volumes:
  - emptyDir: {}
    name: cache-volume
kubectl apply -f emptydir.yaml
[root@xksmaster1 05_Storage]# kubectl get pods
NAME READY STATUS RESTARTS AGE
pod-empty 1/1 Running 0 5s
pod-secret-volume 1/1 Running 0 49m
[root@xksmaster1 05_Storage]# kubectl get pods -o wide | grep empty
pod-empty 1/1 Running 0 15s 172.16.182.63 xksnode1 <none> <none>
[root@xksmaster1 05_Storage]# kubectl get pods pod-empty -o yaml | grep uid
uid: d050ef64-9dd6-4a6a-9735-af487fe715b2
[root@xksnode1 K8S]# tree /var/lib/kubelet/pods/d050ef64-9dd6-4a6a-9735-af487fe715b2
[root@xksnode1 d050ef64-9dd6-4a6a-9735-af487fe715b2]# ll
total 4
drwxr-x--- 3 root root 29 Mar 13 11:54 containers
-rw-r--r-- 1 root root 207 Mar 13 11:54 etc-hosts
drwxr-x--- 3 root root 37 Mar 13 11:54 plugins
drwxr-x--- 4 root root 65 Mar 13 11:54 volumes
[root@xksnode1 d050ef64-9dd6-4a6a-9735-af487fe715b2]# cat etc-hosts
# Kubernetes-managed hosts file.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
172.16.182.63 pod-empty
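emptyDir can also be backed by memory (tmpfs) and bounded with a size limit; both are standard Kubernetes fields. The following is only a minimal sketch, not part of the original exercise, and the pod name is illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: pod-empty-mem          # hypothetical name, not from the original
spec:
  containers:
  - name: container-empty
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: /cache
      name: cache-volume
  volumes:
  - name: cache-volume
    emptyDir:
      medium: Memory           # back the volume with tmpfs instead of node disk
      sizeLimit: 100Mi         # the pod is evicted if it writes more than this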
k8s persistent storage: hostPath
A hostPath volume mounts a file or directory from the host node's filesystem into the Pod.
vim hostpath.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-hostpath
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: test-nginx
    volumeMounts:
    - mountPath: /test-nginx
      name: test-volume
  - image: tomcat:8.5-jre8-alpine
    imagePullPolicy: IfNotPresent
    name: test-tomcat
    volumeMounts:
    - mountPath: /test-tomcat
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /data1
      type: DirectoryOrCreate
# DirectoryOrCreate means: if /data1 already exists on the node, use it; otherwise it is created automatically on the node the pod is scheduled to
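Besides DirectoryOrCreate, hostPath accepts other type values defined by Kubernetes: an empty string (no check, the default), Directory, FileOrCreate, File, Socket, CharDevice and BlockDevice. As an illustrative sketch that is not part of the original example, mounting a single existing host file looks like this:

apiVersion: v1
kind: Pod
metadata:
  name: test-hostpath-file     # hypothetical name, not from the original
spec:
  containers:
  - name: test-nginx
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: /etc/localtime
      name: host-time
  volumes:
  - name: host-time
    hostPath:
      path: /etc/localtime     # illustrative: share the host's time zone file
      type: File               # fail to start if the file does not already exist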
kubectl apply -f hostpath.yaml
Check which physical node the pod was scheduled to:
[root@xksmaster1 ~]# kubectl get pods -o wide | grep hostpath
test-hostpath 2/2 Running 10.244.209.153 xksnode1
# The output shows the pod was scheduled onto xksnode1. Log in to xksnode1 and check whether the storage directory was created on that machine.
[root@xksnode1 ~]# ll /data1/
total 0
# The storage directory /data1 has been created; it will serve as the pod's persistent storage directory.
# Create a directory under /data1 on xksnode1:
[root@xksnode1 ~]# cd /data1/
[root@xksnode1 data1]# mkdir aa
# Test that the volume works: log in to the nginx container
[root@xksmaster1 ~]# kubectl exec -it test-hostpath -c test-nginx -- /bin/bash
root@test-hostpath:/# cd /test-nginx/
# The /test-nginx/ directory exists, so the host directory has been mounted into the container
root@test-hostpath:/test-nginx# ls
aa
# Test that the volume works: log in to the tomcat container
[root@xksmaster1 ~]# kubectl exec -it test-hostpath -c test-tomcat -- /bin/bash
root@test-hostpath:/usr/local/tomcat# cd /test-tomcat/
# The /test-tomcat/ directory exists, so the host directory has been mounted into the container
root@test-hostpath:/test-tomcat# ls
aa
# This test shows that the test-nginx and test-tomcat containers in the same pod share the storage volume.
Drawbacks of hostPath volumes:
The data lives on a single node.
If the pod is deleted and recreated, it must be scheduled back onto the same node, otherwise the data is effectively lost (see the pinning sketch after this list).
Distributed storage can be used instead:
nfs, cephfs, glusterfs
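If you do have to stay on hostPath, one common workaround (not part of the original text) is to pin the pod to the node that holds the data, for example via spec.nodeName, so a recreated pod always lands on the same /data1 directory:

apiVersion: v1
kind: Pod
metadata:
  name: test-hostpath-pinned   # hypothetical name, not from the original
spec:
  nodeName: xksnode1           # always schedule onto the node that owns /data1
  containers:
  - name: test-nginx
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: /test-nginx
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /data1
      type: DirectoryOrCreate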
k8s persistent storage: NFS
On the master, install and configure the NFS server:

yum install nfs-utils -y
[root@xksmaster1 yum.repos.d]# mkdir /data/volumes -pv
mkdir: created directory ‘/data/volumes’
[root@xksmaster1 yum.repos.d]# systemctl start nfs
[root@xksmaster1 yum.repos.d]# vim /etc/exports
[root@xksmaster1 yum.repos.d]# exportfs -arv
exporting 192.168.19.0/24:/data/volumes
[root@xksmaster1 yum.repos.d]# service nfs start
Redirecting to /bin/systemctl start nfs.service
[root@xksmaster1 yum.repos.d]# systemctl enable nfs
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
[root@xksmaster1 yum.repos.d]# systemctl status nfs
● nfs-server.service - NFS server and services
   Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; enabled; vendor preset: disabled)
  Drop-In: /run/systemd/generator/nfs-server.service.d
           └─order-with-mounts.conf
   Active: active (exited) since Mon 2023-03-13 12:20:00 CST; 49s ago
 Main PID: 61780 (code=exited, status=0/SUCCESS)
   CGroup: /system.slice/nfs-server.service
Mar 13 12:20:00 xksmaster1 systemd[1]: Starting NFS server and services...
Mar 13 12:20:00 xksmaster1 systemd[1]: Started NFS server and services.
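The line added to /etc/exports in the vim step above is not shown here; judging from the exportfs output and the cat /etc/exports that appears later in the PV section, the entry is presumably:

/data/volumes 192.168.19.0/24(rw,no_root_squash)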
On each node, install the NFS client: yum install nfs-utils -y
[root@xksnode1 yum.repos.d]# systemctl enable nfs
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
[root@xksnode1 yum.repos.d]# mkdir /test
[root@xksnode1 yum.repos.d]# mount 192.168.19.180:/data/volumes /test/
[root@xksnode1 yum.repos.d]# df -h
Filesystem                     Size  Used Avail Use% Mounted on
/dev/mapper/centos-root         27G  7.1G   20G  27% /
devtmpfs                       2.0G     0  2.0G   0% /dev
tmpfs                          2.0G     0  2.0G   0% /dev/shm
tmpfs                          2.0G   14M  2.0G   1% /run
tmpfs                          2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/sda1                     1014M  133M  882M  14% /boot
tmpfs                          394M     0  394M   0% /run/user/0
/dev/sr0                       4.3G  4.3G     0 100% /mnt/cdrom
192.168.19.180:/data/volumes    27G   12G   16G  42% /test
vim nfs.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-nfs-volume
spec:
  containers:
  - name: test-nfs
    image: nginx
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
      protocol: TCP
    volumeMounts:
    - name: nfs-volumes
      mountPath: /usr/share/nginx/html
  volumes:
  - name: nfs-volumes
    nfs:
      path: /data/volumes
      server: 192.168.19.180
# Apply the manifest
[root@xksmaster1 ~]# kubectl apply -f nfs.yaml
# Check that the pod was created successfully
[root@xksmaster1 volumes]# kubectl get pods -o wide | grep nfs
test-nfs-volume 1/1 Running 10.244.187.108 xksnode1
# Log in to the NFS server and create an index.html in the shared directory
[root@xksmaster1 volumes]# pwd
/data/volumes
[root@xksmaster1 volumes]# cat index.html
0319-xks
# Request the pod and check the result
[root@xksmaster1 volumes]# curl 10.244.187.108
0319-xks
# The index.html created in the shared directory has been mounted into the pod. Log in to the pod to verify:
[root@xksmaster1 volumes]# kubectl exec -it test-nfs-volume -- /bin/bash
root@test-nfs-volume:/# cat /usr/share/nginx/html/index.html
0319-xks
# The NFS volume was mounted successfully. NFS supports multiple clients, so you can create several pods that all mount the same directory exported by the NFS server (see the sketch below). However, the NFS server is a single point of failure: if it goes down, the data becomes unavailable, which is why distributed storage such as glusterfs or cephfs is commonly used instead.
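As a minimal sketch of the multi-client point above (the pod name is illustrative, not from the original), a second pod can mount the same NFS export and will serve the same index.html:

apiVersion: v1
kind: Pod
metadata:
  name: test-nfs-volume-2      # hypothetical second NFS client pod
spec:
  containers:
  - name: test-nfs
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: nfs-volumes
      mountPath: /usr/share/nginx/html
  volumes:
  - name: nfs-volumes
    nfs:
      path: /data/volumes      # same export as test-nfs-volume
      server: 192.168.19.180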
PV and PVC
A PersistentVolume (PV) is a piece of cluster storage provisioned by an administrator (or dynamically by a StorageClass); a PersistentVolumeClaim (PVC) is a user's request for storage that binds to a matching PV and is then mounted into pods.
Static provisioning
Create a pod that uses a PVC as its persistent storage volume.
1. Create the NFS shared directories
# Create the directories to be shared over NFS on the host
[root@xksmaster1 ~]# mkdir /data/volume_test/v{1,2,3,4,5,6,7,8,9,10} -p
# Export /data/volume_test/v1..v10 on the host via NFS
[root@xksmaster1 ~]# cat /etc/exports
/data/volumes 192.168.19.0/24(rw,no_root_squash)
/data/volume_test/v1 192.168.19.0/24(rw,no_root_squash)
/data/volume_test/v2 192.168.19.0/24(rw,no_root_squash)
/data/volume_test/v3 192.168.19.0/24(rw,no_root_squash)
/data/volume_test/v4 192.168.19.0/24(rw,no_root_squash)
/data/volume_test/v5 192.168.19.0/24(rw,no_root_squash)
/data/volume_test/v6 192.168.19.0/24(rw,no_root_squash)
/data/volume_test/v7 192.168.19.0/24(rw,no_root_squash)
/data/volume_test/v8 192.168.19.0/24(rw,no_root_squash)
/data/volume_test/v9 192.168.19.0/24(rw,no_root_squash)
/data/volume_test/v10 192.168.19.0/24(rw,no_root_squash)
# Reload the configuration so it takes effect
[root@xksmaster1 ~]# exportfs -arv
vim pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: v1
spec:
  capacity:
    storage: 1Gi                    # storage capacity of the PV
  accessModes: ["ReadWriteOnce"]
  nfs:
    path: /data/volume_test/v1      # expose this NFS export as a PV
    server: 192.168.19.180          # address of the NFS server
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: v2
spec:
  persistentVolumeReclaimPolicy: Delete
  capacity:
    storage: 2Gi
  accessModes: ["ReadWriteMany"]
  nfs:
    path: /data/volume_test/v2
    server: 192.168.19.180
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: v3
spec:
  capacity:
    storage: 3Gi
  accessModes: ["ReadOnlyMany"]
  nfs:
    path: /data/volume_test/v3
    server: 192.168.19.180
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: v4
spec:
  capacity:
    storage: 4Gi
  accessModes: ["ReadWriteOnce","ReadWriteMany"]
  nfs:
    path: /data/volume_test/v4
    server: 192.168.19.180
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: v5
spec:
  capacity:
    storage: 5Gi
  accessModes: ["ReadWriteOnce","ReadWriteMany"]
  nfs:
    path: /data/volume_test/v5
    server: 192.168.19.180
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: v6
spec:
  capacity:
    storage: 6Gi
  accessModes: ["ReadWriteOnce","ReadWriteMany"]
  nfs:
    path: /data/volume_test/v6
    server: 192.168.19.180
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: v7
spec:
  capacity:
    storage: 7Gi
  accessModes: ["ReadWriteOnce","ReadWriteMany"]
  nfs:
    path: /data/volume_test/v7
    server: 192.168.19.180
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: v8
spec:
  capacity:
    storage: 8Gi
  accessModes: ["ReadWriteOnce","ReadWriteMany"]
  nfs:
    path: /data/volume_test/v8
    server: 192.168.19.180
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: v9
spec:
  capacity:
    storage: 9Gi
  accessModes: ["ReadWriteOnce","ReadWriteMany"]
  nfs:
    path: /data/volume_test/v9
    server: 192.168.19.180
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: v10
spec:
  capacity:
    storage: 10Gi
  accessModes: ["ReadWriteOnce","ReadWriteMany"]
  nfs:
    path: /data/volume_test/v10
    server: 192.168.19.180
# Apply the manifest
[root@xksmaster1 ~]# kubectl apply -f pv.yaml
persistentvolume/v1 created
persistentvolume/v2 created
persistentvolume/v3 created
persistentvolume/v4 created
persistentvolume/v5 created
persistentvolume/v6 created
persistentvolume/v7 created
persistentvolume/v8 created
persistentvolume/v9 created
persistentvolume/v10 created
# Check the PV resources
[root@xksmaster1 ~]# kubectl get pv
NAME  CAPACITY  ACCESS MODES  RECLAIM POLICY  STATUS
v1    1Gi       RWO           Retain          Available
v10   10Gi      RWO,RWX       Retain          Available
v2    2Gi       RWX           Retain          Available
v3    3Gi       ROX           Retain          Available
v4    4Gi       RWO,RWX       Retain          Available
v5    5Gi       RWO,RWX       Retain          Available
v6    6Gi       RWO,RWX       Retain          Available
v7    7Gi       RWO,RWX       Retain          Available
v8    8Gi       RWO,RWX       Retain          Available
v9    9Gi       RWO,RWX       Retain          Available
# STATUS Available means the PV can be bound. In the ACCESS MODES column, RWO = ReadWriteOnce, RWX = ReadWriteMany, ROX = ReadOnlyMany.
4. Create a PVC and bind it to a matching PV
[root@xksmaster1 ~]# cat pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-pvc
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 2Gi
# Apply the manifest
[root@xksmaster1 ~]# kubectl apply -f pvc.yaml
persistentvolumeclaim/my-pvc created
# Check the PVs and the PVC
[root@xksmaster1 ~]# kubectl get pv
NAME  CAPACITY  ACCESS MODES  RECLAIM POLICY  STATUS     CLAIM
v1    1Gi       RWO           Retain          Available
v10   10Gi      RWO,RWX       Retain          Available
v2    2Gi       RWX           Retain          Bound      default/my-pvc
v3    3Gi       ROX           Retain          Available
v4    4Gi       RWO,RWX       Retain          Available
v5    5Gi       RWO,RWX       Retain          Available
v6    6Gi       RWO,RWX       Retain          Available
v7    7Gi       RWO,RWX       Retain          Available
v8    8Gi       RWO,RWX       Retain          Available
v9    9Gi       RWO,RWX       Retain          Available
# STATUS Bound means the PV v2 has been bound by my-pvc
[root@xksmaster1 ~]# kubectl get pvc
NAME    STATUS  VOLUME  CAPACITY  ACCESS MODES
my-pvc  Bound   v2      2Gi       RWX
# The PVC named my-pvc is bound to the PV v2; the capacity usable through the PVC is 2Gi.
5. Create a pod that mounts the PVC
vim pod_pvc.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-pvc
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: nginx-html
      mountPath: /usr/share/nginx/html
  volumes:
  - name: nginx-html
    persistentVolumeClaim:
      claimName: my-pvc
# Apply the manifest
[root@xksmaster1 ~]# kubectl apply -f pod_pvc.yaml
pod/pod-pvc created
# Check the pod status
[root@xksmaster1 ~]# kubectl get pods | grep pod-pvc
pod-pvc 1/1 Running 0 27s
# The pod is in the Running state and working normally.
Notes on using PVCs and PVs:
1. With static provisioning, every PVC needs a suitable PV carved out in advance, which can be inconvenient. A StorageClass can instead create a PV dynamically at the moment the PVC is created, so the PV does not have to exist beforehand.
2. When a PVC is bound to a PV with the default Retain reclaim policy, deleting the PVC leaves the PV in the Released state. To reuse that PV you have to delete it manually (kubectl delete pv pv_name). Deleting the PV object does not delete the data stored in it, and when you recreate the PVC it will bind to the best-matching PV again, so the original data is not lost.
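An alternative to deleting and recreating the PV (a common shortcut, not part of the original walkthrough): clearing the stale claimRef on a Released PV puts it back into the Available state so a new PVC can bind to it, for example:

kubectl patch pv v2 -p '{"spec":{"claimRef":null}}'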
Dynamic provisioning: the k8s StorageClass
Summary of the steps:
1. Provisioner: create an NFS provisioner
2. Create a StorageClass that points at the provisioner created in step 1
3. Create a PVC that specifies this StorageClass
4. Use the PVC in a pod

1. Install the NFS provisioner, which works with the StorageClass to generate PVs dynamically
# Upload nfs-subdir-external-provisioner.tar.gz to xksnode1 and load the image manually
[root@xksnode1 ~]# docker load -i nfs-subdir-external-provisioner.tar.gz
1) Create the service account (sa) that nfs-provisioner will run as
[root@xksmaster1 nfs]# cat serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
[root@xksmaster1 nfs]# kubectl apply -f serviceaccount.yaml
serviceaccount/nfs-provisioner created
Background: what is a sa? sa is short for ServiceAccount. A ServiceAccount exists so that processes inside a Pod can conveniently call the Kubernetes API or other external services. Once a Pod is created with a ServiceAccount specified, the Pod runs with the permissions granted to that account.
2) Grant permissions to the sa
[root@xksmaster1]# kubectl create clusterrolebinding nfs-provisioner-clusterrolebinding --clusterrole=cluster-admin --serviceaccount=default:nfs-provisioner
3) Install the nfs-provisioner program
[root@xksmaster1 ~]# mkdir /data/nfs_pro -p
# Export /data/nfs_pro over NFS
[root@xksmaster1 ~]# cat /etc/exports
/data/nfs_pro 192.168.19.0/24(rw,no_root_squash)
[root@xksmaster1 ~]# exportfs -arv
vim nfs-deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      serviceAccount: nfs-provisioner
      containers:
      - name: nfs-provisioner
        image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        - name: PROVISIONER_NAME
          value: example.com/nfs
        - name: NFS_SERVER
          value: 192.168.19.180
        - name: NFS_PATH
          value: /data/nfs_pro/
      volumes:
      - name: nfs-client-root
        nfs:
          server: 192.168.19.180
          path: /data/nfs_pro/
# Apply the manifest
[root@xksmaster1 ~]# kubectl apply -f nfs-deployment.yaml
deployment.apps/nfs-provisioner created
# Check that nfs-provisioner is running
[root@xksmaster1 nfs]# kubectl get pods | grep nfs
nfs-provisioner-cd5589cfc-pjwsq 1/1 Running
2. Create the StorageClass that will provision PVs dynamically
[root@xksmaster1]# cat nfs-storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs
provisioner: example.com/nfs
[root@xksmaster1]# kubectl apply -f nfs-storageclass.yaml
# Check that the StorageClass was created
[root@xksmaster1 nfs]# kubectl get storageclass
NAME  PROVISIONER      RECLAIMPOLICY  VOLUMEBINDINGMODE
nfs   example.com/nfs  Delete         Immediate
# Output like the above means the StorageClass was created successfully.
Note: the provisioner value example.com/nfs must match the value of the PROVISIONER_NAME env variable set when installing the nfs provisioner, i.e.:
env:
- name: PROVISIONER_NAME
  value: example.com/nfs
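Optionally (standard Kubernetes behaviour, not part of the original steps), the class can be marked as the cluster default so that PVCs without an explicit storageClassName also use it:

kubectl patch storageclass nfs -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'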
3. Create a PVC and have the StorageClass generate a PV dynamically
[root@xksmaster1]# vim claim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim1
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs
[root@xksmaster1]# kubectl apply -f claim.yaml
persistentvolumeclaim/test-claim1 created
# Check that a PV was generated dynamically and that the PVC was created and bound to it
[root@xksmaster1 nfs]# kubectl get pvc
4. Create a pod that mounts test-claim1, the PVC backed by the dynamically generated PV
[root@xksmaster1]# cat read-pod.yaml
kind: Pod
apiVersion: v1
metadata:
  name: read-pod
spec:
  containers:
  - name: read-pod
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: nfs-pvc
      mountPath: /usr/share/nginx/html
  restartPolicy: "Never"
  volumes:
  - name: nfs-pvc
    persistentVolumeClaim:
      claimName: test-claim1
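A quick way to verify the dynamic volume end to end (the file name is illustrative; the subdirectory name is whatever the provisioner generated under /data/nfs_pro, so a wildcard is used here):

# Write a file through the pod...
kubectl exec read-pod -- /bin/sh -c 'echo storageclass-test > /usr/share/nginx/html/index.html'
# ...then, on the NFS server, confirm it appears in the provisioner-created subdirectory
ls /data/nfs_pro/
cat /data/nfs_pro/*/index.html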

