Mounting Storage Volumes in a Pod


Introduction to Volumes

A volume decouples the data under a given directory in a container from the container itself: the data is written to a designated mount point on shared storage. What a volume can do depends on its storage backend, but every volume backed by network shared storage can persist data across container restarts and even share data between containers.

Commonly used volume types:

ConfigMap

  • A ConfigMap decouples non-confidential information (such as configuration data) from the image: the configuration is stored in a ConfigMap object and then mounted into the Pod as a volume, which imports the configuration into the containers.
  • Typical use cases:
    1. Provide configuration files to the containers in a Pod; the files are consumed as volume mounts.
    2. Define environment variables for a Pod (rarely used).
    3. Pass command-line arguments to a Pod; for example, the username and password for mysql -u -p can be supplied through a ConfigMap.
  • Notes:
    1. A ConfigMap must be created before the Pod that uses it (a quick imperative example follows this list).
    2. A Pod can only use ConfigMaps in its own namespace; a ConfigMap cannot be used across namespaces.
    3. ConfigMaps are meant for non-sensitive, unencrypted configuration.
    4. A ConfigMap is normally smaller than 1MB.
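
Besides defining it in YAML as in the case below, a ConfigMap can also be created imperatively; a minimal sketch (the names demo-config, demo-files, and app.conf are placeholders, not part of the case that follows):

# Create a ConfigMap from literal key/value pairs
kubectl create configmap demo-config --from-literal=log_level=info
# Or from an existing file; the file name becomes the key under .data
kubectl create configmap demo-files --from-file=app.conf
# Inspect the stored data
kubectl get configmap demo-config -o yaml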

Usage example (important)

1. Define the nginx configuration in a ConfigMap and load it

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  mysite: |
    server {
       listen       80;
       server_name  www.mysite.com;
       index        index.html index.php index.htm;

       location / {
           root /data/nginx/mysite;
           if (!-e $request_filename) {
               rewrite ^/(.*) /index.html last;
           }
       }
    }

  myserver: |
    server {
       listen       80;
       server_name  www.myserver.com;
       index        index.html index.php index.htm;

       location / {
           root /data/nginx/myserver;
           if (!-e $request_filename) {
               rewrite ^/(.*) /index.html last;
           }
       }
    }  

---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-nginx-selector
  template:
    metadata:
      labels:
        app: myserver-nginx-selector
    spec:
      containers:
      - name: myserver-nginx-container
        image: harbor.yourdomainname/myserver/nginx:1.22.0-alpine 
        ports:
          - containerPort: 80
            name: http
            protocol: TCP
          - containerPort: 443
            name: https
            protocol: TCP
        volumeMounts:
        - mountPath: /data/nginx/mysite
          name: nginx-mysite-statics
          readOnly: true
        - mountPath: /data/nginx/myserver
          name: nginx-myserver-statics
          readOnly: true
        - name: nginx-mysite-config
          mountPath:  /etc/nginx/conf.d/mysite/
          readOnly: true
        - name: nginx-myserver-config
          mountPath:  /etc/nginx/conf.d/myserver/
          readOnly: true
        - mountPath: /etc/localtime
          name: timefile
          readOnly: true
      volumes:
      - name: nginx-mysite-config
        configMap:
          name: nginx-config
          items:
             - key: mysite
               path: mysite.conf
      - name: nginx-myserver-config
        configMap:
          name: nginx-config
          items:
             - key: myserver
               path: myserver.conf
      - name: nginx-myserver-statics
        nfs:
          server: 172.31.7.109
          path: /data/k8sdata/myserver
      - name: nginx-mysite-statics
        nfs:
          server: 172.31.7.109
          path: /data/k8sdata/mysite
      - name: timefile
        hostPath:
          path: /etc/localtime
          type: File

---
apiVersion: v1
kind: Service
metadata:
  name: myserver-nginx-selector
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30019
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-nginx-selector

2. Exec into the Pod

root@deploy01:~# kubectl exec -it nginx-deployment-77d8d6c459-gvfx2 -- sh
# df shows the mounted volumes
/ # df
Filesystem           1K-blocks      Used Available Use% Mounted on
overlay              207512580  13439116 194073464   6% /
tmpfs                    65536         0     65536   0% /dev
/dev/mapper/ubuntu--vg-ubuntu--lv
                     207512580  13439116 194073464   6% /etc/localtime
/dev/mapper/ubuntu--vg-ubuntu--lv
                     207512580  13439116 194073464   6% /etc/hosts
/dev/mapper/ubuntu--vg-ubuntu--lv
                     207512580  13439116 194073464   6% /dev/termination-log
/dev/mapper/ubuntu--vg-ubuntu--lv
                     207512580  13439116 194073464   6% /etc/hostname
/dev/mapper/ubuntu--vg-ubuntu--lv
                     207512580  13439116 194073464   6% /etc/resolv.conf
shm                      65536         0     65536   0% /dev/shm
172.31.7.109:/data/k8sdata/mysite
                     207512832  16558080 190954752   8% /data/nginx/mysite
172.31.7.109:/data/k8sdata/myserver
                     207512832  16558080 190954752   8% /data/nginx/myserver
/dev/mapper/ubuntu--vg-ubuntu--lv
                     207512580  13439116 194073464   6% /etc/nginx/conf.d/mysite
/dev/mapper/ubuntu--vg-ubuntu--lv
                     207512580  13439116 194073464   6% /etc/nginx/conf.d/myserver
tmpfs                  1658716        12   1658704   0% /run/secrets/kubernetes.io/serviceaccount
tmpfs                   982956         0    982956   0% /proc/acpi
tmpfs                    65536         0     65536   0% /proc/interrupts
tmpfs                    65536         0     65536   0% /proc/kcore
tmpfs                    65536         0     65536   0% /proc/keys
tmpfs                    65536         0     65536   0% /proc/latency_stats
tmpfs                    65536         0     65536   0% /proc/timer_list
tmpfs                   982956         0    982956   0% /proc/scsi
tmpfs                   982956         0    982956   0% /sys/firmware
# cat shows the nginx configuration files mounted from the ConfigMap
/ # cat /etc/nginx/conf.d/myserver/myserver.conf 
server {
   listen       80;
   server_name  www.myserver.com;
   index        index.html index.php index.htm;

   location / {
       root /data/nginx/myserver;
       if (!-e $request_filename) {
           rewrite ^/(.*) /index.html last;
       }
   }
}  
/ # cat /etc/nginx/conf.d/mysite/mysite.conf 
server {
   listen       80;
   server_name  www.mysite.com;
   index        index.html index.php index.htm;

   location / {
       root /data/nginx/mysite;
       if (!-e $request_filename) {
           rewrite ^/(.*) /index.html last;
       }
   }
}
# Test the configuration
/etc/nginx # nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
# Reload the configuration
/etc/nginx # nginx -s reload
2026/01/29 16:25:02 [notice] 53#53: signal process started
# Prepare test pages on the NFS server
root@ha01:/data# echo "This is MyServer" >> k8sdata/myserver/index.html
root@ha01:/data# echo "This is MySite" >> k8sdata/mysite/index.html
# Edit the load balancer configuration: the virtual IP forwards requests to the specified node port
# Only one node was up during this test; in a real deployment add an entry per cluster node
listen myserver-nginx
        bind 172.31.7.189:80
        mode tcp
        server node01 172.31.7.111:30019 check inter 3s fall 3 rise 5
# Restart HAProxy
root@ha01:/data# systemctl restart haproxy.service
# Edit the local hosts file:
# resolve www.myserver.com and www.mysite.com to 172.31.7.189
# Then test in a browser

Secret

  • A Secret, like a ConfigMap, provides additional configuration to a Pod, but a Secret is an object intended for small amounts of sensitive data such as passwords, tokens, or keys.
  • The name of a Secret must be a valid DNS subdomain.
  • Each Secret is limited to 1MiB, mainly to keep very large Secrets from exhausting the memory of the API server and the kubelet.
  • Creating many small Secrets can also exhaust memory; resource quotas can limit the number of Secrets per namespace.
  • When creating a Secret from a YAML file you may set the data and/or stringData fields; both are optional. Every value under data must be a base64-encoded string. If you do not want to perform that base64 conversion yourself, use stringData instead, whose values can be arbitrary plain strings.

apiVersion: v1
kind: Secret
metadata:
  name: mysecret
  namespace: myserver
type: Opaque
data:
  superuser: YWRtaW4K
  password: MTIzNDU2Cg==

  • A Pod can consume a Secret in any of three ways (a small imperative example follows this list):
    1. As files in a volume mounted into one or more containers (e.g. .crt and .key files).
    2. As container environment variables.
    3. By the kubelet when pulling images for the Pod (authentication against the image registry).
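
For reference, the same Secret can be created imperatively, and the base64 values under data can be produced or decoded by hand; a minimal sketch:

# kubectl performs the base64 encoding itself
kubectl create secret generic mysecret -n myserver \
  --from-literal=superuser=admin --from-literal=password=123456
# Produce the encoded values manually; echo without -n appends a newline,
# which is why the values in the YAML above end with an encoded newline
echo admin | base64        # YWRtaW4K
echo -n admin | base64     # YWRtaW4=
# Decode a stored value to verify it
kubectl get secret mysecret -n myserver -o jsonpath='{.data.superuser}' | base64 -d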

Example: providing a TLS certificate to nginx with a Secret (important)

1. Create a self-signed certificate

root@deploy01:~# mkdir  certs
root@deploy01:~# cd certs/
root@deploy01:~/certs# openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 3560 -nodes -subj '/CN=www.ca.com'
root@deploy01:~/certs# openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=www.mysite.com'
root@deploy01:~/certs# openssl x509 -req -sha256 -days 3650 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
root@deploy01:~/certs# kubectl  create secret tls  myserver-tls-key  --cert=./server.crt --key=./server.key  -n myserver 
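
The resulting Secret can be checked before it is mounted, for example:

# A kubernetes.io/tls Secret always carries the keys tls.crt and tls.key
kubectl -n myserver describe secret myserver-tls-key
kubectl -n myserver get secret myserver-tls-key -o yaml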

2. Create the nginx service and use the certificate

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
  namespace: myserver
data:
  default: |
    server {
       listen       80;
       server_name  www.mysite.com;
       listen 443 ssl;
       ssl_certificate /etc/nginx/conf.d/certs/tls.crt;
       ssl_certificate_key /etc/nginx/conf.d/certs/tls.key;

       location / {
           root /usr/share/nginx/html; 
           index index.html;
           if ($scheme = http ){  #without this check the rewrite would redirect in an endless loop
              rewrite / https://www.mysite.com permanent;
           }  

           if (!-e $request_filename) {
               rewrite ^/(.*) /index.html last;
           }
       }
    }

---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
      - name: myserver-myapp-frontend
        image: harbor.yourdomainname/myserver/nginx:1.20.2-alpine
        ports:
          - containerPort: 80
            name: http
            protocol: TCP
          - containerPort: 443
            name: https
            protocol: TCP
        volumeMounts:
          - name: nginx-config
            mountPath:  /etc/nginx/conf.d/myserver
          - name: myserver-tls-key
            mountPath:  /etc/nginx/conf.d/certs
      volumes:
      - name: nginx-config
        configMap:
          name: nginx-config
          items:
             - key: default
               path: mysite.conf
      - name: myserver-tls-key
        secret:
          secretName: myserver-tls-key 
---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30018
    protocol: TCP
  - name: https
    port: 443
    targetPort: 443
    nodePort: 30019
    protocol: TCP
  selector:
    app: myserver-myapp-frontend 

3. Configure the load balancer to forward requests to the NodePorts, then restart HAProxy

listen myserver-nginx-80
        bind 172.31.7.189:80
        mode tcp
        server node01 172.31.7.111:30018 check inter 3s fall 3 rise 5

listen myserver-nginx-443
        bind 172.31.7.189:443
        mode tcp
        server node01 172.31.7.111:30019 check inter 3s fall 3 rise 5

4. Exec into the Pod, check the configuration, and reload nginx

root@deploy01:~/certs# kubectl exec -it -n myserver myserver-myapp-frontend-deployment-d48668b78-79c8h -- sh
# Check the configuration files
/ # ls /etc/nginx/conf.d/myserver/*.conf
/etc/nginx/conf.d/myserver/mysite.conf
# Check the certificate files
/ # ls /etc/nginx/conf.d/certs/
tls.crt  tls.key
# Include the config file path in the main nginx configuration
/ # vi /etc/nginx/nginx.conf 
include /etc/nginx/conf.d/myserver/*.conf;
# Reload nginx
/ # nginx -s reload
2026/01/29 09:31:17 [notice] 42#42: signal process started
# Check the listening ports
/ # netstat -lantp
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      1/nginx: master pro
tcp        0      0 0.0.0.0:443             0.0.0.0:*               LISTEN      1/nginx: master pro
tcp        0      0 :::80                   :::*                    LISTEN      1/nginx: master pro

Open https://www.mysite.com (the server_name configured above) in a browser to complete the verification.
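
If no browser is handy, the same check can be done from the command line; --resolve pins the hostname to the HAProxy VIP, and -k skips verification because the certificate is self-signed:

# HTTP should redirect to HTTPS, HTTPS should return the page
curl -I  --resolve www.mysite.com:80:172.31.7.189  http://www.mysite.com/
curl -vk --resolve www.mysite.com:443:172.31.7.189 https://www.mysite.com/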

emptyDir

  • An emptyDir volume is created when a Pod is assigned to a node and exists for as long as that Pod runs on that node.
  • As the name suggests, an emptyDir volume starts out empty.
  • All containers in the Pod can read and write the same files in the emptyDir volume, even though it may be mounted at the same or different paths in each container.
  • When the Pod is removed from the node for any reason, the data in the emptyDir is deleted permanently (a minimal example follows this list).
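
A minimal sketch of two containers sharing one emptyDir volume (the Pod and container names are placeholders):

cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: emptydir-demo
spec:
  containers:
  - name: writer
    image: busybox
    command: ["sh", "-c", "echo hello > /cache/data.txt && sleep 3600"]
    volumeMounts:
    - name: cache
      mountPath: /cache
  - name: reader
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: cache
      mountPath: /shared      # same volume, different mount path
  volumes:
  - name: cache
    emptyDir: {}
EOF
# the file written by the first container is visible to the second
kubectl exec emptydir-demo -c reader -- cat /shared/data.txt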

hostPath

  • A hostPath volume mounts a file or directory from the host node's filesystem into the Pod.
  • The volume (and its data on the node) is not removed when the Pod is deleted (a minimal example follows this list).
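
A minimal sketch of a hostPath volume (the path and type here are illustrative; the Deployment earlier already mounts /etc/localtime with type: File):

cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-demo
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: host-log
      mountPath: /host-log
  volumes:
  - name: host-log
    hostPath:
      path: /var/log          # directory on whichever node the Pod lands on
      type: Directory         # fail if the path does not already exist
EOF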

Mounting NFS in a Pod (important)

  • An nfs volume mounts an existing NFS (Network File System) share into containers. Unlike emptyDir, the contents of an nfs volume are not lost when the Pod is deleted; the volume is simply unmounted.
  • Data can be uploaded to the share ahead of time and used as soon as the Pod starts, and the same share can be mounted and read/written by multiple Pods at once, so the data can be shared between Pods (node prerequisites are sketched after this list).
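
Every node that may run such a Pod needs the NFS client utilities, and the export must be reachable; a quick check (assuming Ubuntu nodes, as in this environment):

# on every worker node
apt install -y nfs-common
# list the exports published by the NFS server
showmount -e 172.31.7.109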

Example 1: multiple Pods mounting the same NFS share

#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-nginx-selector
  template:
    metadata:
      labels:
        app: myserver-nginx-selector
    spec:
      containers:
      - name: myserver-nginx-container
        image: harbor.yourdomainname/myserver/nginx:1.22.0-alpine 
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
          protocol: TCP
        volumeMounts:
        - mountPath: /usr/share/nginx/html/images
          name: my-nfs-volume
      volumes:
      - name: my-nfs-volume
        nfs:
          server: 172.31.7.109
          path: /data/k8sdata/images

---
apiVersion: v1
kind: Service
metadata:
  name: myserver-nginx-selector
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30016
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-nginx-selector

Test the mount by requesting files under the images directory of the nginx service (the images must be uploaded to the NFS share beforehand).
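
A command-line spot check through the NodePort; test.jpg is a placeholder for whatever file was placed under /data/k8sdata/images:

curl -I http://172.31.7.111:30016/images/test.jpg   # expect HTTP/1.1 200 OK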

Example 2: each Pod mounting multiple NFS shares

#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-site2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-nginx-selector
  template:
    metadata:
      labels:
        app: myserver-nginx-selector
    spec:
      containers:
      - name: myserver-nginx-container
        image: harbor.yourdomainname/myserver/nginx:1.22.0-alpine 
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
          protocol: TCP
        volumeMounts:
        - mountPath: /usr/share/nginx/html/pool1
          name: my-nfs-volume-pool1
        - mountPath: /usr/share/nginx/html/pool2
          name: my-nfs-volume-pool2
        - mountPath: /etc/localtime
          name: timefile
          readOnly: true
      volumes:
      - name: my-nfs-volume-pool1
        nfs:
          server: 172.31.7.109
          path: /data/k8sdata/pool1
          readOnly: true
      - name: my-nfs-volume-pool2
        nfs:
          server: 172.31.7.109
          path: /data/k8sdata/pool2
      - name: timefile
        hostPath:
          path: /etc/localtime
---
apiVersion: v1
kind: Service
metadata:
  name: myserver-nginx-selector
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30017
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-nginx-selector

#Prepare two test pages on the NFS server
root@ha01:/data/k8sdata# echo "This is pool1" > pool1/index.html
root@ha01:/data/k8sdata# ls pool1
index.html
root@ha01:/data/k8sdata# echo "This is pool2" > pool2/index.html
#After the Pod starts, test by requesting pool1 and pool2 under the nginx service

PV/PVC

PV overview

  • A PersistentVolume (PV) is a piece of network storage that has been provisioned in the cluster by a Kubernetes administrator. It is a cluster-level resource and does not belong to any namespace.
  • The data in a PV is ultimately stored on the underlying hardware storage.
  • A Pod cannot mount a PV directly; the PV must be bound to a PVC, and the Pod then mounts the PVC.
  • PVs support NFS, Ceph, commercial storage arrays, cloud-provider-specific storage, and more.
  • The PV's type (block or file), capacity, access modes, and so on can all be specified.
  • A PV's lifecycle is independent of Pods: deleting a Pod that uses the PV does not affect the data in it.

PVC overview

  • A PersistentVolumeClaim (PVC) is a Pod's request for storage.
  • A Pod mounts a PVC and stores data through it; the PVC must be bound to a PV before it can be used.
  • A PVC is created in a specific namespace, and the Pod must run in the same namespace as the PVC.
  • A PVC can request a specific capacity and access mode.
  • Deleting a Pod that uses a PVC does not affect the data in the PVC.

PV/PVC summary

  • A PV is an abstraction over the underlying network storage: it defines the storage as a resource that can be split into pieces and handed out to different workloads.
  • A PVC is a claim on PV resources: the Pod saves data through the PVC to the PV, and the PV writes it to the actual hardware storage.

Provisioning types

  • static: the PV is created manually in advance, a PVC is then created and bound to it, and finally the PVC is mounted into the Pod; suitable when PVs and PVCs are relatively fixed.
  • dynamic: a StorageClass is created first, and later PVs are provisioned automatically whenever a PVC requests that class; suitable for stateful clusters such as a MySQL primary with replicas or a ZooKeeper ensemble.

Using a static PV and PVC (important)

Prepare the NFS storage:

~# mkdir  /data/k8sdata/myserver/myappdata -p
~# vim /etc/exports
/data/k8sdata/myserver/myappdata *(rw,no_root_squash)
~# systemctl  restart  nfs-server && systemctl  enable nfs-server
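
Confirm that the new export is actually published, for example:

~# exportfs -v | grep myappdata
~# showmount -e 172.31.7.109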

1. Create the PV:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: myserver-myapp-static-pv
spec:
  capacity:
    storage: 10Gi
  persistentVolumeReclaimPolicy: Retain # the default for manually created PVs, see https://kubernetes.io/zh-cn/docs/concepts/storage/persistent-volumes/#reclaim-policy
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/myserver/myappdata
    server: 172.31.7.109
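
After applying the manifest (the file name below is illustrative), the PV should show as Available until a PVC claims it:

~# kubectl apply -f myserver-myapp-static-pv.yaml
~# kubectl get pv myserver-myapp-static-pv
# NAME                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      ...
# myserver-myapp-static-pv   10Gi       RWO            Retain           Available   ...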

2. Create the PVC:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myserver-myapp-static-pvc
  namespace: myserver
spec:
  volumeName: myserver-myapp-static-pv
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
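
Once created, the PVC should bind to the PV defined above:

~# kubectl get pvc -n myserver myserver-myapp-static-pvc
# NAME                        STATUS   VOLUME                     CAPACITY   ACCESS MODES   ...
# myserver-myapp-static-pvc   Bound    myserver-myapp-static-pv   10Gi       RWO            ...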

3. Deploy the web service

kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-myapp 
  name: myserver-myapp-deployment-name
  namespace: myserver
spec:
  replicas: 1 
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
        - name: myserver-myapp-container
          image: harbor.yourdomainname/myserver/nginx:1.22.0-alpine 
          #imagePullPolicy: Always
          ports:
            - containerPort: 80
              name: http
              protocol: TCP
            - containerPort: 443
              name: https
              protocol: TCP
          volumeMounts:
          - mountPath: "/usr/share/nginx/html/statics"
            name: statics-datadir
      volumes:
        - name: statics-datadir
          persistentVolumeClaim:
            claimName: myserver-myapp-static-pvc 

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-myapp-service
  name: myserver-myapp-service-name
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30009
  selector:
    app: myserver-myapp-frontend

4. Generate some application data

# kubectl exec -it -n myserver myserver-myapp-deployment-name-6d59b7cc44-hj6pc -- sh
/ # cd /usr/share/nginx/html/statics
/usr/share/nginx/html/statics # echo "Static PV Test" > index.html
/usr/share/nginx/html/statics # exit

5. Verify the data on the NFS server:

~# ls /data/k8sdata/myserver/myappdata/

It can also be verified in a browser.
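
Or from the command line through the NodePort exposed above:

~# curl http://172.31.7.111:30009/statics/index.html
Static PV Test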

Using dynamic provisioning (important)

1. Create the RBAC objects

apiVersion: v1
kind: Namespace
metadata:
  name: nfs
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

2. Create the StorageClass (important):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # must match the PROVISIONER_NAME env var of the provisioner deployment
reclaimPolicy: Retain # reclaim policy for provisioned PVs; the default is Delete, which removes the data on the NFS server as soon as the PV is deleted
mountOptions:
  #- vers=4.1 #some mount options misbehave with containerd
  #- noresvport #tell the NFS client to use a new TCP source port when re-establishing the network connection
  - noatime #do not update the inode access time when files are read; improves performance under high concurrency
parameters:
  archiveOnDelete: "true"  #archive the data when the PVC is deleted; the PV switches to the Released state
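
Apply the manifest (the file name below is illustrative) and confirm the class exists:

~# kubectl apply -f storageclass.yaml
~# kubectl get storageclass nfs
# NAME   PROVISIONER                                    RECLAIMPOLICY   VOLUMEBINDINGMODE   ...
# nfs    k8s-sigs.io/nfs-subdir-external-provisioner    Retain          Immediate           ...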

3. Create the NFS provisioner:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
spec:
  replicas: 1
  strategy: #deployment strategy
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          #image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2 
          image: harbor.yourdomainname/myserver/nfs-subdir-external-provisioner:v4.0.2 
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 172.31.7.109
            - name: NFS_PATH
              value: /data/volumes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.31.7.109
            path: /data/volumes
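
The provisioner must be running before any PVC can be provisioned dynamically; check its Pod and logs:

~# kubectl -n nfs get pods -l app=nfs-client-provisioner
~# kubectl -n nfs logs deploy/nfs-client-provisioner --tail=20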

4. Create the PVC

# Test PVC
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myserver-myapp-dynamic-pvc
  namespace: myserver
spec:
  storageClassName: nfs #name of the StorageClass to use
  accessModes:
    - ReadWriteMany #access mode
  resources:
    requests:
      storage: 500Mi #requested capacity
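
After applying the PVC, the provisioner automatically creates a matching PV (named pvc-<uid>) and the PVC switches to Bound:

~# kubectl get pvc -n myserver myserver-myapp-dynamic-pvc
~# kubectl get pv | grep myserver-myapp-dynamic-pvc
# the provisioner also creates a per-PVC subdirectory under /data/volumes on the NFS server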

5. Create the web service

kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-myapp 
  name: myserver-myapp-deployment-name
  namespace: myserver
spec:
  replicas: 1 
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
        - name: myserver-myapp-container
          image: harbor.yourdomainname/myserver/nginx:1.22.0-alpine
          #imagePullPolicy: Always
          ports:
            - containerPort: 80
              name: http
              protocol: TCP
            - containerPort: 443
              name: https
              protocol: TCP
          volumeMounts:
          - mountPath: "/usr/share/nginx/html/statics"
            name: statics-datadir
      volumes:
        - name: statics-datadir
          persistentVolumeClaim:
            claimName: myserver-myapp-dynamic-pvc 

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-myapp-service
  name: myserver-myapp-service-name
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30010
  selector:
    app: myserver-myapp-frontend

Verify on the NFS server:

~# ll /data/volumes/ 