emptyDir, hostPath
SAN: iSCSI,...
NAS: nfs, cifs, http
分布式存储:
glusterfs, rbd, cephfs
云存储:
EBS, Azure Disk,
存储类:

gitRepo
emptyDir
pvc:


node01:
[root@node01 ~]# docker image ls REPOSITORY TAG IMAGE ID CREATED SIZE busybox latest 1c35c4412082 2 weeks ago 1.22MB redis 4.0-alpine e3dd0e49bca5 8 weeks ago 20.4MB nginx 1.14-alpine 8a2fb25a19f5 14 months ago 16MB tomcat 8.5.32-jre8-alpine 0ec1f56e761f 23 months ago 107MB k8s.gcr.io/kube-proxy-amd64 v1.11.1 d5c25579d0ff 23 months ago 97.8MB quay.io/kubernetes-ingress-controller/nginx-ingress-controller 0.17.1 8410cbcd825d 23 months ago 360MB quay.io/kubernetes-ingress-controller/nginx-ingress-controller 0.17.0 2952cea7d7f1 23 months ago 360MB ikubernetes/filebeat 5.6.6-alpine 6af2484d5393 2 years ago 21.5MB ikubernetes/filebeat 5.6.5-alpine 9b492593aa13 2 years ago 21.5MB ikubernetes/myapp v1 d4a5e0eaa84f 2 years ago 15.5MB ikubernetes/myapp v3 61f932bf5041 2 years ago 15.5MB ikubernetes/myapp v2 54202d3f0f35 2 years ago 15.5MB k8s.gcr.io/pause-amd64 3.1 da86e6ba6ca1 2 years ago 742kB k8s.gcr.io/pause 3.1 da86e6ba6ca1 2 years ago 742kB quay.io/coreos/flannel v0.9.1-amd64 2b736d06ca4c 2 years ago 51.3MB gcr.io/google_containers/defaultbackend 1.4 846921f0fe0e 2 years ago
master:
[root@master ~]# kubectl explain pods.spec
[root@master ~]# kubectl explain pods.spec.volumes
[root@master ~]# kubectl explain pods.spec.volumes.rbd
[root@master ~]# kubectl explain pods.spec.volumes.emptyDir
[root@master ~]# kubectl explain pods.spec.containers
[root@master ~]# kubectl explain pods.spec.containers.volumeMounts
[root@master ~]# cd manifests/
[root@master manifests]# mkdir volumes
[root@master manifests]# cd volumes/
[root@master volumes]# cp ../pod-demo.yaml .
[root@master volumes]# mv pod-demo.yaml pod-vol-demo.yaml
[root@master volumes]# vim pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
smoke.com/created-by: "cluster admin"
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /data/web/html/
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command:
- "/bin/sh"
- "-c"
- "$(date) >> /data/index.html"
volumes:
- name: html
emptyDir: {}
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
smoke.com/created-by: "cluster admin"
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /data/web/html/
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command:
- "/bin/sh"
- "-c"
- "$(date) >> /data/index.html"
volumes:
- name: html
emptyDir: {}
[root@master volumes]# kubectl apply -f pod-vol-demo.yaml
[root@master volumes]# kubectl apply -f pod-vol-demo.yaml
pod/pod-demo created
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 2d
pod-demo 1/2 CrashLoopBackOff 3 1m
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 23h
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 23h
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 23h
[root@master volumes]# kubectl describe pods pod-demo
Name: pod-demo
Namespace: default
Priority: 0
PriorityClassName: <none>
Node: node02.smoke.com/172.20.0.67
Start Time: Tue, 23 Jun 2020 21:22:00 +0800
Labels: app=myapp
tier=frontend
Annotations: kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{"smoke.com/created-by":"cluster admin"},"labels":{"app":"myapp","tier":"frontend"},
"name":"p...
smoke.com/created-by=cluster admin
Status: Running
IP: 10.244.2.35
Containers:
myapp:
Container ID: docker://7da7c649063b63d2105cbb0645ae506e15d2335a734b00455be0e99911d336a5
Image: ikubernetes/myapp:v1
Image ID: docker-pullable://ikubernetes/myapp@sha256:9c3dc30b5219788b2b8a4b065f548b922a34479577befb54b03330999d30d513
Port: 80/TCP
Host Port: 0/TCP
State: Running
Started: Tue, 23 Jun 2020 21:22:01 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/data/web/html/ from html (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-jrx89 (ro)
busybox:
Container ID: docker://20cac869a45f7f31cf5e90b5999da6c5cb2311ccaa271c887d83715d1df55bb5
Image: busybox:latest
Image ID: docker-pullable://busybox@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209
Port: <none>
Host Port: <none>
Command:
/bin/sh
-c
$(date) >> /data/index.html
State: Waiting
Reason: CrashLoopBackOff
Last State: Terminated
Reason: Error
Exit Code: 127
Started: Tue, 23 Jun 2020 21:22:46 +0800
Finished: Tue, 23 Jun 2020 21:22:46 +0800
Ready: False
Restart Count: 3
Environment: <none>
Mounts:
/data/ from html (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-jrx89 (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
html:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium:
default-token-jrx89:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-jrx89
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 1m default-scheduler Successfully assigned default/pod-demo to node02.smoke.com
Normal Pulled 1m kubelet, node02.smoke.com Container image "ikubernetes/myapp:v1" already present on machine
Normal Created 1m kubelet, node02.smoke.com Created container
Normal Started 1m kubelet, node02.smoke.com Started container
Normal Started 54s (x4 over 1m) kubelet, node02.smoke.com Started container
Warning BackOff 15s (x8 over 1m) kubelet, node02.smoke.com Back-off restarting failed container
Normal Pulled 1s (x5 over 1m) kubelet, node02.smoke.com Container image "busybox:latest" already present on machine
Normal Created 1s (x5 over 1m) kubelet, node02.smoke.com Created container
注:失败原因是 "$(date) >> /data/index.html" 先被 shell 展开,日期字符串本身被当作命令执行,该命令不存在,退出码 127,容器反复重启进入 CrashLoopBackOff。
[root@master volumes]# kubectl delete -f pod-vol-demo.yaml
[root@master volumes]# vim pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
smoke.com/created-by: "cluster admin"
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /data/web/html/
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command:
- "/bin/sh"
- "-c"
- "sleep 7200"
volumes:
- name: html
emptyDir: {}
[root@master volumes]# kubectl apply -f pod-vol-demo.yaml
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 2d
pod-demo 2/2 Running 0 13s
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 23h
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 23h
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 23h
[root@master volumes]# kubectl exec -it pod-demo -c busybox -- /bin/sh
[root@master volumes]# kubectl exec -it pod-demo -c busybox -- /bin/sh
/ # ls
bin data dev etc home proc root sys tmp usr var
/ # mount | grep data
/dev/mapper/centos-root on /data type xfs (rw,seclabel,relatime,attr2,inode64,noquota)
/ # echo $(date) >> /data/index.html
/ # cat /data/index.html
Tue Jun 23 13:30:43 UTC 2020
/ # echo $(date) >> /data/index.html
/ # cat /data/index.html
Tue Jun 23 13:30:43 UTC 2020
Tue Jun 23 13:30:48 UTC 2020
[root@master volumes]# kubectl exec -it pod-demo -c myapp -- /bin/sh #myapp和busybox容器存储卷是共享存储
/ # ls
bin data dev etc home lib media mnt proc root run sbin srv sys tmp usr var
/ # ls /data/web/html/
index.html
/ # cat /data/web/html/index.html
Tue Jun 23 13:30:43 UTC 2020
Tue Jun 23 13:30:48 UTC 2020
/ # exit
[root@master volumes]# kubectl delete -f pod-vol-demo.yaml
[root@master volumes]# vim pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
smoke.com/created-by: "cluster admin"
spec:
containers:
- name: httpd
image: busybox:latest
imagePullPolicy: IfNotPresent
command: ['/bin/httpd','-f','-h /data/web/html']
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /data/web/html/
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command:
- "/bin/sh"
- "-c"
- "while true; do echo $(date) >> /data/index.html; sleep 2; done"
volumes:
- name: html
emptyDir: {}
[root@master volumes]# kubectl apply -f pod-vol-demo.yaml
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 2d
pod-demo 1/2 CrashLoopBackOff 1 6s
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 23h
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 23h
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 23h
[root@master volumes]# vim pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
smoke.com/created-by: "cluster admin"
spec:
containers:
- name: httpd
image: busybox:latest
imagePullPolicy: IfNotPresent
command: ['/bin/httpd','-f','-h /data/web/html']
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /data/web/html/
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command: ['/bin/sh','-c','while true; do echo $(date) >> /data/index.html; sleep 2; done']
volumes:
- name: html
emptyDir: {}
[root@master volumes]# kubectl apply -f pod-vol-demo.yaml
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 2d
pod-demo 1/2 CrashLoopBackOff 7 12m
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 23h
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 23h
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 23h
[root@master volumes]# kubectl delete -f pod-vol-demo.yaml
[root@master volumes]# vim pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
smoke.com/created-by: "cluster admin"
spec:
containers:
- name: httpd
image: busybox:latest
imagePullPolicy: IfNotPresent
command: ['/bin/httpd','-f','-h /data/web/html']
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /data/web/html/
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command: ['/bin/sh','-c','while true; do echo $$(date) >> /data/index.html; sleep 2; done']
volumes:
- name: html
emptyDir: {}
[root@master volumes]# kubectl apply -f pod-vol-demo.yaml
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 2d
pod-demo 1/2 Error 2 17s
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 23h
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 23h
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 23h
[root@master volumes]# kubectl describe pods pod-demo
Name: pod-demo
Namespace: default
Priority: 0
PriorityClassName: <none>
Node: node02.smoke.com/172.20.0.67
Start Time: Tue, 23 Jun 2020 22:05:21 +0800
Labels: app=myapp
tier=frontend
Annotations: kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{"smoke.com/created-by":"cluster admin"},"labels":{"app":"myapp","tier":"frontend"},
"name":"p...
smoke.com/created-by=cluster admin
Status: Running
IP: 10.244.2.38
Containers:
httpd:
Container ID: docker://6ce6a01f8faeaffaaaa1bb1a2029e891c7f7d984f0ba49169e0be044a94edecf
Image: busybox:latest
Image ID: docker-pullable://busybox@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209
Port: 80/TCP
Host Port: 0/TCP
Command:
/bin/httpd
-f
-h /data/web/html
State: Waiting
Reason: CrashLoopBackOff
Last State: Terminated
Reason: Error
Exit Code: 1
Started: Tue, 23 Jun 2020 22:05:36 +0800
Finished: Tue, 23 Jun 2020 22:05:36 +0800
Ready: False
Restart Count: 2
Environment: <none>
Mounts:
/data/web/html/ from html (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-jrx89 (ro)
busybox:
Container ID: docker://3f83538dd36d9e747bc9d9ccff7e517596a1c66702da250c4aee88c3d1fc88d1
Image: busybox:latest
Image ID: docker-pullable://busybox@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209
Port: <none>
Host Port: <none>
Command:
/bin/sh
-c
while true; do echo $$(date) >> /data/index.html; sleep 2; done
State: Running
Started: Tue, 23 Jun 2020 22:05:22 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/data/ from html (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-jrx89 (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
html:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium:
default-token-jrx89:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-jrx89
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 41s default-scheduler Successfully assigned default/pod-demo to node02.smoke.com
Normal Pulled 40s kubelet, node02.smoke.com Container image "busybox:latest" already present on machine
Normal Created 40s kubelet, node02.smoke.com Created container
Normal Started 40s kubelet, node02.smoke.com Started container
Normal Pulled 26s (x3 over 40s) kubelet, node02.smoke.com Container image "busybox:latest" already present on machine
Normal Created 26s (x3 over 40s) kubelet, node02.smoke.com Created container
Normal Started 26s (x3 over 40s) kubelet, node02.smoke.com Started container
Warning BackOff 10s (x4 over 38s) kubelet, node02.smoke.com Back-off restarting failed container
[root@master volumes]# kubectl delete -f pod-vol-demo.yaml
[root@master volumes]# vim pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
smoke.com/created-by: "cluster admin"
spec:
containers:
- name: httpd
image: busybox:latest
imagePullPolicy: IfNotPresent
command: ['/bin/httpd','-f','-h /data/web/html']
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /data/web/html/
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command: ['/bin/sh','-c']
args:
- 'while true; do echo $(date) >> /data/index.html; sleep 2; done'
volumes:
- name: html
emptyDir: {}
[root@master volumes]# kubectl apply -f pod-vol-demo.yaml
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 2d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 2d
pod-demo 1/2 CrashLoopBackOff 1 11s
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 23h
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 23h
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 23h
[root@master volumes]# kubectl delete -f pod-vol-demo.yaml
[root@master volumes]# vim pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
smoke.com/created-by: "cluster admin"
spec:
containers:
- name: httpd
image: busybox:latest
imagePullPolicy: IfNotPresent
command: ['/bin/httpd','-f','-h /data/web/html']
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /data/web/html/
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command: ['/bin/sh','-c']
args:
- while true; do echo $(date) >> /data/index.html; sleep 2; done
volumes:
- name: html
emptyDir: {}
[root@master volumes]# kubectl apply -f pod-vol-demo.yaml
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 3d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 3d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 3d
pod-demo 1/2 CrashLoopBackOff 1 15s
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 1d
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 1d
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 1d
[root@master volumes]# kubectl delete -f pod-vol-demo.yaml
[root@master volumes]# vim pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
smoke.com/created-by: "cluster admin"
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /usr/share/nginx/html/
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command: ["/bin/sh"]
args: ["-c", "while true; do echo $(date) >> /data/index.html; sleep 2; done"]
volumes:
- name: html
emptyDir: {}
[root@master volumes]# kubectl apply -f pod-vol-demo.yaml
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 3d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 3d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 3d
pod-demo 2/2 Running 0 37s
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 1d
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 1d
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 1d
[root@master volumes]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 3d 10.244.1.35 node01.smoke.com
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 3d 10.244.2.33 node02.smoke.com
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 3d 10.244.2.32 node02.smoke.com
pod-demo 2/2 Running 0 2m 10.244.2.41 node02.smoke.com
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 1d 10.244.1.37 node01.smoke.com
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 1d 10.244.1.36 node01.smoke.com
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 1d 10.244.2.34 node02.smoke.com
[root@master volumes]# curl 10.244.2.41
Wed Jun 24 13:29:47 UTC 2020
Wed Jun 24 13:29:49 UTC 2020
Wed Jun 24 13:29:51 UTC 2020
Wed Jun 24 13:29:53 UTC 2020
Wed Jun 24 13:29:55 UTC 2020
Wed Jun 24 13:29:57 UTC 2020
[root@master volumes]# kubectl delete -f pod-vol-demo.yaml
[root@master volumes]# kubectl explain pods.spec.volumes.hostPath
[root@master volumes]# kubectl explain pods.spec.volumes.hostPath.type
[root@master volumes]# vim pod-hostpath-vol.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-vol-hostpath
namespace: default
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
volumeMounts:
- name: html
mountPath: /usr/share/nginx/html/
volumes:
- name: html
hostPath:
path: /data/pod/volume1
type: DirectoryOrCreate
node01:
[root@node01 ~]# mkdir -pv /data/pod/volume1 [root@node01 ~]# vim /data/pod/volume1/index.html node01.smoke.com
node02:
[root@node02 ~]# mkdir -pv /data/pod/volume1 [root@node02 ~]# vim /data/pod/volume1/index.html node02.smoke.com
master:
[root@master volumes]# kubectl apply -f pod-hostpath-vol.yaml [root@master volumes]# kubectl get pods NAME READY STATUS RESTARTS AGE myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 3d myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 3d myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 3d pod-vol-hostpath 1/1 Running 0 59s tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 1d tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 1d tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 1d [root@master volumes]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 3d 10.244.1.35 node01.smoke.com myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 3d 10.244.2.33 node02.smoke.com myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 3d 10.244.2.32 node02.smoke.com pod-vol-hostpath 1/1 Running 0 1m 10.244.2.42 node02.smoke.com tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 1d 10.244.1.37 node01.smoke.com tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 1d 10.244.1.36 node01.smoke.com tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 1d 10.244.2.34 node02.smoke.com [root@master volumes]# curl 10.244.2.42 node02.smoke.com [root@master volumes]# kubectl delete -f pod-hostpath-vol.yaml [root@master volumes]# kubectl apply -f pod-hostpath-vol.yaml [root@master volumes]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 3d 10.244.1.35 node01.smoke.com myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 3d 10.244.2.33 node02.smoke.com myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 3d 10.244.2.32 node02.smoke.com pod-vol-hostpath 1/1 Running 0 30s 10.244.2.43 node02.smoke.com tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 1d 10.244.1.37 node01.smoke.com tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 1d 10.244.1.36 node01.smoke.com tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 1d 10.244.2.34 node02.smoke.com [root@master volumes]# curl 10.244.2.43 node02.smoke.com
volumes:
主机名:stor01.smoke.com
操作系统:centos 7.5
内核版本:3.10.0-862.el7.x86_64
kubernetes版本: v1.11.1
docker版本:17.03.3.ce-1.el7
网卡1:vmnet8 172.20.0.76/24
使用nfs存储卷
volumes:
[root@stor01 ~]# hostname
stor01.smoke.com
[root@stor01 ~]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:58:e4:97 brd ff:ff:ff:ff:ff:ff
inet 172.20.0.76/24 brd 172.20.0.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet6 fe80::90c4:7ad5:b28a:b7a/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@stor01 ~]# ip route show
default via 172.20.0.2 dev ens33 proto static metric 100
172.20.0.0/24 dev ens33 proto kernel scope link src 172.20.0.76 metric 100
[root@stor01 ~]# vim /etc/hosts
172.20.0.70 master.smoke.com master
172.20.0.66 node01.smoke.com node01
172.20.0.67 node02.smoke.com node02
172.20.0.76 stor01.smoke.com stor01
[root@stor01 ~]# ntpdate ntp1.aliyun.com
[root@stor01 ~]# crontab -l
*/5 * * * * /usr/sbin/ntpdate ntp1.aliyun.com &> /dev/null
[root@stor01 ~]# setenforce 0
[root@stor01 ~]# vim /etc/selinux/config
SELINUX=permissive
[root@stor01 ~]# systemctl stop firewalld
[root@stor01 ~]# systemctl disable firewalld
master:
[root@master ~]# vim /etc/hosts 172.20.0.70 master.smoke.com master 172.20.0.66 node01.smoke.com node01 172.20.0.67 node02.smoke.com node02 172.20.0.76 stor01.smoke.com stor01
node01:
[root@node01 ~]# vim /etc/hosts 172.20.0.70 master.smoke.com master 172.20.0.66 node01.smoke.com node01 172.20.0.67 node02.smoke.com node02 172.20.0.76 stor01.smoke.com stor01
node02:
[root@node02 ~]# vim /etc/hosts 172.20.0.70 master.smoke.com master 172.20.0.66 node01.smoke.com node01 172.20.0.67 node02.smoke.com node02 172.20.0.76 stor01.smoke.com stor01
volumes:
[root@stor01 ~]# yum -y install nfs-utils [root@stor01 ~]# mkdir -pv /data/volumes [root@stor01 ~]# vim /etc/exports /data/volumes 172.20.0.0/16(rw,no_root_squash) [root@stor01 ~]# systemctl start nfs [root@stor01 ~]# netstat -tnlp Active Internet connections (only servers) Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name tcp 0 0 0.0.0.0:41376 0.0.0.0:* LISTEN 21440/rpc.statd tcp 0 0 0.0.0.0:2049 0.0.0.0:* LISTEN - tcp 0 0 0.0.0.0:33353 0.0.0.0:* LISTEN - tcp 0 0 0.0.0.0:111 0.0.0.0:* LISTEN 21443/rpcbind tcp 0 0 0.0.0.0:20048 0.0.0.0:* LISTEN 21461/rpc.mountd tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1065/sshd tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 1403/master tcp6 0 0 :::2049 :::* LISTEN - tcp6 0 0 :::33070 :::* LISTEN - tcp6 0 0 :::45422 :::* LISTEN 21440/rpc.statd tcp6 0 0 :::111 :::* LISTEN 21443/rpcbind tcp6 0 0 :::20048 :::* LISTEN 21461/rpc.mountd tcp6 0 0 :::22 :::* LISTEN 1065/sshd tcp6 0 0 ::1:25 :::* LISTEN 1403/master
node01:
[root@node01 ~]# yum -y install nfs-utils
node02:
[root@node02 ~]# yum -y install nfs-utils [root@node02 ~]# mount -t nfs stor01:/data/volumes /mnt #测试挂载 stor01:/data/volumes on /mnt type nfs4 (rw,relatime,vers=4.1,rsize=262144,wsize=262144,namlen=255,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=172.20.0.67,local_lock=none,addr=172.20.0.76) [root@node02 ~]# umount /mnt
master:
[root@master volumes]# cp pod-hostpath-vol.yaml pod-vol-nfs.yaml
[root@master volumes]# kubectl explain pods.spec.volumes.nfs
[root@master volumes]# vim pod-vol-nfs.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-vol-nfs
namespace: default
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
volumeMounts:
- name: html
mountPath: /usr/share/nginx/html/
volumes:
- name: html
nfs:
path: /data/volumes
server: stor01.smoke.com
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 6d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 6d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 6d
pod-vol-hostpath 1/1 Running 0 3d
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 5d
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 5d
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 5d
[root@master volumes]# kubectl apply -f pod-vol-nfs.yaml
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 6d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 6d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 6d
pod-vol-hostpath 1/1 Running 0 3d
pod-vol-nfs 1/1 Running 0 1m
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 5d
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 5d
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 5d
[root@master volumes]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 6d 10.244.1.35 node01.smoke.com
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 6d 10.244.2.33 node02.smoke.com
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 6d 10.244.2.32 node02.smoke.com
pod-vol-hostpath 1/1 Running 0 3d 10.244.2.43 node02.smoke.com
pod-vol-nfs 1/1 Running 0 1m 10.244.2.44 node02.smoke.com
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 5d 10.244.1.37 node01.smoke.com
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 5d 10.244.1.36 node01.smoke.com
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 5d 10.244.2.34 node02.smoke.com
stor01:
[root@stor01 ~]# cd /data/volumes/ [root@stor01 volumes]# ls [root@stor01 volumes]# vim index.html <h1>NFS stor01</h1>
master:
[root@master volumes]# curl 10.244.2.44 <h1>NFS stor01</h1> [root@master volumes]# kubectl delete -f pod-vol-nfs.yaml
stor01:
[root@stor01 volumes]# ll 总用量 4 -rw-r--r--. 1 root root 20 6月 28 21:07 index.html
使用pvc存储卷
master:
[root@master volumes]# kubectl explain pods.spec.volumes.persistentVolumeClaim [root@master volumes]# kubectl explain pvc [root@master volumes]# kubectl explain pvc.spec
stor01:
[root@stor01 volumes]# mkdir v{1,2,3,4,5}
[root@stor01 volumes]# ls
index.html v1 v2 v3 v4 v5
[root@stor01 volumes]# vim /etc/exports
/data/volumes/v1 172.20.0.0/16(rw,no_root_squash)
/data/volumes/v2 172.20.0.0/16(rw,no_root_squash)
/data/volumes/v3 172.20.0.0/16(rw,no_root_squash)
/data/volumes/v4 172.20.0.0/16(rw,no_root_squash)
/data/volumes/v5 172.20.0.0/16(rw,no_root_squash)
[root@stor01 volumes]# exportfs -arv
exporting 172.20.0.0/16:/data/volumes/v5
exporting 172.20.0.0/16:/data/volumes/v4
exporting 172.20.0.0/16:/data/volumes/v3
exporting 172.20.0.0/16:/data/volumes/v2
exporting 172.20.0.0/16:/data/volumes/v1
[root@stor01 volumes]# showmount -e
Export list for stor01.smoke.com:
/data/volumes/v5 172.20.0.0/16
/data/volumes/v4 172.20.0.0/16
/data/volumes/v3 172.20.0.0/16
/data/volumes/v2 172.20.0.0/16
/data/volumes/v1 172.20.0.0/16
master:
[root@master volumes]# kubectl explain pv
[root@master volumes]# kubectl explain pv.spec
[root@master volumes]# kubectl explain pv.spec.nfs
[root@master volumes]# vim pv-demo.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv001
labels:
name: pv001
spec:
nfs:
path: /data/volumes/v1
server: stor01.smoke.com
accessModes: ["ReadWriteMany","ReadWriteOnce"]
capacity:
storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv002
labels:
name: pv002
spec:
nfs:
path: /data/volumes/v2
server: stor01.smoke.com
accessModes: ["ReadWriteOnce"]
capacity:
storage: 5Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv003
labels:
name: pv003
spec:
nfs:
path: /data/volumes/v3
server: stor01.smoke.com
accessModes: ["ReadWriteMany","ReadWriteOnce"]
capacity:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv004
labels:
name: pv004
spec:
nfs:
path: /data/volumes/v4
server: stor01.smoke.com
accessModes: ["ReadWriteMany","ReadWriteOnce"]
capacity:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv005
labels:
name: pv005
spec:
nfs:
path: /data/volumes/v5
server: stor01.smoke.com
accessModes: ["ReadWriteMany","ReadWriteOnce"]
capacity:
storage: 10Gi
[root@master volumes]# kubectl apply -f pv-demo.yaml #RECLAIM POLICY保留策略,Retain保留
[root@master volumes]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv001 2Gi RWO,RWX Retain Available 1m
pv002 5Gi RWO Retain Available 1m
pv003 20Gi RWO,RWX Retain Available 1m
pv004 10Gi RWO,RWX Retain Available 1m
pv005 10Gi RWO,RWX Retain Available 1m
[root@master volumes]# kubectl explain pvc.spec
[root@master volumes]# kubectl explain pods.spec.volumes.persistentVolumeClaim
[root@master volumes]# cp pod-vol-nfs.yaml pod-vol-pvc.yaml
[root@master volumes]# vim pod-vol-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mypvc
namespace: default
spec:
accessModes: ["ReadWriteMany"]
resources:
requests:
storage: 6Gi
---
apiVersion: v1
kind: Pod
metadata:
name: pod-vol-pvc
namespace: default
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
volumeMounts:
- name: html
mountPath: /usr/share/nginx/html/
volumes:
- name: html
persistentVolumeClaim:
claimName: mypvc
[root@master volumes]# kubectl apply -f pod-vol-pvc.yaml
[root@master volumes]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv001 2Gi RWO,RWX Retain Available 23m
pv002 5Gi RWO Retain Available 23m
pv003 20Gi RWO,RWX Retain Available 23m
pv004 10Gi RWO,RWX Retain Bound default/mypvc 23m
pv005 10Gi RWO,RWX Retain Available 23m
[root@master volumes]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
mypvc Bound pv004 10Gi RWO,RWX 2m
[root@master volumes]# kubectl describe pods pod-vol-pvc
Mounts:
/usr/share/nginx/html/ from html (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-jrx89 (ro)
Volumes:
html:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: mypvc
ReadOnly: false
default-token-jrx89:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-jrx89
Optional: false
[root@master volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-deploy-67f6f6b4dc-642hc 1/1 Running 0 7d
myapp-deploy-67f6f6b4dc-d4cpv 1/1 Running 0 7d
myapp-deploy-67f6f6b4dc-rqj8g 1/1 Running 0 7d
pod-vol-hostpath 1/1 Running 0 4d
pod-vol-pvc 1/1 Running 0 8m
tomcat-deploy-588c79d48d-6r8dw 1/1 Running 0 6d
tomcat-deploy-588c79d48d-cjlj9 1/1 Running 0 6d
tomcat-deploy-588c79d48d-r7nfq 1/1 Running 0 6d
浙公网安备 33010602011771号