## 污点和容忍度 - 三种效果:NoSchedule、PreferNoSchedule、NoExecute
### node - 污点taints
```sh
# 查看当前node的污点
[22:32:45 root@master1 scheduler]#kubectl get nodes node1.noisedu.cn -o go-template={{.spec.taints}}
<no value>
# 确认node1和node2已带有env标签(node1: env=dev,node2: env=test)
[22:34:36 root@master1 scheduler]#kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
master1 Ready control-plane,master 35d v1.22.1 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master1,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
master2.noisedu.cn Ready control-plane,master 35d v1.22.1 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master2.noisedu.cn,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
master3.noisedu.cn Ready control-plane,master 35d v1.22.1 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master3.noisedu.cn,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
node1.noisedu.cn Ready <none> 35d v1.22.1 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,env=dev,kubernetes.io/arch=amd64,kubernetes.io/hostname=node1.noisedu.cn,kubernetes.io/os=linux
node2.noisedu.cn Ready <none> 35d v1.22.1 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,env=test,kubernetes.io/arch=amd64,kubernetes.io/hostname=node2.noisedu.cn,kubernetes.io/os=linux
# 开始测试
[22:36:14 root@master1 scheduler]#cat 14-scheduler-pod-taint-test.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-test
spec:
containers:
- name: pod-test
image: 10.0.0.55:80/mykubernetes/pod_test:v0.1
imagePullPolicy: IfNotPresent
nodeSelector:
env: dev
[22:37:17 root@master1 scheduler]#kubectl apply -f 14-scheduler-pod-taint-test.yaml
pod/pod-test created
[22:37:54 root@master1 scheduler]#kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-test 1/1 Running 0 6s 10.244.3.4 node1.noisedu.cn <none> <none>
# 无污点的情况下在node1成功创建
# 给node1添加污点
[22:40:11 root@master1 scheduler]#kubectl taint node node1.noisedu.cn node-type=production:NoSchedule
node/node1.noisedu.cn tainted
[22:40:37 root@master1 scheduler]#kubectl get nodes node1.noisedu.cn -o go-template={{.spec.taints}}
[map[effect:NoSchedule key:node-type value:production]]
# 重新创建pod
[22:41:12 root@master1 scheduler]#kubectl apply -f 14-scheduler-pod-taint-test.yaml
pod/pod-test created
[22:41:19 root@master1 scheduler]#kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-test 0/1 Pending 0 3s <none> <none> <none> <none>
[22:41:23 root@master1 scheduler]#kubectl describe pod pod-test
Name: pod-test
Namespace: default
Priority: 0
Node: <none>
Labels: <none>
Annotations: <none>
Status: Pending
IP:
IPs: <none>
Containers:
pod-test:
Image: 10.0.0.55:80/mykubernetes/pod_test:v0.1
Port: <none>
Host Port: <none>
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-gfbt4 (ro)
Conditions:
Type Status
PodScheduled False
Volumes:
kube-api-access-gfbt4:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: env=dev
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 10s default-scheduler 0/5 nodes are available: 1 node(s) didn't match Pod's node affinity/selector, 1 node(s) had taint {node-type: production}, that the pod didn't tolerate, 3 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.
# 发现不成功,处于pending,因为有一个污点node-type: production
# 删除污点发现马上成功
[22:43:15 root@master1 scheduler]#kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-test 0/1 Pending 0 3s <none> <none> <none> <none>
[22:43:18 root@master1 scheduler]#kubectl taint node node1.noisedu.cn node-type-
node/node1.noisedu.cn untainted
[22:43:32 root@master1 scheduler]#kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-test 1/1 Running 0 19s 10.244.3.5 node1.noisedu.cn <none> <none>
```
### pod - 容忍度tolerations
```sh
# 上述污点实验证明:pod如果未特意设置容忍度,则无法调度到带污点的节点,会一直处于Pending状态
# 我们给pod添加容忍度
[22:44:52 root@master1 scheduler]#cat 15-scheduler-pod-toleration-test.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-tol
spec:
containers:
- name: pod-test
image: 10.0.0.55:80/mykubernetes/pod_test:v0.1
imagePullPolicy: IfNotPresent
nodeSelector:
env: dev
tolerations:
- key: "node-type"
operator: "Equal"
value: "production"
effect: "NoSchedule"
[22:45:25 root@master1 scheduler]#kubectl apply -f 15-scheduler-pod-toleration-test.yaml
pod/pod-tol created
[22:52:04 root@master1 scheduler]#kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-tol 1/1 Running 0 5s 10.244.3.6 node1.noisedu.cn <none> <none>
# 由于创建指定容忍度能够容忍node1的污点,所以成功创建
# 但是如果添加的污点效果是NoExecute,会立即驱逐节点上已经存在且不能容忍该污点的pod
[22:52:09 root@master1 scheduler]#kubectl taint node node1.noisedu.cn diskfull=true:NoExecute
node/node1.noisedu.cn tainted
[22:54:37 root@master1 scheduler]#kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-tol 1/1 Terminating 0 2m35s 10.244.3.6 node1.noisedu.cn <none> <none>
[22:54:39 root@master1 scheduler]#kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-tol 1/1 Terminating 0 2m36s 10.244.3.6 node1.noisedu.cn <none> <none>
[22:54:40 root@master1 scheduler]#kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-tol 1/1 Terminating 0 2m41s 10.244.3.6 node1.noisedu.cn <none> <none>
[22:54:45 root@master1 scheduler]#kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-tol 1/1 Terminating 0 2m44s 10.244.3.6 node1.noisedu.cn <none> <none>
[22:54:48 root@master1 scheduler]#kubectl get pod -o wide
No resources found in default namespace.
```