节点选择器:nodeSelector, nodeName

节点亲和调度:nodeAffinity

 

taint的effect定义对Pod的排斥效果:

  NoSchedule: 仅影响调度过程,对现存的Pod对象不产生影响;

  NoExecute: 既影响调度过程,也影响现存的Pod对象;不容忍的Pod对象将被驱逐;

  PreferNoSchedule: 柔性约束,调度器会尽量避免将不容忍的Pod调度到该节点,但无其他可用节点时仍可调度;对现存的Pod对象不产生影响;

 

master:

[root@master ~]# kubectl explain pods.spec.nodeSelector
[root@master manifests]# mkdir schedule
[root@master manifests]# cd schedule/
[root@master schedule]# cp ../pod-demo.yaml ./
[root@master schedule]# vim pod-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    smoke.com/created-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  nodeSelector:
    disktype: ssd
[root@master schedule]# kubectl get pods
[root@master schedule]# kubectl apply -f pod-demo.yaml 
[root@master schedule]# kubectl get pods
NAME       READY     STATUS    RESTARTS   AGE
pod-demo   1/1       Running   0          4m
[root@master schedule]# kubectl get nodes --show-labels
NAME               STATUS    ROLES     AGE       VERSION   LABELS
master.smoke.com   Ready     master    51d       v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=master.smoke.com,node-role.kubernetes.io/master=
node01.smoke.com   Ready     <none>    50d       v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/hostname=node01.smoke.com    #含disktype=ssd标签,因为此前给node01打过disktype=ssd标签
node02.smoke.com   Ready     <none>    50d       v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=node02.smoke.com
[root@master schedule]# kubectl get pods -o wide
NAME       READY     STATUS    RESTARTS   AGE       IP           NODE
pod-demo   1/1       Running   0          8m        10.244.1.3   node01.smoke.com
[root@master schedule]# kubectl delete -f pod-demo.yaml
[root@master schedule]# vim pod-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    smoke.com/created-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  nodeSelector:
    disktype: harddisk
[root@master schedule]# kubectl apply -f pod-demo.yaml 
[root@master schedule]# kubectl get pods -o wide    #Pending状态因为调度无法成功
NAME       READY     STATUS    RESTARTS   AGE       IP        NODE
pod-demo   0/1       Pending   0          2m        <none>    <none>
[root@master schedule]# kubectl describe pods pod-demo
Events:
  Type     Reason            Age                 From               Message
  ----     ------            ----                ----               -------
  Warning  FailedScheduling  2m (x183 over 12m)  default-scheduler  0/3 nodes are available: 3 node(s) didn't match node selector.
[root@master schedule]# kubectl label nodes node02.smoke.com disktype=harddisk
[root@master schedule]# kubectl get pods -o wide
NAME       READY     STATUS    RESTARTS   AGE       IP           NODE
pod-demo   1/1       Running   0          22h       10.244.2.3   node02.smoke.com
[root@master schedule]# kubectl delete -f pod-demo.yaml
[root@master schedule]# kubectl explain pods.spec.affinity
[root@master schedule]# kubectl explain pods.spec.affinity.nodeAffinity
[root@master schedule]# kubectl explain pods.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution
[root@master schedule]# kubectl explain pods.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms
[root@master schedule]# kubectl explain pods.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields
[root@master schedule]# cp pod-demo.yaml pod-nodeaffinity-demo.yaml
[root@master schedule]# vim pod-nodeaffinity-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-node-affinity-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    smoke.com/created-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: zone
            operator: In
            values:
            - foo
            - bar
[root@master schedule]# kubectl apply -f pod-nodeaffinity-demo.yaml 
[root@master schedule]# kubectl get pods 
NAME                     READY     STATUS    RESTARTS   AGE
pod-node-affinity-demo   0/1       Pending   0          29s
[root@master schedule]# vim pod-nodeaffinity-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-node-affinity-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    smoke.com/created-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: zone
            operator: In
            values:
            - foo
            - bar
[root@master schedule]# kubectl delete -f pod-nodeaffinity-demo.yaml 
[root@master schedule]# kubectl apply -f pod-nodeaffinity-demo.yaml 
error: error validating "pod-nodeaffinity-demo.yaml": error validating data: ValidationError(Pod.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution): invalid type for io.k8s.api.core.v1.
NodeAffinity.preferredDuringSchedulingIgnoredDuringExecution: got "map", expected "array"; if you choose to ignore these errors, turn validation off with --validate=false
[root@master schedule]# kubectl explain pods.spec.affinity.nodeAffinity
[root@master schedule]# kubectl explain pods.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference
[root@master schedule]# vim pod-nodeaffinity-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-node-affinity-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    smoke.com/created-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: zone
            operator: In
            values:
            - foo
            - bar
[root@master schedule]# cp pod-nodeaffinity-demo.yaml pod-nodeaffinity-demo-2.yaml 
[root@master schedule]# vim pod-nodeaffinity-demo-2.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-node-affinity-demo-2
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    smoke.com/created-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - preference:
          matchExpressions:
          - key: zone
            operator: In
            values:
            - foo
            - bar
        weight: 60
[root@master schedule]# kubectl apply -f pod-nodeaffinity-demo-2.yaml 
[root@master schedule]# kubectl get pods
NAME                       READY     STATUS    RESTARTS   AGE
pod-node-affinity-demo-2   1/1       Running   0          1m
[root@master schedule]# kubectl describe pods pod-node-affinity-demo-2
[root@master schedule]# cat pod-nodeaffinity-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-node-affinity-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    smoke.com/created-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: zone
            operator: In
            values:
            - foo
            - bar
[root@master schedule]# kubectl delete -f pod-nodeaffinity-demo-2.yaml 
[root@master schedule]# kubectl explain pods.spec.affinity.podAffinity
[root@master schedule]# kubectl explain pods.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution
[root@master schedule]# kubectl explain pods.spec.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector
[root@master schedule]# cp pod-demo.yaml pod-required-affinity-demo.yaml
[root@master schedule]# kubectl get nodes --show-labels
NAME               STATUS    ROLES     AGE       VERSION   LABELS
master.smoke.com   Ready     master    54d       v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=master.smoke.com,node-role.kubernetes.io/master=
node01.smoke.com   Ready     <none>    53d       v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/hostname=node01.smoke.com
node02.smoke.com   Ready     <none>    53d       v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=harddisk,kubernetes.io/hostname=node02.smoke.com
[root@master schedule]# vim pod-required-affinity-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-first
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-second
  labels:
    app: backend
    tier: db
spec:
  containers:
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command: ["sh","-c","sleep 3600"]
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - {key: app, operator: In, values: ["myapp"]}
        topologyKey: kubernetes.io/hostname
[root@master schedule]# kubectl apply -f pod-required-affinity-demo.yaml 
[root@master schedule]# kubectl get pods
NAME                       READY     STATUS    RESTARTS   AGE
pod-first                  1/1       Running   0          31s
pod-node-affinity-demo-2   1/1       Running   0          1d
pod-second                 1/1       Running   0          31s
[root@master schedule]# kubectl get pods -owide    #都在node01上
NAME                       READY     STATUS    RESTARTS   AGE       IP            NODE
pod-first                  1/1       Running   0          52s       10.244.1.9    node01.smoke.com
pod-node-affinity-demo-2   1/1       Running   0          1d        10.244.2.4    node02.smoke.com
pod-second                 1/1       Running   0          52s       10.244.1.10   node01.smoke.com
[root@master schedule]# kubectl describe pods pod-second
Events:
  Type    Reason     Age   From                       Message
  ----    ------     ----  ----                       -------
  Normal  Scheduled  2m    default-scheduler          Successfully assigned default/pod-second to node01.smoke.com
  Normal  Pulled     2m    kubelet, node01.smoke.com  Container image "busybox:latest" already present on machine
  Normal  Created    2m    kubelet, node01.smoke.com  Created container
  Normal  Started    2m    kubelet, node01.smoke.com  Started container
[root@master schedule]#  kubectl explain pods.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector
[root@master schedule]#  kubectl explain pods.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
[root@master schedule]# kubectl get pods
NAME                       READY     STATUS    RESTARTS   AGE
pod-first                  1/1       Running   0          7m
pod-node-affinity-demo-2   1/1       Running   0          1d
pod-second                 1/1       Running   0          7m
[root@master schedule]# kubectl delete -f pod-required-affinity-demo.yaml 
[root@master schedule]# cp pod-required-affinity-demo.yaml pod-required-anti-affinity-demo.yaml 
[root@master schedule]# vim pod-required-anti-affinity-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-first
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-second
  labels:
    app: backend
    tier: db
spec:
  containers:
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command: ["sh","-c","sleep 3600"]
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - {key: app, operator: In, values: ["myapp"]}
        topologyKey: kubernetes.io/hostname
[root@master schedule]# kubectl delete -f pod-nodeaffinity-demo-2.yaml 
[root@master schedule]# kubectl apply -f pod-required-anti-affinity-demo.yaml 
[root@master schedule]# kubectl get pods
NAME         READY     STATUS    RESTARTS   AGE
pod-first    1/1       Running   0          1m
pod-second   1/1       Running   0          1m
[root@master schedule]# kubectl get pods -o wide    #两个pod一定不在同一个node上
NAME         READY     STATUS    RESTARTS   AGE       IP            NODE
pod-first    1/1       Running   0          2m        10.244.2.6    node02.smoke.com
pod-second   1/1       Running   0          2m        10.244.1.15   node01.smoke.com
[root@master schedule]# kubectl delete -f pod-required-anti-affinity-demo.yaml 
[root@master schedule]# kubectl get nodes --show-labels
NAME               STATUS    ROLES     AGE       VERSION   LABELS
master.smoke.com   Ready     master    54d       v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=master.smoke.com,node-role.kubernetes.io/master=
node01.smoke.com   Ready     <none>    54d       v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/hostname=node01.smoke.com
node02.smoke.com   Ready     <none>    54d       v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=harddisk,kubernetes.io/hostname=node02.smoke.com
[root@master schedule]# kubectl label nodes node01.smoke.com zone=foo
[root@master schedule]# kubectl label nodes node02.smoke.com zone=foo
[root@master schedule]# vim pod-required-anti-affinity-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-first
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-second
  labels:
    app: backend
    tier: db
spec:
  containers:
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command: ["sh","-c","sleep 3600"]
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - {key: app, operator: In, values: ["myapp"]}
        topologyKey: zone
[root@master schedule]# kubectl apply -f pod-required-anti-affinity-demo.yaml 
[root@master schedule]# kubectl get pods -o wide
NAME         READY     STATUS    RESTARTS   AGE       IP            NODE
pod-first    1/1       Running   0          8s        10.244.1.16   node01.smoke.com
pod-second   0/1       Pending   0          8s        <none>        <none>
[root@master schedule]# kubectl delete -f pod-required-anti-affinity-demo.yaml 
[root@master schedule]# kubectl get nodes node01.smoke.com -o yaml
spec:
  podCIDR: 10.244.1.0/24
[root@master schedule]# kubectl explain node.spec
[root@master schedule]# kubectl explain node.spec.taints
[root@master schedule]# kubectl describe node master.smoke.com
Taints:             node-role.kubernetes.io/master:NoSchedule
[root@master schedule]# kubectl get pods -n kube-system
NAME                                       READY     STATUS    RESTARTS   AGE
canal-997tb                                3/3       Running   0          15d
canal-j6t4j                                3/3       Running   0          15d
canal-jxq25                                3/3       Running   0          15d
coredns-78fcdf6894-bt5g6                   1/1       Running   1          55d
coredns-78fcdf6894-zzbll                   1/1       Running   1          55d
etcd-master.smoke.com                      1/1       Running   1          55d
kube-apiserver-master.smoke.com            1/1       Running   1          55d
kube-controller-manager-master.smoke.com   1/1       Running   1          55d
kube-flannel-ds-7f544                      1/1       Running   0          18d
kube-flannel-ds-mzpzm                      1/1       Running   0          18d
kube-flannel-ds-zb2nc                      1/1       Running   0          18d
kube-proxy-5jppm                           1/1       Running   1          54d
kube-proxy-7lg96                           1/1       Running   1          55d
kube-proxy-qmrq7                           1/1       Running   1          54d
kube-scheduler-master.smoke.com            1/1       Running   1          55d
kubernetes-dashboard-6948bdb78-7rkwz       1/1       Running   0          24d
[root@master schedule]# kubectl describe pods kube-apiserver-master.smoke.com -n kube-system
Tolerations:       :NoExecute
[root@master schedule]# kubectl get pods -n kube-system
NAME                                       READY     STATUS    RESTARTS   AGE
canal-997tb                                3/3       Running   0          15d
canal-j6t4j                                3/3       Running   0          15d
canal-jxq25                                3/3       Running   0          15d
coredns-78fcdf6894-bt5g6                   1/1       Running   1          55d
coredns-78fcdf6894-zzbll                   1/1       Running   1          55d
etcd-master.smoke.com                      1/1       Running   1          55d
kube-apiserver-master.smoke.com            1/1       Running   1          55d
kube-controller-manager-master.smoke.com   1/1       Running   1          55d
kube-flannel-ds-7f544                      1/1       Running   0          18d
kube-flannel-ds-mzpzm                      1/1       Running   0          18d
kube-flannel-ds-zb2nc                      1/1       Running   0          18d
kube-proxy-5jppm                           1/1       Running   1          54d
kube-proxy-7lg96                           1/1       Running   1          55d
kube-proxy-qmrq7                           1/1       Running   1          54d
kube-scheduler-master.smoke.com            1/1       Running   1          55d
kubernetes-dashboard-6948bdb78-7rkwz       1/1       Running   0          24d
[root@master schedule]# kubectl describe pods kube-flannel-ds-7f544 -n kube-system
Tolerations:     node-role.kubernetes.io/master:NoSchedule
                 node.kubernetes.io/disk-pressure:NoSchedule
                 node.kubernetes.io/memory-pressure:NoSchedule
                 node.kubernetes.io/not-ready:NoExecute
                 node.kubernetes.io/unreachable:NoExecute
[root@master schedule]# kubectl get nodes master.smoke.com -o yaml
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
[root@master schedule]# kubectl taint --help
[root@master schedule]# kubectl taint node node01.smoke.com node-type=production:NoSchedule 
[root@master schedule]# cp ../deploy-demo.yaml ./
[root@master schedule]# vim deploy-demo.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80
[root@master schedule]# kubectl apply -f deploy-demo.yaml 
[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP           NODE
myapp-deploy-67f6f6b4dc-fmh89   1/1       Running   0          26s       10.244.2.7   node02.smoke.com
myapp-deploy-67f6f6b4dc-tqdld   1/1       Running   0          26s       10.244.2.9   node02.smoke.com
myapp-deploy-67f6f6b4dc-wq2pj   1/1       Running   0          26s       10.244.2.8   node02.smoke.com
[root@master schedule]# kubectl taint node node02.smoke.com node-type=dev:NoExecute
[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP        NODE
myapp-deploy-67f6f6b4dc-c7dww   0/1       Pending   0          19s       <none>    <none>
myapp-deploy-67f6f6b4dc-d267n   0/1       Pending   0          19s       <none>    <none>
myapp-deploy-67f6f6b4dc-kpp6j   0/1       Pending   0          19s       <none>    <none>
[root@master schedule]# kubectl explain pods.spec.tolerations 
[root@master schedule]# vim deploy-demo.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80
      tolerations:
      - key: "node-type"
        operator: "Equal"
        value: "production"
        effect: "NoExecute"
        tolerationSeconds: 3600
[root@master schedule]# kubectl apply -f deploy-demo.yaml 
[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP        NODE
myapp-deploy-67f6f6b4dc-c7dww   0/1       Pending   0          10m       <none>    <none>
myapp-deploy-67f6f6b4dc-d267n   0/1       Pending   0          10m       <none>    <none>
myapp-deploy-67f6f6b4dc-kpp6j   0/1       Pending   0          10m       <none>    <none>
myapp-deploy-77fb48ff96-8jtjk   0/1       Pending   0          24s       <none>    <none>
[root@master schedule]# vim deploy-demo.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80
      tolerations:
      - key: "node-type"
        operator: "Equal"
        value: "production"
        effect: "NoSchedule"
[root@master schedule]# kubectl apply -f deploy-demo.yaml 
[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP            NODE
myapp-deploy-65cc47f858-bbzlm   1/1       Running   0          8s        10.244.1.18   node01.smoke.com
myapp-deploy-65cc47f858-r5kgf   1/1       Running   0          7s        10.244.1.19   node01.smoke.com
myapp-deploy-65cc47f858-vcs2k   1/1       Running   0          9s        10.244.1.17   node01.smoke.com
[root@master schedule]# vim deploy-demo.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80
      tolerations:
      - key: "node-type"
        operator: "Exists"
        value: ""
        effect: "NoSchedule"
[root@master schedule]# kubectl apply -f deploy-demo.yaml 

[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP            NODE
myapp-deploy-559f559bcc-6ctrb   1/1       Running   0          13s       10.244.1.20   node01.smoke.com
myapp-deploy-559f559bcc-http2   1/1       Running   0          11s       10.244.1.22   node01.smoke.com
myapp-deploy-559f559bcc-jcdtd   1/1       Running   0          12s       10.244.1.21   node01.smoke.com
[root@master schedule]# vim deploy-demo.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80
      tolerations:
      - key: "node-type"
        operator: "Exists"
        value: ""
        effect: ""
[root@master schedule]# kubectl apply -f deploy-demo.yaml 
[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP            NODE
myapp-deploy-5d9c6985f5-7sbdk   1/1       Running   0          15s       10.244.2.10   node02.smoke.com
myapp-deploy-5d9c6985f5-rcxvj   1/1       Running   0          12s       10.244.2.11   node02.smoke.com
myapp-deploy-5d9c6985f5-ssdf6   1/1       Running   0          13s       10.244.1.23   node01.smoke.com