资源配置清单:

  自主式Pod资源

  资源的清单格式:

    一级字段: apiVersion(group/version), kind, metadata(name, namespace, labels, annotations, ...), spec, status(只读)

  Pod资源:

    spec.containers <[]object>

  - name <string>

    image <string>

    imagePullPolicy <string>

        Always, Never, IfNotPresent

  修改镜像中的默认应用:

    command, args

    https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/

  标签:

    key=value

    key: 字母、数字、_、-、.

    value:可以为空,只能以字母或数字开头及结尾,中间可使用字母、数字、_、-、.

  标签选择器:

    等值关系:=,==,!=

  集合关系:

    KEY in (VALUE1,VALUE2,...)

    KEY notin (VALUE1,VALUE2,...)

    KEY

    !KEY

 

  许多资源支持内嵌字段定义其使用的标签选择器:
    matchLabels:直接给定键值
    matchExpressions:基于给定的表达式来定义使用标签选择器,{key:"KEY", operator:"OPERATOR",values:[VAL1,VAL2,...]}
      操作符:

        In, NotIn:values字段的值必须为非空列表;

        Exists, DoesNotExist:values字段的值必须为空列表;

 

  nodeSelector <map[string]string>

    节点标签选择器,

  nodeName <string>

  annotations:

    与label不同的地方在于,它不能用于挑选资源对象,仅用于为对象提供“元数据”。

 

   Pod的生命周期:

    状态:Pending, Running, Failed, Succeeded, Unknown

    创建Pod:

    Pod生命周期中的重要行为:

      初始化容器

      容器探测:

        liveness

        readiness

  restartPolicy:

    Always, OnFailure, Never. Default to Always.

探针类型有三种:

ExecAction、TCPSocketAction、HTTPGetAction

 

apiVersion: v1

kind: Pod

metadata:

  name: liveness-exec-pod

  namespace: default

spec:

  containers:

  - name: liveness-exec-container

    image: busybox:latest

    imagePullPolicy: IfNotPresent

    command: ["/bin/sh","-c","touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 3600"]

    livenessProbe:

      exec:

        command: ["test","-e","/tmp/healthy"]

      initialDelaySeconds: 1

      periodSeconds: 3

 

apiVersion: v1

kind: Pod

metadata:

  name: readiness-httpget-pod

  namespace: default

spec:

  containers:

  - name: readiness-httpget-container

    image: ikubernetes/myapp:v1

    imagePullPolicy: IfNotPresent

    ports:

    - name: http

      containerPort: 80

    readinessProbe:

      httpGet:

        port: http

        path: /index.html

      initialDelaySeconds: 1

      periodSeconds: 3

 

master:

[root@master ~]# cd manifests/
[root@master manifests]# cat pod-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  - name: busybox
    image: busybox:latest
    command:
    - "/bin/sh"
    - "-c"
    - "sleep 3600"
[root@master manifests]# kubectl explain pods.spec.containers
[root@master manifests]# docker image ls
REPOSITORY                                 TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-proxy-amd64                v1.11.1             d5c25579d0ff        21 months ago       97.8MB
k8s.gcr.io/kube-controller-manager-amd64   v1.11.1             52096ee87d0e        21 months ago       155MB
k8s.gcr.io/kube-apiserver-amd64            v1.11.1             816332bd9d11        21 months ago       187MB
k8s.gcr.io/kube-scheduler-amd64            v1.11.1             272b3a60cd68        21 months ago       56.8MB
k8s.gcr.io/coredns                         1.1.3               b3b94275d97c        23 months ago       45.6MB
k8s.gcr.io/etcd-amd64                      3.2.18              b8df3b177be2        2 years ago         219MB
k8s.gcr.io/pause                           3.1                 da86e6ba6ca1        2 years ago         742kB
k8s.gcr.io/pause-amd64                     3.1                 da86e6ba6ca1        2 years ago         742kB
quay.io/coreos/flannel                     v0.9.1-amd64        2b736d06ca4c        2 years ago         51.3MB
[root@master manifests]# vim pod-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent    #本地有就使用本地镜像,本地没有才去下载
    command:
    - "/bin/sh"
    - "-c"
    - "sleep 3600"
[root@master manifests]# kubectl explain pods.spec.containers.ports
[root@master manifests]# vim pod-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - name: http
      containerPort: 80    #暴露端口
    - name: https
      containerPort: 443
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command:
    - "/bin/sh"
    - "-c"
    - "sleep 3600"
[root@master manifests]# kubectl explain pods.spec.containers
[root@master manifests]# kubectl create -f pod-demo.yaml
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS    RESTARTS   AGE
client                        1/1       Running   0          4h
myapp-848b5b879b-4cd8x        1/1       Running   0          4h
myapp-848b5b879b-rt8bs        1/1       Running   0          4h
myapp-848b5b879b-wmgd8        1/1       Running   0          4h
nginx-deploy-5b595999-n52b5   1/1       Running   0          4h
pod-demo                      2/2       Running   0          10s
[root@master manifests]# kubectl get pods --show-labels    #查看所有资源标签
NAME                          READY     STATUS    RESTARTS   AGE       LABELS
client                        1/1       Running   0          4h        run=client
myapp-848b5b879b-4cd8x        1/1       Running   0          4h        pod-template-hash=4046164356,run=myapp
myapp-848b5b879b-rt8bs        1/1       Running   0          4h        pod-template-hash=4046164356,run=myapp
myapp-848b5b879b-wmgd8        1/1       Running   0          4h        pod-template-hash=4046164356,run=myapp
nginx-deploy-5b595999-n52b5   1/1       Running   0          4h        pod-template-hash=16151555,run=nginx-deploy
pod-demo                      2/2       Running   0          44s       app=myapp,tier=frontend
[root@master manifests]# kubectl get pods -L app    #增加一列APP,显示每个资源对象的app标签值(无该标签的显示为空)
NAME                          READY     STATUS    RESTARTS   AGE       APP
client                        1/1       Running   0          4h
myapp-848b5b879b-4cd8x        1/1       Running   0          4h
myapp-848b5b879b-rt8bs        1/1       Running   0          4h
myapp-848b5b879b-wmgd8        1/1       Running   0          4h
nginx-deploy-5b595999-n52b5   1/1       Running   0          4h
pod-demo                      2/2       Running   0          2m        myapp
[root@master manifests]# kubectl get pods -l app    #过滤只显示标签为app的资源
NAME       READY     STATUS    RESTARTS   AGE
pod-demo   2/2       Running   0          4m
[root@master manifests]# kubectl get pods -l app --show-labels
NAME       READY     STATUS    RESTARTS   AGE       LABELS
pod-demo   2/2       Running   0          5m        app=myapp,tier=frontend
[root@master manifests]# kubectl get pods -L app,run    #显示app标签和run标签值
NAME                          READY     STATUS    RESTARTS   AGE       APP       RUN
client                        1/1       Running   0          4h                  client
myapp-848b5b879b-4cd8x        1/1       Running   0          4h                  myapp
myapp-848b5b879b-rt8bs        1/1       Running   0          4h                  myapp
myapp-848b5b879b-wmgd8        1/1       Running   0          4h                  myapp
nginx-deploy-5b595999-n52b5   1/1       Running   0          4h                  nginx-deploy
pod-demo                      2/2       Running   0          6m        myapp
[root@master manifests]# kubectl label --help
[root@master manifests]# kubectl label pods pod-demo release=canary    #给pod-demo资源打标签
[root@master manifests]# kubectl get pods -l app --show-labels
NAME       READY     STATUS    RESTARTS   AGE       LABELS
pod-demo   2/2       Running   0          9m        app=myapp,release=canary,tier=frontend
[root@master manifests]# kubectl label pods pod-demo release=stable
error: 'release' already has a value (canary), and --overwrite is false
[root@master manifests]# kubectl label pods pod-demo release=stable --overwrite    #修改pod-demo中release标签值
[root@master manifests]# kubectl get pods -l app --show-labels
NAME       READY     STATUS    RESTARTS   AGE       LABELS
pod-demo   2/2       Running   0          11m       app=myapp,release=stable,tier=frontend
[root@master manifests]# kubectl get pods -l release
NAME       READY     STATUS    RESTARTS   AGE
pod-demo   2/2       Running   0          12m
[root@master manifests]# kubectl get pods -l release,app
NAME       READY     STATUS    RESTARTS   AGE
pod-demo   2/2       Running   0          12m
[root@master manifests]# kubectl get pods -l release=stable
NAME       READY     STATUS    RESTARTS   AGE
pod-demo   2/2       Running   0          17m
[root@master manifests]# kubectl get pods -l release=stable --show-labels
NAME       READY     STATUS    RESTARTS   AGE       LABELS
pod-demo   2/2       Running   0          17m       app=myapp,release=stable,tier=frontend
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS    RESTARTS   AGE
client                        1/1       Running   0          5h
myapp-848b5b879b-4cd8x        1/1       Running   0          4h
myapp-848b5b879b-rt8bs        1/1       Running   0          4h
myapp-848b5b879b-wmgd8        1/1       Running   0          4h
nginx-deploy-5b595999-n52b5   1/1       Running   0          5h
pod-demo                      2/2       Running   0          17m
[root@master manifests]# kubectl label pods nginx-deploy-5b595999-n52b5 release=canary
[root@master manifests]# kubectl get pods -l release
NAME                          READY     STATUS    RESTARTS   AGE
nginx-deploy-5b595999-n52b5   1/1       Running   0          5h
pod-demo                      2/2       Running   0          18m
[root@master manifests]# kubectl get pods -l release=canary
NAME                          READY     STATUS    RESTARTS   AGE
nginx-deploy-5b595999-n52b5   1/1       Running   0          5h
[root@master manifests]# kubectl get pods -l release,app
NAME       READY     STATUS    RESTARTS   AGE
pod-demo   2/2       Running   0          19m
[root@master manifests]# kubectl get pods -l release=stable,app=myapp
NAME       READY     STATUS    RESTARTS   AGE
pod-demo   2/2       Running   0          20m
[root@master manifests]# kubectl get pods -l release!=canary
NAME                     READY     STATUS    RESTARTS   AGE
client                   1/1       Running   0          5h
myapp-848b5b879b-4cd8x   1/1       Running   0          4h
myapp-848b5b879b-rt8bs   1/1       Running   0          4h
myapp-848b5b879b-wmgd8   1/1       Running   0          4h
pod-demo                 2/2       Running   0          20m
[root@master manifests]# kubectl get pods -l "release in (canary,beta,alpha)"
NAME                          READY     STATUS    RESTARTS   AGE
nginx-deploy-5b595999-n52b5   1/1       Running   0          5h
[root@master manifests]# kubectl get pods -l "release notin (canary,beta,alpha)"
NAME                     READY     STATUS    RESTARTS   AGE
client                   1/1       Running   0          5h
myapp-848b5b879b-4cd8x   1/1       Running   0          5h
myapp-848b5b879b-rt8bs   1/1       Running   0          5h
myapp-848b5b879b-wmgd8   1/1       Running   0          5h
pod-demo                 2/2       Running   0          25m
[root@master manifests]# kubectl get nodes
NAME               STATUS    ROLES     AGE       VERSION
master.smoke.com   Ready     master    7d        v1.11.1
node01.smoke.com   Ready     <none>    3d        v1.11.1
node02.smoke.com   Ready     <none>    3d        v1.11.1
[root@master manifests]# kubectl get nodes --show-labels
NAME               STATUS    ROLES     AGE       VERSION   LABELS
master.smoke.com   Ready     master    7d        v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=
master.smoke.com,node-role.kubernetes.io/master=
node01.smoke.com   Ready     <none>    3d        v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=
node01.smoke.com
node02.smoke.com   Ready     <none>    3d        v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=
node02.smoke.com
[root@master manifests]# kubectl label nodes node01.smoke.com disktype=ssd
[root@master manifests]# kubectl get nodes --show-labels
NAME               STATUS    ROLES     AGE       VERSION   LABELS
master.smoke.com   Ready     master    7d        v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=
master.smoke.com,node-role.kubernetes.io/master=
node01.smoke.com   Ready     <none>    3d        v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes
.io/hostname=node01.smoke.com
node02.smoke.com   Ready     <none>    3d        v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=
node02.smoke.com
[root@master manifests]# kubectl explain pods.spec
[root@master manifests]# kubectl get pods -o wide
NAME                          READY     STATUS    RESTARTS   AGE       IP            NODE
client                        1/1       Running   0          1d        10.244.2.3    node02.smoke.com
myapp-848b5b879b-4cd8x        1/1       Running   0          1d        10.244.1.7    node01.smoke.com
myapp-848b5b879b-rt8bs        1/1       Running   0          1d        10.244.2.9    node02.smoke.com
myapp-848b5b879b-wmgd8        1/1       Running   0          1d        10.244.2.10   node02.smoke.com
nginx-deploy-5b595999-n52b5   1/1       Running   0          1d        10.244.1.3    node01.smoke.com
pod-demo                      2/2       Running   2          23h       10.244.1.10   node01.smoke.com
[root@master manifests]# vim pod-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - name: http
      containerPort: 80
    - name: https
      containerPort: 443
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command:
    - "/bin/sh"
    - "-c"
    - "sleep 3600"
  nodeSelector:
    disktype: ssd    #节点标签选择器:Pod只会被调度到带有disktype=ssd标签的node上
[root@master manifests]# kubectl delete -f pod-demo.yaml
[root@master manifests]# kubectl create -f pod-demo.yaml
[root@master manifests]# kubectl describe pods pod-demo    #在node01上面
Name:               pod-demo
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               node01.smoke.com/172.20.0.66
Start Time:         Mon, 04 May 2020 20:49:12 +0800
Labels:             app=myapp
                    tier=frontend
Annotations:        <none>
Status:             Running
IP:                 10.244.1.11
Containers:
  myapp:
    Container ID:   docker://a485ca0bbde7e1fffe515df9f5dd275bfebcf94985ed33e9de12da4259049777
    Image:          ikubernetes/myapp:v1
    Image ID:       docker-pullable://ikubernetes/myapp@sha256:9c3dc30b5219788b2b8a4b065f548b922a34479577befb54b03330999d30d513
    Ports:          80/TCP, 443/TCP
    Host Ports:     0/TCP, 0/TCP
    State:          Running
      Started:      Mon, 04 May 2020 20:49:15 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xvxpl (ro)
  busybox:
    Container ID:  docker://4f57c25afd2e470399c47c4475dc18e8a952fbdb26c71e9fd6f3d7691f639824
    Image:         busybox:latest
    Image ID:      docker-pullable://busybox@sha256:a8cf7ff6367c2afa2a90acd081b484cbded349a7076e7bdf37a05279f276bc12
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/sh
      -c
      sleep 3600
    State:          Running
      Started:      Mon, 04 May 2020 20:49:16 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xvxpl (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  default-token-xvxpl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-xvxpl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  disktype=ssd
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age   From                       Message
  ----    ------     ----  ----                       -------
  Normal  Scheduled  24s   default-scheduler          Successfully assigned default/pod-demo to node01.smoke.com
  Normal  Pulled     21s   kubelet, node01.smoke.com  Container image "ikubernetes/myapp:v1" already present on machine
  Normal  Created    21s   kubelet, node01.smoke.com  Created container
  Normal  Started    21s   kubelet, node01.smoke.com  Started container
  Normal  Pulled     21s   kubelet, node01.smoke.com  Container image "busybox:latest" already present on machine
  Normal  Created    21s   kubelet, node01.smoke.com  Created container
  Normal  Started    20s   kubelet, node01.smoke.com  Started container
[root@master manifests]# kubectl delete -f pod-demo.yaml
[root@master manifests]# vim pod-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    smoke.com/created-by: "cluster admin"    #添加的annotations资源注解
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - name: http
      containerPort: 80
    - name: https
      containerPort: 443
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command:
    - "/bin/sh"
    - "-c"
    - "sleep 3600"
  nodeSelector:
    disktype: ssd
[root@master manifests]# kubectl create -f pod-demo.yaml
[root@master manifests]# kubectl describe pods pod-demo
Name:               pod-demo
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               node01.smoke.com/172.20.0.66
Start Time:         Mon, 04 May 2020 21:01:10 +0800
Labels:             app=myapp
                    tier=frontend
Annotations:        smoke.com/created-by=cluster admin
Status:             Running
IP:                 10.244.1.12
Containers:
  myapp:
    Container ID:   docker://047b9ff03f111899b195016c2501584b33a5ea698eb7846200fc6ae5918f332f
    Image:          ikubernetes/myapp:v1
    Image ID:       docker-pullable://ikubernetes/myapp@sha256:9c3dc30b5219788b2b8a4b065f548b922a34479577befb54b03330999d30d513
    Ports:          80/TCP, 443/TCP
    Host Ports:     0/TCP, 0/TCP
    State:          Running
      Started:      Mon, 04 May 2020 21:01:11 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xvxpl (ro)
  busybox:
    Container ID:  docker://a45568655e7700e72eb64ad468c73acfd9c2d7e51adca18e622d2c5cf4b9d796
    Image:         busybox:latest
    Image ID:      docker-pullable://busybox@sha256:a8cf7ff6367c2afa2a90acd081b484cbded349a7076e7bdf37a05279f276bc12
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/sh
      -c
      sleep 3600
    State:          Running
      Started:      Mon, 04 May 2020 21:01:12 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xvxpl (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  default-token-xvxpl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-xvxpl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  disktype=ssd
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age   From                       Message
  ----    ------     ----  ----                       -------
  Normal  Scheduled  15s   default-scheduler          Successfully assigned default/pod-demo to node01.smoke.com
  Normal  Pulled     14s   kubelet, node01.smoke.com  Container image "ikubernetes/myapp:v1" already present on machine
  Normal  Created    14s   kubelet, node01.smoke.com  Created container
  Normal  Started    14s   kubelet, node01.smoke.com  Started container
  Normal  Pulled     14s   kubelet, node01.smoke.com  Container image "busybox:latest" already present on machine
  Normal  Created    14s   kubelet, node01.smoke.com  Created container
  Normal  Started    13s   kubelet, node01.smoke.com  Started container
[root@master manifests]# kubectl explain pods.spec.containers
[root@master manifests]# kubectl explain pods.spec.containers.livenessProbe
[root@master manifests]# kubectl explain pods.spec.containers.livenessProbe.exec
[root@master manifests]# vim liveness-exec.yaml
apiVersion: v1
kind: Pod
metadata:
  name: liveness-exec-pod
  namespace: default
spec:
  containers:
  - name: liveness-exec-container
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh","-c","touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 3600"]
    livenessProbe:
      exec:
        command: ["test","-e","/tmp/healthy"]
      initialDelaySeconds: 1
      periodSeconds: 3
[root@master manifests]# kubectl create -f liveness-exec.yaml
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS    RESTARTS   AGE
client                        1/1       Running   0          2d
liveness-exec-pod             1/1       Running   8          19m
myapp-848b5b879b-4cd8x        1/1       Running   0          2d
myapp-848b5b879b-rt8bs        1/1       Running   0          2d
myapp-848b5b879b-wmgd8        1/1       Running   0          2d
nginx-deploy-5b595999-n52b5   1/1       Running   0          2d
pod-demo                      2/2       Running   2          1d
[root@master manifests]# kubectl get pods -w
NAME                          READY     STATUS    RESTARTS   AGE
client                        1/1       Running   0          2d
liveness-exec-pod             1/1       Running   8          19m
myapp-848b5b879b-4cd8x        1/1       Running   0          2d
myapp-848b5b879b-rt8bs        1/1       Running   0          2d
myapp-848b5b879b-wmgd8        1/1       Running   0          2d
nginx-deploy-5b595999-n52b5   1/1       Running   0          2d
pod-demo                      2/2       Running   2          1d
liveness-exec-pod   1/1       Running   9         20m
[root@master manifests]# kubectl describe pods liveness-exec-pod
[root@master manifests]# kubectl describe pods liveness-exec-pod
Name:               liveness-exec-pod
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               node02.smoke.com/172.20.0.67
Start Time:         Tue, 05 May 2020 21:41:16 +0800
Labels:             <none>
Annotations:        <none>
Status:             Running
IP:                 10.244.2.11
Containers:
  liveness-exec-container:
    Container ID:  docker://4ad4284271394fd34f46df09b5adb1a388044c4e41ba06d505007b40e8f3fa1e
    Image:         busybox:latest
    Image ID:      docker-pullable://busybox@sha256:a8cf7ff6367c2afa2a90acd081b484cbded349a7076e7bdf37a05279f276bc12
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/sh
      -c
      touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 3600
    State:          Running
      Started:      Tue, 05 May 2020 22:01:30 +0800
    Last State:     Terminated
      Reason:       Error
      Exit Code:    137
      Started:      Tue, 05 May 2020 22:00:22 +0800
      Finished:     Tue, 05 May 2020 22:01:29 +0800
    Ready:          True
    Restart Count:  9
    Liveness:       exec [test -e /tmp/healthy] delay=1s timeout=1s period=3s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xvxpl (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  default-token-xvxpl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-xvxpl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                       Message
  ----     ------     ----                ----                       -------
  Normal   Scheduled  21m                 default-scheduler          Successfully assigned default/liveness-exec-pod to node02.smoke.com
  Normal   Pulled     17m (x4 over 20m)   kubelet, node02.smoke.com  Container image "busybox:latest" already present on machine
  Normal   Created    17m (x4 over 20m)   kubelet, node02.smoke.com  Created container
  Normal   Started    17m (x4 over 20m)   kubelet, node02.smoke.com  Started container
  Normal   Killing    17m (x3 over 19m)   kubelet, node02.smoke.com  Killing container with id docker://liveness-exec-container:Container 
failed liveness probe.. Container will be killed and recreated.
  Warning  BackOff    5m (x30 over 13m)   kubelet, node02.smoke.com  Back-off restarting failed container
  Warning  Unhealthy  33s (x28 over 20m)  kubelet, node02.smoke.com  Liveness probe failed:
[root@master manifests]# kubectl get pods -w
NAME                          READY     STATUS             RESTARTS   AGE
client                        1/1       Running            0          2d
liveness-exec-pod             0/1       CrashLoopBackOff   9          23m
myapp-848b5b879b-4cd8x        1/1       Running            0          2d
myapp-848b5b879b-rt8bs        1/1       Running            0          2d
myapp-848b5b879b-wmgd8        1/1       Running            0          2d
nginx-deploy-5b595999-n52b5   1/1       Running            0          2d
pod-demo                      2/2       Running            2          1d
[root@master manifests]# kubectl explain pods.spec.containers.livenessProbe.tcpSocket
[root@master ~]# kubectl explain pods.spec.containers.livenessProbe.httpGet
[root@master manifests]# cp liveness-exec.yaml liveness-httpget.yaml
[root@master manifests]# vim liveness-httpget.yaml
apiVersion: v1
kind: Pod
metadata:
  name: liveness-httpget-pod
  namespace: default
spec:
  containers:
  - name: liveness-httpget-container
    image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    livenessProbe:
      httpGet:
        port: http
        path: /index.html
      initialDelaySeconds: 1
      periodSeconds: 3
[root@master manifests]# kubectl create -f liveness-httpget.yaml
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS             RESTARTS   AGE
client                        1/1       Running            0          3d
liveness-exec-pod             0/1       CrashLoopBackOff   31         22h
liveness-httpget-pod          1/1       Running            0          16s
myapp-848b5b879b-4cd8x        1/1       Running            0          3d
myapp-848b5b879b-rt8bs        1/1       Running            0          3d
myapp-848b5b879b-wmgd8        1/1       Running            0          3d
nginx-deploy-5b595999-n52b5   1/1       Running            0          3d
pod-demo                      2/2       Running            3          1d
[root@master manifests]# kubectl describe pods liveness-httpget-pod
Name:               liveness-httpget-pod
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               node02.smoke.com/172.20.0.67
Start Time:         Wed, 06 May 2020 20:22:17 +0800
Labels:             <none>
Annotations:        <none>
Status:             Running
IP:                 10.244.2.12
Containers:
  liveness-httpget-container:
    Container ID:   docker://48a2b4249e2540e8f4767227d878a66372fe29c0e1ce7d36c5513322c3a80563
    Image:          ikubernetes/myapp:v1
    Image ID:       docker-pullable://ikubernetes/myapp@sha256:9c3dc30b5219788b2b8a4b065f548b922a34479577befb54b03330999d30d513
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Wed, 06 May 2020 20:22:19 +0800
    Ready:          True
    Restart Count:  0
    Liveness:       http-get http://:http/index.html delay=1s timeout=1s period=3s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xvxpl (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  default-token-xvxpl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-xvxpl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age   From                       Message
  ----    ------     ----  ----                       -------
  Normal  Scheduled  1m    default-scheduler          Successfully assigned default/liveness-httpget-pod to node02.smoke.com
  Normal  Pulled     1m    kubelet, node02.smoke.com  Container image "ikubernetes/myapp:v1" already present on machine
  Normal  Created    1m    kubelet, node02.smoke.com  Created container
  Normal  Started    1m    kubelet, node02.smoke.com  Started container
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS             RESTARTS   AGE
client                        1/1       Running            0          3d
liveness-exec-pod             0/1       CrashLoopBackOff   31         22h
liveness-httpget-pod          1/1       Running            0          2m
myapp-848b5b879b-4cd8x        1/1       Running            0          3d
myapp-848b5b879b-rt8bs        1/1       Running            0          3d
myapp-848b5b879b-wmgd8        1/1       Running            0          3d
nginx-deploy-5b595999-n52b5   1/1       Running            0          3d
pod-demo                      2/2       Running            3          1d
[root@master manifests]# kubectl exec -it liveness-httpget-pod -- /bin/sh
/ # rm -f /usr/share/nginx/html/index.html
[root@master manifests]# kubectl describe pods liveness-httpget-pod
Name:               liveness-httpget-pod
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               node02.smoke.com/172.20.0.67
Start Time:         Wed, 06 May 2020 20:22:17 +0800
Labels:             <none>
Annotations:        <none>
Status:             Running
IP:                 10.244.2.12
Containers:
  liveness-httpget-container:
    Container ID:   docker://c2e234ceca0b29359045304f4c56ea9cf4173d131204196b4546148fe17236e8
    Image:          ikubernetes/myapp:v1
    Image ID:       docker-pullable://ikubernetes/myapp@sha256:9c3dc30b5219788b2b8a4b065f548b922a34479577befb54b03330999d30d513
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Wed, 06 May 2020 20:25:35 +0800
    Last State:     Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Wed, 06 May 2020 20:22:19 +0800
      Finished:     Wed, 06 May 2020 20:25:35 +0800
    Ready:          True
    Restart Count:  1
    Liveness:       http-get http://:http/index.html delay=1s timeout=1s period=3s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xvxpl (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  default-token-xvxpl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-xvxpl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                From                       Message
  ----     ------     ----               ----                       -------
  Normal   Scheduled  3m                 default-scheduler          Successfully assigned default/liveness-httpget-pod to node02.smoke.com
  Warning  Unhealthy  35s (x3 over 41s)  kubelet, node02.smoke.com  Liveness probe failed: HTTP probe failed with statuscode: 404
  Normal   Pulled     34s (x2 over 3m)   kubelet, node02.smoke.com  Container image "ikubernetes/myapp:v1" already present on machine
  Normal   Created    34s (x2 over 3m)   kubelet, node02.smoke.com  Created container
  Normal   Started    34s (x2 over 3m)   kubelet, node02.smoke.com  Started container
  Normal   Killing    34s                kubelet, node02.smoke.com  Killing container with id docker://liveness-httpget-container:Container
 failed liveness probe.. Container will be killed and recreated.
[root@master manifests]# kubectl exec -it liveness-httpget-pod -- /bin/sh
/ # ls /usr/share/nginx/html/
50x.html    index.html
/ # exit
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS             RESTARTS   AGE
client                        1/1       Running            0          3d
liveness-exec-pod             0/1       CrashLoopBackOff   33         22h
liveness-httpget-pod          1/1       Running            1          5m
myapp-848b5b879b-4cd8x        1/1       Running            0          3d
myapp-848b5b879b-rt8bs        1/1       Running            0          3d
myapp-848b5b879b-wmgd8        1/1       Running            0          3d
nginx-deploy-5b595999-n52b5   1/1       Running            0          3d
pod-demo                      2/2       Running            3          1d
[root@master manifests]# kubectl delete -f liveness-exec.yaml
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS    RESTARTS   AGE
client                        1/1       Running   0          3d
liveness-httpget-pod          1/1       Running   1          22m
myapp-848b5b879b-4cd8x        1/1       Running   0          3d
myapp-848b5b879b-rt8bs        1/1       Running   0          3d
myapp-848b5b879b-wmgd8        1/1       Running   0          3d
nginx-deploy-5b595999-n52b5   1/1       Running   0          3d
pod-demo                      2/2       Running   3          1d
[root@master manifests]# cp liveness-httpget.yaml readiness-httpdget.yaml
[root@master manifests]# vim readiness-httpdget.yaml
# Demo: HTTP readiness probe. When the probe fails (index.html deleted), the
# pod is marked NotReady (READY 0/1) and is removed from Service endpoints,
# but — unlike a liveness probe — the container is NOT restarted (RESTARTS
# stays 0 in the output below). Restoring index.html makes it Ready again.
apiVersion: v1
kind: Pod
metadata:
  name: readiness-httpget-pod
  namespace: default
spec:
  containers:
  - name: readiness-httpget-container
    image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    readinessProbe:
      httpGet:
        # "http" refers to the named containerPort above (port 80).
        port: http
        path: /index.html
      initialDelaySeconds: 1
      # Defaults apply for the rest: timeout=1s, #success=1, #failure=3
      # (visible in the `kubectl describe` Readiness line below).
      periodSeconds: 3
[root@master manifests]# kubectl create -f readiness-httpdget.yaml
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS    RESTARTS   AGE
client                        1/1       Running   0          3d
liveness-httpget-pod          1/1       Running   1          30m
myapp-848b5b879b-4cd8x        1/1       Running   0          3d
myapp-848b5b879b-rt8bs        1/1       Running   0          3d
myapp-848b5b879b-wmgd8        1/1       Running   0          3d
nginx-deploy-5b595999-n52b5   1/1       Running   0          3d
pod-demo                      2/2       Running   3          1d
readiness-httpget-pod         1/1       Running   0          14s
[root@master manifests]# kubectl exec -it readiness-httpget-pod -- /bin/sh
/ # rm -f /usr/share/nginx/html/index.html
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS    RESTARTS   AGE
client                        1/1       Running   0          3d
liveness-httpget-pod          1/1       Running   1          33m
myapp-848b5b879b-4cd8x        1/1       Running   0          3d
myapp-848b5b879b-rt8bs        1/1       Running   0          3d
myapp-848b5b879b-wmgd8        1/1       Running   0          3d
nginx-deploy-5b595999-n52b5   1/1       Running   0          3d
pod-demo                      2/2       Running   3          1d
readiness-httpget-pod         0/1       Running   0          3m
/ # ps aux    #readiness-httpget-pod
PID   USER     TIME   COMMAND
    1 root       0:00 nginx: master process nginx -g daemon off;
    6 nginx      0:00 nginx: worker process
   13 root       0:00 /bin/sh
   18 root       0:00 ps aux
/ # echo "hi" >> /usr/share/nginx/html/index.html
/ # exit
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS    RESTARTS   AGE
client                        1/1       Running   0          3d
liveness-httpget-pod          1/1       Running   1          35m
myapp-848b5b879b-4cd8x        1/1       Running   0          3d
myapp-848b5b879b-rt8bs        1/1       Running   0          3d
myapp-848b5b879b-wmgd8        1/1       Running   0          3d
nginx-deploy-5b595999-n52b5   1/1       Running   0          3d
pod-demo                      2/2       Running   4          1d
readiness-httpget-pod         1/1       Running   0          5m
[root@master manifests]# kubectl describe pods readiness-httpget-pod
Name:               readiness-httpget-pod
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               node02.smoke.com/172.20.0.67
Start Time:         Wed, 06 May 2020 20:52:16 +0800
Labels:             <none>
Annotations:        <none>
Status:             Running
IP:                 10.244.2.13
Containers:
  readiness-httpget-container:
    Container ID:   docker://69020d8ccf42ba071fce00d36bd11556e34005d53d3c472c7fdc68a392da0615
    Image:          ikubernetes/myapp:v1
    Image ID:       docker-pullable://ikubernetes/myapp@sha256:9c3dc30b5219788b2b8a4b065f548b922a34479577befb54b03330999d30d513
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Wed, 06 May 2020 20:52:17 +0800
    Ready:          True
    Restart Count:  0
    Readiness:      http-get http://:http/index.html delay=1s timeout=1s period=3s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xvxpl (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  default-token-xvxpl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-xvxpl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age               From                       Message
  ----     ------     ----              ----                       -------
  Normal   Scheduled  8m                default-scheduler          Successfully assigned default/readiness-httpget-pod to node02.smoke.com
  Normal   Pulled     8m                kubelet, node02.smoke.com  Container image "ikubernetes/myapp:v1" already present on machine
  Normal   Created    8m                kubelet, node02.smoke.com  Created container
  Normal   Started    8m                kubelet, node02.smoke.com  Started container
  Warning  Unhealthy  4m (x22 over 5m)  kubelet, node02.smoke.com  Readiness probe failed: HTTP probe failed with statuscode: 404
[root@master manifests]# kubectl explain pods.spec.containers.lifecycle
[root@master manifests]# kubectl explain pods.spec.containers.lifecycle.postStart
[root@master manifests]# kubectl explain pods.spec.containers.lifecycle.preStop
[root@master manifests]# vim poststart-pod.yaml
# Attempt 1 of the postStart lifecycle-hook demo — FAILS (CrashLoopBackOff
# below). Two problems:
#   (a) args passes "-h /data/web/html" as ONE argv token, so busybox httpd
#       receives a home directory " /data/web/html" (leading space) that does
#       not exist and exits immediately; it should be three items:
#       ["-f","-h","/data/web/html"].
#   (b) NOTE(review): postStart runs asynchronously with the container's
#       command — there is no guarantee the hook creates the directory before
#       httpd starts, so this pattern is racy even with correct args.
apiVersion: v1
kind: Pod
metadata:
    name: poststart-pod
    namespace: default
spec:
    containers:
    - name: busybox-httpd
      image: busybox:latest
      imagePullPolicy: IfNotPresent
      lifecycle:
        postStart:
          exec:
            command: ["/bin/sh","-c","mkdir -p /data/web/html; echo 'Home Page' >> /data/web/html/index.html"]
      command: ["/bin/httpd"]
      args: ["-f","-h /data/web/html"]
[root@master manifests]# kubectl create -f poststart-pod.yaml
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS             RESTARTS   AGE
client                        1/1       Running            0          3d
liveness-httpget-pod          1/1       Running            1          1h
myapp-848b5b879b-4cd8x        1/1       Running            0          3d
myapp-848b5b879b-rt8bs        1/1       Running            0          3d
myapp-848b5b879b-wmgd8        1/1       Running            0          3d
nginx-deploy-5b595999-n52b5   1/1       Running            0          3d
pod-demo                      2/2       Running            4          2d
poststart-pod                 0/1       CrashLoopBackOff   6          5m
readiness-httpget-pod         1/1       Running            0          35m
[root@master manifests]# kubectl delete -f poststart-pod.yaml
[root@master manifests]# vim poststart-pod.yaml
# Attempt 2 — only change from attempt 1: the quoted 'Home Page' becomes
# Home_Page (ruling out shell quoting as the cause). Still CrashLoopBackOff,
# because the real problem is unchanged: "-h /data/web/html" is a single
# argv token, so httpd's home directory argument is invalid and the main
# process exits at startup.
apiVersion: v1
kind: Pod
metadata:
    name: poststart-pod
    namespace: default
spec:
    containers:
    - name: busybox-httpd
      image: busybox:latest
      imagePullPolicy: IfNotPresent
      lifecycle:
        postStart:
          exec:
            command: ["/bin/sh","-c","mkdir -p /data/web/html; echo Home_Page >> /data/web/html/index.html"]
      command: ["/bin/httpd"]
      args: ["-f","-h /data/web/html"]
[root@master manifests]# kubectl create -f poststart-pod.yaml
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS             RESTARTS   AGE
client                        1/1       Running            0          3d
liveness-httpget-pod          1/1       Running            1          1h
myapp-848b5b879b-4cd8x        1/1       Running            0          3d
myapp-848b5b879b-rt8bs        1/1       Running            0          3d
myapp-848b5b879b-wmgd8        1/1       Running            0          3d
nginx-deploy-5b595999-n52b5   1/1       Running            0          3d
pod-demo                      2/2       Running            4          2d
poststart-pod                 0/1       CrashLoopBackOff   1          14s
readiness-httpget-pod         1/1       Running            0          38m
[root@master manifests]# kubectl delete -f poststart-pod.yaml
[root@master manifests]# vim poststart-pod.yaml
# Attempt 3 — only change: ";" becomes "&&" in the hook script. Still fails;
# the status below now surfaces PostStartHookError "exited with 126", i.e.
# the hook could not exec because the container had already stopped — httpd
# dies instantly (same bad "-h /data/web/html" single-token arg), and the
# hook is killed along with it.
apiVersion: v1
kind: Pod
metadata:
    name: poststart-pod
    namespace: default
spec:
    containers:
    - name: busybox-httpd
      image: busybox:latest
      imagePullPolicy: IfNotPresent
      lifecycle:
        postStart:
          exec:
            command: ["/bin/sh","-c","mkdir -p /data/web/html && echo Home_Page >> /data/web/html/index.html"]
      command: ["/bin/httpd"]
      args: ["-f","-h /data/web/html"]
[root@master manifests]# kubectl create -f poststart-pod.yaml
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS                                            RESTARTS   AGE
client                        1/1       Running                                           0          3d
liveness-httpget-pod          1/1       Running                                           1          1h
myapp-848b5b879b-4cd8x        1/1       Running                                           0          3d
myapp-848b5b879b-rt8bs        1/1       Running                                           0          3d
myapp-848b5b879b-wmgd8        1/1       Running                                           0          3d
nginx-deploy-5b595999-n52b5   1/1       Running                                           0          3d
pod-demo                      2/2       Running                                           4          2d
poststart-pod                 0/1       PostStartHookError:
 command '/bin/sh -c mkdir -p /data/web/html && echo Home_Page >> /data/web/html/
index.html' exited with 126:    0          3s
readiness-httpget-pod         1/1       Running                                           0          40m
[root@master manifests]# kubectl delete -f poststart-pod.yaml
[root@master manifests]# vim poststart-pod.yaml
# Attempt 4 — switches to the pre-existing /tmp so the hook no longer needs
# mkdir. Still CrashLoopBackOff: `kubectl describe` below shows
# Args: [-f, "-h /tmp"] and Exit Code 1 — the home-directory value httpd
# receives is " /tmp" (leading space, one token), which does not exist, so
# the main process exits and the hook then fails with 126
# ("cannot exec in a stopped state"). The fix would be
# args: ["-f","-h","/tmp"].
apiVersion: v1
kind: Pod
metadata:
    name: poststart-pod
    namespace: default
spec:
    containers:
    - name: busybox-httpd
      image: busybox:latest
      imagePullPolicy: IfNotPresent
      lifecycle:
        postStart:
          exec:
            command: ["/bin/sh","-c","echo Home_Page >> /tmp/index.html"]
      command: ["/bin/httpd"]
      args: ["-f","-h /tmp"]
[root@master manifests]# kubectl create -f poststart-pod.yaml
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS             RESTARTS   AGE
client                        1/1       Running            0          3d
liveness-httpget-pod          1/1       Running            1          1h
myapp-848b5b879b-4cd8x        1/1       Running            0          3d
myapp-848b5b879b-rt8bs        1/1       Running            0          3d
myapp-848b5b879b-wmgd8        1/1       Running            0          3d
nginx-deploy-5b595999-n52b5   1/1       Running            0          3d
pod-demo                      2/2       Running            4          2d
poststart-pod                 0/1       CrashLoopBackOff   1          5s
readiness-httpget-pod         1/1       Running            0          44m
[root@master manifests]# kubectl describe pods poststart-pod
Name:               poststart-pod
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               node01.smoke.com/172.20.0.66
Start Time:         Wed, 06 May 2020 21:46:25 +0800
Labels:             <none>
Annotations:        <none>
Status:             Running
IP:                 10.244.1.15
Containers:
  busybox-httpd:
    Container ID:  docker://df36185d0ec299c138116b7888e466c6e33fc8a8d7e10730236da29828db210d
    Image:         busybox:latest
    Image ID:      docker-pullable://busybox@sha256:a8cf7ff6367c2afa2a90acd081b484cbded349a7076e7bdf37a05279f276bc12
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/httpd
    Args:
      -f
      -h /tmp
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       Error
      Exit Code:    1
      Started:      Wed, 06 May 2020 21:47:14 +0800
      Finished:     Wed, 06 May 2020 21:47:14 +0800
    Ready:          False
    Restart Count:  3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xvxpl (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             False
  ContainersReady   False
  PodScheduled      True
Volumes:
  default-token-xvxpl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-xvxpl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason               Age               From                       Message
  ----     ------               ----              ----                       -------
  Normal   Scheduled            1m                default-scheduler          Successfully assigned default/poststart-pod to node01.smoke.com
  Normal   Pulled               24s (x4 over 1m)  kubelet, node01.smoke.com  Container image "busybox:latest" already present on machine
  Normal   Created              24s (x4 over 1m)  kubelet, node01.smoke.com  Created container
  Normal   Started              24s (x4 over 1m)  kubelet, node01.smoke.com  Started container
  Warning  FailedPostStartHook  24s (x4 over 1m)  kubelet, node01.smoke.com  Exec lifecycle hook ([/bin/sh -c echo Home_Page >> /tmp/index.h
tml]) for Container "busybox-httpd" in Pod "poststart-pod_default(fa595d00-8f9f-11ea-8ad3-000c2967e570)" failed - error: command '/bin/sh -c
 echo Home_Page >> /tmp/index.html' exited with 126: , message: "cannot exec in a stopped state: unknown\r\n"
  Normal   Killing              24s (x4 over 1m)  kubelet, node01.smoke.com  Killing container with id docker://busybox-httpd:FailedPostStar
tHook
  Warning  BackOff              23s (x5 over 1m)  kubelet, node01.smoke.com  Back-off restarting failed container
[root@master manifests]# kubectl delete -f poststart-pod.yaml

# 结论 / Conclusion: every poststart-pod attempt failed for the same reason —
# the YAML passes "-h /tmp" (or "-h /data/web/html") to busybox httpd as ONE
# argument, so the home directory httpd receives starts with a space and does
# not exist; httpd exits with code 1 immediately, and the postStart hook then
# fails with exit 126 ("cannot exec in a stopped state"). Fix: split the args
# into separate list items, e.g. args: ["-f","-h","/tmp"]. Also note that
# postStart runs asynchronously with the container's command, so a hook should
# not be relied on to prepare files the main process needs at startup.