apiVersion, kind, metadata, spec, status(只读)

spec:

  containers

  nodeSelector

  nodeName

  restartPolicy:

    Always, Never, OnFailure

  containers:

    name

    image

    imagePullPolicy: Always、Never、IfNotPresent

    ports:

      name

      containerPort

    livenessProbe

    readinessProbe

    lifecycle

  ExecAction: exec

  TCPSocketAction:tcpSocket

  HTTPGetAction: httpGet

 

Pod控制器:

  ReplicationController:

  ReplicaSet:

  Deployment:

  DaemonSet:

  Job:

  CronJob:

  StatefulSet

  TPR: Third Party Resources, 1.2+, 1.7

  CRD: Custom Resource Definitions, 1.8+

 

  Operator:

 

ReplicaSet控制器示例:

# ReplicaSet example: maintains `replicas` copies of pods matching the selector.
apiVersion: apps/v1

kind: ReplicaSet

metadata:

  name: myapp

  namespace: default

spec:

  # desired number of pod replicas
  replicas: 2

  # pods are selected by these labels; the template's labels below include them
  selector:

    matchLabels:

      app: myapp

      release: canary

  # pod template used when the controller creates/replaces pods
  template:

    metadata:

      name: myapp-pod

      labels:

        app: myapp

        release: canary

        environment: qa

    spec:

      containers:

      - name: myapp-container

        image: ikubernetes/myapp:v1

        ports:

        - name: http

          containerPort: 80

 

Deployment控制器示例:

# Deployment example: manages a ReplicaSet of 3 myapp:v2 pods with rolling updates.
# (Original had broken/mixed indentation; normalized to 2-space block style.)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  # desired number of pod replicas
  replicas: 3
  # pods are selected by these labels; template labels below include them
  selector:
    matchLabels:
      app: myapp
      release: canary
  # pod template used when the controller creates/replaces pods
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80

 

DaemonSet控制器示例:

# DaemonSet example: runs one filebeat pod on every node, shipping logs to Redis.
# (Original had broken/mixed indentation; normalized to 2-space block style.)
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat-ds
  namespace: default
spec:
  # pods are selected by these labels; template labels below include them
  selector:
    matchLabels:
      app: filebeat
      release: stable
  # pod template used when the controller creates/replaces pods
  template:
    metadata:
      labels:
        app: filebeat
        release: stable
    spec:
      containers:
      - name: filebeat
        image: ikubernetes/filebeat:5.6.5-alpine
        env:
        # Redis endpoint filebeat ships logs to (cluster-internal service DNS name)
        - name: REDIS_HOST
          value: redis.default.svc.cluster.local
        - name: REDIS_LOG_LEVEL
          value: info

 

 

 

 

master:

[root@master manifests]# kubectl get pods
NAME                          READY     STATUS             RESTARTS   AGE
client                        1/1       Running            0          4d
liveness-httpget-pod          1/1       Running            1          1d
myapp-848b5b879b-4cd8x        1/1       Running            0          4d
myapp-848b5b879b-rt8bs        1/1       Running            0          4d
myapp-848b5b879b-wmgd8        1/1       Running            0          4d
nginx-deploy-5b595999-n52b5   1/1       Running            0          4d
pod-demo                      2/2       Running            6          2d
poststart-pod                 0/1       CrashLoopBackOff   29         22h
readiness-httpget-pod         1/1       Running            0          23h
[root@master manifests]# kubectl delete -f poststart-pod.yaml
[root@master manifests]# kubectl explain rs
[root@master manifests]# kubectl explain rs.spec
[root@master manifests]# kubectl explain rs.spec.template
[root@master manifests]# kubectl explain rs.spec.template.spec
[root@master manifests]# kubectl get pods
NAME                          READY     STATUS    RESTARTS   AGE
client                        1/1       Running   0          4d
liveness-httpget-pod          1/1       Running   1          1d
myapp-848b5b879b-4cd8x        1/1       Running   0          4d
myapp-848b5b879b-rt8bs        1/1       Running   0          4d
myapp-848b5b879b-wmgd8        1/1       Running   0          4d
nginx-deploy-5b595999-n52b5   1/1       Running   0          4d
pod-demo                      2/2       Running   7          3d
readiness-httpget-pod         1/1       Running   0          1d
[root@master manifests]# kubectl get deploy
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
myapp          3         3         3            3           4d
nginx-deploy   1         1         1            1           4d
[root@master manifests]# kubectl delete deploy myapp
[root@master manifests]# kubectl delete deploy nginx-deploy
[root@master manifests]# vim rs-demo.yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
    name: myapp
    namespace: default
spec:
    replicas: 2
    selector:
        matchLabels:
            app: myapp
            release: canary
    template:
        metadata:
            name: myapp-pod
            labels:
                app: myapp
                release: canary
                environment: qa
        spec:
            containers:
            - name: myapp-container
              image: ikubernetes/myapp:v1
              ports:
              - name: http
                containerPort: 80
[root@master manifests]# kubectl create -f rs-demo.yaml
[root@master manifests]# kubectl get rs
NAME      DESIRED   CURRENT   READY     AGE
myapp     2         2         2         13s
[root@master manifests]# kubectl get pods
NAME                    READY     STATUS    RESTARTS   AGE
client                  1/1       Running   0          4d
liveness-httpget-pod    1/1       Running   1          1d
myapp-5qpr9             1/1       Running   0          5m
myapp-jbzwq             1/1       Running   0          5m
pod-demo                2/2       Running   7          3d
readiness-httpget-pod   1/1       Running   0          1d
[root@master manifests]# kubectl describe pods myapp-5qpr9
Name:               myapp-5qpr9
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               node02.smoke.com/172.20.0.67
Start Time:         Thu, 07 May 2020 21:43:32 +0800
Labels:             app=myapp
                    environment=qa
                    release=canary
Annotations:        <none>
Status:             Running
IP:                 10.244.2.16
Controlled By:      ReplicaSet/myapp
Containers:
  myapp-container:
    Container ID:   docker://dc7def392a67797e3051b835bdc8946f3a4404a7faf63c3771a34fdceb2956bb
    Image:          ikubernetes/myapp:v1
    Image ID:       docker-pullable://ikubernetes/myapp@sha256:9c3dc30b5219788b2b8a4b065f548b922a34479577befb54b03330999d30d513
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Thu, 07 May 2020 21:43:37 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xvxpl (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  default-token-xvxpl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-xvxpl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age   From                       Message
  ----    ------     ----  ----                       -------
  Normal  Scheduled  6m    default-scheduler          Successfully assigned default/myapp-5qpr9 to node02.smoke.com
  Normal  Pulled     6m    kubelet, node02.smoke.com  Container image "ikubernetes/myapp:v1" already present on machine
  Normal  Created    6m    kubelet, node02.smoke.com  Created container
  Normal  Started    6m    kubelet, node02.smoke.com  Started container
[root@master manifests]# curl 10.244.2.16
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@master manifests]# kubectl get pods
NAME                    READY     STATUS    RESTARTS   AGE
client                  1/1       Running   0          5d
liveness-httpget-pod    1/1       Running   1          2d
myapp-5qpr9             1/1       Running   0          22h
myapp-jbzwq             1/1       Running   0          22h
pod-demo                2/2       Running   10         3d
readiness-httpget-pod   1/1       Running   0          1d
[root@master manifests]# kubectl delete pods myapp-5qpr9
[root@master manifests]# kubectl get pods
NAME                    READY     STATUS    RESTARTS   AGE
client                  1/1       Running   0          5d
liveness-httpget-pod    1/1       Running   1          2d
myapp-4kmnr             1/1       Running   0          17s
myapp-jbzwq             1/1       Running   0          22h
pod-demo                2/2       Running   10         3d
readiness-httpget-pod   1/1       Running   0          1d
[root@master manifests]# kubectl get pods --show-labels
NAME                    READY     STATUS    RESTARTS   AGE       LABELS
client                  1/1       Running   0          5d        run=client
liveness-httpget-pod    1/1       Running   1          2d        <none>
myapp-4kmnr             1/1       Running   0          2m        app=myapp,environment=qa,release=canary
myapp-jbzwq             1/1       Running   0          22h       app=myapp,environment=qa,release=canary
pod-demo                2/2       Running   10         3d        app=myapp,tier=frontend
readiness-httpget-pod   1/1       Running   0          1d        <none>
[root@master manifests]# kubectl label pods pod-demo release=canary
[root@master manifests]# kubectl get pods --show-labels
NAME                    READY     STATUS    RESTARTS   AGE       LABELS
client                  1/1       Running   0          5d        run=client
liveness-httpget-pod    1/1       Running   1          2d        <none>
myapp-jbzwq             1/1       Running   0          22h       app=myapp,environment=qa,release=canary
pod-demo                2/2       Running   10         3d        app=myapp,release=canary,tier=frontend
readiness-httpget-pod   1/1       Running   0          1d        <none>
[root@master manifests]# kubectl delete pods pod-demo
[root@master manifests]# kubectl get pods
NAME                    READY     STATUS    RESTARTS   AGE
client                  1/1       Running   0          5d
liveness-httpget-pod    1/1       Running   1          2d
myapp-7j7sr             1/1       Running   0          4m
myapp-jbzwq             1/1       Running   0          22h
readiness-httpget-pod   1/1       Running   0          1d
[root@master manifests]# kubectl edit rs myapp
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  creationTimestamp: 2020-05-07T13:43:31Z
  generation: 1
  name: myapp
  namespace: default
  resourceVersion: "246101"
  selfLink: /apis/extensions/v1beta1/namespaces/default/replicasets/myapp
  uid: bd8085ae-9068-11ea-8ad3-000c2967e570
spec:
  replicas: 5    #把数量从2个改成5个
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: myapp
        environment: qa
        release: canary
      name: myapp-pod
    spec:
      containers:
      - image: ikubernetes/myapp:v1
        imagePullPolicy: IfNotPresent
        name: myapp-container
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 2
  fullyLabeledReplicas: 2
  observedGeneration: 1
  readyReplicas: 2
  replicas: 2
[root@master manifests]# kubectl get pods
NAME                    READY     STATUS    RESTARTS   AGE
client                  1/1       Running   0          5d
liveness-httpget-pod    1/1       Running   1          2d
myapp-7j7sr             1/1       Running   0          21m
myapp-cm645             1/1       Running   0          2m
myapp-fnd2q             1/1       Running   0          2m
myapp-hf52s             1/1       Running   0          2m
myapp-jbzwq             1/1       Running   0          23h
readiness-httpget-pod   1/1       Running   0          2d
[root@master manifests]# kubectl edit rs myapp
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  creationTimestamp: 2020-05-07T13:43:31Z
  generation: 2
  labels:
    app: myapp
    environment: qa
    release: canary
  name: myapp
  namespace: default
  resourceVersion: "247679"
  selfLink: /apis/extensions/v1beta1/namespaces/default/replicasets/myapp
  uid: bd8085ae-9068-11ea-8ad3-000c2967e570
spec:
  replicas: 5
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: myapp
        environment: qa
        release: canary
      name: myapp-pod
    spec:
      containers:
      - image: ikubernetes/myapp:v2    #将镜像从v1改为v2
        imagePullPolicy: IfNotPresent
        name: myapp-container
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 5
  fullyLabeledReplicas: 5
  observedGeneration: 2
  readyReplicas: 5
  replicas: 5
[root@master manifests]# kubectl get pods -o wide
NAME                    READY     STATUS    RESTARTS   AGE       IP            NODE
client                  1/1       Running   0          6d        10.244.2.3    node02.smoke.com
liveness-httpget-pod    1/1       Running   1          3d        10.244.2.12   node02.smoke.com
myapp-7j7sr             1/1       Running   0          1d        10.244.2.18   node02.smoke.com
myapp-cm645             1/1       Running   0          1d        10.244.1.18   node01.smoke.com
myapp-fnd2q             1/1       Running   0          1d        10.244.2.19   node02.smoke.com
myapp-hf52s             1/1       Running   0          1d        10.244.1.17   node01.smoke.com
myapp-jbzwq             1/1       Running   0          2d        10.244.1.16   node01.smoke.com
readiness-httpget-pod   1/1       Running   0          3d        10.244.2.13   node02.smoke.com
[root@master manifests]# kubectl get rs -o wide
NAME      DESIRED   CURRENT   READY     AGE       CONTAINERS        IMAGES                 SELECTOR
myapp     5         5         5         23h       myapp-container   ikubernetes/myapp:v2   app=myapp,release=canary
[root@master manifests]# curl 10.244.1.17    #还是v1版本,改了控制器之后已有的pod资源并不会改掉,因为已存在的pod资源不会被重建,只有重建的pod资源它的版本才是v2;
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@master manifests]# kubectl delete pods myapp-7j7sr
[root@master manifests]# kubectl get pods -o wide
NAME                    READY     STATUS    RESTARTS   AGE       IP            NODE
client                  1/1       Running   0          6d        10.244.2.3    node02.smoke.com
liveness-httpget-pod    1/1       Running   1          3d        10.244.2.12   node02.smoke.com
myapp-6npts             1/1       Running   0          20s       10.244.2.20   node02.smoke.com
myapp-cm645             1/1       Running   0          1d        10.244.1.18   node01.smoke.com
myapp-fnd2q             1/1       Running   0          1d        10.244.2.19   node02.smoke.com
myapp-hf52s             1/1       Running   0          1d        10.244.1.17   node01.smoke.com
myapp-jbzwq             1/1       Running   0          2d        10.244.1.16   node01.smoke.com
readiness-httpget-pod   1/1       Running   0          3d        10.244.2.13   node02.smoke.com
[root@master manifests]# curl 10.244.2.20
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
[root@master manifests]# curl 10.244.1.18
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@master manifests]# kubectl explain deploy
[root@master manifests]# kubectl explain deploy.spec
[root@master manifests]# kubectl explain deploy.spec.strategy
[root@master manifests]# kubectl explain deploy.spec.strategy.rollingUpdate
[root@master manifests]#  kubectl explain deploy.spec.template
[root@master manifests]# kubectl get rs
NAME      DESIRED   CURRENT   READY     AGE
myapp     5         5         5         2d
[root@master manifests]# kubectl delete rs myapp
[root@master manifests]# vim deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v1
        ports:
        - name: http
          containerPort: 80
[root@master manifests]# kubectl apply -f deploy-demo.yaml    #apply即可以创建也可以更新
[root@master manifests]# kubectl get deploy
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
myapp-deploy   2         2         2            2           9m
[root@master manifests]# kubectl get rs
NAME                      DESIRED   CURRENT   READY     AGE
myapp-deploy-69b47bc96d   2         2         2         10m
[root@master manifests]# kubectl get pods
NAME                            READY     STATUS    RESTARTS   AGE
client                          1/1       Running   0          7d
liveness-httpget-pod            1/1       Running   2          4d
myapp-deploy-69b47bc96d-sftgw   1/1       Running   0          11m
myapp-deploy-69b47bc96d-zpzwn   1/1       Running   0          11m
readiness-httpget-pod           1/1       Running   0          4d
[root@master manifests]# vim deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3    #把副本数改为3个
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v1
        ports:
        - name: http
          containerPort: 80
[root@master manifests]# kubectl apply -f deploy-demo.yaml
[root@master manifests]# kubectl get pods
NAME                            READY     STATUS    RESTARTS   AGE
client                          1/1       Running   0          7d
liveness-httpget-pod            1/1       Running   2          4d
myapp-deploy-69b47bc96d-rhnsw   1/1       Running   0          54s
myapp-deploy-69b47bc96d-sftgw   1/1       Running   0          20m
myapp-deploy-69b47bc96d-zpzwn   1/1       Running   0          20m
readiness-httpget-pod           1/1       Running   0          4d
[root@master manifests]# kubectl describe deploy myapp-deploy
Name:                   myapp-deploy
Namespace:              default
CreationTimestamp:      Sun, 10 May 2020 21:19:34 +0800
Labels:                 <none>
Annotations:            deployment.kubernetes.io/revision=1
                        kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations"
:{},"name":"myapp-deploy","namespace":"default"},"spec":{"replicas":3,"selector":{...
Selector:               app=myapp,release=canary
Replicas:               3 desired | 3 updated | 3 total | 3 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=myapp
           release=canary
  Containers:
   myapp:
    Image:        ikubernetes/myapp:v1
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   myapp-deploy-69b47bc96d (3/3 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  21m   deployment-controller  Scaled up replica set myapp-deploy-69b47bc96d to 2
  Normal  ScalingReplicaSet  1m    deployment-controller  Scaled up replica set myapp-deploy-69b47bc96d to 3
[root@master manifests]# kubectl get pods -w
NAME                            READY     STATUS    RESTARTS   AGE
client                          1/1       Running   0          7d
liveness-httpget-pod            1/1       Running   2          4d
myapp-deploy-69b47bc96d-rhnsw   1/1       Running   0          3m
myapp-deploy-69b47bc96d-sftgw   1/1       Running   0          22m
myapp-deploy-69b47bc96d-zpzwn   1/1       Running   0          22m
readiness-httpget-pod           1/1       Running   0          4d
[root@master manifests]# kubectl get pods -l app=myapp -w
NAME                            READY     STATUS    RESTARTS   AGE
myapp-deploy-69b47bc96d-rhnsw   1/1       Running   0          3m
myapp-deploy-69b47bc96d-sftgw   1/1       Running   0          23m
myapp-deploy-69b47bc96d-zpzwn   1/1       Running   0          23m
[root@master manifests]# vim deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2    #将版本从v1改为v2
        ports:
        - name: http
          containerPort: 80
[root@master manifests]# kubectl apply -f deploy-demo.yaml
[root@master manifests]# kubectl get pods -l app=myapp -w
NAME                            READY     STATUS    RESTARTS   AGE
myapp-deploy-69b47bc96d-rhnsw   1/1       Running   0          3m
myapp-deploy-69b47bc96d-sftgw   1/1       Running   0          23m
myapp-deploy-69b47bc96d-zpzwn   1/1       Running   0          23m
myapp-deploy-67f6f6b4dc-8ngsn   0/1       Pending   0         0s
myapp-deploy-67f6f6b4dc-8ngsn   0/1       Pending   0         0s
myapp-deploy-67f6f6b4dc-8ngsn   0/1       ContainerCreating   0         0s
myapp-deploy-67f6f6b4dc-8ngsn   1/1       Running   0         2s
myapp-deploy-69b47bc96d-rhnsw   1/1       Terminating   0         6m
myapp-deploy-67f6f6b4dc-plxkk   0/1       Pending   0         0s
myapp-deploy-67f6f6b4dc-plxkk   0/1       Pending   0         0s
myapp-deploy-67f6f6b4dc-plxkk   0/1       ContainerCreating   0         0s
myapp-deploy-67f6f6b4dc-plxkk   1/1       Running   0         2s
myapp-deploy-69b47bc96d-sftgw   1/1       Terminating   0         26m
myapp-deploy-69b47bc96d-rhnsw   0/1       Terminating   0         6m
myapp-deploy-67f6f6b4dc-dk9r4   0/1       Pending   0         0s
myapp-deploy-67f6f6b4dc-dk9r4   0/1       Pending   0         0s
myapp-deploy-67f6f6b4dc-dk9r4   0/1       ContainerCreating   0         0s
myapp-deploy-69b47bc96d-sftgw   0/1       Terminating   0         26m
myapp-deploy-67f6f6b4dc-dk9r4   1/1       Running   0         3s
myapp-deploy-69b47bc96d-zpzwn   1/1       Terminating   0         26m
myapp-deploy-69b47bc96d-zpzwn   0/1       Terminating   0         26m
myapp-deploy-69b47bc96d-sftgw   0/1       Terminating   0         26m
myapp-deploy-69b47bc96d-sftgw   0/1       Terminating   0         26m
myapp-deploy-69b47bc96d-rhnsw   0/1       Terminating   0         6m
myapp-deploy-69b47bc96d-rhnsw   0/1       Terminating   0         6m
myapp-deploy-69b47bc96d-zpzwn   0/1       Terminating   0         26m
myapp-deploy-69b47bc96d-zpzwn   0/1       Terminating   0         26m
[root@master manifests]# kubectl get rs
NAME                      DESIRED   CURRENT   READY     AGE
myapp-deploy-67f6f6b4dc   3         3         3         1m
myapp-deploy-69b47bc96d   0         0         0         27m
[root@master manifests]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY     AGE       CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-67f6f6b4dc   3         3         3         1m        myapp        ikubernetes/myapp:v2   app=myapp,pod-template-hash=2392926087,
release=canary
myapp-deploy-69b47bc96d   0         0         0         27m       myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=2560367528,
release=canary
[root@master manifests]# kubectl rollout --help
[root@master manifests]# kubectl rollout history --help
[root@master manifests]# kubectl rollout history deployment myapp-deploy    #查看滚动历史
deployments "myapp-deploy"
REVISION  CHANGE-CAUSE
1         <none>
2         <none>
[root@master manifests]# kubectl patch --help
[root@master manifests]# kubectl patch deployment myapp-deploy -p '{"spec":{"replicas":5}}'    #打补丁,修改replicas副本数为5个
[root@master manifests]# kubectl get pods
NAME                            READY     STATUS    RESTARTS   AGE
client                          1/1       Running   0          7d
liveness-httpget-pod            1/1       Running   2          4d
myapp-deploy-67f6f6b4dc-8ngsn   1/1       Running   0          7m
myapp-deploy-67f6f6b4dc-dk9r4   1/1       Running   0          7m
myapp-deploy-67f6f6b4dc-plxkk   1/1       Running   0          7m
myapp-deploy-67f6f6b4dc-sjmzl   1/1       Running   0          13s
myapp-deploy-67f6f6b4dc-wds5j   1/1       Running   0          13s
readiness-httpget-pod           1/1       Running   0          4d
[root@master manifests]# kubectl rollout --help
[root@master manifests]# kubectl rollout pause --help
[root@master manifests]# kubectl explain deploy.spec.strategy
[root@master manifests]# kubectl explain deploy.spec.strategy.rollingUpdate
[root@master manifests]# kubectl patch deployment myapp-deploy -p '{"spec":{"strategy":{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0}}}}'
[root@master manifests]# kubectl describe deployment myapp-deploy
Name:                   myapp-deploy
Namespace:              default
CreationTimestamp:      Sun, 10 May 2020 21:19:34 +0800
Labels:                 app=myapp
                        release=canary
Annotations:            deployment.kubernetes.io/revision=2
                        kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations"
:{},"name":"myapp-deploy","namespace":"default"},"spec":{"replicas":3,"selector":{...
Selector:               app=myapp,release=canary
Replicas:               5 desired | 5 updated | 5 total | 5 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  0 max unavailable, 1 max surge
Pod Template:
  Labels:  app=myapp
           release=canary
  Containers:
   myapp:
    Image:        ikubernetes/myapp:v2
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   myapp-deploy-67f6f6b4dc (5/5 replicas created)
Events:          <none>
[root@master manifests]# kubectl set image --help
[root@master manifests]# kubectl get pods -l app=myapp -w
NAME                            READY     STATUS    RESTARTS   AGE
myapp-deploy-67f6f6b4dc-8ngsn   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-dk9r4   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-plxkk   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-sjmzl   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-wds5j   1/1       Running   0          23h
[root@master manifests]# kubectl set image deployment myapp-deploy myapp=ikubernetes/myapp:v3 && kubectl rollout pause deployment myapp-deploy
    #更新后暂停
[root@master manifests]# kubectl get pods -l app=myapp -w    #创建完成后暂停
NAME                            READY     STATUS    RESTARTS   AGE
myapp-deploy-67f6f6b4dc-8ngsn   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-dk9r4   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-plxkk   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-sjmzl   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-wds5j   1/1       Running   0          23h
myapp-deploy-6bdcd6755d-ftjst   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-ftjst   0/1       Pending   0         1s
myapp-deploy-6bdcd6755d-ftjst   0/1       ContainerCreating   0         1s
myapp-deploy-6bdcd6755d-ftjst   1/1       Running   0         7s
[root@master manifests]# kubectl rollout status deployment myapp-deploy    #查看更新过程
Waiting for deployment "myapp-deploy" rollout to finish: 1 out of 5 new replicas have been updated...
[root@master manifests]# kubectl rollout --help
[root@master manifests]# kubectl rollout resume deployment myapp-deploy    #继续更新
[root@master manifests]# kubectl get pods -l app=myapp -w
NAME                            READY     STATUS    RESTARTS   AGE
myapp-deploy-67f6f6b4dc-8ngsn   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-dk9r4   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-plxkk   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-sjmzl   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-wds5j   1/1       Running   0          23h
myapp-deploy-6bdcd6755d-ftjst   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-ftjst   0/1       Pending   0         1s
myapp-deploy-6bdcd6755d-ftjst   0/1       ContainerCreating   0         1s
myapp-deploy-6bdcd6755d-ftjst   1/1       Running   0         7s
myapp-deploy-67f6f6b4dc-sjmzl   1/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-pp6mz   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-pp6mz   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-pp6mz   0/1       ContainerCreating   0         0s
myapp-deploy-67f6f6b4dc-sjmzl   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-sjmzl   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-sjmzl   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-sjmzl   0/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-pp6mz   1/1       Running   0         8s
myapp-deploy-67f6f6b4dc-wds5j   1/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-qcv5p   0/1       Pending   0         1s
myapp-deploy-6bdcd6755d-qcv5p   0/1       Pending   0         1s
myapp-deploy-6bdcd6755d-qcv5p   0/1       ContainerCreating   0         1s
myapp-deploy-67f6f6b4dc-wds5j   0/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-qcv5p   1/1       Running   0         4s
myapp-deploy-67f6f6b4dc-dk9r4   1/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-z2w6c   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-z2w6c   0/1       Pending   0         0s
myapp-deploy-67f6f6b4dc-wds5j   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-wds5j   0/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-z2w6c   0/1       ContainerCreating   0         1s
myapp-deploy-67f6f6b4dc-dk9r4   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-dk9r4   0/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-z2w6c   1/1       Running   0         6s
myapp-deploy-67f6f6b4dc-plxkk   1/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-9p2x5   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-9p2x5   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-9p2x5   0/1       ContainerCreating   0         0s
myapp-deploy-67f6f6b4dc-plxkk   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-plxkk   0/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-9p2x5   1/1       Running   0         6s
myapp-deploy-67f6f6b4dc-8ngsn   1/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-dk9r4   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-dk9r4   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-8ngsn   0/1       Terminating   0         23h
[root@master manifests]# kubectl rollout status deployment myapp-deploy
Waiting for deployment "myapp-deploy" rollout to finish: 1 out of 5 new replicas have been updated...
Waiting for deployment spec update to be observed...
Waiting for deployment spec update to be observed...
Waiting for deployment "myapp-deploy" rollout to finish: 1 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 1 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 2 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 2 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 2 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "myapp-deploy" rollout to finish: 1 old replicas are pending termination...
deployment "myapp-deploy" successfully rolled out
[root@master manifests]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY     AGE       CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-67f6f6b4dc   0         0         0         23h       myapp        ikubernetes/myapp:v2   app=myapp,pod-template-hash=2392926087,
release=canary
myapp-deploy-69b47bc96d   0         0         0         23h       myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=2560367528,
release=canary
myapp-deploy-6bdcd6755d   5         5         5         11m       myapp        ikubernetes/myapp:v3   app=myapp,pod-template-hash=2687823118,
release=canary
[root@master manifests]# kubectl rollout undo --help
[root@master manifests]# kubectl rollout history deployment myapp-deploy
deployments "myapp-deploy"    #查看当前版本
REVISION  CHANGE-CAUSE
1         <none>
2         <none>
3         <none>
[root@master manifests]# kubectl rollout undo deployment myapp-deploy --to-revision=1    #回滚到第一个版本
[root@master ~]# kubectl get pods -l app=myapp -w
NAME                            READY     STATUS    RESTARTS   AGE
myapp-deploy-67f6f6b4dc-8ngsn   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-dk9r4   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-plxkk   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-sjmzl   1/1       Running   0          23h
myapp-deploy-67f6f6b4dc-wds5j   1/1       Running   0          23h
myapp-deploy-6bdcd6755d-ftjst   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-ftjst   0/1       Pending   0         1s
myapp-deploy-6bdcd6755d-ftjst   0/1       ContainerCreating   0         1s
myapp-deploy-6bdcd6755d-ftjst   1/1       Running   0         7s
myapp-deploy-67f6f6b4dc-sjmzl   1/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-pp6mz   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-pp6mz   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-pp6mz   0/1       ContainerCreating   0         0s
myapp-deploy-67f6f6b4dc-sjmzl   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-sjmzl   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-sjmzl   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-sjmzl   0/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-pp6mz   1/1       Running   0         8s
myapp-deploy-67f6f6b4dc-wds5j   1/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-qcv5p   0/1       Pending   0         1s
myapp-deploy-6bdcd6755d-qcv5p   0/1       Pending   0         1s
myapp-deploy-6bdcd6755d-qcv5p   0/1       ContainerCreating   0         1s
myapp-deploy-67f6f6b4dc-wds5j   0/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-qcv5p   1/1       Running   0         4s
myapp-deploy-67f6f6b4dc-dk9r4   1/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-z2w6c   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-z2w6c   0/1       Pending   0         0s
myapp-deploy-67f6f6b4dc-wds5j   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-wds5j   0/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-z2w6c   0/1       ContainerCreating   0         1s
myapp-deploy-67f6f6b4dc-dk9r4   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-dk9r4   0/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-z2w6c   1/1       Running   0         6s
myapp-deploy-67f6f6b4dc-plxkk   1/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-9p2x5   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-9p2x5   0/1       Pending   0         0s
myapp-deploy-6bdcd6755d-9p2x5   0/1       ContainerCreating   0         0s
myapp-deploy-67f6f6b4dc-plxkk   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-plxkk   0/1       Terminating   0         23h
myapp-deploy-6bdcd6755d-9p2x5   1/1       Running   0         6s
myapp-deploy-67f6f6b4dc-8ngsn   1/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-dk9r4   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-dk9r4   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-8ngsn   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-8ngsn   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-8ngsn   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-plxkk   0/1       Terminating   0         23h
myapp-deploy-67f6f6b4dc-plxkk   0/1       Terminating   0         23h
myapp-deploy-69b47bc96d-vkksx   0/1       Pending   0         0s
myapp-deploy-69b47bc96d-vkksx   0/1       Pending   0         0s
myapp-deploy-69b47bc96d-vkksx   0/1       ContainerCreating   0         0s
myapp-deploy-69b47bc96d-vkksx   1/1       Running   0         2s
myapp-deploy-6bdcd6755d-9p2x5   1/1       Terminating   0         6m
myapp-deploy-69b47bc96d-p9b98   0/1       Pending   0         0s
myapp-deploy-69b47bc96d-p9b98   0/1       Pending   0         0s
myapp-deploy-69b47bc96d-p9b98   0/1       ContainerCreating   0         0s
myapp-deploy-6bdcd6755d-9p2x5   0/1       Terminating   0         6m
myapp-deploy-69b47bc96d-p9b98   1/1       Running   0         3s
myapp-deploy-6bdcd6755d-z2w6c   1/1       Terminating   0         6m
myapp-deploy-69b47bc96d-mxlpg   0/1       Pending   0         0s
myapp-deploy-69b47bc96d-mxlpg   0/1       Pending   0         0s
myapp-deploy-69b47bc96d-mxlpg   0/1       ContainerCreating   0         2s
myapp-deploy-6bdcd6755d-z2w6c   0/1       Terminating   0         7m
myapp-deploy-6bdcd6755d-z2w6c   0/1       Terminating   0         7m
myapp-deploy-6bdcd6755d-z2w6c   0/1       Terminating   0         7m
myapp-deploy-69b47bc96d-mxlpg   1/1       Running   0         8s
myapp-deploy-6bdcd6755d-qcv5p   1/1       Terminating   0         7m
myapp-deploy-6bdcd6755d-9p2x5   0/1       Terminating   0         7m
myapp-deploy-69b47bc96d-9xq44   0/1       Pending   0         0s
myapp-deploy-69b47bc96d-9xq44   0/1       Pending   0         1s
myapp-deploy-6bdcd6755d-9p2x5   0/1       Terminating   0         7m
myapp-deploy-69b47bc96d-9xq44   0/1       ContainerCreating   0         1s
myapp-deploy-6bdcd6755d-qcv5p   0/1       Terminating   0         7m
myapp-deploy-6bdcd6755d-qcv5p   0/1       Terminating   0         7m
myapp-deploy-6bdcd6755d-qcv5p   0/1       Terminating   0         7m
myapp-deploy-69b47bc96d-9xq44   1/1       Running   0         4s
myapp-deploy-6bdcd6755d-pp6mz   1/1       Terminating   0         7m
myapp-deploy-69b47bc96d-v4sdf   0/1       Pending   0         0s
myapp-deploy-69b47bc96d-v4sdf   0/1       Pending   0         1s
myapp-deploy-69b47bc96d-v4sdf   0/1       ContainerCreating   0         2s
myapp-deploy-6bdcd6755d-pp6mz   0/1       Terminating   0         7m
myapp-deploy-69b47bc96d-v4sdf   1/1       Running   0         4s
myapp-deploy-6bdcd6755d-pp6mz   0/1       Terminating   0         7m
myapp-deploy-6bdcd6755d-pp6mz   0/1       Terminating   0         7m
myapp-deploy-6bdcd6755d-ftjst   1/1       Terminating   0         16m
myapp-deploy-6bdcd6755d-ftjst   0/1       Terminating   0         16m
myapp-deploy-6bdcd6755d-ftjst   0/1       Terminating   0         16m
myapp-deploy-6bdcd6755d-ftjst   0/1       Terminating   0         16m
[root@master manifests]# kubectl rollout history deployment myapp-deploy
deployments "myapp-deploy"
REVISION  CHANGE-CAUSE
2         <none>
3         <none>
4         <none>
[root@master manifests]# kubectl get rs -o wide    #当前工作的是v1版本
NAME                      DESIRED   CURRENT   READY     AGE       CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-67f6f6b4dc   0         0         0         23h       myapp        ikubernetes/myapp:v2   app=myapp,pod-template-hash=2392926087,
release=canary
myapp-deploy-69b47bc96d   5         5         5         1d        myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=2560367528,
release=canary
myapp-deploy-6bdcd6755d   0         0         0         20m       myapp        ikubernetes/myapp:v3   app=myapp,pod-template-hash=2687823118,
release=canary
[root@master manifests]# kubectl explain ds
[root@master manifests]# kubectl explain ds.spec
[root@master manifests]# cp deploy-demo.yaml ds-demo.yaml

node01:

[root@node01 ~]# docker pull ikubernetes/filebeat:5.6.5-alpine

node02:

[root@node02 ~]# docker pull ikubernetes/filebeat:5.6.5-alpine

node01:

[root@node01 ~]# docker image inspect ikubernetes/filebeat:5.6.5-alpine

master:

[root@master manifests]# kubectl explain pods.spec.containers
[root@master manifests]# kubectl explain pods.spec.containers.env
[root@master manifests]# vim ds-demo.yaml
# DaemonSet manifest (ds-demo.yaml): runs one filebeat pod per schedulable
# node (two pods appear below because the cluster has two tainted-free nodes).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: myapp-ds
  namespace: default
spec:
  # Pod selector — must match the template labels below.
  selector:
    matchLabels:
      app: filebeat
      release: stable
  template:
    metadata:
      labels:
        app: filebeat
        release: stable
    spec:
      containers:
      - name: filebeat
        image: ikubernetes/filebeat:5.6.5-alpine
        env:
        # Consumed by filebeat's output.redis config inside the image
        # (see the /etc/filebeat/filebeat.yml dump later in the session).
        - name: REDIS_HOST
          value: redis.default.svc.cluster.local  # Service DNS name
        - name: REDIS_LOG_LEVEL
          value: info
[root@master manifests]# kubectl apply -f ds-demo.yaml
[root@master manifests]# kubectl get pods
NAME                            READY     STATUS    RESTARTS   AGE
client                          1/1       Running   0          8d
liveness-httpget-pod            1/1       Running   2          5d
myapp-deploy-69b47bc96d-9xq44   1/1       Running   0          37m
myapp-deploy-69b47bc96d-mxlpg   1/1       Running   0          38m
myapp-deploy-69b47bc96d-p9b98   1/1       Running   0          38m
myapp-deploy-69b47bc96d-v4sdf   1/1       Running   0          37m
myapp-deploy-69b47bc96d-vkksx   1/1       Running   0          38m
myapp-ds-c29sl                  1/1       Running   0          34s
myapp-ds-lmx8k                  1/1       Running   0          34s
readiness-httpget-pod           1/1       Running   0          5d
[root@master manifests]# kubectl logs myapp-ds-c29sl
[root@master manifests]# kubectl get pods    #myapp-ds有两个,因为有两个node,不会放到master上面,因为有污点
NAME                            READY     STATUS    RESTARTS   AGE
client                          1/1       Running   0          8d
liveness-httpget-pod            1/1       Running   2          5d
myapp-deploy-69b47bc96d-9xq44   1/1       Running   0          39m
myapp-deploy-69b47bc96d-mxlpg   1/1       Running   0          39m
myapp-deploy-69b47bc96d-p9b98   1/1       Running   0          39m
myapp-deploy-69b47bc96d-v4sdf   1/1       Running   0          39m
myapp-deploy-69b47bc96d-vkksx   1/1       Running   0          39m
myapp-ds-c29sl                  1/1       Running   0          1m
myapp-ds-lmx8k                  1/1       Running   0          1m
readiness-httpget-pod           1/1       Running   0          5d
[root@master manifests]# vim ds-demo.yaml    #定义两个资源
# Two resources in one file: a single-replica Redis Deployment (log store)
# plus a filebeat DaemonSet that targets it through the REDIS_HOST env var.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
      role: logstor
  template:
    metadata:
      labels:
        app: redis
        role: logstor
    spec:
      containers:
      - name: redis
        image: redis:4.0-alpine
        ports:
        - name: redis
          containerPort: 6379
# Multiple documents in one file are separated by the YAML document marker.
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: myapp-ds
  namespace: default
spec:
  selector:
    matchLabels:
      app: filebeat
      release: stable
  template:
    metadata:
      labels:
        app: filebeat
        release: stable
    spec:
      containers:
      - name: filebeat
        image: ikubernetes/filebeat:5.6.5-alpine
        env:
        # REDIS_HOST resolves via cluster DNS to the redis Service below
        # (created later with `kubectl expose deployment redis --port=6379`).
        - name: REDIS_HOST
          value: redis.default.svc.cluster.local
        - name: REDIS_LOG_LEVEL
          value: info
[root@master manifests]# kubectl delete -f ds-demo.yaml
[root@master manifests]# vim ds-demo.yaml
# Revised version of ds-demo.yaml: the DaemonSet is renamed from myapp-ds
# to filebeat-ds; the Redis Deployment is unchanged.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
      role: logstor
  template:
    metadata:
      labels:
        app: redis
        role: logstor
    spec:
      containers:
      - name: redis
        image: redis:4.0-alpine
        ports:
        - name: redis
          containerPort: 6379
# Document separator between the Deployment and the DaemonSet.
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat-ds
  namespace: default
spec:
  selector:
    matchLabels:
      app: filebeat
      release: stable
  template:
    metadata:
      labels:
        app: filebeat
        release: stable
    spec:
      containers:
      - name: filebeat
        image: ikubernetes/filebeat:5.6.5-alpine
        env:
        # Read by filebeat's output.redis section (hosts: ${REDIS_HOST...}).
        - name: REDIS_HOST
          value: redis.default.svc.cluster.local
        - name: REDIS_LOG_LEVEL
          value: info
[root@master manifests]# kubectl apply -f ds-demo.yaml
[root@master manifests]# kubectl expose deployment redis --port=6379
[root@master manifests]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP        15d
myapp        NodePort    10.100.224.224   <none>        80:30308/TCP   9d
nginx        ClusterIP   10.101.88.185    <none>        80/TCP         9d
redis        ClusterIP   10.106.58.175    <none>        6379/TCP       1m
[root@master manifests]# kubectl get pods
NAME                            READY     STATUS    RESTARTS   AGE
client                          0/1       Error     0          10d
filebeat-ds-gwqhd               1/1       Running   2          1d
filebeat-ds-jc6cs               1/1       Running   1          1d
liveness-httpget-pod            1/1       Running   3          7d
myapp-deploy-69b47bc96d-9xq44   1/1       Running   1          2d
myapp-deploy-69b47bc96d-mxlpg   1/1       Running   1          2d
myapp-deploy-69b47bc96d-p9b98   1/1       Running   1          2d
myapp-deploy-69b47bc96d-v4sdf   1/1       Running   1          2d
myapp-deploy-69b47bc96d-vkksx   1/1       Running   1          2d
readiness-httpget-pod           1/1       Running   1          7d
redis-5b5d6fbbbd-9m5p5          1/1       Running   1          1d
[root@master manifests]# kubectl exec -it redis-5b5d6fbbbd-9m5p5 -- /bin/sh
/data # netstat -tnl
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State
tcp        0      0 0.0.0.0:6379            0.0.0.0:*               LISTEN
tcp        0      0 :::6379                 :::*                    LISTEN
/data # ls
/data # nslookup redis.default.svc.cluster.local
Server:         10.96.0.10
Address:        10.96.0.10:53

Name:   redis.default.svc.cluster.local
Address: 10.106.58.175
/data # redis-cli -h redis.default.svc.cluster.local
redis.default.svc.cluster.local:6379> keys *
(empty list or set)
redis.default.svc.cluster.local:6379> exit
/data # exit
[root@master manifests]# kubectl get pods
NAME                            READY     STATUS    RESTARTS   AGE
client                          0/1       Error     0          11d
filebeat-ds-gwqhd               1/1       Running   2          1d
filebeat-ds-jc6cs               1/1       Running   1          1d
liveness-httpget-pod            1/1       Running   3          8d
myapp-deploy-69b47bc96d-9xq44   1/1       Running   1          2d
myapp-deploy-69b47bc96d-mxlpg   1/1       Running   1          2d
myapp-deploy-69b47bc96d-p9b98   1/1       Running   1          2d
myapp-deploy-69b47bc96d-v4sdf   1/1       Running   1          2d
myapp-deploy-69b47bc96d-vkksx   1/1       Running   1          2d
readiness-httpget-pod           1/1       Running   1          7d
redis-5b5d6fbbbd-9m5p5          1/1       Running   1          1d
[root@master manifests]# kubectl exec -it filebeat-ds-gwqhd -- /bin/sh
/ # ps aux
PID   USER     TIME   COMMAND
    1 root       0:17 /usr/local/bin/filebeat -e -c /etc/filebeat/filebeat.yml
   15 root       0:00 /bin/sh
   22 root       0:00 ps aux
/ # cat /etc/filebeat/filebeat.yml
filebeat.registry_file: /var/log/containers/filebeat_registry
filebeat.idle_timeout: 5s
filebeat.spool_size: 2048

logging.level: info

filebeat.prospectors:
- input_type: log
  paths:
    - "/var/log/containers/*.log"
    - "/var/log/docker/containers/*.log"
    - "/var/log/startupscript.log"
    - "/var/log/kubelet.log"
    - "/var/log/kube-proxy.log"
    - "/var/log/kube-apiserver.log"
    - "/var/log/kube-controller-manager.log"
    - "/var/log/kube-scheduler.log"
    - "/var/log/rescheduler.log"
    - "/var/log/glbc.log"
    - "/var/log/cluster-autoscaler.log"
  symlinks: true
  json.message_key: log
  json.keys_under_root: true
  json.add_error_key: true
  multiline.pattern: '^\s'
  multiline.match: after
  document_type: kube-logs
  tail_files: true
  fields_under_root: true

output.redis:
  hosts: ${REDIS_HOST:?No Redis host configured. Use env var REDIS_HOST to set host.}
  key: "filebeat"
/ # printenv    #查看REDIS_HOST是定义的redis服务器
REDIS_PORT=tcp://10.106.58.175:6379
KUBERNETES_PORT=tcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT=443
REDIS_SERVICE_PORT=6379
REDIS_PORT_6379_TCP_ADDR=10.106.58.175
HOSTNAME=filebeat-ds-gwqhd
SHLVL=1
HOME=/root
REDIS_PORT_6379_TCP_PORT=6379
REDIS_PORT_6379_TCP_PROTO=tcp
NGINX_PORT_80_TCP=tcp://10.101.88.185:80
MYAPP_SERVICE_HOST=10.100.224.224
REDIS_PORT_6379_TCP=tcp://10.106.58.175:6379
MYAPP_SERVICE_PORT=80
MYAPP_PORT=tcp://10.100.224.224:80
TERM=xterm
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
NGINX_SERVICE_HOST=10.101.88.185
MYAPP_PORT_80_TCP_ADDR=10.100.224.224
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
KUBERNETES_PORT_443_TCP_PORT=443
REDIS_LOG_LEVEL=info
KUBERNETES_PORT_443_TCP_PROTO=tcp
MYAPP_PORT_80_TCP_PORT=80
MYAPP_PORT_80_TCP_PROTO=tcp
NGINX_SERVICE_PORT=80
NGINX_PORT=tcp://10.101.88.185:80
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT_HTTPS=443
PWD=/
MYAPP_PORT_80_TCP=tcp://10.100.224.224:80
REDIS_SERVICE_HOST=10.106.58.175
KUBERNETES_SERVICE_HOST=10.96.0.1
REDIS_HOST=redis.default.svc.cluster.local
NGINX_PORT_80_TCP_ADDR=10.101.88.185
FILEBEAT_VERSION=5.6.5
NGINX_PORT_80_TCP_PORT=80
NGINX_PORT_80_TCP_PROTO=tcp
/ # nslookup redis.default.svc.cluster.local
nslookup: can't resolve '(null)': Name does not resolve

Name:      redis.default.svc.cluster.local
Address 1: 10.106.58.175 redis.default.svc.cluster.local
/ # ps aux
PID   USER     TIME   COMMAND
    1 root       0:17 /usr/local/bin/filebeat -e -c /etc/filebeat/filebeat.yml
   15 root       0:00 /bin/sh
   26 root       0:00 ps aux
/ # kill -1 1
[root@master manifests]# kubectl get pods
NAME                            READY     STATUS    RESTARTS   AGE
client                          0/1       Error     0          11d
filebeat-ds-gwqhd               1/1       Running   3          1d
filebeat-ds-jc6cs               1/1       Running   1          1d
liveness-httpget-pod            1/1       Running   3          8d
myapp-deploy-69b47bc96d-9xq44   1/1       Running   1          2d
myapp-deploy-69b47bc96d-mxlpg   1/1       Running   1          2d
myapp-deploy-69b47bc96d-p9b98   1/1       Running   1          2d
myapp-deploy-69b47bc96d-v4sdf   1/1       Running   1          2d
myapp-deploy-69b47bc96d-vkksx   1/1       Running   1          2d
readiness-httpget-pod           1/1       Running   1          7d
redis-5b5d6fbbbd-9m5p5          1/1       Running   1          1d
[root@master manifests]# kubectl exec -it redis-5b5d6fbbbd-9m5p5 -- /bin/sh    #日志还是没发送过来,主要为了演示各pod间如何调用,用svc的主机名
/data # redis-cli -h redis.default.svc.cluster.local
redis.default.svc.cluster.local:6379> keys *
(empty list or set)
redis.default.svc.cluster.local:6379> exit
/data # exit
[root@master manifests]# vim ds-demo.yaml
# Re-listing of ds-demo.yaml (unchanged since the previous apply):
# Redis Deployment (log store) + filebeat DaemonSet shipping logs to it.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
      role: logstor
  template:
    metadata:
      labels:
        app: redis
        role: logstor
    spec:
      containers:
      - name: redis
        image: redis:4.0-alpine
        ports:
        - name: redis
          containerPort: 6379
# Document separator between the two resources.
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat-ds
  namespace: default
spec:
  selector:
    matchLabels:
      app: filebeat
      release: stable
  template:
    metadata:
      labels:
        app: filebeat
        release: stable
    spec:
      containers:
      - name: filebeat
        image: ikubernetes/filebeat:5.6.5-alpine
        env:
        # Pods reach the redis Service by its cluster-DNS name, as verified
        # with nslookup / redis-cli inside the pods earlier in the session.
        - name: REDIS_HOST
          value: redis.default.svc.cluster.local
        - name: REDIS_LOG_LEVEL
          value: info
[root@master manifests]# kubectl get pods -l app=filebeat -o wide    #这两个资源各运行在一个节点上
NAME                READY     STATUS    RESTARTS   AGE       IP            NODE
filebeat-ds-gwqhd   1/1       Running   3          1d        10.244.1.36   node01.smoke.com
filebeat-ds-jc6cs   1/1       Running   1          1d        10.244.2.35   node02.smoke.com
[root@master manifests]# kubectl explain ds.spec
[root@master manifests]# kubectl explain ds.spec.updateStrategy
[root@master manifests]# kubectl explain ds.spec.updateStrategy.rollingUpdate
[root@master manifests]# kubectl describe ds filebeat-ds
Name:           filebeat-ds
Selector:       app=filebeat,release=stable
Node-Selector:  <none>
Labels:         app=filebeat
                release=stable
Annotations:    kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"apps/v1","kind":"DaemonSet","metadata":{"annotations":{},
"name":"filebeat-ds","namespace":"default"},"spec":{"selector":{"matchLabels":{...
Desired Number of Nodes Scheduled: 2
Current Number of Nodes Scheduled: 2
Number of Nodes Scheduled with Up-to-date Pods: 2
Number of Nodes Scheduled with Available Pods: 2
Number of Nodes Misscheduled: 0
Pods Status:  2 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=filebeat
           release=stable
  Containers:
   filebeat:
    Image:      ikubernetes/filebeat:5.6.5-alpine
    Port:       <none>
    Host Port:  <none>
    Environment:
      REDIS_HOST:       redis.default.svc.cluster.local
      REDIS_LOG_LEVEL:  info
    Mounts:             <none>
  Volumes:              <none>
Events:                 <none>
[root@master manifests]# kubectl set image --help
[root@master manifests]# kubectl set image daemonsets filebeat-ds filebeat=ikubernetes/filebeat:5.6.6-alpine    #更新版本
[root@master manifests]# kubectl get pods -w
NAME                            READY     STATUS              RESTARTS   AGE
client                          0/1       Error               0          11d
filebeat-ds-jc6cs               1/1       Running             1          1d
filebeat-ds-shxhs               0/1       ContainerCreating   0          14s
liveness-httpget-pod            1/1       Running             3          8d
myapp-deploy-69b47bc96d-9xq44   1/1       Running             1          2d
myapp-deploy-69b47bc96d-mxlpg   1/1       Running             1          2d
myapp-deploy-69b47bc96d-p9b98   1/1       Running             1          2d
myapp-deploy-69b47bc96d-v4sdf   1/1       Running             1          2d
myapp-deploy-69b47bc96d-vkksx   1/1       Running             1          2d
readiness-httpget-pod           1/1       Running             1          8d
redis-5b5d6fbbbd-9m5p5          1/1       Running             1          1d
filebeat-ds-shxhs   1/1       Running   0         14s
filebeat-ds-jc6cs   1/1       Terminating   1         1d
filebeat-ds-jc6cs   0/1       Terminating   1         1d
filebeat-ds-jc6cs   0/1       Terminating   1         1d
filebeat-ds-jc6cs   0/1       Terminating   1         1d
filebeat-ds-dlqxm   0/1       Pending   0         0s
filebeat-ds-dlqxm   0/1       ContainerCreating   0         1s
filebeat-ds-dlqxm   1/1       Running   0         25s
[root@master manifests]# kubectl explain pods.spec