Kubernetes Pod High Availability


Notes:
To keep Pods highly available, Kubernetes provides, besides Deployments, the ReplicaSet (RS), ReplicationController (RC), DaemonSet (DS) and StatefulSet controllers.
Deployments: keep Pods highly available by monitoring Pod state and ensuring that the desired number of Pod replicas is running at all times. They run multiple Pod replicas, and the replica count can be scaled manually.
DaemonSets: also keep Pods highly available by monitoring Pod state and ensuring the desired Pods are always running, but Pods created by the same DaemonSet must run on different nodes. Every worker node runs at least one of the Pods; no replica count is specified, it always equals the number of eligible nodes. (A quick way to list these controllers is sketched below.)
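A quick check of which of these controllers exist in a cluster (assuming kubectl is already configured; deploy/rs/ds/sts are the standard kubectl short names):
kubectl get deployments,replicasets,daemonsets,statefulsets -A
kubectl api-resources | grep -Ei 'deployment|replicaset|daemonset|statefulset'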

Deployment

  • Create a Deployment template (applying the manifest is sketched after the YAML below)
kubectl create deployment dc1 --image=nginx --dry-run=client -o yaml > dc1.yaml
trnuser@k8s:~/pod/dc$ cat dc1.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: dc1
  name: dc1
spec:
  replicas: 4   # desired number of Pod replicas
  selector:
    matchLabels:
      app: dc1  # the Deployment manages every Pod that carries the app=dc1 label
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: dc1  # every Pod created by this Deployment gets the app=dc1 label
    spec:
      containers:
      - image: nginx
        name: nginx
        livenessProbe:
          httpGet:
            path: /index.html
            port: 80
            scheme: HTTP
        imagePullPolicy: IfNotPresent
        resources: 
          requests:
            cpu: 400m
status: {}
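A minimal sketch of turning the template into a running Deployment; it assumes the manifest was saved as dc1.yaml and uses the deployment namespace that appears in the describe output later (create the namespace first if it does not exist):
kubectl create namespace deployment        # skip if the namespace already exists
kubectl apply -f dc1.yaml -n deployment
kubectl get deployment dc1 -n deployment   # READY should reach 4/4
kubectl get pods -n deployment -l app=dc1 -o wide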
  • Scale the Pod count manually
kubectl scale deployment dc1 --replicas=8  
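To confirm the scale-out, the Deployment and its backing ReplicaSet can be checked (namespace assumed as above):
kubectl get deployment dc1 -n deployment     # READY should climb to 8/8
kubectl get rs -n deployment -l app=dc1      # the ReplicaSet's DESIRED count follows the Deployment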
  • High-availability test
    After Node1 is shut down, the Pod is recreated on another node after roughly 5 minutes (see the toleration sketch after the describe output below).
trnuser@k8s:~/pod/dc$ kubectl describe pod dc1-674d4c69bd-gpzbr -n deployment
Name:         dc1-674d4c69bd-gpzbr
Namespace:    deployment
Priority:     0
Node:         node3/10.250.101.66
Start Time:   Fri, 26 Feb 2021 15:32:53 +0800
Labels:       app=dc1
              pod-template-hash=674d4c69bd
Annotations:  cni.projectcalico.org/podIP: 10.244.135.15/32
              cni.projectcalico.org/podIPs: 10.244.135.15/32
Status:       Running
IP:           10.244.135.15
IPs:
  IP:           10.244.135.15
Controlled By:  ReplicaSet/dc1-674d4c69bd
Containers:
  nginx:
    Container ID:   docker://b746af7eab828b3d0bdca450c7a3f89317d4ccb253f16a83d4cd4723cacece46
    Image:          nginx:1.9
    Image ID:       docker-pullable://nginx@sha256:54313b5c376892d55205f13d620bc3dcccc8e70e596d083953f95e94f071f6db
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Fri, 26 Feb 2021 15:32:54 +0800
    Ready:          True
    Restart Count:  0
    Requests:
      cpu:        400m
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-blsbs (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-blsbs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-blsbs
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s  ## after 300s without a response the Pod is recreated elsewhere
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:          <none>
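The Tolerations lines above explain the roughly 5-minute delay: the node controller adds NoExecute tolerations for not-ready and unreachable nodes with a default tolerationSeconds of 300. As a sketch (not part of the original dc1.yaml), failover can be made faster by declaring the tolerations explicitly under spec.template.spec with a shorter timeout:
      tolerations:
      - key: node.kubernetes.io/not-ready
        operator: Exists
        effect: NoExecute
        tolerationSeconds: 30   # recreate after 30s instead of the default 300s
      - key: node.kubernetes.io/unreachable
        operator: Exists
        effect: NoExecute
        tolerationSeconds: 30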

DaemonSets

  • Create the DaemonSet YAML file
trnuser@k8s:~/pod$ cat daemonset.yaml 
apiVersion: apps/v1
kind: DaemonSet
metadata:
  creationTimestamp: null
  labels:
    app: ds1
  name: ds1
spec:
 #replicas: 4
  selector:
    matchLabels:
      app: ds1
 #strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: ds1
    spec:
      containers:
      - image: nginx
        name: nginx
        imagePullPolicy: IfNotPresent
        resources: {}
#status: {}
  • A DaemonSet cannot be scaled
[root@master ~]# kubectl scale daemonsets.apps ds1 --replicas=4 
Error from server (NotFound): the server could not find the requested resource 
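This is expected: a DaemonSet has no replicas field, and its Pod count always follows the number of eligible nodes, so the effective count can be read from the DaemonSet itself:
kubectl get daemonset ds1 -o wide    # DESIRED/CURRENT equal the number of matching nodes
kubectl get pods -l app=ds1 -o wide  # one Pod per worker node
To limit which nodes receive a Pod, a nodeSelector can be added under spec.template.spec in daemonset.yaml; the disktype=ssd label is only a hypothetical example:
      nodeSelector:
        disktype: ssd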
  • Deployment: upgrade and rollback
trnuser@k8s:~/pod/dc$ cat dc1-update.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: dc1
  name: dc1
spec:
  replicas: 4
  selector:
    matchLabels:
      app: dc1
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: dc1
    spec:
      containers:
      - image: nginx:1.9  ## upgrade the image to version 1.9
        name: nginx
        imagePullPolicy: IfNotPresent
        resources: {}
status: {}
  • Apply the upgrade YAML
kubectl apply -f dc1-update.yaml
  • View the rollout revision history
kubectl rollout history deployment dc1  
  • Roll back to a previous revision
kubectl rollout undo deployment dc1 --to-revision=1
  • Cancel the rollback by rolling forward to the newer revision (more rollout commands are sketched below)
kubectl rollout undo deployment dc1 --to-revision=4 
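A few more standard rollout subcommands that are useful around upgrades and rollbacks (the revision numbers depend on how many updates the Deployment has seen):
kubectl rollout status deployment dc1                  # block until the rolling update completes
kubectl rollout history deployment dc1 --revision=2    # show the Pod template recorded for revision 2
kubectl set image deployment dc1 nginx=nginx:1.9       # change the image in place instead of editing the YAML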
  • Adjust how many Pods are added and taken down at a time during the rolling update (the resulting Pod-count window is worked out after the YAML below)
trnuser@k8s:~/pod/dc$ cat dc1-update-change-num.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: dc1
  name: dc1
spec:
  strategy:
    rollingUpdate:
      maxSurge: 2
      maxUnavailable: 1
  replicas: 4
  selector:
    matchLabels:
      app: dc1
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: dc1
    spec:
      containers:
      - image: nginx:1.9
        name: nginx
        imagePullPolicy: IfNotPresent
        resources: 
          requests:
            cpu: 400m
status: {}
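With replicas: 4, maxSurge: 2 and maxUnavailable: 1, the rolling update may run at most 4 + 2 = 6 Pods at a time and must keep at least 4 - 1 = 3 Pods available. A minimal way to watch that window during the update (file name taken from the cat above):
kubectl apply -f dc1-update-change-num.yaml
kubectl get pods -l app=dc1 -w    # total Pods never exceed 6 and available Pods never drop below 3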
  • Automatic scaling (HPA); CPU/memory resource requests must be set on the Pods
    The minimum Pod count defined in the HPA takes precedence over the replica count specified in the Deployment file (an equivalent HPA manifest is sketched after the output below).
kubectl autoscale deployment dc1 --max=8 --min=2 --cpu-percent=80
trnuser@k8s:~/pod/dc$ kubectl get hpa -n deployment 
NAME   REFERENCE        TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
dc1    Deployment/dc1   0%/80%    2         8         2          13d
trnuser@k8s:~/pod/dc$
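The autoscale command above simply creates a HorizontalPodAutoscaler object; the same settings written as a manifest look roughly like this (autoscaling/v1 shown for brevity):
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: dc1
  namespace: deployment
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: dc1
  minReplicas: 2
  maxReplicas: 8
  targetCPUUtilizationPercentage: 80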
  • Exec into one of the Pods and generate heavy CPU load (watching the HPA react is sketched after the commands below)
[root@master ~]# kubectl exec -it dc1-7d984979d8-25f7m -- bash 
root@dc1-7d984979d8-25f7m:/# cat /dev/zero > /dev/null & 
root@dc1-7d984979d8-25f7m:/# cat /dev/zero > /dev/null & 
root@dc1-7d984979d8-25f7m:/# cat /dev/zero > /dev/null & 
root@dc1-7d984979d8-25f7m:/# cat /dev/zero > /dev/null & 
root@dc1-7d984979d8-25f7m:/# cat /dev/zero > /dev/null & 
root@dc1-7d984979d8-25f7m:/# cat /dev/zero > /dev/null & 
root@dc1-7d984979d8-25f7m:/# cat /dev/zero > /dev/null &
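Each cat /dev/zero > /dev/null pins roughly one CPU core. From another terminal the HPA can be watched reacting to the load; kubectl top assumes metrics-server is installed, which the HPA already requires:
kubectl get hpa dc1 -n deployment -w    # TARGETS rises above 80% and REPLICAS grows toward 8
kubectl top pods -n deployment          # per-Pod CPU usage
kill %1 %2 %3 %4 %5 %6 %7               # run inside the Pod shell to stop the load; REPLICAS scales back down after the cooldown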