|NO.Z.00087|——————————|^^ Deployment ^^|——|KuberNetes&kubeadm.V16|5 Servers|——|kubernetes validation|cluster availability validation|

1. Cluster availability validation: view existing resource information
### --- Query the pods in all namespaces of the cluster

[root@k8s-master01 ~]# kubectl get po --all-namespaces -owide
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE     IP                NODE           NOMINATED NODE   READINESS GATES
kube-system            calico-kube-controllers-cdd5755b9-9dtk4      1/1     Running   0          3m38s   192.168.1.11      k8s-master01   <none>           <none>
kube-system            calico-node-jsprh                            1/1     Running   0          107m    192.168.1.12      k8s-master02   <none>           <none>
kube-system            calico-node-n97ff                            1/1     Running   0          107m    192.168.1.11      k8s-master01   <none>           <none>
kube-system            calico-node-tk4kz                            1/1     Running   1          107m    192.168.1.15      k8s-node02     <none>           <none>
kube-system            calico-node-vfcxf                            1/1     Running   0          107m    192.168.1.13      k8s-master03   <none>           <none>
kube-system            calico-node-wvwbh                            1/1     Running   0          107m    192.168.1.14      k8s-node01     <none>           <none>
kube-system            coredns-6f6b8cc4f6-bxsh9                     1/1     Running   0          3m38s   172.162.195.2     k8s-master03   <none>           <none>
kube-system            coredns-6f6b8cc4f6-p55bx                     1/1     Running   0          3m38s   172.162.195.1     k8s-master03   <none>           <none>
kube-system            etcd-k8s-master01                            1/1     Running   0          173m    192.168.1.11      k8s-master01   <none>           <none>
kube-system            etcd-k8s-master02                            1/1     Running   0          152m    192.168.1.12      k8s-master02   <none>           <none>
kube-system            etcd-k8s-master03                            1/1     Running   0          137m    192.168.1.13      k8s-master03   <none>           <none>
kube-system            kube-apiserver-k8s-master01                  1/1     Running   0          173m    192.168.1.11      k8s-master01   <none>           <none>
kube-system            kube-apiserver-k8s-master02                  1/1     Running   0          152m    192.168.1.12      k8s-master02   <none>           <none>
kube-system            kube-apiserver-k8s-master03                  1/1     Running   0          137m    192.168.1.13      k8s-master03   <none>           <none>
kube-system            kube-controller-manager-k8s-master01         1/1     Running   2          173m    192.168.1.11      k8s-master01   <none>           <none>
kube-system            kube-controller-manager-k8s-master02         1/1     Running   1          152m    192.168.1.12      k8s-master02   <none>           <none>
kube-system            kube-controller-manager-k8s-master03         1/1     Running   0          137m    192.168.1.13      k8s-master03   <none>           <none>
kube-system            kube-proxy-4shwt                             1/1     Running   0          79m     192.168.1.12      k8s-master02   <none>           <none>
kube-system            kube-proxy-6n6z9                             1/1     Running   0          80m     192.168.1.13      k8s-master03   <none>           <none>
kube-system            kube-proxy-7r4pk                             1/1     Running   0          80m     192.168.1.14      k8s-node01     <none>           <none>
kube-system            kube-proxy-ddghx                             1/1     Running   0          80m     192.168.1.11      k8s-master01   <none>           <none>
kube-system            kube-proxy-msb5v                             1/1     Running   0          79m     192.168.1.15      k8s-node02     <none>           <none>
kube-system            kube-scheduler-k8s-master01                  1/1     Running   2          173m    192.168.1.11      k8s-master01   <none>           <none>
kube-system            kube-scheduler-k8s-master02                  1/1     Running   0          152m    192.168.1.12      k8s-master02   <none>           <none>
kube-system            kube-scheduler-k8s-master03                  1/1     Running   1          137m    192.168.1.13      k8s-master03   <none>           <none>
kube-system            metrics-server-d6c46b546-fwktk               1/1     Running   0          3m28s   172.169.244.193   k8s-master01   <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-86bb69c5f6-5mbh5   1/1     Running   0          3m38s   172.169.92.65     k8s-master02   <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-6576c84894-snkjz        1/1     Running   0          3m28s   172.162.195.3     k8s-master03   <none>           <none>
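~~~     As an extra sanity check (a minimal sketch, not part of the original output), node status can be confirmed the same way; every node should report Ready:

[root@k8s-master01 ~]# kubectl get node -owide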
### --- View pod metrics

[root@k8s-master01 ~]# kubectl top po -n kube-system  --use-protocol-buffers
NAME                                      CPU(cores)   MEMORY(bytes)   
calico-kube-controllers-cdd5755b9-9dtk4   2m           12Mi            
calico-node-jsprh                         60m          44Mi            
calico-node-n97ff                         48m          44Mi            
calico-node-tk4kz                         64m          57Mi            
calico-node-vfcxf                         54m          51Mi            
calico-node-wvwbh                         72m          50Mi            
coredns-6f6b8cc4f6-bxsh9                  6m           22Mi            
coredns-6f6b8cc4f6-p55bx                  3m           17Mi            
etcd-k8s-master01                         62m          69Mi            
etcd-k8s-master02                         90m          73Mi            
etcd-k8s-master03                         55m          76Mi            
kube-apiserver-k8s-master01               87m          324Mi           
kube-apiserver-k8s-master02               81m          289Mi           
kube-apiserver-k8s-master03               83m          300Mi           
kube-controller-manager-k8s-master01      41m          59Mi            
kube-controller-manager-k8s-master02      2m           26Mi            
kube-controller-manager-k8s-master03      2m           31Mi            
kube-proxy-4shwt                          12m          17Mi            
kube-proxy-6n6z9                          20m          22Mi            
kube-proxy-7r4pk                          13m          16Mi            
kube-proxy-ddghx                          12m          19Mi            
kube-proxy-msb5v                          17m          19Mi            
kube-scheduler-k8s-master01               4m           24Mi            
kube-scheduler-k8s-master02               3m           24Mi            
kube-scheduler-k8s-master03               5m           21Mi            
metrics-server-d6c46b546-fwktk            4m           17Mi 
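~~~     Node-level metrics can be checked the same way (an optional extra step; the --use-protocol-buffers flag may not be needed on newer kubectl versions):

[root@k8s-master01 ~]# kubectl top node --use-protocol-buffers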
2. Verify connectivity between pods
### --- View service addresses and ports
~~~     View the address and port of the kubernetes service

[root@k8s-master01 ~]# kubectl get service -owide
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE    SELECTOR
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   175m   <none>
### --- View the kube-dns service address and port
~~~     The coredns (kube-dns) service takes the tenth IP address of the service cluster IP range (10.96.0.10 here)

[root@k8s-master01 ~]# kubectl get service -n kube-system -owide
NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE    SELECTOR
kube-dns         ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   175m   k8s-app=kube-dns
metrics-server   ClusterIP   10.103.12.220   <none>        443/TCP                  99m    k8s-app=metrics-server
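~~~     A minimal DNS sanity check against the kube-dns cluster IP (assuming dig from bind-utils is installed on the node and the default cluster.local domain is used); it should resolve the kubernetes service name to 10.96.0.1:

[root@k8s-master01 ~]# dig @10.96.0.10 kubernetes.default.svc.cluster.local +short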
### --- Verify that the service ports are reachable
~~~     Test whether every host can reach these two addresses; run on all nodes

[root@k8s-master01 ~]# telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.
[root@k8s-master01 ~]# telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.
Connection closed by foreign host.
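~~~     telnet only proves the TCP ports are open; as an optional stronger check, the apiserver's /version endpoint can be queried through the service VIP (anonymous access to /version is allowed by default in kubeadm clusters):

[root@k8s-master01 ~]# curl -k https://10.96.0.1:443/version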
### --- Verify connectivity between the hosts and pods
~~~     View the existing pods

[root@k8s-master01 ~]# kubectl get po --all-namespaces -owide
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE    IP                NODE           NOMINATED NODE   READINESS GATES
kube-system            calico-node-n97ff                            1/1     Running   0          117m   192.168.1.11      k8s-master01   <none>           <none>
kube-system            coredns-6f6b8cc4f6-bxsh9                     1/1     Running   0          12m    172.162.195.2     k8s-master03   <none>           <none>
~~~     # Enter the container and ping every node
~~~     All addresses are reachable, which shows that cross-host pod access works without any problems
 
[root@k8s-master01 ~]# kubectl exec -ti  calico-node-n97ff -n kube-system -- bash
[root@k8s-master01 /]# ping  192.168.1.11
[root@k8s-master01 /]# ping  192.168.1.12
[root@k8s-master01 /]# ping  192.168.1.13
[root@k8s-master01 /]# ping  192.168.1.14
[root@k8s-master01 /]# ping  192.168.1.15
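~~~     The five pings above can also be run as a single loop from inside the container (a convenience sketch using the same node IPs):

[root@k8s-master01 /]# for ip in 192.168.1.{11..15}; do ping -c 2 -W 1 $ip; done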
### --- Verify connectivity between pods
~~~     # View the existing pods

[root@k8s-master01 ~]# kubectl get po --all-namespaces -owide
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE    IP                NODE           NOMINATED NODE   READINESS GATES
kube-system            calico-node-n97ff                            1/1     Running   0          117m   192.168.1.11      k8s-master01   <none>           <none>
kube-system            coredns-6f6b8cc4f6-bxsh9                     1/1     Running   0          12m    172.162.195.2     k8s-master03   <none>           <none>
~~~     # Verify that the pod on master01 and the pod on master03 can reach each other

[root@k8s-master01 ~]# kubectl exec -ti  calico-node-n97ff -n kube-system -- bash
~~~     # ping the address of the coredns-6f6b8cc4f6-bxsh9 container
~~~     This shows that pod-to-pod communication works

[root@k8s-master01 /]# ping 172.162.195.2
PING 172.162.195.2 (172.162.195.2) 56(84) bytes of data.
64 bytes from 172.162.195.2: icmp_seq=1 ttl=63 time=0.473 ms
64 bytes from 172.162.195.2: icmp_seq=2 ttl=63 time=1.42 ms
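~~~     As a final optional end-to-end check (assumes the nodes can pull busybox:1.28), a throwaway pod (here arbitrarily named dns-test) can confirm that service discovery works on top of the pod network:

[root@k8s-master01 ~]# kubectl run dns-test --image=busybox:1.28 --rm -ti --restart=Never -- nslookup kubernetes.default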