Introduction to and deployment of k8s worker nodes

4. Worker node deployment

4.1. kubelet deployment

4.1.1. Issuing the certificate

Certificate issuance is performed on hdss7-200.

[root@hdss7-200 ~]# cd /opt/certs/
[root@hdss7-200 certs]# vim kubelet-csr.json # add the IP of every machine that may run kubelet to the hosts list
{
    "CN": "k8s-kubelet",
    "hosts": [
    "127.0.0.1",
    "10.4.7.10",
    "10.4.7.21",
    "10.4.7.22",
    "10.4.7.23",
    "10.4.7.24",
    "10.4.7.25",
    "10.4.7.26",
    "10.4.7.27",
    "10.4.7.28"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet
2020/01/06 23:10:56 [INFO] generate received request
2020/01/06 23:10:56 [INFO] received CSR
2020/01/06 23:10:56 [INFO] generating key: rsa-2048
2020/01/06 23:10:56 [INFO] encoded CSR
2020/01/06 23:10:56 [INFO] signed certificate with serial number 61221942784856969738771370531559555767101820379
2020/01/06 23:10:56 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@hdss7-200 certs]# ls kubelet* -l
-rw-r--r-- 1 root root 1115 Jan  6 23:10 kubelet.csr
-rw-r--r-- 1 root root  452 Jan  6 23:10 kubelet-csr.json
-rw------- 1 root root 1675 Jan  6 23:10 kubelet-key.pem
-rw-r--r-- 1 root root 1468 Jan  6 23:10 kubelet.pem

[root@hdss7-200 certs]# scp kubelet.pem kubelet-key.pem hdss7-21:/opt/apps/kubernetes/server/bin/certs/
[root@hdss7-200 certs]# scp kubelet.pem kubelet-key.pem hdss7-22:/opt/apps/kubernetes/server/bin/certs/
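To confirm that every node IP made it into the signed certificate before distributing it, the SAN list can be inspected with openssl (an optional check; assumes GNU grep for the -A1 flag):

[root@hdss7-200 certs]# openssl x509 -in kubelet.pem -noout -text | grep -A1 "Subject Alternative Name"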

4.1.2. Creating the kubelet configuration

The kubelet configuration is created on hdss7-21 and hdss7-22.

  • set-cluster # define the cluster to connect to; multiple k8s clusters can be defined
[root@hdss7-21 ~]# kubectl config set-cluster myk8s \
--certificate-authority=/opt/apps/kubernetes/server/bin/certs/ca.pem \
--embed-certs=true \
--server=https://10.4.7.10:7443 \
--kubeconfig=/opt/apps/kubernetes/conf/kubelet.kubeconfig
  • set-credentials # define the user account, i.e. the client private key and certificate used to authenticate; multiple credentials can be defined
[root@hdss7-21 ~]# kubectl config set-credentials k8s-node \
--client-certificate=/opt/apps/kubernetes/server/bin/certs/client.pem \
--client-key=/opt/apps/kubernetes/server/bin/certs/client-key.pem \
--embed-certs=true \
--kubeconfig=/opt/apps/kubernetes/conf/kubelet.kubeconfig
  • set-context # define the context, i.e. the mapping between a user and a cluster
[root@hdss7-21 ~]# kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=k8s-node \
--kubeconfig=/opt/apps/kubernetes/conf/kubelet.kubeconfig
  • use-context # select which context is currently in use
[root@hdss7-21 ~]# kubectl config use-context myk8s-context --kubeconfig=/opt/apps/kubernetes/conf/kubelet.kubeconfig
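As an optional sanity check, the generated kubeconfig can be dumped to confirm the cluster address, user, and current context (embedded certificate data is redacted by default):

[root@hdss7-21 ~]# kubectl config view --kubeconfig=/opt/apps/kubernetes/conf/kubelet.kubeconfig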

4.1.3. Authorizing the k8s-node user

This step only needs to be executed on one master node.

Bind the k8s-node user to the cluster role system:node, so that k8s-node has the permissions of a worker node.

[root@hdss7-21 ~]# vim k8s-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node
[root@hdss7-21 ~]# kubectl create -f k8s-node.yaml 
clusterrolebinding.rbac.authorization.k8s.io/k8s-node created
[root@hdss7-21 ~]# kubectl get clusterrolebinding k8s-node
NAME       AGE
k8s-node   36s
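To see exactly what this binding grants (which role, which subjects), the full object can be dumped; this is optional:

[root@hdss7-21 ~]# kubectl get clusterrolebinding k8s-node -o yaml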

4.1.4. Preparing the pause image

Push the pause image into the private Harbor registry; performed only on hdss7-200:

[root@hdss7-200 ~]# docker image pull kubernetes/pause
[root@hdss7-200 ~]# docker image tag kubernetes/pause:latest harbor.od.com/public/pause:latest
[root@hdss7-200 ~]# docker login -u admin harbor.od.com
[root@hdss7-200 ~]# docker image push harbor.od.com/public/pause:latest
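Optionally, verify from a node that the image can be pulled back out of Harbor (assuming the public project allows pulls, or that the node has run docker login first):

[root@hdss7-21 ~]# docker image pull harbor.od.com/public/pause:latest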

4.1.5. Creating the startup script

Create the startup script on the node machines and start kubelet; servers involved: hdss7-21, hdss7-22. The --hostname-override flag must be adjusted to match each machine (see the note after the script).

[root@hdss7-21 ~]# vim /opt/apps/kubernetes/server/bin/kubelet-startup.sh
#!/bin/sh

WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

/opt/apps/kubernetes/server/bin/kubelet \
    --anonymous-auth=false \
    --cgroup-driver systemd \
    --cluster-dns 192.168.0.2 \
    --cluster-domain cluster.local \
    --runtime-cgroups=/systemd/system.slice \
    --kubelet-cgroups=/systemd/system.slice \
    --fail-swap-on="false" \
    --client-ca-file ./certs/ca.pem \
    --tls-cert-file ./certs/kubelet.pem \
    --tls-private-key-file ./certs/kubelet-key.pem \
    --hostname-override hdss7-21.host.com \
    --image-gc-high-threshold 20 \
    --image-gc-low-threshold 10 \
    --kubeconfig ../../conf/kubelet.kubeconfig \
    --log-dir /data/logs/kubernetes/kube-kubelet \
    --pod-infra-container-image harbor.od.com/public/pause:latest \
    --root-dir /data/kubelet
[root@hdss7-21 ~]# chmod u+x /opt/apps/kubernetes/server/bin/kubelet-startup.sh
[root@hdss7-21 ~]# mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
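On hdss7-22 the same script is used; only --hostname-override differs. One quick way to adapt it after copying the file over (a sketch, assuming the script already sits at the same path on hdss7-22):

[root@hdss7-22 ~]# sed -i 's/hdss7-21.host.com/hdss7-22.host.com/' /opt/apps/kubernetes/server/bin/kubelet-startup.sh

The [program:kube-kubelet-7-21] name in the supervisord ini below would likewise become kube-kubelet-7-22 on that host.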




[root@hdss7-21 ~]# vim /etc/supervisord.d/kube-kubelet.ini
[program:kube-kubelet-7-21]
command=/opt/apps/kubernetes/server/bin/kubelet-startup.sh
numprocs=1
directory=/opt/apps/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=5
stdout_capture_maxbytes=1MB
stdout_events_enabled=false
[root@hdss7-21 ~]# supervisorctl update
[root@hdss7-21 ~]# supervisorctl status
etcd-server-7-21                 RUNNING   pid 23637, uptime 1 day, 14:56:25
kube-apiserver-7-21              RUNNING   pid 32591, uptime 16:35:54
kube-controller-manager-7-21     RUNNING   pid 33357, uptime 14:40:09
kube-kubelet-7-21                RUNNING   pid 37232, uptime 0:01:08
kube-scheduler-7-21              RUNNING   pid 33450, uptime 14:30:50
[root@hdss7-21 ~]# kubectl get node
NAME                STATUS   ROLES    AGE     VERSION
hdss7-21.host.com   Ready    <none>   3m13s   v1.15.2
hdss7-22.host.com   Ready    <none>   3m13s   v1.15.2

4.1.6. Setting node roles

The ROLES column returned by kubectl get nodes is empty (<none>); it can be set as follows:

[root@hdss7-21 ~]# kubectl get node
NAME                STATUS   ROLES    AGE     VERSION
hdss7-21.host.com   Ready    <none>   3m13s   v1.15.2
hdss7-22.host.com   Ready    <none>   3m13s   v1.15.2
[root@hdss7-21 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/node=
node/hdss7-21.host.com labeled
[root@hdss7-21 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/master=
node/hdss7-21.host.com labeled
[root@hdss7-21 ~]# kubectl label node hdss7-22.host.com node-role.kubernetes.io/master=
node/hdss7-22.host.com labeled
[root@hdss7-21 ~]# kubectl label node hdss7-22.host.com node-role.kubernetes.io/node=
node/hdss7-22.host.com labeled
[root@hdss7-21 ~]# kubectl get node
NAME                STATUS   ROLES         AGE     VERSION
hdss7-21.host.com   Ready    master,node   7m44s   v1.15.2
hdss7-22.host.com   Ready    master,node   7m44s   v1.15.2
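If a role label is applied to the wrong node, it can be removed again with the trailing-dash syntax, for example:

[root@hdss7-21 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/master-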

4.2. kube-proxy deployment

4.2.1. Issuing the certificate

Certificate issuance is performed on hdss7-200.

[root@hdss7-200 ~]# cd /opt/certs/
[root@hdss7-200 certs]# vim kube-proxy-csr.json  # the CN is effectively the user name inside k8s
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client
2020/01/07 21:45:53 [INFO] generate received request
2020/01/07 21:45:53 [INFO] received CSR
2020/01/07 21:45:53 [INFO] generating key: rsa-2048
2020/01/07 21:45:53 [INFO] encoded CSR
2020/01/07 21:45:53 [INFO] signed certificate with serial number 620191685968917036075463174423999296907693104226
2020/01/07 21:45:53 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
[root@hdss7-200 certs]# ls kube-proxy-c* -l  # kube-proxy authenticates as the user system:kube-proxy, so the generic client certificate cannot be reused; a dedicated certificate must be issued
-rw-r--r-- 1 root root 1005 Jan  7 21:45 kube-proxy-client.csr
-rw------- 1 root root 1675 Jan  7 21:45 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 Jan  7 21:45 kube-proxy-client.pem
-rw-r--r-- 1 root root  267 Jan  7 21:45 kube-proxy-csr.json

[root@hdss7-200 certs]# scp kube-proxy-client-key.pem kube-proxy-client.pem hdss7-21:/opt/apps/kubernetes/server/bin/certs/
[root@hdss7-200 certs]# scp kube-proxy-client-key.pem kube-proxy-client.pem hdss7-22:/opt/apps/kubernetes/server/bin/certs/
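The user name kube-proxy will authenticate as is taken from the certificate CN; it can be confirmed with an optional check:

[root@hdss7-200 certs]# openssl x509 -in kube-proxy-client.pem -noout -subject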

4.2.2. Creating the kube-proxy configuration

Created on all node machines; servers involved: hdss7-21, hdss7-22

[root@hdss7-21 ~]# kubectl config set-cluster myk8s \
--certificate-authority=/opt/apps/kubernetes/server/bin/certs/ca.pem \
--embed-certs=true \
--server=https://10.4.7.10:7443 \
--kubeconfig=/opt/apps/kubernetes/conf/kube-proxy.kubeconfig
  
[root@hdss7-21 ~]# kubectl config set-credentials kube-proxy \
--client-certificate=/opt/apps/kubernetes/server/bin/certs/kube-proxy-client.pem \
--client-key=/opt/apps/kubernetes/server/bin/certs/kube-proxy-client-key.pem \
--embed-certs=true \
--kubeconfig=/opt/apps/kubernetes/conf/kube-proxy.kubeconfig
  
[root@hdss7-21 ~]# kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=kube-proxy \
--kubeconfig=/opt/apps/kubernetes/conf/kube-proxy.kubeconfig
  
[root@hdss7-21 ~]# kubectl config use-context myk8s-context --kubeconfig=/opt/apps/kubernetes/conf/kube-proxy.kubeconfig
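An optional check that the new kubeconfig has the expected context selected:

[root@hdss7-21 ~]# kubectl config current-context --kubeconfig=/opt/apps/kubernetes/conf/kube-proxy.kubeconfig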

4.2.3. Loading the ipvs modules

kube-proxy supports three traffic-forwarding modes: userspace, iptables, and ipvs; ipvs offers the best performance.

[root@hdss7-21 ~]# for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done
[root@hdss7-21 ~]# lsmod | grep ip_vs  # check the loaded ipvs modules
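The modprobe loop above only loads the modules for the current boot. To have them loaded automatically after a reboot, one option is the standard systemd-modules-load mechanism (a sketch; the module list below is an assumption based on the nq scheduler used in the startup script):

[root@hdss7-21 ~]# cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_nq
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF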

4.2.4. Creating the startup script

[root@hdss7-21 ~]# vim /opt/apps/kubernetes/server/bin/kube-proxy-startup.sh
#!/bin/sh

WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

/opt/apps/kubernetes/server/bin/kube-proxy \
  --cluster-cidr 172.7.0.0/16 \
  --hostname-override hdss7-21.host.com \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq \
  --kubeconfig ../../conf/kube-proxy.kubeconfig
[root@hdss7-21 ~]# chmod u+x /opt/apps/kubernetes/server/bin/kube-proxy-startup.sh
[root@hdss7-21 ~]# mkdir -p /data/logs/kubernetes/kube-proxy
[root@hdss7-21 ~]# vim /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy-7-21]
command=/opt/apps/kubernetes/server/bin/kube-proxy-startup.sh                
numprocs=1                                                      
directory=/opt/apps/kubernetes/server/bin                            
autostart=true                                                  
autorestart=true                                                
startsecs=30                                                    
startretries=3                                                  
exitcodes=0,2                                                   
stopsignal=QUIT                                                 
stopwaitsecs=10                                                 
user=root                                                       
redirect_stderr=true                                            
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log
stdout_logfile_maxbytes=64MB                                    
stdout_logfile_backups=5                                       
stdout_capture_maxbytes=1MB                                     
stdout_events_enabled=false

[root@hdss7-21 ~]# supervisorctl update

4.2.5. Verifying the cluster

[root@hdss7-21 ~]# supervisorctl status
etcd-server-7-21                 RUNNING   pid 23637, uptime 2 days, 0:27:18
kube-apiserver-7-21              RUNNING   pid 32591, uptime 1 day, 2:06:47
kube-controller-manager-7-21     RUNNING   pid 33357, uptime 1 day, 0:11:02
kube-kubelet-7-21                RUNNING   pid 37232, uptime 9:32:01
kube-proxy-7-21                  RUNNING   pid 47088, uptime 0:06:19
kube-scheduler-7-21              RUNNING   pid 33450, uptime 1 day, 0:01:43

[root@hdss7-21 ~]# yum install -y ipvsadm
[root@hdss7-21 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.0.1:443 nq
  -> 10.4.7.21:6443               Masq    1      0          0         
  -> 10.4.7.22:6443               Masq    1      0          0  
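The 192.168.0.1:443 virtual server above is kube-proxy's ipvs rendering of the default kubernetes Service; it can be cross-checked against the Service list:

[root@hdss7-21 ~]# kubectl get svc kubernetes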

On any worker node, create a resource manifest to test the cluster.
Here we use the hdss7-21.host.com host.

[root@hdss7-21 ~]# vi /root/nginx-ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80

[root@hdss7-21 ~]# kubectl create -f /root/nginx-ds.yaml 
daemonset.extensions/nginx-ds created
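The pod IPs used in the curl tests below (172.7.21.2 and 172.7.22.2) can be looked up with:

[root@hdss7-21 ~]# kubectl get pods -o wide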

[root@hdss7-21 ~]# curl -I 172.7.21.2
HTTP/1.1 200 OK
Server: nginx/1.17.6
Date: Tue, 07 Jan 2020 14:28:46 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 19 Nov 2019 12:50:08 GMT
Connection: keep-alive
ETag: "5dd3e500-264"
Accept-Ranges: bytes

[root@hdss7-21 ~]# curl -I 172.7.22.2  # without a network (CNI) plugin, cross-node pod traffic does not work yet

Verify the deployment results

[root@hdss7-21 supervisord.d]# kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
nginx-ds-fbxjj   1/1     Running   0          4m25s
nginx-ds-vgf7k   1/1     Running   0          4m25s
[root@hdss7-21 supervisord.d]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok                   
scheduler            Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}   
[root@hdss7-21 supervisord.d]# kubectl get nodes
NAME                STATUS   ROLES         AGE   VERSION
hdss7-21.host.com   Ready    master,node   41h   v1.15.2
hdss7-22.host.com   Ready    master,node   41h   v1.15.2
Checking a certificate's validity period:
openssl x509 -in client.pem -text -noout