node节点的部署

master点赋予用户权限

[root@mast-1 k8s]# kubectl create clusterrolebinding kubelet-bootstrap \
> --clusterrole=system:node-bootstrapper \
> --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created

  生成配置文件脚本

[root@mast-1 k8s]# cat kubeconfig.sh 
# Generate bootstrap.kubeconfig (for kubelet TLS bootstrapping) and
# kube-proxy.kubeconfig in the current directory.
# Usage: bash kubeconfig.sh <apiserver-ip> <ssl-cert-dir>

# Create the TLS Bootstrapping token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
# NOTE: must match the token in the apiserver's token.csv, otherwise the
# kubelet CSR will be rejected.
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008

#cat > token.csv <<EOF
#${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
#EOF

#----------------------

# $1: apiserver address; $2: directory holding ca.pem / kube-proxy certs
APISERVER=$1
SSL_DIR=$2

# Create the kubelet bootstrapping kubeconfig
export KUBE_APISERVER="https://$APISERVER:6443"

# Set cluster parameters (--embed-certs inlines the CA into the kubeconfig,
# which is why the generated file is self-contained and can be scp'd as-is)
kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig

# Set client credentials — token auth only; the kubelet uses this token to
# request its client certificate during bootstrapping
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig

# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#----------------------

# Create the kube-proxy kubeconfig (certificate-based auth, unlike the
# token-based bootstrap kubeconfig above)

kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=$SSL_DIR/kube-proxy.pem \
  --client-key=$SSL_DIR/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

[root@mast-1 k8s]# bash kubeconfig.sh 192.168.10.11 /root/k8s/
[root@mast-1 k8s]# cat bootstrap.kubeconfig 
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR2akNDQXFhZ0F3SUJBZ0lVYnBqMkFDaS8zeDE2NDdqMXBHSlhxS2QxR3M0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEREQUtCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEU1TURReU1qRXdNREF3TUZvWERUSTBNRFF5TURFd01EQXdNRm93WlRFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RERBSwpCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByZFdKbGNtNWxkR1Z6Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBNUJaRFR5cFpDam1tUTE2VFRsSlMKVityVzZ4OUUrVGV5c3JiZ0FPNE1iMmNlVW1DaG56MU9SUXlvT0V3MnlJVFd5SkQyVmJZQUJKUEVMLzRQWk1QNApwblRXRVdXMGhYSXYvN0pNOTJid1hGWSsrWVFYVE42c2FzTXF1cGViL1ZwR0x4NFZiYjFNUUJPcDBtSHV3Q1ZxCk1PUEE2Z0ZMQ2lORmIrZHUvd1FDNjgzc2p1VnFEbWFNYXBEMmVMQmJXeFRaOFgxei9zUGtCTC9GcTRJM2JIbCsKUlBVWmVHakU3YmgwaXJ2S25KSWpKbmdnbStDdTAyc0NPZWNIN3JsUDhyNm5ONFY2L1JrMjN4NmlndHFMMWllawpYdncxNFpoUGM1MFE4Mk9HQW9SSGJjdlBiQUc3NTh0cjdkaXZ5WFZ6NjYzdlJYMC9jN1RHWW9yeDV0WmJiQVJOCkpRSURBUUFCbzJZd1pEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0VnWURWUjBUQVFIL0JBZ3dCZ0VCL3dJQkFqQWQKQmdOVkhRNEVGZ1FVZnlWQlE4YlJFZTJ6UzVTTXVCSGlPRGNGWExRd0h3WURWUjBqQkJnd0ZvQVVmeVZCUThiUgpFZTJ6UzVTTXVCSGlPRGNGWExRd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFIeXhxL2VDdVF6cU83bm8rdGlVCmRmNTZOcDY0Tit2OFdFYXc0S1lrZlZsZ0VReXNBR1RGNlBPV2RlSnJNRkw2WCtnRlpZSU41VVMyV0tiK1ErOHAKc1Y5bmdPUUNraGJwWjYwYWJXMUNCTFJ6eGJHdGE4Ymo4TnRZdU84TGZReWF4NnZOd3cwakpsTmpjejlBYS9tVQpWSGljMFZzVHphUFZ5NEhqN09MdVdGNS9NRjY2aVJySFl0aTV4WFpkZ3VMSWV3TDREemxuU042WTNOcDFHc1NTCmtwcmk2elprME1PSTRtbVBIMXdsR2xKOGhFU2dkZ3RrZVZIbUpUTjVid29hc0JSMWlZdXgyKzgwYXFkZUFFQXoKeFdYYU9wRXlKc3ZGbURZcWJBR3pSN2N3VUJqdEU5TjdIKzIrRjJ0VlBGaDlCbk45cXVjdmcxZTlSSCt2YlNHaAp2NjQ9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    server: https://192.168.10.11:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: 0fb61c46f8991b718eb38d27b605b008   token一定一致

   将master上生成的配置文件复制到node节点上

[root@mast-1 k8s]# scp bootstrap.kubeconfig kube-proxy.kubeconfig 192.168.10.13:/opt/kubernetes/cfg/
root@192.168.10.13's password: 
bootstrap.kubeconfig                                                                                                                                         100% 2167   187.8KB/s   00:00    
kube-proxy.kubeconfig                                                                                                                                        100% 6273   272.0KB/s   00:00    
[root@mast-1 k8s]# scp bootstrap.kubeconfig kube-proxy.kubeconfig 192.168.10.14:/opt/kubernetes/cfg/
The authenticity of host '192.168.10.14 (192.168.10.14)' can't be established.
ECDSA key fingerprint is SHA256:49eEsjLcmpvTYF6ELlDwwvvpnG9ikMvLKdITIjxW1PU.
ECDSA key fingerprint is MD5:c8:58:8a:28:65:88:de:dd:08:7d:4f:69:a3:3e:2b:25.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.10.14' (ECDSA) to the list of known hosts.
root@192.168.10.14's password: 
bootstrap.kubeconfig                                                                                                                                         100% 2167   231.8KB/s   00:00    
kube-proxy.kubeconfig                                                                                                                                        100% 6273    86.1KB/s   00:00   

  将kubelet、kube-proxy二进制文件拷贝到node节点

[root@mast-1 k8s]# scp kubernetes/server/bin/{kubelet,kube-proxy} 192.168.10.13:/opt/kubernetes/bin/
root@192.168.10.13's password: 
kubelet                                                                                                                                                      100%  169MB   2.7MB/s   01:02    
kube-proxy                                                                                                                                                   100%   48MB   2.6MB/s   00:18    
[root@mast-1 k8s]# scp kubernetes/server/bin/{kubelet,kube-proxy} 192.168.10.14:/opt/kubernetes/bin/
root@192.168.10.14's password: 
kubelet                                                                                                                                                      100%  169MB  11.2MB/s   00:15    
kube-proxy                                                                                                                                                   100%   48MB   8.0MB/s   00:06 

  运行kubelet.sh,脚本生成配置文件并启动kubelet

[root@node-1 ~]# cat kubelet.sh 
#!/bin/bash
# Generate the kubelet flags file, the KubeletConfiguration file and the
# systemd unit, then enable and (re)start kubelet on this node.
# Usage: bash kubelet.sh <node-ip> [cluster-dns-ip]

NODE_ADDRESS=$1
# Cluster DNS service IP; defaults to 10.0.0.2 when $2 is omitted.
DNS_SERVER_IP=${2:-"10.0.0.2"}

# Kubelet command-line flags, loaded via EnvironmentFile by the unit below.
# NOTE(review): --experimental-bootstrap-kubeconfig was removed in later
# kubelet releases in favor of --bootstrap-kubeconfig — fine for v1.12 here.
cat <<EOF >/opt/kubernetes/cfg/kubelet

KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_ADDRESS} \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

EOF

# KubeletConfiguration object, referenced by the --config flag above.
cat <<EOF >/opt/kubernetes/cfg/kubelet.config

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
- ${DNS_SERVER_IP} 
clusterDomain: cluster.local.
failSwapOn: false

EOF

# systemd unit: kubelet requires a running docker daemon on the node.
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
[root@node-1 ~]#  bash  kubelet.sh 192.168.10.13
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@node-1 ~]# systemctl start kubelet
[root@node-1 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
   Active: active (running) since 二 2019-04-23 15:51:21 CST; 1min 2s ago
 Main PID: 98249 (kubelet)
    Tasks: 9
   Memory: 15.2M
   CGroup: /system.slice/kubelet.service
           └─98249 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --address=192.168.10.13 --hostname-override=192.168.10.13 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --expe...

4月 23 15:51:22 node-1 kubelet[98249]: I0423 15:51:22.662290   98249 feature_gate.go:206] feature gates: &{map[]}
4月 23 15:51:24 node-1 kubelet[98249]: I0423 15:51:24.018824   98249 server.go:826] Using self-signed cert (/opt/kubernetes/ssl/kubelet.crt, /opt/kubernetes/ssl/kubelet.key)
4月 23 15:51:24 node-1 kubelet[98249]: I0423 15:51:24.089220   98249 mount_linux.go:179] Detected OS with systemd
4月 23 15:51:24 node-1 kubelet[98249]: I0423 15:51:24.089336   98249 server.go:408] Version: v1.12.1
4月 23 15:51:24 node-1 kubelet[98249]: I0423 15:51:24.089505   98249 feature_gate.go:206] feature gates: &{map[]}
4月 23 15:51:24 node-1 kubelet[98249]: I0423 15:51:24.089662   98249 feature_gate.go:206] feature gates: &{map[]}
4月 23 15:51:24 node-1 kubelet[98249]: I0423 15:51:24.089869   98249 plugins.go:99] No cloud provider specified.
4月 23 15:51:24 node-1 kubelet[98249]: I0423 15:51:24.089904   98249 server.go:524] No cloud provider specified: "" from the config file: ""
4月 23 15:51:24 node-1 kubelet[98249]: I0423 15:51:24.089979   98249 bootstrap.go:61] Using bootstrap kubeconfig to generate TLS client cert, key and kubeconfig file
4月 23 15:51:24 node-1 kubelet[98249]: I0423 15:51:24.107032   98249 bootstrap.go:92] No valid private key and/or certificate found, reusing existing private key or creating a new one

查看生成的配置文件

[root@node-1 ~]# vim /opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=false \   日志不输出到标准错误，而是写到下面指定的目录
--log-dir=/opt/kubernetes/logs \ 
--v=4 \
--address=192.168.10.13 \   节点IP
--hostname-override=192.168.10.13 \   主机名
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \  自动生成配置文件
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \  从master拷贝过来的配置文件
--config=/opt/kubernetes/cfg/kubelet.config \ 本身信息
--cert-dir=/opt/kubernetes/ssl \   证书存放目录
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"   第一个容器引用镜像地址

  查看进程

[root@node-1 ~]# ps -ef | grep kubelet
root     102230      1  9 16:44 ?        00:00:57 /opt/kubernetes/bin/kubelet --logtostderr=false --log-dir=/opt/kubernetes/logs --v=4 --address=192.168.10.13 --hostname-override=192.168.10.1
3 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --config=/opt/kubernetes/cfg/kubelet.config --cert-dir=/opt/kubernetes/ssl --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0

  在master查看请求

[root@mast-1 k8s]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-ZONNGVKhERtGrVy4_N5uQFlnt-JlvnAVazkN19No8M8   66m   kubelet-bootstrap   Pending

  在master同意此请求

[root@mast-1 k8s]# kubectl certificate approve node-csr-ZONNGVKhERtGrVy4_N5uQFlnt-JlvnAVazkN19No8M8
certificatesigningrequest.certificates.k8s.io/node-csr-ZONNGVKhERtGrVy4_N5uQFlnt-JlvnAVazkN19No8M8 approved
[root@mast-1 k8s]# kubectl get node
NAME            STATUS   ROLES    AGE   VERSION
192.168.10.13   Ready(准备完成状态)    <none>   62s   v1.12.1

  node的proxy的部署

[root@node-1 ~]# cat proxy.sh 
#!/bin/bash
# Generate the kube-proxy flags file and systemd unit, then enable and
# (re)start kube-proxy on this node.
# Usage: bash proxy.sh <node-ip>

NODE_ADDRESS=$1

# kube-proxy flags, loaded via EnvironmentFile by the unit below.
# The referenced kube-proxy.kubeconfig is the one scp'd from the master.
cat <<EOF >/opt/kubernetes/cfg/kube-proxy

KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"

EOF

# systemd unit; the leading '-' on EnvironmentFile makes a missing file
# non-fatal at unit start.
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
[root@node-1 ~]# bash proxy.sh 192.168.10.13
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@node-1 ~]# ps -ef | grep proxy
root     103894      1  4 17:06 ?        00:00:01 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=192.168.10.13 --cluster-cidr=10.0.0.0/24 --proxy-mode=ipvs --kubeconfig=/opt/k
ubernetes/cfg/kube-proxy.kubeconfigroot     104091   1436  0 17:06 pts/0    00:00:00 grep --color=auto proxy

  部署第二个节点的两个程序

[root@node-2 ~]# cd /opt/kubernetes/ssl/
[root@node-2 ssl]# ls
kubelet-client-2019-04-23-17-01-51.pem  kubelet-client-current.pem  kubelet.crt  kubelet.key
[root@node-2 ssl]# rm -rf *
[root@node-2 ssl]# cd ../cfg/
[root@node-2 cfg]# grep 192.168.10.13 *
flanneld:FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.10.11:2379,https://192.168.10.12:2379,https://192.168.10.13:2379 -etcd-cafile=/opt/etcd/ssl/ca.pem -etcd-certfile=/opt/etcd/ssl/serv
er.pem -etcd-keyfile=/opt/etcd/ssl/server-key.pem"kubelet:--address=192.168.10.13 \
kubelet:--hostname-override=192.168.10.13 \
kubelet.config:address: 192.168.10.13
[root@node-2 cfg]# vim kubelet
KUBELET_OPTS="--logtostderr=false \
--log-dir=/opt/kubernetes/logs \
--v=4 \
--address=192.168.10.14 \
--hostname-override=192.168.10.14 \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet.config \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
[root@node-2 cfg]# vim kubelet.config 
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.10.14
port: 10250  kubelet API监听端口
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local.
failSwapOn: false
[root@node-2 cfg]# vim kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.10.14 \
--cluster-cidr=10.0.0.0/24 \
--proxy-mode=ipvs \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
[root@node-2 cfg]# systemctl start kubelet.service  启动kube
[root@node-2 cfg]# systemctl start kube-proxy.service 启动kube-proxy
[root@mast-1 k8s]#  kubectl certificate approve node-csr-wBzPhquG-6WwQgPANF53GZn1KAL8zgKOglbfZEncLdk
certificatesigningrequest.certificates.k8s.io/node-csr-wBzPhquG-6WwQgPANF53GZn1KAL8zgKOglbfZEncLdk approved
[root@mast-1 k8s]# kubectl get node
NAME            STATUS     ROLES    AGE   VERSION
192.168.10.13   Ready      <none>   29m   v1.12.1
192.168.10.14   NotReady   <none>   6s    v1.12.1
[root@mast-1 k8s]# kubectl get node
NAME            STATUS     ROLES    AGE   VERSION
192.168.10.13   Ready      <none>   29m   v1.12.1
192.168.10.14   NotReady   <none>   9s    v1.12.1
[root@mast-1 k8s]# kubectl get node
NAME            STATUS   ROLES    AGE   VERSION
192.168.10.13   Ready    <none>   29m   v1.12.1
192.168.10.14   Ready    <none>   11s   v1.12.1

  

  

 

posted @ 2019-04-23 17:35  烟雨楼台,行云流水  阅读(830)  评论(0编辑  收藏  举报