k8s(8):k8s安装(七)部署Master组件(apiserver+scheduler+controller-manager)

Master端需要安装的组件如下:

  1. kube-apiserver
  2. kube-scheduler
  3. kube-controller-manager

一、安装API Server服务

1. 下载Kubernetes二进制包(1.15.1)(master-1)

[root@master-1 soft]# cd /soft
[root@master-1 soft]# tar xvf kubernetes-server-linux-amd64.tar.gz 
[root@master-1 soft]# cd kubernetes/server/bin/
[root@master-1 bin]# cp kube-scheduler kube-apiserver kube-controller-manager kubectl /usr/local/bin/

#复制执行文件到其他的master节点
[root@master-1 bin]# for i in master-2 master-3;do scp /usr/local/bin/kube* $i:/usr/local/bin/;done

2. 配置Kubernetes证书

#Kubernetes各个组件之间通信需要证书,需要复制到每个master节点(master-1)
[root@master-1  soft]#mkdir -p /etc/kubernetes/{cfg,ssl}
[root@master-1  soft]#cp /root/kubernetes/*.pem /etc/kubernetes/ssl/

#复制到其他的节点
[root@master-1  soft]# for i in master-2 master-3 node-1 node-2;do ssh $i mkdir -p /etc/kubernetes/{cfg,ssl};done
[root@master-1  soft]# for i in master-2 master-3 node-1 node-2;do scp /etc/kubernetes/ssl/* $i:/etc/kubernetes/ssl/;done
[root@master-1 bin]# for i in master-2 master-3 node-1 node-2;do echo $i "---------->"; ssh $i ls /etc/kubernetes/ssl;done

3. 创建 TLS Bootstrapping Token

# TLS bootstrapping 功能就是让 kubelet 先使用一个预定的低权限用户连接到 apiserver,
#然后向 apiserver 申请证书,kubelet 的证书由 apiserver 动态签署
#Token可以是任意的包含128 bit的字符串,可以使用安全的随机数发生器生成
[root@master-1  soft]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
f89a76f197526a0d4bc2bf9c86e871c3

4. 编辑Token 文件(master-1)

#f89a76f197526a0d4bc2bf9c86e871c3:随机字符串,自定义生成; kubelet-bootstrap:用户名; 10001:UID; system:kubelet-bootstrap:用户组
[root@master-1  soft]# vim /etc/kubernetes/cfg/token.csv
f89a76f197526a0d4bc2bf9c86e871c3,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

#复制到其他的master节点
[root@master-1 bin]# for i in master-2 master-3;do scp /etc/kubernetes/cfg/token.csv $i:/etc/kubernetes/cfg/token.csv;done

5. 创建Apiserver配置文件(所有的master节点)

#配置文件内容基本相同, 如果有多个节点, 那么需要修改IP地址即可
[root@master-1  soft]# cat >/etc/kubernetes/cfg/kube-apiserver.cfg <<EOFL
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=https://172.31.7.41:2379,https://172.31.7.42:2379,https://172.31.7.43:2379 \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=0.0.0.0 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/server.pem \
--etcd-keyfile=/etc/etcd/ssl/server-key.pem"
EOFL
------------------------------------------------------------------------------------------------------------------------------
'''
#参数说明
--logtostderr                             启用日志 
--v                                      日志等级
--etcd-servers                         etcd 集群地址 
--etcd-servers=https://172.31.7.41:2379,https://172.31.7.42:2379,https://172.31.7.43:2379
--bind-address                         监听地址 
--secure-port https                        安全端口 
--advertise-address                     集群通告地址 
--allow-privileged                      启用授权 
--service-cluster-ip-range Service             虚拟IP地址段 
--enable-admission-plugins                 准入控制模块 
--authorization-mode                      认证授权,启用RBAC授权
--enable-bootstrap-token-auth             启用TLS bootstrap功能
--token-auth-file                         token 文件 
--service-node-port-range                 Service Node类型默认分配端口范围
'''

6. 配置kube-apiserver 启动文件(所有的master节点)

[root@master-1  soft]# cat >/usr/lib/systemd/system/kube-apiserver.service<<EOFL
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.cfg
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOFL

7. 启动kube-apiserver服务

[root@master-1  soft]# service kube-apiserver start 
[root@master-1  soft]# chkconfig kube-apiserver on
[root@master-1  soft]# service kube-apiserver status
[root@master-2 ~]# service kube-apiserver status
Redirecting to /bin/systemctl status kube-apiserver.service
● kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; disabled; vendor preset: disabled)
   Active: active (running) since Sun 2020-04-05 15:12:09 CST; 523ms ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 13884 (kube-apiserver)
   CGroup: /system.slice/kube-apiserver.service
           └─13884 /usr/local/bin/kube-apiserver --logtostderr=true --v=4 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --etcd-servers=https://172.31.7.41:2379,https://192.16...

Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939058   13884 flags.go:33] FLAG: --token-auth-file="/etc/kubernetes/cfg/token.csv"
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939062   13884 flags.go:33] FLAG: --v="4"
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939065   13884 flags.go:33] FLAG: --version="false"
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939077   13884 flags.go:33] FLAG: --vmodule=""
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939081   13884 flags.go:33] FLAG: --watch-cache="true"
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939085   13884 flags.go:33] FLAG: --watch-cache-sizes="[]"
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939103   13884 services.go:45] Setting service IP to "10.0.0.1" (read-write).
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939120   13884 server.go:560] external host was not specified, using 172.31.7.42
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.939129   13884 server.go:603] Initializing cache sizes based on 0MB limit
Apr 05 15:12:10 master-2 kube-apiserver[13884]: I0405 15:12:09.952964   13884 server.go:147] Version: v1.15.1


#查看加密的端口是否已经启动
[root@master-2 ~]# netstat -anltup | grep 6443
tcp        0      0 172.31.7.42:6443      0.0.0.0:*               LISTEN      14061/kube-apiserve 
tcp        0      0 172.31.7.42:6443      172.31.7.42:36760     ESTABLISHED 14061/kube-apiserve 
tcp        0      0 172.31.7.42:36760     172.31.7.42:6443      ESTABLISHED 14061/kube-apiserve

#查看加密的端口是否已经启动(node节点)
[root@node-1 ~]# telnet 172.31.7.49 6443
Trying 172.31.7.49...
Connected to 172.31.7.49.
Escape character is '^]'.

二、部署kube-scheduler 服务

1. 创建kube-scheduler配置文件(所有的master节点)

[root@master-1  soft]# cat >/etc/kubernetes/cfg/kube-scheduler.cfg<<EOFL
KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --bind-address=0.0.0.0 --master=127.0.0.1:8080 --leader-elect"
EOFL

#查看配置文件
[root@master-3 ~]# cat  /etc/kubernetes/cfg/kube-scheduler.cfg

2. 创建kube-scheduler 启动文件

#创建kube-scheduler systemd unit 文件(所有的master节点)
[root@master-1  soft]# cat >/usr/lib/systemd/system/kube-scheduler.service<<EOFL
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.cfg
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOFL

3. 启动kube-scheduler服务(所有的master节点)

[root@master-1  soft]# service kube-scheduler restart
[root@master-1  soft]# chkconfig kube-scheduler on

4. 查看Master节点组件状态(任意一台master)

[root@master-1 bin]# kubectl get cs
NAME                 STATUS      MESSAGE               ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused   
scheduler            Healthy     ok                                                                                          
etcd-0               Healthy     {"health":"true"}

三、部署kube-controller-manager

1. 创建kube-controller-manager配置文件(所有节点)

[root@master-1 bin]# cat >/etc/kubernetes/cfg/kube-controller-manager.cfg<<EOFL
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=0.0.0.0 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem"
EOFL
#参数说明
--master=127.0.0.1:8080  #指定Master地址
--leader-elect              #竞争选举机制产生一个 leader 节点,其它节点为阻塞状态。
--service-cluster-ip-range #kubernetes service 指定的IP地址范围。

2. 创建kube-controller-manager 启动文件

[root@master-1 bin]#  cat  >/usr/lib/systemd/system/kube-controller-manager.service<<EOFL
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.cfg
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOFL

3. 启动kube-controller-manager服务

[root@master-1 bin]#  chkconfig kube-controller-manager on
[root@master-1 bin]#  service kube-controller-manager start
[root@master-2 ~]# service kube-controller-manager status
Redirecting to /bin/systemctl status kube-controller-manager.service
● kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2020-04-05 15:52:30 CST; 1s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 16979 (kube-controller)
   CGroup: /system.slice/kube-controller-manager.service
           └─16979 /usr/local/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=0.0.0.0

4. 查看Master 节点组件状态

#必须要在各个节点组件正常的情况下, 才去部署Node节点组件.(master节点)
[root@master-1 bin]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}

 

posted on 2021-05-08 20:27  torotoise512  阅读(280)  评论(0)    收藏  举报