Cluster architecture
Cluster system environment
一、Cluster environment configuration
1.1、Disable the swap partition on all nodes
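A minimal sketch of the usual commands, assuming the default /etc/fstab layout (turn swap off immediately, then comment out its fstab entry so it stays off after reboot):
# swapoff -a && sysctl -w vm.swappiness=0
# sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab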
1.2、Configure the yum environment
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
1.3、Disable firewalld, dnsmasq, and selinux
systemctl disable --now firewalld
systemctl disable --now dnsmasq
systemctl disable --now NetworkManager
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
1.4、Install the dependency tools required by the cluster
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y
1.5、Synchronize time across the cluster nodes
rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm
yum install -y ntpdate
echo 'Asia/Shanghai' > /etc/timezone
ntpdate time2.aliyun.com
1.6、Configure host resolution on the cluster nodes
#vim /etc/hosts
192.168.60.101 k8s-master01
192.168.60.102 k8s-master02
192.168.60.103 k8s-master03
192.168.60.236 k8s-master-lb
192.168.60.104 k8s-node01
192.168.60.105 k8s-node02
1.7、Schedule periodic time synchronization
# crontab -l
*/1 * * * * ntpdate time2.aliyun.com
1.8、Configure limits on all nodes
#cat >>/etc/security/limits.conf <<EOF
* soft nofile 65536
* hard nofile 131072
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
1.9、Set up passwordless SSH
# ssh-keygen -t rsa
# for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do ssh-copy-id -i .ssh/id_rsa.pub $i ;done
#Download the installation files on Master01
Link: https://pan.baidu.com/s/1mvmTi1fPoWRkPDbPAdiwow Password: k8sy #download the certificates and the YAML files the k8s cluster depends on from the cloud drive
Local link: https://files.cnblogs.com/files/blogs/776379/k8s-ha-install.tar.gz?t=1670295998
1.10、Upgrade the node operating system
yum update -y --exclude=kernel*
#Upgrade the CentOS kernel
CentOS 7 needs its kernel upgraded to 4.18+; the version installed here is 4.19
#wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm #Baidu drive: https://pan.baidu.com/s/1AwzqvNQ87ODLvYhsVJmMLQ extraction code: k8sy
#wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm #Baidu drive: https://pan.baidu.com/s/1g99OXQ_lRTV5wcw046C4Qw extraction code: k8sy
Use a for loop to push the kernel packages downloaded on k8s-master01 to all other cluster nodes in bulk
#for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02;do scp kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm $i:/root/ ; done
Install the new kernel on all nodes
#yum localinstall -y kernel-ml*
#grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg #change the kernel boot order
#grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
Verify that the default kernel is 4.19
# grubby --default-kernel
/boot/vmlinuz-4.19.12-1.el7.elrepo.x86_64
Once the upgrade is confirmed, reboot all cluster nodes for it to take effect, then check that the running kernel is 4.19 or later
# uname -a
Linux k8s-master01 4.19.12-1.el7.elrepo.x86_64 #1 SMP Fri Dec 21 11:06:36 EST 2018 x86_64 x86_64 x86_64 GNU/Linux
1.11、Deploy ipvsadm
Configure the ipvs modules on all nodes
On kernel 4.19+, nf_conntrack_ipv4 has been renamed to nf_conntrack; on kernels below 4.18, keep using nf_conntrack_ipv4:
#yum install ipvsadm ipset sysstat conntrack libseccomp -y
# cat /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip

#systemctl enable --now systemd-modules-load.service
#Configure kernel parameters on all nodes; enable the kernel parameters required by the k8s cluster
#cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
#sysctl --system
#reboot
#lsmod | grep --color=auto -e ip_vs -e nf_conntrack #list the modules loaded into the kernel
nf_conntrack          143360  4 xt_conntrack,nf_nat,nf_nat_ipv6,nf_nat_ipv4
nf_defrag_ipv6         20480  1 nf_conntrack
nf_defrag_ipv4         16384  1 nf_conntrack
libcrc32c              16384  3 nf_conntrack,nf_nat,xfs
二、Basic component deployment
2.1、Deploy Docker
Ps: Every node in this cluster also doubles as a worker node, so Docker must be deployed on all of them
#yum install -y docker-ce-19.03.*
#mkdir -p /etc/docker
#cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://registry.docker-cn.com", "http://hub-mirror.c.163.com", "https://docker.mirrors.ustc.edu.cn"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
#systemctl daemon-reload && systemctl enable --now docker
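To confirm Docker actually picked up the systemd cgroup driver after starting, a quick check (the expected output is shown, not captured from this cluster):
# docker info 2>/dev/null | grep -i 'cgroup driver'
 Cgroup Driver: systemd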
2.2、Deploy the Kubernetes components and etcd
[root@k8s-master01 ~]#wget https://github.com/etcd-io/etcd/releases/download/v3.4.12/etcd-v3.4.12-linux-amd64.tar.gz
#Extract the etcd and etcdctl binaries into the /usr/local/bin directory
[root@k8s-master01 ~]#tar -zxvf etcd-v3.4.12-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.4.12-linux-amd64/etcd{,ctl}
[root@k8s-master01 ~]#wget https://dl.k8s.io/v1.20.0/kubernetes-server-linux-amd64.tar.gz
Extract the Kubernetes binaries (kubelet, kubectl, kube-apiserver, kube-controller-manager, kube-scheduler, kube-proxy) into the /usr/local/bin directory;
Here --strip-components=3 strips three directory levels, so only the needed binaries are copied into /usr/local/bin/
[root@k8s-master01 ~]#tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
Check the kubelet and etcd version information
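For example (the versions follow from the v3.4.12 and v1.20.0 archives downloaded above):
[root@k8s-master01 ~]# etcdctl version
[root@k8s-master01 ~]# kubelet --version
Kubernetes v1.20.0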
#Define an environment variable and copy the binaries extracted on master01 to the other two master nodes
[root@k8s-master01 ~]#MasterNodes='k8s-master02 k8s-master03'
[root@k8s-master01 ~]#for NODE in $MasterNodes; do echo $NODE; scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done
#For the worker nodes, only the kubelet and kube-proxy binaries from master01 need to be copied over
[root@k8s-master01 ~]#WorkNodes='k8s-node01 k8s-node02'
[root@k8s-master01 ~]#for NODE in $WorkNodes;do scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done
At this point, all binaries required by the k8s cluster have been distributed to every node
三、Generate certificates
3.1、Install the certificate tooling
Create the /opt/cni/bin directory on all nodes
#mkdir -p /opt/cni/bin
Install the cfssl certificate-generation tool (Ps: it only needs to be installed on master01; once the certificates are generated, a for loop copies them to the other cluster nodes)
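The download itself is not shown in this guide; a minimal sketch, assuming the binaries are fetched from cfssl's GitHub releases (the version here is illustrative):
# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64 -O /usr/local/bin/cfssl
# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64 -O /usr/local/bin/cfssljson
# chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson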
3.2、Generate the etcd certificates
First, create the etcd certificate directory on all master nodes
#mkdir /etc/etcd/ssl -p
Create the Kubernetes pki certificate directory on all cluster nodes
#mkdir -p /etc/kubernetes/pki
Ps: https://pan.baidu.com/s/1zdh46AnHrk4NabaPClwn8A Password: 1u7n #the k8s-ha-install.tar.gz archive downloaded from the cloud drive
#Generate the etcd certificates on the Master01 node. The CSR files are certificate signing requests, configured with domain names, company, organizational unit, and similar fields;
[root@k8s-master01 pki]# cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca #generate the etcd CA certificate and its key
[root@k8s-master01 pki]# ls /etc/etcd/ssl/
etcd-ca.csr etcd-ca-key.pem etcd-ca.pem
#Generate the etcd client certificate
[root@k8s-master01 pki]# cfssl gencert \
-ca=/etc/etcd/ssl/etcd-ca.pem \
-ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.60.101,192.168.60.102,192.168.60.103 \
-profile=kubernetes \
etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
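To double-check that every hostname and IP passed via -hostname above actually landed in the certificate, the SAN list can be inspected (a verification step, not part of the original procedure):
# openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'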
#Copy the etcd certificate files to the other master nodes
#MasterNodes='k8s-master02 k8s-master03'
#WorkNodes='k8s-node01 k8s-node02'
[root@k8s-master01 pki]# for NODE in $MasterNodes;do ssh $NODE "mkdir -p /etc/etcd/ssl";for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE};done; done
3.3、Kubernetes component certificates
#Generate the Kubernetes certificates on Master01;
[root@k8s-master01 ~]# cd /root/k8s-ha-install/pki/
[root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
3.3.1、Generate the kube-apiserver certificate
#10.96.0.1 is the first address of the k8s service network; if you need to change the service network, change 10.96.0.1 accordingly. 192.168.60.236 is the VIP; if this is not a highly available cluster, use master01's IP here instead
[root@k8s-master01 pki]#cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname=10.96.0.1,192.168.60.236,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.60.101,192.168.60.102,192.168.60.103 -profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
3.3.2、Generate the apiserver aggregation certificates
[root@k8s-master01 pki]# cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca #first generate the CA for the apiserver aggregation certificate
[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client #generate the apiserver aggregation client certificate
3.3.3、Generate the controller-manager client certificate
#cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -profile=kubernetes manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
#Set a cluster entry, used to configure one or more clusters
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=https://192.168.60.236:8443 \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
Cluster "kubernetes" set.
#Set a user entry
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=/etc/kubernetes/pki/controller-manager.pem \
  --client-key=/etc/kubernetes/pki/controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
User "system:kube-controller-manager" set.
#Set a context entry that connects to the cluster as the kube-controller-manager user
[root@k8s-master01 pki]# kubectl config set-context system:kube-controller-manager@kubernetes \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
Context "system:kube-controller-manager@kubernetes" created.
#Use one of the contexts as the default
[root@k8s-master01 pki]# kubectl config use-context system:kube-controller-manager@kubernetes \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
Switched to context "system:kube-controller-manager@kubernetes".
3.3.4、Generate the scheduler certificate files
[root@k8s-master01 ~]# cd /root/k8s-ha-install/pki/
[root@k8s-master01 pki]# cfssl gencert \
  -ca=/etc/kubernetes/pki/ca.pem \
  -ca-key=/etc/kubernetes/pki/ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
#Set a cluster entry
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=https://192.168.60.236:8443 \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-scheduler \
  --client-certificate=/etc/kubernetes/pki/scheduler.pem \
  --client-key=/etc/kubernetes/pki/scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
[root@k8s-master01 pki]# kubectl config set-context system:kube-scheduler@kubernetes \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
[root@k8s-master01 pki]# kubectl config use-context system:kube-scheduler@kubernetes \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
#Generate the admin certificate, used to administer the Kubernetes cluster
[root@k8s-master01 pki]# cfssl gencert \
  -ca=/etc/kubernetes/pki/ca.pem \
  -ca-key=/etc/kubernetes/pki/ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.60.236:8443 --kubeconfig=/etc/kubernetes/admin.kubeconfig
# kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig
# kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes --user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig
# kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
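As a sanity check, the finished kubeconfig can be printed back; kubectl redacts the embedded certificate data:
# kubectl config view --kubeconfig=/etc/kubernetes/admin.kubeconfig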
3.3.5、Create the ServiceAccount key; a secret bound to it is generated, and the secret produces a token;
[root@k8s-master01 pki]# openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
Generating RSA private key, 2048 bit long modulus
.....................................+++
......................................+++
e is 65537 (0x10001)
[root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
writing RSA key
3.3.6、Send the generated Kubernetes certificates to the other master nodes
[root@k8s-master01 pki]# for NODE in k8s-master02 k8s-master03; do for FILE in $(ls /etc/kubernetes/pki/ | grep -v etcd); do scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}; done;done
[root@k8s-master01 pki]#for NODE in k8s-master02 k8s-master03; do for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; done; done
#ls /etc/kubernetes/pki/ |wc -l
23
四、Kubernetes system component configuration
4.1、etcd configuration
4.1.1、k8s-master01 configuration
[root@k8s-master01 ~]# vim /etc/etcd/etcd.config.yml
name: 'k8s-master01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.60.101:2380'
listen-client-urls: 'https://192.168.60.101:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.60.101:2380'
advertise-client-urls: 'https://192.168.60.101:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.60.101:2380,k8s-master02=https://192.168.60.102:2380,k8s-master03=https://192.168.60.103:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false

Create the etcd service unit on master01 so etcd starts at boot
[root@k8s-master01 ~]# vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd1.service

[root@k8s-master01 ~]#mkdir /etc/kubernetes/pki/etcd
[root@k8s-master01 ~]#ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-master01 ~]#systemctl daemon-reload
[root@k8s-master01 ~]#systemctl enable --now etcd
4.1.2、k8s-master02 configuration
[root@k8s-master02 ~]# vim /etc/etcd/etcd.config.yml
name: 'k8s-master02'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.60.102:2380'
listen-client-urls: 'https://192.168.60.102:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.60.102:2380'
advertise-client-urls: 'https://192.168.60.102:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.60.101:2380,k8s-master02=https://192.168.60.102:2380,k8s-master03=https://192.168.60.103:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false

[root@k8s-master02 ~]# vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd2.service

[root@k8s-master02 ~]#mkdir /etc/kubernetes/pki/etcd
[root@k8s-master02 ~]#ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-master02 ~]#systemctl daemon-reload
[root@k8s-master02 ~]#systemctl enable --now etcd
4.1.3、k8s-master03 configuration
[root@k8s-master03 ~]# vim /etc/etcd/etcd.config.yml #edit the configuration file
name: 'k8s-master03'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.60.103:2380'
listen-client-urls: 'https://192.168.60.103:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.60.103:2380'
advertise-client-urls: 'https://192.168.60.103:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.60.101:2380,k8s-master02=https://192.168.60.102:2380,k8s-master03=https://192.168.60.103:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false

[root@k8s-master03 ~]# vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service

[root@k8s-master03 ~]#mkdir /etc/kubernetes/pki/etcd
[root@k8s-master03 ~]#ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-master03 ~]#systemctl daemon-reload
[root@k8s-master03 ~]#systemctl enable --now etcd
Verify that the etcd cluster is running properly
Check the logs and make sure etcd is not printing any errors
#journalctl -f -u etcd #If the logs show "connect: connection refused" or similar errors after deploying the etcd cluster, check that the firewall and selinux are properly disabled on all nodes
-- Logs begin at Mon 2021-03-29 18:23:40 CST. --
Mar 30 14:29:40 k8s-master01 etcd[14611]: established a TCP streaming connection with peer f1b9c306df35ee70 (stream MsgApp v2 reader)
Mar 30 14:29:40 k8s-master01 etcd[14611]: established a TCP streaming connection with peer f1b9c306df35ee70 (stream Message reader)
Mar 30 14:29:40 k8s-master01 etcd[14611]: established a TCP streaming connection with peer f1b9c306df35ee70 (stream MsgApp v2 writer)
Mar 30 14:29:40 k8s-master01 etcd[14611]: established a TCP streaming connection with peer f1b9c306df35ee70 (stream Message writer)
Mar 30 14:29:40 k8s-master01 etcd[14611]: f71c0fef22fcf333 initialized peer connection; fast-forwarding 8 ticks (election ticks 10) with 2 active peer(s)
Mar 30 14:29:41 k8s-master01 etcd[14611]: health check for peer f1b9c306df35ee70 could not connect: dial tcp 192.168.60.103:2380: connect: connection refused
Mar 30 14:29:41 k8s-master01 etcd[14611]: health check for peer f1b9c306df35ee70 could not connect: dial tcp 192.168.60.103:2380: connect: connection refused
[root@k8s-master01 ~]# export ETCDCTL_API=3 #switch the etcdctl API to version 3
[root@k8s-master01 ~]# etcdctl --endpoints="192.168.60.103:2379,192.168.60.102:2379,192.168.60.101:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
The etcd cluster is running, and 192.168.60.101 is the current leader of the cluster;
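A per-endpoint health probe can also be run with the same flags, as a quick follow-up check:
[root@k8s-master01 ~]# etcdctl --endpoints="192.168.60.103:2379,192.168.60.102:2379,192.168.60.101:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health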
五、High-availability configuration
High availability for the three k8s-master nodes is provided by haproxy + keepalived
If this architecture is deployed in the cloud, this step can be skipped in favor of the cloud provider's LB, such as Alibaba Cloud SLB or Tencent Cloud ELB
On public cloud, use the built-in load balancer in place of haproxy and keepalived, because most public clouds do not support keepalived. Note also that with Alibaba Cloud, the kubectl client must not run on the master nodes: SLB has a loopback problem, meaning a server behind the SLB cannot reach the SLB address itself. Tencent Cloud's ELB has fixed this, so Tencent Cloud is recommended here
Ps: Install keepalived and haproxy on all master nodes
#yum install -y keepalived haproxy
5.1、Configure the haproxy service on all master nodes
#vim /etc/haproxy/haproxy.cfg
global
  maxconn 2000
  ulimit-n 16384
  log 127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode http
  option httplog
  timeout connect 5000
  timeout client 50000
  timeout server 50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend k8s-master
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master01 192.168.60.101:6443 check
  server k8s-master02 192.168.60.102:6443 check
  server k8s-master03 192.168.60.103:6443 check

# systemctl enable --now haproxy #enable haproxy at boot on each master node
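Before starting the service, the file can be syntax-checked; haproxy exits non-zero if the configuration is invalid:
# haproxy -c -f /etc/haproxy/haproxy.cfg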
Edit the keepalived configuration file on the three k8s-master nodes
5.2、Configure keepalived on k8s-master01
[root@k8s-master01 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    mcast_src_ip 192.168.60.101
    virtual_router_id 51
    priority 101
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.60.236
    }
    track_script {
        chk_apiserver
    }
}
5.3、Configure keepalived on k8s-master02
[root@k8s-master02 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    mcast_src_ip 192.168.60.102
    virtual_router_id 51
    priority 100
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.60.236
    }
    track_script {
        chk_apiserver
    }
}
5.4、Configure keepalived on k8s-master03
[root@k8s-master03 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    mcast_src_ip 192.168.60.103
    virtual_router_id 51
    priority 100
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.60.236
    }
    track_script {
        chk_apiserver
    }
}
5.5、Write the health-check script on every k8s-master node;
# vim /etc/keepalived/check_apiserver.sh
#!/bin/bash
err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi

#chmod +x /etc/keepalived/check_apiserver.sh
#systemctl enable --now keepalived
Verify that the HA cluster is working; test the VIP from any node with ping or telnet, for example:
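A minimal check, assuming keepalived currently holds the VIP and haproxy is listening on 8443 as configured above:
# ping -c 3 192.168.60.236
# telnet 192.168.60.236 8443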
六、Kubernetes components
#mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes #create these directories on all nodes for the deployment steps that follow
6.1、Deploy the kube-apiserver component
6.1.1、Master01 configuration
[root@k8s-master01 ~]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2 \
      --logtostderr=true \
      --allow-privileged=true \
      --bind-address=0.0.0.0 \
      --secure-port=6443 \
      --insecure-port=0 \
      --advertise-address=192.168.60.101 \
      --service-cluster-ip-range=10.96.0.0/12 \
      --service-node-port-range=30000-32767 \
      --etcd-servers=https://192.168.60.101:2379,https://192.168.60.102:2379,https://192.168.60.103:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
      --client-ca-file=/etc/kubernetes/pki/ca.pem \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
      --authorization-mode=Node,RBAC \
      --enable-bootstrap-token-auth=true \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
      --requestheader-allowed-names=aggregator \
      --requestheader-group-headers=X-Remote-Group \
      --requestheader-extra-headers-prefix=X-Remote-Extra- \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

[root@k8s-master01 ~]# systemctl enable --now kube-apiserver
6.1.2、Master02 configuration
[root@k8s-master02 ~]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2 \
      --logtostderr=true \
      --allow-privileged=true \
      --bind-address=0.0.0.0 \
      --secure-port=6443 \
      --insecure-port=0 \
      --advertise-address=192.168.60.102 \
      --service-cluster-ip-range=10.96.0.0/12 \
      --service-node-port-range=30000-32767 \
      --etcd-servers=https://192.168.60.101:2379,https://192.168.60.102:2379,https://192.168.60.103:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
      --client-ca-file=/etc/kubernetes/pki/ca.pem \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
      --authorization-mode=Node,RBAC \
      --enable-bootstrap-token-auth=true \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
      --requestheader-allowed-names=aggregator \
      --requestheader-group-headers=X-Remote-Group \
      --requestheader-extra-headers-prefix=X-Remote-Extra- \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

[root@k8s-master02 ~]# systemctl enable --now kube-apiserver
6.1.3、Master03 configuration
[root@k8s-master03 ~]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2 \
      --logtostderr=true \
      --allow-privileged=true \
      --bind-address=0.0.0.0 \
      --secure-port=6443 \
      --insecure-port=0 \
      --advertise-address=192.168.60.103 \
      --service-cluster-ip-range=10.96.0.0/12 \
      --service-node-port-range=30000-32767 \
      --etcd-servers=https://192.168.60.101:2379,https://192.168.60.102:2379,https://192.168.60.103:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
      --client-ca-file=/etc/kubernetes/pki/ca.pem \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
      --authorization-mode=Node,RBAC \
      --enable-bootstrap-token-auth=true \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
      --requestheader-allowed-names=aggregator \
      --requestheader-group-headers=X-Remote-Group \
      --requestheader-extra-headers-prefix=X-Remote-Extra- \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

[root@k8s-master03 ~]# systemctl enable --now kube-apiserver
Check the system log for kube-apiserver messages
#tail /var/log/messages
May  9 17:30:09 k8s-master01 kube-apiserver: I0509 17:30:09.629324    2419 clientconn.go:948] ClientConn switching balancer to "pick_first"
May  9 17:30:09 k8s-master01 kube-apiserver: I0509 17:30:09.629778    2419 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc00a8320b0, {CONNECTING <nil>}
May  9 17:30:09 k8s-master01 kube-apiserver: I0509 17:30:09.637829    2419 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc00a8320b0, {READY <nil>}
May  9 17:30:09 k8s-master01 kube-apiserver: I0509 17:30:09.638990    2419 controlbuf.go:508] transport: loopyWriter.run returning. connection error: desc = "transport is closing"
May  9 17:30:22 k8s-master01 kube-apiserver: I0509 17:30:22.818751    2419 client.go:360] parsed scheme: "passthrough"
May  9 17:30:22 k8s-master01 kube-apiserver: I0509 17:30:22.818798    2419 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://192.168.60.101:2379 <nil> 0 <nil>}] <nil> <nil>}
May  9 17:30:22 k8s-master01 kube-apiserver: I0509 17:30:22.818807    2419 clientconn.go:948] ClientConn switching balancer to "pick_first"
May  9 17:30:22 k8s-master01 kube-apiserver: I0509 17:30:22.818968    2419 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc00a8afca0, {CONNECTING <nil>}
May  9 17:30:22 k8s-master01 kube-apiserver: I0509 17:30:22.827739    2419 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc00a8afca0, {READY <nil>}
May  9 17:30:22 k8s-master01 kube-apiserver: I0509 17:30:22.828687    2419 controlbuf.go:508] transport: loopyWriter.run returning. connection error: desc = "transport is closing" #this error after starting the apiserver can be ignored; it is a normal connection-close message
6.2、Configure the kube-controller-manager service on all master nodes
#vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --root-ca-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --pod-eviction-timeout=2m0s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --allocate-node-cidrs=true \
      --cluster-cidr=172.16.0.0/12 \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --node-cidr-mask-size=24
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

#systemctl daemon-reload
#systemctl enable --now kube-controller-manager
6.3、Deploy the kube-scheduler component on all master nodes
#vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --leader-elect=true \
      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

# systemctl daemon-reload
# systemctl enable --now kube-scheduler
七、TLS Bootstrapping configuration
In a Kubernetes cluster, the node components kubelet and kube-proxy need to communicate with the master side (kube-apiserver). To keep that communication private and tamper-free, and to guarantee that each component only talks to trusted peers, client TLS certificates are used here
The bootstrap configuration only needs to be created on Master01; it is used to issue TLS certificates to the worker nodes
[root@k8s-master01 ~]# cd /root/k8s-ha-install/bootstrap/
Set a cluster entry
[root@k8s-master01 bootstrap]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.60.236:8443 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
Cluster "kubernetes" set.
Set a user entry in the kubeconfig
[root@k8s-master01 bootstrap]# kubectl config set-credentials tls-bootstrap-token-user --token=c8ad9c.2e4d610cf3e7426e --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
User "tls-bootstrap-token-user" set.
Set a context entry in the kubeconfig
[root@k8s-master01 bootstrap]# kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
Context "tls-bootstrap-token-user@kubernetes" modified.
Set the current context in the kubeconfig file
[root@k8s-master01 bootstrap]# kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
Switched to context "tls-bootstrap-token-user@kubernetes".
[root@k8s-master01 ~]# mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
[root@k8s-master01 bootstrap]# kubectl create -f bootstrap.secret.yaml #create the bootstrap secret from the YAML file
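The token set earlier (c8ad9c.2e4d610cf3e7426e) must match the token-id and token-secret defined in bootstrap.secret.yaml. By Kubernetes convention the secret is named bootstrap-token-<token-id>, so, assuming that naming, a quick consistency check is:
# kubectl get secret bootstrap-token-c8ad9c -n kube-system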
八、Worker node configuration
8.1、Copy the certificates
Copy the certificates generated for Kubernetes to the other master and worker nodes. The for loop below performs the following steps:
1、First, create the /etc/kubernetes/pki and /etc/etcd/ssl directories on every node to hold the certificates
2、Then copy the three certificates etcd-ca.pem, etcd.pem, and etcd-key.pem from /etc/etcd/ssl/ to the /etc/etcd/ssl directory on all remaining nodes
3、Finally, copy bootstrap-kubelet.kubeconfig from /etc/kubernetes/, plus ca.pem, ca-key.pem, and front-proxy-ca.pem from /etc/kubernetes/pki/, into the /etc/kubernetes/ tree on each node
[root@k8s-master01 ~]# cd /etc/kubernetes/
[root@k8s-master01 kubernetes]# for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do ssh $NODE mkdir -p /etc/kubernetes/pki /etc/etcd/ssl; for FILE in etcd-ca.pem etcd.pem etcd-key.pem; do scp /etc/etcd/ssl/$FILE $NODE:/etc/etcd/ssl/; done; for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}; done; done
8.2、Kubelet configuration
Configure kubelet.service on all worker nodes (Ps: the worker nodes here include the three master nodes, since the masters are reused as workers)
Ps: Before configuring, make sure the kubelet binary has already been copied to /usr/local/bin on each node
#vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target

# vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2"
Environment="KUBELET_EXTRA_ARGS=--root-dir=/home/kubelete/kubelet --node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS

#vim /etc/kubernetes/kubelet-conf.yml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s

# systemctl daemon-reload
# systemctl enable --now kubelet
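Once kubelet is up, TLS bootstrapping should request and receive node certificates automatically; a quick check from master01 (nodes stay NotReady until the CNI plugin is deployed in section 九):
# kubectl get csr
# kubectl get node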
8.3、Configure kube-proxy on all master and worker nodes
[root@k8s-master01 ~]# cd /root/k8s-ha-install/
[root@k8s-master01 k8s-ha-install]# kubectl -n kube-system create serviceaccount kube-proxy
[root@k8s-master01 k8s-ha-install]# kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
[root@k8s-master01 k8s-ha-install]# SECRET=$(kubectl -n kube-system get sa/kube-proxy \
--output=jsonpath='{.secrets[0].name}')
[root@k8s-master01 k8s-ha-install]# JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET \
--output=jsonpath='{.data.token}' | base64 -d)
[root@k8s-master01 k8s-ha-install]# PKI_DIR=/etc/kubernetes/pki
[root@k8s-master01 k8s-ha-install]# K8S_DIR=/etc/kubernetes
Set a cluster entry in the kubeconfig file
[root@k8s-master01 k8s-ha-install]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.60.236:8443 --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
Set a user entry in the kubeconfig file
[root@k8s-master01 k8s-ha-install]# kubectl config set-credentials kubernetes --token=${JWT_TOKEN} --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
Set a context entry in the kubeconfig file
[root@k8s-master01 k8s-ha-install]# kubectl config set-context kubernetes --cluster=kubernetes --user=kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
Set the current context in the kubeconfig file
[root@k8s-master01 k8s-ha-install]# kubectl config use-context kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
[root@k8s-master01 k8s-ha-install]# vim kube-proxy/kube-proxy.conf
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/12
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms

#On the k8s-master01 node, send the kube-proxy configuration files to the other nodes
[root@k8s-master01 k8s-ha-install]# for NODE in k8s-master01 k8s-master02 k8s-master03; do scp ${K8S_DIR}/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; scp kube-proxy/kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf; scp kube-proxy/kube-proxy.service $NODE:/usr/lib/systemd/system/kube-proxy.service; done
[root@k8s-master01 k8s-ha-install]# for NODE in k8s-node01 k8s-node02; do
scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
scp kube-proxy/kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf
scp kube-proxy/kube-proxy.service $NODE:/usr/lib/systemd/system/kube-proxy.service
done
#Enable the kube-proxy component on all k8s nodes
# systemctl daemon-reload
# systemctl enable --now kube-proxy
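Since mode is set to ipvs in kube-proxy.conf above, the generated virtual servers can be listed to confirm the mode took effect; entries for the 10.96.0.0/12 service addresses should appear:
# ipvsadm -Ln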
九、Deploy Calico
[root@k8s-master01 ~]# cd /root/k8s-ha-install/calico/
[root@k8s-master01 calico]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://192.168.60.101:2379,https://192.168.60.102:2379,https://192.168.60.103:2379"#g' calico-etcd.yaml
[root@k8s-master01 calico]# ETCD_CA=`cat /etc/kubernetes/pki/etcd/etcd-ca.pem | base64 | tr -d '\n'`
[root@k8s-master01 calico]# ETCD_CERT=`cat /etc/kubernetes/pki/etcd/etcd.pem | base64 | tr -d '\n'`
[root@k8s-master01 calico]# ETCD_KEY=`cat /etc/kubernetes/pki/etcd/etcd-key.pem | base64 | tr -d '\n'`
[root@k8s-master01 calico]# sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico-etcd.yaml
[root@k8s-master01 calico]# sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico-etcd.yaml
[root@k8s-master01 calico]# POD_SUBNET="172.16.0.0/12"
[root@k8s-master01 calico]# sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@# value: "192.168.0.0/16"@ value: '"${POD_SUBNET}"'@g' calico-etcd.yaml
[root@k8s-master01 calico]# kubectl apply -f calico-etcd.yaml
[root@k8s-master01 calico]# kubectl get po -n kube-system #check the calico pod status
十、Install CoreDNS
[root@k8s-master01 ~]# cd /root/k8s-ha-install/
[root@k8s-master01 k8s-ha-install]#vim CoreDNS/coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: registry.cn-beijing.aliyuncs.com/dotbalo/coredns:1.7.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

[root@k8s-master01 k8s-ha-install]# kubectl create -f CoreDNS/coredns.yaml
十一、Install Metrics Server
In newer Kubernetes versions, system resource metrics are collected by metrics-server, which gathers memory, disk, CPU, and network usage for nodes and pods
[root@k8s-master01 ~]# cd /root/k8s-ha-install/metrics-server-0.4.x
[root@k8s-master01 metrics-server-0.4.x]# kubectl create -f .
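Once the metrics-server pod is Running (the first samples can take a minute or two to appear), node metrics should be queryable, for example:
# kubectl top node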
十二、Install the dashboard
[root@k8s-master01 ~]# cd /root/k8s-ha-install/dashboard/
[root@k8s-master01 dashboard]# kubectl create -f .
[root@k8s-master01 dashboard]# kubectl get po -n kubernetes-dashboard
[root@k8s-master01 dashboard]# kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard #change the dashboard svc type to NodePort
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2020-12-24T01:20:21Z"
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  resourceVersion: "30932"
  uid: 141a3d84-aa0f-414f-995f-5d40a609ca22
spec:
  clusterIP: 10.96.33.35
  clusterIPs:
  - 10.96.33.35
  externalTrafficPolicy: Cluster
  ports:
  - nodePort: 30900
    port: 443
    protocol: TCP
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}

Ps: change the type from ClusterIP to NodePort (Ps: skip this step if it is already NodePort)
[root@k8s-master01 dashboard]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard #check the port the dashboard exposes; the dashboard UI is then reachable through any host running kube-proxy, or through the VIP, on that port
# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') #retrieve the token used to authenticate to the dashboard UI
#https://192.168.60.236:30900/#/login  Here I access the VIP plus the exposed port 30900;
十三、Cluster verification
1、Create a busybox pod
[root@k8s-master01 ~]# cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
2、Verify that pods can resolve services across namespaces; busybox runs in the default namespace, while the kube-dns service lives in the kube-system namespace;
3、Verify that every node can reach the kubernetes service on port 443 and the kube-dns service on port 53
[root@k8s-master01 ~]# kubectl get svc #check the kubernetes service address
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   5d1h
[root@k8s-master01 ~]# kubectl get svc -n kube-system
NAME             TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
kube-dns         ClusterIP   10.96.0.10       <none>        53/UDP,53/TCP,9153/TCP   4d23h
metrics-server   ClusterIP   10.111.213.213   <none>        443/TCP                  4d23h
4、Verify that pods across the cluster can communicate with each other
NAME                                       READY   STATUS    RESTARTS   AGE     IP               NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-5f6d4b864b-67tbv   1/1     Running   3          5d      192.168.60.105   k8s-node02     <none>           <none>
calico-node-b2jlr                          1/1     Running   3          5d      192.168.60.104   k8s-node01     <none>           <none>
calico-node-ggzsc                          1/1     Running   10         4d22h   192.168.60.102   k8s-master02   <none>           <none>
calico-node-mrmb7                          1/1     Running   3          5d      192.168.60.105   k8s-node02     <none>           <none>
calico-node-z4jgk                          1/1     Running   9          4d22h   192.168.60.103   k8s-master03   <none>           <none>
calico-node-zzbbk                          1/1     Running   4          4d22h   192.168.60.101   k8s-master01   <none>           <none>
coredns-867d46bfc6-x72tn                   1/1     Running   3          4d23h   172.17.125.8     k8s-node01     <none>           <none>
metrics-server-595f65d8d5-gw2rw            1/1     Running   3          4d23h   172.17.125.7     k8s-node01     <none>           <none>

#kubectl get pod -n default -owide
NAME      READY   STATUS    RESTARTS   AGE   IP             NODE           NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          47m   172.25.92.65   k8s-master02   <none>           <none>
[root@k8s-master01 ~]# kubectl exec -it busybox -n default -- sh #enter the busybox container and use ping to test pod-to-pod and cross-namespace connectivity
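Inside the container, cross-namespace DNS and the two ports from step 3 can be checked, for example (busybox 1.28 resolves through the clusterDNS address 10.96.0.10 configured in kubelet-conf.yml):
/ # nslookup kubernetes.default
/ # nslookup kube-dns.kube-system
/ # telnet 10.96.0.1 443
/ # telnet 10.96.0.10 53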
[root@k8s-master01 ~]# kubectl create deploy nginx --image=nginx --replicas=3 #create and deploy an nginx pod with three replicas
deployment.apps/nginx created
[root@k8s-master01 ~]# kubectl get deploy
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   3/3     3            3           47s

NAME                     READY   STATUS    RESTARTS   AGE     IP               NODE           NOMINATED NODE   READINESS GATES
busybox                  1/1     Running   0          59m     172.25.92.65     k8s-master02   <none>           <none>
nginx                    1/1     Running   0          4m6s    172.18.195.1     k8s-master03   <none>           <none>
nginx-6799fc88d8-lhxr7   1/1     Running   0          2m19s   172.25.244.198   k8s-master01   <none>           <none>
nginx-6799fc88d8-snd7l   1/1     Running   0          2m19s   172.27.14.193    k8s-node02     <none>           <none>
nginx-6799fc88d8-wr7v8   1/1     Running   0          2m19s   172.25.244.197   k8s-master01   <none>           <none>

END!