
Deploying a Highly Available Kubernetes Cluster with kubeadm

1 Node planning

Role            IP address        OS
k8s-master01    192.168.226.20    CentOS 7.8.2003
k8s-master02    192.168.226.21    CentOS 7.8.2003
k8s-master03    192.168.226.22    CentOS 7.8.2003
k8s-node01      192.168.226.23    CentOS 7.8.2003
k8s-lb          192.168.226.24    CentOS 7.8.2003

2 Environment initialization

1) Set the hostname, using k8s-master01 as an example (change the hostname on each node according to its role in the planning table; k8s-lb is the virtual IP, so no hostname needs to be set for it):

[root@localhost ~]# hostnamectl set-hostname k8s-master01

2) Configure the hosts mapping

cat <<EOF > /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.226.20 k8s-master01
192.168.226.21 k8s-master02
192.168.226.22 k8s-master03
192.168.226.23 k8s-node01
192.168.226.24 k8s-lb
EOF

3) Configure SSH keys

# Generate a key pair and distribute the public key to every node
# (the expect script assumes the root password is "root"):
ssh-keygen -t rsa -P "" -f /root/.ssh/id_rsa
yum install -y expect
for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-lb;do
expect -c "
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@$i
        expect {
                \"*yes/no*\" {send \"yes\r\"; exp_continue}
                \"*password*\" {send \"root\r\"; exp_continue}
                \"*Password*\" {send \"root\r\";}
        } "
done
# Verify name resolution and reachability:
for host in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-lb;do ping -c 1 $host;done

4) Disable the firewall, SELinux, and swap

systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i "s/^SELINUX=.*/SELINUX=disabled/g" /etc/selinux/config
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

5) Time synchronization

yum install chrony -y
systemctl enable chronyd
systemctl start chronyd
chronyc sources

6) Configure kernel parameters

ulimit -SHn 65535
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl --system
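If sysctl complains that the net.bridge.* keys do not exist, the br_netfilter module is not loaded yet. A minimal sketch for loading it now and on every boot (an addition, not part of the original steps):

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # loaded by systemd on boot
sysctl --system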

7) Kernel upgrade

wget https://cbs.centos.org/kojifiles/packages/kernel/4.9.220/37.el7/x86_64/kernel-4.9.220-37.el7.x86_64.rpm
rpm -ivh kernel-4.9.220-37.el7.x86_64.rpm
reboot
uname -r
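If uname -r shows the machine came back up on the old kernel, the new entry may not be the GRUB default. A hedged sketch for a BIOS-booted CentOS 7 system (UEFI systems keep grub.cfg under /boot/efi instead):

# run before the reboot above
grub2-set-default 0                           # entry 0 is the newest installed kernel
grub2-mkconfig -o /boot/grub2/grub.cfg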

8) Install IPVS

yum install ipvsadm ipset sysstat conntrack libseccomp -y

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
modprobe -- ip_tables
modprobe -- ip_set
modprobe -- xt_set
modprobe -- ipt_set
modprobe -- ipt_rpfilter
modprobe -- ipt_REJECT
modprobe -- ipip
EOF

9) Configure automatic module loading on boot

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

10) Install docker-ce

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum list | grep docker-ce
yum install docker-ce-19.03.8-3.el7 -y
systemctl start docker
systemctl enable docker
cat <<EOF >/etc/docker/daemon.json
{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],
  "exec-opts":["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker
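A quick check that Docker picked up the systemd cgroup driver and the registry mirror:

docker info | grep -i "cgroup driver"     # expect: Cgroup Driver: systemd
docker info | grep -A1 "Registry Mirrors"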

 

3 Install the Kubernetes components

All of the operations above (and this component installation) are performed on every node.

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet-1.18.2-0 kubeadm-1.18.2-0 kubectl-1.18.2-0 --disableexcludes=kubernetes
systemctl enable kubelet.service
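A quick sanity check that the pinned versions landed on each node:

kubeadm version -o short     # expect v1.18.2
kubelet --version            # expect Kubernetes v1.18.2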

4 Cluster initialization

1) Configure cluster high availability

High availability is provided by HAProxy + Keepalived, which handle failover and load-balance traffic across the master nodes; both run as daemons on all master nodes.

yum install keepalived haproxy -y
  • Configure haproxy

The configuration is identical on all master nodes:

Note: change the apiserver addresses to the master addresses from your own node plan.

cat <<EOF >/etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxies to the backends
#---------------------------------------------------------------------
frontend kubernetes
    mode                 tcp
    bind                 *:16443
    option               tcplog
    default_backend      kubernetes-apiserver

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server  k8s-master01 192.168.226.20:6443 check
    server  k8s-master02 192.168.226.21:6443 check
    server  k8s-master03 192.168.226.22:6443 check

#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind                 *:9999
    stats auth           admin:P@ssW0rd
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats
EOF
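HAProxy can validate the file itself before the service is started:

haproxy -c -f /etc/haproxy/haproxy.cfg    # should report the configuration is valid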
  • Configure keepalived

k8s-master01 configuration

cat <<EOF > /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
# health-check script definition
vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh" 
    interval 2                                  
    weight -5                                  
    fall 3                                   
    rise 2                               
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
      192.168.226.24
    }

    # invoke the health-check script (uncomment to enable)
    #track_script {
    #    check_apiserver
    #}
}

EOF

k8s-master02 configuration

cat <<EOF >/etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
# health-check script definition
vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh" 
    interval 2                                  
    weight -5                                  
    fall 3                                   
    rise 2                               
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       192.168.226.24
    }

    # invoke the health-check script (uncomment to enable)
    #track_script {
    #    check_apiserver
    #}
}

EOF

k8s-master03 configuration

cat <<EOF >/etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
# health-check script definition
vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh" 
    interval 2                                  
    weight -5                                  
    fall 3                                   
    rise 2                               
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 98
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       192.168.226.24
    }

    # invoke the health-check script (uncomment to enable)
    #track_script {
    #    check_apiserver
    #}
}
EOF

Write the health-check script (the path must match the one referenced in keepalived.conf):

cat <<'EOF' >/etc/keepalived/check_apiserver.sh
#!/bin/bash

# Poll for a running kube-apiserver process, retrying up to 5 times.
function check_apiserver(){
 for ((i=0;i<5;i++))
 do
  apiserver_job_id=$(pgrep kube-apiserver)
  if [[ ! -z ${apiserver_job_id} ]];then
   return
  else
   sleep 2
  fi
 done
 apiserver_job_id=0
}

# non-zero -> running    0 -> stopped
check_apiserver
if [[ $apiserver_job_id -eq 0 ]];then
 /usr/bin/systemctl stop keepalived
 exit 1
else
 exit 0
fi
EOF
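Make the script executable and syntax-check it (a small addition to the original steps; keepalived can only run it if it is executable):

chmod +x /etc/keepalived/check_apiserver.sh
bash -n /etc/keepalived/check_apiserver.sh    # syntax check; no output means OK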

Start haproxy and keepalived

systemctl enable --now keepalived
systemctl enable --now haproxy
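To verify the wiring, the VIP should be bound on the highest-priority master and the HAProxy frontend should be listening on every master:

# on k8s-master01 (priority 100):
ip addr show ens33 | grep 192.168.226.24
# on every master:
ss -lntp | grep 16443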

5 Deploy the masters

1) On k8s-master01, write the kubeadm.yaml configuration file as follows:

[root@k8s-master01 ~]# cat > kubeadm.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.2
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
controlPlaneEndpoint: "k8s-lb:16443"
networking:
  dnsDomain: cluster.local
  podSubnet: 192.168.0.0/16
  serviceSubnet: 10.211.0.0/12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
EOF
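One thing worth double-checking before init: podSubnet 192.168.0.0/16 overlaps the node network 192.168.226.0/24 used in this plan, which can cause routing conflicts; a disjoint range may be safer. It can also help to have kubeadm parse the file and list the images it resolves, which catches YAML typos early:

kubeadm config images list --config kubeadm.yaml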

2) Pull the images

[root@k8s-master01 ~]# kubeadm config images pull --config kubeadm.yaml
# Alternatively, in an offline scenario, load the same images from local tarballs:
docker load -i 1-18-kube-apiserver.tar.gz
docker load -i 1-18-kube-scheduler.tar.gz
docker load -i 1-18-kube-controller-manager.tar.gz
docker load -i 1-18-pause.tar.gz
docker load -i 1-18-cordns.tar.gz
docker load -i 1-18-etcd.tar.gz
docker load -i 1-18-kube-proxy.tar.gz
Notes:
pause is version 3.2; the image is k8s.gcr.io/pause:3.2
etcd is version 3.4.3; the image is k8s.gcr.io/etcd:3.4.3-0
coredns is version 1.6.7; the image is k8s.gcr.io/coredns:1.6.7
apiserver, scheduler, controller-manager, and kube-proxy are version 1.18.2; the images are:
k8s.gcr.io/kube-apiserver:v1.18.2
k8s.gcr.io/kube-controller-manager:v1.18.2
k8s.gcr.io/kube-scheduler:v1.18.2
k8s.gcr.io/kube-proxy:v1.18.2

3) Run the initialization

On the k8s-master01 node:

kubeadm init --config kubeadm.yaml --upload-certs

Record the kubeadm join command printed at the end; it is needed later when the remaining master nodes and the worker nodes join.

4) Configure environment variables

On the k8s-master01 node:

cat >> /root/.bashrc <<EOF
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF
source /root/.bashrc

5) Check node status

[root@k8s-master01 ~]# kubectl get node
NAME           STATUS     ROLES    AGE   VERSION
k8s-master01   NotReady   master   3m    v1.18.2

6) Install the network plugin

If any node has multiple NICs, the internal NIC must be specified in the manifest (nodes with a single NIC need no change):

wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml
vi calico.yaml
......
      containers:
        # Runs calico-node container on each Kubernetes node.  This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v3.8.8-1
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Wait for the datastore.
            - name: IP_AUTODETECTION_METHOD # add this environment variable to the DaemonSet
              value: interface=ens33 # specify the internal NIC
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
......
# install the calico network plugin
[root@k8s-master01 ~]# kubectl apply -f calico.yaml
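The calico-node DaemonSet takes a minute or two to roll out; it can be watched with the k8s-app=calico-node label from the standard manifest:

kubectl -n kube-system get pods -l k8s-app=calico-node -w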

Once the network plugin is installed, the node status looks like this:

[root@k8s-master01 ~]# kubectl get node
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    master   17m   v1.18.2

The status has changed from NotReady to Ready.

7) Join master02 to the cluster

  • Pull the images (copy kubeadm.yaml over from k8s-master01 first)
kubeadm config images pull --config kubeadm.yaml
  • Run the join command recorded from the init output:
kubeadm join k8s-lb:16443 --token wnukb8.gn07zmn7il6jdysv \
    --discovery-token-ca-cert-hash sha256:fe0f71f154cfe35cf1ffc19742bd68d360da08e688f6e9a8f5d4c3211d9ae204 \
    --control-plane --certificate-key fae1a738686dc651c52617d4413368d0a694719cbc88d444b550fb88854e9763
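If the other masters join later and the credentials have expired (the bootstrap token lasts 24 hours, the uploaded certificate key 2 hours), regenerate them on k8s-master01:

kubeadm token create --print-join-command         # prints a fresh join command
kubeadm init phase upload-certs --upload-certs    # prints a fresh certificate key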

Configure the environment variables

cat >> /root/.bashrc <<EOF
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF
source /root/.bashrc

Repeat the same steps on master03.

[root@k8s-master03 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE     VERSION
k8s-master01   Ready    master   28m     v1.18.2
k8s-master02   Ready    master   2m31s   v1.18.2
k8s-master03   Ready    master   55s     v1.18.2
  • Check cluster component status

If everything is Running, all components are healthy; if not, inspect the logs of the affected pod to troubleshoot.

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide
NAME                                       READY   STATUS     RESTARTS   AGE       NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-77c5fc8d7f-stl57   1/1     Running    0          26m      k8s-master01   <none>           <none>
calico-node-ppsph                          1/1     Running    0          26m      k8s-master01   <none>           <none>
calico-node-tl6sq                          1/1     Running   0          26m      k8s-master02   <none>           <none>
calico-node-w92qh                          1/1     Running    0          26m      k8s-master03   <none>           <none>
coredns-546565776c-vtlhr                   1/1     Running    0          42m      k8s-master01   <none>           <none>
coredns-546565776c-wz9bk                   1/1     Running    0          42m      k8s-master01   <none>           <none>
etcd-k8s-master01                          1/1     Running    0          42m      k8s-master01   <none>           <none>
etcd-k8s-master02                          1/1     Running    0          30m      k8s-master02   <none>           <none>
etcd-k8s-master03                          1/1     Running    0          28m      k8s-master03   <none>           <none>
kube-apiserver-k8s-master01                1/1     Running    0          42m      k8s-master01   <none>           <none>
kube-apiserver-k8s-master02                1/1     Running    0          30m      k8s-master02   <none>           <none>
kube-apiserver-k8s-master03                1/1     Running    0          28m      k8s-master03   <none>           <none>
kube-controller-manager-k8s-master01       1/1     Running    1          42m      k8s-master01   <none>           <none>
kube-controller-manager-k8s-master02       1/1     Running    1          30m      k8s-master02   <none>           <none>
kube-controller-manager-k8s-master03       1/1     Running    0          28m      k8s-master03   <none>           <none>
kube-proxy-6sbpp                           1/1     Running    0          28m      k8s-master03   <none>           <none>
kube-proxy-dpppr                           1/1     Running    0          42m      k8s-master01   <none>           <none>
kube-proxy-ln7l7                           1/1     Running    0          30m      k8s-master02   <none>           <none>
kube-scheduler-k8s-master01                1/1     Running    1          42m      k8s-master01   <none>           <none>
kube-scheduler-k8s-master02                1/1     Running    1          30m      k8s-master02   <none>           <none>
kube-scheduler-k8s-master03                1/1     Running    0          28m      k8s-master03   <none>           <none>

Check the CSRs

[root@k8s-master01 ~]# kubectl get csr
NAME        AGE   SIGNERNAME                                    REQUESTOR                  CONDITION
csr-cfl2w   42m   kubernetes.io/kube-apiserver-client-kubelet   system:node:k8s-master01   Approved,Issued
csr-mm7g7   28m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:3k4vr0    Approved,Issued
csr-qzn6r   30m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:3k4vr0    Approved,Issued
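At this point the worker has not joined yet. On k8s-node01, run the recorded join command without the control-plane flags (same token and hash as above):

kubeadm join k8s-lb:16443 --token wnukb8.gn07zmn7il6jdysv \
    --discovery-token-ca-cert-hash sha256:fe0f71f154cfe35cf1ffc19742bd68d360da08e688f6e9a8f5d4c3211d9ae204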

 

 
