
2. Installing a k8s Cluster from Binaries (Chapter 1: k8s High-Availability Cluster Installation)

Author: 北京小远
Source: http://www.cnblogs.com/bj-xy/
Reference course: Kubernetes全栈架构师
Do not republish this document without crediting the source; the author reserves the right to pursue legal liability!


1. High-Availability Architecture

Management layer
master:
ETCD (key-value database): stores the state data generated by the k8s cluster
ApiServer: the k8s control-plane entry point
ControllerManager: cluster controller
scheduler: cluster scheduler; schedules pods and other workloads

Load-balancing layer
Load Balancer: load balancing in front of the masters
(HAProxy + Keepalived)

Node layer
node:
kubelet: node agent that manages containers on the node
kube-proxy: programs Service forwarding rules on the node

2. Environment Initialization

2.1 Disable firewalld, SELinux, and NetworkManager

# Disable the firewall and NetworkManager
systemctl disable --now firewalld
systemctl disable --now NetworkManager
# Disable SELinux
setenforce 0
vi /etc/sysconfig/selinux    # set:
SELINUX=disabled
# Disable swap; with swap enabled performance suffers and the kubelet (configured later with failSwapOn) refuses to start
swapoff -a && sysctl -w vm.swappiness=0
vi /etc/fstab    # comment out the swap mount entry

2.2 Configure hosts resolution


Add the following entries to /etc/hosts on every node (IP first, then hostname). Note that later sections use the short hostnames k8s1-k8s4 and the 172.16.20.0/24 network; adjust to your environment.
172.16.0.101 k8smaster01
172.16.0.102 k8smaster02
172.16.0.103 k8smaster03
172.16.0.121 k8snode01
172.16.0.122 k8snode02
172.16.0.100 k8smasterslb
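To apply these entries on each node in one step, a small sketch using the list above:

cat >> /etc/hosts <<EOF
172.16.0.101 k8smaster01
172.16.0.102 k8smaster02
172.16.0.103 k8smaster03
172.16.0.121 k8snode01
172.16.0.122 k8snode02
172.16.0.100 k8smasterslb
EOF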

2.3 Set up time synchronization with ntpdate

yum install ntp -y

Make sure the timezone is Asia/Shanghai:
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo "Asia/Shanghai" > /etc/timezone

Sync the time and add a cron job:
ntpdate time2.aliyun.com
echo "*/5 * * * * ntpdate time2.aliyun.com" >> /etc/crontab

2.4 Configure ulimit

ulimit -SHn 65535
(This only affects the current shell; persistent limits are written to /etc/security/limits.conf in section 2.7.)

2.5 Upgrade the Linux kernel (4.18+ required)

Check the current version:
cat /proc/version

Add the ELRepo repository:
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm

List the available kernel versions:
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available

Install the mainline kernel and its headers:
yum --enablerepo=elrepo-kernel install kernel-ml kernel-ml-devel -y

Make the new kernel the default boot entry, then reboot:
grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg && grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)" && reboot

After the reboot, check the kernel:
uname -a

2.6 Install ipvsadm and basic tools

Install the basic tools:
yum install wget jq psmisc vim net-tools yum-utils device-mapper-persistent-data lvm2 git -y

ipvs replaces iptables for Service traffic forwarding; ipvs performs better at scale:
yum install ipvsadm ipset sysstat conntrack libseccomp -y

Load the ipvs modules on all nodes; on kernel 4.19+ the nf_conntrack_ipv4 module has been renamed to nf_conntrack:
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack

cat > /etc/modules-load.d/k8s-ipvs.conf <<\EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

Enable automatic module loading at boot:
systemctl enable --now systemd-modules-load.service

Verify the modules are loaded:
lsmod | grep -e ip_vs -e nf_conntrack

2.7 System parameter tuning


echo """* soft nofile 655360
* hard nofile 131072
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited """ >> /etc/security/limits.conf


cat <<\EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

Apply the settings (the net.bridge.* keys require the br_netfilter kernel module; run modprobe br_netfilter first if sysctl reports unknown keys):
sysctl --system

Reboot the OS:
reboot

Verify after the reboot:
sysctl -a
lsmod | grep --color=auto -e ip_vs -e nf_conntrack

3. Installing the Basic Components

3.1 Install and configure Docker CE

Note: Docker 19.03 is the officially recommended version for this setup (pick a specific release with yum list docker-ce --showduplicates if needed).
1. Install the required system tools:
yum install -y yum-utils device-mapper-persistent-data lvm2
2. Add the repository:
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
3. Refresh the cache and install Docker CE:
yum makecache fast
yum -y install docker-ce
4. Start the Docker service:
service docker start
5. Enable it at boot:
systemctl enable docker

6. Configure daemon.json
Limit the log size, add a registry mirror, and switch the cgroup driver to systemd:
cat > /etc/docker/daemon.json <<\EOF
{
        "exec-opts": ["native.cgroupdriver=systemd"],
        "log-driver": "json-file",
        "log-opts": {
                "max-size": "50m",
                "max-file": "3"
        },
        "registry-mirrors": ["https://ufkb7xyg.mirror.aliyuncs.com"]
}
EOF

7. Restart docker:
systemctl restart docker
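Confirm the new settings took effect; the cgroup driver line should read systemd:

docker info | grep -i 'cgroup driver'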

3.2 Install the high-availability components

3.2.1 Install HAProxy and Keepalived

Install HAProxy and Keepalived on all master nodes:

yum install keepalived -y
yum install haproxy -y

3.2.2 Configure HAProxy

Configure haproxy on all master nodes (the config is identical on every node):
cat > /etc/haproxy/haproxy.cfg <<\EOF
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend k8s-master
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master01   172.16.20.141:6443  check
  server k8s-master02   172.16.20.142:6443  check
  server k8s-master03   172.16.20.143:6443  check
EOF

3.2.3 Configure Keepalived

Configure Keepalived on all master nodes. Unlike haproxy, the config differs per node (state, priority, and mcast_src_ip); the MASTER must carry the highest priority. The track_script block is left commented out on purpose: uncomment it on all nodes only after kube-apiserver is running, otherwise the health check would stop keepalived before the cluster exists.

Master01

cat > /etc/keepalived/keepalived.conf <<\EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 10
    weight -5
    fall 3  
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    mcast_src_ip 172.16.20.141
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.20.190
    }
#    track_script {
#       chk_apiserver
#    }
}
EOF

Master02

cat > /etc/keepalived/keepalived.conf <<\EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 10
    weight -5
    fall 3  
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    mcast_src_ip 172.16.20.142
    virtual_router_id 51
    priority 99
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.20.190
    }
#    track_script {
#       chk_apiserver
#    }
}
EOF

Master03

cat > /etc/keepalived/keepalived.conf <<\EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 10
    weight -5
    fall 3  
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    mcast_src_ip 172.16.20.143
    virtual_router_id 51
    priority 98
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.20.190
    }
#    track_script {
#       chk_apiserver
#    }
}
EOF

Create the Keepalived health-check script.
Run on all masters:

cat > /etc/keepalived/check_apiserver.sh <<\EOF
#!/bin/bash

# Probe kube-apiserver up to 5 times; if it is never found,
# stop keepalived so the VIP fails over to another master.
err=0
for k in $(seq 1 5)
do
    check_code=$(pgrep kube-apiserver)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF
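The script must be executable, otherwise the vrrp_script check cannot run it:

chmod +x /etc/keepalived/check_apiserver.sh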

3.2.4 Start haproxy and keepalived

systemctl enable --now haproxy
systemctl enable --now keepalived

Check that haproxy is listening on 8443 and that the VIP responds:
netstat -nultp
ping 172.16.20.190

4. Downloading and Distributing the Kubernetes Components

4.1 Download the base packages

Kubernetes binaries: download them from the official release page (linked from the CHANGELOG in the kubernetes/kubernetes GitHub repository).

Client binaries
Server binaries
Node binaries
Download the amd64 .tar.gz package for each.

etcd: download from the etcd GitHub releases page:

wget https://github.com/etcd-io/etcd/releases/download/v3.4.12/etcd-v3.4.12-linux-amd64.tar.gz
Version 3.4.12 is used here.

4.2 Extract and install to /usr/local/bin

Extract etcd:
tar -zxvf etcd-v3.4.12-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.4.12-linux-amd64/etcd{,ctl}

Extract the kubernetes server binaries:
tar -xf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}

Check the versions:
kubelet --version
etcdctl version

Distribute the binaries to the other nodes. From here on the short hostnames k8s1-k8s4 are used (k8s1-k8s3 are masters, k8s4 is a worker):
MasterNodes='k8s2 k8s3'
WorkNodes='k8s4'

for NODE in $MasterNodes; do echo $NODE; scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done

for NODE in $WorkNodes; do     scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done

5. Generating the Kubernetes Certificates

Certificate generation is critical: a mistake here can render the entire cluster unusable.

5.1 Download the certificate tools

Download the cfssl tools on master01:
wget "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -O /usr/local/bin/cfssl
wget "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson

Create the etcd certificate directory on all master nodes:
mkdir /opt/etcd/ssl -p

Create the kubernetes directories on all nodes:
mkdir -p /opt/kubernetes/pki

5.2 Generate the etcd self-signed certificates

Run on master01:
mkdir -p /opt/k8s-ha-install/pki/

# The CSR fields below can stay as-is; this CA is used only internally by etcd
cat > /opt/k8s-ha-install/pki/etcd-ca-csr.json <<\EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF

# Generate the etcd CA certificate and its key
cd /opt/k8s-ha-install/pki/
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /opt/etcd/ssl/etcd-ca

# Signing configuration used when issuing etcd certificates; expiry is the validity period (876000h, roughly 100 years)
cat > /opt/k8s-ha-install/pki/ca-config.json <<\EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF

cat > /opt/k8s-ha-install/pki/etcd-csr.json <<\EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF

# Set -hostname= to the IPs and hostnames that will run etcd; include a few spare IPs now to allow scaling later
cfssl gencert \
   -ca=/opt/etcd/ssl/etcd-ca.pem \
   -ca-key=/opt/etcd/ssl/etcd-ca-key.pem \
   -config=/opt/k8s-ha-install/pki/ca-config.json \
   -hostname=127.0.0.1,k8s1,k8s2,k8s3,172.16.20.141,172.16.20.142,172.16.20.143 \
   -profile=kubernetes \
   etcd-csr.json | cfssljson -bare /opt/etcd/ssl/etcd
   
# Distribute the etcd certificates to the other masters
MasterNodes='k8s2 k8s3'
WorkNodes='k8s4'
cd /opt/etcd/ssl
for NODE in $MasterNodes; do
     ssh $NODE "mkdir -p /opt/etcd/ssl"
     for FILE in etcd-ca-key.pem  etcd-ca.pem  etcd-key.pem  etcd.pem; do
       scp /opt/etcd/ssl/${FILE} $NODE:/opt/etcd/ssl/${FILE}
     done
done

5.3 Generate the Kubernetes certificates

5.3.1 Generate the kube-apiserver certificate

# Run on master01
Generate the Kubernetes CA:

cat > /opt/k8s-ha-install/pki/ca-csr.json <<\EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

cfssl gencert -initca /opt/k8s-ha-install/pki/ca-csr.json | cfssljson -bare /opt/kubernetes/pki/ca

# Use ca.pem / ca-key.pem to issue the other component certificates
cat > /opt/k8s-ha-install/pki/ca-config.json <<\EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF

cat > /opt/k8s-ha-install/pki/apiserver-csr.json <<\EOF
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# Change the VIP (172.16.20.190) and the master IPs (172.16.20.141-143) to your own; extra IPs can be included to allow adding masters later.
# If you change the k8s Service network, also change 10.96.0.1 (the first IP of the Service CIDR).

cfssl gencert \
   -ca=/opt/kubernetes/pki/ca.pem \
   -ca-key=/opt/kubernetes/pki/ca-key.pem \
   -config=/opt/k8s-ha-install/pki/ca-config.json \
   -hostname=10.96.0.1,172.16.20.190,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,172.16.20.141,172.16.20.142,172.16.20.143 \
   -profile=kubernetes \
   /opt/k8s-ha-install/pki/apiserver-csr.json | cfssljson -bare /opt/kubernetes/pki/apiserver

# Generate the CA for the aggregation layer (front proxy)

cat > /opt/k8s-ha-install/pki/front-proxy-ca-csr.json <<\EOF
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF

cfssl gencert -initca /opt/k8s-ha-install/pki/front-proxy-ca-csr.json | cfssljson -bare /opt/kubernetes/pki/front-proxy-ca 

# Generate the front-proxy client certificate, used by the apiserver to authenticate aggregated API requests

cat > /opt/k8s-ha-install/pki/front-proxy-client-csr.json <<\EOF
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF

cfssl gencert -ca=/opt/kubernetes/pki/front-proxy-ca.pem -ca-key=/opt/kubernetes/pki/front-proxy-ca-key.pem -config=/opt/k8s-ha-install/pki/ca-config.json   -profile=kubernetes  /opt/k8s-ha-install/pki/front-proxy-client-csr.json | cfssljson -bare /opt/kubernetes/pki/front-proxy-client

5.3.2 Generate the kube-controller-manager certificate

cat > /opt/k8s-ha-install/pki/manager-csr.json <<\EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

cfssl gencert \
   -ca=/opt/kubernetes/pki/ca.pem \
   -ca-key=/opt/kubernetes/pki/ca-key.pem \
   -config=/opt/k8s-ha-install/pki/ca-config.json \
   -profile=kubernetes \
   /opt/k8s-ha-install/pki/manager-csr.json | cfssljson -bare /opt/kubernetes/pki/controller-manager
   
# set-cluster: define a cluster entry
kubectl config set-cluster kubernetes \
     --certificate-authority=/opt/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://172.16.20.190:8443 \
     --kubeconfig=/opt/kubernetes/controller-manager.kubeconfig

# set-credentials: define a user entry
kubectl config set-credentials system:kube-controller-manager \
     --client-certificate=/opt/kubernetes/pki/controller-manager.pem \
     --client-key=/opt/kubernetes/pki/controller-manager-key.pem \
     --embed-certs=true \
     --kubeconfig=/opt/kubernetes/controller-manager.kubeconfig

# set-context: define a context that ties the user to the cluster
kubectl config set-context system:kube-controller-manager@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=/opt/kubernetes/controller-manager.kubeconfig

# use-context: make this context the default
kubectl config use-context system:kube-controller-manager@kubernetes \
     --kubeconfig=/opt/kubernetes/controller-manager.kubeconfig

5.3.3 Generate the kube-scheduler certificate

cat > /opt/k8s-ha-install/pki/scheduler-csr.json <<\EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

cfssl gencert \
   -ca=/opt/kubernetes/pki/ca.pem \
   -ca-key=/opt/kubernetes/pki/ca-key.pem \
   -config=/opt/k8s-ha-install/pki/ca-config.json \
   -profile=kubernetes \
   /opt/k8s-ha-install/pki/scheduler-csr.json | cfssljson -bare /opt/kubernetes/pki/scheduler
   
# set-cluster: define a cluster entry (same steps as above)
kubectl config set-cluster kubernetes \
     --certificate-authority=/opt/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://172.16.20.190:8443 \
     --kubeconfig=/opt/kubernetes/scheduler.kubeconfig
     
# set-credentials: define a user entry
kubectl config set-credentials system:kube-scheduler \
     --client-certificate=/opt/kubernetes/pki/scheduler.pem \
     --client-key=/opt/kubernetes/pki/scheduler-key.pem \
     --embed-certs=true \
     --kubeconfig=/opt/kubernetes/scheduler.kubeconfig
     
# set-context: define a context that ties the user to the cluster
kubectl config set-context system:kube-scheduler@kubernetes \
     --cluster=kubernetes \
     --user=system:kube-scheduler \
     --kubeconfig=/opt/kubernetes/scheduler.kubeconfig

# use-context: make this context the default
kubectl config use-context system:kube-scheduler@kubernetes \
     --kubeconfig=/opt/kubernetes/scheduler.kubeconfig

5.3.4 Generate the admin certificate

cat > /opt/k8s-ha-install/pki/admin-csr.json <<\EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

cfssl gencert \
   -ca=/opt/kubernetes/pki/ca.pem \
   -ca-key=/opt/kubernetes/pki/ca-key.pem \
   -config=/opt/k8s-ha-install/pki/ca-config.json \
   -profile=kubernetes \
   /opt/k8s-ha-install/pki/admin-csr.json | cfssljson -bare /opt/kubernetes/pki/admin

kubectl config set-cluster kubernetes \
 --certificate-authority=/opt/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=https://172.16.20.190:8443 \
 --kubeconfig=/opt/kubernetes/admin.kubeconfig
 
kubectl config set-credentials kubernetes-admin \
 --client-certificate=/opt/kubernetes/pki/admin.pem \
 --client-key=/opt/kubernetes/pki/admin-key.pem \
 --embed-certs=true \
 --kubeconfig=/opt/kubernetes/admin.kubeconfig
 
kubectl config set-context kubernetes-admin@kubernetes \
  --cluster=kubernetes  \
  --user=kubernetes-admin \
  --kubeconfig=/opt/kubernetes/admin.kubeconfig
  
kubectl config use-context kubernetes-admin@kubernetes \
--kubeconfig=/opt/kubernetes/admin.kubeconfig

5.3.5 kubelet certificates

Kubelet certificates are issued automatically via TLS bootstrapping (section 8); there is no need to generate them manually.

5.3.6 Create the ServiceAccount key pair

openssl genrsa -out /opt/kubernetes/pki/sa.key 2048

openssl rsa -in /opt/kubernetes/pki/sa.key -pubout -out /opt/kubernetes/pki/sa.pub

5.3.7 Distribute the certificates to the other master nodes

for NODE in k8s2 k8s3; do 
    for FILE in $(ls /opt/kubernetes/pki | grep -v etcd); do 
        scp /opt/kubernetes/pki/${FILE} $NODE:/opt/kubernetes/pki/${FILE};
    done; 
    for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do 
        scp /opt/kubernetes/${FILE} $NODE:/opt/kubernetes/${FILE};
    done;
done

6. Starting the etcd Servers

6.1 Generate the etcd configuration files

The etcd configs are nearly identical; change only the name and the IP addresses for each master node. The etcd certificates are under /opt/etcd/ssl/.

Master01 config:
cat > /opt/etcd/etcd.config.yml <<\EOF
name: 'k8s1'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://172.16.20.141:2380'
listen-client-urls: 'https://172.16.20.141:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://172.16.20.141:2380'
advertise-client-urls: 'https://172.16.20.141:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s1=https://172.16.20.141:2380,k8s2=https://172.16.20.142:2380,k8s3=https://172.16.20.143:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/opt/etcd/ssl/etcd.pem'
  key-file: '/opt/etcd/ssl/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/opt/etcd/ssl/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/opt/etcd/ssl/etcd.pem'
  key-file: '/opt/etcd/ssl/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/opt/etcd/ssl/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

Master02 config:
cat > /opt/etcd/etcd.config.yml <<\EOF
name: 'k8s2'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://172.16.20.142:2380'
listen-client-urls: 'https://172.16.20.142:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://172.16.20.142:2380'
advertise-client-urls: 'https://172.16.20.142:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s1=https://172.16.20.141:2380,k8s2=https://172.16.20.142:2380,k8s3=https://172.16.20.143:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/opt/etcd/ssl/etcd.pem'
  key-file: '/opt/etcd/ssl/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/opt/etcd/ssl/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/opt/etcd/ssl/etcd.pem'
  key-file: '/opt/etcd/ssl/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/opt/etcd/ssl/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

Master03 config:
cat > /opt/etcd/etcd.config.yml <<\EOF
name: 'k8s3'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://172.16.20.143:2380'
listen-client-urls: 'https://172.16.20.143:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://172.16.20.143:2380'
advertise-client-urls: 'https://172.16.20.143:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s1=https://172.16.20.141:2380,k8s2=https://172.16.20.142:2380,k8s3=https://172.16.20.143:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/opt/etcd/ssl/etcd.pem'
  key-file: '/opt/etcd/ssl/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/opt/etcd/ssl/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/opt/etcd/ssl/etcd.pem'
  key-file: '/opt/etcd/ssl/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/opt/etcd/ssl/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

6.2 Create the etcd service and start it

Run on all etcd nodes.

# Create the etcd systemd unit
cat > /usr/lib/systemd/system/etcd.service << \EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/opt/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF

# Start the service
systemctl daemon-reload
systemctl enable --now etcd

6.3 Verify

Check the service status:
systemctl  status etcd

Watch the system log:
tail -f /var/log/messages

Check the etcd cluster status:
 etcdctl --endpoints="172.16.20.141:2379,172.16.20.142:2379,172.16.20.143:2379" --cacert=/opt/etcd/ssl/etcd-ca.pem --cert=/opt/etcd/ssl/etcd.pem --key=/opt/etcd/ssl/etcd-key.pem  endpoint status --write-out=table
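For a quick per-member health probe, etcdctl's endpoint health subcommand works with the same TLS flags:

etcdctl --endpoints="172.16.20.141:2379,172.16.20.142:2379,172.16.20.143:2379" --cacert=/opt/etcd/ssl/etcd-ca.pem --cert=/opt/etcd/ssl/etcd.pem --key=/opt/etcd/ssl/etcd-key.pem endpoint health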

7. Configuring the Kubernetes Components

7.1 Configure kube-apiserver

Create the required directories on all nodes:
mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

Create the kube-apiserver service on all master nodes. Note: the unit below sets --advertise-address to the VIP (172.16.20.190); it is also common to use each master's own IP here, in which case adjust it per node.

cat > /usr/lib/systemd/system/kube-apiserver.service <<\EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --logtostderr=true  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --insecure-port=0  \
      --advertise-address=172.16.20.190 \
      --service-cluster-ip-range=10.96.0.0/12  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://172.16.20.141:2379,https://172.16.20.142:2379,https://172.16.20.143:2379 \
      --etcd-cafile=/opt/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/opt/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/opt/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/opt/kubernetes/pki/ca.pem  \
      --tls-cert-file=/opt/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/opt/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/opt/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/opt/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/opt/kubernetes/pki/sa.pub  \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/opt/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/opt/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/opt/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

# Start the service
systemctl daemon-reload && systemctl enable --now kube-apiserver 

# Check
systemctl status kube-apiserver

7.2 Configure the kube-controller-manager service

# Create the kube-controller-manager systemd unit on all master nodes
cat >  /usr/lib/systemd/system/kube-controller-manager.service <<\EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --root-ca-file=/opt/kubernetes/pki/ca.pem \
      --cluster-signing-cert-file=/opt/kubernetes/pki/ca.pem \
      --cluster-signing-key-file=/opt/kubernetes/pki/ca-key.pem \
      --service-account-private-key-file=/opt/kubernetes/pki/sa.key \
      --kubeconfig=/opt/kubernetes/controller-manager.kubeconfig \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --pod-eviction-timeout=2m0s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --allocate-node-cidrs=true \
      --cluster-cidr=10.244.0.0/16 \
      --requestheader-client-ca-file=/opt/kubernetes/pki/front-proxy-ca.pem \
      --node-cidr-mask-size=24
      
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF

# Start the service
systemctl daemon-reload && systemctl enable --now kube-controller-manager

# Check
systemctl status kube-controller-manager

7.3 Configure kube-scheduler (all master nodes)

cat >  /usr/lib/systemd/system/kube-scheduler.service  <<\EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --leader-elect=true \
      --kubeconfig=/opt/kubernetes/scheduler.kubeconfig

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
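Enable and start the scheduler the same way as the other control-plane services, then check it:

systemctl daemon-reload && systemctl enable --now kube-scheduler

systemctl status kube-scheduler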

8. TLS Bootstrapping (automatic kubelet client certificate issuance)

Run on master01:

kubectl config set-cluster kubernetes \
    --certificate-authority=/opt/kubernetes/pki/ca.pem \
    --embed-certs=true \
    --server=https://172.16.20.190:8443 \
    --kubeconfig=/opt/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-credentials tls-bootstrap-token-user \
    --token=c8ad9c.2e4d610cf3e7426e \
    --kubeconfig=/opt/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes \
    --cluster=kubernetes \
    --user=tls-bootstrap-token-user \
    --kubeconfig=/opt/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes \
    --kubeconfig=/opt/kubernetes/bootstrap-kubelet.kubeconfig
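The bootstrap token has the form <token-id>.<token-secret> (6 and 16 lowercase hex characters); token-id and token-secret must match the Secret created below. If you prefer a fresh token over the example value, a sketch (remember to substitute it both in the kubeconfig above and in the Secret):

TOKEN_ID=$(openssl rand -hex 3)        # 6 hex chars
TOKEN_SECRET=$(openssl rand -hex 8)    # 16 hex chars
echo "${TOKEN_ID}.${TOKEN_SECRET}"     # use this as the --token value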

# Copy the admin kubeconfig so kubectl works on this node
mkdir -p /root/.kube && cp /opt/kubernetes/admin.kubeconfig /root/.kube/config


mkdir -p /opt/k8s-ha-install/bootstrap

cat > /opt/k8s-ha-install/bootstrap/bootstrap.secret.yaml << \EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
 
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF

kubectl create -f /opt/k8s-ha-install/bootstrap/bootstrap.secret.yaml

9. Node Configuration

9.1 Distribute the certificates

for NODE in k8s2 k8s3 k8s4; do
    ssh $NODE mkdir -p /opt/kubernetes/pki /opt/etcd/ssl
    for FILE in etcd-ca.pem etcd.pem etcd-key.pem; do
       scp /opt/etcd/ssl/$FILE $NODE:/opt/etcd/ssl/
    done
    for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do
       scp /opt/kubernetes/$FILE $NODE:/opt/kubernetes/${FILE}
    done
done

9.2 Create directories

Run on all nodes:
mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /opt/kubernetes/manifests/

9.3 Configure the kubelet service on all nodes

(Master nodes that will not run pods can skip this.)

cat > /usr/lib/systemd/system/kubelet.service << \EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet

Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF


cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf  << \EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/opt/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/opt/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/opt/kubernetes/kubelet-conf.yml --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
EOF

cat > /opt/kubernetes/kubelet-conf.yml << \EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /opt/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF


9.4 Start and verify

systemctl daemon-reload && systemctl enable --now kubelet
kubectl get node

tail -f /var/log/messages
The following message is expected at this point, because no CNI network plugin is installed yet:
Unable to update cni config: no networks found in /etc/cni/net.d

10. Kube-Proxy

The kube-proxy configuration below can be done entirely on master01.

10.1 Configure kube-proxy

If you changed the cluster pod CIDR, update the clusterCIDR: 10.244.0.0/16 parameter in kube-proxy/kube-proxy.conf to match.

kubectl -n kube-system create serviceaccount kube-proxy

kubectl create clusterrolebinding system:kube-proxy \
  --clusterrole system:node-proxier \
  --serviceaccount kube-system:kube-proxy

SECRET=$(kubectl -n kube-system get sa/kube-proxy --output=jsonpath='{.secrets[0].name}')

JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET --output=jsonpath='{.data.token}' | base64 -d)

PKI_DIR=/opt/kubernetes/pki

K8S_DIR=/opt/kubernetes

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=https://172.16.20.190:8443 \
  --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
  
kubectl config set-credentials kubernetes \
  --token=${JWT_TOKEN} \
  --kubeconfig=/opt/kubernetes/kube-proxy.kubeconfig
    
kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=kubernetes \
  --kubeconfig=/opt/kubernetes/kube-proxy.kubeconfig
    
kubectl config use-context kubernetes \
  --kubeconfig=/opt/kubernetes/kube-proxy.kubeconfig

10.2 Distribute the kube-proxy files (run on master01)

# Create the directory
mkdir /opt/k8s-ha-install/kube-proxy

# Create the configuration file
cat > /opt/k8s-ha-install/kube-proxy/kube-proxy.conf << \EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /opt/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 10.244.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF

cat > /opt/k8s-ha-install/kube-proxy/kube-proxy.service << \EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/opt/kubernetes/kube-proxy.conf \
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF


# Optional: a DaemonSet-based kube-proxy manifest, kept for reference. It uses legacy apiVersions (extensions/v1beta1) and is not used by the systemd-based install in this document.
cat > /opt/k8s-ha-install/kube-proxy/kube-proxy.yml << \EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-proxy
  namespace: kube-system
  labels:
    app: kube-proxy
data:
  config.conf: |-
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    bindAddress: 0.0.0.0
    clientConnection:
      acceptContentTypes: ""
      burst: 10
      contentType: application/vnd.kubernetes.protobuf
      kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
      qps: 5
    clusterCIDR: 10.244.0.0/16
    configSyncPeriod: 15m0s
    conntrack:
      maxPerCore: 32768
      min: 131072
      tcpCloseWaitTimeout: 1h0m0s
      tcpEstablishedTimeout: 24h0m0s
    enableProfiling: false
    healthzBindAddress: 0.0.0.0:10256
    hostnameOverride: ""
    iptables:
      masqueradeAll: false
      masqueradeBit: 14
      minSyncPeriod: 0s
      syncPeriod: 30s
    ipvs:
      minSyncPeriod: 0s
      scheduler: rr
      syncPeriod: 30s
    metricsBindAddress: 127.0.0.1:10249
    mode: ipvs
    featureGates:
      SupportIPVSProxyMode: true
    oomScoreAdj: -999
    portRange: ""
    resourceContainer: /kube-proxy
    udpIdleTimeout: 250ms
  kubeconfig.conf: |-
    apiVersion: v1
    kind: Config
    clusters:
    - cluster:
        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        server: {{KUBE_APISERVER}}
      name: default
    contexts:
    - context:
        cluster: default
        namespace: default
        user: default
      name: default
    current-context: default
    users:
    - name: default
      user:
        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-proxy
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: system:kube-proxy
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: kube-proxy
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: system:node-proxier
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-proxy
  name: kube-proxy
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-proxy
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        k8s-app: kube-proxy
    spec:
      serviceAccount: kube-proxy
      serviceAccountName: kube-proxy
      priorityClassName: system-node-critical
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - key: node.kubernetes.io/not-ready
        operator: Exists
        effect: NoSchedule
      hostNetwork: true
      containers:
      - name: kube-proxy
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.11.3
        command:
        - /usr/local/bin/kube-proxy
        - --config=/var/lib/kube-proxy/config.conf
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /var/lib/kube-proxy
          name: kube-proxy
        - mountPath: /run/xtables.lock
          name: xtables-lock
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
      volumes:
      - configMap:
          defaultMode: 420
          name: kube-proxy
        name: kube-proxy
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock
      - hostPath:
          path: /lib/modules
          type: ""
        name: lib-modules
EOF

# Distribute to all nodes
for NODE in k8s1 k8s2 k8s3 k8s4; do
     scp ${K8S_DIR}/kube-proxy.kubeconfig $NODE:/opt/kubernetes/kube-proxy.kubeconfig
     scp /opt/k8s-ha-install/kube-proxy/kube-proxy.conf $NODE:/opt/kubernetes/kube-proxy.conf
     scp /opt/k8s-ha-install/kube-proxy/kube-proxy.service $NODE:/usr/lib/systemd/system/kube-proxy.service
done

10.3 Start kube-proxy

Run on every node after the files are distributed:
systemctl daemon-reload && systemctl enable --now kube-proxy

11. Installing Calico

Official docs: https://docs.projectcalico.org/
Find the version matching your cluster and install it, as sketched below.
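A typical install is a single manifest apply (a sketch; verify the manifest URL for your Calico/Kubernetes version, and make sure CALICO_IPV4POOL_CIDR in the manifest matches the pod CIDR 10.244.0.0/16 used in this document):

kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml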
Verify:
kubectl get po -n kube-system -owide
If a pod is in an abnormal state, inspect it with kubectl describe or kubectl logs.

12. Installing CoreDNS

Install the latest CoreDNS (the -i address 10.96.0.10 must match the clusterDNS configured in kubelet-conf.yml):
git clone https://github.com/coredns/deployment.git
cd deployment/kubernetes
./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -

Check the status:
kubectl get po -n kube-system -l k8s-app=kube-dns

13. Installing Metrics Server

GitHub: https://github.com/kubernetes-sigs/metrics-server

Install:
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

If the image cannot be pulled directly (the manifest references k8s.gcr.io), you may need a proxy or a mirrored image.
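Because the kubelets in this setup serve self-signed certificates, metrics-server often fails TLS verification against them. A common workaround (a sketch; it relaxes kubelet certificate checking) is adding the --kubelet-insecure-tls flag to the Deployment:

kubectl -n kube-system patch deployment metrics-server --type=json \
  -p='[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--kubelet-insecure-tls"}]'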

Check the status:
kubectl get po -n kube-system -l k8s-app=metrics-server

View pod metrics for the cluster:
kubectl top po -n kube-system

14. Validating the Cluster

cat<<\EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF

1.  Pods must be able to resolve Services:
kubectl exec  busybox -n default -- nslookup kubernetes

2.  Pods must be able to resolve Services in other namespaces:
kubectl exec  busybox -n default -- nslookup kube-dns.kube-system

3.  Every node must be able to reach the kubernetes Service on port 443 and the kube-dns Service on port 53.
4.  Pod-to-pod traffic must work: within a namespace, across namespaces, and across nodes. (See the probes below.)
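Quick probes for checks 3 and 4 (a sketch; 10.96.0.1 and 10.96.0.10 are the Service IPs used in this document, and the pod IP must be taken from your own cluster):

telnet 10.96.0.1 443       # run on every node: kubernetes svc
telnet 10.96.0.10 53       # run on every node: kube-dns svc
kubectl get po -A -owide   # pick a pod IP on a different node
kubectl exec busybox -n default -- ping <pod-ip-on-another-node>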

15. Installing the Dashboard

15.1 Install the Dashboard

Note: dashboard v2.1.0 only supports Kubernetes 1.20.

Dashboard official GitHub: https://github.com/kubernetes/dashboard/tags

# This download may be blocked from some networks; a full local copy is included below
wget --no-check-certificate https://raw.githubusercontent.com/kubernetes/dashboard/v2.1.0/aio/deploy/recommended.yaml -O kube-dashboard.yaml


mkdir -p /opt/dashboard2.1.0

cat > /opt/dashboard2.1.0/kube-dashboard.yaml << \EOF
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.1.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.6
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
EOF


cd /opt/dashboard2.1.0
kubectl create -f kube-dashboard.yaml

Check the dashboard pod status:
kubectl get po -n  kubernetes-dashboard

15.2 Access from a browser

Chrome is recommended:
https://172.16.20.190:30000/
Note: the manifest above creates the kubernetes-dashboard Service as ClusterIP, so port 30000 is reachable only after converting the Service to NodePort; see the patch below.
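A sketch for converting the Service (30000 lies inside the --service-node-port-range configured on the apiserver):

kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard \
  -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30000}]}}'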

Windows: add the startup flags --test-type --ignore-certificate-errors to the Chrome shortcut to get past the certificate warning that otherwise blocks access to the Dashboard.

macOS: type thisisunsafe on the warning page in Chrome to proceed.

Choose token login. The command below reads the token of an admin-user ServiceAccount in kube-system; this document never creates that account, so create it first (see the sketch after the command):
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
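A minimal sketch for creating that account with cluster-admin rights (admin-user is the conventional name from the dashboard documentation, not something defined earlier in this document):

kubectl -n kube-system create serviceaccount admin-user
kubectl create clusterrolebinding admin-user \
  --clusterrole=cluster-admin \
  --serviceaccount=kube-system:admin-user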
