Deploying a Highly Available Kubernetes Cluster from Binaries

Kubernetes version: 1.18.16

Node plan

IP                hostname     Used by
192.168.139.21    k8s-m1       etcd / master / keepalived+haproxy
192.168.139.22    k8s-m2       node / master / keepalived+haproxy
192.168.139.201   k8s-n1       etcd / node
192.168.139.202   k8s-n2       etcd / node
192.168.139.20    k8s-master   VIP

Environment preparation

Prepare every node as follows:

  • Operating system: CentOS 7

  • Node resources: 4 vCPUs / 4 GB RAM per virtual machine

  • Node network configuration:

    • ens33: DHCP, NAT mode, the system's default outbound (Internet) interface

    • ens37: static, host-only mode, the cluster communication network

  • Disable swap (see the sketch below)

  • Disable the built-in firewall and SELinux (see the sketch below)

  • Configure a common NTP service, e.g. chrony (see the sketch below)

  • Set the time zone: timedatectl set-timezone Asia/Shanghai && timedatectl set-local-rtc 0
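
A minimal sketch of the swap, firewall/SELinux, and chrony steps referenced above (package names and defaults are assumptions; adjust to your environment):

# disable swap now and on boot
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab

# disable firewalld and SELinux
systemctl disable --now firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

# install and enable chrony for time synchronization
yum install -y chrony
systemctl enable --now chronyd
chronyc sources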

  • Configure rsyslogd and systemd journald:

cat > /etc/systemd/journald.conf <<EOF
[Journal]
Storage=persistent
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
SystemMaxUse=10G
SystemMaxFileSize=300M
MaxRetentionSec=1month
ForwardToSyslog=no
EOF

systemctl restart systemd-journald.service

  • Tune kernel parameters:

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
ip_conntrack
EOF

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

modprobe ip_conntrack
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
lsmod | grep -E "br_netfilter|conntrack"

  • Ensure hostnames, MAC addresses, and product_uuid are unique:

    • Check MAC addresses: ip link or ifconfig -a

    • Check hostnames: /etc/hosts must be identical on all nodes, with each hostname unique and reachable from every other node:

cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.139.21 k8s-m1
192.168.139.22 k8s-m2
192.168.139.201 k8s-n1
192.168.139.202 k8s-n2
192.168.139.20 k8s-master
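
A small sketch for distributing the hosts file from one node to the rest (assumes root SSH access between nodes):

for h in k8s-m2 k8s-n1 k8s-n2; do
  scp /etc/hosts root@${h}:/etc/hosts
done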

Deploying the master

Download cfssl

wget http://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget http://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget http://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

chmod +x ./cfssl*

mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson

Create the CA certificate

cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
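
This produces ca.pem, ca-key.pem, and ca.csr in the current directory. A quick inspection sketch using the cfssl-certinfo binary installed above:

cfssl-certinfo -cert ca.pem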

Issue the etcd certificate

cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "192.168.139.20",
    "192.168.139.21",
    "192.168.139.22",
    "192.168.139.201",
    "192.168.139.202"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

Install etcd

Install the etcd certificates

mkdir -p /opt/etcd/ssl

cp etcd*.pem /opt/etcd/ssl
cp ca.pem /opt/etcd/ssl

Install the etcd binaries

wget https://github.com/etcd-io/etcd/releases/download/v3.4.14/etcd-v3.4.14-linux-amd64.tar.gz

tar zxvf etcd-v3.4.14-linux-amd64.tar.gz

mkdir -p /opt/etcd/bin
mkdir -p /opt/etcd/cnf

mv etcd-v3.4.14-linux-amd64/etcd* /opt/etcd/bin

chown $(id -u):$(id -g) /opt/etcd/bin/*

Sync to the other etcd nodes

scp -r /opt/etcd root@k8s-n1:/opt
scp -r /opt/etcd root@k8s-n2:/opt

Generate the configuration file and register the service. Save the following script as etcd_init.sh:

#!/bin/bash
# example: ./etcd_init.sh etcd-1 192.168.139.21 etcd-1=https://192.168.139.21:2380,etcd-2=https://192.168.139.201:2380,etcd-3=https://192.168.139.202:2380

ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3

WORK_DIR=/opt/etcd

cat <<EOF >$WORK_DIR/cnf/etcd
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_ENABLE_V2="true"
EOF

cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cnf/etcd
ExecStart=${WORK_DIR}/bin/etcd \
--cert-file=${WORK_DIR}/ssl/etcd.pem \
--key-file=${WORK_DIR}/ssl/etcd-key.pem \
--peer-cert-file=${WORK_DIR}/ssl/etcd.pem \
--peer-key-file=${WORK_DIR}/ssl/etcd-key.pem \
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd

Run the script on each of the three etcd nodes, adjusting the member name and IP address per node. For example, on k8s-m1:

./etcd_init.sh etcd-1 192.168.139.21 etcd-1=https://192.168.139.21:2380,etcd-2=https://192.168.139.201:2380,etcd-3=https://192.168.139.202:2380
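
The corresponding invocations on the other two members keep the same cluster string and change only the member name and IP:

./etcd_init.sh etcd-2 192.168.139.201 etcd-1=https://192.168.139.21:2380,etcd-2=https://192.168.139.201:2380,etcd-3=https://192.168.139.202:2380
./etcd_init.sh etcd-3 192.168.139.202 etcd-1=https://192.168.139.21:2380,etcd-2=https://192.168.139.201:2380,etcd-3=https://192.168.139.202:2380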

Start etcd

Start etcd on all three nodes:

systemctl start etcd

Check the cluster status

/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/etcd.pem --key=/opt/etcd/ssl/etcd-key.pem --endpoints="https://192.168.139.21:2379,https://192.168.139.201:2379,https://192.168.139.202:2379" --write-out=table endpoint health

/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/etcd.pem --key=/opt/etcd/ssl/etcd-key.pem --endpoints="https://192.168.139.21:2379,https://192.168.139.201:2379,https://192.168.139.202:2379" --write-out=table endpoint status

/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/etcd.pem --key=/opt/etcd/ssl/etcd-key.pem --endpoints="https://192.168.139.21:2379,https://192.168.139.201:2379,https://192.168.139.202:2379" --write-out=table member list

Install the master components

Generate certificates

cat > kube-server-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "192.168.139.20",
    "192.168.139.21",
    "192.168.139.22",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-server-csr.json | cfssljson -bare kube-server

cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

Install the kube-apiserver certificates

mkdir -p /opt/kubernetes/ssl/

cp kube-server*.pem /opt/kubernetes/ssl/
cp ca*.pem /opt/kubernetes/ssl/

Install the Kubernetes server binaries

mkdir /opt/kubernetes/{cnf,bin,log} -p

wget https://dl.k8s.io/v1.18.16/kubernetes-server-linux-amd64.tar.gz

tar zxvf kubernetes-server-linux-amd64.tar.gz

cp kubernetes/server/bin/{kubectl,kube-apiserver,kube-controller-manager,kube-scheduler} /opt/kubernetes/bin/

ln -s /opt/kubernetes/bin/kubectl /usr/local/bin/kubectl

chown $(id -u):$(id -g) /opt/kubernetes/bin/*

Generate the kube-apiserver configuration and register the service. Save the following script as api_init.sh:

#!/bin/bash

MASTER_ADDRESS=$1
ETCD_SERVERS=$2

cat <<EOF >/opt/kubernetes/cnf/kube-apiserver

KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/log \\
--audit-log-path=/opt/kubernetes/log/k8s-audit.log \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=6 \\
--audit-log-maxsize=200 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cnf/token.csv \\
--service-node-port-range=30000-32767 \\
--tls-cert-file=/opt/kubernetes/ssl/kube-server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/kube-server-key.pem \\
--kubelet-client-certificate=/opt/kubernetes/ssl/kube-server.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/kube-server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/etcd.pem \\
--etcd-keyfile=/opt/etcd/ssl/etcd-key.pem"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cnf/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Run the script with the master IP and the etcd endpoints:

./api_init.sh 192.168.139.21 https://192.168.139.21:2379,https://192.168.139.201:2379,https://192.168.139.202:2379
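
Note that the configuration above references /opt/kubernetes/cnf/token.csv, which must exist before kube-apiserver starts. A minimal sketch for generating it (the format is token,user,uid,"group"; the token must later match the one used to create bootstrap.kubeconfig):

# generate a random 32-hex-character bootstrap token
TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cnf/token.csv <<EOF
${TOKEN},kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF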

Generate the kube-controller-manager configuration and register the service. Save the following script as cm_init.sh:

#!/bin/bash

cat <<EOF >/opt/kubernetes/cnf/kube-controller-manager


KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/log/ \\
--master=127.0.0.1:8080 \\
--leader-elect=true \\
--bind-address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cnf/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

./cm_init.sh

Generate the kube-scheduler configuration and register the service. Save the following script as sch_init.sh:

#!/bin/bash

cat <<EOF >/opt/kubernetes/cnf/kube-scheduler

KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/log/ \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1 \\
--leader-elect"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cnf/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

./sch_init.sh

Start the services

systemctl daemon-reload
systemctl enable {kube-apiserver,kube-scheduler,kube-controller-manager}
systemctl restart {kube-apiserver,kube-scheduler,kube-controller-manager}

Authorize the kubelet-bootstrap user to request certificates

kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

Check component status

[root@k8s-m1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}

Install the worker nodes

Install Docker

wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.3.tgz

tar zxvf docker-20.10.3.tgz
mv docker/* /usr/bin
cat > /usr/lib/systemd/system/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF
mkdir /etc/docker
cat > /etc/docker/daemon.json << EOF
{
 "registry-mirrors": ["https://3e9974ml.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload
systemctl start docker
systemctl enable docker
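
A quick sanity check that the daemon is up (output will vary with your environment):

docker version
docker info | grep -i 'server version'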

Install the Kubernetes worker binaries

mkdir /opt/kubernetes/{cnf,bin,log} -p

wget https://dl.k8s.io/v1.18.16/kubernetes-server-linux-amd64.tar.gz

tar zxvf kubernetes-server-linux-amd64.tar.gz

cp kubernetes/server/bin/{kubelet,kube-proxy} /opt/kubernetes/bin/

chown $(id -u):$(id -g) /opt/kubernetes/bin/*

Copy the required certificates to the node

mkdir /opt/kubernetes/ssl -p
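
The kubelet configuration below expects ca.pem under /opt/kubernetes/ssl. A sketch of copying it over (run on the master; k8s-n2 is the example worker used throughout this section):

scp /opt/kubernetes/ssl/ca.pem root@k8s-n2:/opt/kubernetes/ssl/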

Generate the kubelet configuration and register the service (run on each worker, adjusting --hostname-override per node)

cat > /opt/kubernetes/cnf/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/log \\
--hostname-override=k8s-n2 \\
--network-plugin=cni \\
--kubeconfig=/opt/kubernetes/cnf/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cnf/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cnf/kubelet-config.yml \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"
EOF

cat > /opt/kubernetes/cnf/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF

Run on the master node:

KUBE_APISERVER="https://192.168.139.21:6443" # apiserver IP:PORT
TOKEN="66277ddffbbe3d94b60963552e7c1ad5" # must match the token in token.csv

# generate the kubelet bootstrap kubeconfig
kubectl config set-cluster kubernetes \
 --certificate-authority=/opt/kubernetes/ssl/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=bootstrap.kubeconfig

kubectl config set-credentials "kubelet-bootstrap" \
 --token=${TOKEN} \
 --kubeconfig=bootstrap.kubeconfig

kubectl config set-context default \
 --cluster=kubernetes \
 --user="kubelet-bootstrap" \
 --kubeconfig=bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

Copy the generated bootstrap.kubeconfig to the worker node:

scp bootstrap.kubeconfig root@k8s-n2:/opt/kubernetes/cnf/

Back on the worker node, register the kubelet service:

cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cnf/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl start kubelet
systemctl enable kubelet

Approve the kubelet certificate request and join the node to the cluster

Run on the master node:

# list pending kubelet certificate requests
kubectl get csr
NAME                                                   AGE   SIGNERNAME                                   REQUESTOR           CONDITION
node-csr-gOm3t6t9pc50sBnXKfO88LZjXMLa7p8vOTCW8mThKSQ   90s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending

# approve the request
kubectl certificate approve node-csr-gOm3t6t9pc50sBnXKfO88LZjXMLa7p8vOTCW8mThKSQ

# list nodes
kubectl get node
NAME     STATUS     ROLES    AGE   VERSION
k8s-n2   NotReady   <none>   6s    v1.18.16

The node reports NotReady because no CNI network plugin has been installed yet; this is resolved in the CNI/flannel steps below.

Generate the kube-proxy configuration and register the service

cat > /opt/kubernetes/cnf/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/log \\
--config=/opt/kubernetes/cnf/kube-proxy-config.yml"
EOF

cat > /opt/kubernetes/cnf/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cnf/kube-proxy.kubeconfig
hostnameOverride: k8s-n2
clusterCIDR: 10.0.0.0/24
EOF

The kube-proxy certificate was generated on the master earlier. On the master, change into the directory holding that certificate and run:

KUBE_APISERVER="https://192.168.139.21:6443"

kubectl config set-cluster kubernetes \
 --certificate-authority=/opt/kubernetes/ssl/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
 --client-certificate=./kube-proxy.pem \
 --client-key=./kube-proxy-key.pem \
 --embed-certs=true \
 --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
 --cluster=kubernetes \
 --user=kube-proxy \
 --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Copy the generated kube-proxy.kubeconfig to the node:

scp kube-proxy.kubeconfig root@k8s-n2:/opt/kubernetes/cnf/

Back on the worker node, register the kube-proxy service:

cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=/opt/kubernetes/cnf/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl start kube-proxy
systemctl enable kube-proxy

Deploy the CNI network plugin on the worker nodes

wget https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz

mkdir /opt/cni/bin -p
tar zxvf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin

Install flannel from the master

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# the default image may be unreachable from some networks; switch to a Docker Hub mirror image
sed -i -r "s#quay.io/coreos/flannel:v0.13.1-rc2#lizhenliang/flannel:v0.12.0-amd64#g" kube-flannel.yml

kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system

kubectl get node

Authorize the apiserver to access the kubelet

cat > apiserver-to-kubelet-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF

kubectl apply -f apiserver-to-kubelet-rbac.yaml

Deploy the dashboard

wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.5/aio/deploy/recommended.yaml

Modify the following section of the YAML file (exposing the dashboard as a NodePort service):

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard

kubectl apply -f recommended.yaml
kubectl get pods,svc -n kubernetes-dashboard

NAME                                             READY   STATUS   RESTARTS   AGE
pod/dashboard-metrics-scraper-78f5d9f487-9l798   1/1     Running   0         5h19m
pod/kubernetes-dashboard-54445cdd96-gjbhf        1/1     Running   0         5h19m

NAME                               TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
service/dashboard-metrics-scraper   ClusterIP   10.0.0.54   <none>        8000/TCP       5h19m
service/kubernetes-dashboard       NodePort    10.0.0.172   <none>        443:30001/TCP   5h19m

Create a service account, bind it to the default cluster-admin cluster role, and obtain a login token

kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
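
If you only want the token itself (for pasting into the login form), a one-liner sketch:

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}') | awk '/^token:/{print $2}'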

Open the dashboard via any worker node, e.g. https://192.168.139.202:30001, and log in with the token.

Deploy CoreDNS

Save the following manifest as coredns.yaml:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: k8s-app
                operator: In
                values: ["kube-dns"]
            topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.6.7
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.0.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

kubectl apply -f coredns.yaml

kubectl get pods -n kube-system
NAME                       READY   STATUS   RESTARTS   AGE
coredns-65dbdb44db-wwjmp   1/1     Running   0         15m
kube-flannel-ds-5jljs      1/1     Running   0         22h
kube-flannel-ds-mgltf      1/1     Running   0         6h36m
kube-flannel-ds-ml9ds      1/1     Running   0         22h

DNS resolution test

kubectl run -it --rm dns-test --image=busybox:1.28 sh

If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server:    10.0.0.2
Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local

Name:     kubernetes
Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local

Install haproxy and keepalived

Install haproxy

yum install -y haproxy

Configure haproxy

The configuration is identical on all master nodes:

root@k8s-m1:~# cat /etc/haproxy/haproxy.cfg
global
    log         /dev/log local0 warning
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    log     global
    option  httplog
    option  dontlognull
    timeout connect 5000
    timeout client  50000
    timeout server  50000

frontend kube-apiserver
    bind *:9443
    mode tcp
    option tcplog
    default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-m1 192.168.139.21:6443 check
    server kube-m2 192.168.139.22:6443 check
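
A quick syntax check before relying on the file (haproxy validates a configuration with -c):

haproxy -c -f /etc/haproxy/haproxy.cfg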

Install keepalived

yum install -y keepalived

Configure keepalived

Each master node must use a different priority (1-255); for example, 100 on k8s-m1 and 90 on k8s-m2.

root@k8s-m1:~# cat /etc/keepalived/keepalived.conf
global_defs {
    script_user root
    enable_script_security
}

vrrp_script chk_haproxy {
    script "/etc/keepalived/chk_haproxy.sh"
    interval 5
    fall 2
    timeout 4
}

vrrp_instance VI_1 {
    interface ens33
    virtual_router_id 51
    priority 100
    nopreempt
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass infohold
    }
    virtual_ipaddress {
        192.168.139.20
    }
    track_script {
        chk_haproxy
    }
}

The configuration uses a health-check script that monitors haproxy and drives VIP failover:

root@k8s-m1:~# cat /etc/keepalived/chk_haproxy.sh
#!/bin/bash
LOGFILE="/var/log/keepalived-chk_haproxy.log"
if [ $(ps -C haproxy --no-header | wc -l) -eq 0 ]; then
    date >> $LOGFILE
    echo "Haproxy service is not running! Try to start haproxy service..." >> $LOGFILE
    systemctl start haproxy.service
    sleep 2
    if [ $(ps -C haproxy --no-header | wc -l) -eq 0 ]; then
        date >> $LOGFILE
        echo "Starting haproxy service failed, stopping keepalived service..." >> $LOGFILE
        killall keepalived
    fi
fi

chmod +x /etc/keepalived/chk_haproxy.sh

The failover policy: every 5 seconds the script checks whether haproxy is alive; if not, it tries to start the haproxy service; if haproxy still fails to start, the script kills the local keepalived process, triggering VIP migration to the other node.

Start keepalived

systemctl enable keepalived.service
systemctl start keepalived.service

haproxy will be started by keepalived via the health-check script.
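
To confirm which node currently holds the VIP (using the ens33 interface from the configuration above):

ip addr show ens33 | grep 192.168.139.20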

Verify load balancing

curl -k https://192.168.139.20:9443/version
{
 "major": "1",
 "minor": "18",
 "gitVersion": "v1.18.16",
 "gitCommit": "7a98bb2b7c9112935387825f2fce1b7d40b76236",
 "gitTreeState": "clean",
 "buildDate": "2021-02-17T11:52:32Z",
 "goVersion": "go1.13.15",
 "compiler": "gc",
 "platform": "linux/amd64"

Verify availability by stopping and starting the apiserver on each of the two master nodes; a test sketch follows.
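
A minimal failover test (run the systemctl commands on one master; run curl from any host that can reach the VIP):

# stop the apiserver on one master; haproxy should route requests to the other
systemctl stop kube-apiserver
curl -k https://192.168.139.20:9443/version

# restore it afterwards
systemctl start kube-apiserver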

Update the node configuration to connect to the apiserver via the VIP

sed -i 's#192.168.139.21:6443#192.168.139.20:9443#' /opt/kubernetes/cnf/*

systemctl restart kubelet
systemctl restart kube-proxy

kubectl get node
NAME     STATUS   ROLES   AGE   VERSION
k8s-m2   Ready   <none>   22h   v1.18.16
k8s-n1   Ready   <none>   22h   v1.18.16
k8s-n2   Ready   <none>   22h   v1.18.16

When adding nodes later, use the VIP 192.168.139.20:9443 directly as the API server address when generating bootstrap.kubeconfig, kube-proxy.kubeconfig, and kubelet.kubeconfig.
