CentOS 7.5 + k8s 1.25.0 Deployment Manual

I. System Preparation

Note: run all of the following steps on every node.

Upgrade the kernel first!

# Import the ELRepo key and install the ELRepo repository
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -ivh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

# List the kernel versions available from ELRepo
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available

# Install the mainline kernel (kernel-ml; use kernel-lt for the long-term branch)
yum --enablerepo=elrepo-kernel install -y kernel-ml

# List the installed kernels in boot-menu order
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg

# Pick the default boot entry based on the output above (0 = first entry)
grub2-set-default 0

# Reboot to activate the new kernel
reboot
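
After the reboot, confirm the upgraded kernel is active (a quick check; the exact version depends on what ELRepo currently ships):

uname -r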

1. Environment (2 master nodes + 3 worker nodes), all running CentOS 7.5

Hostname    IP
master01 192.168.17.131
master02 192.168.17.132
node01 192.168.17.133
node02 192.168.17.134
node03 192.168.17.135
mastervip 192.168.17.130

2. Edit /etc/hosts

echo "192.168.17.131   master01
192.168.17.132   master02
192.168.17.133   node01
192.168.17.134   node02
192.168.17.135   node03
192.168.17.130   mastervip" >> /etc/hosts

For example:

[root@localhost ~]# echo "192.168.17.131   master01  
> 192.168.17.132   master02  
> 192.168.17.133   node01  
> 192.168.17.134   node02  
> 192.168.17.135   node03  
> 192.168.17.130   mastervip  " >> /etc/hosts
[root@localhost ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.17.131   master01  
192.168.17.132   master02  
192.168.17.133   node01  
192.168.17.134   node02  
192.168.17.135   node03  
192.168.17.130   mastervip  
[root@localhost ~]# 

3. Disable swap

swapoff -a

sed -i 's/.*swap.*/#&/' /etc/fstab
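
Swap is now off for the running system, and the fstab edit keeps it off across reboots; the Swap line of free should read all zeros:

free -h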

4. Disable the firewall

systemctl stop firewalld

systemctl disable firewalld

5. Disable SELinux

setenforce 0

sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux

sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux

sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config
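
setenforce takes effect immediately and the sed edits persist the change; getenforce should now report Permissive (it will show Disabled after the next reboot):

getenforce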

6. Switch the yum repo to the Aliyun mirror

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

yum makecache

7. Install required dependencies

yum install -y bash-completion jq psmisc net-tools telnet yum-utils device-mapper-persistent-data lvm2 git keepalived haproxy

8. Set up passwordless SSH (run on master01; optional)

ssh-keygen

# answer "yes" at each host-key prompt and enter the node's password
ssh-copy-id master02
ssh-copy-id node01
ssh-copy-id node02
ssh-copy-id node03

9. Set the hostname

Set each node's own name (master01 shown here):

[root@localhost ~]# hostnamectl set-hostname master01
[root@localhost ~]# more /etc/hostname
master01
[root@localhost ~]# bash    # start a new shell so the prompt picks up the new hostname
[root@master01 ~]#

10. Configure time synchronization

yum install -y chrony
Edit the /etc/chrony.conf configuration file:

Replace the line "pool 2.pool.ntp.org iburst"

with

server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp1.tencent.com iburst
server ntp2.tencent.com iburst

systemctl enable --now chronyd
chronyc sources
date

11. Tune kernel parameters

# load the bridge netfilter module required by the bridge-nf-call sysctls below
modprobe br_netfilter
lsmod | grep br_netfilter

vim /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited

vim /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
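
These modules will be loaded automatically at the next boot. To load them immediately as well, a small helper that feeds the file back into modprobe (assuming it was saved as /etc/modules-load.d/ipvs.conf as above):

for mod in $(grep -v '^#' /etc/modules-load.d/ipvs.conf); do modprobe $mod; done
lsmod | grep -e ip_vs -e nf_conntrack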

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
 
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF


sysctl -p /etc/sysctl.d/k8s.conf

12. Configure the Docker CE and Kubernetes yum repos

wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo 

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum makecache

II. Set Up High Availability (run these steps on both master nodes)

1. HAProxy

yum install -y keepalived haproxy
vim /etc/haproxy/haproxy.cfg
# Note: adjust the IP addresses below to match your own addressing plan.
# Append the following:
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s
 
defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s
 
frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor
 
frontend mastervip
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend mastervip
 
backend mastervip
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server master01   192.168.17.131:6443  check
  server master02   192.168.17.132:6443  check
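
Before starting the service, you can syntax-check the file with HAProxy's built-in check mode:

haproxy -c -f /etc/haproxy/haproxy.cfg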

2. Keepalived

master01:

yum install -y keepalived

vim /etc/keepalived/keepalived.conf


! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33 # change to the NIC that carries this host's LAN IP (check with ifconfig)
    mcast_src_ip 192.168.17.131 # change to this master's own address
    virtual_router_id 51
    priority 101
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.17.130 # change to your VIP
    }
    track_script {
       chk_apiserver
    }
}

master02:

yum install -y keepalived
vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33 # change to the NIC that carries this host's LAN IP (check with ifconfig)
    mcast_src_ip 192.168.17.132 # change to this master's own address
    virtual_router_id 51
    priority 88
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.17.130 # change to your VIP
    }
    track_script {
       chk_apiserver
    }
}

Note: the track script referenced above must be created by hand; its contents follow.
Remember to make it executable: chmod +x /etc/keepalived/check_apiserver.sh

vim /etc/keepalived/check_apiserver.sh

#!/bin/bash
# Probe for a running haproxy process up to 3 times; if it never
# appears, stop keepalived so the VIP fails over to the other master.
err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi

3. Enable the HA services at boot

systemctl enable keepalived && systemctl enable haproxy
systemctl start keepalived && systemctl start haproxy
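
On master01, verify that the VIP came up and that the HAProxy monitor endpoint defined earlier (port 33305) answers; a quick check, assuming the ens33 interface and the sample VIP used in this manual:

ip addr show ens33 | grep 192.168.17.130
curl http://127.0.0.1:33305/monitor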



III. Install the Container Runtime and Tools

1. Install containerd

Before installing containerd, libseccomp must be upgraded.

The libseccomp that CentOS 7's yum provides is version 2.3, which does not meet recent containerd releases' requirements; version 2.4 or later is needed.

wget https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm --no-check-certificate

Remove the old version:

[root@master01 ~]# rpm -qa | grep libseccomp
libseccomp-2.3.1-3.el7.x86_64
[root@master01 ~]# rpm -e libseccomp-2.3.1-3.el7.x86_64 --nodeps
[root@master01 ~]# 


Install the new version and verify:
[root@master01 ~]# rpm -ivh libseccomp-2.5.1-1.el8.x86_64.rpm 
warning: libseccomp-2.5.1-1.el8.x86_64.rpm: Header V3 RSA/SHA256 Signature, key ID 8483c65d: NOKEY
Preparing...                          ################################# [100%]
Updating / installing...
   1:libseccomp-2.5.1-1.el8           ################################# [100%]
[root@master01 ~]# rpm -qa | grep libseccomp
libseccomp-2.5.1-1.el8.x86_64
[root@master01 ~]# 
Download and unpack the cri-containerd-cni bundle (it includes containerd, runc, and the CNI plugins):

wget https://github.com/containerd/containerd/releases/download/v1.6.5/cri-containerd-cni-1.6.5-linux-amd64.tar.gz --no-check-certificate

tar -zxvf cri-containerd-cni-1.6.5-linux-amd64.tar.gz -C /

yum install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin

containerd config default > /etc/containerd/config.toml
vim /etc/containerd/config.toml
# Find the existing registry.mirrors line and add the two lines below it:
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
        endpoint = ["https://registry.aliyuncs.com"]
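
Because the kubeadm config in Part IV sets the kubelet's cgroupDriver to systemd, containerd's runc runtime should use the systemd cgroup driver as well, or pods may restart repeatedly. While editing the same config.toml, set this under the runc options section:

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true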

systemctl daemon-reload
systemctl enable --now containerd
systemctl is-active containerd
systemctl status containerd

2. Install kubelet, kubeadm, and kubectl

On the master nodes:

yum -y install  kubeadm  kubelet kubectl

systemctl enable kubelet

On the worker nodes:

yum install -y kubelet kubeadm
systemctl enable kubelet
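
The commands above install the newest version available in the repo (the cluster output later in this manual shows v1.25.3). To pin an exact version instead, a sketch:

yum -y install kubelet-1.25.3 kubeadm-1.25.3 kubectl-1.25.3    # masters
yum -y install kubelet-1.25.3 kubeadm-1.25.3                   # workers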

IV. Initialize the Kubernetes Cluster (run on the master nodes)

1. Create the kubeadm config on master01


vim /root/kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.17.131
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: master01
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: mycluster
controlPlaneEndpoint: 192.168.17.130:16443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.25.0
networking:
  dnsDomain: cluster.local
  podSubnet: 172.16.0.0/12
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd
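
Before pulling, you can have kubeadm resolve the image list from this file, which doubles as a quick check that the config parses:

kubeadm config images list --config /root/kubeadm-config.yaml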

2. Pull the images and initialize the cluster:

kubeadm config images pull --config /root/kubeadm-config.yaml

kubeadm init --config /root/kubeadm-config.yaml --upload-certs

  • Note 1: the join command for master nodes differs from the join command for worker nodes.
  • Note 2: kubectl will not work until you set up the kubeconfig under your user's .kube/config.
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.17.130:16443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:f3a99a726e75912b69168e2f524c58255bb7267d9b5a45c6414820dd63dcade8 \
	--control-plane --certificate-key 4ba268bc79b0b16dd97efd97a04546e11d7be3de75c4333267296f48c318e2e8

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.17.130:16443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:f3a99a726e75912b69168e2f524c58255bb7267d9b5a45c6414820dd63dcade8

Per the note above, set up the kubeconfig on master01 (or, as root, export it directly):

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

export KUBECONFIG=/etc/kubernetes/admin.conf

At this point master01's initialization is complete; next, add the master02 node.

3. Add the master02 node

  • Remember the hint printed when master01 finished initializing?
You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.17.130:16443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:f3a99a726e75912b69168e2f524c58255bb7267d9b5a45c6414820dd63dcade8 \
	--control-plane --certificate-key 4ba268bc79b0b16dd97efd97a04546e11d7be3de75c4333267296f48c318e2e8
  • Run it on master02:
kubeadm join 192.168.17.130:16443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:f3a99a726e75912b69168e2f524c58255bb7267d9b5a45c6414820dd63dcade8 \
	--control-plane --certificate-key 4ba268bc79b0b16dd97efd97a04546e11d7be3de75c4333267296f48c318e2e8
  • On success you will see the following; set up the kubeconfig as it instructs:
  This node has joined the cluster and a new control plane instance was created:
  
  * Certificate signing request was sent to apiserver and approval was received.
  * The Kubelet was informed of the new secure connection details.
  * Control plane label and taint were applied to the new node.
  * The Kubernetes control plane instances scaled up.
  * A new etcd member was added to the local/stacked etcd cluster.
  
  To start administering your cluster from this node, you need to run the following as a regular user:
  
  	mkdir -p $HOME/.kube
  	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  	sudo chown $(id -u):$(id -g) $HOME/.kube/config
  
  Run 'kubectl get nodes' to see this node join the cluster.

4. Add the worker nodes

  • Run the following on node01 through node03:
kubeadm join 192.168.17.130:16443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:f3a99a726e75912b69168e2f524c58255bb7267d9b5a45c6414820dd63dcade8 
  • A successful join looks like this:
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
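
The bootstrap token defined in the config above has a 24-hour TTL. If it has expired by the time a node joins, generate a fresh worker join command on master01:

kubeadm token create --print-join-command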

  • Verify the result:
[root@master01 ~]# kubectl get nodes 
NAME       STATUS   ROLES           AGE     VERSION
master01   Ready    control-plane   14m     v1.25.3
master02   Ready    control-plane   9m30s   v1.25.3
node01     Ready    <none>          104s    v1.25.3
node02     Ready    <none>          102s    v1.25.3
node03     Ready    <none>          97s     v1.25.3

V. Deploy the Calico Network Plugin

1. Install the plugin

wget https://docs.projectcalico.org/manifests/calico.yaml --no-check-certificate
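
Note: in the stock calico.yaml the CALICO_IPV4POOL_CIDR variable is commented out, and Calico then falls back to a default pod IP pool of 192.168.0.0/16. Since this cluster's podSubnet is 172.16.0.0/12, you may want to uncomment and adjust it before applying; a sketch of the edit inside calico.yaml:

- name: CALICO_IPV4POOL_CIDR
  value: "172.16.0.0/12"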

kubectl apply -f calico.yaml
  • Watch the pods come up:
[root@master01 ~]# kubectl get po --all-namespaces
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-f79f7749d-hwpxw   1/1     Running   0          2m36s
kube-system   calico-node-4stcc                         1/1     Running   0          2m36s
kube-system   calico-node-4wz8b                         1/1     Running   0          2m36s
kube-system   calico-node-6hsbp                         1/1     Running   0          2m36s
kube-system   calico-node-gjwcn                         1/1     Running   0          2m36s
kube-system   calico-node-qvz77                         1/1     Running   0          2m36s
kube-system   coredns-c676cc86f-mx6xk                   1/1     Running   0          19m
kube-system   coredns-c676cc86f-pf7c5                   1/1     Running   0          19m
kube-system   etcd-master01                             1/1     Running   1          19m
kube-system   etcd-master02                             1/1     Running   0          14m
kube-system   kube-apiserver-master01                   1/1     Running   1          19m
kube-system   kube-apiserver-master02                   1/1     Running   0          14m
kube-system   kube-controller-manager-master01          1/1     Running   1          19m
kube-system   kube-controller-manager-master02          1/1     Running   0          13m
kube-system   kube-proxy-25k92                          1/1     Running   0          7m11s
kube-system   kube-proxy-cjn88                          1/1     Running   0          15m
kube-system   kube-proxy-ckjjp                          1/1     Running   0          7m18s
kube-system   kube-proxy-jxgv4                          1/1     Running   0          7m16s
kube-system   kube-proxy-npq68                          1/1     Running   0          19m
kube-system   kube-scheduler-master01                   1/1     Running   3          19m
kube-system   kube-scheduler-master02                   1/1     Running   0          14m
[root@master01 ~]# 

At this point, the k8s-1.25.3 deployment is fully complete.

VI. Deploy the Kuboard Cluster Management System

1. Install Kuboard v3 into the cluster

kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
# Alternatively, use the command below; the only difference is that it distributes
# the Kuboard images from Huawei Cloud's registry instead of Docker Hub
# kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3-swr.yaml

2. Wait for Kuboard v3 to become ready

[root@master01 ~]# kubectl get pods -n kuboard
NAME                               READY   STATUS    RESTARTS   AGE
kuboard-agent-2-65bc84c86c-r7tc4   1/1     Running   2          28s
kuboard-agent-78d594567-cgfp4      1/1     Running   2          28s
kuboard-etcd-fh9rp                 1/1     Running   0          67s
kuboard-etcd-nrtkr                 1/1     Running   0          67s
kuboard-etcd-ader3                 1/1     Running   0          67s
kuboard-v3-645bdffbf6-sbdxb        1/1     Running   0          67s

3. Access Kuboard

Username: admin
Password: Kuboard123
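
Per the Kuboard v3 defaults, the web UI is published as a NodePort service on port 30080, so it should be reachable at http://<any-node-ip>:30080 (for this environment, e.g. http://192.168.17.131:30080).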
