dengyouf

导航

Kubeadm 安装 Kubernetes v1.28.6(Docker)

1. Kubeadm 安装 kubernetes集群

基于 Kubeadm 部署Kubernetes集群。操作系统为 Ubuntu 20.04 LTS,用到的各相关程序版本如下:

  • kubernetes: v1.28.6
  • docker: 20.10.22
  • cri-dockerd: v0.3.8
  • cni: flannel
主机名 IP 机器配置
kubeadm-master01 192.168.1.81 4c8g
kubeadm-worker01 192.168.1.91 4c8g
kubeadm-worker02 192.168.1.92 4c8g
kubeadm-worker03 192.168.1.93 4c8g

1.2 安装环境准备

  • 主机名解析
~# cat  >> /etc/hosts <<EOF
192.168.1.81 kubeadm-master01 kubeadm-master01.linux.io
192.168.1.91 kubeadm-worker01 kubeadm-worker01.linux.io
192.168.1.92 kubeadm-worker02 kubeadm-worker02.linux.io
192.168.1.93 kubeadm-worker03 kubeadm-worker03.linux.io
# 用于扩展 ApiServer
192.168.1.81 kubeadm-vip.linux.io
EOF
  • 关闭防火墙
~# ufw  disable
  • 禁用Swap
~# systemctl  --type swap
    UNIT          LOAD   ACTIVE SUB    DESCRIPTION
    dev-vda2.swap loaded active active Swap Partition

~# systemctl  mask dev-vda2.swap

~# sed  -ri  's@/.*swap.*@# &@' /etc/fstab && swapoff -a
  • 时间同步
~# cp /usr/share/zoneinfo/Asia/Shanghai  /etc/localtime

~# apt install  -y chrony

~# cat > /etc/chrony/chrony.conf <<EOF
server ntp.aliyun.com iburst
stratumweight 0
driftfile /var/lib/chrony/drift
rtcsync
makestep 10 3
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
keyfile /etc/chrony.keys
commandkey 1
generatecommandkey
logchange 0.5
logdir /var/log/chrony
EOF

~# systemctl  restart chrony
~# chronyc  sources
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* 203.107.6.88                  2   6    17    12  -1012us[-4551us] +/-   26ms

  • 加载ipvs模块
~# apt install ipvsadm ipset -y
~# cat > load_ipvs.sh << "EOF"
#!/bin/bash
# Load every IPVS kernel module available for the running kernel and
# persist the module names to /etc/modules-load.d/ipvs.conf so they are
# re-loaded automatically on boot.
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for mod_path in "$ipvs_mods_dir"/*; do
    # Strip the directory and extension(s) to get the bare module name
    # (e.g. ip_vs_rr.ko.xz -> ip_vs_rr).
    mod="${mod_path##*/}"
    mod="${mod%%.*}"
    # Only load modules that modinfo can resolve for this kernel.
    if /sbin/modinfo -F filename "$mod" &> /dev/null; then
        /sbin/modprobe "$mod"
        echo "$mod" >> /etc/modules-load.d/ipvs.conf
    fi
done
EOF
~# bash load_ipvs.sh
~# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
  • 内核参数优化
cat > /etc/sysctl.d/k8s.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-arptables = 1
net.ipv4.tcp_tw_reuse = 0
net.core.somaxconn = 32768
net.netfilter.nf_conntrack_max=1000000
vm.swappiness = 0
vm.max_map_count=655360
fs.file-max=6553600
EOF

sysctl --system

cat >> /etc/modules-load.d/k8s.conf << "EOF"
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter

1.3 安装容器运行时

  • 添加docker软件仓库
for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do sudo apt-get remove $pkg; done

# 安装必要的一些系统工具
sudo apt-get update
sudo apt-get install ca-certificates curl gnupg

# 信任 Docker 的 GPG 公钥
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg

# 写入软件源信息
echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://mirrors.aliyun.com/docker-ce/linux/ubuntu \
  "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
  sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
  • 安装docker
~# sudo apt-get update
~# sudo apt-cache madison docker-ce
~# sudo apt install -y docker-ce=5:20.10.22~3-0~ubuntu-focal
  • 配置docker加速器
# 加速器
~# mkdir -pv /etc/docker
~# sudo tee /etc/docker/daemon.json > /dev/null <<-'EOF'
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": [
        "https://docker.m.daocloud.io",
        "https://registry.dockermirror.com",
        "https://docker.1panel.live"
    ]
}
EOF
~# systemctl daemon-reload && systemctl restart docker

# 代理
mkdir -p /etc/systemd/system/docker.service.d
cat > /etc/systemd/system/docker.service.d/proxy.conf << EOF
[Service]
Environment="HTTP_PROXY=http://172.16.192.1:7890/"
Environment="HTTPS_PROXY=http://172.16.192.1:7890/"
Environment="NO_PROXY=localhost,127.0.0.1,.linux.io,10.244.0.0/16"
EOF
systemctl daemon-reload && systemctl restart docker
docker pull registry.k8s.io/pause:3.6

1.4 安装 cri-dockerd

  • 安装 cri-dockerd
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.8/cri-dockerd-0.3.8.amd64.tgz
tar -xzvf cri-dockerd-0.3.8.amd64.tgz
sudo install -m 0755 -o root -g root -t /usr/local/bin cri-dockerd/cri-dockerd

wget https://raw.githubusercontent.com/Mirantis/cri-dockerd/master/packaging/systemd/cri-docker.service
wget https://raw.githubusercontent.com/Mirantis/cri-dockerd/master/packaging/systemd/cri-docker.socket

sudo install cri-docker.service /etc/systemd/system
sudo install cri-docker.socket /etc/systemd/system
sudo sed -i -e 's@/usr/bin/cri-dockerd@/usr/local/bin/cri-dockerd@' /etc/systemd/system/cri-docker.service

sudo systemctl daemon-reload
sudo systemctl enable --now cri-docker.socket
sudo systemctl start cri-docker.service && systemctl status cri-docker.service
  • 配置cri-docker中初始化容器
~# cp /etc/systemd/system/cri-docker.service{,.bak}
~# vim  /etc/systemd/system/cri-docker.service
[Unit]
...
[Service]
Type=notify
#ExecStart=/usr/local/bin/cri-dockerd --container-runtime-endpoint fd://
ExecStart=/usr/local/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-cache-dir=/var/lib/cni/cache --cni-conf-dir=/etc/cni/net.d --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9

~# systemctl  daemon-reload && systemctl  restart  cri-docker.service && systemctl  status cri-docker.service

2. 安装 Kubernetes

2.1 安装 kubeadm、kubelet 和 kubectl

  • 配置kubernetes源
~# apt-get update && apt-get install -y apt-transport-https
~# sudo mkdir -p -m 755 /etc/apt/keyrings
~# curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.28/deb/Release.key |
    gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
~# echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.28/deb/ /" |
    tee /etc/apt/sources.list.d/kubernetes.list
~# apt-get update
~# apt-cache madison kubeadm

~# apt install -y kubeadm=1.28.6-1.1 kubelet=1.28.6-1.1 kubectl=1.28.6-1.1
  • 配置Kubelet

配置 kubelet,为其指定cri-dockerd在本地打开的Unix Sock文件的路径。该配置也可不进行,而是直接在后面的各kubeadm命令上使用 --cri-socket unix:///run/cri-dockerd.sock 选项。

~# echo 'KUBELET_KUBEADM_ARGS="--container-runtime=remote --container-runtime-endpoint=/run/cri-dockerd.sock"' > /etc/default/kubelet

2.2 初始化集群

  • 拉取镜像
~# kubeadm config images pull --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version=1.28.6 \
--cri-socket=unix:///var/run/cri-dockerd.sock

[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.28.6
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.28.6
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.28.6
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.28.6
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.10-0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.10.1
  • 初始化 master
# 命令行初始化
~# kubeadm init --kubernetes-version=v1.28.6 \
    --control-plane-endpoint=kubeadm-vip.linux.io \
    --apiserver-advertise-address=0.0.0.0 \
    --pod-network-cidr=10.244.0.0/16   \
    --service-cidr=10.96.0.0/12 \
    --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
    --token-ttl=0 \
    --upload-certs \
    --ignore-preflight-errors=Swap \
    --cri-socket=unix:///var/run/cri-dockerd.sock | tee kubeadm-init.log

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join kubeadm-vip.linux.io:6443 --token nsph1t.sqkby8cz31lhqwi7 \
        --discovery-token-ca-cert-hash sha256:2b759ab416241f1a0b6117e9afb65af52cc7229211d18e7c30f92ec2b7000c05 \
        --control-plane --certificate-key ea08efc443d1895942c286432f4528cb163ee896606fa1d9ecfa8c59adcfc7ec

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join kubeadm-vip.linux.io:6443 --token nsph1t.sqkby8cz31lhqwi7 \
        --discovery-token-ca-cert-hash sha256:2b759ab416241f1a0b6117e9afb65af52cc7229211d18e7c30f92ec2b7000c05
  • 配置kubectl
~# mkdir -p $HOME/.kube
~# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
~# sudo chown $(id -u):$(id -g) $HOME/.kube/config
~# kubectl  get nodes
  • 加入 Worker
~# kubeadm join kubeadm-vip.linux.io:6443 --token nsph1t.sqkby8cz31lhqwi7 \
        --discovery-token-ca-cert-hash sha256:2b759ab416241f1a0b6117e9afb65af52cc7229211d18e7c30f92ec2b7000c05 \
        --cri-socket=unix:///var/run/cri-dockerd.sock

~# kubectl  get nodes -o wide
NAME               STATUS     ROLES           AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
kubeadm-master01   NotReady   control-plane   2m33s   v1.28.6   192.168.1.81   <none>        Ubuntu 20.04.6 LTS   5.4.0-204-generic   docker://20.10.22
kubeadm-worker01   NotReady   <none>          27s     v1.28.6   192.168.1.91   <none>        Ubuntu 20.04.6 LTS   5.4.0-204-generic   docker://20.10.22
kubeadm-worker02   NotReady   <none>          23s     v1.28.6   192.168.1.92   <none>        Ubuntu 20.04.6 LTS   5.4.0-204-generic   docker://20.10.22
kubeadm-worker03   NotReady   <none>          20s     v1.28.6   192.168.1.93   <none>        Ubuntu 20.04.6 LTS   5.4.0-204-generic   docker://20.10.22

3. 安装网络插件

  • 安装Flannel
~# wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
~# grep -C 5 244 kube-flannel.yml
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "EnableNFTables": false,
      "Backend": {
        "Type": "vxlan"
      }
    }
~# grep image kube-flannel.yml
        image: docker.io/flannel/flannel:v0.26.1
        image: docker.io/flannel/flannel-cni-plugin:v1.5.1-flannel2
        image: docker.io/flannel/flannel:v0.26.1

~# kubectl  apply -f kube-flannel.yml

~# kubectl  get nodes
NAME               STATUS   ROLES           AGE     VERSION
kubeadm-master01   Ready    control-plane   8m30s   v1.28.6
kubeadm-worker01   Ready    <none>          6m24s   v1.28.6
kubeadm-worker02   Ready    <none>          6m20s   v1.28.6
kubeadm-worker03   Ready    <none>          6m17s   v1.28.6
  • 调整为ipvs模式
~# kubectl  edit cm kube-proxy -n kube-system
    mode: "ipvs" 

~# kubectl  delete pod -n kube-system -l k8s-app=kube-proxy

4. 验证集群

4.1 验证网络

  • 创建 Deployment 和 Service 资源
~#  kubectl  create deployment myapp --image=ikubernetes/myapp:v1 --replicas=3
deployment.apps/myapp created
~# kubectl  expose deployment/myapp --type=NodePort --port=80 --target-port=80
service/myapp exposed
~# kubectl  get svc/myapp
NAME    TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
myapp   NodePort   10.111.43.175   <none>        80:30095/TCP   34s

~# kubectl  get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE               NOMINATED NODE   READINESS GATES
myapp-5d9c4b4647-bb6fx   1/1     Running   0          49s   10.244.3.4   kubeadm-worker03   <none>           <none>
myapp-5d9c4b4647-k5brt   1/1     Running   0          49s   10.244.2.2   kubeadm-worker02   <none>           <none>
myapp-5d9c4b4647-wb5gm   1/1     Running   0          49s   10.244.1.2   kubeadm-worker01   <none>           <none>

~# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    ...
TCP  10.111.43.175:80 rr
  -> 10.244.1.2:80                Masq    1      0          0
  -> 10.244.2.2:80                Masq    1      0          0
  -> 10.244.3.4:80                Masq    1      0          0
TCP  10.244.0.0:30095 rr
  -> 10.244.1.2:80                Masq    1      0          0
  -> 10.244.2.2:80                Masq    1      0          0
  -> 10.244.3.4:80                Masq    1      0          0
    ...

~# for i in `seq 5`;do curl 10.111.43.175/hostname.html;done
myapp-5d9c4b4647-wb5gm
myapp-5d9c4b4647-bb6fx
myapp-5d9c4b4647-k5brt
myapp-5d9c4b4647-wb5gm
myapp-5d9c4b4647-bb6fx  

注意:Service IP 和 NodePort 仅是网络规则,所以不支持 ping,但是支持 telnet

4.2 验证 DNS

service 的默认域名为 SVC_NAME.NAMESPACE.svc.cluster.local

~# kubectl  get svc -n kube-system -o wide
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   29m   k8s-app=kube-dns


~# kubectl  run busybox --image=busybox:1.28 -- sleep 3600
~# kubectl  get pod/busybox
NAME      READY   STATUS    RESTARTS   AGE
busybox   1/1     Running   0          16s

~# kubectl  exec -it busybox -- nslookup myapp.default.svc.cluster.local
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      myapp.default.svc.cluster.local
Address 1: 10.106.198.16 myapp.default.svc.cluster.local

posted on 2025-02-19 10:24  dengyouf  阅读(55)  评论(0)    收藏  举报