部署K8S-1-26

DevOps 入门

1 部署K8S

1.1 节点准备

节点名 ip 功能
k8s-master 10.0.0.153 控制平面(master)
k8s-node1 10.0.0.154 工作节点(worker)
k8s-node2 10.0.0.155 工作节点(worker)

1.2 初始操作

在所有节点执行

#1 Disable the firewall
systemctl disable firewalld
systemctl stop firewalld
firewall-cmd --state
#2 Disable SELinux
# Target the SELINUX= line only, so other occurrences of the word
# "enforcing" (e.g. in comments) are left untouched.
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config  # permanent
setenforce 0 # temporary (current boot only)
# Disable swap (required by kubelet)
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent
#init 6
# Set the hostname (run the matching command on each node)
#hostnamectl set-hostname k8s-xxxx
# Populate /etc/hosts. Names are lowercase to match the node table and
# because Kubernetes node names must be valid lowercase RFC 1123 labels
# (the original used "k8S-*").
cat >> /etc/hosts << EOF
10.0.0.153 k8s-master
10.0.0.154 k8s-node1
10.0.0.155 k8s-node2
EOF
# Load br_netfilter BEFORE applying sysctls — otherwise the
# net.bridge.bridge-nf-call-* keys do not exist yet and
# "sysctl --system" would report errors for them.
modprobe br_netfilter
# Verify the module is loaded
lsmod | grep br_netfilter
# Pass bridged IPv4 traffic to the iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
sysctl --system
# Install ipset and ipvsadm (needed for kube-proxy IPVS mode)
yum install -y ipset ipvsadm

# Make the ipvs kernel modules load on every boot.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

# Mark the script executable, load the modules now, then confirm
# they are present.
chmod 755 /etc/sysconfig/modules/ipvs.modules \
  && bash /etc/sysconfig/modules/ipvs.modules \
  && lsmod | grep -e ip_vs -e nf_conntrack

# Time synchronization with chrony
yum install -y chrony
# Point chrony at the Aliyun NTP servers. Appended non-interactively
# instead of the original "edit /etc/chrony.conf by hand" instruction.
# NOTE(review): the distro default pool lines remain active — comment
# them out if only the Aliyun servers should be used.
cat >> /etc/chrony.conf << EOF
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst
EOF
# Start chronyd now and enable it at boot
systemctl start chronyd.service
systemctl enable chronyd.service
# Check synchronization status, then compare the local clock
chronyc sources -v
date -R

升级操作系统内核

# 查看内核版本
[root@K8S-master ~]# uname -r
4.18.0-408.el8.x86_64
# Import the ELRepo repository public key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
# Install the ELRepo yum repository
yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm
# List kernels available from the elrepo-kernel repository
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
yum list kernel --showduplicates | sort -V

# Workaround for mirrorlist errors on EOL CentOS: switch the repo files
# to vault.centos.org. NOTE: the second sed must target the same repo
# files as the first — the original's truncated path "/etc/yum.repos"
# matched nothing, so the baseurl was never rewritten.
sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*
sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*

# ELRepo ships two kernels: kernel-lt (long-term support) and
# kernel-ml (mainline). kernel-lt is the more stable choice.
yum --enablerepo=elrepo-kernel install kernel-lt

# Boot into the new kernel.
# Show the current default boot kernel:
grubby --default-kernel

# Index 0 is the most recently installed kernel; make it the default:
grub2-set-default 0
# ...or point at a specific kernel image explicitly:
grubby --set-default /boot/vmlinuz-5.19.2-1.el8.elrepo.x86_64

# Reboot, then verify the running kernel
uname -r

1.3 安装基础软件

在所有节点操作

1.3.1安装 Docker

# Configure the Docker rpm repository — either the upstream repo...
sudo yum install -y yum-utils
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# ...or the Aliyun mirror. The file must land in /etc/yum.repos.d/
# with a .repo suffix or yum will ignore it. (The original had a
# typo'd host "aliyum", a wrong repo filename "docker-ce-repo", and
# saved to a file without the .repo extension.)
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# Install Docker Engine (list available versions first if a specific
# release is needed)
yum list docker-ce --showduplicates | sort -r
sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin


# Start docker and enable it at boot
sudo systemctl start docker
sudo systemctl enable docker
# or, equivalently, in one step:
# systemctl enable --now docker
# Smoke-test docker
sudo docker run hello-world
# To uninstall docker completely:
sudo yum remove docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras
sudo rm -rf /var/lib/docker
sudo rm -rf /var/lib/containerd
# Switch the cgroup driver to systemd so it matches the kubelet.
docker info | grep Driver
# Write the config non-interactively instead of the original "edit
# with vi and add the line" instruction.
# NOTE(review): this overwrites daemon.json — merge by hand if it
# already contains other settings.
cat > /etc/docker/daemon.json << EOF
{"exec-opts": ["native.cgroupdriver=systemd"]}
EOF
# Restart docker to apply the new cgroup driver
systemctl daemon-reload
systemctl restart docker
systemctl enable docker

1.3.2 安装go和cri-dockerd

yum install git make go -y

# Build cri-dockerd v0.3.10 — pin the tag so the build matches the
# version stated here (a bare clone would build whatever the default
# branch currently is).
git clone -b v0.3.10 https://github.com/Mirantis/cri-dockerd.git

cd cri-dockerd
make cri-dockerd

# Run these commands as root. We are still inside the cri-dockerd
# checkout — the original repeated "cd cri-dockerd" here, which fails
# because there is no nested directory of that name.
mkdir -p /usr/local/bin
install -o root -g root -m 0755 cri-dockerd /usr/local/bin/cri-dockerd
install packaging/systemd/* /etc/systemd/system
sed -i -e 's,/usr/bin/cri-dockerd,/usr/local/bin/cri-dockerd,' /etc/systemd/system/cri-docker.service
systemctl daemon-reload
systemctl restart cri-docker.socket
systemctl enable --now cri-docker.socket




# Point cri-dockerd at the Aliyun pause image (the ExecStart line of
# the unit file). Done with sed instead of the original interactive
# "vim ... add on line 10" instruction so the step is scriptable.
sed -i 's,^ExecStart=.*,ExecStart=/usr/local/bin/cri-dockerd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 --container-runtime-endpoint fd://,' /etc/systemd/system/cri-docker.service
# Unit file changes take effect only after a reload + restart
systemctl daemon-reload
systemctl restart cri-docker
 # Alternative: install Go from the official tarball
 # (assumes go1.22.0.linux-amd64.tar.gz is already in the current dir)
 rm -rf /usr/local/go && tar -C /usr/local -xzf go1.22.0.linux-amd64.tar.gz
 export PATH=$PATH:/usr/local/go/bin
 go version
# Alternative build path: cri-dockerd 0.3.1 built directly with go
# Source tree: https://github.com/Mirantis/cri-dockerd/tree/v0.3.1
git clone https://github.com/Mirantis/cri-dockerd.git
# Run these commands as root
### Install Go via the getgo installer (optional) ###
wget https://storage.googleapis.com/golang/getgo/installer_linux
chmod +x ./installer_linux
./installer_linux
source ~/.bash_profile

cd cri-dockerd
mkdir bin
go build -o bin/cri-dockerd
mkdir -p /usr/local/bin
install -o root -g root -m 0755 bin/cri-dockerd /usr/local/bin/cri-dockerd
cp -a packaging/systemd/* /etc/systemd/system
sed -i -e 's,/usr/bin/cri-dockerd,/usr/local/bin/cri-dockerd,' /etc/systemd/system/cri-docker.service
systemctl daemon-reload
systemctl enable cri-docker.service
systemctl enable --now cri-docker.socket

1.3.3 添加阿里云yum源

# Add the Aliyun Kubernetes yum repository.
# NOTE(review): the baseurl path is el7 — that is the layout Aliyun's
# mirror publishes; confirm it serves the rpms needed on this el8 host.
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# List the versions available for each component
yum list kubelet --showduplicates | sort -V
yum list kubeadm --showduplicates | sort -V
yum list kubectl --showduplicates | sort -V

1.3.4 安装kubeadm,kubelet,kubectl

# Install kubeadm, kubelet and kubectl pinned to 1.26.2
yum install -y kubelet-1.26.2-0 kubeadm-1.26.2-0 kubectl-1.26.2-0

# Make kubelet use the systemd cgroup driver (must match the docker
# daemon.json setting). Written non-interactively instead of the
# original "vi /etc/sysconfig/kubelet" + bare config line.
cat > /etc/sysconfig/kubelet << EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF

systemctl enable kubelet

# Show which images this Kubernetes version needs (sample output below)
[root@K8S-master cri-dockerd]# kubeadm config images list --kubernetes-version=v1.26.2
registry.k8s.io/kube-apiserver:v1.26.2
registry.k8s.io/kube-controller-manager:v1.26.2
registry.k8s.io/kube-scheduler:v1.26.2
registry.k8s.io/kube-proxy:v1.26.2
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.6-0
registry.k8s.io/coredns/coredns:v1.9.3

# Pre-pull the images from the Aliyun mirror through cri-dockerd
kubeadm config images pull --kubernetes-version v1.26.2 --cri-socket unix:///var/run/cri-dockerd.sock --image-repository registry.aliyuncs.com/google_containers

1.4 部署Kubernetes Master

在Master节点执行

# Initialize the control plane on this node (the original comment said
# "initialize kubelet"; kubeadm init bootstraps the whole control plane)
kubeadm init \
	--apiserver-advertise-address=10.0.0.153 \
	--image-repository registry.aliyuncs.com/google_containers \
	--kubernetes-version v1.26.2 \
	--service-cidr=10.96.0.0/12 \
	--pod-network-cidr=10.244.0.0/16 \
	--cri-socket unix:///var/run/cri-dockerd.sock
# To undo a failed init and start over:
kubeadm reset --cri-socket unix:///var/run/cri-dockerd.sock

# 初始化结果
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.0.0.153:6443 --token rfq5i1.rhsnjhkvt4vvr3bl \
	--discovery-token-ca-cert-hash sha256:25d1cb98cde7dc48bf91493801fece428db0d5e4c21590853edbe069e7af011b \
    --cri-socket unix:///var/run/cri-dockerd.sock

# Configure kubectl for the current (non-root) user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Smoke-test kubectl against the new cluster
kubectl get po
kubectl get nodes

1.5 添加Kubernetes node

在所有计算节点执行

# Join this node to the cluster (token and hash come from the
# "kubeadm init" output on the master)
kubeadm join 10.0.0.153:6443 --token rfq5i1.rhsnjhkvt4vvr3bl \
	--discovery-token-ca-cert-hash sha256:25d1cb98cde7dc48bf91493801fece428db0d5e4c21590853edbe069e7af011b \
    --cri-socket unix:///var/run/cri-dockerd.sock
# If the token expired, create a new one (run on the master)
kubeadm token create
kubeadm token list
# Recompute the discovery hash; prefix the output with "sha256:" when joining
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt|openssl rsa -pubin -outform der 2>/dev/null|\
openssl dgst -sha256 -hex|sed 's/^.* //'

1.6 部署CNI网络插件

在master节点执行

# Check control-plane component / system pod status
kubectl get componentstatus   
kubectl get cs
kubectl get pods -n kube-system
# Download the calico manifest
mkdir -p /opt/k8s
cd /opt/k8s
curl https://docs.tigera.io/archive/v3.25/manifests/calico.yaml -O

# Edit calico.yaml: set CALICO_IPV4POOL_CIDR to the --pod-network-cidr
# used at kubeadm init (10.244.0.0/16).
# IP_AUTODETECTION_METHOD (NIC name): no longer present in this
# manifest version, so it was left unchanged.
# Strip the docker.io/ image prefix so mirrored registries work
grep image calico.yaml
sed -i 's#docker.io/##g' calico.yaml
# Deploy calico
kubectl apply -f calico.yaml
kubectl get po -n kube-system
# Pod name below is an example from this cluster; substitute your own
kubectl describe po calico-kube-controllers-cd8566cf-sftxd -n kube-system
# Image pulls can take a while
# Smoke-test the cluster network
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort

1.7 在任意节点使用kubectl

# Copy the admin kubeconfig from the master to the worker nodes.
# Node IPs per the table at the top of this document are
# 10.0.0.154 / 10.0.0.155 — the original targeted .151/.152,
# which are not cluster nodes.
scp /etc/kubernetes/admin.conf root@10.0.0.154:/etc/kubernetes/
scp /etc/kubernetes/admin.conf root@10.0.0.155:/etc/kubernetes/
# On each node that should run kubectl, export the kubeconfig path
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
posted @ 2024-02-26 14:46  goldtree358  阅读(6)  评论(0编辑  收藏  举报