Ubuntu 22.04: K8s v1.32.2 upgrade and reinstall


--------------------------- Uninstall ---------------------------

sudo systemctl stop kubelet
sudo systemctl stop etcd
sudo systemctl stop docker
sudo systemctl stop containerd

sudo apt-get purge --auto-remove containerd.io

sudo kubeadm reset -f

sudo apt-get purge --auto-remove kubernetes-cni

sudo apt-get purge --auto-remove kubeadm

sudo apt-get purge --auto-remove kubectl

sudo apt-get purge --auto-remove kubelet


rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*
rm -rf /etc/cni
rm -rf /opt/cni
rm -rf /var/lib/etcd
rm -rf /var/etcd
# The commands above, combined into a single line:
sudo rm -rf ~/.kube/ /etc/kubernetes/ /etc/systemd/system/kubelet.service.d /etc/systemd/system/kubelet.service /usr/bin/kube* /etc/cni /opt/cni /var/lib/etcd /var/etcd

sudo apt remove 'kube*'
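# Optional sanity check that the uninstall actually cleaned up (plain dpkg/which queries, nothing destructive):
dpkg -l | grep -i kube || echo "no kubernetes packages remain"
which kubeadm kubectl kubelet || echo "kube binaries removed"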


------------------------ Install -------------------------------------------------------


# Install prerequisites and the signing key first; the repo entry must reference an existing keyring before apt-get update runs:
sudo apt install -y apt-transport-https ca-certificates curl gpg

sudo mkdir -p -m 755 /etc/apt/keyrings

curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.32/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.32/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list

sudo apt-get update

sudo apt-get install -y kubelet kubeadm kubectl
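# Optional: confirm apt resolves kubeadm from pkgs.k8s.io (a quick sanity check of the repo + keyring wiring):
apt-cache policy kubeadm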

## Not recommended: containerd as the container runtime (a pile of issues when it coexists with Docker): apt install -y containerd
## sudo mkdir -p /etc/containerd

# Use cri-dockerd as the container runtime instead
# Download a suitable cri-dockerd release from https://github.com/Mirantis/cri-dockerd (Ubuntu 22.04 here)
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.16/cri-dockerd_0.3.16.3-0.ubuntu-jammy_amd64.deb

# Install the .deb
sudo dpkg -i cri-dockerd_0.3.16.3-0.ubuntu-jammy_amd64.deb
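# The .deb ships systemd units named cri-docker.service / cri-docker.socket (note: no trailing "d").
# Enable them and confirm the socket kubeadm will be pointed at actually exists:
sudo systemctl enable --now cri-docker.service cri-docker.socket
ls -l /var/run/cri-dockerd.sock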

kubectl version --client
kubeadm version
kubelet --version

sudo apt-mark hold kubelet kubeadm kubectl
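# Verify the version hold took effect:
apt-mark showhold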


# sudo kubeadm config images pull --kubernetes-version=v1.32.2 --cri-socket=unix:///var/run/cri-dockerd.sock
# Pre-pull the control-plane images (via the Aliyun mirror):
sudo kubeadm config images pull --kubernetes-version=v1.32.2 --cri-socket=unix:///var/run/cri-dockerd.sock --image-repository=registry.aliyuncs.com/google_containers
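# Optional: list exactly which images that pulls (handy for checking the mirror carries everything):
sudo kubeadm config images list --kubernetes-version=v1.32.2 --image-repository=registry.aliyuncs.com/google_containers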

sudo hostnamectl set-hostname csn-t40
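# --control-plane-endpoint=csn-t40 below only works if the name resolves on every node.
# Without DNS, a hosts entry is the simplest option (using this machine's IP from the note below):
echo '192.168.31.213 csn-t40' | sudo tee -a /etc/hosts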

# This machine's IP: 192.168.31.213
# The apiserver-advertise-address, service-cidr, and pod-network-cidr ranges must not overlap; the latter two usually use 10.x.x.x ranges, as below:
# csn-t40 is the fully qualified domain name and must conform to RFC 1035
sudo kubeadm init \
--apiserver-advertise-address=192.168.31.213 \
--control-plane-endpoint=csn-t40 \
--kubernetes-version=v1.32.2 \
--service-cidr=10.50.0.0/16 \
--pod-network-cidr=10.60.0.0/16 \
--cri-socket=unix:///var/run/cri-dockerd.sock \
--image-repository=registry.aliyuncs.com/google_containers

# If the init above failed, run these cleanup commands before re-initializing:
sudo kubeadm reset -f # reset kubeadm
#sudo kubeadm reset -f --cri-socket unix:///var/run/cri-dockerd.sock

sudo rm -rf /etc/cni/net.d # remove files generated by the previous init
sudo rm -rf /var/lib/etcd # remove files generated by the previous init
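# Another frequent preflight failure is swap being enabled; kubeadm requires it off:
sudo swapoff -a
sudo sed -i '/ swap / s/^/#/' /etc/fstab # keep swap off across reboots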



# On success, kubeadm prints:
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

kubeadm join csn-t40:6443 --token cif72w.v2rnleu00z1qbh3a \
--discovery-token-ca-cert-hash sha256:0aee5c0594a535eb37dc0d084361d980a65a011d8b04ba96b5e98da2d309cc82 \
--control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join csn-t40:6443 --token cif72w.v2rnleu00z1qbh3a \
--discovery-token-ca-cert-hash sha256:0aee5c0594a535eb37dc0d084361d980a65a011d8b04ba96b5e98da2d309cc82


# Create the kubeconfig for the current user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
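# Quick check that kubectl can reach the new cluster:
kubectl get nodes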


# Allow scheduling on the control-plane node too
kubectl taint nodes csn-t40 node-role.kubernetes.io/control-plane-
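# Confirm the taint is gone; expect "Taints: <none>":
kubectl describe node csn-t40 | grep Taints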

# Run on additional control-plane nodes (the node that ran kubeadm init does not need this):
## kubeadm join csn-t40:6443 --token cif72w.v2rnleu00z1qbh3a \
## --discovery-token-ca-cert-hash sha256:0aee5c0594a535eb37dc0d084361d980a65a011d8b04ba96b5e98da2d309cc82 \
## --control-plane
# Run on each worker node:
## kubeadm join csn-t40:6443 --token cif72w.v2rnleu00z1qbh3a \
## --discovery-token-ca-cert-hash sha256:0aee5c0594a535eb37dc0d084361d980a65a011d8b04ba96b5e98da2d309cc82
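# Join tokens expire after 24 hours by default; generate a fresh join command on the control plane with:
sudo kubeadm token create --print-join-command
# Note: if a joining node has more than one CRI socket, append --cri-socket=unix:///var/run/cri-dockerd.sock to the join command.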

# Installing Calico via the tigera-operator: too many problems, skip it, don't ask
#wget https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/tigera-operator.yaml
# Replace the image registry addresses
#kubectl create -f tigera-operator.yaml
#wget https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/custom-resources.yaml
#vi custom-resources.yaml
# Change cidr to the pod-network-cidr value:
#cidr: 10.60.0.0/16

# Install Calico
wget https://docs.projectcalico.org/manifests/calico.yaml
# Replace every image address in calico.yaml with a domestic mirror proxy
vi calico.yaml
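# A sketch of that rewrite with sed; MIRROR_HOST is a placeholder for whichever proxy you use:
# sed -i 's#docker.io/calico/#MIRROR_HOST/calico/#g' calico.yaml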

kubectl create -f calico.yaml
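# Watch the Calico pods until calico-node and calico-kube-controllers reach Running (Ctrl-C to stop):
kubectl get pods -n kube-system -l k8s-app=calico-node -w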


# Install Kuboard
wget https://addons.kuboard.cn/kuboard/kuboard-v3-swr.yaml

vi kuboard-v3-swr.yaml
# Comment out this line: KUBOARD_SERVER_NODE_PORT: '30080'
# Add this line:         KUBOARD_ENDPOINT: http://192.168.31.213:30080

kubectl create -f kuboard-v3-swr.yaml

kubectl get pods -A
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-f58f6d565-rq5h9   1/1     Running   0          22h
kube-system   calico-node-45mws                         1/1     Running   0          22h
kube-system   coredns-6766b7b6bb-7qmz2                  1/1     Running   0          22h
kube-system   coredns-6766b7b6bb-n7bhw                  1/1     Running   0          22h
kube-system   etcd-csn-t40                              1/1     Running   7          22h
kube-system   kube-apiserver-csn-t40                    1/1     Running   8          22h
kube-system   kube-controller-manager-csn-t40           1/1     Running   7          22h
kube-system   kube-proxy-dxrn2                          1/1     Running   0          22h
kube-system   kube-scheduler-csn-t40                    1/1     Running   7          22h
kuboard       kuboard-etcd-48xgq                        1/1     Running   0          147m
kuboard       kuboard-v3-d586d5c48-p678r                1/1     Running   0          147m

# Finally, open http://localhost:38000/

In the cluster list, select the cluster > click Import Cluster Info > delete the existing cluster info > re-import it from the kubeconfig.

Done.

