Installing K8s and Its Common Add-ons

Overview

The OS is ubuntu-24.04.3-live-server-amd64, running as VMs. The plan is 5 virtual machines: 1 master and 4 workers (work1-work4), all with the same configuration.

Steps for every node

# Install vim
sudo apt update
sudo apt install -y vim
# Enable IPv4 forwarding
sudo sysctl -w net.ipv4.ip_forward=1
# Make it persistent (survives a reboot)
echo "net.ipv4.ip_forward = 1" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
# Pass bridged traffic to the iptables chains
sudo modprobe br_netfilter
echo "br_netfilter" | sudo tee /etc/modules-load.d/br_netfilter.conf

sudo modprobe overlay

sudo tee /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF

sudo sysctl --system
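# Optional sanity check: confirm the modules are loaded and the sysctls took effect
lsmod | grep -E 'br_netfilter|overlay'
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward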



# Disable SELinux and verify it with the commands below [not needed on Ubuntu]
setenforce 0
cat /etc/selinux/config
# expect: SELINUX=disabled

# Disable the swap partition
sudo vim /etc/fstab
# Comment out the last line (/swap.img)
sudo reboot
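# Alternative that avoids the reboot (the sed pattern assumes the default Ubuntu /swap.img entry):
sudo swapoff -a
sudo sed -i '/\/swap.img/ s/^/#/' /etc/fstab
# Verify: the Swap row should show 0
free -h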


# Install the container runtime (containerd)
sudo apt update
sudo apt install -y apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt update
sudo apt-get install -y containerd.io


# Verify
containerd --version
sudo systemctl restart containerd
sudo systemctl enable containerd
sudo systemctl status containerd

# Make containerd use the systemd cgroup driver
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
sudo vim /etc/containerd/config.toml
# Find the following section and set SystemdCgroup to true
# [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
#   [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
#     SystemdCgroup = true
#     ...
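# Non-interactive alternative (a sketch; assumes the default config generated above still has SystemdCgroup = false):
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
grep -n 'SystemdCgroup' /etc/containerd/config.toml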


# Registry mirror address: find one yourself; a global proxy is used here instead

# Restart containerd
sudo systemctl daemon-reload
sudo systemctl restart containerd


# Install kubeadm (for newer versions go straight to https://kubernetes.io/zh-cn/docs/tasks/tools/install-kubectl-linux/)
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
sudo chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg 
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo chmod 644 /etc/apt/sources.list.d/kubernetes.list 
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
# Verify
kubeadm version
kubectl version --client
sudo systemctl enable --now kubelet
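# Note: kubelet will keep restarting until `kubeadm init`/`kubeadm join` has run; that is expected.
# Optional: confirm the packages are pinned
apt-mark showhold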

Change the IP and hostname (if needed)

sudo hostnamectl set-hostname work2
sudo vim /etc/hosts

sudo vim /etc/netplan/50-cloud-init.yaml 
# Edit as follows
network:
  version: 2
  renderer: networkd
  ethernets:
    ens33:
      dhcp4: no
      addresses:
        - 192.168.236.134/24
      routes:
        - to: default
          via: 192.168.236.2
      nameservers:
        addresses:
          - 8.8.8.8
          - 114.114.114.114
# Apply the changes
sudo netplan apply
sudo systemctl restart systemd-networkd
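# Quick check of the new address, route, and DNS (ens33 as in the netplan file above)
ip addr show ens33
ip route
resolvectl status ens33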

# Add host entries [you can skip this if you have a separate DNS server]
cat << EOF | sudo tee -a /etc/hosts
192.168.236.130 master
192.168.236.131 work1
192.168.236.132 work2
192.168.236.133 work3
192.168.236.134 work4
EOF
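# Quick check that the names resolve locally
getent hosts master work1 work2 work3 work4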

Initialize the master (run on the master only)

kubeadm config print init-defaults --component-configs KubeletConfiguration > kubeadm.yaml

Then adjust the configuration to our own needs:

apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # API address this node advertises (usually the host IP; the master here is 192.168.236.130)
  advertiseAddress: 192.168.236.130
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  # Node name
  name: master
  # Keep regular workloads off the master
  taints:
  - key: "node-role.kubernetes.io/control-plane"
    effect: "NoSchedule"
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.33.0  # adjust this to match the kubeadm/kubelet minor version installed above (v1.34 apt repo)
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  # Pod CIDR used by Flannel
  podSubnet: 10.244.0.0/16
proxy: {}
scheduler: {}
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerRuntimeEndpoint: ""
cpuManagerReconcilePeriod: 0s
crashLoopBackOff: {}
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMaximumGCAge: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
    text:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
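
The file can be sanity-checked before use; recent kubeadm releases include `kubeadm config validate`, which reports unknown fields and obvious mistakes:

kubeadm config validate --config kubeadm.yaml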

Initialize:

# After configuring, try pre-pulling the images (this takes a while)
sudo kubeadm config images pull --config kubeadm.yaml

# Initialize the control plane
sudo kubeadm init --config kubeadm.yaml

# Save the join command printed at the end
kubeadm join 192.168.236.130:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:5bbdc625d05ea1efc03244893fdf59daf91554b2d375ef15512cd71a97b045b2

# Copy the kubeconfig file
sudo mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Verify
kubectl get nodes

# Install the CNI plugin (Flannel)
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Watch the pods come up
kubectl get pods -A
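# Optional: wait for Flannel and CoreDNS to become Ready before joining workers
# (resource names below assume the current upstream kube-flannel manifest)
kubectl -n kube-flannel rollout status daemonset/kube-flannel-ds
kubectl -n kube-system rollout status deployment/coredns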

# On each worker node, run the join command recorded above
sudo kubeadm join 192.168.236.130:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:5bbdc625d05ea1efc03244893fdf59daf91554b2d375ef15512cd71a97b045b2

# Check on the master
kubectl get nodes
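# If the token has expired (default TTL 24h), print a fresh join command on the master:
# sudo kubeadm token create --print-join-command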

Install the dashboard (Rancher)

# Install Helm
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
helm version

# Install a load balancer (MetalLB)
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml
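# The IPAddressPool below is validated by MetalLB's admission webhook, so wait for MetalLB to be Ready first (a sketch)
kubectl -n metallb-system wait --for=condition=Ready pod -l app=metallb --timeout=120s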
# Assign the address pool 192.168.236.200-192.168.236.210
cat <<EOF | kubectl apply -f -
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.236.200-192.168.236.210
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec: {}
EOF



# Install ingress-nginx
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm install ingress-nginx ingress-nginx/ingress-nginx \
  --namespace ingress-nginx \
  --create-namespace \
  --set controller.service.type=LoadBalancer

# Check: kubectl get svc -n ingress-nginx


# Install cert-manager (Rancher's TLS dependency)
kubectl create namespace cert-manager
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager \
  --namespace cert-manager \
  --set installCRDs=true
# Check: kubectl get pods -n cert-manager

# Look up the ingress-nginx IP: kubectl get svc -n ingress-nginx
# Note the EXTERNAL-IP, then install Rancher
kubectl create namespace cattle-system
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
# For production, use this repo instead
# helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
helm repo update
helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.192.168.236.200.sslip.io \
  --set ingress.tls.source=secret \
  --set replicas=1 \
  --set ingress.ingressClassName=nginx
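
# Note: with ingress.tls.source=secret, Rancher expects a TLS secret named tls-rancher-ingress in
# the cattle-system namespace. A minimal self-signed sketch (swap in a real certificate if you have one):
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt \
  -subj "/CN=rancher.192.168.236.200.sslip.io"
kubectl -n cattle-system create secret tls tls-rancher-ingress --cert=tls.crt --key=tls.key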

# Check: kubectl get pods -n cattle-system
# Check: kubectl get ingress -n cattle-system

# Open https://rancher.192.168.236.200.sslip.io in a browser
# Get the bootstrap password
kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{"\n"}}'
# Reset the password (if needed)
# kubectl -n cattle-system get pods -l app=rancher
# kubectl -n cattle-system exec -it rancher-795594bbf7-wq59h -c rancher -- reset-password
# F1CikW1X3fGNG9mu

Enable NodeLocalDNS

# Fetch the manifest
wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml
# Check the kube-dns ClusterIP
kubectl get svc -n kube-system kube-dns

# Substitute the placeholders and apply
sed \
  -e 's/__PILLAR__DNS__SERVER__/10.96.0.10/g' \
  -e 's/__PILLAR__LOCAL__DNS__/169.254.20.10/g' \
  -e 's/__PILLAR__DNS__DOMAIN__/cluster.local/g' \
  nodelocaldns.yaml | kubectl apply -f -

# Check: kubectl get pods -n kube-system -l k8s-app=node-local-dns -o wide
# Switch the cluster to use NodeLocalDNS

kubectl -n kube-system get configmap kube-proxy -o yaml
# Check what mode is set; empty means iptables


# Run on every node
sudo sed -i 's/10.96.0.10/169.254.20.10/g' /var/lib/kubelet/config.yaml
sudo systemctl daemon-reload
sudo systemctl restart kubelet
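# Confirm kubelet now hands out the node-local cache address
sudo grep -A1 clusterDNS /var/lib/kubelet/config.yaml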

Verify that NodeLocalDNS is in effect

# dns-test.yaml
apiVersion: v1
kind: Pod
metadata:
  name: dns-test
spec:
  containers:
  - name: test
    image: busybox
    args: ["sleep", "3600"]

# Save the above as dns-test.yaml, then apply it
kubectl apply -f ./dns-test.yaml

# Exec into the container and check which resolver it is using
ubuntu@master:~/k8s-init$ kubectl exec -it dns-test -- /bin/sh
/ # cat /etc/resolv.conf 
search default.svc.cluster.local svc.cluster.local cluster.local
nameserver 169.254.20.10
options ndots:5
/ # exit
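
# Optional lookup before deleting the pod; the answer should come from 169.254.20.10
# (using the FQDN, since busybox nslookup handling of search domains varies by version):
kubectl exec -it dns-test -- nslookup kubernetes.default.svc.cluster.local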

ubuntu@master:~/k8s-init$ kubectl delete -f ./dns-test.yaml 
pod "dns-test" deleted from default namespace