
1. Environment Preparation

Role            IP              Hostname   Installed components   Spec
Control plane   192.168.10.10   master                            2 CPU / 4 GB RAM
Worker node 1   192.168.10.11   node1                             2 CPU / 4 GB RAM
Worker node 2   192.168.10.12   node2                             2 CPU / 4 GB RAM

Kubernetes documentation: https://kubernetes.io/zh-cn/docs

GitHub releases: https://github.com/kubernetes/kubernetes/releases

1.1 Basic Environment Preparation

# Run these steps on both the control-plane and the worker nodes
# 1. Set the hostname (use node1 / node2 on the corresponding workers)
hostnamectl set-hostname master && bash

# 2. Add all nodes to /etc/hosts on every machine
cat >> /etc/hosts <<EOF
192.168.10.10 master
192.168.10.11 node1
192.168.10.12 node2
EOF

# 3. Set up passwordless SSH trust from the master to the workers
ssh-keygen -t rsa
ssh-copy-id node1
ssh-copy-id node2

# 4. Adjust kernel parameters
modprobe br_netfilter
echo "modprobe br_netfilter" >> /etc/profile   # reload the module automatically at login

cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl -p /etc/sysctl.d/k8s.conf
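
Optionally, verify that the module is loaded and the parameters took effect (both sysctl values should be 1):
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward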

# 5. Stop and disable the firewall
systemctl stop firewalld ; systemctl disable firewalld

# 6. Disable SELinux; the change to the config file takes effect after a reboot
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

# 8.配置kubernets源
[root@master yum.repos.d]# cat kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.33/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.33/rpm/repodata/repomd.xml.key
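
Optionally, refresh the metadata to confirm the new repository is reachable:
dnf makecache
dnf repolist | grep -i kubernetes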

# 8. Time synchronization
dnf install chrony -y
vi /etc/chrony.conf
server ntp4.aliyun.com iburst            # add this NTP server
# comment out the default pool line: #pool 2.rocky.pool.ntp.org iburst
systemctl enable chronyd
systemctl start chronyd
# Step the clock immediately
chronyc makestep
# Verify the synchronization status
chronyc tracking
chronyc sources

# chronyc tracking shows how the system clock is being tracked, including offset and drift rate.
# chronyc sources lists the NTP servers currently in use and their status.
# Check the current time with: timedatectl

# 9. Docker is optional (containerd is used as the container runtime below)

# 10. Disable swap
swapoff -a        # and comment out the swap entry in /etc/fstab to make the change permanent
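
A quick way to confirm swap is fully off (swapon prints nothing and free reports 0B of swap):
swapon --show
free -h | grep -i swap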

# 11. Load the IPVS kernel modules

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

chmod +x /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
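
Optionally, confirm the IPVS modules are loaded:
lsmod | grep -e ip_vs -e nf_conntrack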

1.2 Install the Base Software Packages

# 1. Install base packages
yum install -y device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl unzip libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipset ipvsadm conntrack telnet

# 2. Install and configure containerd (version 1.6 or later)
yum install -y containerd
# installed version: containerd.io-1.7.27-3.1.el9.x86_64
# Generate the containerd configuration file
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml

# 3. Edit the configuration file
vim /etc/containerd/config.toml
SystemdCgroup = true   # change false to true
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.10"

# 4. Enable containerd to start at boot
systemctl enable containerd --now

# 5. Configure /etc/crictl.yaml
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

systemctl restart containerd
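
A quick sanity check that containerd is running and crictl can talk to it:
systemctl is-active containerd
crictl version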

# 6. Configure registry mirrors
# Edit /etc/containerd/config.toml and set
config_path = "/etc/containerd/certs.d"

mkdir /etc/containerd/certs.d/docker.io/ -p
vim /etc/containerd/certs.d/docker.io/hosts.toml
server = "https://docker.io"

# each mirror gets its own [host."..."] table
[host."https://pft7f97f.mirror.aliyuncs.com"]
  capabilities = ["pull"]
[host."https://registry.docker-cn.com"]
  capabilities = ["pull"]
[host."https://docker.mirrors.ustc.edu.cn"]
  capabilities = ["pull"]

systemctl restart containerd
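
To confirm the mirror configuration works, a small public image can be pulled through crictl (docker.io/library/busybox is only an example image):
crictl pull docker.io/library/busybox:latest
crictl images | grep busybox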

2. Install Kubernetes

2.1 Install the Kubernetes Packages

# 1. Install the Kubernetes packages (needed on both master and worker nodes)
yum install -y kubelet-1.33.0 kubeadm-1.33.0 kubectl-1.33.0
systemctl enable kubelet

Note: what each package does
kubeadm: a tool for bootstrapping and initializing the Kubernetes cluster
kubelet: installed on every node; it starts Pods. With a kubeadm-based install, the control-plane and worker components themselves run as Pods, so kubelet must be running to start them
kubectl: used to deploy and manage applications, inspect resources, and create, delete, or update components
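
After installation, the versions can be confirmed on every node:
kubeadm version -o short
kubelet --version
kubectl version --client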

2.2 Generate and Adjust the kubeadm Init Configuration File

# 1. Set the container runtime endpoint for crictl (master and worker nodes)
crictl config runtime-endpoint unix:///run/containerd/containerd.sock

# 2. Use a configuration file to initialize Kubernetes (on the master)
kubeadm config print init-defaults > kubeadm.yaml

Reference: https://kubernetes.io/zh-cn/docs/reference/setup-tools/kubeadm/kubeadm-config/#cmd-config-print-init-defaults  # search the official docs for "kubeadm config"

2.3 Edit the Init Configuration File kubeadm.yaml

[root@master ~]# cat kubeadm.yaml 
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.10.10
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: master
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.33.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
proxy: {}
scheduler: {}
---
# Added: enable IPVS mode for kube-proxy
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
# Added: declare systemd as the cgroup driver
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd


# References
cgroup driver configuration:
https://kubernetes.io/zh-cn/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/
IPVS mode: https://kubernetes.io/zh-cn/docs/reference/config-api/kube-proxy-config.v1alpha1/#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration
Certificates: https://kubernetes.io/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/
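
If your kubeadm release supports it, the edited file can be checked for basic errors before it is used:
kubeadm config validate --config kubeadm.yaml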

2.4 Pull the Images Required by the Cluster

# List the images that will be pulled
[root@master ~]# kubeadm config images list --config=kubeadm.yaml 
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.33.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.33.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.33.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.33.0
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.12.0
registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.21-0


# Pull the images
[root@master ~]# kubeadm config images pull --config=kubeadm.yaml 
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.33.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.33.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.33.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.33.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.12.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.21-0

[root@master ~]# crictl images
IMAGE                                                                         TAG                 IMAGE ID            SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns                   v1.12.0             1cf5f116067c6       20.9MB
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd                      3.5.21-0            499038711c081       58.9MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver            v1.33.0             6ba9545b2183e       30.1MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager   v1.33.0             1d579cb6d6967       27.6MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy                v1.33.0             f1184a0bd7fe5       31.9MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler            v1.33.0             8d72586a76469       21.8MB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause                     3.10                873ed75102791       320kB

2.5 Initialize the Cluster

Reference: https://kubernetes.io/zh-cn/docs/setup/production-environment/tools/kubeadm/high-availability/

[root@master ~]# kubeadm init --config=kubeadm.yaml --ignore-preflight-errors=SystemVerification

Initialization output:

[init] Using Kubernetes version: v1.33.0
[preflight] Running pre-flight checks
    [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
W0517 15:13:42.859925    1821 checks.go:846] detected that the sandbox image "registry.aliyuncs.com/google_containers/pause:3.10" of the container runtime is inconsistent with that used by kubeadm.It is recommended to use "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10" as the CRI sandbox image.
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master] and IPs [10.96.0.1 192.168.10.10]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master] and IPs [192.168.10.10 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master] and IPs [192.168.10.10 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 1.503766856s
[control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
[control-plane-check] Checking kube-apiserver at https://192.168.10.10:6443/livez
[control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
[control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
[control-plane-check] kube-controller-manager is healthy after 3.923462321s
[control-plane-check] kube-scheduler is healthy after 4.757877626s
[control-plane-check] kube-apiserver is healthy after 6.501341095s
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.10.10:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:768a3c5684197659207c1f7d6d90093c7e21908833f5e0f5fe989831311cc94a 
Post-initialization steps (configure kubectl access on the master):
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE    VERSION
master   NotReady   control-plane   4m8s   v1.33.0
The node stays NotReady until a network plugin is deployed (Calico, section 3 below).

2.6 Add the Worker Nodes

# 1. Join the worker nodes (run on node1 and node2)
kubeadm join 192.168.10.10:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:768a3c5684197659207c1f7d6d90093c7e21908833f5e0f5fe989831311cc94a
[root@node1 ~]# kubeadm join 192.168.10.10:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:768a3c5684197659207c1f7d6d90093c7e21908833f5e0f5fe989831311cc94a
[preflight] Running pre-flight checks
[preflight] Reading configuration from the "kubeadm-config" ConfigMap in namespace "kube-system"...
[preflight] Use 'kubeadm init phase upload-config --config your-config-file' to re-upload it.
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 502.403248ms
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

# If the join fails, fix the problem and then reset the node before trying again
kubeadm reset
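
If the bootstrap token has expired (the TTL in kubeadm.yaml is 24h), a fresh join command can be generated on the master:
kubeadm token create --print-join-command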

Check the node status:

[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE   VERSION
master   NotReady   control-plane   18m   v1.33.0
node1    NotReady   <none>          87s   v1.33.0


# Label the worker node
[root@master ~]# kubectl label nodes node1 node-role.kubernetes.io/work=work
node/node1 labeled

[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE     VERSION
master   NotReady   control-plane   19m     v1.33.0
node1    NotReady   work            2m31s   v1.33.0
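
The label can be removed again with the same syntax (a trailing dash deletes the key):
kubectl label nodes node1 node-role.kubernetes.io/work-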

3. Install the Calico Network Plugin

Check the Kubernetes versions supported by Calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements

Calico manifest download instructions: https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises

On-premises, 50 nodes or fewer: curl https://raw.githubusercontent.com/projectcalico/calico/v3.30.0/manifests/calico.yaml -O

Calico images on GitHub releases: https://github.com/projectcalico/calico/releases

For larger clusters, use the etcd datastore variant of the manifest.

# Edit calico.yaml: IP_AUTODETECTION_METHOD controls how the node IP address is detected. By default the IP of the first
# network interface is used; on nodes with multiple NICs a regular expression can pick the right one, e.g. "interface=eth.*"
# selects the interface whose name starts with eth.

# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
  value: "Always"
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens33"
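
One way to locate the calico-node environment block before editing: CALICO_IPV4POOL_IPIP already exists in the stock calico.yaml, and IP_AUTODETECTION_METHOD is usually added right next to it.
grep -n "CALICO_IPV4POOL_IPIP" calico.yaml
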
[root@master ~]# kubectl apply -f calico.yaml 
poddisruptionbudget.policy/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
serviceaccount/calico-node created
serviceaccount/calico-cni-plugin created
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpfilters.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/stagedglobalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/stagedkubernetesnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/stagednetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/tiers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/adminnetworkpolicies.policy.networking.k8s.io created
customresourcedefinition.apiextensions.k8s.io/baselineadminnetworkpolicies.policy.networking.k8s.io created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrole.rbac.authorization.k8s.io/calico-cni-plugin created
clusterrole.rbac.authorization.k8s.io/calico-tier-getter created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-cni-plugin created
clusterrolebinding.rbac.authorization.k8s.io/calico-tier-getter created
daemonset.apps/calico-node created
deployment.apps/calico-kube-controllers created
# Issue:
The Calico images may not be downloadable because of network restrictions, so it is best to prepare the Calico images in advance and import them into containerd
# ctr -n=k8s.io images import calico-v3.30.1.tar.gz
unpacking docker.io/calico/node:v3.30.1 (sha256:0cf09f6e96be59fe99d8bcc862800727b574077c17a033a3c243dbfeb7ab3500)...done
unpacking docker.io/calico/kube-controllers:v3.30.1 (sha256:2003eaf88588c698d080432dcbbd53c24de3d13984606d1150d11823634540ce)...done
unpacking docker.io/calico/cni:v3.30.1 (sha256:4a2f7ded2035918a1d702b43557e822a7254d93b5ce58693c9c1205420474b37)...done
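
After the import, the images should be visible through the CRI:
crictl images | grep calico
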
# kubectl get nodes
NAME     STATUS   ROLES           AGE    VERSION
master   Ready    control-plane   111m   v1.33.0
node1    Ready    stage,work      95m    v1.33.0
node2    Ready    stage           96m    v1.33.0

# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-64b69c8f54-dgf58   1/1     Running   0          27s
calico-node-bvt2w                          1/1     Running   0          27s
calico-node-rgrzd                          1/1     Running   0          27s
calico-node-tcbd6                          1/1     Running   0          27s
coredns-746c97786-qj9fr                    1/1     Running   0          111m
coredns-746c97786-xms8f                    1/1     Running   0          111m
etcd-server104                             1/1     Running   0          111m
kube-apiserver-server104                   1/1     Running   0          111m
kube-controller-manager-server104          1/1     Running   0          111m
kube-proxy-k2wdw                           1/1     Running   0          111m
kube-proxy-lzb9b                           1/1     Running   0          95m
kube-proxy-qv8wt                           1/1     Running   0          96m
kube-scheduler-server104                   1/1     Running   0          111m

# Test that pod networking and DNS work

# kubectl run busybox --image docker.io/library/busybox:latest --image-pull-policy=IfNotPresent --restart=Never --rm -it -- sh
If you don't see a command prompt, try pressing enter.
/ # ping baidu.com
PING baidu.com (182.61.244.181): 56 data bytes
64 bytes from 182.61.244.181: seq=0 ttl=49 time=9.413 ms
64 bytes from 182.61.244.181: seq=1 ttl=49 time=9.046 ms
64 bytes from 182.61.244.181: seq=2 ttl=49 time=9.393 ms
64 bytes from 182.61.244.181: seq=3 ttl=49 time=20.877 ms
^C
--- baidu.com ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 9.046/12.182/20.877 ms
/ # nslookup  kubernetes.default.svc.cluster.local 
Server:         10.96.0.10
Address:        10.96.0.10:53


Name:   kubernetes.default.svc.cluster.local
Address: 10.96.0.1