使用 kubeadm 安装 Kubernetes

kubernetes CHANGELOG:https://github.com/kubernetes/kubernetes/releases

vim ~/.bashrc 
source ~/.bashrc

#!/bin/bash

# Map of upstream hosts -> mirror prefix. When a URL's host matches a key,
# the mirror prefix is prepended to the whole URL, e.g.
#   https://github.com/a/b -> https://ghfast.top/https://github.com/a/b
# Consumed by process() below; extend it with MIRROR_MAP[host]=prefix.
declare -A MIRROR_MAP=(
    [github.com]=https://ghfast.top/
    [raw.githubusercontent.com]=https://ghfast.top/
    [gist.github.com]=https://ghfast.top/
    [gist.githubusercontent.com]=https://ghfast.top/
)
process() { # Rewrite a URL through MIRROR_MAP; prints the (possibly prefixed) URL on stdout.
    local u=$1 d prefix=""
    # Extract the host from an http(s) URL; the [^:/] class already stops
    # before any ":port", so no further stripping is needed.
    # NOTE: d and prefix are local and reset on every call — the original
    # leaked d as a global, so a non-matching URL reused the previous host.
    if [[ $u =~ ^https?://([^:/]+) ]]; then
        d=${BASH_REMATCH[1]}
        prefix=${MIRROR_MAP[$d]-} # empty when the host has no mirror entry
    fi
    echo "${prefix}$u" # unmatched URLs pass through unchanged
    # Debug trace goes to stderr so $(process ...) captures only the URL.
    printf '\033[33mDEBUG: %-25s -> %s\033[0m\n' "$u" "${prefix}$u" >&2
}
curl() { # Transparent wrapper: rewrite every URL-looking argument, then run the real curl.
    local rewritten=() arg
    for arg in "$@"; do
        if [[ $arg == http* ]]; then
            arg=$(process "$arg")
        fi
        rewritten+=("$arg")
    done
    # 'command' bypasses this function and invokes the actual binary.
    command curl "${rewritten[@]}"
}
wget() { # Same URL-rewriting wrapper as curl(), for wget.
    local out=() w
    for w in "$@"; do
        [[ $w == http* ]] && w=$(process "$w")
        out+=("$w")
    done
    command wget "${out[@]}" # run the real wget, not this function
}
kubectl() { # Wrapper for kubectl: rewrite only the URL operand that follows -f/--filename.
    # 'i' is now local too (the curl/wget wrappers already declared it);
    # previously it leaked into the caller's shell environment.
    local a=() f=0 i
    for i in "$@"; do
        if (( f )); then
            # This argument is the -f/--filename operand: mirror it when it is a URL.
            [[ $i == http* ]] && i=$(process "$i")
            f=0
        elif [[ $i =~ ^(-f|--filename)$ ]]; then
            f=1 # the next argument is the filename/URL operand
        fi
        a+=("$i") # array keeps arguments with spaces/special chars intact
    done
    command kubectl "${a[@]}" # 'command' prevents recursive self-invocation
}

# Add a new mirror entry (the map is named MIRROR_MAP, as declared above).
MIRROR_MAP[xxx.com]=https://xxx.mirror.com/
# Bypass the mirror prefix for a single invocation:
/usr/bin/curl https://github.com/... # method 1: a full path skips the shell function
\curl https://github.com/... # method 2: a single backslash escapes the function and runs the real command
unset -f curl wget kubectl # method 3: remove the wrapper functions entirely
View Code

 

一、安装 container-runtimes (所有节点)

使用 Docker,安装参考:https://www.cnblogs.com/jhxxb/p/11410816.html。自 1.24 版起,Dockershim 已从 Kubernetes 项目中移除,所以还须安装 cri-dockerd

# 下载安装
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.1/cri-dockerd-0.3.1-3.el7.x86_64.rpm
sudo yum install -y cri-dockerd-0.3.1-3.el7.x86_64.rpm

# 修改启动文件,在 ExecStart 后面追加
# --network-plugin=cni,告诉容器使用 kubernetes 的网络接口
# --pod-infra-container-image,覆盖默认的沙盒(pause)镜像
sudo sed -i 's,^ExecStart.*,& --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9,' /usr/lib/systemd/system/cri-docker.service

# 启动 cri-dockerd,依赖 docker,需要先安装 docker
sudo systemctl daemon-reload
sudo systemctl enable --now cri-docker.service
sudo systemctl enable --now cri-docker.socket

# 查看 cri-dockerd 状态
systemctl status cri-docker.service
View Code

使用 containerd

curl -LOJ https://github.com/containerd/containerd/releases/download/v2.0.4/containerd-2.0.4-linux-amd64.tar.gz
sudo tar Cxzvf /usr/local containerd-2.0.4-linux-amd64.tar.gz

sudo mkdir -p /usr/local/lib/systemd/system
curl -LOJ https://raw.githubusercontent.com/containerd/containerd/main/containerd.service && sudo cp containerd.service /usr/local/lib/systemd/system/
sudo systemctl daemon-reload && sudo systemctl enable --now containerd

curl -LOJ https://github.com/opencontainers/runc/releases/download/v1.2.6/runc.amd64
sudo install -m 755 runc.amd64 /usr/local/sbin/runc

curl -LOJ https://github.com/containernetworking/plugins/releases/download/v1.6.2/cni-plugins-linux-amd64-v1.6.2.tgz
sudo mkdir -p /opt/cni/bin && sudo tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.6.2.tgz

sudo mkdir -p /etc/containerd && sudo tee -a /etc/containerd/config.toml << EOF
version = 3
[plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes.runc.options]
  SystemdCgroup = true
[plugins."io.containerd.cri.v1.images".registry]
  config_path = "/etc/containerd/certs.d"
EOF

sudo mkdir -p /etc/containerd/certs.d/docker.io && sudo tee /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://docker.linkos.org"]
  capabilities = ["pull", "resolve"]
[host."https://dockerproxy.com"]
  capabilities = ["pull", "resolve"]
EOF
sudo mkdir -p /etc/containerd/certs.d/registry.k8s.io && sudo tee /etc/containerd/certs.d/registry.k8s.io/hosts.toml << EOF
server = "https://registry.k8s.io"
[host."https://k8s.nju.edu.cn"]
  capabilities = ["pull", "resolve"]
EOF
sudo mkdir -p /etc/containerd/certs.d/ghcr.io && sudo tee /etc/containerd/certs.d/ghcr.io/hosts.toml << EOF
server = "https://ghcr.io"
[host."https://ghcr.nju.edu.cn"]
  capabilities = ["pull", "resolve"]
EOF

containerd -v
sudo systemctl restart containerd
containerd config dump | grep SystemdCgroup

 

二、安装 kubeadm (所有节点)

https://developer.aliyun.com/mirror/kubernetes

必要设置

# 关闭 swap,https://unix.stackexchange.com/questions/671940/disabling-swap-on-debian-permanently
sudo swapoff -a
sudo sed -ri 's/.*swap.*/#&/' /etc/fstab
systemctl status *swap # systemctl --type swap --all
sudo systemctl mask dev-sda3.swap # 替换为对应的服务名
free -g

# 启用 IP 转发
# echo 'net.ipv4.ip_forward=1' | sudo tee /etc/sysctl.d/k8s.conf
sudo sed -i 's/^#\s*\(net.ipv4.ip_forward=1\)/\1/' /etc/sysctl.conf
sudo sysctl -p
# sudo sysctl --system

安装

sudo apt-get update
# apt-transport-https 可能是一个虚拟包(dummy package);如果是的话,你可以跳过安装这个包
sudo apt-get install -y apt-transport-https ca-certificates curl gpg
# 如果 `/etc/apt/keyrings` 目录不存在,则应在 curl 命令之前创建它,请阅读下面的注释。
# sudo mkdir -p -m 755 /etc/apt/keyrings
curl -fsSL https://mirrors.ustc.edu.cn/kubernetes/core:/stable:/v1.32/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# 此操作会覆盖 /etc/apt/sources.list.d/kubernetes.list 中现存的所有配置。
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.ustc.edu.cn/kubernetes/core:/stable:/v1.32/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl

kubectl 自动补全:https://github.com/scop/bash-completion

sudo apt install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
sudo mkdir -p /etc/bash_completion.d/
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl
View Code

查看

# 开机启动,sudo systemctl enable kubelet && sudo systemctl start kubelet
sudo systemctl enable --now kubelet
# 查看 kubelet
systemctl status kubelet
kubelet --version
cat /var/lib/kubelet/config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: "systemd"
# 重新启动 kubelet
sudo systemctl daemon-reload
sudo systemctl restart kubelet
View Code

 

三、使用 kubeadm 创建集群(Master 安装,其它节点加入)

初始化控制平面节点(Master 节点初始化安装 kubernetes)

镜像:https://github.com/zhangguanzhang/google_containers、https://doc.nju.edu.cn/books/e1654、https://github.com/DaoCloud/public-image-mirror(支持搜索)

确保主机名不是 localhost,且所有节点都已写入 /etc/hosts 文件,且可以 ping 通。kubeadm init 参数:https://kubernetes.io/zh-cn/docs/reference/setup-tools/kubeadm/kubeadm-init

sudo kubeadm reset --force
# --apiserver-advertise-address=MasterIP
# --image-repository=registry.aliyuncs.com/google_containers
# --service-cidr=10.96.0.0/12
# --pod-network-cidr,A 类私有网段:10.0.0.0/8,本地回路网段:127.0.0.0/8,B 类私有网段:172.16.0.0/12,C 类私有网段:192.168.0.0/16
# --cri-socket unix:///var/run/cri-dockerd.sock
# 不指定 --kubernetes-version,默认会从 https://dl.k8s.io/release/stable-1.txt 获取最新版本号
sudo kubeadm init --apiserver-advertise-address=x.x.x.x --pod-network-cidr=192.168.0.0/16
# kubeadm config print init-defaults > kubeadm-init.yaml
# kubeadm config images list --config kubeadm-init.yaml
# kubeadm config images pull --config kubeadm-init.yaml
# sudo kubeadm init --config kubeadm-init.yaml | tee kubeadm-init.log

初始化过程中若下载失败,可暂时下载 latest,然后打 tag,例如 coredns:v1.8.4

# 查看所需镜像
kubeadm config images list
docker pull registry.aliyuncs.com/google_containers/coredns
docker tag registry.aliyuncs.com/google_containers/coredns:latest registry.aliyuncs.com/google_containers/coredns:v1.8.4

kubeadm init 好后按照提示执行:https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#更多信息

# 要使非 root 用户可以运行 kubectl,请运行以下命令, 它们也是 kubeadm init 输出的一部分:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# 或者,如果你是 root 用户,则可以运行:
export KUBECONFIG=/etc/kubernetes/admin.conf

安装 Pod 网络附加组件(Master 节点安装 Pod 网络)

集群只能安装一个 Pod 网络,通过 kubectl get pods --all-namespaces 检查 CoreDNS Pod 是否 Running 来确认其是否正常运行。一旦 CoreDNS Pod 启用并运行,就让 Node 可以加入 Master 了

# 使用 Flannel
# kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml

# 使用 Calico,https://projectcalico.docs.tigera.io/getting-started/kubernetes/quickstart
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/tigera-operator.yaml
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.3/manifests/custom-resources.yaml # 其中 cidr 对应 kubeadm init 时的 --pod-network-cidr
curl -LOJ https://github.com/projectcalico/calico/releases/download/v3.29.3/calicoctl-linux-amd64 && sudo cp calicoctl-linux-amd64 /usr/local/sbin/kubectl-calico && sudo chmod +x /usr/local/sbin/kubectl-calico
kubectl calico -h
# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml

kubectl get ns
kubectl get pod -o wide -A # kubectl get pods --all-namespaces # 查看所有名称空间的 pods
watch kubectl get pods -n calico-system # 查看指定名称空间的 pods

控制平面节点隔离(允许 Master 节点部署 Pod (可选))

# 允许 Master 节点部署 Pod
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
# 如果不允许调度
kubectl taint nodes master1 node-role.kubernetes.io/control-plane=:NoSchedule # 自 1.24 起污点键为 control-plane,不再是 master
# 污点可选参数:NoSchedule: 一定不能被调度,PreferNoSchedule: 尽量不要调度,NoExecute: 不仅不会调度, 还会驱逐 Node 上已有的 Pod

添加工作节点(其它节点加入主节点)

# 其它节点加入,token 会失效
sudo kubeadm join x.x.x.x:6443 --token tqaitp.3imn92ur339n4olo --discovery-token-ca-cert-hash sha256:fb3da80b6f1dd5ce6f78cb304bc1d42f775fdbbdc80773ff7c59
sudo journalctl -f -u kubelet

# 如果超过 2 小时忘记了令牌
# 打印新令牌
kubeadm token create --print-join-command
# 创建一个永不过期的令牌
kubeadm token create --ttl 0 --print-join-command

# 主节点监控 pod 进度,等待 3-10 分钟,完全都是 running 以后继续
watch kubectl get pod -n kube-system -o wide
# 等到所有的 status 都变为 running
kubectl get nodes

到这里 K8s 集群就安装完成了,下面的不是必须步骤

 

四、可视化

Kubernetes Dashboard

https://github.com/kubernetes/dashboard & https://github.com/kubernetes/dashboard/issues/8842#issuecomment-2026942971

curl -LOJ https://mirrors.huaweicloud.com/helm/v3.17.2/helm-v3.17.2-linux-amd64.tar.gz # https://helm.sh/zh/docs/intro/install/#用二进制版本安装
sudo tar -xzf helm-v3.17.2-linux-amd64.tar.gz --transform 's|linux-amd64/helm|helm|' -C /usr/local/bin
helm completion bash | sudo tee /etc/bash_completion.d/helm # https://helm.sh/zh/docs/helm/helm_completion_bash/

curl -LOJ https://github.com/kubernetes/dashboard/releases/download/kubernetes-dashboard-7.11.1/kubernetes-dashboard-7.11.1.tgz
helm show values kubernetes-dashboard-7.11.1.tgz
helm install kubernetes-dashboard kubernetes-dashboard-7.11.1.tgz --create-namespace --namespace kubernetes-dashboard --set kong.enabled=false,ingress.enabled=true
helm uninstall kubernetes-dashboard -n kubernetes-dashboard

kubectl -n kubernetes-dashboard create serviceaccount kubernetes-dashboard-admin # 新建 kubernetes-dashboard-admin 账户
kubectl create clusterrolebinding kubernetes-dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard-admin # 给 kubernetes-dashboard-admin 账户 cluster-admin 角色
kubectl -n kubernetes-dashboard create token kubernetes-dashboard-admin --duration=720h # 获取 kubernetes-dashboard-admin 账户 Token,有过期时间

# https://github.com/kubernetes/dashboard/blob/master/docs/user/accessing-dashboard/README.md
kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-web 8000:8000 --address 0.0.0.0 # Now access Dashboard at: http://x.x.x.x:8000
# kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443 --address 0.0.0.0 # Now access Dashboard at: https://x.x.x.x:8443

可以创建不过期的 token

# 用绑定 ServiceAccount 的 Secret 创建 token(保存在 Secret 中)
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: kubernetes-dashboard-admin-secret
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "kubernetes-dashboard-admin"
type: kubernetes.io/service-account-token  
EOF

kubectl apply -f - << EOF
apiVersion: v1
kind: Secret
metadata:
  name: kubernetes-dashboard-admin-secret
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "kubernetes-dashboard-admin"
type: kubernetes.io/service-account-token  
EOF

# 获取不过期 token
kubectl -n kubernetes-dashboard get secret kubernetes-dashboard-admin-secret -o jsonpath="{.data.token}" | base64 -d | xargs echo
kubectl -n kubernetes-dashboard get secret kubernetes-dashboard-admin-secret -o go-template="{{.data.token | base64decode}}" | xargs echo
kubectl -n kubernetes-dashboard describe secret kubernetes-dashboard-admin-secret

# 设置 kubernetes-dashboard 不过期,添加 --token-ttl=0
# kubectl -n kubernetes-dashboard edit deployment kubernetes-dashboard-web
View Code

其它设置

# kubectl -n kubernetes-dashboard edit service kubernetes-dashboard-kong-proxy # 修改 type: ClusterIP 为 type: NodePort 或 LoadBalancer
kubectl patch -n kubernetes-dashboard service kubernetes-dashboard-kong-proxy -p '{"spec": {"type": "LoadBalancer"}}'
kubectl get service -A # 查看 IP 和 Port,获取访问地址,这里为 https://<master-ip>:32547
# NAMESPACE              NAME                                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
# kubernetes-dashboard   kubernetes-dashboard-kong-proxy        NodePort    10.104.57.106    <none>        443:32547/TCP            154m

# https://segmentfault.com/a/1190000023130407
kubectl proxy --port=8001 --address='0.0.0.0' --accept-hosts='^.*'
# Now access Dashboard at: http://x.x.x.x:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard-kong-proxy:443/proxy

# 修改权限:新增角色绑定,修改原有绑定角色的权限
# 新增角色绑定
kubectl create clusterrolebinding kubernetes-dashboard-operator --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard-kong
# 修改原有绑定角色的权限,先查看账户(ServiceAccount)绑定的角色(ClusterRoleBinding、RoleBinding)
kubectl get clusterrolebinding,rolebinding -A -o jsonpath='{range .items[?(@.subjects[].name=="kubernetes-dashboard-kong")]}{.roleRef.kind}:{.roleRef.name}{"\n"}{end}'
# 再修改,让角色权限和内置的 cluster-admin 角色权限一样
kubectl get ClusterRole cluster-admin -o yaml
kubectl edit ClusterRole kubernetes-dashboard

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml # 安装旧版
View Code

Headlamp

https://headlamp.dev

kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/headlamp/main/kubernetes-headlamp.yaml
kubectl -n kube-system create serviceaccount headlamp-admin
kubectl create clusterrolebinding headlamp-admin --serviceaccount=kube-system:headlamp-admin --clusterrole=cluster-admin
kubectl create token headlamp-admin -n kube-system

集成 metrics-server

用来监控 pod、node 资源情况(默认只有 cpu、memory 信息),并在 Kubernetes Dashboard 上显示,更多信息可用 Prometheus

kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# 添加 --kubelet-insecure-tls,不验证客户端证书,或者 serverTLSBootstrap: true,https://github.com/kubernetes-sigs/metrics-server/blob/master/README.md#requirements
# kubectl edit deployment $(kubectl get deployment -n kube-system | grep metrics-server | awk '{print $1}') -n kube-system
# kubectl patch deployment metrics-server -n kube-system --patch '{"spec":{"template":{"spec":{"containers":[{"name":"metrics-server","args":["--kubelet-insecure-tls"]}]}}}}'
kubectl patch deployment metrics-server -n kube-system --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--kubelet-insecure-tls"}]'

# 查看日志(镜像下载可能会失败)
kubectl -n kube-system describe pod $(kubectl get pods -n kube-system | grep metrics-server | awk '{print $1}')
# 安装好后查看
kubectl top nodes
kubectl top pods -A

其它 UI

https://github.com/derailed/k9s/releases & https://k8slens.dev

kubesphere:https://kubesphere.io/zh/docs/quick-start/minimal-kubesphere-on-k8s,若安装过程中有 pod 一直无法启动,可看看是否为 etcd 监控证书找不到

# 证书在下面路径
# --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
# --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt 
# --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
# 创建证书:
kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs  --from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt  --from-file=etcd-client.crt=/etc/kubernetes/pki/apiserver-etcd-client.crt  --from-file=etcd-client.key=/etc/kubernetes/pki/apiserver-etcd-client.key

# 创建后可以看到 kube-etcd-client-certs
ps -ef | grep kube-apiserver
View Code

kuboard:https://kuboard.cn

 

五、持久卷(PV、PVC),这里使用 NFS

安装 NFS 服务端(所有节点,因为需要执行 mount -t nfs)

sudo yum install -y nfs-utils
# 执行命令 vi /etc/exports,创建 exports 文件,文件内容如下(使用 no_root_squash 不安全):
sudo bash -c 'echo "/ifs/ *(rw,sync,no_wdelay,no_root_squash)" > /etc/exports'

# 创建共享目录,设置所属用户与组
sudo mkdir -p /ifs/kubernetes
sudo chown -R nfsnobody:nfsnobody /ifs/

# 启动 nfs 服务
sudo systemctl enable --now rpcbind
sudo systemctl enable --now nfs-server

# -a 全部挂载或者全部卸载,-r 重新挂载,-u 卸载某一个目录,-v 显示共享目录
sudo exportfs -arv
# 检查配置是否生效,会输出 /ifs <world>
sudo exportfs

安装 NFS 客户端(非必须),K8S 直连 NFS 服务端。

# 注意服务端 NFS 端口放行,否则客户端无法连接,安装客户端
sudo yum install -y nfs-utils

# 检查 NFS 服务器端是否有设置共享目录
showmount -e NFS服务端IP
# 输出结果如下所示
# Export list for 172.26.165.243:
# /nfs *

# 执行以下命令挂载 nfs 服务器上的共享目录到本机路径 ~/nfsmount
sudo mkdir ~/nfsmount
sudo mount -t nfs NFS服务端IP:/nfs/data ~/nfsmount
# 卸载
sudo umount ~/nfsmount/

# 写入一个测试文件
echo "hello nfs server" > ~/nfsmount/test.txt
# 在 NFS 服务器上查看,验证文件写入成功
cat /nfs/data/test.txt
View Code

创建 PV 测试

https://github.com/kubernetes/examples/blob/master/staging/volumes/nfs/nfs-pv.yaml

PV 没有 namespace 租户的概念,PVC 有,当需要在某个 namespace 下使用 PVC 时,需要指定该 PVC 所属 namespace

vim nfs-pv.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs
spec:
  capacity:
    storage: 100Mi
  accessModes:
    - ReadWriteMany
  nfs:
    server: 10.74.2.71
    path: "/nfs/data"

kubectl create -f nfs-pv.yaml
View Code

创建 PVC 测试

https://github.com/kubernetes/examples/blob/master/staging/volumes/nfs/nfs-pvc.yaml

vim nfs-pvc.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs
  namespace: default
spec:
  accessModes:
    - ReadWriteMany # https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#access-modes
  storageClassName: "" # 此处须显式设置空字符串,否则会被设置为默认的 StorageClass
  resources:
    requests:
      storage: 10Mi

kubectl create -f nfs-pvc.yaml
View Code

 

六、存储类,这里使用 NFS

默认不支持 NFS,需要插件:https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner

运维人员创建 PV,开发操作 PVC。在大规模集群中可能会有很多 PV,如果这些 PV 都需要运维手动来处理,也是一件很繁琐的事情,所以就有了动态供给(Dynamic Provisioning)概念

上面创建的 PV 都是静态供给方式(Static Provisioning),而动态供给的关键就是 StorageClass,它的作用就是创建 PV 模板

创建的 StorageClass 里面需要定义 PV 属性,如存储类型、大小等,另外创建这种 PV 需要用到存储插件。最终效果是用户创建 PVC,里面指定存储类(StorageClass),如果符合我们定义的 StorageClass,则会为其自动创建 PV 并进行绑定

# 先创建授权,创建 PV 需要相关权限,命名空间默认为 default,若为其它名字,需要下载后修改再创建
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/nfs-subdir-external-provisioner/nfs-subdir-external-provisioner-4.0.17/deploy/rbac.yaml

# 配置 NFS
curl -O https://raw.githubusercontent.com/kubernetes-sigs/nfs-subdir-external-provisioner/nfs-subdir-external-provisioner-4.0.17/deploy/deployment.yaml
vim deployment.yaml

# 只列出修改部分,已注释
containers:
  - name: nfs-client-provisioner
    image: willdockerhub/nfs-subdir-external-provisioner:v4.0.2 # (kubebiz/nfs-subdir-external-provisioner:v4.0.2)可以下载就不用换
    volumeMounts:
      - name: nfs-client-root
        mountPath: /persistentvolumes
    env:
      - name: PROVISIONER_NAME
        value: k8s-sigs.io/nfs-subdir-external-provisioner # 供应者名字,可随意命名,但后面引用要一致
      - name: NFS_SERVER
        value: 10.74.2.71 # NFS 地址
      - name: NFS_PATH
        value: /ifs/kubernetes # NFS 路径
volumes:
  - name: nfs-client-root
    nfs:
      server: 10.74.2.71 # NFS 地址
      path: /ifs/kubernetes # NFS 路径

# 再创建文件夹(若没有)
sudo mkdir -p /ifs/kubernetes # 与上面 deployment.yaml 中 NFS_PATH(/ifs/kubernetes)保持一致
kubectl apply -f deployment.yaml

# 最后创建 storage class,其中 provisioner 值为上面定义的供应者名字
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/nfs-subdir-external-provisioner/nfs-subdir-external-provisioner-4.0.17/deploy/class.yaml
# 查看
kubectl get sc
# 将 nfs-client 设置为默认,再查看 name 上会显示 default
kubectl patch storageclass nfs-client -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

 

七 、ingress 控制器

ingress 控制器(IngressClass) 这里使用 ingress-nginx,版本对应关系:https://github.com/kubernetes/ingress-nginx/blob/main/README.md#supported-versions-table

kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.12.1/deploy/static/provider/cloud/deploy.yaml

# 查看 ingress 控制器状态
kubectl get pods --namespace=ingress-nginx
# NAME                                        READY   STATUS      RESTARTS   AGE
# ingress-nginx-admission-create--1-jpb4z     0/1     Completed   0          24m
# ingress-nginx-admission-patch--1-jhzng      0/1     Completed   1          24m
# ingress-nginx-controller-5c9fd6c974-sbkmw   1/1     Running     0          24m

# 查看 ingress 控制器暴露端口
kubectl get svc -n ingress-nginx
# NAME                                 TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)                      AGE
# ingress-nginx-controller             LoadBalancer   10.96.213.91   <pending>     80:58013/TCP,443:63536/TCP   27m
# ingress-nginx-controller-admission   ClusterIP      10.96.56.233   <none>        443/TCP                      27m

# 修改暴露端口
# 在 - --service-cluster-ip-range=10.96.0.0/12 下添加 - --service-node-port-range=1-65535
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
# 重启 kubelet
sudo systemctl daemon-reload && sudo systemctl restart kubelet
# 修改 nodePort: 58013 为 nodePort: 80,nodePort: 63536 为 nodePort: 443
kubectl edit service ingress-nginx-controller -n ingress-nginx
kubectl get service -n ingress-nginx

部署 ingress 资源

kubectl apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard-kong-proxy-ingress
  namespace: kubernetes-dashboard
  annotations: # https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#backend-protocol
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
spec:
  ingressClassName: nginx # 如果 ingressClassName 被省略,那么你应该定义一个默认的 Ingress 类,kubectl get ingressClass
  rules:
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard-kong-proxy
            port:
              number: 443
    host: d.x
  tls:
    - hosts:
        - d.x
      # secretName: kubernetes-dashboard-certs # https://kubernetes.github.io/ingress-nginx/user-guide/tls/,使用默认 SSL 证书,强制 HTTPS 重定向
EOF
kubectl apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard-web-ingress
  namespace: kubernetes-dashboard
spec:
  ingressClassName: nginx
  rules:
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard-web
            port:
              number: 8000
    host: d.x
EOF

# 查看 nginx 配置文件
kubectl -n ingress-nginx exec $(kubectl get pod -A | grep ingress-nginx-controller | awk '{print $2}') -it -- cat /etc/nginx/nginx.conf
# 查看 ingress 资源
kubectl get ingress -A -o wide
kubectl -n kubernetes-dashboard describe ingress kubernetes-dashboard-web-ingress

Hosts 文件访问方式

上面 Ingress 资源的 host 为 d.x,添加<集群任意节点IP d.x>到本机 hosts 文件,然后访问 http://d.x 或 https://d.x

或直接使用 curl 测试:curl -vk -D- http://192.168.124.26 -H 'Host: d.x'

自定义 DNS方式访问

将集群任意节点 IP 加入到本机 DNS 设置,可用 nslookup d.x 192.168.124.26 测试

配置 kube-dns(CoreDNS)

kubectl edit -n kube-system configmap coredns
data:
  Corefile: |
    .:53 {
        ...
        ready
        template IN A x {
           match .*\.x
           answer "{{ .Name }} 60 IN A 192.168.124.26"
           answer "{{ .Name }} 60 IN A 192.168.124.27"
           answer "{{ .Name }} 60 IN A 192.168.124.28"
           fallthrough # 未匹配时继续执行后续插件
        }
        kubernetes cluster.local in-addr.arpa ip6.arpa {
        ...
kubectl rollout restart deployment coredns -n kube-system

直接暴露 kube-dns

# 增加 nodePort: 53,修改 type:ClusterIP 为 type:LoadBalancer
kubectl edit service kube-dns -n kube-system

通过 ingress-nginx 暴露 kube-dns

ingress-nginx 默认只转发 HTTP/HTTPS,转发 TCP/UDP 需要配置,以暴露 UDP 53 端口为例(和 Ingress 资源无关,是直接通过 IP:53 访问)

# 添加两行参数 - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services 和 - --udp-services-configmap=$(POD_NAMESPACE)/udp-services,用于读取 tcp-services 和 udp-services
# kubectl -n ingress-nginx edit deployment ingress-nginx-controller
kubectl -n ingress-nginx patch deployment ingress-nginx-controller --type=json -p='[
  {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--tcp-services-configmap=$(POD_NAMESPACE)/tcp-services"},
  {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--udp-services-configmap=$(POD_NAMESPACE)/udp-services"}
]'

# 添加端口暴露
kubectl edit service ingress-nginx-controller -n ingress-nginx
spec:
  ports:
  - appProtocol: tcp
    name: tcp
    nodePort: 5000
    port: 5000
    protocol: TCP
    targetPort: 5000
  - appProtocol: dns
    name: dns
    nodePort: 53
    port: 53
    protocol: UDP
    targetPort: 53

部署 ConfigMap,data 部分格式为:"外部端口": "<namespace/service name>:<service port>:[PROXY]:[PROXY]"

kubectl apply -f - << EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: ingress-nginx
data:
  "5000": "devops/docker-registry-service:5000"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: udp-services
  namespace: ingress-nginx
data:
  "53": "kube-system/kube-dns:53"
EOF
# 查看端口
kubectl get svc -n ingress-nginx

 

八、Gateway API

入口(Ingress)目前已停止更新。新的功能正在集成至网关 API 中。这里安装 nginx-gateway-fabric 作为 GatewayClass。https://gateway-api.kubernetes.ac.cn/guides/

# kubectl kustomize "https://github.com/nginx/nginx-gateway-fabric/config/crd/gateway-api/standard?ref=v1.6.2" | kubectl apply -f -
# 查看 Gateway API resources 版本:https://github.com/nginx/nginx-gateway-fabric/blob/v1.6.2/config/crd/gateway-api/standard/kustomization.yaml
# apiVersion: kustomize.config.k8s.io/v1beta1
# kind: Kustomization
# resources:
# - https://github.com/kubernetes-sigs/gateway-api/config/crd?timeout=120&ref=v1.2.1
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.2.1/standard-install.yaml # 部署对应版本的 Gateway API resources(Gateway API CRD)
kubectl apply -f https://raw.githubusercontent.com/nginx/nginx-gateway-fabric/v1.6.2/deploy/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/nginx/nginx-gateway-fabric/v1.6.2/deploy/default/deploy.yaml

部署 Gateway + HTTPRoute(类似 Ingress 资源):https://docs.nginx.com/nginx-gateway-fabric/how-to/traffic-management/https-termination/

kubectl apply -f - << EOF
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: kubernetes-dashboard-web-gateway
  namespace: nginx-gateway # 与网关(nginx-gateway-fabric)同命名空间
spec:
  gatewayClassName: nginx # kubectl get gatewayclass
  listeners:
  - name: kubernetes-dashboard-web-gateway-http
    protocol: HTTP
    port: 80
    hostname: d.x
    allowedRoutes:
      namespaces:
        from: All
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: kubernetes-dashboard-web-httproute
  namespace: kubernetes-dashboard # 与服务(kubernetes-dashboard-kong-proxy)同命名空间
spec:
  parentRefs:
  - name: kubernetes-dashboard-web-gateway
    namespace: nginx-gateway
    #sectionName: kubernetes-dashboard-web-gateway-http
  #hostnames:
  #- d.x
  rules:
  - matches:
    - path:
        type: PathPrefix
        value: /
    backendRefs:
    - name: kubernetes-dashboard-web
      port: 8000
EOF
kubectl apply -f - << EOF
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: kube-dns-gateway
  namespace: nginx-gateway
spec:
  gatewayClassName: nginx
  listeners:
  - name: kube-dns-gateway-udp
    protocol: UDP
    port: 53
    allowedRoutes:
      kinds:
      - kind: UDPRoute
---
apiVersion: gateway.networking.k8s.io/v1
kind: UDPRoute
metadata:
  name: kube-system-udproute
  namespace: kube-system
spec:
  parentRefs:
  - name: kube-dns-gateway
    namespace: nginx-gateway
  rules:
  - backendRefs:
    - name: kube-dns
      port: 53
EOF

查看

kubectl get svc nginx-gateway -n nginx-gateway
kubectl get gatewayclass,gateway,httproute -A
kubectl exec -it -n nginx-gateway $(kubectl get pod -n nginx-gateway | awk '{print $1}') -c nginx -- nginx -T
curl --resolve d.x:80:192.168.124.26 http://d.x:80/
kubectl describe gateway -n nginx-gateway kubernetes-dashboard-web-gateway
kubectl describe httproute -n kubernetes-dashboard kubernetes-dashboard-web-httproute

 

九、部署应用

https://www.cnblogs.com/jhxxb/p/15298810.html

https://kubernetes.io/zh-cn/docs/tasks/extend-kubectl/kubectl-plugins/

 

十、重置(卸载)

sudo kubeadm reset --force
sudo rm -rf /etc/cni/net.d/
sudo rm -rf ~/.kube/config
sudo systemctl restart containerd kubelet
docker container stop $(docker container ls -a -q)
docker system prune --all --force --volumes

 


https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands

https://www.cnblogs.com/sparkdev/p/9177283.html

https://www.cnblogs.com/rexcheny/p/10925464.html

posted @ 2021-02-01 11:13  江湖小小白  阅读(3267)  评论(0)    收藏  举报