
1.1 Cluster node list

Node name    Node IP        Node role         Notes
kubespray    10.211.55.11   Ansible manager
k8s-master   10.211.55.8    Master
k8s-node1    10.211.55.9    Node (worker)
k8s-node2    10.211.55.10   Node (worker)

1.2 Prepare Python 3 on the kubespray node

Python 3.10 is required for this deployment.

1.2.1 Install Python 3.10 / pip3

apt -y install python3 python3-pip
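
A quick version check after the install confirms the Python 3.10 requirement is met (assuming Ubuntu 22.04, whose python3 package is 3.10; other releases may ship a different default):

python3 --version    # expect Python 3.10.x
pip3 --version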

1.3 Fetch the kubespray source / install Ansible

git clone https://github.com/kubernetes-sigs/kubespray.git
cd kubespray/
pip3 install -r requirements.txt
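
Once pip finishes, it is worth confirming that the Ansible version pinned by requirements.txt is the one now on PATH (the exact version depends on the kubespray release checked out):

ansible --version
ansible-playbook --version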

1.4 Create the host inventory


ls inventory/

cp -rfp inventory/sample inventory/mycluster 

# the three k8s node IPs from the cluster list above
declare -a IPS=(10.211.55.8 10.211.55.9 10.211.55.10)

echo ${IPS[*]}

CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
# If it fails with a missing ruamel.yaml module, run pip3 install ruamel.yaml
# Edit inventory/mycluster/hosts.yaml
cd inventory/mycluster && vim hosts.yaml

all:
  hosts:
    node1:
      ansible_host: 10.211.55.8
      ip: 10.211.55.8
      access_ip: 10.211.55.8
    node2:
      ansible_host: 10.211.55.9
      ip: 10.211.55.9
      access_ip: 10.211.55.9
    node3:
      ansible_host: 10.211.55.10
      ip: 10.211.55.10
      access_ip: 10.211.55.10
  children:
    kube_control_plane:           # the control plane is node1 only, per the cluster list above
      hosts:
        node1:
        node2:                    # delete this line
    kube_node:                    # the worker nodes are node2 and node3, per the same list
      hosts:
        node1:                    # delete this line
        node2:
        node3:
    etcd:
      hosts:
        node1:
        node2:
        node3:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}            

1.5 Prepare the k8s cluster configuration file

## This section can be left unchanged; the default configuration works
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
vim inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
# Lines 76/81: the pod and service CIDRs can be changed here
# Line 125: choose ipvs or iptables
# Line 129: change false to true to enable LB
# Line 229: the container runtime can be changed; the default is containerd
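
For reference, the variables those line numbers point at look roughly like this; exact line numbers and default values differ between kubespray releases, so treat the lines below as a sketch, not the literal file contents:

# pod and service CIDRs (around lines 76/81)
kube_service_addresses: 10.233.0.0/18
kube_pods_subnet: 10.233.64.0/18
# proxy mode (around line 125)
kube_proxy_mode: ipvs
# strict ARP, needed by LB solutions such as MetalLB (around line 129)
kube_proxy_strict_arp: true
# container runtime (around line 229)
container_manager: containerd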

1.6 Prepare the k8s cluster add-ons file

## This section can also be left unchanged; the defaults work

cd inventory/mycluster/group_vars/k8s_cluster/ && vim addons.yml
# Line 4: set to true to enable the dashboard
dashboard_enabled: true
# Line 7: set to true to install helm
helm_enabled: true
# Line 16: enable the metrics server so Prometheus can pull monitoring metrics from the k8s cluster
metrics_server_enabled: true
metrics_server_container_port: 10250
metrics_server_kubelet_insecure_tls: true
metrics_server_metric_resolution: 15s
metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname"
metrics_server_host_network: false
metrics_server_replicas: 1
# Line 100: enable ingress-nginx
ingress_nginx_enabled: true
# For deployments on the cloud, ALB can be enabled: line 131, change to true
ingress_alb_enabled: true
# Enable the load balancer, line 174 (left disabled for now; it will be installed manually later)
metallb_enabled: false
# Line 238: enable CD (left disabled for now)
argocd_enabled: false

1.7 Set up passwordless SSH from the kubespray host to the three cluster nodes

# Generate a key pair
ssh-keygen
# Copy the public key to each cluster node for passwordless login
ssh-copy-id root@10.211.55.8
ssh-copy-id root@10.211.55.9
ssh-copy-id root@10.211.55.10
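
To confirm key-based login works before handing the nodes over to Ansible, a quick check (a minimal sketch; BatchMode makes ssh fail instead of prompting if the key was not accepted):

for h in 10.211.55.8 10.211.55.9 10.211.55.10; do ssh -o BatchMode=yes root@$h hostname; done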

1.8 Add the sysops user on the k8s cluster nodes and grant it sudo

mkdir -p /mnt/inventory && cat >> /mnt/inventory/hosts <<EOF
10.211.55.8
10.211.55.9
10.211.55.10
EOF

echo "sysops ALL=(ALL) NOPASSWD:ALL" >> /mnt/inventory/sysops

ansible all -i /mnt/inventory/hosts -m copy -a "src=/mnt/inventory/sysops dest=/etc/sudoers.d/sysops"
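
The sudoers entry above assumes a sysops account already exists on each node. If it does not, one way to create it is with Ansible's user module (a sketch; the shell and sudo group membership are assumptions, adjust as needed):

ansible all -i /mnt/inventory/hosts -m user -a "name=sysops shell=/bin/bash groups=sudo append=yes"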

1.9 Check that the firewall is disabled on every cluster node

ansible all -i /mnt/inventory/hosts -m shell -a "ufw disable && ufw status"

1.10 Enable ip_forward on all k8s cluster nodes

ansible all -i /mnt/inventory/hosts -m shell -a "echo 'net.ipv4.ip_forward=1' >> /etc/sysctl.conf && sysctl -p /etc/sysctl.conf"

ansible all  -i /mnt/inventory/hosts -m shell -a "sysctl -a | grep ip_forward"

1.11 Disable the swap partition

ansible all -i /mnt/inventory/hosts -m shell -a "sed -i '/swap/s/^/#/' /etc/fstab && swapoff -a "
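
Mirroring the ip_forward check above, verify that swap is really off on every node (swapon --show should print nothing and the Swap line from free should read 0):

ansible all -i /mnt/inventory/hosts -m shell -a "swapon --show; free -m | grep -i swap"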

2 Cluster deployment and verification

cd kubespray/ && ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml

After the deployment completes, check the cluster status.
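
On the control-plane node (or any host with a valid kubeconfig), a basic check that all nodes have registered and are Ready:

kubectl get nodes -o wide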

Check the pod status in every namespace.
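
All pods across the namespaces should reach Running or Completed:

kubectl get pods -A -o wide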

2.1 kubectl tab completion

apt -y install bash-completion
echo 'source <(kubectl completion bash)' >>~/.bashrc
source ~/.bashrc
