Cloud-native k8s 01: building k8s with kubeasz; deploying dashboard and kuboard

1.2: Server preparation:

All servers run Ubuntu 24.04.

3 masters: a 4C8G master node can manage a few hundred pods; 8C16G can manage over a thousand; 16C24G can manage several thousand.

Type          Server IP     Notes
K8S Master1   10.0.0.101    keep master node time in sync
K8S Master2   10.0.0.102
K8S Master3   10.0.0.103
Node 1        10.0.0.104
Node 2        10.0.0.105
Harbor        10.0.0.106    single harbor instance
Haproxy       10.0.0.107

Two HAProxy instances, made highly available with keepalived, serve as the LB for the k8s apiserver.

The VIP is 10.0.0.188:6443, the entry address of the k8s apiserver LB.

10.0.0.107 also serves as the deployment node for the k8s master and worker nodes,

and provides the NFS service.

1.4.3: Install the container runtime and deploy the Harbor image registry:

root@harbor:~# mkdir /apps
root@harbor:~# cd /apps/
# Upload the harbor package: harbor-offline-installer-v2.14.1.tgz
# Extract it
root@harbor:/apps# tar xvf harbor-offline-installer-v2.14.1.tgz

# Install docker. It could be installed online; here we use a binary install script bundle: upload runtime-docker_24.0.9-containerd_1.7.20-binary-install.tar.gz
root@harbor:/apps# mv runtime-docker_24.0.9-containerd_1.7.20-binary-install.tar.gz /usr/local/src/
root@harbor:/apps# cd /usr/local/src/
root@harbor:/usr/local/src# tar xvf runtime-docker_24.0.9-containerd_1.7.20-binary-install.tar.gz
root@harbor:/usr/local/src# bash runtime-install.sh docker

root@harbor:/usr/local/src# cd /apps/harbor/
# Place the existing HTTPS certificate here (to request your own certificate, the two sites below can be used)
#https://certificate-console.jdcloud.com/jsecssl/create?fastConfig=false&certBrand=TrustAsia&certType=domainType&protectionType=DV-1  # free 3-month certificate
#https://ssl.spug.cc/        # free 3-month certificate; commercial ones can also be bought fairly cheaply
root@harbor:/apps/harbor# mkdir certs
root@harbor:/apps/harbor# cd certs/
# Upload harbor.myarchitect.online_nginx.zip
root@harbor:/apps/harbor/certs# unzip harbor.myarchitect.online_nginx.zip 
Archive:  harbor.myarchitect.online_nginx.zip
 extracting: harbor.myarchitect.online.pem  
 extracting: harbor.myarchitect.online.key
root@harbor:/apps/harbor/certs# ll
total 24
drwxr-xr-x 2 root root  123 Jan 16 00:05 ./
drwxr-xr-x 3 root root  154 Jan 16 00:02 ../
-rw------- 1 root root 1675 Oct 13 16:48 harbor.myarchitect.online.key # private key
-rw------- 1 root root 6955 Oct 13 16:48 harbor.myarchitect.online.pem # certificate (public part)
-rw-r--r-- 1 root root 8920 Jan 12 20:41 harbor.myarchitect.online_nginx.zip

root@harbor:/apps/harbor# cp harbor.yml.tmpl harbor.yml
root@harbor:/apps/harbor# vim harbor.yml
hostname: harbor.myarchitect.online        # use this domain; it matches the certificate
...
https:
  # https port for harbor, default is 443
  port: 443
  # The path of cert and key files for nginx
  certificate: /apps/harbor/certs/harbor.myarchitect.online.pem # point to the certificate
  private_key: /apps/harbor/certs/harbor.myarchitect.online.key # point to the private key
...
harbor_admin_password: 123456    # change the password
...
data_volume: /data    # data location; mounting dedicated storage here is recommended

# The first install must run ./install.sh
root@harbor:/apps/harbor# ./install.sh
# Check the started containers
root@harbor:/apps/harbor# docker-compose ps

# Test connectivity
# On the host machine (Windows), edit C:\Windows\System32\drivers\etc\hosts:
10.0.0.106 harbor.myarchitect.online
# Then open the URL below in a browser and log in as admin/123456
https://harbor.myarchitect.online/

# Create a "myserver" project and set it public (pulls need no auth, pushes do). Projects are usually public; otherwise k8s needs an image-pull secret for pods.
# Create a "baseimages" project for base images.

1.4.2: High-availability load balancing:

# Install keepalived and haproxy
root@k8s-ha1:~# apt update
root@k8s-ha1:~# apt install keepalived haproxy

# Edit the configuration
root@k8s-ha1:~# cd /etc/keepalived/
root@k8s-ha1:/etc/keepalived# mv keepalived.conf.sample keepalived.conf
# Before settling on a VIP, ping it first to make sure nobody else is using it
root@k8s-ha1:/etc/keepalived# vim keepalived.conf
global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict        # this blocks ping to the VIP; comment it out
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.188 dev eth0 label eth0:0    # VIP address
    }
}

root@k8s-ha1:/etc/keepalived# systemctl restart keepalived.service
root@k8s-ha1:/etc/keepalived# systemctl enable keepalived.service
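
# To confirm the VIP landed on this node, check eth0; the second HA node runs an almost identical config. A minimal sketch of the differences, assuming the standby host is k8s-ha2:
root@k8s-ha1:/etc/keepalived# ip addr show dev eth0    # 10.0.0.188 should appear under the eth0:0 label on the active node
# On k8s-ha2, only the role and priority change; everything else matches the MASTER config above:
#   state BACKUP        # standby role
#   priority 80         # lower than the MASTER's 100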

# Configure the load balancer
root@k8s-ha1:/etc/keepalived# vim /etc/haproxy/haproxy.cfg
...    # append the following
listen k8s-6443
  bind 10.0.0.188:6443
  mode tcp    # tcp mode, so no HTTPS certificates are needed here
  server k8s-master1 10.0.0.101:6443 check inter 3s fall 3 rise 5
  server k8s-master2 10.0.0.102:6443 check inter 3s fall 3 rise 5
  server k8s-master3 10.0.0.103:6443 check inter 3s fall 3 rise 5

# The "no server available" warning is expected; the backend masters don't exist yet
root@k8s-ha1:/etc/keepalived# systemctl restart haproxy.service
Broadcast message from systemd-journald@k8s-ha1 (Fri 2026-01-16 22:59:49 CST):
haproxy[23852]: proxy k8s-6443 has no server available!
Broadcast message from systemd-journald@k8s-ha1 (Fri 2026-01-16 22:59:49 CST):
haproxy[23852]: proxy k8s-6443 has no server available!

root@k8s-ha1:/etc/keepalived# systemctl enable haproxy.service
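
# A quick check that haproxy is actually listening on the VIP (ss ships with iproute2 on Ubuntu):
root@k8s-ha1:/etc/keepalived# ss -tnlp | grep 6443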

1.5: Deployment node initialization:

The deployment project is kubeasz (https://github.com/easzlab/kubeasz). The deployment node uses docker to download the various images and binaries needed while deploying kubernetes, so it needs a docker environment; it will also push and pull images to harbor later, so it must be able to log in to harbor and push and pull images.

Cluster architecture diagram

The deployment node is 10.0.0.107; its main roles are:

1. Download installation resources from the internet
2. Optionally retag some images and push them to the internal company registry
3. Initialize the master nodes
4. Initialize the node (worker) nodes
5. Later cluster maintenance:
    adding and removing master nodes
    adding and removing node nodes
    etcd data backup and restore

Operations on the deployment node

# Install docker (other installation methods work too)
# Upload runtime-docker_24.0.9-containerd_1.7.20-binary-install.tar.gz
root@k8s-ha1:~# cd /usr/local/src/
root@k8s-ha1:/usr/local/src# tar xvf runtime-docker_24.0.9-containerd_1.7.20-binary-install.tar.gz
root@k8s-ha1:/usr/local/src# bash runtime-install.sh docker
# docker registry mirrors can be configured, for example:
# vim /etc/docker/daemon.json
{
    "registry-mirrors": [
        "https://docker.1panel.live",
        "https://hub.rat.dev",
        "https://docker.actima.top"
    ],
    ...
# systemctl  restart  docker

# If images still can't be pulled, configure a proxy
#vim /lib/systemd/system/docker.service
[Service]
Environment="HTTPS_PROXY=http://10.0.0.1:7890"
Environment="NO_PROXY=127.0.0.0/8,localhost,10.0.0.0/21,10.100.0.0/16,10.200.0.0/16,easzlab.io.local,*.cluster.local,harbor.myarchitect.online"
Environment="HTTP_PROXY=http://10.0.0.1:7890"

# Reload the config
systemctl daemon-reload
systemctl restart docker
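
# If docker picked up the proxy, docker info reports it ("HTTP Proxy"/"HTTPS Proxy" lines); a quick check:
root@k8s-deploy:~# docker info | grep -i proxy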


# Test image pull and push
root@k8s-deploy:~# docker pull registry.cn-hangzhou.aliyuncs.com/myhubregistry/rockylinux:10.0.20250606
root@k8s-deploy:~# docker tag registry.cn-hangzhou.aliyuncs.com/myhubregistry/rockylinux:10.0.20250606 harbor.myarchitect.online/baseimages/rockylinux:10.0.20250606

# Add a hosts entry on this machine for name resolution
root@k8s-deploy:~# vim /etc/hosts
10.0.0.106 harbor.myarchitect.online
# Pushing images requires login; pulling (from public projects) does not
root@k8s-deploy:~# docker login harbor.myarchitect.online
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
# Push the image
root@k8s-deploy:~# docker push harbor.myarchitect.online/baseimages/rockylinux:10.0.20250606
# The image now shows up in the harbor.myarchitect.online web UI

root@k8s-ha1:/usr/local/src# apt install ansible
# Generate an SSH key pair
root@k8s-deploy:~# ssh-keygen -t rsa-sha2-512 -b 4096
# Install sshpass, used to push the public key to each k8s server
root@k8s-ha1:/usr/local/src# apt install sshpass

# Distribute the key
root@k8s-ha1:/usr/local/src# cd
root@k8s-ha1:~# vim key-ssh.sh
#!/bin/bash
# target host list
IP="
10.0.0.101
10.0.0.102
10.0.0.103
10.0.0.104
10.0.0.105
10.0.0.106
"
REMOTE_PORT="22"
REMOTE_USER="root"
REMOTE_PASS="admin123"

for REMOTE_HOST in ${IP};do
  # record the remote host's key in known_hosts
  ssh-keyscan -p "${REMOTE_PORT}" "${REMOTE_HOST}" >> ~/.ssh/known_hosts

  # push the public key with sshpass for passwordless login, then create a python3 symlink
  sshpass -p "${REMOTE_PASS}" ssh-copy-id "${REMOTE_USER}@${REMOTE_HOST}"
  ssh ${REMOTE_HOST} ln -sv /usr/bin/python3 /usr/bin/python
  echo "${REMOTE_HOST} passwordless login configured!"
done

# Run the script to sync the keys:
root@k8s-ha1:~# bash key-ssh.sh
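
# A quick sanity check that passwordless login works (should print the hostname with no password prompt):
root@k8s-ha1:~# ssh 10.0.0.101 hostname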

1.6.2: Download the kubeasz project and components:

# On the deployment node
root@k8s-ha1:~# apt install git
# Set a variable choosing which release to clone (check available versions on github)
root@k8s-ha1:~# export release=3.6.8
# Download the install script. It is a shell script whose version variables can be tweaked (it auto-installs docker if missing, but installing docker yourself beforehand is cleaner)
root@k8s-ha1:~# wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
root@k8s-ha1:~# chmod a+x ezdown

# Change the version and delete the Tsinghua-mirror wget options from the docker download
root@k8s-ha1:~# vim ezdown
K8S_BIN_VER=v1.34.2    # set the version here, matching the tag info in harbor
...
function download_docker() {
...
  if [[ -f "$BASE/down/docker-${DOCKER_VER}.tgz" ]];then
    logger warn "docker binaries already existed"
  else
    logger info "downloading docker binaries, arch:$ARCH, version:$DOCKER_VER"
    if [[ -e /usr/bin/wget ]];then
# delete the extra wget options below, leaving:
      wget "$DOCKER_URL" || { logger error "downloading docker failed"; exit 1; }

# This step pulls images from docker hub and takes several minutes (enable the docker proxy, or it is very slow)
root@k8s-deploy:~# ./ezdown -D
# It starts a local registry, easzlab.io.local, and pushes the local images into it for local image distribution (docker's NO_PROXY must include *.cluster.local)

# To deploy from another machine later, just copy the downloaded files over and adjust the config files; no need to re-pull the images
# The project lands in:
root@k8s-ha1:~# cd /etc/kubeasz
# Look at the ezctl command (all kinds of k8s management commands; it can also renew certificates and manage kubeconfig files)
root@k8s-ha1:/etc/kubeasz# ./ezctl --help

1.6.3: Generate and customize the hosts file:

# "new" a cluster with a cluster name; this copies the template files (from the example folder) into an initial config (which hosts, which roles — to be edited)
root@k8s-ha1:/etc/kubeasz# ./ezctl new k8s-cluster1
# Enter that cluster's config directory
root@k8s-ha1:/etc/kubeasz# cd clusters/k8s-cluster1/
root@k8s-ha1:/etc/kubeasz/clusters/k8s-cluster1# ls
config.yml  hosts

# Define which hosts take which roles
root@k8s-ha1:/etc/kubeasz/clusters/k8s-cluster1# vim hosts
# Bulk-replace the sample IPs first; in vim run:     :%s/192.168.1/10.0.0/g
[etcd]
10.0.0.101
10.0.0.102
10.0.0.103

[kube_master] # node names may be IPs; if master hostnames are used but don't resolve, kubectl exec into a node fails (hosts entries would be needed)
10.0.0.101 k8s_nodename='10.0.0.101'    # only two masters for now; scaling out is demonstrated later
10.0.0.102 k8s_nodename='10.0.0.102'
#10.0.0.103 k8s_nodename='10.0.0.103'

# The binaries under /etc/kubeasz/bin/ get installed onto the worker nodes
[kube_node] # k8s_nodename is the name shown by kubectl get node
10.0.0.104 k8s_nodename='10.0.0.104'
10.0.0.105 k8s_nodename='10.0.0.105'
...
[harbor]    # harbor is not deployed by kubeasz; leave it alone
[ex_lb]     # not deployed by kubeasz either; no changes needed
[chrony]    # leave as-is
...
[all:vars]    # apiserver-related settings, passed to the apiserver as variables
# The SVC/POD ranges must never overlap with other IDCs/environments (otherwise interconnecting them later makes routing a mess)
SERVICE_CIDR="10.100.0.0/16"    # service address range
CLUSTER_CIDR="10.200.0.0/16"    # pod address range; make it big enough (growing it later is painful)
NODE_PORT_RANGE="30000-32767"    # service NodePort range
CLUSTER_DNS_DOMAIN="cluster.local"    # service domain suffix
bin_dir="/usr/local/bin"        # where the binaries are copied to on masters and nodes
base_dir="/etc/kubeasz"        # deployment directory, leave as-is
cluster_dir="{{ base_dir }}/clusters/k8s-cluster1"    # this cluster's config dir, leave as-is
ca_dir="/etc/kubernetes/ssl"    # certificate directory, leave as-is
k8s_nodename=''        # default node name used when none is set, leave as-is
ansible_python_interpreter=/usr/bin/python3    # default python path; confirm each node has it there
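
# Before moving on, it's worth confirming ansible can reach every host in the new inventory; a minimal check using ansible's stock ping module:
root@k8s-ha1:/etc/kubeasz# ansible -i clusters/k8s-cluster1/hosts all -m ping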

1.6.3.2: Edit the cluster config.yml file:

# Roughly the playbook variables for the k8s deployment
root@k8s-ha1:/etc/kubeasz/clusters/k8s-cluster1# vim config.yml
INSTALL_SOURCE: "online"    # use online if internet access is available; offline requires uploading every package locally
OS_HARDEN: false    # optional OS security hardening, usually skipped
CA_EXPIRY: "876000h"    # CA certificate lifetime in hours; this is 100 years
CERT_EXPIRY: "438000h"    # lifetime of the certs it issues; this is 50 years
CHANGE_CA: false    # force re-creating the CA and re-issuing certs; leave as-is (it creates its own CA and issues its own certs)
# kubeconfig parameters (the two below); a kubeconfig file is generated automatically
CLUSTER_NAME: "cluster1"    # leave as-is
CONTEXT_NAME: "context-{{ CLUSTER_NAME }}"    # leave as-is
# node name fetched automatically (from the hosts file above); leave as-is
K8S_NODENAME: "{%- if k8s_nodename != '' -%} \
                    {{ k8s_nodename|replace('_', '-')|lower }} \
               {%- else -%} \
                    k8s-{{ inventory_hostname|replace('.', '-') }} \
               {%- endif -%}"
# When enabled, the nodename is also used to set the host's hostname; set it to false here
ENABLE_SETTING_HOSTNAME: false
############################
# role:etcd
############################
# A separate wal dir avoids disk I/O contention and improves performance
ETCD_DATA_DIR: "/var/lib/etcd"    # etcd data dir; in production mount a dedicated SSD here for throughput. Even a heavily loaded etcd holds only a few GB, usually a few hundred MB
ETCD_WAL_DIR: ""
INSECURE_REG:    # trusted private registries to add
  - "http://easzlab.io.local:5000"
  - "https://reg.yourcompany.com"
# pause, the base image of every pod, initializes the pod's runtime environment; push it to the private registry and point this at the private address
SANDBOX_IMAGE: "harbor.myarchitect.online/baseimages/pause:3.10"
**************** run the following on the k8s-ha1 machine ****************
# Retag the image and push it to the private registry
root@k8s-ha1:~# docker tag easzlab.io.local:5000/easzlab/pause:3.10 harbor.myarchitect.online/baseimages/pause:3.10
root@k8s-ha1:~# docker push harbor.myarchitect.online/baseimages/pause:3.10
*******************************************************
# [containerd] container persistent storage directory
CONTAINERD_STORAGE_DIR: "/var/lib/containerd"
# [docker] enable the REST API (it is unauthenticated; anyone could control docker via the port — never enable it)
DOCKER_ENABLE_REMOTE_API: false
############################
# role:kube-master
############################
# Certificate config for the k8s master nodes; more IPs and domains can be added (e.g. a public IP and domain)
MASTER_CERT_HOSTS:    # IPs/domains the generated certs are valid for; be sure to include everything you might use later
  - "10.0.0.188"
  - "api.k8s.com"
  #- "www.test.com"

# Pod subnet mask length per node (determines the max pod IPs each node can allocate)
# With flannel's --kube-subnet-mgr flag, this setting is used to assign each node's pod subnet
# https://github.com/coreos/flannel/issues/847
NODE_CIDR_LEN: 24    # subnet length per node; a /24 leaves 2^(32-24)-2 = 254 pod IPs per node

# Whether to enable cluster auditing (important: records who called the master, when, and what they did)
ENABLE_CLUSTER_AUDIT: true

# Resources reserved for kube components (kubelet, kube-proxy, dockerd, ...) — reserves a share of CPU/memory
# details in templates/kubelet-config.yaml.j2
KUBE_RESERVED_ENABLED: "no"

# Each network plugin is configured separately: set flannel options if using flannel, calico options if using calico
# ------------------------------------------- flannel (ignore if not using flannel)
FLANNEL_BACKEND: "vxlan"
DIRECT_ROUTING: false    # when enabled, nodes in the same subnet route directly with no encapsulation, which performs better
# [flannel]
flannel_ver: "v0.27.3"

############################
# role:cluster-addon    cluster add-ons
############################
# coredns auto-install
dns_install: "no"    # install coredns ourselves instead of letting kubeasz do it, to see how it's done
corednsVer: "1.12.4"
ENABLE_LOCAL_DNS_CACHE: false    # whether to enable a DNS cache (when on, each node runs a localdns consulted before coredns; note coredns has its own cache too)
dnsNodeCacheVer: "1.26.4"
# local dns cache address
LOCAL_DNS_CACHE: "169.254.20.10"    # the localdns address when the cache is enabled

# metrics-server auto-install
metricsserver_install: "no"    # not installed here
metricsVer: "v0.8.0"

############################
# role:harbor
############################
HARBOR_SELF_SIGNED_CERT: false    # harbor self-signed cert; harbor isn't deployed by kubeasz, so this isn't needed


# With the config files in place, k8s can be deployed over and over; if a deployment goes wrong, tear it down and redeploy

1.6.4: Deploy the k8s cluster:

Initialize the environment and deploy the highly available k8s cluster via the ansible playbooks.

1.6.4.1: Environment initialization

root@k8s-ha1:~# cd /etc/kubeasz/
# Show the command help
root@k8s-ha1:/etc/kubeasz# ./ezctl --help
# Arguments: cluster name, step number; step 01 is environment initialization (checks that the nodes are reachable, then issues certificates)
root@k8s-ha1:/etc/kubeasz# ./ezctl setup k8s-cluster1 01
# To change what a step does, edit the corresponding playbook, e.g. 01.prepare.yml
#root@k8s-ha1:~# ls /etc/kubeasz/playbooks/

1.6.4.2: Deploy the etcd cluster:

# Deploy the etcd cluster
root@k8s-ha1:/etc/kubeasz# ./ezctl setup k8s-cluster1 02

# Check that the etcd process is running on one of the etcd servers, here 10.0.0.101
root@ubuntu101:~# ps -ef|grep etcd

# Verify the etcd service from each etcd server: check etcd health, which etcdctl supports out of the box
root@ubuntu101:~# export NODE_IPS="10.0.0.101 10.0.0.102 10.0.0.103"
# Three "successfully" lines means all is well
root@ubuntu101:~# for ip in ${NODE_IPS}; do /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health; done
https://10.0.0.101:2379 is healthy: successfully committed proposal: took = 13.195276ms
https://10.0.0.102:2379 is healthy: successfully committed proposal: took = 14.404572ms
https://10.0.0.103:2379 is healthy: successfully committed proposal: took = 12.877453ms

Note: the output above means the etcd cluster is healthy; anything else indicates a problem.
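
# Beyond the health check, etcdctl can also print per-member status (leader, DB size, raft term) as a table, using the same certificate flags:
root@ubuntu101:~# for ip in ${NODE_IPS}; do /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem --write-out=table endpoint status; done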

1.6.4.3: Deploy the container runtime containerd:

Certificates issued by a public CA are already trusted, so no certificate distribution step is needed for them.

# Verify the base container image
root@k8s-ha1:/etc/kubeasz# grep SANDBOX_IMAGE ./clusters/* -R # uses the pause image from the private registry
./clusters/k8s-cluster1/config.yml:SANDBOX_IMAGE: "harbor.myarchitect.online/baseimages/pause:3.10"
1): Configure the base image    # already changed above
root@k8s-ha1:/etc/kubeasz# vim ./clusters/k8s-cluster1/config.yml
# [containerd] base container image
SANDBOX_IMAGE: "harbor.myarchitect.online/baseimages/pause:3.10"

2): Configure name resolution for the harbor registry (skip if a company DNS server resolves it): # each node added then gets the entry automatically
root@k8s-ha1:/etc/kubeasz# vim roles/containerd/tasks/main.yml
...    # below the "创建 containerd 配置文件" (create containerd config file) task
- name: 添加域名解析
  shell: "echo '10.0.0.106 harbor.myarchitect.online' >> /etc/hosts"
  
4): Configure the nerdctl client: # the stock containerd client is awkward, so use the docker-like nerdctl instead, deployed on every node
# Upload nerdctl-2.2.1-linux-amd64.tar.gz
# /etc/kubeasz/bin/containerd-bin/ is the default location for the containerd-related binaries
root@k8s-ha1:/etc/kubeasz# tar xvf nerdctl-2.2.1-linux-amd64.tar.gz -C /etc/kubeasz/bin/containerd-bin/
nerdctl
containerd-rootless-setuptool.sh
containerd-rootless.sh

# The nerdctl config file must be distributed to every node; edit:
root@k8s-ha1:/etc/kubeasz# vim roles/containerd/tasks/main.yml
- name: 准备containerd相关目录
  file: name={{ item }} state=directory
  with_items:
  - "{{ bin_dir }}/containerd-bin"
  - "/etc/containerd"
  - "/etc/containerd/certs.d/docker.io"
  - "/etc/containerd/certs.d/easzlab.io.local:5000"
  - "/etc/containerd/certs.d/{{ HARBOR_REGISTRY }}"
  - "/etc/nerdctl/"    #追加,创建nerdctl配置文件目录
...
- name: 创建 nerdctl 配置文件
  template: src=nerdctl.toml.j2 dest=/etc/nerdctl/nerdctl.toml # added: distribute the nerdctl config file
  tags: upgrade

5): The nerdctl config file: # now prepare the config file referenced above
root@k8s-deploy:/etc/kubeasz# vim roles/containerd/templates/nerdctl.toml.j2
namespace = "k8s.io"    # default containerd namespace for containers and images
debug = false            # debug mode off
debug_full = false        # full debug mode off
insecure_registry = true    # whether to trust insecure registries; needed for some self-hosted registries

6): Optional: customize the containerd config file:
root@k8s-deploy:/etc/kubeasz# vim roles/containerd/templates/config.toml.j2
...
  [plugins.'io.containerd.transfer.v1.local']
    max_concurrent_downloads = 10 # containerd defaults to 3 concurrent downloads; with many layers, only 3 download at a time
    max_concurrent_uploaded_layers = 10    # upload concurrency

7): Run the runtime deployment step:
root@k8s-deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 03


# Spot-check one of the node machines
root@ubuntu104:~# cat /etc/hosts
...
10.0.0.106 harbor.myarchitect.online

root@ubuntu104:~# ll /usr/local/bin/containerd-bin/
total 128104
...
-rwxr-xr-x 1 root root 30355640 Jan 18 01:58 nerdctl*

# Create symlinks so the nerdctl command can be used directly
root@ubuntu104:~# ln -sv /usr/local/bin/containerd-bin/* /usr/bin/
root@ubuntu104:~# nerdctl images
REPOSITORY    TAG    IMAGE ID    CREATED    PLATFORM    SIZE    BLOB SIZE
root@ubuntu104:~# crictl images
IMAGE               TAG                 IMAGE ID            SIZE

1.6.4.4: Deploy the kubernetes master nodes:

#root@k8s-ha1:/etc/kubeasz# vim roles/kube-master/tasks/main.yml # customize if needed
root@k8s-ha1:/etc/kubeasz# ./ezctl setup k8s-cluster1 04

# kubectl can now list the nodes; masters don't schedule workload containers by default, hence SchedulingDisabled
root@k8s-ha1:/etc/kubeasz# kubectl get nodes
NAME         STATUS                     ROLES    AGE   VERSION
10.0.0.101   Ready,SchedulingDisabled   master   39s   v1.34.2
10.0.0.102   Ready,SchedulingDisabled   master   39s   v1.34.2
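
# If a master ever needs to schedule workloads (say, in a small lab cluster), plain kubectl can toggle that; not done here:
root@k8s-ha1:/etc/kubeasz# kubectl uncordon 10.0.0.101    # allow scheduling; kubectl cordon disables it again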

1.6.4.5: Deploy the kubernetes node (worker) nodes:

#root@k8s-ha1:/etc/kubeasz# vim roles/kube-node/tasks/main.yml # customize if needed
root@k8s-ha1:/etc/kubeasz#  ./ezctl setup k8s-cluster1 05

root@k8s-ha1:/etc/kubeasz# kubectl get nodes
NAME         STATUS                     ROLES    AGE     VERSION
10.0.0.101   Ready,SchedulingDisabled   master   3m11s   v1.34.2
10.0.0.102   Ready,SchedulingDisabled   master   3m11s   v1.34.2
10.0.0.104   Ready                      node     36s     v1.34.2
10.0.0.105   Ready                      node     36s     v1.34.2

1.6.4.6: Deploy the network component calico:

# calico need not be deployed by kubeasz (its playbook pulls images from its own local registry); here we deploy it ourselves so the images can be swapped for our own
# Using kubeasz's own step 06 works as well

# Upload the calico yaml, calico_v3.28.1-k8s_1.30.1-ubuntu2404.yaml (from kubeasz-20260111.zip)
root@k8s-ha1:/etc/kubeasz# vim calico_v3.28.1-k8s_1.30.1-ubuntu2404.yaml
...    # the image addresses were changed, e.g.:
image: registry.cn-hangzhou.aliyuncs.com/myhubregistry/calico:cni-v3.28.4
...
# Customize the pod subnet
- name: CALICO_IPV4POOL_CIDR # the calico pool must match CLUSTER_CIDR from the install-time hosts file (otherwise pod communication breaks)
  value: "10.200.0.0/16"
# Customize the per-node subnet size
- name: CALICO_IPV4POOL_BLOCK_SIZE # calico's per-host subnet size; a /24 is 250+ addresses
  value: "24"
...
- name: CLUSTER_TYPE
  value: "k8s,bgp"    # usually left alone
...
- name: IP_AUTODETECTION_METHOD # the NIC nodes use to discover each other (first NIC by default)
  value: "interface=eth0" # pin eth0 (specify the NIC when it isn't fixed; sometimes the first NIC is docker0 and must be avoided)
  
root@k8s-ha1:/etc/kubeasz# kubectl apply -f calico_v3.28.1-k8s_1.30.1-ubuntu2404.yaml

#查看
root@k8s-ha1:/etc/kubeasz# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-695bf6cc9d-6z5lr   1/1     Running   0          5m50s
kube-system   calico-node-8kpvs                          1/1     Running   0          5m50s
kube-system   calico-node-d4blr                          1/1     Running   0          5m50s
kube-system   calico-node-l44tb                          1/1     Running   0          5m50s
kube-system   calico-node-r2w6n                          1/1     Running   0          5m50s
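
# A simple pod-networking smoke test, assuming an alpine image is reachable (retag one into harbor if not): start two test pods and ping one from the other.
root@k8s-ha1:/etc/kubeasz# kubectl run net-test1 --image=alpine -- sleep 360000
root@k8s-ha1:/etc/kubeasz# kubectl run net-test2 --image=alpine -- sleep 360000
root@k8s-ha1:/etc/kubeasz# kubectl get pods -o wide    # note net-test2's pod IP
root@k8s-ha1:/etc/kubeasz# kubectl exec -it net-test1 -- ping -c2 <net-test2-pod-ip>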

1.7: Deploy the kubernetes internal DNS service - CoreDNS:

1.7.1: Deploy coredns:

# Upload the coredns yaml, coredns-v1.9.4.yaml
root@k8s-ha1:/etc/kubeasz/20260111# vim coredns-v1.9.4.yaml
...
        kubernetes cluster.local in-addr.arpa ip6.arpa { # if the k8s domain suffix was changed, change it here too
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        #forward . /etc/resolv.conf {    # the default upstream is a foreign DNS
        forward . 223.6.6.6 {    # forward to a domestic DNS instead
            max_concurrent 1000
        }
...    # the image below was also switched to a domestic mirror
        image: registry.cn-hangzhou.aliyuncs.com/myhubregistry/coredns:1.9.4
...    # the clusterIP below must sit in the configured service range and not be already in use
  clusterIP: 10.100.0.2
  
root@k8s-ha1:/etc/kubeasz/20260111# kubectl apply -f coredns-v1.9.4.yaml

# If calico above isn't up, coredns won't start
root@k8s-ha1:/etc/kubeasz/20260111# kubectl get pods -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-695bf6cc9d-6z5lr   1/1     Running   0          30m
kube-system   calico-node-8kpvs                          1/1     Running   0          30m
kube-system   calico-node-d4blr                          1/1     Running   0          30m
kube-system   calico-node-l44tb                          1/1     Running   0          30m
kube-system   calico-node-r2w6n                          1/1     Running   0          30m
kube-system   coredns-68cf8f8659-fgfpg                   1/1     Running   0          3m25s
kube-system   coredns-68cf8f8659-s94b9                   1/1     Running   0          3m25s
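
# With coredns Running, cluster DNS can be verified with the classic busybox:1.28 recipe (later busybox tags ship a broken nslookup):
root@k8s-ha1:/etc/kubeasz/20260111# kubectl run -it --rm dns-test --image=busybox:1.28 -- nslookup kubernetes.default.svc.cluster.local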

1.8: Deploy dashboard:

Deploy dashboard, the kubernetes web management UI. It is not great (limited features).

# The files used are:
root@k8s-ha1:/etc/kubeasz/20260111/dashboard-v2.7.0# ls
admin-secret.yaml  admin-user.yaml  dashboard-v2.7.0.yaml

# The upstream images can be hard to pull; pull them manually and push them to harbor first
root@k8s-ha1:/etc/kubeasz# docker pull kubernetesui/dashboard:v2.7.0
root@k8s-ha1:/etc/kubeasz# docker pull kubernetesui/metrics-scraper:v1.0.8

root@k8s-ha1:/etc/kubeasz# docker tag kubernetesui/dashboard:v2.7.0 harbor.myarchitect.online/myserver/dashboard:v2.7.0
root@k8s-ha1:/etc/kubeasz# docker push harbor.myarchitect.online/myserver/dashboard:v2.7.0
root@k8s-ha1:/etc/kubeasz# docker tag kubernetesui/metrics-scraper:v1.0.8 harbor.myarchitect.online/myserver/metrics-scraper:v1.0.8
root@k8s-ha1:/etc/kubeasz# docker push harbor.myarchitect.online/myserver/metrics-scraper:v1.0.8

# Deploy; the two images below now point at harbor
root@k8s-ha1:/etc/kubeasz/20260111/dashboard-v2.7.0# vim dashboard-v2.7.0.yaml
          image: harbor.myarchitect.online/myserver/dashboard:v2.7.0
          image: harbor.myarchitect.online/myserver/metrics-scraper:v1.0.8

root@k8s-ha1:/etc/kubeasz/20260111/dashboard-v2.7.0# kubectl apply -f .
Error from server (NotFound): error when creating "admin-secret.yaml": namespaces "kubernetes-dashboard" not found
Error from server (NotFound): error when creating "admin-user.yaml": namespaces "kubernetes-dashboard" not found
# The errors happen because the resources were not applied in dependency order; running apply again resolves them
root@k8s-ha1:/etc/kubeasz/20260111/dashboard-v2.7.0# kubectl apply -f .

root@k8s-ha1:/etc/kubeasz/20260111/dashboard-v2.7.0# kubectl get pods -A
NAMESPACE              NAME                                        READY   STATUS    RESTARTS   AGE
...
kubernetes-dashboard   dashboard-metrics-scraper-7f8cd5bf5-vtdvr   1/1     Running   0          32m
kubernetes-dashboard   kubernetes-dashboard-785cd4cd59-pz6px       1/1     Running   0          32m

root@k8s-ha1:/etc/kubeasz/20260111# kubectl get svc -A
NAMESPACE              NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
default                kubernetes                  ClusterIP   10.100.0.1      <none>        443/TCP                  19h
kube-system            kube-dns                    ClusterIP   10.100.0.2      <none>        53/UDP,53/TCP,9153/TCP   62m
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.100.173.19   <none>        8000/TCP                 56m
kubernetes-dashboard   kubernetes-dashboard        NodePort    10.100.23.71    <none>        443:30000/TCP            56m

# Open port 30000 on any node:
https://10.0.0.104:30000/
# Login requires a token; fetch it:
root@k8s-ha1:/etc/kubeasz/20260111# kubectl get secrets -A|grep admin
kubernetes-dashboard   dashboard-admin-user              kubernetes.io/service-account-token   3      5m15
root@k8s-ha1:/etc/kubeasz/20260111# kubectl get secrets -n kubernetes-dashboard   dashboard-admin-user
NAME                   TYPE                                  DATA   AGE
dashboard-admin-user   kubernetes.io/service-account-token   3      7m9s
root@k8s-ha1:/etc/kubeasz/20260111# kubectl get secrets -n kubernetes-dashboard   dashboard-admin-user -o yaml
...
  token: ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklucENNbFpHYnpac1gzQk9Ta2hLYkU1VVJsSlJOalJFTlZSUk0wYzJUamhIZGtsclRrWnBSV2R4ZHpBaWZRLmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUpyZFdKbGNtNWxkR1Z6TFdSaGMyaGliMkZ5WkNJc0ltdDFZbVZ5Ym1WMFpYTXVhVzh2YzJWeWRtbGpaV0ZqWTI5MWJuUXZjMlZqY21WMExtNWhiV1VpT2lKa1lYTm9ZbTloY21RdFlXUnRhVzR0ZFhObGNpSXNJbXQxWW1WeWJtVjBaWE11YVc4dmMyVnlkbWxqWldGalkyOTFiblF2YzJWeWRtbGpaUzFoWTJOdmRXNTBMbTVoYldVaU9pSmhaRzFwYmkxMWMyVnlJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5elpYSjJhV05sTFdGalkyOTFiblF1ZFdsa0lqb2laakF5TXpCbU5XWXRNalk1WXkwMFpHSTFMVGs0TlRjdFpUbGlOMlJsT0dJMk1qZ3pJaXdpYzNWaUlqb2ljM2x6ZEdWdE9uTmxjblpwWTJWaFkyTnZkVzUwT210MVltVnlibVYwWlhNdFpHRnphR0p2WVhKa09tRmtiV2x1TFhWelpYSWlmUS5NZmxLTUZ4OGhfSWVCQmxrVTByQmp1dmJGbm5FbXYyTElkTHl1LXZleEYtMmhvdFByNmUyeVFkLWZpQ3pqTmVMaENCY0JpWkExeUNRTmZzajBSODRPQS1rbFRzaGJHLVFQQWtUSTUtTDJ0RVdFV05PZnhEZ3dibFE5ejJhV0xuVzZ5Z0plVHJzcnV0X3cxSWRwZEZ1bGVlNGZWWWk2OWZVUmJpOFZORlpuYVZzT0ttTGFoWmFCRHVpZEo5NDl6S25ybmI3Yl9GSWdmbFlCaHl1bVpqUnFyWnZSS2pXTnJCSm5pVjdTRE1nOVlETGlfNUpGcjZOUkVPWWR1Z2pBalFYbVN3eHBJY1BHa2pvZGlqVDBVOW8zR0FjWHN5eWwtV1ItaDAtd2dQX3I1M19INXpYdVpha21WcjRMUUJSazFZbGhGeE0zSFpYUzBGWFFEVWJEZ2ZMVGc=
...

# Decode it to get the token, then log in at https://10.0.0.104:30000/
root@k8s-ha1:/etc/kubeasz/20260111# echo ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklucENNbFpHYnpac1gzQk9Ta2hLYkU1VVJsSlJOalJFTlZSUk0wYzJUamhIZGtsclRrWnBSV2R4ZHpBaWZRLmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUpyZFdKbGNtNWxkR1Z6TFdSaGMyaGliMkZ5WkNJc0ltdDFZbVZ5Ym1WMFpYTXVhVzh2YzJWeWRtbGpaV0ZqWTI5MWJuUXZjMlZqY21WMExtNWhiV1VpT2lKa1lYTm9ZbTloY21RdFlXUnRhVzR0ZFhObGNpSXNJbXQxWW1WeWJtVjBaWE11YVc4dmMyVnlkbWxqWldGalkyOTFiblF2YzJWeWRtbGpaUzFoWTJOdmRXNTBMbTVoYldVaU9pSmhaRzFwYmkxMWMyVnlJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5elpYSjJhV05sTFdGalkyOTFiblF1ZFdsa0lqb2laakF5TXpCbU5XWXRNalk1WXkwMFpHSTFMVGs0TlRjdFpUbGlOMlJsT0dJMk1qZ3pJaXdpYzNWaUlqb2ljM2x6ZEdWdE9uTmxjblpwWTJWaFkyTnZkVzUwT210MVltVnlibVYwWlhNdFpHRnphR0p2WVhKa09tRmtiV2x1TFhWelpYSWlmUS5NZmxLTUZ4OGhfSWVCQmxrVTByQmp1dmJGbm5FbXYyTElkTHl1LXZleEYtMmhvdFByNmUyeVFkLWZpQ3pqTmVMaENCY0JpWkExeUNRTmZzajBSODRPQS1rbFRzaGJHLVFQQWtUSTUtTDJ0RVdFV05PZnhEZ3dibFE5ejJhV0xuVzZ5Z0plVHJzcnV0X3cxSWRwZEZ1bGVlNGZWWWk2OWZVUmJpOFZORlpuYVZzT0ttTGFoWmFCRHVpZEo5NDl6S25ybmI3Yl9GSWdmbFlCaHl1bVpqUnFyWnZSS2pXTnJCSm5pVjdTRE1nOVlETGlfNUpGcjZOUkVPWWR1Z2pBalFYbVN3eHBJY1BHa2pvZGlqVDBVOW8zR0FjWHN5eWwtV1ItaDAtd2dQX3I1M19INXpYdVpha21WcjRMUUJSazFZbGhGeE0zSFpYUzBGWFFEVWJEZ2ZMVGc= | base64 -d
eyJhbGciOiJSUzI1NiIsImtpZCI6InpCMlZGbzZsX3BOSkhKbE5URlJRNjRENVRRM0c2TjhHdklrTkZpRWdxdzAifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdXNlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbi11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZjAyMzBmNWYtMjY5Yy00ZGI1LTk4NTctZTliN2RlOGI2MjgzIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmFkbWluLXVzZXIifQ.MflKMFx8h_IeBBlkU0rBjuvbFnnEmv2LIdLyu-vexF-2hotPr6e2yQd-fiCzjNeLhCBcBiZA1yCQNfsj0R84OA-klTshbG-QPAkTI5-L2tEWEWNOfxDgwblQ9z2aWLnW6ygJeTrsrut_w1IdpdFulee4fVYi69fURbi8VNFZnaVsOKmLahZaBDuidJ949zKnrnb7b_FIgflYBhyumZjRqrZvRKjWNrBJniV7SDMg9YDLi_5JFr6NREOYdugjAjQXmSwxpIcPGkjodijT0U9o3GAcXsyyl-WR-h0-wgP_r53_H5zXuZakmVr4LQBRk1YlhFxM3HZXS0FXQDUbDgfLTg
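
# The same token can be pulled in one step with kubectl's jsonpath output, skipping the manual base64 copy/paste:
root@k8s-ha1:/etc/kubeasz/20260111# kubectl -n kubernetes-dashboard get secret dashboard-admin-user -o jsonpath='{.data.token}' | base64 -d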

 

1.8.3: kuboard:

kuboard is recommended; it is somewhat more usable than dashboard.

# kuboard needs NFS for its storage
root@k8s-ha1:~# apt install nfs-server
# A plain exported directory stands in for a storage class here; share it out
root@k8s-ha1:~# mkdir -p /data/k8sdata/kuboard
root@k8s-ha1:~# vim /etc/exports
/data/k8sdata *(rw,no_root_squash)
root@k8s-ha1:~# systemctl restart nfs-server
root@k8s-ha1:~# systemctl enable nfs-server

# From the k8s side, confirm the directory is exported; below, checked on a worker node
root@ubuntu104:~# showmount -e 10.0.0.107
Export list for 10.0.0.107:
/data/k8sdata *
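
# An optional mount test from the worker (the NFS client tools come with nfs-common on Ubuntu):
root@ubuntu104:~# apt install nfs-common    # if not already present
root@ubuntu104:~# mount -t nfs 10.0.0.107:/data/k8sdata/kuboard /mnt && umount /mnt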

# Upload kuboard-all-in-one.yaml
root@k8s-ha1:/etc/kubeasz/20260111/kuboard# vim kuboard-all-in-one.yaml
...    # the NFS mount configuration:
      volumes:
      - name: kuboard-data
        nfs:
          server: 10.0.0.107
          path: /data/k8sdata/kuboard
          
root@k8s-ha1:/etc/kubeasz/20260111/kuboard# kubectl apply -f kuboard-all-in-one.yaml

# Check
root@k8s-ha1:/etc/kubeasz/20260111/kuboard# kubectl get pods -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS      AGE
...
kuboard       kuboard-v3-78f6bc89dc-6dkqf                1/1     Running   0             2m57s

root@k8s-ha1:/etc/kubeasz/20260111/kuboard# kubectl get svc -A
NAMESPACE     NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                                        AGE
default       kubernetes   ClusterIP   10.100.0.1     <none>        443/TCP                                        21h
kube-system   kube-dns     ClusterIP   10.100.0.2     <none>        53/UDP,53/TCP,9153/TCP                         164m
kuboard       kuboard-v3   NodePort    10.100.5.249   <none>        80:30080/TCP,10081:30081/TCP,10081:30081/UDP   67s

# Log in
10.0.0.104:30080
Username: admin
Password: Kuboard123
# On first login, import the k8s cluster; kubeconfig is the usual choice, as it is the least invasive to the cluster
# Paste the content below in completely: not one line more, not one line less
root@k8s-ha1:/etc/kubeasz/20260111/kuboard# cat /root/.kube/config
# As in the screenshot, the master address can be changed; the boxed master address can be set to .188 so connections go through the VIP to the masters
# After connecting, choose the kuboard-admin account to manage k8s and click the cluster summary to enter

Import the k8s cluster (screenshot)

 
