Kubernetes Deployment and Learning

I. Overview:

1. Initialize the environment on every server

2. Prepare all required offline images and installation packages

3. Initialize and configure the Kubernetes cluster

4. Deploy microservices and applications; common problems and examples

 

II. Preparation (repeat on every machine in the cluster):

1. Node plan: two masters and two worker nodes (this is a lab setup; in production the master count must be odd, usually 3 masters plus N nodes, so keep that firmly in mind)

Hostname          IP             Role                                                   OS        k8s version
baiinfo-web005    192.168.10.35  master | keepalived (VIP: 192.168.10.60) | nfs-server  CentOS 8  1.26.2
baiinfo-web006    192.168.10.36  node | nfs-client                                      CentOS 8  1.26.2
baiinfo-web007    192.168.10.37  node | nfs-client                                      CentOS 8  1.26.2
baiinfo-web004    192.168.10.34  master | keepalived | nfs-server                       CentOS 8  1.26.2
cluster-endpoint  192.168.10.60  keepalived VIP (virtual IP)

Project URL:

https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#downloads-for-v1262

Download package:

kubernetes-server-linux-amd64.tar.gz

2. Time synchronization

Edit /etc/chrony.conf to sync against Alibaba Cloud's NTP server; in production, point this at an internal time server instead.

sed -i 's/server 192.168.10.23 iburst/server ntp.aliyun.com iburst/g' /etc/chrony.conf
systemctl restart chronyd.service
chronyc sources -v # verify that synchronization succeeded

3. Set hostnames and configure hosts mappings

Set the hostname (run on every server):

cat >/etc/hostname<<DD
baiinfo-web005
DD
# Note: overwrite with > rather than append, since /etc/hostname must hold a single name
# (hostnamectl set-hostname baiinfo-web005 also works). Set the matching name on each of the other servers.

Update hosts (run on every server):

cat >>/etc/hosts<<DD
192.168.10.35 baiinfo-web005
192.168.10.36 baiinfo-web006
192.168.10.37 baiinfo-web007
192.168.10.34 baiinfo-web004
# Keepalived VIP (hosts format is IP first, then name)
192.168.10.60 cluster-endpoint
DD

4. Configure passwordless SSH key authentication

# Run on 192.168.10.35
ssh-keygen
ssh-copy-id root@baiinfo-web006
ssh-copy-id root@baiinfo-web007
ssh-copy-id root@baiinfo-web004
​
# Run on 192.168.10.36
ssh-keygen
ssh-copy-id root@baiinfo-web005
ssh-copy-id root@baiinfo-web007
ssh-copy-id root@baiinfo-web004
​
# Run on 192.168.10.37
ssh-keygen
ssh-copy-id root@baiinfo-web005
ssh-copy-id root@baiinfo-web006
ssh-copy-id root@baiinfo-web004
​
# Run on 192.168.10.34
ssh-keygen
ssh-copy-id root@baiinfo-web005
ssh-copy-id root@baiinfo-web006
ssh-copy-id root@baiinfo-web007
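
The four blocks above are identical apart from the target list. A small convenience loop (a sketch, using the hostnames from /etc/hosts above) does the same thing on whichever machine it runs on:

for h in baiinfo-web004 baiinfo-web005 baiinfo-web006 baiinfo-web007; do
  [ "$h" = "$(hostname)" ] && continue   # skip the local machine
  ssh-copy-id root@$h                    # prompts for that host's root password
done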

5. Disable the swap partition

PS: Skipping this step is guaranteed to cause trouble; by default kubelet refuses to run with swap enabled!

# Turn swap off for the current boot
swapoff -a
# Disable permanently (comment out the swap line in fstab)
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Check swap status
free -h

6. Disable SELinux

# Disable temporarily
setenforce 0
# Disable permanently
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
# Check SELinux status
sestatus -v

7. Disable firewalld

systemctl stop firewalld && systemctl disable firewalld
# Check the firewall status
firewall-cmd --state
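
Disabling the firewall outright is fine for a lab. If production policy requires firewalld to stay on, the usual alternative is to open the well-known Kubernetes ports instead (a sketch; port list per the upstream docs, and your CNI may need more):

# Control-plane ports
firewall-cmd --permanent --add-port=6443/tcp        # kube-apiserver
firewall-cmd --permanent --add-port=2379-2380/tcp   # etcd client/peer
firewall-cmd --permanent --add-port=10250/tcp       # kubelet API (all nodes)
firewall-cmd --permanent --add-port=10257/tcp       # kube-controller-manager
firewall-cmd --permanent --add-port=10259/tcp       # kube-scheduler
firewall-cmd --permanent --add-port=30000-32767/tcp # NodePort range (worker nodes)
firewall-cmd --reload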

8. Allow iptables to see bridged traffic

# Load the br_netfilter kernel module
modprobe br_netfilter
# Check module status
lsmod | grep br_netfilter

Add sysctl configuration

# Write the module list to a file; the DD marker ends the heredoc
cat >/etc/modules-load.d/k8s.conf<<DD
overlay
br_netfilter
DD
​
# Load the modules
modprobe overlay
modprobe br_netfilter
​
# Set the required sysctl parameters; they persist across reboots
cat >/etc/sysctl.d/k8s.conf<<DD
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
DD
​
sysctl --system
# Verify that the applied settings include the following:
# * Applying /etc/sysctl.d/k8s.conf ...
# net.bridge.bridge-nf-call-iptables = 1
# net.bridge.bridge-nf-call-ip6tables = 1
# net.ipv4.ip_forward = 1
​

9. Install docker and docker-compose

# Configure the dnf repo
mkdir -p /etc/yum.repos.d/backup && mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/backup
curl http://119.57.77.139:22538/os/x86_64/centos8.repo -o /etc/yum.repos.d/centos8.repo
dnf clean all && dnf makecache
dnf install -y mtr vim bash-completion git telnet npm
​
# Set up docker and docker-compose
dnf erase -y podman buildah
dnf remove -y "docker*"
rm -fr /var/lib/docker/
dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo
systemctl daemon-reload
dnf clean all && dnf makecache
dnf install -y docker-ce
systemctl start docker
systemctl enable docker
curl -SL https://github.com/docker/compose/releases/download/v2.16.0/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
#docker-compose version && docker version
cat >/etc/docker/daemon.json<<DD
{
  "log-driver":"json-file",
  "log-opts": {"max-size":"500m", "max-file":"2"}
}
DD
systemctl daemon-reload
systemctl restart docker
(Optional) Manual docker installation
# Download
wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.20.tgz
# Extract
tar -zxvf docker-20.10.20.tgz && cp -p docker/* /usr/bin
# Write the systemd unit file
touch /usr/lib/systemd/system/docker.service
cat> /usr/lib/systemd/system/docker.service <<DD
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target docker.socket
[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
WorkingDirectory=/usr/local/bin
ExecStart=/usr/bin/dockerd \
                -H tcp://0.0.0.0:4243 \
                -H unix:///var/run/docker.sock \
                --selinux-enabled=false \
                --log-opt max-size=1g
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
Restart=on-failure
[Install]
WantedBy=multi-user.target
DD
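
After writing the unit file, the daemon still has to be registered and started:

systemctl daemon-reload
systemctl enable --now docker
docker version # sanity check: both client and server versions should print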

10. Configure the k8s yum repo

cat > /etc/yum.repos.d/kubernetes.repo << DD
[kubernetes]
name=kubernetes
enabled=1
gpgcheck=0
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
DD
yum clean all && yum makecache

11. Switch the sandbox_image to the Alibaba Cloud mirror

# Export the default config (config.toml does not exist by default)
containerd config default > /etc/containerd/config.toml
grep sandbox_image  /etc/containerd/config.toml 
# Check the pause image URL that grep printed above, then rewrite it with the sed below;
# adjust the source pattern if your URL differs (newer containerd defaults to registry.k8s.io/pause)
sed -i "s#k8s.gcr.io/pause#registry.aliyuncs.com/google_containers/pause#g"       /etc/containerd/config.toml
# Confirm the replacement succeeded
grep sandbox_image  /etc/containerd/config.toml

12. Set containerd's cgroup driver to systemd

# Change SystemdCgroup = false to true
sed -i 's#SystemdCgroup = false#SystemdCgroup = true#g' /etc/containerd/config.toml
# Confirm the replacement succeeded
grep SystemdCgroup /etc/containerd/config.toml
# Once all changes are in place, restart containerd
systemctl restart containerd

 

III. Installing the Kubernetes cluster proper

1. Install kubeadm, kubelet, and kubectl on the master node

# Leaving out the version installs the latest; at the time of writing that is 1.26.2
# Without the k8s yum repo configured above this install fails; in that case download the packages and install by hand
yum install -y kubelet-1.26.2  kubeadm-1.26.2  kubectl-1.26.2 --disableexcludes=kubernetes
# Enable at boot and start immediately (--now starts the service right away)
systemctl enable --now kubelet
# Check the status; wait a moment before checking, startup is slow
systemctl status kubelet
kubectl version
yum info kubeadm
(Optional) Manual k8s installation
# Download page: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
wget https://dl.k8s.io/v1.26.2/kubernetes-server-linux-amd64.tar.gz
tar -xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
cp -ar kubelet kubeadm kubectl /usr/bin/
​
# Configure the kubelet systemd unit
touch /usr/lib/systemd/system/kubelet.service
cat >/usr/lib/systemd/system/kubelet.service <<DD
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/
​
[Service]
ExecStart=/usr/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
​
[Install]
WantedBy=multi-user.target
DD
​
# Note: the empty ExecStart= line below intentionally clears the base unit's ExecStart before redefining it
mkdir -p /usr/lib/systemd/system/kubelet.service.d/
touch /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
cat >/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf <<DD
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
EnvironmentFile=-/etc/sysconfig/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
DD
​

 

2. Initialize the Kubernetes cluster on the master node

Pull the base images first

# If the cluster runtime is docker, swap these commands for docker pull; images that cannot be pulled directly can be replaced by pulling a domestic mirror copy and re-tagging it
crictl pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.26.2
crictl pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.26.2
crictl pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.26.2
crictl pull registry.aliyuncs.com/google_containers/kube-proxy:v1.26.2
#crictl pull registry.aliyuncs.com/google_containers/pause:3.6
#crictl pull registry.aliyuncs.com/google_containers/pause:3.7
crictl pull registry.aliyuncs.com/google_containers/pause:3.8
crictl pull registry.aliyuncs.com/google_containers/etcd:3.5.3-0
crictl pull registry.aliyuncs.com/google_containers/coredns:v1.8.6
crictl pull docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
crictl pull kubernetesui/dashboard:v2.7.0
crictl pull kubernetesui/metrics-scraper:v1.0.8
crictl pull registry.cn-hangzhou.aliyuncs.com/google_containers/defaultbackend:1.4
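
Note that some of the pinned tags above (pause 3.8, etcd 3.5.3-0, coredns v1.8.6) may differ from what kubeadm v1.26.2 actually resolves; kubeadm can print the authoritative list up front:

# Print the exact images kubeadm will pull for this version and repository
kubeadm config images list \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.26.2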

Cluster initialization (if you are already comfortable with k8s, you can configure keepalived first and initialize directly against the VIP)

kubeadm init \
  --apiserver-advertise-address=192.168.10.35 \
  --image-repository registry.aliyuncs.com/google_containers \
  --control-plane-endpoint=baiinfo-web005 \
  --kubernetes-version v1.26.2 \
  --service-cidr=10.1.0.0/16 \
  --pod-network-cidr=10.244.0.0/16 \
  --v=5
# --apiserver-advertise-address  Which of the master's interfaces/IPs to use when talking to the
#   other cluster nodes. With multiple interfaces, specify it explicitly; otherwise kubeadm picks
#   the interface holding the default gateway. This is the master node's IP, so change it to yours.
# --image-repository  Where images are pulled from (added in 1.13). The default is k8s.gcr.io; we
#   point it at the domestic mirror registry.aliyuncs.com/google_containers.
# --control-plane-endpoint  A custom DNS name mapped to the API server's IP. We added the hosts entry
#   "192.168.10.35 baiinfo-web005", hence --control-plane-endpoint=baiinfo-web005. kubeadm init bakes
#   this name in, and the same name must be passed to kubeadm join. Once keepalived is configured,
#   switch to cluster-endpoint so the name points at the load balancer (VIP) of the HA setup.
# --kubernetes-version  Pins the Kubernetes version. The default, stable-1, downloads
#   https://dl.k8s.io/release/stable-1.txt to resolve the latest release; pinning v1.26.2 skips that
#   network round trip.
# --pod-network-cidr  The Pod IP range. Kubernetes supports many network plugins, and each has its own
#   requirements for this value; it is 10.244.0.0/16 here because the flannel deployment we use expects
#   exactly that CIDR.

Reset and initialize again

# Initialize a second time to verify that the master's configuration is all correct
kubeadm reset
rm -fr ~/.kube/  /etc/kubernetes/* /var/lib/etcd/*
kubeadm init \
  --apiserver-advertise-address=192.168.10.35  \
  --image-repository registry.aliyuncs.com/google_containers \
  --control-plane-endpoint=baiinfo-web005 \
  --kubernetes-version v1.26.2 \
  --service-cidr=10.1.0.0/16 \
  --pod-network-cidr=10.244.0.0/16 \
  --v=5
# Be sure to record the final block of output; running it on each node is what joins them to the cluster, e.g.:
Then you can join any number of worker nodes by running the following on each as root:
​
kubeadm join baiinfo-web005:6443 --token obfadi.pwazuw5jxhitncms \
        --discovery-token-ca-cert-hash sha256:cfee95f28f317e4c0f46d6e4386999a9a91d523e735afc44cdfd1e6d030aee3d 
        
# If you didn't record the command, or the token passed its 24-hour default TTL, regenerate it with:
kubeadm token create --print-join-command

Configure environment variables (if a machine name or IP in the cluster changes or misbehaves, clean these up)

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source  ~/.bash_profile

3. Install a Pod network add-on (CNI) on the master node

A Container Network Interface (CNI) based Pod network add-on must be deployed so that Pods can communicate with one another.

# Download the yaml manifest locally; the images it uses can be pulled ahead of time
crictl pull docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
crictl pull docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
wget https://raw.githubusercontent.com/flannel-io/flannel/v0.20.2/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
​
# Check the cluster state with kubectl
kubectl get pods -A
kubectl get node
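
The v0.20.2 manifest creates the flannel DaemonSet in the kube-flannel namespace; once those pods are Running, the node status should flip to Ready:

kubectl get pods -n kube-flannel -w # wait until the flannel pods reach Running
kubectl get nodes # STATUS should now show Ready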

4. Configuration on the worker nodes (all nodes)

Install kubelet, kubeadm, and kubectl

yum install -y kubelet-1.26.2  kubeadm-1.26.2  kubectl-1.26.2 --disableexcludes=kubernetes
systemctl enable --now kubelet
systemctl status kubelet

Point sandbox_image at the mirror repository

# This is also the fix for kube-proxy pods stuck in ContainerCreating
vi /etc/containerd/config.toml
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.8" 
SystemdCgroup = true # use the systemd cgroup driver

# Restart containerd
systemctl restart containerd

Run kubeadm join to add the node to the cluster

# Be sure to record the final block of kubeadm init's output; running it on each node is what joins them to the cluster, e.g.:
Then you can join any number of worker nodes by running the following on each as root:

kubeadm join baiinfo-web005:6443 --token obfadi.pwazuw5jxhitncms \
        --discovery-token-ca-cert-hash sha256:cfee95f28f317e4c0f46d6e4386999a9a91d523e735afc44cdfd1e6d030aee3d 
        
# If you didn't record the command, or the token passed its 24-hour default TTL, regenerate it with:
kubeadm token create --print-join-command
​

5. Configure IPVS so cluster-internal traffic flows properly

# Load the ip_vs kernel modules
modprobe -- ip_vs
modprobe -- ip_vs_sh
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
# Verify ip_vs is loaded
lsmod |grep ip_vs
# Install the ipvsadm tooling
yum install ipset ipvsadm -y
# Switch the kube-proxy mode to ipvs
kubectl edit configmap -n kube-system kube-proxy  
mode: "ipvs"
​
# Restart kube-proxy: deleting the pods by hand makes the DaemonSet recreate them
kubectl get pod -n kube-system | grep kube-proxy
kubectl get pod -n kube-system | grep kube-proxy |awk '{system("kubectl delete pod "$1" -n kube-system")}'
kubectl get pod -n kube-system | grep kube-proxy
# Inspect the ipvs forwarding rules
ipvsadm -Ln
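
The modprobe calls above do not survive a reboot; persisting the modules with a modules-load.d file (the same pattern as the k8s.conf file earlier) is a sensible addition:

# Load the ip_vs modules automatically at boot
cat >/etc/modules-load.d/ipvs.conf<<DD
ip_vs
ip_vs_sh
ip_vs_rr
ip_vs_wrr
DD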

6. Configure Kubernetes high availability: keepalived

(PS: the following must be configured on both master servers; worker nodes need no keepalived config, but they do need to rejoin the cluster.)

Offline installation of keepalived 2.2.7

# Download keepalived 2.2.7
wget https://www.keepalived.org/software/keepalived-2.2.7.tar.gz --no-check-certificate
tar -zxvf keepalived-2.2.7.tar.gz
yum -y install openssl openssl-devel gcc make
# Build & install
cd keepalived-2.2.7 && ./configure --prefix=/usr/local/keepalived  --sysconf=/etc
make && make install
# Put the binary on PATH
cp /usr/local/keepalived/sbin/keepalived /usr/sbin/keepalived
(Optional) Online installation of keepalived
# The yum repo is not kept at the latest version
yum install keepalived -y
systemctl daemon-reload
systemctl restart keepalived && systemctl enable keepalived
systemctl status keepalived

keepalived master configuration:

cat > /etc/keepalived/keepalived.conf<<DD
! Configuration File for keepalived 
global_defs {
    # Give each keepalived instance its own router_id
    router_id 38
}
vrrp_instance VI_1 {
    # MASTER for the primary instance, BACKUP for the standby
    state MASTER
    # NIC name; check with "ll /etc/sysconfig/network-scripts/ifcfg-enp1s0"
    interface enp1s0
    # Must be identical on master and backup
    virtual_router_id 51
    # Priority; the master must be higher than the backup. Typically 100 for the master, 80 for the backup
    priority 100
    # VRRP advertisement interval, default 1 second
    advert_int 1
    # Non-preemptive mode: once a server holds the VIP it keeps it until its own keepalived stops
    # (note that keepalived only honors nopreempt when state is BACKUP)
    nopreempt
    authentication {
        # Must match on master and backup
        auth_type PASS
        # Must match on master and backup
        auth_pass 2538
    }
    virtual_ipaddress {
      # The VIP; must be identical on master and backup
      192.168.10.60
    }
}
DD

keepalived backup configuration:

cat > /etc/keepalived/keepalived.conf<<DD
! Configuration File for keepalived 
global_defs {
    # Give each keepalived instance its own router_id
    router_id 39
}
vrrp_instance VI_1 {
    # MASTER for the primary instance, BACKUP for the standby
    state BACKUP
    # NIC name; check with "ll /etc/sysconfig/network-scripts/ifcfg-enp1s0"
    interface enp1s0
    # Must be identical on master and backup
    virtual_router_id 51
    # Priority; the master must be higher than the backup. Typically 100 for the master, 80 for the backup
    priority 80
    # VRRP advertisement interval, default 1 second
    advert_int 1
    # Non-preemptive mode: once a server holds the VIP it keeps it until its own keepalived stops
    # (note that keepalived only honors nopreempt when state is BACKUP)
    nopreempt
    authentication {
        # Must match on master and backup
        auth_type PASS
        # Must match on master and backup
        auth_pass 2538
    }
    virtual_ipaddress {
      # The VIP; must be identical on master and backup
      192.168.10.60
    }
}
DD
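
As configured, the VIP only moves when keepalived itself dies, not when kube-apiserver does. A common refinement is a vrrp_script health check; the sketch below is illustrative (the script path and thresholds are assumptions), applied on both masters:

# Hypothetical health check; /healthz is served to unauthenticated clients by default
cat >/etc/keepalived/check_apiserver.sh<<'DD'
#!/bin/bash
curl -sk --max-time 3 https://127.0.0.1:6443/healthz | grep -q ok
DD
chmod +x /etc/keepalived/check_apiserver.sh
# Then reference it from keepalived.conf:
# vrrp_script chk_apiserver {
#     script "/etc/keepalived/check_apiserver.sh"
#     interval 3
#     fall 3
# }
# vrrp_instance VI_1 {
#     ...
#     track_script { chk_apiserver }
# }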

Configure the systemd service:

cat > /usr/lib/systemd/system/keepalived.service <<DD
[Unit]
Description=LVS and VRRP High Availability Monitor
After=network-online.target syslog.target
Wants=network-online.target
Documentation=man:keepalived(8)
Documentation=man:keepalived.conf(5)
Documentation=man:genhash(1)
Documentation=https://keepalived.org
​
[Service]
Type=forking
PIDFile=/run/keepalived.pid
KillMode=process
EnvironmentFile=-/usr/local/keepalived/sysconfig/keepalived
ExecStart=/usr/local/keepalived/sbin/keepalived  $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
​
[Install]
WantedBy=multi-user.target
DD

Start keepalived:

systemctl daemon-reload
# Enable at boot and start keepalived immediately
systemctl enable --now keepalived
# Check keepalived's status and the VIP
systemctl status keepalived.service 
ping 192.168.10.60 # the VIP
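
To confirm which master currently holds the VIP:

ip addr show enp1s0 | grep 192.168.10.60 # the VIP appears on exactly one master at a time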

7. Initializing the HA Kubernetes cluster

Re-initialize the master; reset the workers and rejoin them to the cluster

# Reset the master node
kubeadm reset
rm -fr ~/.kube/  /etc/kubernetes/* /var/lib/etcd/*
# Re-initialize the master. The apiserver address now points at the VIP and control-plane-endpoint
# at the VIP's hostname. (A common variant keeps --apiserver-advertise-address as the node's own IP
# and lets only the endpoint ride the VIP.)
kubeadm init \
  --apiserver-advertise-address=192.168.10.60  \
  --image-repository registry.aliyuncs.com/google_containers \
  --control-plane-endpoint=cluster-endpoint \
  --kubernetes-version v1.26.2 \
  --service-cidr=10.1.0.0/16 \
  --pod-network-cidr=10.244.0.0/16 \
  --v=5
# Reset the local kubeconfig
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source  ~/.bash_profile
# Reinstall the CNI plugin
wget https://raw.githubusercontent.com/flannel-io/flannel/v0.20.2/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
# On the worker nodes: reset, then rejoin the cluster (the endpoint is now the VIP hostname)
kubeadm reset
kubeadm join cluster-endpoint:6443 --token r09n1z.mwelfc9qrqi4lbhw \
        --discovery-token-ca-cert-hash sha256:b8ccea0329691b610635a60fa305723e52cd87d33d26ea17f77c36d4076b3a7a

Add the second master node

# Repeat the first master's setup on the new master, stopping just before kubeadm init.
# Then run, on the master being added:
CERT_KEY=`ssh cluster-endpoint "kubeadm init phase upload-certs --upload-certs|tail -1"`
join_str=`ssh cluster-endpoint kubeadm token create --print-join-command`
echo $join_str " --control-plane --certificate-key $CERT_KEY --v=5"
# Run the printed command (below) on the node being added. The endpoint it shows must be the
# VIP hostname; if it is not, the HA setup has failed. Expect a very long wait.
kubeadm join cluster-endpoint:6443 --token 2e61tv.9i2unjclhvl2cfgq --discovery-token-ca-cert-hash sha256:b8ccea0329691b610635a60fa305723e52cd87d33d26ea17f77c36d4076b3a7a   --control-plane --certificate-key 473dfd8f16d982bc3feb7dc8a14f3a5d50e35476ff2371b50f063063cf53b848 --v=5
# --control-plane tells kubeadm join to create a new control plane; required when adding a master
# --certificate-key fetches the control-plane certificates from the kubeadm-certs Secret and decrypts
#   them with the given key, i.e. the value printed by "kubeadm init phase upload-certs --upload-certs" above
​
# When it finishes, follow the on-screen prompt and run:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
​
# Check the current cluster state and confirm the new master was added
kubectl get nodes
kubectl get pods -A -owide
curl -k https://cluster-endpoint:6443/version
​
# Remove the control-plane taint (in 1.26 node-role.kubernetes.io/control-plane replaces the old master taint)
# kubectl taint nodes `hostname` node-role.kubernetes.io/control-plane:NoSchedule- 2>/dev/null
# kubectl taint nodes `hostname` node.kubernetes.io/not-ready:NoSchedule- 2>/dev/null

8. Install ingress-nginx

# The image references in the yaml must be edited by hand; a community member (anjia0532) mirrors
# them to Docker Hub, so point the manifest at the mirrored images below.
# Make sure the mirror tags match the versions your deploy.yaml actually references.
# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.2.0/deploy/static/provider/cloud/deploy.yaml
docker pull anjia0532/google-containers.ingress-nginx.kube-webhook-certgen:v1.1.1
docker pull anjia0532/google-containers.ingress-nginx.controller:v1.1.1   
kubectl apply -f deploy.yaml

# Check status
kubectl get all -n ingress-nginx
kubectl delete -f deploy.yaml # removes all the ingress-nginx resources again
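
A sketch of the manifest edit itself, assuming the controller-v1.2.0 deploy.yaml (whose images live under k8s.gcr.io/ingress-nginx and are pinned by digest); check the names and tags against your copy before running:

# Point the manifest at the mirrored images and drop the digest pins
sed -i -e 's#k8s.gcr.io/ingress-nginx/controller#anjia0532/google-containers.ingress-nginx.controller#g' \
       -e 's#k8s.gcr.io/ingress-nginx/kube-webhook-certgen#anjia0532/google-containers.ingress-nginx.kube-webhook-certgen#g' \
       -e 's#@sha256:[0-9a-f]*##g' deploy.yaml
grep 'image:' deploy.yaml # every image should now point at the mirror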

9. Install the nfs provisioner

Install helm

# Download the package
wget https://get.helm.sh/helm-v3.11.2-linux-amd64.tar.gz
# Extract it
tar -xzvf helm-v3.11.2-linux-amd64.tar.gz -C /root/
# Symlink the binary onto PATH
ln -s /root/linux-amd64/helm /usr/local/bin/helm
# Add the helm chart repo
helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/

Set up the NFS share

#### Install NFS [server side]
yum -y install  nfs-utils rpcbind
# On the server
mkdir -p /data/nfsdata
# Open up the shared directory (directories need the execute bit, so 777 rather than 666)
chmod 777 /data/nfsdata
cat > /etc/exports<<DD
/data/nfsdata *(rw,no_root_squash,no_all_squash,sync)
DD
# Apply the export configuration
exportfs -r
systemctl enable --now rpcbind
systemctl enable --now nfs-server
#### Install NFS [client side]
yum -y install nfs-utils rpcbind
systemctl enable --now rpcbind

# Mount the share
mkdir -p /data/nfsdata
echo "192.168.10.35:/data/nfsdata /data/nfsdata     nfs    defaults  0 1">> /etc/fstab
mount -a

# Verify
showmount -e cluster-endpoint
showmount -e 192.168.10.35

Install the nfs provisioner

### Install the nfs provisioner with helm; the chart package nfs-subdir-external-provisioner-4.0.17.tgz is in the current directory
# Upstream: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
helm install baiinfo-bigdata ./nfs-subdir-external-provisioner-4.0.17.tgz \
  --namespace=nfs-baiinfo \
  --create-namespace \
  --set image.repository=willdockerhub/nfs-subdir-external-provisioner \
  --set image.tag=v4.0.2 \
  --set replicaCount=2 \
  --set storageClass.name=nfs-client \
  --set storageClass.defaultClass=true \
  --set nfs.server=192.168.10.60 \
  --set nfs.path=/data/nfsdata
# The image it needs is blocked from here; import the pre-saved image tarball from the current directory instead
ctr -n k8s.io image import /data/Kubernetes/nfs-subdir-external-provisioner.tar
crictl images # the image should now show as imported

# Troubleshooting: Error: INSTALLATION FAILED: cannot re-use a name that is still in use
# If you hit this, list the existing releases, delete the conflicting one (example name below is
# from an earlier attempt), and install the share again
helm ls --all-namespaces
helm delete baiinfo-test
kubectl get pods,deploy,sc -n nfs-provisioner
kubectl delete storageclass.storage.k8s.io/nfs-client
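
A quick end-to-end check of the provisioner: create a throwaway PVC against the nfs-client StorageClass and confirm it binds (names here are illustrative):

cat >test-pvc.yaml<<DD
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-nfs-pvc
spec:
  accessModes: ["ReadWriteMany"]
  storageClassName: nfs-client
  resources:
    requests:
      storage: 100Mi
DD
kubectl apply -f test-pvc.yaml
kubectl get pvc test-nfs-pvc # expect STATUS Bound within a few seconds
kubectl delete -f test-pvc.yaml # clean up; the backing subdirectory stays under /data/nfsdata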

10. Install the Dashboard

Download: https://github.com/kubernetes/dashboard

# Apply the yaml manifest to start the containers
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
# Expose a NodePort for remote access
vim recommended.yaml
# Find the kind: Service section and add the two NodePort lines marked below:
########################################
#kind: Service
#apiVersion: v1
#metadata:
#  labels:
#    k8s-app: kubernetes-dashboard
#  name: kubernetes-dashboard
#  namespace: kubernetes-dashboard
#spec:
#  type: NodePort               <--
#  ports:
#    - port: 443
#      targetPort: 8443
#      nodePort: 31443          <--
#  selector:
#    k8s-app: kubernetes-dashboard
########################################
# Redeploy
kubectl delete -f recommended.yaml
kubectl apply -f recommended.yaml

# Install metrics-server to collect CPU and other usage data
wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability.yaml
grep -rn image high-availability.yaml
sed -i "s#registry.k8s.io/metrics-server#registry.cn-hangzhou.aliyuncs.com/chenby#g" high-availability.yaml
grep -rn image high-availability.yaml
vim high-availability.yaml
# Find the args list and add "- --kubelet-insecure-tls", as in the example below
# ##################################################################################
#       args:                                                                      #
#       - --cert-dir=/tmp                                                          #
#       - --secure-port=4443                                                       #
#       - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname         #
#       - --kubelet-use-node-status-port                                           #
#       - --metric-resolution=15s                                                  #
#       - --kubelet-insecure-tls                                                   #
#       image: registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.6.1      #
####################################################################################
# Find the PodDisruptionBudget and fix its referenced apiVersion: the file ships            #
# policy/v1beta1; drop the beta suffix, as in the example below:                            #
#---                                                                               #
#apiVersion: policy/v1                                                             #
#kind: PodDisruptionBudget                                                         #
####################################################################################
kubectl apply -f high-availability.yaml

# Check the startup status
kubectl  get pod -n kube-system -owide| grep metrics
kubectl get pods,svc -n kubernetes-dashboard
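
Once metrics-server reports Ready, resource metrics start flowing and the Dashboard's CPU/memory graphs will populate:

kubectl top nodes # per-node CPU and memory usage
kubectl top pods -A # per-pod usage across all namespaces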

Create a login user

# Create the yaml config file
cat >ServiceAccount.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF
# Apply the yaml config file
kubectl apply -f ServiceAccount.yaml

Create and fetch a login token

kubectl -n kubernetes-dashboard create token admin-user
​
# The returned token:
eyJhbGciOiJSUzI1NiIsImtpZCI6IjZWSTh6eUFYQ2hvRmNhZ09oam5iZGNBYXNUWTNkb09qTExvaU1Ga0lacWcifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjc4NDI0OTYxLCJpYXQiOjE2Nzg0MjEzNjEsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiMzAyMjFhNDUtMzBlZC00OTIwLWIzNDEtOGFhMDdmYTExZWMwIn19LCJuYmYiOjE2Nzg0MjEzNjEsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.bB9sBE5idswqLXZgSAIqYkdLJtPyTcAaCX7d_L3uidsRUgiKhOSmEEWCqG-rhevW69qb9QPTS_8_cnxg4RoUV8Th-guEgRer9KRf6djBhgDJ89uNTfEKbyorg2DwgTSqZQ6X5MANHbLuI6yu462bozawAk2ugQHx8PsUdQtkrXrPGfp2IvPyMA2N5MXkf80qlKMoAreTWTZgDCVLh-06Kr_h91nn4MqjhIFcqAzuxsf88d1k_KsN-uLL1qnwRbPgSl2HsekIDoGGE_bJHS5OUvMiOzUWMjtKG21__sxUFCmonspMq3cfoG-NmS8DpzPwbNTXn3qP2SIkIZQZFHTWyQ

Log in to the web UI at: https://<public IP>:31443/

 

11. The bare-metal option: MetalLB v0.13.9

On bare metal, the EXTERNAL-IP of a newly created LoadBalancer Service otherwise stays pending forever; installing this add-on fixes that.

# Edit the cluster's kube-proxy configuration
kubectl edit configmap -n kube-system kube-proxy
######################################################
# Find this section and set mode and strictARP:      #
#apiVersion: kubeproxy.config.k8s.io/v1alpha1        #
#kind: KubeProxyConfiguration                        #
#mode: "ipvs"                                        #
#ipvs:                                               #
#  strictARP: true                                   #
######################################################
​
# Install MetalLB online
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.9/config/manifests/metallb-native.yaml
kubectl get pod,svc -n metallb-system -o wide # check the installation status
​
# Configure an automatically assigned IP pool
cat >ip-cheap.yaml<<DD
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: ip-cheap
  namespace: metallb-system
spec:
  addresses:
  - 192.168.10.36-192.168.10.37 # change this to the address range MetalLB may hand out (the author reuses the node IPs here; unused addresses in the same subnet are safer)
DD
​
# (Optional if the dynamic pool above is enough) Statically pinned IP addresses
cat >ip-cheap.yaml<<DD
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: ip-cheap
  namespace: metallb-system
spec:
  addresses:
  - 192.168.10.50/30 # addresses exposed by the cluster (pin specific IPs)
  - 192.168.10.100-192.168.10.110 # virtual IPs in the hosts' subnet also work; they must be unused, since a clash with a physical machine's IP breaks that node (note MetalLB ranges need full start and end addresses)
  autoAssign: false # static policy: never auto-assign from this pool
DD
​
# Configure an L2 (ARP) advertisement announcing the IPs from the IPAddressPool; every L2Advertisement must reference an IPAddressPool
cat >advertise.yaml<<DD
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2adver
  namespace: metallb-system
spec:
  ipAddressPools: # if omitted, all IP pools are advertised
  - ip-cheap
DD
​
# Apply the configuration
kubectl apply -f ip-cheap.yaml 
kubectl apply -f advertise.yaml 
kubectl get svc  # LoadBalancer services now show an assigned EXTERNAL-IP
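
An end-to-end check with throwaway names: expose an nginx Deployment as a LoadBalancer and confirm MetalLB hands out an address from the pool:

kubectl create deployment lb-test --image=nginx
kubectl expose deployment lb-test --port=80 --type=LoadBalancer
kubectl get svc lb-test # EXTERNAL-IP should come from the ip-cheap pool
curl -I http://$(kubectl get svc lb-test -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
kubectl delete svc,deployment lb-test # clean up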

 
