Architecture Assignment 4 - 20230730

1. Summary of the functions of kube-apiserver, kube-controller-manager, kube-scheduler, kube-proxy and kubelet

kube-apiserver: provides the HTTP REST interface for creating, deleting, updating, querying and watching all kinds of Kubernetes resource objects, such as pods, services and replicationcontrollers. The API Server services REST operations and provides the frontend to the cluster's shared state through which all other components interact.

kube-controller-manager: the management and control center inside the cluster, responsible for managing Nodes, Pod replicas, service endpoints (Endpoints), Namespaces, ServiceAccounts and ResourceQuotas. When a Node goes down unexpectedly, the Controller Manager detects it promptly and runs the automated repair flow, keeping the Pod replicas in the cluster in the desired working state.

kube-scheduler: a control-plane (management) process responsible for scheduling Pods.

kube-proxy: runs on every node, watches the API Server for changes to Service objects, and maintains iptables or IPVS rules to implement the network forwarding behind Kubernetes Service access.

kubelet: the agent component running on every worker node. It watches the pods assigned to its node and performs functions such as: 1. reporting the node's status to the master; 2. accepting instructions and creating the pod's containers via the container runtime; 3. preparing the volumes a pod needs; 4. returning the running status of pods; 5. running container health checks on the node.
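
On a kubeadm-built cluster these components can be inspected directly. A minimal sketch, assuming kubectl access on a control-plane node (commands only, output omitted):

# control-plane components (apiserver, controller-manager, scheduler, etcd) run as static pods
kubectl -n kube-system get pods -o wide
# kube-proxy runs as a DaemonSet with one pod per node
kubectl -n kube-system get ds kube-proxy
# kubelet runs as a systemd service on each host, not as a pod
systemctl status kubelet --no-pager
# aggregated health of the API server and its dependencies
kubectl get --raw='/readyz?verbose'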

2. Deploy a highly available Kubernetes environment on a private cloud with kubeadm

2.1 Lab environment

(image: lab environment overview)

# OS version
root@master1-120:~# lsb_release -a
No LSB modules are available.
Distributor ID:	Ubuntu
Description:	Ubuntu 22.04.2 LTS
Release:	22.04
Codename:	jammy

# IP assignment of each node
master1-120:  192.168.119.120
master2-121:  192.168.119.121

VIP:    192.168.119.200
HA1-keepalived-122:  192.168.119.122
HA2-keepalived-123:  192.168.119.123

node1-124:  192.168.119.124
node2-125:  192.168.119.125

# containerd has been pre-installed on the master and node nodes
root@master1-120:~# containerd -v
containerd github.com/containerd/containerd v1.7.2 0cae528dd6cb557f7201036e9f43420650207b58
root@master1-120:~# runc -v
runc version 1.1.8
commit: v1.1.8-0-g82f18fe0
spec: 1.0.2-dev
go: go1.20.3
libseccomp: 2.5.4
root@master1-120:~# nerdctl -v
nerdctl version 1.4.0

2.2 Procedure

2.2.1 Configure the load balancer

# Configuration on the primary node
root@HA1-keepalived-122:~# apt -y install keepalived
root@HA1-keepalived-122:/etc/keepalived# cat keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    garp_master_delay 10
    smtp_alert
    virtual_router_id 200        # note: the virtual_router_id must not conflict with other VRRP instances
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.119.200 label eth0:1        # define the VIP
    }
}
root@HA1-keepalived-122:/etc/keepalived# systemctl restart keepalived.service
root@HA1-keepalived-122:~# systemctl enable keepalived
root@HA1-keepalived-122:/etc/keepalived# hostname -I
192.168.119.122 192.168.119.200 

# Configuration on the backup node
root@HA2-keepalived-123:~# apt -y install keepalived
# Reuse the primary node's configuration, changing priority and state
root@HA2-keepalived-123:~# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    garp_master_delay 10
    smtp_alert
    virtual_router_id 200
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.119.200 label eth0:1
    }
}
root@HA2-keepalived-123:~# systemctl enable keepalived
root@HA2-keepalived-123:~# systemctl restart keepalived.service

# By default keepalived runs in preemptive mode: after the master node fails and the backup takes over the VIP, the VIP floats back to the master once the master recovers (a non-preemptive variant is sketched below)
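
If fail-back is not desired, keepalived can run in non-preemptive mode. A minimal sketch (both HA nodes keep their different priority values, but both use state BACKUP together with nopreempt, which only takes effect in the BACKUP state):

vrrp_instance VI_1 {
    state BACKUP
    nopreempt        # do not take the VIP back after recovering
    ...              # the rest of the instance stays the same
}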

2.2.2 Install HAProxy as a reverse proxy

# Install HAProxy on the HA1 node; do the same on HA2
root@HA1-keepalived-122:~# apt -y install haproxy
# Add a kernel parameter so the host will listen even on an IP address it does not currently own; do the same on HA2
root@HA1-keepalived-122:~# vim /etc/sysctl.conf
net.ipv4.ip_nonlocal_bind = 1
root@HA1-keepalived-122:~# sysctl -p
# Edit the configuration: at the end of the file, add the listen address plus the IPs and ports of the backend real servers to load-balance; do the same on HA2
root@HA1-keepalived-122:~# vim /etc/haproxy/haproxy.cfg
...
listen k8s-api-6443
  bind 192.168.119.200:6443
  mode tcp
  server server1 192.168.119.120:6443 check inter 3s fall 3 rise 3
  server server2 192.168.119.121:6443 check inter 3s fall 3 rise 3
root@HA1-keepalived-122:~# systemctl restart haproxy.service 
root@HA1-keepalived-122:~# systemctl enable haproxy
root@HA1-keepalived-122:~# ss -ntl | grep 6443
LISTEN 0      4096   192.168.119.200:6443       0.0.0.0:* 
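
Once the control plane has been initialized (section 2.2.5), the chain VIP -> HAProxy -> kube-apiserver can be verified from any host that can reach the VIP. A sketch, assuming the kubeadm default that the health and version endpoints are readable anonymously:

# should return "ok" as long as at least one backend apiserver is healthy
curl -k https://192.168.119.200:6443/healthz
# version information is also served without authentication on default kubeadm clusters
curl -k https://192.168.119.200:6443/version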

2.2.3 Install kubeadm, kubectl and kubelet

# Install on the master and node nodes
apt-get update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add - 
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
# kubectl can be skipped on the node nodes
apt-get install -y kubelet kubeadm kubectl

-- Note: swap must be disabled, otherwise the cluster initialization below will fail (a sketch follows)
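
A minimal way to turn swap off now and keep it off across reboots (run on every master and node host):

swapoff -a                                # disable swap immediately
sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab   # comment out swap entries so it stays off after reboot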

2.2.4 Kernel parameter tuning

# Run on the master and node nodes
root@master1-120:~# cat /etc/sysctl.conf
net.ipv4.ip_forward=1
vm.max_map_count=262144
kernel.pid_max=4194303
fs.file-max=1000000
net.ipv4.tcp_max_tw_buckets=6000
net.netfilter.nf_conntrack_max=2097152
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
root@master1-120:~# sysctl -p
root@master1-120:~# vim /etc/modules-load.d/modules.conf
ip_vs
ip_vs_lc
ip_vs_lblc 
ip_vs_lblcr 
ip_vs_rr 
ip_vs_wrr
ip_vs_sh 
ip_vs_dh 
ip_vs_fo 
ip_vs_nq
ip_vs_sed 
ip_vs_ftp 
ip_vs_sh 
ip_tables 
ip_set
ipt_set 
ipt_rpfilter 
ipt_REJECT 
ipip
xt_set
br_netfilter 
nf_conntrack 
overlay

root@master1-120:~# reboot

# If sysctl -p reports the following errors
sysctl: cannot stat /proc/sys/net/netfilter/nf_conntrack_max: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
# Load the missing modules to resolve it
modprobe nf_conntrack
modprobe br_netfilter

# Check the kernel modules and parameters after rebooting
root@master1-120:~# lsmod | grep br_netfilter
br_netfilter           32768  0
bridge                307200  1 br_netfilter
root@master1-120:~# sysctl -a | grep bridge-nf-call-iptables
net.bridge.bridge-nf-call-iptables = 1
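
The IPVS and conntrack modules listed in modules.conf can be checked the same way:

lsmod | grep -E '^(ip_vs|nf_conntrack)'    # the ip_vs_* and nf_conntrack modules should be listed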

2.2.5 Download the Kubernetes images and initialize the Kubernetes cluster

------ Download the images ------
# Run on all master nodes
root@master1-120:~# vim  images-down.sh
#!/bin/bash
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.27.4
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.27.4                                       
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.27.4
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.27.4
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.7-0
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.10.1
root@master1-120:~# bash images-down.sh

# Run on all node nodes
root@node2-125:~# cat images_node_install.sh 
#!/bin/bash
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.27.4
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.7-0
nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.10.1
root@node2-125:~# bash images_node_install.sh

------ Initialize the cluster from an init config file ------
# Generate the default init configuration file
root@master1-120:~# kubeadm config print init-defaults > kubeadm-init.yaml

# Edit the init configuration file
root@master1-120:~# cat kubeadm-init.yaml 
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.119.120        # IP address this host listens on
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock      # CRI socket file
  imagePullPolicy: IfNotPresent
  name: master1-120               # name of this host; must be unique
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.119.200:6443       # load balancer VIP
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers # image repository
kind: ClusterConfiguration
kubernetesVersion: 1.27.4
networking:
  dnsDomain: cluster.local
  podSubnet: 10.200.0.0/16          # pod subnet
  serviceSubnet: 10.100.0.0/16      # service subnet
scheduler: {}
---               # make kubelet use the systemd cgroup driver
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd
---               # make kube-proxy use IPVS mode
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

# Run the initialization
root@master1-120:~# kubeadm init --config kubeadm-init.yaml
···
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.119.200:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:b44a4d86fb5a82d5c9128bcfda69dee93daed45e0e4462fa87238d6aaa72969f \
	--control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.119.200:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:b44a4d86fb5a82d5c9128bcfda69dee93daed45e0e4462fa87238d6aaa72969f 

# After the initialization succeeds
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

root@master1-120:~# kubectl get node
NAME          STATUS     ROLES           AGE     VERSION
master1-120   NotReady   control-plane   6m56s   v1.27.4
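
The node stays NotReady until a CNI network plugin is deployed (next section). The reason can be confirmed on the master, for example:

# the Ready condition typically reports that the CNI network is not yet ready
kubectl describe node master1-120 | grep -i -A3 'Ready'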

3. Deploy the network component Calico or Flannel (YAML file provided in class)

Reference: https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico-with-kubernetes-api-datastore-50-nodes-or-less

# Deploy Calico
root@master1-120:~# kubectl apply -f calico3.26.1-ipip_ubuntu2204-k8s-1.27.x.yaml
root@master1-120:~# kubectl get pod -A
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-6655b6c4b-2kcq9   1/1     Running   0          2m39s
kube-system   calico-node-rsmct                         1/1     Running   0          2m40s
kube-system   coredns-65dcc469f7-5tgr5                  1/1     Running   0          15m
kube-system   coredns-65dcc469f7-jmz4q                  1/1     Running   0          15m
kube-system   etcd-master1-120                          1/1     Running   0          15m
kube-system   kube-apiserver-master1-120                1/1     Running   0          15m
kube-system   kube-controller-manager-master1-120       1/1     Running   0          15m
kube-system   kube-proxy-sk4rd                          1/1     Running   0          15m
kube-system   kube-scheduler-master1-120                1/1     Running   0          15m

------  Add worker nodes; run on all node nodes  ------
root@node1-124:~# kubeadm join 192.168.119.200:6443 --token abcdef.0123456789abcdef  --discovery-token-ca-cert-hash sha256:b44a4d86fb5a82d5c9128bcfda69dee93daed45e0e4462fa87238d6aaa72969f

# Check that the node status is Ready, which indicates the network is up
root@master1-120:~# kubectl get nodes
NAME          STATUS   ROLES           AGE    VERSION
master1-120   Ready    control-plane   20m    v1.27.4
node1-124     Ready    <none>          101s   v1.27.4
node2-125     Ready    <none>          98s    v1.27.4

------  Add a master node; run on the other master node  ------
# On master1, upload the certificates and generate a new certificate key for adding another control-plane node
root@master1-120:~# kubeadm  init phase upload-certs --upload-certs
W0804 14:30:24.062364   26910 version.go:104] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get "https://dl.k8s.io/release/stable-1.txt": dial tcp [2600:1901:0:26f3::]:443: connect: network is unreachable
W0804 14:30:24.062466   26910 version.go:105] falling back to the local client version: v1.27.4
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
54a6374ae4b3fa378710c75417127e7d02f88fa423a6c72d6e280e8a9eb52176 # the obtained certificate key
# Run on the master2 node
root@master2-121:~# kubeadm join 192.168.119.200:6443 --token abcdef.0123456789abcdef   --discovery-token-ca-cert-hash sha256:b44a4d86fb5a82d5c9128bcfda69dee93daed45e0e4462fa87238d6aaa72969f --control-plane --certificate-key 54a6374ae4b3fa378710c75417127e7d02f88fa423a6c72d6e280e8a9eb52176
root@master1-120:~# kubectl get node
NAME          STATUS   ROLES           AGE     VERSION
master1-120   Ready    control-plane   39m     v1.27.4
master2-121   Ready    control-plane   2m10s   v1.27.4
node1-124     Ready    <none>          21m     v1.27.4
node2-125     Ready    <none>          21m     v1.27.4

** If the token has expired, a new one must be generated **
# List the current tokens
root@master1-120:~# kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                                EXTRA GROUPS
abcdef.0123456789abcdef   23h         2023-08-05T13:54:53Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
koxmfc.pc4ir5wv24jz8zs1   1h          2023-08-04T16:30:24Z   <none>                   Proxy for managing TTL for the kubeadm-certs secret        <none>
# Generate a new token
kubeadm token create
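
kubeadm can also print a complete join command directly; for another control-plane node, combine it with a freshly uploaded certificate key:

# prints a full "kubeadm join ..." command for worker nodes
kubeadm token create --print-join-command
# re-uploads the control-plane certificates and prints a new --certificate-key
kubeadm init phase upload-certs --upload-certs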

4. Summary of writing the nginx YAML file

Notes on the YAML file format

- Case sensitive
- Indentation expresses hierarchy
- Tabs must not be used for indentation; only spaces are allowed
- The number of spaces does not matter, as long as elements at the same level are left-aligned
- "#" introduces a comment; everything from this character to the end of the line is ignored
- Better suited to configuration files than JSON; a client-side syntax check is sketched right after this list
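
A manifest can be checked before it is applied. A minimal sketch, assuming kubectl is already configured:

# parses and validates the file without creating anything in the cluster
kubectl apply -f nginx.yaml --dry-run=client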

A walkthrough of the nginx YAML file

root@master1-120:~# cat nginx.yaml
kind: Deployment  # resource type: the Deployment controller, check with kubectl explain Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1 # API version, kubectl explain Deployment.apiVersion
metadata: # metadata of the Deployment, kubectl explain Deployment.metadata
  labels: # labels, kubectl explain Deployment.metadata.labels
    app: myserver-nginx-deployment-label # label whose key is app
  name: myserver-nginx-deployment # name of the Deployment
  namespace: myserver # namespace it lives in; default is default
spec: # detailed definition of the Deployment, kubectl explain Deployment.spec
  replicas: 1 # number of pod replicas to create
  selector: # label selector
    matchLabels: # labels to match, must be set
      app: myserver-nginx-selector # target label to match
  template: # template describing the pods to create
    metadata: # template metadata
      labels: # template labels, kubectl explain Deployment.spec.template.metadata.labels
        app: myserver-nginx-selector # label, must match kubectl explain Deployment.spec.selector.matchLabels
    spec: # pod specification
      containers: # list of containers in the pod; one or more, containers cannot be added or removed dynamically
      - name: myserver-nginx-container # container name
        image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/nginx:1.22.0-alpine # image address
        #command: ["/apps/tomcat/bin/run_tomcat.sh"] # command or script to run when the container starts
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always # image pull policy
        ports: # list of container ports
        - containerPort: 80 # port number
          protocol: TCP # port protocol
          name: http # port name
        - containerPort: 443
          protocol: TCP
          name: https
        env: # environment variables
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
#        resources: # resource requests and limits
#          limits:
#            cpu: 2
#            memory: 2Gi
#          requests:
#            cpu: 500m
#            memory: 1Gi


---
kind: Service # the resource type is Service
apiVersion: v1 # Service API version
metadata: # Service metadata
  labels: # labels
    app: myserver-nginx-service-label # label content
  name: myserver-nginx-service # name of the Service, resolvable through the cluster DNS
  namespace: myserver # namespace the Service belongs to
spec: # detailed definition of the Service
  type: NodePort # Service type
  ports: # ports to expose
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30004
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30443
  selector: # label selector of the Service, selects the target pods
    app: myserver-nginx-selector # forward traffic to the selected pods; must equal the pod label in Deployment.spec.selector.matchLabels

A label is used to filter and match resources in k8s; it has no functional effect by itself.
A Service forwards traffic to pods, usually matching them by label.
Namespaces isolate the containers of different projects.

Image pull policies
 - Never: never pull the image, no matter whether it exists locally
 - Always: always pull from the registry, no matter whether the image exists locally
 - IfNotPresent: use the local image if it exists; pull from the registry only when it does not
 
CPU limits are divided up by time slices: 1000m means 1 core
It is recommended to set resource requests equal to the resource limits

port:  the port accessed through the Service inside k8s
targetPort:  the actual port the service runs on inside the pod
nodePort:  the port the host (node) listens on
Forwarding flow: client -> nodePort -> port -> targetPort
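
With this Service (deployed in section 5 below), the chain can be exercised as follows; a sketch using the example values that appear later in this post:

# inside the cluster: ClusterIP and port
curl -I http://10.100.5.185:80
# from outside the cluster: any node IP (or the VIP, once HAProxy forwards it) and the nodePort
curl -I http://192.168.119.124:30004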

5. Deploy the web service and access it

root@master1-120:~# mkdir /data/nginx-tomcat-case -p
root@master1-120:~# cd /data/nginx-tomcat-case/
root@master1-120:/data/nginx-tomcat-case# ls
myserver-namespace.yaml  nginx.yaml  tomcat.yaml
# Create a namespace named myserver
root@master1-120:/data/nginx-tomcat-case# cat myserver-namespace.yaml 
apiVersion: v1
kind: Namespace
metadata:
   name: myserver
   labels:
     name: myserver
# Create the resources
root@master1-120:/data/nginx-tomcat-case# kubectl apply -f myserver-namespace.yaml -f nginx.yaml -f tomcat.yaml

# Check the pods
root@master1-120:/data/nginx-tomcat-case# kubectl get pod -n myserver
NAME                                               READY   STATUS         RESTARTS   AGE
myserver-nginx-deployment-548f767db4-x24fl         1/1     Running        0          4m25s
myserver-tomcat-app1-deployment-78974d9679-5njf2   1/1     Running        0          4m25s

# Use the -o wide option to show more detailed information
root@master1-120:~# kubectl get pod -n myserver -o wide
NAME                                               READY   STATUS             RESTARTS   AGE     IP             NODE        NOMINATED NODE   READINESS GATES
myserver-nginx-deployment-548f767db4-x24fl         1/1     Running            0          13m     10.200.169.1   node2-125   <none>           <none>
myserver-tomcat-app1-deployment-78974d9679-5njf2   1/1     Running            0          13m     10.200.56.1    node1-124   <none>           <none>

# Check the Service (svc) port information in k8s
root@master1-120:~# kubectl get svc -n myserver -o wide
NAME                           TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE   SELECTOR
myserver-nginx-service         NodePort   10.100.5.185    <none>        80:30004/TCP,443:30443/TCP   16m   app=myserver-nginx-selector
myserver-tomcat-app1-service   NodePort   10.100.14.214   <none>        80:30005/TCP                 16m   app=myserver-tomcat-app1-selector

# Check the pod endpoint information
root@master1-120:~# kubectl get ep -n myserver -o wide
NAME                           ENDPOINTS                          AGE
myserver-nginx-service         10.200.169.1:443,10.200.169.1:80   18m
myserver-tomcat-app1-service   10.200.56.1:8080                   18m

(images: screenshots of accessing the nginx and tomcat services)

6. Configure the load balancer to forward to the backend services

# Edit on the HA nodes; perform the same operation on both
root@HA1-keepalived-122:~# vim /etc/haproxy/haproxy.cfg

# Add the listen address and the backend forwarding addresses and ports to the configuration file
listen k8s-pod-server
  bind 192.168.119.200:30004
  mode tcp
  server server1 192.168.119.124:30004 check inter 3s fall 3 rise 3
  server server2 192.168.119.125:30004 check inter 3s fall 3 rise 3
root@HA1-keepalived-122:~# systemctl restart haproxy.service
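
A quick check from any machine that can reach the VIP; a sketch (the nginx Service from section 5 sits behind port 30004):

curl -I http://192.168.119.200:30004    # expect an HTTP response served by the nginx pod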

# The test result is shown below

(image: test result screenshot)
