k8s、jumpserver

一、安装jumpserver

官网:https://docs.jumpserver.org/zh/master/install/setup_by_fast/

安装docker

#!/bin/bash
# Install Docker CE on Ubuntu from the TUNA mirror, then enable cgroup
# memory/swap accounting on the kernel command line (takes effect after
# the final reboot).  Run as root.
set -euo pipefail

apt update

# -y keeps the run non-interactive; the original would hang waiting for
# the install confirmation prompt when executed as a script.
apt install -y \
    ca-certificates \
    curl \
    gnupg \
    lsb-release

mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg

echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/ubuntu \
  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

apt update
apt -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin

# Add cgroup memory accounting flags required by container runtimes.
sed -i '/GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"/c GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0 cgroup_enable=memory swapaccount=1"' /etc/default/grub
update-grub
reboot

查看docker

[root@jumpserver ~]#docker --version
Docker version 20.10.17, build 100c701

下载镜像

docker pull mysql:5.6
docker pull redis:6.0
docker pull jumpserver/jms_all:1.5.9

查看镜像

[root@jumpserver ~]#docker images
REPOSITORY   TAG       IMAGE ID       CREATED        SIZE
redis                6.0       6487e6a1b120   2 weeks ago    112MB
mysql                5.6       dd3b2a5dcb48   8 months ago   303MB
jumpserver/jms_all   1.5.9     6573e4b624c9   2 years ago    1.48GB

部署mysql

  1. 准备配置文件

    创建目录

    mkdir -p /data/mysql
    mkdir -p /data/etc/mysql/conf.d
    mkdir -p /data/etc/mysql/mysql.conf.d
    

    mysql.cnf配置文件

    [root@jumpserver ~]#cat /data/etc/mysql/conf.d/mysql.cnf
    [mysql]
    default-character-set=utf8
    

    mysqld.cnf配置文件

    [root@jumpserver ~]#cat /data/etc/mysql/mysql.conf.d/mysqld.cnf
    [mysqld]
    pid-file	= /var/run/mysqld/mysqld.pid
    socket		= /var/run/mysqld/mysqld.sock
    datadir		= /var/lib/mysql
    log-error	= /var/log/mysql/error.log
    symbolic-links=0
    character-set-server=utf8
    

    目录结构如下

    [root@jumpserver ~]#tree /data
    /data
    ├── etc
    │   └── mysql
    │       ├── conf.d
    │       │   └── mysql.cnf
    │       └── mysql.conf.d
    │           └── mysqld.cnf
    └── mysql
    
    5 directories, 2 files
    
  2. 创建mysql容器

    # Run MySQL 5.6 in a container: publish 3306, bind-mount the two
    # config files prepared above, and persist the datadir under
    # /data/mysql on the host.  Root password comes from
    # MYSQL_ROOT_PASSWORD (demo value - do not reuse in production).
    docker run -it -d -p 3306:3306 -v /data/etc/mysql/conf.d/mysql.cnf:/etc/mysql/conf.d/mysql.cnf \
    -v /data/etc/mysql/mysql.conf.d/mysqld.cnf:/etc/mysql/mysql.conf.d/mysqld.cnf \
    -v /data/mysql:/var/lib/mysql -e MYSQL_ROOT_PASSWORD='123456' mysql:5.6
    
  3. 验证数据库连接

    # 宿主机连接,先安装apt install mysql-client
    [root@jumpserver mysql]#mysql -uroot -p123456 -h10.0.0.12
    mysql: [Warning] Using a password on the command line interface can be insecure.
    Welcome to the MySQL monitor.  Commands end with ; or \g.
    Your MySQL connection id is 2
    Server version: 5.6.51 MySQL Community Server (GPL)
    
    Copyright (c) 2000, 2022, Oracle and/or its affiliates.
    
    Oracle is a registered trademark of Oracle Corporation and/or its
    affiliates. Other names may be trademarks of their respective
    owners.
    
    Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
    
    mysql> show databases;
    +--------------------+
    | Database           |
    +--------------------+
    | information_schema |
    | mysql              |
    | performance_schema |
    +--------------------+
    3 rows in set (0.01 sec)
    
  4. 验证数据库编码(utf8)

    mysql> show variables like "%character%";
    +--------------------------+----------------------------+
    | Variable_name            | Value                      |
    +--------------------------+----------------------------+
    | character_set_client     | utf8                       |
    | character_set_connection | utf8                       |
    | character_set_database   | utf8                       |
    | character_set_filesystem | binary                     |
    | character_set_results    | utf8                       |
    | character_set_server     | utf8                       |
    | character_set_system     | utf8                       |
    | character_sets_dir       | /usr/share/mysql/charsets/ |
    +--------------------------+----------------------------+
    8 rows in set (0.01 sec)
    
    mysql> show variables like "%collation%";
    +----------------------+-----------------+
    | Variable_name        | Value           |
    +----------------------+-----------------+
    | collation_connection | utf8_general_ci |
    | collation_database   | utf8_general_ci |
    | collation_server     | utf8_general_ci |
    +----------------------+-----------------+
    3 rows in set (0.00 sec)
    
  5. 创建jumpserver数据库,并授权jumpserver用户

    注意:授权密码不能为纯数字

    [root@jumpserver data]#mysql -uroot -p123456 -h10.0.0.12
    
    mysql> create database jumpserver default charset 'utf8';
    Query OK, 1 row affected (0.00 sec)
    
    mysql> grant all on jumpserver.* to 'jumpserver'@'%' identified by 'jp123456';
    Query OK, 0 rows affected (0.00 sec)
    
  6. 验证jumpserver用户权限

    [root@jumpserver mysql.conf.d]#mysql -ujumpserver -pjp123456 -h10.0.0.12
    mysql: [Warning] Using a password on the command line interface can be insecure.
    Welcome to the MySQL monitor.  Commands end with ; or \g.
    Your MySQL connection id is 2
    Server version: 5.6.51 MySQL Community Server (GPL)
    
    Copyright (c) 2000, 2022, Oracle and/or its affiliates.
    
    Oracle is a registered trademark of Oracle Corporation and/or its
    affiliates. Other names may be trademarks of their respective
    owners.
    
    Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
    
    mysql> show databases;
    +--------------------+
    | Database           |
    +--------------------+
    | information_schema |
    | jumpserver         |
    +--------------------+
    2 rows in set (0.00 sec)
    

部署redis

创建redis容器

[root@jumpserver mysql]#docker run -d -it -p 6379:6379 redis:6.0
331baf1779e94bc110f4d15fad2e0bce743ff17c051862216bc1d629ba47177c

验证redis访问

[root@jumpserver mysql]#redis-cli -h 10.0.0.12
10.0.0.12:6379> info
# Server
redis_version:6.0.16
redis_git_sha1:00000000
redis_git_dirty:0
redis_build_id:b9d515b0d5e963de
redis_mode:standalone
os:Linux 5.4.0-122-generic x86_64
arch_bits:64

部署jumpserver

生成随机加密密钥和初始化token

# Generate a 50-char random SECRET_KEY.  Use '>' instead of the original
# '>>': appending on a re-run would concatenate keys and corrupt the
# file.  Reading /dev/urandom via redirection avoids the useless 'cat'.
tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 50 > /data/secret_key
j3GFsi5Qwcn5Jj4rugfwvdBc56B6QMkyBuncwRP72XviP7mxTU

# Generate a 16-char BOOTSTRAP_TOKEN.  Written under /data like the
# secret key (the original wrote to a CWD-relative ./bootstrap_token),
# and with '>' so a re-run replaces instead of appending.
tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 16 > /data/bootstrap_token
kiBEmIVpWrFrlY3q

创建jumpserver容器

# Start the all-in-one JumpServer 1.5.9 container.
#   - /data/jumpserver holds uploaded media/session data on the host
#   - port 80 = web UI, port 2222 = user SSH access
#   - SECRET_KEY / BOOTSTRAP_TOKEN are the values generated above
#   - DB_* / REDIS_* point at the MySQL and Redis containers on 10.0.0.12
#   - REDIS_PASSWORD is intentionally empty: the Redis container was
#     started without authentication
docker run -d --name jms_all \
-v /data/jumpserver:/opt/jumpserver/data/media \
-p 80:80 \
-p 2222:2222 \
-e SECRET_KEY=j3GFsi5Qwcn5Jj4rugfwvdBc56B6QMkyBuncwRP72XviP7mxTU \
-e BOOTSTRAP_TOKEN=kiBEmIVpWrFrlY3q \
-e DB_HOST=10.0.0.12 \
-e DB_PORT=3306 \
-e DB_USER='jumpserver' \
-e DB_PASSWORD='jp123456' \
-e DB_NAME=jumpserver \
-e REDIS_HOST=10.0.0.12 \
-e REDIS_PORT=6379 \
-e REDIS_PASSWORD= \
jumpserver/jms_all:1.5.9

启动后需等待一段时间

Starting guacd: guacd[91]: INFO:	Guacamole proxy daemon (guacd) version 1.0.0 started
SUCCESS
Tomcat started.
Jumpserver ALL 1.5.9
官网 http://www.jumpserver.org
文档 http://docs.jumpserver.org
有问题请参考 http://docs.jumpserver.org/zh/docs/faq.html

进入容器命令 docker exec -it jms_all /bin/bash

查看数据库内容

[root@jumpserver ~]#mysql -ujumpserver -pjp123456 -h10.0.0.12
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 24
Server version: 5.6.51 MySQL Community Server (GPL)

Copyright (c) 2000, 2022, Oracle and/or its affiliates.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| jumpserver         |
+--------------------+
2 rows in set (0.06 sec)

mysql> use jumpserver;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
mysql> show tables;
+----------------------------------------------+
| Tables_in_jumpserver                         |
+----------------------------------------------+
| applications_databaseapp                     |
| applications_remoteapp                       |
| assets_adminuser                             |
| assets_asset                                 |
| assets_asset_labels                          |
| assets_asset_nodes                           |
| assets_assetgroup                            |
| assets_authbook                              |
| assets_cluster                               |

登录web界面

默认用户名:admin

密码:admin

进入首页

二、配置用户tom,并添加一个相应的服务器资源

创建规则

  1. 创建用户

    用户管理--用户列表

    添加登录jumpserver用户信息

  2. 创建用户组

    用户管理--用户组

    添加组信息,将tom加入该组中

  3. 创建管理用户

    登录服务器的管理员账户

    添加管理员账户密码

  4. 创建系统用户

    该系统用户是用于登录服务器的普通账户

    资产管理--系统用户

    添加系统用户信息

    添加系统用户

    资产管理--系统用户,选择www用户--资产列表,点击推送

    执行推送情况

  5. 创建资产

    资产包括服务器、虚拟机等

    资产管理--资产列表,可新建web节点

    添加服务器信息,可加入web节点

    选中相应主机名,查看资产信息

    点击测试

    查看测试结果

  6. 创建授权规则

    权限管理--资产授权

    添加授权信息

    将用户组、资产节点、系统用户进行关联

登录测试

  1. jumpserver普通用户登录

    输入tom用户名、密码

  2. 查看资产

    我的资产--web节点--服务器

    从动作进入服务器终端

    或者点击Web终端

  3. 远程登录服务器

    执行命令正常

三、使用命令过滤器禁止tom用户在服务器上使用 reboot、init、rm -rf、poweroff 这几个命令

  1. 创建过滤器

    资产管理--命令过滤

新建过滤器名称

<img src="https://img2022.cnblogs.com/blog/2621461/202208/2621461-20220825210807192-729271614.png" width="50%" height="50%" />


点击过滤器名称

<img src="https://img2022.cnblogs.com/blog/2621461/202208/2621461-20220825210814463-1057751346.png" width="50%" height="50%" />


创建规则

<img src="https://img2022.cnblogs.com/blog/2621461/202208/2621461-20220825210843768-62335829.png" width="50%" height="50%" />
​

配置过滤器具体规则

<img src="https://img2022.cnblogs.com/blog/2621461/202208/2621461-20220825210835210-274524939.png" width="50%" height="50%" />

​
  2. 使用过滤器

    资产管理--系统用户

    选择系统用户,点击更新

    新添加创建好的命令过滤器

  3. 验证命令过滤器生效

    无法执行reboot、poweroff、rm -rf等命令,其余命令执行正常

四、部署k8s指定版本1.20高可用集群

架构

安装高可用反向代理

基于haproxy+keepalived实现

安装keepalived

节点1(10.0.0.51)

安装keepalived

apt install keepalived -y

keepalived配置文件

[root@haproxy1 keepalived]#vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id 10.0.0.51
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 224.0.0.18
}

# 脚本需放在调用之前,先定义好
vrrp_script chk_haproxy {
    script "/etc/keepalived/chk_haproxy.sh"
    interval 1
    timeout 2
    weight -30
    fall 3
    rise 5
}

vrrp_instance haproxy {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {                                                                                                                                                                           
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.53/24 dev eth0 label eth0:1
    }
  
    track_script {
      chk_haproxy
    }

}

chk_haproxy脚本

[root@haproxy1 keepalived]#vim /etc/keepalived/chk_haproxy.sh
#!/bin/bash
# keepalived health check: 'killall -0' sends signal 0, which only tests
# whether a haproxy process exists.  Its exit status (0 = running,
# non-zero = not) becomes the script's status, which the vrrp_script
# block uses to decide whether to lower this node's priority.
/usr/bin/killall -0 haproxy

#添加执行权限
[root@haproxy1 keepalived]# chmod a+x /etc/keepalived/chk_haproxy.sh

重启服务

systemctl restart keepalived

查看VIP


[root@haproxy1 keepalived]#ifconfig  -a
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.51  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::20c:29ff:fef9:b4ed  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:f9:b4:ed  txqueuelen 1000  (Ethernet)
        RX packets 20377  bytes 23085737 (23.0 MB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 11683  bytes 1239148 (1.2 MB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.53  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 00:0c:29:f9:b4:ed  txqueuelen 1000  (Ethernet)

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

节点2(10.0.0.52)

安装keepalived

[root@haproxy2 ~]#apt install keepalived -y

keepalived配置文件

[root@haproxy2 ~]#vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id 10.0.0.52			#与节点1不同,为本机IP
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 224.0.0.18
}

# The health-check script must be defined before it is referenced
# (it was created on this node too, but the original config never used it)
vrrp_script chk_haproxy {
    script "/etc/keepalived/chk_haproxy.sh"
    interval 1
    timeout 2
    weight -30
    fall 3
    rise 5
}

vrrp_instance haproxy {
    state BACKUP			#BACKUP on node 2
    interface eth0
    virtual_router_id 51
    priority 80                         #lower priority than node 1 (100)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.53/24 dev eth0 label eth0:1	#must match node 1's VIP (the original 10.0.0.10 contradicts the ifconfig output below)
    }

    track_script {
      chk_haproxy
    }
}

chk_haproxy脚本

[root@haproxy2 ~]#vim /etc/keepalived/chk_haproxy.sh
#!/bin/bash
# keepalived health check (node 2): 'killall -0' sends signal 0, testing
# only whether a haproxy process exists; the exit status (0 = running)
# is consumed by keepalived's vrrp_script to adjust priority.
/usr/bin/killall -0 haproxy

#添加执行权限
[root@haproxy2 ~]#chmod a+x /etc/keepalived/chk_haproxy.sh

重启服务

[root@haproxy2 ~]#systemctl restart keepalived

查看VIP

注意:需先停止节点1的keepalived服务

[root@haproxy2 ~]#ifconfig 
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.52  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::20c:29ff:fe3d:976d  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:3d:97:6d  txqueuelen 1000  (Ethernet)
        RX packets 3371  bytes 3424676 (3.4 MB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 2136  bytes 253767 (253.7 KB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.53  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 00:0c:29:3d:97:6d  txqueuelen 1000  (Ethernet)

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

安装haproxy

节点1(10.0.0.51)

安装

[root@haproxy1 ~]#apt install haproxy -y

修改配置文件

[root@haproxy1 ~]#vim /etc/haproxy/haproxy.cfg
....
#末尾添加如下配置
listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri   /haproxy-status
    stats auth  haadmin:123456

listen k8s-6443
    bind 10.0.0.53:6443
    mode tcp
    balance roundrobin
    server k8s-master1 10.0.0.54:6443 check inter 3s fall 3 rise 5
    server k8s-master2 10.0.0.55:6443 check inter 3s fall 3 rise 5
    server k8s-master3 10.0.0.56:6443 check inter 3s fall 3 rise 5

启用内核参数

#ARP response restriction: arp_ignore
#0: default - respond using any locally configured address on any interface
#1: respond only when the target IP is configured on the interface that received the request
echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore

#ARP announcement restriction: arp_announce
#0: default - announce all local addresses on every interface's network
#1: try to avoid announcing addresses to networks the interface is not directly connected to
#2: always avoid announcing addresses that do not belong to the interface's network
echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce

# NOTE(review): /proc writes are lost on reboot, and appending to
# sysctl.conf on every run duplicates lines - a /etc/sysctl.d/ drop-in
# would be more robust.
echo "net.ipv4.ip_nonlocal_bind = 1" >> /etc/sysctl.conf	#allow haproxy to bind the VIP even while it is held by the peer node
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf		#enable IPv4 packet forwarding

执行sysctl -p命令,修改内核生效

重启服务

[root@haproxy1 ~]#systemctl restart haproxy

查看IP

[root@haproxy1 ~]#ifconfig 
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.51  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::20c:29ff:fef9:b4ed  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:f9:b4:ed  txqueuelen 1000  (Ethernet)
        RX packets 24898  bytes 25024044 (25.0 MB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 15962  bytes 1611934 (1.6 MB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.53  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 00:0c:29:f9:b4:ed  txqueuelen 1000  (Ethernet)

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

查看端口,haproxy绑定VIP 6443端口正常

[root@haproxy1 ~]#netstat -ntlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name  
tcp        0      0 10.0.0.53:6443          0.0.0.0:*               LISTEN      7783/haproxy  
tcp        0      0 0.0.0.0:9999            0.0.0.0:*               LISTEN      7783/haproxy  
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/init    
tcp        0      0 0.0.0.0:46961           0.0.0.0:*               LISTEN      824/rpc.mountd  
tcp        0      0 127.0.0.53:53           0.0.0.0:*               LISTEN      822/systemd-resolve 
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      915/sshd: /usr/sbin 
tcp        0      0 127.0.0.1:6010          0.0.0.0:*               LISTEN      1493/sshd: root@pts 
tcp        0      0 0.0.0.0:2049            0.0.0.0:*               LISTEN      -         
tcp        0      0 0.0.0.0:35845           0.0.0.0:*               LISTEN      824/rpc.mountd  
tcp        0      0 0.0.0.0:37381           0.0.0.0:*               LISTEN      824/rpc.mountd  
tcp        0      0 0.0.0.0:44905           0.0.0.0:*               LISTEN      -         
tcp6       0      0 :::37675                :::*                    LISTEN      -         
tcp6       0      0 :::111                  :::*                    LISTEN      1/init    
tcp6       0      0 :::22                   :::*                    LISTEN      915/sshd: /usr/sbin 
tcp6       0      0 ::1:6010                :::*                    LISTEN      1493/sshd: root@pts 
tcp6       0      0 :::47389                :::*                    LISTEN      824/rpc.mountd  
tcp6       0      0 :::42335                :::*                    LISTEN      824/rpc.mountd  
tcp6       0      0 :::54751                :::*                    LISTEN      824/rpc.mountd  
tcp6       0      0 :::2049                 :::*                    LISTEN      -   

节点2(10.0.0.52)

安装

[root@haproxy2 ~]#apt install haproxy -y

修改配置文件

[root@haproxy2 ~]#vim /etc/haproxy/haproxy.cfg
....
#末尾添加如下配置
listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri   /haproxy-status
    stats auth  haadmin:123456

listen k8s-6443			#新增以下配置
    bind 10.0.0.53:6443
    mode tcp
    balance roundrobin
    server k8s-master1 10.0.0.54:6443 check inter 3s fall 3 rise 5
    server k8s-master2 10.0.0.55:6443 check inter 3s fall 3 rise 5
    server k8s-master3 10.0.0.56:6443 check inter 3s fall 3 rise 5

启用内核参数

#ARP response restriction: arp_ignore
#0: default - respond using any locally configured address on any interface
#1: respond only when the target IP is configured on the interface that received the request
echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore

#ARP announcement restriction: arp_announce
#0: default - announce all local addresses on every interface's network
#1: try to avoid announcing addresses to networks the interface is not directly connected to
#2: always avoid announcing addresses that do not belong to the interface's network
echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce

# NOTE(review): /proc writes are lost on reboot, and appending to
# sysctl.conf on every run duplicates lines - a /etc/sysctl.d/ drop-in
# would be more robust.
echo "net.ipv4.ip_nonlocal_bind = 1" >> /etc/sysctl.conf	#allow haproxy to bind the VIP even while it is held by the peer node
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf		#enable IPv4 packet forwarding

重启服务

[root@haproxy2 ~]#systemctl restart haproxy

查看IP

[root@haproxy2 ~]#ifconfig 
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.52  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::20c:29ff:fe3d:976d  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:3d:97:6d  txqueuelen 1000  (Ethernet)
        RX packets 7089  bytes 5315138 (5.3 MB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 6911  bytes 687192 (687.1 KB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

查看端口

[root@haproxy2 ~]#netstat -ntlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name  
tcp        0      0 10.0.0.53:6443          0.0.0.0:*               LISTEN      7999/haproxy  
tcp        0      0 0.0.0.0:9999            0.0.0.0:*               LISTEN      7999/haproxy  
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/init    
tcp        0      0 0.0.0.0:46547           0.0.0.0:*               LISTEN      -         
tcp        0      0 127.0.0.53:53           0.0.0.0:*               LISTEN      812/systemd-resolve 
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      906/sshd: /usr/sbin 
tcp        0      0 127.0.0.1:6010          0.0.0.0:*               LISTEN      1451/sshd: root@pts 
tcp        0      0 0.0.0.0:45693           0.0.0.0:*               LISTEN      814/rpc.mountd  
tcp        0      0 0.0.0.0:60991           0.0.0.0:*               LISTEN      814/rpc.mountd  
tcp        0      0 0.0.0.0:40063           0.0.0.0:*               LISTEN      814/rpc.mountd  
tcp        0      0 0.0.0.0:2049            0.0.0.0:*               LISTEN      -         
tcp6       0      0 :::111                  :::*                    LISTEN      1/init    
tcp6       0      0 :::44277                :::*                    LISTEN      814/rpc.mountd  
tcp6       0      0 :::22                   :::*                    LISTEN      906/sshd: /usr/sbin 
tcp6       0      0 ::1:6010                :::*                    LISTEN      1451/sshd: root@pts 
tcp6       0      0 :::35711                :::*                    LISTEN      -         
tcp6       0      0 :::2049                 :::*                    LISTEN      -         
tcp6       0      0 :::38787                :::*                    LISTEN      814/rpc.mountd  
tcp6       0      0 :::58725                :::*                    LISTEN      814/rpc.mountd  

查看haproxy监控状态页

访问10.0.0.51/10.0.0.52和VIP10.0.0.53

安装harbor镜像仓库

安装方法见

第五节-安装docker镜像仓库harbor,并实现高可用

登录web页面

新建test项目

上传镜像

[root@harbor harbor]#docker push 10.0.0.60/test/alpine:v1
The push refers to repository [10.0.0.60/test/alpine]
994393dc58e7: Pushed 
v1: digest: sha256:1304f174557314a7ed9eddb4eab12fed12cb0cd9809e4c28f29af86979a3c870 size: 528

所有节点安装kubeadm组件

包括master节点和node节点

版本选择:https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.20.md#v1205

安装docker

#! /bin/bash
# Install a pinned Docker CE 19.03.15 on every k8s node (masters and
# workers) from the Aliyun mirror.
apt update

# Prerequisite packages
apt install -y \
    apt-transport-https \
    ca-certificates \
    curl \
    gnupg \
    lsb-release \
    software-properties-common

# Import the repository GPG key.
# NOTE(review): apt-key is deprecated on newer Ubuntu releases; works on
# this release, but a signed-by= keyring is the modern approach.
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -

sudo add-apt-repository "deb [arch=$(dpkg --print-architecture)] http://mirrors.aliyun.com/docker-ce/linux/ubuntu \
  $(lsb_release -cs) stable"

apt update
# List candidate versions: apt-cache madison docker-ce docker-ce-cli
# Pin docker-ce and docker-ce-cli to the same 19.03.15 build for the
# current Ubuntu codename.
apt -y install docker-ce=5:19.03.15~3-0~ubuntu-$(lsb_release -cs) \
docker-ce-cli=5:19.03.15~3-0~ubuntu-$(lsb_release -cs)

系统优化

master节点CPU数量至少2

# --- Node system preparation (run on every master and worker) ---
# Masters need at least 2 CPUs to pass kubeadm preflight checks.

# Disable firewalld if present; tolerate absence (Ubuntu normally ships
# ufw, so the original 'disable && stop' chain would simply error out).
systemctl disable firewalld 2>/dev/null || true
systemctl stop firewalld 2>/dev/null || true

# Map this host's primary IP to its hostname in /etc/hosts.
# $( ) replaces the legacy backticks of the original.
cat >> /etc/hosts <<EOF
$(hostname -I | awk '{print $1}') $(hostname)
EOF

# Kernel module + sysctls so bridged pod traffic traverses iptables.
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sudo sysctl --system

# Align docker's cgroup driver with kubelet's default (systemd).
# docker defaults to cgroupfs (see 'docker info'); kubeadm defaults
# kubelet to systemd unless cgroupDriver is set in KubeletConfiguration.
mkdir -p /etc/docker   # guard: directory may not exist on a fresh host
cat <<EOF >/etc/docker/daemon.json
{
    "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

# Disable swap (kubelet refuses to start with swap enabled).
sed -ri 's/(^[^#]*swap)/#\1/' /etc/fstab
echo 'swapoff -a' >> /etc/profile
swapoff -a

# Enable cgroup memory/swap accounting on the kernel command line.
sed -i '/GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"/c GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0 cgroup_enable=memory swapaccount=1"' /etc/default/grub
update-grub
reboot

设置国内阿里云镜像源

# Configure the Kubernetes apt repository (Aliyun mirror).
apt update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add - 
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
# Alternative mirror - keep only ONE entry active; the original listed the
# same suite from two mirrors, which invites duplicate-source warnings and
# version skew between them:
# deb https://mirrors.tuna.tsinghua.edu.cn/kubernetes/apt kubernetes-xenial main
EOF

安装kubelet、kubeadm、kubectl

apt update
# List candidate versions if needed:
# apt-cache madison kubeadm

# Install the pinned 1.20.5 kubeadm/kubelet/kubectl toolchain.
apt install -y kubeadm=1.20.5-00 kubelet=1.20.5-00 kubectl=1.20.5-00

# Hold the packages so a routine 'apt upgrade' cannot silently move the
# cluster off the pinned version (standard kubeadm install guidance).
apt-mark hold kubeadm kubelet kubectl

# 查看版本
[root@k8s-master1 ~]#kubeadm version -o json
{
  "clientVersion": {
    "major": "1",
    "minor": "20",
    "gitVersion": "v1.20.5",
    "gitCommit": "6b1d87acf3c8253c123756b9e61dac642678305f",
    "gitTreeState": "clean",
    "buildDate": "2021-03-18T01:08:27Z",
    "goVersion": "go1.15.8",
    "compiler": "gc",
    "platform": "linux/amd64"
  }
}

目前kubelet启动报错

[root@k8s-master1 ~]#systemctl status kubelet.service 
● kubelet.service - kubelet: The Kubernetes Node Agent
     Loaded: loaded (/lib/systemd/system/kubelet.service; enabled; vendor preset: enabled)
    Drop-In: /etc/systemd/system/kubelet.service.d
             └─10-kubeadm.conf
     Active: activating (auto-restart) (Result: exit-code) since Wed 2022-08-24 23:27:21 CST; 456ms ago
       Docs: https://kubernetes.io/docs/home/
    Process: 3707 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=255/EXCEPTION)
   Main PID: 3707 (code=exited, status=255/EXCEPTION)

Aug 24 23:27:21 k8s-master1 systemd[1]: kubelet.service: Main process exited, code=exited, status=255/EXCEPTION
Aug 24 23:27:21 k8s-master1 systemd[1]: kubelet.service: Failed with result 'exit-code'.

kubeadm init集群初始化

官网:https://kubernetes.io/zh-cn/docs/reference/setup-tools/kubeadm/kubeadm-init/

准备镜像

# 查看版本
[root@k8s-master1 ~]#kubeadm version -o json
{
  "clientVersion": {
    "major": "1",
    "minor": "20",
    "gitVersion": "v1.20.5",
    "gitCommit": "6b1d87acf3c8253c123756b9e61dac642678305f",
    "gitTreeState": "clean",
    "buildDate": "2021-03-18T01:08:27Z",
    "goVersion": "go1.15.8",
    "compiler": "gc",
    "platform": "linux/amd64"
  }
}

# 查看当前版本所需镜像
[root@k8s-master1 ~]#kubeadm config images list --kubernetes-version v1.20.5
k8s.gcr.io/kube-apiserver:v1.20.5
k8s.gcr.io/kube-controller-manager:v1.20.5
k8s.gcr.io/kube-scheduler:v1.20.5
k8s.gcr.io/kube-proxy:v1.20.5
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0

下载master节点镜像

# 先提前下载特定的 Kubernetes 版本镜像,减少安装等待时间,镜像默认使用Google镜像仓库,国内无法直接下载
# 可使用阿里云镜像仓库把镜像提前下载下来,可以避免后期因镜像下载异常而导致k8s部署异常
[root@k8s-master1 ~]#kubeadm config images pull --kubernetes-version v1.20.5 \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers

查看镜像

[root@k8s-master1 ~]#docker images
REPOSITORY                                                                    TAG        IMAGE ID       CREATED         SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy                v1.20.5    5384b1650507   17 months ago   118MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler            v1.20.5    8d13f1db8bfb   17 months ago   47.3MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver            v1.20.5    d7e24aeb3b10   17 months ago   122MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager   v1.20.5    6f0c3da8c99e   17 months ago   116MB
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd                      3.4.13-0   0369cf4303ff   24 months ago   253MB
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns                   1.7.0      bfe3a36ebd25   2 years ago     45.2MB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause                     3.2        80d28bedfe5d   2 years ago     683kB

高可用master初始化

基于keepalived实现高可用VIP,通过haproxy实现kube-apiserver的反向代理,将kube-apiserver的管理请求转发至多台k8s-master以实现管理端高可用。

注意:选择master1进行集群初始化,集群初始化只要初始化一次

kubeadm --init参数说明

--apiserver-advertise-address string API 服务器所公布的其正在监听的 IP 地址。如果未设置,则使用默认网络接口。
--apiserver-bind-port int32 默认值:6443 API 服务器绑定的端口。
--apiserver-cert-extra-sans strings 用于 API Server 服务证书的可选附加主题备用名称(SAN)。可以是 IP 地址和 DNS 名称。
--cert-dir string 默认值:"/etc/kubernetes/pki" 保存和存储证书的路径。
--certificate-key string 用于加密 kubeadm-certs Secret 中的控制平面证书的密钥。
--config string kubeadm 配置文件的路径。
--control-plane-endpoint string 为控制平面指定一个稳定的 IP 地址或 DNS 名称。
--cri-socket string 要连接的 CRI 套接字的路径。如果为空,则 kubeadm 将尝试自动检测此值;仅当安装了多个 CRI 或具有非标准 CRI 插槽时,才使用此选项。
--dry-run 不要应用任何更改;只是输出将要执行的操作。
--feature-gates string 一组用来描述各种功能特性的键值(key=value)对。选项是:PublicKeysECDSA=true|false (ALPHA - 默认值=false)RootlessControlPlane=true|false (ALPHA - 默认值=false)UnversionedKubeletConfigMap=true|false (BETA - 默认值=true)
-h, --help init 操作的帮助命令
--ignore-preflight-errors strings 错误将显示为警告的检查列表;例如:'IsPrivilegedUser,Swap'。取值为 'all' 时将忽略检查中的所有错误。
--image-repository string 默认值:"k8s.gcr.io" 选择用于拉取控制平面镜像的容器仓库
--kubernetes-version string 默认值:"stable-1" 为控制平面选择一个特定的 Kubernetes 版本。
--node-name string 指定节点的名称。
--patches string 它包含名为 "target[suffix][+patchtype].extension" 的文件的目录的路径。 例如,"kube-apiserver0+merge.yaml"或仅仅是 "etcd.json"。 "target" 可以是 "kube-apiserver"、"kube-controller-manager"、"kube-scheduler"、"etcd" 之一。 "patchtype" 可以是 "strategic"、"merge" 或者 "json" 之一, 并且它们与 kubectl 支持的补丁格式相同。 默认的 "patchtype" 是 "strategic"。 "extension" 必须是"json" 或"yaml"。 "suffix" 是一个可选字符串,可用于确定首先按字母顺序应用哪些补丁。
--pod-network-cidr string 指明 pod 网络可以使用的 IP 地址段。如果设置了这个参数,控制平面将会为每一个节点自动分配 CIDRs。
--service-cidr string 默认值:"10.96.0.0/12" 为服务的虚拟 IP 地址另外指定 IP 地址段
--service-dns-domain string 默认值:"cluster.local" 为服务另外指定域名,例如:"myorg.internal"。
--skip-certificate-key-print 不要打印用于加密控制平面证书的密钥。
--skip-phases strings 要跳过的阶段列表
--skip-token-print 跳过打印 'kubeadm init' 生成的默认引导令牌。
--token string 这个令牌用于建立控制平面节点与工作节点间的双向通信。格式为 [a-z0-9]{6}.[a-z0-9]{16} - 示例:abcdef.0123456789abcdef
--token-ttl duration 默认值:24h0m0s 令牌被自动删除之前的持续时间(例如 1 s,2 m,3 h)。如果设置为 '0',则令牌将永不过期
--upload-certs 将控制平面证书上传到 kubeadm-certs Secret。

初始化命令

# Initialize the first control-plane node (run ONCE, on master1 only).
# --control-plane-endpoint points at the keepalived VIP (10.0.0.53),
# fronted by haproxy, so the API endpoint survives a master failure.
kubeadm init --apiserver-advertise-address 10.0.0.54 \
--apiserver-bind-port 6443 \
--control-plane-endpoint 10.0.0.53 \
--pod-network-cidr 172.16.0.0/16 \
--service-cidr 10.10.0.0/16 \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version v1.20.5 \
--ignore-preflight-errors swap \
--service-dns-domain=shi.local
# Optionally append --upload-certs above, or run
# 'kubeadm init phase upload-certs --upload-certs' afterwards to share the
# control-plane certificates with the other masters.
# (The original left a trailing '\' before this comment line, silently
# continuing the command into the comment - fragile when edited.)

初始化完成输出信息如下:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 10.0.0.53:6443 --token 4xe09u.grs65k149mu3ozja \
    --discovery-token-ca-cert-hash sha256:231cd8f418873ea811166d3876c9cb3765481451abbcf4e8b87155457999a36b \
    --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.0.0.53:6443 --token 4xe09u.grs65k149mu3ozja \
    --discovery-token-ca-cert-hash sha256:231cd8f418873ea811166d3876c9cb3765481451abbcf4e8b87155457999a36b 

配置kube-config文件

kube-config文件包含kube-apiserver地址及相关认证信息

# 复制执行kubeadm --init初始化生成的命令
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl自动补全

bash

source <(kubectl completion bash) # 在 bash 中设置当前 shell 的自动补全,要先安装 bash-completion 包。
echo "source <(kubectl completion bash)" >> ~/.bashrc # 在你的 bash shell 中永久地添加自动补全

ZSH

source <(kubectl completion zsh)  # 在 zsh 中设置当前 shell 的自动补全
echo '[[ $commands[kubectl] ]] && source <(kubectl completion zsh)' >> ~/.zshrc # 在你的 zsh shell 中永久地添加自动补全

查看node信息

[root@k8s-master1 ~]#kubectl get node
NAME          STATUS     ROLES                  AGE   VERSION
k8s-master1   NotReady   control-plane,master   54s   v1.20.5

部署网络组件flannel

进入https://github.com/flannel-io/flannel/tree/master/Documentation下载yml文件

注意:kube-flannel.yml文件参数需修改,yml文件中"10.244.0.0/16"和kubeadm --init初始化时--pod-network-cidr的值相同,参考kubernetes使用flannel网络插件服务状态显示CrashLoopBackOff

# 若网络连接异常无法下载镜像时,选择开全局代理下载镜像
[root@k8s-master1 ~]#kubectl apply -f kube-flannel.yml
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created

# 等待flannel初始化完成
[root@k8s-master1 ~]#kubectl get pod -A
NAMESPACE      NAME                                  READY   STATUS     RESTARTS   AGE
kube-flannel   kube-flannel-ds-bzgbv                 0/1     Init:1/2   0          16s
kube-system    coredns-54d67798b7-cb4mv              0/1     Pending    0          10m
kube-system    coredns-54d67798b7-lwmrz              0/1     Pending    0          10m
kube-system    etcd-k8s-master1                      1/1     Running    0          10m
kube-system    kube-apiserver-k8s-master1            1/1     Running    0          10m
kube-system    kube-controller-manager-k8s-master1   1/1     Running    0          10m
kube-system    kube-proxy-tpr65                      1/1     Running    0          10m
kube-system    kube-scheduler-k8s-master1            1/1     Running    0          10m

# 查看镜像
[root@k8s-master1 ~]#docker images
REPOSITORY                                                                    TAG                 IMAGE ID            CREATED             SIZE
rancher/mirrored-flannelcni-flannel                                           v0.19.1             252b2c3ee6c8        2 weeks ago         62.3MB
rancher/mirrored-flannelcni-flannel-cni-plugin                                v1.1.0              fcecffc7ad4a        3 months ago        8.09MB

查看master节点状态

[root@k8s-master1 ~]#kubectl get node
NAME          STATUS   ROLES                  AGE   VERSION
k8s-master1   Ready    control-plane,master   34m   v1.20.5

当前master生成证书用于添加新控制节点

[root@k8s-master1 ~]#kubeadm init phase upload-certs --upload-certs
I0825 10:08:25.373762  590886 version.go:254] remote version is much newer: v1.25.0; falling back to: stable-1.20
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
027da56ef05e18a7627135dca22fd571908eec8d1989df1dbbddd263782d2215

安装其他节点

安装docker、kubeadm、kubelet、kubectl,方法可见前面部分

#! /bin/bash
# Node bootstrap for joining the k8s cluster: installs Docker 19.03,
# tunes kernel/cgroup parameters, disables swap, and installs
# kubeadm/kubelet/kubectl 1.20.5. Reboots at the end to apply grub changes.
apt update

# Install prerequisite packages
apt install -y \
    apt-transport-https \
    ca-certificates \
    curl \
    gnupg \
    lsb-release \
    software-properties-common

# Add Docker GPG key
# NOTE(review): apt-key is deprecated on newer Ubuntu releases; prefer a
# keyring file under /etc/apt/keyrings as done for the jumpserver host.
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -

sudo add-apt-repository "deb [arch=$(dpkg --print-architecture)] http://mirrors.aliyun.com/docker-ce/linux/ubuntu \
  $(lsb_release -cs) stable"

apt update
# apt-cache madison docker-ce docker-ce-cli   # list available versions
apt -y install "docker-ce=5:19.03.15~3-0~ubuntu-$(lsb_release -cs)" \
    "docker-ce-cli=5:19.03.15~3-0~ubuntu-$(lsb_release -cs)"

# Disable the firewall if present. Ubuntu normally ships ufw rather than
# firewalld, so guard the call instead of letting it fail noisily.
systemctl disable --now firewalld 2>/dev/null || true

# Map this host's primary IP to its hostname in /etc/hosts
cat >> /etc/hosts <<EOF
$(hostname -I | awk '{print $1}') $(hostname)
EOF

# Kernel module / sysctl settings required for bridged pod traffic
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sudo sysctl --system

# Switch Docker's cgroup driver to systemd to match the kubelet default
# (docker defaults to cgroupfs; verify with `docker info`). mkdir is a
# safety net in case the directory was not created by the docker install.
mkdir -p /etc/docker
cat <<EOF >/etc/docker/daemon.json
{
    "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

# Disable swap now and on future logins (kubelet refuses swap by default);
# comment out the swap entry in /etc/fstab so it survives reboot.
sed -ri 's/(^[^#]*swap)/#\1/' /etc/fstab
echo 'swapoff -a' >> /etc/profile
swapoff -a

# Trust the local harbor registry over plain HTTP
sed -ri 's/(^ExecStart.*$)/\1 --insecure-registry=10.0.0.60/' /lib/systemd/system/docker.service
systemctl daemon-reload && systemctl restart docker

# Kubernetes apt repositories (aliyun + tsinghua mirrors for redundancy)
apt update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
deb https://mirrors.tuna.tsinghua.edu.cn/kubernetes/apt kubernetes-xenial main
EOF

# Install kubeadm/kubelet/kubectl pinned to 1.20.5
apt update
apt install -y kubeadm=1.20.5-00 kubelet=1.20.5-00 kubectl=1.20.5-00

# Enable memory cgroup accounting for swap, then reboot to apply
sed -i '/GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"/c GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0 cgroup_enable=memory swapaccount=1"' /etc/default/grub
update-grub
reboot

其他master节点加入集群

添加命令为k8s-master1执行kubeadm init初始化完成之后返回的kubeadm join信息+kubeadm init phase upload-certs --upload-certs生成的certificate-key信息

执行加入命令

  kubeadm join 10.0.0.53:6443 --token 4xe09u.grs65k149mu3ozja \
    --discovery-token-ca-cert-hash sha256:231cd8f418873ea811166d3876c9cb3765481451abbcf4e8b87155457999a36b \
    --control-plane --certificate-key 027da56ef05e18a7627135dca22fd571908eec8d1989df1dbbddd263782d2215

执行完成输出信息

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

	mkdir -p $HOME/.kube
	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.

配置kube-config文件

kube-config文件包含kube-apiserver地址及相关认证信息

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl自动补全

source <(kubectl completion bash) # 在 bash 中设置当前 shell 的自动补全,要先安装 bash-completion 包。
echo "source <(kubectl completion bash)" >> ~/.bashrc # 在你的 bash shell 中永久地添加自动补全

node节点加入集群

执行加入命令

添加命令为k8s-master1执行kubeadm init初始化完成之后返回的kubeadm join信息

kubeadm join 10.0.0.53:6443 --token 4xe09u.grs65k149mu3ozja \
    --discovery-token-ca-cert-hash sha256:231cd8f418873ea811166d3876c9cb3765481451abbcf4e8b87155457999a36b

执行完成输出信息

[root@k8s-node3 ~]#kubeadm join 10.0.0.53:6443 --token 4xe09u.grs65k149mu3ozja \
>     --discovery-token-ca-cert-hash sha256:231cd8f418873ea811166d3876c9cb3765481451abbcf4e8b87155457999a36b
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

配置kube-config

将master端下的$HOME/.kube文件拷贝到node节点上

[root@k8s-master1 ~]#scp -r /root/.kube 10.0.0.57:/root/

kubectl命令可正常使用

[root@k8s-node1 ~]#kubectl get node
NAME          STATUS   ROLES                  AGE   VERSION
k8s-master1   Ready    control-plane,master   10h   v1.20.5
k8s-master2   Ready    control-plane,master   26m   v1.20.5
k8s-master3   Ready    control-plane,master   16m   v1.20.5
k8s-node1     Ready    <none>                 15m   v1.20.5

配置kubectl自动补全

source <(kubectl completion bash) # 在 bash 中设置当前 shell 的自动补全,要先安装 bash-completion 包。
echo "source <(kubectl completion bash)" >> ~/.bashrc # 在你的 bash shell 中永久地添加自动补全

验证集群所有节点状态

各node节点会自动加入到master节点,下载镜像并启动flannel,直到最终在master看到node处于Ready状态。

[root@k8s-master1 ~]#kubectl get node
NAME          STATUS   ROLES                  AGE    VERSION
k8s-master1   Ready    control-plane,master   10h    v1.20.5
k8s-master2   Ready    control-plane,master   12m    v1.20.5
k8s-master3   Ready    control-plane,master   2m5s   v1.20.5
k8s-node1     Ready    <none>                 115s   v1.20.5
k8s-node2     Ready    <none>                 54s    v1.20.5
k8s-node3     Ready    <none>                 49s    v1.20.5

验证当前证书状态

[root@k8s-master1 ~]#kubectl get csr
NAME        AGE   SIGNERNAME                                    REQUESTOR                 CONDITION
csr-cxk7f   33m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:4xe09u   Approved,Issued
csr-ffdw7   43m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:4xe09u   Approved,Issued
csr-s6hdv   31m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:4xe09u   Approved,Issued
csr-wnns2   32m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:4xe09u   Approved,Issued
csr-zq264   33m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:4xe09u   Approved,Issued

创建容器并测试网络

创建容器

[root@k8s-master1 ~]#kubectl run net-test1 --image=alpine sleep 360000
pod/net-test1 created
[root@k8s-master1 ~]#kubectl get pod -o wide
NAME        READY   STATUS    RESTARTS   AGE   IP           NODE        NOMINATED NODE   READINESS GATES
net-test1   1/1     Running   0          11s   172.16.5.2   k8s-node3   <none>           <none>

进入容器,测试内部网络

[root@k8s-master1 ~]#kubectl exec -it net-test1 sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # ifconfig 
eth0      Link encap:Ethernet  HWaddr 16:B0:A1:C1:0A:7A  
          inet addr:172.16.5.2  Bcast:172.16.5.255  Mask:255.255.255.0
          UP BROADCAST RUNNING MULTICAST  MTU:1450  Metric:1
          RX packets:13 errors:0 dropped:0 overruns:0 frame:0
          TX packets:1 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:1178 (1.1 KiB)  TX bytes:42 (42.0 B)

lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

/ # ping -c1 -W1 10.0.0.54
PING 10.0.0.54 (10.0.0.54): 56 data bytes
64 bytes from 10.0.0.54: seq=0 ttl=63 time=0.289 ms

--- 10.0.0.54 ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 0.289/0.289/0.289 ms
/ # 

验证外部网络

/ # ping www.baidu.com
PING www.baidu.com (180.101.49.12): 56 data bytes
64 bytes from 180.101.49.12: seq=0 ttl=127 time=7.129 ms
64 bytes from 180.101.49.12: seq=1 ttl=127 time=6.786 ms
64 bytes from 180.101.49.12: seq=2 ttl=127 time=6.315 ms
64 bytes from 180.101.49.12: seq=3 ttl=127 time=6.840 ms
64 bytes from 180.101.49.12: seq=4 ttl=127 time=6.577 ms
^C
--- www.baidu.com ping statistics ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 6.315/6.729/7.129 ms

查看haproxy监控状态页

部署dashboard

参考:https://github.com/kubernetes/dashboard

下载dashboard

地址:https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.0/aio/deploy/recommended.yaml

修改recommended.yaml中的Service部分

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort		#新增NodePort,可通过集群外部浏览器访问
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30002		#添加30002端口,可根据实际添加端口
  selector:
    k8s-app: kubernetes-dashboard

创建admin-user.yaml文件,进行鉴权

# ServiceAccount used to log in to the Kubernetes dashboard.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

---
# Bind the account to the built-in cluster-admin role so the dashboard
# token has full cluster privileges.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin          # built-in superuser ClusterRole
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

创建dashboard

kubectl apply -f recommended.yaml -f admin-user.yaml

验证状态

查看service

[root@k8s-master1 ~]#kubectl get svc -n kubernetes-dashboard 
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.10.3.238     <none>        8000/TCP        61m
kubernetes-dashboard        NodePort    10.10.224.229   <none>        443:30002/TCP   61m

查看serviceaccount

[root@k8s-master1 ~]#kubectl get sa -n kubernetes-dashboard 
NAME                   SECRETS   AGE
admin-user             1         56m
default                1         63m
kubernetes-dashboard   1         63m

[root@k8s-master1 ~]#kubectl get secrets -n kubernetes-dashboard 
NAME                               TYPE                                  DATA   AGE
admin-user-token-tv25x             kubernetes.io/service-account-token   3      55m
default-token-679qb                kubernetes.io/service-account-token   3      61m
kubernetes-dashboard-certs         Opaque                                0      61m
kubernetes-dashboard-csrf          Opaque                                1      61m
kubernetes-dashboard-key-holder    Opaque                                2      61m
kubernetes-dashboard-token-94gzz   kubernetes.io/service-account-token   3      61m

查看token

[root@k8s-master1 ~]#kubectl describe secrets admin-user-token-tv25x -n kubernetes-dashboard
Name:         admin-user-token-tv25x
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 405d9bc5-3da8-45f1-8cc7-1a7f569ecdcd

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1066 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6Imx0TjZKUTk5WnBKT1pMUHVXUnBLYzg1cG9nMTVMT183cG1wZ2E4MnBuRFUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXR2MjV4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI0MDVkOWJjNS0zZGE4LTQ1ZjEtOGNjNy0xYTdmNTY5ZWNkY2QiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.ACazHf5aujd_Qm3-8WOzVO6HM0ndbqh8qwPaDKoK309HypIU__g2SIDvvTdztkfbpnLhU_WvBDuCJWJ_IxYihkJoFVW5vgKLFTmy0Mxl3WlBbfb2VmfxMluOEVJOslVlHgHYhrs2OX5X0B7uvzNgyuV5QuvWljA1rfzj_GphnRTCdA2uGZv1lgF_UGgwyl_s7pYwe6nhUqV843c1x9xv0-IDdQ0BS5MDeKA092oT6p6F74U7dRFbxeQ3lGi_3fw18tc_-uNPuQ0kiQzFHlQjZXSL0L4uzmTrZd0CnJrbSRzoROaqNHQQXuoECpoaAxyE79e3NwE_Pf8JwwDwAFOoZQ

访问任一集群节点30002端口

输入token,默认token时间是900s,15分钟,到期后会自动退出登录。

进入首页,对集群进行管理

五、升级k8s集群1.21版本

官方说明:https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/

升级工作的基本流程如下:

  1. 升级主控制平面节点
  2. 升级其他控制平面节点
  3. 升级工作节点

升级准备

  • 集群应使用静态的控制平面和 etcd Pod 或者外部 etcd。
  • 务必备份所有重要组件,例如存储在数据库中应用层面的状态。 kubeadm upgrade 不会影响你的工作负载,只会涉及 Kubernetes 内部的组件,但备份终究是好的。
  • 必须禁用交换分区

注意事项

  • 在对 kubelet 作次版本升版时需要腾空节点。 对于控制面节点,其上可能运行着 CoreDNS Pods 或者其它非常重要的负载。
  • 升级后,因为容器规约的哈希值已更改,所有容器都会被重新启动

[root@k8s-master1 ~]#kubeadm version -o json
{
  "clientVersion": {
    "major": "1",
    "minor": "20",
    "gitVersion": "v1.20.5",
    "gitCommit": "6b1d87acf3c8253c123756b9e61dac642678305f",
    "gitTreeState": "clean",
    "buildDate": "2021-03-18T01:08:27Z",
    "goVersion": "go1.15.8",
    "compiler": "gc",
    "platform": "linux/amd64"
  }
}

[root@k8s-master1 ~]#kubectl get node
NAME          STATUS   ROLES                  AGE   VERSION
k8s-master1   Ready    control-plane,master   11h   v1.20.5
k8s-master2   Ready    control-plane,master   67m   v1.20.5
k8s-master3   Ready    control-plane,master   57m   v1.20.5
k8s-node1     Ready    <none>                 56m   v1.20.5
k8s-node2     Ready    <none>                 55m   v1.20.5
k8s-node3     Ready    <none>                 55m   v1.20.5

[root@k8s-master1 ~]#kubectl version -o json
{
  "clientVersion": {
    "major": "1",
    "minor": "20",
    "gitVersion": "v1.20.5",
    "gitCommit": "6b1d87acf3c8253c123756b9e61dac642678305f",
    "gitTreeState": "clean",
    "buildDate": "2021-03-18T01:10:43Z",
    "goVersion": "go1.15.8",
    "compiler": "gc",
    "platform": "linux/amd64"
  },
  "serverVersion": {
    "major": "1",
    "minor": "20",
    "gitVersion": "v1.20.5",
    "gitCommit": "6b1d87acf3c8253c123756b9e61dac642678305f",
    "gitTreeState": "clean",
    "buildDate": "2021-03-18T01:02:01Z",
    "goVersion": "go1.15.8",
    "compiler": "gc",
    "platform": "linux/amd64"
  }
}

[root@k8s-master1 ~]#kubelet --version
Kubernetes v1.20.5

确定要升级到哪个版本

使用操作系统的包管理器找到最新的稳定 1.21:

[root@k8s-master1 ~]#apt update
[root@k8s-master1 ~]#apt-cache policy kubeadm
kubeadm:
  Installed: 1.20.5-00
  Candidate: 1.25.0-00
  Version table:
     1.25.0-00 500
        500 https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
     1.24.4-00 500
        500 https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
        500 https://mirrors.tuna.tsinghua.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages
     1.24.3-00 500
        500 https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
        500 https://mirrors.tuna.tsinghua.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages
     ......
     1.22.1-00 500
        500 https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
        500 https://mirrors.tuna.tsinghua.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages
     1.22.0-00 500
        500 https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
        500 https://mirrors.tuna.tsinghua.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages
     1.21.14-00 500
        500 https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
        500 https://mirrors.tuna.tsinghua.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages
     1.21.13-00 500
        500 https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
        500 https://mirrors.tuna.tsinghua.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages
     1.21.12-00 500
        500 https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
        500 https://mirrors.tuna.tsinghua.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages

升级master节点

控制面节点上的升级过程应该每次处理一个节点。 首先选择一个要先行升级的控制面节点。该节点上必须拥有 /etc/kubernetes/admin.conf 文件。

升级master1节点

对master节点进行组件升级,将管理端服务kube-controller-manager、kube-apiserver、kube-scheduler、kube-proxy等进行版本升级

升级kubeadm

  • 升级 kubeadm:

    # 选择1.21最新稳定版本1.21.14-00
    apt-get update && \
    apt-get install -y --allow-change-held-packages kubeadm=1.21.14-00
    
  • 验证下载操作正常,并且 kubeadm版本正确:

    # 查看版本(注意:确认 kubeadm 版本应执行 kubeadm version -o json;下面展示的是 kubectl version 的输出)
    [root@k8s-master1 ~]#kubectl version -o json
    {
      "clientVersion": {
        "major": "1",
        "minor": "21",
        "gitVersion": "v1.21.14",
        "gitCommit": "0f77da5bd4809927e15d1658fb4aa8f13ad890a5",
        "gitTreeState": "clean",
        "buildDate": "2022-06-15T14:17:29Z",
        "goVersion": "go1.16.15",
        "compiler": "gc",
        "platform": "linux/amd64"
      },
      "serverVersion": {
        "major": "1",
        "minor": "20",
        "gitVersion": "v1.20.5",
        "gitCommit": "6b1d87acf3c8253c123756b9e61dac642678305f",
        "gitTreeState": "clean",
        "buildDate": "2021-03-18T01:02:01Z",
        "goVersion": "go1.15.8",
        "compiler": "gc",
        "platform": "linux/amd64"
      }
    }
    
  • 验证升级计划:

    kubeadm upgrade plan
    

    此命令检查你的集群是否可被升级,并取回你要升级的目标版本。 命令也会显示一个包含组件配置版本状态的表格。

    说明:kubeadm upgrade 也会自动对 kubeadm 在节点上所管理的证书执行续约操作。 如果需要略过证书续约操作,可以使用标志 --certificate-renewal=false。 更多的信息,可参阅证书管理指南

    说明:如果 kubeadm upgrade plan 给出任何需要手动升级的组件配置,用户必须 通过 --config 命令行标志向 kubeadm upgrade apply 命令提供替代的配置文件。 如果不这样做,kubeadm upgrade apply 会出错并退出,不再执行升级操作。

    升级计划如下:

    Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
    COMPONENT   CURRENT        TARGET
    kubelet     5 x v1.20.5    v1.21.14
                1 x v1.21.14   v1.21.14
    
    Upgrade to the latest stable version:
    
    COMPONENT                 CURRENT    TARGET
    kube-apiserver            v1.20.5    v1.21.14
    kube-controller-manager   v1.20.5    v1.21.14
    kube-scheduler            v1.20.5    v1.21.14
    kube-proxy                v1.20.5    v1.21.14
    CoreDNS                   1.7.0      v1.8.0
    etcd                      3.4.13-0   3.4.13-0
    
    You can now apply the upgrade by executing the following command:
    
    	kubeadm upgrade apply v1.21.14		# 执行该命令升级
    
  • 执行升级

    选择要升级到的目标版本,运行合适的命令

    kubeadm upgrade apply v1.21.14
    # 可附加 --etcd-upgrade=false 手动设置不升级etcd
    
    # 升级成功信息如下
    [upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.21.14". Enjoy!
    
    [upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so.
    
  • 手动升级 CNI 驱动插件

    容器网络接口(CNI)驱动应该提供了程序自身的升级说明。 参阅插件页面查找 CNI 驱动, 并查看是否需要其他升级步骤。

    如果 CNI 驱动作为 DaemonSet 运行,则在其他控制平面节点上不需要此步骤。

腾空节点

通过将节点标记为不可调度并腾空节点为节点作升级准备

# 标记k8s-master1 节点为不可调度
[root@k8s-master1 ~]#kubectl cordon k8s-master1
node/k8s-master1 cordoned
[root@k8s-master1 ~]#kubectl get node
NAME          STATUS                     ROLES                  AGE     VERSION
k8s-master1   Ready,SchedulingDisabled   control-plane,master   15h     v1.20.5
k8s-master2   Ready                      control-plane,master   4h29m   v1.20.5
k8s-master3   Ready                      control-plane,master   4h19m   v1.20.5

# 对k8s-master1 节点进行清空操作,为节点维护做准备
[root@k8s-master1 ~]#kubectl drain k8s-master1 --ignore-daemonsets --force=true
node/k8s-master1 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-flannel/kube-flannel-ds-n8b9v, kube-system/kube-proxy-tpr65
evicting pod kube-system/coredns-54d67798b7-lwmrz
evicting pod kube-system/coredns-54d67798b7-cb4mv
pod/coredns-54d67798b7-lwmrz evicted
pod/coredns-54d67798b7-cb4mv evicted
node/k8s-master1 evicted

说明

kubectl cordon my-node                                                # 标记 my-node 节点为不可调度
kubectl drain my-node                                                 # 对 my-node 节点进行清空操作,为节点维护做准备
kubectl uncordon my-node                                              # 标记 my-node 节点为可以调度

升级kubelet、kubectl

# 选择1.21最新稳定版本1.21.14-00
apt-get update && \
apt-get install -y --allow-change-held-packages kubelet=1.21.14-00 kubectl=1.21.14-00

验证下载操作正常,并且 kubeadm、kubectl、kubelet版本正确

# 查看kubectl版本
[root@k8s-master1 ~]#kubectl version -o json
{
  "clientVersion": {
    "major": "1",
    "minor": "21",
    "gitVersion": "v1.21.5",
    "gitCommit": "aea7bbadd2fc0cd689de94a54e5b7b758869d691",
    "gitTreeState": "clean",
    "buildDate": "2021-09-15T21:10:45Z",
    "goVersion": "go1.16.8",
    "compiler": "gc",
    "platform": "linux/amd64"
  },
  "serverVersion": {
    "major": "1",
    "minor": "20",
    "gitVersion": "v1.20.5",
    "gitCommit": "6b1d87acf3c8253c123756b9e61dac642678305f",
    "gitTreeState": "clean",
    "buildDate": "2021-03-18T01:02:01Z",
    "goVersion": "go1.15.8",
    "compiler": "gc",
    "platform": "linux/amd64"
  }
}
# 查看kubelet版本
[root@k8s-master1 ~]#kubelet --version
Kubernetes v1.21.14

重启kubelet

sudo systemctl daemon-reload
sudo systemctl restart kubelet

取消节点保护

# 将节点标记为可调度,让节点重新上线
kubectl uncordon k8s-master1

验证状态

[root@k8s-master1 ~]#kubectl get node
NAME          STATUS   ROLES                  AGE     VERSION
k8s-master1   Ready    control-plane,master   17h     v1.21.14
k8s-master2   Ready    control-plane,master   7h26m   v1.20.5
k8s-master3   Ready    control-plane,master   7h16m   v1.20.5
k8s-node1     Ready    <none>                 7h16m   v1.20.5
k8s-node2     Ready    <none>                 7h15m   v1.20.5
k8s-node3     Ready    <none>                 7h14m   v1.20.5

升级其他master节点

升级kubeadm

# 下载kubeadm
apt-get update && \
apt-get install -y --allow-change-held-packages kubeadm=1.21.14-00

# 执行节点升级,使用kubeadm upgrade node,不需要执行 kubeadm upgrade plan 和更新 CNI 驱动插件的操作
sudo kubeadm upgrade node

升级成功信息如下

[upgrade/staticpods] Component "kube-scheduler" upgraded successfully!
[upgrade] The control plane instance for this node was successfully updated!
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[upgrade] The configuration for this node was successfully updated!
[upgrade] Now you should go ahead and upgrade the kubelet package using your package manager.

腾空节点

# master2
kubectl cordon k8s-master2
kubectl drain k8s-master2 --ignore-daemonsets --force=true

# master3
kubectl cordon k8s-master3
kubectl drain k8s-master3 --ignore-daemonsets --force=true

升级kubelet、kubectl

apt-get update && \
apt-get install -y --allow-change-held-packages kubelet=1.21.14-00 kubectl=1.21.14-00

重启kubelet

sudo systemctl daemon-reload
sudo systemctl restart kubelet

取消节点保护

# master2
kubectl uncordon k8s-master2

# master3
kubectl uncordon k8s-master3

验证状态

[root@k8s-master3 ~]#kubectl get node
NAME          STATUS   ROLES                  AGE     VERSION
k8s-master1   Ready    control-plane,master   18h     v1.21.14
k8s-master2   Ready    control-plane,master   7h54m   v1.21.14
k8s-master3   Ready    control-plane,master   7h44m   v1.21.14
k8s-node1     Ready    <none>                 7h44m   v1.20.5
k8s-node2     Ready    <none>                 7h43m   v1.20.5
k8s-node3     Ready    <none>                 7h43m   v1.20.5

升级node节点

升级kubeadm

# 下载kubeadm
apt-get update && \
apt-get install -y --allow-change-held-packages kubeadm=1.21.14-00

# 执行节点升级,对于工作节点,该命令会升级本地的 kubelet 配置
sudo kubeadm upgrade node

腾空节点

kubectl cordon "$(hostname)"
kubectl drain "$(hostname)" --ignore-daemonsets --force=true

升级kubelet、kubectl

# 升级kubelet、kubectl
apt-get update && \
apt-get install -y --allow-change-held-packages kubelet=1.21.14-00 kubectl=1.21.14-00

# 重启kubelet
sudo systemctl daemon-reload
sudo systemctl restart kubelet

取消节点保护

kubectl uncordon "$(hostname)"

验证集群状态

所有节点STATUS 应显示为 Ready 状态,并且版本号已经被更新

[root@k8s-node3 ~]#kubectl get node
NAME          STATUS   ROLES                  AGE     VERSION
k8s-master1   Ready    control-plane,master   18h     v1.21.14
k8s-master2   Ready    control-plane,master   8h      v1.21.14
k8s-master3   Ready    control-plane,master   7h57m   v1.21.14
k8s-node1     Ready    <none>                 7h57m   v1.21.14
k8s-node2     Ready    <none>                 7h56m   v1.21.14
k8s-node3     Ready    <none>                 7h56m   v1.21.14

六、通过deployment.yaml 部署pod,镜像为nginx,3副本,并编辑index内容,分别显示各自pod的IP

参考:https://kubernetes.io/zh-cn/docs/concepts/workloads/controllers/deployment/

  1. 准备yaml文件

    # Deployment running three replicas of nginx 1.14.2, each exposing port 80.
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-deployment
      labels:
        app: nginx
    spec:
      replicas: 3               # run three nginx pod replicas
      selector:
        matchLabels:
          app: nginx            # must match the pod template labels below
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: nginx:1.14.2
            ports:
            - containerPort: 80     # port nginx listens on inside the pod
    
  2. 创建deployment

    [root@k8s-master1 ~]#kubectl apply -f deploy-nginx.yml 
    deployment.apps/nginx-deployment created
    
    # 查看deployment
    [root@k8s-master1 ~]#kubectl get deploy nginx-deployment 
    NAME               READY   UP-TO-DATE   AVAILABLE   AGE
    nginx-deployment   3/3     3            3           28s
    
    # 查看pod
    [root@k8s-master1 ~]#kubectl get pod -o wide
    NAME                                READY   STATUS    RESTARTS   AGE    IP           NODE        NOMINATED NODE   READINESS GATES
    nginx-deployment-66b6c48dd5-b8wxl   1/1     Running   0          105s   172.16.4.4   k8s-node2   <none>           <none>
    nginx-deployment-66b6c48dd5-lfn7g   1/1     Running   0          105s   172.16.3.4   k8s-node1   <none>           <none>
    nginx-deployment-66b6c48dd5-nv2md   1/1     Running   0          105s   172.16.5.5   k8s-node3   <none>           <none>
    
  3. 进入pod容器,编辑index内容

    # 进入pod
    [root@k8s-master1 ~]#kubectl exec -it nginx-deployment-66b6c48dd5-b8wxl bash
    
    # 修改index内容
    root@nginx-deployment-66b6c48dd5-b8wxl:/# hostname -i > /usr/share/nginx/html/index.html
    
  4. 验证

    显示pod详细信息

    [root@k8s-master1 ~]#kubectl get pod -o wide
    NAME                                READY   STATUS    RESTARTS   AGE   IP           NODE        NOMINATED NODE   READINESS GATES
    nginx-deployment-66b6c48dd5-b8wxl   1/1     Running   0          36m   172.16.4.4   k8s-node2   <none>           <none>
    nginx-deployment-66b6c48dd5-lfn7g   1/1     Running   0          36m   172.16.3.4   k8s-node1   <none>           <none>
    nginx-deployment-66b6c48dd5-nv2md   1/1     Running   0          36m   172.16.5.5   k8s-node3   <none>           <none>
    

    显示各自pod的IP

    [root@k8s-master1 ~]#curl 172.16.4.4
    172.16.4.4
    
    [root@k8s-master1 ~]#curl 172.16.3.4
    172.16.3.4
    
    [root@k8s-master1 ~]#curl 172.16.5.5
    172.16.5.5
    

七、创建svc,用来访问已知的nginx pod

创建ClusterIP类型Service

只能集群内部访问

  1. 准备service yaml文件

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service
    spec:
      selector:
        app: nginx
      ports:
        - protocol: TCP
          port: 8080		# service端口
          targetPort: 80		# pod端口
    
  2. 创建service

    # 创建service
    [root@k8s-master1 ~]#kubectl apply -f nginx-service.yaml 
    service/nginx-service created
    
    # 查看service
    [root@k8s-master1 ~]#kubectl get svc -o wide
    NAME            TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE   SELECTOR
    kubernetes      ClusterIP   10.10.0.1       <none>        443/TCP    20h   <none>
    nginx-service   ClusterIP   10.10.188.189   <none>        8080/TCP   13m   app=nginx
    
    # 查看endpoints
    [root@k8s-master1 ~]#kubectl get ep
    NAME            ENDPOINTS                                      AGE
    kubernetes      10.0.0.54:6443,10.0.0.55:6443,10.0.0.56:6443   20h
    nginx-service   172.16.3.4:80,172.16.4.4:80,172.16.5.5:80      14m
    
  3. 验证,集群内部访问,随机调度至后端pod

    [root@k8s-master1 ~]#curl 10.10.188.189:8080
    172.16.4.4
    [root@k8s-master1 ~]#curl 10.10.188.189:8080
    172.16.4.4
    [root@k8s-master1 ~]#curl 10.10.188.189:8080
    172.16.3.4
    [root@k8s-master1 ~]#curl 10.10.188.189:8080
    172.16.5.5
    [root@k8s-master1 ~]#curl 10.10.188.189:8080
    172.16.5.5
    [root@k8s-master1 ~]#curl 10.10.188.189:8080
    172.16.4.4
    [root@k8s-master1 ~]#curl 10.10.188.189:8080
    172.16.4.4
    [root@k8s-master1 ~]#curl 10.10.188.189:8080
    172.16.4.4
    

创建NodePort类型Service

集群外部可访问

  1. 准备service yaml文件

    # NodePort Service: additionally opens a port on every node so the
    # nginx pods are reachable from outside the cluster via <NodeIP>:<nodePort>.
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service
    spec:
      selector:
        app: nginx          # routes traffic to pods labeled app=nginx
      type: NodePort
      ports:
        - protocol: TCP
          port: 8080		# cluster-internal Service port
          targetPort: 80		# pod/container port traffic is forwarded to
          nodePort: 30080		# port opened on every node; valid range 30000-32767
    
  2. 创建service

    # 创建service
    [root@k8s-master1 ~]#kubectl apply -f nginx-service-node.yaml
    service/nginx-service created
    
    # 查看service
    [root@k8s-master1 ~]#kubectl get svc -o wide
    NAME            TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE   SELECTOR
    kubernetes      ClusterIP   10.10.0.1      <none>        443/TCP          21h   <none>
    nginx-service   NodePort    10.10.81.129   <none>        8080:30080/TCP   92s   app=nginx
    
    # 查看endpoints
    [root@k8s-master1 ~]#kubectl get ep
    NAME            ENDPOINTS                                      AGE
    kubernetes      10.0.0.54:6443,10.0.0.55:6443,10.0.0.56:6443   21h
    nginx-service   172.16.3.4:80,172.16.4.4:80,172.16.5.5:80      116s
    
  3. 验证,集群外部访问,随机调度至后端pod

    [root@haproxy1 ~]#curl 10.0.0.54:30080
    172.16.3.4
    [root@haproxy1 ~]#curl 10.0.0.54:30080
    172.16.3.4
    [root@haproxy1 ~]#curl 10.0.0.54:30080
    172.16.4.4
    [root@haproxy1 ~]#curl 10.0.0.54:30080
    172.16.5.5
    [root@haproxy1 ~]#curl 10.0.0.54:30080
    172.16.3.4
    [root@haproxy1 ~]#curl 10.0.0.54:30080
    172.16.5.5
    [root@haproxy1 ~]#curl 10.0.0.54:30080
    172.16.3.4
    [root@haproxy1 ~]#curl 10.0.0.54:30080
    172.16.4.4
    

    浏览器访问

posted @ 2022-08-25 20:58  areke  阅读(987)  评论(0)    收藏  举报