Kubernetes 1.18.0 Binary High-Availability Cluster Setup
This article is based on Liu Tengfei's video tutorial: http://video.jessetalk.cn/
# Update CentOS
yum update
# Install wget
yum install wget
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Install the EPEL repository
yum install epel-release
# Turn off swap (required by the kubelet)
swapoff -a
# Verify that no swap is active
swapon -s
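Note that swapoff -a does not survive a reboot (and the SELinux step below reboots the machine). A minimal sketch for keeping swap off permanently, assuming swap is mounted through /etc/fstab:

# Comment out any swap entries so swap stays off after reboot
sed -ri 's/^([^#].*\sswap\s)/#\1/' /etc/fstab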
# Disable SELinux
vi /etc/selinux/config
# set SELINUX=disabled
SELINUX=disabled
# Reboot for the change to take effect
reboot
sestatus
SELinux status:                 disabled
Set the hostname on each machine
# 192.168.0.201
hostnamectl set-hostname node00
# 192.168.0.202
hostnamectl set-hostname node01
# 192.168.0.203
hostnamectl set-hostname node02
# Install chrony
yum install chrony
# Start and enable it
systemctl start chronyd
systemctl enable chronyd
# Set the timezone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# Enable NTP synchronization
timedatectl set-ntp yes
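To confirm the clock is actually syncing, the following checks (standard chrony and systemd tooling) can be run on each node:

# A source marked ^* is the one currently synchronized against
chronyc sources
timedatectl status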
vi /etc/hosts
# Add the following entries
192.168.0.201 node00
192.168.0.202 node01
192.168.0.203 node02
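A quick sanity check that name resolution works between the nodes:

# Each hostname should answer from its 192.168.0.x address
for h in node00 node01 node02; do ping -c 1 $h; done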
The generated CA certificate and key files are ca.pem, ca-key.pem, ca.csr, ca-config.json, and ca-csr.json.
The components that use these certificates are etcd, kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, and kubectl.
mkdir -p /ssl
cd /ssl
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
#export PATH=/usr/local/bin:$PATH
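A quick check that all three cfssl binaries are installed and on the PATH before continuing:

cfssl version
which cfssljson cfssl-certinfo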
mkdir /root/ssl
cd /root/ssl
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json
# Create ca-config.json following the format of config.json
# The expiry is set to 87600h (10 years)
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
      "127.0.0.1",
      "172.21.0.17",
      "172.21.0.2",
      "172.21.0.8",
      "172.21.0.210"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ],
    "ca": {
       "expiry": "87600h"
    }
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem
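The freshly issued CA can be inspected with standard openssl tooling to confirm the subject and the 10-year validity requested above:

# Show the CA subject and validity window
openssl x509 -in ca.pem -noout -subject -dates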
cat > kubernetes-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "127.0.0.1",
      "172.21.0.17",
      "172.21.0.2",
      "172.21.0.8",
      "172.21.0.210",
      "10.254.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
# Check the generated certificate
ls kubernetes*
kubernetes.csr  kubernetes-csr.json  kubernetes-key.pem  kubernetes.pem
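Because kube-apiserver and etcd will serve TLS with this certificate, it is worth confirming that every node IP and the service IP 10.254.0.1 made it into the SAN list:

# The Subject Alternative Name extension should list all hosts from kubernetes-csr.json
openssl x509 -in kubernetes.pem -noout -text | grep -A1 'Subject Alternative Name'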
Create the kubelet certificates
# node00 
cat > node00.json <<EOF
{
  "CN": "system:node:node00",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "node00",
    "node01",
    "node02",
    "172.21.0.17",
    "172.21.0.2",
    "172.21.0.8"
  ],
  "names": [
    {
      "C": "CN",
      "L": "Shanghai",
      "O": "system:nodes",
      "OU": "Kubernetes",
      "ST": "Shanghai"
    }
  ]
}
EOF
# node01
cat > node01.json <<EOF
{
  "CN": "system:node:node01",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "node00",
    "node01",
    "node02",
    "172.21.0.17",
    "172.21.0.2",
    "172.21.0.8"
  ],
  "names": [
    {
      "C": "CN",
      "L": "Shanghai",
      "O": "system:nodes",
      "OU": "Kubernetes",
      "ST": "Shanghai"
    }
  ]
}
EOF
# node02
cat > node02.json <<EOF
{
  "CN": "system:node:node02",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "node00",
    "node01",
    "node02",
    "172.21.0.17",
    "172.21.0.2",
    "172.21.0.8"
  ],
  "names": [
    {
      "C": "CN",
      "L": "Shanghai",
      "O": "system:nodes",
      "OU": "Kubernetes",
      "ST": "Shanghai"
    }
  ]
}
EOF
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  node00.json | cfssljson -bare node00

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  node01.json | cfssljson -bare node01

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  node02.json | cfssljson -bare node02
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
Note: "O": "system:masters" places the admin user in the system:masters group, which kube-apiserver's default RBAC bindings map to the cluster-admin role.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# Check the generated certificate
ls admin*
admin.csr  admin-csr.json  admin-key.pem  admin.pem
Create the kube-controller-manager certificate
cat > kube-controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes",
      "ST": "BeiJing"
    }
  ]
}
EOF
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
Create the kube-proxy certificate
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
# Check the generated certificate
ls kube-proxy*
kube-proxy.csr  kube-proxy-csr.json  kube-proxy-key.pem  kube-proxy.pem
cat > kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes",
      "ST": "BeiJing"
    }
  ]
}
EOF
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-scheduler-csr.json | cfssljson -bare kube-scheduler
cat > service-account-csr.json <<EOF
{
  "CN": "service-accounts",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "O": "Kubernetes",
      "OU": "Kubernetes",
      "ST": "BeiJing"
    }
  ]
}
EOF
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  service-account-csr.json | cfssljson -bare service-account
cfssl-certinfo -cert kubernetes.pem
mkdir -p /etc/kubernetes/ssl
cp *.pem /etc/kubernetes/ssl
cd /etc/kubernetes/ssl/
ls
admin-key.pem  ca-key.pem  kube-proxy-key.pem  kubernetes-key.pem  admin.pem  ca.pem  kube-proxy.pem  kubernetes.pem
# Create the target directory on the other nodes first
ssh root@192.168.0.202 mkdir -p /etc/kubernetes/ssl
ssh root@192.168.0.203 mkdir -p /etc/kubernetes/ssl
scp *.pem root@192.168.0.202:/etc/kubernetes/ssl
scp *.pem root@192.168.0.203:/etc/kubernetes/ssl
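A quick remote sanity check that both nodes received the full set of certificates (root ssh access is assumed throughout this tutorial):

# The count should match the number of .pem files on node00
for ip in 192.168.0.202 192.168.0.203; do ssh root@$ip 'ls /etc/kubernetes/ssl/*.pem | wc -l'; done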
# Create a temporary etcd directory on all 3 nodes
mkdir -p /root/etcd
cd /root/etcd
# Download the release on node00
wget https://github.com/coreos/etcd/releases/download/v3.3.18/etcd-v3.3.18-linux-amd64.tar.gz
# Copy it to node01 and node02
scp etcd-v3.3.18-linux-amd64.tar.gz root@192.168.0.202:/root/etcd
scp etcd-v3.3.18-linux-amd64.tar.gz root@192.168.0.203:/root/etcd
# Run the following in /root/etcd on node00, node01, and node02
tar -xvf etcd-v3.3.18-linux-amd64.tar.gz
mv etcd-v3.3.18-linux-amd64/etcd* /usr/local/bin
etcd --version
etcd Version: 3.4.3
Git SHA: 3c8740a79
Go Version: go1.12.9
Go OS/Arch: linux/amd64
mkdir -p /var/lib/etcd
# node00
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
Restart=always
RestartSec=5s
LimitNOFILE=40000
TimeoutStartSec=0
ExecStart=/usr/local/bin/etcd \
  --name infra1 \
  --data-dir /var/lib/etcd \
  --initial-advertise-peer-urls https://172.21.0.17:2380 \
  --listen-peer-urls https://172.21.0.17:2380 \
  --listen-client-urls https://172.21.0.17:2379 \
  --advertise-client-urls https://172.21.0.17:2379 \
  --initial-cluster-token etcd-cluster \
  --initial-cluster infra1=https://172.21.0.17:2380,infra2=https://172.21.0.2:2380,infra3=https://172.21.0.8:2380 \
  --initial-cluster-state new \
  --client-cert-auth \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-client-cert-auth \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem

[Install]
WantedBy=multi-user.target
# node01 (note: --name and the addresses change per node)
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
Restart=always
RestartSec=5s
LimitNOFILE=40000
TimeoutStartSec=0
ExecStart=/usr/local/bin/etcd \
  --name infra2 \
  --data-dir /var/lib/etcd \
  --initial-advertise-peer-urls https://172.21.0.2:2380 \
  --listen-peer-urls https://172.21.0.2:2380 \
  --listen-client-urls https://172.21.0.2:2379 \
  --advertise-client-urls https://172.21.0.2:2379 \
  --initial-cluster-token etcd-cluster \
  --initial-cluster infra1=https://172.21.0.17:2380,infra2=https://172.21.0.2:2380,infra3=https://172.21.0.8:2380 \
  --initial-cluster-state new \
  --client-cert-auth \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-client-cert-auth \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem

[Install]
WantedBy=multi-user.target
# node02 (the original had --name infra2 here, a copy-paste error; this member is infra3)
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
Restart=always
RestartSec=5s
LimitNOFILE=40000
TimeoutStartSec=0
ExecStart=/usr/local/bin/etcd \
  --name infra3 \
  --data-dir /var/lib/etcd \
  --initial-advertise-peer-urls https://172.21.0.8:2380 \
  --listen-peer-urls https://172.21.0.8:2380 \
  --listen-client-urls https://172.21.0.8:2379 \
  --advertise-client-urls https://172.21.0.8:2379 \
  --initial-cluster-token etcd-cluster \
  --initial-cluster infra1=https://172.21.0.17:2380,infra2=https://172.21.0.2:2380,infra3=https://172.21.0.8:2380 \
  --initial-cluster-state new \
  --client-cert-auth \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-client-cert-auth \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem

[Install]
WantedBy=multi-user.target
Key etcd parameters
name: The name of this etcd member.
data-dir: The member's data directory, holding the node ID, cluster ID, initial cluster configuration, and snapshot files; WAL files are also stored here unless --wal-dir is set. A default directory is used if unspecified.
initial-advertise-peer-urls: Used by the other members; they reach this member through this address, so it must be reachable from every other member. With static configuration, the value must also appear in --initial-cluster. The member ID is derived from --initial-cluster-token and --initial-advertise-peer-urls.
listen-peer-urls: The address this member listens on for traffic from other members. An all-zeros IP means listen on every interface.
listen-client-urls: The address this member listens on for etcd client traffic. An all-zeros IP means listen on every interface.
advertise-client-urls: Used by etcd clients; they reach this member through this address, so it must be reachable from the client side.
client-cert-auth: Enable client certificate authentication.
trusted-ca-file: CA file for client certificate authentication.
cert-file: Certificate for client-facing TLS.
key-file: Private key for client-facing TLS.
peer-client-cert-auth: Enable certificate authentication between members.
peer-trusted-ca-file: CA file for member-to-member certificate authentication.
peer-cert-file: Certificate for member-to-member TLS.
peer-key-file: Private key for member-to-member TLS.
initial-cluster-token: Distinguishes different clusters; use different tokens when several clusters run side by side.
initial-cluster: Describes every node in the cluster; this member uses it to contact the others. The member ID is derived from --initial-cluster-token and --initial-advertise-peer-urls.
initial-cluster-state: Whether this is a brand-new cluster; takes new or existing. With existing, the member tries to join the other members at startup. Use new when first creating the cluster (in the author's testing the last node also started fine with existing, but the other nodes must not use it). When a failed member rejoins a running cluster, use existing (new also happened to work in testing).
mv etcd.service /usr/lib/systemd/system/
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd
ETCDCTL_API=3 etcdctl --cert=/etc/kubernetes/ssl/kubernetes.pem --key /etc/kubernetes/ssl/kubernetes-key.pem --insecure-skip-tls-verify=true --endpoints=https://192.168.0.201:2379,https://192.168.0.202:2379,https://192.168.0.203:2379 endpoint health
https://192.168.0.201:2379 is healthy: successfully committed proposal: took = 13.87734ms
https://192.168.0.202:2379 is healthy: successfully committed proposal: took = 16.08662ms
https://192.168.0.203:2379 is healthy: successfully committed proposal: took = 15.656404ms
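Cluster membership can be checked with the same client flags; a sketch (point --endpoints at whichever network etcd is listening on in your environment):

# Should list infra1, infra2, and infra3 with their peer and client URLs
ETCDCTL_API=3 etcdctl --cert=/etc/kubernetes/ssl/kubernetes.pem --key /etc/kubernetes/ssl/kubernetes-key.pem --insecure-skip-tls-verify=true --endpoints=https://192.168.0.201:2379 member list -w table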
# Create a shared working directory
mkdir /kube
cd /kube
# Download kube-apiserver
wget https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kube-apiserver
# Download kube-scheduler
wget https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kube-scheduler
# Download kube-controller-manager
wget https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kube-controller-manager
Token auth file
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
7dc36cb645fbb422aeb328320673bbe0
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
After writing the file, confirm that BOOTSTRAP_TOKEN was expanded to the actual token value rather than written literally.
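With the token generated above, the resulting token.csv holds a single line in token,user,uid,groups format:

7dc36cb645fbb422aeb328320673bbe0,kubelet-bootstrap,10001,"system:kubelet-bootstrap"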
cp token.csv /etc/kubernetes/
scp token.csv root@192.168.0.202:/etc/kubernetes
scp token.csv root@192.168.0.203:/etc/kubernetes
mv ~/kube/kube-apiserver /usr/local/bin
cd /usr/local/bin
chmod 755 kube-apiserver
# node00
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
    --advertise-address=172.21.0.17 \
    --allow-privileged=true \
    --audit-log-maxage=30 \
    --audit-log-maxbackup=3 \
    --audit-log-maxsize=100 \
    --audit-log-path=/var/log/audit.log \
    --authorization-mode=Node,RBAC \
    --bind-address=0.0.0.0 \
    --client-ca-file=/etc/kubernetes/ssl/ca.pem \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
    --enable-swagger-ui=true \
    --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
    --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
    --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
    --etcd-servers=https://172.21.0.17:2379,https://172.21.0.2:2379,https://172.21.0.8:2379 \
    --event-ttl=1h \
    --insecure-bind-address=127.0.0.1 \
    --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \
    --kubelet-client-certificate=/etc/kubernetes/ssl/node00.pem \
    --kubelet-client-key=/etc/kubernetes/ssl/node00-key.pem \
    --kubelet-https=true \
    --service-account-key-file=/etc/kubernetes/ssl/service-account.pem \
    --service-cluster-ip-range=10.254.0.0/16 \
    --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
    --v=2
Restart=always
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
# node01
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
    --advertise-address=172.21.0.2 \
    --allow-privileged=true \
    --audit-log-maxage=30 \
    --audit-log-maxbackup=3 \
    --audit-log-maxsize=100 \
    --audit-log-path=/var/log/audit.log \
    --authorization-mode=Node,RBAC \
    --bind-address=0.0.0.0 \
    --client-ca-file=/etc/kubernetes/ssl/ca.pem \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
    --enable-swagger-ui=true \
    --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
    --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
    --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
    --etcd-servers=https://172.21.0.17:2379,https://172.21.0.2:2379,https://172.21.0.8:2379 \
    --event-ttl=1h \
    --insecure-bind-address=127.0.0.1 \
    --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \
    --kubelet-client-certificate=/etc/kubernetes/ssl/node01.pem \
    --kubelet-client-key=/etc/kubernetes/ssl/node01-key.pem \
    --kubelet-https=true \
    --service-account-key-file=/etc/kubernetes/ssl/service-account.pem \
    --service-cluster-ip-range=10.254.0.0/16 \
    --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
    --v=2
Restart=always
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
# node02
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
    --advertise-address=172.21.0.8 \
    --allow-privileged=true \
    --audit-log-maxage=30 \
    --audit-log-maxbackup=3 \
    --audit-log-maxsize=100 \
    --audit-log-path=/var/log/audit.log \
    --authorization-mode=Node,RBAC \
    --bind-address=0.0.0.0 \
    --client-ca-file=/etc/kubernetes/ssl/ca.pem \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
    --enable-swagger-ui=true \
    --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
    --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
    --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
    --etcd-servers=https://172.21.0.17:2379,https://172.21.0.2:2379,https://172.21.0.8:2379 \
    --event-ttl=1h \
    --insecure-bind-address=127.0.0.1 \
    --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \
    --kubelet-client-certificate=/etc/kubernetes/ssl/node02.pem \
    --kubelet-client-key=/etc/kubernetes/ssl/node02-key.pem \
    --kubelet-https=true \
    --service-account-key-file=/etc/kubernetes/ssl/service-account.pem \
    --service-cluster-ip-range=10.254.0.0/16 \
    --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
    --v=2
Restart=always
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
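As a convenience check (not part of the original steps): in 1.17 the default RBAC bindings allow unauthenticated access to /healthz, so the secure port can be probed directly on each master and should print ok:

# -k skips server certificate verification; use --cacert /etc/kubernetes/ssl/ca.pem for a strict check
curl -k https://127.0.0.1:6443/healthz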
Key kube-apiserver parameters
Reference: https://blog.csdn.net/zhonglinzhang/article/details/90697495
advertise-address: The IP address the apiserver advertises to members of the cluster; it must be reachable by the rest of the cluster. If empty, --bind-address is used; if --bind-address is also unset, the host's default interface is used.
authorization-mode: Ordered list of plugins that authorize requests on the secure port, comma separated from AlwaysAllow, AlwaysDeny, ABAC, Webhook, RBAC, Node. Default: AlwaysAllow.
allow-privileged: If true, allow privileged containers. Default: false.
audit-log-maxage: Maximum number of days to retain old audit log files.
audit-log-maxbackup: Maximum number of old audit log files to retain.
audit-log-maxsize: Maximum size in megabytes of an audit log file before it is rotated.
audit-log-path: Path of the file the audit log is written to.
bind-address: The IP address to listen on for the secure port. Must be reachable by the rest of the cluster and by CLI/web clients.
tls-cert-file: File containing the default x509 certificate for HTTPS (the CA certificate, if any, concatenated after the server certificate). If HTTPS serving is enabled and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory given by --cert-dir.
tls-private-key-file: File containing the default x509 private key matching --tls-cert-file.
insecure-bind-address: The IP address to bind the insecure port to (default 127.0.0.1). Deprecated and scheduled for removal.
client-ca-file: Enables client certificate authentication. The referenced file must contain one or more certificate authorities used to validate client certificates presented to this component. When a client certificate is validated, its Common Name is used as the user name for the request.
enable-admission-plugins: Admission plugins to enable in addition to the default set, comma separated.
enable-swagger-ui: Enable the swagger UI.
etcd-cafile: SSL certificate authority file used to secure etcd communication.
etcd-certfile: SSL certificate file used to secure etcd communication.
etcd-keyfile: SSL key file used to secure etcd communication.
etcd-servers: List of etcd servers to connect to (scheme://ip:port), comma separated.
event-ttl: Amount of time to retain events. Default: 1h0m0s.
kubelet-certificate-authority: Path to a certificate authority bundle used to verify the kubelets' serving certificates.
kubelet-client-certificate: Client certificate the apiserver presents when connecting to kubelets.
kubelet-client-key: Client key the apiserver uses when connecting to kubelets.
kubelet-https: Use HTTPS for connections to kubelets. Default: true.
service-account-key-file: File containing a PEM-encoded x509 RSA or ECDSA private or public key, used to verify service account tokens. The file may contain multiple keys, and the flag may be given multiple times with different files. If unspecified, --tls-private-key-file is used. Required when --service-account-signing-key is provided.
service-cluster-ip-range: CIDR notation IP range from which to assign service cluster IPs. Must not overlap with any IP ranges assigned to nodes or pods (default 10.0.0.0/24).
v: Log verbosity level.
cd ~/kube
wget https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kubectl
mv kubectl /usr/local/bin
chmod 755 /usr/local/bin/kubectl
kubectl config set-cluster kubernetes-training \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=admin.config

kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --client-key=/etc/kubernetes/ssl/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.config

kubectl config set-context default \
  --cluster=kubernetes-training \
  --user=admin \
  --kubeconfig=admin.config

kubectl config use-context default --kubeconfig=admin.config
Note: for kubectl to use this file by default, copy it to ~/.kube/config (mkdir -p ~/.kube && cp admin.config ~/.kube/config); otherwise pass --kubeconfig=admin.config to every command.
kubectl get ns
NAME              STATUS   AGE
default           Active   4h31m
kube-node-lease   Active   4h32m
kube-public       Active   4h32m
kube-system       Active   4h32m
mv ~/kube/kube-controller-manager /usr/local/bin
cd /usr/local/bin
chmod 755 kube-controller-manager
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --address=0.0.0.0 \
  --allocate-node-cidrs=true \
  --cluster-cidr=10.244.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.config \
  --leader-elect=true \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/service-account-key.pem \
  --service-cluster-ip-range=10.254.0.0/16 \
  --use-service-account-credentials=true \
  --v=2
Restart=always
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
kubectl config set-cluster kubernetes-training \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-controller-manager.config

kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.config

kubectl config set-context default \
  --cluster=kubernetes-training \
  --user=system:kube-controller-manager \
  --kubeconfig=kube-controller-manager.config

kubectl config use-context default --kubeconfig=kube-controller-manager.config
scp kube-controller-manager.config root@172.21.0.2:/etc/kubernetes/
scp kube-controller-manager.config root@172.21.0.8:/etc/kubernetes/
Start kube-controller-manager
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
kubectl get componentstatus
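With --leader-elect=true, only one of the three controller-manager replicas is active at a time. A sketch for checking which master holds the lock (in 1.17 the lock is recorded on an endpoints object in kube-system):

# The holderIdentity in the leader annotation names the active master
kubectl -n kube-system get endpoints kube-controller-manager -o yaml | grep leader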
Key kube-controller-manager parameters
Reference: https://www.jianshu.com/p/bdb153daba21
address: The IP address to listen on (0.0.0.0 for all interfaces). Deprecated in favor of --bind-address.
allocate-node-cidrs: Whether pod CIDRs should be allocated to nodes and recorded on the node objects.
cluster-cidr: CIDR range for pods in the cluster. Requires --allocate-node-cidrs to be true.
cluster-name: The instance prefix for the cluster. Default: kubernetes.
cluster-signing-cert-file: A PEM-encoded file with the X509 CA certificate used to issue certificates inside the cluster.
cluster-signing-key-file: A PEM-encoded file with the RSA or ECDSA private key used to sign certificates inside the cluster.
kubeconfig: Path to a kubeconfig file with the apiserver location and credentials.
leader-elect: Start a leader-election client and gain leadership before running the main loop; enable this when running replicated components for high availability.
root-ca-file: If set, this root certificate authority is included in the service account's token secret.
service-account-private-key-file: PEM-encoded RSA or ECDSA private key file used to sign service account tokens.
service-cluster-ip-range: CIDR range for services in the cluster. Requires --allocate-node-cidrs to be true.
use-service-account-credentials: If true, use an individual service account credential for each controller.
v: Log verbosity level.
mv ~/kube/kube-scheduler /usr/local/bin
cd /usr/local/bin
chmod 755 kube-scheduler
kubectl config set-cluster kubernetes-training \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-scheduler.config

kubectl config set-credentials system:kube-scheduler \
  --client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem \
  --client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.config

kubectl config set-context default \
  --cluster=kubernetes-training \
  --user=system:kube-scheduler \
  --kubeconfig=kube-scheduler.config

kubectl config use-context default --kubeconfig=kube-scheduler.config
vi /etc/kubernetes/config/kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "/etc/kubernetes/kube-scheduler.config"
leaderElection:
  leaderElect: true
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --config=/etc/kubernetes/config/kube-scheduler.yaml \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
sudo systemctl daemon-reload
sudo systemctl enable kube-scheduler
sudo systemctl start kube-scheduler
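In 1.17 the scheduler still exposes an insecure health endpoint on port 10251 by default, which is handy for a quick check:

# Should return 'ok' once the scheduler is up
curl http://127.0.0.1:10251/healthz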
sudo yum install -y socat conntrack ipset
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install -y docker-ce docker-ce-cli containerd.io
sudo systemctl enable docker
sudo systemctl start docker
cd ~/kube
wget --timestamping \
  https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz \
  https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kube-proxy \
  https://storage.googleapis.com/kubernetes-release/release/v1.17.1/bin/linux/amd64/kubelet
cd ~/kube
chmod +x kube-proxy kubelet
sudo mv kube-proxy kubelet /usr/local/bin/
mkdir -p /opt/cni/bin
tar -xvf cni-plugins-linux-amd64-v0.8.5.tgz --directory /opt/cni/bin/

# Copy the CNI plugins to node01, then install them there
scp cni-plugins-linux-amd64-v0.8.5.tgz root@172.21.0.2:/root/kube
cd ~/kube
mkdir -p /opt/cni/bin
tar -xvf cni-plugins-linux-amd64-v0.8.5.tgz --directory /opt/cni/bin

# Copy the CNI plugins to node02, then install them there
scp cni-plugins-linux-amd64-v0.8.5.tgz root@172.21.0.8:/root/kube
cd ~/kube
mkdir -p /opt/cni/bin
tar -xvf cni-plugins-linux-amd64-v0.8.5.tgz --directory /opt/cni/bin

---------------------------------------------------------
# Run the following in /etc/kubernetes on each node (see the parameterized sketch after this block)
# node00
kubectl config set-cluster kubernetes-training \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kubelet.config
kubectl config set-credentials system:node:node00 \
  --client-certificate=/etc/kubernetes/ssl/node00.pem \
  --client-key=/etc/kubernetes/ssl/node00-key.pem \
  --embed-certs=true \
  --kubeconfig=kubelet.config
kubectl config set-context default \
  --cluster=kubernetes-training \
  --user=system:node:node00 \
  --kubeconfig=kubelet.config
kubectl config use-context default --kubeconfig=kubelet.config

# node01
kubectl config set-cluster kubernetes-training \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kubelet.config
kubectl config set-credentials system:node:node01 \
  --client-certificate=/etc/kubernetes/ssl/node01.pem \
  --client-key=/etc/kubernetes/ssl/node01-key.pem \
  --embed-certs=true \
  --kubeconfig=kubelet.config
kubectl config set-context default \
  --cluster=kubernetes-training \
  --user=system:node:node01 \
  --kubeconfig=kubelet.config
kubectl config use-context default --kubeconfig=kubelet.config

# node02
kubectl config set-cluster kubernetes-training \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kubelet.config
kubectl config set-credentials system:node:node02 \
  --client-certificate=/etc/kubernetes/ssl/node02.pem \
  --client-key=/etc/kubernetes/ssl/node02-key.pem \
  --embed-certs=true \
  --kubeconfig=kubelet.config
kubectl config set-context default \
  --cluster=kubernetes-training \
  --user=system:node:node02 \
  --kubeconfig=kubelet.config
kubectl config use-context default --kubeconfig=kubelet.config
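The three per-node blocks above differ only in the node name, so the same kubeconfig can be produced on each machine with one parameterized sketch (assuming the hostname matches the certificate CN suffix, as configured earlier):

# NODE resolves to node00, node01, or node02 on the respective machine
NODE=$(hostname)
kubectl config set-cluster kubernetes-training \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kubelet.config
kubectl config set-credentials system:node:${NODE} \
  --client-certificate=/etc/kubernetes/ssl/${NODE}.pem \
  --client-key=/etc/kubernetes/ssl/${NODE}-key.pem \
  --embed-certs=true \
  --kubeconfig=kubelet.config
kubectl config set-context default \
  --cluster=kubernetes-training \
  --user=system:node:${NODE} \
  --kubeconfig=kubelet.config
kubectl config use-context default --kubeconfig=kubelet.config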
vi /etc/kubernetes/config/kubelet.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/etc/kubernetes/ssl/ca.pem"
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - "10.254.0.10"
runtimeRequestTimeout: "15m"
tlsCertFile: "/etc/kubernetes/ssl/node00.pem"
tlsPrivateKeyFile: "/etc/kubernetes/ssl/node00-key.pem"
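node01 and node02 need the same kubelet.yaml but pointing at their own certificate pair; one way (a sketch, assuming the node00 file was copied over first):

# On node01; use node02 on the third machine
sed -i 's/node00/node01/g' /etc/kubernetes/config/kubelet.yaml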
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet \
  --config=/etc/kubernetes/config/kubelet.yaml \
  --image-pull-progress-deadline=2m \
  --kubeconfig=/etc/kubernetes/kubelet.config \
  --pod-infra-container-image=cargo.caicloud.io/caicloud/pause-amd64:3.1 \
  --network-plugin=cni \
  --register-node=true \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/cni/bin \
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
Key kubelet parameters
config: Path to the kubelet's KubeletConfiguration file (kubelet.yaml above).
image-pull-progress-deadline: If an image pull makes no progress for this long, it is cancelled. Default: 1m0s.
kubeconfig: Path to the kubeconfig file used to connect to the apiserver.
pod-infra-container-image: The image for the pause (pod infrastructure) container that holds each pod's network namespace.
network-plugin: The network plugin to invoke for pod networking; cni here.
register-node: Register this node with the apiserver.
cni-conf-dir: Directory to search for CNI config files. Default: /etc/cni/net.d.
cni-bin-dir: Directory to search for CNI plugin binaries. Default: /opt/cni/bin.
v: Log verbosity level.
kubectl config set-cluster kubernetes-training \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-proxy.config

kubectl config set-credentials system:kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.config

kubectl config set-context default \
  --cluster=kubernetes-training \
  --user=system:kube-proxy \
  --kubeconfig=kube-proxy.config

kubectl config use-context default --kubeconfig=kube-proxy.config
vi /etc/kubernetes/config/kube-proxy-config.yaml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  kubeconfig: "/etc/kubernetes/kube-proxy.config"
mode: "iptables"
clusterCIDR: "10.244.0.0/16"
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/config/kube-proxy-config.yaml
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
sudo systemctl daemon-reload
sudo systemctl enable kubelet kube-proxy
sudo systemctl start kubelet kube-proxy
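Once kubelet and kube-proxy are running on all three machines, the nodes should register with the apiserver (they will report NotReady until the flannel CNI below is deployed):

# All three nodes should be listed
kubectl get nodes -o wide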
cat <<EOF | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
EOF
cat <<EOF | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: system:nodes
EOF
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
A copy of kube-flannel.yml is also available at https://shimo.im/docs/VWdqDhDg3wWJWqcQ/ (Shimo Docs).
kubectl apply -f https://raw.githubusercontent.com/caicloud/kube-ladder/master/tutorials/resources/coredns.yaml
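Give flannel and CoreDNS a moment, and confirm their pods reach Running in kube-system before starting the DNS test below:

kubectl -n kube-system get pods -o wide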
kubectl run busybox --image=busybox:1.28.3 --command -- sleep 3600
kubectl get pods -l run=busybox
NAME                      READY   STATUS    RESTARTS   AGE
busybox-d967695b6-29hfh   1/1     Running   0          61s
POD_NAME=$(kubectl get pods -l run=busybox -o jsonpath="{.items[0].metadata.name}")
kubectl exec -ti $POD_NAME -- nslookup kubernetes
Server:    10.254.0.10
Address 1: 10.254.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.254.0.1 kubernetes.default.svc.cluster.local
yum install haproxy
yum install keepalived
cat >> /etc/sysctl.conf << EOF
net.ipv4.ip_forward = 1
EOF
# Apply the change without a reboot
sysctl -p
vrrp_script haproxy-check {
    script "killall -0 haproxy"
    interval 2
    weight -2
    fall 10
    rise 2
}
 
vrrp_instance haproxy-vip {
    state MASTER
    priority 250
    interface ens33
    virtual_router_id 47
    advert_int 3
 
    unicast_src_ip 192.168.0.201
    unicast_peer {
        192.168.0.202
        192.168.0.203 
    }
 
    virtual_ipaddress {
        192.168.0.210
    }
 
    track_script {
        haproxy-check
    }
}
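Whichever node currently holds the MASTER role should also be holding the VIP; a quick check on the interface configured above:

# 192.168.0.210 should appear on exactly one node at a time
ip addr show ens33 | grep 192.168.0.210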
cat >> /etc/sysctl.conf << EOF
net.ipv4.ip_nonlocal_bind = 1
EOF
# Apply the change without a reboot
sysctl -p
frontend k8s-api
    bind *:8443  # 8443 to avoid conflicting with ingress on 443
    mode tcp
    option tcplog
    default_backend k8s-api

backend k8s-api
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server k8s-api-1 192.168.0.201:6443 check
    server k8s-api-2 192.168.0.202:6443 check
    server k8s-api-3 192.168.0.203:6443 check
systemctl enable keepalived haproxy
systemctl restart keepalived haproxy
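The apiserver should now also answer through the VIP (same unauthenticated /healthz assumption as earlier):

# Should print ok through the load balancer
curl -k https://192.168.0.210:8443/healthz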
systemctl restart kube-controller-manager kube-scheduler kubelet kube-proxy
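Restarting only matters if the components actually point at the load balancer: every kubeconfig generated earlier uses https://127.0.0.1:6443, so presumably they should be repointed at the VIP first. A sketch for one node, with file locations as used in this tutorial (note that the VIP must be present in kubernetes.pem's SAN list, which is why the .210 address was included in kubernetes-csr.json; adjust for your network):

# Update only the server field of each kubeconfig, then run the restarts above
for cfg in kube-controller-manager.config kube-scheduler.config kubelet.config kube-proxy.config; do
  kubectl config set-cluster kubernetes-training \
    --server=https://192.168.0.210:8443 \
    --kubeconfig=/etc/kubernetes/$cfg
done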