Installing a Kubernetes Cluster from Binaries
Environment
OS: Ubuntu 16.04
Kubernetes version: 1.14
Node 192.168.204.13 k8s-node01
Node 192.168.204.14 k8s-node02
Node 192.168.204.15 k8s-node03
Downloading the binaries
For each step below, pay attention to which machine the commands are run on.
Deploying Docker
# Step 1: Install the prerequisite system tools
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
# Step 2: Add the Docker CE apt repository
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
# Step 3: Update the package index and install Docker CE
sudo apt-get update
sudo apt-get install -y docker-ce
# Step 4: Start and enable the Docker service
sudo systemctl start docker
sudo systemctl enable docker
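A quick sanity check that the daemon is running; docker version should print both a Client and a Server section:
sudo docker version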
Add kernel parameters on all machines
cat >> /etc/sysctl.conf << EOF
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
vm.swappiness = 0
net.ipv4.neigh.default.gc_stale_time=120
net.ipv4.ip_forward = 1
# see details in https://help.aliyun.com/knowledge_detail/39428.html
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
# see details in https://help.aliyun.com/knowledge_detail/41334.html
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
kernel.sysrq = 1
# iptables透明网桥的实现
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
EOF
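The net.bridge.bridge-nf-call-* keys only exist once the br_netfilter kernel module is loaded, so load it before applying the settings:
# Load the bridge netfilter module now and on every boot
modprobe br_netfilter
echo br_netfilter >> /etc/modules
# Apply the settings written to /etc/sysctl.conf
sysctl -p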
cat >> /etc/hosts << EOF
192.168.204.13 k8s-node01
192.168.204.14 k8s-node02
192.168.204.15 k8s-node03
EOF
All machines:
mkdir -p /etc/kubernetes/ssl
node01:
Download pages: https://github.com/kubernetes/kubernetes/releases
https://github.com/etcd-io/etcd/releases
https://github.com/containernetworking/plugins/releases
cd /usr/local/src
# Kubernetes binaries
wget https://dl.k8s.io/v1.14.0/kubernetes-client-linux-amd64.tar.gz # client package
wget https://dl.k8s.io/v1.14.0/kubernetes-server-linux-amd64.tar.gz # server package
wget https://dl.k8s.io/v1.14.0/kubernetes-node-linux-amd64.tar.gz # node package
tar xf kubernetes-client-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-node-linux-amd64.tar.gz
cd kubernetes
tar xf kubernetes-src.tar.gz
cd ..
cp -a kubernetes/server/bin/kube-apiserver /usr/local/bin/
cp -a kubernetes/server/bin/kube-controller-manager /usr/local/bin/
cp -a kubernetes/server/bin/kube-scheduler /usr/local/bin/
cp -a kubernetes/client/bin/kubectl /usr/local/bin/
cp -a kubernetes/server/bin/kubelet /usr/local/bin/
cp -a kubernetes/server/bin/kube-proxy /usr/local/bin/
cp -a kubernetes/cluster/centos/node/bin/remove-docker0.sh /usr/local/bin/
# Note: root SSH login is usually disabled on Ubuntu; either set up key-based root login or scp to a regular user and move the files into place afterward
scp kubernetes/server/bin/kubelet k8s-node02:/usr/local/bin/
scp kubernetes/server/bin/kube-proxy k8s-node02:/usr/local/bin/
scp kubernetes/server/bin/kubelet k8s-node03:/usr/local/bin/
scp kubernetes/server/bin/kube-proxy k8s-node03:/usr/local/bin/
scp kubernetes/cluster/centos/node/bin/remove-docker0.sh k8s-node02:/usr/local/bin/
scp kubernetes/cluster/centos/node/bin/remove-docker0.sh k8s-node03:/usr/local/bin/
# Download the cfssl certificate tools
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
# etcd
wget https://github.com/etcd-io/etcd/releases/download/v3.3.12/etcd-v3.3.12-linux-amd64.tar.gz # 下载etcd
tar xf etcd-v3.3.12-linux-amd64.tar.gz
cd etcd-v3.3.12-linux-amd64
cp -a etcd etcdctl /usr/local/bin/
cd ..
# Flannel
wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
tar zxf flannel-v0.10.0-linux-amd64.tar.gz
cp flanneld mk-docker-opts.sh /usr/local/bin/
scp flanneld mk-docker-opts.sh k8s-node02:/usr/local/bin/
scp flanneld mk-docker-opts.sh k8s-node03:/usr/local/bin/
# CNI plugins
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
mkdir /usr/local/bin/cni
tar zxf cni-plugins-amd64-v0.7.1.tgz -C /usr/local/bin/cni
ssh k8s-node02 mkdir /usr/local/bin/cni
ssh k8s-node03 mkdir /usr/local/bin/cni
scp /usr/local/bin/cni/* k8s-node02:/usr/local/bin/cni/
scp /usr/local/bin/cni/* k8s-node03:/usr/local/bin/cni/
Creating the CA certificate manually
node01:
1. Initialize cfssl
cd /usr/local/src/
mkdir ssl && cd ssl
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json
2. Create the JSON config used to generate the CA files
cat >> ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "8760h"
}
}
}
}
EOF
3. Create the JSON config for the CA certificate signing request (CSR)
cat >> ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "ShenZhen",
"O": "k8s",
"OU": "System"
}
]
}
EOF
4. Generate the CA certificate (ca.pem) and private key (ca-key.pem)
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
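Before distributing the CA, it can be inspected with openssl to confirm the subject and validity window:
openssl x509 -in ca.pem -noout -subject -dates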
5. Distribute the certificates
# Because root cannot log in with a password on Ubuntu, either set up key-based login or copy to a regular user's home first and then move the files (see the commented commands below)
cp ca.csr ca.pem ca-key.pem ca-config.json /etc/kubernetes/ssl
scp ca.csr ca.pem ca-key.pem ca-config.json k8s-node02:/etc/kubernetes/ssl
scp ca.csr ca.pem ca-key.pem ca-config.json k8s-node03:/etc/kubernetes/ssl
# mv /home/hank/ca* /etc/kubernetes/ssl/
# chown -R root. /etc/kubernetes/
Installing etcd
1. Distribute the binaries to each machine
scp /usr/local/bin/etcd /usr/local/bin/etcdctl k8s-node02:/usr/local/bin/
scp /usr/local/bin/etcd /usr/local/bin/etcdctl k8s-node03:/usr/local/bin/
2. Create the certificate
List every address the members are reached on in hosts below; include hostnames as well if etcd should be reachable by name
cd /usr/local/src/ssl
cat >> etcd-csr.json << EOF
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.204.13",
"192.168.204.14",
"192.168.204.15"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "ShenZhen",
"O": "k8s",
"OU": "System"
}
]
}
EOF
3. Generate the etcd certificate and private key
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
-ca-key=/etc/kubernetes/ssl/ca-key.pem \
-config=/etc/kubernetes/ssl/ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
4. Distribute the certificates
cp etcd*.pem /etc/kubernetes/ssl
scp etcd*.pem k8s-node02:/etc/kubernetes/ssl
scp etcd*.pem k8s-node03:/etc/kubernetes/ssl
rm -f etcd.csr etcd-csr.json
5. Create the etcd systemd service
Create the configuration file on each node; ETCD_NAME must match that node's entry in ETCD_INITIAL_CLUSTER
All machines: mkdir -p /etc/kubernetes/cfg
node01:
cat >> /etc/kubernetes/cfg/etcd.conf << EOF
#[member]
ETCD_NAME="etcd-node02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.204.13:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.204.13:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.204.13:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node01=https://192.168.204.13:2380,etcd-node02=https://192.168.204.14:2380,etcd-node03=https://192.168.204.15:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.204.13:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
EOF
node02:
cat >> /etc/kubernetes/cfg/etcd.conf << EOF
ETCD_NAME="etcd-node02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.204.14:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.204.14:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.204.14:2380"
ETCD_INITIAL_CLUSTER="etcd-node01=https://192.168.204.13:2380,etcd-node02=https://192.168.204.14:2380,etcd-node03=https://192.168.204.15:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.204.14:2379"
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
EOF
node03:
cat >> /etc/kubernetes/cfg/etcd.conf << EOF
ETCD_NAME="etcd-node03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.204.15:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.204.15:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.204.15:2380"
ETCD_INITIAL_CLUSTER="etcd-node01=https://192.168.204.13:2380,etcd-node02=https://192.168.204.14:2380,etcd-node03=https://192.168.204.15:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.204.15:2379"
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
EOF
All machines:
mkdir -p /var/lib/etcd
cat >> /etc/systemd/system/etcd.service << 'EOF'
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/etc/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to the number of processors; the quoted 'EOF' keeps $(nproc) unexpanded so bash evaluates it at service start
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/local/bin/etcd"
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start etcd
systemctl status etcd
systemctl enable etcd
6. Verify the cluster
etcdctl --endpoints=https://192.168.204.13:2379 \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--cert-file=/etc/kubernetes/ssl/etcd.pem \
--key-file=/etc/kubernetes/ssl/etcd-key.pem cluster-health
etcdctl --endpoints=https://192.168.204.14:2379 \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--cert-file=/etc/kubernetes/ssl/etcd.pem \
--key-file=/etc/kubernetes/ssl/etcd-key.pem cluster-health
etcdctl --endpoints=https://192.168.204.15:2379 \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--cert-file=/etc/kubernetes/ssl/etcd.pem \
--key-file=/etc/kubernetes/ssl/etcd-key.pem cluster-health
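member list additionally shows which member is currently the leader (same v2-API TLS flags as above):
etcdctl --endpoints=https://192.168.204.13:2379 \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--cert-file=/etc/kubernetes/ssl/etcd.pem \
--key-file=/etc/kubernetes/ssl/etcd-key.pem member list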
Deploying the master node
1. Create the certificate
node01:
cd /usr/local/src/ssl
cat >> kubernetes-csr.json << EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.204.13",
"10.1.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "ShenZhen",
"O": "k8s",
"OU": "System"
}
]
}
EOF
2. Generate and distribute the certificates
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
-ca-key=/etc/kubernetes/ssl/ca-key.pem \
-config=/etc/kubernetes/ssl/ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
cp kubernetes*.pem /etc/kubernetes/ssl/
scp kubernetes*.pem k8s-node02:/etc/kubernetes/ssl/
scp kubernetes*.pem k8s-node03:/etc/kubernetes/ssl/
3. Create the client token file used by kube-apiserver
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
The token can be any random value, such as the one generated above; nodes use it to bootstrap against the master, and the same value must be passed to kubectl config set-credentials in the node bootstrap step later.
cat >> /etc/kubernetes/ssl/bootstrap-token.csv << EOF
00f1c8a566d853d0af327de8fe959457,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
4. Create the basic username/password authentication file
cat >> /etc/kubernetes/ssl/basic-auth.csv << EOF
admin,admin,1
readonly,readonly,2
EOF
5. Create the kube-apiserver systemd service
mkdir -p /log/kubernetes/log # the unit below writes its logs and the audit log here
cat >> /etc/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--bind-address=0.0.0.0 \
--insecure-bind-address=127.0.0.1 \
--authorization-mode=Node,RBAC \
--runtime-config=rbac.authorization.k8s.io/v1 \
--kubelet-https=true \
--anonymous-auth=false \
--basic-auth-file=/etc/kubernetes/ssl/basic-auth.csv \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/ssl/bootstrap-token.csv \
--service-cluster-ip-range=10.1.0.0/16 \
--service-node-port-range=20000-40000 \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
--etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
--etcd-servers=https://192.168.204.13:2379,https://192.168.204.14:2379,https://192.168.204.15:2379 \
--allow-privileged=true \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/log/kubernetes/log/api-audit.log \
--event-ttl=1h \
--v=2 \
--logtostderr=false \
--log-dir=/log/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
6. Create the Controller Manager service
cat >> /etc/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--allocate-node-cidrs=true \
--service-cluster-ip-range=10.1.0.0/16 \
--cluster-cidr=10.2.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--leader-elect=true \
--v=2 \
--logtostderr=false \
--log-dir=/log/kubernetes/log
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
7. Create the Kubernetes Scheduler service
cat >> /etc/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--leader-elect=true \
--v=2 \
--logtostderr=false \
--log-dir=/log/kubernetes/log
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
8. Set up the kubectl command-line tool
cd /usr/local/src/ssl/
cat >> admin-csr.json << EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "ShenZhen",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
# Create the certificate
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
-ca-key=/etc/kubernetes/ssl/ca-key.pem \
-config=/etc/kubernetes/ssl/ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
mv admin*.pem /etc/kubernetes/ssl/
# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.204.13:6443
# Set client authentication parameters
kubectl config set-credentials admin \
--client-certificate=/etc/kubernetes/ssl/admin.pem \
--embed-certs=true \
--client-key=/etc/kubernetes/ssl/admin-key.pem
# Set context parameters
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin
# Set the default context
kubectl config use-context kubernetes
kubectl get cs
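If the three control-plane services are healthy, the output looks roughly like this (illustrative):
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}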
All of the operations above are performed on node01.
Deploying the worker nodes
1. Create the role binding
If anything goes wrong after creating this binding, delete the relevant /etc/kubernetes/cfg/xxxx.conf files and redeploy
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
2. Create the bootstrap kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.204.13:6443 \
--kubeconfig=bootstrap.kubeconfig
# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
--token=00f1c8a566d853d0af327de8fe959457 \
--kubeconfig=bootstrap.kubeconfig
# Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
# Use the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
cp bootstrap.kubeconfig /etc/kubernetes/cfg
scp bootstrap.kubeconfig k8s-node02:/etc/kubernetes/cfg
scp bootstrap.kubeconfig k8s-node03:/etc/kubernetes/cfg
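The generated file can be sanity-checked with config view; embedded certificate data is masked in the output:
kubectl config view --kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig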
3. Deploy the kubelet. First, set up CNI support on all three machines:
mkdir -p /etc/cni/net.d
cat >> /etc/cni/net.d/10-default.conf << EOF
{
"name": "flannel",
"type": "flannel",
"delegate": {
"bridge": "docker0",
"isDefaultGateway": true,
"mtu": 1400
}
}
EOF
4. Create the kubelet service; create the matching unit on each of the three machines
node01:
mkdir -p /log/kubernetes/log
mkdir -p /var/lib/kubelet
cat >> /etc/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--address=192.168.204.13 \
--hostname-override=192.168.204.13 \
--pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
--bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--network-plugin=cni \
--cni-conf-dir=/etc/cni/net.d \
--cni-bin-dir=/usr/local/bin/cni \
--cluster-dns=10.1.0.2 \
--cluster-domain=cluster.local. \
--hairpin-mode hairpin-veth \
--allow-privileged=true \
--fail-swap-on=false \
--v=2 \
--logtostderr=false \
--log-dir=/log/kubernetes/log \
--feature-gates=DevicePlugins=true
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
node02:
mkdir -p /var/lib/kubelet /log/kubernetes/log
cat >> /etc/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--address=192.168.204.14 \
--hostname-override=192.168.204.14 \
--pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
--bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--network-plugin=cni \
--cni-conf-dir=/etc/cni/net.d \
--cni-bin-dir=/usr/local/bin/cni \
--cluster-dns=10.1.0.2 \
--cluster-domain=cluster.local. \
--hairpin-mode hairpin-veth \
--allow-privileged=true \
--fail-swap-on=false \
--v=2 \
--logtostderr=false \
--log-dir=/log/kubernetes/log \
--feature-gates=DevicePlugins=true
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
node03:
mkdir -p /var/lib/kubelet /log/kubernetes/log
cat >> /etc/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--address=192.168.204.15 \
--hostname-override=192.168.204.15 \
--pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
--bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--network-plugin=cni \
--cni-conf-dir=/etc/cni/net.d \
--cni-bin-dir=/usr/local/bin/cni \
--cluster-dns=10.1.0.2 \
--cluster-domain=cluster.local. \
--hairpin-mode hairpin-veth \
--allow-privileged=true \
--fail-swap-on=false \
--v=2 \
--logtostderr=false \
--log-dir=/log/kubernetes/log \
--feature-gates=DevicePlugins=true
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
5. Approve the nodes' join requests
node01:
kubectl get csr
kubectl get csr | grep 'Pending' | awk '{print $1}' | xargs kubectl certificate approve
kubectl get node
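After approval the kubelets finish bootstrapping and register; because of --hostname-override the nodes show up under their IPs, roughly (illustrative):
NAME             STATUS   ROLES    AGE   VERSION
192.168.204.13   Ready    <none>   1m    v1.14.0
192.168.204.14   Ready    <none>   1m    v1.14.0
192.168.204.15   Ready    <none>   1m    v1.14.0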
Deploying kube-proxy
1. Install the IPVS (LVS) tooling that kube-proxy depends on
All machines:
apt-get install ipvsadm ipset conntrack -y
2. Create the certificate
node01:
cd /usr/local/src/ssl/
cat >> kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "ShenZhen",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
-ca-key=/etc/kubernetes/ssl/ca-key.pem \
-config=/etc/kubernetes/ssl/ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
cp kube-proxy*.pem /etc/kubernetes/ssl/
scp kube-proxy*.pem k8s-node02:/etc/kubernetes/ssl/
scp kube-proxy*.pem k8s-node03:/etc/kubernetes/ssl/
3. Create the kube-proxy kubeconfig
node01:
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.204.13:6443 \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
--client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
# Distribute the kubeconfig file
cp kube-proxy.kubeconfig /etc/kubernetes/cfg/
scp kube-proxy.kubeconfig k8s-node02:/etc/kubernetes/cfg/
scp kube-proxy.kubeconfig k8s-node03:/etc/kubernetes/cfg/
4. Create the kube-proxy service
node01:
mkdir /var/lib/kube-proxy
cat >> /etc/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
--bind-address=192.168.204.13 \
--hostname-override=192.168.204.13 \
--kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig \
--masquerade-all \
--feature-gates=SupportIPVSProxyMode=true \
--proxy-mode=ipvs \
--ipvs-min-sync-period=5s \
--ipvs-sync-period=5s \
--ipvs-scheduler=rr \
--v=2 \
--logtostderr=false \
--log-dir=/log/kubernetes/log
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
ipvsadm -L -n
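In IPVS mode a virtual server for the kubernetes service ClusterIP (10.1.0.1:443, the first address of the 10.1.0.0/16 service range configured above) should appear, forwarding to the apiserver; an illustrative excerpt:
TCP  10.1.0.1:443 rr
  -> 192.168.204.13:6443          Masq    1      0          0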
node02:
mkdir /var/lib/kube-proxy
cat >> /etc/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
--bind-address=192.168.204.14 \
--hostname-override=192.168.204.14 \
--kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig \
--masquerade-all \
--feature-gates=SupportIPVSProxyMode=true \
--proxy-mode=ipvs \
--ipvs-min-sync-period=5s \
--ipvs-sync-period=5s \
--ipvs-scheduler=rr \
--v=2 \
--logtostderr=false \
--log-dir=/log/kubernetes/log
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
ipvsadm -L -n
node03:
mkdir /var/lib/kube-proxy
cat >> /etc/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
--bind-address=192.168.204.15 \
--hostname-override=192.168.204.15 \
--kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig \
--masquerade-all \
--feature-gates=SupportIPVSProxyMode=true \
--proxy-mode=ipvs \
--ipvs-min-sync-period=5s \
--ipvs-sync-period=5s \
--ipvs-scheduler=rr \
--v=2 \
--logtostderr=false \
--log-dir=/log/kubernetes/log
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
ipvsadm -L -n
Deploying Flannel
1. Generate a certificate for Flannel
node01:
cd /usr/local/src/ssl
cat >> flanneld-csr.json << EOF
{
"CN": "flanneld",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "ShenZhen",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
-ca-key=/etc/kubernetes/ssl/ca-key.pem \
-config=/etc/kubernetes/ssl/ca-config.json \
-profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
cp flanneld*.pem /etc/kubernetes/ssl/
scp flanneld*.pem k8s-node02:/etc/kubernetes/ssl/
scp flanneld*.pem k8s-node03:/etc/kubernetes/ssl/
2. Configuration file
All machines:
cat >> /etc/kubernetes/cfg/flannel << EOF
FLANNEL_ETCD="-etcd-endpoints=https://192.168.204.13:2379,https://192.168.204.14:2379,https://192.168.204.15:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/etc/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/etc/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/etc/kubernetes/ssl/flanneld-key.pem"
EOF
3. Create the systemd service
All machines:
Quote the heredoc delimiter ('EOF'); an unquoted one would make the shell expand the ${...} variables to empty strings instead of leaving them for systemd
cat >> /etc/systemd/system/flannel.service << 'EOF'
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service
[Service]
EnvironmentFile=-/etc/kubernetes/cfg/flannel
ExecStartPre=/usr/local/bin/remove-docker0.sh
ExecStart=/usr/local/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -d /run/flannel/docker
Type=notify
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
4. Write the flannel network config into etcd
node01:
# /kubernetes/network is an etcd key prefix (FLANNEL_ETCD_KEY above), not a filesystem path, so no local directory is needed
etcdctl --ca-file /etc/kubernetes/ssl/ca.pem \
  --cert-file /etc/kubernetes/ssl/flanneld.pem \
  --key-file /etc/kubernetes/ssl/flanneld-key.pem \
  --no-sync -C https://192.168.204.13:2379,https://192.168.204.14:2379,https://192.168.204.15:2379 \
  mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}'
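Read the key back to confirm it was written (same flags):
etcdctl --ca-file /etc/kubernetes/ssl/ca.pem \
  --cert-file /etc/kubernetes/ssl/flanneld.pem \
  --key-file /etc/kubernetes/ssl/flanneld-key.pem \
  --no-sync -C https://192.168.204.13:2379 \
  get /kubernetes/network/config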
5. Start the service
All machines:
systemctl daemon-reload
systemctl enable flannel
systemctl start flannel
systemctl status flannel
6. Configure docker to use flannel
All machines:
vim /lib/systemd/system/docker.service # location of the docker-ce unit on Ubuntu
# In [Unit], change After and add Requires
[Unit]
After=network-online.target firewalld.service flannel.service
Wants=network-online.target
Requires=flannel.service
# In [Service], add EnvironmentFile=-/run/flannel/docker
[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
# Change ExecStart as well, so the flannel-generated options are passed to dockerd
ExecStart=/usr/bin/dockerd $DOCKER_OPTS
systemctl daemon-reload
systemctl restart docker
systemctl status docker
systemctl enable docker
Verify the change took effect:
ifconfig
# docker0 and flannel.1 should sit in the same address range (both inside 10.2.0.0/16)
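A more targeted check than ifconfig; mk-docker-opts.sh wrote the node's allocated subnet into /run/flannel/docker:
ip -4 addr show flannel.1
ip -4 addr show docker0
cat /run/flannel/docker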
Deploying CoreDNS and the Dashboard
Save the manifest below as coredns.yaml, then run: kubectl create -f coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
.:53 {
errors
health
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
template:
metadata:
labels:
k8s-app: coredns
spec:
serviceAccountName: coredns
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: coredns
image: coredns/coredns:1.0.6
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: coredns
clusterIP: 10.1.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
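Once the manifest is applied, the pods should come up and in-cluster name resolution should work; a quick test (busybox:1.28 ships a working nslookup):
kubectl get pods -n kube-system -l k8s-app=coredns
kubectl run dns-test --rm -it --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default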
Save the manifest below as kubernetes-dashboard.yaml, then run: kubectl create -f kubernetes-dashboard.yaml
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
nodePort: 30001
selector:
k8s-app: kubernetes-dashboard
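The Service above publishes the dashboard on NodePort 30001, so it is reachable at https://192.168.204.13:30001 (any node IP works). One common way to obtain a login token is a dedicated service account bound to cluster-admin; the name dashboard-admin below is just an example:
kubectl -n kube-system create serviceaccount dashboard-admin
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# Print the token from the automatically generated secret
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}')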