Simple K8S Deployment
1. Planning
| Name | IP |
|---|---|
| master01 | 172.1.2.11 |
| master02 | 172.1.2.12 |
| master03 | 172.1.2.13 |
| node01 | 172.1.2.21 |
| node02 | 172.1.2.22 |
| node03 | 172.1.2.23 |
2. Preparation
2.1 OS: Ubuntu 20.04, configured to allow root SSH login; configure the Aliyun apt mirror and the K8S apt source
# Set a password for the root account (root already exists on Ubuntu; it only needs a password)
sudo passwd root
# Allow root to log in over SSH: vim /etc/ssh/sshd_config
LoginGraceTime 2m
PermitRootLogin yes
StrictModes yes
#MaxAuthTries 6
#MaxSessions 10
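Restart sshd so the change takes effect:
systemctl restart sshd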
# Configure the Aliyun mirror: vim /etc/apt/sources.list
deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
# Configure the K8S apt source
apt-get update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet kubeadm kubectl
2.2 Disable the firewall
ufw disable && ufw status
2.3 Disable swap
# Turn off swap immediately
swapoff -a
# Then comment out every line that mounts a swap device: vim /etc/fstab
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# <file system> <mount point> <type> <options> <dump> <pass>
# / was on /dev/sda2 during curtin installation
/dev/disk/by-uuid/42933b0b-fea1-4206-8514-008642fbb380 / ext4 defaults 0 0
#/swap.img none swap sw 0 0
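Equivalently, the swap line can be commented out non-interactively (a one-liner sketch, assuming the swap entry is not already commented; check the result before rebooting):
sed -ri 's/^([^#].*\bswap\b.*)$/#\1/' /etc/fstab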
# List all units of type swap; "0 loaded units" means swap is fully off. If a swap unit is still loaded, disable it with "systemctl mask <unit name>"
[root@ubuntu ~]# systemctl --type swap
UNIT LOAD ACTIVE SUB DESCRIPTION
0 loaded units listed. Pass --all to see loaded but inactive units, too.
To show all installed unit files use 'systemctl list-unit-files'.
2.4 Time synchronization
apt install chrony -y
systemctl start chrony && systemctl enable chrony
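Check that chrony has picked up its time sources:
chronyc sources -v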
2.5 Configure /etc/hosts on every host
172.1.2.11 master01.com master01 k8sapi.com k8sapi
172.1.2.12 master02.com master02
172.1.2.13 master03.com master03
172.1.2.21 node01.com node01
172.1.2.22 node02.com node02
172.1.2.23 node03.com node03
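Verify on each host that the control-plane endpoint resolves correctly:
getent hosts k8sapi.com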
2.6 Install Docker, configure registry mirrors, and set the cgroup driver to systemd
# Step 1: install required system tools
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
# Step 2: install the repository GPG key
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
# Step 3: add the Docker CE repository
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
# Step 4: update the package index and install Docker CE
sudo apt-get -y update
sudo apt-get -y install docker-ce
# Configure registry mirrors and set the cgroup driver to systemd
vim /etc/docker/daemon.json
{
"registry-mirrors": [
"https://3v4fwhuc.mirror.aliyuncs.com",
"https://docker.mirrors.ustc.edu.cn/",
"https://hub-mirror.c.163.com",
"https://reg-mirror.qiniu.com",
"https://registry.docker-cn.com"
],
"exec-opts": ["native.cgroupdriver=systemd"]
}
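Restart Docker and confirm the cgroup driver took effect (the kubelet and Docker must agree on systemd):
systemctl daemon-reload && systemctl restart docker
docker info | grep -i 'cgroup driver'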
2.7 Install kubeadm, kubelet, and kubectl
# Install the latest version
apt install kubeadm kubelet kubectl -y
# Or pin a specific version (pin all three components to the same version so they stay in sync)
apt install -y kubeadm=1.22.4-00 kubelet=1.22.4-00 kubectl=1.22.4-00
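Optionally hold the packages so a routine apt upgrade cannot move the cluster to an unplanned version:
apt-mark hold kubelet kubeadm kubectl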
2.8 Pull the K8S component images from Aliyun's registry
Because k8s.gcr.io is unreachable from this network, use docker to pull the K8S component images (coredns, etcd, kube-apiserver, kube-controller-manager, kube-proxy, kube-scheduler, pause) from Aliyun's mirror registry.
After pulling, re-tag the images back to their k8s.gcr.io names. The script below is adapted from https://www.cnblogs.com/thirteen-yang/p/14442523.html; note that its version variables target an older release and must be adjusted to match the cluster version:
#!/bin/sh
### Version info
K8S_VERSION=v1.13.2
ETCD_VERSION=3.2.24
DASHBOARD_VERSION=v1.8.3
FLANNEL_VERSION=v0.10.0-amd64
DNS_VERSION=1.14.8
PAUSE_VERSION=3.1
coredns_version=1.2.6
## Core components
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-kube-proxy-amd64:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:$ETCD_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:$PAUSE_VERSION
### Networking
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-sidecar-amd64:$DNS_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-kube-dns-amd64:$DNS_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-dnsmasq-nanny-amd64:$DNS_VERSION
docker pull quay.io/coreos/flannel:$FLANNEL_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-coredns:$coredns_version
### Dashboard (frontend)
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:$DASHBOARD_VERSION
## Re-tag to k8s.gcr.io names
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:$K8S_VERSION k8s.gcr.io/kube-apiserver-amd64:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:$K8S_VERSION k8s.gcr.io/kube-controller-manager-amd64:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:$K8S_VERSION k8s.gcr.io/kube-scheduler-amd64:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-kube-proxy-amd64:$K8S_VERSION k8s.gcr.io/kube-proxy-amd64:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:$ETCD_VERSION k8s.gcr.io/etcd-amd64:$ETCD_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:$PAUSE_VERSION k8s.gcr.io/pause:$PAUSE_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-sidecar-amd64:$DNS_VERSION k8s.gcr.io/k8s-dns-sidecar-amd64:$DNS_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-kube-dns-amd64:$DNS_VERSION k8s.gcr.io/k8s-dns-kube-dns-amd64:$DNS_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-dnsmasq-nanny-amd64:$DNS_VERSION k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:$DNS_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:$DASHBOARD_VERSION k8s.gcr.io/kubernetes-dashboard-amd64:$DASHBOARD_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-coredns:$coredns_version k8s.gcr.io/coredns:$coredns_version
## Remove the Aliyun-named tags
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver-amd64:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager-amd64:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-kube-proxy-amd64:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:$ETCD_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:$PAUSE_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-sidecar-amd64:$DNS_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-kube-dns-amd64:$DNS_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-dnsmasq-nanny-amd64:$DNS_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:$DASHBOARD_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/openthings/k8s-gcr-io-coredns:$coredns_version
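For this v1.22.4 deployment, kubeadm itself can list and pull exactly the images it needs, which avoids adjusting the script's version variables by hand. Pulling straight from Aliyun's registry this way also makes the re-tagging step unnecessary, provided the same --image-repository flag is later passed to kubeadm init:
kubeadm config images list --kubernetes-version v1.22.4
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.22.4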
3. Initialize the K8S cluster
3.1 Initialize the first master
kubeadm init --control-plane-endpoint k8sapi.com --kubernetes-version=v1.22.4 \
--pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --token-ttl=0
# (With Calico instead of Flannel, the usual pod CIDR would be 192.168.0.0/16)
# --token-ttl=0: the bootstrap token never expires
# --control-plane-endpoint: the shared entry point of the control plane (k8sapi.com resolves to master01 via /etc/hosts)
On success it prints the output below; as root, run mkdir -p $HOME/.kube && cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join k8sapi.com:6443 --token xc9d4o.yhc6fxa6d4oot1yg \
--discovery-token-ca-cert-hash sha256:9a193d17c5ece7f1697134fdb3b0452b953af64932706f3d320fdad8c2b8858d \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join k8sapi.com:6443 --token xc9d4o.yhc6fxa6d4oot1yg \
--discovery-token-ca-cert-hash sha256:9a193d17c5ece7f1697134fdb3b0452b953af64932706f3d320fdad8c2b8858d
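If the token above is ever lost, a fresh worker join command can be printed on master01 at any time:
kubeadm token create --print-join-command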
3.2 Install the Flannel pod network add-on
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
[root@master01 ~]# cat kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
#image: flannelcni/flannel-cni-plugin:v1.0.1 for ppc64le (dockerhub limitations may apply)
image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
#image: flannelcni/flannel:v0.16.1 for ppc64le (dockerhub limitations may apply)
image: rancher/mirrored-flannelcni-flannel:v0.16.1
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
#image: flannelcni/flannel:v0.16.1 for ppc64le (dockerhub limitations may apply)
image: rancher/mirrored-flannelcni-flannel:v0.16.1
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
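After applying the manifest, confirm the Flannel DaemonSet pods reach Running on every node:
kubectl -n kube-system get pods -l app=flannel -o wide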
3.3 Join the worker nodes to the cluster
kubeadm join k8sapi.com:6443 --token xc9d4o.yhc6fxa6d4oot1yg \
--discovery-token-ca-cert-hash sha256:9a193d17c5ece7f1697134fdb3b0452b953af64932706f3d320fdad8c2b8858d
3.4 Join master02 and master03 to the control plane
# First copy master01's certificates to master02 and master03
scp -rp /etc/kubernetes/pki/ca* master02:/etc/kubernetes/pki/
scp -rp /etc/kubernetes/pki/sa* master02:/etc/kubernetes/pki/
scp -rp /etc/kubernetes/pki/front-proxy-ca* master02:/etc/kubernetes/pki/
scp -rp /etc/kubernetes/pki/etcd/ca.* master02:/etc/kubernetes/pki/etcd/
# Join the cluster as a control-plane node
kubeadm join k8sapi.com:6443 --token xc9d4o.yhc6fxa6d4oot1yg \
--discovery-token-ca-cert-hash sha256:9a193d17c5ece7f1697134fdb3b0452b953af64932706f3d320fdad8c2b8858d \
--control-plane
# Repeat the same steps on master03
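As an alternative to copying certificates by hand, kubeadm can distribute them itself: run the following on master01 to upload the certs and print a certificate key, then append --certificate-key <key> to the control-plane join command above:
kubeadm init phase upload-certs --upload-certs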
3.5 Inspect the cluster
[root@master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master01 Ready control-plane,master 41m v1.22.4
master02 Ready control-plane,master 14m v1.22.4
master03 Ready control-plane,master 12m v1.22.4
node01 Ready <none> 36m v1.22.4
node02 Ready <none> 35m v1.22.4
node03 Ready <none> 35m v1.22.4
[root@master01 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-78fcd69978-bshpd 1/1 Running 0 61m
coredns-78fcd69978-kj5rz 1/1 Running 0 61m
etcd-master01 1/1 Running 0 61m
etcd-master02 1/1 Running 0 35m
etcd-master03 1/1 Running 0 32m
kube-apiserver-master01 1/1 Running 0 61m
kube-apiserver-master02 1/1 Running 0 35m
kube-apiserver-master03 1/1 Running 0 33m
kube-controller-manager-master01 1/1 Running 1 (35m ago) 61m
kube-controller-manager-master02 1/1 Running 0 35m
kube-controller-manager-master03 1/1 Running 0 33m
kube-flannel-ds-8mgmv 1/1 Running 0 33m
kube-flannel-ds-9kh2l 1/1 Running 0 56m
kube-flannel-ds-kn9bs 1/1 Running 0 56m
kube-flannel-ds-m2pc9 1/1 Running 0 35m
kube-flannel-ds-r4v2w 1/1 Running 0 59m
kube-flannel-ds-sxzs9 1/1 Running 0 56m
kube-proxy-6pb85 1/1 Running 0 35m
kube-proxy-8q772 1/1 Running 0 33m
kube-proxy-f56d4 1/1 Running 0 56m
kube-proxy-gmz5d 1/1 Running 0 56m
kube-proxy-m9qnf 1/1 Running 0 56m
kube-proxy-qzzwf 1/1 Running 0 61m
kube-scheduler-master01 1/1 Running 1 (35m ago) 61m
kube-scheduler-master02 1/1 Running 0 35m
kube-scheduler-master03 1/1 Running 0 33m
==== Errata ====
The subnet planned above uses public IP addresses. With hosts-file resolution this works fine as long as the cluster does not need Internet access, but it causes problems later when an OpenWrt side-router is configured as the cluster's gateway: the OpenWrt control panel becomes unreachable, even though routing itself keeps working. Everything above was done on VMware NAT networking, with OpenWrt and the cluster on the same network.