Installing Kubernetes 1.19 with kubeadm
VM: 2 CPUs, 2 GB RAM (kubeadm's preflight check requires at least 2 CPUs). If the install misbehaves, reboot, or start over on a fresh machine.
[root@localhost ~]# uname -r
3.10.0-1160.el7.x86_64
1. Install Docker
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum list docker-ce --showduplicates | sort -r
# pick one version; 19.03 is used here
yum -y install docker-ce-18.06.1.ce-3.el7
yum -y install docker-ce-19.03.8-3.el7   # or the 19.03 release
systemctl enable docker && systemctl start docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service
docker version
vi /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://1nj0zren.mirror.aliyuncs.com",
    "https://kfwkfulq.mirror.aliyuncs.com",
    "https://2lqq34jg.mirror.aliyuncs.com",
    "https://pee6w651.mirror.aliyuncs.com",
    "http://hub-mirror.c.163.com",
    "https://docker.mirrors.ustc.edu.cn",
    "http://f1361db2.m.daocloud.io",
    "https://registry.docker-cn.com"
  ]
}
systemctl daemon-reload
systemctl restart docker
docker info
yum list installed | grep docker
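It is worth confirming that the cgroup driver change took effect, since kubelet and Docker must agree on it:
docker info 2>/dev/null | grep -i 'cgroup driver'
# expected output: Cgroup Driver: systemd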
2. Prepare the images
[root@localhost 1.19]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry.aliyuncs.com/google_containers/kube-proxy v1.19.0 bc9c328f379c 9 months ago 118MB
registry.aliyuncs.com/google_containers/kube-apiserver v1.19.0 1b74e93ece2f 9 months ago 119MB
registry.aliyuncs.com/google_containers/kube-controller-manager v1.19.0 09d665d529d0 9 months ago 111MB
registry.aliyuncs.com/google_containers/kube-scheduler v1.19.0 cbdc8369d8b1 9 months ago 45.7MB
registry.aliyuncs.com/google_containers/etcd 3.4.9-1 d4ca8726196c 11 months ago 253MB
registry.aliyuncs.com/google_containers/coredns 1.7.0 bfe3a36ebd25 12 months ago 45.2MB
registry.aliyuncs.com/google_containers/pause 3.2 80d28bedfe5d 16 months ago 683kB
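If these images are not cached yet, kubeadm can pre-pull them from the same Aliyun mirror that init uses later (run this after installing kubeadm in step 4):
kubeadm config images pull \
  --image-repository=registry.aliyuncs.com/google_containers \
  --kubernetes-version=v1.19.0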
3. Prepare the host
# Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
# Disable swap (kubelet refuses to start with swap enabled by default)
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# Disable SELinux (permissive now, disabled after reboot)
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# Synchronize the clock
yum -y install ntpdate
ntpdate time.windows.com
hwclock --systohc
# Pass bridged IPv4 traffic to the iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system   # apply the settings
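On some CentOS 7 hosts the br_netfilter module is not loaded by default, and without it the two bridge keys above do not exist for sysctl to set; loading it first (and persisting it) avoids that:
modprobe br_netfilter                                      # creates the bridge-nf sysctl keys
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf  # reload on boot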
4. Install kubeadm
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet-1.19.0 kubeadm-1.19.0 kubectl-1.19.0
systemctl enable kubelet   # enable only; kubeadm init renders its config and starts it
5. Run kubeadm init
[root@localhost ~]# cat kubeadm.sh
kubeadm init --kubernetes-version=1.19.0 \
--apiserver-advertise-address=$(ifconfig | grep ens -A 1 | grep inet | awk '{ print $2 }') \
--image-repository=registry.aliyuncs.com/google_containers \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16 \
--ignore-preflight-errors=all \
--v=5
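The ifconfig pipeline assumes an interface whose name starts with ens, which is typical for CentOS 7 VMs but not guaranteed. A more portable sketch takes the source address of the default route instead (the 8.8.8.8 probe address is arbitrary; ip route get only does a lookup, no packet is sent):
APISERVER_IP=$(ip -4 route get 8.8.8.8 | awk '{for (i=1; i<=NF; i++) if ($i == "src") {print $(i+1); exit}}')
kubeadm init --kubernetes-version=1.19.0 --apiserver-advertise-address=${APISERVER_IP} ...   # remaining flags as above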
6. Set up the kubeconfig
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
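A quick sanity check that the kubeconfig works; the node will report NotReady until a pod network is installed in the next step:
kubectl cluster-info   # should print the control plane endpoint
kubectl get nodes      # NotReady is expected before flannel is deployed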
7. Deploy flannel
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system
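The flannel image takes a moment to pull; these commands wait until the DaemonSet pod is Running and the node turns Ready (the app=flannel label comes from the manifest listed at the end):
kubectl -n kube-system get pods -l app=flannel -w
kubectl get nodes -w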
Taints and tolerations docs: https://kubernetes.io/zh/docs/concepts/scheduling-eviction/taint-and-toleration/
8. Make the master schedulable, then join. Done.
kubectl get nodes
[root@localhost ~]# kubectl describe node localhost.localdomain | grep Taints
Taints: node-role.kubernetes.io/master:NoSchedule
kubectl taint nodes localhost.localdomain node-role.kubernetes.io/master-
[root@localhost ~]# kubectl describe node localhost.localdomain | grep Taints
Taints: node.kubernetes.io/not-ready:NoSchedule
(the not-ready taint above is added automatically and clears once the CNI is running and the node reports Ready)
Run kubeadm join on the workers:
kubectl describe node localhost.localdomain | grep Taints
Taints: <none>
If the master node is Ready (single-node setup), the join below is unnecessary.
kubeadm join 192.168.116.27:6443 --token 7q9hvo.e65aim12as1a0v98 \
    --discovery-token-ca-cert-hash sha256:76e68e8ac68af391e3f406a5ea451ea44b0f67a63ac44754a43f8e483d613e14 \
    --ignore-preflight-errors=all
kubectl get nodes
Check kubelet status:
journalctl -f -u kubelet.service
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
localhost.localdomain Ready master 9m28s v1.19.0
[root@localhost ~]# kubectl describe node localhost.localdomain | grep Taints
Taints: <none>
[root@localhost ~]# kubectl get all -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system pod/coredns-6d56c8448f-7w8dj 1/1 Running 0 4m11s
kube-system pod/coredns-6d56c8448f-njzpk 1/1 Running 0 4m11s
kube-system pod/etcd-localhost.localdomain 1/1 Running 1 8m54s
kube-system pod/kube-apiserver-localhost.localdomain 1/1 Running 1 8m57s
kube-system pod/kube-controller-manager-localhost.localdomain 1/1 Running 3 9m17s
kube-system pod/kube-flannel-ds-gcx6m 1/1 Running 0 46s
kube-system pod/kube-proxy-v2m8j 1/1 Running 0 4m11s
kube-system pod/kube-scheduler-localhost.localdomain 1/1 Running 2 8m57s
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 10.1.0.1 <none> 443/TCP 8m51s
kube-system service/kube-dns ClusterIP 10.1.0.10 <none> 53/UDP,53/TCP,9153/TCP 4m11s
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
kube-system daemonset.apps/kube-flannel-ds 1 1 1 1 1 <none> 46s
kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 4m11s
NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
kube-system deployment.apps/coredns 2/2 2 2 4m11s
NAMESPACE NAME DESIRED CURRENT READY AGE
kube-system replicaset.apps/coredns-6d56c8448f 2 2 2 4m11s
Test
kubectl create deployment nginx --image=nginx                # create an nginx deployment
kubectl expose deployment nginx --port=80 --type=NodePort    # expose it on a NodePort
kubectl get pod,svc
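To verify from the host, the assigned NodePort can be read back with jsonpath and curled; a sketch (the port itself is random in the 30000-32767 range):
NODEPORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl -I http://127.0.0.1:${NODEPORT}   # expect HTTP/1.1 200 OK from nginx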
Test 2: the same nginx Deployment as a manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        imagePullPolicy: IfNotPresent
        name: nginx
        resources: {}
status: {}
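Saved as nginx_deploy.yaml (the filename used in the istioctl command below), it can be applied and watched with:
kubectl apply -f nginx_deploy.yaml
kubectl rollout status deployment/nginx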
Inject Istio
kubectl apply -f <(istioctl kube-inject -f nginx_deploy.yaml)
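If the injection worked, the pod now runs an istio-proxy sidecar next to nginx; one way to check (this assumes istioctl is on the PATH and an Istio control plane is installed):
kubectl get pod -l app=nginx -o jsonpath='{.items[0].spec.containers[*].name}'
# expected: nginx istio-proxy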
Enable PodPreset
cat /etc/kubernetes/manifests/kube-apiserver.yaml
In the spec.containers.command section, add the following:
- --enable-admission-plugins=NodeRestriction,PodPreset
- --runtime-config=settings.k8s.io/v1alpha1=true
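No manual restart is needed: the kubelet watches /etc/kubernetes/manifests and recreates the apiserver pod when the file changes. The edited section ends up looking roughly like this (abridged sketch):
spec:
  containers:
  - command:
    - kube-apiserver
    - --enable-admission-plugins=NodeRestriction,PodPreset
    - --runtime-config=settings.k8s.io/v1alpha1=true
    # ...remaining flags unchanged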
kubeadm reset
[root@localhost ~]# kubeadm init --kubernetes-version=1.19.0 \
> --apiserver-advertise-address=192.168.150.84 \
> --image-repository=registry.aliyuncs.com/google_containers \
> --service-cidr=10.1.0.0/16 \
> --pod-network-cidr=10.244.0.0/16 \
> --ignore-preflight-errors=all \
> --v=5
I0620 10:32:08.700988 8811 initconfiguration.go:103] detected and using CRI socket: /var/run/dockershim.sock
W0620 10:32:09.644670 8811 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.19.0
[preflight] Running pre-flight checks
I0620 10:32:09.645476 8811 checks.go:577] validating Kubernetes and kubeadm version
I0620 10:32:09.645565 8811 checks.go:166] validating if the firewall is enabled and active
I0620 10:32:10.193676 8811 checks.go:201] validating availability of port 6443
[WARNING Port-6443]: Port 6443 is in use
I0620 10:32:10.194131 8811 checks.go:201] validating availability of port 10259
[WARNING Port-10259]: Port 10259 is in use
I0620 10:32:10.194186 8811 checks.go:201] validating availability of port 10257
[WARNING Port-10257]: Port 10257 is in use
I0620 10:32:10.194226 8811 checks.go:286] validating the existence of file /etc/kubernetes/manifests/kube-apiserver.yaml
[WARNING FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: /etc/kubernetes/manifests/kube-apiserver.yaml already exists
I0620 10:32:10.194263 8811 checks.go:286] validating the existence of file /etc/kubernetes/manifests/kube-controller-manager.yaml
[WARNING FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: /etc/kubernetes/manifests/kube-controller-manager.yaml already exists
I0620 10:32:10.194287 8811 checks.go:286] validating the existence of file /etc/kubernetes/manifests/kube-scheduler.yaml
[WARNING FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: /etc/kubernetes/manifests/kube-scheduler.yaml already exists
I0620 10:32:10.194309 8811 checks.go:286] validating the existence of file /etc/kubernetes/manifests/etcd.yaml
[WARNING FileAvailable--etc-kubernetes-manifests-etcd.yaml]: /etc/kubernetes/manifests/etcd.yaml already exists
I0620 10:32:10.194339 8811 checks.go:432] validating if the connectivity type is via proxy or direct
I0620 10:32:10.194394 8811 checks.go:471] validating http connectivity to first IP address in the CIDR
I0620 10:32:10.194424 8811 checks.go:471] validating http connectivity to first IP address in the CIDR
I0620 10:32:10.194439 8811 checks.go:102] validating the container runtime
I0620 10:32:10.384939 8811 checks.go:128] validating if the "docker" service is enabled and active
I0620 10:32:10.596893 8811 checks.go:335] validating the contents of file /proc/sys/net/bridge/bridge-nf-call-iptables
I0620 10:32:10.597128 8811 checks.go:335] validating the contents of file /proc/sys/net/ipv4/ip_forward
I0620 10:32:10.597175 8811 checks.go:649] validating whether swap is enabled or not
I0620 10:32:10.597215 8811 checks.go:376] validating the presence of executable conntrack
I0620 10:32:10.597248 8811 checks.go:376] validating the presence of executable ip
I0620 10:32:10.597277 8811 checks.go:376] validating the presence of executable iptables
I0620 10:32:10.597299 8811 checks.go:376] validating the presence of executable mount
I0620 10:32:10.597322 8811 checks.go:376] validating the presence of executable nsenter
I0620 10:32:10.597341 8811 checks.go:376] validating the presence of executable ebtables
I0620 10:32:10.597387 8811 checks.go:376] validating the presence of executable ethtool
I0620 10:32:10.597415 8811 checks.go:376] validating the presence of executable socat
I0620 10:32:10.597453 8811 checks.go:376] validating the presence of executable tc
I0620 10:32:10.597480 8811 checks.go:376] validating the presence of executable touch
I0620 10:32:10.597504 8811 checks.go:520] running all checks
I0620 10:32:10.817620 8811 checks.go:406] checking whether the given node name is reachable using net.LookupHost
I0620 10:32:10.817947 8811 checks.go:618] validating kubelet version
I0620 10:32:11.218129 8811 checks.go:128] validating if the "kubelet" service is enabled and active
I0620 10:32:11.236388 8811 checks.go:201] validating availability of port 10250
[WARNING Port-10250]: Port 10250 is in use
I0620 10:32:11.236518 8811 checks.go:201] validating availability of port 2379
[WARNING Port-2379]: Port 2379 is in use
I0620 10:32:11.236587 8811 checks.go:201] validating availability of port 2380
[WARNING Port-2380]: Port 2380 is in use
I0620 10:32:11.236628 8811 checks.go:249] validating the existence and emptiness of directory /var/lib/etcd
[WARNING DirAvailable--var-lib-etcd]: /var/lib/etcd is not empty
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I0620 10:32:11.375451 8811 checks.go:839] image exists: registry.aliyuncs.com/google_containers/kube-apiserver:v1.19.0
I0620 10:32:11.512835 8811 checks.go:839] image exists: registry.aliyuncs.com/google_containers/kube-controller-manager:v1.19.0
I0620 10:32:11.646536 8811 checks.go:839] image exists: registry.aliyuncs.com/google_containers/kube-scheduler:v1.19.0
I0620 10:32:11.804045 8811 checks.go:839] image exists: registry.aliyuncs.com/google_containers/kube-proxy:v1.19.0
I0620 10:32:11.959058 8811 checks.go:839] image exists: registry.aliyuncs.com/google_containers/pause:3.2
I0620 10:32:12.098751 8811 checks.go:839] image exists: registry.aliyuncs.com/google_containers/etcd:3.4.9-1
I0620 10:32:12.260080 8811 checks.go:839] image exists: registry.aliyuncs.com/google_containers/coredns:1.7.0
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
I0620 10:32:12.310286 8811 certs.go:69] creating new public/private key files for signing service account users
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0620 10:32:12.310564 8811 kubeconfig.go:84] creating kubeconfig file for admin.conf
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
I0620 10:32:12.499948 8811 kubeconfig.go:84] creating kubeconfig file for kubelet.conf
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
I0620 10:32:12.627574 8811 kubeconfig.go:84] creating kubeconfig file for controller-manager.conf
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
I0620 10:32:13.762574 8811 kubeconfig.go:84] creating kubeconfig file for scheduler.conf
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
I0620 10:32:14.056430 8811 kubelet.go:63] Stopping the kubelet
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
I0620 10:32:14.478542 8811 manifests.go:96] [control-plane] getting StaticPodSpecs
I0620 10:32:14.480296 8811 manifests.go:109] [control-plane] adding volume "ca-certs" for component "kube-apiserver"
I0620 10:32:14.480324 8811 manifests.go:109] [control-plane] adding volume "etc-pki" for component "kube-apiserver"
I0620 10:32:14.480334 8811 manifests.go:109] [control-plane] adding volume "k8s-certs" for component "kube-apiserver"
I0620 10:32:14.498233 8811 manifests.go:135] [control-plane] wrote static Pod manifest for component "kube-apiserver" to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
I0620 10:32:14.498290 8811 manifests.go:96] [control-plane] getting StaticPodSpecs
I0620 10:32:14.506097 8811 manifests.go:109] [control-plane] adding volume "ca-certs" for component "kube-controller-manager"
I0620 10:32:14.506148 8811 manifests.go:109] [control-plane] adding volume "etc-pki" for component "kube-controller-manager"
I0620 10:32:14.506162 8811 manifests.go:109] [control-plane] adding volume "flexvolume-dir" for component "kube-controller-manager"
I0620 10:32:14.506174 8811 manifests.go:109] [control-plane] adding volume "k8s-certs" for component "kube-controller-manager"
I0620 10:32:14.506184 8811 manifests.go:109] [control-plane] adding volume "kubeconfig" for component "kube-controller-manager"
I0620 10:32:14.508900 8811 manifests.go:135] [control-plane] wrote static Pod manifest for component "kube-controller-manager" to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[control-plane] Creating static Pod manifest for "kube-scheduler"
I0620 10:32:14.508970 8811 manifests.go:96] [control-plane] getting StaticPodSpecs
I0620 10:32:14.509781 8811 manifests.go:109] [control-plane] adding volume "kubeconfig" for component "kube-scheduler"
I0620 10:32:14.511102 8811 manifests.go:135] [control-plane] wrote static Pod manifest for component "kube-scheduler" to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0620 10:32:14.513169 8811 local.go:82] [etcd] wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml"
I0620 10:32:14.513213 8811 waitcontrolplane.go:87] [wait-control-plane] Waiting for the API server to be healthy
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 0.021117 seconds
I0620 10:32:14.537132 8811 uploadconfig.go:108] [upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0620 10:32:14.598049 8811 uploadconfig.go:122] [upload-config] Uploading the kubelet component config to a ConfigMap
[kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster
I0620 10:32:14.659960 8811 uploadconfig.go:127] [upload-config] Preserving the CRISocket information for the control-plane node
I0620 10:32:14.660059 8811 patchnode.go:30] [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "localhost.localdomain" as an annotation
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 8jm9zg.45r59j98hazgeqoe
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0620 10:32:15.968515 8811 request.go:581] Throttling request took 196.507706ms, request: POST:https://192.168.150.84:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings?timeout=10s
I0620 10:32:16.168226 8811 request.go:581] Throttling request took 193.533793ms, request: PUT:https://192.168.150.84:6443/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/kubeadm:node-autoapprove-certificate-rotation?timeout=10s
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0620 10:32:16.172938 8811 clusterinfo.go:45] [bootstrap-token] loading admin kubeconfig
I0620 10:32:16.173700 8811 clusterinfo.go:53] [bootstrap-token] copying the cluster from admin.conf to the bootstrap kubeconfig
I0620 10:32:16.175296 8811 clusterinfo.go:65] [bootstrap-token] creating/updating ConfigMap in kube-public namespace
I0620 10:32:16.185069 8811 clusterinfo.go:79] creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace
I0620 10:32:16.368105 8811 request.go:581] Throttling request took 182.913697ms, request: POST:https://192.168.150.84:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/roles?timeout=10s
I0620 10:32:16.568551 8811 request.go:581] Throttling request took 194.540485ms, request: POST:https://192.168.150.84:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings?timeout=10s
I0620 10:32:16.574288 8811 kubeletfinalize.go:88] [kubelet-finalize] Assuming that kubelet client certificate rotation is enabled: found "/var/lib/kubelet/pki/kubelet-client-current.pem"
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0620 10:32:16.576158 8811 kubeletfinalize.go:132] [kubelet-finalize] Restarting the kubelet to enable client certificate rotation
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.150.84:6443 --token 8jm9zg.45r59j98hazgeqoe \
--discovery-token-ca-cert-hash sha256:baa600edd133dcc2e98f1bfb9dcb126338cdc6cf0af44dd7a448956d28484fc6
[root@localhost ~]# kubeadm join 192.168.150.84:6443 --token 8jm9zg.45r59j98hazgeqoe --discovery-token-ca-cert-hash sha256:baa600edd133dcc2e98f1bfb9dcb126338cdc6cf0af44dd7a448956d28484fc6 --ignore-preflight-errors=all
[preflight] Running pre-flight checks
[WARNING DirAvailable--etc-kubernetes-manifests]: /etc/kubernetes/manifests is not empty
[WARNING FileAvailable--etc-kubernetes-kubelet.conf]: /etc/kubernetes/kubelet.conf already exists
[WARNING Port-10250]: Port 10250 is in use
[WARNING FileAvailable--etc-kubernetes-pki-ca.crt]: /etc/kubernetes/pki/ca.crt already exists
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
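Bootstrap tokens expire after 24 hours by default; if the one above has lapsed, a fresh join command can be printed on the control plane with:
kubeadm token create --print-join-command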
[root@localhost ~]# cat kube-flannel.yml.1
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
References
https://www.cnblogs.com/lb477/p/14841415.html
For the "network plugin is not ready: cni config uninitialized" error:
https://www.cnblogs.com/hellxz/p/kuberntes_cni_config_uninitialized.html
