10-k8s简单环境的环境部署

k8s简单环境的环境部署,需要用到4台机器:
master01 192.168.66.10
node01 192.168.66.20
node02 192.168.66.21
harbor 192.168.66.100  # 前一个文章已经介绍过如何安装harbor了。

 

一、系统初始化
1.设置系统主机名以及 Host 文件的相互解析

hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
hostnamectl set-hostname k8s-harbor

在/etc/hosts添加:
192.168.66.10 k8s-master01
192.168.66.20 k8s-node01
192.168.66.21 k8s-node02
192.168.66.100 k8s.harbor.com

 

2.安装相关的依赖包

yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim \
net-tools git

 

3.设置防火墙为 Iptables 并设置空规则

systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

 

4.关闭 SELINUX

swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

 

5.调整内核参数

cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# 禁止使用 swap 空间,只有当系统 OOM 时才允许使用它
vm.swappiness=0
# 不检查物理内存是否够用
vm.overcommit_memory=1
# 开启 OOM
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf

 

6.调整系统时区

# 设置系统时区为 中国/上海
timedatectl set-timezone Asia/Shanghai
# 将当前的 UTC 时间写入硬件时钟
timedatectl set-local-rtc 0
# 重启依赖于系统时间的服务
systemctl restart rsyslog
systemctl restart crond

 

7.关闭系统不需要服务

systemctl stop postfix && systemctl disable postfix

 

8.设置 rsyslogd 和 systemd journald

mkdir /var/log/journal # 持久化保存日志的目录
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# 持久化保存到磁盘
Storage=persistent
# 压缩历史日志
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# 最大占用空间 10G
SystemMaxUse=10G
# 单日志文件最大 200M
SystemMaxFileSize=200M
# 日志保存时间 2 周
MaxRetentionSec=2week
# 不将日志转发到 syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald

 

9.升级系统内核为 4.4 版本

CentOS 7.x 系统自带的 3.10.x 内核存在一些 Bugs,导致运行的 Docker、Kubernetes 不稳定。解决办法是升级内核,先安装 elrepo 仓库:

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# 安装完成后检查 /boot/grub2/grub.cfg 中对应内核 menuentry 中是否包含 initrd16 配置,如果没有,再安装一次!
yum --enablerepo=elrepo-kernel install -y kernel-lt
# 设置开机从新内核启动
cat /boot/grub2/grub.cfg | grep initrd16 # 通过这个命令查看具体更新的4.4.X的版本号,我更新后是4.4.227
grub2-set-default 'CentOS Linux (4.4.227-1.el7.elrepo.x86_64) 7 (Core)'

 

二、使用kubeadm部署安装k8s服务
1.kube-proxy开启ipvs的前置条件

modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules &&
lsmod | grep -e ip_vs -e nf_conntrack_ipv4

 

2.安装新版本的Docker 软件 # Docker version 19.03.11

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum update -y && yum install -y docker-ce
## 创建 /etc/docker 目录
mkdir /etc/docker
# 配置 daemon.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
}
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# 重启docker服务
systemctl daemon-reload && systemctl restart docker && systemctl enable docker

 

3.安装好docker 重启机器后查看内核版本还是3.10的 重新执行4.4内核命令然后重启机器查看。

grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)' && reboot

[root@k8s-harbor ~]# uname -r
4.4.227-1.el7.elrepo.x86_64

 

4.安装 Kubeadm 

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
systemctl enable kubelet.service

 

6.导入下载好的镜像包

[root@k8s-master01 ~]# ll kubeadm-basic.images
total 868232
-rw------- 1 root root 208394752 Aug 5 2019 apiserver.tar
-rw------- 1 root root 40542720 Aug 5 2019 coredns.tar
-rw------- 1 root root 258365952 Aug 5 2019 etcd.tar
-rw------- 1 root root 53746688 Jun 14 17:38 flannel.tar
-rw------- 1 root root 160290304 Aug 5 2019 kubec-con-man.tar
-rw------- 1 root root 754176 Aug 5 2019 pause.tar
-rw------- 1 root root 84282368 Aug 5 2019 proxy.tar
-rw------- 1 root root 82675200 Aug 5 2019 scheduler.tar

使用load命令导入。
docker load -i 镜像包

 

7.初始化主节点

kubeadm config print init-defaults > kubeadm-config.yaml
localAPIEndpoint:
  advertiseAddress: 192.168.66.10 # 修改为 master 的 IP
kubernetesVersion: v1.15.1
networking:
  podSubnet: "10.244.0.0/16" # 添加 Pod 网段
  serviceSubnet: 10.96.0.0/12

# 再添加以下内容
--- 
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log

 

8.加入主节点以及其余工作节点

执行加入命令:
kubeadm join 192.168.66.10:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:ae6b17f2fd852ba477c29876fc207b867003c983125067defb908ba74965f35f

 

9.部署网络

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl create -f kube-flannel.yml # 第6步的时候已经提前导入镜像quay.io/coreos/flannel。

[root@k8s-master01 flannel]# kubectl create -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created

[root@k8s-master01 flannel]# kubectl get pod -n kube-system 
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-9m6lp 0/1 Pending 0 9m5s
coredns-5c98db65d4-s4n76 0/1 Pending 0 9m5s
etcd-k8s-master01 1/1 Running 0 8m21s
kube-apiserver-k8s-master01 1/1 Running 0 8m8s
kube-controller-manager-k8s-master01 1/1 Running 0 8m
kube-flannel-ds-amd64-jwwrr 0/1 Init:0/1 0 65s
kube-proxy-bjfgx 1/1 Running 0 9m6s
kube-scheduler-k8s-master01 1/1 Running 0 8m19s

[root@k8s-master01 flannel]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 10m v1.15.1

[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 18h v1.15.1
k8s-node01 Ready <none> 18h v1.15.1
k8s-node02 Ready <none> 18h v1.15.1

 

10.相关查看状态命令

kubectl get nodes
kubectl get pod -n kube-system
kubectl get pod -n kube-system -w
kubectl get pod -n kube-system -o wide

 

11.遇到的问题

flannel pod状态出现ImagePullBackOff的原因
是因为相关镜像无法从网上pull 下来。
解决办法 从有镜像的机器上把镜像导过去,然后重启docker。
docker save -o flannel.tar quay.io/coreos/flannel:v0.12.0-amd64
docker load -i flannel.tar

 

posted @ 2020-06-15 11:24  hejp  阅读(661)  评论(0编辑  收藏  举报