K8S-HA-3master

1.加载模块

# Load the kernel modules kube-proxy needs for IPVS mode, plus br_netfilter
# so bridged pod traffic is visible to iptables/netfilter.
modprobe br_netfilter
# Persist the module list; /etc/sysconfig/modules/*.modules is re-run on boot.
# NOTE: the shebang must be on its own line — a fused "#!/bin/bashmodprobe"
# silently drops the first modprobe when the script runs at boot.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Make it executable, load the modules now, and verify they are present.
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4



2.安装更新docker

# Install Docker prerequisites and register the Aliyun mirror of the docker-ce repo.
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

 

# NOTE(review): 'yum update -y' may pull in a new kernel — hence the reboot
# and the grub2-set-default below. Confirm the elrepo kernel version matches.
yum update -y && yum install -y docker-ce

reboot
rpm -qa | grep kernel #list the installed kernel packages
grub2-set-default "CentOS Linux (4.4.192-1.el7.elrepo.x86_64) 7 (Core)" #the update changed the kernel, so the default boot entry must be re-selected
# Configure the Docker daemon before enabling it:
#  - systemd cgroup driver, so kubelet and Docker manage cgroups consistently
#  - json-file logging capped at 100 MB per container
# -p makes the step idempotent (plain mkdir errors if /etc/docker already exists).
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
 "exec-opts": ["native.cgroupdriver=systemd"],
 "log-driver": "json-file",
 "log-opts": {
   "max-size": "100m"  
 }
}
EOF
# Drop-in directory for later systemd overrides of docker.service.
mkdir -p /etc/systemd/system/docker.service.d

systemctl daemon-reload && systemctl restart docker && systemctl enable docker


部署 haproxy 和 keepalived（为三台 master 的 apiserver 提供 VIP 负载均衡）
# Stage the offline installation artifacts (uploaded interactively via rz/ZMODEM).
mkdir /usr/local/kubernetes/install -p
cd /usr/local/kubernetes/install
rz

# Load the pre-pulled HAProxy/keepalived images and unpack the kubeadm image set.
docker load -i haproxy.tar
docker load -i keepalived.tar
tar -zxvf kubeadm-basic.images.tar.gz

# Edit load-images.sh so its image directory points at the unpacked path below.
/usr/local/kubernetes/install
vi load-images.sh

# set the script's image dir to this path, then run it to docker-load every image
/usr/local/kubernetes/install/kubeadm-basic.images 修改目录
sh load-images.sh

# Unpack the HAProxy/keepalived launcher bundle; it expects to live under /data.
tar -zxvf start.keep.tar.gz
mv data/ /
cd /usr/local/kubernetes/install/data/lb

# Review the HAProxy config; the backend at the end must list all three masters.
cat etc/haproxy.cfg

# edit the end of the file to read:
修改最后
  balance roundrobin
  server rancher01 192.168.120.101:6443
  server rancher02 192.168.120.102:6443
  server rancher03 192.168.120.103:6443

# Set the three master IPs inside the launcher script.
vi start-haproxy.sh
MasterIP1=192.168.120.101
MasterIP2=192.168.120.102
MasterIP3=192.168.120.103

# NOTE(review): on a bootstrap-only node the copy under /root keeps just the
# first master as backend — presumably until the other masters join; confirm.
cp haproxy.cfg /root/
  balance roundrobin
  server rancher01 192.168.120.101:6443

sh start-haproxy.sh

# Set the floating VIP and the NIC keepalived should bind it to, then start it.
start-keepalived.sh
#!/bin/bash
VIRTUAL_IP=192.168.120.105
INTERFACE=ens33
sh start-keepalived.sh

# Verify the VIP 192.168.120.105 now appears on the interface.
ip a

# Register the Aliyun mirror of the Kubernetes yum repository.
# The second gpgkey URL is an INI continuation line and must be indented,
# otherwise yum's parser treats it as a stray line instead of part of gpgkey.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Pin kubeadm/kubectl/kubelet to 1.15.1 to match the pre-loaded images,
# and enable kubelet so it starts after 'kubeadm init'.
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
systemctl enable kubelet.service



cd /usr/local/kubernetes/install
# Move the staged tarballs aside so only kubeadm-config.yaml lives here.
# (Shell prompts from the original transcript removed so the lines are pasteable;
# mv will warn that 'images' cannot be moved into itself — harmless.)
mkdir images
mv * images/

# Generate a default config, then edit it into one of the YAML variants below.
kubeadm config print init-defaults > kubeadm-config.yaml

# kubeadm 1.15: --experimental-upload-certs stores the control-plane certs in a
# secret so the other masters can join with --control-plane --certificate-key.
kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log

多 master（HA）配置 kubeadm-config.yaml：

# kubeadm-config.yaml — multi-master (HA) variant, kubeadm v1beta2 API (k8s 1.15).
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef   # token consumed by the 'kubeadm join' commands
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.120.101   # this master's own address (not the VIP)
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule   # keep ordinary workloads off the control plane
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# Shared endpoint: keepalived VIP + HAProxy front-end port (6444, NOT the
# apiserver's 6443). All kubeconfigs and joins go through this address.
controlPlaneEndpoint: "192.168.120.105:6444"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
# NOTE(review): k8s.gcr.io is typically unreachable from CN networks — relies
# on the images pre-loaded from kubeadm-basic.images.tar.gz; no pull expected.
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"   # must match the flannel manifest's Network
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# Run kube-proxy in IPVS mode (requires the ip_vs* kernel modules loaded earlier).
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

 

单master配置

# kubeadm-config.yaml — single-master variant (no VIP, no shared endpoint).
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef   # token consumed by 'kubeadm join'
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.120.101   # the sole master's address
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule   # keep ordinary workloads off the control plane
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: ""   # empty: clients talk to this master's 6443 directly
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io   # images pre-loaded offline; no registry access needed
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"   # must match the flannel manifest's Network
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# Run kube-proxy in IPVS mode (requires the ip_vs* kernel modules loaded earlier).
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

 

 



---------------------------

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.120.105:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:10c7176c1f3af746104c0156645b05cd3469acbd7e949e0f6d6afff7cdc85b5d \
    --control-plane --certificate-key eedd99bc3ed092777a1c063276d0c3f166e532775a47d00d5d469e49cdddae90

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.120.105:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:10c7176c1f3af746104c0156645b05cd3469acbd7e949e0f6d6afff7cdc85b5d



# Update HAProxy to list all three masters once they have joined.
# (shell-history number prefix removed from the original transcript)
vi /data/lb/etc/haproxy.cfg

cat /usr/local/kubernetes/install/kubeadm-config.yaml

# Recreate the HAProxy container so it picks up the edited config.
docker rm -f HAProxy-K8S && sh /data/lb/start-haproxy.sh

# NOTE: in the kubeconfig below, change the cluster 'server:' address to the
# VIP endpoint (192.168.120.105:6444) so kubectl survives a master failure.
vi /root/.kube/config

 

 


配置网络

# Deploy the flannel CNI (its default Network 10.244.0.0/16 matches podSubnet),
# then confirm nodes go Ready.
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl get node


# Check etcd cluster health from inside the etcd static pod (etcdctl v2-API flags).
# NOTE(review): the endpoint uses the VIP, so this queries etcd on whichever
# node currently holds 192.168.120.105 — confirm that is intended.
kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl \
--endpoints=https://192.168.120.105:2379 \
--ca-file=/etc/kubernetes/pki/etcd/ca.crt \
--cert-file=/etc/kubernetes/pki/etcd/server.crt \
--key-file=/etc/kubernetes/pki/etcd/server.key cluster-health

 

# Leader-election status of kube-controller-manager (holder shown in annotation).
kubectl get endpoints kube-controller-manager --namespace=kube-system  -o yaml

posted @ 2019-09-17 17:18  夜辰雪扬  阅读(390)  评论(0)    收藏  举报