rancher-ha部署

参考
http://www.eryajf.net/2723.html
https://www.kubernetes.org.cn/3280.html
https://www.rancher.cn/docs/rancher/v2.x/cn/installation/ha-install/rke-ha-install/tcp-l4/
https://www.rancher.cn/docs/rancher/v2.x/cn/install-prepare/download/helm/

1.优化打开数

# Raise the open-file (nofile) limit to 65535 for root and for all users,
# and bump the per-user process cap from the CentOS 7 default of 4096.
echo -e  "root soft nofile 65535\nroot hard nofile 65535\n* soft nofile 65535\n* hard nofile 65535\n"     >> /etc/security/limits.conf
sed -i 's#4096#65535#g' /etc/security/limits.d/20-nproc.conf

2.优化内核

# Kernel tuning for Kubernetes: enable IPv4 forwarding, make iptables
# see bridged traffic, prefer RAM over swap, and raise the mmap count.
cat >> /etc/sysctl.conf<<EOF
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
vm.swappiness=0
vm.max_map_count=655360
EOF

# FIX: the net.bridge.bridge-nf-call-* keys only exist once the
# br_netfilter module is loaded, and the original never applied the
# new settings — load the module and apply them now instead of
# waiting for a reboot.
modprobe br_netfilter
sysctl -p

3.基础环境安装

# Base tooling: download/transfer helpers, yum plugins, devicemapper
# prerequisites for Docker, and bash completion; then sync the clock.
yum -y install wget ntpdate lrzsz curl yum-utils device-mapper-persistent-data lvm2 bash-completion && ntpdate -u cn.pool.ntp.org


# Refresh cached repo metadata for the base and updates repositories.
yum --enablerepo=base clean metadata
yum --enablerepo=updates clean metadata
安装docker源

# Register the Aliyun mirror of the Docker CE yum repository.
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Purge every previously installed Docker variant so the pinned
# version installed next does not conflict with leftovers.
old_docker_pkgs=(
  docker
  docker-client
  docker-client-latest
  docker-common
  docker-latest
  docker-latest-logrotate
  docker-logrotate
  docker-selinux
  docker-engine-selinux
  docker-engine
  container*
)
yum remove -y "${old_docker_pkgs[@]}"

安装指定版本

# Install the Docker release supported by this Rancher/RKE version
# (17.03.2, pinned; --setopt=obsoletes=0 keeps yum from replacing it
# with a newer package that obsoletes it).
yum -y install --setopt=obsoletes=0 docker-ce-17.03.2.ce-1.el7.centos docker-ce-selinux-17.03.2.ce-1.el7.centos

# Start Docker now and on every boot, then confirm it is running.
systemctl enable docker
systemctl start docker
systemctl status docker


# Configure the Docker daemon: Aliyun registry mirrors, one insecure
# private registry (presumably an internal registry at 192.168.112.69
# — TODO confirm it is reachable from these hosts), and the overlay2
# storage driver with the kernel-version check overridden for CentOS 7.
cat > /etc/docker/daemon.json << EOF
   {
     "registry-mirrors": ["https://7bezldxe.mirror.aliyuncs.com/","https://kw88y6eh.mirror.aliyuncs.com"],
     "insecure-registries":["192.168.112.69"],
       "storage-driver": "overlay2",
       "storage-opts": [
       "overlay2.override_kernel_check=true"
       ]
   }
EOF

 

# Reload unit files and restart Docker so daemon.json takes effect.
systemctl daemon-reload
systemctl restart docker


vi /etc/hosts

192.168.100.99 rancher99.com
192.168.100.102 rancher102.com
192.168.100.103 rancher103.com

切换用户执行免密认证

# Switch to the rancher user, generate an SSH key pair, and copy the
# public key to every cluster node so rke can log in without a password.
su - rancher
ssh-keygen
for node in 192.168.100.99 192.168.100.102 192.168.100.103; do
  ssh-copy-id "rancher@${node}"
done

rke工具安装 注意版本

# Download the rke and kubectl binaries (versions must match the
# target Kubernetes release) and install them onto the PATH.
rke_bin=v0.2.8-rke_linux-amd64
kubectl_bin=linux-amd64-v1.15.3-kubectl

wget "https://www.rancher.cn/download/rke/${rke_bin}"
wget "https://www.rancher.cn/download/kubernetes/${kubectl_bin}"

chmod +x "${rke_bin}" "${kubectl_bin}"
mv "${rke_bin}" /usr/bin/rke
mv "${kubectl_bin}" /usr/bin/kubectl

 

# Fetch the Helm v2.14.3 client tarball, install the helm and tiller
# binaries, then remove the extracted directory.
helm_tgz=helm-v2.14.3-linux-amd64.tar.gz

wget "https://www.rancher.cn/download/helm/${helm_tgz}"
tar xf "${helm_tgz}"

mv linux-amd64/helm /usr/bin/helm
mv linux-amd64/tiller /usr/bin/tiller
rm -rf linux-amd64

注意：可能helm服务端版本和客户端版本不匹配，此时需要下载2.13.1版本

# Downgrade the Helm client to v2.13.1 to match the tiller server.
wget https://www.rancher.cn/download/helm/helm-v2.13.1-linux-amd64.tar.gz

# BUG FIX: the original extracted helm-v2.14.3-linux-amd64.tar.gz here,
# reinstalling the 2.14.3 binaries instead of the 2.13.1 tarball that
# was just downloaded — extract the correct archive.
tar xf helm-v2.13.1-linux-amd64.tar.gz
mv linux-amd64/helm /usr/bin/helm
mv linux-amd64/tiller /usr/bin/tiller
rm -rf  linux-amd64/

 

下面操作在rancher用户下执行
ip 是需要修改的

# RKE cluster definition: three nodes, each carrying all three roles
# (controlplane, worker, etcd), with etcd snapshots every 6 hours and
# the last 60 snapshots retained. Replace the addresses with your own.
cat > rancher-cluster.yml << EOF
    nodes:
      - address: 192.168.120.99
        user: rancher
        role: [controlplane,worker,etcd]
      - address: 192.168.120.102
        user: rancher
        role: [controlplane,worker,etcd]
      - address: 192.168.120.103
        user: rancher
        role: [controlplane,worker,etcd]
    services:
      etcd:
        backup_config:
          enabled: true
          interval_hours: 6
          retention: 60
EOF

注意不同版本的rke配置上有所差异参考

https://www.rancher.cn/docs/rancher/v2.x/cn/installation/ha-install/helm-rancher/tcp-l4/rke-install-k8s/

 

 

 开始部署

# Provision the Kubernetes cluster described in rancher-cluster.yml.
rke up --config ./rancher-cluster.yml

 

如果报错需要重新部署

rke remove --config ./rancher-cluster.yml # then also clean up the Docker state on the nodes before retrying

有时候某台会报错 最后克隆另一个节点修改ip解决

配置环境变量

# As root, add the rke-generated kubeconfig path to the system profile
# (the export line below goes inside /etc/profile via the editor),
# then reload the profile in the current shell.
su root
vi /etc/profile
export KUBECONFIG=/home/rancher/kube_config_rancher-cluster.yml

source /etc/profile


两个用户下加载环境

    $ echo "source <(kubectl completion bash)" >> ~/.bashrc
    $ source ~/.bashrc
    $ su - rancher
    $ echo "source <(kubectl completion bash)" >> ~/.bashrc
    $ source ~/.bashrc


查看node运行情况 如果报错可以考虑重启三台服务器

su - rancher
kubectl get node

 

 

kubectl get pods --all-namespaces

 

 开始是这样

helm安装

# Create the tiller service account and grant it cluster-admin rights.
kubectl -n kube-system create serviceaccount tiller
kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller

# Initialize Helm with an Aliyun-mirrored tiller image and chart repo.
helm init --service-account tiller   --tiller-image registry.cn-hangzhou.aliyuncs.com/eryajf/tiller:v2.13.1 --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts

# NOTE(review): the two commands below look like alternative ways to
# resolve a client/server version mismatch, not steps to always run —
# confirm which one applies before executing both.
helm init --service-account tiller --canary-image

kubectl --namespace=kube-system set image deployments/tiller-deploy tiller=registry.cn-hangzhou.aliyuncs.com/eryajf/tiller:v2.13.1

helm安装镜像仓

helm repo add rancher-stable https://releases.rancher.com/server-charts/stable

安装证书只有Rancher自动生成的证书和LetsEncrypt颁发的证书才需要cert-manager

# Install cert-manager (required only when using Rancher-generated or
# Let's Encrypt certificates).
helm install stable/cert-manager \
      --name cert-manager \
      --namespace kube-system

自动生成证书

执行下面命令可能会弹出下面内容不用管

 

 

# Deploy Rancher into the cattle-system namespace, served under the
# hostname rancher.com (must resolve to the load balancer address).
helm install rancher-stable/rancher \
      --name rancher \
      --namespace cattle-system \
      --set hostname=rancher.com
4台主机上都执行
echo "192.168.120.104 rancher.com" >> /etc/hosts
创建命名空间的解析
# Inject a hostAliases entry so cattle-cluster-agent pods can resolve
# rancher.com to the load balancer without relying on external DNS.
kubectl -n cattle-system patch deployments cattle-cluster-agent --patch '{ "spec": { "template": { "spec": { "hostAliases": [ { "hostnames": [ "rancher.com" ], "ip": "192.168.120.104" } ] } } } }'

 两个认证镜像包可能因为dns无法拉取 需要手动打包封装

 

 

这个也没事

在192.168.120.104上安装nginx

# Register the official nginx.org repository and install nginx on the
# load-balancer host (192.168.120.104).
sudo rpm -Uvh http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm
yum install nginx -y

cp /etc/nginx/nginx.conf /etc/nginx/nginx.conf.bbak # back up the config file
vi /etc/nginx/nginx.conf # configure nginx as the TCP load balancer
    user nginx;
    worker_processes 2;
    worker_rlimit_nofile 40000;
    events {
        worker_connections 8192;
    }
    http {
        # Gzip Settings
        gzip on;
        gzip_disable "msie6";
        gzip_disable "MSIE [1-6]\.(?!.*SV1)";
        gzip_vary on;
        gzip_static on;
        gzip_proxied any;
        gzip_min_length 0;
        gzip_comp_level 8;
        gzip_buffers 16 8k;
        gzip_http_version 1.1;
        # FIX: the original fused "application/vnd.ms-fontobject" and
        # "font/woff2" into one invalid token (missing space).
        gzip_types text/xml application/xml application/atom+xml application/rss+xml application/xhtml+xml image/svg+xml application/font-woff text/javascript application/javascript application/x-javascript text/x-json application/json application/x-web-app-manifest+json text/css text/plain text/x-component font/opentype application/x-font-ttf application/vnd.ms-fontobject font/woff2 image/x-icon image/png image/jpeg;
        server {
            # Redirect all plain-HTTP traffic to HTTPS.
            listen         80;
            return 301 https://$host$request_uri;
        }
    }
    # Layer-4 (TCP) pass-through to the Rancher cluster nodes.
    stream {
        upstream rancher_servers {
            least_conn;
            server 192.168.120.99:443 max_fails=3 fail_timeout=5s;
            server 192.168.120.102:443 max_fails=3 fail_timeout=5s;
            # FIX: the original listed 192.168.120.104 here — that is
            # this load balancer itself (proxying to its own :443 would
            # loop); the third cluster node per rancher-cluster.yml
            # is 192.168.120.103.
            server 192.168.120.103:443 max_fails=3 fail_timeout=5s;
        }
        server {
            listen     443;
            proxy_pass rancher_servers;
        }
    }

 

# Start nginx on boot and restart it so the new config takes effect.
sudo systemctl enable nginx.service

systemctl restart nginx.service # restart the nginx service

 

 

运行中执行下面命令修改windows下的hosts文件

C:\windows\System32\drivers\etc

192.168.120.104 rancher.com
会自动创建了一个本地集群 来存放rancher服务

https://rancher.com/ #开始有认证同意 可能需要多点几次

 

最后在服务中创建集群的时候一个服务总是报错

 

 这是因为dns解析的问题修改解析1分钟后服务就正常了

 命令行连接rancher

 

# Install kubectl on the workstation that will talk to Rancher.
kubectl_bin=linux-amd64-v1.15.3-kubectl
wget "https://www.rancher.cn/download/kubernetes/${kubectl_bin}"
chmod +x "${kubectl_bin}"
mv "${kubectl_bin}" /usr/bin/kubectl

 

 复制配置文件内的内容写入到集群文件

vi cluster.yml

apiVersion: v1
kind: Config
clusters:
- name: "lvnk"
  cluster:
    server: "https://rancher.com/k8s/clusters/c-5d67l"
    certificate-authority-data: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3akNDQ\
      WRhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFvTVJJd0VBWURWUVFLRXdsMGFHVXQKY\
      21GdVkyZ3hFakFRQmdOVkJBTVRDV05oZEhSc1pTMWpZVEFlRncweE9UQTVNVEF4TVRJek1qZGFGd\
      zB5T1RBNQpNRGN4TVRJek1qZGFNQ2d4RWpBUUJnTlZCQW9UQ1hSb1pTMXlZVzVqYURFU01CQUdBM\
      VVFQXhNSlkyRjBkR3hsCkxXTmhNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ\
      0tDQVFFQXNMRVJRcW1EZnhUZjhENE8Kb0FnWFJXbUNRV0hlQm9WTlBVN3dwMklGaXd0MkRteTN0Z\
      mtVU3dESGlrbVdhb0dMczFSYVo4NldXdEUxMU1SRwpQdUcrNWsybTlUenlXeXJwWm95aWN1UVJ2c\
      WxUSzQ0azhnTHpmTERnRklPVU9PVVlwQXRlOFRvR3FUa1NUY25nCnRyaUg4aE9QSzFDRUxmWEFyZ\
      nAvNEVjQnczdkp1VlVkV25BQStucU9oczNJQUN5UkdEM1JETVlMU1JkemNJb24KczRWb3RVSmhwN\
      TJrc2FUVGlIeG5YMmJ6emtXOXdnWDJlTlovbDJZSEZDVGd4eUZOVHJHQTJxbUxuRE9BNDFvMQpyS\
      DBmeCtFalJIQmFoS3VLcG1zVDd3RE5wcW1IVlRHbVdqT3NoSGJuNkJGVGpsVDk4L2Z0clMxK3RkM\
      kNoQlVuCjNBQVBoUUlEQVFBQm95TXdJVEFPQmdOVkhROEJBZjhFQkFNQ0FxUXdEd1lEVlIwVEFRS\
      C9CQVV3QXdFQi96QU4KQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBVi80dkVrWkRkZ1NBMEwzLzRRV\
      nArbEFMU0dObGRheTRIT2hDZjFKcQpsbndtQ0Vselo1NDdNUFMrRmxFNEkra0h1ekxQY2tNTDRZY\
      lE0U2NXZEpmTjRzd2dTbkJuNlBjR1dmcGlzdGpYClhHdSt6L1I2V2FjWFdLbVVzM04yaUR3QkdxM\
      ndmQmE5YjNPcHhxV1ZKa0ovZVkyMnF4TkxwWnAwWWRPNDVrSFMKbU5nRm9CTmg2d2MwWldrWG1PN\
      DU3N0Ixak9yOEpvSnhHTTJlKzFUeEZnMnhRZkVrYm5pVlJBb1VCWHoxbzF4YwoweEFmbEdrUE50M\
      nlIQ0l3Yk1iK1ZNSzlrRXpiUVdzUldwUWFONG0xbUFYZEMwM2t2c25YMVR3Q0RLMVhINEdBCmVue\
      HJUVE9qWUxuSWViR2V2a0RUaHVGT0Q4bkx5bUtwSXVWRFJqNHNqeHpZZnc9PQotLS0tLUVORCBDR\
      VJUSUZJQ0FURS0tLS0t"
- name: "lvnk-rancher176"
  cluster:
    server: "https://192.168.1.176:6443"
    certificate-authority-data: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN3akNDQ\
      WFxZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFTTVJBd0RnWURWUVFERXdkcmRXSmwKT\
      FdOaE1CNFhEVEU1TURreE1UQTJNRGd4TTFvWERUSTVNRGt3T0RBMk1EZ3hNMW93RWpFUU1BNEdBM\
      VVFQXhNSAphM1ZpWlMxallUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ\
      0VCQUtqZDU5Y0dEblpUCmIvbHJPbGtkWWRCUmpHZnJYcE0rb1Nkd3hIa0QrR1NHS0FuNGZqWFlXQ\
      mxnZ2p4eTM1OGVKbDdCWDEyam1NcTEKZnRkVExZN0t6RzhMbmNNOW56bDloamQ1SUZleG1PODF3Q\
      29DS3k3VkRMNWNDYWlZUmlEZDdtNXlLSzZMREluVQpVekY3UVZNbXp2OXdqWENhZDRRaVBzUjAyS\
      3lYNFdzWXd3WWg0UCtkeitjRkpvZ2tuQ3JnSkt0b0RTcjFhK2txCjJid0hBd0JwMStpQXB2dlhCb\
      lF6dnZwaDlpOERPZE9EWHBhcW9NekJVcHJHcFh5NXY2Y3FXRlJiVFBxamF1NHAKYm8ydXpUZDVid\
      3VpNjJlMFRBckp4WmNtSm1GTDh6c1htdDlEWkdJcU9rN3lDMDRZRjNVb3IwcitMdjloUWdSSQpSc\
      i9DR0JNY2FTa0NBd0VBQWFNak1DRXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93U\
      UZNQU1CCkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRFhYV1Z5bkhxWDNrOU9vV2RIT0xlT\
      nRielVoUE53R1YwSkgKcVdva2JqbDFLYVkxMjdwM0RxSlJUL3ZJLzRwaG5Nc2pvL2h6cFVKdHpyN\
      FAzajM4UjVETHNwK29YWGplbHJlNQpYR1dKY0kycjlOOHVKYWp4a0xYaU9iWWdRWFJtVmMrbkRVN\
      1BDaTF2QnBxQ3hHaUJQV0pFT1NKUFhRbWxlS09hCks4SEozRU8vNFJtTzJRMzN1Q1hUdDRBZEN5W\
      GZFOTVJM0Z5bGdmMzJxdE9kN2drOFJjNXRNSnBtVlRNd1dYblIKTUxSdHdHOHdZY1JvV1RoV1I3M\
      2lBazFFcFNmSVBFU0tNU1BCOVQya2V4TlA2M09Tb3ZJcmk1eFZRUC9DcjZPQwp2VFdUY3krZi9GU\
      FZCYzFUTDNUa2c5N2pXODhobUp1QXE0QW5UMUVsUnZOZG84VlpDWmc9Ci0tLS0tRU5EIENFUlRJR\
      klDQVRFLS0tLS0K"

users:
- name: "user-jzlk4"
  user:
    token: "kubeconfig-user-jzlk4.c-5d67l:hhgbxbtx7wmqz85v9wcnjx77xxc7jbnncnbnmvgb2qfqgrg548hp5w"

contexts:
- name: "lvnk"
  context:
    user: "user-jzlk4"
    cluster: "lvnk"
- name: "lvnk-rancher176"
  context:
    user: "user-jzlk4"
    cluster: "lvnk-rancher176"

current-context: "lvnk"
View Code
# Query the remote cluster through the downloaded kubeconfig file.
kubectl --kubeconfig=cluster.yml  get  nodes
kubectl --kubeconfig=cluster.yml  get  pods -A -o wide

 

posted @ 2019-09-09 20:46  夜辰雪扬  阅读(377)  评论(0)    收藏  举报