Manual Deployment of a Docker Swarm Cluster

1. Recommended Configuration

  • OS: CentOS 7+ (minimal install, with internet access)
  • CPU: 8 cores or more
  • Memory: 24 GB
  • Disk: >= 100 GB. Storage sizing reference: 100 measurement points sampled once per minute consume about 30 GB per year (see the sketch after this list).
  • Number of servers: 4
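
A rough sizing sketch based on the rule of thumb above (100 points at a 1-minute interval ≈ 30 GB/year), assuming consumption scales linearly, which the guide implies but does not state. POINTS, INTERVAL_MIN, and YEARS are illustrative values, not part of the original requirements:

# Estimate disk usage from the stated baseline: 100 points @ 1 sample/min ≈ 30 GB/year
POINTS=500          # number of measurement points (example value)
INTERVAL_MIN=1      # sampling interval in minutes (example value)
YEARS=2             # retention period in years (example value)
# Usage scales linearly with point count and retention, inversely with the interval
echo "Estimated: $(( POINTS * 30 * YEARS / (100 * INTERVAL_MIN) )) GB"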

2. Supported Systems

  • Tested compatible with Alibaba Cloud CentOS 7+ and RHEL 7+
  • Tested compatible with Huawei Cloud CentOS 7+ and RHEL 7+
  • Tested compatible with Tencent Cloud CentOS 7+ and RHEL 7+
  • Other platforms/systems have not been tested extensively yet

3. Environment

The deployment below uses 4 servers. All commands are executed on the 172.31.32.200 server.

Server information:
  Hostname          Host IP          Swarm role
  node01        172.31.32.200        manager
  node02        172.31.32.201        manager
  node03        172.31.32.202        worker
  node04        172.31.32.203        worker

4. Set Environment Variables

Make sure the IPs in the SERVER_IP variable are correct; a quick pairing check follows the snippet below.

SERVER_NAME=(node01 node02 node03 node04)
SERVER_IP=(172.31.32.200 172.31.32.201 172.31.32.202 172.31.32.203)
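
As a quick sanity check (an optional step, not in the original guide), print the name/IP pairs and confirm both arrays have the same length before proceeding:

# Both arrays must have the same length; print each pair for a visual check
if [ ${#SERVER_NAME[@]} -ne ${#SERVER_IP[@]} ]; then
    echo "[ERROR] SERVER_NAME and SERVER_IP differ in length" >&2
else
    for i in "${!SERVER_IP[@]}"; do
        echo "${SERVER_NAME[i]} -> ${SERVER_IP[i]}"
    done
fi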

5. Configure Local hosts Resolution

Trim the hosts file down to its first two lines (the default localhost entries), then write the SERVER_NAME and SERVER_IP pairs into hosts, one entry per line.

sed -i '3,$d' /etc/hosts
echo -e "\n# swarm cluster" >> /etc/hosts
let SER_LEN=${#SERVER_IP[@]}-1
for ((i=0;i<=$SER_LEN;i++)); do
    echo "${SERVER_IP[i]}  ${SERVER_NAME[i]}" >> /etc/hosts
done

6. Configure SSH Key Login

Configure key-based SSH login from 172.31.32.200 to the other nodes (replace 123abc@DEF with your actual root SSH password).

SSH_ROOT_PASSWD=123abc@DEF
bash <(curl -sSL https://gitee.com/yx571304/olz/raw/master/shell/ssh-key-copy.sh) "${SERVER_IP[*]}" root $SSH_ROOT_PASSWD
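
If you would rather not pipe a remote script into bash, the same result can be achieved manually with standard OpenSSH tools. A sketch, assuming sshpass is installed and password authentication is enabled on the nodes:

# Generate a key pair on node01 once (no passphrase), if not already present
[ -f ~/.ssh/id_rsa ] || ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
# Push the public key to every node, feeding the password via sshpass
for node in ${SERVER_IP[@]}; do
    sshpass -p "$SSH_ROOT_PASSWD" ssh-copy-id -o StrictHostKeyChecking=no root@$node
done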

7. System Optimization

1. Distribute hosts

# Sync the hosts file to the other nodes
for node in ${SERVER_IP[@]}; do
    echo "[INFO] scp hosts -----> $node"
    scp /etc/hosts $node:/etc/hosts
done

2. Set hostnames

Note: make sure this step succeeds (after it completes, run hostnamectl on each host and confirm the hostname is node0[1-4]); a verification loop follows the two variants below.

# Variant A: manually specify NIC eth1 (the NIC holding the IP listed in SERVER_IP)
for node in ${SERVER_IP[@]}; do
    ssh -T $node <<'EOF'
    HOST_IF=eth1
    HOST_IP=$(ip a|grep "$HOST_IF$"|awk '{print $2}'|cut -d'/' -f1)
    hostnamectl set-hostname $(grep $HOST_IP /etc/hosts | awk '{print $2}')
EOF
done

# Variant B: auto-detect the NIC (uses the default-route interface, i.e. the internet-facing NIC)
for node in ${SERVER_IP[@]}; do
    ssh -T $node <<'EOF'
    HOST_IF=$(ip route|grep default|cut -d ' ' -f5)
    HOST_IP=$(ip a|grep "$HOST_IF$"|awk '{print $2}'|cut -d'/' -f1)
    hostnamectl set-hostname $(grep $HOST_IP /etc/hosts | awk '{print $2}')
EOF
done
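
To confirm the hostnames took effect, a simple check loop (each node should report node0[1-4]):

for node in ${SERVER_IP[@]}; do
    echo -n "$node -> "
    ssh -T $node 'hostname'
done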

3. Tune kernel parameters / SELinux / firewall

for node in ${SERVER_IP[@]}; do
    echo "[INFO] Config -----> $node"
    ssh -T $node <<'EOF'
    # Speed up SSH connections
    sed -i "s/#UseDNS yes/UseDNS no/" /etc/ssh/sshd_config
    sed -i "s/GSSAPIAuthentication .*/GSSAPIAuthentication no/" /etc/ssh/sshd_config
    systemctl restart sshd

    # Configure Alibaba Cloud yum repos
    rm -f /etc/yum.repos.d/*.repo
    curl -so /etc/yum.repos.d/epel-7.repo http://mirrors.aliyun.com/repo/epel-7.repo
    curl -so /etc/yum.repos.d/Centos-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo
    sed -i '/aliyuncs.com/d' /etc/yum.repos.d/Centos-7.repo /etc/yum.repos.d/epel-7.repo
    
    # Firewall: trust everything and flush existing iptables rules
    firewall-cmd --set-default-zone=trusted
    firewall-cmd --complete-reload
    iptables -P INPUT ACCEPT
    iptables -F
    iptables -X
    iptables -F -t nat
    iptables -X -t nat
    iptables -F -t raw
    iptables -X -t raw
    iptables -F -t mangle
    iptables -X -t mangle

    # File / process limits
    if [ ! "$(grep '# My Limits' /etc/security/limits.conf)" ]; then
        echo -e "\n# My Limits" >> /etc/security/limits.conf
        echo "* soft nofile 65535" >> /etc/security/limits.conf
        echo "* hard nofile 65535" >> /etc/security/limits.conf
        echo "* soft nproc 65535"  >> /etc/security/limits.conf
        echo "* hard nproc 65535"  >> /etc/security/limits.conf
        echo "* soft  memlock  unlimited"  >> /etc/security/limits.conf
        echo "* hard memlock  unlimited"  >> /etc/security/limits.conf
    fi

    # Enable IP forwarding
    echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
    echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
    echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf

    # Max number of directories a single user can watch via inotify
    echo 'fs.inotify.max_user_watches=524288' >> /etc/sysctl.conf

    # Max number of VMAs (virtual memory areas) a process may own
    echo 'vm.max_map_count=655360' >> /etc/sysctl.conf

    # TIME_WAIT tuning
    echo 'net.ipv4.tcp_syncookies = 1' >> /etc/sysctl.conf
    echo 'net.ipv4.tcp_tw_reuse = 1' >> /etc/sysctl.conf
    echo 'net.ipv4.tcp_tw_recycle = 1' >> /etc/sysctl.conf

    modprobe br_netfilter
    sysctl -p /etc/sysctl.conf
    
    # stop/disable selinux
    setenforce 0
    sed -i 's#SELINUX=.*#SELINUX=disabled#' /etc/selinux/config
EOF
done
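
An optional verification pass (not in the original flow) to confirm the key settings landed on every node:

for node in ${SERVER_IP[@]}; do
    echo "[INFO] Check -----> $node"
    ssh -T $node 'sysctl net.ipv4.ip_forward vm.max_map_count; getenforce; ulimit -n'
done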

4. Configure time synchronization

for node in ${SERVER_IP[@]}; do
    echo "[INFO] Install ntpdate -----> $node"
    ssh -T $node <<'EOF'
    yum install -y ntpdate
    ntpdate ntp1.aliyun.com
    hwclock -w
    crontab -l > /tmp/crontab.tmp
    echo "*/20 * * * * /usr/sbin/ntpdate ntp1.aliyun.com > /dev/null 2>&1 && /usr/sbin/hwclock -w" >> /tmp/crontab.tmp
    cat /tmp/crontab.tmp | uniq > /tmp/crontab
    crontab /tmp/crontab
    rm -f /tmp/crontab.tmp /tmp/crontab
EOF
done
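
To confirm the clocks agree, compare timestamps across the nodes; the outputs should be within a second or so of each other:

for node in ${SERVER_IP[@]}; do
    echo -n "$node : "
    ssh -T $node 'date "+%F %T"'
done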

8. Install Docker

Fetch the latest stable version from the install source and install it (binary distribution).

for node in ${SERVER_IP[@]}; do
    echo "[INFO] Install docker -----> $node"
    ssh -T $node 'bash <(curl -sSL https://gitee.com/yx571304/olz/raw/master/shell/docker/install.sh) -i docker'
done
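
The install script's internals are not shown here, so as a minimal check, confirm the daemon is running and answering on every node:

for node in ${SERVER_IP[@]}; do
    echo "[INFO] docker check -----> $node"
    ssh -T $node 'systemctl is-active docker && docker version --format "{{.Server.Version}}"'
done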

9. Configure the Swarm Cluster

# Create the swarm cluster (advertise the IP that node01 maps to in /etc/hosts)
docker swarm init --advertise-addr $(awk '/node01/{print $1}' /etc/hosts)

# Get the join commands (with tokens) for the worker and manager roles
worker_cmd=$(docker swarm join-token worker | grep 'token')
manager_cmd=$(docker swarm join-token manager | grep 'token')

# node02 joins the cluster as a manager
ssh node02 "$manager_cmd"

# node03 and node04 join the cluster as workers
ssh node03 "$worker_cmd"
ssh node04 "$worker_cmd"

# Verify
docker node ls

10. Install the weave Docker Plugin

for node in ${SERVER_IP[@]}; do
    echo "[INFO] install weave -----> $node"
    ssh -T $node '
    # Retry helper: re-run a command until it succeeds
    repeat() { while true; do "$@" && return; done; }
    # Install the weave net plugin, answering "y" to the permission prompt
    repeat echo -e "y\n" | repeat docker plugin install weaveworks/net-plugin:latest_release
    # The plugin must be disabled while its settings are changed
    docker plugin disable weaveworks/net-plugin:latest_release
    docker plugin set weaveworks/net-plugin:latest_release WEAVE_PASSWORD=MySwarmCluster
    docker plugin set weaveworks/net-plugin:latest_release WEAVE_MULTICAST=1
    docker plugin enable weaveworks/net-plugin:latest_release
    '
done
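
As a smoke test (not part of the original steps; the network name test-net is illustrative), create an attachable network backed by the plugin on node01, then remove it:

# Create a network using the weave net plugin as its driver
docker network create --driver=weaveworks/net-plugin:latest_release --attachable test-net
docker network ls | grep test-net
docker network rm test-net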

11. Install and Configure the glusterfs Cluster

1. Install glusterfs-server

# Define the gluster data directory (adjust to your environment; ideally use a new partition or disk, formatted and mounted for storage)
gluster_data=/gluster/data

for node in ${SERVER_IP[@]}; do
    echo "[INFO] Install glusterfs-server -----> $node"
    ssh -T $node """
      # Add the software repository
      yum install -y centos-release-gluster bash-completion

      # Install glusterfs-server
      yum install -y glusterfs-server

      # Start the service and enable it at boot
      systemctl start glusterd
      systemctl enable glusterd
      
      # Create the gluster data directory
      mkdir -p $gluster_data
      
      # Create the local mount point for gluster
      mkdir -p /swarm/volumes
      """
done

2. Configure the cluster

# Build the trusted storage pool (run on node01)
gluster peer probe node02
gluster peer probe node03
gluster peer probe node04

# Check the trusted pool status
gluster peer status

# Create a replicated volume (4 bricks with replica 2 yields a 2x2 distributed-replicated volume)
gluster volume create swarm-volumes replica 2 node01:${gluster_data} node02:${gluster_data} node03:${gluster_data} node04:${gluster_data} force

# Restrict access: allow mounting from localhost only
gluster volume set swarm-volumes auth.allow 127.0.0.1

# Start the volume
gluster volume start swarm-volumes

# Check status
gluster volume status swarm-volumes
gluster volume info

12. Install and Configure autofs Auto-Mount

# Install and configure autofs on every node
for node in ${SERVER_IP[@]}; do
    ssh -T $node <<'EOF'
      # Install autofs
      yum install -y autofs

      # Configure: map /swarm via the glusterfs.net map file
      echo -e '\n# add glusterfs config\n/swarm    /etc/glusterfs.net'  >> /etc/auto.master
      echo 'volumes  -fstype=glusterfs,rw  localhost:/swarm-volumes' > /etc/glusterfs.net

      # Start the service and enable it at boot
      systemctl restart autofs
      systemctl enable autofs
EOF
done


# Verify the glusterfs mount on every node
for node in ${SERVER_IP[@]}; do
    echo "[INFO] Mount glusterfs -----> $node"
    ssh -T $node 'df -hT | grep glusterfs'
done
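
As a final end-to-end check (the file name swarm-test is illustrative), write a file through the autofs mount on node01 and confirm every node sees it:

echo "hello from node01" > /swarm/volumes/swarm-test
for node in ${SERVER_IP[@]}; do
    echo -n "$node : "
    ssh -T $node 'cat /swarm/volumes/swarm-test'
done
# Clean up the test file
rm -f /swarm/volumes/swarm-test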
