Dynamically Expanding a MinIO Cluster on Docker Swarm

Background

When a MinIO cluster runs out of disk space, there are two options: add more disks, or add more machines. Either way, you must not change the startup command of the existing cluster; otherwise the cluster will fail to come up and will keep logging errors like: ERROR Unable to initialize backend: /data1 drive is already being used in another erasure deployment. (Number of drives specified: 9 but the number of drives found in the 1st drive's format.json: 6). This happens because MinIO treats the newly started cluster as a deployment distinct from the original one. The proper solution is MinIO's official server pool concept.
Reference: Expand a Distributed MinIO Deployment
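
With server pools, the drive specification of the original pool is kept verbatim and each expansion is simply appended as an additional pool argument. As a minimal sketch of the command shape used in the deployment below:

# the original pool argument stays unchanged; the new pool is appended after it
minio server http://minio0{1...3}/data{1...2} \
             http://minio0{4...6}/data{1...2}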

The concrete steps are given below.

First, stop the old cluster.

docker stack down minio
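
Before proceeding, it is worth confirming that the old services are really gone; something like the following can be used:

# neither command should list any minio services or tasks anymore
docker stack ps minio
docker service ls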

For easy comparison, here is the old cluster's docker-compose file:

  • docker-compose-minio-3.yml
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
  #image: minio:latest
  image: quay.io/minio/minio:RELEASE.2023-06-19T19-52-50Z
  command: server --console-address ":9001" http://minio0{1...3}/data{1...2}
  #expose:
  #  - "9000"
  #  - "9001"
  environment:
    MINIO_ROOT_USER: minioadmin
    MINIO_ROOT_PASSWORD: minioadmin
  networks:
    - minio_distributed
  healthcheck:
    test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
    interval: 30s
    timeout: 20s
    retries: 3

# starts 3 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
  minio1:
    <<: *minio-common
    hostname: minio01
    deploy:
      restart_policy:
        delay: 10s
        max_attempts: 10
        window: 60s
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==1
    volumes:
      - minio1-data-1:/data1
      - minio1-data-2:/data2

  minio2:
    <<: *minio-common
    hostname: minio02
    deploy:
      restart_policy:
        delay: 10s
        max_attempts: 10
        window: 60s
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==2
    volumes:
      - minio2-data-1:/data1
      - minio2-data-2:/data2

  minio3:
    <<: *minio-common
    hostname: minio03
    deploy:
      restart_policy:
        delay: 10s
        max_attempts: 10
        window: 60s
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==3
    volumes:
      - minio3-data-1:/data1
      - minio3-data-2:/data2

  nginx:
    image: nginx:1.20-alpine
    hostname: nginx
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
    networks:
      - minio_distributed
    ports:
      - "9000:9000"
      - "9001:9001"
    deploy:
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==1
    depends_on:
      - minio1
      - minio2
      - minio3

## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
  minio1-data-1:
  minio1-data-2:
  minio2-data-1:
  minio2-data-2:
  minio3-data-1:
  minio3-data-2:

networks:
  minio_distributed:
    external: true # use the pre-created network

The cluster is deployed with Docker Swarm; for the deployment details, see the earlier post: deploying a MinIO cluster with Docker Swarm and load balancing it with nginx.
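
Two prerequisites from that setup are assumed here: the external overlay network minio_distributed and the minio.replica node labels used by the placement constraints. If they do not exist yet, they could be created along these lines (the node names are placeholders):

# create the overlay network declared as "external" in the compose file
docker network create --driver overlay minio_distributed
# label the swarm nodes so the placement constraints can match them
docker node update --label-add minio.replica=1 node1
docker node update --label-add minio.replica=2 node2
docker node update --label-add minio.replica=3 node3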

Adjust the docker-compose file to introduce a server pool

  • docker-compose-minio-6.yml
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
  #image: minio:latest
  image: quay.io/minio/minio:RELEASE.2023-06-19T19-52-50Z
  command: server --console-address ":9001" http://minio0{1...3}/data{1...2} http://minio0{4...6}/data{1...2}
  #expose:
  #  - "9000"
  #  - "9001"
  environment:
    MINIO_ROOT_USER: minioadmin
    MINIO_ROOT_PASSWORD: minioadmin
  networks:
    - minio_distributed
  healthcheck:
    test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
    interval: 30s
    timeout: 20s
    retries: 3

# starts 6 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
  minio1:
    <<: *minio-common
    hostname: minio01
    deploy:
      restart_policy:
        delay: 10s
        max_attempts: 10
        window: 60s
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==1
    volumes:
      - minio1-data-1:/data1
      - minio1-data-2:/data2

  minio2:
    <<: *minio-common
    hostname: minio02
    deploy:
      restart_policy:
        delay: 10s
        max_attempts: 10
        window: 60s
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==2
    volumes:
      - minio2-data-1:/data1
      - minio2-data-2:/data2

  minio3:
    <<: *minio-common
    hostname: minio03
    deploy:
      restart_policy:
        delay: 10s
        max_attempts: 10
        window: 60s
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==3
    volumes:
      - minio3-data-1:/data1
      - minio3-data-2:/data2
  
  minio4:
    <<: *minio-common
    hostname: minio04
    deploy:
      restart_policy:
        delay: 10s
        max_attempts: 10
        window: 60s
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==3
    volumes:
      - minio4-data-1:/data1
      - minio4-data-2:/data2

  minio5:
    <<: *minio-common
    hostname: minio05
    deploy:
      restart_policy:
        delay: 10s
        max_attempts: 10
        window: 60s
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==3
    volumes:
      - minio5-data-1:/data1
      - minio5-data-2:/data2

  minio6:
    <<: *minio-common
    hostname: minio06
    deploy:
      restart_policy:
        delay: 10s
        max_attempts: 10
        window: 60s
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==3
    volumes:
      - minio6-data-1:/data1
      - minio6-data-2:/data2

  nginx:
    image: nginx:1.20-alpine
    hostname: nginx
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
    networks:
      - minio_distributed
    ports:
      - "9000:9000"
      - "9001:9001"
    deploy:
      resources:
        limits:
          memory: 800M
          cpus: "0.5"
      placement:
        constraints:
          - node.labels.minio.replica==1
    depends_on:
      - minio1
      - minio2
      - minio3
      - minio4
      - minio5
      - minio6

## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
  minio1-data-1:
  minio1-data-2:
  minio2-data-1:
  minio2-data-2:
  minio3-data-1:
  minio3-data-2:
  minio4-data-1:
  minio4-data-2:
  minio5-data-1:
  minio5-data-2:
  minio6-data-1:
  minio6-data-2:

networks:
  minio_distributed:
    external: true # use the pre-created network

Note that the startup command has changed (the new pool http://minio0{4...6}/data{1...2} is appended after the original one), and three new containers have been added. In this example all three new services are constrained to the node labeled minio.replica==3; adjust the placement constraints to match where the new drives or machines actually live.

  • If you expand by adding disks rather than machines, the volumes for the three new services must be created manually. Be sure to create the directories on the host first; this has to be done for each of the six new volumes (a loop sketch follows the example below):
# create the directory on the host first
mkdir -p /data/minio/minio4-data-2
# then create a named volume that bind-mounts it
docker volume create --opt type=none --opt device=/data/minio/minio4-data-2 --opt o=bind minio4-data-2
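
To avoid repeating those two commands six times, a small loop can cover all of the new volumes, assuming the same /data/minio base path; run it on whichever node the corresponding service is placed on:

# create a host directory and a matching bind-mounted volume for every new drive
for v in minio4-data-1 minio4-data-2 minio5-data-1 minio5-data-2 minio6-data-1 minio6-data-2; do
  mkdir -p /data/minio/$v
  docker volume create --opt type=none --opt device=/data/minio/$v --opt o=bind $v
done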

Update the nginx.conf file

Three nodes were added, so nginx.conf must be updated so that both upstreams include them:

  • nginx.conf
user  nginx;
worker_processes  auto;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;

events {
    worker_connections  4096;
}

http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;
    sendfile        on;
    keepalive_timeout  65;

    # include /etc/nginx/conf.d/*.conf;

    upstream minio {
        server minio01:9000;
        server minio02:9000;
        server minio03:9000;
        server minio04:9000;
        server minio05:9000;
        server minio06:9000;
    }

    upstream console {
        ip_hash;
        server minio01:9001;
        server minio02:9001;
        server minio03:9001;
        server minio04:9001;
        server minio05:9001;
        server minio06:9001;
    }

    server {
        listen       9000;
        listen  [::]:9000;
        server_name  localhost;

        # To allow special characters in headers
        ignore_invalid_headers off;
        # Allow any size file to be uploaded.
        # Set to a value such as 1000m; to restrict file size to a specific value
        client_max_body_size 0;
        # To disable buffering
        proxy_buffering off;
        proxy_request_buffering off;

        location / {
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;

            proxy_connect_timeout 300;
            # Default is HTTP/1, keepalive is only enabled in HTTP/1.1
            proxy_http_version 1.1;
            proxy_set_header Connection "";
            chunked_transfer_encoding off;

            proxy_pass http://minio;
        }
    }

    server {
        listen       9001;
        listen  [::]:9001;
        server_name  localhost;

        # To allow special characters in headers
        ignore_invalid_headers off;
        # Allow any size file to be uploaded.
        # Set to a value such as 1000m; to restrict file size to a specific value
        client_max_body_size 0;
        # To disable buffering
        proxy_buffering off;
        proxy_request_buffering off;

        location / {
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-NginX-Proxy true;

            # This is necessary to pass the correct IP to be hashed
            real_ip_header X-Real-IP;

            proxy_connect_timeout 300;

            # To support websocket
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";

            chunked_transfer_encoding off;

            proxy_pass http://console;
        }
    }
}

Then bring the services back up with Docker Swarm and the expansion is complete.
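
For example, reusing the stack name from the teardown step:

docker stack deploy -c docker-compose-minio-6.yml minio

Once the services are healthy, the new pool can be checked with the MinIO client (the alias name myminio is just an example):

# register the deployment under an alias, then inspect its pools and drives
mc alias set myminio http://localhost:9000 minioadmin minioadmin
mc admin info myminio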

posted on 2023-11-22 18:14 by JentZhang