10.10.10.103 graylog单台docker-compose es集群分离docker-compose

centos7

cp -r  /etc/yum.repos.d/ /etc/yum.repos.d.bak

vim /etc/yum.repos.d/CentOS-Base.repo

# Tsinghua (TUNA) mirror configuration for CentOS 7. The stock mirrorlist
# entries are kept below, commented out, in case a rollback is needed.
[base]
name=CentOS-$releasever - Base
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/os/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#released updates
[updates]
name=CentOS-$releasever - Updates
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/updates/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#additional packages that may be useful
[extras]
name=CentOS-$releasever - Extras
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/extras/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#additional packages that extend functionality of existing packages
# (disabled by default via enabled=0)
[centosplus]
name=CentOS-$releasever - Plus
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/centosplus/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

 

 

# Rebuild yum metadata against the new TUNA mirrors.
yum clean all

yum makecache

# -y avoids the interactive confirmation prompt so the step can run unattended.
yum install -y wget

# Set the system timezone to Asia/Shanghai.
# NOTE(review): `timedatectl set-timezone Asia/Shanghai` (or `ln -sf`) is the
# conventional way on CentOS 7; a plain copy also works.
cp /usr/share/zoneinfo/Asia/Shanghai  /etc/localtime

# Force a UTF-8 locale for all login shells.
echo 'LANG="en_US.UTF-8"' >> /etc/profile

source /etc/profile

 

# Raise open-file limits for root and all users, and cap user processes;
# the heredoc below is appended verbatim to /etc/security/limits.conf.
# NOTE(review): soft nofile is 65535 while hard is 65536 — legal (soft <= hard)
# but the off-by-one looks unintentional; confirm the intended values.
# NOTE(review): on CentOS 7, /etc/security/limits.d/20-nproc.conf may override
# the nproc entries — verify after reboot with `ulimit -u`.
cat >> /etc/security/limits.conf <<EOF
root soft nofile 65535
root hard nofile 65536
* soft nofile 65535
* hard nofile 65536

* soft nproc 4096
* hard nproc 4096

EOF

 

# Stop and disable the host firewall (ports are managed manually here).
systemctl stop firewalld

systemctl disable firewalld

# Remove any pre-existing Docker packages; -y skips the interactive prompt.
yum remove -y docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine

sudo yum install -y yum-utils device-mapper-persistent-data lvm2

yum install -y yum-utils

# Add the Docker CE repo, then point it at the TUNA mirror.
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

sudo sed -i 's+https://download.docker.com+https://mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo

yum makecache fast

yum install -y docker-ce

# Edit /etc/selinux/config and set SELINUX=permissive (or disabled);
# run `setenforce 0` afterwards to apply without a reboot.
vim /etc/selinux/config

systemctl start docker

systemctl enable docker

# Install docker-compose v2.17.1 as a standalone binary.
# SECURITY: download the official release over HTTPS instead of an
# unverified third-party HTTP mirror.
wget -O docker-compose-2.17.1 https://github.com/docker/compose/releases/download/v2.17.1/docker-compose-linux-x86_64

mv docker-compose-2.17.1 /usr/local/bin/docker-compose

chmod +x /usr/local/bin/docker-compose

mkdir graylog

cd graylog

vim docker-compose.yml

version: '2'
services:
# MongoDB: https://hub.docker.com/_/mongo/
  mongodb:
    image: mongo:5.0.13
    #image: harbor.onlyedu.online:1080/baseimage/mongo:5.0.13_alex
    volumes:
    - mongo_data:/data/db
    environment:
# No spaces around '=': "TZ = Asia/Shanghai" would define a variable
# literally named "TZ " (trailing space) and leave TZ itself unset.
    - TZ=Asia/Shanghai
    networks:
    - graylog
# Elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/7.10/docker.html
# (disabled here — this host uses the separate ES cluster compose file)
# elasticsearch:
#   image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2
#   #image: harbor.onlyedu.online:1080/baseimage/elasticsearch-oss:7.10.2_alex
#   volumes:
#   - es_data:/usr/share/elasticsearch/data
#   environment:
#   - http.host=0.0.0.0
#   - transport.host=localhost
#   - network.host=0.0.0.0
#   - "ES_JAVA_OPTS=-Xms32g -Xmx32g"
#   - TZ=Asia/Shanghai
#   ulimits:
#     memlock:
#       soft: -1
#       hard: -1
#   mem_limit: 34g
#   ports:
#   - 9200:9200
# Graylog: https://hub.docker.com/r/graylog/graylog/
  graylog:
    image: graylog/graylog:5.0
    #image: harbor.onlyedu.online:1080/baseimage/graylog/graylog:5.0_alex
    volumes:
    - graylog_data:/usr/share/graylog/data
    - /etc/localtime:/etc/localtime:ro
    - /root/graylog/graylog.conf:/usr/share/graylog/data/config/graylog.conf:ro
    networks:
    - graylog
    environment:
# CHANGE ME (must be at least 16 characters)!
    - GRAYLOG_PASSWORD_SECRET=somepasswordpepper
# Password: admin
    - GRAYLOG_ROOT_PASSWORD_SHA2=8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
# NOTE(review): this should be the URL browsers use to reach Graylog
# (e.g. http://10.10.10.103:9000/), not 127.0.0.1 — confirm.
    - GRAYLOG_HTTP_EXTERNAL_URI=http://127.0.0.1:9000/
    - GRAYLOG_ROOT_TIMEZONE=Asia/Shanghai
# No spaces around '=' (same env-list pitfall as above).
    - TZ=Asia/Shanghai
    - GRAYLOG_TIMEZONE=Asia/Shanghai
#    entrypoint: /usr/bin/tini -- wait-for-it elasticsearch:9200 -- /docker-entrypoint.sh
    entrypoint: /usr/bin/tini  -- /docker-entrypoint.sh
    links:
      - mongodb:mongo
#      - elasticsearch
    restart: always
    depends_on:
    - mongodb
#    - elasticsearch
    ports:
# Port mappings quoted so YAML cannot misread "a:b" values.
# Graylog web interface and REST API
    - "9000:9000"
# Syslog TCP
    - "1514:1514"
# Syslog UDP
    - "1514:1514/udp"
# GELF TCP
    - "12201:12201"
    - "12202:12202"
# GELF UDP
    - "12201:12201/udp"
    - "12202:12202/udp"
    - "5555:5555"
# Volumes for persisting data, see https://docs.docker.com/engine/admin/volumes/volumes/
volumes:
    mongo_data:
      driver: local
#    es_data:
#      driver: local
    graylog_data:
      driver: local
networks:
  graylog:
    driver: bridge
    ipam:
      config:
        - subnet: 172.29.0.0/16
          gateway: 172.29.0.1

 

 

 

es集群部分:

参考 https://blog.csdn.net/tianqiuhao/article/details/130368132

 

# Required by Elasticsearch: raise the kernel mmap-count limit.
 echo "vm.max_map_count=262144">>/etc/sysctl.conf

sysctl -p

# Per-node config/plugins dirs under /etc and data/log dirs under /home,
# matching the bind mounts in the ES docker-compose file.
mkdir -p /etc/es/elasticsearch/node-{1..3}/{config,plugins}

mkdir -p /home/es/elasticsearch/node-{1..3}/{data,log}

# NOTE(review): 777 is overly permissive; chown'ing to the container's
# elasticsearch user (uid 1000) would be tighter — confirm before changing.
 chmod 777 /etc/es/elasticsearch/node-{1..3}/{config,plugins}

chmod 777 /home/es/elasticsearch/node-{1..3}/{data,log}

 

vi /etc/es/elasticsearch/node-1/config/elasticsearch.yml

# Cluster name — must be identical on every node that should join the cluster.
cluster.name: elastic
# Unique name of this node within the cluster.
node.name: node-1
# Whether this node is eligible to be elected master.
node.master: true
# Whether this node stores data.
node.data: true
# Maximum number of nodes allowed to share this data path.
node.max_local_storage_nodes: 3
# Optional custom node attribute (unused).
#node.attr.rack: r1
# Data path inside the container (bind-mounted from the host).
path.data: /usr/share/elasticsearch/data
# Log path inside the container.
path.logs: /usr/share/elasticsearch/log
# Lock process memory at startup (left disabled here).
#bootstrap.memory_lock: true
# Bind address. 0.0.0.0 binds all interfaces; using the host's physical IP
# inside a container fails because that address does not exist in the
# container's network namespace.
network.host: 0.0.0.0
# HTTP (REST) port.
http.port: 9200
# Transport port for inter-node communication.
transport.tcp.port: 9300
# Seed hosts for discovery (ES 7.x). List every candidate master node;
# the names resolve as container names on the shared Docker network.
discovery.seed_hosts: ["node-1","node-2","node-3"]
# Nodes allowed to bootstrap the very first master election of a brand-new
# cluster (only consulted on the initial cluster startup).
cluster.initial_master_nodes: ["node-1","node-2","node-3"]
# After a full cluster restart, block initial recovery until this many
# nodes have joined.
gateway.recover_after_nodes: 2
# Require an explicit index name when deleting indices (default shown).
#action.destructive_requires_name: true
# Disable security warnings on queries (left commented out).
#xpack.security.enabled: false
# Allow cross-origin HTTP requests (needed by the elasticsearch-head UI).
http.cors.enabled: true
http.cors.allow-origin: "*"

 

vi /etc/es/elasticsearch/node-2/config/elasticsearch.yml

# Cluster name — must be identical on every node that should join the cluster.
cluster.name: elastic
# Unique name of this node within the cluster.
node.name: node-2
# Whether this node is eligible to be elected master.
node.master: true
# Whether this node stores data.
node.data: true
# Maximum number of nodes allowed to share this data path.
node.max_local_storage_nodes: 3
# Optional custom node attribute (unused).
#node.attr.rack: r1
# Data path inside the container (bind-mounted from the host).
path.data: /usr/share/elasticsearch/data
# Log path inside the container.
path.logs: /usr/share/elasticsearch/log
# Lock process memory at startup (left disabled here).
#bootstrap.memory_lock: true
# Bind address. 0.0.0.0 binds all interfaces; using the host's physical IP
# inside a container fails because that address does not exist in the
# container's network namespace.
network.host: 0.0.0.0
# HTTP (REST) port.
http.port: 9200
# Transport port for inter-node communication.
transport.tcp.port: 9300
# Seed hosts for discovery (ES 7.x). List every candidate master node;
# the names resolve as container names on the shared Docker network.
discovery.seed_hosts: ["node-1","node-2","node-3"]
# Nodes allowed to bootstrap the very first master election of a brand-new
# cluster (only consulted on the initial cluster startup).
cluster.initial_master_nodes: ["node-1","node-2","node-3"]
# After a full cluster restart, block initial recovery until this many
# nodes have joined.
gateway.recover_after_nodes: 2
# Require an explicit index name when deleting indices (default shown).
#action.destructive_requires_name: true
# Disable security warnings on queries (left commented out).
#xpack.security.enabled: false
# Allow cross-origin HTTP requests (needed by the elasticsearch-head UI).
http.cors.enabled: true
http.cors.allow-origin: "*"

 

vi /etc/es/elasticsearch/node-3/config/elasticsearch.yml

# Cluster name — must be identical on every node that should join the cluster.
cluster.name: elastic
# Unique name of this node within the cluster.
node.name: node-3
# Whether this node is eligible to be elected master.
node.master: true
# Whether this node stores data.
node.data: true
# Maximum number of nodes allowed to share this data path.
node.max_local_storage_nodes: 3
# Optional custom node attribute (unused).
#node.attr.rack: r1
# Data path inside the container (bind-mounted from the host).
path.data: /usr/share/elasticsearch/data
# Log path inside the container.
path.logs: /usr/share/elasticsearch/log
# Lock process memory at startup (left disabled here).
#bootstrap.memory_lock: true
# Bind address. 0.0.0.0 binds all interfaces; using the host's physical IP
# inside a container fails because that address does not exist in the
# container's network namespace.
network.host: 0.0.0.0
# HTTP (REST) port.
http.port: 9200
# Transport port for inter-node communication.
transport.tcp.port: 9300
# Seed hosts for discovery (ES 7.x). List every candidate master node;
# the names resolve as container names on the shared Docker network.
discovery.seed_hosts: ["node-1","node-2","node-3"]
# Nodes allowed to bootstrap the very first master election of a brand-new
# cluster (only consulted on the initial cluster startup).
cluster.initial_master_nodes: ["node-1","node-2","node-3"]
# After a full cluster restart, block initial recovery until this many
# nodes have joined.
gateway.recover_after_nodes: 2
# Require an explicit index name when deleting indices (default shown).
#action.destructive_requires_name: true
# Disable security warnings on queries (left commented out).
#xpack.security.enabled: false
# Allow cross-origin HTTP requests (needed by the elasticsearch-head UI).
http.cors.enabled: true
http.cors.allow-origin: "*"

 

mkdir /root/es

cd /root/es

vim docker-compose.yml

version: "3"

# Three-node Elasticsearch 7.10 (OSS) cluster plus the elasticsearch-head UI.
# Settings shared by all three ES nodes are declared once on node-1 via YAML
# anchors and reused on node-2/node-3 with aliases; only the container name,
# published port, and bind-mount paths differ per node.
services:
  node-1:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2
    container_name: node-1
    environment: &es_env
      - "ES_JAVA_OPTS=-Xms10288m -Xmx10288m"
      - "TZ=Asia/Shanghai"
    ulimits: &es_ulimits
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    ports:
      - "9200:9200"
    logging: &es_logging
      driver: "json-file"
      options:
        max-size: "50m"
    volumes:
      - /etc/es/elasticsearch/node-1/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /etc/es/elasticsearch/node-1/plugins:/usr/share/elasticsearch/plugins
      - /home/es/elasticsearch/node-1/data:/usr/share/elasticsearch/data
      - /home/es/elasticsearch/node-1/log:/usr/share/elasticsearch/log
    networks:
      - elastic
  node-2:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2
    container_name: node-2
    environment: *es_env
    ulimits: *es_ulimits
    ports:
      - "9201:9200"
    logging: *es_logging
    volumes:
      - /etc/es/elasticsearch/node-2/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /etc/es/elasticsearch/node-2/plugins:/usr/share/elasticsearch/plugins
      - /home/es/elasticsearch/node-2/data:/usr/share/elasticsearch/data
      - /home/es/elasticsearch/node-2/log:/usr/share/elasticsearch/log
    networks:
      - elastic
  node-3:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2
    container_name: node-3
    environment: *es_env
    ulimits: *es_ulimits
    ports:
      - "9202:9200"
    logging: *es_logging
    volumes:
      - /etc/es/elasticsearch/node-3/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /etc/es/elasticsearch/node-3/plugins:/usr/share/elasticsearch/plugins
      - /home/es/elasticsearch/node-3/data:/usr/share/elasticsearch/data
      - /home/es/elasticsearch/node-3/log:/usr/share/elasticsearch/log
    networks:
      - elastic
  # Browser UI for inspecting the cluster (talks to the CORS-enabled nodes).
  elasticsearch-head:
    image: wallbase/elasticsearch-head:6-alpine
    container_name: elasticsearch-head
    environment:
      TZ: 'Asia/Shanghai'
    ports:
      - '9100:9100'
    networks:
      - elastic
networks:
  elastic:
    driver: bridge
    ipam:
      config:
        - subnet: 172.25.0.0/24

 

docker-compose up -d

cat graylog.conf

# Graylog server configuration, bind-mounted read-only into the container at
# /usr/share/graylog/data/config/graylog.conf.
# Whether this node is the Graylog master node.
# NOTE(review): newer Graylog versions renamed this to is_leader; confirm the
# graylog:5.0 image still honours is_master.
is_master = true
node_id_file = /usr/share/graylog/data/config/node-id
# Left empty here: the docker-compose file supplies GRAYLOG_PASSWORD_SECRET
# and GRAYLOG_ROOT_PASSWORD_SHA2 as environment variables, which override
# these file settings.
password_secret =
root_password_sha2 =
bin_dir = /usr/share/graylog/bin
data_dir = /usr/share/graylog/data
plugin_dir = /usr/share/graylog/plugin
# Listen on all interfaces on port 9000 (published by docker-compose).
http_bind_address = 0.0.0.0:9000
# The three ES nodes published on the docker host at 9200/9201/9202.
elasticsearch_hosts = http://10.10.10.103:9200,http://10.10.10.103:9201,http://10.10.10.103:9202
# Index rotation/retention: rotate by document count, delete old indices.
rotation_strategy = count
elasticsearch_max_docs_per_index = 20000000
elasticsearch_max_number_of_indices = 20
retention_strategy = delete
elasticsearch_shards = 4
elasticsearch_replicas = 0
elasticsearch_index_prefix = graylog
allow_leading_wildcard_searches = false
allow_highlighting = false
elasticsearch_analyzer = standard
# Output batching and fault handling.
output_batch_size = 500
output_flush_interval = 1
output_fault_count_threshold = 5
output_fault_penalty_seconds = 30
# Buffer processor thread counts and wait strategies.
processbuffer_processors = 5
outputbuffer_processors = 3
processor_wait_strategy = blocking
ring_size = 65536
inputbuffer_ring_size = 65536
inputbuffer_processors = 2
inputbuffer_wait_strategy = blocking
# On-disk journal for incoming messages (survives restarts).
message_journal_enabled = true
message_journal_dir = data/journal
lb_recognition_period_seconds = 3
# "mongo" resolves via the compose link "mongodb:mongo".
mongodb_uri = mongodb://mongo/graylog
mongodb_max_connections = 1000
mongodb_threads_allowed_to_block_multiplier = 5
proxied_requests_thread_pool_size = 32

 

 

 

posted @ 2023-07-05 11:52  alexhe  阅读(101)  评论(0编辑  收藏  举报