ELK + ZooKeeper + Kafka cluster deployment (untested)
I. ZooKeeper cluster
1. Server planning
Server1:192.168.89.11
Server2:192.168.89.12
Server3:192.168.89.13
#Architecture diagram (image omitted)
2. Software
mkdir -p /nulige/tools
cd /nulige/tools
zookeeper-3.4.13.tar.gz
kafka_2.12-2.1.0.tgz
3. Install the JDK (note: install on all three servers)
yum install java-1.8.0-openjdk -y
4. Set up the ZooKeeper cluster
#Download URL
http://mirrors.shu.edu.cn/apache/zookeeper/zookeeper-3.4.13/zookeeper-3.4.13.tar.gz
#Install ZooKeeper (note: on all three servers)
tar xf zookeeper-3.4.13.tar.gz
mv zookeeper-3.4.13 /usr/local/
cd /usr/local/zookeeper-3.4.13/conf
#Edit the configuration file:
mkdir -p /usr/local/zookeeper-3.4.13/data
mkdir -p /usr/local/zookeeper-3.4.13/logs
cp /usr/local/zookeeper-3.4.13/conf/zoo_sample.cfg zoo.cfg
vi /usr/local/zookeeper-3.4.13/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper-3.4.13/data
dataLogDir=/usr/local/zookeeper-3.4.13/logs
clientPort=2181
server.1=192.168.89.11:2888:3888
server.2=192.168.89.12:2888:3888
server.3=192.168.89.13:2888:3888
#The first port (2888) is used for communication between the leader and the followers; the second port (3888) is used for leader election, both when the cluster first starts and when a new election is held after the leader dies
echo "1" > /data/zookeeper/zookeeper-3.4.13/data/myid #server1配置,各节点不同,跟上面配置server.1的号码一样
echo "2" > /data/zookeeper/zookeeper-3.4.13/data/myid #server2配置,各节点不同,跟上面配置server.2的号码一样
echo "3" > /data/zookeeper/zookeeper-3.4.13/data/myid #server3配置,各节点不同,跟上面配置server.3的号码一样
#Start the service:
/usr/local/zookeeper-3.4.13/bin/zkServer.sh start
#Check the listening ports
[root@a1 conf]# netstat -lntup|grep java
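To confirm the ensemble actually formed, ask each node for its role; exactly one should report "leader" and the others "follower" (a quick check, assuming all three nodes are up):
/usr/local/zookeeper-3.4.13/bin/zkServer.sh status
#Or probe a node with the four-letter "stat" command (requires nc):
echo stat | nc 192.168.89.11 2181 | grep Mode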
5. Set up the Kafka cluster
#Download URL
http://mirror.bit.edu.cn/apache/kafka/2.1.0/kafka_2.12-2.1.0.tgz
#Install Kafka (note: on all three servers): unpack kafka_2.12-2.1.0.tgz
tar xf kafka_2.12-2.1.0.tgz
mv kafka_2.12-2.1.0 /usr/local/kafka_2.12
mkdir -p /usr/local/kafka_2.12/logs
cp /usr/local/kafka_2.12/config/server.properties /usr/local/kafka_2.12/config/server.properties.bak
cd /usr/local/kafka_2.12/config/
[root@node1 config]# egrep -v "^#|^$" server.properties
broker.id=1 #Set to 1, 2, 3 on the respective nodes; remember to change it on each host
listeners=PLAINTEXT://192.168.89.11:9092 #Use each node's own IP
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka_2.12/logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.89.11:2181,192.168.89.12:2181,192.168.89.13:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
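Only broker.id and listeners differ between the three nodes. A small sketch for stamping them per host (ID and IP are placeholders you set on each node, assuming the planned IPs above):
ID=1; IP=192.168.89.11 #On server2: ID=2, IP=192.168.89.12; on server3: ID=3, IP=192.168.89.13
sed -i "s/^broker.id=.*/broker.id=${ID}/" /usr/local/kafka_2.12/config/server.properties
sed -i "s#^listeners=.*#listeners=PLAINTEXT://${IP}:9092#" /usr/local/kafka_2.12/config/server.properties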
#A Kafka broker needs 1 GB of heap by default; to change it, edit kafka-server-start.sh
#vim /usr/local/kafka_2.12/bin/kafka-server-start.sh
#Find the KAFKA_HEAP_OPTS setting and change it, for example:
export KAFKA_HEAP_OPTS="-Xmx2G -Xms2G"
#Start the service:
nohup /usr/local/kafka_2.12/bin/kafka-server-start.sh /usr/local/kafka_2.12/config/server.properties > /tmp/kafka.log 2>&1 &
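To verify that each broker registered with ZooKeeper, list the broker ids through the zookeeper-shell bundled with Kafka; it should print [1, 2, 3] once all three are up:
/usr/local/kafka_2.12/bin/zookeeper-shell.sh 192.168.89.11:2181 ls /brokers/ids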
6. Common Kafka commands
#Create a topic
[root@node1 bin]# ./kafka-topics.sh --create --zookeeper 192.168.89.11:2181,192.168.89.12:2181,192.168.89.13:2181 --replication-factor 3 --partitions 3 --topic test
Created topic "test".
#List topics
[root@node1 bin]# ./kafka-topics.sh --list --zookeeper 192.168.89.11:2181,192.168.89.12:2181,192.168.89.13:2181
test
#Describe a topic
./bin/kafka-topics.sh --describe --zookeeper 192.168.89.11:2181,192.168.89.12:2181,192.168.89.13:2181 --topic topic_name
#Delete a topic
./bin/kafka-topics.sh --delete --topic topic_name --zookeeper 192.168.89.11:2181,192.168.89.12:2181,192.168.89.13:2181
#Describe the __consumer_offsets topic per partition (shows which brokers consumers can connect to)
./bin/kafka-topics.sh --describe --zookeeper 192.168.89.11:2181,192.168.89.12:2181,192.168.89.13:2181 --topic __consumer_offsets
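A quick smoke test of the cluster with the bundled console clients (assuming the test topic created above; type a few lines into the producer, then read them back from the consumer):
#Terminal 1: produce
./kafka-console-producer.sh --broker-list 192.168.89.11:9092 --topic test
#Terminal 2: consume from the beginning
./kafka-console-consumer.sh --bootstrap-server 192.168.89.11:9092 --topic test --from-beginning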
Reference:
https://www.cnblogs.com/longBlogs/p/10340251.html
II. ELK cluster deployment
1. ELK environment preparation
#Server planning
Server1:192.168.89.11
Server2:192.168.89.12
Server3:192.168.89.13
#Software:
mkdir -p /nulige/tools
cd /nulige/tools
jdk-8u25-linux-x64.tar.gz
logstash-6.6.1.tar.gz
elasticsearch-6.6.1.tar.gz
kibana-6.6.1-linux-x86_64.tar.gz
kafka_2.12-2.1.0.tgz
filebeat-6.6.1-linux-x86_64.tar.gz
2. Deploy ELK
1. Install Java
yum install java-1.8.0-openjdk -y
#Verify the Java version
java -version
2. Elasticsearch binary installation and startup
cd /nulige/tools
tar xf elasticsearch-6.6.1.tar.gz
mv elasticsearch-6.6.1 /usr/local/
mkdir -p /usr/local/elasticsearch-6.6.1/data
mkdir -p /usr/local/elasticsearch-6.6.1/logs
cp /usr/local/elasticsearch-6.6.1/config/elasticsearch.yml /usr/local/elasticsearch-6.6.1/config/elasticsearch.yml.bak
#Edit the configuration file (note: configure all three servers)
vi /usr/local/elasticsearch-6.6.1/config/elasticsearch.yml
cluster.name: es_cluster #Cluster name; must be identical on every node of the cluster
node.name: node-1 #Node name; must be unique per host within the cluster (use node-2 and node-3 on the other nodes)
path.data: /usr/local/elasticsearch-6.6.1/data
path.logs: /usr/local/elasticsearch-6.6.1/logs
network.host: 0.0.0.0
http.port: 9200
discovery.zen.ping.unicast.hosts: ["192.168.89.11", "192.168.89.12", "192.168.89.13"] #Cluster members; with no dedicated master list, the master is elected freely
discovery.zen.minimum_master_nodes: 2 #The officially recommended value is (N/2)+1
indices.query.bool.max_clause_count: 10000 #Raise the maximum bool clause count (default 1024)
#Elasticsearch must be started as a non-root user
useradd -s /sbin/nologin elk
chown -R elk:elk /usr/local/elasticsearch-6.6.1/
su - elk -s /bin/bash
/usr/local/elasticsearch-6.6.1/bin/elasticsearch -d
[root@a1 config]# netstat -lntup|grep java
tcp 0 0 0.0.0.0:9200 0.0.0.0:* LISTEN 23123/java
tcp 0 0 0.0.0.0:9300 0.0.0.0:* LISTEN 23123/java
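Because network.host is not a loopback address, Elasticsearch 6.x enforces its bootstrap checks and will refuse to start until the usual kernel limits are raised. A sketch of the common fixes (run as root on all three servers; the values are the documented minimums):
cat >> /etc/security/limits.conf <<'EOF'
elk soft nofile 65536
elk hard nofile 65536
EOF
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p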
3. Common commands
#Watch the log
[elk@a1 local]$ tail -f /usr/local/elasticsearch-6.6.1/logs/es_cluster.log #The log file is named after cluster.name
#Check that the node responds
[root@a1 vhosts]# curl 127.0.0.1:9200
#List the cluster nodes
curl -XGET 'http://192.168.89.11:9200/_cat/nodes?pretty'
192.168.89.11 18 68 0 0.07 0.06 0.05 mdi - els2
192.168.89.12 25 67 0 0.01 0.02 0.05 mdi * els3 #The * marks the current master node
192.168.89.13 7 95 0 0.02 0.04 0.05 mdi - els1
#Cluster test
curl -XGET 'http://192.168.89.11:9200/_cat/nodes?v'
#Query cluster state (method 2)
curl -XGET 'http://192.168.89.11:9200/_cluster/state/nodes?pretty'
#Query cluster health (cat API)
curl -XGET 'http://192.168.89.11:9200/_cat/health?v'
#Check cluster health
curl -XGET 'http://192.168.89.11:9200/_cluster/health'
#How to read the cluster health status at a glance
green: every index's primary shards and replica shards are all active
yellow: every index's primary shards are active, but some replica shards are not active (unavailable)
red: not every index's primary shards are active; some indices have lost data
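If health is yellow or red, the per-index view shows which indices are affected (a follow-up check, using any node's address):
curl -XGET 'http://192.168.89.11:9200/_cat/indices?v'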
4. Kibana binary installation and startup (note: install on es-node1 only)
cd /nulige/tools
tar xf kibana-6.6.1-linux-x86_64.tar.gz
mv kibana-6.6.1-linux-x86_64 /usr/local/kibana-6.6.1
[root@a1 config]# cat /usr/local/kibana-6.6.1/config/kibana.yml |egrep -v "^#|^$"
server.port: 5601 #Kibana listens on 5601; use nginx to proxy port 80 to it
server.host: "0.0.0.0"
elasticsearch.url: "http://192.168.89.11:9200" #Point at es-node1's IP address
#Start the service
nohup /usr/local/kibana-6.6.1/bin/kibana >/tmp/kibana.log &
#Access
http://192.168.89.11:5601
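For the port-80 access mentioned above, a minimal nginx reverse-proxy sketch (server_name is a placeholder; assumes nginx runs on a host that can reach es-node1):
server {
    listen 80;
    server_name kibana.example.com; #placeholder
    location / {
        proxy_pass http://192.168.89.11:5601;
        proxy_set_header Host $host;
    }
}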
5. Install Logstash
cd /nulige/tools
tar xf logstash-6.6.1.tar.gz
mv logstash-6.6.1 /usr/local/
#Tune the JVM heap size
[root@a1 config]# vi /usr/local/logstash-6.6.1/config/jvm.options
#Change -Xms1g to -Xms2g
#Change -Xmx1g to -Xmx2g
#Create the pipeline configuration file (a fresh install has no logstash.conf)
vi /usr/local/logstash-6.6.1/config/logstash.conf
input {
  kafka {
    bootstrap_servers => "192.168.89.11:9092,192.168.89.12:9092,192.168.89.13:9092"
    topics => ["nginx_access"]
    group_id => "nginx_access"
    codec => "json"
  }
}
output {
  elasticsearch {
    hosts => ["192.168.89.11:9200", "192.168.89.12:9200", "192.168.89.13:9200"]
    index => "nginx_access-%{+YYYY.MM.dd}"
  }
}
#Install haveged to speed up Logstash startup (it replenishes the kernel entropy pool the JVM drains)
yum install haveged -y
chkconfig --add haveged
chkconfig --level 35 haveged on
chkconfig --list |grep haveged
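Before starting the service, the pipeline syntax can be validated with Logstash's built-in config test:
/usr/local/logstash-6.6.1/bin/logstash -f /usr/local/logstash-6.6.1/config/logstash.conf --config.test_and_exit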
#Start the service
nohup /usr/local/logstash-6.6.1/bin/logstash -f /usr/local/logstash-6.6.1/config/logstash.conf >/tmp/logstash.log &
6. Install Filebeat
cd /nulige/tools
tar xf filebeat-6.6.1-linux-x86_64.tar.gz
mv filebeat-6.6.1-linux-x86_64 /usr/local/filebeat-6.6.1
[root@a1 filebeat-6.6.1]# cd /usr/local/filebeat-6.6.1
[root@a1 filebeat-6.6.1]# cat filebeat.yml
filebeat.inputs:
- type: log
  tail_files: true
  backoff: "1s"
  paths:
    - /roobo/logs/nginx/access.json.log
output.kafka:
  enabled: true
  hosts: ["192.168.89.11:9092", "192.168.89.12:9092", "192.168.89.13:9092"]
  topic: nginx_access
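This pipeline assumes nginx writes JSON access logs at the path above, which is why the Logstash input sets codec => "json". A minimal nginx log_format sketch (the field names are illustrative assumptions; escape=json requires nginx 1.11.8+):
log_format json_access escape=json '{"time":"$time_iso8601","remote_addr":"$remote_addr","request":"$request","status":"$status","body_bytes_sent":"$body_bytes_sent"}';
access_log /roobo/logs/nginx/access.json.log json_access;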
nohup /usr/local/filebeat-6.6.1/filebeat -e -c /usr/local/filebeat-6.6.1/filebeat.yml >/tmp/filebeat.log 2>&1 &
cat /tmp/filebeat.log
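End-to-end check: confirm events reach the Kafka topic, then confirm the daily index appears in Elasticsearch (assumes some traffic has hit nginx):
/usr/local/kafka_2.12/bin/kafka-console-consumer.sh --bootstrap-server 192.168.89.11:9092 --topic nginx_access --from-beginning --max-messages 5
curl -XGET 'http://192.168.89.11:9200/_cat/indices?v' | grep nginx_access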
