Hadoop Ecosystem Setup (3 Nodes) - 16. ELK Configuration

# ==================================================================ELK Environment Preparation

# Modify file limits
# '*' applies to all Linux users; save, exit, and log back in for the changes to take effect.
vi /etc/security/limits.conf

* soft nofile 65536
* hard nofile 65536
* soft nproc 2048
* hard nproc 4096
* soft memlock unlimited
* hard memlock unlimited
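
# Optional sanity check after logging back in; the values should reflect the
# settings above:
ulimit -n    # max open files
ulimit -u    # max user processes
ulimit -l    # max locked memory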


# Adjust the process limit
# Change to the following configuration
vi /etc/security/limits.d/20-nproc.conf

*          soft    nproc     4096
root       soft    nproc     unlimited


# Adjust the virtual memory map count & max open file handles
vi /etc/sysctl.conf

vm.max_map_count=655360
fs.file-max=655360


# Then run this command to apply the changes:
sysctl -p
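
# Optional check that the kernel picked up the new values:
sysctl vm.max_map_count
sysctl fs.file-max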



shutdown -h now
# Snapshot: before ELK

 

# ==================================================================Install Elasticsearch
tar -zxvf ~/elasticsearch-6.2.4.tar.gz -C /usr/local
rm -r ~/elasticsearch-6.2.4.tar.gz


# ==================================================================Install Logstash
tar -zxvf ~/logstash-6.2.4.tar.gz -C /usr/local
rm -r ~/logstash-6.2.4.tar.gz


# ==================================================================Install Kibana
tar -zvxf ~/kibana-6.2.4-linux-x86_64.tar.gz -C /usr/local
mv /usr/local/kibana-6.2.4-linux-x86_64 /usr/local/kibana-6.2.4
rm -r ~/kibana-6.2.4-linux-x86_64.tar.gz

# Environment variables
# ==================================================================node1 node2 node3

vi /etc/profile

# Add the following below the line 'export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL'

export JAVA_HOME=/usr/java/jdk1.8.0_111
export JRE_HOME=/usr/java/jdk1.8.0_111/jre
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export REDIS_HOME=/usr/local/redis-4.0.2
export ERLANG_HOME=/usr/local/erlang
export RABBITMQ_HOME=/usr/local/rabbitmq_server-3.7.5
export MONGODB_HOME=/usr/local/mongodb-3.4.5
export NGINX_HOME=/usr/local/nginx
export CATALINA_BASE=/usr/local/tomcat
export CATALINA_HOME=/usr/local/tomcat
export TOMCAT_HOME=/usr/local/tomcat
export KEEPALIVED_HOME=/usr/local/keepalived
export ELASTICSEARCH_HOME=/usr/local/elasticsearch-6.2.4
export LOGSTASH_HOME=/usr/local/logstash-6.2.4
export KIBANA_HOME=/usr/local/kibana-6.2.4

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$STORM_HOME/bin:$REDIS_HOME/bin:$ERLANG_HOME/bin:$RABBITMQ_HOME/ebin:$RABBITMQ_HOME/sbin:$MONGODB_HOME/bin:$NGINX_HOME/sbin:$CATALINA_HOME/bin:$KEEPALIVED_HOME/sbin:$ELASTICSEARCH_HOME/bin:$LOGSTASH_HOME/bin:$KIBANA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# Apply the environment variables
source /etc/profile


# Verify the settings
echo $ELASTICSEARCH_HOME
echo $LOGSTASH_HOME
echo $KIBANA_HOME


# Elasticsearch, Logstash, and Kibana must not run as root,
# but Linux limits the files and threads a non-root account can use concurrently
# (hence the limits adjustments earlier).
# useradd elk (username) -g elk (group) -p 123456 (password)
groupadd elk
useradd elk -g elk -p 123456
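
# Note: useradd -p expects an already-hashed password, so "123456" above may not
# work at login. On CentOS/RHEL you can set it in plain text instead with:
echo 123456 | passwd --stdin elk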


chown -R elk:elk $ELASTICSEARCH_HOME/


# Switch to the elk account
su - elk

# Create the Elasticsearch data and log directories
# (the log directory must exist for the nohup redirect used later)
mkdir $ELASTICSEARCH_HOME/data
mkdir $ELASTICSEARCH_HOME/logs

# chown -R elk:elk $ELASTICSEARCH_HOME/data
# chown -R elk:elk $ELASTICSEARCH_HOME/logs
# ll $ELASTICSEARCH_HOME

# Edit the Elasticsearch configuration file
vi $ELASTICSEARCH_HOME/config/elasticsearch.yml

# Cluster name
cluster.name: es_cluster
# Node name
node.name: node1
# Node host/IP
network.host: node1
http.port: 9200
# TCP transport port
transport.tcp.port: 9300
# Data directory
path.data: /usr/local/elasticsearch-6.2.4/data
# Log directory
path.logs: /usr/local/elasticsearch-6.2.4/logs
# Whether this node may be elected master
node.master: true
# Whether this node stores data
node.data: true
# Multicast discovery (unused here, left commented out)
# discovery.zen.ping.multicast.enabled: true
# Initial list of master-eligible nodes probed when a node (master or data) starts
discovery.zen.ping.unicast.hosts: ["node1","node2","node3"]
# Minimum master-eligible nodes visible to form a cluster; with 3 nodes the quorum is 3/2 + 1 = 2
discovery.zen.minimum_master_nodes: 2
# Avoid cross-origin (CORS) problems
http.cors.enabled: true
http.cors.allow-origin: "*"


su root

scp -r $ELASTICSEARCH_HOME  node2:/usr/local/
scp -r $ELASTICSEARCH_HOME  node3:/usr/local/

# ==================================================================node2 node3

# Apply the environment variables
source /etc/profile


# Verify the settings
echo $ELASTICSEARCH_HOME

# ==================================================================node2

groupadd elk
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/


# Switch to the elk account
su - elk


# Edit the Elasticsearch configuration file and change the following entries:
vi $ELASTICSEARCH_HOME/config/elasticsearch.yml

node.name: node2
network.host: node2

# ==================================================================node3

groupadd elk
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/


# Switch to the elk account
su - elk


# Edit the Elasticsearch configuration file and change the following entries:
vi $ELASTICSEARCH_HOME/config/elasticsearch.yml

node.name: node3
network.host: node3

su - root

shutdown -h now
# Snapshot: before starting Elasticsearch

 

# Start & health-check Elasticsearch

# ==================================================================node1 node2 node3

su - elk

nohup $ELASTICSEARCH_HOME/bin/elasticsearch > $ELASTICSEARCH_HOME/logs/es.log 2>&1 &
# $ELASTICSEARCH_HOME/bin/elasticsearch


# List the cluster nodes
curl -XGET 'http://node1:9200/_cat/nodes?pretty'

# Check the cluster health
curl http://node1:9200/_cluster/health

# List the available _cat monitoring endpoints
curl http://node1:9200/_cat

# View cluster health in tabular form
curl http://node1:9200/_cat/health
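
# Illustrative health output once all three nodes have joined; "status" should
# be "green" and "number_of_nodes" should be 3:
# {"cluster_name":"es_cluster","status":"green","number_of_nodes":3,"number_of_data_nodes":3,...}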

# Install cerebro, a web-based admin tool for Elasticsearch
# https://github.com/lmenezes/cerebro/releases
# ==================================================================Install cerebro

su - root

tar -zxvf ~/cerebro-0.8.1.tgz -C /usr/local/
mv /usr/local/cerebro-0.8.1 /usr/local/cerebro

# Start cerebro
mkdir /usr/local/cerebro/logs

nohup /usr/local/cerebro/bin/cerebro > /usr/local/cerebro/logs/cerebro.log 2>&1 &
# /usr/local/cerebro/bin/cerebro &


# The default port is 9000
# http://node1:9000
# In the connection box, enter the address below
# http://node1:9200
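
# Optional check from the shell that cerebro is listening:
curl -I http://node1:9000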

# Configure Kibana
# ==================================================================node1

chown -R elk:elk $KIBANA_HOME/

# Switch to the elk account
su - elk

# mkdir $KIBANA_HOME/data
mkdir $KIBANA_HOME/logs


# Edit the configuration
vi $KIBANA_HOME/config/kibana.yml

# Add the following
server.port: 5601
server.host: "node1"
server.name: "kibana-master"
# Kibana 6.x supports only a single elasticsearch.url (duplicate YAML keys are
# not merged; the last one wins), so point Kibana at one node:
elasticsearch.url: "http://node1:9200"
 
# Start Kibana
su - elk

nohup $KIBANA_HOME/bin/kibana > $KIBANA_HOME/logs/kibana.log 2>&1 &
# $KIBANA_HOME/bin/kibana
# http://node1:5601
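
# Optional check that Kibana is up; the status API returns JSON:
curl http://node1:5601/api/status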

# Configure Logstash
# ==================================================================node1

su - root


chown -R elk:elk $LOGSTASH_HOME/

# ll $LOGSTASH_HOME


# Switch to the elk account
su - elk

# Create the Logstash log directory
mkdir $LOGSTASH_HOME/logs

# Configure the data & log directories
vi $LOGSTASH_HOME/config/logstash.yml

# Add the following
path.data: /usr/local/logstash-6.2.4/data
path.logs: /usr/local/logstash-6.2.4/logs


su - root

scp -r $LOGSTASH_HOME  node2:/usr/local/
scp -r $LOGSTASH_HOME  node3:/usr/local/

# ==================================================================node2 node3

su - root


# Apply the environment variables
source /etc/profile


# Verify the settings
echo $LOGSTASH_HOME


chown -R elk:elk $LOGSTASH_HOME/

# Start
# ==================================================================node1 node2 node3

# Switch to the elk account
su - elk

# If Elasticsearch is not already running, start it:
nohup $ELASTICSEARCH_HOME/bin/elasticsearch > $ELASTICSEARCH_HOME/logs/es.log 2>&1 &

# ==================================================================node1

nohup /usr/local/cerebro/bin/cerebro > /usr/local/cerebro/logs/cerebro.log 2>&1 &


nohup $KIBANA_HOME/bin/kibana > $KIBANA_HOME/logs/kibana.log 2>&1 &

# ==================================================================node1

$LOGSTASH_HOME/bin/logstash -e 'input { stdin { } } output { stdout {} }'
# type: hello

$LOGSTASH_HOME/bin/logstash -e 'input { stdin { } } output { stdout {codec => rubydebug} }'
# type: hi
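
# Illustrative rubydebug output for a typed line (host/timestamp will differ):
# {
#       "message" => "hi",
#      "@version" => "1",
#    "@timestamp" => 2018-11-07T12:00:00.000Z,
#          "host" => "node1"
# }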

su - root

chown -R elk:elk /var/log/messages


su - elk

vi $LOGSTASH_HOME/config/system.conf

input {
	file {
		path => "/var/log/messages"
		type => "system"
		start_position => "beginning"
	}
}
output {    
	elasticsearch {
		hosts => ["node1:9200","node2:9200","node3:9200"]
		index => "system-%{+YYYY.MM.dd}"
	}
}
	
$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/system.conf
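
# From another shell, verify that the index was created (the date suffix will differ):
curl 'http://node1:9200/_cat/indices?v'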



su - root

chown -R elk:elk /var/log/secure


su - elk

vi $LOGSTASH_HOME/config/system_secure.conf

# Add the path of the secure log as a second file input
input {
	file {
		path => "/var/log/messages"
		type => "system"
		start_position => "beginning"
	}

	file {
		path => "/var/log/secure"
		type => "secure"
		start_position => "beginning"
	}
}
output {
    if [type] == "system" {
        elasticsearch {
            hosts => ["node1:9200","node2:9200","node3:9200"]
            index => "logs-system-%{+YYYY.MM.dd}"
        }
    }

    if [type] == "secure" {
        elasticsearch {
            hosts => ["node1:9200","node2:9200","node3:9200"]
            index => "logs-secure-%{+YYYY.MM.dd}"
        }
    }
}

$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/system_secure.conf
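
# Optional: validate the pipeline syntax without starting it:
$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/system_secure.conf --config.test_and_exit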


vi $LOGSTASH_HOME/config/nginx_log.conf

input {
     file {
        path => ["/usr/local/nginx/logs/access.log"]
        type => "nginx_log"
        start_position => "beginning"
     }
}
filter {
    grok {
		match => { "message" => "%{IPORHOST:http_host} %{IPORHOST:clientip} - %{USERNAME:remote_user} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:http_verb} %{NOTSPACE:http_request}(?: HTTP/%{NUMBER:http_version})?|%{DATA:raw_http_request})\" %{NUMBER:response} (?:%{NUMBER:bytes_read}|-) %{QS:referrer} %{QS:agent} %{QS:xforwardedfor} %{NUMBER:request_time:float}"}
    }
    geoip {
		source => "clientip"
    }
}
output {
    if [type] == "nginx_log" {
        elasticsearch {
            hosts => ["node1:9200","node2:9200","node3:9200"]
            index => "nginx_log-%{+YYYY.MM.dd}"
        }
    }
}

$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/nginx_log.conf
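
# Note: the grok pattern above expects a customized nginx log_format (vhost first,
# request time last); the stock "combined" format will not match. To smoke-test,
# send a request and check for the index from another shell:
curl -s http://node1/ > /dev/null
curl 'http://node1:9200/_cat/indices?v' | grep nginx_log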

 
