生产环境从阿里云kafka消费到es

用的阿里云的kafka消息队列产品,然后在IDC的服务器上部署logstash消费数据,再存到es。配置如下

配置文件名称:ali_kafka_to_local_es.conf

input{
      # Consumer 1: third-party API logs from Alibaba Cloud Kafka (VPC endpoint).
      kafka{
        # BUG FIX: the original line was missing the opening double quote,
        # which makes Logstash fail to parse the pipeline configuration.
        bootstrap_servers => "xxx111-1-vpc.alikafka.aliyuncs.com:9092,xxx222-2-vpc.alikafka.aliyuncs.com:9092,xxx333-3-vpc.alikafka.aliyuncs.com:9092"
        # NOTE(review): both kafka inputs share the same client_id; this works
        # but triggers JMX "InstanceAlreadyExists" warnings — consider giving
        # each input a unique client_id (e.g. a per-topic suffix).
        client_id => "logstash_192_168_16_187"
        group_id => "group_wbird_logstash7_g1"
        auto_offset_reset => "latest"
        consumer_threads => 6
        # decorate_events adds [@metadata][kafka][topic] etc., which the
        # output section relies on for routing.
        decorate_events => true
        topics => ["db_thirdpartApi_log_v2"]
        type => "json"
        codec => json {
            charset => "UTF-8"
        }
      }
      # Consumer 2: common queue logs, same cluster and consumer group.
      kafka{
        bootstrap_servers => "xxx111-1-vpc.alikafka.aliyuncs.com:9092,xxx222-2-vpc.alikafka.aliyuncs.com:9092,xxx333-3-vpc.alikafka.aliyuncs.com:9092"
        client_id => "logstash_192_168_16_187"
        group_id => "group_wbird_logstash7_g1"
        auto_offset_reset => "latest"
        consumer_threads => 6
        decorate_events => true
        topics => ["db_all_common_queuelog_v1"]
        type => "json"
        codec => json {
            charset => "UTF-8"
        }
      }
}
output {

  # Route by source Kafka topic; [@metadata][kafka][topic] exists only
  # because decorate_events => true is set on the inputs.
  # BUG FIX: the original conditions compared against "db_log_v2" and
  # "db_all_v1", which match neither of the topics actually consumed in the
  # input section — so no event would ever reach Elasticsearch. The names
  # below now match the input topics exactly.
  if [@metadata][kafka][topic] == "db_thirdpartApi_log_v2" {
        elasticsearch {
          hosts => ["http://192.168.16.100:9200","http://192.168.16.101:9200","http://192.168.16.102:9200"]
          #user => "elastic"
          #password => "7pdejZOxE9cy"
          # Daily index rollover based on the event @timestamp.
          index => "thirdpartapi_log-%{+YYYY.MM.dd}"
          timeout => 300
        }
    }
  if [@metadata][kafka][topic] == "db_all_common_queuelog_v1" {
        elasticsearch {
          hosts => ["http://192.168.16.100:9200","http://192.168.16.101:9200","http://192.168.16.102:9200"]
          #user => "elastic"
          #password => "7pdejZOxE9cy"
          index => "commonqueuelog-%{+YYYY.MM.dd}"
          timeout => 300
        }
    }
}

pipelines.yml配置内容:

# Single "main" pipeline that loads every *.conf file from conf.d,
# so dropping ali_kafka_to_local_es.conf into that directory is enough.
- pipeline.id: main
  path.config: "/etc/logstash/conf.d/*.conf"

所以配置文件就放在/etc/logstash/conf.d/目录下就可以了。

 

posted @ 2024-10-21 14:14  羊脂玉净瓶  阅读(7)  评论(0)    收藏  举报