filebeat 扫描本地日志发送到 kafka 的配置

input配置文件自动加载

# Load input definitions from external config files so they can be
# managed and reloaded independently of the main filebeat.yml.
filebeat.config.inputs:
  enabled: true  # fixed: the documented option is "enabled" — "enable" is not recognized
  path: ${path.config}/configs/*.yml
  reload.enabled: true  # pick up config file changes automatically
  reload.period: 10s  # how often to scan for changed config files

output配置

# Ship events directly to Kafka (not Logstash — the original comment was
# copied from the Logstash output template).
output.kafka:
  # Kafka broker addresses
  hosts: ["192.168.174.133:9092","192.168.174.134:9092"]
  topic: test
  enabled: true

完整的filebeat配置文件

# ============================== Filebeat inputs ===============================
# Auto-load input definitions from external config files.
filebeat.config.inputs:
  enabled: true  # fixed: the documented option is "enabled" — "enable" is not recognized
  path: ${path.config}/configs/*.yml
  reload.enabled: true  # pick up config file changes automatically
  reload.period: 10s  # how often to scan for changed config files
# ============================== Filebeat modules ==============================
# Standard module loading from modules.d; left with reloading disabled.
filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

# ------------------------------ kafka Output -------------------------------
# Ship events directly to Kafka (the "Logstash hosts" comment below was a
# leftover from the Logstash output template).
output.kafka:
  # Kafka broker addresses
  hosts: ["192.168.174.133:9092","192.168.174.134:9092"]
  topic: test
  enabled: true

# ================================= Processors =================================
# Enrich each event with environment metadata before it is shipped.
processors:
  # Add host fields unless the event was forwarded from another host.
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

独立的input配置文件

# Standalone input file (dropped into ${path.config}/configs/): tail local
# Windows log files and merge stack-trace continuation lines.
- type: log

  enabled: true
  paths:
    - D:\logs\*.log
  # Other optional parameters:
  #include_lines: ['.*Exception.*']           # only ship lines matching these patterns
  spool_size: 1024                            # batch up to 1024 events before sending
  idle_timeout: '5s'                          # flush at least every 5 seconds regardless
  scan_frequency: '10s'                       # scan the directory every 10 seconds
  harvester_buffer_size: 16384                # per-file read buffer size in bytes
  # NOTE(review): spool_size/idle_timeout look like legacy filebeat 1.x options —
  # confirm the filebeat version in use still honors them.

  # Multi-line merge: append Java stack-trace continuation lines ("  at ...",
  # "...", "Caused by:") to the preceding event. (The original comment claimed
  # it merges lines not starting with '[' — that does not match this pattern.)
  multiline.pattern: '^[[:space:]]+(at|\.{3})\b|^Caused by:'
  multiline.negate: false
  multiline.match: after

参考大牛的链接：https://zhuanlan.zhihu.com/p/380001028

posted @ 2021-11-24 15:23  風巽千龍  阅读(615)  评论(0)    收藏  举报