Prometheus federation: unified metrics collection

The configuration below is the prometheus.yml of the top-level (global) Prometheus server. It uses a single 'federate' scrape job to pull selected series from two lower-level Prometheus instances through their /federate endpoints.

# my global config
global:
  scrape_interval:     30s # Set the scrape interval to every 30 seconds. Default is every 1 minute.
  evaluation_interval: 30s # Evaluate rules every 30 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
#alerting:
#  alertmanagers:
#  - static_configs:
#    - targets:
#      # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - /data/monitor/prometheus/rules/prometheus-k8s-rulefiles-0/monitoring-kube-prometheus-rules.yaml
  - /data/monitor/prometheus/rules/prometheus-k8s-rulefiles-0/monitoring-kubernetes-monitoring-rules.yaml
  - /data/monitor/prometheus/rules/prometheus-k8s-rulefiles-0/monitoring-kube-state-metrics-rules.yaml
  - /data/monitor/prometheus/rules/prometheus-k8s-rulefiles-0/monitoring-node-exporter-rules.yaml
  - /data/monitor/prometheus/rules/prometheus-k8s-rulefiles-0/monitoring-prometheus-k8s-prometheus-rules.yaml
  - /data/monitor/prometheus/rules/prometheus-k8s-rulefiles-0/monitoring-prometheus-operator-rules.yaml


# A single 'federate' scrape job that pulls the selected series
# from the lower-level Prometheus instances.
scrape_configs:
  - job_name: 'federate'
    scrape_interval: 30s

    honor_labels: true        # keep the label values exposed by the source Prometheus instead of overwriting them
    metrics_path: '/federate'

    params:
      # 'match[]' selects which series the source's /federate endpoint exposes;
      # the 'job:.*' matcher targets recording rules (see the sketch after the config)
      'match[]':
        - '{job="prometheus"}'
        - '{__name__=~"job:.*"}'
        - '{job=~"prometheus.*"}'
        - '{job="docker"}'
        - '{job=~"node.*"}'
        - '{job="kubelet"}'
        - '{job=~"kube.*"}'
        - '{job=~"apiserver.*"}'
    static_configs:
      # the two lower-level Prometheus instances to federate from
      - targets:
        - '192.168.15.131:31816'
        - '172.22.128.221:31816'
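
For reference, the '{__name__=~"job:.*"}' matcher only federates series produced by recording rules whose names start with the 'job:' prefix. A minimal sketch of such a rule group, which would be defined on the lower-level (source) Prometheus servers, could look like the following; the group name, record names, and expressions are illustrative assumptions, not rules taken from this setup:

groups:
  - name: federation-aggregations        # illustrative group name
    rules:
      # aggregate per-job CPU usage so only the summary series is federated
      - record: job:node_cpu_seconds:rate5m
        expr: sum by (job) (rate(node_cpu_seconds_total[5m]))
      # per-job count of healthy scrape targets
      - record: job:up:sum
        expr: sum by (job) (up)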
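
The Alertmanager section near the top of the file is commented out, so alerts produced by the loaded rule files are evaluated but not delivered anywhere. If an Alertmanager were running, that block could be enabled roughly as follows; the address shown is a placeholder, not an endpoint from this environment:

alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - 'alertmanager.example.local:9093'   # placeholder address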

 
