Learning the K8s Path, Part 10 --- Installing Alertmanager to set up alerting, and manually influencing the K8s scheduling policy

1: Install Alertmanager and set up the alerting system

1.1: Prepare the image

[root@k8s-6-96 ~]# docker pull docker.io/prom/alertmanager:v0.14.0
[root@k8s-6-96 ~]# docker images|grep alert
[root@k8s-6-96 ~]# docker tag 23744b2d645c harbor.auth.com/k8s/alertmanager:v0.14.0
[root@k8s-6-96 ~]# docker push  harbor.auth.com/k8s/alertmanager:v0.14.0

1.2: Prepare the resource manifests

[root@k8s-6-96 ~]# mkdir /data/k8s-yaml/alertmanager
[root@k8s-6-96 ~]# cd /data/k8s-yaml/alertmanager/
[root@k8s-6-96 alertmanager]# cat cm.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-config
  namespace: infra
data:
  config.yml: |-
    global:
      # How long to wait before declaring an alert resolved when no further notifications for it arrive
      resolve_timeout: 5m
      # Email (SMTP) delivery settings
      smtp_smarthost: 'smtp.163.com:25'
      smtp_from: 'xxx@163.com'
      smtp_auth_username: 'xxx@163.com'
      smtp_auth_password: 'xxxxxx'
      smtp_require_tls: false
      # The root route that all incoming alerts enter; it defines the dispatch policy
    route:
      # Labels used to regroup incoming alerts; for example, alerts carrying cluster=A and alertname=LatencyHigh will be aggregated into a single group
      group_by: ['alertname', 'cluster']
      # After a new alert group is created, wait at least group_wait before sending the initial notification, so that several alerts for the same group can be collected and fired together.
      group_wait: 30s
      # After the first notification for a group has been sent, wait group_interval before sending notifications about new alerts added to that group.
      group_interval: 5m
      # If an alert has already been sent successfully, wait repeat_interval before sending it again
      repeat_interval: 5m
      # Default receiver: alerts not matched by any route are sent to it
      receiver: default
    receivers:
    - name: 'default'
      email_configs:
      - to: 'xxxx@qq.com'
        send_resolved: true
[root@k8s-6-96 alertmanager]# cat dp.yaml 
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: alertmanager
  namespace: infra
spec:
  replicas: 1
  selector:
    matchLabels:
      app: alertmanager
  template:
    metadata:
      labels:
        app: alertmanager
    spec:
      containers:
      - name: alertmanager
        image: harbor.auth.com/k8s/alertmanager:v0.14.0
        args:
          - "--config.file=/etc/alertmanager/config.yml"
          - "--storage.path=/alertmanager"
        ports:
        - name: alertmanager
          containerPort: 9093
        volumeMounts:
        - name: alertmanager-cm
          mountPath: /etc/alertmanager
      volumes:
      - name: alertmanager-cm
        configMap:
          name: alertmanager-config
      imagePullSecrets:
      - name: harbor
[root@k8s-6-96 alertmanager]# cat svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: alertmanager
  namespace: infra
spec:
  selector:
    app: alertmanager
  ports:
    - port: 80
      targetPort: 9093

1.3: Apply the resource manifests on any compute node

[root@k8s-6-94 ~]# kubectl apply -f http://k8s-yaml.auth.com/alertmanager/cm.yaml
[root@k8s-6-94 ~]# kubectl apply -f http://k8s-yaml.auth.com/alertmanager/dp.yaml
[root@k8s-6-94 ~]# kubectl apply -f http://k8s-yaml.auth.com/alertmanager/svc.yaml
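
A quick check that the pod came up and the Service selects it (this relies only on the labels and namespace defined in the manifests above):

[root@k8s-6-94 ~]# kubectl -n infra get pods -l app=alertmanager
[root@k8s-6-94 ~]# kubectl -n infra get svc alertmanager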

1.4: Create the alerting rules file

[root@k8s-6-96 ~]# cd /data/nfs-volume/prometheus/etc
[root@k8s-6-96 etc]# cat rules.yml
groups:
- name: hostStatsAlert
  rules:
  - alert: hostCpuUsageAlert
    expr: sum(avg without (cpu)(irate(node_cpu{mode!='idle'}[5m]))) by (instance) > 0.85
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "{{ $labels.instance }} CPU usage above 85% (current value: {{ $value }}%)"
  - alert: hostMemUsageAlert
    expr: (node_memory_MemTotal - node_memory_MemAvailable)/node_memory_MemTotal > 0.85
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "{{ $labels.instance }} MEM usage above 85% (current value: {{ $value }}%)"
  - alert: OutOfInodes
    expr: node_filesystem_files_free{fstype="overlay",mountpoint="/"} / node_filesystem_files{fstype="overlay",mountpoint="/"} * 100 < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Out of inodes (instance {{ $labels.instance }})"
      description: "Disk is almost running out of available inodes (< 10% left) (current value: {{ $value }})"
  - alert: OutOfDiskSpace
    expr: node_filesystem_free{fstype="overlay",mountpoint ="/rootfs"} / node_filesystem_size{fstype="overlay",mountpoint ="/rootfs"} * 100 < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Out of disk space (instance {{ $labels.instance }})"
      description: "Disk is almost full (< 10% left) (current value: {{ $value }})"
  - alert: UnusualNetworkThroughputIn
    expr: sum by (instance) (irate(node_network_receive_bytes[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual network throughput in (instance {{ $labels.instance }})"
      description: "Host network interfaces are probably receiving too much data (> 100 MB/s) (current value: {{ $value }})"
  - alert: UnusualNetworkThroughputOut
    expr: sum by (instance) (irate(node_network_transmit_bytes[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual network throughput out (instance {{ $labels.instance }})"
      description: "Host network interfaces are probably sending too much data (> 100 MB/s) (current value: {{ $value }})"
  - alert: UnusualDiskReadRate
    expr: sum by (instance) (irate(node_disk_bytes_read[2m])) / 1024 / 1024 > 50
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk read rate (instance {{ $labels.instance }})"
      description: "Disk is probably reading too much data (> 50 MB/s) (current value: {{ $value }})"
  - alert: UnusualDiskWriteRate
    expr: sum by (instance) (irate(node_disk_bytes_written[2m])) / 1024 / 1024 > 50
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk write rate (instance {{ $labels.instance }})"
      description: "Disk is probably writing too much data (> 50 MB/s) (current value: {{ $value }})"
  - alert: UnusualDiskReadLatency
    expr: rate(node_disk_read_time_ms[1m]) / rate(node_disk_reads_completed[1m]) > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk read latency (instance {{ $labels.instance }})"
      description: "Disk latency is growing (read operations > 100ms) (current value: {{ $value }})"
  - alert: UnusualDiskWriteLatency
    expr: rate(node_disk_write_time_ms[1m]) / rate(node_disk_writes_completed[1m]) > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk write latency (instance {{ $labels.instance }})"
      description: "Disk latency is growing (write operations > 100ms) (current value: {{ $value }})"
- name: http_status
  rules:
  - alert: ProbeFailed
    expr: probe_success == 0
    for: 1m
    labels:
      severity: error
    annotations:
      summary: "Probe failed (instance {{ $labels.instance }})"
      description: "Probe failed (current value: {{ $value }})"
  - alert: StatusCode
    expr: probe_http_status_code <= 199 OR probe_http_status_code >= 400
    for: 1m
    labels:
      severity: error
    annotations:
      summary: "Status Code (instance {{ $labels.instance }})"
      description: "HTTP status code is not 200-399 (current value: {{ $value }})"
  - alert: SslCertificateWillExpireSoon
    expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 30
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "SSL certificate will expire soon (instance {{ $labels.instance }})"
      description: "SSL certificate expires in 30 days (current value: {{ $value }})"
  - alert: SslCertificateHasExpired
    expr: probe_ssl_earliest_cert_expiry - time()  <= 0
    for: 5m
    labels:
      severity: error
    annotations:
      summary: "SSL certificate has expired (instance {{ $labels.instance }})"
      description: "SSL certificate has expired already (current value: {{ $value }})"
  - alert: BlackboxSlowPing
    expr: probe_icmp_duration_seconds > 2
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Blackbox slow ping (instance {{ $labels.instance }})"
      description: "Blackbox ping took more than 2s (current value: {{ $value }})"
  - alert: BlackboxSlowRequests
    expr: probe_http_duration_seconds > 2
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Blackbox slow requests (instance {{ $labels.instance }})"
      description: "Blackbox request took more than 2s (current value: {{ $value }})"
  - alert: PodCpuUsagePercent
    expr: sum(sum(label_replace(irate(container_cpu_usage_seconds_total[1m]),"pod","$1","container_label_io_kubernetes_pod_name", "(.*)"))by(pod) / on(pod) group_right kube_pod_container_resource_limits_cpu_cores *100 )by(container,namespace,node,pod,severity) > 80
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Pod cpu usage percent has exceeded 80% (current value: {{ $value }}%)"

1.5: Append the following to the end of the Prometheus configuration file:

[root@k8s-6-96 ~]# cd /data/nfs-volume/prometheus/etc
[root@k8s-6-96 etc]# vi prometheus.yml
alerting:
  alertmanagers:
    - static_configs:
        - targets: ["alertmanager"]
rule_files:
 - "/data/etc/rules.yml"

1.6: Have Prometheus reload its configuration

In production we avoid restarting the Prometheus pod whenever possible: it uses a lot of resources and a restart can easily drag down the cluster. Instead, the configuration can be reloaded gracefully:

[root@k8s-6-94 ~]# kubectl -n infra get pod  -o wide
NAME                            READY   STATUS    RESTARTS   AGE    IP            NODE                NOMINATED NODE   READINESS GATES
prometheus-6bdf55cb8-lggqm      1/1     Running   0          25h    172.6.94.8    k8s-6-94.host.com   <none>           <none>
[root@k8s-6-94 ~]# ps -ef |grep prometheus
root      7116  7097  3 Aug03 ?       00:56:08 /bin/prometheus --config.file=/data/etc/prometheus.yml --storage.tsdb.path=/data/prom-db --storage.tsdb.retention=72h --storage.tsdb.min-block-duration=10m
[root@k8s-6-94 ~]# kill -SIGHUP 7116
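
For reference, Prometheus 2.x also exposes an HTTP reload endpoint, but only when it is started with --web.enable-lifecycle; the ps output above does not show that flag, so for this deployment the SIGHUP approach is the one that applies. With the flag enabled, the reload would simply be:

[root@k8s-6-94 ~]# curl -X POST http://prometheus.auth.com/-/reload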

1.7: Open http://prometheus.auth.com/ in a browser and click Alerts; if the rules show up there, the installation succeeded.

Addendum:

  Seen on another blog: a cm.yaml that can be added to the manifests in place of (replacing) the one above. (Personally I find it better.)

[root@ops-200 alertmanager]# vim configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-config
  namespace: infra
data:
  config.yml: |
    global:
      resolve_timeout: 5m
      smtp_from: 'XXXX@qq.com'
      smtp_smarthost: 'smtp.qq.com:465'
      smtp_auth_username: 'XXXX@qq.com'
      smtp_auth_password: 'passwd'
      smtp_require_tls: false
      smtp_hello: 'XXXX@qq.com'
    templates:   
      - '/etc/alertmanager/*.tmpl'
    route:
      group_by: ['alertname', 'cluster']
      group_wait: 20s     
      group_interval: 20s 
      repeat_interval: 12h 
      receiver: 'email' 
    receivers:
    - name: 'email'
      email_configs:
      - to: '186xxxxxxxx@163.com'   
        send_resolved: true 
        html: '{{ template "email.to.html" . }}' 
        headers: { Subject: " {{ .CommonLabels.instance }} {{ .CommonAnnotations.summary }}" }   
  email.tmpl: |
    {{ define "email.to.html" }}
    {{- if gt (len .Alerts.Firing) 0 -}}
    {{ range .Alerts }}
    Alerting program: prometheus_alert
    Severity: {{ .Labels.severity }}
    Alert name: {{ .Labels.alertname }}
    Affected host: {{ .Labels.instance }}
    Summary: {{ .Annotations.summary }}
    Fired at: {{ .StartsAt.Format "2006-01-02 15:04:05" }}
    {{ end }}{{ end -}}
    
    {{- if gt (len .Alerts.Resolved) 0 -}}
    {{ range .Alerts }}
    Alerting program: prometheus_alert
    Severity: {{ .Labels.severity }}
    Alert name: {{ .Labels.alertname }}
    Affected host: {{ .Labels.instance }}
    Summary: {{ .Annotations.summary }}
    Fired at: {{ .StartsAt.Format "2006-01-02 15:04:05" }}
    Resolved at: {{ .EndsAt.Format "2006-01-02 15:04:05" }}
    {{ end }}{{ end -}}
    
    {{- end }}
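
Whichever cm.yaml variant is used, the config can be validated with amtool before relying on it. A sketch that runs the check inside the running alertmanager pod (assuming the image bundles amtool with a check-config subcommand, as the official prom/alertmanager images do):

[root@k8s-6-94 ~]# POD=$(kubectl -n infra get pod -l app=alertmanager -o jsonpath='{.items[0].metadata.name}')
[root@k8s-6-94 ~]# kubectl -n infra exec $POD -- amtool check-config /etc/alertmanager/config.yml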

 The above covers email alerting; nowadays, however, most setups deliver alerts through WeChat Work or DingTalk instead, as described below.

2: Add WeChat Work alerting to Alertmanager

2.1: Register a WeChat Work account on the official website (enterprise verification is not required)

2.2: Go to the Apps page and create an application (a self-built app): click the Create Application button -> fill in the application details:

2.3: Prepare the resource manifest

[root@k8s-6-96 ~]# cd /data/k8s-yaml/alertmanager/
[root@k8s-6-96 alertmanager]# cat wechat.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-config
  namespace: infra
data:
  config.yml: |
    global:
      resolve_timeout: 5m

    templates:   
      - '/etc/alertmanager/*.tmpl'

    route:
      group_by: ['alertname', 'cluster']
      group_wait: 20s
      group_interval: 20s
      repeat_interval: 1m
      receiver: 'wechat'

    receivers:
    - name: 'wechat'
      wechat_configs:
        - send_resolved: true
          # Corporation ID (corp_id)
          corp_id: 'wwd9a373d94f5****'
          # Department ID
          # to_party: '3'
          # User who will receive the alerts (user ID)
          to_user: 'liyuanping'
          # agentId of the self-built application
          agent_id: '1000016'
          # Secret of the self-built application
          api_secret: 'gG5yXp4wuZB1iTYi-LqdISTbK9WTh1SSqXHm*****'

  wechat.tmpl: |
    {{ define "wechat.default.message" }}
    {{ range $i, $alert :=.Alerts }}
    ====Monitoring Alert====
    Status: {{ .Status }}
    Severity: {{ $alert.Labels.severity }}
    Alert name: {{ $alert.Labels.alertname }}
    Application: {{ $alert.Annotations.summary }}
    Host: {{ $alert.Labels.instance }}
    Details: {{ $alert.Annotations.description }}
    Threshold: {{ $alert.Annotations.value }}
    Fired at: {{ $alert.StartsAt.Format "2006-01-02 15:04:05" }}
    ====end====
    {{ end }}
    {{ end }}

2.4: Apply the manifest on any compute node so that it replaces the cm.yaml above (the ConfigMap name is the same), then restart the alertmanager container
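
A minimal sketch of that step, assuming wechat.yaml is served from the same k8s-yaml web root as the earlier manifests and that the alertmanager Deployment from section 1.2 is still running in the infra namespace (deleting its pod is enough for the Deployment to recreate it with the new config):

[root@k8s-6-94 ~]# kubectl apply -f http://k8s-yaml.auth.com/alertmanager/wechat.yaml
[root@k8s-6-94 ~]# kubectl -n infra delete pod -l app=alertmanager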

3: Add DingTalk alerting to Alertmanager

3.1: Create a custom robot in DingTalk; this generates an access_token. Keep the access_token safe

3.2: Install the prometheus-webhook-dingtalk plugin

# Pull the image from docker.io
[root@k8s-6-96 ~]# docker pull timonwong/prometheus-webhook-dingtalk

# Start the container
[root@k8s-6-96 ~]# docker run -d -p 8060:8060 --name webhook timonwong/prometheus-webhook-dingtalk --ding.profile="webhook1=https://oapi.dingtalk.com/robot/send?access_token={your own DingTalk access_token}"

3.3: Prepare the resource manifest, apply it in place of (replacing) the cm.yaml above, and restart the alertmanager container

[root@k8s-6-96 ~]# cd /data/k8s-yaml/promethus/alertmanager/
[root@k8s-6-96 alertmanager]# cat DingTalk.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-config
  namespace: infra
data:
  config.yml: |
    global:
      resolve_timeout: 5m

    route:
      group_by: ['alertname', 'cluster']
      group_wait: 20s
      group_interval: 20s
      repeat_interval: 1m
      receiver: 'webhook'

    receivers:
    - name: 'webhook'
      webhook_configs:
        - url: http://192.168.6.96:8060/dingtalk/webhook1/send
          send_resolved: true

3.4: Test whether alert messages are received
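
If nothing arrives, a quick way to isolate the problem is to call the DingTalk robot API directly with curl, bypassing Alertmanager and the webhook container (a sketch: use your own access_token, and if the robot was created with a keyword security setting, the message text must contain that keyword):

[root@k8s-6-96 ~]# curl -H 'Content-Type: application/json' -d '{"msgtype":"text","text":{"content":"test alert from curl"}}' 'https://oapi.dingtalk.com/robot/send?access_token={your own DingTalk access_token}'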

3.5: The default DingTalk alert template is not very pretty, so a bit of simple polishing is needed; thanks to the two authors below for their work:

  Reference blog: https://www.geek-share.com/detail/2785688766.html

  GitHub: https://github.com/yangpeng14/DevOps/blob/master/kubernetes/AlertManager-%E9%92%89%E9%92%89%E6%8A%A5%E8%AD%A6.md

  3.5.1: Download the ready-made image directly, start a container, and use it as-is.

[root@k8s-6-96 ~]# docker run -d -p 5000:5000 --name gm -e ROBOT_TOKEN={your own DingTalk access_token} yangpeng2468/alertmanager-dingtalk-hook:v1

  3.5.2: Modify the resource manifest and restart the alertmanager container

[root@k8s-6-96 alertmanager]# cat DingTalk.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-config
  namespace: infra
data:
  config.yml: |
    global:
      resolve_timeout: 5m

    route:
      group_by: ['alertname', 'cluster']
      group_wait: 20s
      group_interval: 20s
      repeat_interval: 1m
      receiver: 'webhook'

    receivers:
    - name: 'webhook'
      webhook_configs: 
        - url: http://192.168.6.96:5000
          send_resolved: true

  3.5.3: You can also modify the hook script to your own style, build and push a new image, start a container from that image, and then restart the alertmanager container

1: Download the tar package from Baidu Cloud and import the image with docker load
[root@k8s-6-96 ~]# docker load < alertmanager-dingtalk-hook.tar
2: An image imported from a tar has no tag, so tag it with docker tag
[root@k8s-6-96 ~]# docker tag 87ef6b3af368 harbor.auth.com/public/alertmanager-dingtalk-hook:v1.0
3: Start the container
[root@k8s-6-96 ~]# docker run -d -p 5000:5000 --name gm -e ROBOT_TOKEN={your own DingTalk access_token}  harbor.auth.com/public/alertmanager-dingtalk-hook:v1.0

  3.5.4: The resource manifest needs no changes; just restart the alertmanager container and verify the new template

4: A brief introduction to manually influencing the K8s scheduling policy

Three ways to manually influence K8s scheduling (a small pod-spec sketch of methods 2 and 3 follows this list):
  1: Taints and tolerations
    Taint: set on a compute node
    Toleration: declares whether a pod can tolerate a node's taints

  2: nodeName: run the pod on one specific, named node
  3: nodeSelector: use a label selector to run the pod on a class of nodes
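
A minimal pod-spec sketch of methods 2 and 3, assuming the node k8s-6-94.host.com exists and has been labeled with kubectl label node k8s-6-94.host.com disktype=ssd (the disktype label and the nginx image are placeholders for illustration only):

apiVersion: v1
kind: Pod
metadata:
  name: demo-nodename
spec:
  # Method 2: bypass the scheduler and bind the pod to this exact node
  nodeName: k8s-6-94.host.com
  containers:
  - name: nginx
    image: nginx:1.7.9
---
apiVersion: v1
kind: Pod
metadata:
  name: demo-nodeselector
spec:
  # Method 3: schedule only onto nodes that carry the disktype=ssd label
  nodeSelector:
    disktype: ssd
  containers:
  - name: nginx
    image: nginx:1.7.9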

Taint a compute node:
  Syntax:  kubectl taint node node1 key=value:NoSchedule
  Example: kubectl taint node k8s-6-94.host.com distype=ssd:NoSchedule

Remove a taint from a compute node:
  Example: kubectl taint node k8s-6-94.host.com distype-
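
To check which taints a node currently carries (a small sketch using the node from the example above):
  kubectl describe node k8s-6-94.host.com | grep -A3 Taints
  kubectl get node k8s-6-94.host.com -o jsonpath='{.spec.taints}'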

Example 1: Prometheus monitoring is particularly memory-hungry, so run Prometheus on a dedicated compute node and keep other pods from being scheduled onto that node (a note on actually pinning the pod follows the toleration snippet below).
1: Taint the compute node
  kubectl taint node k8s-6-94.host.com quedian=jiankong:NoSchedule
2: Add a toleration to the pod controller

tolerations:
- key: quedian
  value: jiankong
  effect: NoSchedule
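
Note that the toleration only allows the Prometheus pod onto the tainted node; it does not force it there. To actually pin Prometheus to that node, also label the node and add a nodeSelector next to the tolerations in the Deployment's pod spec. A sketch, where the jiankong=prometheus label is purely illustrative:

# kubectl label node k8s-6-94.host.com jiankong=prometheus
nodeSelector:
  jiankong: prometheus
tolerations:
- key: quedian
  value: jiankong
  effect: NoSchedule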

Example 2: When a compute node fails and has to be taken out of service, evict the pods running on it:
  kubectl taint node k8s-6-94.host.com key=guzhang:NoExecute
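
An alternative for the same situation is to cordon and drain the node instead of tainting it by hand; a sketch (DaemonSet pods are skipped, and --delete-local-data also evicts pods that use emptyDir volumes):
  kubectl drain k8s-6-94.host.com --ignore-daemonsets --delete-local-data
  kubectl uncordon k8s-6-94.host.com    # bring the node back once it is repaired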

When a pod cannot be deleted normally, force-delete it:
  [root@k8s-6-94 ~]# kubectl delete pods traefik-ingress-rbbnb -n kube-system --force --grace-period=0
