Title

k8s部署prometheus+grafana

部署prometheus

创建namespace

kubectl create namespace prometheus

部署prometheus的pvc

vim prometheus-pvc.yaml

# NFS-backed PersistentVolume for the Prometheus TSDB, plus the PVC that
# binds to it via the "pv" label selector.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv-prometheus
  labels:
    pv: nfs-pv-prometheus
  annotations:
    volume.beta.kubernetes.io/mount-options: "noatime,nodiratime,noresvport,nolock,rsize=1048576,wsize=1048576,hard"
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /nfs/prometheus
    server: 10.50.195.2
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-pvc-prometheus
  namespace: prometheus
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
  # Bind to the PV above by label rather than relying on automatic matching.
  selector:
    matchLabels:
      pv: nfs-pv-prometheus

 

Apply执行

kubectl apply -f prometheus-pvc.yaml

kubectl get pvc -n prometheus

kubectl get pv

部署prometheus的configmap

vim prometheus-cm.yaml

# Prometheus scrape configuration, mounted into the Prometheus pod as
# /etc/prometheus/prometheus.yml via the config-volume ConfigMap mount.
# FIX: the original was invalid YAML — "metadata", "data", "global",
# "static_configs", "tls_config", "relabel_configs", "kubernetes_sd_configs"
# and "params" were missing the trailing ':'.
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: prometheus
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s       # default scrape interval for all jobs
      evaluation_interval: 15s   # how often recording/alerting rules run
    scrape_configs:
    # Prometheus self-monitoring.
    - job_name: prometheus
      honor_timestamps: true
      scrape_interval: 15s
      scrape_timeout: 15s
      scheme: http
      metrics_path: /metrics
      static_configs:
      - targets:
        - localhost:9090

    # kube-apiserver metrics via the "kubernetes" Service in "default",
    # authenticated with the pod's in-cluster ServiceAccount token.
    - job_name: 'kubernetes-apiservers'
      kubernetes_sd_configs:
      - role: endpoints
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: default;kubernetes;https

    # Service endpoints annotated with prometheus.io/scrape: "true";
    # scheme/path/port are overridable via further prometheus.io/* annotations.
    - job_name: 'kubernetes-service-endpoints'
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name

    # Blackbox-probed Services (prometheus.io/probe: "true").
    - job_name: 'kubernetes-services'
      kubernetes_sd_configs:
      - role: service
      metrics_path: /probe
      params:
        module: [http_2xx]
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
        action: keep
        regex: true
      - source_labels: [__address__]
        target_label: __param_target
      - target_label: __address__
        # TODO: point at a real blackbox-exporter address before relying on this job.
        replacement: blackbox-exporter.example.com:9115
      - source_labels: [__param_target]
        target_label: instance
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        target_label: kubernetes_name

    # Blackbox-probed Ingresses (prometheus.io/probe: "true").
    - job_name: 'kubernetes-ingresses'
      kubernetes_sd_configs:
      - role: ingress
      relabel_configs:
      - source_labels: [__meta_kubernetes_ingress_annotation_prometheus_io_probe]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
        regex: (.+);(.+);(.+)
        replacement: ${1}://${2}${3}
        target_label: __param_target
      - target_label: __address__
        # TODO: point at a real blackbox-exporter address before relying on this job.
        replacement: blackbox-exporter.example.com:9115
      - source_labels: [__param_target]
        target_label: instance
      - action: labelmap
        regex: __meta_kubernetes_ingress_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_ingress_name]
        target_label: kubernetes_name

    # Pods annotated with prometheus.io/scrape: "true".
    - job_name: 'kubernetes-pods'
      kubernetes_sd_configs:
      - role: pod
      relabel_configs:
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: kubernetes_pod_name

    # mysqld-exporter instances, exposed as NodePort Services 30015/30016
    # (see mysql-exporter-master.yaml / mysql-exporter-slave.yaml).
    - job_name: 'mysql_node_exporter_master'
      static_configs:
        - targets: [10.50.195.2:30015]
    - job_name: 'mysql_node_exporter_slave'
      static_configs:
        - targets: [10.50.195.2:30016]

    # node-exporter on every cluster node: rewrite the kubelet port (10250)
    # discovered by node SD to the node-exporter port (9100).
    - job_name: 'k8s-node'
      kubernetes_sd_configs:
      - role: node
      relabel_configs:
      - source_labels: [__address__]
        regex: '(.*):10250'
        replacement: '${1}:9100'
        target_label: __address__
        action: replace
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)

 

Apply执行

kubectl apply -f prometheus-cm.yaml

kubectl get cm -n prometheus

部署prometheus的rbac

vim prometheus-rabc.yaml

# RBAC for Prometheus service discovery: read access to nodes, services,
# endpoints, pods, configmaps, ingresses and the /metrics endpoint.
# FIX: in the original, "namespace: prometheus" was un-indented (column 0)
# both in the ServiceAccount metadata and in the ClusterRoleBinding subject,
# turning it into a stray top-level key and making the manifests invalid.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - nodes/proxy
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["get"]
- apiGroups:
  # "extensions" covers legacy clusters; "networking.k8s.io" is where
  # Ingress lives on current Kubernetes versions.
  - extensions
  - networking.k8s.io
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: prometheus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: prometheus

Apply执行

kubectl apply -f prometheus-rabc.yaml

部署prometheus的deployment

先给pvc的挂载目录授权

chown 65534:65534 /nfs/prometheus

 

vim prometheus-deployment.yaml

# Prometheus server Deployment: single replica, config from the
# prometheus-config ConfigMap, TSDB on the NFS-backed PVC.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus
  namespace: prometheus
  labels:
    name: prometheus-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      labels:
        app: prometheus
    spec:
      serviceAccountName: prometheus  # RBAC identity (see prometheus-rabc.yaml)
      containers:
        - name: prometheus
          image: prom/prometheus:v2.27.1
          command:
            - "/bin/prometheus"
          args:
            - "--config.file=/etc/prometheus/prometheus.yml"
            - "--storage.tsdb.path=/prometheus"
            # NOTE(review): --storage.tsdb.retention is deprecated in favour
            # of --storage.tsdb.retention.time; v2.27 still accepts it.
            - "--storage.tsdb.retention=24h"
            - "--web.enable-admin-api"   # enables destructive admin endpoints
            - "--web.enable-lifecycle"   # allows reload via POST /-/reload
          ports:
            - containerPort: 9090
              protocol: TCP
          volumeMounts:
            - name: data
              mountPath: "/prometheus"      # TSDB data (NFS PVC)
            - name: config-volume
              mountPath: "/etc/prometheus"  # prometheus.yml from the ConfigMap
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
            limits:
              cpu: 500m
              memory: 1024Mi
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: nfs-pvc-prometheus
            # NFS export must be chown'ed to 65534:65534 (nobody) beforehand.
        - name: config-volume
          configMap:
            name: prometheus-config

 

Apply执行

kubectl apply -f prometheus-deployment.yaml

kubectl get po -n prometheus

创建prometheus的svc

vim prometheus-svc.yaml

# NodePort Service exposing the Prometheus UI/API on node port 30003.
kind: Service
apiVersion: v1
metadata:
  name: prometheus
  namespace: prometheus
  labels:
    app: prometheus
spec:
  type: NodePort
  selector:
    app: prometheus
  ports:
    - port: 9090
      targetPort: 9090
      nodePort: 30003

 

Apply执行

kubectl apply -f prometheus-svc.yaml

kubectl get svc -A |grep prometheus

 

部署grafana

创建grafana的挂载目录

mkdir /nfs/grafana

mkdir /nfs/grafana-plugin     #与PV里的path: /nfs/grafana-plugin保持一致

chown -R 472:root /nfs/grafana-plugin     #不授权启动会报错

部署grafana的pv和pvc

vim grafana-pvc.yaml

# NFS-backed storage for Grafana: one PV/PVC pair for data and one for
# plugins, each bound explicitly via the "pv" label selector.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv-grafana
  labels:
    pv: nfs-pv-grafana
  annotations:
    volume.beta.kubernetes.io/mount-options: "noatime,nodiratime,noresvport,nolock,rsize=1048576,wsize=1048576,hard"
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /nfs/grafana
    server: 10.50.195.2
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-pvc-grafana
  namespace: prometheus
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
  selector:
    matchLabels:
      pv: nfs-pv-grafana
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv-grafana-plugin
  labels:
    pv: nfs-pv-grafana-plugin
  annotations:
    volume.beta.kubernetes.io/mount-options: "noatime,nodiratime,noresvport,nolock,rsize=1048576,wsize=1048576,hard"
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /nfs/grafana-plugin
    server: 10.50.195.2
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-pvc-grafana-plugin
  namespace: prometheus
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
  selector:
    matchLabels:
      pv: nfs-pv-grafana-plugin

Apply执行

kubectl apply -f grafana-pvc.yaml

kubectl get pvc -n prometheus

创建grafana的configmap(这里整合了grafana的datasource,dashboard)

vim grafana-cm.yaml

(内容太多,直接放网盘,自行下载)

链接:https://pan.baidu.com/s/1ekAN91n_xMRxwcPixbgFCg?pwd=u663 

提取码:u663

 

Apply执行

kubectl apply -f grafana-cm.yaml

kubectl get cm -n prometheus

创建grafana的deployment

vim grafana.yaml

# Grafana Deployment: single replica, NFS-backed storage, datasource and
# dashboard provisioning supplied by ConfigMaps (see grafana-cm.yaml).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: grafana-core
  namespace: prometheus
  labels:
    app: grafana
    component: core
spec:
  replicas: 1
  selector:
    matchLabels:
      app: grafana
      component: core
  template:
    metadata:
      labels:
        app: grafana
        component: core
    spec:
      containers:
        - name: grafana-core
          image: grafana/grafana:7.5.6
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              cpu: 100m
              memory: 100Mi
            requests:
              cpu: 100m
              memory: 100Mi
          env:
            - name: GF_AUTH_BASIC_ENABLED
              value: "true"
            - name: GF_AUTH_ANONYMOUS_ENABLED
              value: "false"
          readinessProbe:
            httpGet:
              path: /login
              port: 3000
          volumeMounts:
            # NOTE(review): mounting at /var shadows the image's entire /var,
            # and /var/lib/grafana is then mounted over it below. It looks
            # like /var/lib/grafana was intended here -- confirm before
            # changing; mount order is kept as-is for the nested paths.
            - name: grafana-persistent-storage
              mountPath: /var
            - name: grafana-plugin-storage
              mountPath: /var/lib/grafana
            - name: grafana-datasources
              mountPath: /etc/grafana/provisioning/datasources
              readOnly: false
            - name: grafana-dashboards
              mountPath: /etc/grafana/provisioning/dashboards
              readOnly: false
            - name: grafana-k8s-pod-resource
              mountPath: /grafana-dashboard-definitions/0/podresource
              readOnly: false
      volumes:
        - name: grafana-persistent-storage
          persistentVolumeClaim:
            claimName: nfs-pvc-grafana
        - name: grafana-plugin-storage
          persistentVolumeClaim:
            claimName: nfs-pvc-grafana-plugin
        - name: grafana-datasources
          configMap:
            name: grafana-datasources
        - name: grafana-dashboards
          configMap:
            name: grafana-dashboards
        - name: grafana-k8s-pod-resource
          configMap:
            name: grafana-k8s-pod-resource

 

Apply执行

kubectl apply -f grafana.yaml

kubectl get po -n prometheus

创建grafana的svc

vim grafana-svc.yaml

apiVersion: v1

kind: Service

metadata: 

  name: grafana

  namespace: prometheus

  labels: 

    app: grafana

    component: core

spec:   

  type: NodePort

  ports:  

    - port: 3000  

      nodePort: 30004

  selector:

    app: grafana

    component: core 

 

Apply执行

kubectl apply -f grafana-svc.yaml

kubectl get svc -n prometheus

 

部署mysql-exporter

我的mysql是主备,所以需要做两个exporter

master:

vim mysql-exporter-master.yaml

---
# mysqld-exporter for the MySQL master, scraped by Prometheus via
# NodePort 30015 (job 'mysql_node_exporter_master').
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-exporter-master
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql-exporter-master
  template:
    metadata:
      labels:
        app: mysql-exporter-master
    spec:
      containers:
        - name: mysql-exporter-master
          image: prom/mysqld-exporter:v0.12.1
          ports:
            - containerPort: 9104
          env:
            # SECURITY: plaintext root credentials in the manifest — move
            # DATA_SOURCE_NAME into a Secret (valueFrom.secretKeyRef) before
            # committing this file anywhere shared.
            - name: DATA_SOURCE_NAME
              value: "root:root@tcp(10.202.1.9:3306)/"
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-exporter-master
spec:
  type: NodePort
  selector:
    app: mysql-exporter-master
  ports:
    - protocol: TCP
      port: 9104
      targetPort: 9104
      nodePort: 30015

 

Apply执行

kubectl apply -f mysql-exporter-master.yaml

kubectl get po -A |grep mysql

kubectl get svc -A

 slave:

vim mysql-exporter-slave.yaml

---
# mysqld-exporter for the MySQL slave, scraped by Prometheus via
# NodePort 30016 (job 'mysql_node_exporter_slave').
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-exporter-slave
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql-exporter-slave
  template:
    metadata:
      labels:
        app: mysql-exporter-slave
    spec:
      containers:
        - name: mysql-exporter-slave
          image: prom/mysqld-exporter:v0.12.1
          ports:
            - containerPort: 9104
          env:
            # SECURITY: plaintext root credentials in the manifest — move
            # DATA_SOURCE_NAME into a Secret (valueFrom.secretKeyRef) before
            # committing this file anywhere shared.
            - name: DATA_SOURCE_NAME
              value: "root:root@tcp(111.6.83.37:30308)/"
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-exporter-slave
spec:
  type: NodePort
  selector:
    app: mysql-exporter-slave
  ports:
    - protocol: TCP
      port: 9104
      targetPort: 9104
      nodePort: 30016

 

Apply执行

kubectl apply -f mysql-exporter-slave.yaml

kubectl get svc -A |grep mysql

kubectl get po -A |grep mysql

在prometheus的cm里添加mysql-exporter的数据源

最后一行添加如图信息,重启cm和deployment

    - job_name: 'mysql_node_exporter_master'
      static_configs:
        - targets: [10.50.195.2:30015]
    - job_name: 'mysql_node_exporter_slave'
      static_configs:
        - targets: [10.50.195.2:30016]

 

打开prometheus的web页面看是否生效

 grafana配置数据源

添加mysql-exporter和宿主机监控模板

添加prometheus数据源

 

添加监控模板

 

主机监控

Mysql数据库监控

 

 
 
 
 
 
 
posted @ 2024-06-20 16:16  Esurts~  阅读(145)  评论(0)    收藏  举报