Prometheus+grafana监控Kubernetes1.13.1

一.安装Kubernetes1.13.1

参考Kubernetes1.13.1安装

二.安装grafana

1.wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.3.0-1.x86_64.rpm 
rpm -ivh grafana-5.3.0-1.x86_64.rpm 
yum install urw-fonts -y
rpm -ivh grafana-5.3.0-1.x86_64.rpm

2.vi /etc/grafana/grafana.ini,修改如下值
;#################################### Dashboard JSON files ##########################
[dashboards.json]
enabled = true
path = /var/lib/grafana/dashboards

3.grafana-dashboards下载地址https://github.com/percona/grafana-dashboards
将grafana-dashboards/dashboards目录下文件上传到/var/lib/grafana/dashboards

4.systemctl daemon-reload
service grafana-server start

5.访问地址http://IP:3000
http://120.27.159.108:3000

 

三.创建单独的域名空间

1.vi namespace.yaml

apiVersion: v1

kind: Namespace

metadata:

   name: monitor

2.kubectl create -f namespace.yaml

 

四.安装配置node-exporter

1.vi node-exporter.yaml

apiVersion: apps/v1

kind: DaemonSet

metadata:

  name: node-exporter

  namespace: monitor

  labels:

    app: node-exporter

spec:

  selector:

    matchLabels:

      app: node-exporter

  template:

    metadata:

      labels:

        app: node-exporter

    spec:

      tolerations:

      - effect: NoSchedule

        key: node-role.kubernetes.io/master

      containers:

      - name: node-exporter

        image: mirrorgooglecontainers/node-exporter:v0.15.0

        imagePullPolicy: IfNotPresent

        resources:

          limits:

            memory: 200Mi

          requests:

            cpu: 100m

            memory: 200Mi

        ports:

        - name: node-exporter

          containerPort: 9100

          hostPort: 9100

      hostNetwork: true

      hostPID: true

      restartPolicy: Always

---

apiVersion: v1

kind: Service

metadata:

  annotations:

    prometheus.io/scrape: 'true'

  name: node-exporter

  namespace: monitor

  labels:

    app: node-exporter

spec:

  ports:

    - name: node-exporter

      port: 9100

      targetPort: 9100

      protocol: TCP

  selector:

    app: node-exporter

  type: ClusterIP

 

2.kubectl create -f node-exporter.yaml

 

五.kube-state-metrics组件的安装和调试

项目地址: https://github.com/kubernetes/kube-state-metrics

组件kube-state-metrics是一个简单的服务,用来监听Kubernetes API服务并且生成关于对象状态的Metrics。它不关注Kubernetes各个组件的状态,而是关注内部的,比如 Deployments, Pods的状态。

1.vi kube-state-metrics.yaml

apiVersion: v1

kind: ServiceAccount

metadata:

  name: kube-state-metrics

  namespace: monitor

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRole

metadata:

  name: kube-state-metrics

rules:

- apiGroups: [""]

  resources: ["nodes", "pods", "services", "resourcequotas", "replicationcontrollers", "limitranges", "persistentvolumeclaims", "persistentvolumes", "namespaces", "endpoints"]

  verbs: ["list", "watch"]

- apiGroups: ["extensions"]

  resources: ["daemonsets", "deployments", "replicasets"]

  verbs: ["list", "watch"]

- apiGroups: ["apps"]

  resources: ["statefulsets"]

  verbs: ["list", "watch"]

- apiGroups: ["batch"]

  resources: ["cronjobs", "jobs"]

  verbs: ["list", "watch"]

- apiGroups: ["autoscaling"]

  resources: ["horizontalpodautoscalers"]

  verbs: ["list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  name: kube-state-metrics

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: kube-state-metrics

subjects:

- kind: ServiceAccount

  name: kube-state-metrics

  namespace: monitor

---

apiVersion: apps/v1

kind: Deployment

metadata:

  name: kube-state-metrics

  namespace: monitor

spec:

  selector:

    matchLabels:

      app: kube-state-metrics

  replicas: 1

  template:

    metadata:

      labels:

        app: kube-state-metrics

    spec:

      serviceAccountName: kube-state-metrics

      restartPolicy: Always

      containers:

      - name: kube-state-metrics

        image: mirrorgooglecontainers/kube-state-metrics:v1.2.0

        imagePullPolicy: IfNotPresent

        ports:

        - name: http-metrics

          containerPort: 8080

        - name: telemetry

          containerPort: 8081

        readinessProbe:

          httpGet:

            path: /healthz

            port: 8080

          initialDelaySeconds: 5

          timeoutSeconds: 5

      - name: addon-resizer

        image: mirrorgooglecontainers/addon-resizer:1.7

        imagePullPolicy: IfNotPresent

        resources:

          limits:

            cpu: 100m

            memory: 30Mi

          requests:

            cpu: 100m

            memory: 30Mi

        env:

          - name: MY_POD_NAME

            valueFrom:

              fieldRef:

                fieldPath: metadata.name

          - name: MY_POD_NAMESPACE

            valueFrom:

              fieldRef:

                fieldPath: metadata.namespace

        command:

          - /pod_nanny

          - --container=kube-state-metrics

          - --cpu=100m

          - --extra-cpu=1m

          - --memory=100Mi

          - --extra-memory=2Mi

          - --threshold=5

          - --deployment=kube-state-metrics

---

apiVersion: v1

kind: Service

metadata:

  name: kube-state-metrics

  namespace: monitor

  labels:

    app: kube-state-metrics

  annotations:

    prometheus.io/scrape: 'true'

spec:

  ports:

  - name: http-metrics

    port: 8080

    targetPort: 8080

    nodePort: 30480

    protocol: TCP

  - name: telemetry

    port: 8081

    targetPort: 8081

    nodePort: 30481

    protocol: TCP

  selector:

    app: kube-state-metrics

  type: NodePort

 

2.kubectl create -f kube-state-metrics.yaml

3.检查启动状况 
[root@iZbp1at8fph52evh70atb1Z prometheus-2.5.0-rc.2.linux-amd64]# kubectl get deployment -n monitor
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
kube-state-metrics 1 1 1 0 1h

[root@iZbp1at8fph52evh70atb1Z prometheus-2.5.0-rc.2.linux-amd64]#  kubectl get svc -n monitor

NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                         AGE

kube-state-metrics   NodePort    10.254.167.82   <none>        8080:30480/TCP,8081:30481/TCP   2m36s

node-exporter        ClusterIP   10.254.99.94    <none>        9100/TCP                        3m33s


[root@iZbp1at8fph52evh70atb1Z prometheus-2.5.0-rc.2.linux-amd64]# kubectl get serviceaccount -n monitor
NAME SECRETS AGE
default 1 1h
kube-state-metrics 1 1h
prometheus-bearer-token 1 1h

[root@iZbp1at8fph52evh70atb1Z prometheus-2.5.0-rc.2.linux-amd64]# ss -lntup|grep -E "30480|30481"

tcp    LISTEN     0      128      :::30480                :::*                   users:(("kube-proxy",pid=25557,fd=16))

tcp    LISTEN     0      128      :::30481                :::*                   users:(("kube-proxy",pid=25557,fd=17))

 

六.给Prometheus服务配置Bearer Token

参考链接: https://github.com/prometheus/prometheus/blob/master/documentation/examples/rbac-setup.yml

1.vi prometheus-bearer-token.yaml

apiVersion: v1

kind: ServiceAccount

metadata:

  name: prometheus-bearer-token

  namespace: monitor

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRole

metadata:

  name: prometheus-bearer-token

rules:

- apiGroups: [""]

  resources: ["nodes", "nodes/proxy", "services", "endpoints", "pods"]

  verbs: ["get", "list", "watch"]

- apiGroups: ["extensions"]

  resources: ["ingresses"]

  verbs: ["get", "list", "watch"]

- nonResourceURLs: ["/metrics"]

  verbs: ["get"]

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  name: prometheus-bearer-token

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: prometheus-bearer-token

subjects:

- kind: ServiceAccount

  name: prometheus-bearer-token

  namespace: monitor


2.建立token
kubectl create -f prometheus-bearer-token.yaml

[root@iZbp1at8fph52evh70atb1Z prometheus-2.5.0-rc.2.linux-amd64]# kubectl get serviceaccount prometheus-bearer-token -n monitor -o yaml

apiVersion: v1

kind: ServiceAccount

metadata:

  creationTimestamp: 2018-11-16T07:47:50Z

  name: prometheus-bearer-token

  namespace: monitor

  resourceVersion: "101687"

  selfLink: /api/v1/namespaces/monitor/serviceaccounts/prometheus-bearer-token

  uid: eb17efe4-e973-11e8-add5-00163e0e5e8f

secrets:

- name: prometheus-bearer-token-token-vm7k9



3.kubectl get secret prometheus-bearer-token-token-vm7k9 -n monitor -o yaml

4.TOKEN=$(kubectl get secret prometheus-bearer-token-token-vm7k9 -n monitor -o yaml |grep "token:" | awk '{print $2}' | base64 -d)

[root@iZbp1at8fph52evh70atb1Z prometheus-2.5.0-rc.2.linux-amd64]# echo $TOKEN

eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtb25pdG9yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuLXRva2VuLXZtN2s5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZWIxN2VmZTQtZTk3My0xMWU4LWFkZDUtMDAxNjNlMGU1ZThmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Om1vbml0b3I6cHJvbWV0aGV1cy1iZWFyZXItdG9rZW4ifQ.ZOdzJxfv_2e-SSSjrLkyENn200P-dTKIAHTVr8vHJYLbPIUy44_CrLx0VIZwWT93HkHCNUpX-YBuHM5itt15tIBK8xdAL8tpI2LKie-7zKHocXPMoq9SDXNpyVNTPirhUQXecF_bvBuU-6b0yz91_x0TmGQgvs-egys0STpVw47HyPqEj1h1pab_ODOcXpV8mhmNIvAzw9fCWLFmCFi42DMjj1ZTxpcnfqZZraREUkYvNHaRIR1bszt1n2KOQQdVBhPc56mFDAeeFYagqLcjZ_Gvo28u7jOKFO8PCIRzydJHriaGN5Ji6y41TQ_zrHwHjSm06jtEYUdn3VFbodWy9Q



5.kubectl get secret prometheus-bearer-token-token-vm7k9 -n monitor -o yaml|grep "ca.crt"|awk '{print $2}' | base64 -d > /root/prometheus/ca.crt

6.测试token与crt
curl -H "Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtb25pdG9yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuLXRva2VuLXZtN2s5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZWIxN2VmZTQtZTk3My0xMWU4LWFkZDUtMDAxNjNlMGU1ZThmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Om1vbml0b3I6cHJvbWV0aGV1cy1iZWFyZXItdG9rZW4ifQ.ZOdzJxfv_2e-SSSjrLkyENn200P-dTKIAHTVr8vHJYLbPIUy44_CrLx0VIZwWT93HkHCNUpX-YBuHM5itt15tIBK8xdAL8tpI2LKie-7zKHocXPMoq9SDXNpyVNTPirhUQXecF_bvBuU-6b0yz91_x0TmGQgvs-egys0STpVw47HyPqEj1h1pab_ODOcXpV8mhmNIvAzw9fCWLFmCFi42DMjj1ZTxpcnfqZZraREUkYvNHaRIR1bszt1n2KOQQdVBhPc56mFDAeeFYagqLcjZ_Gvo28u7jOKFO8PCIRzydJHriaGN5Ji6y41TQ_zrHwHjSm06jtEYUdn3VFbodWy9Q" --cacert /root/prometheus/ca.crt https://10.47.92.186:6443/api/v1/pods

curl -H "Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtb25pdG9yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuLXRva2VuLXZtN2s5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZWIxN2VmZTQtZTk3My0xMWU4LWFkZDUtMDAxNjNlMGU1ZThmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Om1vbml0b3I6cHJvbWV0aGV1cy1iZWFyZXItdG9rZW4ifQ.ZOdzJxfv_2e-SSSjrLkyENn200P-dTKIAHTVr8vHJYLbPIUy44_CrLx0VIZwWT93HkHCNUpX-YBuHM5itt15tIBK8xdAL8tpI2LKie-7zKHocXPMoq9SDXNpyVNTPirhUQXecF_bvBuU-6b0yz91_x0TmGQgvs-egys0STpVw47HyPqEj1h1pab_ODOcXpV8mhmNIvAzw9fCWLFmCFi42DMjj1ZTxpcnfqZZraREUkYvNHaRIR1bszt1n2KOQQdVBhPc56mFDAeeFYagqLcjZ_Gvo28u7jOKFO8PCIRzydJHriaGN5Ji6y41TQ_zrHwHjSm06jtEYUdn3VFbodWy9Q" --cacert /root/prometheus/ca.crt "https://10.47.92.186:6443/api/v1/services?limit=500&resourceVersion=0"

 

七.安装prometheus

1.cd /app
tar xvf prometheus-2.5.0-rc.2.linux-amd64.tar.gz

2.vi /app/prometheus-2.5.0-rc.2.linux-amd64/prometheus.yml (https://10.47.92.186:6443为k8s master的访问地址)

# cat prometheus.yml

global:

  scrape_interval:     15s

  evaluation_interval: 30s

  scrape_timeout:      10s

  external_labels:

    monitor: 'prometheus-server'

 

rule_files:

  - 'prometheus.rules.yml'

 

alerting:

  alertmanagers:

    - timeout: 10s

      static_configs:

        - targets:

            - 'localhost:9093'

 

scrape_configs:

- job_name: 'kubernetes-apiservers'   # kube-apiserver 的metric

  kubernetes_sd_configs:

  - role: endpoints

    api_server: https://10.47.92.186:6443

    tls_config:

      ca_file: /root/prometheus/ca.crt

    bearer_token: "eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtb25pdG9yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuLXRva2VuLXZtN2s5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZWIxN2VmZTQtZTk3My0xMWU4LWFkZDUtMDAxNjNlMGU1ZThmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Om1vbml0b3I6cHJvbWV0aGV1cy1iZWFyZXItdG9rZW4ifQ.ZOdzJxfv_2e-SSSjrLkyENn200P-dTKIAHTVr8vHJYLbPIUy44_CrLx0VIZwWT93HkHCNUpX-YBuHM5itt15tIBK8xdAL8tpI2LKie-7zKHocXPMoq9SDXNpyVNTPirhUQXecF_bvBuU-6b0yz91_x0TmGQgvs-egys0STpVw47HyPqEj1h1pab_ODOcXpV8mhmNIvAzw9fCWLFmCFi42DMjj1ZTxpcnfqZZraREUkYvNHaRIR1bszt1n2KOQQdVBhPc56mFDAeeFYagqLcjZ_Gvo28u7jOKFO8PCIRzydJHriaGN5Ji6y41TQ_zrHwHjSm06jtEYUdn3VFbodWy9Q"

  scheme: https

  tls_config:

    ca_file: /root/prometheus/ca.crt

  bearer_token: "eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtb25pdG9yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuLXRva2VuLXZtN2s5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZWIxN2VmZTQtZTk3My0xMWU4LWFkZDUtMDAxNjNlMGU1ZThmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Om1vbml0b3I6cHJvbWV0aGV1cy1iZWFyZXItdG9rZW4ifQ.ZOdzJxfv_2e-SSSjrLkyENn200P-dTKIAHTVr8vHJYLbPIUy44_CrLx0VIZwWT93HkHCNUpX-YBuHM5itt15tIBK8xdAL8tpI2LKie-7zKHocXPMoq9SDXNpyVNTPirhUQXecF_bvBuU-6b0yz91_x0TmGQgvs-egys0STpVw47HyPqEj1h1pab_ODOcXpV8mhmNIvAzw9fCWLFmCFi42DMjj1ZTxpcnfqZZraREUkYvNHaRIR1bszt1n2KOQQdVBhPc56mFDAeeFYagqLcjZ_Gvo28u7jOKFO8PCIRzydJHriaGN5Ji6y41TQ_zrHwHjSm06jtEYUdn3VFbodWy9Q"

  relabel_configs:

  - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]

    action: keep

    regex: default;kubernetes;https

 

- job_name: 'kubernetes-service-endpoints'  # Kubernetes node-exporter 的数据  在文章最开始就配置好了

  kubernetes_sd_configs:

  - role: endpoints

    api_server: https://10.47.92.186:6443

    tls_config:

      ca_file: /root/prometheus/ca.crt

    bearer_token: "eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtb25pdG9yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuLXRva2VuLXZtN2s5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZWIxN2VmZTQtZTk3My0xMWU4LWFkZDUtMDAxNjNlMGU1ZThmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Om1vbml0b3I6cHJvbWV0aGV1cy1iZWFyZXItdG9rZW4ifQ.ZOdzJxfv_2e-SSSjrLkyENn200P-dTKIAHTVr8vHJYLbPIUy44_CrLx0VIZwWT93HkHCNUpX-YBuHM5itt15tIBK8xdAL8tpI2LKie-7zKHocXPMoq9SDXNpyVNTPirhUQXecF_bvBuU-6b0yz91_x0TmGQgvs-egys0STpVw47HyPqEj1h1pab_ODOcXpV8mhmNIvAzw9fCWLFmCFi42DMjj1ZTxpcnfqZZraREUkYvNHaRIR1bszt1n2KOQQdVBhPc56mFDAeeFYagqLcjZ_Gvo28u7jOKFO8PCIRzydJHriaGN5Ji6y41TQ_zrHwHjSm06jtEYUdn3VFbodWy9Q"

  relabel_configs:

  - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]

    action: keep

    regex: true

  - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]

    action: replace

    target_label: __scheme__

    regex: (https?)

  - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]

    action: replace

    target_label: __metrics_path__

    regex: (.+)

  - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]

    action: replace

    target_label: __address__

    regex: ([^:]+)(?::\d+)?;(\d+)

    replacement: $1:$2

  - action: labelmap

    regex: __meta_kubernetes_service_label_(.+)

  - source_labels: [__meta_kubernetes_namespace]

    action: replace

    target_label: kubernetes_namespace

  - source_labels: [__meta_kubernetes_service_name]

    action: replace

    target_label: kubernetes_name

 

- job_name: 'kubernetes-nodes'  # kubelet metrics接口 在Kubernetes集群内部可以使用域名kubernetes.default.svc:443来访问

  scheme: https

  tls_config:

    ca_file: /root/prometheus/ca.crt

  bearer_token: "eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtb25pdG9yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuLXRva2VuLXZtN2s5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZWIxN2VmZTQtZTk3My0xMWU4LWFkZDUtMDAxNjNlMGU1ZThmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Om1vbml0b3I6cHJvbWV0aGV1cy1iZWFyZXItdG9rZW4ifQ.ZOdzJxfv_2e-SSSjrLkyENn200P-dTKIAHTVr8vHJYLbPIUy44_CrLx0VIZwWT93HkHCNUpX-YBuHM5itt15tIBK8xdAL8tpI2LKie-7zKHocXPMoq9SDXNpyVNTPirhUQXecF_bvBuU-6b0yz91_x0TmGQgvs-egys0STpVw47HyPqEj1h1pab_ODOcXpV8mhmNIvAzw9fCWLFmCFi42DMjj1ZTxpcnfqZZraREUkYvNHaRIR1bszt1n2KOQQdVBhPc56mFDAeeFYagqLcjZ_Gvo28u7jOKFO8PCIRzydJHriaGN5Ji6y41TQ_zrHwHjSm06jtEYUdn3VFbodWy9Q"

  kubernetes_sd_configs:

  - role: node

    api_server: https://10.47.92.186:6443

    tls_config:

      ca_file: /root/prometheus/ca.crt

    bearer_token: "eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtb25pdG9yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuLXRva2VuLXZtN2s5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZWIxN2VmZTQtZTk3My0xMWU4LWFkZDUtMDAxNjNlMGU1ZThmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Om1vbml0b3I6cHJvbWV0aGV1cy1iZWFyZXItdG9rZW4ifQ.ZOdzJxfv_2e-SSSjrLkyENn200P-dTKIAHTVr8vHJYLbPIUy44_CrLx0VIZwWT93HkHCNUpX-YBuHM5itt15tIBK8xdAL8tpI2LKie-7zKHocXPMoq9SDXNpyVNTPirhUQXecF_bvBuU-6b0yz91_x0TmGQgvs-egys0STpVw47HyPqEj1h1pab_ODOcXpV8mhmNIvAzw9fCWLFmCFi42DMjj1ZTxpcnfqZZraREUkYvNHaRIR1bszt1n2KOQQdVBhPc56mFDAeeFYagqLcjZ_Gvo28u7jOKFO8PCIRzydJHriaGN5Ji6y41TQ_zrHwHjSm06jtEYUdn3VFbodWy9Q"

  relabel_configs:

  - action: labelmap

    regex: __meta_kubernetes_node_label_(.+)

  - target_label: __address__

    #replacement: kubernetes.default.svc:443

    replacement: 10.47.92.186:6443

  - source_labels: [__meta_kubernetes_node_name]

    regex: (.+)

    target_label: __metrics_path__

    replacement: /api/v1/nodes/${1}/proxy/metrics

 

- job_name: 'kubernetes-cadvisor'  # cadvisor 接口  在Kubernetes集群内部可以使用域名kubernetes.default.svc:443访问

  scheme: https

  tls_config:

    ca_file: /root/prometheus/ca.crt

  bearer_token: "eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtb25pdG9yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuLXRva2VuLXZtN2s5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZWIxN2VmZTQtZTk3My0xMWU4LWFkZDUtMDAxNjNlMGU1ZThmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Om1vbml0b3I6cHJvbWV0aGV1cy1iZWFyZXItdG9rZW4ifQ.ZOdzJxfv_2e-SSSjrLkyENn200P-dTKIAHTVr8vHJYLbPIUy44_CrLx0VIZwWT93HkHCNUpX-YBuHM5itt15tIBK8xdAL8tpI2LKie-7zKHocXPMoq9SDXNpyVNTPirhUQXecF_bvBuU-6b0yz91_x0TmGQgvs-egys0STpVw47HyPqEj1h1pab_ODOcXpV8mhmNIvAzw9fCWLFmCFi42DMjj1ZTxpcnfqZZraREUkYvNHaRIR1bszt1n2KOQQdVBhPc56mFDAeeFYagqLcjZ_Gvo28u7jOKFO8PCIRzydJHriaGN5Ji6y41TQ_zrHwHjSm06jtEYUdn3VFbodWy9Q"

  kubernetes_sd_configs:

  - role: node

    api_server: https://10.47.92.186:6443

    tls_config:

      ca_file: /root/prometheus/ca.crt

    bearer_token: "eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtb25pdG9yIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuLXRva2VuLXZtN2s5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6InByb21ldGhldXMtYmVhcmVyLXRva2VuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZWIxN2VmZTQtZTk3My0xMWU4LWFkZDUtMDAxNjNlMGU1ZThmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Om1vbml0b3I6cHJvbWV0aGV1cy1iZWFyZXItdG9rZW4ifQ.ZOdzJxfv_2e-SSSjrLkyENn200P-dTKIAHTVr8vHJYLbPIUy44_CrLx0VIZwWT93HkHCNUpX-YBuHM5itt15tIBK8xdAL8tpI2LKie-7zKHocXPMoq9SDXNpyVNTPirhUQXecF_bvBuU-6b0yz91_x0TmGQgvs-egys0STpVw47HyPqEj1h1pab_ODOcXpV8mhmNIvAzw9fCWLFmCFi42DMjj1ZTxpcnfqZZraREUkYvNHaRIR1bszt1n2KOQQdVBhPc56mFDAeeFYagqLcjZ_Gvo28u7jOKFO8PCIRzydJHriaGN5Ji6y41TQ_zrHwHjSm06jtEYUdn3VFbodWy9Q"

  relabel_configs:

  - action: labelmap

    regex: __meta_kubernetes_node_label_(.+)

  - target_label: __address__

    #replacement: kubernetes.default.svc:443

    replacement: 10.47.92.186:6443

  - source_labels: [__meta_kubernetes_node_name]

    regex: (.+)

    target_label: __metrics_path__

    replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor



3.启动prometheus
nohup /app/prometheus-2.5.0-rc.2.linux-amd64/prometheus --config.file=/app/prometheus-2.5.0-rc.2.linux-amd64/prometheus.yml > /dev/null 2>&1 &

4. 从下面地址下载kubernetes dashboard,并将dashboard的 json文件导入到grafana,

https://grafana.com/dashboards/315

https://grafana.com/dashboards

5.结合grafana显示的效果,访问http://10.47.92.186:3000

posted @ 2020-06-03 00:07  $world  阅读(562)  评论(0)    收藏  举报