Chart --> helm(客户端)/ tiller(服务端)处理

Chart + Config --> Release(Chart 结合 Config 渲染模板后,部署为一个 Release 实例)

Config: values.yaml(Chart 的默认配置值,可在 install 时用 -f 覆盖)

 

  incubator 仓库(孵化中的 charts):

    https://kubernetes-charts-incubator.storage.googleapis.com
    (注:storage.googleapis.com 上的 stable/incubator 仓库已于 2020 年底停用,新仓库为 https://charts.helm.sh/stable 与 https://charts.helm.sh/incubator)

  EFK 日志方案:

    E: elasticsearch(存储与检索)

    F: fluentd(日志采集;笔记原写作 "L: logstash",即 ELK 方案,EFK 中的 F 指 fluentd)

    K: kibana(可视化展示)

 

常见日志采集器:Filebeat、Fluentd、Logstash 等

 

/var/log --> /var/log/containers/(kubelet 把各 Pod 容器的日志以软链接形式放到宿主机的 /var/log/containers/ 目录下,fluentd 即从此目录采集)

 

 

master:

[root@master helm]# helm fetch stable/redis
[root@master helm]# helm repo list
NAME  	URL                                             
stable	https://kubernetes-charts.storage.googleapis.com
local 	http://127.0.0.1:8879/charts    
[root@master helm]# ll
总用量 56
-rw-r--r--. 1 root root 31909 9月   9 22:29 redis-10.5.7.tgz
-rw-r--r--. 1 root root   354 9月   5 22:28 tiller-rbac.yaml
-rwxr-xr-x. 1 root root 20419 9月   7 22:19 values.yaml
[root@master helm]# tar xf redis-10.5.7.tgz 
[root@master helm]# tree redis
redis
├── Chart.yaml
├── ci
│   ├── default-values.yaml
│   ├── dev-values.yaml
│   ├── extra-flags-values.yaml
│   ├── insecure-sentinel-values.yaml
│   ├── production-sentinel-values.yaml
│   ├── production-values.yaml
│   ├── redisgraph-module-values.yaml
│   └── redis-lib-values.yaml
├── README.md
├── templates
│   ├── configmap.yaml
│   ├── headless-svc.yaml
│   ├── health-configmap.yaml
│   ├── _helpers.tpl
│   ├── metrics-prometheus.yaml
│   ├── metrics-svc.yaml
│   ├── networkpolicy.yaml
│   ├── NOTES.txt
│   ├── prometheusrule.yaml
│   ├── psp.yaml
│   ├── redis-master-statefulset.yaml
│   ├── redis-master-svc.yaml
│   ├── redis-rolebinding.yaml
│   ├── redis-role.yaml
│   ├── redis-serviceaccount.yaml
│   ├── redis-slave-statefulset.yaml
│   ├── redis-slave-svc.yaml
│   ├── redis-with-sentinel-svc.yaml
│   └── secret.yaml
├── values-production.yaml
├── values.schema.json
└── values.yaml

2 directories, 32 files
[root@master helm]# helm create --help
[root@master helm]# helm create myapp
[root@master helm]# tree myapp/
myapp/
├── charts
├── Chart.yaml
├── templates
│   ├── deployment.yaml
│   ├── _helpers.tpl
│   ├── ingress.yaml
│   ├── NOTES.txt
│   └── service.yaml
└── values.yaml

2 directories, 7 files
[root@master helm]# cd myapp/
[root@master myapp]# ls
charts  Chart.yaml  templates  values.yaml
[root@master myapp]# vim Chart.yaml 
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Kubernetes myapp chart
name: myapp
version: 0.0.1
maintainer:
- name: smoke
  email: smoke@smoke.com
  url: http://www.smoke.com/
[root@master myapp]# ls charts/
[root@master myapp]# cd templates/
[root@master templates]# ls
deployment.yaml  _helpers.tpl  ingress.yaml  NOTES.txt  service.yaml
[root@master templates]# vim deployment.yaml 
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: {{ template "myapp.fullname" . }}
  labels:
    app: {{ template "myapp.name" . }}
    chart: {{ template "myapp.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ template "myapp.name" . }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ template "myapp.name" . }}
        release: {{ .Release.Name }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          resources:
{{ toYaml .Values.resources | indent 12 }}
    {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
    {{- end }}
    {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
    {{- end }}
    {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
    {{- end }}
[root@master templates]# vim service.yaml 
apiVersion: v1
kind: Service
metadata:
  name: {{ template "myapp.fullname" . }}
  labels:
    app: {{ template "myapp.name" . }}
    chart: {{ template "myapp.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ template "myapp.name" . }}
    release: {{ .Release.Name }}
[root@master templates]# vim ingress.yaml 
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "myapp.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    app: {{ template "myapp.name" . }}
    chart: {{ template "myapp.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ . }}
      http:
        paths:
          - path: {{ $ingressPath }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: http
  {{- end }}
{{- end }}
[root@master templates]# kubectl get pods -n ingress-nginx
NAME                                       READY     STATUS    RESTARTS   AGE
default-http-backend-846b65fb5f-4489p      1/1       Running   0          33d
nginx-ingress-controller-d658896cd-krhh5   1/1       Running   1          84d
[root@master templates]# cd ..
[root@master myapp]# vim values.yaml 
# Default values for myapp.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 2

image:
  repository: ikubernetes/myapp:v1
  tag: v1
  pullPolicy: IfNotPresent

service:
  type: ClusterIP
  port: 80

ingress:
  enabled: false
  annotations: {}
    kubernetes.io/ingress.class: nginx
    kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits:
    cpu: 100m
    memory: 128Mi
  requests:
    cpu: 100m
    memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
[root@master myapp]# helm lint myapp    #语法检查
==> Skipping myapp
No chart found for linting (missing Chart.yaml)

Error: 0 chart(s) linted, 1 chart(s) failed
[root@master myapp]# helm lint ../myapp
==> Linting ../myapp
[INFO] Chart.yaml: icon is recommended
[ERROR] values.yaml: unable to parse YAML
	error converting YAML to JSON: yaml: line 18: did not find expected key

Error: 1 chart(s) linted, 1 chart(s) failed
[root@master myapp]# vim values.yaml 
# Default values for myapp.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 2

image:
  repository: ikubernetes/myapp:v1
  tag: v1
  pullPolicy: IfNotPresent

service:
  type: ClusterIP
  port: 80

ingress:
  enabled: false
  annotations:
    kubernetes.io/ingress.class: nginx
    kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: 
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits:
    cpu: 100m
    memory: 128Mi
  requests:
    cpu: 100m
    memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
[root@master myapp]# helm lint ../myapp
==> Linting ../myapp
[INFO] Chart.yaml: icon is recommended

1 chart(s) linted, no failures
[root@master myapp]# helm package -h
[root@master myapp]# cd ..
[root@master helm]# helm package myapp/    #打包
[root@master helm]# ls
myapp  myapp-0.0.1.tgz  redis  redis-10.5.7.tgz  tiller-rbac.yaml  values.yaml
[root@master helm]# helm repo list
NAME  	URL                                             
stable	https://kubernetes-charts.storage.googleapis.com
local 	http://127.0.0.1:8879/charts      
[root@master helm]# ss -tnl
State      Recv-Q Send-Q                                                 Local Address:Port                                                                Peer Address:Port              
LISTEN     0      128                                                        127.0.0.1:38405                                                                          *:*                  
LISTEN     0      128                                                        127.0.0.1:10248                                                                          *:*                  
LISTEN     0      128                                                        127.0.0.1:10249                                                                          *:*                  
LISTEN     0      128                                                        127.0.0.1:10251                                                                          *:*                  
LISTEN     0      128                                                        127.0.0.1:2379                                                                           *:*                  
LISTEN     0      128                                                        127.0.0.1:10252                                                                          *:*                  
LISTEN     0      128                                                        127.0.0.1:2380                                                                           *:*                  
LISTEN     0      128                                                                *:22                                                                             *:*                  
LISTEN     0      100                                                        127.0.0.1:25                                                                             *:*                  
LISTEN     0      128                                                      172.20.0.70:10010                                                                          *:*                  
LISTEN     0      128                                                               :::30080                                                                         :::*                  
LISTEN     0      128                                                               :::32169                                                                         :::*                  
LISTEN     0      128                                                               :::30090                                                                         :::*                  
LISTEN     0      128                                                               :::10250                                                                         :::*                  
LISTEN     0      128                                                               :::9099                                                                          :::*                  
LISTEN     0      128                                                               :::30443                                                                         :::*                  
LISTEN     0      128                                                               :::6443                                                                          :::*                  
LISTEN     0      128                                                               :::9100                                                                          :::*                  
LISTEN     0      128                                                               :::10256                                                                         :::*                  
LISTEN     0      128                                                               :::31762                                                                         :::*                  
LISTEN     0      128                                                               :::22                                                                            :::*                  
LISTEN     0      128                                                               :::30807                                                                         :::*                  
LISTEN     0      100                                                              ::1:25                                                                            :::*     
[root@master helm]# helm serve    #简单的helm自带的仓库服务
Regenerating index. This may take a moment.
Now serving you on 127.0.0.1:8879
[root@master helm]# helm search myapp
NAME       	CHART VERSION	APP VERSION	DESCRIPTION                            
local/myapp	0.0.1        	1.0        	A Helm chart for Kubernetes myapp chart
[root@master helm]# cd myapp
[root@master myapp]# ls
charts  Chart.yaml  templates  values.yaml
[root@master myapp]# vim templates/NOTES.txt 
[root@master myapp]# helm install --name myapp3 local/myapp
NAME:   myapp3
LAST DEPLOYED: Sun Sep 13 21:42:09 2020
NAMESPACE: default
STATUS: DEPLOYED

RESOURCES:
==> v1/Service
NAME    TYPE       CLUSTER-IP     EXTERNAL-IP  PORT(S)  AGE
myapp3  ClusterIP  10.104.121.23  <none>       80/TCP   4s

==> v1beta2/Deployment
NAME    DESIRED  CURRENT  UP-TO-DATE  AVAILABLE  AGE
myapp3  2        0        0           0          3s

==> v1/Pod(related)
NAME                     READY  STATUS   RESTARTS  AGE
myapp3-5cd87bf587-g75bw  0/1    Pending  0         2s
myapp3-5cd87bf587-njdm7  0/1    Pending  0         2s


NOTES:
1. Get the application URL by running these commands:
  export POD_NAME=$(kubectl get pods --namespace default -l "app=myapp,release=myapp3" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
[root@master helm]# helm status myapp3
LAST DEPLOYED: Sun Sep 13 21:42:09 2020
NAMESPACE: default
STATUS: DEPLOYED

RESOURCES:
==> v1/Service
NAME    TYPE       CLUSTER-IP     EXTERNAL-IP  PORT(S)  AGE
myapp3  ClusterIP  10.104.121.23  <none>       80/TCP   2m

==> v1beta2/Deployment
NAME    DESIRED  CURRENT  UP-TO-DATE  AVAILABLE  AGE
myapp3  2        2        2           0          2m

==> v1/Pod(related)
NAME                     READY  STATUS            RESTARTS  AGE
myapp3-5cd87bf587-g75bw  0/1    InvalidImageName  0         2m
myapp3-5cd87bf587-njdm7  0/1    InvalidImageName  0         2m

(注:InvalidImageName 的原因是 values.yaml 中 image.repository 误写成了 "ikubernetes/myapp:v1"(带上了 tag),而模板按 "{{ repository }}:{{ tag }}" 拼接,最终镜像名变成非法的 "ikubernetes/myapp:v1:v1";repository 应只写 "ikubernetes/myapp"。)


NOTES:
1. Get the application URL by running these commands:
  export POD_NAME=$(kubectl get pods --namespace default -l "app=myapp,release=myapp3" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
[root@master helm]# helm delete --help
[root@master helm]# helm delete --purge myapp3
[root@master helm]# helm delete --purge myapp2
[root@master helm]# helm delete --purge myapp1
[root@master helm]# helm repo add --help
[root@master helm]# helm repo list
NAME  	URL                                             
stable	https://kubernetes-charts.storage.googleapis.com
local 	http://127.0.0.1:8879/charts       
[root@master helm]# helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com
[root@master helm]# helm repo list
NAME     	URL                                                       
stable   	https://kubernetes-charts.storage.googleapis.com          
local    	http://127.0.0.1:8879/charts                              
incubator	https://kubernetes-charts-incubator.storage.googleapis.com

部署elasticsearch

下载地址:https://hub.kubeapps.com/charts/choerodon/elasticsearch

master:

[root@master helm]# helm repo add choerodon https://openchart.choerodon.com.cn/choerodon/c7n
[root@master helm]# helm fetch choerodon/elasticsearch
[root@master helm]# ls
elasticsearch-1.13.2.tgz  myapp  myapp-0.0.1.tgz  redis  redis-10.5.7.tgz  tiller-rbac.yaml  values.yaml
[root@master helm]# tar xf elasticsearch-1.13.2.tgz 
[root@master helm]# cd elasticsearch
[root@master elasticsearch]# ls
Chart.yaml  ci  OWNERS  README.md  templates  values.yaml
[root@master elasticsearch]# vim  values.yaml 
# Default values for elasticsearch.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
appVersion: "6.4.2"

## Define serviceAccount names for components. Defaults to component's fully qualified name.
##
serviceAccounts:
  client:
    create: true
    name:
  master:
    create: true
    name:
  data:
    create: true
    name:

## Specify if a Pod Security Policy for node-exporter must be created
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
  enabled: false
  annotations: {}
    ## Specify pod annotations
    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
    ##
    # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'

image:
  repository: "registry.cn-hangzhou.aliyuncs.com/choerodon-tools/elasticsearch-oss"
  tag: "6.4.2"
  pullPolicy: "IfNotPresent"
  # If specified, use these secrets to access the image
  # pullSecrets:
  #   - registry-secret

initImage:
  repository: "registry.cn-hangzhou.aliyuncs.com/choerodon-tools/busyboxy"
  tag: "latest"
  pullPolicy: "Always"

cluster:
  name: "elasticsearch"
  # If you want X-Pack installed, switch to an image that includes it, enable this option and toggle the features you want
  # enabled in the environment variables outlined in the README
  xpackEnable: false
  # Some settings must be placed in a keystore, so they need to be mounted in from a secret.
  # Use this setting to specify the name of the secret
  # keystoreSecret: eskeystore
  config: {}
  # Custom parameters, as string, to be added to ES_JAVA_OPTS environment variable
  additionalJavaOpts: ""
  env:
    # IMPORTANT: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#minimum_master_nodes
    # To prevent data loss, it is vital to configure the discovery.zen.minimum_master_nodes setting so that each master-eligible
    # node knows the minimum number of master-eligible nodes that must be visible in order to form a cluster.
    MINIMUM_MASTER_NODES: "2"

client:
  name: client
  replicas: 2
  serviceType: ClusterIP
  loadBalancerIP: {}
  loadBalancerSourceRanges: {}
## (dict) If specified, apply these annotations to the client service
#  serviceAnnotations:
#    example: client-svc-foo
  heapSize: "512m"
  antiAffinity: "soft"
  nodeAffinity: {}
  nodeSelector: {}
  tolerations: []
  resources:
    limits:
      cpu: "1"
      # memory: "1024Mi"
    requests:
      cpu: "25m"
      memory: "512Mi"
  priorityClassName: ""
  ## (dict) If specified, apply these annotations to each client Pod
  # podAnnotations:
  #   example: client-foo
  podDisruptionBudget:
    enabled: false
    minAvailable: 1
    # maxUnavailable: 1

logs:
  parser: docker

master:
  name: master
  exposeHttp: false
  replicas: 3
  heapSize: "512m"
  persistence:
    enabled: false
    accessMode: ReadWriteOnce
    name: data
    size: "4Gi"
    # storageClass: "ssd"
  antiAffinity: "soft"
  nodeAffinity: {}
  nodeSelector: {}
  tolerations: []
  resources:
    limits:
      cpu: "1"
      # memory: "1024Mi"
    requests:
      cpu: "25m"
      memory: "512Mi"
  priorityClassName: ""
  ## (dict) If specified, apply these annotations to each master Pod
  # podAnnotations:
  #   example: master-foo
  podDisruptionBudget:
    enabled: false
    minAvailable: 2  # Same as `cluster.env.MINIMUM_MASTER_NODES`
    # maxUnavailable: 1
  updateStrategy:
    type: OnDelete

data:
  name: data
  exposeHttp: false
  replicas: 2
  heapSize: "1536m"
  persistence:
    enabled: false
    accessMode: ReadWriteOnce
    name: data
    size: "30Gi"
    # storageClass: "ssd"
  terminationGracePeriodSeconds: 3600
  antiAffinity: "soft"
  nodeAffinity: {}
  nodeSelector: {}
  tolerations: []
  resources:
    limits:
      cpu: "1"
      # memory: "2048Mi"
    requests:
      cpu: "25m"
      memory: "1536Mi"
  priorityClassName: ""
  ## (dict) If specified, apply these annotations to each data Pod
  # podAnnotations:
  #   example: data-foo
  podDisruptionBudget:
    enabled: false
    # minAvailable: 1
    maxUnavailable: 1
  updateStrategy:
    type: OnDelete
  hooks:  # post-start and pre-stop hooks
    drain:  # drain the node before stopping it and re-integrate it into the cluster after start
      enabled: true

## Additional init containers
extraInitContainers: |
[root@master elasticsearch]# kubectl top nodes
NAME               CPU(cores)   CPU%      MEMORY(bytes)   MEMORY%   
master.smoke.com   253m         12%       1238Mi          71%       
node01.smoke.com   179m         8%        766Mi           44%       
node02.smoke.com   130m         6%        743Mi           43%      
[root@master elasticsearch]# kubectl create namespace logs
[root@master elasticsearch]# kubectl create namespace efk
[root@master elasticsearch]# helm install --name els1 --namespace=efk -f values.yaml choerodon/elasticsearch
[root@master elasticsearch]# kubectl get pods -n efk 
NAME                                  READY     STATUS    RESTARTS   AGE
els1-elasticsearch-59fffdbfdd-dvnv8   0/1       Running   1          27m
els1-elasticsearch-59fffdbfdd-h6kwb   0/1       Running   1          27m
els1-elasticsearch-data-0             1/1       Running   0          27m
els1-elasticsearch-data-1             1/1       Running   0          5m
els1-elasticsearch-master-0           1/1       Running   1          27m
els1-elasticsearch-master-1           1/1       Running   0          11m
els1-elasticsearch-master-2           1/1       Running   1          11m
[root@master elasticsearch]# helm list
NAME  	REVISION	UPDATED                 	STATUS  	CHART               	NAMESPACE
els1  	1       	Mon Sep 14 22:50:26 2020	DEPLOYED	elasticsearch-1.13.2	efk      
redis1	1       	Mon Sep  7 22:25:42 2020	DEPLOYED	redis-10.5.7        	default  
[root@master elasticsearch]# helm delete --purge redis1
[root@master elasticsearch]# helm list
NAME	REVISION	UPDATED                 	STATUS  	CHART               	NAMESPACE
els1	1       	Mon Sep 14 22:50:26 2020	DEPLOYED	elasticsearch-1.13.2	efk      
[root@master elasticsearch]# kubectl top nodes
NAME               CPU(cores)   CPU%      MEMORY(bytes)   MEMORY%   
master.smoke.com   273m         13%       1236Mi          71%       
node01.smoke.com   215m         10%       3016Mi          82%       
node02.smoke.com   210m         10%       2897Mi          78%       
[root@master elasticsearch]# helm status els1
els1-elasticsearch.efk.svc
[root@master elasticsearch]# kubectl run cirros-$RANDOM --rm -it --image=cirros -- /bin/sh
/ # nslookup els1-elasticsearch.efk.svc
Server:		10.96.0.10
Address:	10.96.0.10:53

Name:        els1-elasticsearch.efk.svc
Address 1:  10.102.177.112
/ # curl els1-elasticsearch.efk.svc:9200
{
  "name" : "els1-elasticsearch-59fffdbfdd-45qjj",
  "cluster_name" : "elasticsearch",
  "cluster_uuid" : "_na_",
  "version" : {
    "number" : "6.4.2",
    "build_flavor" : "oss",
    "build_type" : "tar",
    "build_hash" : "04711c2",
    "build_date" : "2018-09-26T13:34:09.098244Z",
    "build_snapshot" : false,
    "lucene_version" : "7.4.0",
    "minimum_wire_compatibility_version" : "5.6.0",
    "minimum_index_compatibility_version" : "5.0.0"
  },
  "tagline" : "You Know, for Search"
}
/ # curl els1-elasticsearch.efk.svc:9200/_cat/
=^.^=
/_cat/allocation
/_cat/shards
/_cat/shards/{index}
/_cat/master
/_cat/nodes
/_cat/tasks
/_cat/indices
/_cat/indices/{index}
/_cat/segments
/_cat/segments/{index}
/_cat/count
/_cat/count/{index}
/_cat/recovery
/_cat/recovery/{index}
/_cat/health
/_cat/pending_tasks
/_cat/aliases
/_cat/aliases/{alias}
/_cat/thread_pool
/_cat/thread_pool/{thread_pools}
/_cat/plugins
/_cat/fielddata
/_cat/fielddata/{fields}
/_cat/nodeattrs
/_cat/repositories
/_cat/snapshots/{repository}
/_cat/templates
/ # curl els1-elasticsearch.efk.svc:9200/_cat/nodes
10.244.2.115 21 97 16 0.60 2.21 1.31 mi - els1-elasticsearch-master-2
10.244.2.116 22 97 14 0.60 2.21 1.31 mi * els1-elasticsearch-master-1
10.244.1.169 21 97 10 0.67 3.19 1.87 i  - els1-elasticsearch-59fffdbfdd-45qjj
10.244.2.117 21 97 10 0.60 2.21 1.31 i  - els1-elasticsearch-59fffdbfdd-lx4g2
10.244.1.171 20 97 16 0.67 3.19 1.87 mi - els1-elasticsearch-master-0
/ # curl els1-elasticsearch.efk.svc:9200/_cat/indices

部署fluentd-elasticsearch

下载地址:https://hub.kubeapps.com/charts/stakater/fluentd-elasticsearch

master:

[root@master elasticsearch]# cd ..
[root@master helm]# helm repo add stakater https://stakater.github.io/stakater-charts
[root@master helm]# helm fetch stakater/fluentd-elasticsearch --version 1.5.0
[root@master helm]# tar xf fluentd-elasticsearch-1.5.0.tgz
[root@master helm]# cd fluentd-elasticsearch
[root@master fluentd-elasticsearch]# vim values.yaml 
image:
  repository: gcr.io/google-containers/fluentd-elasticsearch
## Specify an imagePullPolicy (Required)
## It's recommended to change this to 'Always' if the image tag is 'latest'
## ref: http://kubernetes.io/docs/user-guide/images/#updating-images
  tag: v2.3.1
  pullPolicy: IfNotPresent

## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 500Mi
  # requests:
  #   cpu: 100m
  #   memory: 200Mi

elasticsearch:
  host: 'els1-elasticsearch.efk.svc'
  port: 9200
  buffer_chunk_limit: 2M
  buffer_queue_limit: 8
  logstash_prefix: 'logstash'

rbac:
  create: true

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

## Specify if a Pod Security Policy for node-exporter must be created
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
  enabled: false
  annotations: {}
    ## Specify pod annotations
    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
    ##
    # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'

livenessProbe:
  enabled: true

annotations:
  prometheus.io/scrape: "true"
  prometheus.io/port: "24231"

tolerations:
  - key: node-role.kubernetes.io/master
    operator: Exists
    effect: NoSchedule

nodeSelector: {}

service:
  type: ClusterIP
  ports:
    - name: "monitor-agent"
      port: 24231

configMaps:
  system.conf: |-
    <system>
      root_dir /tmp/fluentd-buffers/
    </system>
  containers.input.conf: |-
    # This configuration file for Fluentd / td-agent is used
    # to watch changes to Docker log files. The kubelet creates symlinks that
    # capture the pod name, namespace, container name & Docker container ID
    # to the docker logs for pods in the /var/log/containers directory on the host.
    # If running this fluentd configuration in a Docker container, the /var/log
    # directory should be mounted in the container.
    #
    # These logs are then submitted to Elasticsearch which assumes the
    # installation of the fluent-plugin-elasticsearch & the
    # fluent-plugin-kubernetes_metadata_filter plugins.
    # See https://github.com/uken/fluent-plugin-elasticsearch &
    # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
    # more information about the plugins.
    #
    # Example
    # =======
    # A line in the Docker log file might look like this JSON:
    #
    # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
    #  "stream":"stderr",
    #   "time":"2014-09-25T21:15:03.499185026Z"}
    #
    # The time_format specification below makes sure we properly
    # parse the time format produced by Docker. This will be
    # submitted to Elasticsearch and should appear like:
    # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
    # ...
    # {
    #      "_index" : "logstash-2014.09.25",
    #      "_type" : "fluentd",
    #      "_id" : "VBrbor2QTuGpsQyTCdfzqA",
    #      "_score" : 1.0,
    #      "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
    #                 "stream":"stderr","tag":"docker.container.all",
    #                 "@timestamp":"2014-09-25T22:45:50+00:00"}
    #    },
    # ...
    #
    # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
    # record & add labels to the log record if properly configured. This enables users
    # to filter & search logs on any metadata.
    # For example a Docker container's logs might be in the directory:
    #
    #  /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
    #
    # and in the file:
    #
    #  997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    #
    # where 997599971ee6... is the Docker ID of the running container.
    # The Kubernetes kubelet makes a symbolic link to this file on the host machine
    # in the /var/log/containers directory which includes the pod name and the Kubernetes
    # container name:
    #
    #    synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #    ->
    #    /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    #
    # The /var/log directory on the host is mapped to the /var/log directory in the container
    # running this instance of Fluentd and we end up collecting the file:
    #
    #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # This results in the tag:
    #
    #  var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
    # which are added to the log message as a kubernetes field object & the Docker container ID
    # is also added under the docker field object.
    # The final tag is:
    #
    #   kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # And the final log record look like:
    #
    # {
    #   "log":"2014/09/25 21:15:03 Got request with path wombat\n",
    #   "stream":"stderr",
    #   "time":"2014-09-25T21:15:03.499185026Z",
    #   "kubernetes": {
    #     "namespace": "default",
    #     "pod_name": "synthetic-logger-0.25lps-pod",
    #     "container_name": "synth-lgr"
    #   },
    #   "docker": {
    #     "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
    #   }
    # }
    #
    # This makes it easier for users to search for logs by pod name or by
    # the name of the Kubernetes container regardless of how many times the
    # Kubernetes pod has been restarted (resulting in several Docker container IDs).
    # Json Log Example:
    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
    # CRI Log Example:
    # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
    <source>
      @id fluentd-containers.log
      @type tail
      path /var/log/containers/*.log
      pos_file /var/log/fluentd-containers.log.pos
      time_format %Y-%m-%dT%H:%M:%S.%NZ
      tag raw.kubernetes.*
      format json
      read_from_head true
    </source>
    # Detect exceptions in the log output and forward them as one log entry.
    <match raw.kubernetes.**>
      @id raw.kubernetes
      @type detect_exceptions
      remove_tag_prefix raw
      message log
      stream stream
      multiline_flush_interval 5
      max_bytes 500000
      max_lines 1000
    </match>
  system.input.conf: |-
    # Example:
    # 2015-12-21 23:17:22,066 [salt.state       ][INFO    ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
    <source>
      @id minion
      @type tail
      format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
      time_format %Y-%m-%d %H:%M:%S
      path /var/log/salt/minion
      pos_file /var/log/salt.pos
      tag salt
    </source>
    # Example:
    # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
    <source>
      @id startupscript.log
      @type tail
      format syslog
      path /var/log/startupscript.log
      pos_file /var/log/startupscript.log.pos
      tag startupscript
    </source>
    # Examples:
    # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
    # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
    <source>
      @id docker.log
      @type tail
      format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=(?<status_code>\d+))?/
      path /var/log/docker.log
      pos_file /var/log/docker.log.pos
      tag docker
    </source>
    # Example:
    # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
    <source>
      @id etcd.log
      @type tail
      # Not parsing this, because it doesn't have anything particularly useful to
      # parse out of it (like severities).
      format none
      path /var/log/etcd.log
      pos_file /var/log/etcd.log.pos
      tag etcd
    </source>
    # Multi-line parsing is required for all the kube logs because very large log
    # statements, such as those that include entire object bodies, get split into
    # multiple lines by glog.
    # Example:
    # I0204 07:32:30.020537    3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
    <source>
      @id kubelet.log
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kubelet.log
      pos_file /var/log/kubelet.log.pos
      tag kubelet
    </source>
    # Example:
    # I1118 21:26:53.975789       6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
    <source>
      @id kube-proxy.log
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-proxy.log
      pos_file /var/log/kube-proxy.log.pos
      tag kube-proxy
    </source>
    # Example:
    # I0204 07:00:19.604280       5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
    <source>
      @id kube-apiserver.log
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-apiserver.log
      pos_file /var/log/kube-apiserver.log.pos
      tag kube-apiserver
    </source>
    # Example:
    # I0204 06:55:31.872680       5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
    <source>
      @id kube-controller-manager.log
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-controller-manager.log
      pos_file /var/log/kube-controller-manager.log.pos
      tag kube-controller-manager
    </source>
    # Example:
    # W0204 06:49:18.239674       7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
    <source>
      @id kube-scheduler.log
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-scheduler.log
      pos_file /var/log/kube-scheduler.log.pos
      tag kube-scheduler
    </source>
    # Example:
    # I1104 10:36:20.242766       5 rescheduler.go:73] Running Rescheduler
    <source>
      @id rescheduler.log
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/rescheduler.log
      pos_file /var/log/rescheduler.log.pos
      tag rescheduler
    </source>
    # Example:
    # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      @id glbc.log
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/glbc.log
      pos_file /var/log/glbc.log.pos
      tag glbc
    </source>
    # Example:
    # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      @id cluster-autoscaler.log
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/cluster-autoscaler.log
      pos_file /var/log/cluster-autoscaler.log.pos
      tag cluster-autoscaler
    </source>
    # Logs from systemd-journal for interesting services.
    <source>
      @id journald-docker
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "docker.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-docker.pos
      </storage>
      read_from_head true
      tag docker
    </source>
    <source>
      @id journald-kubelet
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-kubelet.pos
      </storage>
      read_from_head true
      tag kubelet
    </source>
    <source>
      @id journald-node-problem-detector
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-node-problem-detector.pos
      </storage>
      read_from_head true
      tag node-problem-detector
    </source>
  forward.input.conf: |-
    # Takes the messages sent over TCP
    <source>
      @type forward
    </source>
  monitoring.conf: |-
    # Prometheus Exporter Plugin
    # input plugin that exports metrics
    <source>
      @type prometheus
    </source>
    <source>
      @type monitor_agent
    </source>
    # input plugin that collects metrics from MonitorAgent
    <source>
      @type prometheus_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>
    # input plugin that collects metrics for output plugin
    <source>
      @type prometheus_output_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>
    # input plugin that collects metrics for in_tail plugin
    <source>
      @type prometheus_tail_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>
  output.conf: |
    # Enriches records with Kubernetes metadata
    <filter kubernetes.**>
      @type kubernetes_metadata
    </filter>

    <match **>
      @id elasticsearch
      @type elasticsearch
      @log_level info
      include_tag_key true
      type_name fluentd
      host "#{ENV['OUTPUT_HOST']}"
      port "#{ENV['OUTPUT_PORT']}"
      logstash_format true
      logstash_prefix "#{ENV['LOGSTASH_PREFIX']}"
      <buffer>
        @type file
        path /var/log/fluentd-buffers/kubernetes.system.buffer
        flush_mode interval
        retry_type exponential_backoff
        flush_thread_count 2
        flush_interval 5s
        retry_forever
        retry_max_interval 30
        chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
        queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
        overflow_action block
      </buffer>
    </match>
# Specify custom resources to create in the form of an array
# Values will be mapped to customResources.yaml
# Example
# customResources:
# - apiVersion: custom-version
#   kind: custom-kind
#   any other property
#
# - apiVersion: custom-version
#   kind: custom-kind
#   any other property
customResources: []
daemonset:
  annotations: {}
  env: []
  securityContext: {}
  updateStrategy: {}
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
# extraVolumes:
#   - name: es-certs
#     secret:
#       defaultMode: 420
#       secretName: es-certs
# extraVolumeMounts:
#   - name: es-certs
#     mountPath: /certs
#     readOnly: true
[root@master fluentd-elasticsearch]# helm install --name flu1 --namespace=efk -f values.yaml stakater/fluentd-elasticsearch --version 1.5.0
[root@master fluentd-elasticsearch]# helm list
NAME	REVISION	UPDATED                 	STATUS  	CHART                      	NAMESPACE
els1	1       	Tue Sep 22 21:45:36 2020	DEPLOYED	elasticsearch-1.13.2       	efk      
flu1	1       	Tue Sep 22 22:18:15 2020	DEPLOYED	fluentd-elasticsearch-1.5.0	efk      
[root@master fluentd-elasticsearch]# kubectl get pods -n efk    #busybox pod上面查看产生的日志
NAME                                  READY     STATUS    RESTARTS   AGE
els1-elasticsearch-59fffdbfdd-45qjj   1/1       Running   4          57m
els1-elasticsearch-59fffdbfdd-lx4g2   1/1       Running   3          57m
els1-elasticsearch-data-0             1/1       Running   1          53m
els1-elasticsearch-data-1             1/1       Running   2          56m
els1-elasticsearch-master-0           1/1       Running   2          57m
els1-elasticsearch-master-1           1/1       Running   1          47m
els1-elasticsearch-master-2           1/1       Running   1          54m
flu1-fluentd-elasticsearch-hdtq4      1/1       Running   0          8m
flu1-fluentd-elasticsearch-vbl9b      1/1       Running   6          8m
flu1-fluentd-elasticsearch-z67qj      1/1       Running   0          8m

部署kibana

下载地址:https://hub.kubeapps.com/charts/stable/kibana/0.17.0(注意:kibana镜像版本必须和elasticsearch相同)

master:

[root@master fluentd-elasticsearch]# cd ..
[root@master helm]# helm fetch stable/kibana --version 0.17.0
[root@master helm]# tar xf kibana-0.17.0.tgz 
[root@master helm]# cd kibana
[root@master kibana]# helm status els1
els1-elasticsearch.efk.svc
[root@master kibana]# vim values.yaml 
image:
  repository: "docker.elastic.co/kibana/kibana-oss"
  tag: "6.4.2"
  pullPolicy: "IfNotPresent"

commandline:
  args: []

env: {}
  # All Kibana configuration options are adjustable via env vars.
  # To adjust a config option to an env var uppercase + replace `.` with `_`
  # Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
  #
  # ELASTICSEARCH_URL: http://elasticsearch-client:9200
  # SERVER_PORT: 5601
  # LOGGING_VERBOSE: "true"
  # SERVER_DEFAULTROUTE: "/app/kibana"

files:
  kibana.yml:
    ## Default Kibana configuration from kibana-docker.
    server.name: kibana
    server.host: "0"
    elasticsearch.url: http://els1-elasticsearch.efk.svc:9200

    ## Custom config properties below
    ## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
    # server.port: 5601
    # logging.verbose: "true"
    # server.defaultRoute: "/app/kibana"

service:
  type: NodePort
  externalPort: 443
  internalPort: 5601
  # authProxyPort: 5602 To be used with authProxyEnabled and a proxy extraContainer
  ## External IP addresses of service
  ## Default: nil
  ##
  # externalIPs:
  # - 192.168.0.1
  #
  ## LoadBalancer IP if service.type is LoadBalancer
  ## Default: nil
  ##
  # loadBalancerIP: 10.2.2.2
  annotations: {}
    # Annotation example: setup ssl with aws cert when service.type is LoadBalancer
    # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
  labels: {}
    ## Label example: show service URL in `kubectl cluster-info`
    # kubernetes.io/cluster-service: "true"
  ## Limit load balancer source ips to list of CIDRs (where available)
  # loadBalancerSourceRanges: []

ingress:
  enabled: false
  # hosts:
    # - chart-example.local
  # annotations:
  #   kubernetes.io/ingress.class: nginx
  #   kubernetes.io/tls-acme: "true"
  # tls:
    # - secretName: chart-example-tls
    #   hosts:
    #     - chart-example.local

serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  # If set and create is false, the service account must be existing
  name:

livenessProbe:
  enabled: false
  initialDelaySeconds: 30
  timeoutSeconds: 10

readinessProbe:
  enabled: false
  initialDelaySeconds: 30
  timeoutSeconds: 10

# Enable an authproxy. Specify container in extraContainers
authProxyEnabled: false

extraContainers: |
# - name: proxy
#   image: quay.io/gambol99/keycloak-proxy:latest
#   args:
#     - --resource=uri=/*
#     - --discovery-url=https://discovery-url
#     - --client-id=client
#     - --client-secret=secret
#     - --listen=0.0.0.0:5602
#     - --upstream-url=http://127.0.0.1:5601
#   ports:
#     - name: web
#       containerPort: 9090
resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 300Mi
  # requests:
  #   cpu: 100m
  #   memory: 300Mi

priorityClassName: ""

# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
# affinity: {}

# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []

# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}

podAnnotations: {}
replicaCount: 1
revisionHistoryLimit: 3

# To export a dashboard from a running Kibana 6.3.x use:
# curl --user <username>:<password> -XGET https://kibana.yourdomain.com:5601/api/kibana/dashboards/export?dashboard=<some-dashboard-uuid> > my-dashboard.json
# A dashboard is defined by a name and a string with the json payload or the download url
dashboardImport:
  timeout: 60
  xpackauth:
    enabled: false
    username: myuser
    password: mypass
  dashboards: {}
    # k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json

# List of plugins to install using initContainer
plugins:
  # set to true to enable plugins installation
  enabled: false
  # set to true to remove all kibana plugins before installation
  reset: false
  # Use <plugin_name,version,url> to add/upgrade plugin
  values:
    # - elastalert-kibana-plugin,1.0.1,https://github.com/bitsensor/elastalert-kibana-plugin/releases/download/1.0.1/elastalert-kibana-plugin-1.0.1-6.4.2.zip
    # - logtrail,0.1.30,https://github.com/sivasamyk/logtrail/releases/download/v0.1.30/logtrail-6.4.2-0.1.30.zip
    # - other_plugin
[root@master kibana]# kubectl top nodes
NAME               CPU(cores)   CPU%      MEMORY(bytes)   MEMORY%   
master.smoke.com   251m         12%       1131Mi          30%       
node01.smoke.com   429m         21%       2875Mi          78%       
node02.smoke.com   163m         8%        2909Mi          79%       
[root@master kibana]# helm install --name kib1 --namespace=efk -f values.yaml stable/kibana --version 0.17.0
==> v1/Service
NAME         TYPE      CLUSTER-IP     EXTERNAL-IP  PORT(S)        AGE
kib1-kibana  NodePort  10.97.122.105  <none>       443:30227/TCP  0s
[root@master kibana]# kubectl get pods -n efk
NAME                                  READY     STATUS    RESTARTS   AGE
els1-elasticsearch-59fffdbfdd-45qjj   1/1       Running   4          1d
els1-elasticsearch-59fffdbfdd-lx4g2   1/1       Running   3          1d
els1-elasticsearch-data-0             1/1       Running   1          1d
els1-elasticsearch-data-1             1/1       Running   2          1d
els1-elasticsearch-master-0           1/1       Running   2          1d
els1-elasticsearch-master-1           1/1       Running   1          1d
els1-elasticsearch-master-2           1/1       Running   1          1d
flu1-fluentd-elasticsearch-hdtq4      1/1       Running   1          1d
flu1-fluentd-elasticsearch-vbl9b      1/1       Running   6          1d
flu1-fluentd-elasticsearch-z67qj      1/1       Running   0          1d
kib1-kibana-68f8f7cbb5-9smlv          1/1       Running   0          23s
[root@master kibana]# kubectl top nodes
NAME               CPU(cores)   CPU%      MEMORY(bytes)   MEMORY%   
master.smoke.com   243m         12%       1406Mi          38%       
node01.smoke.com   463m         23%       2781Mi          75%       
node02.smoke.com   217m         10%       2922Mi          79%     
[root@master kibana]# kubectl get svc -n efk
NAME                           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
els1-elasticsearch             ClusterIP   10.111.87.200   <none>        9200/TCP        1d
els1-elasticsearch-discovery   ClusterIP   None            <none>        9300/TCP        1d
flu1-fluentd-elasticsearch     ClusterIP   10.103.94.114   <none>        24231/TCP       1d
kib1-kibana                    NodePort    10.97.122.105   <none>        443:30227/TCP   6m

通过宿主机浏览器访问http://172.20.0.66:30227

点击Management -- Index Patterns,搜索logstash,点击Next step;

选择@timestamp,点击Create index pattern;

点击Discover,这里展示所有pod的日志;

可以点击Visualize -- Create a visualization创建一些图形,点击Pie

点击logstash*  -- Split Slices,  Aggregation选择Terms, Field选择kubernetes.pod_name.keyword, Size选择15个,点击播放