kubectl delete pod [pod name] --force --grace-period=0 -n [namespace]
一、实验环境
k8s部署EFK-elasticsearch+fluentd+kibana
k8s安装storageclass
k8s安装storageclass
二、搭建
2.0. 创建命名空间
- kube_logging.yaml(命名空间)
# Namespace that holds every EFK component below.
apiVersion: v1
kind: Namespace
metadata:
  name: kube-logging
2.1. 搭建NFS存储
-
elasticsearch_serviceaccount.yaml
# ServiceAccount the NFS provisioner Deployment runs as
# (bound to its ClusterRole/Role in elasticsearch_rbac.yaml).
# Fixed: stray markdown "**" markers around the name corrupted the manifest.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
-
elasticsearch_rbac.yaml
点击查看代码
# Equivalent imperative command:
# kubectl create clusterrolebinding run-nfs-provisioner --clusterrole=nfs-provisioner-runner --user=nfs-provisioner
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nfs-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  # Fixed: this rule duplicated "persistentvolumes"; the provisioner needs
  # to watch/update PersistentVolumeClaims to fulfil dynamic provisioning.
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  # Fixed: group and resource were swapped ("" / "storage.k8s.io");
  # the provisioner reads StorageClasses from the storage.k8s.io group.
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    verbs: ["use"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
# Binds the nfs-provisioner ServiceAccount to the ClusterRole above.
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner  # the SA defined in elasticsearch_serviceaccount.yaml
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Leader-election lock: the provisioner records its leader via an Endpoints object.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: leader-locking-nfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: leader-locking-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-provisioner
  apiGroup: rbac.authorization.k8s.io
-
elasticsearch_nfsprovisioner.yaml(存储供应商)
# NFS client provisioner: watches for PVCs referencing this provisioner's
# StorageClass and creates PVs backed by subdirectories of the NFS export.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate  # single writer to the NFS export; never run two at once
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      # Fixed: "serviceAccount" is deprecated; use serviceAccountName.
      serviceAccountName: nfs-provisioner
      containers:
        - name: nfs-provisioner  # fixed typo: was "nfs-provisoner"
          image: vbouchaud/nfs-client-provisioner:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # Must match the "provisioner:" field of the StorageClass.
            - name: PROVISIONER_NAME
              value: example.com/nfs
            - name: NFS_SERVER
              value: "192.168.56.129"
            - name: NFS_PATH
              value: /home/xy/data/elasticsearch/v1
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.56.129
            path: /home/xy/data/elasticsearch/v1
-
elasticsearch_storageclass.yaml
# StorageClass handled by the NFS provisioner above
# ("provisioner:" matches its PROVISIONER_NAME env var).
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: do-block-storage
provisioner: example.com/nfs
2.2. 部署ElasticSearch
-
elasticsearch_service.yaml
# Headless Service (clusterIP: None) so StatefulSet pods get stable DNS names
# of the form es-cluster-N.elasticsearch.kube-logging.svc.cluster.local.
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: kube-logging
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  clusterIP: None
  ports:
    - port: 9200
      name: rest        # HTTP REST API
    - port: 9300
      name: inter-node  # transport / node-to-node
-
elasticsearch_statefulset.yaml
点击查看代码
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster  # pods are named es-cluster-0, es-cluster-1, ...
  namespace: kube-logging
spec:
  serviceName: elasticsearch  # the headless Service defined above
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:7.17.1
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              cpu: 1000m
            requests:
              cpu: 100m
          ports:
            - containerPort: 9200
              name: rest
              protocol: TCP
            - containerPort: 9300
              name: inter-node
              protocol: TCP
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
          env:
            - name: cluster.name
              value: k8s-logs
            - name: node.name
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # Pattern: <pod>.<headless-service>.<namespace>.svc.cluster.local
            # Fixed: namespace was misspelled "kube-loging" — that DNS name
            # would never resolve and the node could not discover itself.
            - name: discovery.seed_hosts
              value: "es-cluster-0.elasticsearch.kube-logging.svc.cluster.local"
            - name: cluster.initial_master_nodes
              value: "es-cluster-0"
            - name: ES_JAVA_OPTS
              value: "-Xms512m -Xmx512m"
      initContainers:
        # Data dir on NFS is root-owned; chown to the elasticsearch uid/gid.
        - name: fix-permissions
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
          securityContext:
            privileged: true
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
        # Elasticsearch requires vm.max_map_count >= 262144 on the host.
        - name: increase-vm-max-map
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
        # Raise the open-file-descriptor limit.
        # Fixed typo: container name was "increate-fd-ulimit".
        - name: increase-fd-ulimit
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sh", "-c", "ulimit -n 65536"]
          securityContext:
            privileged: true
  volumeClaimTemplates:
    - metadata:
        name: data
        labels:
          app: elasticsearch
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: do-block-storage  # PVs are provisioned via this class
        resources:
          requests:
            storage: 10Gi

[root@anyu967master1 efk]# kubectl port-forward es-cluster-0 9200:9200 --namespace=kube-logging
2.3. 部署kibana
-
elasticsearch_kibana.yaml
点击查看代码
# Kibana: ConfigMap (kibana.yml) + NodePort Service + Deployment.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kibana-config
  namespace: kube-logging
  labels:
    app: kibana
data:
  kibana.yml: |
    server.name: kibana
    server.host: "0.0.0.0"
    i18n.locale: zh-CN
    # Fixed: reconstructed as the canonical dotted key; the flattened
    # "elasticsearch: hosts:" form was not a valid kibana.yml setting.
    elasticsearch.hosts: ${ELASTICSEARCH_HOSTS}
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: kube-logging
  labels:
    app: kibana
spec:
  ports:
    - port: 5601
  type: NodePort  # exposed on a node port for browser access
  selector:
    app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: kube-logging
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
        - name: kibana
          image: docker.elastic.co/kibana/kibana:7.17.1
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              cpu: 1000m
            requests:
              cpu: 1000m
          env:
            # ELASTICSEARCH_URL is the legacy (pre-7.x) variable; kept for
            # compatibility. ELASTICSEARCH_HOSTS is substituted into kibana.yml.
            - name: ELASTICSEARCH_URL
              value: http://elasticsearch.kube-logging.svc.cluster.local:9200
            - name: ELASTICSEARCH_HOSTS
              value: http://elasticsearch.kube-logging.svc.cluster.local:9200
          ports:
            - containerPort: 5601
          volumeMounts:
            - name: config
              mountPath: /usr/share/kibana/config/kibana.yml
              readOnly: true
              subPath: kibana.yml
      volumes:
        - name: config
          configMap:
            name: kibana-config
2.4. 部署Fluentd日志采集程序
-
elasticsearch_fluentd.yaml
点击查看代码
# Fluentd log collector: ServiceAccount + RBAC + DaemonSet (one pod per node).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
  namespace: kube-logging
  labels:
    app: fluentd
---
# ClusterRole: fluentd reads pod/namespace metadata to enrich log records.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fluentd
  labels:
    app: fluentd
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - namespaces
    verbs:
      - get
      - list
      - watch
---
# Binds the fluentd ServiceAccount to the ClusterRole above.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: fluentd
    namespace: kube-logging
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: kube-logging
  labels:
    app: fluentd  # fixed typo: label key was "apps", inconsistent with selector
spec:
  selector:
    matchLabels:
      app: fluentd
  template:
    metadata:
      labels:
        app: fluentd
    spec:
      # Fixed: dropped deprecated "serviceAccount" duplicate field.
      serviceAccountName: fluentd
      tolerations:
        # Also run on control-plane nodes (see: kubectl describe nodes <master> | grep Taints).
        - key: node-role.kubernetes.io/control-plane
          effect: NoSchedule
      containers:
        - name: fluentd
          image: fluentd:latest
          imagePullPolicy: IfNotPresent
          env:
            - name: FLUENT_ELASTICSEARCH_HOST
              value: "elasticsearch.kube-logging.svc.cluster.local"
            - name: FLUENT_ELASTICSEARCH_PORT
              value: "9200"
            - name: FLUENT_ELASTICSEARCH_SCHEME
              value: "http"
            - name: FLUENT_ELASTICSEARCH_CONF
              value: disable
          resources:
            limits:
              memory: 512Mi
            requests:
              cpu: 100m
              memory: 200Mi
          volumeMounts:
            - name: varlog
              mountPath: /var/log
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
        - name: varlog
          hostPath:
            path: /var/log
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
本文来自博客园,作者:anyu967,转载请注明原文链接:https://www.cnblogs.com/anyu967/articles/17343378.html
浙公网安备 33010602011771号