Kubernetes Operator [ELK Operator] - 2024.01.08 - Lab Verified
Operator Overview
An Operator is an enhanced controller: it extends the Kubernetes API and uses that extension to manage complex applications.
An Operator is extension software for Kubernetes. It leverages custom resource types to enhance the automated management of an application and its components, thereby extending the cluster's behavior.
It manages an application and its components through custom resources (defined via CRDs).
It treats the application as a single object and provides application-oriented automated operations such as deployment, configuration, upgrade, backup, failover, and disaster recovery.
# Official site
https://operatorhub.io/
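The pattern in practice: you declare the desired state of an application in a custom resource, and the Operator's controller continuously reconciles the actual state toward it. A minimal sketch using a hypothetical resource kind (example.com/MyDatabase is invented here purely for illustration; the real Elasticsearch resource later in this document works the same way):
kubectl apply -f - <<'EOF'
apiVersion: example.com/v1    # hypothetical API group registered by an Operator's CRD
kind: MyDatabase              # hypothetical kind, for illustration only
metadata:
  name: demo
spec:
  version: "1.0"              # desired state; the Operator reconciles actual state toward it
  replicas: 3
EOF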
1. Deploy the ELK Operator
#ECK
#https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-deploy-eck.html
#1. Install custom resource definitions:
kubectl create -f https://download.elastic.co/downloads/eck/2.10.0/crds.yaml
customresourcedefinition.apiextensions.k8s.io/agents.agent.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/apmservers.apm.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/beats.beat.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/elasticmapsservers.maps.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/elasticsearchautoscalers.autoscaling.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/elasticsearches.elasticsearch.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/enterprisesearches.enterprisesearch.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/kibanas.kibana.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/logstashes.logstash.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/stackconfigpolicies.stackconfigpolicy.k8s.elastic.co created
[root@k8s-master01 elk-operator]# kubectl get crd
NAME                                                   CREATED AT
addresspools.metallb.io                                2024-01-06T11:54:56Z
agents.agent.k8s.elastic.co                            2024-01-08T04:00:46Z
apmservers.apm.k8s.elastic.co                          2024-01-08T04:00:46Z
beats.beat.k8s.elastic.co                              2024-01-08T04:00:46Z
bfdprofiles.metallb.io                                 2024-01-06T11:54:56Z
bgpadvertisements.metallb.io                           2024-01-06T11:54:56Z
bgpconfigurations.crd.projectcalico.org                2024-01-05T04:21:29Z
bgppeers.crd.projectcalico.org                         2024-01-05T04:21:29Z
bgppeers.metallb.io                                    2024-01-06T11:54:56Z
blockaffinities.crd.projectcalico.org                  2024-01-05T04:21:29Z
blockdeviceclaims.openebs.io                           2024-01-07T02:23:35Z
blockdevices.openebs.io                                2024-01-07T02:23:35Z
clusterinformations.crd.projectcalico.org              2024-01-05T04:21:29Z
communities.metallb.io                                 2024-01-06T11:54:56Z
elasticmapsservers.maps.k8s.elastic.co                 2024-01-08T04:00:46Z
elasticsearchautoscalers.autoscaling.k8s.elastic.co    2024-01-08T04:00:46Z
elasticsearches.elasticsearch.k8s.elastic.co           2024-01-08T04:00:46Z
enterprisesearches.enterprisesearch.k8s.elastic.co     2024-01-08T04:00:46Z
felixconfigurations.crd.projectcalico.org              2024-01-05T04:21:29Z
globalnetworkpolicies.crd.projectcalico.org            2024-01-05T04:21:29Z
globalnetworksets.crd.projectcalico.org                2024-01-05T04:21:29Z
hostendpoints.crd.projectcalico.org                    2024-01-05T04:21:29Z
ipaddresspools.metallb.io                              2024-01-06T11:54:56Z
ipamblocks.crd.projectcalico.org                       2024-01-05T04:21:29Z
ipamconfigs.crd.projectcalico.org                      2024-01-05T04:21:29Z
ipamhandles.crd.projectcalico.org                      2024-01-05T04:21:29Z
ippools.crd.projectcalico.org                          2024-01-05T04:21:29Z
jivavolumepolicies.openebs.io                          2024-01-07T03:03:08Z
jivavolumes.openebs.io                                 2024-01-07T03:03:08Z
kibanas.kibana.k8s.elastic.co                          2024-01-08T04:00:46Z
kubecontrollersconfigurations.crd.projectcalico.org    2024-01-05T04:21:29Z
l2advertisements.metallb.io                            2024-01-06T11:54:56Z
logstashes.logstash.k8s.elastic.co                     2024-01-08T04:00:46Z
networkpolicies.crd.projectcalico.org                  2024-01-05T04:21:29Z
networksets.crd.projectcalico.org                      2024-01-05T04:21:29Z
stackconfigpolicies.stackconfigpolicy.k8s.elastic.co   2024-01-08T04:00:46Z
upgradetasks.openebs.io                                2024-01-07T03:03:09Z
# The ELK CRDs now show up among the cluster's API resources:
[root@k8s-master01 elk-operator]# kubectl api-resources
endpointslices                        discovery.k8s.io/v1                    true    EndpointSlice
elasticsearches               es      elasticsearch.k8s.elastic.co/v1        true    Elasticsearch
enterprisesearches            ent     enterprisesearch.k8s.elastic.co/v1     true    EnterpriseSearch
events                        ev      events.k8s.io/v1                       true    Event
flowschemas                           flowcontrol.apiserver.k8s.io/v1beta3   false   FlowSchema
prioritylevelconfigurations           flowcontrol.apiserver.k8s.io/v1beta3   false   PriorityLevelConfiguration
kibanas                       kb      kibana.k8s.elastic.co/v1               true    Kibana
logstashes                    ls      logstash.k8s.elastic.co/v1alpha1       true    Logstash
elasticmapsservers            ems     maps.k8s.elastic.co/v1alpha1           true    ElasticMapsServer
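Once the CRDs are registered, kubectl can also introspect their schemas directly. A quick sanity check (assuming the CRDs ship structural schemas, which the ECK CRDs do):
kubectl explain elasticsearch.spec
kubectl explain elasticsearch.spec.nodeSets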
#2. Install the operator with its RBAC rules:
kubectl apply -f https://download.elastic.co/downloads/eck/2.10.0/operator.yaml
namespace/elastic-system created
serviceaccount/elastic-operator created
secret/elastic-webhook-server-cert created
configmap/elastic-operator created
clusterrole.rbac.authorization.k8s.io/elastic-operator created
clusterrole.rbac.authorization.k8s.io/elastic-operator-view created
clusterrole.rbac.authorization.k8s.io/elastic-operator-edit created
clusterrolebinding.rbac.authorization.k8s.io/elastic-operator created
service/elastic-webhook-server created
statefulset.apps/elastic-operator created
validatingwebhookconfiguration.admissionregistration.k8s.io/elastic-webhook.k8s.elastic.co created
# The operator image may not be directly pullable from here. You can pull it via Google Cloud, sync it to Aliyun, then pull it locally and re-tag it, as sketched below.
#docker tag docker.elastic.co/eck/eck-operator:2.10.0 registry.cn-hangzhou.aliyuncs.com/birkhoff/eck-operator:2.10.0
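The full mirror-and-retag workflow for the operator image, following the same pattern used for the Filebeat and Kibana images later in this document (the Aliyun repository is this lab's own mirror, adjust to yours; the node-side steps assume a containerd runtime):
# On a host that can reach docker.elastic.co (e.g., a Google Cloud VM):
#docker pull docker.elastic.co/eck/eck-operator:2.10.0
#docker push registry.cn-hangzhou.aliyuncs.com/birkhoff/eck-operator:2.10.0
# On each Kubernetes node:
#crictl pull registry.cn-hangzhou.aliyuncs.com/birkhoff/eck-operator:2.10.0
#ctr -n k8s.io images tag registry.cn-hangzhou.aliyuncs.com/birkhoff/eck-operator:2.10.0 docker.elastic.co/eck/eck-operator:2.10.0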
[root@k8s-master01 elk-operator]# kubectl get pods -n elastic-system
NAME                 READY   STATUS    RESTARTS   AGE
elastic-operator-0   1/1     Running   0          13s
#3. Monitor the operator logs:
kubectl -n elastic-system logs -f statefulset.apps/elastic-operator
2. Deploy the Elasticsearch Resource
# Pull via Google Cloud -> push to Aliyun -> pull locally -> re-tag: docker.elastic.co/elasticsearch/elasticsearch:8.11.3
#docker pull docker.elastic.co/elasticsearch/elasticsearch:8.11.3
#docker tag docker.elastic.co/elasticsearch/elasticsearch:8.11.3 registry.cn-hangzhou.aliyuncs.com/birkhoff/elasticsearch:8.11.3
#docker push registry.cn-hangzhou.aliyuncs.com/birkhoff/elasticsearch:8.11.3
[root@k8s-master01 elk-operator]# vim 01-elasticsearch-myes-cluster.yaml
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: myes
  namespace: elastic-system
spec:
  version: 8.11.3
  nodeSets:
  - name: default
    count: 3
    config:
      node.store.allow_mmap: false
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 5Gi
        #storageClassName: nfs-csi
        storageClassName: openebs-hostpath
[root@k8s-master01 elk-operator]# kubectl apply -f 01-elasticsearch-myes-cluster.yaml
elasticsearch.elasticsearch.k8s.elastic.co/myes created
[root@k8s-master01 elk-operator]# kubectl get pods -n elastic-system -o wide
NAME                 READY   STATUS    RESTARTS   AGE    IP              NODE         NOMINATED NODE   READINESS GATES
elastic-operator-0   1/1     Running   0          155m   172.16.58.248   k8s-node02   <none>           <none>
myes-es-default-0    1/1     Running   0          106s   172.16.58.194   k8s-node02   <none>           <none>
# Access Elasticsearch through the Service whose name is prefixed with the cluster name (e.g., myes) and suffixed with "-es-http", e.g., services/myes-es-http in the output below.
# Four Services were created:
[root@k8s-master01 elk-operator]# kubectl get svc -n elastic-system | grep es
myes-es-default         ClusterIP   None             <none>   9200/TCP   2m3s
myes-es-http            ClusterIP   10.103.80.212    <none>   9200/TCP   2m4s
myes-es-internal-http   ClusterIP   10.108.105.135   <none>   9200/TCP   2m4s
myes-es-transport       ClusterIP   None             <none>   9300/TCP   2m4s
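For a quick test from the workstation, the HTTP Service can also be reached via port-forwarding (a local convenience only, not a production access path; <password> is the elastic user's password retrieved below):
kubectl -n elastic-system port-forward service/myes-es-http 9200
# in another terminal:
# curl -u "elastic:<password>" -k https://localhost:9200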
# We also need the password for accessing Elasticsearch. It is generated automatically during deployment and stored in a Secret in the same namespace.
# That Secret is named with the cluster name as prefix and "-es-elastic-user" as suffix. The command below saves the retrieved password in a variable named PASSWORD.
[root@k8s-master01 elk-operator]# kubectl get secret -n elastic-system
NAME                                 TYPE     DATA   AGE
elastic-webhook-server-cert          Opaque   2      155m
myes-es-default-es-config            Opaque   1      2m20s
myes-es-default-es-transport-certs   Opaque   3      2m20s
myes-es-elastic-user                 Opaque   1      2m21s
myes-es-file-settings                Opaque   1      2m20s
myes-es-http-ca-internal             Opaque   2      2m21s
myes-es-http-certs-internal          Opaque   3      2m20s
myes-es-http-certs-public            Opaque   2      2m20s
myes-es-internal-users               Opaque   4      2m21s
myes-es-remote-ca                    Opaque   1      2m20s
myes-es-transport-ca-internal        Opaque   2      2m20s
myes-es-transport-certs-public       Opaque   1      2m20s
myes-es-xpack-file-realm             Opaque   4      2m21s
# Retrieve the password
[root@k8s-master01 ~]# kubectl get secret myes-es-elastic-user -n elastic-system -o go-template='{{.data.elastic | base64decode}}'
Q1pjhh2FZ02fTY32c9JC184O
Or:
PASSWORD=$(kubectl get secret myes-es-elastic-user -n elastic-system -o go-template='{{.data.elastic | base64decode}}')
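With the password in PASSWORD, the cluster can also be queried straight from the master node via the Service's ClusterIP (this assumes nodes can reach Service IPs through kube-proxy, which is the normal case; -k skips verification of the self-signed certificate):
curl -u "elastic:$PASSWORD" -k "https://$(kubectl get svc myes-es-http -n elastic-system -o jsonpath='{.spec.clusterIP}'):9200"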
# We can then access the deployed Elasticsearch cluster from a client pod with commands like the following.
# The Elasticsearch banner returned looks like this:
kubectl run client-$RANDOM --image ikubernetes/admin-box:v1.2 -it --rm --restart=Never --command -- /bin/bash
root@client-10961 /# curl -u "elastic:Q1pjhh2FZ02fTY32c9JC184O" -k https://myes-es-http.elastic-system:9200
{
  "name" : "myes-es-default-0",
  "cluster_name" : "myes",
  "cluster_uuid" : "9-Uni6JDTT-5a4Ow5Da0RA",
  "version" : {
    "number" : "8.11.3",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "64cf052f3b56b1fd4449f5454cb88aca7e739d9a",
    "build_date" : "2023-12-08T11:33:53.634979452Z",
    "build_snapshot" : false,
    "lucene_version" : "9.8.0",
    "minimum_wire_compatibility_version" : "7.17.0",
    "minimum_index_compatibility_version" : "7.0.0"
  },
  "tagline" : "You Know, for Search"
}
root@client-10961 /# curl -u "elastic:Q1pjhh2FZ02fTY32c9JC184O" -k https://myes-es-http.elastic-system:9200/_cat
=^.^=
/_cat/allocation
/_cat/shards
/_cat/shards/{index}
/_cat/master
/_cat/nodes
/_cat/tasks
/_cat/indices
/_cat/indices/{index}
/_cat/segments
/_cat/segments/{index}
/_cat/count
/_cat/count/{index}
/_cat/recovery
/_cat/recovery/{index}
/_cat/health
/_cat/pending_tasks
/_cat/aliases
/_cat/aliases/{alias}
/_cat/thread_pool
/_cat/thread_pool/{thread_pools}
/_cat/plugins
/_cat/fielddata
/_cat/fielddata/{fields}
/_cat/nodeattrs
/_cat/repositories
/_cat/snapshots/{repository}
/_cat/templates
/_cat/component_templates
/_cat/ml/anomaly_detectors
/_cat/ml/anomaly_detectors/{job_id}
/_cat/ml/datafeeds
/_cat/ml/datafeeds/{datafeed_id}
/_cat/ml/trained_models
/_cat/ml/trained_models/{model_id}
/_cat/ml/data_frame/analytics
/_cat/ml/data_frame/analytics/{id}
/_cat/transforms
/_cat/transforms/{transform_id}
root@client-10961 /# curl -u "elastic:Q1pjhh2FZ02fTY32c9JC184O" -k https://myes-es-http.elastic-system:9200/_cat/health
1704696931 06:55:31 myes green 1 1 0 0 0 0 0 0 - 100.0%
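The /_cat/health columns above are unlabeled; appending ?v adds a header row, and /_cluster/health returns the same status as JSON. Run from the same client pod:
curl -u "elastic:Q1pjhh2FZ02fTY32c9JC184O" -k "https://myes-es-http.elastic-system:9200/_cat/health?v"
curl -u "elastic:Q1pjhh2FZ02fTY32c9JC184O" -k "https://myes-es-http.elastic-system:9200/_cluster/health?pretty"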
3. Deploy the Filebeat Resource
#docker.elastic.co/beats/filebeat:8.11.3
# On Google Cloud:
#docker pull docker.elastic.co/beats/filebeat:8.11.3
#docker tag docker.elastic.co/beats/filebeat:8.11.3 registry.cn-hangzhou.aliyuncs.com/birkhoff/filebeat:8.11.3
#docker push registry.cn-hangzhou.aliyuncs.com/birkhoff/filebeat:8.11.3
# On the local nodes:
#crictl pull registry.cn-hangzhou.aliyuncs.com/birkhoff/filebeat:8.11.3
#ctr -n k8s.io images tag registry.cn-hangzhou.aliyuncs.com/birkhoff/filebeat:8.11.3 docker.elastic.co/beats/filebeat:8.11.3
vim beats-filebeat.yaml
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: filebeat
  namespace: elastic-system
spec:
  type: filebeat
  version: 8.11.3
  elasticsearchRef:
    name: "myes"
  kibanaRef:
    name: "kibana"
  config:
    filebeat:
      autodiscover:
        providers:
        - type: kubernetes
          node: ${NODE_NAME}
          hints:
            enabled: true
            default_config:
              type: container
              paths:
              - /var/log/containers/*${data.kubernetes.container.id}.log
    processors:
    - add_kubernetes_metadata:
        host: ${NODE_NAME}
        matchers:
        - logs_path:
            logs_path: "/var/log/containers/"
    - drop_event.when:
        or:
        - equals:
            kubernetes.namespace: "kube-system"
        - equals:
            kubernetes.namespace: "logging"
        - equals:
            kubernetes.namespace: "ingress-nginx"
        - equals:
            kubernetes.namespace: "kube-node-lease"
        - equals:
            kubernetes.namespace: "elastic-system"
  daemonSet:
    podTemplate:
      spec:
        serviceAccountName: filebeat
        automountServiceAccountToken: true
        terminationGracePeriodSeconds: 30
        dnsPolicy: ClusterFirstWithHostNet
        hostNetwork: true # Allows to provide richer host metadata
        containers:
        - name: filebeat
          securityContext:
            runAsUser: 0
            # If using Red Hat OpenShift uncomment this:
            #privileged: true
          volumeMounts:
          - name: varlogcontainers
            mountPath: /var/log/containers
          - name: varlogpods
            mountPath: /var/log/pods
          - name: varlibdockercontainers
            mountPath: /var/lib/docker/containers
          env:
          - name: NODE_NAME
            valueFrom:
              fieldRef:
                fieldPath: spec.nodeName
        volumes:
        - name: varlogcontainers
          hostPath:
            path: /var/log/containers
        - name: varlogpods
          hostPath:
            path: /var/log/pods
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  - nodes
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: elastic-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: elastic-system
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
[root@k8s-master01 elk-operator]# kubectl apply -f beats-filebeat.yaml
[root@k8s-master01 elk-operator]# kubectl get pods -n elastic-system
NAME                           READY   STATUS             RESTARTS      AGE
elastic-operator-0             1/1     Running            0             178m
filebeat-beat-filebeat-dtz7l   0/1     CrashLoopBackOff   5 (22s ago)   3m26s
filebeat-beat-filebeat-grd5h   0/1     CrashLoopBackOff   5 (27s ago)   3m26s
filebeat-beat-filebeat-kwkrw   0/1     CrashLoopBackOff   5 (18s ago)   3m26s
myes-es-default-0              1/1     Running            0             25m
# Because the Beat references Kibana (kibanaRef) and Kibana is not running yet, Filebeat cannot reach Running; once Kibana is deployed, it starts successfully.
Events:
Type     Reason     Age                    From                Message
----     ------     ----                   ----                -------
Normal   Scheduled  3m38s                  default-scheduler   Successfully assigned elastic-system/filebeat-beat-filebeat-dtz7l to k8s-node01
Normal   Pulled     2m3s (x5 over 3m38s)   kubelet             Container image "docker.elastic.co/beats/filebeat:8.11.3" already present on machine
Normal   Created    2m3s (x5 over 3m38s)   kubelet             Created container filebeat
Normal   Started    2m3s (x5 over 3m38s)   kubelet             Started container filebeat
Warning  BackOff    77s (x10 over 3m32s)   kubelet             Back-off restarting failed container filebeat in pod filebeat-beat-filebeat-dtz7l_elastic-system(af122643-e068-4986-a2fc-a771f5844eff)
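To confirm the cause of the crash loop, inspect the log of the previously failed container (pod name taken from this run; the log should show Filebeat failing on its Kibana association):
kubectl -n elastic-system logs filebeat-beat-filebeat-dtz7l --previous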
4. Deploy the Kibana Resource
# On Google Cloud:
#docker pull docker.elastic.co/kibana/kibana:8.11.3
#docker tag docker.elastic.co/kibana/kibana:8.11.3 registry.cn-hangzhou.aliyuncs.com/birkhoff/kibana:8.11.3
#docker push registry.cn-hangzhou.aliyuncs.com/birkhoff/kibana:8.11.3
# On the local nodes:
#crictl pull registry.cn-hangzhou.aliyuncs.com/birkhoff/kibana:8.11.3
#ctr -n k8s.io images tag registry.cn-hangzhou.aliyuncs.com/birkhoff/kibana:8.11.3 docker.elastic.co/kibana/kibana:8.11.3
vim kibana-myes.yaml
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: kibana
  namespace: elastic-system
spec:
  version: 8.11.3
  count: 1
  elasticsearchRef:
    name: "myes"
  http:
    tls:
      selfSignedCertificate:
        disabled: true
    service:
      spec:
        type: LoadBalancer
---
#apiVersion: networking.k8s.io/v1
#kind: Ingress
#metadata:
#  name: kibana
#  namespace: elastic-system
#spec:
#  ingressClassName: nginx
#  rules:
#  - host: kibana.magedu.com
#    http:
#      paths:
#      - backend:
#          service:
#            name: kibana-kb-http
#            port:
#              number: 5601
#        path: /
#        pathType: Prefix
#  # tls:
#  # - hosts:
#  #   - host-name
#  #   secretName: tls-secret-name
[root@k8s-master01 elk-operator]# kubectl apply -f kibana-myes.yaml
[root@k8s-master01 elk-operator]# kubectl get pods -n elastic-system
NAME                           READY   STATUS    RESTARTS         AGE
elastic-operator-0             1/1     Running   0                3h33m
filebeat-beat-filebeat-dtz7l   1/1     Running   12 (7m2s ago)    38m
filebeat-beat-filebeat-grd5h   1/1     Running   12 (6m59s ago)   38m
filebeat-beat-filebeat-kwkrw   1/1     Running   12 (6m38s ago)   38m
kibana-kb-868bb58cbb-5lclv     1/1     Running   0                7m44s
myes-es-default-0              1/1     Running   0                60m
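ECK also reports health on the custom resources themselves, so the whole stack can be checked at a glance without inspecting individual Pods (HEALTH should read green once the associations are established):
kubectl -n elastic-system get elasticsearch,kibana,beat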
5. Test the Application
[root@k8s-master01 elk-operator]# kubectl get svc -n elastic-system
NAME                     TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)          AGE
elastic-webhook-server   ClusterIP      10.108.121.150   <none>          443/TCP          3h35m
kibana-kb-http           LoadBalancer   10.111.240.15    192.168.40.52   5601:32100/TCP   9m46s
myes-es-default          ClusterIP      None             <none>          9200/TCP         62m
myes-es-http             ClusterIP      10.103.80.212    <none>          9200/TCP         62m
myes-es-internal-http    ClusterIP      10.108.105.135   <none>          9200/TCP         62m
myes-es-transport        ClusterIP      None             <none>          9300/TCP         62m
# Open http://192.168.40.52:5601 in a browser and log in as the elastic user.
# Retrieve the password:
[root@k8s-master01 elk-operator]# kubectl get secret myes-es-elastic-user -n elastic-system -o go-template='{{.data.elastic | base64decode}}'
Q1pjhh2FZ02fTY32c9JC184O
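As an end-to-end check that logs are actually flowing, list what Filebeat has created from the client pod (in 8.x, Filebeat typically writes to a data stream whose hidden backing indices are named .ds-filebeat-*):
curl -u "elastic:Q1pjhh2FZ02fTY32c9JC184O" -k "https://myes-es-http.elastic-system:9200/_data_stream/filebeat*"
curl -u "elastic:Q1pjhh2FZ02fTY32c9JC184O" -k "https://myes-es-http.elastic-system:9200/_cat/indices?v"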