Deploying the mall-microservice Project on Kubernetes
1. Basic System
# Environment and dependency notes
# 1 control-plane node + 2 workers, v1.28.2, Ubuntu 22.04 LTS, containerd://1.6.28
# CNI: Cilium
# Persistent storage relies on two StorageClasses:
#   csi-driver-nfs, StorageClass name "nfs-csi";
#   OpenEBS, StorageClass name "openebs-hostpath"
# Ingress relies on the Cilium IngressClass: deploy the Cilium network plugin with its Ingress feature enabled.
root@ubuntu-k8s-master01:~# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
ubuntu-k8s-master01 Ready control-plane 25d v1.28.2 192.168.40.132 <none> Ubuntu 22.04 LTS 5.15.0-101-generic containerd://1.6.28
ubuntu-k8s-node01 Ready <none> 25d v1.28.2 192.168.40.133 <none> Ubuntu 22.04 LTS 5.15.0-101-generic containerd://1.6.28
ubuntu-k8s-node02 Ready <none> 25d v1.28.2 192.168.40.134 <none> Ubuntu 22.04 LTS 5.15.0-101-generic containerd://1.6.28
#1【Deploy 2 metrics-server replicas for high availability】
#https://github.com/BirkhoffXia/learning-k8s/blob/master/metrics-server/high-availability-1.21%2B.yaml
image: registry.k8s.io/metrics-server/metrics-server:v0.7.0   # replace this image with the mirror below
image: registry.lank8s.cn/metrics-server/metrics-server:v0.7.0
crictl pull registry.lank8s.cn/metrics-server/metrics-server:v0.7.0
# Note these two flags:
- --kubelet-preferred-address-types=InternalIP
- --kubelet-insecure-tls
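# For context, a minimal sketch of where the two flags sit in the metrics-server container spec
# (other args from the upstream manifest are elided):
      containers:
      - name: metrics-server
        image: registry.lank8s.cn/metrics-server/metrics-server:v0.7.0
        args:
        # ...other upstream args unchanged...
        - --kubelet-preferred-address-types=InternalIP   # scrape kubelets by node InternalIP
        - --kubelet-insecure-tls                         # skip kubelet serving-cert verification (lab setting)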
kubectl get pods -n kube-system | grep metrics
metrics-server-5d666b8f4c-9w7vl 1/1 Running 0 33s
metrics-server-5d666b8f4c-klz9f 1/1 Running 0 33s
kubectl top nodes
kubectl top pods
#2【Deploy Kuboard v3】
# Deploy the application
#https://github.com/BirkhoffXia/learning-k8s/blob/master/Kuboard/deploy.yaml
# Deploy the Ingress; Cilium ingress can be used
#https://github.com/BirkhoffXia/learning-k8s/blob/master/Kuboard/ingress.yaml
#https://github.com/BirkhoffXia/learning-k8s/blob/master/Kuboard/ingress.yaml
kubectl apply -f deploy.yaml
kubectl get pods -n kuboard
NAME READY STATUS RESTARTS AGE
kuboard-v3-6c9f7ddd97-8bxdj 1/1 Running 0 6m9s
kubectl apply -f ingress.yaml
kubectl get ingress -A
NAMESPACE NAME CLASS HOSTS ADDRESS PORTS AGE
kube-system hubble-ui cilium hubble.sheca.com 192.168.40.51 80 24d
kuboard kuboard cilium kuboard.sheca.com 192.168.40.51 80 117s
kubectl get svc -n kuboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kuboard-v3 LoadBalancer 10.108.174.10 192.168.40.53 80:30080/TCP,10081:30081/TCP,10081:30081/UDP 5m50s
# Windows hosts file
192.168.40.51 dzzz.sheca.com hubble.sheca.com kuboard.sheca.com
# Log in with admin/Kuboard123 => add the cluster via Kubeconfig (cat ~/.kube/config)
# Access: http://kuboard.sheca.com
#3【Deploy MetalLB】
# Installation reference: https://www.cnblogs.com/birkhoffxia/articles/17949510
kubectl get pods -n metallb-system
NAME READY STATUS RESTARTS AGE
controller-786f9df989-d7gs4 1/1 Running 6 (4h26m ago) 24d
speaker-52nw4 1/1 Running 12 (4h24m ago) 24d
speaker-gjrfw 1/1 Running 11 (4h24m ago) 24d
speaker-wt6dg 1/1 Running 11 (4h26m ago) 24d
kubectl get ipaddresspool -n metallb-system
NAME AUTO ASSIGN AVOID BUGGY IPS ADDRESSES
localip-pool true true ["192.168.40.51-192.168.40.80"]
kubectl get l2advertisement -n metallb-system
NAME IPADDRESSPOOLS IPADDRESSPOOL SELECTORS INTERFACES
localip-pool-l2a ["localip-pool"] ["ens33"]
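# The objects above correspond to CRs along these lines (a sketch reconstructed from the
# printed columns; metallb.io/v1beta1 is assumed as the API version):
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: localip-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.40.51-192.168.40.80
  autoAssign: true
  avoidBuggyIPs: true
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: localip-pool-l2a
  namespace: metallb-system
spec:
  ipAddressPools:
  - localip-pool
  interfaces:
  - ens33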
#4【Deploy OpenEBS】
# Installation reference: https://www.cnblogs.com/birkhoffxia/articles/17943626
wget https://openebs.github.io/charts/openebs-operator.yaml
kubectl apply -f openebs-operator.yaml
kubectl get pods -n openebs
NAME READY STATUS RESTARTS AGE
openebs-localpv-provisioner-6787b599b9-zkphg 1/1 Running 0 14s
openebs-ndm-cluster-exporter-7bfd5746f4-ftwpj 1/1 Running 0 14s
openebs-ndm-node-exporter-blthk 1/1 Running 0 14s
openebs-ndm-node-exporter-f9t47 1/1 Running 0 14s
openebs-ndm-nxrt2 1/1 Running 0 14s
openebs-ndm-operator-845b8858db-xnr6p 1/1 Running 0 14s
openebs-ndm-r6tnk 1/1 Running 0 14s
kubectl get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
ceph-storage-class-dzzz-xks kubernetes.io/rbd Delete Immediate false 7d3h
nfs-csi nfs.csi.k8s.io Delete Immediate false 3d23h
openebs-device openebs.io/local Delete WaitForFirstConsumer false 58s
openebs-hostpath openebs.io/local Delete WaitForFirstConsumer false 58s
#5【NFS CSI driver for Kubernetes - csi-driver-nfs】
# Reference: the "stateful Redis backup/restore with csi-nfs" section of https://www.cnblogs.com/birkhoffxia/articles/18087317
kubectl get sc | grep nfs-csi
nfs-csi nfs.csi.k8s.io Delete Immediate false 3d23h
root@ubuntu-k8s-master01:~# kubectl get pods -n kube-system | grep csi
csi-nfs-controller-558cff4c87-sgfmz 4/4 Running 8 (22h ago) 4d17h
csi-nfs-node-5sd4q 3/3 Running 6 (22h ago) 4d17h
csi-nfs-node-79kjj 3/3 Running 6 (22h ago) 4d17h
csi-nfs-node-qkjvq 3/3 Running 6 (22h ago) 4d17h
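# For reference, a StorageClass matching the listing above might look like this
# (a sketch; the NFS server address and export path are placeholders, not taken from this environment):
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 192.168.40.1        # placeholder NFS server address
  share: /data/nfs            # placeholder export path
reclaimPolicy: Delete
volumeBindingMode: Immediate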
2. Prometheus Monitoring System
# prometheus-server: resource manifests for deploying the Prometheus server.
# prometheus-adapter: resource manifests for the Prometheus-based custom metrics API server.
# podinfo: Deployment and Service manifests for the podinfo test application.
# node_exporter: deploys node_exporter on every cluster node.
# kube-state-metrics: aggregates Kubernetes resource objects and exposes them as metrics.
# alertmanager: deploys the Alertmanager alerting system.
# grafana: deploys the Grafana dashboard.
#1【Deploy the Prometheus server】
#https://github.com/BirkhoffXia/learning-k8s/tree/master/Mall-MicroService/infra-services-with-prometheus/00-Prometheus
kubectl apply -f namespace.yaml
kubectl apply -f prometheus-server/ -n prom
kubectl get pods -n prom
NAME READY STATUS RESTARTS AGE
prometheus-server-5974664c5d-kzx9h 1/1 Running 0 9m18s
# Manifest descriptions
prometheus-cfg.yaml: enables prometheus.rules, points to the Alertmanager address, and defines the static scrape jobs (kube-state-metrics, node-exporter, kubernetes-apiservers, kubernetes-nodes, kubernetes-pods, kubernetes-cadvisor, kubernetes-service-endpoints)
prometheus-deploy.yaml: emptyDir volume, image prom/prometheus:v2.50.1
prometheus-ingress.yaml: Cilium ingress
prometheus-rbac.yaml: RBAC configuration
prometheus-rules.yaml: alerting rules
prometheus-svc.yaml: NodePort 30090 => 9090
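# As an illustration, the annotation-driven kubernetes-pods job in prometheus-cfg.yaml
# typically follows the canonical pattern below (a sketch; the actual file may differ):
- job_name: 'kubernetes-pods'
  kubernetes_sd_configs:
  - role: pod                  # discover all pods via the Kubernetes API
  relabel_configs:
  - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
    action: keep               # only scrape pods annotated prometheus.io/scrape: "true"
    regex: true
  - source_labels: [__meta_kubernetes_namespace]
    action: replace
    target_label: kubernetes_namespace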
# hosts entry
192.168.40.51 dzzz.sheca.com hubble.sheca.com kuboard.sheca.com prom.magedu.com
# Access: http://prom.magedu.com
#2【Deploy node-exporter】
#https://github.com/BirkhoffXia/learning-k8s/tree/master/Mall-MicroService/infra-services-with-prometheus/00-Prometheus/node_exporter
kubectl apply -f node_exporter/
kubectl get pods -n prom
NAME READY STATUS RESTARTS AGE
kube-state-metrics-7bfbbcdbb7-wcrkl 1/1 Running 0 11m
prometheus-node-exporter-4x2cv 1/1 Running 0 2m13s
prometheus-node-exporter-mdksf 1/1 Running 0 2m13s
prometheus-server-5974664c5d-kzx9h 1/1 Running 0 26m
# Manifest descriptions
node-exporter-ds.yaml: DaemonSet, image prom/node-exporter:v1.7.0, port 9100, toleration for the NoSchedule taint with key node-role.kubernetes.io/master
node-exporter-svc.yaml: port 9100; with hostNetwork the pod IP equals the node IP
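# The toleration described above would look roughly like this in the DaemonSet spec
# (a sketch; the operator is assumed):
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule    # also schedule onto the tainted control-plane node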
# Verify the exporters on the nodes:
http://192.168.40.133:9100/
http://192.168.40.134:9100/
#3【Deploy kube-state-metrics】- exposes metrics about the state of the cluster's Kubernetes objects.
#https://github.com/BirkhoffXia/learning-k8s/blob/master/Mall-MicroService/infra-services-with-prometheus/00-Prometheus/kube-state-metrics/kube-state-metrics-deploy.yaml
kubectl apply -f kube-state-metrics/
kubectl get pods -n prom
NAME READY STATUS RESTARTS AGE
kube-state-metrics-7bfbbcdbb7-wcrkl 1/1 Running 0 4m6s
prometheus-server-5974664c5d-kzx9h 1/1 Running 0 19m
kube-state-metrics-deploy.yaml: image gcmirrors/kube-state-metrics:v1.9.5, port 8080
kube-state-metrics-rbac.yaml: RBAC configuration
kube-state-metrics-svc.yaml: creates the Service
#4【Deploy Alertmanager】- provides alert delivery for the Prometheus server.
#https://github.com/BirkhoffXia/learning-k8s/tree/master/Mall-MicroService/infra-services-with-prometheus/00-Prometheus/alertmanager
kubectl apply -f alertmanager/
kubectl get pods -n prom
NAME READY STATUS RESTARTS AGE
alertmanager-86d456df96-k96c6 1/1 Running 0 3m9s
kube-state-metrics-7bfbbcdbb7-wcrkl 1/1 Running 0 19m
prometheus-node-exporter-4x2cv 1/1 Running 0 11m
prometheus-node-exporter-mdksf 1/1 Running 0 11m
prometheus-server-5974664c5d-kzx9h 1/1 Running 0 35m
# Manifest descriptions
alertmanager-cfg.yaml: Alertmanager configuration (routing and receivers)
alertmanager-deployment.yaml: image prom/alertmanager:v0.27.0, emptyDir volume, port 9093
alertmanager-service.yaml: port 9093, type LoadBalancer
alertmanager-templates-cfg.yaml: notification message templates
#5【Deploy Grafana】
#https://github.com/BirkhoffXia/learning-k8s/tree/master/Mall-MicroService/infra-services-with-prometheus/00-Prometheus/grafana
01-grafana-cfg.yaml: ConfigMap provisioning the DataSource http://prometheus.prom.svc.cluster.local.:9090
02-grafana-service.yaml: NodePort 3000
03-grafana-pvc.yaml: StorageClass nfs-csi, ReadWriteMany
04-grafana-deployment.yaml: image grafana/grafana:10.2.5; an init container fixes volume permissions
05-grafana-ingress.yaml: when using Cilium ingress, set the host to grafana.magedu.com
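# For reference, the datasource provisioning inside 01-grafana-cfg.yaml presumably follows
# Grafana's standard provisioning format (a sketch; only the URL is taken from the description above):
apiVersion: 1
datasources:
- name: Prometheus
  type: prometheus
  access: proxy                                         # Grafana proxies queries server-side
  url: http://prometheus.prom.svc.cluster.local.:9090
  isDefault: true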
kubectl apply -f grafana/
kubectl get pods -n prom
NAME READY STATUS RESTARTS AGE
alertmanager-86d456df96-k96c6 1/1 Running 0 35m
grafana-69465f69d6-k4f4w 1/1 Running 0 4m58s
kube-state-metrics-7bfbbcdbb7-wcrkl 1/1 Running 0 52m
prometheus-node-exporter-4x2cv 1/1 Running 0 43m
prometheus-node-exporter-mdksf 1/1 Running 0 43m
prometheus-server-5974664c5d-kzx9h 1/1 Running 0 67m
kubectl get pvc -n prom
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
grafana-pvc Bound pvc-e9466812-893a-41e4-8581-4b544b16f681 5Gi RWX nfs-csi <unset> 5s
#hosts
192.168.40.51 dzzz.sheca.com hubble.sheca.com kuboard.sheca.com prom.magedu.com grafana.magedu.com
# Access: http://grafana.magedu.com
3. Deploy the Nacos Cluster
#https://github.com/BirkhoffXia/learning-k8s/tree/master/Mall-MicroService/infra-services-with-prometheus/01-Nacos
# Manifest descriptions
examples: sample project files
01-secrets-mysql.yaml: MySQL account credentials
02-mysql-persistent.yaml: MySQL primary/replica pods, StatefulSet, image nacos/nacos-mysql:5.7, openebs-hostpath storage, primary-replica replication
03-nacos-cfg.yaml: Nacos configuration
04-nacos-persistent.yaml: Nacos volumes
05-nacos-service.yaml: Nacos Service
06-nacos-ingress.yaml: Nacos Cilium ingress
kubectl create namespace nacos
# Deploy the MySQL primary/replica replication cluster
# Change the volumes to the nfs-csi StorageClass here
kubectl apply -f 01-secrets-mysql.yaml -f 02-mysql-persistent.yaml -n nacos
root@ubuntu-k8s-master01:~/learn/learning-k8s-master/Mall-MicroService/infra-services-with-prometheus/01-Nacos# kubectl get pods -n nacos
NAME READY STATUS RESTARTS AGE
mysql-0 3/3 Running 0 9m35s
mysql-1 3/3 Running 0 9m16s
root@ubuntu-k8s-master01:~/learn/learning-k8s-master/Mall-MicroService/infra-services-with-prometheus/01-Nacos# kubectl get pvc -n nacos
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
data-mysql-0 Bound pvc-b44dae8d-d918-4507-b733-7741494aa06e 10Gi RWX nfs-csi <unset> 9m42s
data-mysql-1 Bound pvc-20a00ba4-8ddf-4914-acd0-1e1ef61e040e 10Gi RWX nfs-csi <unset> 9m23s
Access endpoints
Reads: mysql-read.nacos.svc.cluster.local
Writes: mysql-0.mysql.nacos.svc.cluster.local
Create the user account
Create a dedicated Nacos account on MySQL. In this example, Nacos uses the username nacos and the password "magedu.com" to access the nacosdb database on the MySQL service.
kubectl exec -it mysql-0 -n nacos -- mysql -uroot -hlocalhost
Run the following SQL statement at the mysql prompt, then exit:
mysql> GRANT ALL ON nacosdb.* TO nacos@'%' IDENTIFIED BY 'magedu.com';
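# An optional sanity check of the new account (a sketch; assumes the in-pod mysql client
# and passing the password inline):
kubectl exec -it mysql-0 -n nacos -- mysql -unacos -pmagedu.com -h127.0.0.1 -e 'SHOW GRANTS FOR CURRENT_USER();'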
# Deploy Nacos
# Change the volumes to the nfs-csi StorageClass here
kubectl apply -f 03-nacos-cfg.yaml -f 04-nacos-persistent.yaml -f 05-nacos-service.yaml -n nacos
kubectl get pods -n nacos
NAME READY STATUS RESTARTS AGE
mysql-0 3/3 Running 0 30m
mysql-1 3/3 Running 0 30m
nacos-0 1/1 Running 0 19m
nacos-1 1/1 Running 0 19m
nacos-2 1/1 Running 0 19m
kubectl apply -f 06-nacos-ingress.yaml -n nacos
kubectl get ingress -n nacos
NAME CLASS HOSTS ADDRESS PORTS AGE
nacos cilium nacos.magedu.com 192.168.40.51 80 9s
# Log in to Nacos
#hosts
192.168.40.51 dzzz.sheca.com hubble.sheca.com kuboard.sheca.com prom.magedu.com grafana.magedu.com nacos.magedu.com
http://nacos.magedu.com/nacos
# Example method for importing configuration data:
curl --location --request POST 'http://nacos-0.nacos:8848/nacos/v1/cs/configs?import=true&namespace=public' \
--form 'policy=OVERWRITE' --form 'file=@"/PATH/TO/ZIP_FILE"'
kubectl get pods -n nacos -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
mysql-0 3/3 Running 0 35m 10.244.1.162 ubuntu-k8s-node01 <none> <none>
mysql-1 3/3 Running 0 35m 10.244.2.223 ubuntu-k8s-node02 <none> <none>
nacos-0 1/1 Running 0 25m 10.244.1.180 ubuntu-k8s-node01 <none> <none>
nacos-1 1/1 Running 0 25m 10.244.2.197 ubuntu-k8s-node02 <none> <none>
nacos-2 1/1 Running 0 25m 10.244.2.37 ubuntu-k8s-node02 <none> <none>
For example, the following command imports the configuration contained in the given sample file; 10.244.1.180 is the address the nacos process listens on:
curl --location -XPOST 'http://10.244.1.180:8848/nacos/v1/cs/configs?import=true&namespace=public' --form 'policy=OVERWRITE' --form 'file=@"examples/nacos_config_20231029.zip"'
{"code":200,"message":"导入成功","data":{"succCount":5,"skipCount":0}}r
4. Elasticsearch
# Deploy Elasticsearch and related components
Create the namespace
kubectl create namespace elastic
# Deploy Elasticsearch
kubectl apply -f 01-elasticsearch-cluster-persistent.yaml -n elastic
kubectl get pods -n elastic -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
es-cluster-0 2/2 Running 0 20m 10.244.2.127 ubuntu-k8s-node02 <none> <none>
es-cluster-1 2/2 Running 0 10m 10.244.1.79 ubuntu-k8s-node01 <none> <none>
es-cluster-2 2/2 Running 0 2m56s 10.244.1.207 ubuntu-k8s-node01 <none> <none>
curl 10.244.2.127:9200
{
"name" : "es-cluster-0",
"cluster_name" : "k8s-logs",
"cluster_uuid" : "ae9kXeVHQ4u08cbrUpKi9w",
"version" : {
"number" : "7.17.7",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "78dcaaa8cee33438b91eca7f5c7f56a70fec9e80",
"build_date" : "2022-10-17T15:29:54.167373105Z",
"build_snapshot" : false,
"lucene_version" : "8.11.1",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
kubectl get pvc -n elastic
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
data-es-cluster-0 Bound pvc-58d0c2b7-e02c-4b66-ba21-f9b3a4e9d6b5 3Gi RWX nfs-csi <unset> 12s
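# Optionally, confirm that all three nodes joined the cluster using the standard
# _cluster/health API against the pod IP shown above (a sketch):
curl '10.244.2.127:9200/_cluster/health?pretty'
# a healthy 3-node cluster should report "status" : "green" and "number_of_nodes" : 3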
# Once the ES pods are ready, deploy Fluent Bit (02-fluentbit.yaml)
kubectl apply -f 02-fluentbit.yaml -n elastic
kubectl get pods -n elastic
NAME READY STATUS RESTARTS AGE
es-cluster-0 2/2 Running 0 22m
es-cluster-1 2/2 Running 0 12m
es-cluster-2 2/2 Running 0 4m59s
fluent-bit-27scs 1/1 Running 0 6s
fluent-bit-6kr2g 1/1 Running 0 6s
fluent-bit-c4rzg 1/1 Running 0 6s
# Deploy Kibana - the manifest already includes an Ingress
kubectl apply -f 03-kibana.yaml -n elastic
kubectl get pods -n elastic -w
NAME READY STATUS RESTARTS AGE
es-cluster-0 2/2 Running 0 16h
es-cluster-1 2/2 Running 0 16h
es-cluster-2 2/2 Running 0 16h
fluent-bit-27scs 1/1 Running 0 16h
fluent-bit-6kr2g 1/1 Running 0 16h
fluent-bit-c4rzg 1/1 Running 0 16h
kibana-5ddc4d94fc-cb9kn 1/1 Running 0 16h
kubectl get service kibana -n elastic
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kibana LoadBalancer 10.101.201.21 192.168.40.55 5601:30493/TCP 16h
# hosts entry
192.168.40.51 dzzz.sheca.com hubble.sheca.com kuboard.sheca.com prom.magedu.com grafana.magedu.com nacos.magedu.com kibana.magedu.com
# Access: http://kibana.magedu.com
5. Redis
#https://github.com/BirkhoffXia/learning-k8s/tree/master/Mall-MicroService/infra-services-with-prometheus/03-Redis
# Deploy Redis
Redis can be deployed in a dedicated namespace or in the namespace of the target application; a dedicated namespace, redis, is used here.
First, run the following command to create the namespace:
kubectl create namespace redis
# Deploy the Redis replication cluster
# Change the volumes to the nfs-csi StorageClass
kubectl apply -f . -n redis
kubectl get pods -n redis
NAME READY STATUS RESTARTS AGE
redis-0 2/2 Running 0 17m
redis-1 2/2 Running 0 11m
redis-2 2/2 Running 0 6m20s
After deployment, the master is reachable at "redis-0.redis.redis.svc"; clients send read and write requests to this address.
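# A minimal connectivity check from a throwaway client pod (a sketch; the redis:7 client
# image is an assumption):
kubectl run redis-client --rm -it --restart=Never --image=redis:7 -n redis -- redis-cli -h redis-0.redis.redis.svc ping
# expected reply: PONG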
# Deploy Sentinel (optional)
Finally, deploy Redis Sentinel. This step is optional.
kubectl apply -f ./sentinel/ -n redis
#Grafana Dashboard
Dashboard ID: 763
6. RabbitMQ
# Deploy the RabbitMQ cluster
First, run the following command to create the namespace:
kubectl create namespace rabbit
# Then deploy the RabbitMQ cluster.
# Change the volumes to the nfs-csi StorageClass
kubectl apply -f ./ -n rabbit
kubectl get pods -n rabbit
NAME READY STATUS RESTARTS AGE
rabbitmq-0 1/1 Running 0 6m44s
rabbitmq-1 1/1 Running 0 101s
rabbitmq-2 1/1 Running 0 60s
kubectl get pvc -n rabbit
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
data-rabbitmq-0 Bound pvc-bc4203ca-89ff-4716-a7c7-bcf14baf5761 3Gi RWX nfs-csi <unset> 7s
#hosts
192.168.40.51 dzzz.sheca.com hubble.sheca.com kuboard.sheca.com prom.magedu.com grafana.magedu.com nacos.magedu.com kibana.magedu.com rabbitmq.magedu.com
A URL like the following can be used to reach RabbitMQ's built-in management web UI: http://rabbitmq.magedu.com
The default username/password is "admin/magedu.com".
To serve mall-microservice, create a new user malladmin/magedu.com and a new vhost named /mall, then grant the malladmin user permissions on it; one way to do this is sketched below.
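# A sketch using rabbitmqctl inside one of the cluster pods (user, password, and vhost
# names are the ones given above):
kubectl exec -it rabbitmq-0 -n rabbit -- rabbitmqctl add_user malladmin magedu.com
kubectl exec -it rabbitmq-0 -n rabbit -- rabbitmqctl add_vhost /mall
kubectl exec -it rabbitmq-0 -n rabbit -- rabbitmqctl set_permissions -p /mall malladmin ".*" ".*" ".*"
kubectl exec -it rabbitmq-0 -n rabbit -- rabbitmqctl set_user_tags malladmin administrator   # allows management-UI login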
7. MongoDB
#https://github.com/BirkhoffXia/learning-k8s/tree/master/Mall-MicroService/infra-services-with-prometheus/05-MongoDB
# Deploy a MongoDB replica set cluster
# Change the StorageClass to nfs-csi
# Deploy
kubectl create namespace mongo
kubectl apply -f . -n mongo
kubectl get pvc -n mongo
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
data-mongodb-0 Bound pvc-a507bd28-caa8-488a-a4ef-ac36cdca8fc4 5Gi RWX nfs-csi <unset> 18s
kubectl get pods -n mongo
NAME READY STATUS RESTARTS AGE
mongodb-0 2/2 Running 0 11m
mongodb-1 2/2 Running 0 4m45s
mongodb-2 2/2 Running 0 5s
Check the cluster status
kubectl exec -it mongodb-0 -n mongo -- mongo
Defaulted container "mongodb" out of: mongodb, mongodb-exporter
MongoDB shell version v5.0.24
connecting to: mongodb://127.0.0.1:27017/?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("01d2f58d-cdfb-4c27-ad56-2b5ddf58d2e8") }
MongoDB server version: 5.0.24
================
Warning: the "mongo" shell has been superseded by "mongosh",
which delivers improved usability and compatibility.The "mongo" shell has been deprecated and will be removed in
an upcoming release.
For installation instructions, see
https://docs.mongodb.com/mongodb-shell/install/
================
replicaset:PRIMARY> rs.status()
{
"set" : "replicaset",
"date" : ISODate("2024-03-27T02:30:02.096Z"),
"myState" : 1,
"term" : NumberLong(2),
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"votingMembersCount" : 3,
"writableVotingMembersCount" : 3,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1711506601, 1),
"t" : NumberLong(2)
},
"lastCommittedWallTime" : ISODate("2024-03-27T02:30:01.792Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1711506601, 1),
"t" : NumberLong(2)
},
"appliedOpTime" : {
"ts" : Timestamp(1711506601, 1),
"t" : NumberLong(2)
},
"durableOpTime" : {
"ts" : Timestamp(1711506601, 1),
"t" : NumberLong(2)
},
"lastAppliedWallTime" : ISODate("2024-03-27T02:30:01.792Z"),
"lastDurableWallTime" : ISODate("2024-03-27T02:30:01.792Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1711506537, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "electionTimeout",
"lastElectionDate" : ISODate("2024-03-27T02:25:07.659Z"),
"electionTerm" : NumberLong(2),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1711506304, 5),
"t" : NumberLong(1)
},
"numVotesNeeded" : 1,
"priorityAtElection" : 5,
"electionTimeoutMillis" : NumberLong(10000),
"newTermStartDate" : ISODate("2024-03-27T02:25:07.664Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2024-03-27T02:25:07.666Z")
},
"members" : [
{
"_id" : 0,
"name" : "mongodb-0.mongodb:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 296,
"optime" : {
"ts" : Timestamp(1711506601, 1),
"t" : NumberLong(2)
},
"optimeDate" : ISODate("2024-03-27T02:30:01Z"),
"lastAppliedWallTime" : ISODate("2024-03-27T02:30:01.792Z"),
"lastDurableWallTime" : ISODate("2024-03-27T02:30:01.792Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1711506307, 1),
"electionDate" : ISODate("2024-03-27T02:25:07Z"),
"configVersion" : 7,
"configTerm" : 2,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "mongodb-2.mongodb:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 11,
"optime" : {
"ts" : Timestamp(1711506599, 1),
"t" : NumberLong(2)
},
"optimeDurable" : {
"ts" : Timestamp(1711506599, 1),
"t" : NumberLong(2)
},
"optimeDate" : ISODate("2024-03-27T02:29:59Z"),
"optimeDurableDate" : ISODate("2024-03-27T02:29:59Z"),
"lastAppliedWallTime" : ISODate("2024-03-27T02:30:01.792Z"),
"lastDurableWallTime" : ISODate("2024-03-27T02:30:01.792Z"),
"lastHeartbeat" : ISODate("2024-03-27T02:30:01.795Z"),
"lastHeartbeatRecv" : ISODate("2024-03-27T02:30:01.802Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "mongodb-0.mongodb:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 6,
"configTerm" : 2
},
{
"_id" : 2,
"name" : "mongodb-1.mongodb:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 10,
"optime" : {
"ts" : Timestamp(1711506601, 1),
"t" : NumberLong(2)
},
"optimeDurable" : {
"ts" : Timestamp(1711506601, 1),
"t" : NumberLong(2)
},
"optimeDate" : ISODate("2024-03-27T02:30:01Z"),
"optimeDurableDate" : ISODate("2024-03-27T02:30:01Z"),
"lastAppliedWallTime" : ISODate("2024-03-27T02:30:01.792Z"),
"lastDurableWallTime" : ISODate("2024-03-27T02:30:01.792Z"),
"lastHeartbeat" : ISODate("2024-03-27T02:30:01.797Z"),
"lastHeartbeatRecv" : ISODate("2024-03-27T02:30:01.803Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "mongodb-0.mongodb:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 6,
"configTerm" : 2
}
],
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1711506601, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1711506601, 1)
}
8. MinIO
# Deploy MinIO
# Depends on a StorageClass capable of dynamic PV provisioning; nfs-csi is used in this example
# Deploy
kubectl create namespace minio
kubectl apply -f ./ -n minio
kubectl get pvc -n minio
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
data-minio-0 Bound pvc-17bac251-b172-41ad-bc3b-a296215d16a4 4Gi RWX nfs-csi <unset> 6s
data-minio-1 Bound pvc-2d671f59-b230-4823-b327-3ad90499501c 4Gi RWX nfs-csi <unset> 6s
data-minio-2 Bound pvc-644e8407-33f4-4753-a727-b8361e8cb126 4Gi RWX nfs-csi <unset> 6s
data-minio-3 Bound pvc-1630c270-6168-45aa-a2a1-19410506cdfc 4Gi RWX nfs-csi <unset> 6s
kubectl get pods -n minio
NAME READY STATUS RESTARTS AGE
minio-0 1/1 Running 0 4m24s
minio-1 1/1 Running 0 4m24s
minio-2 1/1 Running 0 4m24s
minio-3 1/1 Running 0 4m24s
#hosts
192.168.40.51 dzzz.sheca.com hubble.sheca.com kuboard.sheca.com prom.magedu.com grafana.magedu.com nacos.magedu.com kibana.magedu.com rabbitmq.magedu.com minio.magedu.com
# Access the console
Access it through the host defined by the Ingress; note that HTTPS is required: https://minio.magedu.com/
The default username/password is "minioadmin/magedu.com".
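# For command-line access, an mc session might look like the following (a sketch: the alias
# and bucket names are hypothetical, and --insecure assumes a self-signed certificate):
mc alias set mall-minio https://minio.magedu.com minioadmin magedu.com --insecure
mc mb --insecure mall-minio/mall          # hypothetical bucket for the mall application
mc ls --insecure mall-minio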
9. SkyWalking
# Deploy SkyWalking OAP and its UI
kubectl create namespace tracing
Deploy the SkyWalking OAP. Note that the manifests used in the commands below depend on the Elasticsearch service deployed in the elastic namespace.
kubectl apply -f 01-skywalking-oap.yaml -n tracing
kubectl apply -f 01-skywalking-ui.yaml -n tracing
kubectl get pods -n tracing
NAME READY STATUS RESTARTS AGE
skywalking-oap-88cc84f5c-rkkps 1/1 Running 0 12m
skywalking-ui-f69f457b6-v7zgl 1/1 Running 0 12m
# Access via the Cilium Ingress
# hosts entry
192.168.40.51 dzzz.sheca.com hubble.sheca.com kuboard.sheca.com prom.magedu.com grafana.magedu.com nacos.magedu.com kibana.magedu.com rabbitmq.magedu.com minio.magedu.com skywalking.magedu.com
# Access: http://skywalking.magedu.com
10. Deploy the Mall Application
#https://gitee.com/mageedu/mall-microservice
#https://github.com/BirkhoffXia/learning-k8s/tree/master/Mall-MicroService/mall-and-skywalking
Deployment files for the mall-microservice project.
In this example, every service is configured with the SkyWalking Java agent, which sends its tracing data to the designated SkyWalking OAP service.
Infrastructure dependencies
The mall-microservice in this example depends on MySQL, Nacos, Redis, MongoDB, RabbitMQ, Elasticsearch (with the Chinese analysis plugin deployed), MinIO, and related services.
For the detailed procedures, see the deployment instructions in the infra-services or infra-services-with-prometheus directories.
# Deployment steps
kubectl create namespace mall
#https://gitee.com/mageedu/mall-microservice/blob/master/document/README.md
#【Import the database】
# Import document/sql/mall.sql into MySQL; here we directly reuse the MySQL service that Nacos depends on, mysql-0.mysql.nacos.svc.
#   mysql < document/sql/mall.sql
# Tip: remote access to MySQL requires an administrator account that is allowed to connect remotely;
# After the import completes, two accounts with access to the mall database are created:
#   malladmin: full privileges on the mall database, password magedu.com;
#   malluser: SELECT-only privileges on the mall database, password magedu.com
git clone https://gitee.com/mageedu/mall-microservice.git
kubectl get pods -n nacos -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
mysql-0 3/3 Running 0 21h 10.244.1.162 ubuntu-k8s-node01 <none> <none>
mysql-1 3/3 Running 1 (95m ago) 21h 10.244.2.223 ubuntu-k8s-node02 <none> <none>
nacos-0 1/1 Running 0 21h 10.244.1.180 ubuntu-k8s-node01 <none> <none>
nacos-1 1/1 Running 0 21h 10.244.2.197 ubuntu-k8s-node02 <none> <none>
nacos-2 1/1 Running 0 21h 10.244.2.37 ubuntu-k8s-node02 <none> <none>
root@ubuntu-k8s-master01:/testing# apt install -y mysql-client
root@ubuntu-k8s-master01:/testing# mysql -uroot -p -h10.244.1.162 < mall-microservice/document/sql/mall.sql
Enter password:
root@ubuntu-k8s-master01:/testing# mysql -uroot -p -h10.244.1.162
Enter password:
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 87219
Server version: 5.7.39-log MySQL Community Server (GPL)
Copyright (c) 2000, 2024, Oracle and/or its affiliates.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> show databases;
+------------------------+
| Database |
+------------------------+
| information_schema |
| mall |
| mysql |
| nacosdb |
| performance_schema |
| sys |
| xtrabackup_backupfiles |
+------------------------+
7 rows in set (0.01 sec)
use mall;
show tables;
kubectl apply -f ./ -n mall
kubectl get ingress -n mall
NAME CLASS HOSTS ADDRESS PORTS AGE
mall-admin-web cilium mall-admin-web.magedu.com 192.168.40.51 80 131m
mall-gateway cilium mall-gateway.magedu.com 192.168.40.51 80 131m
mall-monitor cilium mall-monitor.magedu.com 192.168.40.51 80 131m
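# Before editing hosts, the ingress VIP can be probed directly with a Host header
# (a quick sketch; the response body depends on the gateway's routes):
curl -H 'Host: mall-gateway.magedu.com' http://192.168.40.51/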
#hosts
192.168.40.51 dzzz.sheca.com hubble.sheca.com kuboard.sheca.com prom.magedu.com grafana.magedu.com nacos.magedu.com kibana.magedu.com rabbitmq.magedu.com minio.magedu.com skywalking.magedu.com mall-admin-web.magedu.com mall-gateway.magedu.com mall-monitor.magedu.com