![]()
![]()
![]()
![]()
![]()
![]()
![]()
1. docker-compose.yaml
[root@k8s-master01 statsd-sink-and-prometheus]# cat docker-compose.yaml
version: '3.3'

services:
  # Front proxy: the single entry point; load-balances across the three
  # service sidecars via the `myservice` network alias.
  envoy:
    image: envoyproxy/envoy:v1.23-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./front-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.68.10
        aliases:
          - front-proxy
    expose:
      # Expose ports 80 (for general traffic) and 9901 (for the admin server)
      - "80"
      - "9901"

  service_blue:
    image: ikubernetes/servicemesh-app:latest
    volumes:
      - ./service_blue/service-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.68.11
        aliases:
          # All three services share the `myservice` alias so the front
          # proxy's strict_dns cluster resolves to every one of them.
          - myservice
          - blue
    environment:
      - SERVICE_NAME=blue
    expose:
      - "80"

  service_green:
    image: ikubernetes/servicemesh-app:latest
    volumes:
      - ./service_green/service-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.68.12
        aliases:
          - myservice
          - green
    environment:
      - SERVICE_NAME=green
    expose:
      - "80"

  service_red:
    image: ikubernetes/servicemesh-app:latest
    volumes:
      - ./service_red/service-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.68.13
        aliases:
          - myservice
          - red
    environment:
      - SERVICE_NAME=red
    expose:
      - "80"

  # Receives statsd datagrams from the Envoys (9125) and re-exposes them
  # in Prometheus format (9102).
  statsd_exporter:
    image: prom/statsd-exporter:v0.22.7
    networks:
      envoymesh:
        ipv4_address: 172.31.68.6
        aliases:
          - statsd_exporter
    ports:
      # Port mappings are quoted: an unquoted digit:digit scalar is parsed
      # as a YAML 1.1 sexagesimal integer by some loaders.
      - "9125:9125"
      - "9102:9102"

  prometheus:
    image: prom/prometheus:v2.37.0
    volumes:
      - "./prometheus/config.yaml:/etc/prometheus.yaml"
    networks:
      envoymesh:
        ipv4_address: 172.31.68.7
        aliases:
          - prometheus
    ports:
      - "9090:9090"
    command: "--config.file=/etc/prometheus.yaml"

  grafana:
    image: grafana/grafana:9.0.6
    volumes:
      - "./grafana/grafana.ini:/etc/grafana/grafana.ini"
      - "./grafana/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml"
    networks:
      envoymesh:
        ipv4_address: 172.31.68.8
        aliases:
          - grafana
    ports:
      - "3000:3000"

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.68.0/24
2. front-envoy.yaml
# Additions over a plain front-envoy.yaml: a statsd sink...
# (the legacy `config:` field and the v2 API are removed in Envoy v1.23;
# use typed_config with the v3 StatsdSink, as the service configs below do)
stats_sinks:
- name: envoy.statsd
  typed_config:
    "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink
    tcp_cluster_name: statsd_exporter
    prefix: front-envoy
# ...plus a cluster named statsd_exporter as the sink's endpoint
- name: statsd_exporter
node:
  id: front-envoy
  cluster: mycluster

admin:
  access_log_path: "/dev/null"
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9901

# Push Envoy's stats over TCP to the statsd_exporter cluster below.
# NOTE: the legacy `config:` field and the v2 xDS API were removed in
# Envoy v1.23, so the sink must use typed_config with the v3 StatsdSink.
stats_sinks:
- name: envoy.statsd
  typed_config:
    "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink
    tcp_cluster_name: statsd_exporter
    prefix: front-envoy

static_resources:
  listeners:
  - name: listener_http
    address:
      socket_address:
        address: 0.0.0.0
        port_value: 80
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          # v3 type: the v2 HttpConnectionManager type was removed in v1.23.
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          codec_type: AUTO
          stat_prefix: ingress_http
          route_config:
            name: local_route
            virtual_hosts:
            - name: vh_001
              domains: ["*"]
              routes:
              - match:
                  prefix: "/"
                route:
                  cluster: mycluster
          http_filters:
          - name: envoy.filters.http.router
            typed_config:
              "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router

  clusters:
  - name: mycluster
    connect_timeout: 0.25s
    # proto3 JSON enum names are case-sensitive: STRICT_DNS / ROUND_ROBIN.
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    # NOTE(review): upstream is the per-service Envoy sidecars (codec AUTO);
    # http2_protocol_options is deprecated in v3 in favor of
    # typed_extension_protocol_options — confirm before tightening further.
    http2_protocol_options: {}
    load_assignment:
      # Was "blue"; kept in sync with the cluster name (this field only
      # affects EDS/stat naming, not routing).
      cluster_name: mycluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: myservice
                port_value: 80
  # Endpoint for the statsd sink (raw statsd over TCP, not HTTP).
  - name: statsd_exporter
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: statsd_exporter
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: statsd_exporter
                port_value: 9125
3. service_blue/service-envoy.yaml
# Additions over a plain service-envoy.yaml: a statsd sink...
stats_sinks:
- name: envoy.statsd
  typed_config:
    "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink
    tcp_cluster_name: statsd_exporter
    prefix: service_blue
# ...plus a cluster named statsd_exporter as the sink's endpoint
- name: statsd_exporter
[root@k8s-master01 service_blue]# cat service-envoy.yaml
node:
  id: service_blue
  cluster: mycluster

# Push Envoy's stats over TCP to the statsd_exporter cluster below.
stats_sinks:
- name: envoy.statsd
  typed_config:
    "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink
    tcp_cluster_name: statsd_exporter
    prefix: service_blue

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9901

layered_runtime:
  layers:
  - name: admin
    admin_layer: {}

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: service
              domains: ["*"]
              routes:
              - match:
                  prefix: "/"
                route:
                  cluster: local_service
          http_filters:
          # Fault injection: abort roughly 10% of requests with HTTP 503.
          - name: envoy.filters.http.fault
            typed_config:
              "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
              max_active_faults: 100
              abort:
                http_status: 503
                percentage:
                  numerator: 10
                  denominator: HUNDRED
          # Spell out the v3 Router type: an empty `typed_config: {}`
          # carries no @type for Envoy to unpack.
          - name: envoy.filters.http.router
            typed_config:
              "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router

  clusters:
  - name: local_service
    connect_timeout: 0.25s
    # proto3 JSON enum names are case-sensitive; the file mixed
    # `round_robin` and `ROUND_ROBIN` — normalized to the canonical form.
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_service
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: 127.0.0.1
                port_value: 8080
  # Endpoint for the statsd sink (raw statsd over TCP, not HTTP).
  - name: statsd_exporter
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: statsd_exporter
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: statsd_exporter
                port_value: 9125
Testing
# Bring the stack up
docker-compose up
# Open the Prometheus UI
http://192.168.40.112:9090/graph
# Send traffic through the front proxy
[root@k8s-node02 statsd-sink-and-prometheus]# while true;do curl 172.31.68.10/service/colored; sleep 0.4;done
Hello from App behind Envoy (service green)! hostname: d338d26d1ec4 resolved hostname: 172.31.68.12
Hello from App behind Envoy (service green)! hostname: d338d26d1ec4 resolved hostname: 172.31.68.12
Hello from App behind Envoy (service blue)! hostname: 68d84a6897cb resolved hostname: 172.31.68.11
Hello from App behind Envoy (service blue)! hostname: 68d84a6897cb resolved hostname: 172.31.68.11
Hello from App behind Envoy (service red)! hostname: c689a0fb46c9 resolved hostname: 172.31.68.13
Hello from App behind Envoy (service red)! hostname: c689a0fb46c9 resolved hostname: 172.31.68.13
Hello from App behind Envoy (service blue)! hostname: 68d84a6897cb resolved hostname: 172.31.68.11
![]()
![]()
![]()
![]()
![]()