Cluster-level circuit breaker configuration example:
1. README.md
[root@k8s-master01 circuit-breaker]# cat README.md
# Circuit Breaker Demo
### Environment
Eleven services:
- envoy: Front Proxy, at 172.31.35.2
- webserver01: the first backend service
- webserver01-sidecar: Sidecar Proxy for the first backend service, at 172.31.35.11, aliased as red and webservice1
- webserver02: the second backend service
- webserver02-sidecar: Sidecar Proxy for the second backend service, at 172.31.35.12, aliased as blue and webservice1
- webserver03: the third backend service
- webserver03-sidecar: Sidecar Proxy for the third backend service, at 172.31.35.13, aliased as green and webservice1
- webserver04: the fourth backend service
- webserver04-sidecar: Sidecar Proxy for the fourth backend service, at 172.31.35.14, aliased as gray and webservice2
- webserver05: the fifth backend service
- webserver05-sidecar: Sidecar Proxy for the fifth backend service, at 172.31.35.15, aliased as black and webservice2
### Run and Test
1. Start
```
docker-compose up
```
2. Test
```
# Drive requests through webcluster1 with send-requests.sh; some requests
# come back with 5xx response codes, which is the circuit breaker rejecting
# them (see the stats check after this list).
./send-requests.sh http://172.31.35.2/ 300
```
3. Clean up
```
docker-compose down
```
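The 5xx responses in the test step come from the front proxy's circuit breaker, not from the backends themselves. A quick way to confirm that is to read the overflow counters on Envoy's admin interface; a minimal sketch, assuming the front proxy's admin listener is reachable at 172.31.35.2:9901 as configured in front-envoy.yaml below:

```
# Overflow counters increment each time the circuit breaker rejects a
# connection, a pending request, or a retry for webcluster1.
curl -s 172.31.35.2:9901/stats | grep -E \
  'webcluster1.*(upstream_cx_overflow|upstream_rq_pending_overflow|upstream_rq_retry_overflow)'
```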
2. docker-compose.yaml
[root@k8s-master01 circuit-breaker]# cat docker-compose.yaml
version: '3'
services:
  front-envoy:
    #image: envoyproxy/envoy-alpine:v1.21-latest
    image: envoyproxy/envoy:v1.23-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./front-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      - envoymesh
    expose:
      # Expose ports 80 (for general traffic) and 9901 (for the admin server)
      - "80"
      - "9901"
  webserver01-sidecar:
    image: envoyproxy/envoy:v1.23-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: red
    networks:
      envoymesh:
        ipv4_address: 172.31.35.11
        aliases:
          - webservice1
          - red
  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01-sidecar"
    depends_on:
      - webserver01-sidecar
  webserver02-sidecar:
    image: envoyproxy/envoy:v1.23-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: blue
    networks:
      envoymesh:
        ipv4_address: 172.31.35.12
        aliases:
          - webservice1
          - blue
  webserver02:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02-sidecar"
    depends_on:
      - webserver02-sidecar
  webserver03-sidecar:
    image: envoyproxy/envoy:v1.23-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: green
    networks:
      envoymesh:
        ipv4_address: 172.31.35.13
        aliases:
          - webservice1
          - green
  webserver03:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver03-sidecar"
    depends_on:
      - webserver03-sidecar
  webserver04-sidecar:
    image: envoyproxy/envoy:v1.23-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: gray
    networks:
      envoymesh:
        ipv4_address: 172.31.35.14
        aliases:
          - webservice2
          - gray
  webserver04:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver04-sidecar"
    depends_on:
      - webserver04-sidecar
  webserver05-sidecar:
    image: envoyproxy/envoy:v1.23-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: black
    networks:
      envoymesh:
        ipv4_address: 172.31.35.15
        aliases:
          - webservice2
          - black
  webserver05:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver05-sidecar"
    depends_on:
      - webserver05-sidecar
networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.35.0/24
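Because all three webservice1 sidecars register the same network alias, the front proxy's STRICT_DNS cluster resolves the single name webservice1 to all three endpoint IPs. A minimal check from inside the compose network; the project-prefixed network name circuit-breaker_envoymesh is an assumption (Compose derives it from the directory name):

```
# Resolve the shared alias from a throwaway container on the demo network;
# expect 172.31.35.11, .12 and .13 in the answer.
docker run --rm --network circuit-breaker_envoymesh busybox:latest nslookup webservice1
```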
3. front-envoy.yaml

The two key excerpts from this file: webcluster1 is protected by cluster-level circuit-breaker thresholds, while webcluster2 uses outlier detection instead:

circuit_breakers:
  thresholds:
  - max_connections: 1
    max_pending_requests: 1
    max_retries: 3

outlier_detection:
  interval: "1s"
  consecutive_5xx: "3"
  consecutive_gateway_failure: "3"
  base_ejection_time: "10s"
  enforcing_consecutive_gateway_failure: "100"
  max_ejection_percent: "30"
  success_rate_minimum_hosts: "2"
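With max_connections and max_pending_requests both at 1, even mild concurrency trips the breaker. Envoy exposes gauges that read 1 while a given limit is open; a minimal sketch against the admin interface, reusing the address from the test step:

```
# cx_open, rq_pending_open and rq_retry_open stay at 1 while the
# corresponding circuit breaker for webcluster1 is tripped.
watch -n1 "curl -s 172.31.35.2:9901/stats | grep 'webcluster1.circuit_breakers.default'"
```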
# Note: because the image envoyproxy/envoy:v1.23-latest is used, the router
# filter is declared with an explicit typed_config:
#  http_filters:
#  - name: envoy.filters.http.router
#    typed_config:
#      "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
[root@k8s-master01 circuit-breaker]# cat front-envoy.yaml
admin:
  access_log_path: "/dev/null"
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    name: listener_http
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          codec_type: auto
          stat_prefix: ingress_http
          route_config:
            name: local_route
            virtual_hosts:
            - name: backend
              domains:
              - "*"
              routes:
              - match:
                  prefix: "/livez"
                route:
                  cluster: webcluster2
              - match:
                  prefix: "/"
                route:
                  cluster: webcluster1
          http_filters:
          - name: envoy.filters.http.router
            typed_config:
              "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router

  clusters:
  - name: webcluster1
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: webcluster1
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: webservice1
                port_value: 80
    circuit_breakers:
      thresholds:
      - max_connections: 1
        max_pending_requests: 1
        max_retries: 3

  - name: webcluster2
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: webcluster2
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: webservice2
                port_value: 80
    outlier_detection:
      interval: "1s"
      consecutive_5xx: "3"
      consecutive_gateway_failure: "3"
      base_ejection_time: "10s"
      enforcing_consecutive_gateway_failure: "100"
      max_ejection_percent: "30"
      success_rate_minimum_hosts: "2"
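For webcluster2 the protection is outlier detection instead: on every 1s sweep, any host with 3 consecutive 5xx (or gateway) failures is ejected for a base of 10s, with at most 30% of hosts ejected at once; enforcing_consecutive_gateway_failure: 100 applies the gateway-failure rule to 100% of detections. Ejection activity is visible in the cluster's outlier_detection stats; a minimal sketch, again assuming the admin address above:

```
# ejections_enforced_total counts hosts that were actually ejected;
# ejections_active is the number ejected right now.
curl -s 172.31.35.2:9901/stats | grep 'webcluster2.outlier_detection'
```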
4. envoy-sidecar-proxy.yaml

Each sidecar caps its local cluster with the same style of thresholds, here with max_retries set to 2 instead of 3:

circuit_breakers:
  thresholds:
  - max_connections: 1
    max_pending_requests: 1
    max_retries: 2
[root@k8s-master01 circuit-breaker]# cat envoy-sidecar-proxy.yaml
admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9901

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router
            typed_config:
              "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router

  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }
    circuit_breakers:
      thresholds:
      - max_connections: 1
        max_pending_requests: 1
        max_retries: 2
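Each sidecar listens on port 80 and forwards to the demoapp instance sharing its network namespace on 127.0.0.1:8080, so any backend can also be exercised directly, bypassing the front proxy; a minimal check:

```
# Talk to the first sidecar directly; the body is whatever the demoapp
# instance behind it serves at /.
curl -s http://172.31.35.11/
```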
5. send-requests.sh
[root@k8s-master01 circuit-breaker]# cat send-requests.sh
#!/bin/bash
#
if [ $# -ne 2 ]; then
  echo "USAGE: $0 <URL> <COUNT>"
  exit 1
fi

URL=$1
COUNT=$2
c=1
#interval="0.2"

while [[ ${c} -le ${COUNT} ]]; do
  #echo "Sending GET request: ${URL}"
  curl -o /dev/null -w '%{http_code}\n' -s ${URL} &
  (( c++ ))
  #sleep ${interval}
done
wait
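Since every curl is launched in the background, the script fires all requests almost concurrently, which is exactly what pushes webcluster1 past its limits of one connection and one pending request. To summarize a run instead of scrolling through it, the output can be piped through sort and uniq; a small sketch:

```
# Count how many requests returned each status code.
./send-requests.sh http://172.31.35.2/ 300 | sort | uniq -c
```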
6. Test
# Drive requests through webcluster1 with send-requests.sh; some of the
# responses come back 5xx, which is the circuit breaker rejecting them:
[root@k8s-master01 circuit-breaker]# ./send-requests.sh http://172.31.35.2/ 300
503
200
200
200
... (a long run of 200 responses omitted) ...
200
503
503
200
503
200
# Inspect the clusters via the admin interface; the default_priority limits
# reflect the configured thresholds:
# webcluster1::default_priority::max_connections::1
# webcluster1::default_priority::max_pending_requests::1
# webcluster1::default_priority::max_requests::1024
# webcluster1::default_priority::max_retries::3
# The per-endpoint counters break the traffic down into errors and successes:
#webcluster1::172.31.35.13:80::cx_active::4
#webcluster1::172.31.35.13:80::cx_connect_fail::0
#webcluster1::172.31.35.13:80::cx_total::4
#webcluster1::172.31.35.13:80::rq_active::0
#webcluster1::172.31.35.13:80::rq_error::8
#webcluster1::172.31.35.13:80::rq_success::82
#webcluster1::172.31.35.13:80::rq_timeout::0
#webcluster1::172.31.35.13:80::rq_total::90
[root@k8s-master01 circuit-breaker]# curl 172.31.35.2:9901/clusters
webcluster1::observability_name::webcluster1
webcluster1::default_priority::max_connections::1
webcluster1::default_priority::max_pending_requests::1
webcluster1::default_priority::max_requests::1024
webcluster1::default_priority::max_retries::3
webcluster1::high_priority::max_connections::1024
webcluster1::high_priority::max_pending_requests::1024
webcluster1::high_priority::max_requests::1024
webcluster1::high_priority::max_retries::3
webcluster1::added_via_api::false
webcluster1::172.31.35.11:80::cx_active::4
webcluster1::172.31.35.11:80::cx_connect_fail::0
webcluster1::172.31.35.11:80::cx_total::4
webcluster1::172.31.35.11:80::rq_active::0
webcluster1::172.31.35.11:80::rq_error::8
webcluster1::172.31.35.11:80::rq_success::82
webcluster1::172.31.35.11:80::rq_timeout::0
webcluster1::172.31.35.11:80::rq_total::90
webcluster1::172.31.35.11:80::hostname::webservice1
webcluster1::172.31.35.11:80::health_flags::healthy
webcluster1::172.31.35.11:80::weight::1
webcluster1::172.31.35.11:80::region::
webcluster1::172.31.35.11:80::zone::
webcluster1::172.31.35.11:80::sub_zone::
webcluster1::172.31.35.11:80::canary::false
webcluster1::172.31.35.11:80::priority::0
webcluster1::172.31.35.11:80::success_rate::-1
webcluster1::172.31.35.11:80::local_origin_success_rate::-1
webcluster1::172.31.35.13:80::cx_active::4
webcluster1::172.31.35.13:80::cx_connect_fail::0
webcluster1::172.31.35.13:80::cx_total::4
webcluster1::172.31.35.13:80::rq_active::0
webcluster1::172.31.35.13:80::rq_error::8
webcluster1::172.31.35.13:80::rq_success::82
webcluster1::172.31.35.13:80::rq_timeout::0
webcluster1::172.31.35.13:80::rq_total::90
webcluster1::172.31.35.13:80::hostname::webservice1
webcluster1::172.31.35.13:80::health_flags::healthy
webcluster1::172.31.35.13:80::weight::1
webcluster1::172.31.35.13:80::region::
webcluster1::172.31.35.13:80::zone::
webcluster1::172.31.35.13:80::sub_zone::
webcluster1::172.31.35.13:80::canary::false
webcluster1::172.31.35.13:80::priority::0
webcluster1::172.31.35.13:80::success_rate::-1
webcluster1::172.31.35.13:80::local_origin_success_rate::-1
webcluster1::172.31.35.12:80::cx_active::4
webcluster1::172.31.35.12:80::cx_connect_fail::0
webcluster1::172.31.35.12:80::cx_total::4
webcluster1::172.31.35.12:80::rq_active::0
webcluster1::172.31.35.12:80::rq_error::10
webcluster1::172.31.35.12:80::rq_success::84
webcluster1::172.31.35.12:80::rq_timeout::0
webcluster1::172.31.35.12:80::rq_total::94
webcluster1::172.31.35.12:80::hostname::webservice1
webcluster1::172.31.35.12:80::health_flags::healthy
webcluster1::172.31.35.12:80::weight::1
webcluster1::172.31.35.12:80::region::
webcluster1::172.31.35.12:80::zone::
webcluster1::172.31.35.12:80::sub_zone::
webcluster1::172.31.35.12:80::canary::false
webcluster1::172.31.35.12:80::priority::0
webcluster1::172.31.35.12:80::success_rate::-1
webcluster1::172.31.35.12:80::local_origin_success_rate::-1
webcluster2::observability_name::webcluster2
webcluster2::outlier::success_rate_average::-1
webcluster2::outlier::success_rate_ejection_threshold::-1
webcluster2::outlier::local_origin_success_rate_average::-1
webcluster2::outlier::local_origin_success_rate_ejection_threshold::-1
webcluster2::default_priority::max_connections::1024
webcluster2::default_priority::max_pending_requests::1024
webcluster2::default_priority::max_requests::1024
webcluster2::default_priority::max_retries::3
webcluster2::high_priority::max_connections::1024
webcluster2::high_priority::max_pending_requests::1024
webcluster2::high_priority::max_requests::1024
webcluster2::high_priority::max_retries::3
webcluster2::added_via_api::false
webcluster2::172.31.35.15:80::cx_active::0
webcluster2::172.31.35.15:80::cx_connect_fail::0
webcluster2::172.31.35.15:80::cx_total::0
webcluster2::172.31.35.15:80::rq_active::0
webcluster2::172.31.35.15:80::rq_error::0
webcluster2::172.31.35.15:80::rq_success::0
webcluster2::172.31.35.15:80::rq_timeout::0
webcluster2::172.31.35.15:80::rq_total::0
webcluster2::172.31.35.15:80::hostname::webservice2
webcluster2::172.31.35.15:80::health_flags::healthy
webcluster2::172.31.35.15:80::weight::1
webcluster2::172.31.35.15:80::region::
webcluster2::172.31.35.15:80::zone::
webcluster2::172.31.35.15:80::sub_zone::
webcluster2::172.31.35.15:80::canary::false
webcluster2::172.31.35.15:80::priority::0
webcluster2::172.31.35.15:80::success_rate::-1
webcluster2::172.31.35.15:80::local_origin_success_rate::-1
webcluster2::172.31.35.14:80::cx_active::0
webcluster2::172.31.35.14:80::cx_connect_fail::0
webcluster2::172.31.35.14:80::cx_total::0
webcluster2::172.31.35.14:80::rq_active::0
webcluster2::172.31.35.14:80::rq_error::0
webcluster2::172.31.35.14:80::rq_success::0
webcluster2::172.31.35.14:80::rq_timeout::0
webcluster2::172.31.35.14:80::rq_total::0
webcluster2::172.31.35.14:80::hostname::webservice2
webcluster2::172.31.35.14:80::health_flags::healthy
webcluster2::172.31.35.14:80::weight::1
webcluster2::172.31.35.14:80::region::
webcluster2::172.31.35.14:80::zone::
webcluster2::172.31.35.14:80::sub_zone::
webcluster2::172.31.35.14:80::canary::false
webcluster2::172.31.35.14:80::priority::0
webcluster2::172.31.35.14:80::success_rate::-1
webcluster2::172.31.35.14:80::local_origin_success_rate::-1
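Note that webcluster2 saw no traffic in this run (rq_total::0 on both hosts), so its outlier detector never had anything to eject. One way to exercise it, assuming demoapp's /livez endpoint accepts a POST of livez=FAIL as in other iKubernetes demos (an assumption, not shown in this section), is to force one backend to start failing and then drive traffic at /livez:

```
# Make webserver04 fail its /livez endpoint (assumed demoapp behavior),
# then send traffic so the front proxy ejects the failing host.
curl -s -X POST -d 'livez=FAIL' http://172.31.35.14/livez
./send-requests.sh http://172.31.35.2/livez 300
curl -s 172.31.35.2:9901/stats | grep 'webcluster2.outlier_detection'
```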