Envoy: Filesystem-Based Subscriptions (EDS, LDS, CDS) [Part 4]
1. Filesystem-Based Subscription

Configuration example: filesystem-based subscription (EDS)


Deployment topology (diagram omitted)

Topology in words:
The front proxy listens on port 80 and forwards, per eds.yaml, to 172.31.11.11:80 (webserver01-sidecar); that sidecar listens on 0.0.0.0:80 and proxies to its local backend at 127.0.0.1:8080.
Likewise it forwards to 172.31.11.12:80 (webserver02-sidecar), which also listens on 0.0.0.0:80 and proxies to its local backend at 127.0.0.1:8080. The request path is sketched below.
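Request path, drawn from the description above:

client --> front-envoy 172.31.11.2:80 (endpoints from eds.yaml)
             |--> webserver01-sidecar 172.31.11.11:80 --> 127.0.0.1:8080 (webserver01)
             |--> webserver02-sidecar 172.31.11.12:80 --> 127.0.0.1:8080 (webserver02)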
Experiment walkthrough
# The eds-filesystem demo
[root@xksmaster1 eds-filesystem]# ll
total 16
-rw-r--r-- 1 root root 1473 Aug 5 2022 docker-compose.yaml
drwxr-xr-x 2 root root 60 Aug 5 2022 eds.conf.d
-rw-r--r-- 1 root root 1222 Aug 5 2022 envoy-sidecar-proxy.yaml
-rw-r--r-- 1 root root 1185 Aug 5 2022 front-envoy.yaml
-rw-r--r-- 1 root root 1097 Aug 5 2022 README.md
# README
[root@xksmaster1 eds-filesystem]# cat README.md
# EDS Filesystem demo
### Environment

Five services:
- envoy: the front proxy, at 172.31.11.2
- webserver01: the first backend service
- webserver01-sidecar: the sidecar proxy for the first backend, at 172.31.11.11
- webserver02: the second backend service
- webserver02-sidecar: the sidecar proxy for the second backend, at 172.31.11.12

### Run and test

1. Start:
docker-compose up
2. Test:
# Inspect the endpoints in the cluster
curl 172.31.11.2:9901/clusters
# Open a shell in the front-proxy envoy container and overwrite eds.yaml so
# that the second endpoint is added to the file
docker exec -it eds-filesystem_envoy_1 /bin/sh
cd /etc/envoy/eds.conf.d/
cat eds.yaml.v2 > eds.yaml
# Run the command below to force-touch the file so Envoy's inode-based
# watch mechanism is triggered
mv eds.yaml temp && mv temp eds.yaml
# Inspect the endpoints in the cluster again
curl 172.31.11.2:9901/clusters
3. Tear down:
docker-compose down
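Why the rename trick works: Envoy's filesystem config source watches the file for move events (the pattern produced by tools that write files atomically), not for in-place writes, so renaming a fully written file onto eds.yaml is the reliable way to publish an update. A minimal sketch of the same update done atomically (eds.yaml.tmp is an arbitrary name; keeping it in the same directory ensures mv is a true rename rather than a cross-filesystem copy):

cd /etc/envoy/eds.conf.d/
# Write the complete new snapshot to a temporary file first
cat eds.yaml.v2 > eds.yaml.tmp
# Rename it over eds.yaml; the move event triggers Envoy's watcher
mv eds.yaml.tmp eds.yaml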
# The docker-compose file
[root@xksmaster1 eds-filesystem]# cat docker-compose.yaml
version: '3.3'
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./front-envoy.yaml:/etc/envoy/envoy.yaml
      - ./eds.conf.d/:/etc/envoy/eds.conf.d/
    networks:
      envoymesh:
        ipv4_address: 172.31.11.2
        aliases:
          - front-proxy
    depends_on:
      - webserver01-sidecar
      - webserver02-sidecar
  webserver01-sidecar:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: webserver01
    networks:
      envoymesh:
        ipv4_address: 172.31.11.11
        aliases:
          - webserver01-sidecar
  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01-sidecar"
    depends_on:
      - webserver01-sidecar
  webserver02-sidecar:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: webserver02
    networks:
      envoymesh:
        ipv4_address: 172.31.11.12
        aliases:
          - webserver02-sidecar
  webserver02:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02-sidecar"
    depends_on:
      - webserver02-sidecar
networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.11.0/24
# front-envoy.yaml: contains the bootstrap node configuration section
[root@xksmaster1 eds-filesystem]# cat front-envoy.yaml
node:
  id: envoy_front_proxy
  cluster: MageEdu_Cluster
admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9901
static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: web_service_01
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: webcluster }
          http_filters:
          - name: envoy.filters.http.router
  clusters:
  - name: webcluster
    connect_timeout: 0.25s
    type: EDS
    lb_policy: ROUND_ROBIN
    eds_cluster_config:
      service_name: webcluster
      eds_config:
        path: '/etc/envoy/eds.conf.d/eds.yaml'
#eds.yaml
[root@xksmaster1 eds.conf.d]# cat eds.yaml
resources:
- "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment
  cluster_name: webcluster
  endpoints:
  - lb_endpoints:
    - endpoint:
        address:
          socket_address:
            address: 172.31.11.11
            port_value: 80
#eds.yaml.v2
[root@xksmaster1 eds.conf.d]# cat eds.yaml.v2
version_info: '2'
resources:
- "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment
  cluster_name: webcluster
  endpoints:
  - lb_endpoints:
    - endpoint:
        address:
          socket_address:
            address: 172.31.11.11
            port_value: 80
    - endpoint:
        address:
          socket_address:
            address: 172.31.11.12
            port_value: 80
# envoy-sidecar-proxy.yaml: injected as a sidecar, sharing the same loopback (127.0.0.1) network with the application container
[root@xksmaster1 eds-filesystem]# cat envoy-sidecar-proxy.yaml
admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9901
static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router
  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }
# Test
# docker-compose up
## By default, the cluster contains only the 172.31.11.11:80 endpoint
# curl 172.31.11.2:9901/clusters
webcluster::observability_name::webcluster
webcluster::default_priority::max_connections::1024
webcluster::default_priority::max_pending_requests::1024
webcluster::default_priority::max_requests::1024
webcluster::default_priority::max_retries::3
webcluster::high_priority::max_connections::1024
webcluster::high_priority::max_pending_requests::1024
webcluster::high_priority::max_requests::1024
webcluster::high_priority::max_retries::3
webcluster::added_via_api::false
webcluster::172.31.11.11:80::cx_active::0
webcluster::172.31.11.11:80::cx_connect_fail::0
webcluster::172.31.11.11:80::cx_total::0
webcluster::172.31.11.11:80::rq_active::0
webcluster::172.31.11.11:80::rq_error::0
webcluster::172.31.11.11:80::rq_success::0
webcluster::172.31.11.11:80::rq_timeout::0
webcluster::172.31.11.11:80::rq_total::0
webcluster::172.31.11.11:80::hostname::
webcluster::172.31.11.11:80::health_flags::healthy
webcluster::172.31.11.11:80::weight::1
webcluster::172.31.11.11:80::region::
webcluster::172.31.11.11:80::zone::
webcluster::172.31.11.11:80::sub_zone::
webcluster::172.31.11.11:80::canary::false
webcluster::172.31.11.11:80::priority::0
webcluster::172.31.11.11:80::success_rate::-1.0
webcluster::172.31.11.11:80::local_origin_success_rate::-1.0
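## Besides /clusters, the admin interface's /config_dump endpoint is a useful
## cross-check; on recent Envoy releases the include_eds query parameter also
## dumps the EDS-delivered endpoints (parameter availability depends on version)
# curl -s 172.31.11.2:9901/config_dump?include_eds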
## Enter the container
# docker exec -it c73086aa69f7 sh
/ # cd /etc/envoy/eds.conf.d
/etc/envoy/eds.conf.d # cat eds.yaml
resources:
- "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment
  cluster_name: webcluster
  endpoints:
  - lb_endpoints:
    - endpoint:
        address:
          socket_address:
            address: 172.31.11.11
            port_value: 80
## After appending the second endpoint to eds.yaml
/etc/envoy/eds.conf.d # cat eds.yaml
version_info: '2'
resources:
- "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment
  cluster_name: webcluster
  endpoints:
  - lb_endpoints:
    - endpoint:
        address:
          socket_address:
            address: 172.31.11.11
            port_value: 80
    - endpoint:
        address:
          socket_address:
            address: 172.31.11.12
            port_value: 80
## Force a rename to make the new configuration take effect
/etc/envoy/eds.conf.d # mv eds.yaml tmp && mv tmp eds.yaml
## From outside the container, the cluster now contains 172.31.11.12 in addition to 172.31.11.11
# curl 172.31.11.2:9901/clusters
webcluster::observability_name::webcluster
webcluster::default_priority::max_connections::1024
webcluster::default_priority::max_pending_requests::1024
webcluster::default_priority::max_requests::1024
webcluster::default_priority::max_retries::3
webcluster::high_priority::max_connections::1024
webcluster::high_priority::max_pending_requests::1024
webcluster::high_priority::max_requests::1024
webcluster::high_priority::max_retries::3
webcluster::added_via_api::false
webcluster::172.31.11.11:80::cx_active::0
webcluster::172.31.11.11:80::cx_connect_fail::0
webcluster::172.31.11.11:80::cx_total::0
webcluster::172.31.11.11:80::rq_active::0
webcluster::172.31.11.11:80::rq_error::0
webcluster::172.31.11.11:80::rq_success::0
webcluster::172.31.11.11:80::rq_timeout::0
webcluster::172.31.11.11:80::rq_total::0
webcluster::172.31.11.11:80::hostname::
webcluster::172.31.11.11:80::health_flags::healthy
webcluster::172.31.11.11:80::weight::1
webcluster::172.31.11.11:80::region::
webcluster::172.31.11.11:80::zone::
webcluster::172.31.11.11:80::sub_zone::
webcluster::172.31.11.11:80::canary::false
webcluster::172.31.11.11:80::priority::0
webcluster::172.31.11.11:80::success_rate::-1.0
webcluster::172.31.11.11:80::local_origin_success_rate::-1.0
webcluster::172.31.11.12:80::cx_active::0
webcluster::172.31.11.12:80::cx_connect_fail::0
webcluster::172.31.11.12:80::cx_total::0
webcluster::172.31.11.12:80::rq_active::0
webcluster::172.31.11.12:80::rq_error::0
webcluster::172.31.11.12:80::rq_success::0
webcluster::172.31.11.12:80::rq_timeout::0
webcluster::172.31.11.12:80::rq_total::0
webcluster::172.31.11.12:80::hostname::
webcluster::172.31.11.12:80::health_flags::healthy
webcluster::172.31.11.12:80::weight::1
webcluster::172.31.11.12:80::region::
webcluster::172.31.11.12:80::zone::
webcluster::172.31.11.12:80::sub_zone::
webcluster::172.31.11.12:80::canary::false
webcluster::172.31.11.12:80::priority::0
webcluster::172.31.11.12:80::success_rate::-1.0
webcluster::172.31.11.12:80::local_origin_success_rate::-1.0
## Requests to the listener are now scheduled across both endpoints
root@k8s-node-1:~# curl 172.31.11.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.11.11!
root@k8s-node-1:~# curl 172.31.11.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.11.11!
root@k8s-node-1:~# curl 172.31.11.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver02, ServerIP: 172.31.11.12!
root@k8s-node-1:~# curl 172.31.11.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.11.11!
## When webserver01 fails, edit the file to remove the failed endpoint
/etc/envoy/eds.conf.d # cat eds.yaml
version_info: '2'
resources:
- "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment
  cluster_name: webcluster
  endpoints:
  - lb_endpoints:
    - endpoint:
        address:
          socket_address:
            address: 172.31.11.12
            port_value: 80
## Trigger the reload
/etc/envoy/eds.conf.d # mv eds.yaml bak && mv bak eds.yaml
## /clusters now shows that 172.31.11.11 has been removed
# curl 172.31.11.2:9901/clusters
webcluster::observability_name::webcluster
webcluster::default_priority::max_connections::1024
webcluster::default_priority::max_pending_requests::1024
webcluster::default_priority::max_requests::1024
webcluster::default_priority::max_retries::3
webcluster::high_priority::max_connections::1024
webcluster::high_priority::max_pending_requests::1024
webcluster::high_priority::max_requests::1024
webcluster::high_priority::max_retries::3
webcluster::added_via_api::false
webcluster::172.31.11.12:80::cx_active::0
webcluster::172.31.11.12:80::cx_connect_fail::0
webcluster::172.31.11.12:80::cx_total::0
webcluster::172.31.11.12:80::rq_active::0
webcluster::172.31.11.12:80::rq_error::0
webcluster::172.31.11.12:80::rq_success::0
webcluster::172.31.11.12:80::rq_timeout::0
webcluster::172.31.11.12:80::rq_total::0
webcluster::172.31.11.12:80::hostname::
webcluster::172.31.11.12:80::health_flags::healthy
webcluster::172.31.11.12:80::weight::1
webcluster::172.31.11.12:80::region::
webcluster::172.31.11.12:80::zone::
webcluster::172.31.11.12:80::sub_zone::
webcluster::172.31.11.12:80::canary::false
webcluster::172.31.11.12:80::priority::0
webcluster::172.31.11.12:80::success_rate::-1.0
webcluster::172.31.11.12:80::local_origin_success_rate::-1.0
## Requests to the cluster are now scheduled only to 172.31.11.12
# curl 172.31.11.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver02, ServerIP: 172.31.11.12!
# curl 172.31.11.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver02, ServerIP: 172.31.11.12!
# curl 172.31.11.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver02, ServerIP: 172.31.11.12!
# curl 172.31.11.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver02, ServerIP: 172.31.11.12!
2. Filesystem-Based Subscription (LDS, CDS)

Here the bootstrap gains a dynamic_resources section; lds_config and cds_config each name a config source. Annotated outline:

dynamic_resources:
  lds_config:     # config source; choose one of three:
    # path: discover resources from a file at the given path
    # api_config_source: discover this resource type from a management server;
    #   this requires a cluster for the management server that is defined
    #   statically up front, and for safety the configuration should normally
    #   be carried over TLS
    # ads: discover all types of dynamic configuration from the management
    #   server over a single ADS stream
  cds_config: {}  # the same three options apply
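For the api_config_source option above, a minimal bootstrap sketch (not part of this demo; the xds_cluster name and the xdsserver:18000 address are assumptions standing in for a real management server):

dynamic_resources:
  lds_config:
    resource_api_version: V3
    api_config_source:
      api_type: GRPC
      transport_api_version: V3
      grpc_services:
      - envoy_grpc:
          cluster_name: xds_cluster      # must match the static cluster below
static_resources:
  clusters:
  - name: xds_cluster                    # the statically defined management-server cluster
    type: STRICT_DNS
    connect_timeout: 1s
    typed_extension_protocol_options:
      envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
        "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions
        explicit_http_config:
          http2_protocol_options: {}     # xDS over gRPC requires HTTP/2
    load_assignment:
      cluster_name: xds_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: xdsserver, port_value: 18000 }

In production the xds_cluster would normally also carry transport_socket TLS settings, as noted above.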


Experiment walkthrough
# LDS & CDS Filesystem demo
Environment:
Five services:
envoy: the front proxy, at 172.31.12.2
webserver01: the first backend service
webserver01-sidecar: the sidecar proxy for the first backend, at 172.31.12.11
webserver02: the second backend service
webserver02-sidecar: the sidecar proxy for the second backend, at 172.31.12.12
# Run and test
# Start
docker-compose up
# Test
# Inspect the cluster information
curl 172.31.12.2:9901/clusters
# Inspect the listener information
curl 172.31.12.2:9901/listeners
# Open a shell in the front-proxy envoy container
docker exec -it eds-filesystem_envoy_1 /bin/sh
cd /etc/envoy/conf.d/
# After editing lds.yaml or cds.yaml as needed,
# run a command like the one below to force-touch the file so the
# inode-based watch mechanism is triggered
mv lds.yaml temp && mv temp lds.yaml
# Verify the relevant configuration again
curl 172.31.12.2:9901/clusters
# Tear down
docker-compose down
# The docker-compose file
cat docker-compose.yaml
version: '3.3'
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./front-envoy.yaml:/etc/envoy/envoy.yaml
      - ./conf.d/:/etc/envoy/conf.d/
    networks:
      envoymesh:
        ipv4_address: 172.31.12.2
        aliases:
          - front-proxy
    depends_on:
      - webserver01
      - webserver01-app
      - webserver02
      - webserver02-app
  webserver01:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: webserver01
    networks:
      envoymesh:
        ipv4_address: 172.31.12.11
        aliases:
          - webserver01-sidecar
  webserver01-app:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01"
    depends_on:
      - webserver01
  webserver02:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:
      - ENVOY_UID=0
      - ENVOY_GID=0
    volumes:
      - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: webserver02
    networks:
      envoymesh:
        ipv4_address: 172.31.12.12
        aliases:
          - webserver02-sidecar
  webserver02-app:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02"
    depends_on:
      - webserver02
networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.12.0/24
# front-envoy.yaml: configures node plus dynamic_resources (lds_config and cds_config)
# Much simpler than the earlier configuration: apart from the node identity and the admin interface, the remainder is just the five lines of dynamic_resources.
# Discovering the listener (L) pulls in the VirtualHost and Route configuration beneath it; each route refers to a cluster; discovering the cluster (C) in turn pulls in the endpoints beneath it. Config sources must be specified for both L and C.
cat front-envoy.yaml
node:
  id: envoy_front_proxy
  cluster: MageEdu_Cluster
admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9901
dynamic_resources:
  lds_config:
    path: /etc/envoy/conf.d/lds.yaml
  cds_config:
    path: /etc/envoy/conf.d/cds.yaml
#envoy-sidecar-proxy.yaml
admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9901
static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router
  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }
# conf.d/lds.yaml: almost identical to the earlier static listener configuration; routes requests to the webcluster backend
resources:
- "@type": type.googleapis.com/envoy.config.listener.v3.Listener
  name: listener_http
  address:
    socket_address: { address: 0.0.0.0, port_value: 80 }
  filter_chains:
  - filters:
    - name: envoy.http_connection_manager
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
        stat_prefix: ingress_http
        route_config:
          name: local_route
          virtual_hosts:
          - name: local_service
            domains: ["*"]
            routes:
            - match:
                prefix: "/"
              route:
                cluster: webcluster
        http_filters:
        - name: envoy.filters.http.router
# conf.d/cds.yaml: the cluster type here is the static-style STRICT_DNS; it could also be set to EDS and then use eds_config for dynamic endpoint discovery (see the sketch after this block)
resources:
- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
  name: webcluster
  connect_timeout: 1s
  type: STRICT_DNS
  load_assignment:
    cluster_name: webcluster
    endpoints:
    - lb_endpoints:
      - endpoint:
          address:
            socket_address:
              address: webserver01
              port_value: 80
      - endpoint:
          address:
            socket_address:
              address: webserver02
              port_value: 80
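As the comment above notes, the CDS-delivered cluster can instead delegate endpoint discovery to EDS. A minimal sketch of that variant (the /etc/envoy/conf.d/eds.yaml path is an assumption; its contents would be a ClusterLoadAssignment like the one in the first demo):

resources:
- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
  name: webcluster
  connect_timeout: 1s
  type: EDS                              # endpoints come from EDS instead of load_assignment
  eds_cluster_config:
    service_name: webcluster
    eds_config:
      path: /etc/envoy/conf.d/eds.yaml   # hypothetical file, watched the same way as lds.yaml/cds.yaml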
# Run and test
# docker-compose up
## Requests round-robin between 172.31.12.11 and 172.31.12.12
# curl 172.31.12.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.12.11!
# curl 172.31.12.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver02, ServerIP: 172.31.12.12!
# curl 172.31.12.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.12.11!
# curl 172.31.12.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver02, ServerIP: 172.31.12.12!
# curl 172.31.12.2
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.12.11!
## The cluster likewise shows both endpoints
# curl 172.31.12.2:9901/clusters
webcluster::observability_name::webcluster
webcluster::default_priority::max_connections::1024
webcluster::default_priority::max_pending_requests::1024
webcluster::default_priority::max_requests::1024
webcluster::default_priority::max_retries::3
webcluster::high_priority::max_connections::1024
webcluster::high_priority::max_pending_requests::1024
webcluster::high_priority::max_requests::1024
webcluster::high_priority::max_retries::3
webcluster::added_via_api::true
webcluster::172.31.12.11:80::cx_active::2
webcluster::172.31.12.11:80::cx_connect_fail::0
webcluster::172.31.12.11:80::cx_total::2
webcluster::172.31.12.11:80::rq_active::0
webcluster::172.31.12.11:80::rq_error::0
webcluster::172.31.12.11:80::rq_success::4
webcluster::172.31.12.11:80::rq_timeout::0
webcluster::172.31.12.11:80::rq_total::4
webcluster::172.31.12.11:80::hostname::webserver01
webcluster::172.31.12.11:80::health_flags::healthy
webcluster::172.31.12.11:80::weight::1
webcluster::172.31.12.11:80::region::
webcluster::172.31.12.11:80::zone::
webcluster::172.31.12.11:80::sub_zone::
webcluster::172.31.12.11:80::canary::false
webcluster::172.31.12.11:80::priority::0
webcluster::172.31.12.11:80::success_rate::-1.0
webcluster::172.31.12.11:80::local_origin_success_rate::-1.0
webcluster::172.31.12.12:80::cx_active::1
webcluster::172.31.12.12:80::cx_connect_fail::0
webcluster::172.31.12.12:80::cx_total::1
webcluster::172.31.12.12:80::rq_active::0
webcluster::172.31.12.12:80::rq_error::0
webcluster::172.31.12.12:80::rq_success::2
webcluster::172.31.12.12:80::rq_timeout::0
webcluster::172.31.12.12:80::rq_total::2
webcluster::172.31.12.12:80::hostname::webserver02
webcluster::172.31.12.12:80::health_flags::healthy
webcluster::172.31.12.12:80::weight::1
webcluster::172.31.12.12:80::region::
webcluster::172.31.12.12:80::zone::
webcluster::172.31.12.12:80::sub_zone::
webcluster::172.31.12.12:80::canary::false
webcluster::172.31.12.12:80::priority::0
webcluster::172.31.12.12:80::success_rate::-1.0
webcluster::172.31.12.12:80::local_origin_success_rate::-1.0
# curl 172.31.12.2:9901/listeners
listener_http::0.0.0.0:80
# Test an LDS change: move the listener from port 80 to 8081, then use mv to make the change take effect
resources:
- "@type": type.googleapis.com/envoy.config.listener.v3.Listener
  name: listener_http
  address:
    socket_address: { address: 0.0.0.0, port_value: 8081 }
  filter_chains:
  - filters:
    - name: envoy.http_connection_manager
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
        stat_prefix: ingress_http
        route_config:
          name: local_route
          virtual_hosts:
          - name: local_service
            domains: ["*"]
            routes:
            - match:
                prefix: "/"
              route:
                cluster: webcluster
        http_filters:
        - name: envoy.filters.http.router
/etc/envoy/conf.d # mv lds.yaml tmp && mv tmp lds.yaml
# The listener has moved to 8081; try accessing it
root@k8s-node-1:~# curl 172.31.12.2:9901/listeners
listener_http::0.0.0.0:8081
root@k8s-node-1:~# curl 172.31.12.2:8081
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.12.11!
root@k8s-node-1:~# curl 172.31.12.2:8081
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver02, ServerIP: 172.31.12.12!
root@k8s-node-1:~# curl 172.31.12.2:8081
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver02, ServerIP: 172.31.12.12!
root@k8s-node-1:~# curl 172.31.12.2:8081
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.12.11!
# Test a CDS change: remove the webserver02 (172.31.12.12) endpoint from cds.yaml
resources:
- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
  name: webcluster
  connect_timeout: 1s
  type: STRICT_DNS
  load_assignment:
    cluster_name: webcluster
    endpoints:
    - lb_endpoints:
      - endpoint:
          address:
            socket_address:
              address: webserver01
              port_value: 80
## Trigger the reload
/etc/envoy/conf.d # mv cds.yaml tmp&& mv tmp cds.yaml
# The cluster now has only the 172.31.12.11 endpoint
root@k8s-node-1:~# curl 172.31.12.2:9901/clusters
webcluster::observability_name::webcluster
webcluster::default_priority::max_connections::1024
webcluster::default_priority::max_pending_requests::1024
webcluster::default_priority::max_requests::1024
webcluster::default_priority::max_retries::3
webcluster::high_priority::max_connections::1024
webcluster::high_priority::max_pending_requests::1024
webcluster::high_priority::max_requests::1024
webcluster::high_priority::max_retries::3
webcluster::added_via_api::true
webcluster::172.31.12.11:80::cx_active::0
webcluster::172.31.12.11:80::cx_connect_fail::0
webcluster::172.31.12.11:80::cx_total::0
webcluster::172.31.12.11:80::rq_active::0
webcluster::172.31.12.11:80::rq_error::0
webcluster::172.31.12.11:80::rq_success::0
webcluster::172.31.12.11:80::rq_timeout::0
webcluster::172.31.12.11:80::rq_total::0
webcluster::172.31.12.11:80::hostname::webserver01
webcluster::172.31.12.11:80::health_flags::healthy
webcluster::172.31.12.11:80::weight::1
webcluster::172.31.12.11:80::region::
webcluster::172.31.12.11:80::zone::
webcluster::172.31.12.11:80::sub_zone::
webcluster::172.31.12.11:80::canary::false
webcluster::172.31.12.11:80::priority::0
webcluster::172.31.12.11:80::success_rate::-1.0
webcluster::172.31.12.11:80::local_origin_success_rate::-1.0
root@k8s-node-1:~# curl 172.31.12.2:8081
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.12.11!
root@k8s-node-1:~# curl 172.31.12.2:8081
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.12.11!
root@k8s-node-1:~# curl 172.31.12.2:8081
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.12.11!
root@k8s-node-1:~# curl 172.31.12.2:8081
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: webserver01, ServerIP: 172.31.12.11!
