Kubernetes MetalLB + 部署Ingress Controller + Ingress TLS发布【2024-02-01测试成功】

一、部署MetalLB

#参考之前文档
https://www.cnblogs.com/birkhoffxia/articles/17949510

二、部署Ingress Controller

#https://github.com/kubernetes/ingress-nginx

#K8S Server :"v1.25.0"

#https://kubernetes.github.io/ingress-nginx/deploy/

#部署1.9.5版本 Ingress-controller
#https://github.com/kubernetes/ingress-nginx/blob/controller-v1.9.5/deploy/static/provider/cloud/deploy.yaml
下载编辑内容registry.k8s.io 改为 registry.lank8s.cn
#下载镜像
crictl pull registry.lank8s.cn/ingress-nginx/controller:v1.9.5@sha256:b3aba22b1da80e7acfc52b115cae1d4c687172cbf2b742d5b502419c25ff340e
crictl pull registry.lank8s.cn/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0@sha256:a7943503b45d552785aa3b5e457f169a5661fb94d82b8a3373bcd9ebaf9aac80

[root@ca-k8s-master01 Ingress]# kubectl apply -f ingressdeploy.yaml
namespace/ingress-nginx created
serviceaccount/ingress-nginx created
serviceaccount/ingress-nginx-admission created
role.rbac.authorization.k8s.io/ingress-nginx created
role.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrole.rbac.authorization.k8s.io/ingress-nginx created
clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created
rolebinding.rbac.authorization.k8s.io/ingress-nginx created
rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
configmap/ingress-nginx-controller created
service/ingress-nginx-controller created
service/ingress-nginx-controller-admission created
deployment.apps/ingress-nginx-controller created
job.batch/ingress-nginx-admission-create created
job.batch/ingress-nginx-admission-patch created
ingressclass.networking.k8s.io/nginx created
validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created

#创建一个 ingressclass 类型为 nginx
[root@ca-k8s-master01 Ingress]# kubectl get ingressclass
NAME    CONTROLLER             PARAMETERS   AGE
nginx   k8s.io/ingress-nginx   <none>       52m

#创建三个pod 
[root@ca-k8s-master01 Ingress]# kubectl get pods -n ingress-nginx
NAME                                        READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-kzxxt        0/1     Completed   0          10s
ingress-nginx-admission-patch-sk9jk         0/1     Completed   1          10s
ingress-nginx-controller-6678cdb9d8-gzrlx   0/1     Running     0          10s

#创建了一个svc为LoadBalancer 类型 并且分配了 地址192.168.40.53
[root@ca-k8s-master01 Ingress]# kubectl get svc -n ingress-nginx
NAME                                 TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)                      AGE
ingress-nginx-controller             LoadBalancer   10.108.61.66     192.168.40.53   80:32467/TCP,443:32073/TCP   56s
ingress-nginx-controller-admission   ClusterIP      10.103.191.223   <none>          443/TCP                      56s

#  externalTrafficPolicy: Local 查看svc的 流量策略为 Local
#  流量接进来有可能没办法正常对外提供服务 因为只负责把从该节点进来的流量调度给当前节点本地隶属于该Service的Pod上
[root@ca-k8s-master01 Ingress]# kubectl get svc ingress-nginx-controller -n ingress-nginx -o yaml
  externalTrafficPolicy: Local
#如果以后通过NodePort类型接入进来的话 推荐改为 externalTrafficPolicy: Cluster

#访问 EXTERNAL-IP:192.168.40.53

三、Ingress类型

1、Simple fanout

准备环境:2个Service (demoappv10 和 demoappv11)

#创建demoappv10应用
[root@ca-k8s-master01 Simple_fanout]# kubectl create deployment demoappv10 --image=ikubernetes/demoapp:v1.0 --replicas=3 -o yaml --dry-run=client > demoappv10.yaml
[root@ca-k8s-master01 Simple_fanout]# cat demoappv10.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: demoappv10
  name: demoappv10
spec:
  replicas: 3
  selector:
    matchLabels:
      app: demoappv10
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: demoappv10
    spec:
      containers:
      - image: ikubernetes/demoapp:v1.0
        name: demoapp
        resources: {}
status: {}

[root@ca-k8s-master01 Simple_fanout]# kubectl apply -f demoappv10.yaml
[root@ca-k8s-master01 Simple_fanout]# kubectl get pods -o wide | grep demoapp
demoappv10-6d8d6c58c9-kj4v2                             1/1     Running       0                4m35s   10.244.36.28     ca-k8s-node01   <none>           <none>
demoappv10-6d8d6c58c9-mtfmg                             1/1     Running       0                4m35s   10.244.132.200   ca-k8s-node02   <none>           <none>
demoappv10-6d8d6c58c9-vrs2b                             1/1     Running       0                4m35s   10.244.132.216   ca-k8s-node02   <none>           <none>

#创建demoappv10服务
[root@ca-k8s-master01 Simple_fanout]# kubectl create service clusterip demoappv10 --tcp=80:80 --dry-run=client -o yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: demoappv10
  name: demoappv10
spec:
  ports:
  - name: 80-80
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: demoappv10
  type: ClusterIP
status:
  loadBalancer: {}
[root@ca-k8s-master01 Simple_fanout]# kubectl create service clusterip demoappv10 --tcp=80:80 --dry-run=client -o yaml > demoappv10-service.yaml
[root@ca-k8s-master01 Simple_fanout]# kubectl apply -f demoappv10-service.yaml
[root@ca-k8s-master01 Simple_fanout]# kubectl get svc | grep demoappv10
demoappv10                                   ClusterIP      10.102.101.226   <none>                                                     80/TCP                                               12s
[root@ca-k8s-master01 Simple_fanout]# curl 10.102.101.226
iKubernetes demoapp v1.0 !! ClientIP: 10.244.237.0, ServerName: demoappv10-6d8d6c58c9-kj4v2, ServerIP: 10.244.36.28!
[root@ca-k8s-master01 Simple_fanout]# curl 10.102.101.226
iKubernetes demoapp v1.0 !! ClientIP: 10.244.237.0, ServerName: demoappv10-6d8d6c58c9-vrs2b, ServerIP: 10.244.132.216!
[root@ca-k8s-master01 Simple_fanout]# curl 10.102.101.226
iKubernetes demoapp v1.0 !! ClientIP: 10.244.237.0, ServerName: demoappv10-6d8d6c58c9-mtfmg, ServerIP: 10.244.132.200!

#创建demoappv11 服务
[root@ca-k8s-master01 Simple_fanout]# kubectl create deployment demoappv11 --image=ikubernetes/demoapp:v1.1 --replicas=2 -o yaml --dry-run=client > demoappv11.yaml
[root@ca-k8s-master01 Simple_fanout]# cat demoappv11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: demoappv11
  name: demoappv11
spec:
  replicas: 2
  selector:
    matchLabels:
      app: demoappv11
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: demoappv11
    spec:
      containers:
      - image: ikubernetes/demoapp:v1.1
        name: demoapp
        resources: {}
status: {}
[root@ca-k8s-master01 Simple_fanout]# kubectl apply -f demoappv11.yaml
deployment.apps/demoappv11 created
[root@ca-k8s-master01 Simple_fanout]# kubectl get pods -o wide | grep demoappv11
demoappv11-78cb5f7-9tblz                                1/1     Running            0               12s    10.244.36.60     ca-k8s-node01   <none>           <none>
demoappv11-78cb5f7-bwpx9                                1/1     Running            0               12s    10.244.132.233   ca-k8s-node02   <none>           <none>

#创建demoappv11服务
[root@ca-k8s-master01 Simple_fanout]# kubectl create service clusterip demoappv11 --tcp=80:80 --dry-run=client -o yaml > demoappv11-service.yaml
[root@ca-k8s-master01 Simple_fanout]# cat demoappv11-service.yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: demoappv11
  name: demoappv11
spec:
  ports:
  - name: 80-80
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: demoappv11
  type: ClusterIP
status:
  loadBalancer: {}
[root@ca-k8s-master01 Simple_fanout]# kubectl apply -f demoappv11-service.yaml
service/demoappv11 created
[root@ca-k8s-master01 Simple_fanout]# kubectl get svc | grep demoappv11
demoappv11                                   ClusterIP      10.98.11.11      <none>                                                     80/TCP                                               7s
[root@ca-k8s-master01 Simple_fanout]# curl 10.98.11.11
iKubernetes demoapp v1.1 !! ClientIP: 10.244.237.0, ServerName: demoappv11-78cb5f7-9tblz, ServerIP: 10.244.36.60!
[root@ca-k8s-master01 Simple_fanout]# curl 10.98.11.11
iKubernetes demoapp v1.1 !! ClientIP: 10.244.237.0, ServerName: demoappv11-78cb5f7-bwpx9, ServerIP: 10.244.132.233!

创建Ingress资源

#基于URI方式代理不同应用的请求时,后端应用的URI若与代理时使用的URI不同,则需要启用URL Rewrite完成URI重写
#Ingress-Nginx支持使用“annotation nginx.ingress.kubernetes.io/rewrite-target”注解进行

#示例1:对于发往dzzz.sheca.com的请求
#将“/v10”代理至service/demoappv10
#将“/v11”代理至service/demoappv11
[root@ca-k8s-master01 Simple_fanout]# 
kubectl create ingress dzzz \
--rule="dzzz.sheca.com/v10=demoappv10:80" \
--rule="dzzz.sheca.com/v11=demoappv11:80" \
--class=nginx \
--annotation nginx.ingress.kubernetes.io/rewrite-target="/" \
--dry-run=client -o yaml > dzzz-ingress.yaml

[root@ca-k8s-master01 Simple_fanout]# cat dzzz-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
  creationTimestamp: null
  name: dzzz
spec:
  ingressClassName: nginx
  rules:
  - host: dzzz.sheca.com
    http:
      paths:
      - backend:
          service:
            name: demoappv10
            port:
              number: 80
        path: /v10
        pathType: Exact
      - backend:
          service:
            name: demoappv11
            port:
              number: 80
        path: /v11
        pathType: Exact
status:
  loadBalancer: {}

[root@ca-k8s-master01 Simple_fanout]# kubectl apply -f dzzz-ingress.yaml
ingress.networking.k8s.io/dzzz created
[root@ca-k8s-master01 Simple_fanout]# kubectl get ingress
NAME                CLASS   HOSTS            ADDRESS         PORTS     AGE
dzzz                nginx   dzzz.sheca.com                   80        6s
[root@ca-k8s-master01 Simple_fanout]# kubectl get pods -n ingress-nginx
NAME                                        READY   STATUS    RESTARTS   AGE
ingress-nginx-controller-6678cdb9d8-xnsg6   1/1     Running   0          12h
[root@ca-k8s-master01 Simple_fanout]# kubectl describe ingress dzzz
Name:             dzzz
Labels:           <none>
Namespace:        default
Address:          192.168.40.53
Ingress Class:    nginx
Default backend:  <default>
Rules:
  Host            Path  Backends
  ----            ----  --------
  dzzz.sheca.com
                  /v10   demoappv10:80 (10.244.132.200:80,10.244.132.216:80,10.244.36.28:80)
                  /v11   demoappv11:80 (10.244.132.233:80,10.244.36.60:80)
Annotations:      nginx.ingress.kubernetes.io/rewrite-target: /
Events:
  Type    Reason  Age                    From                      Message
  ----    ------  ----                   ----                      -------
  Normal  Sync    3m34s (x2 over 3m48s)  nginx-ingress-controller  Scheduled for sync
[root@ca-k8s-master01 Simple_fanout]# kubectl get svc -n ingress-nginx
NAME                                 TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)                      AGE
ingress-nginx-controller             LoadBalancer   10.108.61.66     192.168.40.53   80:32467/TCP,443:32073/TCP   36h
ingress-nginx-controller-admission   ClusterIP      10.103.191.223   <none>          443/TCP                      36h
[root@ca-k8s-master01 Simple_fanout]# kubectl get ingress
NAME                CLASS   HOSTS            ADDRESS         PORTS     AGE
dzzz                nginx   dzzz.sheca.com   192.168.40.53   80        4m
#访问资源 配置hosts文件 192.168.40.53 dzzz.sheca.com

#示例2:功能同上 但使用URI的前缀匹配,而非精确匹配,且基于正则表达式模式进行url rewrite
kubectl create ingress dzzz \
--rule="dzzz.sheca.com/v10(/|$)(.*)=demoappv10:80" \
--rule="dzzz.sheca.com/v11(/|$)(.*)=demoappv11:80" \
--class=nginx \
--annotation nginx.ingress.kubernetes.io/rewrite-target="/$2" \
--dry-run=client -o yaml > dzzz-ingress.yaml

#!默认pathType=Exact 或者Prefix 会报错 改为ImplementationSpecific
#[root@ca-k8s-master01 Simple_fanout]# kubectl apply -f dzzz-ingress.yaml
#Warning: path /v10(/|$)(.*) cannot be used with pathType Prefix
#Warning: path /v11(/|$)(.*) cannot be used with pathType Prefix
#参考官方文档:https://kubernetes.io/zh-cn/docs/concepts/services-networking/ingress/ 中 《路径类型》
[root@ca-k8s-master01 Simple_fanout]# cat dzzz-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
  creationTimestamp: null
  name: dzzz
spec:
  ingressClassName: nginx
  rules:
  - host: dzzz.sheca.com
    http:
      paths:
      - backend:
          service:
            name: demoappv10
            port:
              number: 80
        path: /v10(/|$)(.*)
        pathType: ImplementationSpecific
      - backend:
          service:
            name: demoappv11
            port:
              number: 80
        path: /v11(/|$)(.*)
        pathType: ImplementationSpecific
status:
  loadBalancer: {}

[root@ca-k8s-master01 Simple_fanout]# kubectl apply -f dzzz-ingress.yaml
ingress.networking.k8s.io/dzzz created

#正常访问

#nginx pod 内部配置
        ## start server dzzz.sheca.com
        server {
                server_name dzzz.sheca.com ;

                listen 80  ;
                listen [::]:80  ;
                listen 443  ssl http2 ;
                listen [::]:443  ssl http2 ;

                set $proxy_upstream_name "-";

                ssl_certificate_by_lua_block {
                        certificate.call()
                }

                location ~* "^/v11" {

                        set $namespace      "default";
                        set $ingress_name   "dzzz";
                        set $service_name   "demoappv11";
                        set $service_port   "80";
                        set $location_path  "/v11";
                        set $global_rate_limit_exceeding n;

                        rewrite_by_lua_block {
                                lua_ingress.rewrite({
                                        force_ssl_redirect = false,
                                        ssl_redirect = true,
                                        force_no_ssl_redirect = false,
                                        preserve_trailing_slash = false,
                                        use_port_in_redirects = false,
                                        global_throttle = { namespace = "", limit = 0, window_size = 0, key = { }, ignored_cidrs = { } },
                                })
                                balancer.rewrite()
                                plugins.run()
                        }

                        # be careful with `access_by_lua_block` and `satisfy any` directives as satisfy any
                        # will always succeed when there's `access_by_lua_block` that does not have any lua code doing `ngx.exit(ngx.DECLINED)`
                        # other authentication method such as basic auth or external auth useless - all requests will be allowed.
                        #access_by_lua_block {
                        #}

                        header_filter_by_lua_block {
                                lua_ingress.header()
                                plugins.run()
                        }

                        body_filter_by_lua_block {
                                plugins.run()
                        }

                        log_by_lua_block {
                                balancer.log()

                                monitor.call()

                                plugins.run()
                        }

                        port_in_redirect off;

                        set $balancer_ewma_score -1;
                        set $proxy_upstream_name "default-demoappv11-80";
                        set $proxy_host          $proxy_upstream_name;
                        set $pass_access_scheme  $scheme;

                        set $pass_server_port    $server_port;

                        set $best_http_host      $http_host;
                        set $pass_port           $pass_server_port;

                        set $proxy_alternative_upstream_name "";
                        client_max_body_size                    1m;

                        proxy_set_header Host                   $best_http_host;

                        # Pass the extracted client certificate to the backend

                        # Allow websocket connections
                        proxy_set_header                        Upgrade           $http_upgrade;

                        proxy_set_header                        Connection        $connection_upgrade;

                        proxy_set_header X-Request-ID           $req_id;
                        proxy_set_header X-Real-IP              $remote_addr;

                        proxy_set_header X-Forwarded-For        $remote_addr;

                        proxy_set_header X-Forwarded-Host       $best_http_host;
                        proxy_set_header X-Forwarded-Port       $pass_port;
                        proxy_set_header X-Forwarded-Proto      $pass_access_scheme;
                        proxy_set_header X-Forwarded-Scheme     $pass_access_scheme;

                        proxy_set_header X-Scheme               $pass_access_scheme;

                        # Pass the original X-Forwarded-For
                        proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;

                        # mitigate HTTPoxy Vulnerability
                        # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
                        proxy_set_header Proxy                  "";

                        # Custom headers to proxied server

                        proxy_connect_timeout                   5s;
                        proxy_send_timeout                      60s;
                        proxy_read_timeout                      60s;

                        proxy_buffering                         off;
                        proxy_buffer_size                       4k;
                        proxy_buffers                           4 4k;

                        proxy_max_temp_file_size                1024m;

                        proxy_request_buffering                 on;
                        proxy_http_version                      1.1;

                        proxy_cookie_domain                     off;
                        proxy_cookie_path                       off;

                        # In case of errors try the next upstream server before returning an error
                        proxy_next_upstream                     error timeout;
                        proxy_next_upstream_timeout             0;
                        proxy_next_upstream_tries               3;

                        rewrite "(?i)/v11" / break;
                        proxy_pass http://upstream_balancer;

                        proxy_redirect                          off;

                }

                location ~* "^/v10" {

                        set $namespace      "default";
                        set $ingress_name   "dzzz";
                        set $service_name   "demoappv10";
                        set $service_port   "80";
                        set $location_path  "/v10";
                        set $global_rate_limit_exceeding n;

                        rewrite_by_lua_block {
                                lua_ingress.rewrite({
                                        force_ssl_redirect = false,
                                        ssl_redirect = true,
                                        force_no_ssl_redirect = false,
                                        preserve_trailing_slash = false,
                                        use_port_in_redirects = false,
                                        global_throttle = { namespace = "", limit = 0, window_size = 0, key = { }, ignored_cidrs = { } },
                                })
                                balancer.rewrite()
                                plugins.run()
                        }

                        # be careful with `access_by_lua_block` and `satisfy any` directives as satisfy any
                        # will always succeed when there's `access_by_lua_block` that does not have any lua code doing `ngx.exit(ngx.DECLINED)`
                        # other authentication method such as basic auth or external auth useless - all requests will be allowed.
                        #access_by_lua_block {
                        #}

                        header_filter_by_lua_block {
                                lua_ingress.header()
                                plugins.run()
                        }

                        body_filter_by_lua_block {
                                plugins.run()
                        }

                        log_by_lua_block {
                                balancer.log()

                                monitor.call()

                                plugins.run()
                        }

                        port_in_redirect off;

                        set $balancer_ewma_score -1;
                        set $proxy_upstream_name "default-demoappv10-80";
                        set $proxy_host          $proxy_upstream_name;
                        set $pass_access_scheme  $scheme;

                        set $pass_server_port    $server_port;

                        set $best_http_host      $http_host;
                        set $pass_port           $pass_server_port;

                        set $proxy_alternative_upstream_name "";

                        client_max_body_size                    1m;

                        proxy_set_header Host                   $best_http_host;

                        # Pass the extracted client certificate to the backend

                        # Allow websocket connections
                        proxy_set_header                        Upgrade           $http_upgrade;

                        proxy_set_header                        Connection        $connection_upgrade;

                        proxy_set_header X-Request-ID           $req_id;
                        proxy_set_header X-Real-IP              $remote_addr;

                        proxy_set_header X-Forwarded-For        $remote_addr;

                        proxy_set_header X-Forwarded-Host       $best_http_host;
                        proxy_set_header X-Forwarded-Port       $pass_port;
                        proxy_set_header X-Forwarded-Proto      $pass_access_scheme;
                        proxy_set_header X-Forwarded-Scheme     $pass_access_scheme;

                        proxy_set_header X-Scheme               $pass_access_scheme;

                        # Pass the original X-Forwarded-For
                        proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;

                        # mitigate HTTPoxy Vulnerability
                        # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
                        proxy_set_header Proxy                  "";

                        # Custom headers to proxied server

                        proxy_connect_timeout                   5s;
                        proxy_send_timeout                      60s;
                        proxy_read_timeout                      60s;

                        proxy_buffering                         off;
                        proxy_buffer_size                       4k;
                        proxy_buffers                           4 4k;

                        proxy_max_temp_file_size                1024m;

                        proxy_request_buffering                 on;
                        proxy_http_version                      1.1;

                        proxy_cookie_domain                     off;
                        proxy_cookie_path                       off;

                        # In case of errors try the next upstream server before returning an error
                        proxy_next_upstream                     error timeout;
                        proxy_next_upstream_timeout             0;
                        proxy_next_upstream_tries               3;

                        rewrite "(?i)/v10" / break;
                        proxy_pass http://upstream_balancer;

                        proxy_redirect                          off;

                }

2、Name based virtual hosting

#基于FQDN名称代理不同应用的请求时,需要事先准备好多个域名,且确保对这些域名的解析能够到达IngressController 
#示例: 
#对dzzzv10.sheca.com的请求代理至service/demoappv10
#对dzzzv11.sheca.com的请求代理至service/demoappv11

#这里dzzzv10.sheca.com/* pathType=Prefix 
#如果dzzzv10.sheca.com/  pathType=Exact 
kubectl create ingress dzzz \
--rule="dzzzv10.sheca.com/*=demoappv10:80" \
--rule="dzzzv11.sheca.com/*=demoappv11:80" \
--class=nginx \
--dry-run=client -o yaml > dzzz-ingress.yaml

[root@ca-k8s-master01 Simple_fanout]# cat dzzz-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  creationTimestamp: null
  name: dzzz
spec:
  ingressClassName: nginx
  rules:
  - host: dzzzv10.sheca.com
    http:
      paths:
      - backend:
          service:
            name: demoappv10
            port:
              number: 80
        path: /
        pathType: Prefix
  - host: dzzzv11.sheca.com
    http:
      paths:
      - backend:
          service:
            name: demoappv11
            port:
              number: 80
        path: /
        pathType: Prefix
status:
  loadBalancer: {}

[root@ca-k8s-master01 Simple_fanout]# kubectl get ingress
NAME                CLASS   HOSTS                                 ADDRESS         PORTS     AGE
dzzz                nginx   dzzzv10.sheca.com,dzzzv11.sheca.com   192.168.40.53   80        20

#配置hosts 
192.168.40.53 dzzzv10.sheca.com dzzzv11.sheca.com

3、TLS

[root@ca-k8s-master01 Ingress]# openssl genrsa -out dzzz.key 2048

[root@ca-k8s-master01 Simple_fanout]# openssl req -new -x509 -sha256 -days 3650 -key dzzz.key -out dzzz.crt
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [XX]:CH
State or Province Name (full name) []:SH
Locality Name (eg, city) [Default City]:SH
Organization Name (eg, company) [Default Company Ltd]:dzzz.sheca.com
Organizational Unit Name (eg, section) []:dzzz.sheca.com
Common Name (eg, your name or your servers hostname) []:dzzz.sheca.com
Email Address []:

#创建TLS SECRET
[root@ca-k8s-master01 Simple_fanout]# kubectl create secret tls dzzz-tls --key=./dzzz.key --cert=./dzzz.crt -o yaml --dry-run=client
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR0ekNDQXArZ0F3SUJBZ0lKQU16L0VSV2R5SjVkTUEwR0NTcUdTSWIzRFFFQkN3VUFNSEl4Q3pBSkJnTlYKQkFZVEFrTklNUXN3Q1FZRFZRUUlEQUpUU0RFTE1Ba0dBMVVFQnd3Q1UwZ3hGekFWQmdOVkJBb01EbVI2ZW5vdQpjMmhsWTJFdVkyOXRNUmN3RlFZRFZRUUxEQTVrZW5wNkxuTm9aV05oTG1OdmJURVhNQlVHQTFVRUF3d09aSHA2CmVpNXphR1ZqWVM1amIyMHdIaGNOTWpRd01qQXhNRFl4TVRNd1doY05NelF3TVRJNU1EWXhNVE13V2pCeU1Rc3cKQ1FZRFZRUUdFd0pEU0RFTE1Ba0dBMVVFQ0F3Q1UwZ3hDekFKQmdOVkJBY01BbE5JTVJjd0ZRWURWUVFLREE1awplbnA2TG5Ob1pXTmhMbU52YlRFWE1CVUdBMVVFQ3d3T1pIcDZlaTV6YUdWallTNWpiMjB4RnpBVkJnTlZCQU1NCkRtUjZlbm91YzJobFkyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEKeHBTcGduMjVCQng3cTNpZzFsNWhHL0tIMUYwZHZqU0xKbEJTaGU0SlZjYU9vVkZkeWVLMEYzczAySVlxUGZPcgpNSTRCQi9DaXBld1Uzb29BNUx0OXQ3N09YK25Gd3VUSkNBcksvUEVGcXlkTzVkN3RpYmFNOUo3OWtzOXBpWTVaCktrRmE2OXJjQnJHVHI2TTN3YTJUUmVRT09QVi96TlNnZWU3b2NyM2VlaGxUSmpZQmRlRkxjTm1YU1lWNEVPb1IKcWg0NEhZWkxoQjlFUnUyZDhqbzFpMllHWFFIdE16UUx3cDdOZXdUa1lLRVNrOUJzbTRGejljVXFMSmgrQThwWgppZkYrNnhMdDZVOENRck1EREJHZkhyaEtkRmlCbDhKQVhUVmczcHdkVndCU1g5alUzUTI0aWtzdThuWE9SOStyCmdEQ0FKSk41NEN4N2xpVGlENVJRbFFJREFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWxaeGdWU01PeGdTZXUzOGIKalVyeVRFeG04Mkl3SHdZRFZSMGpCQmd3Rm9BVWxaeGdWU01PeGdTZXUzOGJqVXJ5VEV4bTgySXdEQVlEVlIwVApCQVV3QXdFQi96QU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFFa2pEZ1BFWnF3YURVNmZ0RUdwSjJJWndqV0tJCkpNbDcvMTFnWXd6eDR1M0VkVG15VU1wUkdzOWNWYUhOQVlCdDNkRUhqcHZkdXFpbWxJK01ucHFxWE5Tbm5aYlcKS1l3YnFWNVdyUFZ4eFloVFdreSt4Z2ZzRUlRUjZ2eFFTRUtCa3ZPV2tZTG1WZk1hMmdqTWJqTkRRQVBQTkZWMgpkVUFTNi8xWCtMOVhRbm5RNFg1VE43cTU5dSs0blFDUHYwNEtTNGdFdDNDcjA3UnFjMzRhajZkRTNoeVdlZFFFClBWenZlT1RiSGNvc1hzb1VNSUtlRHh5NVJsQllRVldrMHhYRWhSc0IvSGhyTWl6S1BCS1laNmFUL1Bjakg4RDQKWG5aWGhnQVJNZWJja2RScFZQTTlKbDZKc1ZjSVk4dzJsTHh3NURubDY2R3EwTTM3bVl3V1MrTURqQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeHBTcGduMjVCQng3cTNpZzFsNWhHL0tIMUYwZHZqU0xKbEJTaGU0SlZjYU9vVkZkCnllSzBGM3MwMklZcVBmT3JNSTRCQi9DaXBld1Uzb29BNUx0OXQ3N09YK25Gd3VUSkNBcksvUEVGcXlkTzVkN3QKaWJhTTlKNzlrczlwaVk1WktrRmE2OXJjQnJHVHI2TTN3YTJUUmVRT09QVi96TlNnZWU3b2NyM2VlaGxUSmpZQgpkZUZMY05tWFNZVjRFT29ScWg0NEhZWkxoQjlFUnUyZDhqbzFpMllHWFFIdE16UUx3cDdOZXdUa1lLRVNrOUJzCm00Rno5Y1VxTEpoK0E4cFppZkYrNnhMdDZVOENRck1EREJHZkhyaEtkRmlCbDhKQVhUVmczcHdkVndCU1g5alUKM1EyNGlrc3U4blhPUjkrcmdEQ0FKSk41NEN4N2xpVGlENVJRbFFJREFRQUJBb0lCQURFNUptOXZKSTV0VlFaagpCMU03Mzh4aXFPS3VGakpRcWZGY1ZhaFhJYjgrVVpxUkpiTm1XNGU2aFpYcVk4d3BlMVMwem5LZVptTEVpMFBDClo0aExKVldIbVhaUzV5OXhNSmVxdlRoSHNrVURvRjRBalZibWhqZ1l0UGhRSzNQcWNoVzBUNHBKSE9NWDMyaloKQjNnZHczUyttL3ZvZHI1YU1sRW5ORDFFN1VITVZ4NFlJTFAxaGpYRHhLeXVnS0JSaEh1WG84ckpOc1JjYjBOVwpQajFGVTcyeUV2a0NtaEVQbzdDOUxjWTZwa2V0eHdDVXR4OFJ0R2xzM0FNSzdCZDM1TDQwOGtUS0N1RnNSbjQxClpXRVFjV3lRb1k0ZGk3N2xoR0JTZGxVM0pjVURoZ3Vzb1J0QzhKU3ZtK3FiQXdKQ2ZOVENSWW55VmNsaEJySUYKYVpTbG9mMENnWUVBODlMdXJjeUp1Ym9sb2F2OHhZRWZBVk5QZTdUS3NvcEJIRzJqQWJjUDI4S016T1kzbnJTago5SkE2Nlg2VTZQbklCR05lRkpjb29WVXNYNTZ6TUZNeTFrbHI4Z0RQY25jSzhqTmFtUVE1bHd5a0tFcVlhMDBiCmNZa2pGcVh3bkpZYi80N2RKRnNNMStXQWJRdkI2dWVkTmN3aGF1MHcvWmt4c1Nnb2FOU2ZFR2NDZ1lFQTBIOVcKQ0dtUUkrY05EVnRNSWNvVkZlWnlOZXJ1V1B5WFEySUJneXFzUStVM2s3WjJTN25tSXhLcmhZR2JnK29GdUhqWQp0NHl4cFo1U3hMbWdpOUg0RUJvSE1KTEVORlgxOXNaK29yRVFSRURCU2NDL29kd0JjcGhsVS9NV2RScnl2ZVMvCjY1SzVBU3p4dEJQU0txNEpaV2VzeG9PVWVlci94UEhpZ202MHlhTUNnWUVBbW9NU0krdFpYK3c1eXNGTjF1OWoKY1ppVTV3VUQ2cTY0WWthTUpSc1lIcUNrVDNpWXlLY2F1M0huRTI3VytPakpPeGFwV1EwTmYyZVZUaWNvMExmNApvRms0NGN4bnFkY01oYjNMYk9xRGdGaFBjd1VLbi96bUprWE1pWjZ3NHRveGRmclg0eUViKzNrQnNKamgwM1hoCloyVXJ6REh4ZU5QcTdLUlJMMVdHVTRNQ2dZQk9kS3dXREVzT0I0S1UxUmNIMi9nbFBvc1ptT2JKbTNyOEF5UVkKakhVNE0wZS9rTGhnOUhvU0ZwTnVjSC9wbXFVK2JsTDJkcjNlV1g1Qndxd1VFUldzVVc4Rkt5VWpvSGttdkRpbwoxY0lPUE9PKzhnWHFUVlJLSCtoNU40N3EwQklwdktzdlpRMnkwSXUvUEhvb1QwYi8zb3hZUXVrYkczL1ZSMFA1CjJ0aXl5d0tCZ1FEWkpNUEhneGZZTzNST0xHakxmY
UpqS2lxdUJzeTJOdDQxZUMzd2FUK3VCT1JUVEVGeExua2UKQU93emRqT3VJQyt2Z01TU1hMYkZiTEp1ODVOU0JMcy81a3EwQXFYcitxTXRSY1BpejNPQmZSL2dlSTgwZ3ZBeApYc1dJcGg3b1MrdGFsVENHZS9iamVab29KWDNKcTVZM2lEWHVxc04zbTRWVWoyRGlpWG5GSUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
kind: Secret
metadata:
  creationTimestamp: null
  name: dzzz-tls
type: kubernetes.io/tls

[root@ca-k8s-master01 Simple_fanout]# kubectl create secret tls dzzz-tls --key=./dzzz.key --cert=./dzzz.crt
secret/dzzz-tls created

[root@ca-k8s-master01 Simple_fanout]# kubectl get secret
NAME                          TYPE                 DATA   AGE
dzzz-tls                      kubernetes.io/tls    2      18s

#删除以前的ingress
[root@ca-k8s-master01 Ingress]# kubectl delete -f dzzz-ingress.yaml
ingress.networking.k8s.io "dzzz" deleted

#重新创建一个新的 TLS Ingress 这里必须加上rewrite-target="/" 否则找不到资源 因为demoappv10:80/
[root@ca-k8s-master01 Ingress]# 
kubectl create ingress dzzz \
--rule="dzzz.sheca.com/v10=demoappv10:80,tls=dzzz-tls" \
--rule="dzzz.sheca.com/v11=demoappv11:80,tls=dzzz-tls" \
--annotation nginx.ingress.kubernetes.io/rewrite-target="/" \
--class='nginx' \
-n default -o yaml --dry-run=client > dzzz-ingress.yaml

[root@ca-k8s-master01 Simple_fanout]# cat dzzz-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
  creationTimestamp: null
  name: dzzz
  namespace: default
spec:
  ingressClassName: nginx
  rules:
  - host: dzzz.sheca.com
    http:
      paths:
      - backend:
          service:
            name: demoappv10
            port:
              number: 80
        path: /v10
        pathType: Exact
      - backend:
          service:
            name: demoappv11
            port:
              number: 80
        path: /v11
        pathType: Exact
  tls:
  - hosts:
    - dzzz.sheca.com
    secretName: dzzz-tls
status:
  loadBalancer: {}

[root@ca-k8s-master01 Simple_fanout]# kubectl apply -f dzzz-ingress.yaml
ingress.networking.k8s.io/dzzz created
[root@ca-k8s-master01 Simple_fanout]# kubectl describe ingress dzzz
Name:             dzzz
Labels:           <none>
Namespace:        default
Address:
Ingress Class:    nginx
Default backend:  <default>
TLS:
  dzzz-tls terminates dzzz.sheca.com
Rules:
  Host            Path  Backends
  ----            ----  --------
  dzzz.sheca.com
                  /v10   demoappv10:80 (10.244.132.200:80,10.244.132.216:80,10.244.36.28:80)
                  /v11   demoappv11:80 (10.244.132.233:80,10.244.36.60:80)
Annotations:      nginx.ingress.kubernetes.io/rewrite-target: /
Events:
  Type    Reason  Age   From                      Message
  ----    ------  ----  ----                      -------
  Normal  Sync    6s    nginx-ingress-controller  Scheduled for sync

#访问https 协议 会自动跳转协议
https://dzzz.sheca.com/v10
https://dzzz.sheca.com/v11

#注意: 启用https后,该域名下的所有URI默认会强制将http请求跳转至https,若不希望使用该功能,可以使用如下注解
选项 --annotation nginx.ingress.kubernetes.io/ssl-redirect=false

 

posted @ 2024-01-31 00:06  しみずよしだ  阅读(340)  评论(1)    收藏  举报