Knative-入门部署【一】
#环境要求
◼ For prototyping purposes
◆单节点的Kubernetes集群,有2个可用的CPU核心,以及4g内存;
◼ For production purposes
◆单节点的Kubernetes集群,需要至少有6个CPU核心、6G内存和30G磁盘空间
◆多节点的Kubernetes集群中,每个节点至少有2个CPU核心,4G内存和20G磁盘空间
◆Kubernetes版本最低为v1.21
#安装步骤
◼ 部署Serving核心组件
◼ 部署网络层(networking layer)组件
◆Istio、Contour和Kourier三选一
◼ (可选)配置DNS
◼ (可选)部署Serving扩展
◆HPA:用于支持Kubernetes的HPA
◆Cert Manager:用于为工作负载自动签发TLS证书
◆Encrypt HTTP01:用于为工作负载自动签发TLS证书
Install the Knative Serving component
K8S Version:1.25.1
[root@xianchaomaster1 08-argo-rollouts]# kubectl version
WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version.
Client Version: version.Info{Major:"1", Minor:"25", GitVersion:"v1.25.1", GitCommit:"e4d4e1ab7cf1bf15273ef97303551b279f0920a9", GitTreeState:"clean", BuildDate:"2022-09-14T19:49:27Z", GoVersion:"go1.19.1", Compiler:"gc", Platform:"linux/amd64"}
Kustomize Version: v4.5.7
Server Version: version.Info{Major:"1", Minor:"25", GitVersion:"v1.25.1", GitCommit:"e4d4e1ab7cf1bf15273ef97303551b279f0920a9", GitTreeState:"clean", BuildDate:"2022-09-14T19:42:30Z", GoVersion:"go1.19.1", Compiler:"gc", Platform:"linux/amd64"}
#serving-crds.yaml
#kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.10.2/serving-crds.yaml
#1.7.1:
kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.7.1/serving-crds.yaml
[root@xksmaster1 Knative]# kubectl api-versions
admissionregistration.k8s.io/v1
apiextensions.k8s.io/v1
apiregistration.k8s.io/v1
apps/v1
authentication.k8s.io/v1
authorization.k8s.io/v1
autoscaling.internal.knative.dev/v1alpha1
autoscaling/v1
autoscaling/v2
batch/v1
caching.internal.knative.dev/v1alpha1
certificates.k8s.io/v1
coordination.k8s.io/v1
crd.projectcalico.org/v1
discovery.k8s.io/v1
events.k8s.io/v1
extensions.istio.io/v1alpha1
flowcontrol.apiserver.k8s.io/v1beta2
flowcontrol.apiserver.k8s.io/v1beta3
install.istio.io/v1alpha1
kuboard.cn/v1
networking.internal.knative.dev/v1alpha1
networking.istio.io/v1alpha3
networking.istio.io/v1beta1
networking.k8s.io/v1
node.k8s.io/v1
policy/v1
rbac.authorization.k8s.io/v1
scheduling.k8s.io/v1
security.istio.io/v1
security.istio.io/v1beta1
serving.knative.dev/v1
serving.knative.dev/v1alpha1
serving.knative.dev/v1beta1
storage.k8s.io/v1
storage.k8s.io/v1beta1
telemetry.istio.io/v1alpha1
v1
#Serving有如下几个专用的CRD
◼ serving.knative.dev群组
◆Service
◆Configuration
◆Revision
◆Route
◆DomainMapping
◼ autoscaling.internal.knative.dev群组
◆Metric
◆PodAutoscaler
◼ networking.internal.knative.dev群组
◆ServerlessService
◆ClusterDomainClaim
◆Certificate
[root@xksmaster1 Knative]# kubectl api-resources --api-group=serving.knative.dev
NAME SHORTNAMES APIVERSION NAMESPACED KIND
configurations config,cfg serving.knative.dev/v1 true Configuration
domainmappings dm serving.knative.dev/v1beta1 true DomainMapping
revisions rev serving.knative.dev/v1 true Revision
routes rt serving.knative.dev/v1 true Route
services kservice,ksvc serving.knative.dev/v1 true Service
[root@xksmaster1 Knative]# kubectl apply -f serving-crds.yaml
customresourcedefinition.apiextensions.k8s.io/certificates.networking.internal.knative.dev created
customresourcedefinition.apiextensions.k8s.io/configurations.serving.knative.dev created
customresourcedefinition.apiextensions.k8s.io/clusterdomainclaims.networking.internal.knative.dev created
customresourcedefinition.apiextensions.k8s.io/domainmappings.serving.knative.dev created
customresourcedefinition.apiextensions.k8s.io/ingresses.networking.internal.knative.dev created
customresourcedefinition.apiextensions.k8s.io/metrics.autoscaling.internal.knative.dev created
customresourcedefinition.apiextensions.k8s.io/podautoscalers.autoscaling.internal.knative.dev created
customresourcedefinition.apiextensions.k8s.io/revisions.serving.knative.dev created
customresourcedefinition.apiextensions.k8s.io/routes.serving.knative.dev created
customresourcedefinition.apiextensions.k8s.io/serverlessservices.networking.internal.knative.dev created
customresourcedefinition.apiextensions.k8s.io/services.serving.knative.dev created
customresourcedefinition.apiextensions.k8s.io/images.caching.internal.knative.dev created
#serving-core.yaml
#提前下载镜像 因为会访问不到 源地址: gcr.io/knative-releases/knative.dev/serving/cmd/queue@sha256:dabaecec38860ca4c972e6821d5dc825549faf50c6feb8feb4c04802f2338b8a
#使用这个域名 。相关文档:https://liangyuanpeng.com/post/service-lank8s.cn/
#下载 https://github.com/knative/serving/releases/download/knative-v1.10.2/serving-core.yaml 文件 重新编写
#把所有gcr.io/knative-releases 改为 gcr.lank8s.cn/knative-releases 这样进行配置 需要修改7处
spec:
# This is the Go import path for the binary that is containerized
# and substituted here.
image: gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/queue@sha256:dabaecec38860ca4c972e6821d5dc825549faf50c6feb8feb4c04802f2338b8a
#使用新域名进行下载镜像【1.10.2,与上文 serving-core.yaml 版本一致】
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/activator@sha256:c2994c2b6c2c7f38ad1b85c71789bf1753cc8979926423c83231e62258837cb9
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:8319aa662b4912e8175018bd7cc90c63838562a27515197b803bdcd5634c7007
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/controller@sha256:98a2cc7fd62ee95e137116504e7166c32c65efef42c3d1454630780410abf943
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/domain-mapping@sha256:f66c41ad7a73f5d4f4bdfec4294d5459c477f09f3ce52934d1a215e32316b59b
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/domain-mapping-webhook@sha256:7368aaddf2be8d8784dc7195f5bc272ecfe49d429697f48de0ddc44f278167aa
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/webhook@sha256:4305209ce498caf783f39c8f3e85dfa635ece6947033bf50b0b627983fd65953
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/queue@sha256:dabaecec38860ca4c972e6821d5dc825549faf50c6feb8feb4c04802f2338b8a
#使用新域名进行下载镜像【1.7.1】
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/queue@sha256:f0e506209c784eb65fe79c1d5b6c329c33c9df2b2f8bce44fb4566d12457c54d
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/activator@sha256:6fec2cb19a70b7da56d23178d3e6d2478fd053a83efed120851788a6b1704050
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:d23cc5fceab552f4924bef1b38af850d6ad29046f9e4d6f0e1e562cfec350746
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/controller@sha256:8ce127dd294a646b67200beae1321197b4a5de9d5b42daf9206849dba06d4197
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/domain-mapping@sha256:c78a29b606fbfd9781e49abb547ac592a2a59fd554e504d5b5b9d25ec3a59ee9
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/domain-mapping-webhook@sha256:e6ceb71d79fc3b5bab843fd213f5cf752f258aa70fbdb3f3046b9944e6c94200
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/webhook@sha256:2abda5baf78e30888fa0c5ca8442b32d613fe0854c3a473da4511a64922cb30d
#两台nodes 都要进行配置
[root@xianchaonode1 ~]# crictl images | grep gcr.lank8s
W0705 11:37:53.319935 117150 util_unix.go:103] Using "/run/containerd/containerd.sock" as endpoint is deprecated, please consider using full url format "unix:///run/containerd/containerd.sock".
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/activator <none> a727957e66177 16MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/autoscaler <none> 0c1bd73002056 16.3MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/controller <none> 522262008e8e3 18.1MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/domain-mapping-webhook <none> cf92650596b37 15.4MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/domain-mapping <none> 3ad2f6298bbfe 16MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/queue <none> 5962cd526e20b 10.1MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/webhook <none> 59cc11329cfc9 15.9MB
[root@xianchaomaster1 KnativeSrc]# crictl images | grep gcr.lank8s
W0705 11:38:27.359052 101253 util_unix.go:103] Using "/run/containerd/containerd.sock" as endpoint is deprecated, please consider using full url format "unix:///run/containerd/containerd.sock".
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/activator <none> a727957e66177 16MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/autoscaler <none> 0c1bd73002056 16.3MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/controller <none> 522262008e8e3 18.1MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/domain-mapping-webhook <none> cf92650596b37 15.4MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/domain-mapping <none> 3ad2f6298bbfe 16MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/queue <none> 5962cd526e20b 10.1MB
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/webhook <none> 59cc11329cfc9 15.9MB
[root@xksmaster1 Knative]# kubectl apply -f serving-core.yaml
namespace/knative-serving created
clusterrole.rbac.authorization.k8s.io/knative-serving-aggregated-addressable-resolver created
clusterrole.rbac.authorization.k8s.io/knative-serving-addressable-resolver created
clusterrole.rbac.authorization.k8s.io/knative-serving-namespaced-admin created
clusterrole.rbac.authorization.k8s.io/knative-serving-namespaced-edit created
clusterrole.rbac.authorization.k8s.io/knative-serving-namespaced-view created
clusterrole.rbac.authorization.k8s.io/knative-serving-core created
clusterrole.rbac.authorization.k8s.io/knative-serving-podspecable-binding created
serviceaccount/controller created
clusterrole.rbac.authorization.k8s.io/knative-serving-admin created
clusterrolebinding.rbac.authorization.k8s.io/knative-serving-controller-admin created
clusterrolebinding.rbac.authorization.k8s.io/knative-serving-controller-addressable-resolver created
customresourcedefinition.apiextensions.k8s.io/images.caching.internal.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/certificates.networking.internal.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/configurations.serving.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/clusterdomainclaims.networking.internal.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/domainmappings.serving.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/ingresses.networking.internal.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/metrics.autoscaling.internal.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/podautoscalers.autoscaling.internal.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/revisions.serving.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/routes.serving.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/serverlessservices.networking.internal.knative.dev unchanged
customresourcedefinition.apiextensions.k8s.io/services.serving.knative.dev unchanged
secret/serving-certs-ctrl-ca created
secret/knative-serving-certs created
secret/control-serving-certs created
secret/routing-serving-certs created
image.caching.internal.knative.dev/queue-proxy created
configmap/config-autoscaler created
configmap/config-defaults created
configmap/config-deployment created
configmap/config-domain created
configmap/config-features created
configmap/config-gc created
configmap/config-leader-election created
configmap/config-logging created
configmap/config-network created
configmap/config-observability created
configmap/config-tracing created
horizontalpodautoscaler.autoscaling/activator created
poddisruptionbudget.policy/activator-pdb created
deployment.apps/activator created
service/activator-service created
deployment.apps/autoscaler created
service/autoscaler created
deployment.apps/controller created
service/controller created
deployment.apps/domain-mapping created
deployment.apps/domainmapping-webhook created
service/domainmapping-webhook created
horizontalpodautoscaler.autoscaling/webhook created
poddisruptionbudget.policy/webhook-pdb created
deployment.apps/webhook created
service/webhook created
validatingwebhookconfiguration.admissionregistration.k8s.io/config.webhook.serving.knative.dev created
mutatingwebhookconfiguration.admissionregistration.k8s.io/webhook.serving.knative.dev created
mutatingwebhookconfiguration.admissionregistration.k8s.io/webhook.domainmapping.serving.knative.dev created
secret/domainmapping-webhook-certs created
validatingwebhookconfiguration.admissionregistration.k8s.io/validation.webhook.domainmapping.serving.knative.dev created
validatingwebhookconfiguration.admissionregistration.k8s.io/validation.webhook.serving.knative.dev created
secret/webhook-certs created
[root@xksmaster1 Knative]# kubectl get ns
NAME STATUS AGE
default Active 104d
demo Active 3d22h
istio-system Active 83d
keycloak Active 28h
knative-serving Active 25s
kube-node-lease Active 104d
kube-public Active 104d
kube-system Active 104d
kubernetes-dashboard Active 101d
kuboard Active 99d
oauth2-proxy Active 23h
[root@xksmaster1 Knative]# kubectl get pods -n knative-serving
NAME READY STATUS RESTARTS AGE
activator-79846b8bc7-xqlbj 1/1 Running 0 28s
autoscaler-546c8b96d9-b8q45 1/1 Running 0 27s
controller-5db4df7769-cbth9 1/1 Running 0 25s
domain-mapping-5d8f7f8946-6h5k4 1/1 Running 0 24s
domainmapping-webhook-75ff4dd7f6-zj689 1/1 Running 0 24s
webhook-6748648769-2kzkg 1/1 Running 0 23s
Install a networking layer-Istio
#如果有Istio 进行卸载、完整卸载istio
istioctl x uninstall --purge
1.【1.10.1版本下载镜像】
crictl pull docker.io/istio/pilot:1.17.1
crictl pull docker.io/istio/proxyv2:1.17.1
【1.7.1版本下载镜像】
crictl pull docker.io/istio/proxyv2:1.14.3
crictl pull docker.io/istio/pilot:1.14.3
crictl pull busybox:1.28
【1.10.1版本官网执行】
kubectl apply -l knative.dev/crd-install=true -f https://github.com/knative/net-istio/releases/download/knative-v1.10.1/istio.yaml
kubectl apply -f https://github.com/knative/net-istio/releases/download/knative-v1.10.1/istio.yaml
【1.7.1版本官网执行】
kubectl apply -l knative.dev/crd-install=true -f https://github.com/knative-sandbox/net-istio/releases/download/knative-v1.7.1/istio.yaml
kubectl apply -f https://github.com/knative-sandbox/net-istio/releases/download/knative-v1.7.1/istio.yaml
#修改istio.yaml 如果只有两台服务器
8164行:3 改 2
8587行 3 改 2
#修改policy/v1beta1 为 policy/v1
2.【自己下载 文件 在应用】
[root@xksmaster1 Knative]# kubectl apply -l knative.dev/crd-install=true -f istio.yaml
customresourcedefinition.apiextensions.k8s.io/authorizationpolicies.security.istio.io created
customresourcedefinition.apiextensions.k8s.io/destinationrules.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/envoyfilters.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/gateways.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/istiooperators.install.istio.io created
customresourcedefinition.apiextensions.k8s.io/peerauthentications.security.istio.io created
customresourcedefinition.apiextensions.k8s.io/proxyconfigs.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/requestauthentications.security.istio.io created
customresourcedefinition.apiextensions.k8s.io/serviceentries.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/sidecars.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/telemetries.telemetry.istio.io created
customresourcedefinition.apiextensions.k8s.io/virtualservices.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/wasmplugins.extensions.istio.io created
customresourcedefinition.apiextensions.k8s.io/workloadentries.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/workloadgroups.networking.istio.io created
报错忽略
#kubectl apply -f https://github.com/knative/net-istio/releases/download/knative-v1.10.1/istio.yaml
[root@xksmaster1 Knative]# kubectl apply -f istio.yaml
service/istio-ingressgateway created
service/istiod created
Warning: autoscaling/v2beta2 HorizontalPodAutoscaler is deprecated in v1.23+, unavailable in v1.26+; use autoscaling/v2 HorizontalPodAutoscaler
horizontalpodautoscaler.autoscaling/istiod created
poddisruptionbudget.policy/istio-ingressgateway created
poddisruptionbudget.policy/istiod created
mutatingwebhookconfiguration.admissionregistration.k8s.io/istio-sidecar-injector created
validatingwebhookconfiguration.admissionregistration.k8s.io/istio-validator-istio-system created
envoyfilter.networking.istio.io/stats-filter-1.11 created
envoyfilter.networking.istio.io/stats-filter-1.12 created
envoyfilter.networking.istio.io/stats-filter-1.13 created
envoyfilter.networking.istio.io/stats-filter-1.14 created
envoyfilter.networking.istio.io/stats-filter-1.15 created
envoyfilter.networking.istio.io/tcp-stats-filter-1.11 created
envoyfilter.networking.istio.io/tcp-stats-filter-1.12 created
envoyfilter.networking.istio.io/tcp-stats-filter-1.13 created
envoyfilter.networking.istio.io/tcp-stats-filter-1.14 created
envoyfilter.networking.istio.io/tcp-stats-filter-1.15 created
[root@xksmaster1 Knative]# kubectl get ns
NAME STATUS AGE
default Active 105d
demo Active 5d19h
istio-system Active 105s
keycloak Active 3d1h
knative-serving Active 17m
kube-node-lease Active 105d
kube-public Active 105d
kube-system Active 105d
kubernetes-dashboard Active 103d
kuboard Active 100d
oauth2-proxy Active 2d19h
[root@xianchaomaster1 ~]# kubectl get pods -n istio-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
istio-ingressgateway-694694cb6c-926rg 1/1 Running 0 6m39s 10.244.121.25 xianchaonode1 <none> <none>
istio-ingressgateway-694694cb6c-wkf88 1/1 Running 1 (2m5s ago) 6m39s 10.244.121.23 xianchaonode1 <none> <none>
istiod-644b755854-dpxvw 1/1 Running 0 6m24s 10.244.121.26 xianchaonode1 <none> <none>
istiod-644b755854-g976c 1/1 Running 2 (55s ago) 6m39s 10.244.121.21 xianchaonode1 <none> <none>
#下载net-istio.yaml
【1.10.1】
https://github.com/knative/net-istio/releases/download/knative-v1.10.1/net-istio.yaml
【1.7.1】
https://github.com/knative-sandbox/net-istio/releases/download/knative-v1.7.1/net-istio.yaml
[root@xksmaster1 Knative]# wget https://github.com/knative-sandbox/net-istio/releases/download/knative-v1.7.1/net-istio.yaml
#image: gcr.io/knative-releases/knative.dev/net-istio/cmd/controller@sha256:c110b0b5d545561f220d23bdb48a6c75f5591d068de9fb079baad47c82903e28
#image: gcr.io/knative-releases/knative.dev/net-istio/cmd/webhook@sha256:d74e79f7db426c1d24e060009e31344cad2d6e8c7e161184f121fde78b2f4a1d
#修改里面的镜像配置
#gcr.io/knative-releases 改为 gcr.lank8s.cn/knative-releases
#准备镜像
#node1\node2 都要下载
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/net-istio/cmd/controller@sha256:c110b0b5d545561f220d23bdb48a6c75f5591d068de9fb079baad47c82903e28
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/net-istio/cmd/webhook@sha256:d74e79f7db426c1d24e060009e31344cad2d6e8c7e161184f121fde78b2f4a1d
[root@xksmaster1 Knative]# kubectl apply -f net-istio.yaml
clusterrole.rbac.authorization.k8s.io/knative-serving-istio created
gateway.networking.istio.io/knative-ingress-gateway created
gateway.networking.istio.io/knative-local-gateway created
service/knative-local-gateway created
configmap/config-istio created
peerauthentication.security.istio.io/webhook created
peerauthentication.security.istio.io/domainmapping-webhook created
peerauthentication.security.istio.io/net-istio-webhook created
deployment.apps/net-istio-controller created
deployment.apps/net-istio-webhook created
secret/net-istio-webhook-certs created
service/net-istio-webhook created
mutatingwebhookconfiguration.admissionregistration.k8s.io/webhook.istio.networking.internal.knative.dev created
validatingwebhookconfiguration.admissionregistration.k8s.io/config.webhook.istio.networking.internal.knative.dev created
[root@xianchaomaster1 KnativeSrc]# kubectl get pods -n istio-system
NAME READY STATUS RESTARTS AGE
istio-ingressgateway-694694cb6c-926rg 1/1 Running 0 11m
istio-ingressgateway-694694cb6c-wkf88 1/1 Running 1 (7m3s ago) 11m
istiod-644b755854-dpxvw 1/1 Running 0 11m
istiod-644b755854-g976c 1/1 Running 2 (5m53s ago) 11m
[root@xianchaomaster1 KnativeSrc]# kubectl get pods -n knative-serving
NAME READY STATUS RESTARTS AGE
activator-ccbcfcdd-7f2v5 1/1 Running 1 (7m17s ago) 38m
autoscaler-7d5cbdff9c-w5bw9 1/1 Running 1 (7m17s ago) 38m
controller-66bb75dbdf-lddrp 1/1 Running 1 (7m17s ago) 38m
domain-mapping-66cdbd9c7d-mtmsq 1/1 Running 1 (7m17s ago) 38m
domainmapping-webhook-f974847c7-l428n 1/1 Running 1 (7m17s ago) 38m
net-istio-controller-54ff6778f8-n5sg8 1/1 Running 0 44s
net-istio-webhook-64d9b7b8bf-c7w75 1/1 Running 0 44s
webhook-848bcf68f7-9blcp 1/1 Running 2 (6m8s ago) 38m
#Serving的子组件
#Serving依赖于几个关键的组件协同其管理能力
◼ Activator:Revision中的Pod数量收缩至0时,activator负责接收并缓存相关的请求,同时报告指标数据给Autoscaler,并在Autoscaler在Revision上扩展出必要的Pod后,再将请求路由至相应的Revision;
◼ Autoscaler:Knative通过注入一个称为queue-proxy容器的sidecar代理来了解它部署的Pod上的请求,而Autoscaler会为每个服务使用“每秒请求数”来自动缩放其Revision上的Pod;
◼ Controller:负责监视Serving CRD(KService、Configuration、Route和Revision)相关的API对象并管理它们的生命周期,是Serving声明式API的关键保障;
◼ Webhook:为Kubernetes提供的外置Admission Controller,兼具Validation和Mutation的功能,主要作用于Serving专有的几个API资源类型之上,以及相关的ConfigMap资源上;
◼ Domain-mapping:将指定的域名映射至Service、KService,甚至是Knative Route之上,从而使用自定义域名访问特定的服务;
◼ Domainmapping-Webhook:Domain-mapping专用的Admission Controller
◼ net-certmanager-controller:与Cert Manager协同时使用的专用的控制器;
◼ net-istio-controller:与Istio协同时使用的专用控制器
Verify the installation
#找一台服务器加一个子接口192.168.40.190
ifconfig ens33:0 192.168.40.190/24
ip addr
cp /etc/sysconfig/network-scripts/ifcfg-ens33 /etc/sysconfig/network-scripts/ifcfg-ens33:0
vim /etc/sysconfig/network-scripts/ifcfg-ens33:0
DEVICE=ens33:0
IPADDR=192.168.40.190
PREFIX=24
ONPARENT=yes
systemctl restart network
ip addr
#配置EXTERNAL-IP 192.168.40.190
[root@xianchaomaster1 KnativeSrc]# kubectl edit svc istio-ingressgateway -n istio-system
externalIPs:
- 192.168.40.190
[root@xianchaomaster1 KnativeSrc]# kubectl --namespace istio-system get service istio-ingressgateway
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
istio-ingressgateway LoadBalancer 10.96.196.211 192.168.40.190 15021:30508/TCP,80:30590/TCP,443:30796/TCP 26m
Install optional Serving extensions
kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.10.2/serving-hpa.yaml
Configure DNS
使用curl 命令行 请求头
Install optional Serving extensions
#Knative 默认使用自带的 KPA 进行自动扩缩容;安装 serving-hpa 扩展后,也可以改用 K8S 原生的 HPA(HPA 依赖 Metrics Server)
【Knative also supports the use of the Kubernetes Horizontal Pod Autoscaler (HPA) for driving autoscaling decisions.】
【1.10.1】
kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.10.2/serving-hpa.yaml
【1.7.1】
kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.7.1/serving-hpa.yaml
#修改镜像地址
gcr.io/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:b7ae0eeb7d5c269f5ef4f92fd8c9ac1fd18f24f1e3b4457c370772208cf29a09
改为
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:b7ae0eeb7d5c269f5ef4f92fd8c9ac1fd18f24f1e3b4457c370772208cf29a09
#下载镜像 2台都要
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/autoscaler-hpa@sha256:b7ae0eeb7d5c269f5ef4f92fd8c9ac1fd18f24f1e3b4457c370772208cf29a09
[root@xianchaomaster1 KnativeSrc]# kubectl apply -f serving-hpa.yaml
deployment.apps/autoscaler-hpa created
service/autoscaler-hpa created
#autoscaler-hpa
[root@xianchaomaster1 KnativeSrc]# kubectl get pods -n knative-serving
NAME READY STATUS RESTARTS AGE
activator-ccbcfcdd-7f2v5 1/1 Running 1 (47m ago) 79m
autoscaler-7d5cbdff9c-w5bw9 1/1 Running 1 (47m ago) 79m
autoscaler-hpa-7ddf58b94f-dswmf 1/1 Running 0 33s
controller-66bb75dbdf-lddrp 1/1 Running 1 (47m ago) 79m
domain-mapping-66cdbd9c7d-mtmsq 1/1 Running 1 (47m ago) 79m
domainmapping-webhook-f974847c7-l428n 1/1 Running 1 (47m ago) 79m
net-istio-controller-54ff6778f8-n5sg8 1/1 Running 0 41m
net-istio-webhook-64d9b7b8bf-c7w75 1/1 Running 0 41m
webhook-848bcf68f7-9blcp 1/1 Running 2 (46m ago) 79m
Install the Knative CLI (kn)
https://knative.dev/docs/getting-started/quickstart-install/#install-the-knative-cli
#下载地址
【1.10.1】
wget https://github.com/knative/client/releases/download/knative-v1.10.0/kn-linux-amd64
【1.7.1】
wget https://github.com/knative/client/releases/download/knative-v1.7.1/kn-linux-amd64
[root@xianchaomaster1 KnativeSrc]# cp kn-linux-amd64 /usr/local/bin/kn
[root@xianchaomaster1 KnativeSrc]# chmod +x /usr/local/bin/kn
[root@xianchaomaster1 KnativeSrc]# kn --help
kn is the command line interface for managing Knative Serving and Eventing resources
Find more information about Knative at: https://knative.dev
Serving Commands:
service Manage Knative services
revision Manage service revisions
route List and describe service routes
domain Manage domain mappings
container Manage service's containers (experimental)
Eventing Commands:
source Manage event sources
broker Manage message brokers
trigger Manage event triggers
channel Manage event channels
subscription Manage event subscriptions
eventtype Manage eventtypes
Other Commands:
plugin Manage kn plugins
secret Manage secrets
completion Output shell completion code
version Show the version of this client
Use "kn <command> --help" for more information about a given command.
Use "kn options" for a list of global command-line options (applies to all commands).
[root@xianchaomaster1 KnativeSrc]# kn version
Version: v1.7.1
Build Date: 2022-10-11 10:18:30
Git Revision: e2f6caf3
Supported APIs:
* Serving
- serving.knative.dev/v1 (knative-serving v1.7.0)
* Eventing
- sources.knative.dev/v1 (knative-eventing v1.7.1)
- eventing.knative.dev/v1 (knative-eventing v1.7.1)
#使用KNative进行 Servless 测试
kn service create demoapp --image=ikubernetes/demoapp:v1.0
kn service create helloworld-java1 --image=docker.io/abreaking/helloworld-java
[root@xianchaomaster1 KnativeSrc]# kubectl get svc -n istio-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
istio-ingressgateway LoadBalancer 10.96.196.211 192.168.40.190 15021:30508/TCP,80:30590/TCP,443:30796/TCP 36m
istiod ClusterIP 10.96.53.240 <none> 15010/TCP,15012/TCP,443/TCP,15014/TCP 36m
knative-local-gateway ClusterIP 10.96.27.221 <none> 80/TCP 25m
#两台都要镜像下载
[root@xianchaomaster1 KnativeSrc]# crictl pull ikubernetes/demoapp:v1.0
[root@xianchaonode1 ~]# crictl pull ikubernetes/demoapp:v1.0
#使用kn 命令创建服务
[root@xianchaomaster1 KnativeSrc]# kn service create demoapp --image=ikubernetes/demoapp:v1.0
Creating service 'demoapp' in namespace 'default':
0.028s The Route is still working to reflect the latest desired specification.
0.056s ...
0.067s Configuration "demoapp" is waiting for a Revision to become ready.
4.342s ...
4.361s Ingress has not yet been reconciled.
4.421s Waiting for load balancer to be ready
4.601s Ready to serve.
Service 'demoapp' created to latest revision 'demoapp-00001' is available at URL:
http://demoapp.default.example.com
#访问服务
[root@xianchaomaster1 KnativeSrc]# curl -H "Host: demoapp.default.example.com" 192.168.40.190
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: demoapp-00001-deployment-575c455cd8-fp6zd, ServerIP: 10.244.121.29!
[root@xianchaomaster1 KnativeSrc]# curl -H "Host: demoapp.default.example.com" 192.168.40.190 iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: demoapp-00001-deployment-575c455cd8-v2k86, ServerIP: 10.244.121.30!
#可以看到:一段时间无访问后,第一个 Pod 正在终止(缩容到 0);第二次访问时又重新启动了一个新的 Pod
[root@xianchaomaster1 KnativeSrc]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
demoapp-00001-deployment-575c455cd8-fp6zd 2/2 Terminating 0 2m35s
demoapp-00001-deployment-575c455cd8-v2k86 2/2 Running 0 5s
#查看 serverless 参数配置
[root@xianchaomaster1 KnativeSrc]# kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
demoapp-00001-deployment 1/1 1 1 3m4s
[root@xianchaomaster1 KnativeSrc]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
demoapp ExternalName <none> knative-local-gateway.istio-system.svc.cluster.local 80/TCP 3m9s
demoapp-00001 ClusterIP 10.96.0.74 <none> 80/TCP,443/TCP 3m10s
demoapp-00001-private ClusterIP 10.96.92.180 <none> 80/TCP,443/TCP,9090/TCP,9091/TCP,8022/TCP,8012/TCP 3m10s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 10d
[root@xianchaomaster1 KnativeSrc]# kubectl get configuration
NAME LATESTCREATED LATESTREADY READY REASON
demoapp demoapp-00001 demoapp-00001 True
You have new mail in /var/spool/mail/root
[root@xianchaomaster1 KnativeSrc]# kubectl get revision
NAME CONFIG NAME K8S SERVICE NAME GENERATION READY REASON ACTUAL REPLICAS DESIRED REPLICAS
demoapp-00001 demoapp 1 True 0 0
[root@xianchaomaster1 KnativeSrc]# kubectl get route
NAME URL READY REASON
demoapp http://demoapp.default.example.com True
[root@xianchaomaster1 KnativeSrc]# kubectl get vs
NAME GATEWAYS HOSTS AGE
demoapp-ingress ["knative-serving/knative-ingress-gateway","knative-serving/knative-local-gateway"] ["demoapp.default","demoapp.default.example.com","demoapp.default.svc","demoapp.default.svc.cluster.local"] 5m57s
demoapp-mesh ["mesh"] ["demoapp.default","demoapp.default.svc","demoapp.default.svc.cluster.local"] 5m57s
补充:网关mesh和knative-local-gateway 作用
如果K8S上本身没有启用 service mesh机制的话,那么 knative-local-gateway 发挥作用
[root@xianchaomaster1 KnativeSrc]# kubectl get vs
NAME GATEWAYS HOSTS AGE
demoapp-ingress ["knative-serving/knative-ingress-gateway","knative-serving/knative-local-gateway"] ["demoapp.default","demoapp.default.example.com","demoapp.default.svc","demoapp.default.svc.cluster.local"] 5m57s
demoapp-mesh ["mesh"] ["demoapp.default","demoapp.default.svc","demoapp.default.svc.cluster.local"] 5m57s
#["mesh"]
1.如果激活了服务网格 istio-sidecar 那么借助mesh 来进行调度
2.如果未激活服务网格 那么knative-local-gateway进行调度 服务

浙公网安备 33010602011771号