
# Kubernetes Learning Notes (Part 2)

About the cfssl toolset:

  1. cfssl: the main tool for issuing (signing) certificates
  2. cfssl-json: converts the JSON that cfssl emits into file-based certificates (format conversion)
  3. cfssl-certinfo: inspects the information inside a certificate
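
As a minimal sketch of how the three tools are usually chained together (the csr/config file names and the profile below are assumptions from a typical cfssl layout, not files shown in this document):

# on the certificate host, e.g. in /opt/certs
cfssl gencert \
  -ca=ca.pem -ca-key=ca-key.pem \
  -config=ca-config.json -profile=server \
  apiserver-csr.json | cfssl-json -bare apiserver
# cfssl prints the key and cert as JSON; cfssl-json writes apiserver.pem / apiserver-key.pem
cfssl-certinfo -cert apiserver.pem    # then inspect the result, as below
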
[root@hdss7-200 certs]# cfssl-certinfo -cert apiserver.pem 
{
  "subject": {
    "common_name": "k8s-apiserver",
    "country": "CN",
    "organization": "od",
    "organizational_unit": "ops",
    "locality": "beijing",
    "province": "beijing",
    "names": [
      "CN",
      "beijing",
      "beijing",
      "od",
      "ops",
      "k8s-apiserver"
    ]
  },
  "issuer": {
    "common_name": "OldboyEdu",
    "country": "CN",
    "organization": "od",
    "organizational_unit": "ops",
    "locality": "beijing",
    "province": "beijing",
    "names": [
      "CN",
      "beijing",
      "beijing",
      "od",
      "ops",
      "OldboyEdu"
    ]
  },
  "serial_number": "62468170061019590709031283934966329926677673387",
  "sans": [
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local",
    "127.0.0.1",
    "192.168.0.1",
    "10.4.7.10",
    "10.4.7.21",
    "10.4.7.22",
    "10.4.7.23"
  ],
  "not_before": "2020-08-17T10:09:00Z",
  "not_after": "2040-08-12T10:09:00Z",
  "sigalg": "SHA256WithRSA",
  "authority_key_id": "5E:EB:5:C5:1B:EE:B8:55:48:0:63:F6:32:66:FA:35:79:1:F7:B9",
  "subject_key_id": "C4:C5:92:33:BD:B9:44:4:27:C8:ED:C0:30:E6:37:AF:AF:60:D2:E9",
  "pem": "-----BEGIN CERTIFICATE-----\nMIIEbzCCA1egAwIBAgIUCvErbIhHL2mGy2uJBpK6BygrtaswDQYJKoZIhvcNAQEL\nBQAwYDELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB2JlaWppbmcxEDAOBgNVBAcTB2Jl\naWppbmcxCzAJBgNVBAoTAm9kMQwwCgYDVQQLEwNvcHMxEjAQBgNVBAMTCU9sZGJv\neUVkdTAeFw0yMDA4MTcxMDA5MDBaFw00MDA4MTIxMDA5MDBaMGQxCzAJBgNVBAYT\nAkNOMRAwDgYDVQQIEwdiZWlqaW5nMRAwDgYDVQQHEwdiZWlqaW5nMQswCQYDVQQK\nEwJvZDEMMAoGA1UECxMDb3BzMRYwFAYDVQQDEw1rOHMtYXBpc2VydmVyMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyMmBlrvSNqqdDAjAtQqkFyR3CVah\ny19JruRxN2ot8/ohB+3GPhH4gn0ugYjMiieIN0G66ntGWAG3OGZHCJYvms3TPA2V\nRvAJIc+EMRuHKhw9ifqHiG2SbKPvCZF4Cd/4kz1iaZ3rZTrP0OS1o/MYNRB3Tg8C\nMS9e2gyvoTVyuESXtTp4RuQZZlP9qgLFIKAJrxKTS92321UKQx4UZVwWWSVSAOj5\nTR0uYmRPEAwHJ5w1LfcJ3DfwDLTxXdN1rtiu3Tqb2B7OLSnSjSBHjvoewgGr+uv8\nvMw1I8PAyxTLoyW1+YAYN1s73YUm0/keuWRNaqt/td8zoGFyso2zjOSGiwIDAQAB\no4IBGzCCARcwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwG\nA1UdEwEB/wQCMAAwHQYDVR0OBBYEFMTFkjO9uUQEJ8jtwDDmN6+vYNLpMB8GA1Ud\nIwQYMBaAFF7rBcUb7rhVSABj9jJm+jV5Afe5MIGhBgNVHREEgZkwgZaCEmt1YmVy\nbmV0ZXMuZGVmYXVsdIIWa3ViZXJuZXRlcy5kZWZhdWx0LnN2Y4Iea3ViZXJuZXRl\ncy5kZWZhdWx0LnN2Yy5jbHVzdGVygiRrdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNs\ndXN0ZXIubG9jYWyHBH8AAAGHBMCoAAGHBAoEBwqHBAoEBxWHBAoEBxaHBAoEBxcw\nDQYJKoZIhvcNAQELBQADggEBAMYCHMsgy79xecztAvH8+e2UqP41wRP3fJeWEcYM\ntXmcSQ2E3STYzHviro6/ZqX2J3NYPLU84Gr5nxih5IKKkrIo/UrvH0nf+QUEuIAt\n0HRNGet2CuFcVXc9CVPprzByi0nf0xMpTfSNepX5Kg2D6wF5m6DATfqTBoOKsB3G\n56W6gk8svrDBU1WdiCWEi/t4rD9sLO7B9QjRb2cjcnd5f5rwBgi/ngjheEofZTx5\n64RF34UUdajWwl7stxKy5ElYLax5uYA50hh1I7zTlz76r17AclLN+6HBDJpIxb76\ny4m1dPigs6FYX37ki1lg9/975pBDDFQ4l05La3dswKOk5XU=\n-----END CERTIFICATE-----\n"
}

[root@hdss7-200 certs]# cfssl-certinfo -domain www.baidu.com
{
  "subject": {
    "common_name": "baidu.com",
    "country": "CN",
    "organization": "Beijing Baidu Netcom Science Technology Co., Ltd",
    "organizational_unit": "service operation department",
    "locality": "beijing",
    "province": "beijing",
    "names": [
      "CN",
      "beijing",
      "beijing",
      "service operation department",
      "Beijing Baidu Netcom Science Technology Co., Ltd",
      "baidu.com"
    ]
  },
  "issuer": {
    "common_name": "GlobalSign Organization Validation CA - SHA256 - G2",
    "country": "BE",
    "organization": "GlobalSign nv-sa",
    "names": [
      "BE",
      "GlobalSign nv-sa",
      "GlobalSign Organization Validation CA - SHA256 - G2"
    ]
  },
  "serial_number": "35388244279832734960132917320",
  "sans": [
    "baidu.com",
    "baifubao.com",
    "www.baidu.cn",
    "www.baidu.com.cn",
    "mct.y.nuomi.com",
    "apollo.auto",
    "dwz.cn",
    "*.baidu.com",
    "*.baifubao.com",
    "*.baidustatic.com",
    "*.bdstatic.com",
    "*.bdimg.com",
    "*.hao123.com",
    "*.nuomi.com",
    "*.chuanke.com",
    "*.trustgo.com",
    "*.bce.baidu.com",
    "*.eyun.baidu.com",
    "*.map.baidu.com",
    "*.mbd.baidu.com",
    "*.fanyi.baidu.com",
    "*.baidubce.com",
    "*.mipcdn.com",
    "*.news.baidu.com",
    "*.baidupcs.com",
    "*.aipage.com",
    "*.aipage.cn",
    "*.bcehost.com",
    "*.safe.baidu.com",
    "*.im.baidu.com",
    "*.baiducontent.com",
    "*.dlnel.com",
    "*.dlnel.org",
    "*.dueros.baidu.com",
    "*.su.baidu.com",
    "*.91.com",
    "*.hao123.baidu.com",
    "*.apollo.auto",
    "*.xueshu.baidu.com",
    "*.bj.baidubce.com",
    "*.gz.baidubce.com",
    "*.smartapps.cn",
    "*.bdtjrcv.com",
    "*.hao222.com",
    "*.haokan.com",
    "*.pae.baidu.com",
    "*.vd.bdstatic.com",
    "click.hm.baidu.com",
    "log.hm.baidu.com",
    "cm.pos.baidu.com",
    "wn.pos.baidu.com",
    "update.pan.baidu.com"
  ],
  "not_before": "2020-04-02T07:04:58Z",
  "not_after": "2021-07-26T05:31:02Z",
  "sigalg": "SHA256WithRSA",
  "authority_key_id": "96:DE:61:F1:BD:1C:16:29:53:1C:C0:CC:7D:3B:83:0:40:E6:1A:7C",
  "subject_key_id": "9E:C9:79:D7:E9:5B:AB:8A:16:CC:32:8E:C6:99:E6:9F:20:42:35:87",
  "pem": "-----BEGIN CERTIFICATE-----\nMIIKLjCCCRagAwIBAgIMclh4Nm6fVugdQYhIMA0GCSqGSIb3DQEBCwUAMGYxCzAJ\nBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMTwwOgYDVQQDEzNH\nbG9iYWxTaWduIE9yZ2FuaXphdGlvbiBWYWxpZGF0aW9uIENBIC0gU0hBMjU2IC0g\nRzIwHhcNMjAwNDAyMDcwNDU4WhcNMjEwNzI2MDUzMTAyWjCBpzELMAkGA1UEBhMC\nQ04xEDAOBgNVBAgTB2JlaWppbmcxEDAOBgNVBAcTB2JlaWppbmcxJTAjBgNVBAsT\nHHNlcnZpY2Ugb3BlcmF0aW9uIGRlcGFydG1lbnQxOTA3BgNVBAoTMEJlaWppbmcg\nQmFpZHUgTmV0Y29tIFNjaWVuY2UgVGVjaG5vbG9neSBDby4sIEx0ZDESMBAGA1UE\nAxMJYmFpZHUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwamw\nrkca0lfrHRUfblyy5PgLINvqAN8p/6RriSZLnyMv7FewirhGQCp+vNxaRZdPrUEO\nvCCGSwxdVSFH4jE8V6fsmUfrRw1y18gWVHXv00URD0vOYHpGXCh0ro4bvthwZnuo\nk0ko0qN2lFXefCfyD/eYDK2G2sau/Z/w2YEympfjIe4EkpbkeBHlxBAOEDF6Speg\n68ebxNqJN6nDN9dWsX9Sx9kmCtavOBaxbftzebFoeQOQ64h7jEiRmFGlB5SGpXhG\neY9Ym+k1Wafxe1cxCpDPJM4NJOeSsmrp5pY3Crh8hy900lzoSwpfZhinQYbPJqYI\njqVJF5JTs5Glz1OwMQIDAQABo4IGmDCCBpQwDgYDVR0PAQH/BAQDAgWgMIGgBggr\nBgEFBQcBAQSBkzCBkDBNBggrBgEFBQcwAoZBaHR0cDovL3NlY3VyZS5nbG9iYWxz\naWduLmNvbS9jYWNlcnQvZ3Nvcmdhbml6YXRpb252YWxzaGEyZzJyMS5jcnQwPwYI\nKwYBBQUHMAGGM2h0dHA6Ly9vY3NwMi5nbG9iYWxzaWduLmNvbS9nc29yZ2FuaXph\ndGlvbnZhbHNoYTJnMjBWBgNVHSAETzBNMEEGCSsGAQQBoDIBFDA0MDIGCCsGAQUF\nBwIBFiZodHRwczovL3d3dy5nbG9iYWxzaWduLmNvbS9yZXBvc2l0b3J5LzAIBgZn\ngQwBAgIwCQYDVR0TBAIwADBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmds\nb2JhbHNpZ24uY29tL2dzL2dzb3JnYW5pemF0aW9udmFsc2hhMmcyLmNybDCCA04G\nA1UdEQSCA0UwggNBggliYWlkdS5jb22CDGJhaWZ1YmFvLmNvbYIMd3d3LmJhaWR1\nLmNughB3d3cuYmFpZHUuY29tLmNugg9tY3QueS5udW9taS5jb22CC2Fwb2xsby5h\ndXRvggZkd3ouY26CCyouYmFpZHUuY29tgg4qLmJhaWZ1YmFvLmNvbYIRKi5iYWlk\ndXN0YXRpYy5jb22CDiouYmRzdGF0aWMuY29tggsqLmJkaW1nLmNvbYIMKi5oYW8x\nMjMuY29tggsqLm51b21pLmNvbYINKi5jaHVhbmtlLmNvbYINKi50cnVzdGdvLmNv\nbYIPKi5iY2UuYmFpZHUuY29tghAqLmV5dW4uYmFpZHUuY29tgg8qLm1hcC5iYWlk\ndS5jb22CDyoubWJkLmJhaWR1LmNvbYIRKi5mYW55aS5iYWlkdS5jb22CDiouYmFp\nZHViY2UuY29tggwqLm1pcGNkbi5jb22CECoubmV3cy5iYWlkdS5jb22CDiouYmFp\nZHVwY3MuY29tggwqLmFpcGFnZS5jb22CCyouYWlwYWdlLmNugg0qLmJjZWhvc3Qu\nY29tghAqLnNhZmUuYmFpZHUuY29tgg4qLmltLmJhaWR1LmNvbYISKi5iYWlkdWNv\nbnRlbnQuY29tggsqLmRsbmVsLmNvbYILKi5kbG5lbC5vcmeCEiouZHVlcm9zLmJh\naWR1LmNvbYIOKi5zdS5iYWlkdS5jb22CCCouOTEuY29tghIqLmhhbzEyMy5iYWlk\ndS5jb22CDSouYXBvbGxvLmF1dG+CEioueHVlc2h1LmJhaWR1LmNvbYIRKi5iai5i\nYWlkdWJjZS5jb22CESouZ3ouYmFpZHViY2UuY29tgg4qLnNtYXJ0YXBwcy5jboIN\nKi5iZHRqcmN2LmNvbYIMKi5oYW8yMjIuY29tggwqLmhhb2thbi5jb22CDyoucGFl\nLmJhaWR1LmNvbYIRKi52ZC5iZHN0YXRpYy5jb22CEmNsaWNrLmhtLmJhaWR1LmNv\nbYIQbG9nLmhtLmJhaWR1LmNvbYIQY20ucG9zLmJhaWR1LmNvbYIQd24ucG9zLmJh\naWR1LmNvbYIUdXBkYXRlLnBhbi5iYWlkdS5jb20wHQYDVR0lBBYwFAYIKwYBBQUH\nAwEGCCsGAQUFBwMCMB8GA1UdIwQYMBaAFJbeYfG9HBYpUxzAzH07gwBA5hp8MB0G\nA1UdDgQWBBSeyXnX6VurihbMMo7GmeafIEI1hzCCAX4GCisGAQQB1nkCBAIEggFu\nBIIBagFoAHYAXNxDkv7mq0VEsV6a1FbmEDf71fpH3KFzlLJe5vbHDsoAAAFxObU8\nugAABAMARzBFAiBphmgxIbNZXaPWiUqXRWYLaRST38KecoekKIof5fXmsgIhAMkZ\ntF8XyKCu/nZll1e9vIlKbW8RrUr/74HpmScVRRsBAHYAb1N2rDHwMRnYmQCkURX/\ndxUcEdkCwQApBo2yCJo32RMAAAFxObU85AAABAMARzBFAiBURWwwTgXZ+9IV3mhm\nE0EOzbg901DLRszbLIpafDY/XgIhALsvEGqbBVrpGxhKoTVlz7+GWom8SrfUeHcn\n4+9Dn7xGAHYA9lyUL9F3MCIUVBgIMJRWjuNNExkzv98MLyALzE7xZOMAAAFxObU8\nqwAABAMARzBFAiBFBYPxKEdhlf6bqbwxQY7tskgdoFulPxPmdrzS5tNpPwIhAKnK\nqwzch98lINQYzLAV52+C8GXZPXFZNfhfpM4tQ6xbMA0GCSqGSIb3DQEBCwUAA4IB\nAQC83ALQ2d6MxeLZ/k3vutEiizRCWYSSMYLVCrxANdsGshNuyM8B8V/A57c0Nzqo\nCPKfMtX5IICfv9P/bUecdtHL8cfx24MzN+U/GKcA4r3a/k8pRVeHeF9ThQ2zo1xj\nk/7gJl75koztdqNfOeYiBTbFMnPQzVGqyMMfqKxbJrfZlGAIgYHT9bd6T985IVgz\ntRVjAoy4IurZenTsWkG7PafJ4kAh6jQaSu1zYEbHljuZ5PXlkhPO9DwW1WIPug6Z\nrlylLTTYmlW3WETOAT
i70HYsZN6NACuZ4t1hEO3AsF7lqjdA2HwTN10FX2HuaUvf\n5OzP+PKupV9VKw8x8mQKU6vr\n-----END CERTIFICATE-----\n"
}

Compare the md5 checksums of the two files (the same kubeconfig on both nodes should produce the same hash):

[root@hdss7-22 conf]# md5sum kubelet.kubeconfig
2a6242ea63d7befc08caf71e4e149726  kubelet.kubeconfig

Three basic ways to manage core K8S resources:

  • Imperative management: relies mainly on command-line (CLI) tools
  • Declarative management: relies mainly on unified resource configuration manifests
  • GUI management: relies mainly on a graphical web interface

1. K8S core resource management methods

1.1. Imperative resource management

Run on any K8S compute node:

1.1.1. Managing namespace resources

1.1.1.1. Listing namespaces
[root@hdss7-21 ~]# kubectl get namespace
NAME              STATUS   AGE
default           Active   13h
kube-node-lease   Active   13h
kube-public       Active   13h
kube-system       Active   13h

namespace can be abbreviated to ns

[root@hdss7-21 ~]# kubectl get ns
NAME              STATUS   AGE
default           Active   13h
kube-node-lease   Active   13h
kube-public       Active   13h
kube-system       Active   13h
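
If you are unsure of a resource's short name, recent kubectl versions can list them all (a sketch; the grep filter is just an example):

kubectl api-resources | head -1           # the header includes a SHORTNAMES column
kubectl api-resources | grep namespaces   # shows "ns" as the short name for namespaces
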
1.1.1.2. Listing resources inside a namespace
[root@hdss7-21 ~]# kubectl get all -n default
NAME                 READY   STATUS    RESTARTS   AGE
pod/nginx-ds-ml28t   1/1     Running   1          13h
pod/nginx-ds-vwzn6   1/1     Running   1          13h


NAME                 TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   13h

NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/nginx-ds   2         2         2       2            2           <none>          13h

If no namespace is given, default is assumed

1.1.1.3. Creating a namespace
[root@hdss7-21 ~]# kubectl create namespace app
namespace/app created
[root@hdss7-21 ~]# kubectl get ns
NAME              STATUS   AGE
app               Active   25s
default           Active   16h
kube-node-lease   Active   16h
kube-public       Active   16h
kube-system       Active   16h
1.1.1.4. Deleting a namespace
[root@hdss7-21 ~]# kubectl delete namespace app 
namespace "app" deleted
[root@hdss7-21 ~]# kubectl get ns
NAME              STATUS   AGE
default           Active   16h
kube-node-lease   Active   16h
kube-public       Active   16h
kube-system       Active   16h

1.1.2. Managing Deployment resources

1.1.2.1. Creating a Deployment (pod controller)

This creates a pod controller in the kube-public namespace. The controller type is Deployment, and the image it runs is the nginx image from the public project on harbor.od.com.

[root@hdss7-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 -n kube-public
deployment.apps/nginx-dp created
1.1.2.2. Viewing the Deployment (pod controller)
[root@hdss7-21 ~]# kubectl get deployment -n kube-public
NAME       READY   UP-TO-DATE   AVAILABLE   AGE
nginx-dp   1/1     1            1           31s

`deployment` can be abbreviated to `deploy`, so the same query can also be written as:

[root@hdss7-21 ~]# kubectl  get deploy -n kube-public
NAME       READY   UP-TO-DATE   AVAILABLE   AGE
nginx-dp   1/1     1            1           15m

# Detailed view

[root@hdss7-21 ~]# kubectl describe deployment nginx-dp -n kube-public 
Name:                   nginx-dp
Namespace:              kube-public
CreationTimestamp:      Tue, 18 Aug 2020 12:42:06 +0800
Labels:                 app=nginx-dp
Annotations:            deployment.kubernetes.io/revision: 1
Selector:               app=nginx-dp
Replicas:               1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=nginx-dp
  Containers:
   nginx:
    Image:        harbor.od.com/public/nginx:v1.7.9
    Port:         <none>
    Host Port:    <none>
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
  Progressing    True    NewReplicaSetAvailable
OldReplicaSets:  <none>
NewReplicaSet:   nginx-dp-5dfc689474 (1/1 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  23m   deployment-controller  Scaled up replica set nginx-dp-5dfc689474 to 1



1.1.2.3. Viewing pod resources
[root@hdss7-21 ~]# kubectl get pods -n kube-public
NAME                        READY   STATUS    RESTARTS   AGE
nginx-dp-5dfc689474-9l8s8   1/1     Running   0          12m

# -o wide queries resources in extended (wide) form

[root@hdss7-21 ~]# kubectl get pods -n kube-public -o wide 
NAME                        READY   STATUS    RESTARTS   AGE   IP           NODE                NOMINATED NODE   READINESS GATES
nginx-dp-5dfc689474-9l8s8   1/1     Running   0          13m   172.7.21.3   hdss7-21.host.com   <none>           <none>
1.1.2.4. Entering a pod
[root@hdss7-21 ~]# kubectl get pods -n kube-public 
NAME                        READY   STATUS    RESTARTS   AGE
nginx-dp-5dfc689474-9l8s8   1/1     Running   0          49m
[root@hdss7-21 ~]# kubectl exec -it nginx-dp-5dfc689474-9l8s8 /bin/bash -n kube-public 
root@nginx-dp-5dfc689474-9l8s8:/# ip add 
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP 
    link/ether 02:42:ac:07:15:03 brd ff:ff:ff:ff:ff:ff
    inet 172.7.21.3/24 brd 172.7.21.255 scope global eth0
       valid_lft forever preferred_lft forever

Note: you can also enter the container with docker exec, but do it on the node that actually hosts the pod and mind which IP you are looking at.
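
A rough sketch of the docker exec route (run on the node that hosts the pod; the grep pattern and the container ID placeholder are illustrative):

docker ps | grep nginx-dp             # find the container ID on the hosting node
docker exec -it <container_id> /bin/bash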

1.1.2.5. Deleting a pod (i.e. restarting it)
[root@hdss7-21 ~]# kubectl delete pods nginx-dp-5dfc689474-9l8s8 -n kube-public 
pod "nginx-dp-5dfc689474-9l8s8" deleted
[root@hdss7-21 ~]# kubectl get pods -n kube-public 
NAME                        READY   STATUS    RESTARTS   AGE
nginx-dp-5dfc689474-bj7qj   1/1     Running   0          38s
[root@hdss7-21 ~]# kubectl get pods -n kube-public -o wide 
NAME                        READY   STATUS    RESTARTS   AGE   IP           NODE                NOMINATED NODE   READINESS GATES
nginx-dp-5dfc689474-bj7qj   1/1     Running   0          45s   172.7.22.3   hdss7-22.host.com   <none>           <none>

Use watch to observe the pod being rebuilt, e.g. watch -n 1 "kubectl describe deployment nginx-dp -n kube-public | grep -C 5 Event" (the command must be quoted so that watch re-runs it every second)

Force-delete flags: --force --grace-period=0

[root@hdss7-21 ~]# kubectl delete pods nginx-dp-5dfc689474-bj7qj -n kube-public --force --grace-period=0 
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "nginx-dp-5dfc689474-bj7qj" force deleted

[root@hdss7-21 ~]# kubectl get pods -n kube-public 
NAME                        READY   STATUS    RESTARTS   AGE
nginx-dp-5dfc689474-xqbp9   1/1     Running   0          7s

Note: this deletes the pod but not the pod controller. The controller's desired state says exactly one pod must exist, so as soon as a pod is deleted the controller creates a new one. No matter how pods are removed, the controller keeps converging the cluster back to the desired number of pods, which is why delete pod is an important way to restart a pod.
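
The same reconciliation can be seen by changing the desired replica count (a quick sketch using the deployment created above):

kubectl scale deployment nginx-dp --replicas=2 -n kube-public   # the controller creates a second pod
kubectl get pods -n kube-public                                 # watch it converge to 2 running pods
kubectl scale deployment nginx-dp --replicas=1 -n kube-public   # scale back down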

1.1.2.6. Deleting the Deployment
[root@hdss7-21 ~]# kubectl delete deployment nginx-dp   -n kube-public 
deployment.extensions "nginx-dp" deleted

# Check the pod controller
[root@hdss7-21 ~]# kubectl get  deployment -n kube-public 
No resources found.

# Check the pods
[root@hdss7-21 ~]# kubectl get pods -n kube-public 
No resources found.

1.1.3. Managing Service resources

1.1.3.1. Creating a Service
[root@hdss7-21 ~]# kubectl expose deployment nginx-dp --port=80 -n kube-public
service/nginx-dp exposed

[root@hdss7-21 ~]# kubectl get all -n kube-public 
NAME                            READY   STATUS    RESTARTS   AGE
pod/nginx-dp-5dfc689474-vpj6g   1/1     Running   0          32m


NAME               TYPE        CLUSTER-IP        EXTERNAL-IP   PORT(S)   AGE
service/nginx-dp   ClusterIP   192.168.232.253   <none>        80/TCP    24s


NAME                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginx-dp   1/1     1            1           32m

NAME                                  DESIRED   CURRENT   READY   AGE
replicaset.apps/nginx-dp-5dfc689474   1         1         1       32m
1.1.3.2. Viewing the Service
[root@hdss7-22 ~]# kubectl describe svc nginx-dp -n kube-public 
Name:              nginx-dp
Namespace:         kube-public
Labels:            app=nginx-dp
Annotations:       <none>
Selector:          app=nginx-dp
Type:              ClusterIP
IP:                192.168.232.253
Port:              <unset>  80/TCP
TargetPort:        80/TCP
Endpoints:         172.7.22.3:80
Session Affinity:  None
Events:            <none>

1.1.4. kubectl usage summary

[root@hdss7-22 ~]# kubectl --help

Summary of imperative resource management:

  1. The only entry point for managing Kubernetes cluster resources is calling the apiserver's API, by whatever means

  2. kubectl is the official CLI tool. It talks to the apiserver, organizing the commands a user types into requests the apiserver understands, and is therefore an effective way to manage every kind of K8S resource

  3. The full kubectl command reference

    kubectl --help
    
    http://docs.kubernetes.org.cn
    
  4. Imperative management covers more than 90% of day-to-day resource management needs, but its drawbacks are obvious

    Commands are long, complex, and hard to remember

    In certain scenarios it simply cannot express the required change

    Creating, deleting, and querying resources is easy; modifying them is awkward (see the sketch below)

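For example, imperative edits are limited to whatever dedicated subcommands exist (a sketch, assuming the nginx-dp deployment from earlier still exists; the new image tag is hypothetical):

kubectl set image deployment/nginx-dp nginx=harbor.od.com/public/nginx:v1.9.1 -n kube-public
# anything more structural than an image or env change usually ends up needing kubectl edit or a manifest
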
1.2. Declarative resource management

1.2.1. Limitations of the imperative approach

[root@hdss7-21 ~]# kubectl expose daemonset nginx-ds --port=80
error: cannot expose a DaemonSet.extensions
  • Declarative management relies on resource configuration manifests (yaml/json)

  • Viewing a resource's manifest

    kubectl get svc nginx-dp -o yaml -n kube-public

  • Explaining manifest fields

    kubectl explain service

  • Creating a manifest

    vim /root/nginx-ds-svc.yaml

  • Applying a manifest

    kubectl apply -f nginx-ds-svc.yaml

  • Modifying a manifest and re-applying it

    online (edit the live object)

    offline (edit the file, then re-apply)

  • Deleting via manifest

    imperative delete

    declarative delete

1.2.2. Viewing a resource configuration manifest

[root@hdss7-21 ~]# kubectl get svc nginx-dp -o yaml -n kube-public 
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2020-08-18T07:04:28Z"
  labels:
    app: nginx-dp
  name: nginx-dp
  namespace: kube-public
  resourceVersion: "36993"
  selfLink: /api/v1/namespaces/kube-public/services/nginx-dp
  uid: accd6442-9420-4c45-8865-77666479fd7a
spec:
  clusterIP: 192.168.232.253
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx-dp
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}

1.2.3. Explaining a resource configuration manifest

[root@hdss7-21 ~]# kubectl explain service.metadata
KIND:     Service
VERSION:  v1

RESOURCE: metadata <Object>

DESCRIPTION:
     Standard object's metadata. More info:
     https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata

     ObjectMeta is metadata that all persisted resources must have, which
     includes all objects users must create.

FIELDS:
   annotations	<map[string]string>
     Annotations is an unstructured key value map stored with a resource that
     may be set by external tools to store and retrieve arbitrary metadata. They
     are not queryable and should be preserved when modifying objects. More
     info: http://kubernetes.io/docs/user-guide/annotations

   clusterName	<string>
     The name of the cluster which the object belongs to. This is used to
     distinguish resources with same name and namespace in different clusters.
     This field is not set anywhere right now and apiserver is going to ignore
     it if set in create or update request.

   creationTimestamp	<string>
     CreationTimestamp is a timestamp representing the server time when this
     object was created. It is not guaranteed to be set in happens-before order
     across separate operations. Clients may not set this value. It is
     represented in RFC3339 form and is in UTC. Populated by the system.
     Read-only. Null for lists. More info:
     https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata

   deletionGracePeriodSeconds	<integer>
     Number of seconds allowed for this object to gracefully terminate before it
     will be removed from the system. Only set when deletionTimestamp is also
     set. May only be shortened. Read-only.

   deletionTimestamp	<string>
     DeletionTimestamp is RFC 3339 date and time at which this resource will be
     deleted. This field is set by the server when a graceful deletion is
     requested by the user, and is not directly settable by a client. The
     resource is expected to be deleted (no longer visible from resource lists,
     and not reachable by name) after the time in this field, once the
     finalizers list is empty. As long as the finalizers list contains items,
     deletion is blocked. Once the deletionTimestamp is set, this value may not
     be unset or be set further into the future, although it may be shortened or
     the resource may be deleted prior to this time. For example, a user may
     request that a pod is deleted in 30 seconds. The Kubelet will react by
     sending a graceful termination signal to the containers in the pod. After
     that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL)
     to the container and after cleanup, remove the pod from the API. In the
     presence of network partitions, this object may still exist after this
     timestamp, until an administrator or automated process can determine the
     resource is fully terminated. If not set, graceful deletion of the object
     has not been requested. Populated by the system when a graceful deletion is
     requested. Read-only. More info:
     https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata

   finalizers	<[]string>
     Must be empty before the object is deleted from the registry. Each entry is
     an identifier for the responsible component that will remove the entry from
     the list. If the deletionTimestamp of the object is non-nil, entries in
     this list can only be removed.

   generateName	<string>
     GenerateName is an optional prefix, used by the server, to generate a
     unique name ONLY IF the Name field has not been provided. If this field is
     used, the name returned to the client will be different than the name
     passed. This value will also be combined with a unique suffix. The provided
     value has the same validation rules as the Name field, and may be truncated
     by the length of the suffix required to make the value unique on the
     server. If this field is specified and the generated name exists, the
     server will NOT return a 409 - instead, it will either return 201 Created
     or 500 with Reason ServerTimeout indicating a unique name could not be
     found in the time allotted, and the client should retry (optionally after
     the time indicated in the Retry-After header). Applied only if Name is not
     specified. More info:
     https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency

   generation	<integer>
     A sequence number representing a specific generation of the desired state.
     Populated by the system. Read-only.

   initializers	<Object>
     An initializer is a controller which enforces some system invariant at
     object creation time. This field is a list of initializers that have not
     yet acted on this object. If nil or empty, this object has been completely
     initialized. Otherwise, the object is considered uninitialized and is
     hidden (in list/watch and get calls) from clients that haven't explicitly
     asked to observe uninitialized objects. When an object is created, the
     system will populate this list with the current set of initializers. Only
     privileged users may set or modify this list. Once it is empty, it may not
     be modified further by any user. DEPRECATED - initializers are an alpha
     field and will be removed in v1.15.

   labels	<map[string]string>
     Map of string keys and values that can be used to organize and categorize
     (scope and select) objects. May match selectors of replication controllers
     and services. More info: http://kubernetes.io/docs/user-guide/labels

   managedFields	<[]Object>
     ManagedFields maps workflow-id and version to the set of fields that are
     managed by that workflow. This is mostly for internal housekeeping, and
     users typically shouldn't need to set or understand this field. A workflow
     can be the user's name, a controller's name, or the name of a specific
     apply path like "ci-cd". The set of fields is always in the version that
     the workflow used when modifying the object. This field is alpha and can be
     changed or removed without notice.

   name	<string>
     Name must be unique within a namespace. Is required when creating
     resources, although some resources may allow a client to request the
     generation of an appropriate name automatically. Name is primarily intended
     for creation idempotence and configuration definition. Cannot be updated.
     More info: http://kubernetes.io/docs/user-guide/identifiers#names

   namespace	<string>
     Namespace defines the space within each name must be unique. An empty
     namespace is equivalent to the "default" namespace, but "default" is the
     canonical representation. Not all objects are required to be scoped to a
     namespace - the value of this field for those objects will be empty. Must
     be a DNS_LABEL. Cannot be updated. More info:
     http://kubernetes.io/docs/user-guide/namespaces

   ownerReferences	<[]Object>
     List of objects depended by this object. If ALL objects in the list have
     been deleted, this object will be garbage collected. If this object is
     managed by a controller, then an entry in this list will point to this
     controller, with the controller field set to true. There cannot be more
     than one managing controller.

   resourceVersion	<string>
     An opaque value that represents the internal version of this object that
     can be used by clients to determine when objects have changed. May be used
     for optimistic concurrency, change detection, and the watch operation on a
     resource or set of resources. Clients must treat these values as opaque and
     passed unmodified back to the server. They may only be valid for a
     particular resource or set of resources. Populated by the system.
     Read-only. Value must be treated as opaque by clients and . More info:
     https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency

   selfLink	<string>
     SelfLink is a URL representing this object. Populated by the system.
     Read-only.

   uid	<string>
     UID is the unique in time and space value for this object. It is typically
     generated by the server on successful creation of a resource and is not
     allowed to change on PUT operations. Populated by the system. Read-only.
     More info: http://kubernetes.io/docs/user-guide/identifiers#uids

1.2.4. Creating a resource configuration manifest

[root@hdss7-21 ~]# vim nginx-ds-svc.yaml

apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-ds
  name: nginx-ds
  namespace: default
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx-ds
  sessionAffinity: None
  type: ClusterIP

1.2.5. Applying the resource configuration manifest

[root@hdss7-21 ~]# kubectl create -f nginx-ds-svc.yaml 
service/nginx-ds created
[root@hdss7-21 ~]# kubectl get svc -n default
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   192.168.0.1     <none>        443/TCP   10d
nginx-ds     ClusterIP   192.168.29.12   <none>        80/TCP    37s

[root@hdss7-21 ~]# kubectl get svc nginx-ds -o yaml 
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2020-08-28T10:42:22Z"
  labels:
    app: nginx-ds
  name: nginx-ds
  namespace: default
  resourceVersion: "85244"
  selfLink: /api/v1/namespaces/default/services/nginx-ds
  uid: 3138f6b9-21ca-4aa2-afeb-8173f00358e3
spec:
  clusterIP: 192.168.29.12
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx-ds
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}

1.2.6. Modifying the manifest and re-applying it

  • Offline modification

    Edit the nginx-ds-svc.yaml file and apply it with kubectl apply -f nginx-ds-svc.yaml to make the change take effect (see the sketch after this list)

  • Online modification

    Use kubectl edit service nginx-ds to edit the live manifest directly; saving the editor applies the change

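A sketch of the offline workflow (the port change is just an example; kubectl diff needs a reasonably recent kubectl):

vim nginx-ds-svc.yaml                # e.g. change targetPort
kubectl diff -f nginx-ds-svc.yaml    # optional: preview what would change
kubectl apply -f nginx-ds-svc.yaml   # re-apply to make it effective
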
1.2.7. Deleting via manifest

  • Imperative delete

kubectl delete service nginx-ds -n kube-public

  • Declarative delete

kubectl delete -f nginx-ds-svc.yaml

1.2.8. Viewing and using the Service resource

[root@hdss7-21 ~]# kubectl get svc -o wide 
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE     SELECTOR
kubernetes   ClusterIP   192.168.0.1     <none>        443/TCP   10d     <none>
nginx-ds     ClusterIP   192.168.29.12   <none>        80/TCP    8m39s   app=nginx-ds

1.2.9. Summary of declarative resource management

  1. Declarative management relies on unified resource configuration manifest files to manage resources

  2. Resources are defined ahead of time in manifests, which are then applied to the K8S cluster with imperative commands

  3. Syntax: kubectl create/apply/delete -f /path/to/yaml

  4. How to learn resource manifests

    tip 1: read manifests written by others (especially official ones) until you can understand them

    tip 2: be able to take an existing file and adapt it (see the dry-run sketch below)

    tip 3: when a field is unclear, look it up with kubectl explain

    tip 4: as a beginner, do not try to write manifests from scratch out of thin air

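A handy way to get a starting file to adapt (a sketch; the output filename is arbitrary, and on newer kubectl the flag is spelled --dry-run=client):

kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 \
  --dry-run -o yaml > nginx-dp.yaml                          # writes a skeleton manifest without touching the cluster
kubectl explain deployment.spec.template.spec.containers    # then look up any field you don't recognize
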
2. K8S core add-ons

Kubernetes defines a network model but leaves its implementation to network plugins. The main job of a CNI network plugin is to let pod resources communicate across hosts.

Common CNI network plugins:

  • Flannel 38% ✔
  • Calico 35%
  • Canal 5%
  • Contiv
  • OpenContrail
  • NSX-T
  • Kube-router

2.1. The K8S CNI plugin: Flannel

2.1.1. Cluster plan

Hostname             Role      IP address
hdss7-21.host.com    flannel   10.4.7.21
hdss7-22.host.com    flannel   10.4.7.22

Note: this walkthrough uses HDSS7-21.host.com as the example; installing on the other compute node is analogous.

2.1.2. Download, unpack, and symlink the software

[root@hdss7-21 ~]# cd /opt/src/
[root@hdss7-21 src]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz

[root@hdss7-21 src]# mkdir /opt/flannel-v0.11.0
[root@hdss7-21 src]# tar xf flannel-v0.11.0-linux-amd64.tar.gz  -C /opt/flannel-v0.11.0/
[root@hdss7-21 src]# ln -s /opt/flannel-v0.11.0/ /opt/flannel

2.1.3. Create directories

[root@hdss7-21 src]# cd /opt/flannel
[root@hdss7-21 flannel]# mkdir /opt/flannel/cert && cd /opt/flannel/cert

2.1.4. Copy the certificates

[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/ca.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client-key.pem .

2.1.5. Create the configuration

[root@hdss7-21 cert]# cd ..
[root@hdss7-21 flannel]# vim subnet.env
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false

2.1.6. Create the startup script

[root@hdss7-21 flannel]# vim flanneld.sh
#!/bin/sh
./flanneld \
  --public-ip=10.4.7.21 \
  --etcd-endpoints=https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
  --etcd-keyfile=./cert/client-key.pem \
  --etcd-certfile=./cert/client.pem \
  --etcd-cafile=./cert/ca.pem \
  --iface=eth0 \
  --subnet-file=./subnet.env \
  --healthz-port=2401

2.1.7. Check configuration and permissions, create the log directory

[root@hdss7-21 flannel]# chmod +x flanneld.sh 
[root@hdss7-21 flannel]# mkdir -p /data/logs/flanneld

2.1.8. Create the supervisor configuration

[root@hdss7-21 flannel]# vi /etc/supervisord.d/flannel.ini
[program:flanneld-7-21]
command=/opt/flannel/flanneld.sh                             ; the program (relative uses PATH, can take args)
numprocs=1                                                   ; number of processes copies to start (def 1)
directory=/opt/flannel                                       ; directory to cwd to before exec (def no cwd)
autostart=true                                               ; start at supervisord start (default: true)
autorestart=true                                             ; retstart at unexpected quit (default: true)
startsecs=30                                                 ; number of secs prog must stay running (def. 1)
startretries=3                                               ; max # of serial start failures (default 3)
exitcodes=0,2                                                ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                              ; signal used to kill process (default TERM)
stopwaitsecs=10                                              ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                    ; setuid to this UNIX account to run the program
redirect_stderr=true                                         ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log       ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                 ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                     ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                  ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                  ; emit events on stdout writes (default false)

2.1.9. Configure etcd: add the host-gw backend

This only needs to be executed on HDSS7-21.host.com

[root@hdss7-21 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}'
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
Verify:
[root@hdss7-21 etcd]# ./etcdctl get /coreos.com/network/config
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}

2.1.10. Start the service and check it

[root@hdss7-21 flannel]# supervisorctl  update
[root@hdss7-21 flannel]# supervisorctl status

2.1.11. Deploy the other node and verify

When repeating the installation on hdss7-22, these per-node values change accordingly (shown here with their hdss7-21 values):

subnet.env  
FLANNEL_SUBNET=172.7.21.1/24

flanneld.sh
 --public-ip=10.4.7.21
 
/etc/supervisord.d/flannel.ini
[program:flanneld-7-21]

[root@hdss7-22 flannel]# ping 172.7.21.1
PING 172.7.21.1 (172.7.21.1) 56(84) bytes of data.
64 bytes from 172.7.21.1: icmp_seq=1 ttl=64 time=0.251 ms
64 bytes from 172.7.21.1: icmp_seq=2 ttl=64 time=0.279 ms

[root@hdss7-22 flannel]# route -n 
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         10.4.7.254      0.0.0.0         UG    100    0        0 eth0
10.4.7.0        0.0.0.0         255.255.255.0   U     100    0        0 eth0
172.7.21.0      10.4.7.21       255.255.255.0   UG    0      0        0 eth0
172.7.22.0      0.0.0.0         255.255.255.0   U     0      0        0 docker0

Flannel VxLAN model (one of three options)

1. supervisorctl stop flanneld-7-[21,22]
2. Delete the routes created by the host-gw model
route del -net 172.7.21.0/24 gw 10.4.7.21     # on hdss7-22.host.com
route del -net 172.7.22.0/24 gw 10.4.7.22     # on hdss7-21.host.com
3. Change the config in etcd
./etcdctl get /coreos.com/network/config
./etcdctl rm /coreos.com/network/config
./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN"}}'
4. supervisorctl start flanneld-7-[21,22]

Flannel direct routing model (one of three options)

'{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN","Directrouting": true}}'

2.1.12. iptables rules

Install iptables-services and enable it at boot

[root@hdss7-21 ~]# yum install iptables-services  -y
[root@hdss7-21 ~]# systemctl start iptables
[root@hdss7-21 ~]# systemctl enable  iptables

2.1.13. Optimize the iptables rules on each compute node

Note: the iptables rules differ slightly from host to host; adjust them when running on the other compute nodes.

Optimize the SNAT rule so that pod-to-pod traffic between compute nodes is no longer source-NATed on its way out of the node

[root@hdss7-21 ~]# iptables-save|grep -i postrouting
[root@hdss7-21 ~]# iptables -t nat -D POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
[root@hdss7-21 ~]# iptables -t nat -I POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE


[root@hdss7-22 ~]# iptables -t nat -D POSTROUTING -s 172.7.22.0/24 ! -o docker0 -j MASQUERADE
[root@hdss7-22 ~]# iptables -t nat -I POSTROUTING -s 172.7.22.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE

On host 10.4.7.21 this means: for packets whose source is a docker (pod) IP in 172.7.21.0/24, SNAT is applied only when the destination is NOT in 172.7.0.0/16 and the packet does NOT leave via the docker0 bridge.
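
A quick way to see the effect (a sketch; the pod names/IPs are placeholders and the image is assumed to contain curl):

kubectl get pods -o wide                              # note one pod IP per node
kubectl exec -it <pod-on-7-21> -- curl -s 172.7.22.2  # request a pod on the other node
kubectl logs <pod-on-7-22>                            # the client address should now be 172.7.21.x, not 10.4.7.21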

2.1.14. Save the iptables rules on each compute node

Delete the pre-existing iptables REJECT rules

iptables-save |grep -i reject
iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
iptables-save > /etc/sysconfig/iptables
service iptables save

2.2. The K8S service discovery add-on: CoreDNS

Simply put, service discovery is the process by which services (applications) locate each other.

Service discovery is not unique to the cloud era; it was also used with traditional monolithic architectures. It becomes all the more necessary when:

  • services (applications) are highly dynamic
  • services are updated and released frequently
  • services scale automatically

Inside a K8S cluster pod IPs change constantly, so how do we get something stable to hold on to?

  • The Service resource is abstracted out; via a label selector it associates a group of pods
  • The cluster network is abstracted out; the relatively fixed "cluster IP" gives the service a stable access point

How, then, do we automatically associate a Service's "name" with its "cluster IP", so that services are discovered automatically by the cluster?

  • Think of the traditional DNS model: hdss7-21.host.com -----> 10.4.7.21
  • Can the same kind of model be built inside K8S: nginx-ds -----> 192.168.0.5

The way K8S does service discovery: DNS

Add-ons (software) that implement DNS inside K8S:

  • kube-dns: kubernetes v1.2 through v1.10
  • CoreDNS: kubernetes v1.11 to the present

Note:

DNS in K8S is not a cure-all!! It is only responsible for automatically maintaining the "service name" --> "cluster IP" mapping

2.2.1. Deploy an internal HTTP service for K8S resource manifests

On the ops host HDSS7-200.host.com, configure an nginx virtual host that provides a unified access point for K8S resource manifests

Configure nginx

HDSS7-200.host.com

[root@hdss7-200 ~]# vim  /etc/nginx/conf.d/k8s-yaml.od.com.conf
server{
    listen			80;
    server_name     k8s-yaml.od.com;
    
    location  /{
        	autoindex on;
        	default_type text/plain;
        	root /data/k8s-yaml;
    }
}

[root@hdss7-200 ~]# mkdir /data/k8s-yaml 
[root@hdss7-200 ~]# nginx -t 
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@hdss7-200 ~]# nginx -s reload 
[root@hdss7-200 ~]# cd /data/k8s-yaml/

Configure internal DNS resolution

HDSS7-11.host.com

[root@hdss7-11 ~]# vim /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600    ; 10 minutes
@           IN SOA  dns.od.com. dnsadmin.od.com. (
                2020032003 ; serial
                10800      ; refresh (3 hours)
                900        ; retry (15 minutes)
                604800     ; expire (1 week)
                86400      ; minimum (1 day)
                )
                NS   dns.od.com.
$TTL 60 ; 1 minute
dns                A    10.4.7.11
harbor             A    10.4.7.200
k8s-yaml           A    10.4.7.200

[root@hdss7-11 ~]# systemctl restart named
[root@hdss7-11 ~]# dig -t A k8s-yaml.od.com @10.4.7.11 +short
10.4.7.200

From now on, all resource manifests simply go under /data/k8s-yaml on the ops host

[root@hdss7-200 ~]# nginx -s reload 

2.2.2. Deploy CoreDNS

2.2.2.1. Prepare the coredns v1.6.1 image

On the ops host HDSS7-200.host.com:

[root@hdss7-200 k8s-yaml]# mkdir coredns && cd coredns

[root@hdss7-200 coredns]# docker pull docker.io/coredns/coredns:1.6.1
1.6.1: Pulling from coredns/coredns
c6568d217a00: Pull complete 
d7ef34146932: Pull complete 
Digest: sha256:9ae3b6fcac4ee821362277de6bd8fd2236fa7d3e19af2ef0406d80b595620a7a
Status: Downloaded newer image for coredns/coredns:1.6.1
docker.io/coredns/coredns:1.6.1

[root@hdss7-200 coredns]# docker images | grep coredns
coredns/coredns                 1.6.1                      c0f6e815079e        13 months ago      

[root@hdss7-200 coredns]# docker tag  c0f6e815079e harbor.od.com/public/coredns:v1.6.1

[root@hdss7-200 coredns]# docker push harbor.od.com/public/coredns:v1.6.1
The push refers to repository [harbor.od.com/public/coredns]
da1ec456edc8: Pushed 
225df95e717c: Pushed 
v1.6.1: digest: sha256:c7bf0ce4123212c87db74050d4cbab77d8f7e0b49c041e894a35ef15827cf938 size: 739

2.2.2.2. Prepare the resource manifests

On the ops host HDSS7-200.host.com:

[root@hdss7-200 coredns]# vim /data/k8s-yaml/coredns/rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
[root@hdss7-200 coredns]# vim /data/k8s-yaml/coredns/cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        ready
        kubernetes cluster.local 192.168.0.0/16
        forward . 10.4.7.11
        cache 30
        loop
        reload
        loadbalance
       }
[root@hdss7-200 coredns]# vim /data/k8s-yaml/coredns/dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      containers:
      - name: coredns
        image: harbor.od.com/public/coredns:v1.6.1
        args:
        - -conf
        - /etc/coredns/Corefile
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
[root@hdss7-200 coredns]# vim /data/k8s-yaml/coredns/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 192.168.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
  - name: metrics
    port: 9153
    protocol: TCP
2.2.2.3. Apply them in order
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/rbac.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/cm.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/dp.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/svc.yaml
2.2.2.4. Check
[root@hdss7-21 ~]# kubectl get all -n kube-system
NAME                           READY   STATUS    RESTARTS   AGE
pod/coredns-6b6c4f9648-rns45   1/1     Running   0          32s


NAME              TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
service/coredns   ClusterIP   192.168.0.2   <none>        53/UDP,53/TCP,9153/TCP   27s


NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   1/1     1            1           32s

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-6b6c4f9648   1         1         1       32s
2.2.2.5. Verify CoreDNS
[root@hdss7-21 ~]# dig -t A www.baidu.com @192.168.0.2 +short 
www.a.shifen.com.
14.215.177.39
14.215.177.38
[root@hdss7-21 ~]# dig -t A hdss7-21.host.com @192.168.0.2 +short
10.4.7.21
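
It is also worth checking the mapping CoreDNS is really there for, service name to cluster IP (a sketch; this assumes the nginx-dp Service from section 1.1.3 still exists):

dig -t A nginx-dp.kube-public.svc.cluster.local @192.168.0.2 +short   # should return that Service's cluster IP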

2.3. The K8S service exposure add-on: Traefik

K8S DNS makes services discoverable automatically *inside* the cluster. How, then, are services used and accessed from *outside* the K8S cluster?

1. Use a NodePort-type Service

Note: this cannot be combined with kube-proxy's ipvs mode; only the iptables mode works

2. Use an Ingress resource

Note: Ingress only schedules and exposes layer-7 applications, specifically the http and https protocols

Ingress is one of the standard (core) resource types of the K8S API. It is really just a set of rules, based on domain names and URL paths, that forward user requests to a specified Service

It forwards request traffic from outside the cluster to the inside, which is how "service exposure" is achieved

An Ingress controller is the component that listens on a socket on behalf of Ingress resources and then routes traffic according to the Ingress rule-matching mechanism

To put it plainly, there is nothing mysterious about Ingress: it is basically an nginx plus a bit of Go glue

Commonly used Ingress controller implementations

  • Ingress-nginx
  • HAProxy
  • Traefik

2.3.1. Exposing a service with a NodePort Service

Note: exposing services this way requires changing kube-proxy's proxy mode to iptables

[root@hdss7-21 ~]# vim /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/sh
./kube-proxy \
  --cluster-cidr 172.7.0.0/16 \
  --hostname-override hdss7-21.host.com \
  --proxy-mode=iptables \
  --ipvs-scheduler=rr \
  --kubeconfig ./conf/kube-proxy.kubeconfig
 
Restart kube-proxy via supervisor, e.g. supervisorctl restart kube-proxy-7-21 (use whatever program name kube-proxy was registered under), then check the log:
[root@hdss7-21 ~]# tail -fn 200 /data/logs/kubernetes/kube-proxy/proxy.stdout.log

[root@hdss7-21 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.0.1:443 nq
  -> 10.4.7.21:6443               Masq    1      0          0         
  -> 10.4.7.22:6443               Masq    1      1          0         
TCP  192.168.0.2:53 nq
  -> 172.7.21.3:53                Masq    1      0          0         
TCP  192.168.0.2:9153 nq
  -> 172.7.21.3:9153              Masq    1      0          0         
TCP  192.168.29.12:80 nq
  -> 172.7.21.2:80                Masq    1      0          0         
  -> 172.7.22.2:80                Masq    1      0          0         
TCP  192.168.29.12:8080 nq
  -> 172.7.21.2:80                Masq    1      0          0         
  -> 172.7.22.2:80                Masq    1      0          0         
TCP  192.168.232.253:80 nq
  -> 172.7.22.3:80                Masq    1      0          0         
UDP  192.168.0.2:53 nq
  -> 172.7.21.3:53                Masq    1      0          0    
  
[root@hdss7-21 ~]# ipvsadm -D -t  192.168.0.1:443
[root@hdss7-21 ~]# ipvsadm -D -t  192.168.0.2:53
[root@hdss7-21 ~]# ipvsadm -D -t  192.168.0.2:9153
[root@hdss7-21 ~]# ipvsadm -D -t  192.168.29.12:80
[root@hdss7-21 ~]# ipvsadm -D -t  192.168.29.12:8080
[root@hdss7-21 ~]# ipvsadm -D -t  192.168.232.253:80
[root@hdss7-21 ~]# ipvsadm -D -u  192.168.0.2:53
 
2.3.1.1. Modify the nginx-ds Service manifest
[root@hdss7-21 ~]# vim /root/nginx-ds-svc.yaml

apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-ds
  name: nginx-ds
  namespace: default
spec:
  ports:
  - port: 80
    protocol: TCP
    nodePort: 8000
  selector:
    app: nginx-ds
  sessionAffinity: None
  type: NodePort
  
[root@hdss7-21 ~]# kubectl apply -f nginx-ds-svc.yaml
[root@hdss7-21 ~]# netstat -luntp|grep 8000

2.3.1.2. Recreate the nginx-ds Service
[root@hdss7-21 ~]# kubectl delete -f nginx-ds-svc.yaml
service "nginx-ds" deleted
[root@hdss7-21 ~]# kubectl apply -f nginx-ds-svc.yaml
service/nginx-ds created
2.3.1.3. View the Service
[root@hdss7-21 ~]# kubectl get svc -o wide 
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE    SELECTOR
kubernetes   ClusterIP   192.168.0.1     <none>        443/TCP    14d    <none>
nginx-ds     ClusterIP   192.168.29.12   <none>        80:8080/TCP   3d6h   app=nginx-ds

2.3.1.4. Access from a browser

http://10.4.7.21:8000

http://10.4.7.22:8000

2.3.2. Deploy Traefik (the ingress controller)

hdss7-200.host.com

[root@hdss7-200 k8s-yaml]# docker pull traefik:v1.7.2-alpine
[root@hdss7-200 k8s-yaml]# docker images|grep traefik
[root@hdss7-200 k8s-yaml]# docker tag add5fac61ae5 harbor.od.com/public/traefik:v1.7.2
[root@hdss7-200 k8s-yaml]# docker push  harbor.od.com/public/traefik:v1.7.2

Prepare the resource manifests

[root@hdss7-200 k8s-yaml]# mkdir traefik
[root@hdss7-200 k8s-yaml]# cd traefik/
[root@hdss7-200 ~]# vim /data/k8s-yaml/traefik/rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
[root@hdss7-200 ~]# vim /data/k8s-yaml/traefik/ds.yaml

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: traefik-ingress
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress
        name: traefik-ingress
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: harbor.od.com/public/traefik:v1.7.2
        name: traefik-ingress
        ports:
        - name: controller
          containerPort: 80
          hostPort: 81
        - name: admin-web
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
        - --insecureskipverify=true
        - --kubernetes.endpoint=https://10.4.7.10:7443
        - --accesslog
        - --accesslog.filepath=/var/log/traefik_access.log
        - --traefiklog
        - --traefiklog.filepath=/var/log/traefik.log
        - --metrics.prometheus
[root@hdss7-200 ~]# vim /data/k8s-yaml/traefik/svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress
  ports:
    - protocol: TCP
      port: 80
      name: controller
    - protocol: TCP
      port: 8080
      name: admin-web

[root@hdss7-200 ~]# vim /data/k8s-yaml/traefik/ingress.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: traefik.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-ingress-service
          servicePort: 8080

Apply the resource manifests

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/rbac.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ds.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/svc.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ingress.yaml

Check the created resources

[root@hdss7-21 ~]# kubectl get pods -n kube-system

2.3.3. Add DNS resolution

[root@hdss7-11 ~]# vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600        ; 10 minutes
@               IN SOA  dns.od.com. dnsadmin.od.com. (
                                2020042604 ; serial
                                10800      ; refresh (3 hours)
                                900        ; retry (15 minutes)
                                604800     ; expire (1 week)
                                86400      ; minimum (1 day)
                                )
                                NS   dns.od.com.
$TTL 60 ; 1 minute
dns                A    10.4.7.11
harbor             A    10.4.7.200
k8s-yaml           A    10.4.7.200
traefik            A    10.4.7.10

2.3.4. Configure the reverse proxy

[root@hdss7-11 ~]# vi /etc/nginx/conf.d/od.com.conf
upstream default_backend_traefik {
    server 10.4.7.21:81    max_fails=3 fail_timeout=10s;
    server 10.4.7.22:81    max_fails=3 fail_timeout=10s;
}
server {
    server_name *.od.com;
  
    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host       $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
[root@hdss7-11 ~]# nginx -t
[root@hdss7-11 ~]# nginx -s reload

2.3.5. Access from a browser

http://traefik.od.com/

2.4. The K8S GUI management add-on: Dashboard

2.4.1. Prepare the dashboard image

[root@hdss7-200 harbor]# docker pull k8scn/kubernetes-dashboard-amd64:v1.8.3
[root@hdss7-200 harbor]# docker images|grep dashboard
[root@hdss7-200 harbor]# docker tag fcac9aa03fd6  harbor.od.com/public/dashboard:v1.8.3
[root@hdss7-200 harbor]# docker push harbor.od.com/public/dashboard:v1.8.3

2.4.2. Create the resource manifests

hdss7-200.host.com

[root@hdss7-200 harbor]# mkdir -p /data/k8s-yaml/dashboard && cd /data/k8s-yaml/dashboard
[root@hdss7-200 ~]# vim /data/k8s-yaml/dashboard/rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system
[root@hdss7-200 ~]# vim /data/k8s-yaml/dashboard/dp.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        image: harbor.od.com/public/dashboard:v1.8.3
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          # PLATFORM-SPECIFIC ARGS HERE
          - --auto-generate-certificates
        volumeMounts:
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard-admin
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
[root@hdss7-200 ~]# vim /data/k8s-yaml/dashboard/svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
[root@hdss7-200 ~]# vim /data/k8s-yaml/dashboard/ingress.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: dashboard.od.com
    http:
      paths:
      - backend:
          serviceName: kubernetes-dashboard
          servicePort: 443 

2.4.3. Apply the resource manifests

[root@hdss7-21 containers]# kubectl apply -f http://k8s-yaml.od.com/dashboard/rbac.yaml
[root@hdss7-21 containers]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dp.yaml
[root@hdss7-21 containers]# kubectl apply -f http://k8s-yaml.od.com/dashboard/svc.yaml
[root@hdss7-21 containers]# kubectl apply -f http://k8s-yaml.od.com/dashboard/ingress.yaml

2.4.4. View the created resources

[root@hdss7-21 containers]# kubectl get pods -n kube-system
[root@hdss7-21 containers]# kubectl get svc -n kube-system
[root@hdss7-21 containers]# kubectl get ingress -n kube-system

2.4.5. Add DNS resolution

[root@hdss7-11 conf.d]# vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600        ; 10 minutes
@               IN SOA  dns.od.com. dnsadmin.od.com. (
                                2020042602 ; serial            ; bump the serial number forward
                                10800      ; refresh (3 hours)
                                900        ; retry (15 minutes)
                                604800     ; expire (1 week)
                                86400      ; minimum (1 day)
                                )
                                NS   dns.od.com.
$TTL 60 ; 1 minute
dns                A    10.4.7.11
harbor             A    10.4.7.200
k8s-yaml           A    10.4.7.200
traefik            A    10.4.7.10
dashboard          A    10.4.7.10

[root@hdss7-11 conf.d]# systemctl restart named

2.4.6. Access from a browser

http://dashboard.od.com/

2.4.7. Getting the login token from the command line

[root@hdss7-21 ~]# kubectl get secret -n kube-system
NAME                                     TYPE                                  DATA   AGE
coredns-token-phxkw                      kubernetes.io/service-account-token   3      25h
default-token-shsl4                      kubernetes.io/service-account-token   3      14d
kubernetes-dashboard-admin-token-zs4rf   kubernetes.io/service-account-token   3      3h19m
kubernetes-dashboard-key-holder          Opaque                                2      3h16m
traefik-ingress-controller-token-lg68k   kubernetes.io/service-account-token   3      4h8m
[root@hdss7-21 ~]# kubectl describe secret kubernetes-dashboard-admin-token-zs4rf -n kube-system
Name:         kubernetes-dashboard-admin-token-zs4rf
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard-admin
              kubernetes.io/service-account.uid: 1a06938f-e404-4ef6-aab8-3985b85bbec7

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1346 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi10b2tlbi16czRyZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjFhMDY5MzhmLWU0MDQtNGVmNi1hYWI4LTM5ODViODViYmVjNyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiJ9.l_7MEkfGtRGVll66UbQH403DTYZhnhzBYxRYnjC4gIp9tu76Gad3nHhaQPLixXwUr35Mb47D4-2D1wB-rpSPLXt1Uc5OX3tzcFoFD6CH3kczuneGNmnWPMD7ehTSK1KNyIzew9TsiNfSWXmpOe8gos0CFFXWkza6jmv8qH8cOCluqwzvciD_lhnmfnhtn6ml1ZvapJkZRfj6WydzilrUelHVw1sM62ujJMOWrIPVaCFQxjKV65WGOcIPVkC_VBCKY6daJBXx-cBDZg250D8xQmXA6WB3PA133VELaJd7-uM1240gxFRYtFW4GlFg29D2wqr2ZFNcFXBIFeRVmegE6Q
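
The two steps can be combined into one line (a sketch; the grep pattern assumes the secret name contains "kubernetes-dashboard-admin"):

kubectl -n kube-system describe secret \
  $(kubectl -n kube-system get secret | grep kubernetes-dashboard-admin | awk '{print $1}')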

Add HTTPS with a self-signed certificate (openssl)

hdss7-200.host.com

[root@hdss7-200 ~]# (umask 077;openssl genrsa -out dashboard.od.com.key 2048)
Generating RSA private key, 2048 bit long modulus
.........................+++
...........+++
e is 65537 (0x10001)


[root@hdss7-200 ~]# openssl req -new -key dashboard.od.com.key -out dashboard.od.com.csr -subj "/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops"

[root@hdss7-200 ~]# ll |grep dashboard 
-rw-r--r--  1 root root 1005 Sep  1 16:27 dashboard.od.com.csr
-rw-------  1 root root 1675 Sep  1 16:24 dashboard.od.com.key


[root@hdss7-200 certs]# openssl x509 -req -in dashboard.od.com.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial  -out dashboard.od.com.crt  -days 3650
Signature ok
subject=/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops
Getting CA Private Key


[root@hdss7-11 ~]# cd /etc/nginx/
[root@hdss7-11 nginx]# mkdir certs
[root@hdss7-11 nginx]# cd certs

[root@hdss7-11 certs]# scp hdss7-200:/opt/certs/dashboard.od.com.crt . 
The authenticity of host 'hdss7-200 (10.4.7.200)' can't be established.
ECDSA key fingerprint is SHA256:D9fKu1ZtQFmpH98Ghx/ES3ZUnaJcylD+w3ksyIFzPTo.
ECDSA key fingerprint is MD5:be:14:7a:e5:e2:9b:76:8b:54:4c:e8:b0:f7:68:e2:94.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'hdss7-200' (ECDSA) to the list of known hosts.
dashboard.od.com.crt                                       100% 1196   685.9KB/s   00:00    
[root@hdss7-11 certs]# scp hdss7-200:/opt/certs/dashboard.od.com.key . 
dashboard.od.com.key                                       100% 1675   957.1KB/s   00:00    
[root@hdss7-11 certs]# ll
total 8
-rw-r--r-- 1 root root 1196 Sep  1 16:35 dashboard.od.com.crt
-rw------- 1 root root 1675 Sep  1 16:35 dashboard.od.com.key


[root@hdss7-11 certs]# cd ..
[root@hdss7-11 nginx]# cd conf.d/


[root@hdss7-11 conf.d]# vim dashboard.od.conf

server {
	listen    80;
	server_name dashboard.od.com;

	rewrite ^(.*)$ https://${server_name}$1 permanent;
}
server {
	listen    443 ssl;
	server_name  dashboard.od.com;

	ssl_certificate "certs/dashboard.od.com.crt";
	ssl_certificate_key "certs/dashboard.od.com.key";
	ssl_session_cache  shared:SSL:1m;
	ssl_session_timeout 10m;
	ssl_ciphers HIGH:!aNULL:!MD5;
	ssl_prefer_server_ciphers on;
	
	location  / {
	    proxy_pass http://default_backend_traefik;
	    proxy_set_header Host      $http_host;
	    proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;			

   } 

}


[root@hdss7-11 conf.d]# nginx -t 
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@hdss7-11 conf.d]# nginx -s reload 


Upgrade the dashboard

hdss7-200.host.com

[root@hdss7-200 ~]# docker pull  hexun/kubernetes-dashboard-amd64:v1.10.1
[root@hdss7-200 ~]# docker images |grep dashboard
[root@hdss7-200 ~]# docker tag f9aed6605b81 harbor.od.com/public/dashboard:v1.10.1
[root@hdss7-200 ~]# docker push harbor.od.com/public/dashboard:v1.10.1
[root@hdss7-200 ~]# cd /data/k8s-yaml/dashboard/
[root@hdss7-200 dashboard]# vim dp.yaml

image: harbor.od.com/public/dashboard:v1.10.1

hdss7-21.host.com

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dp.yaml
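
As an alternative to editing dp.yaml, the image can also be bumped imperatively (a sketch; the container name "kubernetes-dashboard" matches the one defined in dp.yaml above):

kubectl set image deployment/kubernetes-dashboard \
  kubernetes-dashboard=harbor.od.com/public/dashboard:v1.10.1 -n kube-system
kubectl get pods -n kube-system -w   # watch the rolling update replace the pod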
