K8s_Kubeadm
Installing Kubernetes 1.15.12 with kubeadm
Environment requirements
| Host | IP address | CPU/Memory |
|---|---|---|
| kubernetes-master | 10.0.0.11 | 2c/2g |
| kubernetes-node1 | 10.0.0.12 | 2c/2g |
1. Install a specific Docker version
#all nodes
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum list docker-ce --showduplicates #list the available docker-ce versions
#install the pinned version
yum install docker-ce-18.09.9 -y
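Docker also has to be enabled and running before kubeadm's preflight checks, and kubeadm's docs recommend the systemd cgroup driver. A minimal sketch; the mirror URL is only an example, substitute your own accelerator:
mkdir -p /etc/docker
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://registry.docker-cn.com"]
}
EOF
systemctl enable docker && systemctl restart docker
docker info | grep -i 'cgroup driver'   #should print: Cgroup Driver: systemd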
2. Install kubeadm
#all nodes
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install kubelet-1.15.12 kubeadm-1.15.12 kubectl-1.15.12 -y
systemctl enable kubelet && systemctl start kubelet
3. Initialize the cluster with kubeadm
#all nodes
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
swapoff -a
vim /etc/fstab #comment out the swap entry so swap stays disabled after reboot:
#UUID=72aabc10-44b8-4c05-86bd-049157d771f8 swap swap defaults 0 0
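The fstab edit can also be done non-interactively; a one-liner sketch that comments out any active swap entry:
swapoff -a && sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab
grep swap /etc/fstab   #confirm the swap line is now commented out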
#control-plane (master) node only
kubeadm init --kubernetes-version=v1.15.12 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --service-cidr=10.254.0.0/16
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
4. Join worker nodes to the cluster
#on each node, run the join command printed by kubeadm init (token and hash will differ):
kubeadm join 10.0.0.11:6443 --token snj3qf.gbekg1lj4u7a6zhh \
> --discovery-token-ca-cert-hash sha256:c39dc4a0612c9dce0638ece08d3a56116efc5ed4a6d85456fe28636df123762f
5. Configure the cluster network plugin (flannel)
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@kubernetes-master ~]# cat kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.13.1-rc2
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.13.1-rc2
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
kubectl create -f kube-flannel.yml
kubectl get all -n kube-system
kubectl get nodes
5.1 Static pods vs. dynamic pods
A static pod is run directly by the kubelet rather than created through the API server: any YAML file placed in /etc/kubernetes/manifests is started by the kubelet on that node automatically.
A dynamic pod is created under the apiserver's control, for example pods started by a Deployment or an RC.
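As an illustration of the static case, the kubelet launches this pod the moment the file lands in the manifests directory, with no kubectl apply involved (a minimal sketch using the nginx image seen elsewhere in this doc):
cat <<EOF > /etc/kubernetes/manifests/static-nginx.yaml
apiVersion: v1
kind: Pod
metadata:
  name: static-nginx
spec:
  containers:
  - name: nginx
    image: nginx:1.13
EOF
kubectl get pod   #the mirror pod appears with the node name suffixed, e.g. static-nginx-kubernetes-master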
5.2 Handling an expired join token
Delete the expired token, create a new one with kubeadm token create --print-join-command, then join the node again.
Procedure:
1. Delete the old token:
kubeadm token list
kubeadm token delete token_ID
2. Generate a new token:
kubeadm token create --print-join-command
kubeadm token list
3. Join the node to the cluster:
kubeadm join 10.0.0.11:6443 --token xxxx_token_ID --discovery-token-ca-cert-hash sha256:xxxxxxxx
5.3 kubectl command-line completion
source <(kubectl completion bash)
vim ~/.bashrc #make it permanent by adding the same line:
source <(kubectl completion bash)
5.4 Taints
By default the scheduler places nothing on the master node, because kubeadm taints it during initialization.
Taint effects:
1.NoSchedule #no new pods are scheduled onto the node; existing pods stay
2.PreferNoSchedule #soft: the scheduler tries to avoid the node but is not forced to
3.NoExecute #new pods are refused, and existing pods that do not tolerate the taint are evicted
Commands:
View the master's taints: kubectl describe nodes kubernetes-master |grep -i taints
Remove a taint: kubectl taint node kubernetes-master node-role.kubernetes.io/master-
Add a taint: kubectl taint node kubernetes-master node-role.kubernetes.io/master=:PreferNoSchedule
Note on the example below:
#the label is not a prerequisite for the taint; labels and taints are independent, the label only makes the node display a role in kubectl get nodes
kubectl get nodes --show-labels
kubectl label nodes kubernetes-node1 node-role.kubernetes.io/node=
kubectl taint node kubernetes-node1 node-role.kubernetes.io/node=:PreferNoSchedule
5.5 Tolerations
kubectl label node kubernetes-node1 disk=ssd
kubectl taint node kubernetes-node1 disk=ssd:NoExecute
#add under the pod's spec (a complete manifest sketch follows after the check below)
tolerations:
- key: "disk"
  value: "ssd"
  effect: "NoExecute"
#check
kubectl get pod -o wide
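For context, a complete minimal manifest carrying the toleration fragment above (a sketch; when a value is given the operator defaults to Equal, and the nodeSelector pins the pod onto the labeled, tainted node):
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: toleration-demo
spec:
  nodeSelector:
    disk: ssd
  tolerations:
  - key: "disk"
    value: "ssd"
    effect: "NoExecute"
  containers:
  - name: nginx
    image: nginx:1.13
EOF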
5.6 Secrets
#Method 1:
#secret --> ServiceAccount --> pod
kubectl create secret docker-registry harbor-secret --namespace=default --docker-username=admin --docker-password=a123456 --docker-server=blog.xxx.xxxx
vi k8s_sa_harbor.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: docker-image
namespace: default
imagePullSecrets:
- name: harbor-secret
vi k8s_pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: static-pod
spec:
serviceAccount: docker-image
containers:
- name: nginx
image: blog.oldqiang.com/oldboy/nginx:1.13
ports:
- containerPort: 80
Method 2:
#secret --> pod (simpler than method 1)
kubectl create secret docker-registry regcred --docker-server=blog.oldqiang.com --docker-username=admin --docker-password=a123456
#verify
[root@k8s-master ~]# kubectl get secrets
NAME TYPE DATA AGE
default-token-vgc4l kubernetes.io/service-account-token 3 2d19h
regcred kubernetes.io/dockerconfigjson 1 114s
[root@k8s-master ~]# cat k8s_pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: static-pod
spec:
nodeName: 10.0.0.12
imagePullSecrets:
- name: regcred
containers:
- name: nginx
image: blog.oldqiang.com/oldboy/nginx:1.13
ports:
- containerPort: 80
serviceaccount account used by pods
role namespaced role, grants access to namespace-scoped resources
clusterrole cluster-wide role, grants access to cluster-scoped resources
RoleBinding binds a role within a namespace
ClusterRoleBinding binds a role cluster-wide
5.7 ConfigMaps
vi /opt/81.conf
server {
listen 81;
server_name localhost;
root /html;
index index.html index.htm;
location / {
}
}
kubectl create configmap 81.conf --from-file=/opt/81.conf
#verify
kubectl get cm
vi k8s_deploy.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx
spec:
replicas: 2
template:
metadata:
labels:
app: nginx
spec:
volumes:
- name: nginx-config
configMap:
name: 81.conf
items:
- key: 81.conf
path: 81.conf
containers:
- name: nginx
image: nginx:1.13
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/conf.d
ports:
- containerPort: 80
name: port1
- containerPort: 81
name: port2
kubectl apply -f k8s_deploy.yaml
#if the command above fails, kubectl replace -f k8s_deploy.yaml can be used instead
#this behaves like a mounted volume but is not persistent storage: the mount replaces every file in the container's /etc/nginx/conf.d directory
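If only the single file should be injected without wiping the rest of the directory, a subPath mount is the usual workaround; a sketch of the volumeMounts fragment that would replace the one in k8s_deploy.yaml above:
volumeMounts:
- name: nginx-config
  mountPath: /etc/nginx/conf.d/81.conf   #mount just this one file at this exact path
  subPath: 81.conf
Note that a subPath-mounted file is not updated live when the ConfigMap changes, unlike a whole-directory mount.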
6. Common k8s services
6.1 Deploying the dashboard
[root@kubernetes-master dashbord]# cat kubernetes-dashboard.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: 192.168.14.250:5000/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
type: NodePort
ports:
- port: 443
    nodePort: 30443 # the default NodePort range is 30000-32767, so 8443 would be rejected
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
#change the image line to image: registry.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1
#change the Service type to NodePort
NOTE: this config cannot be opened in Google Chrome (self-signed certificate), and the default account's permissions are too limited for normal use
Fixing the limited permissions
#use the Firefox browser to access the dashboard
[root@kubernetes-master dashbord]# cat dashboard-admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: admin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin
namespace: kube-system
kubectl create -f dashboard-admin.yaml
#access the dashboard again to test
Fixing the Google Chrome login problem, and logging in with a kubeconfig
1. The Chrome login problem
#create a working directory
mkdir key && cd key
#generate a self-signed certificate
openssl genrsa -out dashboard.key 2048
openssl req -new -out dashboard.csr -key dashboard.key -subj '/CN=10.0.0.11'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
#delete the old certificate secret
kubectl delete secret kubernetes-dashboard-certs -n kube-system
#create a new certificate secret
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kube-system
#find the dashboard pod
kubectl get pod -n kube-system
#delete the pod so the replacement picks up the new certificate
kubectl delete pod -n kube-system kubernetes-dashboard-7c697b776b-zph98
#check the certificate expiry dates
[root@kubernetes-master pki]# pwd
/etc/kubernetes/pki
[root@kubernetes-master pki]# openssl x509 -in ca.crt -noout -dates
notBefore=Mar 16 02:30:04 2021 GMT
notAfter=Mar 14 02:30:04 2031 GMT
2. Logging in with a kubeconfig
kubectl get serviceaccount -n kube-system
kubectl describe serviceaccount admin -n kube-system
kubectl describe secret admin-token-qb25s -n kube-system
#save the token value from the output above into the variable below, then run the following commands
DASH_TOKEN='eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi10b2tlbi1xYjI1cyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImQ5ZmNmN2M3LTJiMTYtNDg5ZS1hMTJiLWRiMjI2YzYzY2I0MyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphZG1pbiJ9.KnCf09NQYEoG5_1C0YIfKZC4E4AMB3bhD-pLSqQV1zuS6kEi_SY5iEwdnv_sELa8ohQ-e3cFn8B_Z2H2G1a1rHMCDQ4xDiqwAJLwngWJ2gC_pC-6dv0uNIO_gUN0P1VHIv6ygFQ7LqswP0MskMaSmhjDSMAbfQspX3tD8rpcqWvaFvIAOTDPEJ0Ifk5LAvSeo20NbFPXlNAGdB2zZuMHfQ65oZgjj5lJbIivDkJc8CGOkVh8TSNolhMAxGoSnR-SqH0V4-zMORxW1THPjI_QcZs0mTLCPsow6vYKgbeXoew_mQQ3BsUXA5JnKDCwBVgwUeTNe5MGlcwPu8YRd1UAWQ'
kubectl config set-cluster kubernetes --server=https://10.0.0.11:6443 --kubeconfig=/root/dashbord-admin.conf
kubectl config set-credentials admin --token=$DASH_TOKEN --kubeconfig=/root/dashbord-admin.conf
kubectl config set-context admin --cluster=kubernetes --user=admin --kubeconfig=/root/dashbord-admin.conf
kubectl config use-context admin --kubeconfig=/root/dashbord-admin.conf
#when done, download /root/dashbord-admin.conf to your desktop, open the dashboard URL in the browser, and log in with the kubeconfig file
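Copy-pasting the token is error-prone; it can also be pulled straight out of the ServiceAccount's secret with jsonpath (a sketch assuming the admin ServiceAccount created earlier):
DASH_TOKEN=$(kubectl -n kube-system get secret \
  $(kubectl -n kube-system get sa admin -o jsonpath='{.secrets[0].name}') \
  -o jsonpath='{.data.token}' | base64 -d)
echo $DASH_TOKEN   #same value as shown by kubectl describe secret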
6.2 Deploying DNS (CoreDNS)
vi coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
.:53 {
errors
health
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: coredns
image: coredns/coredns:1.3.1
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 100Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
- name: tmp
mountPath: /tmp
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: tmp
emptyDir: {}
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.254.230.254
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
#test
yum install bind-utils.x86_64 -y
dig @10.254.230.254 kubernetes.default.svc.cluster.local +short
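DNS can also be verified from inside the cluster; busybox:1.28 is used deliberately because nslookup is broken in newer busybox images:
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default
#should resolve to the kubernetes Service IP inside the 10.254.0.0/16 service CIDR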
7. Network access in k8s
7.1 Mapping an external service into k8s
#expose an out-of-cluster MySQL through an ordinary ClusterIP Service backed by a hand-written Endpoints object
#prepare the database (here on 10.0.0.12)
yum install mariadb-server -y
systemctl start mariadb
mysql_secure_installation
mysql>grant all on *.* to root@'%' identified by '123456';
#delete the old mysql rc and svc
kubectl delete rc mysql
kubectl delete svc mysql
#create the Endpoints object and the Service (their names must match)
cat mysql-endpoint.yaml
apiVersion: v1
kind: Endpoints
metadata:
name: mysql
subsets:
- addresses:
- ip: 10.0.0.12
ports:
- name: mysql
port: 3306
protocol: TCP
cat mysql-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: mysql
spec:
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: 3306
type: ClusterIP
#revisit the web page
#verify
mysql -e 'show databases;'
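A quick sanity check that the Service really points at the external address:
kubectl get endpoints mysql   #should list 10.0.0.12:3306
kubectl describe svc mysql    #the Endpoints field must not be <none>; Service and Endpoints names have to match exactly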
7.2 kube-proxy in IPVS mode
In iptables mode every connection traverses a long, sequentially evaluated rule chain, which degrades as services multiply; IPVS schedules connections through kernel hash tables, so lookup cost stays flat and scheduling follows well-defined algorithms.
yum install conntrack-tools -y
yum install ipvsadm.x86_64 -y
#load the required kernel modules
modprobe ip_vs_sh
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
#switch the proxy mode to ipvs
kubectl get cm -n kube-system |grep -i proxy
kubectl edit cm kube-proxy -n kube-system
#find the mode field and set it
mode: "ipvs"
kubectl get pod -n kube-system |grep -i proxy
kubectl delete pod -n kube-system kube-proxy-fml6t #delete the pod found by the previous command so a new one starts with ipvs enabled
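To confirm the switch took effect, check the new proxy pod's log and the IPVS rule table (a sketch; kube-proxy-xxxxx is a placeholder for your actual pod name):
kubectl -n kube-system logs kube-proxy-xxxxx | grep -i proxier   #should report that the ipvs proxier is in use
ipvsadm -Ln   #the ClusterIP VIPs now show up as IPVS virtual servers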
8. Ingress

Traefik is an open-source reverse proxy and load balancer. Its biggest strength is direct integration with common microservice systems, enabling automatic, dynamic configuration. Supported backends currently include Docker, Swarm, Mesos/Marathon, Kubernetes, Consul, Etcd, Zookeeper, BoltDB, the REST API, and more.
The ingress controller version must be compatible with the k8s version (check the official docs).
1. Create the RBAC objects
cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
kubectl create -f rbac.yaml
2. Deploy Traefik
cat traefik.yaml
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
hostNetwork: true
containers:
- image: 192.168.14.250:5000/traefik:v1.7.2
imagePullPolicy: IfNotPresent
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
- protocol: TCP
port: 8080
name: admin
type: NodePort
kubectl create -f traefik.yaml
#browse to 10.0.0.11:8080 to see the Traefik dashboard
3. Create an Ingress object
cat ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: traefik-myweb
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: nginx.ming.com
http:
paths:
- backend:
serviceName: myweb
servicePort: 8080
kubectl create -f ingress.yaml
4. Test access
#hosts entry
10.0.0.11 nginx.ming.com
#browse to nginx.ming.com to test
#check the Traefik dashboard: a new frontend/backend record appears
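Without touching the hosts file, the routing rule can be exercised directly by spoofing the Host header:
curl -H 'Host: nginx.ming.com' http://10.0.0.11/
#a response from the myweb service confirms the Ingress rule works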
#ingress-nginx (the NGINX-based controller, as an alternative example)
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: default-http-backend
labels:
app.kubernetes.io/name: default-http-backend
app.kubernetes.io/part-of: ingress-nginx
namespace: ingress-nginx
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: default-http-backend
app.kubernetes.io/part-of: ingress-nginx
template:
metadata:
labels:
app.kubernetes.io/name: default-http-backend
app.kubernetes.io/part-of: ingress-nginx
spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: netonline/defaultbackend:1.4
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: ingress-nginx
labels:
app.kubernetes.io/name: default-http-backend
app.kubernetes.io/part-of: ingress-nginx
spec:
ports:
- port: 80
targetPort: 8080
selector:
app.kubernetes.io/name: default-http-backend
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-configuration
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tcp-services
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: udp-services
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: nginx-ingress-clusterrole
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
resources:
- ingresses/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: nginx-ingress-role
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
# Defaults to "<election-id>-<ingress-class>"
# Here: "<ingress-controller-leader>-<nginx>"
# This has to be adapted if you change either parameter
# when launching the nginx-ingress-controller.
- "ingress-controller-leader-nginx"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: nginx-ingress-role-nisa-binding
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress-role
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: nginx-ingress-clusterrole-nisa-binding
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
spec:
serviceAccountName: nginx-ingress-serviceaccount
hostNetwork: true
containers:
- name: nginx-ingress-controller
image: quay.mirrors.ustc.edu.cn/kubernetes-ingress-controller/nginx-ingress-controller:0.20.0
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-configuration
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --publish-service=$(POD_NAMESPACE)/ingress-nginx
- --annotations-prefix=nginx.ingress.kubernetes.io
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
# www-data -> 33
runAsUser: 33
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
---
#HTTPS with Ingress
1. Reference: https://www.cnblogs.com/netonline/p/10968046.html
2. Alternatively, terminate HTTPS on an external SLB in front of the cluster; the SLB can also handle the HTTP-to-HTTPS redirect
9. Autoscaling in k8s
#heapster was deprecated after k8s v1.12; metrics-server now provides the cluster resource metrics
Installation
cat auth-delegator.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
cat auth-reader.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: metrics-server-auth-reader
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
cat metrics-apiservice.yaml
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1beta1.metrics.k8s.io
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
service:
name: metrics-server
namespace: kube-system
group: metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: true
groupPriorityMinimum: 100
versionPriority: 100
cat metrics-server-deployment.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: metrics-server
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
name: metrics-server-config
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: EnsureExists
data:
NannyConfiguration: |-
apiVersion: nannyconfig/v1alpha1
kind: NannyConfiguration
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: metrics-server-v0.3.3
namespace: kube-system
labels:
k8s-app: metrics-server
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v0.3.3
spec:
selector:
matchLabels:
k8s-app: metrics-server
version: v0.3.3
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
version: v0.3.3
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
containers:
- name: metrics-server
image: 192.168.14.250:5000/metrics-server-amd64:v0.3.3
command:
- /metrics-server
- --metric-resolution=30s
# These are needed for GKE, which doesn't support secure communication yet.
# Remove these lines for non-GKE clusters, and when GKE supports token-based auth.
#- --kubelet-port=10255
#- --deprecated-kubelet-completely-insecure=true
- --kubelet-insecure-tls
- --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
ports:
- containerPort: 443
name: https
protocol: TCP
- name: metrics-server-nanny
image: 192.168.14.250:5000/addon-resizer:1.8.5
resources:
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 5m
memory: 50Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: metrics-server-config-volume
mountPath: /etc/config
command:
- /pod_nanny
- --config-dir=/etc/config
#- --cpu=80m
- --extra-cpu=0.5m
#- --memory=80Mi
#- --extra-memory=8Mi
- --threshold=5
- --deployment=metrics-server-v0.3.3
- --container=metrics-server
- --poll-period=300000
- --estimator=exponential
- --minClusterSize=2
# Specifies the smallest cluster (defined in number of nodes)
# resources will be scaled to.
#- --minClusterSize={{ metrics_server_min_cluster_size }}
volumes:
- name: metrics-server-config-volume
configMap:
name: metrics-server-config
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
cat metrics-server-service.yaml
apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Metrics-server"
spec:
selector:
k8s-app: metrics-server
ports:
- port: 443
protocol: TCP
targetPort: https
cat resource-reader.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- deployments
verbs:
- get
- list
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metrics-server
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
#after writing all of the yaml files above, create everything in one go
kubectl create -f .
NOTE: the image addresses must be changed; a registry mirror can be used to speed up the pulls (see below)
#test
kubectl top node
kubectl top pod
Registry mirrors
1. docker.io mirror
Official form:
docker pull grafana/grafana:6.0.1
Mirrored form:
docker pull docker.mirrors.ustc.edu.cn/grafana/grafana:6.0.1
2. gcr.io mirror
Official form:
docker pull gcr.io/xxx/yyy:zzz
Mirrored form:
docker pull gcr.mirrors.ustc.edu.cn/xxx/yyy:zzz
docker pull gcr.azk8s.cn/xxx/yyy:zzz #recommended
3. k8s.gcr.io mirror
Kubernetes system images are pulled from k8s.gcr.io, which is equivalent to gcr.io/google-containers.
Official form:
docker pull k8s.gcr.io/xxx:yyy #equivalent to docker pull gcr.io/google-containers/xxx:yyy
Mirrored form:
docker pull gcr.mirrors.ustc.edu.cn/google-containers/xxx:yyy
docker pull gcr.azk8s.cn/google-containers/xxx:yyy #recommended
4. quay.io mirror
Official form:
docker pull quay.io/coreos/k8s-prometheus-adapter-amd64:v0.4.1
Mirrored form:
docker pull quay.mirrors.ustc.edu.cn/coreos/k8s-prometheus-adapter-amd64:v0.4.1
docker pull quay.azk8s.cn/coreos/k8s-prometheus-adapter-amd64:v0.4.1 #recommended
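Since manifests still reference the original registry names, re-tag after pulling through a mirror so the image resolves locally, e.g. for the flannel image used in section 5:
docker pull quay.mirrors.ustc.edu.cn/coreos/flannel:v0.13.1-rc2
docker tag quay.mirrors.ustc.edu.cn/coreos/flannel:v0.13.1-rc2 quay.io/coreos/flannel:v0.13.1-rc2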
Configuring autoscaling
The pod spec must declare CPU requests/limits for the HPA to measure utilization against, for example:
cat tomcat-rc.yml
apiVersion: v1
kind: ReplicationController
metadata:
name: myweb
spec:
replicas: 1
selector:
app: myweb
template:
metadata:
labels:
app: myweb
spec:
nodeName: 10.0.0.13
containers:
- name: myweb
image: kubeguide/tomcat-app:v2
resources:
limits:
cpu: 150m
requests:
cpu: 150m
ports:
- containerPort: 8080
env:
- name: MYSQL_SERVICE_HOST
value: 'mysql'
- name: MYSQL_SERVICE_PORT
value: '3306'
#create an autoscaling rule, e.g. scale between 1 and 3 replicas at 10% CPU:
kubectl autoscale rc myweb --max=3 --min=1 --cpu-percent=10
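To watch the HPA react, generate some load against the service and, in another terminal, watch the replica count (a sketch assuming a myweb Service exposing port 8080 in front of the rc):
kubectl run -it --rm load-gen --image=busybox:1.28 --restart=Never -- \
  /bin/sh -c 'while true; do wget -q -O- http://myweb.default.svc.cluster.local:8080 > /dev/null; done'
kubectl get hpa -w   #REPLICAS climbs toward --max once CPU usage crosses 10%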
#delete the rule when done
kubectl delete hpa myweb
10. Dynamic storage in k8s

Diagram: two setups are shown, StorageClass + NFS and StorageClass + Ceph.
Kubernetes provides a mechanism that creates PVs automatically, called Dynamic Provisioning; its core is the StorageClass API object.
Setup
Provision a new machine with the IP 10.0.0.13.
1. Install the NFS utilities (on all nodes)
yum install -y nfs-utils
2. Configure the export on 10.0.0.13
cat /etc/exports
/data 10.0.0.0/24(rw,sync,no_root_squash,no_all_squash)
3. Create the data directory on 10.0.0.13
mkdir /data
4. Start the NFS service on 10.0.0.13
systemctl start nfs
systemctl enable nfs
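Before wiring it into k8s, verify the export is visible from the master:
showmount -e 10.0.0.13
#Export list for 10.0.0.13:
#/data 10.0.0.0/24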
#configure the provisioner on the k8s master
cat nfs-client-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: course-nfs-storage
provisioner: fuseim.pri/ifs
cat nfs-client-sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
cat nfs-client.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: nfs-client-provisioner
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-client-provisioner
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 192.168.14.250:5000/nfs-client-provisioner:latest
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: fuseim.pri/ifs
- name: NFS_SERVER
value: 10.0.0.13
- name: NFS_PATH
value: /data
volumes:
- name: nfs-client-root
nfs:
server: 10.0.0.13
path: /data
kubectl create -f .
Set the default StorageClass:
kubectl patch storageclass course-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
The default reclaim policy of a StorageClass is Delete.
#for reclaim-policy tests see https://www.cnblogs.com/panwenbin-logs/p/12196286.html (with Retain, deleting the PVC does not delete the data)
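A quick way to prove dynamic provisioning works end to end: create a PVC with no storageClassName and watch it bind against the default class (a minimal sketch):
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF
kubectl get pvc test-claim   #STATUS should turn Bound, and a backing directory appears under /data on 10.0.0.13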
11. Package management with Helm
Installation
#download the client tarball (fetch the binary archive directly rather than the GitHub release page)
wget https://get.helm.sh/helm-v2.17.0-linux-amd64.tar.gz
#unpack
tar xf helm-v2.17.0-linux-amd64.tar.gz
#install the client, then initialize (this deploys tiller into the cluster)
mv linux-amd64/helm /usr/bin/
helm init
cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: tiller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: tiller
namespace: kube-system
kubectl create -f rbac.yaml
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
helm version
#enable tab completion for helm
helm completion bash > ~/.helmrc && echo "source ~/.helmrc" >> ~/.bashrc
source ~/.helmrc
#commands:
helm install --debug --dry-run ./xxx #render the templates without actually installing
helm install ./xxx #install for real
helm list #list releases
helm delete release_name #delete a release
helm upgrade release_name --set image.tag=1.15 #upgrade a release
helm rollback #roll back a release
helm repo add repo_name http://xxxx #add a chart repository (a repo name must precede the URL)
[root@kubernetes-master mysql]# tree
.
├── Chart.yaml #chart metadata
├── README.md #documentation
├── templates #template directory holding the k8s resource manifests
│ ├── configurationFiles-configmap.yaml
│ ├── deployment.yaml
│ ├── _helpers.tpl
│ ├── initializationFiles-configmap.yaml
│ ├── NOTES.txt #usage notes printed after install
│ ├── pvc.yaml
│ ├── secrets.yaml
│ ├── serviceaccount.yaml
│ ├── servicemonitor.yaml
│ ├── svc.yaml
│ └── tests
│ ├── test-configmap.yaml
│ └── test.yaml
└── values.yaml #default values for the template variables
helm install --name mysql2 --namespace mysql2 --set persistence.size=10Gi stable/mysql
stable/mysql is a Helm package, i.e. a chart
installing a chart once produces a release, which in turn deploys many k8s resources
12. Kubesphere
KubeSphere is a management platform built on top of k8s.
Official docs: https://kubesphere.com.cn/docs/installing-on-linux/introduction/multioverview/
