Shell Install【CentOS(7.6) + K8S(1.28.2)】
【Environment】
CentOS 7.6.1810 (kernel 3.10.0-957)
Kubernetes: 1.28.2
【Server configuration】- 3 machines
cd /etc/yum.repos.d/ && mkdir CentOS && mv CentOS-*.repo CentOS/
vi redhat.repo
[redhat]
name=redhat
baseurl=file:///mnt/cdrom
gpgcheck=0
enabled=1
mkdir /mnt/cdrom && mount /dev/cdrom /mnt/cdrom && yum clean all && yum repolist all && yum makecache
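#Note: this mount does not survive a reboot; a sketch to persist it in fstab (assumes /dev/cdrom stays the device name):
echo '/dev/cdrom /mnt/cdrom iso9660 defaults,ro 0 0' >> /etc/fstab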
systemctl stop firewalld && \
systemctl disable firewalld && \
setenforce 0
vim /etc/selinux/config
#set SELINUX=disabled
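#Non-interactive equivalent of the edit above (assumes the stock SELINUX=enforcing line):
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config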
yum install -y device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack telnet
【Configure the hosts file so the machines can reach each other by hostname】
vim /etc/hosts
192.168.40.150 k8s-sheca-master
192.168.40.151 k8s-sheca-node1
192.168.40.152 k8s-sheca-node2
【Configure passwordless SSH between the hosts】- master
ssh-keygen
ssh-copy-id k8s-sheca-node1
ssh-copy-id k8s-sheca-node2
【Disable the swap partition to improve performance, then reboot the servers】- all 3
swapoff -a
vim /etc/fstab
#/dev/mapper/centos-swap swap swap defaults 0 0
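#Non-interactive sketch of the same fstab edit (comments out any uncommented swap line):
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]])/#\1/' /etc/fstab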
【Tune kernel parameters】- all 3
modprobe br_netfilter
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
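#modprobe does not persist across reboots; a sketch to load br_netfilter at boot via systemd-modules-load:
cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
EOF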
【Configure the Aliyun docker-ce repo】- all 3
yum install yum-utils -y
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
【Configure the Aliyun repo needed to install the k8s components】- all 3
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
【Configure time synchronization】- all 3
yum install ntpdate -y
ntpdate cn.pool.ntp.org
crontab -e
* * * * * /usr/sbin/ntpdate cn.pool.ntp.org
service crond restart
【Install containerd】- all 3
yum list | grep containerd
yum install -y container-selinux
yum install containerd.io-1.6.31 -y
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
vim /etc/containerd/config.toml
Change SystemdCgroup = false to SystemdCgroup = true
Change sandbox_image = "k8s.gcr.io/pause:3.6" to sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
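#Non-interactive sketch of the two config.toml edits above (assumes the default strings shown):
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sed -i 's#sandbox_image = "k8s.gcr.io/pause:3.6"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml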
[Configure a containerd registry mirror]
mkdir /etc/containerd/certs.d/docker.io/ -p
vim /etc/containerd/config.toml
config_path = "/etc/containerd/certs.d"
vim /etc/containerd/certs.d/docker.io/hosts.toml
server = "https://docker.io"

[host."https://qryj5zfu.mirror.aliyuncs.com"]
  capabilities = ["pull"]

[host."https://registry.docker-cn.com"]
  capabilities = ["pull"]
systemctl restart containerd
systemctl enable containerd --now
yum install docker-ce -y
systemctl enable docker --now
vim /etc/docker/daemon.json
{
"registry-mirrors":["https://qryj5zfu.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com"]
}
systemctl restart docker
【Install the packages needed to initialize k8s】- all 3
yum list | grep kubelet
yum install -y kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2
systemctl enable kubelet
【kubeadm: initialize the k8s cluster】
crictl config runtime-endpoint unix:///run/containerd/containerd.sock
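#Optional check (not in the original notes): confirm crictl can reach containerd over the configured endpoint
crictl info > /dev/null && echo "CRI endpoint OK"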
【Initialize the k8s cluster with kubeadm】- master
kubeadm config print init-defaults > kubeadm.yaml
vim kubeadm.yaml   #edit the defaults (advertiseAddress, node name, imageRepository, subnets, plus the KubeProxy/Kubelet sections), then verify:
cat kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.40.150
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-sheca-master
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.28.2
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
kubeadm config images list --kubernetes-version v1.28.2
registry.k8s.io/kube-apiserver:v1.28.2
registry.k8s.io/kube-controller-manager:v1.28.2
registry.k8s.io/kube-scheduler:v1.28.2
registry.k8s.io/kube-proxy:v1.28.2
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.9-0
registry.k8s.io/coredns/coredns:v1.10.1
crictl pull registry.lank8s.cn/kube-apiserver:v1.28.2 &&
crictl pull registry.lank8s.cn/kube-controller-manager:v1.28.2 &&
crictl pull registry.lank8s.cn/kube-scheduler:v1.28.2 &&
crictl pull registry.lank8s.cn/kube-proxy:v1.28.2 &&
crictl pull registry.lank8s.cn/pause:3.9 &&
crictl pull registry.lank8s.cn/etcd:3.5.9-0 &&
crictl pull registry.lank8s.cn/coredns/coredns:v1.10.1
#Re-tag the images: kubeadm pulls from registry.k8s.io by default (alternatively, point imageRepository at the Aliyun mirror)
ctr -n k8s.io images tag registry.lank8s.cn/kube-apiserver:v1.28.2 registry.k8s.io/kube-apiserver:v1.28.2
ctr -n k8s.io images tag registry.lank8s.cn/kube-controller-manager:v1.28.2 registry.k8s.io/kube-controller-manager:v1.28.2
ctr -n k8s.io images tag registry.lank8s.cn/kube-scheduler:v1.28.2 registry.k8s.io/kube-scheduler:v1.28.2
ctr -n k8s.io images tag registry.lank8s.cn/kube-proxy:v1.28.2 registry.k8s.io/kube-proxy:v1.28.2
ctr -n k8s.io images tag registry.lank8s.cn/pause:3.9 registry.k8s.io/pause:3.9
ctr -n k8s.io images tag registry.lank8s.cn/etcd:3.5.9-0 registry.k8s.io/etcd:3.5.9-0
ctr -n k8s.io images tag registry.lank8s.cn/coredns/coredns:v1.10.1 registry.k8s.io/coredns/coredns:v1.10.1
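#Optional check (not in the original notes): the re-tagged registry.k8s.io images should now be present locally
crictl images | grep registry.k8s.io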
kubeadm init --config=kubeadm.yaml --ignore-preflight-errors=SystemVerification --ignore-preflight-errors=Swap
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#On node1 and node2, run the join command printed by kubeadm init:
kubeadm join 192.168.40.150:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:d127af31bf06540722208578d1219bc997fcfa0aec616e45c71b1db207430204
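#If the token has expired, a fresh join command can be printed on the master (standard kubeadm command):
kubeadm token create --print-join-command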
kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-sheca-master NotReady control-plane 74s v1.28.2 192.168.40.150 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 containerd://1.6.31
k8s-sheca-node1 NotReady <none> 7s v1.28.2 192.168.40.151 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 containerd://1.6.31
k8s-sheca-node2 NotReady <none> 4s v1.28.2 192.168.40.152 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 containerd://1.6.31
【Install the Kubernetes network plugin calico】- v3.26.4
#https://docs.tigera.io/calico/3.26/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico-with-kubernetes-api-datastore-50-nodes-or-less
curl https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/calico.yaml -O
kubectl apply -f calico.yaml
crictl pull docker.io/calico/cni:v3.26.4 &&
crictl pull docker.io/calico/node:v3.26.4 &&
crictl pull docker.io/calico/kube-controllers:v3.26.4
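#Optional check (not in the original notes): the calico-node DaemonSet pods should go Running in kube-system
kubectl get pods -n kube-system -l k8s-app=calico-node -o wide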
kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-sheca-master Ready control-plane 56m v1.28.2 192.168.40.150 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 containerd://1.6.31
k8s-sheca-node1 Ready <none> 55m v1.28.2 192.168.40.151 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 containerd://1.6.31
k8s-sheca-node2 Ready <none> 55m v1.28.2 192.168.40.152 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 containerd://1.6.31
#If the boot progress bar hangs because of a bad SELinux config:
#reboot, press e, append rw init=/sysroot/bin/sh, press Ctrl+X, then chroot /sysroot and fix /etc/sysconfig/selinux with vi
==============================================================================================
【metrics-server】
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.7.0/components.yaml
vim components.yaml
1. Change registry.k8s.io/metrics-server/metrics-server:v0.7.0 to registry.lank8s.cn/metrics-server/metrics-server:v0.7.0
2. Add the container arg - --kubelet-preferred-address-types=InternalIP
3. Add the container arg - --kubelet-insecure-tls
kubectl apply -f components.yaml
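#Optional check (not in the original notes): once the metrics-server pod is Running, node metrics should resolve
kubectl top nodes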
==============================================================================================
【MetalLB】
kubectl get cm -n kube-system
kubectl edit cm kube-proxy -n kube-system
Set strictARP: true (under the ipvs section)
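#Non-interactive sketch of the same change (the pipeline suggested in the MetalLB docs):
kubectl get configmap kube-proxy -n kube-system -o yaml | \
  sed -e "s/strictARP: false/strictARP: true/" | \
  kubectl apply -f - -n kube-system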
wget https://raw.githubusercontent.com/metallb/metallb/v0.14.4/config/manifests/metallb-native.yaml
kubectl apply -f metallb-native.yaml
kubectl get ns
kubectl get pods -n metallb-system
kubectl get pods speaker-8k9gk -o jsonpath={.status.hostIP} -n metallb-system
kubectl get pods speaker-pv2dk -o jsonpath={.status.hostIP} -n metallb-system
kubectl get pods speaker-w49ww -o jsonpath={.status.hostIP} -n metallb-system
kubectl api-versions
vim metallb-ipaddresspool.yaml
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: localip-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.40.51-192.168.40.80
  autoAssign: true
  avoidBuggyIPs: true
kubectl apply -f metallb-ipaddresspool.yaml
kubectl get ipaddresspool -n metallb-system
vim metallb-l2advertisement.yaml
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: localip-pool-l2a
  namespace: metallb-system
spec:
  ipAddressPools:
  - localip-pool
  interfaces:
  - ens33
kubectl apply -f metallb-l2advertisement.yaml
kubectl get l2advertisement -n metallb-system
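#Optional end-to-end sketch (demo name lb-test and the nginx image are placeholders, not from the original notes):
#a LoadBalancer Service should now receive an EXTERNAL-IP from the pool
kubectl create deployment lb-test --image=nginx
kubectl expose deployment lb-test --port=80 --type=LoadBalancer
kubectl get svc lb-test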
==============================================================================================
【csi-driver-nfs】
yum install -y nfs-utils
systemctl start nfs-server
systemctl status nfs-server
mkdir /data/nfs -p
vim /etc/exports
/data/nfs 192.168.40.0/24(rw,fsid=0,async,no_subtree_check,no_auth_nlm,insecure,no_root_squash)
exportfs -arv
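#Optional check from any node (assuming 192.168.40.104 is the NFS server address used in the StorageClass below):
showmount -e 192.168.40.104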
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
grep image: csi-driver-nfs/deploy/v4.6.0/*
cd csi-driver-nfs/deploy/v4.6.0/
vi csi-nfs-controller.yaml
:%s/registry.k8s.io/registry.lank8s.cn/g
vi csi-nfs-node.yaml
:%s/registry.k8s.io/registry.lank8s.cn/g
vi csi-snapshot-controller.yaml
:%s/registry.k8s.io/registry.lank8s.cn/g
kubectl apply -f ./
kubectl get crds | grep storage.k8s.io
yum install -y nfs-utils   #on all 3 machines
vim csi-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 192.168.40.104
  share: /data/nfs
reclaimPolicy: Delete
volumeBindingMode: Immediate
kubectl apply -f csi-storageclass.yaml
kubectl get sc
vim nfs-pvc-demo.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-pvc
  annotations:
    velero.io/csi-volumesnapshot-class: "nfs-csi"
spec:
  storageClassName: nfs-csi
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
kubectl apply -f nfs-pvc-demo.yaml
kubectl get pvc
vim redis-with-nfs-pvc.yaml
apiVersion: v1
kind: Pod
metadata:
  name: redis-with-nfs-pvc
spec:
  containers:
  - name: redis
    image: redis:7-alpine
    ports:
    - containerPort: 6379
      name: redis
    volumeMounts:
    - mountPath: /data
      name: data-storage
  volumes:
  - name: data-storage
    persistentVolumeClaim:
      claimName: nfs-pvc
kubectl apply -f redis-with-nfs-pvc.yaml
kubectl get pods
kubectl exec -it redis-with-nfs-pvc -- /bin/sh
/data # redis-cli
127.0.0.1:6379> set mykey "BIRKHOFF 2024-04-18"
127.0.0.1:6379> BGSAVE
127.0.0.1:6379> get mykey
"BIRKHOFF 2024-04-18"
==============================================================================================
【Ingress】
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.9.5/deploy/static/provider/cloud/deploy.yaml
vim deploy.yaml
Change registry.k8s.io to registry.lank8s.cn (2 occurrences)
kubectl apply -f deploy.yaml
kubectl get ingressclass
kubectl get pods -n ingress-nginx
kubectl get svc -n ingress-nginx
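#Optional smoke test (demo names web and web.example.com are placeholders, not from the original notes):
kubectl create deployment web --image=nginx
kubectl expose deployment web --port=80
kubectl create ingress web --class=nginx --rule="web.example.com/*=web:80"
curl -H "Host: web.example.com" http://<EXTERNAL-IP-of-ingress-nginx-controller>/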
==============================================================================================
【Knative】
wget https://github.com/knative/serving/releases/download/knative-v1.13.1/serving-crds.yaml
kubectl apply -f serving-crds.yaml
wget https://github.com/knative/serving/releases/download/knative-v1.13.1/serving-core.yaml
vim serving-core.yaml
#Change gcr.io/knative-releases to gcr.lank8s.cn/knative-releases; the resulting image refs:
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/queue@sha256:e52286fc4843470383e917abc9c1b0c8d10f585c4274c57b612279869bc86f0d
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/activator@sha256:21f8e11a44bf1e260602d30e6762a3dc433c608d1dd0e309c0ff89728e71901d
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:34796e9f760bb67065c6f101296513b38d04d39d11888e919692ac46fa6dc7c2
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/controller@sha256:53d9aa4d2c7a82f5a01202e386f7503b21839cbe2e5e62f1e9bda2aa5f11b518
gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/webhook@sha256:700c69915dc7cd86dffb61c26b0ba34427fab809de1e3344589dd955b6440882
kubectl apply -f serving-core.yaml
kubectl get ns
kubectl get pods -n knative-serving
#Istio
#If Istio is already installed, uninstall it completely first
istioctl x uninstall --purge
wget https://github.com/knative/net-istio/releases/download/knative-v1.13.1/istio.yaml
#docker.io/istio/proxyv2:1.20.3
#docker.io/istio/pilot:1.20.3
#If you only have two servers, edit istio.yaml:
At line 9853 of istio.yaml: change 3 to 2
kubectl apply -l knative.dev/crd-install=true -f istio.yaml
kubectl apply -f istio.yaml
wget https://github.com/knative/net-istio/releases/download/knative-v1.13.1/net-istio.yaml
#Change gcr.io/knative-releases to gcr.lank8s.cn/knative-releases
kubectl apply -f net-istio.yaml
kubectl get pods -n knative-serving
kubectl --namespace istio-system get service istio-ingressgateway
#serving-hpa
wget https://github.com/knative/serving/releases/download/knative-v1.13.1/serving-hpa.yaml
#Change gcr.io/knative-releases to gcr.lank8s.cn/knative-releases
kubectl apply -f serving-hpa.yaml
#kn client
wget https://github.com/knative/client/releases/download/knative-v1.13.0/kn-linux-amd64
cp kn-linux-amd64 /usr/local/bin/kn
chmod +x /usr/local/bin/kn
kn --help
#Serverless test with Knative
kn service create demoapp --image=ikubernetes/demoapp:v1.0
kn service create helloworld-java1 --image=docker.io/abreaking/helloworld-java
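#Optional check (not in the original notes): the Knative services should report a ready URL
kn service list
kubectl get ksvc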
==============================================================================================
【Knative】-1.10.2
wget https://github.com/knative/serving/releases/download/knative-v1.10.2/serving-crds.yaml
kubectl apply -f serving-crds.yaml
wget https://github.com/knative/serving/releases/download/knative-v1.10.2/serving-core.yaml
vim serving-core.yaml   (8 occurrences)
Change gcr.io/knative-releases to gcr.lank8s.cn/knative-releases
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/queue@sha256:dabaecec38860ca4c972e6821d5dc825549faf50c6feb8feb4c04802f2338b8a &&
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/activator@sha256:c2994c2b6c2c7f38ad1b85c71789bf1753cc8979926423c83231e62258837cb9 &&
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/autoscaler@sha256:8319aa662b4912e8175018bd7cc90c63838562a27515197b803bdcd5634c7007 &&
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/controller@sha256:98a2cc7fd62ee95e137116504e7166c32c65efef42c3d1454630780410abf943 &&
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/domain-mapping@sha256:f66c41ad7a73f5d4f4bdfec4294d5459c477f09f3ce52934d1a215e32316b59b &&
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/domain-mapping-webhook@sha256:7368aaddf2be8d8784dc7195f5bc272ecfe49d429697f48de0ddc44f278167aa &&
crictl pull gcr.lank8s.cn/knative-releases/knative.dev/serving/cmd/webhook@sha256:4305209ce498caf783f39c8f3e85dfa635ece6947033bf50b0b627983fd65953
kubectl apply -f serving-core.yaml
kubectl get ns
#Istio
crictl pull docker.io/istio/pilot:1.17.1
crictl pull docker.io/istio/proxyv2:1.17.1
wget https://github.com/knative/net-istio/releases/download/knative-v1.10.1/istio.yaml
kubectl apply -l knative.dev/crd-install=true -f istio.yaml
kubectl apply -f istio.yaml
kubectl get pods -n istio-system -o wide
kubectl get service istio-ingressgateway -n istio-system
wget https://github.com/knative/net-istio/releases/download/knative-v1.10.1/net-istio.yaml
vim net-istio.yaml
Change gcr.io/knative-releases to gcr.lank8s.cn/knative-releases
kubectl apply -f net-istio.yaml
wget https://github.com/knative/serving/releases/download/knative-v1.10.2/serving-hpa.yaml
vim serving-hpa.yaml
Change gcr.io/knative-releases to gcr.lank8s.cn/knative-releases
kubectl apply -f serving-hpa.yaml
wget https://github.com/knative/client/releases/download/knative-v1.10.0/kn-linux-amd64
cp kn-linux-amd64 /usr/local/bin/kn
chmod +x /usr/local/bin/kn
kn --help
kubectl get pods -n knative-serving