Deploying Docker and a Single-Node Kubernetes Cluster on CentOS 7.4: A Complete Walkthrough
I. System Environment Preparation
# 1. Update the system
yum update -y
# 2. Install required utilities
yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools
# 3. Disable the firewall and SELinux
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
# 4. Turn off swap and remove it from /etc/fstab
swapoff -a
sed -i '/swap/d' /etc/fstab
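A quick check that swap is really off (the Swap line should read all zeros and the summary should be empty):
# Verify swap is disabled
free -m | grep -i swap
swapon -s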
# 5. Configure kernel parameters
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
# Apply the settings
sysctl --system
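The two net.bridge.* keys only exist once br_netfilter is loaded (step 6 below), so this first pass may warn about them, which is harmless; the keys that are already present can be checked right away:
# These values are available immediately
sysctl vm.swappiness net.ipv4.ip_forward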
# 6. Load the required kernel modules
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
# Load the modules now
modprobe br_netfilter
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack
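With the modules loaded, confirm they are present and re-check the bridge parameters (all three sysctl values should print 1):
# Confirm the modules are loaded
lsmod | grep -E 'br_netfilter|ip_vs|nf_conntrack'
# Re-apply and verify the bridge-related kernel parameters now that br_netfilter is present
sysctl --system > /dev/null
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward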
# 7. Configure time synchronization
yum install -y chrony
systemctl enable chronyd
systemctl start chronyd
chronyc sources
II. Installing Docker
# 1. Add the Docker repository (Aliyun mirror)
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# 2. Install a pinned Docker version
yum install -y docker-ce-20.10.23 docker-ce-cli-20.10.23 containerd.io
# 3. Create the Docker configuration directory
mkdir -p /etc/docker
# 4. Configure the Docker daemon
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": [
    "https://registry.cn-hangzhou.aliyuncs.com",
    "https://mirror.ccs.tencentyun.com"
  ]
}
EOF
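A quick syntax check before starting Docker catches stray commas; CentOS 7 ships Python 2, whose json.tool module is enough here:
# Validate the JSON (prints the file back if it parses, an error otherwise)
python -m json.tool /etc/docker/daemon.json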
# 5. Create the systemd drop-in directory
mkdir -p /etc/systemd/system/docker.service.d
# 6. Start Docker
systemctl daemon-reload
systemctl enable docker
systemctl start docker
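Once Docker is up, verify the version and that the cgroup driver matches what kubelet will expect (systemd):
# Verify Docker is running and uses the systemd cgroup driver
docker --version
docker info | grep -i 'cgroup driver'
systemctl is-active docker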
III. Installing the Kubernetes Components
# 1. Add the Kubernetes repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# 2. Install the Kubernetes components (pinned to 1.23.0, a relatively stable release for this setup)
yum install -y kubelet-1.23.0 kubeadm-1.23.0 kubectl-1.23.0
# 3. Enable and start kubelet
systemctl enable kubelet
systemctl start kubelet
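kubelet will restart in a loop until kubeadm init has generated its configuration, which is expected at this point. Confirm the installed versions before continuing:
# All three should report 1.23.0
kubeadm version -o short
kubelet --version
kubectl version --client --short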
IV. Initializing the Kubernetes Cluster
# 1. Create the kubeadm configuration file
cat > kubeadm-config.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
  criSocket: /var/run/dockershim.sock
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
networking:
  podSubnet: "10.244.0.0/16"
kubernetesVersion: "v1.23.0"
imageRepository: "registry.cn-hangzhou.aliyuncs.com/google_containers"
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
EOF
# 2. Pre-pull the control-plane images
kubeadm config images pull --config kubeadm-config.yaml
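If you want to see exactly which images this configuration uses, and confirm after the pull that they arrived from the Aliyun mirror:
# List the control-plane images for this configuration
kubeadm config images list --config kubeadm-config.yaml
# After the pull, the images should be present locally
docker images | grep google_containers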
# 3. Initialize the cluster
kubeadm init --config kubeadm-config.yaml
# 4. Configure kubectl (these are the commands printed at the end of kubeadm init)
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
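For root sessions, kubeadm init also offers the alternative of pointing KUBECONFIG at the admin config directly; either way, kubectl should now reach the API server (the node shows NotReady until the network plugin in the next section is installed):
# Alternative for root sessions
export KUBECONFIG=/etc/kubernetes/admin.conf
# Sanity check: the control-plane node should be listed
kubectl get nodes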
V. Installing the Network Plugin (Flannel)
# 1. Download and apply the Flannel manifest
# (the Flannel project has moved to the flannel-io/flannel repository; if this URL stops resolving, fetch kube-flannel.yml from there instead)
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
# 2. Allow the master node to schedule Pods (single-node cluster)
kubectl taint nodes --all node-role.kubernetes.io/master-
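On a single node the taint is present, so the command should report the node as untainted; you can confirm with:
# The Taints field should no longer list node-role.kubernetes.io/master:NoSchedule
kubectl describe node $(hostname) | grep -i taint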
VI. Verifying the Deployment
# 1. Check node status
kubectl get nodes
# 2. Check the status of all Pods
kubectl get pods --all-namespaces
# 3. Deploy a test application
kubectl create deployment nginx --image=nginx:alpine
kubectl expose deployment nginx --port=80 --type=NodePort
# 4. Check the Service
kubectl get svc nginx
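The Service is reachable on the node IP at the allocated NodePort; a quick curl confirms nginx answers (a minimal check that reads the port from kubectl's jsonpath output):
# Fetch the NodePort assigned to the nginx Service and test it
NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl -I http://$(hostname -I | awk '{print $1}'):$NODE_PORT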
VII. Installing the Dashboard (Optional)
1. First, remove any conflicting ClusterRoleBinding
# Delete a ClusterRoleBinding left over from an earlier Dashboard deployment (does nothing if it is absent)
kubectl delete clusterrolebinding kubernetes-dashboard --ignore-not-found
2. Create the new manifest (fixed version)
cat > dashboard-fixed.yaml << EOF
---
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30443
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-role
rules:
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard-role
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: registry.cn-hangzhou.aliyuncs.com/google_containers/dashboard:v2.7.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            - --token-ttl=43200
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            - name: tmp-volume
              mountPath: /tmp
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 60
            timeoutSeconds: 30
          resources:
            requests:
              cpu: 100m
              memory: 200Mi
            limits:
              cpu: 200m
              memory: 400Mi
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
---
kind: Service
apiVersion: v1
metadata:
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-scraper:v1.0.8
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
      volumes:
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
EOF
# 3. Apply the new manifest
kubectl apply -f dashboard-fixed.yaml
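Waiting for the rollout to complete avoids inspecting Pods that are still pulling images:
# Wait until both Deployments are available
kubectl -n kubernetes-dashboard rollout status deployment/kubernetes-dashboard
kubectl -n kubernetes-dashboard rollout status deployment/dashboard-metrics-scraper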
# 4. Create an admin user
cat > dashboard-admin-user.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kubernetes-dashboard
EOF
# 5. Apply the admin-user manifest
kubectl apply -f dashboard-admin-user.yaml
# 6. Verify the deployment
kubectl get pods -n kubernetes-dashboard
# 7. Obtain an access token
# (`kubectl create token` exists only in kubectl v1.24 and later)
kubectl -n kubernetes-dashboard create token admin-user
# With the v1.23 client installed above, read the token from the ServiceAccount's auto-generated secret instead:
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
# 8. Print the access URL
NODE_IP=$(hostname -I | awk '{print $1}')
echo "访问地址: https://$NODE_IP:30443"
3. Verify the deployment status
# Check all resources
kubectl get all -n kubernetes-dashboard
# Check Pod status
kubectl get pods -n kubernetes-dashboard
# If something looks wrong, inspect the details
kubectl describe pod -n kubernetes-dashboard $(kubectl get pods -n kubernetes-dashboard -o jsonpath='{.items[0].metadata.name}')
This fixed manifest:
renames the ClusterRole and ClusterRoleBinding to avoid conflicts with an existing Dashboard deployment
simplifies the configuration to reduce the chance of errors
removes some unnecessary settings that could cause problems
If problems remain after applying it, collect the specific error messages with the commands above before troubleshooting further.
VIII. Troubleshooting Common Issues
1. Docker fails to start
# Check Docker status
systemctl status docker
# View the Docker logs
journalctl -xeu docker
2. kubelet fails to start
# Check kubelet status
systemctl status kubelet
# View the kubelet logs
journalctl -xeu kubelet
3. Image pulls fail
# Test a manual pull
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
4. The network plugin is not working
# Check the network plugin status
kubectl get pods -n kube-system | grep flannel
# View the network plugin logs (substitute your actual Pod name for the xxxxx suffix)
kubectl logs -n kube-system kube-flannel-ds-xxxxx
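Instead of copying the generated Pod name, a label selector reaches the same logs. This assumes the applied kube-flannel.yml labels its Pods app=flannel, as the upstream manifest does; note that newer Flannel manifests deploy into a kube-flannel namespace rather than kube-system:
# Tail the Flannel DaemonSet logs by label instead of by Pod name
kubectl logs -n kube-system -l app=flannel --tail=50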
IX. Maintenance Commands
# 1. View cluster status
kubectl cluster-info
# 2. View detailed node information
kubectl describe node $(hostname)
# 3. View system component status (the componentstatuses API is deprecated but still responds on 1.23)
kubectl get componentstatuses
# 4. View resource usage (requires metrics-server; see the note after these commands)
kubectl top nodes
kubectl top pods --all-namespaces
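kubectl top only returns data once metrics-server is running in the cluster. If it is missing, one way to add it is the upstream manifest below (a sketch; on a single node with self-signed kubelet certificates you typically also have to add the --kubelet-insecure-tls flag to the metrics-server container args):
# Install metrics-server (required for kubectl top)
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
kubectl -n kube-system get pods -l k8s-app=metrics-server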
Notes:
1. Make sure the server meets the minimum requirements:
CPU: at least 2 cores
Memory: at least 2 GB
Disk: at least 20 GB
2. Important configuration checks:
# Check the Docker cgroup configuration
docker info | grep -i cgroup
# Check the kubelet configuration
cat /var/lib/kubelet/config.yaml
# Check system limits
ulimit -a
3. Periodic backups are recommended (a fuller etcdctl example follows below)
# Back up etcd
ETCDCTL_API=3 etcdctl snapshot save snapshot.db
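On a kubeadm-built cluster, etcd requires client certificates, so the bare command above will not connect as written. A fuller sketch using the certificate paths kubeadm generates (etcdctl itself is not installed by these steps; it can be copied from the etcd container or installed separately):
# Snapshot etcd using the kubeadm-generated client certificates
ETCDCTL_API=3 etcdctl snapshot save etcd-snapshot-$(date +%F).db \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key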
If you run into problems during deployment, check the logs of the relevant services:
Docker logs: journalctl -xeu docker
kubelet logs: journalctl -xeu kubelet
System logs: tail -f /var/log/messages
