k8s配置nfs存储类

1.开启nfs-server

# Install and configure the NFS server (run on the NFS server host)
sudo apt install -y nfs-kernel-server   # Ubuntu/Debian (server side)
sudo apt install -y nfs-common          # install on every k8s node (client side)

# Create the shared directory and open permissions for pod UIDs
sudo mkdir -p /data/nfs
sudo chmod 777 /data/nfs

# Edit /etc/exports and add:
# /data/nfs 192.168.1.0/24(rw,sync,no_root_squash,no_subtree_check)

# Reload the export table
sudo exportfs -ra

2.在master节点部署

---
# 1. RBAC configuration for the NFS provisioner.
# ServiceAccount that the provisioner Deployment runs under.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: kube-system
---
# Cluster-wide permissions: watch nodes/PVCs/StorageClasses,
# create and delete PVs, and emit events.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
# Bind the ClusterRole to the provisioner's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced Role for leader election (endpoints-based lock).
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
# Bind the leader-election Role to the same ServiceAccount.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

---
# 2. NFS client provisioner Deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  namespace: kube-system
  labels:
    app: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate            # never run two replicas against the same NFS root at once
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              # Must match the StorageClass .provisioner field exactly.
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.1.100      # ⚠️ change to your NFS server IP
            - name: NFS_PATH
              value: /data/nfs           # ⚠️ change to your NFS export path
            - name: ENABLE_LEADER_ELECTION
              value: "true"
            # NOTE: the provisioner does NOT read a PATH_PATTERN env var.
            # Custom subdirectory naming is configured via the StorageClass
            # parameter `pathPattern` instead.
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.100        # ⚠️ keep in sync with NFS_SERVER
            path: /data/nfs              # ⚠️ keep in sync with NFS_PATH

---
# 3. StorageClass definition.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client                    # storageClassName referenced by PVCs
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"  # make this the cluster default (optional)
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "false"            # on PVC delete: true = rename & keep data, false = remove it
  # pathPattern: "${.PVC.namespace}/${.PVC.name}"  # custom subdirectory naming pattern
mountOptions:
  - hard                              # hard mount: retry NFS operations indefinitely
  - nfsvers=4.1                       # NFS protocol version
  - nolock                            # disable NFS file locking
  - noatime                           # skip access-time updates
reclaimPolicy: Delete                 # Delete or Retain the PV when the PVC goes away
volumeBindingMode: Immediate          # Immediate or WaitForFirstConsumer
allowVolumeExpansion: true            # NOTE(review): the subdir provisioner does not enforce sizes, so expansion is effectively a no-op — confirm

3.创建pvc验证

# List StorageClasses; nfs-client should appear (marked "(default)")
kubectl get sc

# Check that the provisioner pod is Running
kubectl get pods -n kube-system -l app=nfs-client-provisioner

# Create a test PVC against the new StorageClass
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-nfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-client
  resources:
    requests:
      storage: 1Gi
EOF

# Expect STATUS=Bound within a few seconds
kubectl get pvc test-nfs-pvc

4.启用nfs-server磁盘的大小配额(XFS Project Quota)

# Enable XFS project quota on /data.
# Add prjquota to the filesystem's entry in /etc/fstab so it survives reboots:
#   UUID=xxxxx  /data  xfs  defaults,prjquota  0  0
#
# NOTE: XFS cannot activate quota via `mount -o remount`; the filesystem must
# be fully unmounted and mounted again (stop nfs-server first, or reboot).
sudo umount /data && sudo mount /data

# Verify the active mount options
mount | grep " /data "
# should show prjquota
sudo tee /usr/local/bin/set-nfs-quota.sh << 'EOF'
#!/bin/bash
# Assign a 50G XFS project quota to every subdirectory of /data/nfs.
# xfs_quota commands run against the /data mount point; project paths are absolute.

NFS_ROOT="/data/nfs"
MOUNT_POINT="/data"  # XFS mount point carrying prjquota

# Highest project ID already allocated in /etc/projects (start at 100 if none).
MAX_PROJECT_ID=$(awk -F: '{print $1}' /etc/projects 2>/dev/null | sort -n | tail -1)
[ -z "$MAX_PROJECT_ID" ] && MAX_PROJECT_ID=100

# Nothing to do until the NFS root exists.
[ -d "$NFS_ROOT" ] || exit 0

# Walk every immediate subdirectory.
for dir in "$NFS_ROOT"/*/; do
    [ -d "$dir" ] || continue

    # Normalize the path (strip trailing slash) and derive the project name.
    dir=$(realpath "$dir")
    dirname=$(basename "$dir")
    project_name="nfs_${dirname}"
    project_id=""

    # Safety: never treat the NFS root itself as a project.
    [ "$dirname" == "nfs" ] && continue

    # Reuse an existing project ID for this path, else allocate the next one.
    if grep -q ":${dir}$" /etc/projects 2>/dev/null; then
        project_id=$(grep ":${dir}$" /etc/projects | head -1 | cut -d: -f1)
        echo "Existing project found: $project_name (ID: $project_id) for $dir"
    else
        MAX_PROJECT_ID=$((MAX_PROJECT_ID + 1))
        project_id=$MAX_PROJECT_ID
        echo "${project_id}:${dir}" >> /etc/projects
        echo "Created new project: $project_name (ID: $project_id) for $dir"
    fi

    # Record the name->ID mapping in /etc/projid, replacing any stale entry.
    if ! grep -q "^${project_name}:${project_id}$" /etc/projid 2>/dev/null; then
        tmpfile=$(mktemp) || exit 1
        grep -v "^${project_name}:" /etc/projid > "$tmpfile" 2>/dev/null || true
        mv "$tmpfile" /etc/projid
        echo "${project_name}:${project_id}" >> /etc/projid
    fi

    # Tag the directory tree with the project ID (operate on the mount point).
    xfs_quota -x -c "project -s ${project_name}" "$MOUNT_POINT" 2>/dev/null

    # Apply the 50G hard block limit.
    xfs_quota -x -c "limit -p bhard=50g ${project_name}" "$MOUNT_POINT"

    echo "Set 50G quota for: $dir"
done
EOF

sudo chmod +x /usr/local/bin/set-nfs-quota.sh
# Make sure the quota database files exist before the script first runs.
sudo touch /etc/projects /etc/projid

设置监控脚本

sudo tee /usr/local/bin/watch-nfs-dirs.sh << 'EOF'
#!/bin/bash
# Watch /data/nfs for new subdirectories and (re)apply quotas when one appears.
NFS_ROOT="/data/nfs"
LOG_FILE="/var/log/nfs-quota.log"

# inotifywait (from inotify-tools) is required for filesystem event watching.
if ! command -v inotifywait &> /dev/null; then
    echo "Error: inotifywait not found. Install inotify-tools."
    exit 1
fi

echo "$(date): Starting NFS quota watcher on $NFS_ROOT" >> "$LOG_FILE"

# -m: monitor forever; fires on directory creation or move-in.
# IFS= and -r keep paths containing spaces or backslashes intact.
inotifywait -m "$NFS_ROOT" -e create -e moved_to --format '%w%f' 2>/dev/null | while IFS= read -r NEW_PATH; do
    if [ -d "$NEW_PATH" ]; then
        echo "$(date): New directory detected: $NEW_PATH" >> "$LOG_FILE"
        sleep 1   # let the mkdir settle before tagging the project
        /usr/local/bin/set-nfs-quota.sh >> "$LOG_FILE" 2>&1
    fi
done
EOF

sudo chmod +x /usr/local/bin/watch-nfs-dirs.sh

设置systemd

# Install a systemd unit that keeps the directory watcher running.
sudo tee /etc/systemd/system/nfs-quota-watcher.service << 'EOF'
[Unit]
Description=NFS Directory Quota Watcher
After=network.target

[Service]
Type=simple
ExecStart=/usr/local/bin/watch-nfs-dirs.sh
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Register, enable at boot, and start the watcher (auto-restarts on failure).
sudo systemctl daemon-reload
sudo systemctl enable nfs-quota-watcher
sudo systemctl start nfs-quota-watcher

安装依赖并启动

# NOTE(review): the watcher service was already started in the previous step;
# inotify-tools must be installed first or the unit crash-loops until this runs.
sudo apt update && sudo apt install -y inotify-tools xfsprogs

# Apply quotas to any directories that already exist
sudo /usr/local/bin/set-nfs-quota.sh

# (Re)start the watcher service
sudo systemctl daemon-reload
sudo systemctl enable nfs-quota-watcher
sudo systemctl start nfs-quota-watcher

验证

# Confirm the filesystem is mounted with prjquota
mount | grep /data

# Report all project quotas (run against the /data mount point)
sudo xfs_quota -x -c "report -pbh" /data

# List the configured projects in detail
sudo xfs_quota -x -c "project -l" /data

# Create a new directory; the watcher should pick it up
mkdir /data/nfs/testdir
# Re-check after a few seconds — testdir should now show a 50G limit
sudo xfs_quota -x -c "report -pbh" /data

在目录下生成大文件并验证

# Try to allocate past the 50G cap INSIDE the quota-managed directory —
# writing to an arbitrary current directory would not exercise the quota.
# Expected result: "fallocate: fallocate failed: Disk quota exceeded"
fallocate -l 60G /data/nfs/testdir/largefile

 

posted @ 2026-04-18 14:37  ZANAN  阅读(6)  评论(0)    收藏  举报