Ceph部署-xianchao

 

 

。。。


[root@master1-admin yum.repos.d]# ceph --version
ceph version 10.2.11 (e4b061b47f07f583c92a050d9e84b1813a35671e)
[root@master1-admin yum.repos.d]# cd /etc/ceph/
[root@master1-admin ceph]# ceph-deploy new master1-admin node1-monitor node2-osd
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.39): /usr/bin/ceph-deploy new master1-admin node1-monitor node2-osd
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  func                          : <function new at 0x7fcd50d3ab90>
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fcd504abe60>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  ssh_copykey                   : True
[ceph_deploy.cli][INFO  ]  mon                           : ['master1-admin', 'node1-monitor', 'node2-osd']
[ceph_deploy.cli][INFO  ]  public_network                : None
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  cluster_network               : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  fsid                          : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO  ] making sure passwordless SSH succeeds
[master1-admin][DEBUG ] connected to host: master1-admin
[master1-admin][DEBUG ] detect platform information from remote host
[master1-admin][DEBUG ] detect machine type
[master1-admin][DEBUG ] find the location of an executable
[master1-admin][INFO  ] Running command: /usr/sbin/ip link show
[master1-admin][INFO  ] Running command: /usr/sbin/ip addr show
[master1-admin][DEBUG ] IP addresses found: [u'192.168.40.200']
[ceph_deploy.new][DEBUG ] Resolving host master1-admin
[ceph_deploy.new][DEBUG ] Monitor master1-admin at 192.168.40.200
[ceph_deploy.new][INFO  ] making sure passwordless SSH succeeds
[node1-monitor][DEBUG ] connected to host: master1-admin
[node1-monitor][INFO  ] Running command: ssh -CT -o BatchMode=yes node1-monitor
[node1-monitor][DEBUG ] connected to host: node1-monitor
[node1-monitor][DEBUG ] detect platform information from remote host
[node1-monitor][DEBUG ] detect machine type
[node1-monitor][DEBUG ] find the location of an executable
[node1-monitor][INFO  ] Running command: /usr/sbin/ip link show
[node1-monitor][INFO  ] Running command: /usr/sbin/ip addr show
[node1-monitor][DEBUG ] IP addresses found: [u'192.168.40.201']
[ceph_deploy.new][DEBUG ] Resolving host node1-monitor
[ceph_deploy.new][DEBUG ] Monitor node1-monitor at 192.168.40.201
[ceph_deploy.new][INFO  ] making sure passwordless SSH succeeds
[node2-osd][DEBUG ] connected to host: master1-admin
[node2-osd][INFO  ] Running command: ssh -CT -o BatchMode=yes node2-osd
[node2-osd][DEBUG ] connected to host: node2-osd
[node2-osd][DEBUG ] detect platform information from remote host
[node2-osd][DEBUG ] detect machine type
[node2-osd][DEBUG ] find the location of an executable
[node2-osd][INFO  ] Running command: /usr/sbin/ip link show
[node2-osd][INFO  ] Running command: /usr/sbin/ip addr show
[node2-osd][DEBUG ] IP addresses found: [u'192.168.40.202']
[ceph_deploy.new][DEBUG ] Resolving host node2-osd
[ceph_deploy.new][DEBUG ] Monitor node2-osd at 192.168.40.202
[ceph_deploy.new][DEBUG ] Monitor initial members are ['master1-admin', 'node1-monitor', 'node2-osd']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['192.168.40.200', '192.168.40.201', '192.168.40.202']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...

#创建 monitor 节点
#创建一个目录,用于保存 ceph-deploy 生成的配置文件信息的
#生成了如下配置文件
[root@master1-admin ceph]# cd /etc/ceph/
[root@master1-admin ceph]# ll
total 20
-rw-r--r--. 1 root root  261 May  3 16:18 ceph.conf #Ceph 配置文件
-rw-r--r--. 1 root root 5183 May  3 16:18 ceph-deploy-ceph.log #一个日志文件
-rw-------. 1 root root   73 May  3 16:18 ceph.mon.keyring #一个 monitor 密钥环
-rw-r--r--. 1 root root   92 Jul 10  2018 rbdmap

#3 安装 ceph-monitor
[root@master1-admin ceph]# vim ceph.conf
[global]
fsid = 2bd10cba-1485-4395-9755-60ee8ca71fae
mon_initial_members = master1-admin, node1-monitor, node2-osd
mon_host = 192.168.40.200,192.168.40.201,192.168.40.202
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd_pool_default_size = 2
mon clock drift allowed = 0.500
mon clock drift warn backoff = 10


#mon clock drift allowed #监视器间允许的时钟漂移量默认值 0.05
#mon clock drift warn backoff #时钟偏移警告的退避指数。默认值 5
#ceph 对每个 mon 之间的时间同步延时默认要求在 0.05s 以内,这个时间有的时候太短了。所以如果 ceph 集群出现 clock 问题,就检查 ntp 时间同步,或者适当放宽这个误差时间。
#cephx 是认证机制是整个 Ceph 系统的用户名/密码


#配置初始 monitor、收集所有的密钥
[root@master1-admin ceph]# cd /etc/ceph/
You have new mail in /var/spool/mail/root
[root@master1-admin ceph]# ll
total 24
-rw-r--r--. 1 root root  385 May  3 16:36 ceph.conf
-rw-r--r--. 1 root root  261 May  3 16:25 ceph.conf.bak
-rw-r--r--. 1 root root 5183 May  3 16:18 ceph-deploy-ceph.log
-rw-------. 1 root root   73 May  3 16:18 ceph.mon.keyring
-rw-r--r--. 1 root root   92 Jul 10  2018 rbdmap
[root@master1-admin ceph]# ceph-de
ceph-dencoder     ceph-deploy       ceph-detect-init
[root@master1-admin ceph]# ceph-de
ceph-dencoder     ceph-deploy       ceph-detect-init
[root@master1-admin ceph]# ceph-deploy mon create-initial
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.39): /usr/bin/ceph-deploy mon create-initial
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create-initial
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f07b27d6560>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mon at 0x7f07b27aec80>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  keyrings                      : None
[ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts master1-admin node1-monitor node2-osd
[ceph_deploy.mon][DEBUG ] detecting platform for host master1-admin ...
[master1-admin][DEBUG ] connected to host: master1-admin
[master1-admin][DEBUG ] detect platform information from remote host
[master1-admin][DEBUG ] detect machine type
[master1-admin][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO  ] distro info: CentOS Linux 7.9.2009 Core
[master1-admin][DEBUG ] determining if provided host has same hostname in remote
[master1-admin][DEBUG ] get remote short hostname
[master1-admin][DEBUG ] deploying mon to master1-admin
[master1-admin][DEBUG ] get remote short hostname
[master1-admin][DEBUG ] remote hostname: master1-admin
[master1-admin][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[master1-admin][DEBUG ] create the mon path if it does not exist
[master1-admin][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-master1-admin/done
[master1-admin][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-master1-admin/done
[master1-admin][INFO  ] creating keyring file: /var/lib/ceph/tmp/ceph-master1-admin.mon.keyring
[master1-admin][DEBUG ] create the monitor keyring file
[master1-admin][INFO  ] Running command: ceph-mon --cluster ceph --mkfs -i master1-admin --keyring /var/lib/ceph/tmp/ceph-master1-admin.mon.keyring --setuser 167 --setgroup 167
[master1-admin][DEBUG ] ceph-mon: mon.noname-a 192.168.40.200:6789/0 is local, renaming to mon.master1-admin
[master1-admin][DEBUG ] ceph-mon: set fsid to 2bd10cba-1485-4395-9755-60ee8ca71fae
[master1-admin][DEBUG ] ceph-mon: created monfs at /var/lib/ceph/mon/ceph-master1-admin for mon.master1-admin
[master1-admin][INFO  ] unlinking keyring file /var/lib/ceph/tmp/ceph-master1-admin.mon.keyring
[master1-admin][DEBUG ] create a done file to avoid re-doing the mon deployment
[master1-admin][DEBUG ] create the init path if it does not exist
[master1-admin][INFO  ] Running command: systemctl enable ceph.target
[master1-admin][INFO  ] Running command: systemctl enable ceph-mon@master1-admin
[master1-admin][WARNIN] Created symlink from /etc/systemd/system/ceph-mon.target.wants/ceph-mon@master1-admin.service to /usr/lib/systemd/system/ceph-mon@.service.
[master1-admin][INFO  ] Running command: systemctl start ceph-mon@master1-admin
[master1-admin][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.master1-admin.asok mon_status
[master1-admin][DEBUG ] ********************************************************************************
[master1-admin][DEBUG ] status for monitor: mon.master1-admin
[master1-admin][DEBUG ] {
[master1-admin][DEBUG ]   "election_epoch": 0,
[master1-admin][DEBUG ]   "extra_probe_peers": [
[master1-admin][DEBUG ]     "192.168.40.201:6789/0",
[master1-admin][DEBUG ]     "192.168.40.202:6789/0"
[master1-admin][DEBUG ]   ],
[master1-admin][DEBUG ]   "monmap": {
[master1-admin][DEBUG ]     "created": "2023-05-03 16:37:41.100777",
[master1-admin][DEBUG ]     "epoch": 0,
[master1-admin][DEBUG ]     "fsid": "2bd10cba-1485-4395-9755-60ee8ca71fae",
[master1-admin][DEBUG ]     "modified": "2023-05-03 16:37:41.100777",
[master1-admin][DEBUG ]     "mons": [
[master1-admin][DEBUG ]       {
[master1-admin][DEBUG ]         "addr": "192.168.40.200:6789/0",
[master1-admin][DEBUG ]         "name": "master1-admin",
[master1-admin][DEBUG ]         "rank": 0
[master1-admin][DEBUG ]       },
[master1-admin][DEBUG ]       {
[master1-admin][DEBUG ]         "addr": "0.0.0.0:0/1",
[master1-admin][DEBUG ]         "name": "node1-monitor",
[master1-admin][DEBUG ]         "rank": 1
[master1-admin][DEBUG ]       },
[master1-admin][DEBUG ]       {
[master1-admin][DEBUG ]         "addr": "0.0.0.0:0/2",
[master1-admin][DEBUG ]         "name": "node2-osd",
[master1-admin][DEBUG ]         "rank": 2
[master1-admin][DEBUG ]       }
[master1-admin][DEBUG ]     ]
[master1-admin][DEBUG ]   },
[master1-admin][DEBUG ]   "name": "master1-admin",
[master1-admin][DEBUG ]   "outside_quorum": [
[master1-admin][DEBUG ]     "master1-admin"
[master1-admin][DEBUG ]   ],
[master1-admin][DEBUG ]   "quorum": [],
[master1-admin][DEBUG ]   "rank": 0,
[master1-admin][DEBUG ]   "state": "probing",
[master1-admin][DEBUG ]   "sync_provider": []
[master1-admin][DEBUG ] }
[master1-admin][DEBUG ] ********************************************************************************
[master1-admin][INFO  ] monitor: mon.master1-admin is running
[master1-admin][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.master1-admin.asok mon_status
[ceph_deploy.mon][DEBUG ] detecting platform for host node1-monitor ...
[node1-monitor][DEBUG ] connected to host: node1-monitor
[node1-monitor][DEBUG ] detect platform information from remote host
[node1-monitor][DEBUG ] detect machine type
[node1-monitor][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO  ] distro info: CentOS Linux 7.9.2009 Core
[node1-monitor][DEBUG ] determining if provided host has same hostname in remote
[node1-monitor][DEBUG ] get remote short hostname
[node1-monitor][DEBUG ] deploying mon to node1-monitor
[node1-monitor][DEBUG ] get remote short hostname
[node1-monitor][DEBUG ] remote hostname: node1-monitor
[node1-monitor][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node1-monitor][DEBUG ] create the mon path if it does not exist
[node1-monitor][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-node1-monitor/done
[node1-monitor][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-node1-monitor/done
[node1-monitor][INFO  ] creating keyring file: /var/lib/ceph/tmp/ceph-node1-monitor.mon.keyring
[node1-monitor][DEBUG ] create the monitor keyring file
[node1-monitor][INFO  ] Running command: ceph-mon --cluster ceph --mkfs -i node1-monitor --keyring /var/lib/ceph/tmp/ceph-node1-monitor.mon.keyring --setuser 167 --setgroup 167
[node1-monitor][DEBUG ] ceph-mon: mon.noname-b 192.168.40.201:6789/0 is local, renaming to mon.node1-monitor
[node1-monitor][DEBUG ] ceph-mon: set fsid to 2bd10cba-1485-4395-9755-60ee8ca71fae
[node1-monitor][DEBUG ] ceph-mon: created monfs at /var/lib/ceph/mon/ceph-node1-monitor for mon.node1-monitor
[node1-monitor][INFO  ] unlinking keyring file /var/lib/ceph/tmp/ceph-node1-monitor.mon.keyring
[node1-monitor][DEBUG ] create a done file to avoid re-doing the mon deployment
[node1-monitor][DEBUG ] create the init path if it does not exist
[node1-monitor][INFO  ] Running command: systemctl enable ceph.target
[node1-monitor][INFO  ] Running command: systemctl enable ceph-mon@node1-monitor
[node1-monitor][WARNIN] Created symlink from /etc/systemd/system/ceph-mon.target.wants/ceph-mon@node1-monitor.service to /usr/lib/systemd/system/ceph-mon@.service.
[node1-monitor][INFO  ] Running command: systemctl start ceph-mon@node1-monitor
[node1-monitor][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1-monitor.asok mon_status
[node1-monitor][DEBUG ] ********************************************************************************
[node1-monitor][DEBUG ] status for monitor: mon.node1-monitor
[node1-monitor][DEBUG ] {
[node1-monitor][DEBUG ]   "election_epoch": 1,
[node1-monitor][DEBUG ]   "extra_probe_peers": [
[node1-monitor][DEBUG ]     "192.168.40.200:6789/0",
[node1-monitor][DEBUG ]     "192.168.40.202:6789/0"
[node1-monitor][DEBUG ]   ],
[node1-monitor][DEBUG ]   "monmap": {
[node1-monitor][DEBUG ]     "created": "2023-05-03 16:37:43.936527",
[node1-monitor][DEBUG ]     "epoch": 0,
[node1-monitor][DEBUG ]     "fsid": "2bd10cba-1485-4395-9755-60ee8ca71fae",
[node1-monitor][DEBUG ]     "modified": "2023-05-03 16:37:43.936527",
[node1-monitor][DEBUG ]     "mons": [
[node1-monitor][DEBUG ]       {
[node1-monitor][DEBUG ]         "addr": "192.168.40.200:6789/0",
[node1-monitor][DEBUG ]         "name": "master1-admin",
[node1-monitor][DEBUG ]         "rank": 0
[node1-monitor][DEBUG ]       },
[node1-monitor][DEBUG ]       {
[node1-monitor][DEBUG ]         "addr": "192.168.40.201:6789/0",
[node1-monitor][DEBUG ]         "name": "node1-monitor",
[node1-monitor][DEBUG ]         "rank": 1
[node1-monitor][DEBUG ]       },
[node1-monitor][DEBUG ]       {
[node1-monitor][DEBUG ]         "addr": "0.0.0.0:0/2",
[node1-monitor][DEBUG ]         "name": "node2-osd",
[node1-monitor][DEBUG ]         "rank": 2
[node1-monitor][DEBUG ]       }
[node1-monitor][DEBUG ]     ]
[node1-monitor][DEBUG ]   },
[node1-monitor][DEBUG ]   "name": "node1-monitor",
[node1-monitor][DEBUG ]   "outside_quorum": [],
[node1-monitor][DEBUG ]   "quorum": [],
[node1-monitor][DEBUG ]   "rank": 1,
[node1-monitor][DEBUG ]   "state": "electing",
[node1-monitor][DEBUG ]   "sync_provider": []
[node1-monitor][DEBUG ] }
[node1-monitor][DEBUG ] ********************************************************************************
[node1-monitor][INFO  ] monitor: mon.node1-monitor is running
[node1-monitor][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1-monitor.asok mon_status
[ceph_deploy.mon][DEBUG ] detecting platform for host node2-osd ...
[node2-osd][DEBUG ] connected to host: node2-osd
[node2-osd][DEBUG ] detect platform information from remote host
[node2-osd][DEBUG ] detect machine type
[node2-osd][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO  ] distro info: CentOS Linux 7.9.2009 Core
[node2-osd][DEBUG ] determining if provided host has same hostname in remote
[node2-osd][DEBUG ] get remote short hostname
[node2-osd][DEBUG ] deploying mon to node2-osd
[node2-osd][DEBUG ] get remote short hostname
[node2-osd][DEBUG ] remote hostname: node2-osd
[node2-osd][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node2-osd][DEBUG ] create the mon path if it does not exist
[node2-osd][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-node2-osd/done
[node2-osd][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-node2-osd/done
[node2-osd][INFO  ] creating keyring file: /var/lib/ceph/tmp/ceph-node2-osd.mon.keyring
[node2-osd][DEBUG ] create the monitor keyring file
[node2-osd][INFO  ] Running command: ceph-mon --cluster ceph --mkfs -i node2-osd --keyring /var/lib/ceph/tmp/ceph-node2-osd.mon.keyring --setuser 167 --setgroup 167
[node2-osd][DEBUG ] ceph-mon: mon.noname-c 192.168.40.202:6789/0 is local, renaming to mon.node2-osd
[node2-osd][DEBUG ] ceph-mon: set fsid to 2bd10cba-1485-4395-9755-60ee8ca71fae
[node2-osd][DEBUG ] ceph-mon: created monfs at /var/lib/ceph/mon/ceph-node2-osd for mon.node2-osd
[node2-osd][INFO  ] unlinking keyring file /var/lib/ceph/tmp/ceph-node2-osd.mon.keyring
[node2-osd][DEBUG ] create a done file to avoid re-doing the mon deployment
[node2-osd][DEBUG ] create the init path if it does not exist
[node2-osd][INFO  ] Running command: systemctl enable ceph.target
[node2-osd][INFO  ] Running command: systemctl enable ceph-mon@node2-osd
[node2-osd][WARNIN] Created symlink from /etc/systemd/system/ceph-mon.target.wants/ceph-mon@node2-osd.service to /usr/lib/systemd/system/ceph-mon@.service.
[node2-osd][INFO  ] Running command: systemctl start ceph-mon@node2-osd
[node2-osd][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node2-osd.asok mon_status
[node2-osd][DEBUG ] ********************************************************************************
[node2-osd][DEBUG ] status for monitor: mon.node2-osd
[node2-osd][DEBUG ] {
[node2-osd][DEBUG ]   "election_epoch": 1,
[node2-osd][DEBUG ]   "extra_probe_peers": [
[node2-osd][DEBUG ]     "192.168.40.200:6789/0",
[node2-osd][DEBUG ]     "192.168.40.201:6789/0"
[node2-osd][DEBUG ]   ],
[node2-osd][DEBUG ]   "monmap": {
[node2-osd][DEBUG ]     "created": "2023-05-03 16:37:46.846068",
[node2-osd][DEBUG ]     "epoch": 0,
[node2-osd][DEBUG ]     "fsid": "2bd10cba-1485-4395-9755-60ee8ca71fae",
[node2-osd][DEBUG ]     "modified": "2023-05-03 16:37:46.846068",
[node2-osd][DEBUG ]     "mons": [
[node2-osd][DEBUG ]       {
[node2-osd][DEBUG ]         "addr": "192.168.40.200:6789/0",
[node2-osd][DEBUG ]         "name": "master1-admin",
[node2-osd][DEBUG ]         "rank": 0
[node2-osd][DEBUG ]       },
[node2-osd][DEBUG ]       {
[node2-osd][DEBUG ]         "addr": "192.168.40.201:6789/0",
[node2-osd][DEBUG ]         "name": "node1-monitor",
[node2-osd][DEBUG ]         "rank": 1
[node2-osd][DEBUG ]       },
[node2-osd][DEBUG ]       {
[node2-osd][DEBUG ]         "addr": "192.168.40.202:6789/0",
[node2-osd][DEBUG ]         "name": "node2-osd",
[node2-osd][DEBUG ]         "rank": 2
[node2-osd][DEBUG ]       }
[node2-osd][DEBUG ]     ]
[node2-osd][DEBUG ]   },
[node2-osd][DEBUG ]   "name": "node2-osd",
[node2-osd][DEBUG ]   "outside_quorum": [],
[node2-osd][DEBUG ]   "quorum": [],
[node2-osd][DEBUG ]   "rank": 2,
[node2-osd][DEBUG ]   "state": "electing",
[node2-osd][DEBUG ]   "sync_provider": []
[node2-osd][DEBUG ] }
[node2-osd][DEBUG ] ********************************************************************************
[node2-osd][INFO  ] monitor: mon.node2-osd is running
[node2-osd][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node2-osd.asok mon_status
[ceph_deploy.mon][INFO  ] processing monitor mon.master1-admin
[master1-admin][DEBUG ] connected to host: master1-admin
[master1-admin][DEBUG ] detect platform information from remote host
[master1-admin][DEBUG ] detect machine type
[master1-admin][DEBUG ] find the location of an executable
[master1-admin][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.master1-admin.asok mon_status
[ceph_deploy.mon][WARNIN] mon.master1-admin monitor is not yet in quorum, tries left: 5
[ceph_deploy.mon][WARNIN] waiting 5 seconds before retrying
[master1-admin][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.master1-admin.asok mon_status
[ceph_deploy.mon][INFO  ] mon.master1-admin monitor has reached quorum!
[ceph_deploy.mon][INFO  ] processing monitor mon.node1-monitor
[node1-monitor][DEBUG ] connected to host: node1-monitor
[node1-monitor][DEBUG ] detect platform information from remote host
[node1-monitor][DEBUG ] detect machine type
[node1-monitor][DEBUG ] find the location of an executable
[node1-monitor][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1-monitor.asok mon_status
[ceph_deploy.mon][INFO  ] mon.node1-monitor monitor has reached quorum!
[ceph_deploy.mon][INFO  ] processing monitor mon.node2-osd
[node2-osd][DEBUG ] connected to host: node2-osd
[node2-osd][DEBUG ] detect platform information from remote host
[node2-osd][DEBUG ] detect machine type
[node2-osd][DEBUG ] find the location of an executable
[node2-osd][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node2-osd.asok mon_status
[ceph_deploy.mon][INFO  ] mon.node2-osd monitor has reached quorum!
[ceph_deploy.mon][INFO  ] all initial monitors are running and have formed quorum
[ceph_deploy.mon][INFO  ] Running gatherkeys...
[ceph_deploy.gatherkeys][INFO  ] Storing keys in temp directory /tmp/tmp1dTsp_
[master1-admin][DEBUG ] connected to host: master1-admin
[master1-admin][DEBUG ] detect platform information from remote host
[master1-admin][DEBUG ] detect machine type
[master1-admin][DEBUG ] get remote short hostname
[master1-admin][DEBUG ] fetch remote file
[master1-admin][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --admin-daemon=/var/run/ceph/ceph-mon.master1-admin.asok mon_status
[master1-admin][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-master1-admin/keyring auth get client.admin
[master1-admin][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-master1-admin/keyring auth get client.bootstrap-mds
[master1-admin][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-master1-admin/keyring auth get client.bootstrap-mgr
[master1-admin][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-master1-admin/keyring auth get-or-create client.bootstrap-mgr mon allow profile bootstrap-mgr
[master1-admin][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-master1-admin/keyring auth get client.bootstrap-osd
[master1-admin][INFO  ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-master1-admin/keyring auth get client.bootstrap-rgw
[ceph_deploy.gatherkeys][INFO  ] keyring 'ceph.client.admin.keyring' already exists
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-mds.keyring
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-mgr.keyring
[ceph_deploy.gatherkeys][INFO  ] keyring 'ceph.mon.keyring' already exists
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-osd.keyring
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-rgw.keyring
[ceph_deploy.gatherkeys][INFO  ] Destroy temp directory /tmp/tmp1dTsp_

[root@master1-admin ceph]# ll *.keyring
-rw-------. 1 root root 113 May  3 16:37 ceph.bootstrap-mds.keyring
-rw-------. 1 root root  71 May  3 16:37 ceph.bootstrap-mgr.keyring
-rw-------. 1 root root 113 May  3 16:37 ceph.bootstrap-osd.keyring
-rw-------. 1 root root 113 May  3 16:37 ceph.bootstrap-rgw.keyring
-rw-------. 1 ceph ceph 129 May  3 16:37 ceph.client.admin.keyring
-rw-------. 1 root root  73 May  3 16:18 ceph.mon.keyring

#部署 osd 服务
#准备 osd
[root@master1-admin ceph]# cd /etc/ceph/
[root@master1-admin ceph]# ceph-deploy osd prepare master1-admin:/dev/sdb 
[ceph_deploy.osd][DEBUG ] Host master1-admin is now ready for osd use.

[root@master1-admin ceph]# ceph-deploy osd prepare node1-monitor:/dev/sdb 
[ceph_deploy.osd][DEBUG ] Host node1-monitor is now ready for osd use.

[root@master1-admin ceph]# ceph-deploy osd prepare node2-osd:/dev/sdb
[ceph_deploy.osd][DEBUG ] Host node2-osd is now ready for osd use.

#激活 osd
[root@master1-admin ceph]# ceph-deploy osd activate master1-admin:/dev/sdb1
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.39): /usr/bin/ceph-deploy osd activate master1-admin:/dev/sdb1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : activate
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f84247707a0>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7f84247c0578>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  disk                          : [('master1-admin', '/dev/sdb1', None)]
[ceph_deploy.osd][DEBUG ] Activating cluster ceph disks master1-admin:/dev/sdb1:
[master1-admin][DEBUG ] connected to host: master1-admin
[master1-admin][DEBUG ] detect platform information from remote host
[master1-admin][DEBUG ] detect machine type
[master1-admin][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.9.2009 Core
[ceph_deploy.osd][DEBUG ] activating host master1-admin disk /dev/sdb1
[ceph_deploy.osd][DEBUG ] will use init type: systemd
[master1-admin][DEBUG ] find the location of an executable
[master1-admin][INFO  ] Running command: /usr/sbin/ceph-disk -v activate --mark-init systemd --mount /dev/sdb1
[master1-admin][WARNIN] main_activate: path = /dev/sdb1
[master1-admin][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[master1-admin][WARNIN] command: Running command: /usr/sbin/blkid -o udev -p /dev/sdb1
[master1-admin][WARNIN] command: Running command: /sbin/blkid -p -s TYPE -o value -- /dev/sdb1
[master1-admin][WARNIN] command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
[master1-admin][WARNIN] command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
[master1-admin][WARNIN] mount: Mounting /dev/sdb1 on /var/lib/ceph/tmp/mnt.qLXZ2f with options noatime,inode64
[master1-admin][WARNIN] command_check_call: Running command: /usr/bin/mount -t xfs -o noatime,inode64 -- /dev/sdb1 /var/lib/ceph/tmp/mnt.qLXZ2f
[master1-admin][WARNIN] command: Running command: /usr/sbin/restorecon /var/lib/ceph/tmp/mnt.qLXZ2f
[master1-admin][WARNIN] activate: Cluster uuid is 2bd10cba-1485-4395-9755-60ee8ca71fae
[master1-admin][WARNIN] command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[master1-admin][WARNIN] activate: Cluster name is ceph
[master1-admin][WARNIN] activate: OSD uuid is b1ab1ad0-9db0-4f67-9a0c-4a2064020473
[master1-admin][WARNIN] activate: OSD id is 0
[master1-admin][WARNIN] activate: Marking with init system systemd
[master1-admin][WARNIN] command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.qLXZ2f/systemd
[master1-admin][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.qLXZ2f/systemd
[master1-admin][WARNIN] activate: ceph osd.0 data dir is ready at /var/lib/ceph/tmp/mnt.qLXZ2f
[master1-admin][WARNIN] mount_activate: ceph osd.0 already mounted in position; unmounting ours.
[master1-admin][WARNIN] unmount: Unmounting /var/lib/ceph/tmp/mnt.qLXZ2f
[master1-admin][WARNIN] command_check_call: Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.qLXZ2f
[master1-admin][WARNIN] start_daemon: Starting ceph osd.0...
[master1-admin][WARNIN] command_check_call: Running command: /usr/bin/systemctl disable ceph-osd@0
[master1-admin][WARNIN] command_check_call: Running command: /usr/bin/systemctl disable ceph-osd@0 --runtime
[master1-admin][WARNIN] Removed symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service.
[master1-admin][WARNIN] command_check_call: Running command: /usr/bin/systemctl enable ceph-osd@0 --runtime
[master1-admin][WARNIN] Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service to /usr/lib/systemd/system/ceph-osd@.service.
[master1-admin][WARNIN] command_check_call: Running command: /usr/bin/systemctl start ceph-osd@0
[master1-admin][INFO  ] checking OSD status...
[master1-admin][DEBUG ] find the location of an executable
[master1-admin][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[master1-admin][INFO  ] Running command: systemctl enable ceph.target

[root@master1-admin ceph]# ceph-deploy osd activate node1-monitor:/dev/sdb1
[root@master1-admin ceph]# ceph-deploy osd activate node2-osd:/dev/sdb1

#查看状态:
[root@master1-admin ceph]# ceph-deploy osd list master1-admin node1-monitor node2-osd
[root@master1-admin ceph]# ceph-deploy osd list master1-admin node1-monitor node2-osd
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.39): /usr/bin/ceph-deploy osd list master1-admin node1-monitor node2-osd
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : list
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7ff5cb7527a0>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7ff5cb7a2578>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  disk                          : [('master1-admin', None, None), ('node1-monitor', None, None), ('node2-osd', None, None)]
[master1-admin][DEBUG ] connected to host: master1-admin
[master1-admin][DEBUG ] detect platform information from remote host
[master1-admin][DEBUG ] detect machine type
[master1-admin][DEBUG ] find the location of an executable
[master1-admin][DEBUG ] find the location of an executable
[master1-admin][INFO  ] Running command: /bin/ceph --cluster=ceph osd tree --format=json
[master1-admin][DEBUG ] connected to host: master1-admin
[master1-admin][DEBUG ] detect platform information from remote host
[master1-admin][DEBUG ] detect machine type
[master1-admin][DEBUG ] find the location of an executable
[master1-admin][INFO  ] Running command: /usr/sbin/ceph-disk list
[master1-admin][INFO  ] ----------------------------------------
[master1-admin][INFO  ] ceph-0
[master1-admin][INFO  ] ----------------------------------------
[master1-admin][INFO  ] Path           /var/lib/ceph/osd/ceph-0
[master1-admin][INFO  ] ID             0
[master1-admin][INFO  ] Name           osd.0
[master1-admin][INFO  ] Status         up
[master1-admin][INFO  ] Reweight       1.0
[master1-admin][INFO  ] Magic          ceph osd volume v026
[master1-admin][INFO  ] Journal_uuid   4efda1b4-e440-4a83-8303-07b7eb1563ec
[master1-admin][INFO  ] Active         ok
[master1-admin][INFO  ] Device         /dev/sdb1
[master1-admin][INFO  ] Whoami         0
[master1-admin][INFO  ] Journal path   /dev/sdb2
[master1-admin][INFO  ] ----------------------------------------
[node1-monitor][DEBUG ] connected to host: node1-monitor
[node1-monitor][DEBUG ] detect platform information from remote host
[node1-monitor][DEBUG ] detect machine type
[node1-monitor][DEBUG ] find the location of an executable
[node1-monitor][INFO  ] Running command: /usr/sbin/ceph-disk list
[node1-monitor][INFO  ] ----------------------------------------
[node1-monitor][INFO  ] ceph-1
[node1-monitor][INFO  ] ----------------------------------------
[node1-monitor][INFO  ] Path           /var/lib/ceph/osd/ceph-1
[node1-monitor][INFO  ] ID             1
[node1-monitor][INFO  ] Name           osd.1
[node1-monitor][INFO  ] Status         up
[node1-monitor][INFO  ] Reweight       1.0
[node1-monitor][INFO  ] Magic          ceph osd volume v026
[node1-monitor][INFO  ] Journal_uuid   74164538-fadc-429c-9ec0-b36b284dade2
[node1-monitor][INFO  ] Active         ok
[node1-monitor][INFO  ] Device         /dev/sdb1
[node1-monitor][INFO  ] Whoami         1
[node1-monitor][INFO  ] Journal path   /dev/sdb2
[node1-monitor][INFO  ] ----------------------------------------
[node2-osd][DEBUG ] connected to host: node2-osd
[node2-osd][DEBUG ] detect platform information from remote host
[node2-osd][DEBUG ] detect machine type
[node2-osd][DEBUG ] find the location of an executable
[node2-osd][INFO  ] Running command: /usr/sbin/ceph-disk list
[node2-osd][INFO  ] ----------------------------------------
[node2-osd][INFO  ] ceph-2
[node2-osd][INFO  ] ----------------------------------------
[node2-osd][INFO  ] Path           /var/lib/ceph/osd/ceph-2
[node2-osd][INFO  ] ID             2
[node2-osd][INFO  ] Name           osd.2
[node2-osd][INFO  ] Status         up
[node2-osd][INFO  ] Reweight       1.0
[node2-osd][INFO  ] Magic          ceph osd volume v026
[node2-osd][INFO  ] Journal_uuid   338599c8-058d-4a16-b375-02cba2f83ca5
[node2-osd][INFO  ] Active         ok
[node2-osd][INFO  ] Device         /dev/sdb1
[node2-osd][INFO  ] Whoami         2
[node2-osd][INFO  ] Journal path   /dev/sdb2
[node2-osd][INFO  ] ----------------------------------------

#用 fdisk -l 查看磁盘分区情况:
Disk /dev/sdb: 64.4 GB, 64424509440 bytes, 125829120 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: gpt
Disk identifier: 1CBBE73E-8BAE-4A37-BC58-BDF9D4D730E5


#         Start          End    Size  Type            Name
 1     10487808    125829086     55G  Ceph OSD        ceph data
 2         2048     10487807      5G  Ceph Journal    ceph journal

#要使用 Ceph 文件系统,你的 Ceph 的存储集群里至少需要存在一个 Ceph 的元数据服务器(mds)。
[root@master1-admin ceph]# ceph-deploy mds create master1-admin node1-monitor node2-osd
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.39): /usr/bin/ceph-deploy mds create master1-admin node1-monitor node2-osd
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f9492395998>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mds at 0x7f9492369c80>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  mds                           : [('master1-admin', 'master1-admin'), ('node1-monitor', 'node1-monitor'), ('node2-osd', 'node2-osd')]
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph hosts master1-admin:master1-admin node1-monitor:node1-monitor node2-osd:node2-osd
[master1-admin][DEBUG ] connected to host: master1-admin
[master1-admin][DEBUG ] detect platform information from remote host
[master1-admin][DEBUG ] detect machine type
[ceph_deploy.mds][INFO  ] Distro info: CentOS Linux 7.9.2009 Core
[ceph_deploy.mds][DEBUG ] remote host will use systemd
[ceph_deploy.mds][DEBUG ] deploying mds bootstrap to master1-admin
[master1-admin][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[master1-admin][DEBUG ] create path if it doesn't exist
[master1-admin][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.master1-admin osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-master1-admin/keyring
[master1-admin][INFO  ] Running command: systemctl enable ceph-mds@master1-admin
[master1-admin][WARNIN] Created symlink from /etc/systemd/system/ceph-mds.target.wants/ceph-mds@master1-admin.service to /usr/lib/systemd/system/ceph-mds@.service.
[master1-admin][INFO  ] Running command: systemctl start ceph-mds@master1-admin
[master1-admin][INFO  ] Running command: systemctl enable ceph.target
[node1-monitor][DEBUG ] connected to host: node1-monitor
[node1-monitor][DEBUG ] detect platform information from remote host
[node1-monitor][DEBUG ] detect machine type
[ceph_deploy.mds][INFO  ] Distro info: CentOS Linux 7.9.2009 Core
[ceph_deploy.mds][DEBUG ] remote host will use systemd
[ceph_deploy.mds][DEBUG ] deploying mds bootstrap to node1-monitor
[node1-monitor][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node1-monitor][DEBUG ] create path if it doesn't exist
[node1-monitor][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node1-monitor osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-node1-monitor/keyring
[node1-monitor][INFO  ] Running command: systemctl enable ceph-mds@node1-monitor
[node1-monitor][WARNIN] Created symlink from /etc/systemd/system/ceph-mds.target.wants/ceph-mds@node1-monitor.service to /usr/lib/systemd/system/ceph-mds@.service.
[node1-monitor][INFO  ] Running command: systemctl start ceph-mds@node1-monitor
[node1-monitor][INFO  ] Running command: systemctl enable ceph.target
[node2-osd][DEBUG ] connected to host: node2-osd
[node2-osd][DEBUG ] detect platform information from remote host
[node2-osd][DEBUG ] detect machine type
[ceph_deploy.mds][INFO  ] Distro info: CentOS Linux 7.9.2009 Core
[ceph_deploy.mds][DEBUG ] remote host will use systemd
[ceph_deploy.mds][DEBUG ] deploying mds bootstrap to node2-osd
[node2-osd][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node2-osd][DEBUG ] create path if it doesn't exist
[node2-osd][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node2-osd osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-node2-osd/keyring
[node2-osd][INFO  ] Running command: systemctl enable ceph-mds@node2-osd
[node2-osd][WARNIN] Created symlink from /etc/systemd/system/ceph-mds.target.wants/ceph-mds@node2-osd.service to /usr/lib/systemd/system/ceph-mds@.service.
[node2-osd][INFO  ] Running command: systemctl start ceph-mds@node2-osd
[node2-osd][INFO  ] Running command: systemctl enable ceph.target

[root@master1-admin ceph]# ceph fs ls
No filesystems enabled

#一个 cephfs 至少要求两个 librados 存储池,一个为 data,一个为 metadata。当配置这两个存储池时,注意:
1. 为 metadata pool 设置较高级别的副本级别,因为 metadata 的损坏可能导致整个文件系统不用
2. 建议,metadata pool 使用低延时存储,比如 SSD,因为 metadata 会直接影响客户端的响应速度。

#创建存储池
[root@master1-admin ceph]# ceph osd pool create cephfs_data 128
pool 'cephfs_data' created
[root@master1-admin ceph]# ceph osd pool create cephfs_metadata 128
pool 'cephfs_metadata' created
关于创建存储池
确定 pg_num 取值是强制性的,因为不能自动计算。下面是几个常用的值:
*少于 5 个 OSD 时可把 pg_num 设置为 128
*OSD 数量在 5 到 10 个时,可把 pg_num 设置为 512
*OSD 数量在 10 到 50 个时,可把 pg_num 设置为 4096
*OSD 数量大于 50 时,你得理解权衡方法、以及如何自己计算 pg_num 取值
*自己计算 pg_num 取值时可借助 pgcalc 工具
随着 OSD 数量的增加,正确的 pg_num 取值变得更加重要,因为它显著地影响着集群的行为、以及出错时的数据持久性(即灾难性事件导致数据丢失的概率)。


#创建文件系统
创建好存储池后,你就可以用 fs new 命令创建文件系统了
[root@master1-admin ceph]# ceph fs new xianchao cephfs_metadata cephfs_data
new fs with metadata pool 2 and data pool 1
其中:new 后的 fsname 可自定义
[root@master1-admin ceph]# ceph fs ls #查看创建后的 cephfs
[root@master1-admin ceph]# ceph mds stat #查看 mds 节点状态
e7: 1/1/1 up {0=node2-osd=up:active}, 2 up:standby

[root@master1-admin ceph]# ceph -s
    cluster 2bd10cba-1485-4395-9755-60ee8ca71fae
     health HEALTH_OK
     monmap e1: 3 mons at {master1-admin=192.168.40.200:6789/0,node1-monitor=192.168.40.201:6789/0,node2-osd=192.168.40.202:6789/0}
            election epoch 10, quorum 0,1,2 master1-admin,node1-monitor,node2-osd
      fsmap e7: 1/1/1 up {0=node2-osd=up:active}, 2 up:standby
     osdmap e20: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds
      pgmap v54: 320 pgs, 3 pools, 2068 bytes data, 20 objects
            326 MB used, 164 GB / 164 GB avail
                 320 active+clean
HEALTH_OK 表示 ceph 集群正常

 

测试 k8s 挂载 ceph rbd

[root@xianchaomaster1 ~]# kubectl get nodes
NAME              STATUS   ROLES                  AGE   VERSION
xianchaomaster1   Ready    control-plane,master   39d   v1.20.6
xianchaonode1     Ready    worker                 39d   v1.20.6
xianchaonode2     Ready    worker                 39d   v1.20.6
# kubernetes 要想使用 ceph,需要在 k8s 的每个 node 节点安装 ceph-common,把 ceph节点上的 ceph.repo 文件拷贝到 k8s 各个节点/etc/yum.repos.d/目录下,然后在 k8s 的各个节点 yum install ceph-common -y
[root@master1-admin yum.repos.d]# scp ceph.repo 192.168.40.180:/etc/yum.repos.d/
[root@master1-admin yum.repos.d]# scp ceph.repo 192.168.40.181:/etc/yum.repos.d/
[root@master1-admin yum.repos.d]# scp ceph.repo 192.168.40.182:/etc/yum.repos.d/

xianchaomaster1:yum install ceph-common -y
xianchaonode1:yum install ceph-common -y
xianchaonode2:yum install ceph-common -y

#将 ceph 配置文件拷贝到 k8s 的控制节点和工作节点
[root@master1-admin ~]# scp /etc/ceph/* 192.168.40.180:/etc/ceph/
[root@master1-admin ~]# scp /etc/ceph/* 192.168.40.181:/etc/ceph/
[root@master1-admin ~]# scp /etc/ceph/* 192.168.40.182:/etc/ceph/

#创建 ceph rbd
[root@master1-admin ~]# ceph osd pool create k8srbd1 56
pool 'k8srbd1' created
[root@master1-admin ~]# rbd create rbda -s 1024 -p k8srbd1

# 禁用一些没用的特性
[root@master1-admin ~]# rbd feature disable k8srbd1/rbda object-map fast-diff deep-flatten
#创建 pod,挂载 ceph rbd
#把 nginx.tar.gz 上传到 xianchaonode1 上,手动解压
[root@xianchaonode1 ~]# docker load -i nginx.tar.gz
[root@xianchaonode2 ~]# docker load -i nginx.tar.gz

[root@xianchaomaster1 ~]# vim pod.yaml
# Pod that mounts a Ceph RBD image directly via the in-line `rbd` volume
# plugin (no PV/PVC involved). Requires ceph-common and /etc/ceph config
# on the node where the pod is scheduled.
apiVersion: v1
kind: Pod
metadata:
  name: testrbd
spec:
  containers:
    - image: nginx
      name: nginx
      imagePullPolicy: IfNotPresent
      volumeMounts:
      - name: testrbd        # must match the volume name declared below
        mountPath: /mnt      # the RBD image appears here inside the container
  volumes:
    - name: testrbd
      rbd:
        # All three Ceph monitor endpoints; the client tries them in turn.
        monitors:
        - '192.168.40.201:6789'
        - '192.168.40.200:6789'
        - '192.168.40.202:6789'
        pool: k8srbd1        # pool created earlier with `ceph osd pool create k8srbd1 56`
        image: rbda          # image created with `rbd create rbda -s 1024 -p k8srbd1`
        fsType: xfs          # kubelet formats/mounts the image with this filesystem
        readOnly: false
        user: admin          # Ceph client identity used for the map
        keyring: /etc/ceph/ceph.client.admin.keyring  # must exist on every worker node

#更新资源清单文件
[root@xianchaomaster1 ~]# kubectl apply -f pod.yaml
#查看 pod 是否创建成功
[root@xianchaomaster1 ceph]# kubectl get pods -o wide | grep test
testrbd                                          1/1     Running                 0          55s   10.244.121.26    xianchaonode1   <none>           <none>

#注意: k8srbd1 下的 rbda 被 pod 挂载了,那其他 pod 就不能占用这个 k8srbd1 下的rbda 了
例:创建一个 pod-1.yaml
[root@xianchaomaster1 ~]# cat pod-1.yaml 
# Second Pod intended to demonstrate that an RBD image already mounted by
# another pod cannot be mounted again (pod stays in ContainerCreating).
apiVersion: v1
kind: Pod
metadata:
  name: testrbd1
spec:
  containers:
    - image: nginx
      name: nginx
      imagePullPolicy: IfNotPresent
      volumeMounts:
      - name: testrbd
        mountPath: /mnt
  volumes:
    - name: testrbd
      rbd:
        monitors:
        - '192.168.40.201:6789'
        - '192.168.40.200:6789'
        - '192.168.40.202:6789'
        pool: k8srbd1
        # NOTE(review): only image 'rbda' was ever created in pool k8srbd1
        # (`rbd create rbda -s 1024 -p k8srbd1`); 'rbda1' does not exist, so a
        # missing image would also leave this pod in ContainerCreating — the
        # demonstration of the "already mounted" conflict should use image: rbda.
        # Verify before drawing conclusions from this example.
        image: rbda1
        fsType: xfs
        readOnly: false
        user: admin
        keyring: /etc/ceph/ceph.client.admin.keyring

[root@xianchaomaster1 ceph]# kubectl get pods -o wide | grep test
testrbd                                          1/1     Running             0          3m48s   10.244.121.26    xianchaonode1   <none>           <none>
testrbd1                                         0/1     ContainerCreating   0          56s     <none>           xianchaonode1   <none>           <none>

 

基于 ceph rbd 生成 pv 

#1、创建 ceph-secret 这个 k8s secret 对象,这个 secret 对象用于 k8s volume 插件访问ceph 集群,获取 client.admin 的 keyring 值,
#并用 base64 编码,在 master1-admin(ceph管理节点)操作
[root@master1-admin ~]# ceph auth get-key client.admin | base64
QVFCZ0hWSmtGbklUSkJBQUNYWUMzellicW83a2JXL0hFRGRlYmc9PQ==

#2.创建 ceph 的 secret,在 k8s 的控制节点操作:
[root@xianchaomaster1 ceph]# cat ceph-secret.yaml
# Secret holding the Ceph client.admin key for the k8s rbd volume plugin.
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
data:
  # base64 of the admin key, obtained with: ceph auth get-key client.admin | base64
  key: QVFCZ0hWSmtGbklUSkJBQUNYWUMzellicW83a2JXL0hFRGRlYmc9PQ==

[root@xianchaomaster1 ceph]# kubectl apply -f ceph-secret.yaml
secret/ceph-secret created


#3.回到 ceph 管理节点创建 pool 池
[root@master1-admin ~]# ceph osd pool create k8stest 56
pool 'k8stest' created
[root@master1-admin ~]# rbd create rbda -s 1024 -p k8stest
[root@master1-admin ~]# rbd feature disable k8stest/rbda object-map fast-diff deep-flatten

#4、创建 pv
[root@xianchaomaster1 ceph]# cat pv.yaml
# Statically-provisioned PersistentVolume backed by the RBD image
# k8stest/rbda created on the Ceph admin node.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ceph-pv
spec:
   capacity:
     storage: 1Gi           # must be >= any PVC that should bind to this PV
   accessModes:
   - ReadWriteOnce
   rbd:
         # Ceph monitor endpoints (same cluster as the inline-rbd example).
         monitors:
         - '192.168.40.201:6789'
         - '192.168.40.200:6789'
         - '192.168.40.202:6789'
         pool: k8stest      # pool created with `ceph osd pool create k8stest 56`
         image: rbda        # image created with `rbd create rbda -s 1024 -p k8stest`
         user: admin
         secretRef:
             name: ceph-secret   # Secret created from client.admin key above
         fsType: xfs
         readOnly: false
   # NOTE(review): the Recycle reclaim policy is deprecated in current
   # Kubernetes releases; Retain or Delete is recommended — verify against
   # the cluster's Kubernetes version (v1.20.6 here).
   persistentVolumeReclaimPolicy: Recycle

[root@xianchaomaster1 ceph]#  kubectl apply -f pv.yaml
persistentvolume/ceph-pv created

[root@xianchaomaster1 ceph]#  kubectl get pv | grep ceph
ceph-pv                             1Gi        RWO            Recycle          Available                                                                        15s

#5、创建 pvc
[root@xianchaomaster1 ~]# cat pvc.yaml
# Claim with no storageClassName: it binds to a matching pre-created PV
# (ceph-pv above) by capacity and access mode rather than being
# dynamically provisioned.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
   requests:
    storage: 1Gi           # matches the 1Gi capacity of ceph-pv

[root@xianchaomaster1 ceph]# kubectl apply -f pvc.yaml
persistentvolumeclaim/ceph-pvc created
[root@xianchaomaster1 ceph]# kubectl get pvc | grep ceph
ceph-pvc                             Bound    ceph-pv                             1Gi        RWO                           4s

#6、测试挂载 pvc
[root@xianchaomaster1 ceph]# cat pod-2.yaml
# Deployment whose 2 replicas all mount the same ceph-pvc, demonstrating
# that RBD allows ReadWriteOnce sharing across pods on the same node.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
     app: nginx
  replicas: 2 # tells deployment to run 2 pods matching the template
  template: # create pods using pod definition in this template
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
          - mountPath: "/ceph-data"   # where the RBD-backed volume appears
            name: ceph-data
      volumes:
      - name: ceph-data
        persistentVolumeClaim:
            claimName: ceph-pvc       # the statically-bound claim created above

[root@xianchaomaster1 ceph]#  kubectl apply -f pod-2.yaml
deployment.apps/nginx-deployment created
[root@xianchaomaster1 ceph]#  kubectl get pods -l app=nginx
NAME                               READY   STATUS    RESTARTS   AGE
nginx-deployment-fdff5b9c8-9gr8j   1/1     Running   0          4s
nginx-deployment-fdff5b9c8-czlzt   1/1     Running   0          4s

#同一 node 上的多个 pod 可以以 ReadWriteOnce 共享挂载相同的 pvc
[root@xianchaomaster1 ceph]# cat pod-3.yaml
# Second Deployment mounting the same ceph-pvc: together with
# nginx-deployment this shows multiple Deployments sharing one RWO claim
# (which works only while all pods land on the same node).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-pod3
spec:
  selector:
    matchLabels:
     app: nginxv1
  replicas: 2 # tells deployment to run 2 pods matching the template
  template: # create pods using pod definition in this template
    metadata:
      labels:
        app: nginxv1
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
          - mountPath: "/ceph-data"
            name: ceph-data
      volumes:
      - name: ceph-data
        persistentVolumeClaim:
            claimName: ceph-pvc   # same claim as nginx-deployment (shared mount)

[root@xianchaomaster1 ceph]# kubectl apply -f pod-3.yaml
deployment.apps/nginx-deployment-pod3 created

[root@xianchaomaster1 ceph]# kubectl get pods -l app=nginxv1
NAME                                     READY   STATUS    RESTARTS   AGE
nginx-deployment-pod3-5b7b6f7455-b4bzl   1/1     Running   0          13s
nginx-deployment-pod3-5b7b6f7455-p8k5k   1/1     Running   0          13s
【注意】:ceph rbd 块存储的特点
ceph rbd 块存储能在同一个 node 上跨 pod 以 ReadWriteOnce 共享挂载
ceph rbd 块存储能在同一个 node 上同一个 pod 多个容器中以 ReadWriteOnce 共享挂载
ceph rbd 块存储不能跨 node 以 ReadWriteOnce 共享挂载
如果一个使用 ceph rbd 的 pod 所在的 node 挂掉,这个 pod 虽然会被调度到其它 node,但是由于 rbd 不能跨 node 多次挂载和挂掉的 pod 不能自动解绑 pv 的问题,这个新 pod 不会正常运行

Deployment 更新特性:
deployment 触发更新的时候,它确保至少所需 Pods 75% 处于运行状态(最大不可用比例为 25%)。故像一个 pod 的情况,肯定是新创建一个新的 pod,新 pod 运行正常之
后,再关闭老的 pod。
默认情况下,它可确保启动的 Pod 个数比期望个数最多多出 25%

问题:
结合 ceph rbd 共享挂载的特性和 deployment 更新的特性,我们发现原因如下:
由于 deployment 触发更新,为了保证服务的可用性,deployment 要先创建一个 pod
并运行正常之后,再去删除老 pod。而如果新创建的 pod 和老 pod 不在一个 node,就会导致此故障。
解决办法:
1,使用能支持跨 node 和 pod 之间挂载的共享存储,例如 cephfs,GlusterFS 等
2,给 node 添加 label,只允许 deployment 所管理的 pod 调度到一个固定的 node 上。(不建议,这个 node 挂掉的话,服务就故障了)

 基于 storageclass 动态生成 pv

#1、创建 rbd 的供应商 provisioner
#把 rbd-provisioner.tar.gz 上传到 xianchaonode1 上,手动解压
[root@xianchaonode1 ~]# docker load -i rbd-provisioner.tar.gz
[root@xianchaonode2 ~]# docker load -i rbd-provisioner.tar.gz

[root@xianchaomaster1 ceph]# cat rbd-provisioner.yaml
# --- RBAC + Deployment for the out-of-tree ceph.com/rbd external
# provisioner that dynamically creates PVs from StorageClass k8s-rbd. ---

# Cluster-wide permissions: manage PVs, watch PVCs/StorageClasses, emit
# events, and read the DNS services used to resolve monitor names.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
---
# Bind the ClusterRole to the provisioner's ServiceAccount (default ns).
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced permissions: read secrets (the Ceph admin/user keys) and
# manage the endpoints object used for leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: default
---
# Single-replica provisioner; Recreate strategy avoids two instances
# running during an update.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
spec:
  selector:
    matchLabels:
      app: rbd-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: quay.io/xianchao/external_storage/rbd-provisioner:v1
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: ceph-conf
          mountPath: /etc/ceph    # provisioner needs ceph.conf + keyring to reach the cluster
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd     # must equal the StorageClass `provisioner` field
      serviceAccount: rbd-provisioner
      volumes:
      - name: ceph-conf
        hostPath:
          path: /etc/ceph         # host must have the Ceph config copied to /etc/ceph
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner

[root@xianchaomaster1 ceph]#  kubectl apply -f rbd-provisioner.yaml
clusterrole.rbac.authorization.k8s.io/rbd-provisioner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner created
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
deployment.apps/rbd-provisioner created
serviceaccount/rbd-provisioner created
[root@xianchaomaster1 ceph]# kubectl get pods -l app=rbd-provisioner
NAME                               READY   STATUS    RESTARTS   AGE
rbd-provisioner-685746688f-hwvx8   1/1     Running   0          11s

#2、创建 ceph-secret 、创建 pool 池
[root@xianchaomaster1 ceph]# ceph osd pool create k8stest1 56
pool 'k8stest1' created

[root@xianchaomaster1 ceph]# cat ceph-secret-1.yaml
# Secret consumed by the ceph.com/rbd provisioner (both adminSecretName
# and userSecretName in the StorageClass point here).
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-1
type: "ceph.com/rbd"
data:
  # NOTE(review): this value differs from the cluster's client.admin key
  # shown earlier in this document (QVFCZ0hWSmtGbklUSkJBQUNYWUMzellicW83
  # a2JXL0hFRGRlYmc9PQ==, i.e. AQBgHVJkFnITJBAACXYC3zYbqo7kbW/HEDdebg==).
  # A stale/wrong key here would explain the later PVC Pending failure
  # "rbd: couldn't connect to the cluster!" — regenerate with:
  #   ceph auth get-key client.admin | base64
  key: QVFCdkJkWmdzRFNaS3hBQWFuKzVybnNqcjJ6aUEvYXRxRm5RT0E9PQ==

[root@xianchaomaster1 ceph]# kubectl apply -f ceph-secret-1.yaml
secret/ceph-secret-1 created

[root@xianchaomaster1 ceph]# kubectl get secret | grep ceph
ceph-secret                        Opaque                                1      28m
ceph-secret-1                      ceph.com/rbd                          1      22s

#3、创建 storageclass
[root@xianchaomaster1 ceph]# cat storageclass.yaml
# StorageClass that delegates dynamic PV creation to the external
# ceph.com/rbd provisioner deployed above.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: k8s-rbd
provisioner: ceph.com/rbd        # must match PROVISIONER_NAME in the provisioner Deployment
parameters:
  monitors: 192.168.40.201:6789,192.168.40.200:6789,192.168.40.202:6789
  adminId: admin                 # identity used to create RBD images
  adminSecretName: ceph-secret-1 # Secret of type ceph.com/rbd holding the admin key
  pool: k8stest1                 # images for dynamically provisioned PVs go here
  userId: admin                  # identity used by kubelet to map the image
  userSecretName: ceph-secret-1
  fsType: xfs
  imageFormat: "2"               # RBD format 2 is required for feature flags
  imageFeatures: "layering"      # only layering: avoids features krbd can't handle

[root@xianchaomaster1 ceph]# kubectl apply -f storageclass.yaml
storageclass.storage.k8s.io/k8s-rbd created
[root@xianchaomaster1 ceph]# kubectl get storageclass
NAME      PROVISIONER    RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
k8s-rbd   ceph.com/rbd   Delete          Immediate           false                  8m14s

[root@xianchaomaster1 ceph]# kubectl get pods
NAME                                             READY   STATUS                  RESTARTS   AGE
details-v1-65bbfd4f58-2kvrt                      1/2     Running                 8          23d
myapp-777f4ccb8c-g4t85                           1/1     Running                 4          14d
myserver-consumer-deployment-cf88bd494-jgv4w     1/1     Running                 2          10d
myserver-dubboadmin-deployment-fb59448b8-9jfwq   1/1     Running                 2          10d
myserver-provider-deployment-54f5fdc479-lvzs8    1/1     Running                 10         10d
mysql-0                                          2/3     Running                 12         24d
mysql-1                                          0/3     Init:CrashLoopBackOff   23         23d
nginx-deployment-fdff5b9c8-9gr8j                 1/1     Running                 0          32m
nginx-deployment-fdff5b9c8-czlzt                 1/1     Running                 0          32m
nginx-deployment-pod3-5b7b6f7455-b4bzl           1/1     Running                 0          29m
nginx-deployment-pod3-5b7b6f7455-p8k5k           1/1     Running                 0          29m
nginx-v1-79bc94ff97-2gsrs                        1/1     Running                 4          14d
nginx-v2-5f885975d5-7wvlk                        1/1     Running                 4          14d
productpage-v1-6b746f74dc-dnqr9                  1/2     Running                 8          24d
ratings-v1-b45758b-vklgv                         1/2     Running                 8          23d
rbd-provisioner-685746688f-hwvx8                 1/1     Running                 0          18m
reviews-v1-74894b48c8-2w9vw                      1/1     Running                 0          19m
reviews-v1-74894b48c8-mvzhd                      0/2     Evicted                 0          24d
reviews-v2-f649764d-s7jmf                        1/1     Running                 0          19m
reviews-v2-f649764d-zb9b9                        0/2     Evicted                 0          24d
reviews-v3-6c675c6774-c2qc2                      0/2     Evicted                 0          24d
reviews-v3-6c675c6774-vrdrx                      1/1     Running                 0          19m
sleep-557747455f-5kprc                           1/2     Running                 8          23d
testrbd                                          1/1     Running                 0          47m
testrbd1                                         0/1     ContainerCreating       0          45m
zookeeper1-5d9b4645f8-vd5lk                      1/1     Running                 2          10d
zookeeper2-9686b95bf-ghpzm                       1/1     Running                 2          10d
zookeeper3-66f55c8548-fk4ww                      1/1     Running                 2          10d
[root@xianchaomaster1 ceph]# kubectl logs -f rbd-provisioner-685746688f-hwvx8
I0503 12:18:08.709990       1 main.go:85] Creating RBD provisioner ceph.com/rbd with identity: ceph.com/rbd
I0503 12:18:08.710701       1 leaderelection.go:185] attempting to acquire leader lease  default/ceph.com-rbd...
E0503 12:18:08.715278       1 event.go:259] Could not construct reference to: '&v1.Endpoints{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"ceph.com-rbd", GenerateName:"", Namespace:"default", SelfLink:"", UID:"071c941e-c14f-400a-a034-7be8024ce713", ResourceVersion:"4953792", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63818713088, loc:(*time.Location)(0x1bc94e0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string{"control-plane.alpha.kubernetes.io/leader":"{\"holderIdentity\":\"rbd-provisioner-685746688f-hwvx8_908c22f1-e9ac-11ed-a80d-96ca89ff6a76\",\"leaseDurationSeconds\":15,\"acquireTime\":\"2023-05-03T12:18:08Z\",\"renewTime\":\"2023-05-03T12:18:08Z\",\"leaderTransitions\":0}"}, OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Subsets:[]v1.EndpointSubset(nil)}' due to: 'selfLink was empty, can't make reference'. Will not report event: 'Normal' 'LeaderElection' 'rbd-provisioner-685746688f-hwvx8_908c22f1-e9ac-11ed-a80d-96ca89ff6a76 became leader'
I0503 12:18:08.715325       1 leaderelection.go:194] successfully acquired lease default/ceph.com-rbd
I0503 12:18:08.715347       1 controller.go:631] Starting provisioner controller ceph.com/rbd_rbd-provisioner-685746688f-hwvx8_908c22f1-e9ac-11ed-a80d-96ca89ff6a76!
I0503 12:18:08.815639       1 controller.go:680] Started provisioner controller ceph.com/rbd_rbd-provisioner-685746688f-hwvx8_908c22f1-e9ac-11ed-a80d-96ca89ff6a76!

注意:
k8s1.20 版本通过 rbd provisioner 动态生成 pv 会报错:
[root@xianchaomaster1 ~]# kubectl logs rbd-provisioner-685746688f-8mbz
E0418 15:50:09.610071 1 controller.go:1004] provision "default/rbd-pvc" class 
"k8s-rbd": unexpected error getting claim reference: selfLink was empty, can't make 
reference,报错原因是 1.20 版本弃用了 selfLink,解决方法如下:
编辑/etc/kubernetes/manifests/kube-apiserver.yaml
在这里:
spec:
 containers:
 - command:
 - kube-apiserver
添加这一行:
- --feature-gates=RemoveSelfLink=false
===
spec:
  containers:
  - command:
    - kube-apiserver
    - --feature-gates=RemoveSelfLink=false
===
[root@xianchaomaster1 ~]# kubectl apply -f /etc/kubernetes/manifests/kube-apiserver.yaml
[root@xianchaomaster1 ~]# kubectl get pods -n kube-system

#4、创建 pvc
[root@xianchaomaster1 ceph]# cat rbd-pvc.yaml
# Claim that requests dynamic provisioning through StorageClass k8s-rbd;
# the rbd-provisioner creates the backing RBD image and PV on demand.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: rbd-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  storageClassName: k8s-rbd      # selects the dynamic-provisioning class defined above

[root@xianchaomaster1 ceph]# kubectl apply -f rbd-pvc.yaml

#Pending一直报错 没法解决
[root@xianchaomaster1 ceph]# kubectl describe pvc rbd-pvc
Name:          rbd-pvc
Namespace:     default
StorageClass:  k8s-rbd
Status:        Pending
Volume:
Labels:        <none>
Annotations:   volume.beta.kubernetes.io/storage-provisioner: ceph.com/rbd
Finalizers:    [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode:    Filesystem
Used By:       <none>
Events:
  Type     Reason                Age               From                                                                                Message
  ----     ------                ----              ----                                                                                -------
  Normal   ExternalProvisioning  7s (x3 over 20s)  persistentvolume-controller                                                         waiting for a volume to be created, either by external provisioner "ceph.com/rbd" or manually created by system administrator
  Normal   Provisioning          4s (x2 over 20s)  ceph.com/rbd_rbd-provisioner-6fcc78b6cc-drjml_afa8dae3-e9bd-11ed-be1c-3e1004e084af  External provisioner is provisioning volume for claim "default/rbd-pvc"
  Warning  ProvisioningFailed    4s (x2 over 19s)  ceph.com/rbd_rbd-provisioner-6fcc78b6cc-drjml_afa8dae3-e9bd-11ed-be1c-3e1004e084af  failed to provision volume with StorageClass "k8s-rbd": failed to create rbd image: exit status 1, command output: rbd: couldn't connect to the cluster!

k8s 挂载 cephfs

1、创建 ceph 子目录

[root@master1-admin ceph]# ceph fs ls
name: xianchao, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
[root@master1-admin ceph]# cat /etc/ceph/ceph.c
ceph.client.admin.keyring  ceph.conf                  ceph.conf.bak
[root@master1-admin ceph]# cat /etc/ceph/ceph.c
ceph.client.admin.keyring  ceph.conf                  ceph.conf.bak
[root@master1-admin ceph]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
        key = AQBgHVJkFnITJBAACXYC3zYbqo7kbW/HEDdebg==
        caps mds = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
[root@master1-admin ceph]# cat /etc/ceph/ceph.client.admin.keyring | grep key
        key = AQBgHVJkFnITJBAACXYC3zYbqo7kbW/HEDdebg==
[root@master1-admin ceph]# cat /etc/ceph/ceph.client.admin.keyring | grep key | awk -F " " '{print $3}'
AQBgHVJkFnITJBAACXYC3zYbqo7kbW/HEDdebg==
You have new mail in /var/spool/mail/root
[root@master1-admin ceph]# cat /etc/ceph/ceph.client.admin.keyring | grep key | awk -F " " '{print $3}' > /etc/ceph/admin.secret

挂载 cephfs 的根目录到集群的 mon 节点下的一个目录,比如 xianchao_data,因为挂载后,
我们就可以直接在 xianchao_data 下面用 Linux 命令创建子目录了。

[root@master1-admin ceph]# cd
[root@master1-admin ~]# mkdir xianchao_data
[root@master1-admin ~]# mount -t ceph 192.168.40.201:6789:/ /root/xianchao_data -o name=admin,secretfile=/etc/ceph/admin.secret
You have new mail in /var/spool/mail/root
[root@master1-admin ~]# df -h
Filesystem                              Size  Used Avail Use% Mounted on
devtmpfs                                2.0G     0  2.0G   0% /dev
tmpfs                                   2.0G     0  2.0G   0% /dev/shm
tmpfs                                   2.0G   12M  2.0G   1% /run
tmpfs                                   2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/mapper/centos_master1--admin-root   50G  3.3G   47G   7% /
/dev/sda1                              1014M  164M  851M  17% /boot
/dev/mapper/centos_master1--admin-home   46G   33M   46G   1% /home
tmpfs                                   394M     0  394M   0% /run/user/0
/dev/sdb1                                55G  120M   55G   1% /var/lib/ceph/osd/ceph-0
192.168.40.201:6789:/                   165G  368M  165G   1% /root/xianchao_data
[root@master1-admin ~]# cd /root/xianchao_data/
[root@master1-admin xianchao_data]# mkdir testfile
[root@master1-admin xianchao_data]# chmod 0777 testfile/
[root@master1-admin xianchao_data]# echo 123 > testtxt.txt
[root@master1-admin xianchao_data]# ll
total 1
drwxrwxrwx 1 root root 0 May  3 22:35 testfile
-rw-r--r-- 1 root root 4 May  3 22:36 testtxt.txt

2、测试 k8s 的 pod 挂载 cephfs

[root@master1-admin xianchao_data]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
        key = AQBgHVJkFnITJBAACXYC3zYbqo7kbW/HEDdebg==
        caps mds = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
[root@master1-admin xianchao_data]# echo "AQBgHVJkFnITJBAACXYC3zYbqo7kbW/HEDdebg==" | base64
QVFCZ0hWSmtGbklUSkJBQUNYWUMzellicW83a2JXL0hFRGRlYmc9PQo=

[root@xianchaomaster1 ceph]# cat cephfs-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: cephfs-secret
data:
  key: QVFCZ0hWSmtGbklUSkJBQUNYWUMzellicW83a2JXL0hFRGRlYmc9PQo=

[root@xianchaomaster1 ceph]# kubectl apply -f cephfs-secret.yaml
secret/cephfs-secret created
[root@xianchaomaster1 ceph]# kubectl get secret | grep cephfs-secret
cephfs-secret                      Opaque                                1      23s

[root@xianchaomaster1 ceph]# cat cephfs-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cephfs-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - 192.168.40.201:6789
    path: /testfile
    user: admin
    readOnly: false
    secretRef:
        name: cephfs-secret
  persistentVolumeReclaimPolicy: Recycle

[root@xianchaomaster1 ceph]# kubectl apply -f cephfs-pv.yaml
persistentvolume/cephfs-pv created
[root@xianchaomaster1 ceph]# kubectl get pv | grep  cephfs-pv
cephfs-pv                           1Gi        RWX            Recycle          Available                                                                        17s
[root@xianchaomaster1 ceph]# cat cephfs-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  volumeName: cephfs-pv
  resources:
    requests:
      storage: 1Gi
[root@xianchaomaster1 ceph]# kubectl apply -f cephfs-pvc.yaml
persistentvolumeclaim/cephfs-pvc created
[root@xianchaomaster1 ceph]#  kubectl get pvc | grep cephfs-pvc
cephfs-pvc                           Bound     cephfs-pv                           1Gi        RWX                           14s
[root@xianchaomaster1 ceph]# cat cephfs-pod-1.yaml
apiVersion: v1
kind: Pod
metadata:
  name: cephfs-pod-1
spec:
  containers:
    - image: nginx
      name: nginx
      imagePullPolicy: IfNotPresent
      volumeMounts:
      - name: test-v1
        mountPath: /mnt
  volumes:
  - name: test-v1
    persistentVolumeClaim:
      claimName: cephfs-pvc

[root@xianchaomaster1 ceph]#  kubectl apply -f cephfs-pod-1.yaml
pod/cephfs-pod-1 created

[root@xianchaomaster1 ceph]#  cat cephfs-pod-2.yaml
apiVersion: v1
kind: Pod
metadata:
  name: cephfs-pod-2
spec:
  containers:
    - image: nginx
      name: nginx
      imagePullPolicy: IfNotPresent
      volumeMounts:
      - name: test-v1
        mountPath: /mnt
  volumes:
  - name: test-v1
    persistentVolumeClaim:
      claimName: cephfs-pvc
[root@xianchaomaster1 ceph]#  kubectl apply -f cephfs-pod-2.yaml
pod/cephfs-pod-2 created
[root@xianchaomaster1 ceph]#  kubectl exec -it cephfs-pod-1 -- /bin/sh
# cd /mnt
# pwd
/mnt
# touch xks
# exit

[root@xianchaomaster1 ceph]#  kubectl exec -it cephfs-pod-2 -- /bin/sh
# cd /mnt
# touch he-2w^H
# touch he222
# exit

#检验是否写入成功
[root@master1-admin testfile]# pwd
/root/xianchao_data/testfile
[root@master1-admin testfile]# ll
total 0
-rw-r--r-- 1 root root 0 May  3 22:48 he222
-rw-r--r-- 1 root root 0 May  3 22:48 he-2w?
-rw-r--r-- 1 root root 0 May  3 22:48 xks

 

posted @ 2023-05-03 22:53  しみずよしだ  阅读(15)  评论(0)    收藏  举报