Ceph Cluster Deployment

1. Environment Preparation

Server configuration

Four servers running the CentOS 7.6 operating system; admin is the management node.
192.168.30.15 admin
192.168.30.16 storage1
192.168.30.17 storage2
192.168.30.18 storage3

Configure the yum repository on the admin node

vim /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/x86_64/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/SRPMS/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
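
A quick sanity check on admin that the new repo definitions are picked up (nothing beyond the file above is assumed):
yum repolist enabled | grep -i ceph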

Add hostname resolution entries to /etc/hosts

vim /etc/hosts
192.168.30.15 admin
192.168.30.16 storage1
192.168.30.17 storage2
192.168.30.18 storage3
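
The same four entries should exist on every node; once passwordless SSH is set up in the next step, the file can be pushed out in one go (a sketch, assuming root logins to the storage nodes):
for h in storage1 storage2 storage3; do scp /etc/hosts $h:/etc/hosts; done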

Configure passwordless SSH login

ssh-keygen
ssh-copy-id storage1
ssh-copy-id storage2
ssh-copy-id storage3
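
A quick check that passwordless login works before running ceph-deploy (only the host names above are assumed):
for h in storage1 storage2 storage3; do ssh $h hostname; done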

2. Deploy the Ceph Cluster

# Refresh the package sources and install the ceph-deploy management tool
yum clean all && yum list
yum install python-setuptools ceph-deploy -y
# Create the cluster configuration directory
mkdir /etc/ceph && cd /etc/ceph
# Initialize the monitor node and generate the initial cluster configuration
ceph-deploy new admin
# Edit the ceph.conf configuration file: the default pool replica count is 3; add the line below under [global] to change it to 2
osd_pool_default_size = 2
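# For reference, the [global] section of /etc/ceph/ceph.conf might look like the sketch below after this edit;
# the fsid and mon values are generated by ceph-deploy new, and public_network is an optional, assumed example for this subnet:
[global]
fsid = eae1fd09-7410-446a-bb50-08717bc335ee
mon_initial_members = admin
mon_host = 192.168.30.15
public_network = 192.168.30.0/24
osd_pool_default_size = 2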
# Install the Ceph packages on all nodes
ceph-deploy install admin storage1 storage2 storage3
# Deploy the initial monitor and gather the keys used by the cluster
ceph-deploy mon create-initial
# Push the configuration file and admin keyring to every node in the cluster
ceph-deploy admin storage1 storage2 storage3
# Deploy mgr daemons to manage the cluster
ceph-deploy mgr create storage1 storage2 storage3
# Verify with ceph -s; once the rgw, mds, and osd steps below have also been run, the output should look like this and the cluster build is complete
[root@admin ceph]# ceph -s
  cluster:
    id:     eae1fd09-7410-446a-bb50-08717bc335ee
    health: HEALTH_OK
 
  services:
    mon: 1 daemons, quorum admin
    mgr: storage1(active), standbys: storage2, storage3
    osd: 3 osds: 3 up, 3 in
    rgw: 3 daemons active
 
  data:
    pools:   6 pools, 288 pgs
    objects: 221  objects, 2.2 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     288 active+clean

# Deploy RGW to provide object storage
ceph-deploy rgw create storage1 storage2 storage3
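# Quick check that the gateways answer; this assumes the default civetweb port 7480 that ceph-deploy-created RGW instances listen on
curl http://storage1:7480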

# Deploy MDS to provide CephFS [not used for now]
ceph-deploy mds create storage1 storage2 storage3

# Add OSDs to the cluster
ceph-deploy osd create storage1 --data /dev/sdb
ceph-deploy osd create storage2 --data /dev/sdb
ceph-deploy osd create storage3 --data /dev/sdb
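# If /dev/sdb on a node still carries an old partition table or LVM metadata, OSD creation can fail;
# wiping the disk first with ceph-deploy is the usual remedy (destructive, so only run it on disks intended for Ceph), e.g.:
ceph-deploy disk zap storage1 /dev/sdb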

# Check the OSD status with ceph -s

3. Mount CephFS
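
Before the first mount, the cluster also needs a CephFS filesystem; the MDS daemons from section 2 alone are not enough. A minimal sketch, using example pool names and PG counts (both are assumptions; run once from a node with the admin keyring):
ceph osd pool create cephfs_data 64
ceph osd pool create cephfs_metadata 64
ceph fs new cephfs cephfs_metadata cephfs_data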

Mount:
ceph-fuse -m 192.168.30.15:6789 /opt/ -k ceph.client.admin.keyring
Unmount:
umount /opt/
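
If the mount is done from a machine other than admin, the ceph-fuse package and the cluster's config and keyring have to be in place first (a sketch; the paths are the ceph-deploy defaults used above):
yum install -y ceph-fuse
scp admin:/etc/ceph/ceph.conf admin:/etc/ceph/ceph.client.admin.keyring /etc/ceph/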

 
