Ceph Luminous installation and configuration
#Environment: CentOS 7.4
#Ceph version 12.2.4 luminous (stable)
########################### Run the following on ALL nodes
#Use the Aliyun yum mirrors
mkdir -p /etc/yum.repos.d/remark && mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/remark/ #back up the original repo files
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i '/aliyuncs.com/d' /etc/yum.repos.d/*.repo #remove the Aliyun internal (intranet) mirror URLs
#Create the Aliyun Ceph repo
echo '#Aliyun Ceph repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/
gpgcheck=0
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/
gpgcheck=0
[ceph-source]
name=ceph-source
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS/
gpgcheck=0
#'>/etc/yum.repos.d/ceph.repo
yum clean all && yum makecache #rebuild the yum cache
#Disks (2 disks per node): physical servers; after the RAID setup each node has 2 disks left over for OSD use
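#Optional sanity check (not part of the original steps): confirm the Aliyun Ceph repo is enabled and the Luminous packages are visible
yum repolist enabled | grep -i ceph
yum --showduplicates list ceph | tail -n 5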
# [root@controller1 ~]# lsblk
# sdb 0 1.4T 0 disk
# sdc 0 1.4T 0 disk
###########################
#Ceph installation and configuration
###########################
#
#To reinstall from scratch, run the following to remove the packages and data
#ceph-deploy purge {ceph-node} [{ceph-node}]
#Remove the data/configuration only, without uninstalling the packages
#ceph-deploy purgedata {ceph-node} [{ceph-node}]
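#Example with the node names used in this guide, should a full wipe ever be needed (destructive, so kept commented out):
#ceph-deploy purge controller{1,2,3} compute{1,2}
#ceph-deploy purgedata controller{1,2,3} compute{1,2}
#ceph-deploy forgetkeys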
##
#Run all of the following on controller1
# #Ceph node roles
# Mon (Monitor): monitor nodes
# OSD (Object Storage Device): storage nodes
# mgr: manager daemon, mandatory since version 12 (Luminous)
#controller1 is the deploy node
#controller1, controller2 and controller3 are the Mons
#controller{1,2,3} and compute{1,2} are OSD nodes; to save resources, the disks on every node are used for OSDs
#Install the ceph-deploy deployment tool
yum install -y ceph-deploy
#Create the configuration directory (run on ALL nodes)
mkdir /etc/ceph
cd /etc/ceph/
#Initialize the Mon configuration
ceph-deploy new controller{1,2,3}
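#Optional check: ceph-deploy new should have left ceph.conf, ceph.mon.keyring and a deploy log in this directory
ls -l /etc/ceph/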
# #Set the defaults used below: 3 replicas, ~4 GB journal, 128 PGs per pool
echo '
mon_clock_drift_allowed = 2
osd_journal_size = 4096
osd_pool_default_pg_num = 128 #from the formula Total PGs = (Total_number_of_OSD * Target PGs per OSD (usually 100)) / max_replication_count / pool_count (see the worked example after this block)
osd_pool_default_pgp_num = 128
osd pool default size = 3
osd pool default min size = 1
rbd_default_features = 1
client_quota = true
'>>./ceph.conf
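#Worked example of the PG formula above, assuming this guide's layout (10 OSDs, 3 replicas, 3 pools):
#  (10 * 100) / 3 / 3 ≈ 111, rounded up to the next power of two -> 128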
#Install Ceph
#ceph-deploy install controller{1,2,3} compute{1,2}
yum install -y ceph ceph-radosgw #the ceph-deploy command above just installs these two RPMs; to save time, run this yum install on every node in parallel instead
#Initialize the monitors and collect the keys
cd /etc/ceph/
ceph-deploy --overwrite-conf mon create-initial
#Push the configuration and admin keyring
ceph-deploy admin controller{1,2,3} compute{1,2}
chmod 644 /etc/ceph/ceph.client.admin.keyring
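#Quick check (assumes the config and keyring were pushed successfully): any of the five nodes should now be able to query the cluster
ceph -s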
#Create the storage nodes (using the sdb and sdc disks)
#Create the OSDs
ceph-deploy osd create --data /dev/sdb controller1
ceph-deploy osd create --data /dev/sdc controller1
ceph-deploy osd create --data /dev/sdb controller2
ceph-deploy osd create --data /dev/sdc controller2
ceph-deploy osd create --data /dev/sdb controller3
ceph-deploy osd create --data /dev/sdc controller3
ceph-deploy osd create --data /dev/sdb compute1
ceph-deploy osd create --data /dev/sdc compute1
ceph-deploy osd create --data /dev/sdb compute2
ceph-deploy osd create --data /dev/sdc compute2
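#Optional check: with the layout above, all 10 OSDs should report as up and in
ceph osd tree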
#Create mons on each node and push the admin config/keyring (note: this also adds compute1 and compute2 as monitors, on top of the three controllers)
ceph-deploy --overwrite-conf mon create controller1
ceph-deploy --overwrite-conf admin controller1
ceph-deploy --overwrite-conf mon create controller2
ceph-deploy --overwrite-conf admin controller2
ceph-deploy --overwrite-conf mon create controller3
ceph-deploy --overwrite-conf admin controller3
ceph-deploy --overwrite-conf mon create compute1
ceph-deploy --overwrite-conf admin compute1
ceph-deploy --overwrite-conf mon create compute2
ceph-deploy --overwrite-conf admin compute2
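#Optional check: confirm that all monitors have joined the quorum
ceph mon stat
ceph quorum_status --format json-pretty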
#Add the mgr daemons
ceph-deploy mgr create controller1
ceph-deploy mgr create controller2
ceph-deploy mgr create controller3
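#Optional check: one mgr should be active and the others standby (shown in the services section of the status output)
ceph -s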
#Enable the dashboard (run on a mon node)
ceph mgr module enable dashboard
#Access the dashboard at http://ip:7000
# Set the dashboard IP and port; UI access will be reconfigured later once the VIP is set up
# ceph config-key put mgr/dashboard/server_addr 192.168.0.71
# ceph config-key put mgr/dashboard/server_port 7000
# systemctl restart ceph-mgr@controller1
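#Optional check: list the URLs served by the enabled mgr modules; the dashboard endpoint should show up here
ceph mgr services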
#Create the pools
ceph osd pool create volumes 128
ceph osd pool create images 128
ceph osd pool create vms 128
##Initialize the pools
rbd pool init volumes
rbd pool init images
rbd pool init vms
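#Optional smoke test (the image name "test" is just an illustrative choice): create, inspect and remove a small image in the volumes pool
#rbd create volumes/test --size 1024
#rbd info volumes/test
#rbd rm volumes/test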
###########################
#Useful inspection commands
ceph health
ceph -s
ceph osd tree
ceph df
ceph mon stat
ceph osd stat
ceph pg stat
ceph osd lspools
ceph auth list
###########################
#Reference
http://docs.ceph.com/docs/master/start/quick-ceph-deploy/