Ceph Backend Storage for OpenStack
1. Create the storage pools required by the backends
- Pool for Cinder volumes
ceph osd pool create volumes 12
# 12 is the placement-group (PG) count
- Pool for Glance images
ceph osd pool create images 12
- Pool for backups
ceph osd pool create backups 12
- Pool for instance disks
ceph osd pool create vms 12
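On Luminous and later releases, a newly created pool should also be initialized for RBD before clients use it; a minimal sketch:
rbd pool init volumes
rbd pool init images
rbd pool init backups
rbd pool init vms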
2. Create backend users
- On Ceph, create the cinder, glance, and cinder-backup users and their keys, granting them access to the Ceph storage pools (nova reuses the client.cinder key)
- Switch to the ceph configuration directory
cd /etc/ceph/
- Create user client.cinder with rwx permission on the volumes pool, rwx on the vms pool, and rx on the images pool
ceph auth get-or-create client.cinder mon "allow r" osd "allow class-read object_prefix rbd_children,allow rwx pool=volumes,allow rwx pool=vms,allow rx pool=images"
# class-read: a subset of x; grants the user the ability to call class read methods
# object_prefix: matches objects by name prefix. Here, access is restricted to objects whose names begin with rbd_children, in any pool.
- Create user client.glance with rwx permission on the images pool
ceph auth get-or-create client.glance mon "allow r" osd "allow class-read object_prefix rbd_children,allow rwx pool=images"
- Create user client.cinder-backup with read/write access to the backups pool
ceph auth get-or-create client.cinder-backup mon "profile rbd" osd "profile rbd pool=backups"
# The rbd profile defines the access permissions for the new cinder-backup account. Client applications then use this account for block-level access to Ceph storage backed by RADOS Block Devices.
- Export the keyrings and copy them to the controller and compute nodes
ceph auth get client.glance -o ceph.client.glance.keyring
ceph auth get client.cinder -o ceph.client.cinder.keyring
ceph auth get client.cinder-backup -o ceph.client.cinder-backup.keyring
# If the controller node has no /etc/ceph directory, just create it
scp ceph.client.glance.keyring root@controller:/etc/ceph/
scp ceph.client.cinder.keyring root@controller:/etc/ceph/
scp ceph.client.cinder.keyring root@compute:/etc/ceph/
scp ceph.conf root@controller:/etc/ceph/
scp ceph.conf root@compute:/etc/ceph/
# node running the backup service
scp ceph.client.cinder-backup.keyring root@10.0.0.11:/etc/ceph/
======================= Controller node ========================
chown glance.glance /etc/ceph/ceph.client.glance.keyring
chown cinder.cinder /etc/ceph/ceph.client.cinder.keyring
======================= Compute node ========================
chown cinder.cinder /etc/ceph/ceph.client.cinder.keyring
chown cinder.cinder /etc/ceph/ceph.client.cinder-backup.keyring
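The keyrings hold credentials, so tightening their file mode is a reasonable extra step (not part of the original flow, just hygiene):
chmod 0640 /etc/ceph/ceph.client.*.keyring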
======================= All nodes ========================
apt install -y ceph-common
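With ceph-common installed and the keyrings in place, each node can be sanity-checked against the cluster using its own identity; for example:
# on the controller, using the glance key
ceph -s --id glance
# on any node holding the cinder key
ceph -s --id cinder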
3. Add the libvirt secret on the compute nodes
- Generate the secret (note: if there are multiple compute nodes, they must all use the same UUID)
cd /etc/ceph/
UUID=$(uuidgen)
# use > rather than >> so a re-run does not append a second XML document
cat > secret.xml << EOF
<secret ephemeral='no' private='no'>
  <uuid>$UUID</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
- Define the secret in libvirt
[root@compute ~]# virsh secret-define --file secret.xml
Secret bf168fa8-8d5b-4991-ba4c-12ae622a98b1 created
- Load the key into the secret
# copy the key value out of the keyring
[root@compute ~]# cat ceph.client.cinder.keyring
[client.cinder]
	key = AQALyS1jHz4dDRAAEmt+c8JlXWyzxmCx5vobZg==
[root@compute ~]# virsh secret-set-value --secret ${UUID} --base64 $(cat ceph.client.cinder.keyring | grep key | awk -F ' ' '{print $3}')
- List the configured secrets
virsh secret-list
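To confirm the value was stored, the secret can be read back (same UUID as generated above):
virsh secret-get-value ${UUID}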
4. Configure the Glance backend
- Edit the configuration file
vim /etc/glance/glance-api.conf
[glance_store]
#stores = file,http
#default_store = file
#filesystem_store_datadir = /var/lib/glance/images/
stores = rbd,file,http
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
- Restart the service
service glance-api restart
# If the boto3 module is reported missing, install it:
pip3 install boto3 -i https://pypi.douban.com/simple
- Upload an image
openstack image create cirros04 --disk-format qcow2 --file cirros-0.4.0-x86_64-disk.img
- Verify on the Ceph side
rbd ls images
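The object name in the images pool should match the Glance image ID; assuming the openstack CLI is available, one way to cross-check:
rbd info images/$(openstack image show cirros04 -f value -c id)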
5. Configure the Cinder backend
- Edit the configuration file (controller node)
vim /etc/cinder/cinder.conf
[DEFAULT]
# Set the default volume type; otherwise newly created volumes get the type __DEFAULT__
default_volume_type = ceph
# Restart the service
service cinder-scheduler restart
- Edit the configuration file (storage node)
vim /etc/cinder/cinder.conf
[DEFAULT]
enabled_backends = ceph,lvm
[lvm]
volume_backend_name = cinder-volumes
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = bf168fa8-8d5b-4991-ba4c-12ae622a98b1
volume_backend_name = ceph
# Restart the service
service cinder-volume restart
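If the backend is wired up correctly, it should report as up in the service list; assuming admin credentials are loaded:
openstack volume service list
# cinder-volume should appear once per backend, e.g. <host>@ceph and <host>@lvm, with State up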
- Create volume types
Create the volume types in the web UI
Add the extra spec volume_backend_name = ceph to one type and volume_backend_name = cinder-volumes to the other
Manually creating a ceph type and an lvm type is enough (CLI equivalent sketched below)
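A CLI equivalent of the web-UI steps, assuming the type names ceph and lvm:
openstack volume type create ceph
openstack volume type set --property volume_backend_name=ceph ceph
openstack volume type create lvm
openstack volume type set --property volume_backend_name=cinder-volumes lvm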
6. Configure volume backup
- Install the service (storage node)
apt install cinder-backup -y
- Edit the configuration file (storage node)
vim /etc/cinder/cinder.conf
[DEFAULT]
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_conf=/etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 4194304
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
# Restart the service
service cinder-backup restart
- Enable backups in the web dashboard (controller node)
vim /etc/openstack-dashboard/local_settings.py
OPENSTACK_CINDER_FEATURES = {
'enable_backup': True,
}
service apache2 restart
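A quick end-to-end check, assuming a test volume named testvol already exists (the name is only illustrative):
openstack volume backup create --name testvol-bak testvol
# the backup data should land in the backups pool
rbd ls backups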
7. Configure Nova integration with Ceph
- Edit the configuration file (compute nodes)
vim /etc/nova/nova.conf
[DEFAULT]
live_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE"
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = bf168fa8-8d5b-4991-ba4c-12ae622a98b1
7.1 Live-migration configuration (all compute nodes)
vim /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
tcp_port = "16509"
listen_addr = "10.0.0.12" # use your own host's address
auth_tcp = "none"
vim /etc/default/libvirtd
LIBVIRTD_ARGS="--listen"
systemctl mask libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tls.socket libvirtd-tcp.socket
systemctl restart libvirtd
# Test that the nodes can connect to each other
virsh -c qemu+tcp://compute02/system
- Restart the nova service on the compute nodes
service nova-compute restart
- Create an instance without using volume storage (boot from image) to verify the setup; see the check below
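If Nova is writing to the rbd backend, the new instance's disk appears in the vms pool; assuming cluster access:
rbd ls vms
# expect an object named <instance-uuid>_disk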