openstack pike + ceph +高可用集成 -- Glance 镜像服务 Cluster (七)

##Glance 镜像服务Cluster

###############以下在controller1节点执行

#Glance群集需要使用共享存储,用来存储镜像文件,这里以NFS为例
#先在controller1节点配置,然后拷贝配置到其它节点controller2,controller3
#创建Glance数据库、用户、认证,前面已设置

# Register the Glance service entity and its API endpoints (public,
# internal, admin) in Keystone.
# The glance database, user and role assignment are assumed to have been
# created in an earlier part of this series.
# BUG FIX: the failure branch used a bare "exit", which exits with the
# status of the preceding echo (0); use "exit 1" so failure is visible.
source ./admin-openstack.sh || { echo "加载前面设置的admin-openstack.sh环境变量脚本";exit 1; }
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292

# Install Glance, back up the stock configuration files, and prepare the
# shared image directory.
yum install -y openstack-glance python-glance
cp /etc/glance/glance-api.conf{,.bak}
cp /etc/glance/glance-registry.conf{,.bak}
# Image store directory (package default is /var/lib/glance/images/).
# NOTE(review): "/date/glance" looks like a typo for "/data/glance", but it
# is used consistently throughout this guide (also inside glance-api.conf),
# so it is kept as-is — confirm before renaming.
Imgdir=/date/glance
# Quote the variable and fail loudly if the directory cannot be created.
mkdir -p "$Imgdir" || { echo "无法创建镜像目录 $Imgdir"; exit 1; }
# NOTE(review): group "nobody" is unusual here (glance:glance is typical);
# kept unchanged — confirm intent.
chown glance:nobody "$Imgdir"
echo "镜像目录: $Imgdir"
echo "#
#
[DEFAULT]
debug = False
verbose = True
bind_host = controller1
bind_port = 9292
auth_region = RegionOne
registry_client_protocol = http
[database]
connection = mysql+pymysql://glance:glance@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000/v3
auth_url = http://controller:35357/v3
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
#[glance_store]
#stores = file,http
#default_store = file
#filesystem_store_datadir = /date/glance
[oslo_messaging_rabbit]
rabbit_userid =openstack
rabbit_password = openstack
rabbit_durable_queues=true
rabbit_ha_queues = True
rabbit_max_retries=0
rabbit_port = 5672  
rabbit_hosts = controller1:5672,controller2:5672,controller3:5672
#
[glance_store]
stores = rbd,file
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
#">/etc/glance/glance-api.conf
#
echo "#
#
[DEFAULT]
debug = False
verbose = True
bind_host = controller1
bind_port = 9191
workers = 2
[database]
connection = mysql+pymysql://glance:glance@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000/v3
auth_url = http://controller:35357/v3
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
[oslo_messaging_rabbit]
rabbit_userid =openstack
rabbit_password = openstack
rabbit_durable_queues=true
rabbit_ha_queues = True
rabbit_max_retries=0
rabbit_port = 5672  
rabbit_hosts = controller1:5672,controller2:5672,controller3:5672
#">/etc/glance/glance-registry.conf

# Populate the Glance database schema and verify the tables were created.
# Abort if db_sync fails — continuing would start services against an
# empty schema.
su -s /bin/sh -c "glance-manage db_sync" glance || { echo "glance db_sync 失败"; exit 1; }
# NOTE(review): the password on the command line (-pglance) is visible in
# `ps` and shell history; acceptable for a lab, not for production.
mysql -h controller -u glance -pglance -e "use glance;show tables;"


## Run on controller1.
# Create the ceph client.glance user (read on monitors, rwx on the images
# pool) and write its keyring in a single step.
# The original ran `ceph auth get-or-create` twice — once discarding the
# output, once redirecting it; `-o` does both at once.
cd /etc/ceph/ || { echo "无法进入 /etc/ceph/"; exit 1; }
ceph auth get-or-create client.glance \
  mon 'allow r' \
  osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' \
  -o /etc/ceph/ceph.client.glance.keyring

# Copy the glance keyring to every controller and give glance ownership.
# The three copy-pasted scp/ssh pairs are collapsed into one loop.
for Node in controller1 controller2 controller3; do
  scp /etc/ceph/ceph.client.glance.keyring "$Node":/etc/ceph/
  ssh "$Node" chown glance:glance /etc/ceph/ceph.client.glance.keyring
done
# Enable the Glance services at boot, then (re)start them.
glance_services=(openstack-glance-api openstack-glance-registry)
systemctl enable "${glance_services[@]}"
systemctl restart "${glance_services[@]}"
sleep 3
# Glance runs under python2; confirm its listeners (9292 api, 9191 registry).
netstat -antp|grep python2
#netstat -antp|egrep '9292|9191'

# Append the glance_api_cluster proxy section to HAProxy and restart it.
# Quoted heredoc instead of echo '...' for the multi-line append, and the
# configuration is validated with `haproxy -c` before restarting so a typo
# cannot take the running proxy down.
cat >>/etc/haproxy/haproxy.cfg <<'EOF'

#glance_api_cluster
listen glance_api_cluster
  bind controller:9292
  #balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller1 controller1:9292 check inter 2000 rise 2 fall 5
  server controller2 controller2:9292 check inter 2000 rise 2 fall 5
  server controller3 controller3:9292 check inter 2000 rise 2 fall 5
EOF
haproxy -c -f /etc/haproxy/haproxy.cfg || { echo "haproxy 配置检查失败"; exit 1; }
systemctl restart haproxy.service
netstat -antp|grep haproxy

# Image test: download a cirros image (sometimes slow) and upload it to
# Glance as a public image.
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
source ./admin-openstack.sh
# BUG FIX: the original ran "--disk-format raw--container-format bare"
# (missing space between the two options), which the CLI rejects.
# NOTE(review): cirros *-disk.img files are qcow2; for an rbd backend many
# guides convert to raw with qemu-img first — confirm before relying on
# raw here.
openstack image create "cirros" \
  --file cirros-0.3.5-x86_64-disk.img \
  --disk-format raw \
  --container-format bare \
  --public

# Verify the upload succeeded.
openstack image list

# To delete an image: glance image-delete <image-id>


############### Configure controller2 and controller3 ###############
# Still executed from controller1.
# rsync (not scp) is used so file ownership/permissions are preserved.
# The per-node copy/fix/start commands are collapsed into one loop, and the
# fragile line-range sed (1,10s/controller1/...) is replaced with an
# anchored rewrite of the bind_host line, which is the only controller1
# reference that must change (memcached/rabbit host lists must keep it).
for Node in controller2 controller3; do
  rsync -avzP -e 'ssh -p 22' /etc/glance/* "$Node":/etc/glance/
  rsync -avzP -e 'ssh -p 22' /etc/haproxy/haproxy.cfg "$Node":/etc/haproxy/
  ssh "$Node" "sed -i 's/^bind_host = controller1$/bind_host = $Node/' /etc/glance/glance-api.conf /etc/glance/glance-registry.conf"
  # Enable at boot and start the services (plus haproxy) on each node.
  ssh "$Node" "systemctl enable openstack-glance-api openstack-glance-registry"
  ssh "$Node" "systemctl restart openstack-glance-api openstack-glance-registry haproxy.service"
done

 

posted @ 2018-03-21 15:16  小⑦  阅读(293)  评论(0编辑  收藏  举报