1. OpenStack VM migration across nodes
2. OpenStack dynamic cloud-disk resizing with LVM and NFS
3. OpenStack high availability

#==============================================================

1 OpenStack VM migration across nodes

#When an instance is migrated, its disk files are copied to the other node by the nova user, so nova needs SSH access between nodes

#Give the nova user a login shell
#Run on every node
# usermod -s /bin/bash nova
#Set a password for the nova user
# echo nova | passwd --stdin nova
#Switch to the nova user, generate an SSH key pair, and copy it to every other node
# su - nova
$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
$ ssh-copy-id nova@10.0.0.47
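
#Quick check: passwordless SSH between nova users should now work (the IP follows this walkthrough)
$ ssh nova@10.0.0.47 hostname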

#Admin -> Instances -> Migrate Instance -> Confirm Resize/Migrate

#===================================================================================
#Dynamic flavor resizing
#Project -> Compute -> Instances -> target instance -> Resize Instance -> New Flavor: choose the desired flavor
                                      Resize
                                      Confirm Resize/Migrate

2 OpenStack dynamic cloud-disk resizing with LVM and NFS

2.1 Dynamic cloud-disk resizing with LVM

2.1.1 Database setup

#Create the cinder database and grant privileges
# mysql -e "create database cinder"
# mysql -e "grant all privileges on cinder.* to 'cinder'@'%' identified by 'cinder123'"

2.1.2 Controller node

# source admin-openrc.sh

#Create the cinder user
# openstack user create --domain default --password-prompt cinder
 Enter the password twice: cinder

#Grant the cinder user the admin role on the service project
# openstack role add --project service --user cinder admin

#Create the service entities        #registers the backend service, similar to a k8s Service name
# openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
# openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3

#Create the service endpoints                #similar to a k8s Pod
# openstack endpoint create --region RegionOne volumev2 public http://openstack-vip.testou.com:8776/v2/%\(project_id\)s
# openstack endpoint create --region RegionOne volumev2 internal http://openstack-vip.testou.com:8776/v2/%\(project_id\)s
# openstack endpoint create --region RegionOne volumev2 admin http://openstack-vip.testou.com:8776/v2/%\(project_id\)s
# openstack endpoint create --region RegionOne volumev3 public http://openstack-vip.testou.com:8776/v3/%\(project_id\)s
# openstack endpoint create --region RegionOne volumev3 internal http://openstack-vip.testou.com:8776/v3/%\(project_id\)s
# openstack endpoint create --region RegionOne volumev3 admin http://openstack-vip.testou.com:8776/v3/%\(project_id\)s
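
#Optional check: list the registered endpoints for the new services
# openstack endpoint list --service volumev2
# openstack endpoint list --service volumev3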

#=================================================================================
#Add a layer-4 proxy entry on the haproxy host
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-cinder-8776
  bind 172.20.0.248:8776
  mode tcp        #must be tcp; the default is http
  server 172.20.0.7 172.20.0.7:8776 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#=================================================================================
#Install the cinder packages
# yum -y install openstack-cinder

#Edit the cinder configuration file
# vi /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:cinder123@openstack-vip.testou.com/cinder
[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.testou.com
auth_strategy = keystone
my_ip = 172.20.0.7
[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.testou.com:5000
auth_url = http://openstack-vip.testou.com:5000
memcached_servers = openstack-vip.testou.com:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

#Populate the cinder database
# su - cinder -s /bin/sh -c "cinder-manage db sync"

#Confirm the cinder tables were created
# mysql -ucinder -pcinder123 -h172.20.0.248 -e "show tables from cinder"

#Edit the nova configuration file
# vi /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne            #tells nova which region cinder is in

#Restart the nova API service
# systemctl restart openstack-nova-api

#Start cinder-api and cinder-scheduler and enable them at boot
# systemctl enable --now openstack-cinder-api.service openstack-cinder-scheduler

#=================================================================================
#Verification
#Watch the logs
# tail -f /var/log/cinder/*

#A JSON response from curl means the API is up
# curl http://openstack-vip.testou.com:8776

#Check service status; cinder-scheduler should be up
# openstack volume service list

2.1.3 Cinder storage node

#Base configuration

#Configure the yum repositories
#Install the OpenStack Train release repository
# yum -y install centos-release-openstack-train
#Switch the mirrors to Aliyun
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/CentOS-OpenStack-train.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/CentOS-OpenStack-train.repo

#Install the RDO yum repository, which carries newer stable packages (RDO is the community OpenStack distribution for RHEL-family systems)
# yum -y install https://rdoproject.org/repos/rdo-release.rpm
#Switch the mirrors to Aliyun
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-release.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-release.repo

#Configure the hosts file
# vi /etc/hosts
 ...
 172.20.0.248 openstack-vip.testou.com

#Install the OpenStack client and the openstack-selinux package
# yum -y install python-openstackclient openstack-selinux

#=================================================================================
#Attach a new disk to the storage node
#Rescan the SCSI buses so the disk appears without a reboot
# for i in {0..2}; do echo - - - > /sys/class/scsi_host/host${i}/scan; done

#Install the LVM packages
# yum install lvm2 device-mapper-persistent-data

#Start the LVM metadata service and enable it at boot
# systemctl enable --now lvm2-lvmetad

#Create the PV and VG
# pvcreate /dev/sdb
# vgcreate cinder-volumes /dev/sdb

#Restart the LVM metadata service
# systemctl restart lvm2-lvmetad
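
#Quick check: the VG should exist and report the new disk's capacity
# vgs cinder-volumes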

#=================================================================================
#Install the cinder packages
# yum -y install openstack-cinder targetcli python-keystone

#Edit the cinder configuration file
# vi /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:cinder123@openstack-vip.testou.com/cinder
[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.testou.com
auth_strategy = keystone
my_ip = 172.20.0.57
enabled_backends = lvm
glance_api_servers = http://openstack-vip.testou.com:9292
[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.testou.com:5000
auth_url = http://openstack-vip.testou.com:5000
memcached_servers = openstack-vip.testou.com:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

#Start cinder-volume and target and enable them at boot
# systemctl enable --now openstack-cinder-volume target

#=================================================================================
#Verification

#Logs
# tail -f /var/log/cinder/volume.log

#Check service status; cinder-volume should be up
# openstack volume service list

#Log back in to the web UI; Project -> Volumes and Admin -> Volumes now appear in the left menu

#Create a volume
 Project -> Volumes -> Volumes -> Create Volume -> Volume Name: cinder-lvm-20G
                       Size (GiB): 20
                       Availability Zone: nova
                       Create Volume

#On the storage node
# lvs | grep cinder
  cinder-volumes-pool                         cinder-volumes twi-aotz-- 190.00g                            0.00   10.43         
  volume-e96f750a-25e9-46cf-8d72-48d7e54ea33a cinder-volumes Vwi-a-tz--  20.00g cinder-volumes-pool        0.00

#Attach the volume
 Project -> Volumes -> Volumes -> cinder-lvm-20G -> Manage Attachments: select instance vm1
                                Attach Volume
 
#Inside instance vm1, format and mount the volume
# mkfs.ext4 /dev/vdb
# mount /dev/vdb /mnt
# df -h | grep mnt
 /dev/vdb         20G   45M   19G   1% /mnt
#Write some data
# cp /etc/passwd /mnt/

#Test volume expansion
#Unmount inside vm1
# umount /mnt
#Detach the volume
 Project -> Volumes -> Volumes -> cinder-lvm-20G -> Manage Attachments: Detach Volume
                                Detach Volume
#Extend the volume
 Project -> Volumes -> Volumes -> cinder-lvm-20G -> Extend Volume -> New Size (GiB): 25
                                                 Extend Volume
#Re-attach the volume
 Project -> Volumes -> Volumes -> cinder-lvm-20G -> Manage Attachments: select instance vm1
                                Attach Volume
#Inside vm1, mount again
# mount /dev/vdb /mnt
# df -h | grep mnt                                #still shows the original size
 /dev/vdb         20G   45M   19G   1% /mnt
# resize2fs /dev/vdb                            #grow the filesystem
# df -h | grep mnt                                #expansion succeeded
 /dev/vdb         25G   44M   24G   1% /mnt
#The old data is still there
# ls /mnt/passwd 
 /mnt/passwd
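
#The same create/attach/extend cycle can also be driven from the CLI instead of the dashboard (a sketch; the volume and instance names follow this walkthrough)
# openstack volume create --size 20 --availability-zone nova cinder-lvm-20G
# openstack server add volume vm1 cinder-lvm-20G
# openstack server remove volume vm1 cinder-lvm-20G        #detach before extending
# openstack volume set --size 25 cinder-lvm-20G
# openstack server add volume vm1 cinder-lvm-20G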

2.2 Dynamic cloud-disk resizing with NFS

2.2.1 Database setup

#Create the cinder database and grant privileges
# mysql -e "create database cinder"
# mysql -e "grant all privileges on cinder.* to 'cinder'@'%' identified by 'cinder123'"

2.2.2 Controller node

# source admin-openrc.sh

#Create the cinder user
# openstack user create --domain default --password-prompt cinder
 Enter the password twice: cinder

#Grant the cinder user the admin role on the service project
# openstack role add --project service --user cinder admin

#Create the service entities        #registers the backend service, similar to a k8s Service name
# openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
# openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3

#Create the service endpoints                #similar to a k8s Pod
# openstack endpoint create --region RegionOne volumev2 public http://openstack-vip.testou.com:8776/v2/%\(project_id\)s
# openstack endpoint create --region RegionOne volumev2 internal http://openstack-vip.testou.com:8776/v2/%\(project_id\)s
# openstack endpoint create --region RegionOne volumev2 admin http://openstack-vip.testou.com:8776/v2/%\(project_id\)s
# openstack endpoint create --region RegionOne volumev3 public http://openstack-vip.testou.com:8776/v3/%\(project_id\)s
# openstack endpoint create --region RegionOne volumev3 internal http://openstack-vip.testou.com:8776/v3/%\(project_id\)s
# openstack endpoint create --region RegionOne volumev3 admin http://openstack-vip.testou.com:8776/v3/%\(project_id\)s

#=================================================================================
#Add a layer-4 proxy entry on the haproxy host
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-cinder-8776
  bind 172.20.0.248:8776
  mode tcp        #must be tcp; the default is http
  server 172.20.0.7 172.20.0.7:8776 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#=================================================================================
#Install the cinder packages
# yum -y install openstack-cinder

#Edit the cinder configuration file
# vi /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:cinder123@openstack-vip.testou.com/cinder
[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.testou.com
auth_strategy = keystone
my_ip = 172.20.0.7
[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.testou.com:5000
auth_url = http://openstack-vip.testou.com:5000
memcached_servers = openstack-vip.testou.com:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

#Populate the cinder database
# su - cinder -s /bin/sh -c "cinder-manage db sync"

#Confirm the cinder tables were created
# mysql -ucinder -pcinder123 -h172.20.0.248 -e "show tables from cinder"

#Edit the nova configuration file
# vi /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne            #tells nova which region cinder is in

#Restart the nova API service
# systemctl restart openstack-nova-api

#Start cinder-api and cinder-scheduler and enable them at boot
# systemctl enable --now openstack-cinder-api.service openstack-cinder-scheduler

#=================================================================================
#Verification
#Watch the logs
# tail -f /var/log/cinder/*

#A JSON response from curl means the API is up
# curl http://openstack-vip.testou.com:8776

#Check service status; cinder-scheduler should be up
# openstack volume service list

2.2.3 Cinder storage node

#Base configuration

#Configure the yum repositories
#Install the OpenStack Train release repository
# yum -y install centos-release-openstack-train
#Switch the mirrors to Aliyun
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/CentOS-OpenStack-train.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/CentOS-OpenStack-train.repo

#Install the RDO yum repository, which carries newer stable packages (RDO is the community OpenStack distribution for RHEL-family systems)
# yum -y install https://rdoproject.org/repos/rdo-release.rpm
#Switch the mirrors to Aliyun
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-release.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-release.repo

#Configure the hosts file
# vi /etc/hosts
 ...
 172.20.0.248 openstack-vip.testou.com

#Install the OpenStack client and the openstack-selinux package
# yum -y install python-openstackclient openstack-selinux

#=================================================================================
#Install the NFS server
# yum -y install nfs-utils

#Edit the NFS exports file
# vi /etc/exports
 /nfsdata *(rw,no_root_squash)

#Create the export directory
# mkdir /nfsdata

#Start the NFS server and enable it at boot
# systemctl enable --now nfs-server

#Verify
# showmount -e 172.20.0.57
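
#Expected output (roughly):
 Export list for 172.20.0.57:
 /nfsdata *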

#=================================================================================
#Install the cinder packages
# yum -y install openstack-cinder targetcli python-keystone

#Edit the cinder configuration file
# vi /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:cinder123@openstack-vip.testou.com/cinder
[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.testou.com
auth_strategy = keystone
my_ip = 172.20.0.57
enabled_backends = nfs
glance_api_servers = http://openstack-vip.testou.com:9292
[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.testou.com:5000
auth_url = http://openstack-vip.testou.com:5000
memcached_servers = openstack-vip.testou.com:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder
[nfs]
volume_backend_name = openstack-NFS                        #backend name, referenced later when associating the volume type
volume_driver = cinder.volume.drivers.nfs.NfsDriver     #NFS driver
nfs_shares_config = /etc/cinder/nfs_shares                 #file listing the NFS shares to mount
nfs_mount_point_base = $state_path/mnt                     #base directory for the NFS mount points
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

#Create the NFS shares file
# echo '172.20.0.57:/nfsdata' > /etc/cinder/nfs_shares
# chown root.cinder /etc/cinder/nfs_shares

#Start cinder-volume and target and enable them at boot
# systemctl enable --now openstack-cinder-volume target

#=================================================================================
#Verification

#The NFS share is mounted automatically
# df -h | grep nfsdata
 172.20.0.57:/nfsdata     100G  2.4G   98G   3% /var/lib/cinder/mnt/a66c6662ee2eb327f21f2f89d4183c1b

#Logs
# tail -f /var/log/cinder/volume.log

#Check service status; cinder-volume should be up
# openstack volume service list

#Create a volume type and associate it with the backend
# openstack volume type create nfs
# cinder type-key nfs set volume_backend_name=openstack-NFS
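
#Optional check: the type's properties should now include volume_backend_name='openstack-NFS'
# openstack volume type show nfs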

#Log back in to the web UI; Project -> Volumes and Admin -> Volumes now appear in the left menu

#Create a volume
 Project -> Volumes -> Volumes -> Create Volume -> Volume Name: cinder-nfs-20G
                       Type: nfs
                       Size (GiB): 20
                       Availability Zone: nova
                       Create Volume

#Attach the volume
 Project -> Volumes -> Volumes -> cinder-nfs-20G -> Manage Attachments: select instance vm1
                                Attach Volume
 
#Inside instance vm1, format and mount the volume
# mkfs.ext4 /dev/vdb
# mount /dev/vdb /mnt
# df -h | grep mnt
 /dev/vdb         20G   45M   19G   1% /mnt
#Write some data
# cp /etc/passwd /mnt/

#Test volume expansion
#Unmount inside vm1
# umount /mnt
#Detach the volume
 Project -> Volumes -> Volumes -> cinder-nfs-20G -> Manage Attachments: Detach Volume
                                Detach Volume
#Extend the volume
 Project -> Volumes -> Volumes -> cinder-nfs-20G -> Extend Volume -> New Size (GiB): 25
                                                 Extend Volume
#Re-attach the volume
 Project -> Volumes -> Volumes -> cinder-nfs-20G -> Manage Attachments: select instance vm1
                                Attach Volume
#Inside vm1, mount again
# mount /dev/vdb /mnt
# df -h | grep mnt                                #still shows the original size
 /dev/vdb         20G   45M   19G   1% /mnt
# resize2fs /dev/vdb                            #grow the filesystem
# df -h | grep mnt                                #expansion succeeded
 /dev/vdb         25G   44M   24G   1% /mnt
#The old data is still there
# ls /mnt/passwd 
 /mnt/passwd

#Verify the backing file on the NFS node
# ll /nfsdata/volume-4b4322be-7c29-4a5f-9c93-37affd4515d8
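
#The volume is stored as a sparse file under the export; apparent vs. actually allocated size can be compared (plain coreutils, not part of the original steps)
# ls -lh /nfsdata/
# du -sh /nfsdata/volume-*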

3 OpenStack high availability

   Build controller2 on top of the existing controller1 environment

3.1 Base configuration

#Configure the yum repositories
#Install the OpenStack Train release repository
# yum -y install centos-release-openstack-train
#Switch the mirrors to Aliyun
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/CentOS-OpenStack-train.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/CentOS-OpenStack-train.repo

#Install the RDO yum repository, which carries newer stable packages (RDO is the community OpenStack distribution for RHEL-family systems)
# yum -y install https://rdoproject.org/repos/rdo-release.rpm
#Switch the mirrors to Aliyun
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-release.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-release.repo

#Configure the hosts file
# echo '172.20.0.248 openstack-vip.testou.com' >> /etc/hosts

#Install the OpenStack client and the openstack-selinux package
# yum -y install python-openstackclient openstack-selinux

#Install the MySQL client module (only controllers connect to MySQL)
# yum -y install python2-PyMySQL

#Install the memcached client module
# yum -y install python-memcached

#Install libibverbs, which provides libibverbs.so.1
# yum -y install libibverbs

3.2 keystone

#Install the keystone packages
# yum -y install openstack-keystone httpd mod_wsgi        #mod_wsgi lets httpd serve Python applications

#Edit the keystone configuration file
# vi /etc/keystone/keystone.conf
 [database]                        #database address, user, and password
 connection = mysql+pymysql://keystone:keystone123@openstack-vip.testou.com/keystone
 [token]                        #tokens are valid for 1 hour by default
 [credential]
 provider = fernet

#Copy the fernet and credential keys generated when controller1 initialized its key repositories over to controller2
# rsync -a /etc/keystone/fernet-keys 10.0.0.47:/etc/keystone/
# rsync -a /etc/keystone/credential-keys 10.0.0.47:/etc/keystone/
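
#Sanity check (assumes root SSH to controller2): the key files must be byte-identical on both controllers
# md5sum /etc/keystone/fernet-keys/*
# ssh 10.0.0.47 'md5sum /etc/keystone/fernet-keys/*'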

#Copy the admin/myuser credential scripts and the nova/neutron restart scripts from controller1 to controller2
# rsync -a admin-openrc.sh demo-openrc.sh neutron-restart.sh nova-restart.sh 10.0.0.47:/root

# source admin-openrc.sh

#=====================================================================================
#Add a layer-4 proxy entry on the haproxy host
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-keystone-5000
  bind 172.20.0.248:5000
  #server 172.20.0.7 172.20.0.7:5000 check inter 3s fall 3 rise 5
  server 172.20.0.47 172.20.0.47:5000 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#=====================================================================================
#Configure httpd
# vi /etc/httpd/conf/httpd.conf
 ...
 ServerName 172.20.0.47:80
# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
#Start httpd and enable it at boot
# systemctl enable --now httpd

#Test the service; a JSON response means it is working
# curl http://openstack-vip.testou.com:5000
{"versions": {"values": [{"status": "stable", "updated": "2019-07-19T00:00:00Z", "media-types": [{"base": "application/json", "type": "application/vnd.openstack.identity-v3+json"}], "id": "v3.13", "links": [{"href": "http://openstack-vip.testou.com:5000/v3/", "rel": "self"}]}]}}

3.3 glance

#Install the glance packages
# yum -y install openstack-glance

#Edit the glance configuration file
# vi /etc/glance/glance-api.conf
 [database]
 connection = mysql+pymysql://glance:glance123@openstack-vip.testou.com/glance
 [keystone_authtoken]
 www_authenticate_uri = http://openstack-vip.testou.com:5000
 auth_url = http://openstack-vip.testou.com:5000
 memcached_servers = openstack-vip.testou.com:11211
 auth_type = password
 project_domain_name = Default
 user_domain_name = Default
 project_name = service
 username = glance
 password = glance
 [paste_deploy]
 flavor = keystone
 [glance_store]
 stores = file,http
 default_store = file
 filesystem_store_datadir = /var/lib/glance/images

#Mount the shared NFS image store
# mkdir -p /var/lib/glance/images
# chown glance.glance /var/lib/glance/images
# vi /etc/fstab
 ...
 172.20.0.37:/data/glance /var/lib/glance/images nfs defaults,_netdev 0 0
# mount -a
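
#Quick check: the shared image store should now be mounted, so both controllers serve the same images
# df -h /var/lib/glance/images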

#Start glance and enable it at boot
# systemctl enable --now openstack-glance-api

#=================================================================================
#Add a layer-4 proxy entry on the haproxy host
# tail -n5 /etc/haproxy/haproxy.cfg
listen openstack-glance-9292
  bind 172.20.0.248:9292
  mode tcp        #must be tcp; the default is http
  #server 172.20.0.7 172.20.0.7:9292 check inter 3s fall 3 rise 5
  server 172.20.0.47 172.20.0.47:9292 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#=================================================================================
#Verification
#List image names and IDs
# openstack image list
# glance image-list

3.4 placement

#Install the placement packages
# yum -y install openstack-placement-api

#Edit the placement configuration file
# vi /etc/placement/placement.conf
 [placement_database]
 connection = mysql+pymysql://placement:placement123@openstack-vip.testou.com/placement
 [api]
 auth_strategy = keystone
 [keystone_authtoken]
 auth_url = http://openstack-vip.testou.com:5000/v3
 memcached_servers = openstack-vip.testou.com:11211
 auth_type = password
 project_domain_name = Default
 user_domain_name = Default
 project_name = service
 username = placement
 password = placement

#Starting it right away hits a bug; the Train docs omit the fix, but the Rocky nova docs include it
#Bug: requests to the API through httpd return 403 because httpd is denied access to /usr/bin/placement-api
#Fix: grant access to the /usr/bin directory
# vi /etc/httpd/conf.d/00-placement-api.conf
 <Directory /usr/bin>
   <IfVersion >= 2.4>
     Require all granted
   </IfVersion>
   <IfVersion < 2.4>
     Order allow,deny
     Allow from all
   </IfVersion>
 </Directory>
# systemctl restart httpd

#=================================================================================
#Add a layer-4 proxy entry on the haproxy host
# tail -n5 /etc/haproxy/haproxy.cfg
listen openstack-placement-8778
  bind 172.20.0.248:8778
  mode tcp        #must be tcp; the default is http
  #server 172.20.0.7 172.20.0.7:8778 check inter 3s fall 3 rise 5
  server 172.20.0.47 172.20.0.47:8778 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#=================================================================================
#Verification
#Check placement status
# placement-status upgrade check
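
#Expected: each check in the results table should report "Result: Success"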

3.5 nova

#Install the nova controller services
# yum -y install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler

#Edit the nova configuration file
# vi /etc/nova/nova.conf
 [DEFAULT]
 enabled_apis = osapi_compute,metadata
 transport_url = rabbit://openstack:openstack123@openstack-vip.testou.com:5672/
 use_neutron = true
 firewall_driver = nova.virt.firewall.NoopFirewallDriver        #firewall driver used with neutron; see /usr/lib/python2.7/site-packages/nova/virt/firewall.py
 [api_database]        #nova_api database connection
 connection = mysql+pymysql://nova:nova123@openstack-vip.testou.com/nova_api
 [database]            #nova database connection
 connection = mysql+pymysql://nova:nova123@openstack-vip.testou.com/nova
 [api]
 auth_strategy = keystone
 [keystone_authtoken]
 www_authenticate_uri = http://openstack-vip.testou.com:5000/
 auth_url = http://openstack-vip.testou.com:5000/
 memcached_servers = openstack-vip.testou.com:11211
 auth_type = password
 project_domain_name = Default
 user_domain_name = Default
 project_name = service
 username = nova
 password = nova
 [vnc]                    #status codes 1006|1005 in later logs mean VNC cannot connect
 enabled = true
 server_listen = 10.0.0.47
 server_proxyclient_address = 10.0.0.47
 [glance]
 api_servers = http://openstack-vip.testou.com:9292
 [oslo_concurrency]        #lock path; VM-creation steps must run sequentially, each handled by a single component
 lock_path = /var/lib/nova/tmp
 [placement]
 region_name = RegionOne
 project_domain_name = Default
 project_name = service
 auth_type = password
 user_domain_name = Default
 auth_url = http://openstack-vip.testou.com:5000/v3
 username = placement
 password = placement

#Start the nova services and enable them at boot
# systemctl enable --now openstack-nova-api openstack-nova-scheduler openstack-nova-conductor openstack-nova-novncproxy

#======================================================================================
#Add layer-4 proxy entries on the haproxy host
# tail -n9 /etc/haproxy/haproxy.cfg
listen openstack-nova-controller-8774
  bind 172.20.0.248:8774
  mode tcp        #must be tcp; the default is http
  #server 172.20.0.7 172.20.0.7:8774 check inter 3s fall 3 rise 5
  server 172.20.0.47 172.20.0.47:8774 check inter 3s fall 3 rise 5
listen openstack-nova-novncproxy-6080
  bind 172.20.0.248:6080
  #server 172.20.0.7 172.20.0.7:6080 check inter 3s fall 3 rise 5
  server 172.20.0.47 172.20.0.47:6080 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#======================================================================================
#Verification
# openstack compute service list

3.6 neutron

#Install the neutron provider-network (bridged) controller services
# yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables        #ebtables works like Alibaba Cloud security groups

#Edit the neutron configuration file
# vi /etc/neutron/neutron.conf
 [database]
 connection = mysql+pymysql://neutron:neutron123@openstack-vip.testou.com/neutron
 [DEFAULT]
 core_plugin = ml2        #layer-2 plugin (bridging)
 service_plugins =        #layer-3 plugins, not used here
 transport_url = rabbit://openstack:openstack123@openstack-vip.testou.com
 auth_strategy = keystone
 notify_nova_on_port_status_changes = true    #notify nova of port status changes
 notify_nova_on_port_data_changes = true
 [keystone_authtoken]
 www_authenticate_uri = http://openstack-vip.testou.com:5000
 auth_url = http://openstack-vip.testou.com:5000
 memcached_servers = openstack-vip.testou.com:11211
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 project_name = service
 username = neutron
 password = neutron
 [nova]            #append at the end of the file
 auth_url = http://openstack-vip.testou.com:5000
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 region_name = RegionOne
 project_name = service
 username = nova
 password = nova
 [oslo_concurrency]
 lock_path = /var/lib/neutron/tmp

#Configure the ML2 (layer-2) plugin
#The stock file is missing some options; the sample from another release can be used instead: http://docs.openstack.org/newton/config-reference/networking/samples/ml2_conf.ini.html
# vi /etc/neutron/plugins/ml2/ml2_conf.ini
 [ml2]
 type_drivers = flat,vlan        #a single flat network, i.e. bridged
 tenant_network_types =
 mechanism_drivers = linuxbridge        #bridging
 extension_drivers = port_security        #port-security extension driver, based on iptables rules, usually omitted
 [ml2_type_flat]
 flat_networks = external            #configure the provider network as flat and declare its name
 [securitygroup]
 enable_security_group = false
 enable_ipset = true                #enable ipset for more efficient security-group rules

#Symlink the ML2 plugin configuration
# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

#Configure the Linux bridge agent
#The stock file is missing some options; the sample from another release can be used instead: http://docs.openstack.org/newton/config-reference/networking/samples/linuxbridge_agent.ini.html
# vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
 [linux_bridge]                #maps virtual networks to physical NICs
 physical_interface_mappings = external:eth0                #eth0 is the host NIC facing the external network
 [securitygroup]                #security-group settings
 enable_security_group = false
 firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
 [vxlan]
 enable_vxlan = false            #disabled when self-service networks are not used

#Enable kernel parameters
# vi /etc/sysctl.conf
 ...
 net.bridge.bridge-nf-call-iptables = 1            #let bridged traffic pass through the host firewall
 net.bridge.bridge-nf-call-ip6tables = 1
#Running sysctl -p right away will fail because neutron has not started yet (the br_netfilter module is not loaded)
#Alternatively, load the module first
# modprobe br_netfilter
# sysctl -p
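
#Optional (not in the original steps): make the module load persist across reboots
# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf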

#Configure the DHCP agent
# vi /etc/neutron/dhcp_agent.ini
 [DEFAULT]
 interface_driver = linuxbridge
 dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
 enable_isolated_metadata = true

#Configure the metadata agent
# vi /etc/neutron/metadata_agent.ini            #set the metadata host and shared secret, used when nova authenticates to neutron in the next step
 [DEFAULT]
 nova_metadata_host = openstack-vip.testou.com
 metadata_proxy_shared_secret = magedu20200412

#Configure nova to talk to neutron for network information
# vi /etc/nova/nova.conf
 [neutron]
 auth_url = http://openstack-vip.testou.com:5000
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 region_name = RegionOne
 project_name = service
 username = neutron
 password = neutron
 service_metadata_proxy = true
 metadata_proxy_shared_secret = magedu20200412

#======================================================================================
#Long-standing bug: TypeError: unsupported operand type(s) for -: 'NoneType' and 'int'
#Workaround: patch the source; otherwise new VMs' NICs are not bridged onto the host NIC
#Cause: the official docs assume three networks, and VMs are not bridged onto the management network; earlier releases did not have this problem
# vi /usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
 Comment out lines 399-400

#Restart the nova API service
# systemctl restart openstack-nova-api

#======================================================================================
#Start the neutron services and enable them at boot
# systemctl enable --now neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent

#===================================================================================
#Add layer-4 proxy entries on the haproxy host
# tail -n10 /etc/haproxy/haproxy.cfg
listen openstack-neutron-9696
  bind 172.20.0.248:9696
  mode tcp        #must be tcp; the default is http
  #server 172.20.0.7 172.20.0.7:9696 check inter 3s fall 3 rise 5
  server 172.20.0.47 172.20.0.47:9696 check inter 3s fall 3 rise 5
listen openstack-nova-api-8775
  bind 172.20.0.248:8775
  mode tcp      #must be tcp; the default is http
  #server 172.20.0.7 172.20.0.7:8775 check inter 3s fall 3 rise 5
  server 172.20.0.47 172.20.0.47:8775 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#===================================================================================
#Verification
# openstack network agent list

3.7 horizon

#Install the dashboard package
# yum -y install openstack-dashboard

#Edit the dashboard configuration file
# vi /etc/openstack-dashboard/local_settings
 OPENSTACK_HOST = "172.20.0.47"        #address of the controller running the dashboard, default 127.0.0.1
 ALLOWED_HOSTS = ['172.20.0.47', 'openstack-vip.testou.com']    #hosts allowed to reach the dashboard; requests from unlisted hosts are refused, similar to nginx virtual hosts
                                                                 #'*' allows all
 #---------------------------------------------------------------------------------
 #Store sessions in memcached
 SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
 CACHES = {
     'default': {
          'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
          'LOCATION': 'openstack-vip.testou.com:11211',
     }
 }
 #---------------------------------------------------------------------------------
 #Use the v3 identity API (enabled by default)
 OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
 #---------------------------------------------------------------------------------
 #Enable multi-domain support
 OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
 #---------------------------------------------------------------------------------
 #Set the API versions
 OPENSTACK_API_VERSIONS = {
     "identity": 3,
     "image": 2,
     "volume": 3,
 }
 #---------------------------------------------------------------------------------
 #Default domain for users created through the dashboard
 OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
 #---------------------------------------------------------------------------------
 #Default role for users created through the dashboard
 OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
 #---------------------------------------------------------------------------------
 #With a provider (bridged) network, disable the layer-3 services
 OPENSTACK_NEUTRON_NETWORK = {
     ...
     'enable_router': False,
     'enable_quotas': False,
     'enable_distributed_router': False,
     'enable_ha_router': False,
     'enable_lb': False,
     'enable_firewall': False,
     'enable_vpn': False,
     'enable_fip_topology_check': False,
 }
 #---------------------------------------------------------------------------------
 #Set the time zone
 TIME_ZONE = "Asia/Shanghai"
 #---------------------------------------------------------------------------------
 #Set WEBROOT
 WEBROOT = '/dashboard'
 #---------------------------------------------------------------------------------
 #Fix HTTP 500 errors caused by an inaccessible token directory
 LOCAL_PATH = '/var/lib/openstack-dashboard'        #apache must have access to this directory
 #---------------------------------------------------------------------------------

#===================================================================================
#Add the httpd configuration
# vi /etc/httpd/conf.d/openstack-dashboard.conf
 WSGIApplicationGroup %{GLOBAL}
#Restart httpd
# systemctl restart httpd

#After restarting httpd, the generated secret key is owned by root; change it to apache
# chown apache.apache /var/lib/openstack-dashboard/.secret_key_store

#===================================================================================
#Add a layer-4 proxy entry on the haproxy host
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-dashboard-80
  bind 172.20.0.248:80
  #server 172.20.0.7 172.20.0.7:80 check inter 3s fall 3 rise 5
  server 172.20.0.47 172.20.0.47:80 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#===================================================================================
#Verification
#Add a hosts entry on the Windows client
#Browse to: http://openstack-vip.testou.com/dashboard
 Domain: default
 Username: admin
 Password: admin
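
#To run both controllers active-active behind haproxy (a sketch of the idea, not part of the original steps), uncomment the controller1 server lines in each listen block and reload:
# sed -i 's/#server 172.20.0.7 /server 172.20.0.7 /' /etc/haproxy/haproxy.cfg
# systemctl reload haproxy
#Then stop a service on one controller and confirm the VIP still answers, e.g. stop httpd on controller2 and reload the dashboard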