OpenStack Pike + Ceph + HA Integration -- Nova Controller Node Cluster (Part 8)

##Nova Controller Node Cluster
# Install on the controller nodes

######### Run the following on controller1
# The Nova databases, user, and credentials were created in an earlier part
source ./admin-openstack.sh || { echo "Failed to load the admin-openstack.sh credentials script set up earlier"; exit 1; }
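# For reference, a minimal sketch of what admin-openstack.sh is assumed to contain
# (standard OpenStack admin credential exports; the values below are placeholders, adjust to your deployment):
#export OS_PROJECT_DOMAIN_NAME=Default
#export OS_USER_DOMAIN_NAME=Default
#export OS_PROJECT_NAME=admin
#export OS_USERNAME=admin
#export OS_PASSWORD=admin_pass
#export OS_AUTH_URL=http://controller:35357/v3
#export OS_IDENTITY_API_VERSION=3
#export OS_IMAGE_API_VERSION=2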

# Register the service in Keystone: create the nova user, service, and API endpoints
# (the nova user was created in an earlier part)
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
# Create the placement user, service, and API endpoints
openstack user create --domain default --password=placement placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
#
# To delete an endpoint: openstack endpoint delete <id>
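# Optional check (assuming the registrations above succeeded): list what was just created
openstack endpoint list --service nova
openstack endpoint list --service placement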

## Install the Nova control-plane packages; run the install on all controller nodes at the same time to save time
yum install -y openstack-nova-api openstack-nova-conductor \
  openstack-nova-console openstack-nova-novncproxy \
  openstack-nova-scheduler openstack-nova-placement-api
#
cp /etc/nova/nova.conf{,.bak}
## Run the following on controller1
# Nova controller node configuration
echo '#
#
[DEFAULT]
my_ip = controller1
use_neutron = True
osapi_compute_listen = controller1
osapi_compute_listen_port = 8774
metadata_listen = controller1
metadata_listen_port=8775
firewall_driver = nova.virt.firewall.NoopFirewallDriver
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack@controller:5673

[api_database]
connection = mysql+pymysql://nova:nova@controller/nova_api
[database]
connection = mysql+pymysql://nova:nova@controller/nova

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova

[vnc]
enabled = true
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
novncproxy_host=192.168.0.71
novncproxy_port=6080

[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:35357/v3
username = placement
password = placement

[scheduler]
discover_hosts_in_cells_interval = 300

[cache]
enabled = true
backend = oslo_cache.memcache_pool
memcache_servers = controller1:11211,controller2:11211,controller3:11211
#

#
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = metadata
#

[cinder]
os_region_name = RegionOne
#'>/etc/nova/nova.conf
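# Optional sanity check: confirm the listener and transport settings were written as intended
egrep '^(my_ip|osapi_compute_listen|metadata_listen|transport_url)' /etc/nova/nova.conf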

echo "

#Placement API
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
">>/etc/httpd/conf.d/00-nova-placement-api.conf
systemctl restart httpd
sleep 2
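# Optional: the Placement API should now answer locally on 8778 with a JSON version document
curl -s http://controller1:8778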

# Sync the databases and set up the cell_v2 cells
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova    # map cell0 (holds instances that failed scheduling)
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova    # create the main cell
su -s /bin/sh -c "nova-manage db sync" nova

# Verify the results
nova-manage cell_v2 list_cells
mysql -h controller -u nova -pnova -e "use nova_api;show tables;"
mysql -h controller -u nova -pnova -e "use nova;show tables;" 
mysql -h controller -u nova -pnova -e "use nova_cell0;show tables;"

# Move the Placement API from the default port 8778 to 9778, freeing 8778 for the cluster VIP (HAProxy)
sed -i 's/8778/9778/' /etc/httpd/conf.d/00-nova-placement-api.conf
systemctl restart httpd
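# Optional: placement should now answer on 9778, leaving 8778 free for the HAProxy VIP
curl -s http://controller1:9778
netstat -antp|grep 9778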

# HAProxy high-availability configuration
echo '
##nova_compute
listen nova_compute_api_cluster
bind controller:8774
balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:8774 check inter 2000 rise 2 fall 5
server controller2 controller2:8774 check inter 2000 rise 2 fall 5
server controller3 controller3:8774 check inter 2000 rise 2 fall 5
#Nova-api-metadata
listen Nova-api-metadata_cluster
bind controller:8775
balance source
option tcpka
option httpchk
option tcplog
server controller1 controller1:8775 check inter 2000 rise 2 fall 5
server controller2 controller2:8775 check inter 2000 rise 2 fall 5
server controller3 controller3:8775 check inter 2000 rise 2 fall 5
#nova_placement
listen nova_placement_cluster
bind controller:8778
balance source
option tcpka
option tcplog
server controller1 controller1:9778 check inter 2000 rise 2 fall 5
server controller2 controller2:9778 check inter 2000 rise 2 fall 5
server controller3 controller3:9778 check inter 2000 rise 2 fall 5
'>>/etc/haproxy/haproxy.cfg
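# Optional but recommended: validate the HAProxy configuration before restarting
haproxy -c -f /etc/haproxy/haproxy.cfg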
systemctl restart haproxy.service
netstat -antp|grep haproxy

netstat -antp|egrep '8774|8778|6080'

# Enable the services at boot
 systemctl enable openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service
# Start the services
systemctl start openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service

# Check the services
#nova service-list 
openstack catalog list
nova-status upgrade check
openstack compute service list


######### Install and configure on controller2
# Install the Nova control-plane packages
# Already installed above:
##yum install -y openstack-nova-api openstack-nova-conductor \
##  openstack-nova-console openstack-nova-novncproxy \
##  openstack-nova-scheduler openstack-nova-placement-api
# Sync the configuration from controller1 and adjust it for this node
rsync -avzP -e 'ssh -p 22' controller1:/etc/nova/* /etc/nova/
rsync -avzP -e 'ssh -p 22' controller1:/etc/httpd/conf.d/00-nova-placement-api.conf /etc/httpd/conf.d/
rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
sed -i '1,9s/controller1/controller2/' /etc/nova/nova.conf
## Check with cat /etc/nova/nova.conf that controller1 was changed to controller2; if not, edit it by hand
# Enable and start the Nova services at boot, same as on controller1
# Restart the affected services
systemctl restart httpd haproxy

######### Install and configure on controller3
# Sync the configuration from controller1 and adjust it for this node
rsync -avzP  -e 'ssh -p 22'  controller1:/etc/nova/*  /etc/nova/
rsync -avzP  -e 'ssh -p 22'  controller1:/etc/httpd/conf.d/00-nova-placement-api.conf /etc/httpd/conf.d/
rsync -avzP  -e 'ssh -p 22'  controller1:/etc/haproxy/* /etc/haproxy/
sed -i '1,9s/controller1/controller3/'  /etc/nova/nova.conf
## Check with cat /etc/nova/nova.conf that controller1 was changed to controller3; if not, edit it by hand
# Then enable and start the Nova services and restart httpd and haproxy, same as on controller2
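# Optional end-to-end sketch once all three controllers are configured: each node's Nova API
# should answer directly, and placement should answer through the HAProxy VIP
# (expect HTTP status codes, not connection errors)
for h in controller1 controller2 controller3; do
  curl -s -o /dev/null -w "$h nova-api: %{http_code}\n" http://$h:8774
done
curl -s -o /dev/null -w "placement via VIP: %{http_code}\n" http://controller:8778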
 

 
