OpenStack Rocky Installation: Deployment Scripts for a 1 Controller + 1 Compute Node Environment

The previous article described the installation and deployment process step by step; the corresponding deployment scripts are collected here for reference:

 

1. setup.sh — the top-level script that drives the deployment:

#!/bin/bash

############################################################
##        Manual configuration required before running this script        #####
############################################################

# Each server has two NICs: eth0 carries the management network, eth1 the provider (data) network
# eth0 can be host-only with a static IP; eth1 can be bridged with DHCP so packages can be fetched during initial setup
# To keep traditional NIC names, press 'e' at the installer boot menu, append net.ifnames=0 biosdevname=0, then press Enter to continue the installation
# Set the hostname on each server first
# Passwordless SSH from the controller to all other nodes must be configured first (see the sketch below)
# Place setup.sh, controller.sh, and compute.sh in /root/install on the controller
# Upload the OpenStack package repository and the local_settings file to /root on the controller
# Upload cirros-0.3.5-x86_64-disk.img to /root on the controller
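
## A minimal sketch of the passwordless-SSH prerequisite, run manually on the
## controller beforehand (the IP is this guide's example compute node):
# ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa
# ssh-copy-id root@192.168.3.11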


################################################################
##      Set environment parameters: the IP address for each server role     #####
################################################################

## The values below are examples; block storage is colocated on the compute node,
## so BLOCK1 mirrors COMPUTER1. If you change anything here, make the identical
## change in the heredoc below (or see the alternative sketch after it)
MGMTNETWORK=192.168.3.0/24
CONTROLLER_IP=192.168.3.10
COMPUTER1_IP=192.168.3.11
BLOCK1_IP=192.168.3.11
OBJECT1_IP=192.168.3.12

CONTROLLER_NAME=controller
COMPUTER1_NAME=compute
BLOCK1_NAME=compute
OBJECT1_NAME=object
MYSQLUSERNAME=root
MYSQLPASSWORD=root


cat <<EOF > /root/install/environment
#!/bin/bash

MGMTNETWORK=192.168.3.0/24
CONTROLLER_IP=192.168.3.10
COMPUTER1_IP=192.168.3.11
BLOCK1_IP=192.168.3.11
OBJECT1_IP=192.168.3.12

CONTROLLER_NAME=controller
COMPUTER1_NAME=compute
BLOCK1_NAME=compute
OBJECT1_NAME=object
MYSQLUSERNAME=root
MYSQLPASSWORD=root
EOF
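
## Sketch of an alternative that avoids maintaining the values twice: the heredoc
## delimiter above is unquoted, so variable references would expand, e.g.:
# cat <<EOF > /root/install/environment
# #!/bin/bash
# MGMTNETWORK=$MGMTNETWORK
# CONTROLLER_IP=$CONTROLLER_IP
# COMPUTER1_IP=$COMPUTER1_IP
# BLOCK1_IP=$BLOCK1_IP
# OBJECT1_IP=$OBJECT1_IP
# CONTROLLER_NAME=$CONTROLLER_NAME
# COMPUTER1_NAME=$COMPUTER1_NAME
# BLOCK1_NAME=$BLOCK1_NAME
# OBJECT1_NAME=$OBJECT1_NAME
# MYSQLUSERNAME=$MYSQLUSERNAME
# MYSQLPASSWORD=$MYSQLPASSWORD
# EOF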


cat /root/install/environment > /root/install/controller-install.sh
cat /root/install/controller.sh >> /root/install/controller-install.sh
cat /root/install/environment > /root/install/compute-install.sh
cat /root/install/compute.sh >> /root/install/compute-install.sh
chmod 777 /root/install/controller-install.sh
chmod 777 /root/install/compute-install.sh

##################################################################
##########            Controller node installation             ######
##################################################################

bash /root/install/controller-install.sh

echo -------------------------------------------------------------
echo                 controller  node install is OK!
echo -------------------------------------------------------------


#################################################################
##########              Compute node installation              ######
#################################################################

scp /root/install/compute-install.sh root@$COMPUTER1_IP:/root 
ssh root@$COMPUTER1_IP bash /root/compute-install.sh

echo ------------------------------------------------------------
echo                  compute node install is OK! 
echo ------------------------------------------------------------


################################################################
##########    Discover the compute node from the controller    ######
################################################################

cd /root
. /root/admin
openstack compute service list --service nova-compute
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
systemctl -t service | grep nova
nova hypervisor-list
openstack token issue
openstack project list
openstack endpoint list
glance image-list
openstack user list
openstack compute service list
openstack catalog list

 

2. controller.sh — the installation script pre-staged for the controller node

##############################################################
##########       Controller node environment preparation       ######
##############################################################

#hostnamectl set-hostname $CONTROLLER_NAME

cat <<EOF >> /etc/hosts
$CONTROLLER_IP    $CONTROLLER_NAME
$COMPUTER1_IP    $COMPUTER1_NAME
$BLOCK1_IP    $BLOCK1_NAME
$OBJECT1_IP    $OBJECT1_NAME
EOF

## Create the admin credentials (openrc) file
cat <<EOF > /root/admin
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://$CONTROLLER_IP:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
chmod 777 /root/admin

## Create the demo credentials (openrc) file
cat <<EOF > /root/demo
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=MYUSER_PASS
export OS_AUTH_URL=http://$CONTROLLER_IP:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
chmod 777 /root/demo

## Time synchronization
yum install -y http://dl.fedoraproject.org/pub/epel/7Server/x86_64/Packages/e/epel-release-7-11.noarch.rpm
yum install chrony -y
cp /etc/chrony.conf /etc/chrony.conf.bak
sed -i "/^server/d" /etc/chrony.conf
echo server $CONTROLLER_IP >> /etc/chrony.conf
echo allow $MGMTNETWORK >> /etc/chrony.conf
echo "local stratum 10" >> /etc/chrony.conf

systemctl restart chronyd
systemctl enable chronyd
systemctl status chronyd
chronyc sources -v
sleep 5

## Install RabbitMQ
yum install rabbitmq-server -y
systemctl enable rabbitmq-server.service
systemctl restart rabbitmq-server.service
systemctl status rabbitmq-server.service

rabbitmqctl add_user openstack RABBIT_PASS
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
rabbitmq-plugins enable rabbitmq_management
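
## Optional sanity checks (illustrative): confirm the openstack user and its
## permissions; the management UI enabled above listens on port 15672 by default
# rabbitmqctl list_users
# rabbitmqctl list_permissions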

## Install memcached
yum install memcached python-memcached -y
cp /etc/sysconfig/memcached /etc/sysconfig/memcached.bak
sed -i "s/::1/$CONTROLLER_IP/g" /etc/sysconfig/memcached
systemctl enable memcached.service
systemctl restart memcached.service
systemctl status memcached.service

## Install MariaDB
yum install mariadb mariadb-server python2-PyMySQL -y
cat <<EOF > /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = $CONTROLLER_IP
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF

systemctl enable mariadb.service
systemctl restart mariadb.service
systemctl status mariadb.service

## Initialize the database (automate mysql_secure_installation with expect)
yum install expect -y
cat <<EOF > /root/install/mysqlinstall.sh
#!/usr/bin/expect
spawn mysql_secure_installation
expect "Enter current password for root (enter for none):"
send "\r"
expect "Set root password? "
send "Y\r"
expect "New password: "
send "$MYSQLPASSWORD\r"
expect "Re-enter new password: "
send "$MYSQLPASSWORD\r"
expect "Remove anonymous users?"
send "Y\r"
expect "Disallow root login remotely?"
send "n\r"
expect "Remove test database and access to it?"
send "Y\r"
expect "Reload privilege tables now?"
send "Y\r"
interact
EOF

sleep 5
chmod 777 /root/install/mysqlinstall.sh
cd /root/install
./mysqlinstall.sh
sleep 5

## Create the service databases and grant privileges
mysql -u$MYSQLUSERNAME -p$MYSQLPASSWORD <<EOF
CREATE DATABASE keystone;
CREATE DATABASE glance;
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
CREATE DATABASE placement;
CREATE DATABASE neutron;
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'PLACEMENT_DBPASS';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'PLACEMENT_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'CINDER_DBPASS';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'CINDER_DBPASS';
EOF
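
## Optional check that all eight databases were created (illustrative):
# mysql -u"$MYSQLUSERNAME" -p"$MYSQLPASSWORD" -e "SHOW DATABASES;"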

## Download the cirros test image (commented out; the image is uploaded to /root manually instead)
#wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img

## Configure the local OpenStack yum repository, served over FTP
yum install vsftpd -y
systemctl restart vsftpd
systemctl enable vsftpd
systemctl status vsftpd
mkdir /etc/yum.repos.d/save
mv /etc/yum.repos.d/C* /etc/yum.repos.d/save
mv /etc/yum.repos.d/epel* /etc/yum.repos.d/save
mv /root/openstack /var/ftp/pub
cat <<EOF > /etc/yum.repos.d/yum.repo
[rocky]
name=rocky-openstack
baseurl=ftp://$CONTROLLER_IP/pub/openstack
enabled=1
gpgcheck=0
EOF
yum clean all
yum makecache
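
## Quick check that the local repo is active (illustrative):
# yum repolist enabled | grep rocky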

## Configure NIC eth1
cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-eth1
TYPE=Ethernet
BOOTPROTO=none
DEVICE=eth1
ONBOOT=yes
EOF

sleep 5
echo ---------------------------------------------------------------
echo                 controller node environment is OK!                                                                         
echo ----------------------------------------------------------------
sleep 5

#####################################################################
##########         Install Keystone on the controller          ######
#####################################################################

yum install python-openstackclient -y
yum install openstack-selinux -y
yum install openstack-keystone httpd mod_wsgi -y
cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
sed -i "/^\[database\]$/a\connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@$CONTROLLER_IP/keystone" /etc/keystone/keystone.conf
sed -i "/^\[token\]$/a\provider = fernet" /etc/keystone/keystone.conf
su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
--bootstrap-admin-url http://$CONTROLLER_IP:5000/v3/ \
--bootstrap-internal-url http://$CONTROLLER_IP:5000/v3/ \
--bootstrap-public-url http://$CONTROLLER_IP:5000/v3/ \
--bootstrap-region-id RegionOne
echo -e ServerName $CONTROLLER_NAME >> /etc/httpd/conf/httpd.conf
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl enable httpd.service
systemctl restart httpd.service
systemctl status httpd.service

export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://$CONTROLLER_IP:5000/v3
export OS_IDENTITY_API_VERSION=3

openstack domain create --description "An Example Domain" example
openstack project create --domain default --description "Service Project" service
openstack project create --domain default --description "Demo Project" myproject

openstack user create --domain default --password MYUSER_PASS myuser
openstack role create myrole
openstack role add --project myproject --user myuser myrole
openstack --os-auth-url http://$CONTROLLER_IP:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
openstack --os-auth-url http://$CONTROLLER_IP:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name myproject --os-username myuser token issue

. /root/admin
openstack token issue

sleep 5
echo ----------------------------------------------------------------
echo                 controller node keystone is OK! 
echo ----------------------------------------------------------------
sleep 5


#####################################################################
##########          Install Glance on the controller           ######
#####################################################################

. /root/admin
openstack user create --domain default --password GLANCE_PASS glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://$CONTROLLER_IP:9292
openstack endpoint create --region RegionOne image internal http://$CONTROLLER_IP:9292
openstack endpoint create --region RegionOne image admin http://$CONTROLLER_IP:9292
yum install openstack-glance -y
cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
sed -i "/^\[database\]$/a\connection = mysql+pymysql://glance:GLANCE_DBPASS@$CONTROLLER_IP/glance" /etc/glance/glance-api.conf                                 
sed -i "/^\[keystone_authtoken\]$/a\www_authenticate_uri = http://$CONTROLLER_IP:5000\nauth_url = http://$CONTROLLER_IP:5000\nmemcached_servers = $CONTROLLER_IP:11211\nauth_type = password\nproject_domain_name = Default\nuser_domain_name = Default\nproject_name = service\nusername = glance\npassword = GLANCE_PASS" /etc/glance/glance-api.conf 
sed -i "/^\[paste_deploy\]$/a\flavor = keystone" /etc/glance/glance-api.conf
sed -i "/^\[glance_store\]$/a\stores = file,http\ndefault_store = file\nfilesystem_store_datadir = /var/lib/glance/images/" /etc/glance/glance-api.conf
cp /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.bak
sed -i "/^\[database\]$/a\connection = mysql+pymysql://glance:GLANCE_DBPASS@$CONTROLLER_IP/glance" /etc/glance/glance-registry.conf                                 
sed -i "/^\[keystone_authtoken\]$/a\www_authenticate_uri = http://$CONTROLLER_IP:5000\nauth_url = http://$CONTROLLER_IP:5000\nmemcached_servers = $CONTROLLER_IP:11211\nauth_type = password\nproject_domain_name = Default\nuser_domain_name = Default\nproject_name = service\nusername = glance\npassword = GLANCE_PASS" /etc/glance/glance-registry.conf 
sed -i "/^\[paste_deploy\]$/a\flavor = keystone" /etc/glance/glance-registry.conf
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl restart openstack-glance-api.service openstack-glance-registry.service
systemctl status openstack-glance-api.service openstack-glance-registry.service

sleep 5
openstack image create "cirros" --file /root/cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image list

sleep 5
echo -------------------------------------------------------------
echo              controller node glance is OK!
echo -------------------------------------------------------------
sleep 5

##################################################################
##########           Install Nova on the controller            ######
##################################################################

. /root/admin
openstack user create --domain default --password NOVA_PASS nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://$CONTROLLER_IP:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://$CONTROLLER_IP:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://$CONTROLLER_IP:8774/v2.1
openstack user create --domain default --password PLACEMENT_PASS placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://$CONTROLLER_IP:8778
openstack endpoint create --region RegionOne placement internal http://$CONTROLLER_IP:8778
openstack endpoint create --region RegionOne placement admin http://$CONTROLLER_IP:8778
yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
sed -i "/^\[DEFAULT\]$/a\enabled_apis = osapi_compute,metadata\ntransport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\nmy_ip = $CONTROLLER_IP\nuse_neutron = true\nfirewall_driver = nova.virt.firewall.NoopFirewallDriver" /etc/nova/nova.conf
sed -i "/^\[api_database\]$/a\connection = mysql+pymysql://nova:NOVA_DBPASS@$CONTROLLER_IP/nova_api" /etc/nova/nova.conf
sed -i "/^\[database\]$/a\connection = mysql+pymysql://nova:NOVA_DBPASS@$CONTROLLER_IP/nova" /etc/nova/nova.conf
sed -i "/^\[placement_database\]$/a\connection = mysql+pymysql://placement:PLACEMENT_DBPASS@$CONTROLLER_IP/placement" /etc/nova/nova.conf
sed -i "/^\[api\]$/a\auth_strategy = keystone" /etc/nova/nova.conf
sed -i "/^\[keystone_authtoken\]$/a\auth_url = http://$CONTROLLER_IP:5000/v3\nmemcached_servers = $CONTROLLER_IP:11211\nauth_type = password\nproject_domain_name = default\nuser_domain_name = default\nproject_name = service\nusername = nova\npassword = NOVA_PASS" /etc/nova/nova.conf
sed -i "/^\[vnc\]$/a\enabled = true\nserver_listen = $CONTROLLER_IP\nserver_proxyclient_address = $CONTROLLER_IP" /etc/nova/nova.conf
sed -i "/^\[glance\]$/a\api_servers = http://$CONTROLLER_IP:9292" /etc/nova/nova.conf
sed -i "/^\[oslo_concurrency\]$/a\lock_path = /var/lib/nova/tmp" /etc/nova/nova.conf
sed -i "/^\[placement\]$/a\region_name = RegionOne\nproject_domain_name = Default\nproject_name = service\nauth_type = password\nuser_domain_name = Default\nauth_url = http://$CONTROLLER_IP:5000/v3\nusername = placement\npassword = PLACEMENT_PASS" /etc/nova/nova.conf
echo -e "\n\n<Directory /usr/bin>\n<IfVersion >= 2.4>\nRequire all granted\n</IfVersion>\n<IfVersion < 2.4>\nOrder allow,deny\nAllow from all\n</IfVersion>\n</Directory>" >> /etc/httpd/conf.d/00-nova-placement-api.conf
systemctl restart httpd
systemctl status httpd
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service openstack-nova-consoleauth.service
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service openstack-nova-consoleauth.service
systemctl status openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service openstack-nova-consoleauth.service
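
## Optional verification from the Rocky install workflow (illustrative):
# nova-status upgrade check
# openstack compute service list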

sleep 5
echo ---------------------------------------------------------
echo                 controller node nova is OK!
echo ---------------------------------------------------------
sleep 5


##############################################################
##########          Install Neutron on the controller          ######
##############################################################

. /root/admin
openstack user create --domain default --password NEUTRON_PASS neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://$CONTROLLER_IP:9696
openstack endpoint create --region RegionOne network internal http://$CONTROLLER_IP:9696
openstack endpoint create --region RegionOne network admin http://$CONTROLLER_IP:9696
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y

cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
sed -i "{
/^#/d
/^$/d
/^\[database\]$/a\connection = mysql+pymysql://neutron:NEUTRON_DBPASS@$CONTROLLER_IP/neutron
/^\[DEFAULT\]$/a\core_plugin = ml2\nservice_plugins = router\nallow_overlapping_ips = true\ntransport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\nauth_strategy = keystone\nnotify_nova_on_port_status_changes = true\nnotify_nova_on_port_data_changes = true
/^\[keystone_authtoken\]$/a\www_authenticate_uri = http://$CONTROLLER_IP:5000\nauth_url = http://$CONTROLLER_IP:5000\nmemcached_servers = $CONTROLLER_IP:11211\nauth_type = password\nproject_domain_name = default\nuser_domain_name = default\nproject_name = service\nusername = neutron\npassword = NEUTRON_PASS
/^\[oslo_concurrency\]$/a\lock_path = /var/lib/neutron/tmp
/^\[nova\]$/a\auth_url = http://$CONTROLLER_IP:5000\nauth_type = password\nproject_domain_name = default\nuser_domain_name = default\nregion_name = RegionOne\nproject_name = service\nusername = nova\npassword = NOVA_PASS
}" /etc/neutron/neutron.conf

cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
sed -i "{
/^#/d
/^$/d
/^\[ml2\]$/a\type_drivers = flat,vlan,vxlan\ntenant_network_types = vxlan\nmechanism_drivers = linuxbridge,l2population\nextension_drivers = port_security
/^\[ml2_type_flat\]$/a\flat_networks = provider
/^\[ml2_type_vxlan\]$/a\vni_ranges = 1:1000
/^\[securitygroup\]$/a\enable_ipset = true
}" /etc/neutron/plugins/ml2/ml2_conf.ini

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
sed -i "{
/^#/d
/^$/d
/^\[linux_bridge\]$/a\physical_interface_mappings = provider:eth1
/^\[vxlan\]$/a\enable_vxlan = true\nlocal_ip = $CONTROLLER_IP\nl2_population = true
/^\[securitygroup\]$/a\enable_security_group = true\nfirewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
}" /etc/neutron/plugins/ml2/linuxbridge_agent.ini

cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.bak
sed -i "{
/^#/d
/^$/d
/^\[DEFAULT\]$/a\interface_driver = linuxbridge
}" /etc/neutron/l3_agent.ini

cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak
sed -i "{
/^#/d
/^$/d
/^\[DEFAULT\]$/a\interface_driver = linuxbridge\ndhcp_driver = neutron.agent.linux.dhcp.Dnsmasq\nenable_isolated_metadata = true
}" /etc/neutron/dhcp_agent.ini

cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak
sed -i "{
/^#/d;
/^$/d;
/^\[DEFAULT\]$/a\nova_metadata_host = $CONTROLLER_IP\nmetadata_proxy_shared_secret = METADATA_SECRET
}" /etc/neutron/metadata_agent.ini

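## Configure the [neutron] section of nova.conf so nova can reach neutron and
## use the metadata proxy (note: this sed also strips comments and blank lines)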
sed -i "{
/^#/d;
/^$/d;
/^\[neutron\]$/a\url = http://$CONTROLLER_IP:9696\nauth_url = http://$CONTROLLER_IP:5000\nauth_type = password\nproject_domain_name = default\nuser_domain_name = default\nregion_name = RegionOne\nproject_name = service\nusername = neutron\npassword = NEUTRON_PASS\nservice_metadata_proxy = true\nmetadata_proxy_shared_secret = METADATA_SECRET
}" /etc/nova/nova.conf

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

systemctl restart openstack-nova-api.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

## For networking option 2 (self-service networks), also enable and start the layer-3 agent:
systemctl enable neutron-l3-agent.service
systemctl restart neutron-l3-agent.service
systemctl status neutron-l3-agent.service
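
## Optional verification (illustrative): every agent should report alive
# openstack network agent list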

sleep 5
echo ----------------------------------------------------------
echo               controller node neutron is OK!   
echo ----------------------------------------------------------
sleep 5

###############################################################
##########          Install Cinder on the controller           ######
###############################################################

. /root/admin
openstack user create --domain default --password CINDER_PASS cinder
openstack role add --project service --user cinder admin
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://$CONTROLLER_IP:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://$CONTROLLER_IP:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://$CONTROLLER_IP:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://$CONTROLLER_IP:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://$CONTROLLER_IP:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://$CONTROLLER_IP:8776/v3/%\(project_id\)s

yum install openstack-cinder -y
mv /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
cat /etc/cinder/cinder.conf.bak | egrep -v "^#|^$" > /etc/cinder/cinder.conf
sed -i "/^\[DEFAULT\]$/a\transport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\nauth_strategy = keystone\nmy_ip = $CONTROLLER_IP" /etc/cinder/cinder.conf
sed -i "/^\[database\]$/a\connection = mysql+pymysql://cinder:CINDER_DBPASS@$CONTROLLER_IP/cinder" /etc/cinder/cinder.conf
sed -i "/^\[keystone_authtoken\]$/a\auth_uri = http://$CONTROLLER_IP:5000\nauth_url = http://$CONTROLLER_IP:5000\nmemcached_servers = $CONTROLLER_IP:11211\nauth_type = password\nproject_domain_id = default\nuser_domain_id = default\nproject_name = service\nusername = cinder\npassword = CINDER_PASS" /etc/cinder/cinder.conf
sed -i "/^\[oslo_concurrency\]$/a\lock_path = /var/lib/cinder/tmp" /etc/cinder/cinder.conf

su -s /bin/sh -c "cinder-manage db sync" cinder
sed -i "/^\[cinder\]$/a\os_region_name = RegionOne"  /etc/nova/nova.conf

systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service
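
## Optional verification (illustrative): the scheduler should be up; the volume
## service appears once the storage node section below completes
# openstack volume service list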

sleep 5
echo ----------------------------------------------------------
echo                 controller node cinder is OK!
echo ----------------------------------------------------------
sleep 5

###############################################################
##########          Install Horizon on the controller          ######
###############################################################

yum install openstack-dashboard -y
mv /etc/openstack-dashboard/local_settings /etc/openstack-dashboard/local_settings.bak
cp /root/local_settings /etc/openstack-dashboard/local_settings
echo "WSGIApplicationGroup %{GLOBAL}" >> /etc/httpd/conf.d/openstack-dashboard.conf
systemctl restart httpd.service
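
## The dashboard should now be reachable at http://$CONTROLLER_IP/dashboard
## (the default RDO path); log in with domain Default, user admin, password ADMIN_PASS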

echo -------------------------------------------------------------
echo                  controller node horizon is OK!   
echo -------------------------------------------------------------

 

3. compute.sh — the compute node installation script

#############################################################
##########        Compute node environment preparation         ######
#############################################################

#hostnamectl set-hostname $COMPUTER1_NAME

cat <<EOF >> /etc/hosts
$CONTROLLER_IP    $CONTROLLER_NAME
$COMPUTER1_IP    $COMPUTER1_NAME
$BLOCK1_IP    $BLOCK1_NAME
$OBJECT1_IP    $OBJECT1_NAME
EOF

## Time synchronization
yum install -y http://dl.fedoraproject.org/pub/epel/7Server/x86_64/Packages/e/epel-release-7-11.noarch.rpm
yum makecache
yum install chrony -y
cp /etc/chrony.conf /etc/chrony.conf.bak
sed -i "/^server/d" /etc/chrony.conf
echo server $CONTROLLER_IP >> /etc/chrony.conf
systemctl restart chronyd
systemctl enable chronyd
systemctl status chronyd
chronyc sources -v

## Configure NIC eth1
cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-eth1
TYPE=Ethernet
BOOTPROTO=none
DEVICE=eth1
ONBOOT=yes
EOF


## Configure the local OpenStack yum repository
yum clean all
mkdir /etc/yum.repos.d/save
mv /etc/yum.repos.d/C* /etc/yum.repos.d/save
mv /etc/yum.repos.d/epel* /etc/yum.repos.d/save
cat <<EOF > /etc/yum.repos.d/yum.repo
[rocky]
name=rocky-openstack
baseurl=ftp://$CONTROLLER_IP/pub/openstack
enabled=1
gpgcheck=0
EOF
yum clean all
yum makecache

sleep 5
echo --------------------------------------------------------
echo          compute node environment is OK!
echo --------------------------------------------------------
sleep 5

#############################################################
##########          Install Nova on the compute node           ######
#############################################################

yum install openstack-nova-compute -y
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
sed -i "/^\[DEFAULT\]$/a\enabled_apis = osapi_compute,metadata\ntransport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\nmy_ip = $COMPUTER1_IP\nuse_neutron = true\nfirewall_driver = nova.virt.firewall.NoopFirewallDriver" /etc/nova/nova.conf
sed -i "/^\[api\]$/a\auth_strategy = keystone" /etc/nova/nova.conf
sed -i "/^\[keystone_authtoken\]$/a\auth_url = http://$CONTROLLER_IP:5000/v3\nmemcached_servers = $CONTROLLER_IP:11211\nauth_type = password\nproject_domain_name = default\nuser_domain_name = default\nproject_name = service\nusername = nova\npassword = NOVA_PASS" /etc/nova/nova.conf 
sed -i "/^\[vnc\]$/a\enabled = true\nserver_listen = 0.0.0.0\nserver_proxyclient_address = $COMPUTER1_IP\nnovncproxy_base_url = http://$CONTROLLER_IP:6080/vnc_auto.html" /etc/nova/nova.conf
sed -i "/^\[glance\]$/a\api_servers = http://$CONTROLLER_IP:9292" /etc/nova/nova.conf
sed -i "/^\[libvirt\]$/a\virt_type = qemu" /etc/nova/nova.conf
sed -i "/^\[oslo_concurrency\]$/a\lock_path = /var/lib/nova/tmp" /etc/nova/nova.conf
sed -i "/^\[placement\]$/a\region_name = RegionOne\nproject_domain_name = Default\nproject_name = service\nauth_type = password\nuser_domain_name = Default\nauth_url = http://$CONTROLLER_IP:5000/v3\nusername = placement\npassword = PLACEMENT_PASS" /etc/nova/nova.conf
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl restart libvirtd.service openstack-nova-compute.service
systemctl status libvirtd.service openstack-nova-compute.service
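
## virt_type = qemu (set in [libvirt] above) suits hosts without hardware
## virtualization; if this check returns 1 or more, kvm could be used instead:
# egrep -c '(vmx|svm)' /proc/cpuinfo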

sleep 5
echo -------------------------------------------------------
echo                  compute node nova is OK!
echo -------------------------------------------------------
sleep 5


############################################################
##########         Install Neutron on the compute node         ######
############################################################

yum install openstack-neutron-linuxbridge ebtables ipset -y

cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
sed -i "{
/^#/d
/^$/d
/^\[DEFAULT\]$/a\transport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\nauth_strategy = keystone
/^\[keystone_authtoken\]$/a\www_authenticate_uri = http://$CONTROLLER_IP:5000\nauth_url = http://$CONTROLLER_IP:5000\nmemcached_servers = $CONTROLLER_IP:11211\nauth_type = password\nproject_domain_name = default\nuser_domain_name = default\nproject_name = service\nusername = neutron\npassword = NEUTRON_PASS
/^\[oslo_concurrency\]$/a\lock_path = /var/lib/neutron/tmp
}" /etc/neutron/neutron.conf

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
sed -i "{
/^#/d
/^$/d
/^\[linux_bridge\]$/a\physical_interface_mappings = provider:eth1
/^\[vxlan\]$/a\enable_vxlan = true\nlocal_ip = $COMPUTER1_IP\nl2_population = true
/^\[securitygroup\]$/a\enable_security_group = true\nfirewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
}" /etc/neutron/plugins/ml2/linuxbridge_agent.ini

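## Configure the [neutron] section of nova.conf on the compute node (note: this
## sed also strips comments and blank lines from nova.conf)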
sed -i "{
/^#/d
/^$/d
/^\[neutron\]$/a\url = http://$CONTROLLER_IP:9696\nauth_url = http://$CONTROLLER_IP:5000\nauth_type = password\nproject_domain_name = default\nuser_domain_name = default\nregion_name = RegionOne\nproject_name = service\nusername = neutron\npassword = NEUTRON_PASS
}" /etc/nova/nova.conf

systemctl restart openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service
systemctl restart neutron-linuxbridge-agent.service
systemctl status neutron-linuxbridge-agent.service

sleep 5
echo --------------------------------------------------------
echo                  compute node neutron is OK!
echo --------------------------------------------------------
sleep 5

#############################################################
##########     Install Cinder on the storage (compute) node    ######
#############################################################

yum install lvm2 device-mapper-persistent-data -y
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service

pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb
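
## This assumes a dedicated second disk /dev/sdb (example value). Quick check
## that the volume group exists (illustrative):
# vgs cinder-volumes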
mv /etc/lvm/lvm.conf /etc/lvm/lvm.conf.bak
cat /etc/lvm/lvm.conf.bak | egrep -v "^#|^$" > /etc/lvm/lvm.conf
sed -i '/^devices {$/a\filter = [ "a/sdb/", "r/.*/"]' /etc/lvm/lvm.conf


yum install openstack-cinder targetcli python-keystone -y
mv /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
cat /etc/cinder/cinder.conf.bak | egrep -v "^#|^$" > /etc/cinder/cinder.conf
sed -i "/^\[database\]$/a\connection = mysql+pymysql://cinder:CINDER_DBPASS@$CONTROLLER_IP/cinder" /etc/cinder/cinder.conf
sed -i "/^\[DEFAULT\]$/a\transport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\nauth_strategy = keystone\nmy_ip = $BLOCK1_IP\nenabled_backends = lvm\nglance_api_servers = http://$CONTROLLER_IP:9292" /etc/cinder/cinder.conf
sed -i "/^\[keystone_authtoken\]$/a\www_authenticate_uri = http://$CONTROLLER_IP:5000\nauth_url = http://$CONTROLLER_IP:5000\nmemcached_servers = $CONTROLLER_IP:11211\nauth_type = password\nproject_domain_id = default\nuser_domain_id = default\nproject_name = service\nusername = cinder\npassword = CINDER_PASS" /etc/cinder/cinder.conf
sed -i "/^\[oslo_concurrency\]$/a\lock_path = /var/lib/cinder/tmp" /etc/cinder/cinder.conf
cat <<EOF >> /etc/cinder/cinder.conf
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
EOF

systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service

sleep 5
echo ----------------------------------------------------------
echo                  compute node cinder is OK! 
echo -----------------------------------------------------------
sleep 5

 
