OpenStack setup (working notes)

OpenStack manages three major classes of resources:
1. Network resources
2. Compute resources
3. Storage resources


Keystone handles service registration; Glance provides the image service; Nova provides the compute service; nova-scheduler decides which host (compute node) a VM is created on; Neutron controls the network service.

 

##Installation (environment: Red Hat 7.5)
#base

yum install -y http://fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm
yum -y install centos-release-openstack-liberty python-openstackclient

#Nova, on linux-node2 (compute node)
yum -y install openstack-nova-compute sysfsutils

#Neutron, on linux-node2 (compute node)
yum -y install openstack-neutron openstack-neutron-linuxbridge ebtables ipset

 


[root@linux-node1 ~]# vim /etc/chrony.conf
# Allow NTP client access from local network.
allow 192.168.0.0/16
[root@linux-node1 ~]# systemctl enable chronyd.service

[root@linux-node1 ~]# timedatectl set-timezone Asia/Shanghai

#MySQL
[root@linux-node1 ~]# yum -y install mariadb mariadb-server MySQL-python

#RabbitMQ
[root@linux-node1 ~]# yum -y install rabbitmq-server

#Keystone
yum -y install openstack-keystone httpd mod_wsgi memcached python-memcached

#Glance
[root@linux-node1 ~]# yum -y install openstack-glance python-glance python-glanceclient

#Nova
[root@linux-node1 ~]# yum -y install openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient

#Neutron
[root@linux-node1 ~]# yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset

#Dashboard
yum -y install openstack-dashboard

-----------------------------------------
If openstack-keystone fails to install, run:
yum install https://buildlogs.centos.org/centos/7/cloud/x86_64/openstack-liberty/centos-release-openstack-liberty-1-3.el7.noarch.rpm
-----------------------------------------

[root@linux-node1 ~]# \cp /usr/share/mysql/my-medium.cnf /etc/my.cnf

#Edit /etc/my.cnf
[mysqld]
default-storage-engine = innodb
innodb_file_per_table    # one tablespace file per table
collation-server = utf8_general_ci    # collation
init-connect = 'SET NAMES utf8'
character-set-server = utf8    # default character set

[root@linux-node1 ~]# systemctl enable mariadb.service
[root@linux-node1 ~]# systemctl start mariadb.service

#Initialize MySQL
[root@linux-node1 ~]# mysql_secure_installation
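
A quick sanity check that the InnoDB and UTF-8 settings took effect (enter the root password just set):

[root@linux-node1 ~]# mysql -u root -p -e "SHOW VARIABLES LIKE 'character_set_server'; SHOW VARIABLES LIKE 'default_storage_engine';"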


Create the databases

#keystone
mysql -u root -p -e "CREATE DATABASE keystone;"
mysql -u root -p -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';"
mysql -u root -p -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';"

#glance
mysql -u root -p -e "CREATE DATABASE glance;"
mysql -u root -p -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';"
mysql -u root -p -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';"

#nova
mysql -u root -p -e "CREATE DATABASE nova;"
mysql -u root -p -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'loaclhost' IDENTIFIED BY 'nova';"
mysql -u root -p -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';"

#neutron
mysql -u root -p -e "CREATE DATABASE neutron;"
mysql -u root -p -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';"
mysql -u root -p -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';"

#cinder
mysql -u root -p -e "CREATE DATABASE cinder;"
mysql -u root -p -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';"
mysql -u root -p -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';"

#Start the RabbitMQ message queue; it listens on port 5672
[root@linux-node1 ~]# systemctl enable rabbitmq-server.service
[root@linux-node1 ~]# systemctl start rabbitmq-server.service

#Create a user and password
[root@linux-node1 ~]# rabbitmqctl add_user openstack openstack
#Grant permissions
[root@linux-node1 ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
#List available plugins
[root@linux-node1 ~]# rabbitmq-plugins list
#Enable the management plugin
[root@linux-node1 ~]# rabbitmq-plugins enable rabbitmq_management
#Restart RabbitMQ
[root@linux-node1 ~]# systemctl restart rabbitmq-server.service

Browse to 192.168.56.11:15672; the default account and password are guest / guest.
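
Note that RabbitMQ 3.3+ only allows the guest account to log in from localhost, so remote access to the web UI may need the openstack user instead; giving it the administrator tag also unlocks the management HTTP API (an extra step, not in the original notes):

[root@linux-node1 ~]# rabbitmqctl set_user_tags openstack administrator
[root@linux-node1 ~]# curl -u openstack:openstack http://192.168.56.11:15672/api/overview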

#Keystone: service registration
Users and authentication: user permissions and user-action tracking.
Service catalog: a directory of all services and the endpoints of their APIs.

Core objects: User, Tenant (i.e. project), Token, Role, Service, Endpoint.
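
These objects map directly onto the Identity v3 REST API. Once Keystone is configured and running (below), a scoped token can be requested with plain curl (a sketch, assuming the admin user's password is admin as in the openrc files further down); the token is returned in the X-Subject-Token response header:

curl -si http://192.168.56.11:5000/v3/auth/tokens \
  -H "Content-Type: application/json" \
  -d '{"auth":{"identity":{"methods":["password"],"password":{"user":{"name":"admin","domain":{"id":"default"},"password":"admin"}}},"scope":{"project":{"name":"admin","domain":{"id":"default"}}}}}'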

-----------------------------------------
vim /etc/keystone/keystone.conf

[DEFAULT]
line 12: admin_token = 8d869454a5089ee5e56a
line 107: verbose = true    # optional, verbose output for debugging
[database]
line 495: connection = mysql://keystone:keystone@192.168.56.11/keystone
[memcache]
line 1313: servers = 192.168.56.11:11211
[token]
line 1911: provider = uuid
line 1916: driver = memcache
[revoke]
line 1718: driver = sql
-----------------------------------------

[root@linux-node1 keystone]# grep '^[a-z]' /etc/keystone/keystone.conf
admin_token = 8d869454a5089ee5e56a
connection = mysql://keystone:keystone@192.168.56.11/keystone
servers = 192.168.56.11:11211
driver = sql
provider = uuid
driver = memcache


[root@linux-node1 ~]#systemctl enable memcached.service
[root@linux-node1 ~]#systemctl start memcached.service
#Sync the database
[root@linux-node1 ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone

[root@linux-node1 keystone]# vim /etc/httpd/conf.d/wsgi-keystone.conf

=================================================================
Listen 5000
Listen 35357

<VirtualHost *:5000>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /usr/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined

<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>

<VirtualHost *:35357>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined

<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>

=====================================================================


[root@linux-node1 conf.d]# vim /etc/httpd/conf/httpd.conf
ServerName 192.168.56.11:80

[root@linux-node1 conf.d]# systemctl enable httpd
[root@linux-node1 conf.d]# systemctl start httpd
[root@linux-node1 ~]# export OS_TOKEN=8d869454a5089ee5e56a
[root@linux-node1 ~]# export OS_URL=http://192.168.56.11:35357/v3
[root@linux-node1 ~]# export OS_IDENTITY_API_VERSION=3

yum -y install python-openstackclient
[root@linux-node1 ~]# openstack project create --domain default --description "Admin Project" admin

[root@linux-node1 ~]# openstack user create --domain default --password-prompt admin

#Create the admin role
[root@linux-node1 ~]# openstack role create admin

#Add the admin user to the admin project with the admin role
[root@linux-node1 ~]# openstack role add --project admin --user admin admin

[root@linux-node1 ~]# openstack project create --domain default --description "Demo Project" demo

[root@linux-node1 ~]# openstack user create --domain default --password=demo demo

[root@linux-node1 ~]# openstack role create user

[root@linux-node1 ~]# openstack role add --project demo --user demo user

[root@linux-node1 ~]# openstack project create --domain default --description "Service Project" service

[root@linux-node1 ~]# openstack service create --name keystone --description "OpenStack Identity" identity


openstack endpoint create --region RegionOne identity public http://192.168.56.11:5000/v2.0

openstack endpoint create --region RegionOne identity internal http://192.168.56.11:5000/v2.0

openstack endpoint create --region RegionOne identity admin http://192.168.56.11:35357/v2.0

[root@linux-node1 ~]# openstack endpoint list

[root@linux-node1 ~]# unset OS_TOKEN
[root@linux-node1 ~]# unset OS_URL

[root@linux-node1 ~]# openstack --os-auth-url http://192.168.56.11:35357/v3 --os-project-domain-id default --os-user-domain-id default --os-project-name admin --os-username admin --os-auth-type password token issue

#Set up keystone environment variables to make running commands easier

cat >> admin-openrc.sh << EOF
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://192.168.56.11:35357/v3
export OS_IDENTITY_API_VERSION=3
EOF


cat >> demo-openrc.sh << EOF
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=demo
export OS_TENANT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://192.168.56.11:5000/v3
export OS_IDENTITY_API_VERSION=3
EOF


[root@linux-node1 ~]# chmod +x admin-openrc.sh demo-openrc.sh

[root@linux-node1 ~]# . admin-openrc.sh

[root@linux-node1 ~]# openstack token issue

Keystone setup complete.

 

##Glance

Glance consists of three parts: glance-api, glance-registry, and the image store.

glance-api handles requests to create, delete, and read images.

glance-registry is the image registry service.

[root@linux-node1 ~]# vim /etc/glance/glance-api.conf

line 538: connection=mysql://glance:glance@192.168.56.11/glance

[root@linux-node1 ~]# vim /etc/glance/glance-registry.conf

line 363: connection=mysql://glance:glance@192.168.56.11/glance

[keystone_authtoken]
auth_uri = http://192.168.56.11:5000
auth_url = http://192.168.56.11:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = glance

flavor=keystone    # under [paste_deploy]

 

[root@linux-node1 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
No handlers could be found for logger "oslo_config.cfg"    # harmless warning, safe to ignore

[root@linux-node1 ~]# mysql -h 192.168.56.11 -u glance -pglance
use glance;
show tables;    # check that the tables were created

[root@linux-node1 ~]# openstack user create --domain default --password=glance glance
[root@linux-node1 ~]# openstack role add --project service --user glance admin


[root@linux-node1 ~]# vim /etc/glance/glance-api.conf


verbose=True
notification_driver = noop
connection=mysql://glance:glance@192.168.56.11/glance
default_store=file
filesystem_store_datadir=/var/lib/glance/images/

[keystone_authtoken]
auth_uri = http://192.168.56.11:5000
auth_url = http://192.168.56.11:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = glance

flavor=keystone    # under [paste_deploy]

 

systemctl enable openstack-glance-api
systemctl enable openstack-glance-registry
systemctl start openstack-glance-api
systemctl start openstack-glance-registry


#glance-registry listens on port 9191, glance-api on port 9292
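
A quick check that both daemons are actually listening:

[root@linux-node1 ~]# ss -lntp | grep -E '9191|9292'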

[root@linux-node1 ~]# openstack service create --name glance --description "OpenStack Image service" image


openstack endpoint create --region RegionOne image public http://192.168.56.11:9292
openstack endpoint create --region RegionOne image internal http://192.168.56.11:9292
openstack endpoint create --region RegionOne image admin http://192.168.56.11:9292

[root@linux-node1 ~]# echo "export OS_IMAGE_API_VERSION=2" | tee -a admin-openrc.sh demo-openrc.sh

[root@linux-node1 ~]# glance image-list    # test that the service works
+----+------+
| ID | Name |
+----+------+
+----+------+

[root@linux-node1 ~]# wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img    # download a test image

glance image-create --name "cirros" --file cirros-0.3.4-x86_64-disk.img \
--disk-format qcow2 --container-format bare --visibility public --progress
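
If the upload succeeded, the image should now be listed with an active status:

[root@linux-node1 ~]# glance image-list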


##Nova configuration
[root@linux-node1 ~]# vim /etc/nova/nova.conf

connection=mysql://nova:nova@192.168.56.11/nova

[root@linux-node1 ~]# su -s /bin/sh -c "nova-manage db sync" nova

[root@linux-node1 ~]# openstack user create --domain default --password=nova nova

[root@linux-node1 ~]# openstack role add --project service --user nova admin

================================================================================
[root@linux-node1 ~]# vim /etc/nova/nova.conf

198:my_ip=192.168.56.11
344:enabled_apis=osapi_compute,metadata
506:auth_strategy=keystone    # under [DEFAULT]
838:network_api_class=nova.network.neutronv2.api.API
930:linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
1064:security_group_api=neutron
1241:firewall_driver = nova.virt.firewall.NoopFirewallDriver
1423:rpc_backend=rabbit
1743:connection=mysql://nova:nova@192.168.56.11/nova
1944:host=$my_ip
2122:auth_uri = http://192.168.56.11:5000
2123:auth_url = http://192.168.56.11:35357
2124:auth_plugin = password
2125:project_domain_id = default
2126:user_domain_id = default
2127:project_name = service
2128:username = nova
2129:password = nova
2752:lock_path=/var/lib/nova/tmp
2932:rabbit_host=192.168.56.11
2936:rabbit_port=5672
2948:rabbit_userid=openstack
2952:rabbit_password=openstack
3319:vncserver_listen=$my_ip
3324:vncserver_proxyclient_address=$my_ip
================================================================================

[root@linux-node1 ~]# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service openstack-nova-cert.service

[root@linux-node1 ~]# systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service openstack-nova-cert.service

[root@linux-node1 ~]# openstack service create --name nova --description "OpenStack Compute" compute

[root@linux-node1 ~]# openstack endpoint create --region RegionOne compute public http://192.168.56.11:8774/v2/%\(tenant_id\)s

[root@linux-node1 ~]# openstack endpoint create --region RegionOne compute internal http://192.168.56.11:8774/v2/%\(tenant_id\)s

[root@linux-node1 ~]# openstack endpoint create --region RegionOne compute admin http://192.168.56.11:8774/v2/%\(tenant_id\)s

[root@linux-node1 ~]# openstack host list
+---------------------------+-------------+----------+
| Host Name | Service | Zone |
+---------------------------+-------------+----------+
| linux-node1.oldboyedu.com | consoleauth | internal |
| linux-node1.oldboyedu.com | conductor | internal |
| linux-node1.oldboyedu.com | scheduler | internal |
| linux-node1.oldboyedu.com | cert | internal |
+---------------------------+-------------+----------+

 

 

On the compute node, 192.168.56.12:
[root@linux-node2 yum.repos.d]# yum -y install openstack-nova-compute sysfsutils

 

#Copy nova.conf from 192.168.56.11 to 192.168.56.12
[root@linux-node1 yum.repos.d]# scp /etc/nova/nova.conf 192.168.56.12:/etc/nova/nova.conf

 

-----------------------------------------------------------------
[root@linux-node2 yum.repos.d]# grep '^[a-z]' /etc/nova/nova.conf
my_ip=192.168.56.12
enabled_apis=osapi_compute,metadata
auth_strategy=keystone
network_api_class=nova.network.neutronv2.api.API
linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
security_group_api=neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
rpc_backend=rabbit
connection=mysql://nova:nova@192.168.56.11/nova
host=192.168.56.11
auth_uri = http://192.168.56.11:5000
auth_url = http://192.168.56.11:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = nova
virt_type=kvm
lock_path=/var/lib/nova/tmp
rabbit_host=192.168.56.11
rabbit_port=5672
rabbit_userid=openstack
rabbit_password=openstack
novncproxy_base_url=http://192.168.56.11:6080/vnc_auto.html
vncserver_listen=0.0.0.0
vncserver_proxyclient_address=$my_ip
enabled=true
keymap=en-us

-----------------------------------------------------------------

[root@linux-node2 yum.repos.d]# vim /etc/chrony.conf

server 192.168.56.11 iburst    # delete all other server lines


[root@linux-node2 ~]# systemctl enable chronyd
[root@linux-node2 ~]# systemctl restart chronyd
[root@linux-node2 ~]# chronyc sources

[root@linux-node2 ~]# systemctl enable libvirtd openstack-nova-compute
[root@linux-node2 ~]# systemctl start libvirtd openstack-nova-compute


[root@linux-node2 ~]# systemctl status openstack-nova-compute
● openstack-nova-compute.service - OpenStack Nova Compute Server
Loaded: loaded (/usr/lib/systemd/system/openstack-nova-compute.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2018-06-21 12:37:32 CST; 44s ago

[root@linux-node2 ~]# systemctl status libvirtd
● libvirtd.service - Virtualization daemon
Loaded: loaded (/usr/lib/systemd/system/libvirtd.service; enabled; vendor preset: enabled)
Active: active (running) since Thu 2018-06-21 12:37:26 CST; 1min 16s ago

[root@linux-node1 yum.repos.d]# openstack host list    # run on the controller node
+---------------------------+-------------+----------+
| Host Name | Service | Zone |
+---------------------------+-------------+----------+
| linux-node1.oldboyedu.com | consoleauth | internal |
| linux-node1.oldboyedu.com | conductor | internal |
| linux-node1.oldboyedu.com | scheduler | internal |
| linux-node1.oldboyedu.com | cert | internal |
| linux-node2.oldboyedu.com | compute | nova |
+---------------------------+-------------+----------+


[root@linux-node1 yum.repos.d]# nova image-list
+--------------------------------------+--------+--------+--------+
| ID | Name | Status | Server |
+--------------------------------------+--------+--------+--------+
| 41f4eb56-064e-4d9b-ace4-c147fb702dcf | cirros | ACTIVE | |
+--------------------------------------+--------+--------+--------+


###Neutron configuration
[root@linux-node1 ~]# openstack service create --name neutron --description "OpenStack Networking" network

[root@linux-node1 ~]# openstack endpoint create --region RegionOne network public http://192.168.56.11:9696

[root@linux-node1 ~]# openstack endpoint create --region RegionOne network internal http://192.168.56.11:9696

[root@linux-node1 ~]# openstack endpoint create --region RegionOne network admin http://192.168.56.11:9696


-----------------------------------------------------------------
[root@linux-node1 ~]# grep '^[a-z]' /etc/neutron/neutron.conf
core_plugin = ml2
service_plugins = router
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://192.168.56.11:8774/v2
auth_uri = http://192.168.56.11:5000
auth_url = http://192.168.56.11:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
connection = mysql://neutron:neutron@192.168.56.11:3306/neutron
auth_url = http://192.168.56.11:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = nova
lock_path = $state_path/lock
rabbit_host = 192.168.56.11
rabbit_port = 5672
rabbit_userid = openstack
rabbit_password = openstack

---------------------------------------------------------------

[root@linux-node1 ~]# grep '^[a-z]' /etc/neutron/plugins/ml2/ml2_conf.ini
type_drivers = flat,vlan,gre,vxlan,geneve
tenant_network_types = vlan,gre,vxlan,geneve
mechanism_drivers = openvswitch,linuxbridge
extension_drivers = port_security
flat_networks = physnet1
enable_ipset = True


[root@linux-node1 ~]# grep '^[a-z]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
physical_interface_mappings = physnet1:eth0
enable_vxlan = False
prevent_arp_spoofing = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = True

 

[root@linux-node1 ~]# grep '^[a-z]' /etc/neutron/dhcp_agent.ini
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True

 

[root@linux-node1 ~]# grep '^[a-z]' /etc/neutron/metadata_agent.ini
auth_uri = http://192.168.56.11:5000
auth_url = http://192.168.56.11:35357
auth_region = RegionOne
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
nova_metadata_ip = 192.168.56.11
metadata_proxy_shared_secret = neutron

 

=====================================================


[root@linux-node1 ~]# grep '^[a-z\[]' /etc/nova/nova.conf -n
1:[DEFAULT]
198:my_ip=192.168.56.11
344:enabled_apis=osapi_compute,metadata
506:auth_strategy=keystone
838:network_api_class=nova.network.neutronv2.api.API
930:linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
1064:security_group_api=neutron
1241:firewall_driver = nova.virt.firewall.NoopFirewallDriver
1423:rpc_backend=rabbit
1454:[api_database]
1504:[barbican]
1523:[cells]
1618:[cinder]
1644:[conductor]
1664:[cors]
1692:[cors.subdomain]
1720:[database]
1743:connection=mysql://nova:nova@192.168.56.11/nova
1917:[ephemeral_storage_encryption]
1937:[glance]
1944:host=$my_ip
1972:[guestfs]
1982:[hyperv]
2052:[image_file_url]
2063:[ironic]
2108:[keymgr]
2121:[keystone_authtoken]
2122:auth_uri = http://192.168.56.11:5000
2123:auth_url = http://192.168.56.11:35357
2124:auth_plugin = password
2125:project_domain_id = default
2126:user_domain_id = default
2127:project_name = service
2128:username = nova
2129:password = nova
2292:[libvirt]
2503:[matchmaker_redis]
2519:[matchmaker_ring]
2530:[metrics]
2559:[neutron]
2560:url = http://192.168.56.11:9696
2561:auth_url = http://192.168.56.11:35357
2562:auth_plugin = password
2563:project_domain_id = default
2564:user_domain_id = default
2565:region_name = RegionOne
2566:project_name = service
2567:username = neutron
2568:password = neutron
2576:service_metadata_proxy=true
2579:metadata_proxy_shared_secret = neutron
2715:[osapi_v21]
2746:[oslo_concurrency]
2761:lock_path=/var/lib/nova/tmp
2764:[oslo_messaging_amqp]
2814:[oslo_messaging_qpid]
2887:[oslo_messaging_rabbit]
2941:rabbit_host=192.168.56.11
2945:rabbit_port=5672
2957:rabbit_userid=openstack
2961:rabbit_password=openstack
3003:[oslo_middleware]
3024:[rdp]
3038:[serial_console]
3069:[spice]
3104:[ssl]
3120:[trusted_computing]
3148:[upgrade_levels]
3206:[vmware]
3310:[vnc]
3328:vncserver_listen=$my_ip
3333:vncserver_proxyclient_address=$my_ip
3344:[workarounds]
3383:[xenserver]
3571:[zookeeper]

===================================================================

[root@linux-node1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

[root@linux-node1 ~]# openstack user create --domain default --password=neutron neutron

[root@linux-node1 ~]# openstack role add --project service --user neutron admin

[root@linux-node1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

[root@linux-node1 ~]# systemctl restart openstack-nova-api


[root@linux-node1 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

[root@linux-node1 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service


[root@linux-node1 ~]# neutron agent-list
+--------------------------------------+--------------------+---------------------------+-------+----------------+---------------------------+
| id | agent_type | host | alive | admin_state_up | binary |
+--------------------------------------+--------------------+---------------------------+-------+----------------+---------------------------+
| dea54bb5-c414-4dd5-80f2-59ae86772add | Metadata agent | linux-node1.oldboyedu.com | :-) | True | neutron-metadata-agent |
| df89893e-6bc9-440f-8a87-74899d616457 | DHCP agent | linux-node1.oldboyedu.com | :-) | True | neutron-dhcp-agent |
| fbc70f3e-1fbd-43f4-9982-e7538a569153 | Linux bridge agent | linux-node1.oldboyedu.com | :-) | True | neutron-linuxbridge-agent |
+--------------------------------------+--------------------+---------------------------+-------+----------------+---------------------------+


[root@linux-node1 ~]# scp /etc/neutron/plugins/ml2/linuxbridge_agent.ini 192.168.56.12:/etc/neutron/plugins/ml2/

[root@linux-node1 ~]# scp /etc/neutron/neutron.conf 192.168.56.12:/etc/neutron/

[root@linux-node1 ~]# scp /etc/neutron/plugins/ml2/ml2_conf.ini 192.168.56.12:/etc/neutron/plugins/ml2/


====================================================================
[root@linux-node2 ~]# grep '^[a-z[]' /etc/nova/nova.conf
[DEFAULT]
my_ip=192.168.56.12
enabled_apis=osapi_compute,metadata
auth_strategy=keystone
network_api_class=nova.network.neutronv2.api.API
linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
security_group_api=neutron
firewall_driver = nova.virt.firewall.NoopFirewallDriver
rpc_backend=rabbit
[api_database]
[barbican]
[cells]
[cinder]
[conductor]
[cors]
[cors.subdomain]
[database]
connection=mysql://nova:nova@192.168.56.11/nova
[ephemeral_storage_encryption]
[glance]
host=192.168.56.11
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://192.168.56.11:5000
auth_url = http://192.168.56.11:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = nova
[libvirt]
virt_type=kvm
[matchmaker_redis]
[matchmaker_ring]
[metrics]
[neutron]
url = http://192.168.56.11:9696
auth_url = http://192.168.56.11:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host=192.168.56.11
rabbit_port=5672
rabbit_userid=openstack
rabbit_password=openstack
[oslo_middleware]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
novncproxy_base_url=http://192.168.56.11:6080/vnc_auto.html
vncserver_listen=0.0.0.0
vncserver_proxyclient_address=192.168.56.12
enabled=true
keymap=en-us
[workarounds]
[xenserver]
[zookeeper]
====================================================================

 

[root@linux-node2 ~]# systemctl restart openstack-nova-compute

[root@linux-node2 ml2]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

[root@linux-node2 ml2]# systemctl enable neutron-linuxbridge-agent.service

[root@linux-node2 ml2]# systemctl start neutron-linuxbridge-agent.service


[root@linux-node1 ~]# neutron agent-list
+--------------------------------------+--------------------+---------------------------+-------+----------------+---------------------------+
| id | agent_type | host | alive | admin_state_up | binary |
+--------------------------------------+--------------------+---------------------------+-------+----------------+---------------------------+
| 1979ef5a-a7d1-4e20-b2d3-10be3ede1e95 | Linux bridge agent | linux-node2.oldboyedu.com | :-) | True | neutron-linuxbridge-agent |
| dea54bb5-c414-4dd5-80f2-59ae86772add | Metadata agent | linux-node1.oldboyedu.com | :-) | True | neutron-metadata-agent |
| df89893e-6bc9-440f-8a87-74899d616457 | DHCP agent | linux-node1.oldboyedu.com | :-) | True | neutron-dhcp-agent |
| fbc70f3e-1fbd-43f4-9982-e7538a569153 | Linux bridge agent | linux-node1.oldboyedu.com | :-) | True | neutron-linuxbridge-agent |
+--------------------------------------+--------------------+---------------------------+-------+----------------+---------------------------+

 


#Configure the network

[root@linux-node1 ~]# neutron net-create flat --shared --provider:physical_network physnet1 --provider:network_type flat


Error: "Running without keystone AuthN requires that tenant_id is specified"

Fix: add auth_strategy = keystone to /etc/neutron/neutron.conf

[root@linux-node1 ~]# neutron subnet-create flat 192.168.56.0/24 --name flat-subnet --allocation-pool start=192.168.56.100,end=192.168.56.200 --dns-nameserver 192.168.56.2 --gateway 192.168.56.2

 

[root@linux-node1 ~]# neutron subnet-list
+--------------------------------------+-------------+-----------------+------------------------------------------------------+
| id | name | cidr | allocation_pools |
+--------------------------------------+-------------+-----------------+------------------------------------------------------+
| aaa18205-8cec-4367-9a3d-bb77cf96cda2 | flat-subnet | 192.168.56.0/24 | {"start": "192.168.56.100", "end": "192.168.56.200"} |
+--------------------------------------+-------------+-----------------+------------------------------------------------------+


#Create a VM

[root@linux-node1 ~]# ssh-keygen -q -N ""

[root@linux-node1 ~]# nova keypair-add --pub-key .ssh/id_rsa.pub mykey

[root@linux-node1 ~]# nova keypair-list
+-------+-------------------------------------------------+
| Name | Fingerprint |
+-------+-------------------------------------------------+
| mykey | 51:80:1e:1d:d0:12:ac:b1:7f:b4:dc:fe:e3:16:09:5b |
+-------+-------------------------------------------------+


[root@linux-node1 ~]# nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0

[root@linux-node1 ~]# nova secgroup-add-rule default tcp 22 22 0.0.0.0/0

[root@linux-node1 ~]# nova flavor-list
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
| 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
| 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
| 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
| 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+

 

[root@linux-node1 ~]# nova image-list
+--------------------------------------+--------+--------+--------+
| ID | Name | Status | Server |
+--------------------------------------+--------+--------+--------+
| 41f4eb56-064e-4d9b-ace4-c147fb702dcf | cirros | ACTIVE | |
+--------------------------------------+--------+--------+--------+

 

[root@linux-node1 ~]# nova net-list
+--------------------------------------+-------+------+
| ID | Label | CIDR |
+--------------------------------------+-------+------+
| 617c5e41-adbc-4446-9f99-79e4293c1d71 | flat | None |
+--------------------------------------+-------+------+

#The network must be specified by ID when creating a VM

[root@linux-node1 ~]# nova boot --flavor m1.tiny --image cirros --nic net-id=617c5e41-adbc-4446-9f99-79e4293c1d71 --security-group default --key-name mykey hello-instance


+--------------------------------------+-----------------------------------------------+
| Property | Value |
+--------------------------------------+-----------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | |
| OS-EXT-STS:power_state | 0 |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | yHARd7MLhog9 |
| config_drive | |
| created | 2018-06-20T20:46:24Z |
| flavor | m1.tiny (1) |
| hostId | |
| id | b206eb7c-c252-4d1d-a4cb-bc15ed53bd6f |
| image | cirros (41f4eb56-064e-4d9b-ace4-c147fb702dcf) |
| key_name | mykey |
| metadata | {} |
| name | hello-instance |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | default |
| status | BUILD |
| tenant_id | af59596f072b4a4fbcf773f0bca865da |
| updated | 2018-06-20T20:46:26Z |
| user_id | 69c76116829644cba88e8036ad1e0c8a |
+--------------------------------------+-----------------------------------------------+


##Check that the instance was created

[root@linux-node1 ~]# nova list
+--------------------------------------+----------------+--------+------------+-------------+---------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+----------------+--------+------------+-------------+---------------------+
| b206eb7c-c252-4d1d-a4cb-bc15ed53bd6f | hello-instance | ACTIVE | - | Running | flat=192.168.56.101 |
+--------------------------------------+----------------+--------+------------+-------------+---------------------+

[root@linux-node1 ~]# ssh cirros@192.168.56.101

#Get the URL of the VM's VNC console
[root@linux-node1 ~]# nova get-vnc-console hello-instance novnc

------------------------------------------------------------------
[root@linux-node1 conf.d]# vim /etc/openstack-dashboard/local_settings

ALLOWED_HOSTS = ['*',]
OPENSTACK_HOST = "192.168.56.11"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '192.168.56.11:11211',
    }
}

TIME_ZONE = "Asia/Shanghai"
---------------------------------------------------------------------

[root@linux-node1 conf.d]# systemctl restart httpd


##Install Cinder
[root@linux-node1 ~]# yum -y install openstack-cinder python-cinderclient


[root@linux-node1 ~]# vim /etc/cinder/cinder.conf

line 2516: connection = mysql://cinder:cinder@192.168.56.11/cinder

##Sync the database
[root@linux-node1 ~]# su -s /bin/sh -c "cinder-manage db sync" cinder

##Check that the tables were created
[root@linux-node1 ~]# mysql -h 192.168.56.11 -u cinder -pcinder -e "use cinder;show tables;"
+----------------------------+
| Tables_in_cinder |
+----------------------------+
| backups |
| cgsnapshots |
| consistencygroups |
| driver_initiator_data |
| encryption |
| image_volume_cache_entries |
| iscsi_targets |
| migrate_version |
| quality_of_service_specs |
| quota_classes |
| quota_usages |
| quotas |
| reservations |
| services |
| snapshot_metadata |
| snapshots |
| transfers |
| volume_admin_metadata |
| volume_attachment |
| volume_glance_metadata |
| volume_metadata |
| volume_type_extra_specs |
| volume_type_projects |
| volume_types |
| volumes |
+----------------------------+

[root@linux-node1 ~]# source admin-openrc.sh

[root@linux-node1 ~]# openstack user create --domain default --password-prompt cinder

[root@linux-node1 ~]# openstack role add --project service --user cinder admin

[root@linux-node1 ~]# vim /etc/nova/nova.conf

[cinder]
os_region_name = RegionOne

 

[root@linux-node1 ~]# grep "^[a-z[]" /etc/cinder/cinder.conf
[DEFAULT]
glance_host = 192.168.56.11
auth_strategy = keystone
rpc_backend = rabbit
[database]
connection = mysql://cinder:cinder@192.168.56.11/cinder
[fc-zone-manager]
[keymgr]
[keystone_authtoken]
auth_uri = http://192.168.56.11:5000
auth_url = http://192.168.56.11:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = cinder
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host = 192.168.56.11
rabbit_port = 5672
rabbit_userid = openstack
rabbit_password = openstack

[root@linux-node1 ~]# systemctl restart openstack-nova-api.service
[root@linux-node1 ~]# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
[root@linux-node1 ~]# systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service

#Create the services
[root@linux-node1 ~]# openstack service create --name cinder --description "OpenStack Block Storage" volume

[root@linux-node1 ~]# openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2

[root@linux-node1 ~]# openstack endpoint create --region RegionOne volume public http://192.168.56.11:8776/v1/%\(tenant_id\)s

[root@linux-node1 ~]# openstack endpoint create --region RegionOne volume internal http://192.168.56.11:8776/v1/%\(tenant_id\)s

[root@linux-node1 ~]# openstack endpoint create --region RegionOne volume admin http://192.168.56.11:8776/v1/%\(tenant_id\)s

[root@linux-node1 ~]# openstack endpoint create --region RegionOne volumev2 public http://192.168.56.11:8776/v2/%\(tenant_id\)s

[root@linux-node1 ~]# openstack endpoint create --region RegionOne volumev2 internal http://192.168.56.11:8776/v2/%\(tenant_id\)s

[root@linux-node1 ~]# openstack endpoint create --region RegionOne volumev2 admin http://192.168.56.11:8776/v2/%\(tenant_id\)s

#Add a disk (a new /dev/sdb) on the storage node
[root@linux-node2 ~]# pvcreate /dev/sdb
Physical volume "/dev/sdb" successfully created.

[root@linux-node2 ~]# vgcreate cinder-volumes /dev/sdb
Volume group "cinder-volumes" successfully created

[root@linux-node2 ~]# vim /etc/lvm/lvm.conf

line 142: filter = [ "a/sdb/", "r/.*/" ]
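
The filter makes LVM scan only /dev/sdb and reject every other device; the new volume group should still be visible afterwards:

[root@linux-node2 ~]# vgs cinder-volumes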

[root@linux-node2 ~]# yum -y install openstack-cinder targetcli python-oslo-policy


##Copy the config file from the controller node to the compute node
[root@linux-node1 ~]# scp /etc/cinder/cinder.conf 192.168.56.12:/etc/cinder/cinder.conf


#On the compute node, add the following
[root@linux-node2 ~]# vim /etc/cinder/cinder.conf

enabled_backends = lvm    # under [DEFAULT]
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm


[root@linux-node2 ~]# systemctl enable openstack-cinder-volume.service target.service

[root@linux-node2 ~]# systemctl start openstack-cinder-volume.service target.service
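
Back on the controller, the new cinder-volume service should report as up, and creating a 1 GB volume makes an easy end-to-end test (the volume name test-vol is arbitrary):

[root@linux-node1 ~]# cinder service-list
[root@linux-node1 ~]# cinder create --display-name test-vol 1
[root@linux-node1 ~]# cinder list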


#Open a browser and go to http://192.168.56.11/dashboard

#The four stages of creating a VM
1. Interact with Keystone to authenticate and obtain an auth_token.
2. Interact with Nova; nova-scheduler picks a nova-compute node for the VM.
3. nova-compute talks to the other services to gather the resources the VM needs (image, network, disk).
4. nova-compute calls the libvirt API, which drives KVM to create the VM.

 
