记录第二次搭建openstack环境过程

  1 controller
  2     eth0 192.168.101.211
  3     eth1 192.168.200.211
  4 
  5 compute
  6     eth0 192.168.101.212
  7     eth1 192.168.200.212
  8 
  9 network
 10     eth0 192.168.101.213
 11     eth1 192.168.200.213
 12 
 13 #我们使用的是mini版CentOS-7-x86_64-Minimal-1511.iso
 14 
 15 #一.以下步骤三台服务器都要配置
 16 #停止selinux,iptables
 17 #1.先安装vim
 18 yum install vim
 19 
 20 
 21 #2.配置hosts文件解析
 22 vim /etc/hosts
 23 192.168.101.211 controller
 24 192.168.101.212 compute
 25 192.168.101.213 network
 26 
 27 
 28 #3.安装ifconfig工具等其他网络工具,同时更新软件包
  29 yum makecache && yum install net-tools -y && yum update -y
 30 
 31 #4.安装时间同步部署
 32 yum install chrony -y
 33 
 34 
 35 #控制节点配置
 36 vim /etc/chrony.conf
 37 server 0.centos.pool.ntp.org iburst  #控制节点应指向外部NTP源,而不是指向自身
 38 allow 192.168.101.0/24  #允许管理网段(192.168.101.0/24)内的其他节点同步时间
 39 
 40 
 41 #其余节点
 42 vim /etc/chrony.conf
 43 server controller iburst
 44 
 45 #公共配置操作
 46 systemctl enable chronyd.service
 47 systemctl start chronyd.service
 48 
 49 #将硬件时钟调整为与本地时钟一致, 0 为设置为 UTC 时间
 50 timedatectl set-local-rtc 1
 51 #设置系统时区为上海
 52 timedatectl set-timezone Asia/Shanghai
 53 
 54 #防止自动更新
 55 yum install yum-plugin-priorities -y
 56 
 57 #安装yum源
 58 yum install centos-release-openstack-mitaka -y
 59 
 60 
 61 #所有节点执行
 62 yum upgrade
 63 yum install python-openstackclient -y
 64 yum install openstack-selinux -y
 65 
 66 #三:部署mariadb数据库
 67 #控制节点:
 68 yum install mariadb mariadb-server python2-PyMySQL -y
 69 
 70 
 71 vim /etc/my.cnf.d/openstack.cnf
 72 [mysqld]
 73 #控制节点管理网络ip
 74 bind-address = 192.168.101.211
 75 default-storage-engine = innodb
 76 innodb_file_per_table
 77 max_connections = 4096
 78 collation-server = utf8_general_ci
 79 character-set-server = utf8
 80 
 81 
 82 systemctl enable mariadb.service
 83 systemctl start mariadb.service
 84 mysql_secure_installation
 85 
 86 
 87 #四:部署mongoDB
 88 #控制节点:
 89 yum install mongodb-server mongodb -y
 90 
 91 vim /etc/mongod.conf
 92 #控制节点管理网络ip
 93 bind_ip = 192.168.101.211
 94 smallfiles = true
 95 
 96 
 97 systemctl enable mongod.service
 98 systemctl start mongod.service
 99 
100 #五:部署消息队列rabbitmq(验证方式:先执行 rabbitmq-plugins enable rabbitmq_management 启用管理插件,再访问 http://192.168.101.211:15672 用户guest 密码guest)
101 #控制节点
102 yum install rabbitmq-server -y
103 
104 systemctl enable rabbitmq-server.service
105 systemctl start rabbitmq-server.service
106 
107 #设置rabbitmq用户密码
108 rabbitmqctl add_user openstack password
109 #为新建用户设置权限
110 rabbitmqctl set_permissions openstack ".*" ".*" ".*"
111 
112 
113 #六:部署memcached缓存(为keystone服务缓存tokens)
114 #控制节点
115 yum install memcached python-memcached -y
116 
117 systemctl enable memcached.service
118 systemctl start memcached.service
119 
120 
121 
122 
123 
124 
125 
126 
127 #第二部分: 认证服务keystone部署
128 #一:安装和配置服务
129 #1.建库建用户
130 mysql -u root -p
131 CREATE DATABASE keystone;
132 GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'password';
133 GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'password';
134 GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'controller' IDENTIFIED BY 'password';
135 flush privileges;
136 
137 #2.安装wsgi
138 yum install openstack-keystone httpd mod_wsgi -y
139 
140 #3.配置keystone
141 vim /etc/keystone/keystone.conf
142 [DEFAULT]
143 admin_token = password #建议用命令制作token:openssl rand -hex 10
144 
145 [database]
146 connection = mysql+pymysql://keystone:password@controller/keystone
147 
148 [token]
149 provider = fernet
150 #Token Provider:UUID, PKI, PKIZ, or Fernet
151 #http://blog.csdn.net/miss_yang_cloud/article/details/49633719
152 
153 
154 #4.同步修改到数据库
155 su -s /bin/sh -c "keystone-manage db_sync" keystone
156 
157 #5.初始化fernet keys
158 keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
159 
160 
161 #6.配置apache服务
162 vim /etc/httpd/conf/httpd.conf
163 ServerName controller
164 
165 vim /etc/httpd/conf.d/wsgi-keystone.conf
166 Listen 5000
167 Listen 35357
168 
169 <VirtualHost *:5000>
170 WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
171 WSGIProcessGroup keystone-public
172 WSGIScriptAlias / /usr/bin/keystone-wsgi-public
173 WSGIApplicationGroup %{GLOBAL}
174 WSGIPassAuthorization On
175 ErrorLogFormat "%{cu}t %M"
176 ErrorLog /var/log/httpd/keystone-error.log
177 CustomLog /var/log/httpd/keystone-access.log combined
178 
179 <Directory /usr/bin>
180 Require all granted
181 </Directory>
182 </VirtualHost>
183 
184 <VirtualHost *:35357>
185 WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
186 WSGIProcessGroup keystone-admin
187 WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
188 WSGIApplicationGroup %{GLOBAL}
189 WSGIPassAuthorization On
190 ErrorLogFormat "%{cu}t %M"
191 ErrorLog /var/log/httpd/keystone-error.log
192 CustomLog /var/log/httpd/keystone-access.log combined
193 
194 <Directory /usr/bin>
195 Require all granted
196 </Directory>
197 </VirtualHost>
198 
199 
200 #7.启动服务:
201 systemctl enable httpd.service
202 systemctl start httpd.service
203 
204 
205 
206 #二:创建服务实体和访问端点
207 
208 #1.实现配置管理员环境变量,用于获取后面创建的权限
209 export OS_TOKEN=password
210 export OS_URL=http://controller:35357/v3
211 export OS_IDENTITY_API_VERSION=3
212 
213 #2.基于上一步给的权限,创建认证服务实体(目录服务)
214 openstack service create --name keystone --description "OpenStack Identity" identity
215 #3.基于上一步建立的服务实体,创建访问该实体的三个api端点
216 
217 openstack endpoint create --region RegionOne identity public http://controller:5000/v3
218 openstack endpoint create --region RegionOne identity internal http://controller:5000/v3
219 openstack endpoint create --region RegionOne identity admin http://controller:35357/v3
220 
221 
222 
223 
224 #三:创建域,租户,用户,角色,把四个元素关联到一起
225 #建立一个公共的域名:
226 openstack domain create --description "Default Domain" default
227 
228 #管理员:admin
229 openstack project create --domain default --description "Admin Project" admin
230 #密码admin
231 openstack user create --domain default --password-prompt admin
232 openstack role create admin
233 openstack role add --project admin --user admin admin
234 
235 #普通用户:demo
236 openstack project create --domain default --description "Demo Project" demo
237 #密码demo
238 openstack user create --domain default --password-prompt demo
239 openstack role create user
240 openstack role add --project demo --user demo user
241 
242 #为后续的服务创建统一租户service
243 #解释:后面每搭建一个新的服务都需要在keystone中执行四种操作:1.建租户 2.建用户 3.建角色 4.做关联
244 #后面所有的服务公用一个租户service,都是管理员角色admin,所以实际上后续的服务安装关于keystone
245 #的操作只剩2,4
246 openstack project create --domain default --description "Service Project" service
247 
248 
249 
250 
251 #四:验证操作:
252 vim /etc/keystone/keystone-paste.ini
253 #在[pipeline:public_api], [pipeline:admin_api], and [pipeline:api_v3] 三个地方
254 #移走:admin_token_auth
255 
256 unset OS_TOKEN OS_URL
257 
258 openstack --os-auth-url http://controller:35357/v3 --os-project-domain-name default --os-user-domain-name default --os-project-name admin --os-username admin token issue
259 Password:密码admin
260 +------------+-----------------------------------------------------------------+
261 | Field      | Value                                                           |
262 +------------+-----------------------------------------------------------------+
263 | expires    | 2016-10-06T11:13:54.248329Z                                     |
264 | id         | gAAAAABX9iPib_UNpY-8RZmatyjqnWlIz5rN3HknZ-OB260hzODXgzK8GO7him2 |
265 |            | aAnpOTtLagMaKBxhej5FqLcbqrgyvzYwfG23eEGTZAWNxhCePNmnVVZSDDzalFE |
266 |            | WhvbWb5BiPkVyoPJx2KxLmPsLsmUsHZmLOim5qqeAl5kg8CQNviob50Ls       |
267 | project_id | a47533d927aa475a8bcef222c61421ee                                |
268 | user_id    | acfaac29ce1d48958c2a1e22a3daab12                                |
269 +------------+-----------------------------------------------------------------+
270 
271 
272 
273 
274 #五:新建客户端脚本文件
275 
276 #管理员:admin-openrc
277 vim admin-openrc
278 export OS_PROJECT_DOMAIN_NAME=default
279 export OS_USER_DOMAIN_NAME=default
280 export OS_PROJECT_NAME=admin
281 export OS_USERNAME=admin
282 export OS_PASSWORD=admin
283 export OS_AUTH_URL=http://controller:35357/v3
284 export OS_IDENTITY_API_VERSION=3
285 export OS_IMAGE_API_VERSION=2
286 
287 #普通用户demo:demo-openrc
288 vim demo-openrc
289 export OS_PROJECT_DOMAIN_NAME=default
290 export OS_USER_DOMAIN_NAME=default
291 export OS_PROJECT_NAME=demo
292 export OS_USERNAME=demo
293 export OS_PASSWORD=demo
294 export OS_AUTH_URL=http://controller:5000/v3
295 export OS_IDENTITY_API_VERSION=3
296 export OS_IMAGE_API_VERSION=2
297 
298 #效果:
299 source admin-openrc
300 openstack token issue
301 
302 
303 
304 
305 
306 #第三部分:部署镜像服务
307 #一:安装和配置服务
308 #1.建库建用户
309 mysql -u root -p
310 CREATE DATABASE glance;
311 GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'password';
312 GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'password';
313 GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'controller' IDENTIFIED BY 'password';
314 flush privileges;
315 
316 #2.keystone认证操作:
317 #上面提到过:所有后续项目的部署都统一放到一个租户service里,然后需要为每个项目建立用户,建管理员角色,建立关联
318 . admin-openrc
319 #密码password
320 openstack user create --domain default --password-prompt glance
321 
322 openstack role add --project service --user glance admin
323 
324 #建立服务实体
325 openstack service create --name glance --description "OpenStack Image" image
326 
327 #建端点
328 openstack endpoint create --region RegionOne image public http://controller:9292
329 openstack endpoint create --region RegionOne image internal http://controller:9292
330 openstack endpoint create --region RegionOne image admin http://controller:9292
331 
332 #3.安装软件
333 yum install openstack-glance -y
334 
335 
336 
337 #4.修改配置:
338 vim /etc/glance/glance-api.conf
339 
340 [database]
341 #这里的数据库连接配置是用来初始化生成数据库表结构,不配置无法生成数据库表结构
342 #glance-api不配置database对创建vm无影响,对使用metadata有影响
343 #日志报错:ERROR glance.api.v2.metadef_namespaces
344 connection = mysql+pymysql://glance:password@controller/glance
345 
346 [keystone_authtoken]
347 auth_url = http://controller:5000
348 memcached_servers = controller:11211
349 auth_type = password
350 project_domain_name = default
351 user_domain_name = default
352 project_name = service
353 username = glance
354 password = password
355 
356 [paste_deploy]
357 flavor = keystone
358 
359 [glance_store]
360 stores = file,http
361 default_store = file
362 filesystem_store_datadir = /var/lib/glance/images/
363 
364 vim /etc/glance/glance-registry.conf
365 
366 [database]
367 #这里的数据库配置是用来glance-registry检索镜像元数据
368 connection = mysql+pymysql://glance:password@controller/glance
369 
370 
371 #新建目录:
372 mkdir /var/lib/glance/images/
373 chown glance. /var/lib/glance/images/
374 
375 #同步数据库:(此处会报一些关于future的问题,自行忽略)
376 su -s /bin/sh -c "glance-manage db_sync" glance
377 
378 #启动服务:
379 systemctl enable openstack-glance-api.service openstack-glance-registry.service
380 systemctl start openstack-glance-api.service openstack-glance-registry.service
381 
382 
383 
384 #二:验证操作:
385 . admin-openrc
386 wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
387 #(本地下载:wget http://172.16.209.100/cirros-0.3.4-x86_64-disk.img)
388 
389 openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public
390 
391 openstack image list
392 
393 
394 
395 
396 
397 
398 
399 #第四部分:部署compute服务
400 
401 #一:控制节点配置
402 #1.建库建用户
403 CREATE DATABASE nova_api;
404 CREATE DATABASE nova;
405 GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'password';
406 GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'password';
407 GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'controller' IDENTIFIED BY 'password';
408 GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'password';
409 GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'password';
410 GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'controller' IDENTIFIED BY 'password';
411 
412 flush privileges;
413 
414 #2.keystone相关操作
415 
416 . admin-openrc
417 openstack user create --domain default --password-prompt nova
418 openstack role add --project service --user nova admin
419 openstack service create --name nova --description "OpenStack Compute" compute
420 
421 openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1/%\(tenant_id\)s
422 openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\(tenant_id\)s
423 openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\(tenant_id\)s
424 
425 
426 #3.安装软件包:
427 yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler -y
428 
429 
430 #4.修改配置:
431 vim /etc/nova/nova.conf
432 
433 [DEFAULT]
434 enabled_apis = osapi_compute,metadata
435 rpc_backend = rabbit
436 auth_strategy = keystone
437 #下面的为管理ip
438 my_ip = 192.168.101.211
439 use_neutron = True
440 firewall_driver = nova.virt.firewall.NoopFirewallDriver
441 
442 [api_database]
443 connection = mysql+pymysql://nova:password@controller/nova_api
444 
445 [database]
446 connection = mysql+pymysql://nova:password@controller/nova
447 
448 [oslo_messaging_rabbit]
449 rabbit_host = controller
450 rabbit_userid = openstack
451 rabbit_password = password
452 
453 [keystone_authtoken]
454 auth_url = http://controller:5000
455 memcached_servers = controller:11211
456 auth_type = password
457 project_domain_name = default
458 user_domain_name = default
459 project_name = service
460 username = nova
461 password = password
462 
463 
464 [vnc]
465 #下面的为管理ip
466 vncserver_listen = 192.168.101.211
467 #下面的为管理ip
468 vncserver_proxyclient_address = 192.168.101.211
469 
470 [oslo_concurrency]
471 lock_path = /var/lib/nova/tmp
472 
473 #5.同步数据库:(此处会报一些关于future的问题,自行忽略)
474 su -s /bin/sh -c "nova-manage api_db sync" nova
475 su -s /bin/sh -c "nova-manage db sync" nova
476 
477 #6.启动服务
478 systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
479 systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
480 
481 
482 
483 
484 
485 
486 
487 #二:计算节点配置
488 #1.安装软件包:
489 yum install openstack-nova-compute libvirt-daemon-lxc -y
490 
491 #2.修改配置:
492 vim /etc/nova/nova.conf
493 
494 [DEFAULT]
495 rpc_backend = rabbit
496 auth_strategy = keystone
497 #计算节点管理网络ip
498 my_ip = 192.168.101.212
499 use_neutron = True
500 firewall_driver = nova.virt.firewall.NoopFirewallDriver
501 
502 [oslo_messaging_rabbit]
503 rabbit_host = controller
504 rabbit_userid = openstack
505 rabbit_password = password
506 
507 [vnc]
508 enabled = True
509 vncserver_listen = 0.0.0.0
510 #计算节点管理网络ip
511 vncserver_proxyclient_address = 192.168.101.212
512 #控制节点管理网络ip
513 novncproxy_base_url = http://192.168.101.211:6080/vnc_auto.html
514 
515 [glance]
516 api_servers = http://controller:9292
517 
518 [oslo_concurrency]
519 lock_path = /var/lib/nova/tmp
520 
521 #3.如果在不支持虚拟化的机器上部署nova,请确认
522 egrep -c '(vmx|svm)' /proc/cpuinfo
523 #结果为0
524 vim /etc/nova/nova.conf
525 [libvirt]
526 virt_type = qemu
527 
528 #4.启动服务
529 systemctl enable libvirtd.service openstack-nova-compute.service
530 systemctl start libvirtd.service openstack-nova-compute.service
531 
532 
533 
534 
535 
536 
537 
538 #三:验证
539 #控制节点
540 source admin-openrc
541 openstack compute service list
542 [root@controller ~]# openstack compute service list
543 +----+------------------+------------+----------+---------+-------+----------------------------+
544 | Id | Binary           | Host       | Zone     | Status  | State | Updated At                 |
545 +----+------------------+------------+----------+---------+-------+----------------------------+
546 |  1 | nova-scheduler   | controller | internal | enabled | up    | 2016-10-06T15:11:52.000000 |
547 |  2 | nova-conductor   | controller | internal | enabled | up    | 2016-10-06T15:11:52.000000 |
548 |  3 | nova-consoleauth | controller | internal | enabled | up    | 2016-10-06T15:11:51.000000 |
549 |  6 | nova-compute     | compute    | nova     | enabled | up    | 2016-10-06T15:11:50.000000 |
550 +----+------------------+------------+----------+---------+-------+----------------------------+
551 
552 
553 
554 
555 
556 
557 
558 
559 
560 #第五部分:部署网络服务
561 
562 #一:控制节点配置
563 #1.建库建用户
564 CREATE DATABASE neutron;
565 GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'password';
566 GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'password';
567 GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'controller' IDENTIFIED BY 'password';
568 flush privileges;
569 
570 #2.keystone相关
571 . admin-openrc
572 #密码password
573 openstack user create --domain default --password-prompt neutron
574 openstack role add --project service --user neutron admin
575 openstack service create --name neutron --description "OpenStack Networking" network
576 openstack endpoint create --region RegionOne network public http://controller:9696
577 openstack endpoint create --region RegionOne network internal http://controller:9696
578 openstack endpoint create --region RegionOne network admin http://controller:9696
579 
580 
581 #3.安装软件包
582 yum install openstack-neutron openstack-neutron-ml2 python-neutronclient which  -y
583 
584 
585 #4.配置服务器组件
586 vim /etc/neutron/neutron.conf
587 #在[数据库]节中,配置数据库访问:
588 [DEFAULT]
589 core_plugin = ml2
590 service_plugins = router
591 #下面配置:启用重叠IP地址功能
592 allow_overlapping_ips = True
593 rpc_backend = rabbit
594 auth_strategy = keystone
595 notify_nova_on_port_status_changes = True
596 notify_nova_on_port_data_changes = True
597 
598 [oslo_messaging_rabbit]
599 rabbit_host = controller
600 rabbit_userid = openstack
601 rabbit_password = password
602 
603 [database]
604 connection = mysql+pymysql://neutron:password@controller/neutron
605 
606 [keystone_authtoken]
607 auth_url = http://controller:5000
608 memcached_servers = controller:11211
609 auth_type = password
610 project_domain_name = default
611 user_domain_name = default
612 project_name = service
613 username = neutron
614 password = password
615 
616 [nova]
617 auth_url = http://controller:5000
618 auth_type = password
619 project_domain_name = default
620 user_domain_name = default
621 region_name = RegionOne
622 project_name = service
623 username = nova
624 password = password
625 
626 [oslo_concurrency]
627 lock_path = /var/lib/neutron/tmp
628 
629 vim /etc/neutron/plugins/ml2/ml2_conf.ini
630 [ml2]
631 type_drivers = flat,vlan,vxlan,gre
632 tenant_network_types = vxlan
633 mechanism_drivers = openvswitch,l2population
634 extension_drivers = port_security
635 
636 [ml2_type_flat]
637 flat_networks = provider
638 
639 [ml2_type_vxlan]
640 vni_ranges = 1:1000
641 
642 [securitygroup]
643 enable_ipset = True
644 
645 
646 vim /etc/nova/nova.conf
647 [neutron]
648 url = http://controller:9696
649 auth_url = http://controller:5000
650 auth_type = password
651 project_domain_name = default
652 user_domain_name = default
653 region_name = RegionOne
654 project_name = service
655 username = neutron
656 password = password
657 service_metadata_proxy = True
658 
659 #5.创建连接
660 
661 ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
662 
663 #6.同步数据库:(此处会报一些关于future的问题,自行忽略)
664 su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
665 
666 #7.重启nova服务
667 systemctl restart openstack-nova-api.service
668 
669 #8.启动neutron服务
670 systemctl enable neutron-server.service
671 systemctl start neutron-server.service
672 
673 
674 
675 
676 
677 
678 
679 
680 #二:网络节点配置
681 
682 #1. 编辑 /etc/sysctl.conf
683 net.ipv4.ip_forward=1
684 net.ipv4.conf.all.rp_filter=0
685 net.ipv4.conf.default.rp_filter=0
686 
687 #2.执行下列命令,立即生效
688 sysctl -p
689 
690 #3.安装软件包
691 yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch -y
692 
693 #4.配置组件
694 vim /etc/neutron/neutron.conf
695 [DEFAULT]
696 core_plugin = ml2
697 service_plugins = router
698 allow_overlapping_ips = True
699 rpc_backend = rabbit
700 auth_strategy = keystone
701 
702 
703 [oslo_messaging_rabbit]
704 rabbit_host = controller
705 rabbit_userid = openstack
706 rabbit_password = password
707 
708 [oslo_concurrency]
709 lock_path = /var/lib/neutron/tmp
710 
711 #5.编辑
712 vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
713 [ovs]
714 #下面ip为网络节点数据网络ip
715 local_ip=192.168.200.213
716 bridge_mappings=external:br-ex
717 
718 [agent]
719 tunnel_types=gre,vxlan
720 l2_population=True
721 prevent_arp_spoofing=True
722 
723 
724 #7.配置L3代理。编辑
725 vim /etc/neutron/l3_agent.ini
726 [DEFAULT]
727 interface_driver=neutron.agent.linux.interface.OVSInterfaceDriver
728 external_network_bridge=br-ex
729 
730 #8.配置DHCP代理。编辑
731 vim /etc/neutron/dhcp_agent.ini
732 
733 [DEFAULT]
734 interface_driver=neutron.agent.linux.interface.OVSInterfaceDriver
735 dhcp_driver=neutron.agent.linux.dhcp.Dnsmasq
736 enable_isolated_metadata=True
737 
738 #9.配置元数据代理。编辑
739 vim /etc/neutron/metadata_agent.ini
740 [DEFAULT]
741 nova_metadata_ip=controller
742 metadata_proxy_shared_secret=password
743 
744 #10.启动服务
745 
746 #网络节点:
747 systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
748 systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
749 
750 
751 
752 #11.建网桥
753 #如果你单独一块网卡作为桥接网卡,使用这步指定你的网卡名,如果是虚拟网卡桥接,则按后面的网桥方式添加
754 ovs-vsctl add-br br-ex
755 # 这块为真实的网卡
756 ovs-vsctl add-port br-ex eth2
757 
758 
759 #注意,如果网卡数量有限,想用网络节点的管理网络网卡作为br-ex绑定的物理网卡
760 #那么需要将网络节点管理网络网卡ip去掉,建立br-ex的配置文件,ip使用原管理网ip
761 ovs-vsctl add-br br-ex
762 [root@network ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
763 DEVICE=eth0
764 TYPE=Ethernet
765 ONBOOT="yes"
766 BOOTPROTO="none"
767 [root@network ~]# cat /etc/sysconfig/network-scripts/ifcfg-br-ex
768 DEVICE=br-ex
769 TYPE=Ethernet
770 ONBOOT="yes"
771 BOOTPROTO="none"
772 HWADDR=bc:ee:7b:78:7b:a7
773 IPADDR=192.168.101.213
774 GATEWAY=192.168.101.1
775 NETMASK=255.255.255.0
776 DNS1=202.106.0.20
777 DNS2=8.8.8.8
778 NM_CONTROLLED=no #注意加上这一句否则网卡可能启动不成功
779 
780 systemctl restart network
781 ovs-vsctl add-port br-ex eth0
782 
783 
784 
785 
786 
787 
788 
789 
790 
791 #三:计算节点配置
792 #1. 编辑
793 vim /etc/sysctl.conf
794 net.ipv4.conf.all.rp_filter=0
795 net.ipv4.conf.default.rp_filter=0
796 net.bridge.bridge-nf-call-iptables=1
797 net.bridge.bridge-nf-call-ip6tables=1
798 
799 #2.
800 sysctl -p
801 
802 #3.
803 yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch -y
804 
805 #4.编辑
806 vim /etc/neutron/neutron.conf
807 
808 [DEFAULT]
809 rpc_backend = rabbit
810 auth_strategy = keystone
811 
812 
813 [oslo_messaging_rabbit]
814 rabbit_host = controller
815 rabbit_userid = openstack
816 rabbit_password = password
817 
818 [oslo_concurrency]
819 lock_path = /var/lib/neutron/tmp
820 
821 #5.编辑
822 vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
823 [ovs]
824 #下面ip为计算节点数据网络ip
825 local_ip = 192.168.200.212
826 #bridge_mappings = vlan:br-vlan
827 [agent]
828 tunnel_types = gre,vxlan
829 l2_population = True
830 prevent_arp_spoofing = True
831 
832 [securitygroup]
833 firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
834 enable_security_group = True
835 
836 #7.编辑
837 vim /etc/nova/nova.conf
838 
839 [neutron]
840 url = http://controller:9696
841 auth_url = http://controller:5000
842 auth_type = password
843 project_domain_name = default
844 user_domain_name = default
845 region_name = RegionOne
846 project_name = service
847 username = neutron
848 password = password
849 
850 #8.启动服务
851 systemctl enable neutron-openvswitch-agent.service
852 systemctl start neutron-openvswitch-agent.service
853 systemctl restart openstack-nova-compute.service
854 
855 
856 
857 
858 
859 
860 
861 
862 
863 #第六部分:部署控制面板dashboard
864 #在控制节点
865 #1.安装软件包
866 yum install openstack-dashboard -y
867 
868 #2.配置
869 vim /etc/openstack-dashboard/local_settings
870 
871 
872 ALLOWED_HOSTS = ['*', ]
873 
874 SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
875 
876 OPENSTACK_API_VERSIONS = {
877     "identity": 3,
878     "image": 2,
879     "volume": 2,
880 }
881 
882 OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
883 
884 OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
885 
886 CACHES = {
887     'default': {
888         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
889         'LOCATION': 'controller:11211',
890     }
891 }
892 
893 OPENSTACK_HOST = "controller"
894 OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
895 OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
896 
897 
898 TIME_ZONE = "UTC"
899 
900 #3.启动服务
901 systemctl enable httpd.service memcached.service
902 systemctl restart httpd.service memcached.service
903 
904 
905 #4.验证;
906 http://192.168.101.211/dashboard
907 
908 
909 
910 
911 
912 #总结:
913 #与keystone打交道的只有api层,所以不要到处乱配
914 #建主机的时候由nova-compute负责调用各个api,所以不要在控制节点配置啥调用
915 #ml2是neutron的core plugin,只需要在控制节点配置
916 #网络节点只需要配置相关的agent
917 #各组件的api除了接收请求外还有很多其他功能,比方说验证请求的合理性,控制节点nova.conf需要配neutron的api、认证,因为nova boot时需要去验证用户提交网络的合理性,控制节点neutron.conf需要配nova的api、认证,因为你删除网络端口时需要通过nova-api去查是否有主机正在使用端口。计算节点nova.conf需要配neutron,因为nova-compute发送请求给neutron-server来创建端口。这里的端口指的是'交换机上的端口'
918 #不明白为啥?或者不懂我在说什么,请好好研究openstack各组件通信机制和主机创建流程,或者来听我的课哦,一般博文都不教真的。
919 #
920 #网络故障排查:
921 #网络节点:
922 [root@network ~]# ip netns show
923 qrouter-7096bd89-908a-4e9d-90dc-a539b024f1d5
924 qdhcp-8f4fb890-4328-4e87-a3c4-b4906e7e34fb
925 qdhcp-0b550e2d-7c6f-42fa-84f0-13a4f9a58c50
926 [root@network ~]# ip netns exec qrouter-7096bd89-908a-4e9d-90dc-a539b024f1d5 bash
927 [root@network ~]# ping www.baidu.com
928 PING www.a.shifen.com (61.135.169.125) 56(84) bytes of data.
929 64 bytes from 61.135.169.125: icmp_seq=1 ttl=56 time=7.01 ms
930 64 bytes from 61.135.169.125: icmp_seq=2 ttl=56 time=11.6 ms
931 
932 
933 #如果无法ping通,那么退出namespace
934 ovs-vsctl del-br br-ex
935 ovs-vsctl del-br br-int
936 ovs-vsctl del-br br-tun
937 ovs-vsctl add-br br-int
938 ovs-vsctl add-br br-ex
939 ovs-vsctl add-port br-ex eth0
940 systemctl restart neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

 

posted @ 2016-10-07 01:08  曾春云  阅读(836)  评论(0)    收藏  举报