OpenStack Mitaka installation and deployment

1. Base environment

vi /etc/resolv.conf
nameserver 8.8.8.8
yum -y install lrzsz net-tools
systemctl stop firewalld.service
systemctl disable firewalld.service
setenforce 0
vi /etc/sysconfig/selinux
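
The file edit above is what keeps SELinux off across reboots (setenforce 0 only lasts until the next boot); a minimal sketch of the change:

# /etc/sysconfig/selinux
SELINUX=disabled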

vi /etc/hosts
192.168.2.1 node1
192.168.2.2 node2
192.168.2.3 node3

2. Set the hostname

hostnamectl set-hostname node1

# reboot the host afterwards for the change to take full effect

3. Install the NTP service

vi /etc/ntp.conf and add the following:
restrict default nomodify
server  127.127.1.0
fudge   127.127.1.0 stratum 8

Restart the NTP service:
sudo systemctl restart ntpd
sudo systemctl enable ntpd.service
Set the correct time on the NTP server first:

date -s "2016-06-21 16:43:00"
watch -n 1 ntpq -p

Sync the hardware clock and the system clock (note: --hctosys copies hardware to system; since the system time was just set with date -s, --systohc is the direction that persists it):
# hwclock --hctosys    # or
# clock --hctosys
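
The ntp.conf above serves time from this node's local clock; the other nodes should sync from it rather than define their own. A sketch for node2 and node3, assuming node1 resolves through /etc/hosts:

# on node2 and node3: /etc/ntp.conf
server node1 iburst

systemctl restart ntpd
ntpq -p   # node1 should be listed, with 'reach' climbing toward 377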

  

4. Set up SSH trust between the nodes

ssh-keygen
            
ssh-copy-id root@node1
ssh-copy-id root@node2
ssh-copy-id root@node3
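
A quick check that key-based login works; each command should print a hostname without prompting for a password:

for h in node1 node2 node3; do ssh root@$h hostname; done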

 

5. Configure the OpenStack yum repository

cd /etc/yum.repos.d

vi m.repo
[mitaka]
name=mitaka.repo
baseurl=http://mirrors.163.com/centos/7.3.1611/cloud/x86_64/openstack-mitaka/
enabled=1
gpgcheck=0

Note: the path under the mirror changes over time.

6. Rebuild the yum cache and update

yum makecache &&  yum update -y
Using the stock CentOS repos instead:

yum install yum-plugin-priorities -y # protects against unwanted automatic package replacement

yum install centos-release-openstack-mitaka -y # run this if you are not using the custom repo from step 5

7. On all nodes, update and install the OpenStack client

yum upgrade -y
yum install python-openstackclient -y
yum install openstack-selinux -y

  

8. Deploy the MariaDB database on the controller node

yum install mariadb mariadb-server python2-PyMySQL -y


Edit:

vi /etc/my.cnf.d/openstack.cnf


[mysqld]

bind-address = 10.112.17.80
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8


Start the service:

systemctl enable mariadb.service
systemctl start mariadb.service

Secure the database installation:
mysql_secure_installation

  

9. Deploy MongoDB for the Telemetry service

yum install mongodb-server mongodb -y


Edit /etc/mongod.conf:

bind_ip = 192.168.2.1
smallfiles = true


Start the service:

systemctl enable mongod.service
systemctl start mongod.service

  

10. Deploy the RabbitMQ message queue (verify at http://172.16.209.104:15672/, user: guest, password: guest)

On the controller node:

yum install rabbitmq-server -y


Start the service:

systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service


Create a RabbitMQ user with a password:

rabbitmqctl add_user openstack 123


Grant the new openstack user full permissions:

rabbitmqctl set_permissions openstack ".*" ".*" ".*"
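
The verification URL on port 15672 mentioned above is served by the management plugin, which is not enabled by default; enable it with:

rabbitmq-plugins enable rabbitmq_management
systemctl restart rabbitmq-server.service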

  

11. Deploy the memcached cache (caches tokens for the keystone service)

On the controller node:

yum install memcached python-memcached -y


Start the service:

systemctl enable memcached.service
systemctl start memcached.service

systemctl restart memcached.service

  

12. Deploy the Keystone identity service

1. Create the database and user

mysql -u root -p

CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'linux-3' IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123';
flush privileges;

 
2.yum install openstack-keystone httpd mod_wsgi -y


3. Edit /etc/keystone/keystone.conf


[DEFAULT]

admin_token = 123  # better: generate a token with: openssl rand -hex 10


[database]

connection = mysql+pymysql://keystone:123@linux-3/keystone


[token]

provider = fernet

#Token Provider:UUID, PKI, PKIZ, or Fernet #http://blog.csdn.net/miss_yang_cloud/article/details/49633719



4. Populate the keystone database

su -s /bin/sh -c "keystone-manage db_sync" keystone


5. Initialize the Fernet keys

keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
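
To confirm the key repository was created (fernet_setup generates an initial pair of keys):

ls /etc/keystone/fernet-keys/
# expected output: 0  1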

  

13. Configure the Apache service

Edit /etc/httpd/conf/httpd.conf:

ServerName node1


Edit /etc/httpd/conf.d/wsgi-keystone.conf and add the following:

Listen 5000
Listen 35357
<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined
    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>
<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined
    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>
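
As an alternative to typing the file by hand, the Mitaka openstack-keystone package ships an equivalent config that can be symlinked into place:

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/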

Start the service:

systemctl enable httpd.service
systemctl start httpd.service

systemctl restart httpd.service

  

14. Create the service entity and API endpoints

1. Set admin environment variables; these supply the credentials for the creation steps below

export OS_TOKEN=123
export OS_URL=http://linux-3:35357/v3
export OS_IDENTITY_API_VERSION=3


2. With those credentials, create the identity service entity (the service catalog)

openstack service create \
  --name keystone --description "OpenStack Identity" identity

3. Create the three API endpoints for the service entity just created


openstack endpoint create --region RegionOne \
  identity public http://linux-3:5000/v3

openstack endpoint create --region RegionOne \
  identity internal http://linux-3:5000/v3

openstack endpoint create --region RegionOne \
  identity admin http://linux-3:35357/v3
  
openstack catalog list
Part 3: create a domain, projects, users, and roles, and tie the four together

Create a common domain:

openstack domain create --description "Default Domain" default


Administrator: admin

openstack project create --domain default \
  --description "Admin Project" admin

openstack user create --domain default \
  --password-prompt admin
(enter 123 at the password prompt)
  
openstack role create admin
openstack role add --project admin --user admin admin


Regular user: demo

openstack project create --domain default \
  --description "Demo Project" demo

openstack user create --domain default \
  --password-prompt demo
(enter 123 at the password prompt)
  
openstack role create user
openstack role add --project demo --user demo user


Create the shared service project used by all later services.

Explanation: each new service deployed later needs four keystone operations: 1. create a project, 2. create a user, 3. create a role, 4. associate them. Since all later services share the single service project and all use the admin role, only operations 2 and 4 remain for each subsequent service install.

openstack project create --domain default \
  --description "Service Project" service

  

  

Part 4: verification

Edit /etc/keystone/keystone-paste.ini and remove admin_token_auth from the
[pipeline:public_api], [pipeline:admin_api], and [pipeline:api_v3] sections.


unset OS_TOKEN OS_URL


openstack --os-auth-url http://linux-3:35357/v3 \
  --os-project-domain-name default --os-user-domain-name default \
  --os-project-name admin --os-username admin token issue

Password:

+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                                                   |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2016-08-17T08:29:18.528637Z                                                                                                                                             |
| id         | gAAAAABXtBJO-mItMcPR15TSELJVB2iwelryjAGGpaCaWTW3YuEnPpUeg799klo0DaTfhFBq69AiFB2CbFF4CE6qgIKnTauOXhkUkoQBL6iwJkpmwneMo5csTBRLAieomo4z2vvvoXfuxg2FhPUTDEbw-DPgponQO-9FY1IAEJv_QV1qRaCRAY0 |
| project_id | 9783750c34914c04900b606ddaa62920                                                                                                                                                        |
| user_id    | 8bc9b323a3b948758697cb17da304035                                                                                                                                                        |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+



Part 5: create client environment scripts


Administrator: admin-openrc

export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=123
export OS_AUTH_URL=http://linux-3:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2


Regular user demo: demo-openrc

export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=123
export OS_AUTH_URL=http://linux-3:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

Usage:

source admin-openrc 

[root@controller01 ~]# openstack token issue

  

15. Deploy the image service (Glance)

Part 1: install and configure the service

1. Create the database and user

mysql -u root -p

CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'linux-3' \
  IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
  IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
  IDENTIFIED BY '123';
flush privileges;
 

2. Keystone operations:

As noted above, all later services live in the shared service project; for each service, create a user, grant it the admin role, and associate the two.

. admin-openrc

openstack user create --domain default --password-prompt glance
(enter 123 at the password prompt)

openstack role add --project service --user glance admin


Create the service entity:

openstack service create --name glance \
  --description "OpenStack Image" image

  

Create the endpoints:

openstack endpoint create --region RegionOne \
  image public http://linux-3:9292 
openstack endpoint create --region RegionOne \
  image internal http://linux-3:9292
openstack endpoint create --region RegionOne \
  image admin http://linux-3:9292


3. Install the software

yum install openstack-glance -y


4. Modify the configuration

Edit /etc/glance/glance-api.conf


[database]

# This connection is used to create the database schema; without it the tables cannot be generated.
# Leaving [database] unset in glance-api does not break VM creation, but it does break metadata definitions;
# the log then shows: ERROR glance.api.v2.metadef_namespaces

connection = mysql+pymysql://glance:123@linux-3/glance


[keystone_authtoken]

auth_uri = http://linux-3:5000
auth_url = http://linux-3:35357
memcached_servers = linux-3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = 123




[paste_deploy]
flavor = keystone

[glance_store]
stores = file,http
default_store = file    # local filesystem store; a Ceph backend would use the rbd protocol instead
filesystem_store_datadir = /var/lib/glance/images/




Edit /etc/glance/glance-registry.conf


[database]

# glance-registry uses this connection to look up image metadata

connection = mysql+pymysql://glance:123@linux-3/glance

[keystone_authtoken]
...
auth_uri = http://linux-3:5000
auth_url = http://linux-3:35357
memcached_servers = linux-3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = 123

[paste_deploy]
...
flavor = keystone

Create the image directory:

mkdir /var/lib/glance/images/
chown glance. /var/lib/glance/images/


Sync the database (warnings mentioning 'future' may appear; ignore them):

su -s /bin/sh -c "glance-manage db_sync" glance


Start the services:

systemctl enable openstack-glance-api.service \
  openstack-glance-registry.service

systemctl start openstack-glance-api.service \
  openstack-glance-registry.service

  
systemctl restart openstack-glance-api.service \
  openstack-glance-registry.service
  

Part 2: verification

. admin-openrc

wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img

(local mirror: wget http://172.16.209.100/cirros-0.3.4-x86_64-disk.img)


openstack image create "cirros" \
  --file cirros-0.3.4-x86_64-disk.img \
  --disk-format qcow2 --container-format bare \
  --public

If the command fails with an error like the one below, glance-api is not listening on port 9292:

Error finding address for http://linux-3:9292/v2/schemas/image: HTTPConnectionPool(host='linux-3', port=9292): Max retries exceeded with url: /v2/schemas/image (Caused by NewConnectionError('<requests.packages.urllib3.connection.HTTPConnection object at 0x3654710>: Failed to establish a new connection: [Errno 111] Connection refused',))
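
In that case, check that the service is running and bound to the port:

systemctl status openstack-glance-api.service
ss -tnlp | grep 9292   # glance-api should be listening here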


openstack image list

  

16. Deploy the compute service (Nova)

Part 1: controller node configuration

1. Create the databases and user

CREATE DATABASE nova_api;
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'linux-3' \
  IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
  IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
  IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'linux-3' \
  IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
  IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
  IDENTIFIED BY '123'; 
flush privileges;


2. Keystone operations


. admin-openrc

openstack user create --domain default \
  --password-prompt nova
(enter 123 at the password prompt)
openstack role add --project service --user nova admin

openstack service create --name nova \
  --description "OpenStack Compute" compute

  

openstack endpoint create --region RegionOne \
  compute public http://linux-3:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
  compute internal http://linux-3:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
  compute admin http://linux-3:8774/v2.1/%\(tenant_id\)s

  

  

3. Install the packages:

yum install openstack-nova-api openstack-nova-conductor \
  openstack-nova-console openstack-nova-novncproxy \
  openstack-nova-scheduler -y


4. Modify the configuration

Edit /etc/nova/nova.conf


[DEFAULT]
# enabled_apis: osapi_compute serves the compute API; the metadata API injects data (e.g., first-boot scripts) into new instances
enabled_apis = osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone

# management-network IP of this node

my_ip = 10.112.17.80
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver


[api_database]

connection = mysql+pymysql://nova:123@linux-3/nova_api


[database]

connection = mysql+pymysql://nova:123@linux-3/nova


[oslo_messaging_rabbit]

rabbit_host = linux-3
rabbit_userid = openstack
rabbit_password = 123

[keystone_authtoken]
auth_url = http://linux-3:5000
memcached_servers = linux-3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = 123



[vnc]

# management-network IP

vncserver_listen = 10.112.17.80

# management-network IP

vncserver_proxyclient_address = 10.112.17.80


[oslo_concurrency]

lock_path = /var/lib/nova/tmp


5. Sync the databases (warnings mentioning 'future' may appear; ignore them):

su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage db sync" nova


6. Start the services

systemctl enable openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service

systemctl start openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service

  

Part 2: compute node configuration


1. Install the packages:

yum install openstack-nova-compute libvirt-daemon-lxc -y


2. Modify the configuration

Edit /etc/nova/nova.conf


[DEFAULT]

rpc_backend = rabbit
auth_strategy = keystone

# management-network IP of the compute node

my_ip = 10.112.17.80
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver


[oslo_messaging_rabbit]

rabbit_host = linux-3
rabbit_userid = openstack
rabbit_password = 123


[vnc]

enabled = True
vncserver_listen = 0.0.0.0

# management-network IP of the compute node

vncserver_proxyclient_address = 10.112.17.80

# management-network IP of the controller node

novncproxy_base_url = http://10.112.17.80:6080/vnc_auto.html


[glance]

api_servers = http://linux-3:9292


[oslo_concurrency]

lock_path = /var/lib/nova/tmp


3. If deploying nova on a machine without hardware virtualization support, check:

egrep -c '(vmx|svm)' /proc/cpuinfo

If the result is 0, edit /etc/nova/nova.conf:

[libvirt]

virt_type = qemu


4. Start the services

systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service


Part 3: verification

On the controller node:

[root@controller01 ~]# source admin-openrc

[root@controller01 ~]# openstack compute service list

+----+------------------+--------------+----------+---------+-------+----------------------------+
| Id | Binary           | Host         | Zone     | Status  | State | Updated At                 |
+----+------------------+--------------+----------+---------+-------+----------------------------+
|  1 | nova-consoleauth | controller01 | internal | enabled | up    | 2016-08-17T08:51:37.000000 |
|  2 | nova-conductor   | controller01 | internal | enabled | up    | 2016-08-17T08:51:29.000000 |
|  8 | nova-scheduler   | controller01 | internal | enabled | up    | 2016-08-17T08:51:38.000000 |
| 12 | nova-compute     | compute01    | nova     | enabled | up    | 2016-08-17T08:51:30.000000 |
+----+------------------+--------------+----------+---------+-------+----------------------------+

  

17. Deploy the networking service (Neutron)

Part 1: controller node configuration

1. Create the database and user

CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'linux-3' \
  IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
  IDENTIFIED BY '123';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
  IDENTIFIED BY '123';
flush privileges;


2. Keystone operations

. admin-openrc


openstack user create --domain default --password-prompt neutron
(enter 123 at the password prompt)
openstack role add --project service --user neutron admin
openstack service create --name neutron \
  --description "OpenStack Networking" network
openstack endpoint create --region RegionOne \
  network public http://linux-3:9696
openstack endpoint create --region RegionOne \
  network internal http://linux-3:9696
openstack endpoint create --region RegionOne \
  network admin http://linux-3:9696 

3. Install the packages

yum install openstack-neutron openstack-neutron-ml2 python-neutronclient which  -y


4. Configure the server component

Edit /etc/neutron/neutron.conf and complete the following (database access goes in the [database] section below):

[DEFAULT]

core_plugin = ml2
service_plugins = router

# the next setting enables overlapping IP addresses

allow_overlapping_ips = True
rpc_backend = rabbit
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True

[oslo_messaging_rabbit]
rabbit_host = linux-3
rabbit_userid = openstack
rabbit_password = 123

[database]

connection = mysql+pymysql://neutron:123@linux-3/neutron


[keystone_authtoken]
auth_url = http://linux-3:5000
memcached_servers = linux-3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123

[nova]
auth_url = http://linux-3:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 123

[oslo_concurrency]

lock_path = /var/lib/neutron/tmp


Edit /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]

type_drivers = flat,vlan,vxlan,gre
tenant_network_types = vxlan
mechanism_drivers = openvswitch,l2population
extension_drivers = port_security

[ml2_type_flat]

flat_networks = provider


[ml2_type_vxlan]

vni_ranges = 1:1000


[securitygroup]

enable_ipset = True



Edit /etc/nova/nova.conf:

[neutron]
url = http://linux-3:9696
auth_url = http://linux-3:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123
service_metadata_proxy = True

5. Create the plugin symlink


ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini


6. Sync the database (warnings mentioning 'future' may appear; ignore them):

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
 --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

 

7. Restart the nova service

systemctl restart openstack-nova-api.service


8. Start the neutron service

systemctl enable neutron-server.service
systemctl start neutron-server.service
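
To confirm neutron-server is answering on port 9696, list the loaded extensions:

. admin-openrc
neutron ext-list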


Part 2: network node configuration


1. Edit /etc/sysctl.conf

net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0


2. Apply the settings immediately:

sysctl -p


3. Install the packages

yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch -y


4. Configure components

Edit /etc/neutron/neutron.conf

[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
rpc_backend = rabbit
auth_strategy = keystone

[oslo_messaging_rabbit]
rabbit_host = linux-3
rabbit_userid = openstack
rabbit_password = 123


[oslo_concurrency]

lock_path = /var/lib/neutron/tmp


5. Edit /etc/neutron/plugins/ml2/openvswitch_agent.ini:

[ovs]

# data-network (tunnel) IP of the network node

local_ip=10.112.17.80
bridge_mappings=external:br-ex


[agent]

tunnel_types=gre,vxlan
l2_population=True
prevent_arp_spoofing=True



6. Configure the L3 agent. Edit /etc/neutron/l3_agent.ini:

[DEFAULT]

interface_driver=neutron.agent.linux.interface.OVSInterfaceDriver
external_network_bridge=br-ex


7. Configure the DHCP agent. Edit /etc/neutron/dhcp_agent.ini:


[DEFAULT]

interface_driver=neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver=neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata=True


8. Configure the metadata agent. Edit /etc/neutron/metadata_agent.ini:

[DEFAULT]

nova_metadata_ip=linux-3
metadata_proxy_shared_secret=123


9. Start the services

On the network node:

systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service \
neutron-dhcp-agent.service neutron-metadata-agent.service

systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service \
neutron-dhcp-agent.service neutron-metadata-agent.service
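
Back on the controller, confirm the agents registered; the alive column should show ':-)' for each:

. admin-openrc
neutron agent-list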




10. Create the external bridge

ovs-vsctl add-br br-ex

ovs-vsctl add-port br-ex eth2



Note: if NICs are scarce and you want to bind br-ex to the network node's management NIC, remove the IP from that NIC and create a config file for br-ex that carries the original management-network IP:



[root@network01 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 

DEVICE=eth0
TYPE=Ethernet
ONBOOT="yes"
BOOTPROTO="none"
NM_CONTROLLED=no

[root@network01 ~]# cat /etc/sysconfig/network-scripts/ifcfg-br-ex 

DEVICE=br-ex
TYPE=Ethernet
ONBOOT="yes"
BOOTPROTO="none"
HWADDR=fa:16:3e:74:eb:de
IPADDR=10.112.17.80
GATEWAY=10.112.16.1
NETMASK=255.255.252.0
DNS1=211.136.192.6
DNS2=211.136.17.107
NM_CONTROLLED=no  # required; without it the interface may fail to come up

ovs-vsctl add-br br-ex
/etc/init.d/network restart && ovs-vsctl add-port br-ex eth0    # restart networking so br-ex takes the IP, then attach eth0

systemctl restart neutron-openvswitch-agent.service neutron-l3-agent.service \
neutron-dhcp-agent.service neutron-metadata-agent.service



[root@linux-3 network-scripts]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether fa:16:3e:74:eb:de brd ff:ff:ff:ff:ff:ff
    inet 10.112.17.80/22 brd 10.112.19.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe74:ebde/64 scope link 
       valid_lft forever preferred_lft forever
3: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 1000
    link/ether 7a:fd:9f:1f:e5:57 brd ff:ff:ff:ff:ff:ff
4: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 1000
    link/ether 1a:17:22:92:85:49 brd ff:ff:ff:ff:ff:ff
[root@linux-3 network-scripts]# ifconfig eth0
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.112.17.80  netmask 255.255.252.0  broadcast 10.112.19.255
        inet6 fe80::f816:3eff:fe74:ebde  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:74:eb:de  txqueuelen 1000  (Ethernet)
        RX packets 14710  bytes 2185896 (2.0 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 14771  bytes 4133409 (3.9 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0





Part 3: compute node configuration

1. Edit /etc/sysctl.conf

net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1

2.sysctl -p
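
If sysctl -p complains that the net.bridge.* keys are unknown, the bridge netfilter module is not loaded yet; on CentOS 7 the following usually fixes it (module name assumed for the stock 3.10 kernel):

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # load at boot
sysctl -p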


3.yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch -y


4. Edit /etc/neutron/neutron.conf


[DEFAULT]

rpc_backend = rabbit
auth_strategy = keystone

[oslo_messaging_rabbit]
rabbit_host = node1
rabbit_userid = openstack
rabbit_password = 123


[oslo_concurrency]

lock_path = /var/lib/neutron/tmp


5. Edit /etc/neutron/plugins/ml2/openvswitch_agent.ini

[ovs]

# data-network (tunnel) IP of the compute node

local_ip = 10.1.1.2

#bridge_mappings = vlan:br-vlan

[agent]

tunnel_types = gre,vxlan
l2_population = True
prevent_arp_spoofing = True

[securitygroup]

firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True


6. Edit /etc/nova/nova.conf


[neutron]

url = http://node1:9696
auth_url = http://node1:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123

7. Start the services

systemctl enable neutron-openvswitch-agent.service
systemctl start neutron-openvswitch-agent.service

systemctl restart openstack-nova-compute.service

systemctl restart neutron-openvswitch-agent.service


ovs-vsctl show

  

18. Deploy the dashboard (Horizon)

On the controller node

1. Install the package

yum install openstack-dashboard -y


2. Configure /etc/openstack-dashboard/local_settings



OPENSTACK_HOST = "linux-3"
ALLOWED_HOSTS = ['*', ]
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'linux-3:11211',
    }
}

OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
TIME_ZONE = "UTC"


3. Start the services

systemctl enable httpd.service memcached.service
systemctl restart httpd.service memcached.service



4. Verify:

http://10.112.17.80/dashboard

Summary:

    Only the API layer of each service talks to keystone, so do not scatter auth configuration everywhere.

    When an instance is created, nova-compute is responsible for calling the other services' APIs, so there is nothing to configure for those calls on the controller node.

    ml2 is neutron's core plugin and only needs to be configured on the controller node.

    The network node only needs its agents configured.

    Each component's API does more than accept requests; it also validates them. The controller's nova.conf needs neutron's API and auth settings because nova boot must verify that the network the user submits is valid; the controller's neutron.conf needs nova's API and auth settings because deleting a network port requires asking nova-api whether an instance still uses it. The compute node's nova.conf needs the [neutron] section because nova-compute asks neutron-server to create ports. 'Port' here means a port on the virtual switch.

    If any of this is unclear, study how the OpenStack components communicate and the instance-creation workflow, or come to my class; most blog posts don't teach the real thing.


Network troubleshooting:

On the network node:

[root@network02 ~]# ip netns show

qdhcp-e63ab886-0835-450f-9d88-7ea781636eb8
qdhcp-b25baebb-0a54-4f59-82f3-88374387b1ec
qrouter-ff2ddb48-86f7-4b49-8bf4-0335e8dbaa83


[root@network02 ~]# ip netns exec qrouter-ff2ddb48-86f7-4b49-8bf4-0335e8dbaa83 bash


[root@network02 ~]# ping -c2 www.baidu.com
PING www.a.shifen.com (61.135.169.125) 56(84) bytes of data.
64 bytes from 61.135.169.125: icmp_seq=1 ttl=52 time=33.5 ms
64 bytes from 61.135.169.125: icmp_seq=2 ttl=52 time=25.9 ms

If the ping fails, exit the namespace and rebuild the bridges:

ovs-vsctl del-br br-ex
ovs-vsctl del-br br-int
ovs-vsctl del-br br-tun
ovs-vsctl add-br br-int
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex eth0

systemctl restart neutron-openvswitch-agent.service neutron-l3-agent.service \
neutron-dhcp-agent.service neutron-metadata-agent.service





  


 
