openstack grizzly版cloud控制节点安装

openstack-ubuntu-create
参考官方文档

三个节点:
cloud :控制节点
内网:10.10.10.10
外网:172.16.56.252

network:网络节点
内网:10.10.10.9
外网:172.16.56.100

c01:计算节点
内网:10.10.10.11
外网:172.16.56.153

一.cloud:控制节点的配置.

1.安装完操作系统已经apt源配置完成之后,一定要执行 apt-get update

root@cloud:~# mv /etc/apt/sources.list /etc/apt/sources.list.bak   #备份一下原来的源
root@cloud:~# vi /etc/apt/sources.list  
    deb http://mirrors.163.com/ubuntu/ precise main restricted universe multiverse
    deb http://mirrors.163.com/ubuntu/ precise-security main restricted universe multiverse
    deb http://mirrors.163.com/ubuntu/ precise-updates main restricted universe multiverse
    deb http://mirrors.163.com/ubuntu/ precise-proposed main restricted universe multiverse
    deb http://mirrors.163.com/ubuntu/ precise-backports main restricted universe multiverse
    deb-src http://mirrors.163.com/ubuntu/ precise main restricted universe multiverse
    deb-src http://mirrors.163.com/ubuntu/ precise-security main restricted universe multiverse
    deb-src http://mirrors.163.com/ubuntu/ precise-updates main restricted universe multiverse
    deb-src http://mirrors.163.com/ubuntu/ precise-proposed main restricted universe multiverse
    deb-src http://mirrors.163.com/ubuntu/ precise-backports main restricted universe multiverse

#加入163源,163源每6个小时与官方源站同步一次。
root@cloud:~# apt-get update
root@cloud:~# apt-get install ubuntu-cloud-keyring   #安装cloud版本密钥(我这么理解)
root@cloud:~# vi /etc/apt/sources.list.d/cloud-archive.list #加入版本库地址
deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main

2.加入版本库地址.

root@cloud:~# vi /etc/apt/sources.list.d/cloud-archive.list 
deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main
root@cloud:~# apt-get update && apt-get upgrade 

3.配置grizzly源

root@cloud:~# vi /etc/apt/sources.list.d/grizzly.list 加入grizzly源
deb http://archive.gplhost.com/debian grizzly main
deb http://archive.gplhost.com/debian grizzly-backports main
root@cloud:~# apt-get update  #执行的时候会出错,记录NO_PUBKEY后面的密钥。 
W: GPG error: http://archive.gplhost.com grizzly Release: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY 64AA94D00B849883
W: GPG error: http://archive.gplhost.com grizzly-backports Release: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY 64AA94D00B849883
上面错误解决办法:(注意 --recv-key 后面的密钥内容是你出错时记录的 NO_PUBKEY 值,不要原样照抄下面的示例 )

root@cloud:~# gpg --keyserver pgpkeys.mit.edu --recv-key 64AA94D00B849883
root@cloud:~# gpg -a --export 64AA94D00B849883 | sudo apt-key add -
root@cloud:~# apt-get update
root@cloud:~# apt-get install gplhost-archive-keyring
root@cloud:~# apt-get upgrade

4.网卡配置:/etc/network/interfaces

auto lo
iface lo inet loopback

# The eth0 network interface
auto eth0
iface eth0 inet static
address 10.10.10.10
netmask 255.255.255.0

# The eth1 network interface
auto eth1
iface eth1 inet dhcp  #我的网卡是自动获取的  

5.关闭反向路径过滤(rp_filter):

root@cloud:~# vi /etc/sysctl.conf  #添加如下内容。
net.ipv4.conf.all.rp_filter = 0      #reverse-path filtering 反向过滤技术,系统在接收到一个IP包后,检查该IP是不是合乎要求,不合要求的IP包会被系统丢弃
net.ipv4.conf.default.rp_filter = 0
重启网卡:
root@cloud:~# /etc/init.d/networking restart
应用网卡设置:
root@cloud:~# sysctl -e -p /etc/sysctl.conf
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
root@cloud:~#

6.修改hosts文件:

root@cloud:~# vi /etc/hosts  添加如下设置.

10.10.10.10 cloud
10.10.10.9 network
10.10.10.11 c01

7.安装ntp:

root@cloud:~# apt-get install -y ntp
root@cloud:~# /etc/init.d/ntp restart
#可以设置自己的时钟源,但是必须保持以后三个节点的时间一致

8.安装mysql

root@cloud:~# apt-get install -y python-mysqldb mysql-server
这个过程会要求你设置mysql的root用户的密码。

root@cloud:~# /etc/init.d/mysql restart
允许其他两个节点访问mysql:

root@cloud:~# sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
root@cloud:~# /etc/init.d/mysql restart

9 .建立:nova,cinder,glance,keystone,quantum库

mysql -u root -p <<EOF
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'openstack';
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'openstack';
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'openstack';
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'openstack';
CREATE DATABASE quantum;
GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'localhost' IDENTIFIED BY 'openstack';
GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'10.10.10.9' IDENTIFIED BY 'openstack';
GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'10.10.10.11' IDENTIFIED BY 'openstack';
FLUSH PRIVILEGES;
EOF

10.安装rabbitmq消息队列服务:

root@cloud:~# apt-get install -y rabbitmq-server

#rabbitmqctl status 查看rabbitmq的状态;
#rabbitmqctl user

# rabbitmqctl list_users 查看rabbit用户
Listing users ...
guest   [administrator] #guest 用户 admin...权限
...done.

#rabbitmqctl change_password guest openstack #修改guest用户密码,如果nova,glance等服务不能访问rabbit,先检查用户密码,
                            #没有问题的话就强制重置rabbit,然后再执行一次修改密码,(参见 rabbitmqctl --help)

11.keystone 验证服务安装:

root@cloud:~# apt-get install -y keystone python-keystone python-keystoneclient

root@cloud:~# vi /etc/keystone/keystone.conf #编辑keystone配置文件
[DEFAULT]
admin_token = openstack
debug = True      # 开启测试信息记录
verbose = True    # 开启日志的冗长记录
[sql]
connection = mysql://keystone:openstack@localhost/keystone
# 使用mysql数据库:keystone是数据库用户,openstack是数据库密码,最后一个keystone是要使用的数据库名
root@cloud:~# service keystone restart
keystone stop/waiting
keystone start/running, process 32376
root@cloud:~# keystone-manage db_sync #初始化keystone
root@cloud:~#
##############################
查看keystone的日志,默认在/var/log/keystone/下。没有任何错误之后继续余下的工作。
2013-10-25 14:09:41    DEBUG [migrate.versioning.script.base] Script /usr/lib/python2.7/dist-packages/keystone/common/sql/migrate_repo/versions/019_fixup_role.py loaded successfully
2013-10-25 14:09:41    DEBUG [migrate.versioning.script.base] Loading script /usr/lib/python2.7/dist-packages/keystone/common/sql/migrate_repo/versions/020_migrate_metadata_table_roles.py...
2013-10-25 14:09:41    DEBUG [migrate.versioning.script.base] Script /usr/lib/python2.7/dist-packages/keystone/common/sql/migrate_repo/versions/020_migrate_metadata_table_roles.py loaded successfully
2013-10-25 14:09:41    DEBUG [migrate.versioning.script.base] Loading script /usr/lib/python2.7/dist-packages/keystone/common/sql/migrate_repo/versions/021_add_trust_to_token.py...
2013-10-25 14:09:41    DEBUG [migrate.versioning.script.base] Script /usr/lib/python2.7/dist-packages/keystone/common/sql/migrate_repo/versions/021_add_trust_to_token.py loaded successfully
日志结尾会提示大量的successfully

新建一个隐藏环境变量文件,并使其开机之后也能生效 (为了安全起见建议建立隐藏文件):

root@cloud:~# vi .openrc

export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=openstack
export OS_AUTH_URL="http://10.10.10.10:5000/v2.0/"  #这里的两个url必须与后面endpoint端口中建立的url一致.
export OS_SERVICE_ENDPOINT="http://10.10.10.10:35357/v2.0" #这里的两个url必须与后面endpoint端口中建立的url一致.
export OS_SERVICE_TOKEN=openstack
root@cloud:~# source /root/.openrc
root@cloud:~# echo "source /root/.openrc" >> .bashrc #这一步非常重要:如果不写入.bashrc,新开一个ssh会话时环境变量不会自动导入,将无法执行keystone命令
测试:

root@cloud:~# keystone user-list
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).

root@cloud:~#
说明keystone成功。


官方给出的脚本我只执行成功了一次,所以以后就没用过,手动建立keystone服务,后期的研究一下这个脚本。
建立三个租户(类似房东,承租人),一个admin,一个demo,一个service
root@cloud:~# keystone user-list
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).

root@cloud:~# keystone tenant-create --name=admin
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |                                  |
|   enabled   |               True               |
|      id     | 139ea7b2f2444bcd85c327c1671708e1 |
|     name    |              admin               |
+-------------+----------------------------------+
root@cloud:~# keystone tenant-create --name=demo
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |                                  |
|   enabled   |               True               |
|      id     | 8d428dd34477470d95ad6ad4df0d2dd4 |
|     name    |               demo               |
+-------------+----------------------------------+
root@cloud:~# keystone tenant-create --name=service
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
| description |                                  |
|   enabled   |               True               |
|      id     | 9fa61d1bd95d49d4be39658dc5b2a527 |
|     name    |             service              |
+-------------+----------------------------------+
root@cloud:~#
root@cloud:~# keystone tenant-list #查看tenant
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+----------------------------------+---------+---------+
|                id                |   name  | enabled |
+----------------------------------+---------+---------+
| 139ea7b2f2444bcd85c327c1671708e1 |  admin  |   True  |
| 8d428dd34477470d95ad6ad4df0d2dd4 |   demo  |   True  |
| 9fa61d1bd95d49d4be39658dc5b2a527 | service |   True  |
+----------------------------------+---------+---------+
root@cloud:~#

keystone手动创建用户点击这里

 12.镜像服务器的安装和配置:

root@cloud:~# vi /etc/glance/glance-api.conf
sql_connection = mysql://glance:openstack@localhost/glance
[keystone_authtoken]
admin_tenant_name = service
admin_user = glance
admin_password = openstack
[paste_deploy]
flavor=keystone
root@cloud:~# vi /etc/glance/glance-registry.conf
sql_connection = mysql://glance:openstack@localhost/glance
[keystone_authtoken]
admin_tenant_name = service
admin_user = glance
admin_password = openstack
[paste_deploy]
flavor=keystone

因为之前我们没有在keystone中建立glance用户,这里再创建。


root@cloud:~# keystone user-create --name=glance --pass=openstack --tenant-id 9fa61d1bd95d49d4be39658dc5b2a527 --email=glance@domain.com
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+----------+----------------------------------+
| Property |              Value               |
+----------+----------------------------------+
|  email   |        glance@domain.com         |
| enabled  |               True               |
|    id    | 1e57c770917242f0b516917e36552c4f |
|   name   |              glance              |
| tenantId | 9fa61d1bd95d49d4be39658dc5b2a527 |
+----------+----------------------------------+

root@cloud:~# keystone user-role-add --tenant-id 9fa61d1bd95d49d4be39658dc5b2a527 --user-id 1e57c770917242f0b516917e36552c4f --role-id 844d47b695c04c74bc8799b41450dc90
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
root@cloud:~#
建立端口:

root@cloud:~# keystone service-list
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+----------------------------------+----------+----------+------------------------------+
|                id                |   name   |   type   |         description          |
+----------------------------------+----------+----------+------------------------------+
| 09dd98bd962b4e61b4377ba70a5d9c69 |  cinder  |  volume  |   OpenStack Volume Service   |
| d2738dff2cd54615b7b2002081e21ff9 |   ec2    |   ec2    |    OpenStack EC2 service     |
| e0ef8e829b914147b6c49d281e4817db |  glance  |  image   |   OpenStack Image Service    |
| 9ccb0db9159648d89f276527c339cc40 | keystone | identity |      OpenStack Identity      |
| 50195c63e78749c4bfb0c4da8a121033 |   nova   | compute  |  OpenStack Compute Service   |
| 8b3f86f20bf146068b898aac0dd3334f | quantum  | network  | OpenStack Networking service |
+----------------------------------+----------+----------+------------------------------+


root@cloud:~# keystone endpoint-create --region RegionOne --service-id e0ef8e829b914147b6c49d281e4817db --publicurl 'http://10.10.10.10:9292' --adminurl 'http://10.10.10.10:9292' --internalurl 'http://10.10.10.10:9292'
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
|   adminurl  |     http://10.10.10.10:9292      |
|      id     | 5843789e7cd24907930d4a953d67c0b3 |
| internalurl |     http://10.10.10.10:9292      |
|  publicurl  |     http://10.10.10.10:9292      |
|    region   |            RegionOne             |
|  service_id | e0ef8e829b914147b6c49d281e4817db |
+-------------+----------------------------------+
root@cloud:~#
root@cloud:/var/log/glance# service glance-api restart && service glance-registry restart  启动glance服务
glance-api stop/waiting
glance-api start/running, process 1196
glance-registry stop/waiting
glance-registry start/running, process 1201
root@cloud:/var/log/glance#
root@cloud:/var/log/glance#
root@cloud:/var/log/glance# glance-manage db_sync  #初始化
2013-10-25 21:17:42.988 1212 INFO glance.db.sqlalchemy.migration [-] Upgrading database to version latest
2013-10-25 21:17:43.000 1212 INFO glance.db.sqlalchemy.migrate_repo.schema [-] creating table images
2013-10-25 21:17:43.025 1212 INFO glance.db.sqlalchemy.migrate_repo.schema [-] creating table image_properties
2013-10-25 21:17:43.093 1212 INFO glance.db.sqlalchemy.migrate_repo.schema [-] creating table image_members
2013-10-25 21:17:43.193 1212 INFO glance.db.sqlalchemy.migrate_repo.schema [-] creating table image_tags
2013-10-25 21:17:43.272 1212 INFO glance.db.sqlalchemy.migrate_repo.schema [-] creating table image_locations
root@cloud:/var/log/glance# glance image-list #这里测试一下,如果返回空说明成功 ,因为还没有导入镜像。

root@cloud:/var/log/glance#
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
下载导入镜像:
root@cloud:~# wget http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img

root@cloud:~# glance image-create --is-public true --disk-format qcow2 --container-format bare --name "CirrOS 0.3.1" < cirros-0.3.1-x86_64-disk.img         
+------------------+--------------------------------------+
| Property         | Value                                |
+------------------+--------------------------------------+
| checksum         | d972013792949d0d3ba628fbe8685bce     |
| container_format | bare                                 |
| created_at       | 2013-10-25T13:27:29                  |
| deleted          | False                                |
| deleted_at       | None                                 |
| disk_format      | qcow2                                |
| id               | 9e4f93e8-7adc-44e6-9512-8837a73947ed |
| is_public        | True                                 |
| min_disk         | 0                                    |
| min_ram          | 0                                    |
| name             | CirrOS 0.3.1                         |
| owner            | 139ea7b2f2444bcd85c327c1671708e1     |
| protected        | False                                |
| size             | 13147648                             |
| status           | active                               |
| updated_at       | 2013-10-25T13:27:30                  |
+------------------+--------------------------------------+
root@cloud:~#
root@cloud:~# glance image-list  #查看导入镜像(当你在web页面查看你的镜像没有镜像时,记得执行一下:glance index)
+--------------------------------------+--------------+-------------+------------------+----------+--------+
| ID                                   | Name         | Disk Format | Container Format | Size     | Status |
+--------------------------------------+--------------+-------------+------------------+----------+--------+
| 9e4f93e8-7adc-44e6-9512-8837a73947ed | CirrOS 0.3.1 | qcow2       | bare             | 13147648 | active |
+--------------------------------------+--------------+-------------+------------------+----------+--------+
root@cloud:~#

 13.1控制服务安装配置

root@cloud:~# apt-get install -y nova-api nova-cert nova-common nova-conductor nova-scheduler python-nova python-novaclient nova-consoleauth novnc  nova-novncproxy
Reading package lists... Done
Building dependency tree       
Reading state information... Done
Some packages could not be installed. This may mean that you have
requested an impossible situation or if you are using the unstable
distribution that some required packages have not yet been created
or been moved out of Incoming.
The following information may help to resolve the situation:

The following packages have unmet dependencies:
 novnc : Depends: libjs-swfobject but it is not installable #这个错误的解决办法
E: Unable to correct problems, you have held broken packages.
root@cloud:~#
root@cloud:~# wget http://archive.ubuntu.com/ubuntu/pool/universe/libj/libjs-swfobject/libjs-swfobject_2.2+dfsg-1_all.deb
root@cloud:~# dpkg -i libjs-swfobject_2.2+dfsg-1_all.deb
root@cloud:~# apt-get install -y nova-api nova-cert nova-common nova-conductor nova-scheduler python-nova python-novaclient nova-consoleauth novnc  nova-novncproxy


配置 nova
root@cloud:~# vi  /etc/nova/api-paste.ini #编辑下面内容修改如下 
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = openstack
root@cloud:~# vi /etc/nova/nova.conf
sql_connection=mysql://nova:openstack@localhost/nova
my_ip=10.10.10.10
rabbit_password=openstack
auth_strategy=keystone

# Networking
network_api_class=nova.network.quantumv2.api.API
quantum_url=http://10.10.10.10:9696
quantum_auth_strategy=keystone
quantum_admin_tenant_name=service
quantum_admin_username=quantum
quantum_admin_password=openstack
quantum_admin_auth_url=http://10.10.10.10:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver  

# Security Groups                                    
firewall_driver=nova.virt.firewall.NoopFirewallDriver
security_group_api=quantum                           
                                                     
# Metadata                                           
quantum_metadata_proxy_shared_secret=openstack         
service_quantum_metadata_proxy=true                  
metadata_listen = 10.10.10.10        
metadata_listen_port = 8775 

13.2在keystone中建立nova用户以及服务,端点。

root@cloud:~# keystone user-create --name=nova --pass=openstack --email=nova@domain.com --tenant-id 9fa61d1bd95d49d4be39658dc5b2a527 
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+----------+----------------------------------+
| Property |              Value               |
+----------+----------------------------------+
|  email   |         nova@domain.com          |
| enabled  |               True               |
|    id    | 249c5a6dcd3943a5b2dfce2c60531b35 |
|   name   |               nova               |
| tenantId | 9fa61d1bd95d49d4be39658dc5b2a527 |
+----------+----------------------------------+
root@cloud:~# keystone user-role-add --tenant-id 9fa61d1bd95d49d4be39658dc5b2a527 --user-id 249c5a6dcd3943a5b2dfce2c60531b35 --role-id 844d47b695c04c74bc8799b41450dc90
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
root@cloud:~# 
之前已经建立了compute的端点,这里查看一下:
root@cloud:~# keystone endpoint-list
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+----------------------------------+-----------+------------------------------------------+------------------------------+------------------------------------------+----------------------------------+
|                id                |   region  |                publicurl                 |         internalurl          |                 adminurl                 |            service_id            |
+----------------------------------+-----------+------------------------------------------+------------------------------+------------------------------------------+----------------------------------+
| 520470e44d7a4bf088a93beb000a6f6b | RegionOne | http://10.10.10.10:8774/v2/$(tenant_id)s |                              | http://10.10.10.10:8774/v2/$(tenant_id)s | 50195c63e78749c4bfb0c4da8a121033 |
| 5843789e7cd24907930d4a953d67c0b3 | RegionOne |         http://10.10.10.10:9292          |   http://10.10.10.10:9292    |         http://10.10.10.10:9292          | e0ef8e829b914147b6c49d281e4817db |
| 59f4ccf564ce47fe8b141692288cb497 | RegionOne |       http://10.10.10.10:5000/v2.0       | http://10.10.10.10:5000/v2.0 |      http://10.10.10.10:35357/v2.0       | 9ccb0db9159648d89f276527c339cc40 |
+----------------------------------+-----------+------------------------------------------+------------------------------+------------------------------------------+----------------------------------+
root@cloud:~# 
-------+----------------------------------+
root@cloud:~# nova-manage db sync  #初始化这行失败的原因,是/etc/nova/api-paste.ini配置文件里的密码写错了
Command failed, please check log for more info
2013-10-25 22:04:10.245 6203 CRITICAL nova [-] (OperationalError) (1045, "Access denied for user 'nova'@'localhost' (using password: YES)") None None
初始化之后:
root@cloud:~# /etc/init.d/nova-api restart     
root@cloud:~# /etc/init.d/nova-cert restart   
root@cloud:~# /etc/init.d/nova-conductor restart    
root@cloud:~# /etc/init.d/nova-consoleauth restart    
root@cloud:~# /etc/init.d/nova-novncproxy restart
root@cloud:~# /etc/init.d/nova-scheduler restart
root@cloud:~#


并且查看nova下面的相应的日志
nova-api.log
.............
2013-10-25 22:12:57.697 6311 INFO nova.wsgi [-] osapi_compute listening on 0.0.0.0:8774
2013-10-25 22:12:57.697 6311 INFO nova.service [-] Starting 1 workers
2013-10-25 22:12:57.699 6311 INFO nova.service [-] Started child 6317
2013-10-25 22:12:57.701 6311 INFO nova.network.driver [-] Loading network driver 'nova.network.linux_net'
2013-10-25 22:12:57.702 6317 INFO nova.osapi_compute.wsgi.server [-] (6317) wsgi starting up on http://0.0.0.0:8774/

2013-10-25 22:12:57.706 6311 INFO nova.wsgi [-] metadata listening on 10.10.10.10:8775
2013-10-25 22:12:57.706 6311 INFO nova.service [-] Starting 1 workers
2013-10-25 22:12:57.708 6311 INFO nova.service [-] Started child 6318
2013-10-25 22:12:57.811 6318 INFO nova.metadata.wsgi.server [-] (6318) wsgi starting up on http://10.10.10.10:8775/

root@cloud:/var/log/nova#
........................
nova-cert.log
........................
2013-10-25 22:13:01.924 5895 INFO nova.service [-] Caught SIGTERM, exiting
2013-10-25 22:13:02.355 6343 AUDIT nova.service [-] Starting cert node (version 2013.1.3)
2013-10-25 22:13:02.619 INFO nova.openstack.common.rpc.common [req-7f16229f-16db-4f49-a65f-0057d3245544 None None] Connected to AMQP server on localhost:5672
root@cloud:/var/log/nova#
........................
nova-conductor.log
........................
2013-10-25 22:13:08.567 5931 INFO nova.service [-] Caught SIGTERM, exiting
2013-10-25 22:13:08.997 6367 AUDIT nova.service [-] Starting conductor node (version 2013.1.3)
2013-10-25 22:13:09.258 INFO nova.openstack.common.rpc.common [req-fba5feeb-696a-4665-8f73-55f1ca36d63a None None] Connected to AMQP server on localhost:5672
root@cloud:/var/log/nova#
........................................................................

14.安装块存储服务:(系统要求需要另外一块硬盘)

root@cloud:~# fdisk -l

Disk /dev/sda: 32.2 GB, 32212254720 bytes
255 heads, 63 sectors/track, 3916 cylinders, total 62914560 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x000edd32

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048    46139391    23068672   83  Linux
/dev/sda2        46141438    62912511     8385537    5  Extended
/dev/sda5        46141440    62912511     8385536   82  Linux swap / Solaris

Disk /dev/sdb: 53.7 GB, 53687091200 bytes
255 heads, 63 sectors/track, 6527 cylinders, total 104857600 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000

Disk /dev/sdb doesn't contain a valid partition table
root@cloud:~# 
root@cloud:~# apt-get install -y cinder-api cinder-scheduler cinder-volume iscsitarget  open-iscsi iscsitarget-dkms python-cinderclient linux-headers-`uname -r`

The following packages have unmet dependencies:
 cinder-volume : Depends: tgt but it is not going to be installed
E: Unable to correct problems, you have held broken packages.
root@cloud:~# 
上面的错误解决。
root@cloud:~# apt-get install libibverbs1

14.1配置iscsi存储:

root@cloud:~# sed -i 's/false/true/g' /etc/default/iscsitarget
root@cloud:~# service iscsitarget start
 * Starting iSCSI enterprise target service                                                                                                           [ OK ] 
                                                                                                                                                      [ OK ]
root@cloud:~# 
root@cloud:~# /etc/init.d/open-iscsi restart
 * Disconnecting iSCSI targets                                                                                                                        [ OK ]
 * Stopping iSCSI initiator service                                                                                                                   [ OK ]
 * Starting iSCSI initiator service iscsid                                                                                                            [ OK ]
 * Setting up iSCSI targets                                                                                                                           [ OK ]
root@cloud:~#


14.2配置cinder服务:

root@cloud:~# vi /etc/cinder/cinder.conf  添加如下两行
sql_connection = mysql://cinder:openstack@localhost/cinder
rabbit_password = openstack

root@cloud:~# vi /etc/cinder/api-paste.ini 修改下面内容
admin_tenant_name = service
admin_user = cinder
admin_password = openstack

14.3在keystone中为cinder添加用户:

root@cloud:~# keystone user-create --name=cinder --pass=openstack --email=cinder@domain.com --tenant-id 9fa61d1bd95d49d4be39658dc5b2a527
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+----------+----------------------------------+
| Property |              Value               |
+----------+----------------------------------+
|  email   |        cinder@domain.com         |
| enabled  |               True               |
|    id    | 5e1aea243f8f4a40a4fe4fc1d0d6e2b1 |
|   name   |              cinder              |
| tenantId | 9fa61d1bd95d49d4be39658dc5b2a527 |
+----------+----------------------------------+
root@cloud:~# keystone user-role-add --tenant-id 9fa61d1bd95d49d4be39658dc5b2a527 --user-id 5e1aea243f8f4a40a4fe4fc1d0d6e2b1 --role-id 844d47b695c04c74bc8799b41450dc90
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
root@cloud:~# 
root@cloud:~# keystone endpoint-create --region RegionOne --service-id 09dd98bd962b4e61b4377ba70a5d9c69 --publicurl 'http://10.10.10.10:8776/v1/$(tenant_id)s' --adminurl 'http://10.10.10.10:8776/v1/$(tenant_id)s' --internalurl 'http://10.10.10.10:8776/v1/$(tenant_id)s'
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+-------------+------------------------------------------+
|   Property  |                  Value                   |
+-------------+------------------------------------------+
|   adminurl  | http://10.10.10.10:8776/v1/$(tenant_id)s |
|      id     |     5554c424b983406fb955c90c1379d28e     |
| internalurl | http://10.10.10.10:8776/v1/$(tenant_id)s |
|  publicurl  | http://10.10.10.10:8776/v1/$(tenant_id)s |
|    region   |                RegionOne                 |
|  service_id |     09dd98bd962b4e61b4377ba70a5d9c69     |
+-------------+------------------------------------------+
root@cloud:~# 

14.4磁盘操作:

root@cloud:~# pvcreate /dev/sdb 初始化磁盘为物理卷
  Physical volume "/dev/sdb" successfully created
root@cloud:~# 

root@cloud:~# vgcreate cinder-volumes /dev/sdb  创建名为cinder-volumes的卷组
  Volume group "cinder-volumes" successfully created
root@cloud:~# 

14.5初始化cinder:

root@cloud:~# cinder-manage db sync
2013-10-25 23:01:00     INFO [migrate.versioning.api] 0 -> 1... 
2013-10-25 23:01:00     INFO [migrate.versioning.api] done
2013-10-25 23:01:00     INFO [migrate.versioning.api] 1 -> 2... 
2013-10-25 23:01:00     INFO [migrate.versioning.api] done
2013-10-25 23:01:00     INFO [migrate.versioning.api] 2 -> 3... 
2013-10-25 23:01:00     INFO [migrate.versioning.api] done
2013-10-25 23:01:00     INFO [migrate.versioning.api] 3 -> 4... 
2013-10-25 23:01:00     INFO [004_volume_type_to_uuid] Created foreign key volume_type_extra_specs_ibfk_1
2013-10-25 23:01:00     INFO [migrate.versioning.api] done
2013-10-25 23:01:00     INFO [migrate.versioning.api] 4 -> 5... 
2013-10-25 23:01:00     INFO [migrate.versioning.api] done
2013-10-25 23:01:00     INFO [migrate.versioning.api] 5 -> 6... 
2013-10-25 23:01:00     INFO [migrate.versioning.api] done
2013-10-25 23:01:00     INFO [migrate.versioning.api] 6 -> 7... 
2013-10-25 23:01:00     INFO [migrate.versioning.api] done
2013-10-25 23:01:00     INFO [migrate.versioning.api] 7 -> 8... 
2013-10-25 23:01:00     INFO [migrate.versioning.api] done
2013-10-25 23:01:00     INFO [migrate.versioning.api] 8 -> 9... 
2013-10-25 23:01:00     INFO [migrate.versioning.api] done
root@cloud:~# 

14.6重启cinder服务:

root@cloud:~# /etc/init.d/cinder-api restart   
root@cloud:~# /etc/init.d/cinder-scheduler restart  
root@cloud:~# /etc/init.d/cinder-volume restart
root@cloud:~# 
一定要记得查看日志

15.控制节点网络服务安装:

root@cloud:~# apt-get install -y quantum-server
root@cloud:~# vi /etc/quantum/quantum.conf 修改如下内容
[DEFAULT]
verbose = True
rabbit_password = openstack
admin_tenant_name = service
admin_user = quantum
admin_password = openstack
root@cloud:~# vi  /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini 修改如下内容
[DATABASE]
sql_connection = mysql://quantum:openstack@localhost/quantum
[OVS]
tenant_network_type = gre
tunnel_id_ranges = 1:1000
enable_tunneling = True
local_ip = 10.10.10.10
[SECURITYGROUP]
firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

root@cloud:~# ln -s /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini /etc/quantum/plugin.ini  软连接配置文件            
root@cloud:~#

在keystone中建立quantum用户,以及端点。

root@cloud:~# keystone user-create --name=quantum --pass=openstack --email=quantum@domain.com --tenant-id 9fa61d1bd95d49d4be39658dc5b2a527
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+----------+----------------------------------+
| Property |              Value               |
+----------+----------------------------------+
|  email   |        quantum@domain.com        |
| enabled  |               True               |
|    id    | 32f1a0529b524c62b89668683b153bb3 |
|   name   |             quantum              |
| tenantId | 9fa61d1bd95d49d4be39658dc5b2a527 |
+----------+----------------------------------+
root@cloud:~# 
root@cloud:~# keystone user-role-add --tenant-id 9fa61d1bd95d49d4be39658dc5b2a527 --role-id 844d47b695c04c74bc8799b41450dc90 --user-id 32f1a0529b524c62b89668683b153bb3
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
root@cloud:~# keystone endpoint-create --region RegionOne --service-id 8b3f86f20bf146068b898aac0dd3334f --publicurl 'http://10.10.10.10:9696/' --adminurl 'http://10.10.10.10:9696/' --internalurl 'http://10.10.10.10:9696/'
WARNING: Bypassing authentication using a token & endpoint (authentication credentials are being ignored).
+-------------+----------------------------------+
|   Property  |              Value               |
+-------------+----------------------------------+
|   adminurl  |     http://10.10.10.10:9696/     |
|      id     | 56b29aa9d7894a6e86450a930075c102 |
| internalurl |     http://10.10.10.10:9696/     |
|  publicurl  |     http://10.10.10.10:9696/     |
|    region   |            RegionOne             |
|  service_id | 8b3f86f20bf146068b898aac0dd3334f |
+-------------+----------------------------------+
root@cloud:~# 

重启 quantum:

root@cloud:~# service quantum-server restart
查看一下日志:
server.log
2013-10-25 23:26:59     INFO [quantum.service] Quantum service started, listening on 0.0.0.0:9696
root@cloud:/var/log/quantum# lsof -i:9696
COMMAND   PID    USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
python  19740 quantum    7u  IPv4  61514      0t0  TCP *:9696 (LISTEN)
root@cloud:/var/log/quantum#

 

安装OpenStack web控制台(dashboard):

root@cloud:~# apt-get install -y openstack-dashboard memcached python-memcache

如果不喜欢ubuntu主题可以使用下面命令删除 :
apt-get remove --purge openstack-dashboard-ubuntu-theme  

移除ubuntu主题之后:

 通过浏览器访问:http://x.x.x.x/horizon  用户名/密码:demo/openstack 或者 admin/openstack (至此控制节点安装完成,如果你的控制台不出现错误提示说明安装成功)

posted @ 2013-10-25 14:32  osxlinux  阅读(1264)  评论(0编辑  收藏  举报