1、openstack集群部署
2、openstack自定义ubuntu、centos镜像

#==================================================================

1 openstack集群部署

1.1 宿主机基础环境准备

#配置:4C5G(controller)/4C3G(node),双网卡(1nat,1仅主机),开启虚拟化
#关防火墙、selinux
#时间同步、时区相同
#不使用epel源,包更新破坏兼容性
#优化内核参数
#系统资源限制优化

1.2 部署 mysql-rabbitmq-memcached-haproxy-keepalived

#配置yum源
#安装openstack-train版本yum仓库
# yum -y install centos-release-openstack-train
#更改软件源为阿里云
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/CentOS-OpenStack-train.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/CentOS-OpenStack-train.repo

#安装rdo yum仓库,包含新的稳定版本的包(rdo: redhat enterprise linux openstack platform)
# yum -y install https://rdoproject.org/repos/rdo-release.rpm
#更改软件源为阿里云
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-release.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-release.repo

#配置hosts文件
# vi /etc/hosts
 ...
 172.20.0.37 openstack-rabbitmq openstack-rabbitmq.testou.com
 172.20.0.37 openstack-mysql.testou.com
 172.20.0.37 openstack-memcached.testou.com
 172.20.0.248 openstack-vip.testou.com

#================================================================================
#安装mysql
# yum -y install mariadb mariadb-server
#修改mysql配置文件
# cat /etc/my.cnf.d/openstack.cnf
 [mysqld]
 bind-address = 172.20.0.37
 default-storage-engine = innodb
 innodb_file_per_table = on
 max_connections = 4096
 collation-server = utf8_general_ci
 character-set-server = utf8
#启动mysql并设置开机自启
# systemctl enable --now mariadb
#测试mysql可进入
# mysql

#================================================================================
#安装rabbitmq
# yum -y install rabbitmq-server
#修改配置文件
# vi /etc/rabbitmq/rabbitmq.config
 ...
   {tcp_listeners, [{"172.20.0.37", 5672}]}
#启动rabbitmq并设置开机自启
# systemctl enable --now rabbitmq-server
#创建controller/node连接mq的账号
# rabbitmqctl add_user openstack openstack123
#分配mq账号权限
# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
#开启web管理插件
# rabbitmq-plugins enable rabbitmq_management
#测试web登录mq
# ss -tnl| grep 15672
#浏览器访问:http://172.20.0.37:15672,用户名密码都是guest

#================================================================================
#安装memcached
# yum -y install memcached
#修改配置文件
# cat /etc/sysconfig/memcached
 PORT="11211"
 USER="memcached"
 MAXCONN="1024"
 CACHESIZE="1024"                #修改内存
 OPTIONS="-l 172.20.0.37,::1"    #修改监听地址
#启动memcached并设置开机自启
# systemctl enable --now memcached
#确认端口已监听
# ss -ntl | grep 11211

#================================================================================
#安装keepalived
# yum -y install keepalived
#修改配置文件
# cat /etc/keepalived/keepalived.conf
global_defs {
   ...
   vrrp_iptables                    #清除iptables规则
}

vrrp_instance VI_1 {
    state MASTER
    interface eth1                    #绑定网卡
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.20.0.248/32 dev eth1 label eth1:0            #设置VIP
    }
}

#启动keepalived并设置开机自启
# systemctl enable --now keepalived
#在其他主机确认VIP可ping通
# ping openstack-vip.testou.com

#================================================================================
#安装haproxy
# yum -y install haproxy
#修改配置文件
# tail -n12 /etc/haproxy/haproxy.cfg
listen openstack-mysql-3306
  bind 172.20.0.248:3306 
  mode tcp        #必须tcp,默认http
  server 172.20.0.37 172.20.0.37:3306 check inter 3s fall 3 rise 5
listen openstack-rabbitmq-5672
  bind 172.20.0.248:5672
  mode tcp        #必须tcp,默认http
  server 172.20.0.37 172.20.0.37:5672 check inter 3s fall 3 rise 5
listen openstack-memcached-11211
  bind 172.20.0.248:11211
  mode tcp        #必须tcp,默认http
  server 172.20.0.37 172.20.0.37:11211 check inter 3s fall 3 rise 5
#启动haproxy并设置开机自启
# systemctl enable --now haproxy
#在其他节点测试openstack-vip telnet 3306/5672/11211可通
# telnet openstack-vip.testou.com 3306
# telnet openstack-vip.testou.com 5672
# telnet openstack-vip.testou.com 11211

1.3 部署 controller 节点

1.3.1 基础配置

#配置yum源
#安装openstack-train版本yum仓库
# yum -y install centos-release-openstack-train
#更改软件源为阿里云
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/CentOS-OpenStack-train.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/CentOS-OpenStack-train.repo

#安装rdo yum仓库,包含新的稳定版本的包(rdo: redhat enterprise linux openstack platform)
# yum -y install https://rdoproject.org/repos/rdo-release.rpm
#更改软件源为阿里云
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-release.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-release.repo

#配置hosts文件
# vi /etc/hosts
 ...
 172.20.0.37 openstack-rabbitmq openstack-rabbitmq.testou.com
 172.20.0.37 openstack-mysql.testou.com
 172.20.0.37 openstack-memcached.testou.com
 172.20.0.248 openstack-vip.testou.com

#安装openstack客户端、openstack-selinux管理包
# yum -y install python-openstackclient openstack-selinux

#安装连接mysql组件(只有controller可以连接mysql)
# yum -y install python2-PyMySQL

#安装连接memcached组件
# yum -y install python-memcached

#安装libibverbs,提供库文件libibverbs.so.1
# yum -y install libibverbs

1.3.2 keystone 认证 tcp:5000

#在mysql服务端创建keystone数据库
# mysql -e "create database keystone"
#在mysql服务端创建keystone用户并设置密码keystone123
# mysql -e "grant all privileges on keystone.* to 'keystone'@'%' identified by 'keystone123'"

#在controller节点测试连接mysql
# yum -y install mariadb
# mysql -ukeystone -pkeystone123 -h172.20.0.248

#安装keystone组件
# yum -y install openstack-keystone httpd mod_wsgi        #mod_wsgi是httpd代理python的组件

#修改keystone配置文件
# vi /etc/keystone/keystone.conf
 [database]                    #数据库地址、账号、密码
 connection = mysql+pymysql://keystone:keystone123@openstack-vip.testou.com/keystone
 [token]                        #默认有效期1小时
 [credential]
 provider = fernet

#初始化keystone数据库
# su keystone -s /bin/sh -c "keystone-manage db_sync"

#keystone数据库生成相关表
# mysql -ukeystone -pkeystone123 -h172.20.0.248 -e "show tables from keystone"

#初始化若不成功,日志也会报错
# tail /var/log/messages

#初始化fernet密钥存储库
#生成密钥
# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

#密钥存放位置
# ll /etc/keystone/fernet-keys/
# ll /etc/keystone/credential-keys/

#haproxy主机添加四层代理配置
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-keystone-5000
  bind 172.20.0.248:5000
  mode tcp        #必须tcp,默认http
  server 172.20.0.7 172.20.0.7:5000 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#对openstack进行初始化
# keystone-manage bootstrap --bootstrap-password admin \
    --bootstrap-admin-url http://openstack-vip.testou.com:5000/v3/ \
    --bootstrap-internal-url http://openstack-vip.testou.com:5000/v3/ \
    --bootstrap-public-url http://openstack-vip.testou.com:5000/v3/ \
    --bootstrap-region-id RegionOne

#keystone数据库相关表生成数据                #service|user|project|endpoint
# mysql -ukeystone -pkeystone123 -h172.20.0.248 -e "select * from keystone.service"
# mysql -ukeystone -pkeystone123 -h172.20.0.248 -e "select * from keystone.user" 

#配置httpd
# vi /etc/httpd/conf/httpd.conf
 ...
 ServerName 172.20.0.7:80
# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
#开启httpd服务并设置开机自启
# systemctl enable --now httpd

#测试httpd服务,返回json数据则正常
# curl http://openstack-vip.testou.com:5000
{"versions": {"values": [{"status": "stable", "updated": "2019-07-19T00:00:00Z", "media-types": [{"base": "application/json", "type": "application/vnd.openstack.identity-v3+json"}], "id": "v3.13", "links": [{"href": "http://172.20.0.7:5000/v3/", "rel": "self"}]}]}}

#配置admin用户访问的环境变量
# vi admin-openrc.sh
 #!/bin/bash
 export OS_USERNAME=admin
 export OS_PASSWORD=admin
 export OS_PROJECT_NAME=admin
 export OS_USER_DOMAIN_NAME=Default
 export OS_PROJECT_DOMAIN_NAME=Default
 export OS_AUTH_URL=http://openstack-vip.testou.com:5000/v3
 export OS_IDENTITY_API_VERSION=3
# source admin-openrc.sh

#==========================================================================
#查看用户
# openstack user list

#创建domain            #domain,类似机房级别
                      #project,项目,通过domain隔离project,通过project隔离业务
# openstack domain create --description "An Example Domain" example

#创建项目
# openstack project create --domain default --description "Service Project" service

#查看domain和project
# openstack domain list
# openstack project list

#创建项目:myproject
# openstack project create --domain default --description "Demo Project" myproject
#创建用户:myuser
# openstack user create --domain default --password-prompt myuser
 输2次密码:myuser
#创建role:myrole
# openstack role create myrole
#role与project、user绑定
# openstack role add --project myproject --user myuser myrole

#===========================================================================
#验证操作
# unset OS_AUTH_URL OS_PASSWORD
#获取admin用户token信息
# openstack --os-auth-url http://openstack-vip.testou.com:5000/v3 \
    --os-project-domain-name Default --os-user-domain-name Default \
    --os-project-name admin --os-username admin token issue
 输两次密码:admin
#获取myuser用户token信息
# openstack --os-auth-url http://openstack-vip.testou.com:5000/v3 \
    --os-project-domain-name Default --os-user-domain-name Default \
    --os-project-name myproject --os-username myuser token issue
 输两次密码:myuser

#查看user和project
# source admin-openrc.sh 
# openstack user list
# openstack project list

#查看admin用户token
# openstack token issue

#添加环境变量
#vi admin-openrc.sh
 ...
 export OS_IMAGE_API_VERSION=2
 
#配置myuser用户访问的环境变量
# vi demo-openrc.sh
#!/bin/bash
export OS_USERNAME=myuser
export OS_PASSWORD=myuser
export OS_PROJECT_NAME=myproject
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://openstack-vip.testou.com:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
# source demo-openrc.sh

#查看admin用户token
# openstack token issue

1.3.3 glance 镜像 tcp:9292

#默认镜像只存在于controller本地,通过controller节点挂载NFS相同路径进行镜像共享

#nfs配置
# mkdir -p /data/glance
# yum -y install nfs-utils
# vi /etc/exports
 /data/glance *(rw,no_root_squash)
# systemctl enable --now nfs
# showmount -e 172.20.0.37

#=================================================================================
#glance与两个地方交互:1.到keystone验证用户token
                     2.到mysql数据库查询镜像元数据
                     
#在mysql服务端创建glance数据库
# mysql -e "create database glance"
# mysql -e "grant all privileges on glance.* to 'glance'@'%' identified by 'glance123'"

#=================================================================================
# source admin-openrc.sh

#创建glance用户
# openstack user create --domain default --password-prompt glance
 输两次密码:glance
 
#授权glance用户对project(service)有admin权限
# openstack role add --project service --user glance admin

#查看endpoint                    #输出URL列相当于:k8s-service
# openstack endpoint list
#查看service
# openstack service list

#创建service entity(实体)            #用于注册后端服务器,相当于k8s-service name
# openstack service create --name glance --description "OpenStack Image" image
#查看service
# openstack service list        #新增glance service

#创建service endpoint                #相当于k8s的pod
# openstack endpoint create --region RegionOne image public http://openstack-vip.testou.com:9292
# openstack endpoint create --region RegionOne image internal http://openstack-vip.testou.com:9292
# openstack endpoint create --region RegionOne image admin http://openstack-vip.testou.com:9292

#查看endpoint    
# openstack endpoint list

#=================================================================================
#haproxy主机添加四层代理配置
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-glance-9292
  bind 172.20.0.248:9292
  mode tcp        #必须tcp,默认http
  server 172.20.0.7 172.20.0.7:9292 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#=================================================================================
#安装glance服务
# yum -y install openstack-glance

#修改glance配置文件
# vi /etc/glance/glance-api.conf
 [database]
 connection = mysql+pymysql://glance:glance123@openstack-vip.testou.com/glance
 [keystone_authtoken]
 www_authenticate_uri = http://openstack-vip.testou.com:5000
 auth_url = http://openstack-vip.testou.com:5000
 memcached_servers = openstack-vip.testou.com:11211
 auth_type = password
 project_domain_name = Default
 user_domain_name = Default
 project_name = service
 username = glance
 password = glance
 [paste_deploy]
 flavor = keystone
 [glance_store]
 stores = file,http
 default_store = file
 filesystem_store_datadir = /var/lib/glance/images

#初始化glance数据库
# su - glance -s /bin/sh -c "glance-manage db_sync"

#glance数据库相关表生成数据
# mysql -uglance -pglance123 -h172.20.0.248 -e "show tables from glance"

#开启glance服务并设置开机自启
# systemctl enable --now openstack-glance-api
# tail -f /var/log/glance/api.log

#挂载NFS存储
# systemctl stop openstack-glance-api
# vi /etc/fstab
 ...
 172.20.0.37:/data/glance /var/lib/glance/images nfs defaults,_netdev 0 0
# mount -a
# id glance
 uid=161(glance) gid=161(glance) groups=161(glance)
# chown -R 161:161 /var/lib/glance/images
# systemctl start openstack-glance-api
# tail -f /var/log/glance/api.log

#=================================================================================
#验证

#下载验证镜像
# wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

#上传镜像到glance
# source admin-openrc.sh
# glance image-create --name "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 \
    --container-format bare --visibility public        #bare:裸磁盘;public:相当于harbor公共项目

#查看镜像名称id
# openstack image list
# glance image-list

#删除镜像
# glance image-list
# glance image-delete 2bf04f73-b2c1-4452-adf8-bcf2b22386a1

#重新上传镜像到glance
# glance image-create --name "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 \
    --container-format bare --visibility public        #bare:裸磁盘;public:相当于harbor公共项目

1.3.4 placement 资源统计 tcp:8778

#placement:由nova拆出来的服务,用于controller/node节点可用资源统计
#按官方文档装有BUG

#=================================================================================
#在mysql服务端创建placement数据库
# mysql -e "create database placement"
# mysql -e "grant all privileges on placement.* to 'placement'@'%' identified by 'placement123'"

#=================================================================================
# source admin-openrc.sh

#创建placement用户
# openstack user create --domain default --password-prompt placement
 输2次密码:placement

#授权placement用户在project(service)中有admin权限
# openstack role add --project service --user placement admin

#创建service entity实体        #用于注册后端服务器,相当于k8s-service name
# openstack service create --name placement --description "Placement API" placement

#查看service entity实体
# openstack service list        #新增placement service

#创建service endpoint        #相当于k8s的pod
# openstack endpoint create --region RegionOne placement public http://openstack-vip.testou.com:8778
# openstack endpoint create --region RegionOne placement internal http://openstack-vip.testou.com:8778
# openstack endpoint create --region RegionOne placement admin http://openstack-vip.testou.com:8778

#查看endpoint
# openstack endpoint list

#=================================================================================
#haproxy主机添加四层代理配置
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-placement-8778
  bind 172.20.0.248:8778
  mode tcp        #必须tcp,默认http
  server 172.20.0.7 172.20.0.7:8778 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#=================================================================================
#安装placement服务
# yum -y install openstack-placement-api

#修改placement配置文件
# vi /etc/placement/placement.conf
 [placement_database]
 connection = mysql+pymysql://placement:placement123@openstack-vip.testou.com/placement
 [api]
 auth_strategy = keystone
 [keystone_authtoken]
 auth_url = http://openstack-vip.testou.com:5000/v3
 memcached_servers = openstack-vip.testou.com:11211
 auth_type = password
 project_domain_name = Default
 user_domain_name = Default
 project_name = service
 username = placement
 password = placement

#初始化placement数据库
# su - placement -s /bin/sh -c "placement-manage db sync"        #警告,可忽略
 /usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1280, u"Name 'alembic_version_pkc' ignored for PRIMARY key.")
  result = self._query(query)

#直接启动有问题,BUG,Train版没写解决方法,R版nova组件中有
#BUG:后期访问httpd-api,调用/usr/bin/placement-api由于没有访问权限,会报错403
#解决方法:授权/usr/bin目录访问权限
# vi /etc/httpd/conf.d/00-placement-api.conf
 <Directory /usr/bin>
   <IfVersion >= 2.4>
     Require all granted
   </IfVersion>
   <IfVersion < 2.4>
     Order allow,deny
     Allow from all
   </IfVersion>
 </Directory>
# systemctl restart httpd

#检查placement状态
# placement-status upgrade check
#返回json数据就没问题
# curl http://openstack-vip.testou.com:8778
{"versions": [{"status": "CURRENT", "min_version": "1.0", "max_version": "1.36", "id": "v1.0", "links": [{"href": "", "rel": "self"}]}]}

1.3.5 nova 计算 tcp:8774

#角色:控制节点、计算节点
    controller:nova管理端服务,接收响应外部请求,筛选node节点,下发创建虚拟机指令到mq,node节点监听mq
    node:agent,nova客户端服务,计算节点通过libvirt调用kvm创建虚拟机
    两者通过rabbitmq通信

#组件:
 nova-api            #接收和响应外部请求,端口8774
 nova-api-metadata    #获取主机元数据
 nova-compute        #node节点组件,通过libvirt调用kvm创建虚拟机
 nova-scheduler        #调度虚拟机到指定物理机
 nova-conductor        #计算节点访问mysql的中间件,nova-compute->mq<->controller->conductor->mysql
 nova-novncproxy    #VNC代理,显示虚拟机操作终端
 nova-spicehtml5proxy    #html5代理,显示虚拟机操作终端

#region        #地区
#domain        #机房

#nova-scheduler调度策略:
1)根据policy过滤node
2)计算node权重

#======================================================================================
#controller节点配置

#在mysql服务端创建nova数据库
# mysql -e "create database nova_api"
# mysql -e "create database nova"
# mysql -e "create database nova_cell0"
# mysql -e "grant all privileges on nova_api.* to 'nova'@'%' identified by 'nova123'"
# mysql -e "grant all privileges on nova.* to 'nova'@'%' identified by 'nova123'"
# mysql -e "grant all privileges on nova_cell0.* to 'nova'@'%' identified by 'nova123'"

#======================================================================================
# source admin-openrc.sh

#创建nova用户
# openstack user create --domain default --password-prompt nova
    输2次密码nova

#授权nova用户在project(service)中有admin权限
# openstack role add --project service --user nova admin

#创建service entity实体        #用于注册后端服务器,相当于k8s-service name
# openstack service create --name nova --description "OpenStack Compute" compute
#查看service
# openstack service list        #新增compute service

#创建service endpoint            #相当于k8s的pod
openstack endpoint create --region RegionOne compute public http://openstack-vip.testou.com:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://openstack-vip.testou.com:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://openstack-vip.testou.com:8774/v2.1
#查看endpoint
openstack endpoint list

#======================================================================================
#haproxy主机添加四层代理配置
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-nova-controller-8774
  bind 172.20.0.248:8774
  mode tcp        #必须tcp,默认http
  server 172.20.0.7 172.20.0.7:8774 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#======================================================================================
#安装nova管理端服务
# yum -y install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler

#修改nova配置文件
# vi /etc/nova/nova.conf
 [DEFAULT]
 enabled_apis = osapi_compute,metadata
 transport_url = rabbit://openstack:openstack123@openstack-vip.testou.com:5672/
 use_neutron = true
 firewall_driver = nova.virt.firewall.NoopFirewallDriver        #驱动,和neutron交互,库文件/usr/lib/python2.7/site-packages/nova/virt/firewall.py
 #配置虚拟机自启动(还原虚拟机状态:开机状态/关机状态)
 resume_guests_state_on_host_boot=true
 #配置CPU超限
 cpu_allocation_ratio=1.0
 initial_cpu_allocation_ratio=1.0
 #内存超限
 ram_allocation_ratio=1.0            #最好不开
 initial_ram_allocation_ratio=1.0
 #磁盘超限
 disk_allocation_ratio=1.0            #不开
 initial_disk_allocation_ratio=1.0    
 #配置宿主机保留磁盘空间
 reserved_host_disk_mb=20480            #20G
 #宿主机保留内存
  reserved_host_memory_mb=1024        #生产保留4G
 #允许虚拟机类型动态调整
 allow_resize_to_same_host=true
 #node筛选策略
baremetal_enabled_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
 [api_database]        #连接nova_api数据库
 connection = mysql+pymysql://nova:nova123@openstack-vip.testou.com/nova_api
 [database]            #连接nova数据库
 connection = mysql+pymysql://nova:nova123@openstack-vip.testou.com/nova
 [api]
 auth_strategy = keystone
 [keystone_authtoken]
 www_authenticate_uri = http://openstack-vip.testou.com:5000/
 auth_url = http://openstack-vip.testou.com:5000/
 memcached_servers = openstack-vip.testou.com:11211
 auth_type = password
 project_domain_name = Default
 user_domain_name = Default
 project_name = service
 username = nova
 password = nova
 [vnc]                    #后期日志如果报1006|1005状态码,为vnc连不上
 enabled = true
 server_listen = 10.0.0.7
 server_proxyclient_address = 10.0.0.7
 [glance]
 api_servers = http://openstack-vip.testou.com:9292
 [oslo_concurrency]        #锁路径,创建虚拟机时的操作步骤必须顺序执行,一个操作由一个组件执行
 lock_path = /var/lib/nova/tmp
 [placement]
 region_name = RegionOne
 project_domain_name = Default
 project_name = service
 auth_type = password
 user_domain_name = Default
 auth_url = http://openstack-vip.testou.com:5000/v3
 username = placement
 password = placement

#初始化nova_api数据库
# su - nova -s /bin/sh -c "nova-manage api_db sync"
#确认nova_api数据库生成相关表
# mysql -unova -pnova123 -h172.20.0.248 -e "show tables from nova_api"

#注册cell0数据库
# su - nova -s /bin/sh -c "nova-manage cell_v2 map_cell0"

#创建cell1 cell
# su - nova -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose"

#初始化nova数据库
# su - nova -s /bin/sh -c "nova-manage db sync"            #警告,忽略
#确认nova数据库生成相关表
# mysql -unova -pnova123 -h172.20.0.248 -e "show tables from nova"

#验证cell0和cell1是否注册成功
# su - nova -s /bin/sh -c "nova-manage cell_v2 list_cells"

#启动nova服务并设置开机自启
# systemctl enable --now openstack-nova-api openstack-nova-scheduler openstack-nova-conductor openstack-nova-novncproxy

#nova日志
# tail -f /var/log/nova/*.log

#======================================================================================
#haproxy主机添加四层代理配置
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-nova-novncproxy-6080
  bind 172.20.0.248:6080
  server 172.20.0.7 172.20.0.7:6080 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#nova日志报错,和mysql连接中断,由代理连接mysql,超过超时时间没有和数据库进行交互,就断开
#修改haproxy超时时间
# vi /etc/haproxy/haproxy.cfg
...
defaults
  timeout client 5m    #默认1分钟
  timeout server 5m    #默认1分钟
  timeout http-keep-alive 5m    #默认10s
  maxconn    30000    #默认3000
# systemctl reload haproxy

#======================================================================================
#编写nova重启脚本,便于后期维护
# vi nova-restart.sh
 #!/bin/bash
 systemctl restart openstack-nova-api openstack-nova-scheduler openstack-nova-conductor openstack-nova-novncproxy

1.3.6 neutron 网络 tcp:9696

#角色:控制节点、计算节点
    controller:neutron管理端服务
    node:neutron客户端服务

#网络类型:
 提供者网络:即桥接,虚拟机网络和宿主机网络在同一网段
             如:vm1_eth0-->网桥设备-->宿主机eth0
 自服务网络:由openstack创建一个独立于宿主机网段,连接虚拟路由器
            自己创建的网络,最终通过虚拟路由器连接外网,公有云使用多

#===================================================================================
#controller节点配置

#在mysql服务端创建neutron数据库
# mysql -e "create database neutron"
# mysql -e "grant all privileges on neutron.* to 'neutron'@'%' identified by 'neutron123'"

#===================================================================================
# source admin-openrc.sh

#创建neutron用户
# openstack user create --domain default --password-prompt neutron
    输2次neutron

#授权neutron用户在project(service)中有admin权限
# openstack role add --project service --user neutron admin

#创建service entity实体        #用于注册后端服务器,相当于k8s-service name
# openstack service create --name neutron --description "OpenStack Networking" network

#查看service
# openstack service list

#创建service endpoint            #相当于k8s的pod
# openstack endpoint create --region RegionOne network public http://openstack-vip.testou.com:9696
# openstack endpoint create --region RegionOne network internal http://openstack-vip.testou.com:9696
# openstack endpoint create --region RegionOne network admin http://openstack-vip.testou.com:9696

#查看endpoint
# openstack endpoint list

#===================================================================================
#haproxy主机添加四层代理配置
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-neutron-9696
  bind 172.20.0.248:9696
  mode tcp        #必须tcp,默认http
  server 172.20.0.7 172.20.0.7:9696 check inter 3s fall 3 rise 5
listen openstack-nova-api:8775
  bind 172.20.0.248:8775
  mode tcp      #必须tcp,默认http
  server 172.20.0.7 172.20.0.7:8775 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#===================================================================================
#安装neutron提供者网络(桥接)管理端服务
# yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables        #ebtables类似阿里云安全组

#修改neutron配置文件
# vi /etc/neutron/neutron.conf
 [database]
 connection = mysql+pymysql://neutron:neutron123@openstack-vip.testou.com/neutron
 [DEFAULT]
 core_plugin = ml2        #二层插件,桥接
 service_plugins =        #三层插件,不安装
 transport_url = rabbit://openstack:openstack123@openstack-vip.testou.com
 auth_strategy = keystone
 notify_nova_on_port_status_changes = true    #通知网络变化
 notify_nova_on_port_data_changes = true
 [keystone_authtoken]
 www_authenticate_uri = http://openstack-vip.testou.com:5000
 auth_url = http://openstack-vip.testou.com:5000
 memcached_servers = openstack-vip.testou.com:11211
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 project_name = service
 username = neutron
 password = neutron
 [nova]            #在最后添加
 auth_url = http://openstack-vip.testou.com:5000
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 region_name = RegionOne
 project_name = service
 username = nova
 password = nova
 [oslo_concurrency]
 lock_path = /var/lib/neutron/tmp
 [quotas]
 quota_network = 100
 quota_subnet = 100
 quota_port = 5000
 quota_driver = neutron.db.quota.driver.DbQuotaDriver
 quota_router = 100
 quota_floatingip = 1000
 quota_security_group = 100
 quota_security_group_rule = 200

#配置二层插件
#原始文件缺失部分配置项,使用其他的版本替换:http://docs.openstack.org/newton/config-reference/networking/samples/ml2_conf.ini.html
# vi /etc/neutron/plugins/ml2/ml2_conf.ini
 [ml2]
 type_drivers = flat,vlan        #单一扁平网络,就是桥接网络
 tenant_network_types =
 mechanism_drivers = linuxbridge        #桥接
 extension_drivers = port_security        #端口安全扩展驱动,基于iptables规则,一般不装
 [ml2_type_flat]
 flat_networks = external            #将提供者网络配置为扁平网络,声明桥接网络名称
 [securitygroup]
 enable_security_group = false
 enable_ipset = true                #启用ipset提高安全组规则的效率

#链接二层插件配置
# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

#配置linux bridge代理
#原始文件缺失部分配置项,使用其他的版本替换:http://docs.openstack.org/newton/config-reference/networking/samples/linuxbridge_agent.ini.html
# vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
 [linux_bridge]                #定义虚拟网络和物理网卡的对应关系
 physical_interface_mappings = external:eth0                #eth0为宿主机连接外网网卡
 [securitygroup]                #安全组设置
 enable_security_group = false
 firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
 [vxlan]
 enable_vxlan = false            #不使用自服务网络则关闭

#开启内核参数
# vi /etc/sysctl.conf
 ...
 net.bridge.bridge-nf-call-iptables = 1            #允许流量经过宿主机
 net.bridge.bridge-nf-call-ip6tables = 1
#直接执行sysctl -p将报错,因为br_netfilter模块尚未加载(启动neutron服务后会自动加载)
#或先加载模块
# modprobe br_netfilter
# sysctl -p

#配置DHCP代理
# vi /etc/neutron/dhcp_agent.ini
 [DEFAULT]
 interface_driver = linuxbridge
 dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
 enable_isolated_metadata = true

#配置元数据代理
# vi /etc/neutron/metadata_agent.ini            #配置元数据主机和密钥,用于下一步nova连接neutron时认证
 [DEFAULT]
 nova_metadata_host = openstack-vip.testou.com        #nova获取元数据的主机,tcp_8775
 metadata_proxy_shared_secret = magedu20200412

#配置nova连接neutron,获取网络信息
# vi /etc/nova/nova.conf
 [neutron]
 auth_url = http://openstack-vip.testou.com:5000
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 region_name = RegionOne
 project_name = service
 username = neutron
 password = neutron
 service_metadata_proxy = true
 metadata_proxy_shared_secret = magedu20200412

#初始化neutron数据库
# su - neutron -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
    --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head"
#确认neutron数据库生成相关表
# mysql -uneutron -pneutron123 -h172.20.0.248 -e "show tables from neutron"

#======================================================================================
#早期BUG: TypeError: unsupported operand type(s) for -: 'NoneType' and 'int'
#解决方法:修改源码,否则创建的虚拟机网卡不会桥接到宿主机网卡
#原因:官方文档有三个网络,虚拟机不会桥接到管理网,早期其他版本不存在这问题
# vi /usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
 注释399-400行

#重启nova服务
# systemctl restart openstack-nova-api

#======================================================================================
#开启neutron服务并设置开机自启
# systemctl enable --now neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent

#日志
#tail -f /var/log/neutron/*.log

#======================================================================================
#编写neutron重启脚本,便于后期维护
# vi neutron-restart.sh
 #!/bin/bash
 systemctl restart neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent

1.3.7 horizon 可视化

#T/S/R/Q版官网配置不全,装好不能用,缺配置

#安装dashboard服务
# yum -y install openstack-dashboard

#修改dashboard配置文件
# vi /etc/openstack-dashboard/local_settings
 OPENSTACK_HOST = "172.20.0.7"        #dashboard服务所在controller节点地址,默认127.0.0.1
 ALLOWED_HOSTS = ['172.20.0.7', 'openstack-vip.testou.com']    #允许访问dashboard服务的主机,不在列表的主机不响应,类似nginx多域名
                                                                #*为允许所有
 #---------------------------------------------------------------------------------
 #配置session保存到memcached 
 SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
 CACHES = {
     'default': {
          'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
          'LOCATION': 'openstack-vip.testou.com:11211',
     }
 }
 #---------------------------------------------------------------------------------
 #开启v3版本的API认证,默认开启
 OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
 #---------------------------------------------------------------------------------
 #开启支持domain
 OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
 #---------------------------------------------------------------------------------
 #添加API版本
 OPENSTACK_API_VERSIONS = {
     "identity": 3,
     "image": 2,
     "volume": 3,
 }
 #---------------------------------------------------------------------------------
 #设置通过dashboard创建的用户默认domain为Default
 OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
 #---------------------------------------------------------------------------------
 #设置通过dashboard创建的用户默认role为user
 OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
 #---------------------------------------------------------------------------------
 #如果是提供者网络(桥接),关闭三层网络服务
 OPENSTACK_NEUTRON_NETWORK = {
     ...
     'enable_router': False,
     'enable_quotas': False,
     'enable_distributed_router': False,
     'enable_ha_router': False,
     'enable_lb': False,
     'enable_firewall': False,
     'enable_vpn': False,
     'enable_fip_topology_check': False,
 }
 #---------------------------------------------------------------------------------
 #设置时区
 TIME_ZONE = "Asia/Shanghai"
 #---------------------------------------------------------------------------------
 #添加WEBROOT
 WEBROOT = '/dashboard'
 #---------------------------------------------------------------------------------
 #解决500报错,token目录无法访问问题
 LOCAL_PATH = '/var/lib/openstack-dashboard'        #apache对该目录必须有权限
 #---------------------------------------------------------------------------------
 
#===================================================================================
#haproxy主机添加四层代理配置
# tail -n4 /etc/haproxy/haproxy.cfg
listen openstack-dashboard-80
  bind 172.20.0.248:80
  server 172.20.0.7 172.20.0.7:80 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#===================================================================================
#添加httpd配置
# vi /etc/httpd/conf.d/openstack-dashboard.conf
 WSGIApplicationGroup %{GLOBAL}
#重启httpd
# systemctl restart httpd

#重启httpd后,生成token所有者所属组为root,改为apache
# chown apache:apache /var/lib/openstack-dashboard/.secret_key_store

#===================================================================================
#验证
#win主机添加hosts解析
#浏览器访问:http://openstack-vip.testou.com/dashboard
 域:default
 用户名:admin
 密码:admin

1.3.7.1 dashboard 排错

#500错误,原因:httpd生成token目录为/tmp,无写入权限
#修改dashboard配置文件
# vi /etc/openstack-dashboard/local_settings
#LOCAL_PATH = '/tmp'
LOCAL_PATH = '/var/lib/openstack-dashboard'        #apache对该目录必须有权限

#重启httpd
# systemctl restart httpd

#重启httpd后,生成token所有者所属组为root,改为apache
# chown apache:apache /var/lib/openstack-dashboard/.secret_key_store

#浏览器访问:http://openstack-vip.testou.com/dashboard
 域:default
 用户名:admin
 密码:admin

1.4 部署 node 节点

1.4.1 基础配置

#配置yum源
#安装openstack-train版本yum仓库
# yum -y install centos-release-openstack-train
#更改软件源为阿里云
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/CentOS-OpenStack-train.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/CentOS-OpenStack-train.repo

#安装rdo yum仓库,包含新的稳定版本的包(rdo: redhat enterprise linux openstack platform)
# yum -y install https://rdoproject.org/repos/rdo-release.rpm
#更改软件源为阿里云
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-qemu-ev.repo
# sed -ri '/mirrorlist.centos.org/s/(.*)/#\1/' /etc/yum.repos.d/rdo-release.repo
# sed -ri '/mirror.centos.org/s/#(.*)mirror.centos.org(.*)/\1mirrors.aliyun.com\2/' /etc/yum.repos.d/rdo-release.repo

#配置hosts文件
# vi /etc/hosts
 ...
 172.20.0.37 openstack-rabbitmq openstack-rabbitmq.testou.com
 172.20.0.37 openstack-mysql.testou.com
 172.20.0.37 openstack-memcached.testou.com
 172.20.0.248 openstack-vip.testou.com

#安装openstack客户端、openstack-selinux管理包
# yum -y install python-openstackclient openstack-selinux

1.4.2 nova 计算

#node节点配置

#安装nova-compute服务
# yum -y install openstack-nova-compute

#修改nova配置文件
# vi /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack123@openstack-vip.testou.com
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver        #驱动,和neutron交互
#配置虚拟机自启动(还原虚拟机状态:开机状态/关机状态)
resume_guests_state_on_host_boot=true
#配置CPU超限
cpu_allocation_ratio=1.0
initial_cpu_allocation_ratio=1.0
#内存超限
ram_allocation_ratio=1.0            #最好不开
initial_ram_allocation_ratio=1.0
#磁盘超限
disk_allocation_ratio=1.0            #不开
initial_disk_allocation_ratio=1.0    
#配置宿主机保留磁盘空间
reserved_host_disk_mb=20480            #20G
#宿主机保留内存
reserved_host_memory_mb=1024        #生产保留4G
#允许虚拟机类型动态调整
allow_resize_to_same_host=true
#node筛选策略(注意:train版本中官方推荐使用[filter_scheduler]段的enabled_filters选项,baremetal_enabled_filters已标记废弃,需核实)
baremetal_enabled_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.testou.com:5000/
auth_url = http://openstack-vip.testou.com:5000/
memcached_servers = openstack-vip.testou.com:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[vnc]    #1006|1005状态码,vnc连不上
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = 10.0.0.27        #本机地址,可连外网
novncproxy_base_url = http://openstack-vip.testou.com:6080/vnc_auto.html
[glance]
api_servers = http://openstack-vip.testou.com:9292
[oslo_concurrency]            #锁路径,创建虚拟机时的操作步骤必须顺序执行,一个操作由一个组件执行
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://openstack-vip.testou.com:5000/v3
username = placement
password = placement
[libvirt]                                        #更改虚拟化类型
cpu_mode = host-passthrough                        #cpu模式,透传,直接使用宿主机的CPU模式
hw_machine_type = x86_64=pc-i440fx-rhel7.2.0

#查看node节点是否支持硬件辅助虚拟化
# egrep -c '(vmx|svm)' /proc/cpuinfo

#虚拟化类型
# virsh capabilities

#开启nova-compute服务
# systemctl enable --now libvirtd openstack-nova-compute

#日志,info级别,kernel不支持AMD SEV,忽略
# tail -f /var/log/nova/nova-compute.log

#======================================================================================
#编写nova重启脚本,便于后期维护
# vi nova-restart.sh
 #!/bin/bash
 systemctl restart libvirtd openstack-nova-compute

#============================================================================
#在控制节点验证
# source admin-openrc.sh

#nova计算节点是否注册到controller
# openstack compute service list --service nova-compute            #默认注册到nova zone

#计算节点主动发现
# su - nova -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose"
#对应controller nova配置文件
# vi /etc/nova/nova.conf
 [scheduler]
 discover_hosts_in_cells_interval = 300            #300s扫描一次,刚开始创建可以改短点

#验证
# source admin-openrc.sh

#计算服务是否注册到controller
# openstack compute service list

#检查api是否正常,endpoint
# openstack catalog list

#镜像是否能拿到
# openstack image list

#检查cell状态是否正常
# nova-status upgrade check            #cell和placement状态为success

#controller和node日志不能有任何error

#nova服务状态检查
# nova service-list                    #状态为up

1.4.3 neutron 网络

#node节点配置

#安装neutron提供者网络(桥接)客户端服务
# yum -y install openstack-neutron-linuxbridge ebtables ipset

#修改neutron配置文件
# vi /etc/neutron/neutron.conf
 [DEFAULT]
 transport_url = rabbit://openstack:openstack123@openstack-vip.testou.com
 auth_strategy = keystone
 [keystone_authtoken]
 www_authenticate_uri = http://openstack-vip.testou.com:5000
 auth_url = http://openstack-vip.testou.com:5000
 memcached_servers = openstack-vip.testou.com:11211
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 project_name = service
 username = neutron
 password = neutron
 [oslo_concurrency]
 lock_path = /var/lib/neutron/tmp
 [quotas]
 quota_network = 100
 quota_subnet = 100
 quota_port = 5000
 quota_driver = neutron.db.quota.driver.DbQuotaDriver
 quota_router = 100
 quota_floatingip = 1000
 quota_security_group = 100
 quota_security_group_rule = 200

#配置linux bridge代理
#原始文件缺失部分配置项,使用其他的版本替换:http://docs.openstack.org/newton/config-reference/networking/samples/linuxbridge_agent.ini.html
# vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
 [linux_bridge]            #定义虚拟网络和物理网卡的对应关系
 physical_interface_mappings = external:eth0                        #eth0为宿主机连接外网网卡
 [securitygroup]                    #安全组设置
 enable_security_group = false        #不开启安全组
 firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
 [vxlan]
 enable_vxlan = false        #不使用自服务网络则关闭

#开启内核参数
# vi /etc/sysctl.conf
 ...
 net.bridge.bridge-nf-call-iptables = 1            #允许流量经过宿主机
 net.bridge.bridge-nf-call-ip6tables = 1
#直接执行sysctl -p将报错,因为还未启动neutron服务
#或先加载模块
# modprobe br_netfilter
# sysctl -p

#配置nova连接neutron,获取网络信息
# vi /etc/nova/nova.conf
 [neutron]
 auth_url = http://openstack-vip.testou.com:5000
 auth_type = password
 project_domain_name = default
 user_domain_name = default
 region_name = RegionOne
 project_name = service
 username = neutron
 password = neutron

#重启nova服务
# systemctl restart openstack-nova-compute

#开启neutron服务并设置开机自启
# systemctl enable --now neutron-linuxbridge-agent

#日志
# tail -f /var/log/neutron/*.log

#======================================================================================
#编写neutron重启脚本,便于后期维护
# vi neutron-restart.sh
 #!/bin/bash
 systemctl restart neutron-linuxbridge-agent

#======================================================================================
#在controller节点验证
# source admin-openrc.sh
# openstack network agent list
# neutron agent-list            #新版将废弃
# nova service-list             #up状态
# brctl show                    #无,还没有创建网络,创建后会桥接

1.5 创建实例

1.5.1 命令行创建实例

# source admin-openrc.sh

#创建实例前,先创建网络
#自建提供者网络
#--share:共享网络;--external:声明创建的是外部网络,默认为internal(内部网络)
#--provider-physical-network external:在openstack定义的物理网络上创建自己的网络
#--provider-network-type flat:类型为flat(桥接),external-net为创建的网络名称
#注意:反斜杠续行符后不能跟行尾注释,否则命令会被截断
# openstack network create --share --external \
    --provider-physical-network external \
    --provider-network-type flat external-net

#查看网络
# openstack network list            #没有子网
+--------------------------------------+--------------+---------+
| ID                                   | Name         | Subnets |
+--------------------------------------+--------------+---------+
| 67ad5723-6f35-48ab-8f46-fe1bfbc5254a | external-net |         |
+--------------------------------------+--------------+---------+

#在自建的网络上,创建子网
# openstack subnet create --network external-net \
  --allocation-pool start=10.0.0.201,end=10.0.0.250 \
  --dns-nameserver 223.5.5.5 --gateway 10.0.0.2 \
  --subnet-range 10.0.0.0/24 external-sub

#查看网络
# openstack network list
+--------------------------------------+--------------+--------------------------------------+
| ID                                   | Name         | Subnets                              |
+--------------------------------------+--------------+--------------------------------------+
| 67ad5723-6f35-48ab-8f46-fe1bfbc5254a | external-net | c1ac4b7b-6740-4ebc-884b-f22c1471c062 |
+--------------------------------------+--------------+--------------------------------------+

#查看网卡桥接情况
#  brctl show
 bridge name        bridge id            STP enabled        interfaces
 brq67ad5723-6f        8000.000c29a51663    no                eth0
                                                        tap69e6b71f-74

#================================================================================
#创建虚拟机类型
# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano

#生成密钥对用于免密登录虚拟机
# ssh-keygen -q -N ""

#上传密钥到openstack
# openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey

#查看密钥
# openstack keypair list

#在default安全组添加规则:放行icmp和ssh
# openstack security group rule create --proto icmp default
# openstack security group rule create --proto tcp --dst-port 22 default
#查看规则
# openstack security group rule list

#================================================================================
#创建实例前验证
#查看虚拟机类型
# openstack flavor list            #m1.nano类型
#查看镜像
# openstack image list            #cirros镜像
#查看网络
# openstack network list            #有自建网络,external-net
#查看安全组
# openstack security group list        #default安全组

#================================================================================
#创建虚拟机
# openstack server create --flavor m1.nano --image cirros \
  --nic net-id=67ad5723-6f35-48ab-8f46-fe1bfbc5254a --security-group default \
  --key-name mykey linux39-vm1

#查看日志
# tail -f /var/log/neutron/*.log

#查看虚拟机状态
# openstack server list            #状态为ACTIVE
+--------------------------------------+-------------+--------+-------------------------+--------+---------+
| ID                                   | Name        | Status | Networks                | Image  | Flavor  |
+--------------------------------------+-------------+--------+-------------------------+--------+---------+
| 48a9ceab-affb-46fb-90f3-58eb3f4f44d6 | linux39-vm1 | ACTIVE | external-net=10.0.0.235 | cirros | m1.nano |
+--------------------------------------+-------------+--------+-------------------------+--------+---------+

#查看虚拟机console链接
# openstack console url show linux39-vm1
+-------+---------------------------------------------------------------------------------------------------------+
| Field | Value                                                                                                   |
+-------+---------------------------------------------------------------------------------------------------------+
| type  | novnc                                                                                                   |
| url   | http://openstack-vip.testou.com:6080/vnc_auto.html?path=%3Ftoken%3D310447df-1327-45c4-957b-c6219808684c |
+-------+---------------------------------------------------------------------------------------------------------+

#win主机添加hosts解析
#浏览器访问:http://openstack-vip.testou.com:6080/vnc_auto.html?path=%3Ftoken%3D310447df-1327-45c4-957b-c6219808684c
 处于GRUB状态,原因:虚拟化类型不对

#更改虚拟化类型
# vi /etc/nova/nova.conf                #node节点修改
 cpu_mode = host-passthrough            #cpu模式,透传,直接使用宿主机的CPU模式
 hw_machine_type = x86_64=pc-i440fx-rhel7.2.0

#重启node节点nova服务
# bash nova-restart.sh

#================================================================================
#再次创建虚拟机
# openstack server create --flavor m1.nano --image cirros \
  --nic net-id=67ad5723-6f35-48ab-8f46-fe1bfbc5254a --security-group default \
  --key-name mykey linux39-vm2

#查看虚拟机状态
# openstack server list
+--------------------------------------+-------------+---------+-------------------------+--------+---------+
| ID                                   | Name        | Status  | Networks                | Image  | Flavor  |
+--------------------------------------+-------------+---------+-------------------------+--------+---------+
| d5c5f4e3-2e1d-4ada-8b76-8aca9a2fefe8 | linux39-vm2 | ACTIVE  | external-net=10.0.0.203 | cirros | m1.nano |
| 48a9ceab-affb-46fb-90f3-58eb3f4f44d6 | linux39-vm1 | SHUTOFF | external-net=10.0.0.235 | cirros | m1.nano |
+--------------------------------------+-------------+---------+-------------------------+--------+---------+

#查看虚拟机console链接
# openstack console url show linux39-vm2
+-------+---------------------------------------------------------------------------------------------------------+
| Field | Value                                                                                                   |
+-------+---------------------------------------------------------------------------------------------------------+
| type  | novnc                                                                                                   |
| url   | http://openstack-vip.testou.com:6080/vnc_auto.html?path=%3Ftoken%3Dc670a250-2f54-48ec-a6a3-4dd9edf5e959 |
+-------+---------------------------------------------------------------------------------------------------------+

#浏览器重新访问:http://openstack-vip.testou.com:6080/vnc_auto.html?path=%3Ftoken%3Dc670a250-2f54-48ec-a6a3-4dd9edf5e959
 访问成功,用户名cirros,密码:gocubsgo

#controller节点测试连接虚拟机
# ping 10.0.0.203
# ssh cirros@10.0.0.203
$ ping 10.0.0.2                #虚拟机可通网关
$ ping www.baidu.com          #虚拟机可通外网

1.5.2 dashboard 创建实例

#===================================================================================
#创建镜像
 管理员->计算->镜像->创建镜像->镜像名称:Centos-7.2.1511
                             文件:选中制作好的镜像文件
                             镜像格式:QCOW2
                             架构:x86_64
                             最小磁盘 (GB):10
                             最低内存 (MB):1024
                             可见性:共享的
                             创建镜像

#===================================================================================
#创建实例类型
 管理员->计算->实例类型->创建实例类型->名称:1C-1G-50G
                                     ID:auto
                                     VCPU数量:1
                                     内存 (MB):1024
                                     根磁盘(GB):50
                                     创建实例类型
               
#===================================================================================
#创建虚拟机
 项目->计算->实例->创建实例->详情->实例名称:linux39-vm1
                                 描述:linux39-vm1
                                 可用域:nova
                                 数量:1
                             ->源->选择源:Image
                                 可用配额:选中镜像
                             ->实例类型->可用配额:选中实例类型
                             创建实例

 

1.5.3 区分不同项目组创建实例(主机聚合)

 

#管理员->主机聚合->创建主机聚合->名称:projectA
                             可用域:projectA
                             管理聚合内的主机:选中node节点
                             创建主机聚合

#项目->计算->实例->创建实例->详情->实例名称:projectA-vm1
                                描述:projectA-vm1
                                可用域:projectA
                                数量:1

1.5.4 创建实例并分配固定 IP

#仅限命令创建虚拟机,才能分配固定IP
 自定义分配的地址可在dhcp地址池内
#创建虚拟机时,如果没创建其他可用域,默认为nova

#===================================================================================
#单网卡
# nova boot --image centos7.9-template --flavor 1C-1G-20G \
    --availability-zone projectA:openstack-centos7-27-node1.testou.com \
    --nic net-name=internal-net,v4-fixed-ip=172.20.8.248 projectA-vm1


#多网卡
# nova boot --image centos7.9-template --flavor 1C-1G-20G \
    --availability-zone projectA:openstack-centos7-27-node1.testou.com \
    --nic net-name=internal-net,v4-fixed-ip=172.20.8.248 \
    --nic net-name=external-net,v4-fixed-ip=10.0.0.248  projectA-vm1

2 openstack自定义 centos 镜像

#新建磁盘
# qemu-img create -f qcow2 /var/lib/image/centos7.9-ext4-template.qcow2 10G

#创建虚拟机
# virt-install --virt-type kvm \
    --name centos7.9-template \
    --vcpus 1 --memory 1024 \
    --disk path=/var/lib/image/centos7.9-ext4-template.qcow2 \
    --network bridge=br0 \
    --graphics vnc,listen=0.0.0.0 \
    --noautoconsole \
    --autostart \
    --cdrom=/usr/local/src/CentOS-7-x86_64-Everything-2009.iso

#安装
# virt-manager

#===================================================================================
#初始化配置

#恢复网卡传统命名
# sed -ri '/quiet"$/s#(.*)"#\1 net.ifnames=0"#' /etc/default/grub
# grub2-mkconfig -o /boot/grub2/grub.cfg

#配置网卡
# cat > /etc/sysconfig/network-scripts/ifcfg-eth0 <<EOF
DEVICE=eth0
NAME=eth0
BOOTPROTO=static
IPADDR=10.0.0.88
PREFIX=24
GATEWAY=10.0.0.2
DNS1=223.5.5.5
ONBOOT=yes
EOF

#配置yum源
# mkdir /etc/yum.repos.d/repodir
# mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/repodir
#替换repo文件
# cat > /etc/yum.repos.d/CentOS7.repo <<EOF
[base]
name=base
baseurl=https://mirrors.aliyun.com/centos/\$releasever/os/\$basearch/
gpgcheck=0
[extras]
name=extras
baseurl=https://mirrors.aliyun.com/centos/\$releasever/extras/\$basearch/
gpgcheck=0
[epel]
name=epel
baseurl=https://mirrors.aliyun.com/epel/\$releasever/\$basearch/
gpgcheck=0
EOF
    
#安装常用软件包
# yum clean all; yum -y install bash-completion vim autofs lvm2 traceroute mtr mailx postfix bc lrzsz nmap psmisc tree wget expect sysstat pcp-system-tools iotop iftop nload glances lsof screen tmux at fuse-sshfs sshpass pssh aide chrony genisoimage rsync tcpdump strace ltrace zip unzip socat gcc gcc-c++ glibc glibc-devel pcre pcre-devel openssl openssl-devel zlib-devel ntpdate telnet libevent libevent-devel iproute make net-tools

#关selinux
# sed -ri '/^SELINUX=/s#(.*=).*#\1disabled#' /etc/selinux/config

#关防火墙
# systemctl disable --now firewalld

#时间同步
# sed -ri '/^server/s/(.*)/#\1/' /etc/chrony.conf
# echo 'server ntp.aliyun.com iburst' >> /etc/chrony.conf
# systemctl enable --now chronyd

#设置时区
# timedatectl set-timezone Asia/Shanghai

#修改sshd配置文件
# vi /etc/ssh/sshd_config
 #UseDNS yes
 #GSSAPIAuthentication yes
# systemctl restart sshd

#设置别名
# sed -ri '/alias mv/a alias vi=vim' /root/.bashrc

#设置vim
# cat > /root/.vimrc <<EOF
set paste
set ts=4
set cursorline
set autoindent
set ignorecase
EOF

#系统资源限制优化
#内核参数优化

#历史命令添加时间和用户
# echo 'export HISTTIMEFORMAT="%F %T `whoami` "' >> /etc/profile

#===================================================================================
#电源管理
# yum -y install acpid
# systemctl enable --now acpid

#cloud-init服务,用于动态磁盘拉伸
# yum -y install cloud-init
#cloud-init所需磁盘扩展工具
# yum -y install cloud-utils-growpart

#===================================================================================
#openstack-controller节点

# mkdir -p /var/lib/image
#模板机镜像复制到controller节点
# scp centos7.9-ext4-template.qcow2 10.0.0.7:/var/lib/image/

# source admin-openrc.sh

#上传镜像到glance
# openstack image create "centos7.9-template" --file /var/lib/image/centos7.9-ext4-template.qcow2 --disk-format qcow2 \
--container-format bare --public

#查看镜像
# openstack image list

#===================================================================================
#web创建实例类型1:1C-1G-20G
#web创建虚拟机:vm1

#修改sshd配置文件,允许ssh使用密码登录
# grep ^PasswordAuth /etc/ssh/sshd_config 
 PasswordAuthentication yes
# sshd -t && systemctl restart sshd

#根分区拉伸为20G
# df -h | grep 20G
/dev/vda2        20G  1.6G   17G   9% /

#web创建实例类型2:1C-1G-30G

#web调整实例大小
 项目->计算->实例->vm1->调整实例大小->新的实例类型:1C-1G-30G
                                     调整大小
 
#报错:
 错误:No valid host was found. No valid host found for resize (HTTP 400) (Request-ID: req-8f814e90-bcb6-4539-a194-2b07c70a285a)
 #解决方法:修改controller/compute节点nova配置
 # vi /etc/nova/nova.conf
  [DEFAULT]
  allow_resize_to_same_host=true        #允许虚拟机类型动态调整
 #重启nova服务
 # sh nova-restart.sh

#web调整实例大小
 项目->计算->实例->vm1->调整实例大小->新的实例类型:1C-1G-30G
                                     调整大小
                                     确认 调整大小/迁移
posted on 2023-08-17 16:45  不期而至  阅读(74)  评论(0)    收藏  举报