Deploying OpenStack Queens on CentOS 7.4

I. Preparation

Create two virtual machines: linux-node1 and linux-node2.

node1:

Set the hostname:

[root@localhost ~]# hostnamectl set-hostname linux-node1.example.com

[root@localhost ~]# su -

Last login: Sat May 19 18:28:37 CST 2018 from 192.168.43.1 on pts/0

[root@linux-node1 ~]#

Configure the IP address:

[root@linux-node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens32

TYPE=Ethernet

BOOTPROTO=static

NAME=ens32

DEVICE=ens32

ONBOOT=yes

IPADDR=192.168.43.11

NETMASK=255.255.255.0

GATEWAY=192.168.43.2

DNS1=192.168.43.2

Disable NetworkManager and the firewall, and permanently disable SELinux:

[root@linux-node1 ~]# systemctl disable NetworkManager

[root@linux-node1 ~]# systemctl disable firewalld

[root@linux-node1 ~]# setenforce 0

[root@linux-node1 ~]# vi /etc/selinux/config
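The edit to /etc/selinux/config is what makes the setenforce change permanent: set SELINUX=disabled (or permissive). A one-line equivalent, assuming the stock file layout, plus stopping the already-disabled services for the current session:

[root@linux-node1 ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

[root@linux-node1 ~]# systemctl stop firewalld NetworkManager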

Edit the hosts file:

[root@linux-node1 ~]# cat /etc/hosts

127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4

::1 localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.43.11 linux-node1 linux-node1.example.com

192.168.43.12 linux-node2 linux-node2.example.com

Configure the yum repositories:

[root@linux-node1 ~]# mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup

[root@linux-node1 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

[root@linux-node1 ~]# yum makecache

[root@linux-node1 ~]# yum install wget -y

[root@linux-node1 ~]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

Install common tools:

[root@linux-node1 ~]# yum install -y net-tools vim lrzsz tree screen lsof tcpdump nc mtr nmap

With the environment above in place, clone linux-node1 to create the second machine.

Then only the hostname and IP address need to be changed on node2:

[root@linux-node1 ~]# hostnamectl set-hostname linux-node2.example.com

[root@linux-node1 ~]# su -

Last login: Sat May 19 18:51:51 CST 2018 from 192.168.43.1 on pts/0

[root@linux-node2 ~]#

[root@linux-node2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens32

TYPE=Ethernet

BOOTPROTO=static

NAME=ens32

DEVICE=ens32

ONBOOT=yes

IPADDR=192.168.43.12

NETMASK=255.255.255.0

GATEWAY=192.168.43.2

DNS1=192.168.43.2

On node1:

Install the OpenStack Queens repository:

yum install -y centos-release-openstack-queens

Install the OpenStack client:

yum install -y python-openstackclient

Install the OpenStack SELinux management package:

yum install -y openstack-selinux

II. MySQL Database Deployment

1. Install MySQL (MariaDB)

[root@linux-node1 ~]# yum install -y mariadb mariadb-server python2-PyMySQL

2. Edit the MySQL configuration file

[root@linux-node1 ~]# vim /etc/my.cnf.d/openstack.cnf

[mysqld]

bind-address = 192.168.43.11 # IP address to listen on

default-storage-engine = innodb # default storage engine

innodb_file_per_table = on # one tablespace file per table

collation-server = utf8_general_ci # default server collation

character-set-server = utf8 # default server character set

max_connections = 4096 # maximum number of connections; tune this for production

3. Enable the MySQL server at boot and start it

[root@linux-node1 ~]# systemctl enable mariadb.service

[root@linux-node1 ~]# systemctl start mariadb.service

4. Secure the database installation

[root@linux-node1 ~]# mysql_secure_installation

5. Create the databases

[root@linux-node1 ~]# mysql -u root -p

Enter password:

MariaDB [(none)]>

Keystone database:

CREATE DATABASE keystone;

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';

Glance database:

CREATE DATABASE glance;

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';

Nova databases:

CREATE DATABASE nova;

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';

CREATE DATABASE nova_api;

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova';

CREATE DATABASE nova_cell0;

GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';

GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova';

Neutron database:

CREATE DATABASE neutron;

GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';

GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';

Cinder database:

CREATE DATABASE cinder;

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';

III. Message Broker: RabbitMQ

1. Install RabbitMQ

[root@linux-node1 ~]# yum install -y rabbitmq-server

2. Enable RabbitMQ at boot and start it

[root@linux-node1 ~]# systemctl enable rabbitmq-server.service

[root@linux-node1 ~]# systemctl start rabbitmq-server.service

3. Add an openstack user.

[root@linux-node1 ~]# rabbitmqctl add_user openstack openstack

Creating user "openstack" ...

4. Grant permissions to the newly created openstack user.

[root@linux-node1 ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"

Setting permissions for user "openstack" in vhost "/" ...

5. Enable the web management plugin

[root@linux-node1 ~]# rabbitmq-plugins list

[root@linux-node1 ~]# rabbitmq-plugins enable rabbitmq_management
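To confirm the broker is usable, the user list can be checked and the management UI (which listens on port 15672 by default) probed, for example:

[root@linux-node1 ~]# rabbitmqctl list_users

[root@linux-node1 ~]# curl -I http://192.168.43.11:15672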

IV. Deploying Keystone

1. Install Keystone

# yum install -y openstack-keystone httpd mod_wsgi memcached python-memcached

2. Enable Memcached at boot and start it

[root@linux-node1 ~]# systemctl enable memcached.service

[root@linux-node1 ~]# vim /etc/sysconfig/memcached

PORT="11211"

USER="memcached"

MAXCONN="1024"

CACHESIZE="64"

OPTIONS="-l 192.168.43.11,::1"

[root@linux-node1 ~]# systemctl start memcached.service

3. Configure Keystone

1) Configure the Keystone database connection

[root@linux-node1 ~]# vim /etc/keystone/keystone.conf

[database]

connection = mysql+pymysql://keystone:[email protected]/keystone

2) Token and Memcached settings

[token]

provider = fernet

3) Sync the database:

[root@linux-node1 ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone

[root@linux-node1 ~]# mysql -h 192.168.43.11 -ukeystone -pkeystone -e "use keystone;show tables;"

4) Initialize the Fernet keys

[root@linux-node1 ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone

[root@linux-node1 ~]# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

5) Bootstrap Keystone

[root@linux-node1 ~]# keystone-manage bootstrap --bootstrap-password admin \

--bootstrap-admin-url http://192.168.43.11:35357/v3/ \

--bootstrap-internal-url http://192.168.43.11:35357/v3/ \

--bootstrap-public-url http://192.168.43.11:5000/v3/ \

--bootstrap-region-id RegionOne

6) Verify the Keystone configuration

[root@linux-node1 ~]# grep "^[a-z]" /etc/keystone/keystone.conf

connection = mysql+pymysql://keystone:[email protected]/keystone

provider = fernet

7) Start Keystone

[root@linux-node1 ~]# vim /etc/httpd/conf/httpd.conf

ServerName 192.168.43.11:80

Link the Keystone WSGI configuration into Apache:

[root@linux-node1 ~]# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

Start Keystone (it runs under httpd) and check the listening ports; a quick port check is shown after the start commands.

[root@linux-node1 ~]# systemctl enable httpd.service

[root@linux-node1 ~]# systemctl start httpd.service
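A simple way to check the ports mentioned above (Keystone runs under httpd on 5000 and 35357):

[root@linux-node1 ~]# ss -tnlp | grep -E '5000|35357'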

Set the environment variables:

[root@linux-node1 ~]# export OS_USERNAME=admin

[root@linux-node1 ~]# export OS_PASSWORD=admin

[root@linux-node1 ~]# export OS_PROJECT_NAME=admin

[root@linux-node1 ~]# export OS_USER_DOMAIN_NAME=Default

[root@linux-node1 ~]# export OS_PROJECT_DOMAIN_NAME=Default

[root@linux-node1 ~]# export OS_AUTH_URL=http://192.168.43.11:35357/v3

[root@linux-node1 ~]# export OS_IDENTITY_API_VERSION=3

Create the demo project and demo user:

# openstack project create --domain default --description "Demo Project" demo

# openstack user create --domain default --password demo demo

# openstack role create user

# openstack role add --project demo --user demo user

Create the service project:

# openstack project create --domain default --description "Service Project" service

Create the glance user:

# openstack user create --domain default --password glance glance

# openstack role add --project service --user glance admin

Create the nova user:

# openstack user create --domain default --password nova nova

# openstack role add --project service --user nova admin

Create the placement user:

# openstack user create --domain default --password placement placement

# openstack role add --project service --user placement admin

Create the neutron user:

# openstack user create --domain default --password neutron neutron

# openstack role add --project service --user neutron admin

Create the cinder user:

# openstack user create --domain default --password cinder cinder

# openstack role add --project service --user cinder admin

Verify Keystone:

[root@linux-node1 ~]# unset OS_AUTH_URL OS_PASSWORD

[root@linux-node1 ~]# openstack --os-auth-url http://192.168.43.11:35357/v3 \

--os-project-domain-name default --os-user-domain-name default \

--os-project-name admin --os-username admin token issue

Password: …

[root@linux-node1 ~]# openstack --os-auth-url http://192.168.43.11:5000/v3 \

--os-project-domain-name default --os-user-domain-name default \

--os-project-name demo --os-username demo token issue

Password:

[root@linux-node1 ~]# vim /root/admin-openstack.sh

export OS_PROJECT_DOMAIN_NAME=Default

export OS_USER_DOMAIN_NAME=Default

export OS_PROJECT_NAME=admin

export OS_USERNAME=admin

export OS_PASSWORD=admin

export OS_AUTH_URL=http://192.168.43.11:35357/v3

export OS_IDENTITY_API_VERSION=3

export OS_IMAGE_API_VERSION=2

[root@linux-node1 ~]# vim /root/demo-openstack.sh

export OS_PROJECT_DOMAIN_NAME=Default

export OS_USER_DOMAIN_NAME=Default

export OS_PROJECT_NAME=demo

export OS_USERNAME=demo

export OS_PASSWORD=demo

export OS_AUTH_URL=http://192.168.43.11:5000/v3

export OS_IDENTITY_API_VERSION=3

export OS_IMAGE_API_VERSION=2

[root@linux-node1 ~]# source admin-openstack.sh

[root@linux-node1 ~]# openstack token issue

[root@linux-node1 ~]# source demo-openstack.sh

[root@linux-node1 ~]# openstack token issue

V. Deploying the Glance Service

1. Install Glance

[root@linux-node1 ~]# yum install -y openstack-glance

2. Configure the Glance database connections

glance-api.conf:

[root@linux-node1 ~]# vim /etc/glance/glance-api.conf

[database]

connection= mysql+pymysql://glance:[email protected]/glance

glance-registry.conf:

[root@linux-node1 ~]# vim /etc/glance/glance-registry.conf

[database]

connection= mysql+pymysql://glance:[email protected]/glance

3. Configure Keystone authentication

[root@linux-node1 ~]# vim /etc/glance/glance-api.conf

[keystone_authtoken]

auth_uri = http://192.168.43.11:5000

auth_url = http://192.168.43.11:35357

memcached_servers = 192.168.43.11:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = glance

password = glance

[paste_deploy]

flavor=keystone

glance-registry.conf:

[root@linux-node1 ~]# vim /etc/glance/glance-registry.conf

[keystone_authtoken]

auth_uri = http://192.168.43.11:5000

auth_url = http://192.168.43.11:35357

memcached_servers = 192.168.43.11:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = glance

password = glance

[paste_deploy]

flavor=keystone

4. Configure Glance image storage

[root@linux-node1 ~]# vim /etc/glance/glance-api.conf

[glance_store]

stores = file,http

default_store=file

filesystem_store_datadir=/var/lib/glance/images/

5. Sync the database

[root@linux-node1 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
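As with Keystone, the sync can be spot-checked by listing the tables with the glance credentials created earlier:

[root@linux-node1 ~]# mysql -h 192.168.43.11 -uglance -pglance -e "use glance;show tables;"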

6. Enable and start the Glance services

# systemctl enable openstack-glance-api.service

# systemctl enable openstack-glance-registry.service

# systemctl start openstack-glance-api.service

# systemctl start openstack-glance-registry.service

7. Register the Glance service

For other services to be able to use Glance, it has to be registered in Keystone. Remember to source the admin environment variables first.

[root@linux-node1 ~]# source admin-openstack.sh

# openstack service create --name glance --description "OpenStack Image service" image

# openstack endpoint create --region RegionOne image public http://192.168.43.11:9292

# openstack endpoint create --region RegionOne image internal http://192.168.43.11:9292

# openstack endpoint create --region RegionOne image admin http://192.168.43.11:9292

8. Test Glance

[root@linux-node1 ~]# source admin-openstack.sh

[root@linux-node1 ~]# openstack image list

9. Upload a Glance image

In the early stages of an OpenStack deployment, before any custom images have been built, a small test image can be used; CirrOS is a minimal Linux system intended for exactly this purpose.

[root@linux-node1 ~]# cd /usr/local/src

[root@linux-node1 src]# wget http://download.cirros-cloud.n ... k.img

[root@linux-node1 src]# openstack image create "cirros" --disk-format qcow2 \

--container-format bare --file cirros-0.3.5-x86_64-disk.img --public

[root@linux-node1 src]# openstack image list

+--------------------------------------+--------+--------+
| ID                                   | Name   | Status |
+--------------------------------------+--------+--------+
| cf154a84-a73a-451b-bcb3-83c98e7c0d3e | cirros | active |
+--------------------------------------+--------+--------+
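Because filesystem_store_datadir points at /var/lib/glance/images/, the uploaded image should also appear there on disk, stored under its ID:

[root@linux-node1 src]# ls -lh /var/lib/glance/images/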

VI. Deploying the Nova Service

1. Install on the controller node

[root@linux-node1 ~]# yum install -y openstack-nova-api openstack-nova-placement-api \

openstack-nova-conductor openstack-nova-console \

openstack-nova-novncproxy openstack-nova-scheduler

2. Database configuration

[root@linux-node1 ~]# vim /etc/nova/nova.conf

[api_database]

connection= mysql+pymysql://nova:[email protected]/nova_api

[database]

connection= mysql+pymysql://nova:[email protected]/nova

3. RabbitMQ configuration

[root@linux-node1 ~]# vim /etc/nova/nova.conf

[DEFAULT]

transport_url = rabbit://openstack:[email protected]

4. Keystone configuration

[root@linux-node1 ~]# vim /etc/nova/nova.conf

[api]

auth_strategy=keystone

[keystone_authtoken]

auth_uri = http://192.168.43.11:5000

auth_url = http://192.168.43.11:35357

memcached_servers = 192.168.43.11:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = nova

password = nova

5. Disable Nova's built-in firewall (Neutron handles security groups)

[DEFAULT]

use_neutron=true

firewall_driver = nova.virt.firewall.NoopFirewallDriver

6. VNC configuration

[root@linux-node1 ~]# vim /etc/nova/nova.conf

[vnc]

enabled=true

server_listen = 0.0.0.0

server_proxyclient_address = 192.168.43.11

7. Glance settings

[glance]

api_servers = http://192.168.43.11:9292

8. In the [oslo_concurrency] section, configure the lock path:

[oslo_concurrency]

lock_path=/var/lib/nova/tmp

9. Enabled APIs

[DEFAULT]

enabled_apis=osapi_compute,metadata

10. Placement settings

[placement]

os_region_name = RegionOne

project_domain_name = Default

project_name = service

auth_type = password

user_domain_name = Default

auth_url = http://192.168.43.11:35357/v3

username = placement

password = placement

11. Edit 00-nova-placement-api.conf and add the following <Directory> block inside the existing <VirtualHost> section (that is, before the closing </VirtualHost> shown below):

[root@linux-node1 ~]# vim /etc/httpd/conf.d/00-nova-placement-api.conf

<Directory /usr/bin>

<IfVersion >= 2.4>

Require all granted

</IfVersion>

<IfVersion < 2.4>

Order allow,deny

Allow from all

</IfVersion>

</Directory>

</VirtualHost>

# systemctl restart httpd

12. Sync the API database

[root@linux-node1 ~]# su -s /bin/sh -c "nova-manage api_db sync" nova

Register the cell0 database:

[root@linux-node1 ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

13. Create the cell1 cell

[root@linux-node1 ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

14. Sync the Nova database

[root@linux-node1 ~]# su -s /bin/sh -c "nova-manage db sync" nova

15. Verify that cell0 and cell1 are registered correctly

[root@linux-node1 ~]# nova-manage cell_v2 list_cells

16. Check the database sync

[root@linux-node1 ~]# mysql -h 192.168.43.11 -unova -pnova -e "use nova;show tables;"

[root@linux-node1 ~]# mysql -h 192.168.43.11 -unova -pnova -e "use nova_api;show tables;"

17. Start the Nova services

# systemctl enable openstack-nova-api.service \

openstack-nova-consoleauth.service \

openstack-nova-scheduler.service \

openstack-nova-conductor.service \

openstack-nova-novncproxy.service

# systemctl start openstack-nova-api.service \

openstack-nova-consoleauth.service \

openstack-nova-scheduler.service openstack-nova-conductor.service \

openstack-nova-novncproxy.service

18. Register the Nova and Placement services

# source admin-openstack.sh

# openstack service create --name nova --description "OpenStack Compute" compute

# openstack endpoint create --region RegionOne compute public http://192.168.43.11:8774/v2.1

# openstack endpoint create --region RegionOne compute internal http://192.168.43.11:8774/v2.1

# openstack endpoint create --region RegionOne compute admin http://192.168.43.11:8774/v2.1

# openstack service create --name placement --description "Placement API" placement

# openstack endpoint create --region RegionOne placement public http://192.168.43.11:8778

# openstack endpoint create --region RegionOne placement internal http://192.168.43.11:8778

# openstack endpoint create --region RegionOne placement admin http://192.168.43.11:8778

Verify the controller services:

[root@linux-node1 ~]# openstack host list
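Two additional checks worth running at this point, if the commands are available from the packages installed above:

[root@linux-node1 ~]# openstack compute service list

[root@linux-node1 ~]# nova-status upgrade check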

Compute node installation

[root@linux-node2 ~]# yum install -y centos-release-openstack-queens

[root@linux-node2 ~]# yum install -y openstack-nova-compute sysfsutils

[root@linux-node1 ~]# scp /etc/nova/nova.conf 192.168.43.12:/etc/nova/nova.conf

[root@linux-node2 ~]# chown root:nova /etc/nova/nova.conf

1. Delete the redundant settings; the two database connection lines below are only needed on the controller:

[root@linux-node2 ~]# vim /etc/nova/nova.conf

connection=mysql+pymysql://nova:[email protected]/nova_api

connection=mysql+pymysql://nova:[email protected]/nova

2. Adjust the VNC configuration

The compute node's VNC server listens on all IPs, and novncproxy_base_url points at the noVNC proxy on the controller:

[vnc]

enabled=true

server_listen = 0.0.0.0

server_proxyclient_address = 192.168.43.12

novncproxy_base_url = http://192.168.43.11:6080/vnc_auto.html

3. Virtualization check

[root@linux-node2 ~]# egrep -c '(vmx|svm)' /proc/cpuinfo

If the command returns 0, the server does not support hardware virtualization and Nova must fall back to software emulation; set in nova.conf:

[libvirt]

virt_type=qemu

If it returns a non-zero value, the compute node supports hardware virtualization, and nova.conf should use KVM instead:

[libvirt]

virt_type=kvm

Start nova-compute:

# systemctl enable libvirtd.service openstack-nova-compute.service

# systemctl start libvirtd.service openstack-nova-compute.service

Verify the compute node (run on the controller):

[root@linux-node1 ~]# openstack host list
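If the new compute node does not show up, it most likely still has to be mapped into the cell database; the Queens install guide does this on the controller with:

[root@linux-node1 ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova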

VII. Deploying the Neutron Service

1. Install Neutron

[root@linux-node1 ~]# yum install -y openstack-neutron openstack-neutron-ml2 \

openstack-neutron-linuxbridge ebtables

2. Configure the Neutron database

[root@linux-node1 ~]# vim /etc/neutron/neutron.conf

[database]

connection = mysql+pymysql://neutron:[email protected]:3306/neutron

3. Keystone authentication settings

[DEFAULT]

auth_strategy = keystone

[keystone_authtoken]

auth_uri = http://192.168.43.11:5000

auth_url = http://192.168.43.11:35357

memcached_servers = 192.168.43.11:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = neutron

password = neutron

4. RabbitMQ settings

[root@linux-node1 ~]# vim /etc/neutron/neutron.conf

[DEFAULT]

transport_url = rabbit://openstack:[email protected]

5. Basic Neutron network settings

[DEFAULT]

core_plugin = ml2

service_plugins =

6. Configure Nova notifications for network topology changes

[DEFAULT]

notify_nova_on_port_status_changes = True

notify_nova_on_port_data_changes = True

[nova]

auth_url = http://192.168.43.11:35357

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = nova

password = nova

7. In the [oslo_concurrency] section, configure the lock path:

[oslo_concurrency]

lock_path = /var/lib/neutron/tmp

8. Neutron ML2 configuration

[root@linux-node1 ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]

# multiple type drivers can be enabled, so all of them are listed here

type_drivers = flat,vlan,gre,vxlan,geneve

# multiple tenant network types can be enabled as well

tenant_network_types = flat,vlan,gre,vxlan,geneve

# mechanism drivers (multiple allowed); linuxbridge and openvswitch are the open-source options

mechanism_drivers = linuxbridge,openvswitch,l2population

# enable the port security extension driver

extension_drivers = port_security,qos

# provider (flat) network mapping

[ml2_type_flat]

flat_networks = provider

# enable ipset for security groups

[securitygroup]

enable_ipset = True

9. Neutron Linux bridge agent configuration

[root@linux-node1 ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[linux_bridge]

# map the provider network to the physical interface (ens32 on these nodes)

physical_interface_mappings = provider:ens32

# VXLAN networks are disabled in this deployment

[vxlan]

enable_vxlan = False

[securitygroup]

firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

enable_security_group = True

10. Neutron DHCP agent configuration

[root@linux-node1 ~]# vim /etc/neutron/dhcp_agent.ini

[DEFAULT]

interface_driver = linuxbridge

dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq

enable_isolated_metadata = True

11. Neutron metadata agent configuration

[root@linux-node1 ~]# vim /etc/neutron/metadata_agent.ini

[DEFAULT]

nova_metadata_host = 192.168.43.11

metadata_proxy_shared_secret = unixhot.com

12. Neutron-related settings in nova.conf

[root@linux-node1 ~]# vim /etc/nova/nova.conf

[neutron]

url = http://192.168.43.11:9696

auth_url = http://192.168.43.11:35357

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = neutron

service_metadata_proxy = True

metadata_proxy_shared_secret = unixhot.com

[root@linux-node1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

Sync the database:

[root@linux-node1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \

--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
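The result can be spot-checked the same way as the other databases, using the neutron credentials created earlier:

[root@linux-node1 ~]# mysql -h 192.168.43.11 -uneutron -pneutron -e "use neutron;show tables;"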

13. Restart the Compute API service

# systemctl restart openstack-nova-api.service

Enable the networking services at boot and start them.

# systemctl enable neutron-server.service \

neutron-linuxbridge-agent.service neutron-dhcp-agent.service \

neutron-metadata-agent.service

# systemctl start neutron-server.service \

neutron-linuxbridge-agent.service neutron-dhcp-agent.service \

neutron-metadata-agent.service

14. Register the Neutron service

# openstack service create --name neutron --description "OpenStack Networking" network

Create the endpoints:

# openstack endpoint create --region RegionOne network public http://192.168.43.11:9696

# openstack endpoint create --region RegionOne network internal http://192.168.43.11:9696

# openstack endpoint create --region RegionOne network admin http://192.168.43.11:9696

15. Test the Neutron installation

[root@linux-node1 ~]# openstack network agent list

Neutron compute node deployment

Install the packages:

[root@linux-node2 ~]# yum install -y openstack-neutron openstack-neutron-linuxbridge ebtables

1. Keystone authentication settings

[root@linux-node2 ~]# vim /etc/neutron/neutron.conf

[DEFAULT]

auth_strategy = keystone

[keystone_authtoken]

auth_uri = http://192.168.43.11:5000

auth_url = http://192.168.43.11:35357

memcached_servers = 192.168.43.11:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = neutron

password = neutron

2. RabbitMQ settings

[root@linux-node2 ~]# vim /etc/neutron/neutron.conf

[DEFAULT]

transport_url = rabbit://openstack:[email protected]

Note that this entry belongs under the [DEFAULT] section; the file contains more than one transport_url setting.

3. Lock path

[oslo_concurrency]

lock_path = /var/lib/neutron/tmp

4. Linux bridge agent configuration (copy it from the controller)

[root@linux-node1 ~]# scp /etc/neutron/plugins/ml2/linuxbridge_agent.ini 192.168.43.12:/etc/neutron/plugins/ml2/

5. Configure nova.conf on the compute node

[root@linux-node2 ~]# vim /etc/nova/nova.conf

[neutron]

url = http://192.168.43.11:9696

auth_url = http://192.168.43.11:35357

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = neutron

Restart the compute service:

[root@linux-node2 ~]# systemctl restart openstack-nova-compute.service

Enable and start the Linux bridge agent on the compute node:

[root@linux-node2 ~]# systemctl enable neutron-linuxbridge-agent.service

[root@linux-node2 ~]# systemctl start neutron-linuxbridge-agent.service

Test the Neutron installation on the controller node:

[root@linux-node1 ~]# source admin-openstack.sh

[root@linux-node1 ~]# openstack network agent list

Create an instance from the command line; this can also be done later through the web interface.

1. Create a network

[root@linux-node1 ~]# openstack network create --share --external \

--provider-physical-network provider \

--provider-network-type flat provider

2. Create a subnet

[root@linux-node1 ~]# openstack subnet create --network provider \

--allocation-pool start=192.168.43.100,end=192.168.43.200 \

--dns-nameserver 223.5.5.5 --gateway 192.168.43.2 \

--subnet-range 192.168.43.0/24 provider-subnet

3. Create a flavor

[root@linux-node1 ~]# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano

4. Create a key pair

[root@linux-node1 ~]# source demo-openstack.sh

[root@linux-node1 ~]# ssh-keygen -q -N ""

[root@linux-node1 ~]# openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey

[root@linux-node1 ~]# openstack keypair list

5. Add security group rules

[root@linux-node1 ~]# openstack security group rule create --proto icmp default

[root@linux-node1 ~]# openstack security group rule create --proto tcp --dst-port 22 default

Launch an instance

[root@linux-node1 ~]# source demo-openstack.sh

[root@linux-node1 ~]# openstack flavor list

1. List available images

[root@linux-node1 ~]# openstack image list

2. List available networks

[root@linux-node1 ~]# openstack network list

3. List available security groups

[root@linux-node1 ~]# openstack security group list

4. Create the virtual machine

[root@linux-node1 ~]# openstack server create --flavor m1.nano --image cirros \

--nic net-id=5c4d0706-24cd-4d42-ba78-36a05b6c81c8 --security-group default \

--key-name mykey demo-instance

Note that the network must be specified by its ID, not its name; a small lookup helper is sketched below.
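The net-id above differs in every environment; one way to avoid copying it by hand is to look it up first (a minimal sketch using the provider network created earlier):

[root@linux-node1 ~]# NET_ID=$(openstack network show provider -f value -c id)

The create command can then use --nic net-id=$NET_ID.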

5. View the virtual machine

[root@linux-node1 ~]# openstack server list

[root@linux-node1 ~]# openstack console url show demo-instance
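With the ICMP and SSH rules added earlier, the instance should be reachable from the provider network once it is ACTIVE. A quick check (the address below is only an example; use the one shown by openstack server list):

[root@linux-node1 ~]# ping -c 3 192.168.43.101

[root@linux-node1 ~]# ssh [email protected]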

Installing the Horizon Service

1. Install Horizon

[root@linux-node2 ~]# yum install -y openstack-dashboard

2. Configure Horizon

[root@linux-node2 ~]# vim /etc/openstack-dashboard/local_settings

OPENSTACK_HOST = "192.168.43.11"

# allow access from all hosts

ALLOWED_HOSTS = ['*', ]

# set the API versions

OPENSTACK_API_VERSIONS = {

"identity": 3,

"volume": 2,

"compute": 2,

}

# enable multi-domain support

OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

# set the default domain

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'

# set the Keystone address

OPENSTACK_HOST = "192.168.43.11"

OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

# default role for users created through the dashboard

OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

# store sessions in Memcached

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {

'default': {

'BACKEND':'django.core.cache.backends.memcached.MemcachedCache',

'LOCATION': '192.168.43.11:11211',

   }

}

# allow setting passwords from the web UI

OPENSTACK_HYPERVISOR_FEATURES = {

'can_set_mount_point': True,

'can_set_password': True,

'requires_keypair': False,

}

# set the time zone

TIME_ZONE = "Asia/Shanghai"

# disable some advanced self-service networking features

OPENSTACK_NEUTRON_NETWORK = {

...

'enable_router': False,

'enable_quotas': False,

'enable_distributed_router': False,

'enable_ha_router': False,

'enable_lb': False,

'enable_firewall': False,

'enable_vpn': False,

'enable_fip_topology_check': False,

}

3. Start the service

[root@linux-node2 ~]# systemctl enable httpd.service

[root@linux-node2 ~]# systemctl restart httpd.service
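If the dashboard fails to load after the restart, the Queens install guide also notes that /etc/httpd/conf.d/openstack-dashboard.conf should contain the line below; add it if it is missing and restart httpd again:

WSGIApplicationGroup %{GLOBAL}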


Reposted from my.oschina.net/xiaoliangxiansen/blog/1819797