OpenStack Learning Installation Log

Set up across two machines:
KVM, NTP, MariaDB, RabbitMQ, Memcached, Etcd, Placement, Keystone, Glance, Nova, Neutron, Horizon, Cinder, Swift, and everything runs. Built on VMware Workstation with two CentOS 7 VMs, each with two NICs; the Storage/Compute node has four virtual hard disks.

// Two machines, each with two NICs; the Compute/Storage node has three extra data disks (sdb for Cinder, sdc and sdd for Swift) plus the OS disk
// Edit /etc/hosts
    10.189.189.11 centos1
    10.189.189.12 centos2
// When installing CentOS 7 (on a VM or physical server), make sure the NICs come up as eth0 and eth1 (append these boot parameters at the installer prompt)
    vmlinuz initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 quiet net.ifnames=0 biosdevname=0
// Give eth0 a static IP: 10.189.189.11 on the Controller node, 10.189.189.12 on the Compute node
// Configure eth1 on both nodes without an IP address (sample file under /etc/sysconfig/network-scripts)
    HWADDR=00:0c:29:8a:ac:04    //Keep unchanged
    DEVICE=eth1
    TYPE=Ethernet
    BOOTPROTO=none
    UUID=45576831-a680-3207-acbb-2e0ff2d4b246   //Keep unchanged
    ONBOOT=yes
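
// For reference, a minimal static-IP sample for the controller's eth0; the gateway
// and DNS values here are assumptions based on the provider subnet used later,
// and you should keep the HWADDR/UUID lines the installer generated for your NIC:
    DEVICE=eth0
    TYPE=Ethernet
    BOOTPROTO=none
    IPADDR=10.189.189.11
    PREFIX=24
    GATEWAY=10.189.189.1
    DNS1=202.96.209.133
    ONBOOT=yes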

// Verify hardware virtualization support (a non-empty result means VT-x/AMD-V is available)
grep -E '(vmx|svm)' /proc/cpuinfo
yum install -y qemu-kvm libvirt
yum install -y virt-install
systemctl enable libvirtd
systemctl start libvirtd

yum install -y tigervnc-server
systemctl stop firewalld.service
systemctl disable firewalld.service
/usr/bin/vncserver

yum install centos-release-openstack-train
yum upgrade
yum install python-openstackclient
yum install openstack-selinux
yum install chrony

// Edit /etc/chrony.conf on controller node
    server time1.aliyun.com iburst

// Repeat the chrony install on the compute node
// Edit /etc/chrony.conf on compute node
    server 10.189.189.11 iburst
systemctl enable chronyd.service
systemctl start chronyd.service
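
// Sanity check on both nodes; the compute node should list 10.189.189.11 as its source:
chronyc sources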
yum install python-openstackclient
yum install mariadb mariadb-server python2-PyMySQL

// Create /etc/my.cnf.d/openstack.cnf
    [mysqld]
    bind-address = 10.189.189.11
    default-storage-engine = innodb
    innodb_file_per_table = on
    max_connections = 4096
    collation-server = utf8_general_ci
    character-set-server = utf8
systemctl enable mariadb.service
systemctl start mariadb.service
mysql_secure_installation

yum install rabbitmq-server
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
rabbitmqctl add_user openstack openstack
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
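// Optional check that the user and its permissions took effect:
rabbitmqctl list_users
rabbitmqctl list_permissions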
yum install memcached python-memcached

// Edit /etc/sysconfig/memcached
    OPTIONS="-l 127.0.0.1,::1,10.189.189.11"
systemctl enable memcached.service
systemctl start memcached.service
rabbitmq-plugins list
rabbitmq-plugins enable rabbitmq_management
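// The management UI now listens on http://10.189.189.11:15672; to log in with the
// openstack user you will likely need to tag it as an administrator first:
rabbitmqctl set_user_tags openstack administrator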

yum install etcd

// Edit the /etc/etcd/etcd.conf
    #[Member]
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="http://10.189.189.11:2380"
    ETCD_LISTEN_CLIENT_URLS="http://10.189.189.11:2379"
    ETCD_NAME="centos1"
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.189.189.11:2380"
    ETCD_ADVERTISE_CLIENT_URLS="http://10.189.189.11:2379"
    ETCD_INITIAL_CLUSTER="centos1=http://10.189.189.11:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
    ETCD_INITIAL_CLUSTER_STATE="new"
systemctl enable etcd.service
systemctl start etcd.service
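
// Quick health check (assuming the stock v2 etcdctl client shipped with CentOS 7):
etcdctl --endpoints=http://10.189.189.11:2379 cluster-health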

mysql -uroot -proot
// MariaDB [(none)]>
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
IDENTIFIED BY 'keystone';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
IDENTIFIED BY 'keystone';

yum install openstack-keystone httpd mod_wsgi

// Edit the /etc/keystone/keystone.conf
    [database]
    connection = mysql+pymysql://keystone:keystone@10.189.189.11/keystone
    [token]
    provider = fernet

su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

keystone-manage bootstrap --bootstrap-password admin \
--bootstrap-admin-url http://10.189.189.11:5000/v3/ \
--bootstrap-internal-url http://10.189.189.11:5000/v3/ \
--bootstrap-public-url http://10.189.189.11:5000/v3/ \
--bootstrap-region-id RegionOne

// Edit the /etc/httpd/conf/httpd.conf
    ServerName 10.189.189.11
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl enable httpd.service
systemctl start httpd.service
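
// The openstack commands below need admin credentials; a minimal export block
// matching the bootstrap above (same values as /root/admin-open further down):
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://10.189.189.11:5000/v3
export OS_IDENTITY_API_VERSION=3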

openstack project create --domain default \
--description "Service Project" service
openstack project create --domain default \
--description "Demo Project" myproject
openstack user create --domain default \
--password-prompt myuser
openstack role create myrole
openstack role add --project myproject --user myuser myrole

unset OS_AUTH_URL OS_PASSWORD
openstack --os-auth-url http://10.189.189.11:5000/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name admin --os-username admin token issue
openstack --os-auth-url http://10.189.189.11:5000/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name myproject --os-username myuser token issue

// Edit /root/admin-open:
    export OS_PROJECT_DOMAIN_NAME=Default
    export OS_USER_DOMAIN_NAME=Default
    export OS_PROJECT_NAME=admin
    export OS_USERNAME=admin
    export OS_PASSWORD=admin
    export OS_AUTH_URL=http://10.189.189.11:5000/v3
    export OS_IDENTITY_API_VERSION=3
    export OS_IMAGE_API_VERSION=2

// Edit /root/demo-open:
    export OS_PROJECT_DOMAIN_NAME=Default
    export OS_USER_DOMAIN_NAME=Default
    export OS_PROJECT_NAME=myproject
    export OS_USERNAME=myuser
    export OS_PASSWORD=myuser
    export OS_AUTH_URL=http://10.189.189.11:5000/v3
    export OS_IDENTITY_API_VERSION=3
    export OS_IMAGE_API_VERSION=2

source admin-open
openstack token issue

mysql -u root -proot
// MariaDB [(none)]>
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
IDENTIFIED BY 'glance';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
IDENTIFIED BY 'glance';

openstack user create --domain default --password-prompt glance
openstack role add --project service --user glance admin
openstack service create --name glance \
--description "OpenStack Image" image
openstack endpoint create --region RegionOne \
image public http://10.189.189.11:9292
openstack endpoint create --region RegionOne \
image internal http://10.189.189.11:9292
openstack endpoint create --region RegionOne \
image admin http://10.189.189.11:9292

yum install openstack-glance

// Edit the /etc/glance/glance-api.conf
    [database]
    connection = mysql+pymysql://glance:glance@10.189.189.11/glance
    [keystone_authtoken]
    www_authenticate_uri  = http://10.189.189.11:5000
    auth_url = http://10.189.189.11:5000
    memcached_servers = 10.189.189.11:11211
    auth_type = password
    project_domain_name = Default
    user_domain_name = Default
    project_name = service
    username = glance
    password = glance
    [paste_deploy]
    flavor = keystone
    [glance_store]
    stores = file,http
    default_store = file
    filesystem_store_datadir = /var/lib/glance/images/
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable openstack-glance-api.service
systemctl start openstack-glance-api.service
cd /tmp
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
glance image-create --name "cirros" \
--file /tmp/cirros-0.4.0-x86_64-disk.img \
--disk-format qcow2 --container-format bare \
--visibility public
glance image-list
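
// The glance CLI is deprecated in Train; an equivalent upload with the unified client:
openstack image create "cirros" \
--file /tmp/cirros-0.4.0-x86_64-disk.img \
--disk-format qcow2 --container-format bare \
--public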

mysql -u root -proot
// MariaDB [(none)]>
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \
IDENTIFIED BY 'placement';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \
IDENTIFIED BY 'placement';

openstack user create --domain default --password-prompt placement
openstack role add --project service --user placement admin
openstack service create --name placement \
--description "Placement API" placement
openstack endpoint create --region RegionOne \
placement public http://10.189.189.11:8778
openstack endpoint create --region RegionOne \
placement internal http://10.189.189.11:8778
openstack endpoint create --region RegionOne \
placement admin http://10.189.189.11:8778
yum install openstack-placement-api

// Edit the /etc/placement/placement.conf
    [placement_database]
    connection = mysql+pymysql://placement:placement@10.189.189.11/placement
    [api]
    auth_strategy = keystone
    [keystone_authtoken]
    auth_url = http://10.189.189.11:5000/v3
    memcached_servers = 10.189.189.11:11211
    auth_type = password
    project_domain_name = Default
    user_domain_name = Default
    project_name = service
    username = placement
    password = placement

su -s /bin/sh -c "placement-manage db sync" placement
systemctl restart httpd.service
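
// Verify the Placement install (should report no failures):
placement-status upgrade check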

mysql -u root -proot
// MariaDB [(none)]> 
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
IDENTIFIED BY 'nova';

source /root/admin-open
openstack user create --domain default --password-prompt nova
openstack role add --project service --user nova admin

openstack service create --name nova \
--description "OpenStack Compute" compute
openstack endpoint create --region RegionOne \
compute public http://10.189.189.11:8774/v2.1
openstack endpoint create --region RegionOne \
compute internal http://10.189.189.11:8774/v2.1
openstack endpoint create --region RegionOne \
compute admin http://10.189.189.11:8774/v2.1

yum install openstack-nova-api openstack-nova-conductor \
openstack-nova-novncproxy openstack-nova-scheduler

// Edit the /etc/nova/nova.conf
    [DEFAULT]
    my_ip = 10.189.189.11
    enabled_apis = osapi_compute,metadata
    transport_url = rabbit://openstack:openstack@10.189.189.11:5672/
    use_neutron = true
    firewall_driver = nova.virt.firewall.NoopFirewallDriver
    [api_database]
    connection = mysql+pymysql://nova:nova@10.189.189.11/nova_api
    [database]
    connection = mysql+pymysql://nova:nova@10.189.189.11/nova
    [api]
    auth_strategy = keystone
    [keystone_authtoken]
    www_authenticate_uri = http://10.189.189.11:5000/
    auth_url = http://10.189.189.11:5000/
    memcached_servers = 10.189.189.11:11211
    auth_type = password
    project_domain_name = Default
    user_domain_name = Default
    project_name = service
    username = nova
    password = nova
    [vnc]
    enabled = true
    server_listen = $my_ip
    server_proxyclient_address = $my_ip
    [glance]
    api_servers = http://10.189.189.11:9292
    [oslo_concurrency]
    lock_path = /var/lib/nova/tmp
    [placement]
    region_name = RegionOne
    project_domain_name = Default
    project_name = service
    auth_type = password
    user_domain_name = Default
    auth_url = http://10.189.189.11:5000/v3
    username = placement
    password = placement

su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
systemctl enable \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
systemctl start \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service

// Switch to Compute node:
yum install openstack-nova-compute

// Edit the /etc/nova/nova.conf
    [DEFAULT]
    my_ip = 10.189.189.12
    enabled_apis = osapi_compute,metadata
    transport_url = rabbit://openstack:openstack@10.189.189.11
    use_neutron = true
    firewall_driver = nova.virt.firewall.NoopFirewallDriver
    [api]
    auth_strategy = keystone
    [keystone_authtoken]
    www_authenticate_uri = http://10.189.189.11:5000/
    auth_url = http://10.189.189.11:5000/
    memcached_servers = 10.189.189.11:11211
    auth_type = password
    project_domain_name = Default
    user_domain_name = Default
    project_name = service
    username = nova
    password = nova
    [vnc]
    enabled = true
    server_listen = 0.0.0.0
    server_proxyclient_address = $my_ip
    novncproxy_base_url = http://10.189.189.11:6080/vnc_auto.html
    [glance]
    api_servers = http://10.189.189.11:9292
    [oslo_concurrency]
    lock_path = /var/lib/nova/tmp
    [placement]
    region_name = RegionOne
    project_domain_name = Default
    project_name = service
    auth_type = password
    user_domain_name = Default
    auth_url = http://10.189.189.11:5000/v3
    username = placement
    password = placement
    [libvirt]
    virt_type = kvm

systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service

// Switch back to Controller node:
source /root/admin-open
openstack compute service list --service nova-compute
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

// When adding new compute nodes, run nova-manage cell_v2 discover_hosts on the controller node
// Alternatively, you can set an appropriate interval in /etc/nova/nova.conf
    [scheduler]
    discover_hosts_in_cells_interval = 300
openstack compute service list
openstack catalog list
openstack image list
nova-status upgrade check // may report errors at this point; the related APIs (Cinder, etc.) are not installed yet

mysql -uroot -proot
// MariaDB [(none)]>
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
IDENTIFIED BY 'neutron';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
IDENTIFIED BY 'neutron';

openstack user create --domain default --password-prompt neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron \
--description "OpenStack Networking" network
openstack endpoint create --region RegionOne \
network public http://10.189.189.11:9696
openstack endpoint create --region RegionOne \
network internal http://10.189.189.11:9696
openstack endpoint create --region RegionOne \
network admin http://10.189.189.11:9696

yum install openstack-neutron openstack-neutron-ml2 \
openstack-neutron-linuxbridge ebtables

// Edit the /etc/neutron/neutron.conf
    [database]
    connection = mysql+pymysql://neutron:neutron@10.189.189.11/neutron
    [DEFAULT]
    core_plugin = ml2
    service_plugins =
    transport_url = rabbit://openstack:openstack@10.189.189.11
    auth_strategy = keystone
    notify_nova_on_port_status_changes = true
    notify_nova_on_port_data_changes = true
    [keystone_authtoken]
    www_authenticate_uri = http://10.189.189.11:5000
    auth_url = http://10.189.189.11:5000
    memcached_servers = 10.189.189.11:11211
    auth_type = password
    project_domain_name = default
    user_domain_name = default
    project_name = service
    username = neutron
    password = neutron
    [nova]
    auth_url = http://10.189.189.11:5000
    auth_type = password
    project_domain_name = default
    user_domain_name = default
    region_name = RegionOne
    project_name = service
    username = nova
    password = nova
    [oslo_concurrency]
    lock_path = /var/lib/neutron/tmp

// Edit the /etc/neutron/plugins/ml2/ml2_conf.ini
    [ml2]
    type_drivers = flat,vlan
    tenant_network_types =
    mechanism_drivers = linuxbridge
    extension_drivers = port_security
    [ml2_type_flat]
    flat_networks = provider
    [securitygroup]
    enable_ipset = true

// Edit the /etc/neutron/plugins/ml2/linuxbridge_agent.ini 
    [linux_bridge]
    physical_interface_mappings = provider:eth1
    [vxlan]
    enable_vxlan = false
    [securitygroup]
    enable_security_group = true
    firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

echo 'net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1' >> /etc/sysctl.conf
modprobe br_netfilter
sysctl -p
//    net.bridge.bridge-nf-call-iptables = 1
//    net.bridge.bridge-nf-call-ip6tables = 1


// Edit the /etc/neutron/dhcp_agent.ini
    [DEFAULT]
    interface_driver = linuxbridge
    dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
    enable_isolated_metadata = true

// Edit the /etc/neutron/metadata_agent.ini
    [DEFAULT]
    nova_metadata_host = 10.189.189.11
    metadata_proxy_shared_secret = lovechina

// Edit the /etc/nova/nova.conf
    [neutron]
    auth_url = http://10.189.189.11:5000
    auth_type = password
    project_domain_name = default
    user_domain_name = default
    region_name = RegionOne
    project_name = service
    username = neutron
    password = neutron
    service_metadata_proxy = true
    metadata_proxy_shared_secret = lovechina

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
systemctl restart openstack-nova-api.service
systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
systemctl start neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service

// Switch to Compute node:
source /root/admin-open
yum install openstack-neutron-linuxbridge ebtables ipset

// Edit the /etc/neutron/neutron.conf
    [DEFAULT]
    transport_url = rabbit://openstack:openstack@10.189.189.11
    auth_strategy = keystone
    [keystone_authtoken]
    www_authenticate_uri = http://10.189.189.11:5000
    auth_url = http://10.189.189.11:5000
    memcached_servers = 10.189.189.11:11211
    auth_type = password
    project_domain_name = default
    user_domain_name = default
    project_name = service
    username = neutron
    password = neutron

// Edit the /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    [linux_bridge]
    physical_interface_mappings = provider:eth1
    [vxlan]
    enable_vxlan = false
    [securitygroup]
    enable_security_group = true
    firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

echo 'net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1' >> /etc/sysctl.conf
modprobe br_netfilter
sysctl -p
//    net.bridge.bridge-nf-call-iptables = 1
//    net.bridge.bridge-nf-call-ip6tables = 1

// Edit the /etc/nova/nova.conf
    [neutron]
    auth_url = http://10.189.189.11:5000
    auth_type = password
    project_domain_name = default
    user_domain_name = default
    region_name = RegionOne
    project_name = service
    username = neutron
    password = neutron

systemctl restart openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service

// Switch to Controller node:
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
systemctl restart openstack-nova-api.service \
neutron-server.service \
neutron-linuxbridge-agent \
neutron-dhcp-agent \
neutron-metadata-agent

openstack extension list --network
openstack network agent list

// Install Horizon on Compute node:
yum install openstack-dashboard

// Edit the /etc/openstack-dashboard/local_settings
    OPENSTACK_HOST = "10.189.189.11"
    OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
    OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
    OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
    OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
    ALLOWED_HOSTS = ['*']
    
    SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
    
    CACHES = {
        'default': {
             'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
             'LOCATION': '10.189.189.11:11211',
        }
    }
    
    OPENSTACK_API_VERSIONS = {
        "identity": 3,
        "image": 2,
        "volume": 3,
    }
    
    OPENSTACK_NEUTRON_NETWORK = {
        'enable_router': False,
        'enable_quotas': False,
        'enable_distributed_router': False,
        'enable_ha_router': False,
        'enable_lb': False,
        'enable_firewall': False,
        'enable_vpn': False,
        'enable_fip_topology_check': False,
    }
    
    TIME_ZONE = "Asia/Shanghai"

cd /usr/share/openstack-dashboard
python manage.py make_web_conf --apache > /etc/httpd/conf.d/openstack-dashboard.conf

systemctl restart httpd.service

// Switch to Controller node to restart memcached
systemctl restart memcached.service

// Both nodes:
yum remove NetworkManager

neutron net-create --shared --provider:physical_network provider --provider:network_type flat WAN
neutron subnet-create --name subnet-wan --allocation-pool \
start=10.189.189.21,end=10.189.189.29 --dns-nameserver 202.96.209.133 \
--gateway 10.189.189.1 WAN 10.189.189.0/24
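
// The neutron CLI is deprecated in Train; equivalent unified-client commands would be:
openstack network create --share \
--provider-physical-network provider \
--provider-network-type flat WAN
openstack subnet create --network WAN \
--allocation-pool start=10.189.189.21,end=10.189.189.29 \
--dns-nameserver 202.96.209.133 --gateway 10.189.189.1 \
--subnet-range 10.189.189.0/24 subnet-wan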
openstack network list
openstack subnet list
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
openstack flavor list

// Log into the Dashboard and create an instance
// Switch to Compute node:
// Edit /etc/nova/nova.conf
    [DEFAULT]
    vif_plugging_is_fatal = false
    vif_plugging_timeout = 0
systemctl restart openstack-nova-compute.service 
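
// The instance can also be created from the CLI instead of the Dashboard; a sketch,
// assuming the cirros image, m1.nano flavor, and WAN network created above
// (zgx1 is the instance name referenced in the Cinder steps below):
openstack server create --flavor m1.nano --image cirros \
--network WAN zgx1
openstack server list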
 
mysql -uroot -proot
// MariaDB [(none)]>
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
IDENTIFIED BY 'cinder';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
IDENTIFIED BY 'cinder';

source /root/admin-open
openstack user create --domain default --password-prompt cinder
openstack role add --project service --user cinder admin
openstack service create --name cinderv2 \
--description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 \
--description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne \
volumev2 public http://10.189.189.11:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne \
volumev2 internal http://10.189.189.11:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne \
volumev2 admin http://10.189.189.11:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne \
volumev3 public http://10.189.189.11:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne \
volumev3 internal http://10.189.189.11:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne \
volumev3 admin http://10.189.189.11:8776/v3/%\(project_id\)s

yum install openstack-cinder

// Edit the /etc/cinder/cinder.conf
    [database]
    connection = mysql+pymysql://cinder:cinder@10.189.189.11/cinder
    [DEFAULT]
    my_ip = 10.189.189.11
    transport_url = rabbit://openstack:openstack@10.189.189.11
    auth_strategy = keystone
    [keystone_authtoken]
    www_authenticate_uri = http://10.189.189.11:5000
    auth_url = http://10.189.189.11:5000
    memcached_servers = 10.189.189.11:11211
    auth_type = password
    project_domain_name = default
    user_domain_name = default
    project_name = service
    username = cinder
    password = cinder
    [oslo_concurrency]
    lock_path = /var/lib/cinder/tmp

su -s /bin/sh -c "cinder-manage db sync" cinder

// On the Compute node (which doubles as the Storage node):
// Edit the /etc/nova/nova.conf
    [cinder]
    os_region_name = RegionOne
    
// Back on the Controller node (which hosts the Cinder controller services):
systemctl restart openstack-nova-api.service

systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
cinder service-list

// Switch to Compute node:
yum install lvm2 device-mapper-persistent-data
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service

pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb

// Edit /etc/lvm/lvm.conf (note the closing brace; only sdb should be scanned)
    devices {
        filter = [ "a/sdb/", "r/.*/" ]
    }

yum install openstack-cinder targetcli python-keystone

// Edit /etc/cinder/cinder.conf
    [database]
    connection = mysql+pymysql://cinder:cinder@10.189.189.11/cinder
    [DEFAULT]
    my_ip = 10.189.189.12
    transport_url = rabbit://openstack:openstack@10.189.189.11
    auth_strategy = keystone
    enabled_backends = lvm
    glance_api_servers = http://10.189.189.11:9292
    [keystone_authtoken]
    www_authenticate_uri = http://10.189.189.11:5000
    auth_url = http://10.189.189.11:5000
    memcached_servers = 10.189.189.11:11211
    auth_type = password
    project_domain_name = default
    user_domain_name = default
    project_name = service
    username = cinder
    password = cinder
    [lvm]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_group = cinder-volumes
    target_protocol = iscsi
    target_helper = lioadm
    [oslo_concurrency]
    lock_path = /var/lib/cinder/tmp

systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service

source /root/admin-open
openstack volume service list

// Switch to Controller node:
openstack volume create --size 10 volume1
openstack volume list
openstack server add volume zgx1 volume1
openstack volume list

// If Swift object storage is configured (see below), you can also set up volume backups:
yum install openstack-cinder

// Edit /etc/cinder/cinder.conf
    [DEFAULT]
    backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver
    backup_swift_url = SWIFT_URL // replace with the object-store URL shown by the next command
openstack catalog show object-store
systemctl enable openstack-cinder-backup.service
systemctl start openstack-cinder-backup.service
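
// With the backup service running, a volume backup can be taken like this:
openstack volume backup create --name backup1 volume1
openstack volume backup list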

// Configure SWIFT Controller Node:
source /root/admin-open
openstack user create --domain default --password-prompt swift
openstack role add --project service --user swift admin
openstack service create --name swift \
--description "OpenStack Object Storage" object-store
openstack endpoint create --region RegionOne \
object-store public http://10.189.189.11:8080/v1/AUTH_%\(project_id\)s
openstack endpoint create --region RegionOne \
object-store internal http://10.189.189.11:8080/v1/AUTH_%\(project_id\)s
openstack endpoint create --region RegionOne \
object-store admin http://10.189.189.11:8080/v1

yum install openstack-swift-proxy python-swiftclient \
python-keystoneclient python-keystonemiddleware \
memcached

curl -o /etc/swift/proxy-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/proxy-server.conf-sample

// Edit the /etc/swift/proxy-server.conf
    [DEFAULT]
    bind_port = 8080
    user = swift
    swift_dir = /etc/swift
    [pipeline:main]
    pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
    [app:proxy-server]
    use = egg:swift#proxy
    account_autocreate = True
    [filter:keystoneauth]
    use = egg:swift#keystoneauth
    operator_roles = admin,user
    [filter:authtoken]
    paste.filter_factory = keystonemiddleware.auth_token:filter_factory
    www_authenticate_uri = http://10.189.189.11:5000
    auth_url = http://10.189.189.11:5000
    memcached_servers = 10.189.189.11:11211
    auth_type = password
    project_domain_id = default
    user_domain_id = default
    project_name = service
    username = swift
    password = swift
    delay_auth_decision = True
    [filter:cache]
    use = egg:swift#memcache
    memcache_servers = 10.189.189.11:11211

// Configure SWIFT Storage Node:
yum install xfsprogs rsync

// Format the /dev/sdc and /dev/sdd devices as XFS:
mkfs.xfs /dev/sdc
mkfs.xfs /dev/sdd
mkdir -p /srv/node/sdc
mkdir -p /srv/node/sdd

// Edit /etc/fstab
    /dev/sdc /srv/node/sdc xfs noatime,nodiratime,logbufs=8 0 2
    /dev/sdd /srv/node/sdd xfs noatime,nodiratime,logbufs=8 0 2

mount /srv/node/sdc
mount /srv/node/sdd

// Edit /etc/rsyncd.conf 
    uid = swift
    gid = swift
    log file = /var/log/rsyncd.log
    pid file = /var/run/rsyncd.pid
    address = 10.189.189.12
    [account]
    max connections = 2
    path = /srv/node/
    read only = False
    lock file = /var/lock/account.lock
    [container]
    max connections = 2
    path = /srv/node/
    read only = False
    lock file = /var/lock/container.lock
    [object]
    max connections = 2
    path = /srv/node/
    read only = False
    lock file = /var/lock/object.lock

systemctl enable rsyncd.service
systemctl start rsyncd.service
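
// Confirm the three rsync modules are exported:
rsync rsync://10.189.189.12/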

// Still on Storage Node:
yum install openstack-swift-account openstack-swift-container \
openstack-swift-object
curl -o /etc/swift/account-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/account-server.conf-sample
curl -o /etc/swift/container-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/container-server.conf-sample
curl -o /etc/swift/object-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/object-server.conf-sample

// Edit the /etc/swift/account-server.conf
    [DEFAULT]
    bind_ip = 10.189.189.12
    bind_port = 6202
    user = swift
    swift_dir = /etc/swift
    devices = /srv/node
    mount_check = True
    [pipeline:main]
    pipeline = healthcheck recon account-server
    [filter:recon]
    use = egg:swift#recon
    recon_cache_path = /var/cache/swift

// Edit /etc/swift/container-server.conf
    [DEFAULT]
    bind_ip = 10.189.189.12
    bind_port = 6201
    user = swift
    swift_dir = /etc/swift
    devices = /srv/node
    mount_check = True
    [pipeline:main]
    pipeline = healthcheck recon container-server
    [filter:recon]
    use = egg:swift#recon
    recon_cache_path = /var/cache/swift

// Edit the /etc/swift/object-server.conf
    [DEFAULT]
    bind_ip = 10.189.189.12
    bind_port = 6200
    user = swift
    swift_dir = /etc/swift
    devices = /srv/node
    mount_check = True
    [pipeline:main]
    pipeline = healthcheck recon object-server
    [filter:recon]
    use = egg:swift#recon
    recon_cache_path = /var/cache/swift
    recon_lock_path = /var/lock

chown -R swift:swift /srv/node
mkdir -p /var/cache/swift
chown -R root:swift /var/cache/swift
chmod -R 775 /var/cache/swift

// Back to SWIFT Controller node:
cd /etc/swift
swift-ring-builder account.builder create 10 3 1
swift-ring-builder account.builder add \
--region 1 --zone 1 --ip 10.189.189.12 --port 6202 --device sdc --weight 100
swift-ring-builder account.builder add \
--region 1 --zone 1 --ip 10.189.189.12 --port 6202 --device sdd --weight 100

swift-ring-builder account.builder
swift-ring-builder account.builder rebalance

cd /etc/swift
swift-ring-builder container.builder create 10 3 1
swift-ring-builder container.builder add \
--region 1 --zone 1 --ip 10.189.189.12 --port 6201 --device sdc --weight 100
swift-ring-builder container.builder add \
--region 1 --zone 1 --ip 10.189.189.12 --port 6201 --device sdd --weight 100

swift-ring-builder container.builder
swift-ring-builder container.builder rebalance

cd /etc/swift
swift-ring-builder object.builder create 10 3 1
// Note: the object server listens on port 6200 (see object-server.conf above)
swift-ring-builder object.builder add \
--region 1 --zone 1 --ip 10.189.189.12 --port 6200 --device sdc --weight 100
swift-ring-builder object.builder add \
--region 1 --zone 1 --ip 10.189.189.12 --port 6200 --device sdd --weight 100

swift-ring-builder object.builder
swift-ring-builder object.builder rebalance

// Copy the account.ring.gz, container.ring.gz, and object.ring.gz files to /etc/swift on every storage node
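// e.g., from the controller (assuming root SSH access to the storage node):
scp /etc/swift/*.ring.gz root@10.189.189.12:/etc/swift/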

// Obtain: /etc/swift/swift.conf
curl -o /etc/swift/swift.conf \
https://opendev.org/openstack/swift/raw/branch/master/etc/swift.conf-sample

// Edit the /etc/swift/swift.conf
    [swift-hash]
    swift_hash_path_suffix = HASH_PATH_SUFFIX
    swift_hash_path_prefix = HASH_PATH_PREFIX
    [storage-policy:0]
    name = Policy-0
    default = yes
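
// HASH_PATH_SUFFIX and HASH_PATH_PREFIX should each be replaced with a unique,
// secret value; one way to generate them:
openssl rand -hex 10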
    
// Copy the swift.conf file to the /etc/swift directory on each storage node and any additional nodes running the proxy service.

// On all nodes:
chown -R root:swift /etc/swift

systemctl enable openstack-swift-proxy.service memcached.service
systemctl start openstack-swift-proxy.service memcached.service

// On Storage Node:
systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service \
openstack-swift-account-reaper.service openstack-swift-account-replicator.service
systemctl start openstack-swift-account.service openstack-swift-account-auditor.service \
openstack-swift-account-reaper.service openstack-swift-account-replicator.service
systemctl enable openstack-swift-container.service \
openstack-swift-container-auditor.service openstack-swift-container-replicator.service \
openstack-swift-container-updater.service
systemctl start openstack-swift-container.service \
openstack-swift-container-auditor.service openstack-swift-container-replicator.service \
openstack-swift-container-updater.service
systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service \
openstack-swift-object-replicator.service openstack-swift-object-updater.service
systemctl start openstack-swift-object.service openstack-swift-object-auditor.service \
openstack-swift-object-replicator.service openstack-swift-object-updater.service

chcon -R system_u:object_r:swift_data_t:s0 /srv/node // in case of SELinux denials on /srv/node

source /root/demo-open
swift stat
openstack container create container1
openstack object create container1 FILE // Replace FILE with the name of a local file to upload to the container1 container.
openstack object list container1
openstack object save container1 FILE