
CentOS 7.3 OpenStack Liberty Installation and Deployment Notes

Published 2020-08-18 · Author: koumm

1. Environment

1.1 Security

This guide describes installing OpenStack on Red Hat Enterprise Linux 7 and its derivatives, using the EPEL repository.    
Note: CentOS 7.3 is used throughout for this OpenStack Liberty deployment. Creating and running virtual machines was tested in a KVM environment.

1.2 Host Networking

1. Disable the firewall and SELinux on both the controller and compute nodes

systemctl stop iptables    
systemctl stop firewalld  
systemctl disable firewalld    
setenforce 0    
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux

yum install vim net-tools

 

2. Configure the hosts file

Configure hosts on both the controller and compute nodes:
echo "192.168.0.231 controller" >> /etc/hosts    
echo "192.168.0.232 compute1" >> /etc/hosts

 

1.3 Time Synchronization

1) Controller node

# yum install -y chrony    
# vim /etc/chrony.conf    
allow 192.168/16 # which hosts are allowed to synchronize time with this server

# systemctl enable chronyd.service           # start at boot    
# systemctl start chronyd.service    
# timedatectl set-timezone Asia/Shanghai     # set the time zone
# timedatectl status
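Once the compute node below is configured, a hedged way to confirm it is actually polling this server (chronyc clients must run as root and only lists hosts that have already connected):

# chronyc clients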

 

2) Compute node

# yum install -y chrony    
# vim /etc/chrony.conf    
server 192.168.0.231 iburst # keep only this one server line, pointing at the controller

# systemctl enable chronyd.service    
# systemctl start chronyd.service    
# timedatectl set-timezone Asia/Shanghai    
# chronyc sources

 

1.4 Install the OpenStack Packages

1. Prepare the OpenStack yum repository

# vi CentOS-OpenStack-liberty.repo

[centos-openstack-liberty]    
name=CentOS-7 - OpenStack liberty    
baseurl=http://mirrors.aliyun.com/centos/7/cloud/$basearch/openstack-liberty/    
gpgcheck=0    
enabled=1    
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

[centos-openstack-liberty-test]    
name=CentOS-7 - OpenStack liberty Testing    
baseurl=http://buildlogs.centos.org/centos/7/cloud/$basearch/openstack-liberty/    
gpgcheck=0    
enabled=0


# Alternatively, use the EPEL-based repository package provided for installing OpenStack on CentOS 7

# yum install -y centos-release-openstack-liberty

 

2. Install OpenStack
1) Install OpenStack on the controller node

#Base    
yum install -y http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm    
yum install -y centos-release-openstack-liberty    
yum install -y python-openstackclient

##MySQL    
yum install -y mariadb mariadb-server MySQL-python

##RabbitMQ    
yum install -y rabbitmq-server

##Keystone    
yum install -y openstack-keystone httpd mod_wsgi memcached python-memcached

##Glance    
yum install -y openstack-glance python-glance python-glanceclient

##Nova    
yum install -y openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient

##Neutron    
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset

##Dashboard    
yum install -y openstack-dashboard

##Cinder    
yum install -y openstack-cinder python-cinderclient

 

2) Install OpenStack on the compute node

##Base    
yum install -y http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm    
yum install -y centos-release-openstack-liberty    
yum install -y python-openstackclient

##Nova    
yum install -y openstack-nova-compute sysfsutils

##Neutron    
yum install -y openstack-neutron openstack-neutron-linuxbridge ebtables ipset

##Cinder    
yum install -y openstack-cinder python-cinderclient targetcli python-oslo-policy

 

1.5 Install the SQL Database

1. Install the database

[root@controller ~]# yum install mariadb mariadb-server MySQL-python    
[root@controller ~]# vi /etc/my.cnf.d/mariadb_openstack.cnf

[mysqld]    
bind-address = 192.168.0.231    
default-storage-engine = innodb    
innodb_file_per_table    
collation-server = utf8_general_ci    
init-connect = 'SET NAMES utf8'    
character-set-server = utf8    
max_connections=1000

[root@controller ~]# systemctl enable mariadb.service    
[root@controller ~]# systemctl start mariadb.service

2. Set the database root password to: openstack

[root@controller ~]# mysql_secure_installation

3. Raise the database's maximum connection limit:

[root@controller ~]# vi /usr/lib/systemd/system/mariadb.service

Add the following two lines under the [Service] section:    
LimitNOFILE=10000    
LimitNPROC=10000

systemctl --system daemon-reload    
systemctl restart mariadb.service

mysql -uroot -popenstack    
SQL> show variables like 'max_connections';

 

4. Create the databases

# mysql -u root -p

CREATE DATABASE keystone;    
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'openstack';    
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'openstack';

CREATE DATABASE glance;    
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'openstack';    
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'openstack';

CREATE DATABASE nova;    
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'openstack';    
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'openstack';

CREATE DATABASE neutron;    
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'openstack';    
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'openstack';

CREATE DATABASE cinder;    
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'openstack';    
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'openstack';

FLUSH PRIVILEGES;    
SHOW DATABASES;

 

1.6 Message Queue (RabbitMQ; clustering is supported)

1) Install and start RabbitMQ on the controller node; it listens on port 5672

yum install rabbitmq-server

systemctl enable rabbitmq-server.service    
systemctl start rabbitmq-server.service

# netstat -tunlp  | grep 5672    
tcp        0      0 0.0.0.0:15672           0.0.0.0:*               LISTEN      1694/beam.smp      
tcp        0      0 0.0.0.0:25672           0.0.0.0:*               LISTEN      1694/beam.smp      
tcp6       0      0 :::5672                 :::*                    LISTEN      1694/beam.smp

 

2) Add the openstack user:

# rabbitmqctl add_user openstack openstack

3) Grant the openstack user configure, write, and read permissions

# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
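To double-check the new account and its permissions, use the standard rabbitmqctl listing subcommands:

# rabbitmqctl list_users    
# rabbitmqctl list_permissions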

4) List the installed plugins

# rabbitmq-plugins list

5) Enable the management plugin

# rabbitmq-plugins enable rabbitmq_management

The following plugins have been enabled:    
  mochiweb    
  webmachine    
  rabbitmq_web_dispatch    
  amqp_client    
  rabbitmq_management_agent    
  rabbitmq_management    
Applying plugin configuration to rabbit@controller... started 6 plugins.

# rabbitmq-plugins list

# The following plugins are now enabled:    
Configured: E = explicitly enabled; e = implicitly enabled    
| Status:   * = running on rabbit@controller    
|/    
[e*] amqp_client                       3.6.5    
[e*] mochiweb                          2.13.1    
[E*] rabbitmq_management               3.6.5    
[e*] rabbitmq_management_agent         3.6.5    
[e*] rabbitmq_web_dispatch             3.6.5    
[e*] webmachine                        1.10.3

 

6) Restart

# systemctl restart rabbitmq-server.service

This brings up the web UI on port 15672.

 

7) Log in and make openstack an administrator

http://192.168.0.231:15672

The default login is guest/guest, which is also an administrator.    
(screenshot: RabbitMQ management console)

Note: set the tags of the openstack/openstack user to administrator.

 

2. Configure Keystone

Keystone is installed on the controller node. To improve performance, Apache serves the web requests and memcached stores the token information.

2.1 Install the Keystone Packages

# yum install openstack-keystone httpd mod_wsgi memcached python-memcached

 

2.2 Configure Keystone

Note: the default configuration may differ between Keystone versions.

openssl rand -hex 10

c885b63d0ce5760ff23e

This produces a random value; use it as the admin_token.
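As a minimal sketch, the generation and the edit can be combined in one step, assuming the openstack-utils package (which provides openstack-config) is installed:

ADMIN_TOKEN=$(openssl rand -hex 10)    
openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token $ADMIN_TOKEN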

cat /etc/keystone/keystone.conf |grep -v "^#" | grep -v "^$"

[DEFAULT]    
admin_token = c885b63d0ce5760ff23e

[database]    
connection = mysql://keystone:openstack@192.168.0.231/keystone

[memcache]    
servers = 192.168.0.231:11211

[revoke]    
driver = sql

[token]    
provider = uuid    
driver = memcache

 

2.3 Initialize the Database

1) Initialize the database

# chown -R keystone:keystone /var/log/keystone    
# su -s /bin/sh -c "keystone-manage db_sync" keystone

Note: this creates a keystone.log file under /var/log/keystone/; Keystone writes to it when it starts.

2) Verify the database

# mysql -h 192.168.0.231 -ukeystone -popenstack -e "use keystone;show tables;"

3) Start memcached

systemctl enable memcached.service    
systemctl start memcached.service

# netstat -tunlp | grep 11211    
tcp        0      0 127.0.0.1:11211         0.0.0.0:*               LISTEN      3288/memcached     
tcp6       0      0 ::1:11211               :::*                    LISTEN      3288/memcached     
udp        0      0 127.0.0.1:11211         0.0.0.0:*                           3288/memcached     
udp6       0      0 ::1:11211               :::*                                3288/memcached

 

2.4 Configure the HTTP Server

1) Set the server name

# vi /etc/httpd/conf/httpd.conf    
ServerName 192.168.0.231:80

 

2) Add the Keystone virtual hosts

# vi /etc/httpd/conf.d/wsgi-keystone.conf

Listen 5000    
Listen 35357

<VirtualHost *:5000>    
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}    
    WSGIProcessGroup keystone-public    
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public    
    WSGIApplicationGroup %{GLOBAL}    
    WSGIPassAuthorization On    
    <IfVersion >= 2.4>    
      ErrorLogFormat "%{cu}t %M"    
    </IfVersion>    
    ErrorLog /var/log/httpd/keystone-error.log    
    CustomLog /var/log/httpd/keystone-access.log combined

    <Directory /usr/bin>    
        <IfVersion >= 2.4>    
            Require all granted    
        </IfVersion>    
        <IfVersion < 2.4>    
            Order allow,deny    
            Allow from all    
        </IfVersion>    
    </Directory>    
</VirtualHost>

<VirtualHost *:35357>    
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}    
    WSGIProcessGroup keystone-admin    
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin    
    WSGIApplicationGroup %{GLOBAL}    
    WSGIPassAuthorization On    
    <IfVersion >= 2.4>    
      ErrorLogFormat "%{cu}t %M"    
    </IfVersion>    
    ErrorLog /var/log/httpd/keystone-error.log    
    CustomLog /var/log/httpd/keystone-access.log combined

    <Directory /usr/bin>    
        <IfVersion >= 2.4>    
            Require all granted    
        </IfVersion>    
        <IfVersion < 2.4>    
            Order allow,deny    
            Allow from all    
        </IfVersion>    
    </Directory>    
</VirtualHost>

 

3) Start the HTTP server

systemctl enable httpd.service    
systemctl start httpd.service

Verify:

[root@controller ~]# ss -ntl | grep -E "5000|35357"    
LISTEN     0      128          *:35357                    *:*                 
LISTEN     0      128          *:5000                     *:*                 

 

2.5 Register the Service Entity and API Endpoints

2.5.1 Set the environment variables

[root@controller ~]#

export OS_TOKEN=c885b63d0ce5760ff23e    
export OS_URL=http://192.168.0.231:35357/v3    
export OS_IDENTITY_API_VERSION=3

 

2.5.2 Register the Keystone service

[root@controller ~]# openstack service create --name keystone --description "OpenStack Identity" identity

2.5.3 Register the Keystone API endpoints: admin (management), public, and internal

[root@controller ~]# openstack endpoint create --region RegionOne identity public http://192.168.0.231:5000/v2.0    
[root@controller ~]# openstack endpoint create --region RegionOne identity internal http://192.168.0.231:5000/v2.0    
[root@controller ~]# openstack endpoint create --region RegionOne identity admin http://192.168.0.231:35357/v2.0

[root@controller ~]# openstack endpoint list

+----------------------------------+-----------+--------------+--------------+---------+-----------+---------------------------------+    
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                             |    
+----------------------------------+-----------+--------------+--------------+---------+-----------+---------------------------------+    
| 05a5e9b559664d848b45d353d12594c1 | RegionOne | keystone     | identity     | True    | admin     | http://192.168.0.231:35357/v2.0 |    
| 9a240664c4dc438aa8b9f892c668cb27 | RegionOne | keystone     | identity     | True    | internal  | http://192.168.0.231:5000/v2.0  |    
| e63642b80e4f45b69866825e9e1b9837 | RegionOne | keystone     | identity     | True    | public    | http://192.168.0.231:5000/v2.0  |    
+----------------------------------+-----------+--------------+--------------+---------+-----------+---------------------------------+

2.6 Create Projects, Users, and Roles

2.6.1 Create the admin project, admin user, and admin role, and link them together

[root@controller ~]# openstack project create --domain default --description "Admin Project" admin    
[root@controller ~]# openstack user create --domain default --password=openstack admin    
[root@controller ~]# openstack role create admin    
[root@controller ~]# openstack role add --project admin --user admin admin

[root@controller ~]# openstack user list    
+----------------------------------+-------+    
| ID                               | Name  |    
+----------------------------------+-------+    
| e2bae88d31b54e4ab1a4cb2251da8a6a | admin |    
+----------------------------------+-------+

2.6.2 Create the demo project, demo user, and user role, and link them together

[root@controller ~]# openstack project create --domain default --description "Demo Project" demo    
[root@controller ~]# openstack user create --domain default --password=openstack demo    
[root@controller ~]# openstack role create user    
[root@controller ~]# openstack role add --project demo --user demo user

[root@controller ~]# openstack user list    
+----------------------------------+-------+    
| ID                               | Name  |    
+----------------------------------+-------+    
| 4151e2b9b78842d282250d4cfb31ebba | demo  |    
| 508b377f6f3a478f80a5a019e2c5b10a | admin |    
+----------------------------------+-------+

2.6.3 Create the service project

[root@controller ~]# openstack project create --domain default --description "Service Project" service

List the projects:    
[root@controller ~]# openstack project list    
+----------------------------------+---------+    
| ID                               | Name    |    
+----------------------------------+---------+    
| 184655bf46de4c3fbc0f8f13d1d9bfb8 | service |    
| 3bfa1c4208d7482a8f21709d458f924e | demo    |    
| 77f86bae2d344a658f26f71d03933c45 | admin   |    
+----------------------------------+---------+

If an endpoint was registered incorrectly, delete it by its ID:    
[root@controller ~]# openstack endpoint delete ID    

 

2.7 Verify Keystone with Password Authentication

To verify with username/password authentication instead of token authentication, temporarily unset the token environment variables.

[root@controller ~]# unset OS_TOKEN OS_URL

Request a token for the admin user

[root@controller ~]# openstack --os-auth-url http://192.168.0.231:35357/v3 \    
   --os-project-domain-id default --os-user-domain-id default \    
   --os-project-name admin --os-username admin --os-auth-type password token issue    
Password:    
+------------+----------------------------------+    
| Field      | Value                            |    
+------------+----------------------------------+    
| expires    | 2017-05-10T03:12:15.764769Z      |    
| id         | b28410f9c6314cd8aebeca0beb478bf9 |    
| project_id | 79d295e81e5a4255a02a8ea26ae4606a |    
| user_id    | 4015e1151aee4ab7811f320378ce6031 |    
+------------+----------------------------------+

Request a token for the demo user    
[root@controller ~]# openstack --os-auth-url http://192.168.0.231:5000/v3 \    
   --os-project-domain-name default --os-user-domain-name default \    
   --os-project-name demo --os-username demo token issue    
Password:    
+------------+----------------------------------+    
| Field      | Value                            |    
+------------+----------------------------------+    
| expires    | 2017-05-10T03:12:59.252178Z      |    
| id         | 110b9597c5fd49ac9ac3c1957648ede7 |    
| project_id | ce0af495eb844e199db649d7f7baccb4 |    
| user_id    | afd908684eee42aaa7d73e22671eee24 |    
+------------+----------------------------------+

 

2.8 Environment Variable Scripts

1. Create the environment script for the admin user

[root@controller ~]# vim admin-openrc.sh    
export OS_PROJECT_DOMAIN_ID=default    
export OS_USER_DOMAIN_ID=default    
export OS_PROJECT_NAME=admin    
export OS_TENANT_NAME=admin    
export OS_USERNAME=admin    
export OS_PASSWORD=openstack    
export OS_AUTH_URL=http://192.168.0.231:35357/v3    
export OS_IDENTITY_API_VERSION=3

 

2. Create the environment script for the demo user

[root@controller ~]# vim demo-openrc.sh    
export OS_PROJECT_DOMAIN_ID=default    
export OS_USER_DOMAIN_ID=default    
export OS_PROJECT_NAME=demo    
export OS_TENANT_NAME=demo    
export OS_USERNAME=demo    
export OS_PASSWORD=openstack    
export OS_AUTH_URL=http://192.168.0.231:5000/v3    
export OS_IDENTITY_API_VERSION=3

 

3. Test using the scripts    
[root@controller ~]# source admin-openrc.sh    
[root@controller ~]# openstack token issue    
+------------+----------------------------------+    
| Field      | Value                            |    
+------------+----------------------------------+    
| expires    | 2017-05-10T03:19:08.928697Z      |    
| id         | df25646c15cb433ab7251dcd0308ecbf |    
| project_id | 79d295e81e5a4255a02a8ea26ae4606a |    
| user_id    | 4015e1151aee4ab7811f320378ce6031 |    
+------------+----------------------------------+

 

3. Install the Image Service (Glance)

Glance provides discovery, registration, and retrieval of virtual machine images. By default, images are stored under /var/lib/glance/images/.

3.1 Preparation

Use the admin credentials

[root@controller ~]# source admin-openrc.sh

1) Create the glance user and link the service project, admin role, and glance user together

[root@controller ~]# openstack user create --domain default --password=openstack glance    
[root@controller ~]# openstack role add --project service --user glance admin

2) Register the image service

[root@controller ~]# openstack service create --name glance --description "OpenStack Image service" image

3) Register the API endpoints

[root@controller ~]# openstack endpoint create --region RegionOne  image public http://192.168.0.231:9292    
[root@controller ~]# openstack endpoint create --region RegionOne  image internal http://192.168.0.231:9292    
[root@controller ~]# openstack endpoint create --region RegionOne  image admin http://192.168.0.231:9292

 

3.2 Install and Configure

3.2.1 Install the packages

[root@controller ~]# yum install openstack-glance python-glance python-glanceclient

 

3.2.2 Edit the Glance configuration

1. Configure the image create/delete/retrieve service (glance-api)

cat /etc/glance/glance-api.conf |grep -v "^#" | grep -v "^$"

[DEFAULT]    
verbose=True    
notification_driver = noop

[database]    
connection = mysql://glance:openstack@192.168.0.231/glance

[glance_store]    
default_store=file    
filesystem_store_datadir=/var/lib/glance/images/

[keystone_authtoken]    
auth_uri=http://192.168.0.231:5000    
auth_url=http://192.168.0.231:35357    
auth_plugin=password    
project_domain_id=default    
user_domain_id=default    
project_name=service    
username=glance    
password=openstack

[paste_deploy]    
flavor=keystone

 

2. Configure the image registry service (glance-registry)

cat /etc/glance/glance-registry.conf |grep -v "^#" | grep -v "^$"

[DEFAULT]    
verbose=True    
notification_driver = noop

[database]    
connection = mysql://glance:openstack@192.168.0.231/glance

[keystone_authtoken]    
auth_uri=http://192.168.0.231:5000    
auth_url=http://192.168.0.231:35357    
auth_plugin=password    
project_domain_id=default    
user_domain_id=default    
project_name=service    
username=glance    
password=openstack

[paste_deploy]    
flavor=keystone

 

3.2.3 Import the Data

1) Initialize the database

[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance

No handlers could be found for logger "oslo_config.cfg"    
/usr/lib64/python2.7/site-packages/sqlalchemy/engine/default.py:450: Warning: Duplicate index `ix_image_properties_image_id_name`. This is deprecated and will be disallowed in a future release.    
  cursor.execute(statement, parameters)

This warning can be ignored; use mysql to check that the login works and the tables were created.

 

2) Verify the database

# mysql -h 192.168.0.231 -uglance -popenstack -e "use glance;show tables;"

 

3.3 Start the Glance Services

# systemctl enable openstack-glance-api.service openstack-glance-registry.service    
# systemctl start openstack-glance-api.service openstack-glance-registry.service

Verify:

[root@controller ~]# ss -ntl | grep -E "9191|9292"    
LISTEN     0      128          *:9292                     *:*                 
LISTEN     0      128          *:9191                     *:*          

 

3.4 Verify the Glance Installation

We upload a system image to verify that Glance is deployed correctly.

1. Update the environment variable scripts

[root@controller ~]# echo "export OS_IMAGE_API_VERSION=2" | tee -a admin-openrc.sh demo-openrc.sh    
[root@controller ~]# source admin-openrc.sh    
[root@controller ~]# wget http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2

 

2. Upload the image to Glance

[root@controller ~]# glance image-create --name "CentOS-7-x86_64" --file CentOS-7-x86_64-GenericCloud.qcow2 --disk-format qcow2 --container-format bare \    
                     --visibility public --progress

[=============================>] 100%    
+------------------+--------------------------------------+    
| Property         | Value                                |    
+------------------+--------------------------------------+    
| checksum         | 212b6a881800cad892347073f0de2117     |    
| container_format | bare                                 |    
| created_at       | 2017-05-22T10:13:24Z                 |    
| disk_format      | qcow2                                |    
| id               | e7e2316a-f585-488e-9fd9-85ce75b098d4 |    
| min_disk         | 0                                    |    
| min_ram          | 0                                    |    
| name             | CentOS-7-x86_64                      |    
| owner            | be420231d13848809da36178cbac4d22     |    
| protected        | False                                |    
| size             | 741539840                            |    
| status           | active                               |    
| tags             | []                                   |    
| updated_at       | 2017-05-22T10:13:31Z                 |    
| virtual_size     | None                                 |    
| visibility       | public                               |    
+------------------+--------------------------------------+

 

3. List the uploaded images

[root@controller ~]# glance image-list    
+--------------------------------------+-----------------+    
| ID                                   | Name            |    
+--------------------------------------+-----------------+    
| 2ac90c0c-b923-43ff-8f99-294195a64ced | CentOS-7-x86_64 |    
+--------------------------------------+-----------------+

Check the file on disk:    
[root@controller ~]# ll /var/lib/glance/images/    
total 12980    
-rw-r-----. 1 glance glance 1569390592 Aug 26 12:50 2ac90c0c-b923-43ff-8f99-294195a64ced

 

4. Install the Compute Service (Nova)

4.1 Install and Configure the Controller Node

This part covers deploying Nova on the controller node.

4.2 Create and Register the nova User on the Controller Node

1. Use the admin credentials

[root@controller ~]#  source admin-openrc.sh

2. Create the nova user, role assignment, and service

[root@controller ~]# openstack user create --domain default --password=openstack nova    
[root@controller ~]# openstack role add --project service --user nova admin    
[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute

3. Register the API endpoints

[root@controller ~]# openstack endpoint create --region RegionOne compute public http://192.168.0.231:8774/v2/%\(tenant_id\)s    
[root@controller ~]# openstack endpoint create --region RegionOne compute internal http://192.168.0.231:8774/v2/%\(tenant_id\)s    
[root@controller ~]# openstack endpoint create --region RegionOne  compute admin http://192.168.0.231:8774/v2/%\(tenant_id\)s

 

4.3 Install and Configure Components on the Controller Node

4.3.1 Install the components

# yum install openstack-nova-api openstack-nova-cert \    
                     openstack-nova-conductor openstack-nova-console \    
                     openstack-nova-novncproxy openstack-nova-scheduler \    
                     python-novaclient

 

4.3.2 Edit the Nova configuration

cat /etc/nova/nova.conf|grep -v "^#" | grep -v "^$"

[DEFAULT]    
my_ip=192.168.0.231    
enabled_apis=osapi_compute,metadata    
auth_strategy=keystone    
allow_resize_to_same_host=True    
network_api_class=nova.network.neutronv2.api.API    
linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver    
security_group_api=neutron    
scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter    
firewall_driver=nova.virt.firewall.NoopFirewallDriver    
verbose=true    
rpc_backend=rabbit

[database]    
connection=mysql://nova:openstack@192.168.0.231/nova

[glance]    
host=192.168.0.231

[keystone_authtoken]    
auth_uri=http://192.168.0.231:5000    
auth_url=http://192.168.0.231:35357    
auth_plugin=password    
project_domain_id=default    
user_domain_id=default    
project_name=service    
username=nova    
password=openstack

[libvirt]    
virt_type=kvm

[neutron]    
url=http://192.168.0.231:9696    
auth_url=http://192.168.0.231:35357    
auth_plugin=password    
project_domain_id=default    
user_domain_id=default    
region_name=RegionOne    
project_name=service    
username=neutron    
password=openstack    
service_metadata_proxy=true    
metadata_proxy_shared_secret=METADATA_SECRET

[oslo_concurrency]    
lock_path=/var/lib/nova/tmp

[oslo_messaging_rabbit]    
rabbit_host=192.168.0.231    
rabbit_port=5672    
rabbit_userid=openstack    
rabbit_password=openstack

[vnc]    
vncserver_listen=$my_ip    
vncserver_proxyclient_address=$my_ip
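Note that METADATA_SECRET above is a placeholder: whatever value you pick must be identical here and in /etc/neutron/metadata_agent.ini (configured in section 5.1). A hedged way to generate one:

# openssl rand -hex 10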

 

4.3.3 Import the Data

[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova

No handlers could be found for logger "oslo_config.cfg"    
/usr/lib64/python2.7/site-packages/sqlalchemy/engine/default.py:450: Warning: Duplicate index `block_device_mapping_instance_uuid_virtual_name_device_name_idx`. This is deprecated and will be disallowed in a future release.    
  cursor.execute(statement, parameters)    
/usr/lib64/python2.7/site-packages/sqlalchemy/engine/default.py:450: Warning: Duplicate index `uniq_instances0uuid`. This is deprecated and will be disallowed in a future release.    
  cursor.execute(statement, parameters)

# mysql -h 192.168.0.231 -unova -popenstack -e "use nova;show tables;"

 

4.3.4 Finish the installation

# systemctl enable openstack-nova-api.service \    
  openstack-nova-cert.service openstack-nova-consoleauth.service \    
  openstack-nova-scheduler.service openstack-nova-conductor.service \    
  openstack-nova-novncproxy.service

# systemctl start openstack-nova-api.service \    
  openstack-nova-cert.service openstack-nova-consoleauth.service \    
  openstack-nova-scheduler.service openstack-nova-conductor.service \    
  openstack-nova-novncproxy.service

[root@controller ~]# openstack host list   
+------------+-------------+----------+    
| Host Name  | Service     | Zone     |    
+------------+-------------+----------+    
| controller | consoleauth | internal | // consoleauth handles console authentication    
| controller | conductor   | internal | // conductor mediates database access    
| controller | cert        | internal | // cert performs identity/certificate checks    
| controller | scheduler   | internal | // scheduler schedules instances    
+------------+-------------+----------+

 

4.4 Install and Configure the Compute Node

This part covers deploying Nova on the compute node.

4.4.1 Install the compute node packages

[root@compute1 ~]# yum install openstack-nova-compute sysfsutils

4.4.2 Copy nova.conf from the controller node and edit /etc/nova/nova.conf

[DEFAULT]    
my_ip=192.168.0.232

[vnc]    
enabled = True    
vncserver_listen = 0.0.0.0    
vncserver_proxyclient_address = $my_ip    
novncproxy_base_url = http://192.168.0.231:6080/vnc_auto.html    
keymap=en-us

[glance]    
host = 192.168.0.231

[libvirt]    
virt_type=kvm


Check that the configuration file looks right:

[root@compute1 ~]# cat /etc/nova/nova.conf |grep -v "^#" | grep -v "^$"

[DEFAULT]    
my_ip=192.168.0.232    
enabled_apis=osapi_compute,metadata    
auth_strategy=keystone    
allow_resize_to_same_host=True    
network_api_class=nova.network.neutronv2.api.API    
linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver    
security_group_api=neutron    
scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter    
firewall_driver=nova.virt.firewall.NoopFirewallDriver    
verbose=true    
rpc_backend=rabbit

[database]    
connection=mysql://nova:openstack@192.168.0.231/nova

[glance]    
host=192.168.0.231

[keystone_authtoken]    
auth_uri=http://192.168.0.231:5000    
auth_url=http://192.168.0.231:35357    
auth_plugin=password    
project_domain_id=default    
user_domain_id=default    
project_name=service    
username=nova    
password=openstack

[libvirt]    
virt_type=kvm    
inject_password =true    
inject_key = true

[neutron]    
url=http://192.168.0.231:9696    
auth_url=http://192.168.0.231:35357    
auth_plugin=password    
project_domain_id=default    
user_domain_id=default    
region_name=RegionOne    
project_name=service    
username=neutron    
password=openstack    
service_metadata_proxy=true    
metadata_proxy_shared_secret=METADATA_SECRET

[oslo_concurrency]    
lock_path=/var/lib/nova/tmp

[oslo_messaging_rabbit]    
rabbit_host=192.168.0.231    
rabbit_port=5672    
rabbit_userid=openstack    
rabbit_password=openstack

[vnc]    
novncproxy_base_url=http://192.168.0.231:6080/vnc_auto.html    
vncserver_listen=0.0.0.0    
vncserver_proxyclient_address=$my_ip    
enabled=true


Check whether the server supports hardware virtualization:

[root@compute1 ~]# egrep -c '(vmx|svm)' /proc/cpuinfo    
4

If the number shown is 0, hardware virtualization is not supported.
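If the count is 0, a common fallback is to let Nova run instances under software emulation instead of KVM (hedged: QEMU emulation is noticeably slower; openstack-utils is assumed for openstack-config):

[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu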

 

4.4.3 Finish the installation and start the services

[root@compute1 ~]# systemctl enable libvirtd.service openstack-nova-compute.service    
[root@compute1 ~]# systemctl start libvirtd.service openstack-nova-compute.service

4.4.4 Verify; fetch the environment variable scripts

[root@compute1 ~]# scp controller:~/*openrc.sh .    
root@controller's password:    
admin-openrc.sh                                                                    100%  289     0.3KB/s   00:00   
demo-openrc.sh                                                                     100%  285     0.3KB/s   00:00

[root@compute1 ~]# source admin-openrc.sh

1. Check that the installation registered correctly and that Glance is reachable

[root@compute1 ~]# nova image-list    
+--------------------------------------+-----------------+--------+--------+    
| ID                                   | Name            | Status | Server |    
+--------------------------------------+-----------------+--------+--------+    
| 2ac90c0c-b923-43ff-8f99-294195a64ced | CentOS-7-x86_64 | ACTIVE |        |    
+--------------------------------------+-----------------+--------+--------+

[root@compute1 ~]# openstack host list    
+------------+-------------+----------+    
| Host Name  | Service     | Zone     |    
+------------+-------------+----------+    
| controller | consoleauth | internal |    
| controller | conductor   | internal |    
| controller | cert        | internal |    
| controller | scheduler   | internal |    
| compute1   | compute     | nova     |    
+------------+-------------+----------+

2. List the Nova service components

[root@compute1 ~]# nova service-list    
+----+------------------+------------+----------+---------+-------+----------------------------+-----------------+    
| Id | Binary           | Host       | Zone     | Status  | State | Updated_at                 | Disabled Reason |    
+----+------------------+------------+----------+---------+-------+----------------------------+-----------------+    
| 1  | nova-consoleauth | controller | internal | enabled | up    | 2017-05-10T09:17:29.000000 | -               |    
| 2  | nova-conductor   | controller | internal | enabled | up    | 2017-05-10T09:17:31.000000 | -               |    
| 4  | nova-cert        | controller | internal | enabled | up    | 2017-05-10T09:17:29.000000 | -               |    
| 5  | nova-scheduler   | controller | internal | enabled | up    | 2017-05-10T09:17:29.000000 | -               |    
| 6  | nova-compute     | compute1   | nova     | enabled | up    | 2017-05-10T09:17:33.000000 | -               |    
+----+------------------+------------+----------+---------+-------+----------------------------+-----------------+

3. List the API endpoints (WARNING-level messages can be ignored)

[root@compute1 ~]# nova endpoints

 

5. Install the Networking Service (Neutron)

5.1 Install and Configure the Controller Node

1. Use the admin credentials

[root@controller ~]# source admin-openrc.sh    
[root@controller ~]# openstack user create --domain default --password=openstack neutron    
[root@controller ~]# openstack role add --project service --user neutron admin    
[root@controller ~]# openstack service create --name neutron --description "OpenStack Networking" network    
[root@controller ~]# openstack endpoint create --region RegionOne  network public http://192.168.0.231:9696    
[root@controller ~]# openstack endpoint create --region RegionOne network internal http://192.168.0.231:9696    
[root@controller ~]# openstack endpoint create --region RegionOne  network admin http://192.168.0.231:9696

 

2. Configure the network; this deployment uses a flat network
(1) Install the components

[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset

 

(2) Configure the Neutron server components

The server-side configuration covers the database, authentication, the message queue, topology change notifications, and the plug-in

vi /etc/neutron/neutron.conf

[DEFAULT]    
state_path = /var/lib/neutron    
core_plugin = ml2    
service_plugins = router    
rpc_backend=rabbit    
auth_strategy=keystone    
notify_nova_on_port_status_changes=True    
notify_nova_on_port_data_changes=True    
nova_url=http://192.168.0.231:8774/v2    
verbose=True

[database]    
connection = mysql://neutron:openstack@192.168.0.231/neutron

[oslo_messaging_rabbit]    
rabbit_host = 192.168.0.231    
rabbit_port = 5672    
rabbit_userid = openstack    
rabbit_password = openstack

[oslo_concurrency]    
lock_path = $state_path/lock

[keystone_authtoken]    
auth_uri=http://192.168.0.231:5000    
auth_url=http://192.168.0.231:35357    
auth_plugin=password    
project_domain_id=default    
user_domain_id=default    
project_name=service    
username=neutron    
password=openstack    
admin_tenant_name = %SERVICE_TENANT_NAME%    
admin_user = %SERVICE_USER%    
admin_password = %SERVICE_PASSWORD%

[nova]    
auth_url=http://192.168.0.231:35357    
auth_plugin=password    
project_domain_id=default    
user_domain_id=default    
region_name=RegionOne    
project_name=service    
username=nova    
password=openstack

 

(3) Configure the ML2 plug-in (the Layer 2 networking plug-in)

vi /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]    
# Note: once ML2 is enabled, removing values from type_drivers can lead to database inconsistencies    
type_drivers = flat,vlan,gre,vxlan,geneve    
tenant_network_types = vlan,gre,vxlan,geneve    
mechanism_drivers = openvswitch,linuxbridge    
extension_drivers = port_security

[ml2_type_flat]    
flat_networks = physnet1

[securitygroup]    
enable_ipset = True

 

(4) Configure the Linux bridge agent

vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[linux_bridge]    
physical_interface_mappings = physnet1:eth0

[vxlan]    
enable_vxlan = False

[agent]    
prevent_arp_spoofing = True

[securitygroup]    
enable_security_group = True    
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
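The mapping above assumes the NIC is named eth0; CentOS 7 often uses predictable names such as eno1 or ens33, so confirm the actual interface name first and adjust physnet1:<interface> to match:

# ip -o link show | awk -F': ' '{print $2}'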

 

(5) Configure the DHCP agent

vi /etc/neutron/dhcp_agent.ini

[DEFAULT]    
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver    
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq    
enable_isolated_metadata = True

 

(6) Configure the metadata agent

vi /etc/neutron/metadata_agent.ini

[DEFAULT]    
auth_uri = http://192.168.0.231:5000    
auth_url = http://192.168.0.231:35357    
auth_region = RegionOne    
auth_plugin = password    
project_domain_id = default    
user_domain_id = default    
project_name = service    
username = neutron    
password = openstack    
nova_metadata_ip = 192.168.0.231    
metadata_proxy_shared_secret = METADATA_SECRET    
verbose = True    
admin_tenant_name = %SERVICE_TENANT_NAME%    
admin_user = %SERVICE_USER%    
admin_password = %SERVICE_PASSWORD%

(7) Finish the installation and create the symlink

[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

Sync the database

[root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \    
   --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

Restart the nova-api service

[root@controller ~]# systemctl restart openstack-nova-api.service

Start the services and enable them at boot

[root@controller ~]# systemctl enable neutron-server.service \    
   neutron-linuxbridge-agent.service neutron-dhcp-agent.service \    
   neutron-metadata-agent.service

[root@controller ~]# systemctl start neutron-server.service \    
   neutron-linuxbridge-agent.service neutron-dhcp-agent.service \    
   neutron-metadata-agent.service


[root@controller ~]# source admin-openrc.sh    
[root@controller ~]# neutron agent-list

It can take 60 seconds or more before the agents show up.    
+--------------------------------------+--------------------+------------+-------+----------------+---------------------------+    
| id                                   | agent_type         | host       | alive | admin_state_up | binary                    |    
+--------------------------------------+--------------------+------------+-------+----------------+---------------------------+    
| 5d05a4fc-3a5e-49ef-b9da-28c7f4969532 | DHCP agent         | controller | :-)   | True           | neutron-dhcp-agent        |    
| 6e1979c0-c576-42d1-a7d7-5d28cfa74793 | Metadata agent     | controller | :-)   | True           | neutron-metadata-agent    |    
| f4af7059-0f36-430a-beee-f168ff55fd90 | Linux bridge agent | controller | :-)   | True           | neutron-linuxbridge-agent |    
+--------------------------------------+--------------------+------------+-------+----------------+---------------------------+

 

5.2 Install and Configure the Compute Node

1. Install the components

# yum install openstack-neutron openstack-neutron-linuxbridge ebtables ipset

 

2. Configure the common components

The common networking configuration covers authentication, the message queue, and the plug-ins; copy it directly from the controller node.

[root@controller ~]# scp /etc/neutron/neutron.conf 192.168.0.232:/etc/neutron/    
[root@controller ~]# scp /etc/neutron/plugins/ml2/linuxbridge_agent.ini 192.168.0.232:/etc/neutron/plugins/ml2/    
[root@controller ~]# scp /etc/neutron/plugins/ml2/ml2_conf.ini 192.168.0.232:/etc/neutron/plugins/ml2/

Finish the installation and create the symlink

[root@compute1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

[root@compute1 ~]# vi /etc/neutron/neutron.conf

[database]    
# Comment out every option in this section; the compute node does not need to connect to the database directly
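A hedged one-liner for the same edit, commenting out the connection option inside the [database] section only:

[root@compute1 ~]# sed -i '/^\[database\]/,/^\[/ s/^connection/#connection/' /etc/neutron/neutron.conf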

Restart the compute service:

[root@compute1 ~]# systemctl restart openstack-nova-compute.service

Start the Linux bridge agent and enable it at boot

[root@compute1 ~]# systemctl enable neutron-linuxbridge-agent.service    
[root@compute1 ~]# systemctl start neutron-linuxbridge-agent.service

 

5.3 Verify

Run the following commands on the controller node

[root@controller ~]# source admin-openrc.sh    
[root@controller ~]# neutron ext-list    
+-----------------------+-----------------------------------------------+    
| alias                 | name                                          |    
+-----------------------+-----------------------------------------------+    
| dns-integration       | DNS Integration                               |    
| ext-gw-mode           | Neutron L3 Configurable external gateway mode |    
| binding               | Port Binding                                  |    
| agent                 | agent                                         |    
| subnet_allocation     | Subnet Allocation                             |    
| l3_agent_scheduler    | L3 Agent Scheduler                            |    
| external-net          | Neutron external network                      |    
| flavors               | Neutron Service Flavors                       |    
| net-mtu               | Network MTU                                   |    
| quotas                | Quota management support                      |    
| l3-ha                 | HA Router extension                           |    
| provider              | Provider Network                              |    
| multi-provider        | Multi Provider Network                        |    
| extraroute            | Neutron Extra Route                           |    
| router                | Neutron L3 Router                             |    
| extra_dhcp_opt        | Neutron Extra DHCP opts                       |    
| security-group        | security-group                                |    
| dhcp_agent_scheduler  | DHCP Agent Scheduler                          |    
| rbac-policies         | RBAC Policies                                 |    
| port-security         | Port Security                                 |    
| allowed-address-pairs | Allowed Address Pairs                         |    
| dvr                   | Distributed Virtual Router                    |    
+-----------------------+-----------------------------------------------+


[root@controller ~]# neutron agent-list    
+--------------------------------------+--------------------+------------+-------+----------------+---------------------------+    
| id                                   | agent_type         | host       | alive | admin_state_up | binary                    |    
+--------------------------------------+--------------------+------------+-------+----------------+---------------------------+    
| 5d05a4fc-3a5e-49ef-b9da-28c7f4969532 | DHCP agent         | controller | :-)   | True           | neutron-dhcp-agent        |    
| 6e1979c0-c576-42d1-a7d7-5d28cfa74793 | Metadata agent     | controller | :-)   | True           | neutron-metadata-agent    |    
| f0aa7ff3-01c9-450f-bcc4-63ffee250bd7 | Linux bridge agent | compute1   | :-)   | True           | neutron-linuxbridge-agent |    
| f4af7059-0f36-430a-beee-f168ff55fd90 | Linux bridge agent | controller | :-)   | True           | neutron-linuxbridge-agent |    
+--------------------------------------+--------------------+------------+-------+----------------+---------------------------+

The listing above should show four agents: three on the controller node and one on compute1.

 

6. Launch a Virtual Machine Instance

6.1 Create the Virtual Network

6.1.1 Create a shared network

[root@controller ~]# source admin-openrc.sh    
[root@controller ~]# neutron net-create public --shared --provider:physical_network physnet1 --provider:network_type flat

Created a new network:    
+---------------------------+--------------------------------------+    
| Field                     | Value                                |    
+---------------------------+--------------------------------------+    
| admin_state_up            | True                                 |    
| id                        | 6759f3eb-a4c8-4503-b92b-da6daacf0ab4 |    
| mtu                       | 0                                    |    
| name                      | public                               |    
| port_security_enabled     | True                                 |    
| provider:network_type     | flat                                 |    
| provider:physical_network | physnet1                             |    
| provider:segmentation_id  |                                      |    
| router:external           | False                                |    
| shared                    | True                                 |    
| status                    | ACTIVE                               |    
| subnets                   |                                      |    
| tenant_id                 | 10952875490e43938d80d921337cb053     |    
+---------------------------+--------------------------------------+    
--shared allows all projects to use this network

 

6.1.2 Create a subnet

[root@controller ~]# neutron subnet-create public 192.168.0.0/24 --name public-subnet --allocation-pool start=192.168.0.200,end=192.168.0.210 \    
   --dns-nameserver 202.100.192.68 --gateway 192.168.0.253

Created a new subnet:    
+-------------------+----------------------------------------------------+    
| Field             | Value                                              |    
+-------------------+----------------------------------------------------+    
| allocation_pools  | {"start": "192.168.0.200", "end": "192.168.0.210"} |    
| cidr              | 192.168.0.0/24                                     |    
| dns_nameservers   | 202.100.192.68                                     |    
| enable_dhcp       | True                                               |    
| gateway_ip        | 192.168.0.253                                      |    
| host_routes       |                                                    |    
| id                | da75b2db-56f4-45d2-b3f3-2ccf172f8798               |    
| ip_version        | 4                                                  |    
| ipv6_address_mode |                                                    |    
| ipv6_ra_mode      |                                                    |    
| name              | public-subnet                                      |    
| network_id        | 2e098da8-70f9-40bc-a393-868ed9a446cf               |    
| subnetpool_id     |                                                    |    
| tenant_id         | be420231d13848809da36178cbac4d22                   |    
+-------------------+----------------------------------------------------+

 

6.1.3 View the networks and subnets

[root@controller ~]# neutron net-list 

+--------------------------------------+--------+-----------------------------------------------------+    
| id                                   | name   | subnets                                             |    
+--------------------------------------+--------+-----------------------------------------------------+    
| 2e098da8-70f9-40bc-a393-868ed9a446cf | public | da75b2db-56f4-45d2-b3f3-2ccf172f8798 192.168.0.0/24 |    
+--------------------------------------+--------+-----------------------------------------------------+

[root@controller ~]#  neutron subnet-list    
+--------------------------------------+----------------+----------------+----------------------------------------------------+    
| id                                   | name           | cidr           | allocation_pools                                   |    
+--------------------------------------+----------------+----------------+----------------------------------------------------+    
| da75b2db-56f4-45d2-b3f3-2ccf172f8798 | public-subnet  | 192.168.0.0/24 | {"start": "192.168.0.200", "end": "192.168.0.210"} |    
+--------------------------------------+----------------+----------------+----------------------------------------------------+

 

6.2 Generate a Key Pair

6.2.1 Use the admin credentials

[root@controller ~]#  source admin-openrc.sh

6.2.2 Generate the key pair

If you already have a key pair, there is no need to regenerate one with ssh-keygen

[root@controller ~]# ssh-keygen -q -N ""    
Enter file in which to save the key (/root/.ssh/id_rsa):    
[root@controller ~]# nova keypair-add --pub-key ~/.ssh/id_rsa.pub mykey

6.2.3 List the available key pairs

[root@controller ~]# nova keypair-list    
+-------+-------------------------------------------------+    
| Name  | Fingerprint                                     |    
+-------+-------------------------------------------------+    
| mykey | bc:ca:8e:bb:61:01:7f:8a:ab:5e:d8:b2:2c:35:b7:83 |    
+-------+-------------------------------------------------+

6.3 Add Security Group Rules

By default, the default security group applies to every instance, and its firewall rules deny all remote access. Typically we allow the ICMP and SSH protocols.

[root@controller ~]# nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0    
[root@controller ~]# nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
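To confirm the rules are in place (nova secgroup-list-rules ships with the Liberty-era novaclient):

[root@controller ~]# nova secgroup-list-rules default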

 

7. The Dashboard (Horizon)

7.1 Install the Dashboard

The web-based management UI, which also provides virtual console access to instances, is normally installed on the controller node.

[root@controller ~]# yum install openstack-dashboard

7.2 Configure the Dashboard

[root@controller ~]# vi /etc/openstack-dashboard/local_settings

OPENSTACK_HOST = "controller"    
ALLOWED_HOSTS = ['*', ]    
CACHES = {    
    'default': {    
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',    
         'LOCATION': '127.0.0.1:11211',    
    }    
}    
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"    
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True    
OPENSTACK_API_VERSIONS = {    
    "identity": 3,    
    "volume": 2,    
}    
OPENSTACK_NEUTRON_NETWORK = {    
    ...    
    'enable_router': False,    
    'enable_quotas': False,    
    'enable_distributed_router': False,    
    'enable_ha_router': False,    
    'enable_lb': False,    
    'enable_firewall': False,    
    'enable_vpn': False,    
    'enable_fip_topology_check': False,    
}    
# time zone setting    
TIME_ZONE = "Asia/Shanghai"

# allow setting the password when creating a virtual machine

OPENSTACK_HYPERVISOR_FEATURES = {    
    'can_set_mount_point': True,    
    'can_set_password': True,    
    'requires_keypair': True,    
}

 

7.3 Enable Services at Boot

[root@controller ~]# systemctl enable httpd.service memcached.service    
[root@controller ~]# systemctl restart httpd.service memcached.service

 

7.4 Verify the Installation

Open http://192.168.0.231/dashboard in a browser.

Domain: default    
User: admin or demo, with the password you created yourself.
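A quick non-browser smoke test (hedged: Horizon redirects to its login page, so -L follows redirects and the final status code should be 200):

[root@controller ~]# curl -s -o /dev/null -w "%{http_code}\n" -L http://192.168.0.231/dashboard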

(screenshots: Horizon login page and overview)

7.5 Launch a Virtual Machine Instance

7.5.1 Instance creation walkthrough

Note: the password of the official CentOS 7 cloud image is unknown, so specify one when creating the instance. By default you can then log in over SSH, but root cannot SSH in directly; to allow that, lift the root SSH login restriction when creating the instance.

If you want to be able to log in over SSH with a password, adjust the sshd settings with a script along these lines (testing with the cirros image is not recommended here).  
#!/bin/sh    
sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config    
# assumption: also permit direct root logins, per the note above    
sed -i 's/^#\?PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config    
systemctl restart sshd

(screenshots: the instance creation wizard, step by step)

7.5.2 Log in via the instance console

(screenshots: logging in from the instance console)

Test SSH login

(screenshot: SSH login test)

7.5.3 Summary

Working through the OpenStack installation builds an understanding of how the individual components work and how they are implemented, and this foundation can be extended with additional services.
