Note: CentOS 8, single-node deployment.

Note: in this lab, every manually configured password is set to admin.

Environment preparation: configure the hosts file.

192.168.116.85 is the local machine's IP.

echo '192.168.116.85 controller vip myip' >> /etc/hosts
yum upgrade -y

Disable the firewall and SELinux

systemctl disable firewalld  --now
sed -i '/^SELINUX=/s/enforcing/disabled/' /etc/selinux/config
setenforce 0

Note: the OpenStack networking components conflict with NetworkManager, so NetworkManager must be disabled and the legacy network service enabled on every node that handles networking.

# Install the legacy network-scripts service
dnf install network-scripts -y
# Stop NetworkManager and disable it at boot
systemctl stop NetworkManager && systemctl disable NetworkManager
# Start the network service and enable it at boot
systemctl start network && systemctl enable network
# If enabling the network service fails, run the following command
/usr/lib/systemd/systemd-sysv-install enable network
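If an interface was previously managed by NetworkManager, the network service needs a traditional ifcfg file. A minimal static-IP sketch for this lab's interface (ens192 is the provider interface used later in the Neutron step; the GATEWAY and DNS1 values below are assumptions, adjust them to the actual network):

cat > /etc/sysconfig/network-scripts/ifcfg-ens192 << EOF
TYPE=Ethernet
BOOTPROTO=static
NAME=ens192
DEVICE=ens192
ONBOOT=yes
NM_CONTROLLED=no
IPADDR=192.168.116.85
PREFIX=24
GATEWAY=192.168.116.1   # assumed gateway for this sketch
DNS1=223.5.5.5          # assumed DNS for this sketch
EOF
systemctl restart network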

1. Switch the yum repositories

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-8.repo

2. Install the OpenStack (Ussuri) repository

yum install  -y centos-release-openstack-ussuri

sed -i 's/^mirrorlist=http:\/\/mirrorlist.centos.org/#mirrorlist=http:\/\/mirrorlist.centos.org/g' /etc/yum.repos.d/C*
sed -i 's/^#baseurl=http:\/\/mirror.centos.org/baseurl=https:\/\/vault.centos.org/g' /etc/yum.repos.d/C*
sed -i 's/gpgcheck=1/gpgcheck=0/g' /etc/yum.repos.d/C*
yum config-manager --set-enabled powertools
yum install -y python3-openstackclient
yum install -y openstack-selinux

3. Local database configuration

  • bind-address=127.0.0.1 # only connections from the local machine are allowed.
  • bind-address=<IP of a specific NIC> # e.g. bind-address=192.168.116.85: access is only possible through the NIC with IP 192.168.116.85.
  • bind-address=0.0.0.0 # the system default: listen on all NICs, i.e. allow access from any IP. (A quick check of the bound address is shown after mysql_secure_installation below.)
yum install -y mariadb mariadb-server python3-PyMySQL

cat > /etc/my.cnf.d/openstack.cnf << EOF
[mysqld]
bind-address = 192.168.116.85
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF

systemctl enable mariadb --now

mysql_secure_installation
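A quick verification sketch that MariaDB bound to the address set above (3306 is the default MariaDB port):

ss -tlnp | grep 3306
# expect 192.168.116.85:3306 here, matching bind-address, rather than 127.0.0.1 or 0.0.0.0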

4. Configure RabbitMQ

  Error encountered: libSDL2 was missing, so erlang failed to install and rabbitmq-server failed to install.

  Tried adding a separate erlang repository and installing again, which still failed: curl -s https://packagecloud.io/install/repositories/rabbitmq/erlang/script.rpm.sh|sh; after the failure, the extra erlang repository created by this step was removed.

  Fix: wget https://pkgs.dyn.su/el8/extras/x86_64/SDL2-2.0.14-5.el8.x86_64.rpm; yum -y install SDL2-2.0.14-5.el8.x86_64.rpm, or try yum -y upgrade first.

yum install -y rabbitmq-server
systemctl enable rabbitmq-server --now
rabbitmqctl add_user openstack openstack

rabbitmqctl set_permissions openstack ".*" ".*" ".*"
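A sanity check that the openstack RabbitMQ user and its permissions were created (standard rabbitmqctl queries):

rabbitmqctl list_users
rabbitmqctl list_permissions   # openstack should show ".*" for configure/write/read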

5. Configure memcached

Note: 192.168.116.85 is the local machine's IP.

yum install -y memcached python3-memcached

sed -i 's/OPTIONS=".*"/OPTIONS="-l 127.0.0.1,::1,192.168.116.85"/' /etc/sysconfig/memcached

systemctl enable memcached --now
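To confirm memcached is listening on the addresses set in OPTIONS (memcached-tool ships with the memcached package):

ss -tlnp | grep 11211
memcached-tool 192.168.116.85:11211 stats | head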

6. Configure etcd

Optional on a single node; it can be skipped (a minimal config sketch follows the install command below).

yum install -y etcd
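If etcd is deployed anyway, the key single-node settings in /etc/etcd/etcd.conf roughly follow the upstream install guide (a sketch assuming the default ports and data dir; 192.168.116.85 is this lab's IP):

# key values in /etc/etcd/etcd.conf (a sketch, not the full file)
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.116.85:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.116.85:2379"
ETCD_NAME="controller"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.116.85:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.116.85:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.116.85:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"

systemctl enable etcd --now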

7. Configure Keystone

Error:
openstack token issue
Failed to discover available identity versions when contacting http://vip:5000/v3. Attempting to parse version from URL.
Unexpected exception for http://vip:5000/v3/auth/tokens: Failed to parse: http://vip:5000/v3/auth/tokens

Fix:
yum -y upgrade

  

mysql -uroot -pAdmin123! -e'create database if not exists keystone;
grant all privileges on keystone.* to keystone@localhost identified by "keystone";
grant all privileges on keystone.* to keystone@"%" identified by "keystone";
flush privileges;'
yum -y install openstack-keystone httpd python3-mod_wsgi
sed -i -e '/^\[database\]/a connection \= mysql\+pymysql\:\/\/keystone:keystone\@vip\/keystone' -e '/^\[token\]/a provider \= fernet' /etc/keystone/keystone.conf
su -s /bin/sh -c "keystone-manage db_sync" keystone

keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
keystone-manage bootstrap --bootstrap-password admin   --bootstrap-admin-url http://vip:5000/v3/   --bootstrap-internal-url http://vip:5000/v3/   --bootstrap-public-url http://vip:5000/v3/   --bootstrap-region-id RegionOne

sed -i '/^\#ServerName/i ServerName 192.168.116.85' /etc/httpd/conf/httpd.conf
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl restart httpd
systemctl enable httpd --now
cat > openstack-admin.sh << EOF
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://vip:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
source openstack-admin.sh

openstack domain create --description "An Example Domain" example # test: create a domain
openstack token issue # check for errors
openstack domain list # list domains
openstack domain set example --disable # disable the test domain
openstack domain delete example # delete the test domain
openstack project create --domain default --description "Service Project" service
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password-prompt demo
openstack role create user
openstack role add --project demo --user demo user
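For testing as the non-admin user later, a demo environment script can be kept next to openstack-admin.sh (a sketch; openstack-demo.sh is a name chosen here, and OS_PASSWORD is whatever was typed at the --password-prompt above, admin in this lab):

cat > openstack-demo.sh << EOF
export OS_USERNAME=demo
export OS_PASSWORD=admin   # assumed: the password entered for the demo user
export OS_PROJECT_NAME=demo
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://vip:5000/v3
export OS_IDENTITY_API_VERSION=3
EOF
source openstack-demo.sh
openstack token issue   # should return a token scoped to the demo project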

8. Configure Glance

mysql -uroot  -pAdmin123! -e'create database if not exists glance;
grant all privileges on glance.* to glance@localhost identified by "glance";
grant all privileges on glance.* to glance@"%" identified by "glance";
flush privileges;'
openstack user create --domain default --password-prompt glance

openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://vip:9292
openstack endpoint create --region RegionOne image internal http://vip:9292
openstack endpoint create --region RegionOne image admin http://vip:9292
yum -y install openstack-glance

sed -i '/^\[database\]/a connection = mysql\+pymysql\:\/\/glance:glance\@vip\/glance' /etc/glance/glance-api.conf
sed -i '/^\[glance_store\]/a stores = file,http \ndefault_store = file \nfilesystem_store_datadir = /var/lib/glance/images/' /etc/glance/glance-api.conf
sed -i '/^\[keystone_authtoken\]/a www_authenticate_uri = http://vip:5000\nauth_url = http://vip:5000 \nmemcached_servers = 192.168.116.85:11211 \nauth_type = password \nproject_domain_name = Default \nuser_domain_name = Default \nproject_name = service \nusername = glance \npassword = admin' /etc/glance/glance-api.conf
sed -i '/^\[paste_deploy\]/a flavor = keystone' /etc/glance/glance-api.conf
su -s /bin/sh -c "glance-manage db_sync" glance

systemctl enable openstack-glance-api.service --now
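To verify Glance end to end, the usual smoke test is uploading a small CirrOS image (a sketch; the CirrOS URL/version below is the one commonly used in the install guides and may need adjusting):

wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
openstack image create "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image list   # the cirros image should show status "active"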

9. Configure Placement

mysql -uroot -pAdmin123! -e'create database placement;
grant all privileges on placement.* to placement@localhost identified by "placement";
grant all privileges on placement.* to placement@"%" identified by "placement";
flush privileges;'

openstack user create --domain default --password-prompt placement

openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://vip:8778
openstack endpoint create --region RegionOne placement internal http://vip:8778
openstack endpoint create --region RegionOne placement admin http://vip:8778
yum install -y openstack-placement-api

sed -i '/^\[placement_database\]/a connection = mysql+pymysql://placement:placement@vip/placement' /etc/placement/placement.conf
sed -i '/^\[api\]/a auth_strategy = keystone' /etc/placement/placement.conf
sed -i '/^\[keystone_authtoken\]/a www_authenticate_uri = http://vip:5000 \nauth_url = http://vip:5000/v3 \nmemcached_servers = 192.168.116.85:11211 \nauth_type = password \nproject_domain_name = Default \nuser_domain_name = Default \nproject_name = service \nusername = placement \npassword = admin' /etc/placement/placement.conf
su -s /bin/sh -c "placement-manage db sync" placement
sed -i  '/<\/VirtualHost>/i <Directory /usr/bin> \n   <IfVersion >= 2.4> \n      Require all granted \n   </IfVersion> \n   <IfVersion < 2.4> \n      Order allow,deny \n      Allow from all \n   </IfVersion> \n</Directory> ' /etc/httpd/conf.d/00-placement-api.conf

systemctl restart httpd
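Verification (placement-status ships with openstack-placement-api):

placement-status upgrade check
curl http://vip:8778   # expected: a small JSON document listing the Placement API versions, not a 403/500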

10. Configure Nova

mysql -uroot -pAdmin123! -e"
create database nova_api;
create database nova;
create database nova_cell0;
grant all privileges on nova_api.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova_api.* to 'nova'@'%' identified by 'nova';
grant all privileges on nova.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova.* to 'nova'@'%' identified by 'nova';
grant all privileges on nova_cell0.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova_cell0.* to 'nova'@'%' identified by 'nova';
flush privileges;"
openstack user create --domain default --password-prompt nova

openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://vip:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://vip:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://vip:8774/v2.1
yum install -y openstack-nova-api openstack-nova-conductor   openstack-nova-novncproxy openstack-nova-scheduler

sed -i '/^\[DEFAULT\]/a enabled_apis = osapi_compute,metadata \ntransport_url = rabbit://openstack:openstack@vip:5672/ \nmy_ip = 192.168.116.85' /etc/nova/nova.conf
sed -i '/^\[api\]/a auth_strategy = keystone' /etc/nova/nova.conf
sed -i '/^\[api_database\]/a connection = mysql+pymysql://nova:nova@vip/nova_api' /etc/nova/nova.conf
sed -i '/^\[database\]/a connection = mysql+pymysql://nova:nova@vip/nova' /etc/nova/nova.conf
sed -i '/^\[glance\]/a api_servers = http://vip:9292' /etc/nova/nova.conf
sed -i '/^\[keystone_authtoken\]/a www_authenticate_uri = http://vip:5000/ \nauth_url = http://vip:5000/ \nmemcached_servers = 192.168.116.85:11211 \nauth_type = password \nproject_domain_name = Default \nuser_domain_name = Default \nproject_name = service \nusername = nova \npassword = admin' /etc/nova/nova.conf
sed -i '/^\[oslo_concurrency\]/a lock_path = /var/lib/nova/tmp' /etc/nova/nova.conf
sed -i '/^\[placement\]/a region_name = RegionOne \nproject_domain_name = Default \nproject_name = service \nauth_type = password \nuser_domain_name = Default \nauth_url = http://vip:5000/v3 \nusername = placement \npassword = admin' /etc/nova/nova.conf
sed -i '/^\[vnc\]/a enabled = true \nserver_listen = $my_ip \nserver_proxyclient_address = $my_ip' /etc/nova/nova.conf
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova

Verify: su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
systemctl enable openstack-nova-api openstack-nova-scheduler  openstack-nova-conductor openstack-nova-novncproxy
systemctl restart openstack-nova-api openstack-nova-scheduler openstack-nova-conductor openstack-nova-novncproxy

During the lab I found a very handy tool for editing ini configuration files: crudini.

[libvirt]
#virt_type = kvm  # when OpenStack runs on a physical machine
#virt_type = qemu # when OpenStack runs inside a virtual machine
#### (Official docs: a virtual machine must configure libvirt to use qemu instead of kvm.) ####
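Whether kvm is even available can be checked before choosing virt_type (a count of 0 means no VT-x/AMD-V is exposed to this machine, so qemu is the right choice):

egrep -c '(vmx|svm)' /proc/cpuinfo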

  

yum install -y openstack-nova-compute
yum install -y crudini
crudini --set /etc/nova/nova.conf vnc server_listen '0.0.0.0'
crudini --set /etc/nova/nova.conf vnc novncproxy_base_url http://vip:6080/vnc_auto.html
crudini --set /etc/nova/nova.conf libvirt virt_type qemu
crudini --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300   # optional: this option belongs in the [scheduler] section and makes nova-scheduler discover new compute hosts periodically
Start this first:
systemctl restart libvirtd-tcp.socket
Then start:
systemctl enable libvirtd openstack-nova-compute
systemctl restart libvirtd openstack-nova-compute
openstack compute service list --service nova-compute

su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

Verification:
openstack compute service list
openstack catalog list
openstack image list
nova-status upgrade check

11. Configure Neutron

mysql -uroot -pAdmin123! -e'create database if not exists neutron;
grant all privileges on neutron.* to neutron@localhost identified by "neutron";
grant all privileges on neutron.* to neutron@"%" identified by "neutron";
flush privileges;'
openstack user create --domain default --password-prompt neutron

openstack role add --project service  --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://vip:9696
openstack endpoint create --region RegionOne network internal http://vip:9696
openstack endpoint create --region RegionOne network admin http://vip:9696
yum install -y openstack-neutron openstack-neutron-ml2   openstack-neutron-linuxbridge ebtables ipset iproute

crudini --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins router
crudini --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:openstack@vip
crudini --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
crudini --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes true
crudini --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes true
crudini --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:neutron@vip/neutron
crudini --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://vip:5000
crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://vip:5000
crudini --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers 192.168.116.85:11211
crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
crudini --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
crudini --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
crudini --set /etc/neutron/neutron.conf keystone_authtoken project_name service
crudini --set /etc/neutron/neutron.conf keystone_authtoken username neutron
crudini --set /etc/neutron/neutron.conf keystone_authtoken password admin
crudini --set /etc/neutron/neutron.conf nova auth_url http://vip:5000
crudini --set /etc/neutron/neutron.conf nova auth_type password
crudini --set /etc/neutron/neutron.conf nova project_domain_name default
crudini --set /etc/neutron/neutron.conf nova user_domain_name default
crudini --set /etc/neutron/neutron.conf nova region_name RegionOne
crudini --set /etc/neutron/neutron.conf nova project_name service
crudini --set /etc/neutron/neutron.conf nova username nova
crudini --set /etc/neutron/neutron.conf nova password admin
crudini --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan,vxlan
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge,l2population
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks provider
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 1:1000
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset true
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens192
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 192.168.116.85
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

crudini --set /etc/neutron/l3_agent.ini DEFAULT interface_driver linuxbridge

crudini --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver linuxbridge
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata true
crudini --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_host 192.168.116.85
crudini --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret METADATA_SECRET
crudini --set /etc/nova/nova.conf neutron auth_url http://vip:5000
crudini --set /etc/nova/nova.conf neutron auth_type password
crudini --set /etc/nova/nova.conf neutron project_domain_name default
crudini --set /etc/nova/nova.conf neutron user_domain_name default
crudini --set /etc/nova/nova.conf neutron region_name RegionOne
crudini --set /etc/nova/nova.conf neutron project_name service
crudini --set /etc/nova/nova.conf neutron username neutron
crudini --set /etc/nova/nova.conf neutron password admin
crudini --set /etc/nova/nova.conf neutron service_metadata_proxy true
crudini --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret METADATA_SECRET
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
systemctl restart openstack-nova-api
systemctl enable neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent
systemctl start neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent
systemctl enable neutron-l3-agent
systemctl start neutron-l3-agent
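Verification: once the services settle, every agent should report alive:

openstack network agent list
# expected: Linux bridge agent, DHCP agent, Metadata agent and L3 agent, all Alive :-) and State UP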

12. Configure Cinder

mysql -uroot -pAdmin123! -e'create database if not exists cinder;
grant all privileges on cinder.* to cinder@localhost identified by "cinder";
grant all privileges on cinder.* to cinder@"%" identified by "cinder";
flush privileges;'
openstack user create --domain default --password-prompt cinder

openstack role add --project service --user cinder admin
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://vip:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://vip:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://vip:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://vip:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://vip:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://vip:8776/v3/%\(project_id\)s
yum install -y openstack-cinder

crudini --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:openstack@vip
crudini --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
crudini --set /etc/cinder/cinder.conf DEFAULT my_ip 192.168.116.85
crudini --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:cinder@vip/cinder
crudini --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
crudini --set /etc/cinder/cinder.conf keystone_authtoken www_authenticate_uri http://vip:5000
crudini --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://vip:5000
crudini --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers 192.168.116.85:11211
crudini --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
crudini --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
crudini --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
crudini --set /etc/cinder/cinder.conf keystone_authtoken project_name service
crudini --set /etc/cinder/cinder.conf keystone_authtoken username cinder
crudini --set /etc/cinder/cinder.conf keystone_authtoken password admin
su -s /bin/sh -c "cinder-manage db sync" cinder
crudini --set /etc/nova/nova.conf cinder os_region_name RegionOne
systemctl restart openstack-nova-api
systemctl enable openstack-cinder-api openstack-cinder-scheduler
systemctl start openstack-cinder-api openstack-cinder-scheduler
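Verification (cinder-volume only appears after the storage backend below is configured):

openstack volume service list   # cinder-scheduler should be "up" at this point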

Storage (cinder-volume backend and cinder-backup)

#yum install -y lvm2 device-mapper-persistent-data 

#systemctl enable lvm2-lvmetad
#systemctl start lvm2-lvmetad
vgcreate cinder-volumes /dev/sdb
sed -i '/sysfs_scan =/i \\tfilter = [ "a/sdb/", "r/.*/"]' /etc/lvm/lvm.conf
crudini --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
crudini --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://vip:9292
crudini --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
crudini --set /etc/cinder/cinder.conf lvm volume_group cinder-volumes
crudini --set /etc/cinder/cinder.conf lvm target_protocol iscsi
crudini --set /etc/cinder/cinder.conf lvm target_helper lioadm
systemctl enable openstack-cinder-volume target
systemctl start openstack-cinder-volume target
crudini --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
crudini --set /etc/cinder/cinder.conf lvm target_helper lioadm
crudini --set /etc/cinder/cinder.conf lvm target_protocol iscsi
crudini --set /etc/cinder/cinder.conf lvm target_ip_address 192.168.116.85
crudini --set /etc/cinder/cinder.conf lvm volume_group vg_volume01
crudini --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
crudini --set /etc/cinder/cinder.conf lvm volumes_dir $state_path/volumes
systemctl restart openstack-cinder-volume
yum install -y openstack-cinder
yum install -y nfs-utils
sed -i '/^#Domain =/a Domain = srv.world' /etc/idmapd.conf
crudini --set /etc/cinder/cinder.conf DEFAULT backup_driver cinder.backup.drivers.nfs.NFSBackupDriver
crudini --set /etc/cinder/cinder.conf DEFAULT backup_mount_point_base /var/lib/cinder/backup_nfs
crudini --set /etc/cinder/cinder.conf DEFAULT backup_share nfs.srv.world:/var/lib/cinder-backup
systemctl enable openstack-cinder-backup
systemctl start openstack-cinder-backup
chown -R cinder. /var/lib/cinder/backup_nfs
openstack volume backup create --name bk-disk_nfs-01 --incremental --force disk_nfs
openstack volume backup restore bk-disk_nfs-01 disk_nfs
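A final smoke test of the block-storage chain (test-vol is an arbitrary name used only for this check):

openstack volume create --size 1 test-vol
openstack volume list    # the volume should reach status "available"
openstack volume delete test-vol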
