1. Using Ceph as the Glance Backend

1.1 Create a pool for storing images

[root@serverc ~]#  ceph osd pool create images 128 128

pool 'images' created

[root@serverc ~]# ceph osd pool application enable images rbd

enabled application 'rbd' on pool 'images'
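
As an optional sanity check, the PG count and the enabled application can be read back from any monitor node (both commands are read-only):

# ceph osd pool get images pg_num
# ceph osd pool application get images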

1.2 Create the client.glance account and grant capabilities

[root@serverc ~]# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=images' -o /etc/ceph/ceph.client.glance.keyring

[root@serverc ~]# ll /etc/ceph/ceph.client.glance.keyring

-rw-r--r-- 1 root root 64 Mar 31 10:33 /etc/ceph/ceph.client.glance.keyring
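
To confirm the capabilities were recorded as intended, the new identity can be printed back (read-only):

# ceph auth get client.glance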

1.3 Install the Ceph client on the Glance server

[root@serverb ~]# yum -y install ceph-common

Copy ceph.conf and ceph.client.glance.keyring from the Ceph node to the Glance server:

[root@serverc ceph]# scp -r /etc/ceph/ceph.conf /etc/ceph/ceph.client.glance.keyring serverb:/etc/ceph/

ceph.conf % .5MB/s :
ceph.client.glance.keyring % .1KB/s :

1.4 Adjust permissions on the client

[root@serverb ~]#  chown glance.glance /etc/ceph/ceph.client.glance.keyring

1.5 Modify the configuration files

Edit /etc/ceph/ceph.conf on the client:

[root@serverb ~]# vim /etc/ceph/ceph.conf

[client.glance]
keyring = /etc/ceph/ceph.client.glance.keyring

Edit /etc/glance/glance-api.conf:

[glance_store]
stores = rbd
default_store = rbd
filesystem_store_datadir = /var/lib/glance/images/
rbd_store_chunk_size =
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
os_region_name=RegionOne
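
The same keys can also be set non-interactively. This is a sketch assuming the crudini utility (commonly present on Packstack-based installs) is available; it writes exactly the [glance_store] options listed above:

# crudini --set /etc/glance/glance-api.conf glance_store stores rbd
# crudini --set /etc/glance/glance-api.conf glance_store default_store rbd
# crudini --set /etc/glance/glance-api.conf glance_store rbd_store_pool images
# crudini --set /etc/glance/glance-api.conf glance_store rbd_store_user glance
# crudini --set /etc/glance/glance-api.conf glance_store rbd_store_ceph_conf /etc/ceph/ceph.conf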

[root@serverb ~]# grep -Ev "^$|^[#;]" /etc/glance/glance-api.conf

[DEFAULT]
bind_host = 0.0.0.0
bind_port =
workers =
image_cache_dir = /var/lib/glance/image-cache
registry_host = 0.0.0.0
debug = False
log_file = /var/log/glance/api.log
log_dir = /var/log/glance
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://glance:27c082e7c4a9413c@172.25.250.11/glance
[glance_store]
stores = rbd
default_store = rbd
filesystem_store_datadir = /var/lib/glance/images/
rbd_store_chunk_size =
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
os_region_name=RegionOne
[image_format]
[keystone_authtoken]
auth_uri = http://172.25.250.11:5000/v2.0
auth_type = password
project_name=services
username=glance
password=99b29d9142514f0f
auth_url=http://172.25.250.11:35357
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
policy_file = /etc/glance/policy.json
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]

Restart glance-api:

[root@serverb ~]# systemctl restart openstack-glance-api
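
If the API does not come back cleanly, the service status and the log file configured above are the first places to look:

# systemctl status openstack-glance-api
# tail -n 50 /var/log/glance/api.log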

1.6 Verification

Download a test image:

[root@foundation ~]#  wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

[root@foundation ~]# scp cirros-0.4.0-x86_64-disk.img root@serverb:/tmp/

[root@serverb ~]# cd /tmp/

[root@serverb tmp]# ll

-rw-r--r--  1 root root 12716032 Mar 31 10:39 cirros-0.4.0-x86_64-disk.img
-rw-r--r--. root root Mar : rht
-rw-r--r--. root root Mar : rht-vm-hosts
-rw-r--r--. root root Mar : rht-wks
drwx------ root root Mar : systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-chronyd.service-I1ANDV
drwx------ root root Mar : systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-epmd@0.0.0.0.service-0il3SD
drwx------ root root Mar : systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-httpd.service-mWaw6A
drwx------ root root Mar : systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-mariadb.service-xt5VbD
drwx------ root root Mar : systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-openstack-glance-api.service-RVKYpk
drwx------ root root Mar : systemd-private-2bf1332dd8ae4d5f95d9f9a64e0ef5ee-openstack-glance-registry.service-Bc5DYB

[root@serverb tmp]# glance image-list

You must provide a username via either --os-username or env[OS_USERNAME]

[root@serverb ~]# source keystonerc_admin

[root@serverb ~(keystone_admin)]# glance image-list

+----+------+
| ID | Name |
+----+------+
+----+------+

[root@serverb ~(keystone_admin)]# glance image-create  --name cirros  --file  /tmp/cirros-0.4.0-x86_64-disk.img --disk-format  qcow2 --container-format  bare  --progress

[=============================>] %
+------------------+--------------------------------------+
| Property | Value |
+------------------+--------------------------------------+
| checksum | 443b7623e27ecf03dc9e01ee93f67afe |
| container_format | bare |
| created_at | --30T10::44Z |
| disk_format | qcow2 |
| id | 79cfc319-f60a-45d4-834f-b70dc20c7975 |
| min_disk | |
| min_ram | |
| name | cirros |
| owner | 79cf145d371e48ef96f608cbf85d1788 |
| protected | False |
| size | |
| status | active |
| tags | [] |
| updated_at | --30T10::47Z |
| virtual_size | None |
| visibility | private |
+------------------+--------------------------------------+

Upload the same image a second time (this duplicate is deleted in section 1.7):

[root@serverb ~(keystone_admin)]# glance image-create  --name cirros  --file  /tmp/cirros-0.4.0-x86_64-disk.img --disk-format  qcow2 --container-format  bare  --progress

[=============================>] %
+------------------+--------------------------------------+
| Property | Value |
+------------------+--------------------------------------+
| checksum | 443b7623e27ecf03dc9e01ee93f67afe |
| container_format | bare |
| created_at | --30T01::49Z |
| disk_format | qcow2 |
| id | ab67abe6-7d65-407f-88e9-7b46d873b477 |
| min_disk | |
| min_ram | |
| name | cirros |
| owner | 79cf145d371e48ef96f608cbf85d1788 |
| protected | False |
| size | |
| status | active |
| tags | [] |
| updated_at | --30T01::49Z |
| virtual_size | None |
| visibility | private |
+------------------+--------------------------------------+

[root@serverb ~(keystone_admin)]# glance image-list

+--------------------------------------+--------+
| ID | Name |
+--------------------------------------+--------+
| 79cfc319-f60a-45d4-834f-b70dc20c7975 | cirros |
| ab67abe6-7d65-407f-88e9-7b46d873b477 | cirros |
+--------------------------------------+--------+

1.7 Delete an image

[root@serverb tmp(keystone_admin)]#  glance image-delete ab67abe6-7d65-407f-88e9-7b46d873b477

[root@serverb ~(keystone_admin)]# glance image-list

+--------------------------------------+--------+
| ID | Name |
+--------------------------------------+--------+
| 79cfc319-f60a-45d4-834f-b70dc20c7975 | cirros |
+--------------------------------------+--------+

[root@serverc ~]#  rados -p  images ls

rbd_object_map.105f76fe073c.0000000000000004
rbd_directory
rbd_data.105f76fe073c.0000000000000001
rbd_info
rbd_id.79cfc319-f60a-45d4-834f-b70dc20c7975
rbd_object_map.105f76fe073c
rbd_data.105f76fe073c.0000000000000000
rbd_header.105f76fe073c

[root@serverc ~]# rbd ls images

79cfc319-f60a-45d4-834f-b70dc20c7975

[root@serverc ~]# rbd info images/79cfc319-f60a-45d4-834f-b70dc20c7975

rbd image '79cfc319-f60a-45d4-834f-b70dc20c7975':
size kB in objects
order ( kB objects)
block_name_prefix: rbd_data.105f76fe073c
format:
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
flags:
create_timestamp: Sun Mar ::
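
The block_name_prefix above is what ties the image to its RADOS objects: every data object of this image is named rbd_data.105f76fe073c.&lt;object number&gt;. Filtering the pool listing by that prefix shows exactly which objects back the image:

# rados -p images ls | grep 105f76fe073c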

[root@serverb ~(keystone_admin)]# nova flavor-list

+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| | m1.tiny | | | | | | 1.0 | True |
| | m1.small | | | | | | 1.0 | True |
| | m1.medium | | | | | | 1.0 | True |
| | m1.large | | | | | | 1.0 | True |
| | m1.xlarge | | | | | | 1.0 | True |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+

1.8 Boot a Nova VM from the uploaded image

# nova boot --flavor 1 --image <image id> <instance name>

[root@serverb ~(keystone_admin)]# nova boot --flavor  1 --image 3d80ba00-b4c7-4f3c-98b8-17d9fd140216 vm1

+--------------------------------------+-----------------------------------------------+
| Property | Value |
+--------------------------------------+-----------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | |
| OS-EXT-SRV-ATTR:host | - |
| OS-EXT-SRV-ATTR:hostname | vm1 |
| OS-EXT-SRV-ATTR:hypervisor_hostname | - |
| OS-EXT-SRV-ATTR:instance_name | |
| OS-EXT-SRV-ATTR:kernel_id | |
| OS-EXT-SRV-ATTR:launch_index | |
| OS-EXT-SRV-ATTR:ramdisk_id | |
| OS-EXT-SRV-ATTR:reservation_id | r-7ygb36rz |
| OS-EXT-SRV-ATTR:root_device_name | - |
| OS-EXT-SRV-ATTR:user_data | - |
| OS-EXT-STS:power_state | |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | 3j2dpZjCXZn8 |
| config_drive | |
| created | --29T12::30Z |
| description | - |
| flavor | m1.tiny () |
| hostId | |
| host_status | |
| id | dec39eb4-75f5-47eb-b335-1e2b1833253d |
| image | cirros (3d80ba00-b4c7-4f3c-98b8-17d9fd140216) |
| key_name | - |
| locked | False |
| metadata | {} |
| name | vm1 |
| os-extended-volumes:volumes_attached | [] |
| progress | |
| security_groups | default |
| status | BUILD |
| tags | [] |
| tenant_id | 79cf145d371e48ef96f608cbf85d1788 |
| updated | --29T12::30Z |
| user_id | 8e0be34493e04722ba03ab30fbbf3bf8 |
+--------------------------------------+-----------------------------------------------+

[root@serverb ~(keystone_admin)]# nova list

+--------------------------------------+------+--------+------------+-------------+----------------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+------+--------+------------+-------------+----------------------------+
| dec39eb4-75f5-47eb-b335-1e2b1833253d | vm1 | ERROR | - | NOSTATE | novanetwork=192.168.32.255 |
+--------------------------------------+------+--------+------------+-------------+----------------------------+

The VM ends up in ERROR state; parts of the OpenStack configuration still need to be completed, and this will be addressed in a follow-up.

2. Using Ceph as the Cinder Backend

[root@serverb ~(keystone_admin)]# cinder list

+----+--------+------+------+-------------+----------+-------------+
| ID | Status | Name | Size | Volume Type | Bootable | Attached to |
+----+--------+------+------+-------------+----------+-------------+
+----+--------+------+------+-------------+----------+-------------+

2.1 Create an RBD pool for Cinder

[root@serverc ~]# ceph osd pool create volumes 64 64

pool 'volumes' created

[root@serverc ~]# ceph osd pool application enable volumes rbd

enabled application 'rbd' on pool 'volumes'

2.2 Authorize the client.cinder user

[root@serverc ~]# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=volumes' -o /etc/ceph/ceph.client.cinder.keyring

[root@serverc ~]# ceph auth get-key client.cinder -o /etc/ceph/temp.client.cinder.key

[root@serverc ~]# cat /etc/ceph/temp.client.cinder.key

AQA5KaBcszk/JxAAPdymqbMzqrfhZ+GyqZgUvg==

[root@serverc ~]# scp -r /etc/ceph/ceph.client.glance.keyring /etc/ceph/ceph.client.cinder.keyring serverb:/etc/ceph/

root@serverb's password:
ceph.client.glance.keyring % .6KB/s :
ceph.client.cinder.keyring % .5KB/s :

2.3 Install the Ceph client on the Cinder server

[root@serverb ~]# yum -y install ceph-common

[root@serverb tmp]# chown cinder.cinder -R /etc/ceph/ceph.client.cinder.keyring

2.4 Edit /etc/ceph/ceph.conf on the client

[root@serverb tmp]# vim /etc/ceph/ceph.conf

[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring

2.5 Generate a libvirt secret

[root@serverb tmp(keystone_admin)]# uuidgen

ade72e47-ce6f-4f44-a97d-d7dff6aef99c

[root@serverb tmp(keystone_admin)]# vim /etc/ceph/secret.xml

<secret ephemeral="no" private="no">
  <uuid>ade72e47-ce6f-4f44-a97d-d7dff6aef99c</uuid>
  <usage type="ceph">
    <name>client.cinder secret</name>
  </usage>
</secret>
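
Typing the UUID by hand invites mistakes; a small sketch that captures the uuidgen output in a shell variable and writes the same XML avoids that:

UUID=$(uuidgen)
cat > /etc/ceph/secret.xml <<EOF
<secret ephemeral="no" private="no">
  <uuid>${UUID}</uuid>
  <usage type="ceph">
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
echo "${UUID}"    # keep this value: cinder.conf needs it as rbd_secret_uuid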

[root@serverb tmp(keystone_admin)]# virsh secret-define --file /etc/ceph/secret.xml

Secret ade72e47-ce6f-4f44-a97d-d7dff6aef99c created

[root@serverb tmp(keystone_admin)]#  virsh secret-list

UUID                                  Usage
--------------------------------------------------------------------------------
ade72e47-ce6f-4f44-a97d-d7dff6aef99c  ceph client.cinder secret

2.6 Load the Ceph key into the secret

[root@serverb tmp(keystone_admin)]# virsh secret-set-value --secret ade72e47-ce6f-4f44-a97d-d7dff6aef99c --base64 $(cat /etc/ceph/temp.client.cinder.key)

Secret value set

[root@serverb tmp(keystone_admin)]# virsh secret-list

UUID                                  Usage
--------------------------------------------------------------------------------
ade72e47-ce6f-4f44-a97d-d7dff6aef99c  ceph client.cinder secret
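
Once libvirt holds the secret value, the plaintext key file is no longer needed on this host; the upstream Ceph/OpenStack integration guide recommends deleting such temporary copies:

# rm -f /etc/ceph/temp.client.cinder.key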

[root@serverb tmp(keystone_admin)]# ceph -s --id cinder

cluster:
  id: 70ec7a0b-7b4d-4c4d--3eb5ce3e8e50
  health: HEALTH_OK

services:
  mon: daemons, quorum serverc,serverd,servere
  mgr: servere(active), standbys: serverc, serverd
  osd: osds: up, in

data:
  pools: pools, pgs
  objects: objects, kB
  usage: MB used, GB / GB avail
  pgs: active+clean

2.7 Edit /etc/cinder/cinder.conf

[root@serverb tmp(keystone_admin)]#  vim /etc/cinder/cinder.conf

[DEFAULT]
enabled_backends = rbd2
default_volume_type = rbd2
glance_api_version =

[rbd2]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_user = cinder
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
# must match the libvirt secret UUID created in section 2.5
rbd_secret_uuid = ade72e47-ce6f-4f44-a97d-d7dff6aef99c
rbd_max_clone_depth =
rbd_store_chunk_size =
rados_connect_timeout = -
# optionally pin volume_backend_name; can be omitted
#volume_backend_name = rbd2

2.8 Create the Cinder volume type

[root@serverb tmp(keystone_admin)]# cinder type-create rbd2

+--------------------------------------+------+-------------+-----------+
| ID | Name | Description | Is_Public |
+--------------------------------------+------+-------------+-----------+
| c92590e9-33f8---945dc3eb4548 | rbd2 | - | True |
+--------------------------------------+------+-------------+-----------+

[root@serverb tmp(keystone_admin)]#  cinder type-key rbd2 set volume_backend_name=rbd2

[root@serverb tmp(keystone_admin)]# systemctl restart openstack-cinder-volume

[root@serverb tmp(keystone_admin)]# systemctl restart openstack-cinder-api

[root@serverb tmp(keystone_admin)]# systemctl restart openstack-cinder-scheduler
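
The three restarts can be collapsed into a loop, and cinder service-list afterwards should show the volume service up with the new backend loaded:

# for svc in volume api scheduler; do systemctl restart openstack-cinder-$svc; done
# cinder service-list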

2.9 Verification

[root@serverb tmp(keystone_admin)]# cinder create --name new-volume --display-name 'ceph storage' 2 --volume_type rbd2

+--------------------------------+--------------------------------------+
| Property | Value |
+--------------------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| consistencygroup_id | None |
| created_at | --31T03::34.000000 |
| description | None |
| encrypted | False |
| id | 5aa151ad-978c-40b3-bca9-ead7c34358ff |
| metadata | {} |
| migration_status | None |
| multiattach | False |
| name | ceph storage |
| os-vol-host-attr:host | None |
| os-vol-mig-status-attr:migstat | None |
| os-vol-mig-status-attr:name_id | None |
| os-vol-tenant-attr:tenant_id | 79cf145d371e48ef96f608cbf85d1788 |
| replication_status | disabled |
| size | |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| updated_at | None |
| user_id | 8e0be34493e04722ba03ab30fbbf3bf8 |
| volume_type | rbd2 |
+--------------------------------+--------------------------------------+

[root@serverb tmp(keystone_admin)]# cinder list

+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| ID | Status | Name | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| 5aa151ad-978c-40b3-bca9-ead7c34358ff | available | ceph storage | | rbd2 | false | |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
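
On the Ceph side, the Cinder RBD driver stores each volume as an RBD image named volume-&lt;cinder id&gt; in the volumes pool, which can be confirmed with the cinder identity (read-only):

# rbd ls volumes --id cinder
# rbd info volumes/volume-5aa151ad-978c-40b3-bca9-ead7c34358ff --id cinder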

3. Authenticating the RADOS Gateway with Keystone

3.1 Configure the RADOS Gateway server

[root@serverc ~]# yum install -y ceph-radosgw

[root@serverc ~]# ceph auth get-or-create client.rgw.serverc mon 'allow rwx' osd 'allow rwx' -o /etc/ceph/ceph.client.rgw.serverc.keyring

[root@serverc ~]# vim /etc/ceph/ceph.conf

[root@serverc ~]# systemctl restart ceph-radosgw@rgw.serverc

[root@serverc ~]# ps -ef|grep rados

root : ? :: /usr/bin/radosgw -f --cluster ceph --name client.rgw.serverc --setuser ceph --setgroup ceph
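
A quick anonymous HTTP probe tells whether civetweb is answering on the configured port (an unauthenticated request normally returns an empty S3 bucket listing):

# curl -s http://serverc.lab.example.com/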

3.2 Create the service and endpoints in Keystone

[root@serverb tmp(keystone_admin)]# openstack service create --description "Swift Service" --name swift object-store

+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Swift Service |
| enabled | True |
| id | 1dd0d40cd61d4bed870cc3c302a001da |
| name | swift |
| type | object-store |
+-------------+----------------------------------+

[root@serverb tmp(keystone_admin)]# openstack endpoint create --region RegionOne --publicurl "http://serverc.lab.example.com/swift/v1" --adminurl "http://serverc.lab.example.com/swift/v1" --internalurl "http://serverc.lab.example.com/swift/v1" swift

+--------------+-----------------------------------------+
| Field | Value |
+--------------+-----------------------------------------+
| adminurl | http://serverc.lab.example.com/swift/v1 |
| id | 47f906c29a904571a44dcd99ea27561c |
| internalurl | http://serverc.lab.example.com/swift/v1 |
| publicurl | http://serverc.lab.example.com/swift/v1 |
| region | RegionOne |
| service_id | 1dd0d40cd61d4bed870cc3c302a001da |
| service_name | swift |
| service_type | object-store |
+--------------+-----------------------------------------+

[root@serverb tmp(keystone_admin)]# openstack service list

+----------------------------------+----------+--------------+
| ID | Name | Type |
+----------------------------------+----------+--------------+
| 1dd0d40cd61d4bed870cc3c302a001da | swift | object-store |
| 26a3d56178cd4da2bca93e775ce4efac | cinderv3 | volumev3 |
| 834ee6fe73b2425fb5bb667ccdfdf6a7 | cinderv2 | volumev2 |
| 9581f6be4b4e4112bdb8d1cb8ef2794b | keystone | identity |
| a43b4be139364c4fbf9555e12eeabfed | glance | image |
| a63dad7778b744bfbc263dd73caf0fdb | cinder | volume |
| f3f2b987cdc14d7996bacbd13d3301e1 | nova | compute |
+----------------------------------+----------+--------------+

[root@serverb tmp(keystone_admin)]# openstack service show swift

+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Swift Service |
| enabled | True |
| id | 1dd0d40cd61d4bed870cc3c302a001da |
| name | swift |
| type | object-store |
+-------------+----------------------------------+

[root@serverb tmp(keystone_admin)]# openstack endpoint  show swift

+--------------+-----------------------------------------+
| Field | Value |
+--------------+-----------------------------------------+
| adminurl | http://serverc.lab.example.com/swift/v1 |
| enabled | True |
| id | 47f906c29a904571a44dcd99ea27561c |
| internalurl | http://serverc.lab.example.com/swift/v1 |
| publicurl | http://serverc.lab.example.com/swift/v1 |
| region | RegionOne |
| service_id | 1dd0d40cd61d4bed870cc3c302a001da |
| service_name | swift |
| service_type | object-store |
+--------------+-----------------------------------------+

3.3 Obtain the Keystone admin token

[root@serverb tmp(keystone_admin)]# cat /etc/keystone/keystone.conf |grep admin_token

# value is ignored and the `admin_token` middleware is effectively disabled.
# However, to completely disable `admin_token` in production (highly
# `AdminTokenAuthMiddleware` (the `admin_token_auth` filter) from your paste
#admin_token = <None>
admin_token = fb032ccf285a432b81c6fe347be8a07d

3.4 Edit /etc/ceph/ceph.conf

[root@serverc ~]# vim /etc/ceph/ceph.conf

[client.rgw.serverc]
host = serverc
keyring = /etc/ceph/ceph.client.rgw.serverc.keyring
rgw_frontends = civetweb port= num_threads=
log = /var/log/ceph/$cluster.$name.log
rgw_dns_name = serverc.lab.example.com

rgw_keystone_url = http://serverb.lab.example.com:5000
rgw_keystone_admin_token = fb032ccf285a432b81c6fe347be8a07d
rgw_keystone_accepted_roles = admin member swiftoperator
rgw_keystone_token_cache_size =
rgw_keystone_revocation_interval =
rgw_keystone_verify_ssl = false

[root@serverc ~]# systemctl restart ceph-radosgw@rgw.serverc

[root@serverc ~]# ps -ef|grep rados

ceph : ? :: /usr/bin/radosgw -f --cluster ceph --name client.rgw.serverc --setuser ceph --setgroup ceph

[root@serverb tmp(keystone_admin)]# ps -ef |grep keystone

keystone : ? :: keystone-admin -DFOREGROUND
keystone : ? :: keystone-admin -DFOREGROUND
keystone : ? :: keystone-main -DFOREGROUND
keystone : ? :: keystone-main -DFOREGROUND

[root@serverb tmp(keystone_admin)]# netstat -ntlp |grep 987

tcp6 ::: :::* LISTEN /httpd
tcp6 ::: :::* LISTEN /httpd
tcp6 ::: :::* LISTEN /httpd

3.5 Client verification

[root@serverb tmp(keystone_admin)]# swift list

[root@serverb tmp(keystone_admin)]# swift post testbucket

[root@serverb tmp(keystone_admin)]# swift list

testbucket
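
swift stat shows the container metadata and confirms the request is going through the Keystone-authenticated endpoint:

# swift stat testbucket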

[root@serverc ~]# ceph osd pool ls

images
volumes
.rgw.root
default.rgw.control
default.rgw.meta
default.rgw.log
default.rgw.buckets.index

[root@serverc ~]#  rados -p  default.rgw.buckets.index ls

.dir.ce5b2073-728f-42d5-8fac-b2e0aa2a41a3.4333.1

[root@serverb tmp(keystone_admin)]# swift upload testbucket  /etc/ceph/secret.xml

etc/ceph/secret.xml

[root@serverc ~]#  rados -p  default.rgw.buckets.data ls

ce5b2073-728f-42d5-8fac-b2e0aa2a41a3..1_etc/ceph/secret.xml
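
A final round trip, downloading the object back through the gateway, confirms the data is intact (the file lands under ./etc/ceph/ relative to the current directory):

# swift download testbucket etc/ceph/secret.xml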

Author's note: The material in this article comes mainly from Mr. Yan Wei of Yutian Education; I carried out and verified all the steps myself. If you wish to repost, please obtain permission from Yutian Education (http://www.yutianedu.com/) or from Mr. Yan himself (https://www.cnblogs.com/breezey/). Thank you!
