ceph osd pool create volumes 128
ceph osd pool create images 128
ceph osd pool create backups 128
ceph osd pool create vms 128
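The placement-group count of 128 is only an example; size it for your own number of OSDs. As an optional sanity check that the pools exist before continuing, you can list them from a monitor or admin node:

ceph osd lspools
ceph df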
ssh {your-openstack-server} sudo tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'
ceph auth get-or-create client.glance | ssh {your-glance-api-server} sudo tee /etc/ceph/ceph.client.glance.keyring
ssh {your-glance-api-server} sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
ceph auth get-or-create client.cinder | ssh {your-volume-server} sudo tee /etc/ceph/ceph.client.cinder.keyring
ssh {your-cinder-volume-server} sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder-backup | ssh {your-cinder-backup-server} sudo tee /etc/ceph/ceph.client.cinder-backup.keyring
ssh {your-cinder-backup-server} sudo chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
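If you want to verify that a key was created and copied correctly (an optional check, using the same host placeholders as above and the Glance client as the example):

ceph auth get client.glance
ssh {your-glance-api-server} sudo cat /etc/ceph/ceph.client.glance.keyring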
ceph auth get-or-create client.cinder | ssh {your-nova-compute-server} sudo tee /etc/ceph/ceph.client.cinder.keyring
ceph auth get-key client.cinder | ssh {your-compute-node} tee client.cinder.key
$ uuidgen
22003ebb-0f32-400e-9584-fa90b6efd874

$ cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>22003ebb-0f32-400e-9584-fa90b6efd874</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF

# virsh secret-define --file secret.xml
Secret 22003ebb-0f32-400e-9584-fa90b6efd874 created

# virsh secret-set-value --secret 22003ebb-0f32-400e-9584-fa90b6efd874 --base64 $(cat client.cinder.key) && rm client.cinder.key secret.xml
Secret value set
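Before moving on, you can optionally confirm that libvirt stored the secret (the UUID is the one generated above; the second command prints the base64 key back):

virsh secret-list
virsh secret-get-value 22003ebb-0f32-400e-9584-fa90b6efd874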
[DEFAULT]
...
default_store = rbd
...
[glance_store]
stores = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
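If you also want copy-on-write cloning of Glance images into volumes and VM disks, the upstream Ceph integration guide additionally suggests the option below under [DEFAULT] in glance-api.conf; note that it exposes backend image locations through the image API, so keep that API on a trusted network:

[DEFAULT]
show_image_direct_url = True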
[DEFAULT]
...
enabled_backends = ceph
...
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
[ceph]
...
rbd_user = cinder
rbd_secret_uuid = 22003ebb-0f32-400e-9584-fa90b6efd874
Configuration option = Default value | Description |
---|---|
[DEFAULT] | |
rados_connect_timeout = -1 | (IntOpt) Timeout value (in seconds) used when connecting to the Ceph cluster. If value < 0, no timeout is set and the default librados value is used. |
rados_connection_interval = 5 | (IntOpt) Interval value (in seconds) between connection retries to the Ceph cluster. |
rados_connection_retries = 3 | (IntOpt) Number of retries if the connection to the Ceph cluster fails. |
rbd_ceph_conf = | (StrOpt) Path to the Ceph configuration file. |
rbd_cluster_name = ceph | (StrOpt) The name of the Ceph cluster. |
rbd_flatten_volume_from_snapshot = False | (BoolOpt) Flatten volumes created from snapshots to remove the dependency from volume to snapshot. |
rbd_max_clone_depth = 5 | (IntOpt) Maximum number of nested volume clones that are taken before a flatten occurs. Set to 0 to disable cloning. |
rbd_pool = rbd | (StrOpt) The RADOS pool where RBD volumes are stored. |
rbd_secret_uuid = None | (StrOpt) The libvirt UUID of the secret for the rbd_user volumes. |
rbd_store_chunk_size = 4 | (IntOpt) Volumes will be chunked into objects of this size (in megabytes). |
rbd_user = None | (StrOpt) The RADOS client name for accessing RBD volumes; only set when using cephx authentication. |
volume_tmp_dir = None | (StrOpt) Directory where temporary image files are stored when the volume driver does not write them directly to the volume. Warning: this option is deprecated, please use image_conversion_dir instead. |
backup_driver = cinder.backup.drivers.ceph
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
To enable the Ceph backup driver, include the following option in the cinder.conf file:
backup_driver = cinder.backup.drivers.ceph
The following configuration options are available for the Ceph backup driver.
Configuration option = Default value | Description |
---|---|
[DEFAULT] | |
backup_ceph_chunk_size = 134217728 | (IntOpt) The chunk size, in bytes, that a backup is broken into before transfer to the Ceph object store. |
backup_ceph_conf = /etc/ceph/ceph.conf | (StrOpt) Ceph configuration file to use. |
backup_ceph_pool = backups | (StrOpt) The Ceph pool where volume backups are stored. |
backup_ceph_stripe_count = 0 | (IntOpt) RBD stripe count to use when creating a backup image. |
backup_ceph_stripe_unit = 0 | (IntOpt) RBD stripe unit to use when creating a backup image. |
backup_ceph_user = cinder | (StrOpt) The Ceph user to connect with. The default is to use the same user as for Cinder volumes. If not using cephx, this should be set to None. |
restore_discard_excess_bytes = True | (BoolOpt) If True, always discard excess bytes when restoring volumes, i.e. pad with zeroes. |
This example shows the default options for the Ceph backup driver.
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder
backup_ceph_chunk_size = 134217728
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
[client]
rbd cache = true
rbd cache writethrough until flush = true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20
mkdir -p /var/run/ceph/guests/ /var/log/qemu/
chown qemu:libvirt /var/run/ceph/guests /var/log/qemu/
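The admin-socket directory lives under /var/run, which is usually a tmpfs and is cleared on reboot. A minimal sketch of one way to recreate it automatically at boot, assuming a systemd host and the qemu:libvirt ownership used above (the file name is arbitrary):

# /etc/tmpfiles.d/ceph-guests.conf -- recreate the admin-socket directory at boot (hypothetical file name)
d /var/run/ceph/guests 0770 qemu libvirt -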
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 22003ebb-0f32-400e-9584-fa90b6efd874
disk_cachemodes="network=writeback"
inject_password = false
inject_key = false
inject_partition = -2
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
sudo service openstack-glance-api restart
sudo service openstack-nova-compute restart
sudo service openstack-cinder-volume restart
sudo service openstack-cinder-backup restart
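After the restarts, a quick end-to-end check is to upload an image through Glance or create a volume through Cinder with your usual clients, then confirm that a matching RBD image appears in the corresponding pool. For example, using the keyrings installed earlier on the Glance and Cinder nodes (an optional sanity check, not part of the upstream procedure):

rbd --id glance ls images
rbd --id cinder ls volumes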