Integrating DevStack with Ceph Block Storage on Kylin V10

2021-09-02   


During the integration, the Ceph packages need to be installed on the OpenStack controller and compute nodes so that they can act as Ceph clients. The rpm packages required for this environment can be downloaded from the link (extraction code: e6c9).

1. Perform the following on all nodes

#Modify the system's built-in yum repo file and append a Ceph repo at the end
[root@compute1 ~]# vim /etc/yum.repos.d/kylin_aarch64.repo
[ceph]
name = ceph
baseurl = http://update.cs2c.com.cn:8080/NS/V10/V10SP2/os/adv/lic/base/aarch64/
enabled = 1
gpgcheck = 0

#Refresh the yum cache, then install python2/3-rbd, python2/3-rados, and python2/3-prettytable
[root@compute1 ~]# yum makecache
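
#A possible install command for the packages named above (the exact package
#names available on Kylin V10 are an assumption and may differ):
[root@compute1 ~]# yum -y install python2-rbd python3-rbd python2-rados python3-rados python2-prettytable python3-prettytable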

#Copy rpm.tar.gz into /root with a remote transfer tool, then install the packages
[root@compute1 ~]# tar -xf rpm.tar.gz
[root@compute1 ~]# cd rpm
[root@compute1 rpm]# yum -y install ./*

#Set up local name resolution between the controller, compute, and Ceph nodes
[root@compute1 rpm]# vim /etc/hosts
173.9.1.11      ceph1
173.9.1.12      ceph2
173.9.1.14      ceph3
173.9.1.15      controller1     con1
173.9.1.16      controller2     con2
173.9.1.17      controller3     con3
173.9.1.18      compute1        com1
173.9.1.19      compute2        com2
173.9.1.20      compute3        com3

#Configure ssh so it does not prompt for 'yes' on first connection
[root@compute1 rpm]# vim /etc/ssh/ssh_config
#Change line 33 to
StrictHostKeyChecking no

#Distribute SSH keys for passwordless login; passwordless access between the controller and compute nodes was set up earlier, so only the Ceph nodes are handled here
[root@compute1 rpm]# for i in 11 12 14;do ssh-copy-id 173.9.1.$i;done
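
#A quick sanity check that passwordless login works (should print each Ceph
#node's hostname without asking for a password):
[root@compute1 rpm]# for i in 11 12 14;do ssh 173.9.1.$i hostname;done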

#Create the glance and cinder users (compute nodes only need the cinder user)
[root@compute1 rpm]# useradd glance
[root@compute1 rpm]# useradd cinder

2. Perform the following on ceph1

#Configure the Ceph side: append the following to /etc/hosts
[root@ceph1 ~]# vim /etc/hosts
173.9.1.16      controller2     con2
173.9.1.17      controller3     con3
173.9.1.18      compute1        com1
173.9.1.19      compute2        com2
173.9.1.20      compute3        com3

#Configure ssh so it does not prompt for 'yes' on first connection
[root@ceph1 ceph]# vim /etc/ssh/ssh_config
#Change line 33 to
StrictHostKeyChecking no

#Restart sshd and distribute SSH keys
[root@ceph1 ceph]# systemctl restart sshd
[root@ceph1 ceph]# for i in {16..20};do ssh-copy-id root@173.9.1.$i;done

#Create the required storage pools on ceph1; the PG count can be adjusted as needed
[root@ceph1 ~]# ceph osd pool create volumes 32
[root@ceph1 ~]# ceph osd pool create images 32
[root@ceph1 ~]# ceph osd pool create backups 32
[root@ceph1 ~]# ceph osd pool create vms 32
[root@ceph1 ~]# ceph osd pool ls
volumes
compute
backups
vms
images
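
#Depending on the Ceph release, newly created pools may also need to be
#initialized for RBD before first use (optional; a sketch, only relevant if
#your version provides the rbd pool init command):
[root@ceph1 ~]# for pool in volumes images backups vms;do rbd pool init $pool;done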

#Distribute the ceph.conf configuration file to all nodes
[root@ceph1 ~]# cd /etc/ceph
[root@ceph1 ceph]# ceph-deploy --overwrite-conf admin ceph1 con2 con3 com1 com2 com3

#On ceph1, create keyrings for the cinder, glance, and cinder-backup users
[root@ceph1 ceph]# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
[root@ceph1 ceph]# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
[root@ceph1 ceph]# ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups'
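
#The credentials just created can be inspected at any time, for example:
[root@ceph1 ceph]# ceph auth get client.cinder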

#Distribute the keyrings to the corresponding nodes and change their ownership so the services are allowed to access the Ceph pools
[root@ceph1 ceph]# ceph auth get-or-create client.glance | ssh controller2 tee /etc/ceph/ceph.client.glance.keyring
[root@ceph1 ceph]# ssh con2 chown glance:glance /etc/ceph/ceph.client.glance.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.glance | ssh controller3 tee /etc/ceph/ceph.client.glance.keyring
[root@ceph1 ceph]# ssh con3 chown glance:glance /etc/ceph/ceph.client.glance.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.cinder | ssh con2 tee /etc/ceph/ceph.client.cinder.keyring
[root@ceph1 ceph]# ssh con2 chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.cinder | ssh con3 tee /etc/ceph/ceph.client.cinder.keyring
[root@ceph1 ceph]# ssh con3 chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.cinder | ssh com1 tee /etc/ceph/ceph.client.cinder.keyring
[root@ceph1 ceph]# ssh com1 chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.cinder | ssh com2 tee /etc/ceph/ceph.client.cinder.keyring
[root@ceph1 ceph]# ssh com2 chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.cinder | ssh com3 tee /etc/ceph/ceph.client.cinder.keyring
[root@ceph1 ceph]# ssh com3 chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.cinder-backup | ssh con2 tee /etc/ceph/ceph.client.cinder-backup.keyring
[root@ceph1 ceph]# ssh con2 chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.cinder-backup | ssh con3 tee /etc/ceph/ceph.client.cinder-backup.keyring
[root@ceph1 ceph]# ssh con3 chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.cinder-backup | ssh com1 tee /etc/ceph/ceph.client.cinder-backup.keyring
[root@ceph1 ceph]# ssh com1 chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.cinder-backup | ssh com2 tee /etc/ceph/ceph.client.cinder-backup.keyring
[root@ceph1 ceph]# ssh com2 chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
[root@ceph1 ceph]# ceph auth get-or-create client.cinder-backup | ssh com3 tee /etc/ceph/ceph.client.cinder-backup.keyring
[root@ceph1 ceph]# ssh com3 chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
[root@ceph1 ceph]# ceph auth get-key client.cinder | ssh con2 tee client.cinder.key
[root@ceph1 ceph]# ceph auth get-key client.cinder | ssh con3 tee client.cinder.key
[root@ceph1 ceph]# ceph auth get-key client.cinder | ssh com1 tee client.cinder.key
[root@ceph1 ceph]# ceph auth get-key client.cinder | ssh com2 tee client.cinder.key
[root@ceph1 ceph]# ceph auth get-key client.cinder | ssh com3 tee client.cinder.key
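
#The repetitive distribution above can equivalently be written as a loop; a
#sketch assuming the same host aliases (the glance keyring still only goes to
#the two controllers, as shown above):
[root@ceph1 ceph]# for host in con2 con3 com1 com2 com3;do
    ceph auth get-or-create client.cinder | ssh $host tee /etc/ceph/ceph.client.cinder.keyring
    ssh $host chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
    ceph auth get-or-create client.cinder-backup | ssh $host tee /etc/ceph/ceph.client.cinder-backup.keyring
    ssh $host chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
    ceph auth get-key client.cinder | ssh $host tee client.cinder.key
done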

3. Perform the following on compute node compute1

#Check that the keyrings have been synced over and that their permissions are correct
[root@compute1 ~]# cd /etc/ceph
[root@compute1 ceph]# ll

#On the compute node, add the secret to libvirt; secret.xml is placed under /etc/ceph/ (the directory does not matter, but keeping it in /etc/ceph is easier to manage)
[root@compute1 ceph]# UUID=$(uuidgen)
[root@compute1 ceph]# cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
<uuid>${UUID}</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
EOF

#Record the UUID value; it is needed in the configuration files later
[root@compute1 ceph]# echo ${UUID}
e32b9064-f168-4090-8e85-cb21ad7ace77

[root@compute1 ceph]# virsh secret-define --file secret.xml
[root@compute1 ceph]# virsh secret-set-value --secret e32b9064-f168-4090-8e85-cb21ad7ace77 --base64 $(cat /etc/ceph/ceph.client.cinder.keyring | grep key | awk -F ' ' '{ print $3 }')
#If you see: Passing secret value as command-line argument is insecure!
#this is only a warning that passing the secret value on the command line is insecure; it is not an error and can be ignored
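
#To confirm the secret has been registered with libvirt, list the defined
#secrets; the UUID recorded above should appear:
[root@compute1 ceph]# virsh secret-list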

#Configure Cinder to use Ceph on the compute node
[root@compute1 ceph]# vim /etc/cinder/cinder.conf
#Comment out lines 23 and 24, then add the following after line 35
default_volume_type = ceph
enabled_backends = ceph

[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = e32b9064-f168-4090-8e85-cb21ad7ace77

#Configure Nova to use Ceph on the compute node
[root@compute1 ceph]# vim /etc/nova/nova.conf
#Add the following after line 26
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"

[libvirt]
...
#Add the following after line 94
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
disk_cachemodes="network=writeback"
rbd_user = cinder
rbd_secret_uuid = e32b9064-f168-4090-8e85-cb21ad7ace77

#Restart all devstack services
[root@compute1 ceph]# systemctl restart devstack@*
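
#Whether the per-service units came back up can be checked with (devstack@ is
#DevStack's standard systemd unit template, e.g. devstack@c-vol):
[root@compute1 ceph]# systemctl list-units 'devstack@*'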

4. Perform the following on compute nodes compute2 and compute3

#Sync secret.xml from com1
[root@compute2 ~]# rsync -az com1:/etc/ceph/secret.xml /etc/ceph/

#Check that the keyrings have been synced over and that their permissions are correct
[root@compute2 ~]# ll /etc/ceph/

[root@compute2 ~]# virsh secret-define --file /etc/ceph/secret.xml
[root@compute2 ~]# virsh secret-set-value --secret e32b9064-f168-4090-8e85-cb21ad7ace77 --base64 $(cat /etc/ceph/ceph.client.cinder.keyring | grep key | awk -F ' ' '{ print $3 }')

#Configure Cinder to use Ceph on the compute node
[root@compute2 ~]# vim /etc/cinder/cinder.conf
#Comment out lines 23 and 24, then add the following after line 35
default_volume_type = ceph
enabled_backends = ceph

[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = e32b9064-f168-4090-8e85-cb21ad7ace77

#Configure Nova to use Ceph on the compute node
[root@compute2 rpm]# vim /etc/nova/nova.conf
#Add the following after line 26
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"

[libvirt]
...
#Add the following after line 94
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
disk_cachemodes="network=writeback"
rbd_user = cinder
rbd_secret_uuid = e32b9064-f168-4090-8e85-cb21ad7ace77

#Restart all devstack services
[root@compute2 ~]# systemctl restart devstack@*

5. Perform the following on controller nodes controller2 and controller3

#Sync secret.xml from com1
[root@controller2 ~]# rsync -az com1:/etc/ceph/secret.xml /etc/ceph/

#Check that the keyrings have been synced over and that their permissions are correct
[root@controller2 ~]# ll /etc/ceph/

[root@controller2 ~]# virsh secret-define --file /etc/ceph/secret.xml
[root@controller2 ~]# virsh secret-set-value --secret e32b9064-f168-4090-8e85-cb21ad7ace77 --base64 $(cat /etc/ceph/ceph.client.cinder.keyring | grep key | awk -F ' ' '{ print $3 }')

#Configure Glance to use Ceph on the controller node
#On controller3 this step can simply be synced from controller2 with: rsync -az --delete con2:/etc/glance/glance-api.conf /etc/glance/
[root@controller2 ~]# vim /etc/glance/glance-api.conf
[DEFAULT]
...
#Add the following after line 11; if an 'enable COW cloning of images' entry already exists, comment it out
#enable COW cloning of images
show_image_direct_url = True

[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8

[paste_deploy]
flavor = keystone

#Configure Cinder to use Ceph on the controller node
[root@controller2 rpm]# vim /etc/cinder/cinder.conf
[DEFAULT]
...
#Comment out lines 25 and 26, then add the following after line 35
default_volume_type = ceph
enabled_backends = ceph

[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = e32b9064-f168-4090-8e85-cb21ad7ace77

#Configure Nova to use Ceph
[root@controller2 rpm]# vim /etc/nova/nova.conf
#Add the following after line 27
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"

[libvirt]
...
#Add the following after line 109
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
disk_cachemodes="network=writeback"
rbd_user = cinder
rbd_secret_uuid = e32b9064-f168-4090-8e85-cb21ad7ace77

#Restart all devstack services
[root@controller2 rpm]# systemctl restart devstack@*

6. Verify that OpenStack on each node is integrated with Ceph

[root@controller2 ~]# source /home/stack/devstack/openrc admin admin
WARNING: setting legacy OS_TENANT_NAME to support cli tools.
[root@controller2 ~]# virsh secret-list
 UUID                                   Usage
-------------------------------------------------------------------
 e32b9064-f168-4090-8e85-cb21ad7ace77   ceph client.cinder secret

[root@controller2 ~]# cinder service-list
+------------------+-------------------------+------+---------+-------+----------------------------+-----------------+
| Binary           | Host                    | Zone | Status  | State | Updated_at                 | Disabled Reason |
+------------------+-------------------------+------+---------+-------+----------------------------+-----------------+
| cinder-scheduler | controller2             | nova | enabled | up    | 2021-09-03T01:40:58.000000 | -               |
| cinder-scheduler | controller3             | nova | enabled | up    | 2021-09-03T01:40:58.000000 | -               |
| cinder-volume    | compute1@ceph           | nova | enabled | up    | 2021-09-03T01:40:53.000000 | -               |
| cinder-volume    | compute2@ceph           | nova | enabled | up    | 2021-09-03T01:40:51.000000 | -               |
| cinder-volume    | compute3@ceph           | nova | enabled | up    | 2021-09-03T01:40:55.000000 | -               |
| cinder-volume    | controller2@ceph        | nova | enabled | up    | 2021-09-03T01:40:53.000000 | -               |
| cinder-volume    | controller2@lvmdriver-1 | nova | enabled | down  | 2021-09-01T03:36:10.000000 | -               |
| cinder-volume    | controller3@ceph        | nova | enabled | up    | 2021-09-03T01:40:58.000000 | -               |
| cinder-volume    | controller3@lvmdriver-1 | nova | enabled | down  | 2021-09-01T03:36:10.000000 | -               |
+------------------+-------------------------+------+---------+-------+----------------------------+-----------------+

[root@controller2 ~]# openstack volume service list
+------------------+-------------------------+------+---------+-------+----------------------------+
| Binary           | Host                    | Zone | Status  | State | Updated At                 |
+------------------+-------------------------+------+---------+-------+----------------------------+
| cinder-scheduler | controller2             | nova | enabled | up    | 2021-09-03T01:41:48.000000 |
| cinder-volume    | controller2@lvmdriver-1 | nova | enabled | down  | 2021-09-01T03:36:10.000000 |
| cinder-volume    | controller3@lvmdriver-1 | nova | enabled | down  | 2021-09-01T03:36:10.000000 |
| cinder-scheduler | controller3             | nova | enabled | up    | 2021-09-03T01:41:48.000000 |
| cinder-volume    | controller3@ceph        | nova | enabled | up    | 2021-09-03T01:41:48.000000 |
| cinder-volume    | controller2@ceph        | nova | enabled | up    | 2021-09-03T01:41:43.000000 |
| cinder-volume    | compute1@ceph           | nova | enabled | up    | 2021-09-03T01:41:43.000000 |
| cinder-volume    | compute3@ceph           | nova | enabled | up    | 2021-09-03T01:41:45.000000 |
| cinder-volume    | compute2@ceph           | nova | enabled | up    | 2021-09-03T01:41:51.000000 |
+------------------+-------------------------+------+---------+-------+----------------------------+

[root@controller2 ~]# openstack service list
+----------------------------------+-------------+----------------+
| ID                               | Name        | Type           |
+----------------------------------+-------------+----------------+
| 1bfcd040dd3742748a5f59a9831a48a6 | glance      | image          |
| 4f0ea69d3f69423788e64a81c395f403 | nova        | compute        |
| 5bff6f6312d846e4b4698a13532fafad | keystone    | identity       |
| 5c33290bea464e17849d0067bbab4631 | placement   | placement      |
| 630caa1438ec4e578740eb0265a894b2 | cinder      | block-storage  |
| 8f419ad590fa4e818a8a26a73b00a546 | cinderv2    | volumev2       |
| 8f77839436594375b8102716473e0557 | nova_legacy | compute_legacy |
| c92f164b2ced433c84d9f9530df793d5 | cinderv3    | volumev3       |
| e010f5721bf6455da10a30ca289830ab | neutron     | network        |
+----------------------------------+-------------+----------------+
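
#As a final functional check, create a test volume and confirm that a matching
#rbd image appears in the volumes pool on the Ceph side (the volume name below
#is arbitrary; this assumes the default volume type now maps to the ceph backend)
[root@controller2 ~]# openstack volume create --size 1 test-ceph-vol
[root@ceph1 ~]# rbd ls volumes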
