目录内容如下
[root@mon-1 cluster]# ll
total 12
-rw-r--r-- 1 root root 197 Dec 6 02:24 ceph.conf
-rw-r--r-- 1 root root 2921 Dec 6 02:24 ceph-deploy-ceph.log
-rw------- 1 root root 73 Dec 6 02:24 ceph.mon.keyring
根据自己的IP配置向ceph.conf中添加public_network,并稍微增大mon之间时差允许范围(默认为0.05s,现改为2s):
[root@mon-1 cluster]# echo public_network=192.168.50.0/24 >>ceph.conf
[root@mon-1 cluster]# echo mon_clock_drift_allowed = 2 >> ceph.conf
[root@mon-1 cluster]# cat ceph.conf
[global]
fsid = 0865fe85-1655-4208-bed6-274cae945746
mon_initial_members = mon-1, osd-1, osd-2
mon_host = 192.168.50.123,192.168.50.124,192.168.50.125
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public_network=192.168.50.0/24
mon_clock_drift_allowed = 2
部署监控节点
[root@mon-1 cluster]# ceph-deploy mon create-initial
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.36): /usr/bin/ceph-deploy mon create-initial
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : create-initial
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x2023fc8>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] func : <function mon at 0x201d140>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] keyrings : None
[ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts mon-1 osd-1 osd-2
......省略
[ceph_deploy.gatherkeys][INFO ] Storing ceph.client.admin.keyring
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-mds.keyring
[ceph_deploy.gatherkeys][INFO ] keyring 'ceph.mon.keyring' already exists
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-osd.keyring
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-rgw.keyring
[ceph_deploy.gatherkeys][INFO ] Destroy temp directory /tmp/tmpVLiIFr
目录内容如下
[root@mon-1 cluster]# ll
total 76
-rw------- 1 root root 113 Dec 6 22:49 ceph.bootstrap-mds.keyring
-rw------- 1 root root 113 Dec 6 22:49 ceph.bootstrap-osd.keyring
-rw------- 1 root root 113 Dec 6 22:49 ceph.bootstrap-rgw.keyring
-rw------- 1 root root 129 Dec 6 22:49 ceph.client.admin.keyring
-rw-r--r-- 1 root root 300 Dec 6 22:47 ceph.conf
-rw-r--r-- 1 root root 50531 Dec 6 22:49 ceph-deploy-ceph.log
-rw------- 1 root root 73 Dec 6 22:46 ceph.mon.keyring
查看集群状态
[root@mon-1 ceph]# ceph -s
cluster 1b27aaf2-8b29-49b1-b50e-7ccb1f72d1fa
health HEALTH_ERR
no osds
monmap e1: 1 mons at {mon-1=192.168.50.123:6789/0}
election epoch 3, quorum 0 mon-1
osdmap e1: 0 osds: 0 up, 0 in
flags sortbitwise
pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects
0 kB used, 0 kB / 0 kB avail
64 creating
开始部署OSD:
[root@mon-1 cluster]# ceph-deploy --overwrite-conf osd prepare mon-1:/dev/sdb mon-1:/dev/sdc mon-1:/dev/sdd osd-1:/dev/sdb osd-1:/dev/sdc osd-1:/dev/sdd osd-2:/dev/sdb osd-2:/dev/sdc osd-2:/dev/sdd --zap-disk
这里如果部署osd-2有问题,就删除程序和目录,重新分区以后成功创建osd-2
ceph-deploy --overwrite-conf osd prepare osd-2:/dev/sdb osd-2:/dev/sdc osd-2:/dev/sdd
[root@mon-1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
fd0 2:0 1 4K 0 disk
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 500M 0 part /boot
└─sda2 8:2 0 19.5G 0 part
├─centos-root 253:0 0 18.5G 0 lvm /
└─centos-swap 253:1 0 1G 0 lvm [SWAP]
sdb 8:16 0 2T 0 disk
├─sdb1 8:17 0 2T 0 part /var/lib/ceph/osd/ceph-0
└─sdb2 8:18 0 5G 0 part
sdc 8:32 0 2T 0 disk
├─sdc1 8:33 0 2T 0 part /var/lib/ceph/osd/ceph-1
└─sdc2 8:34 0 5G 0 part
sdd 8:48 0 2T 0 disk
├─sdd1 8:49 0 2T 0 part /var/lib/ceph/osd/ceph-2
└─sdd2 8:50 0 5G 0 part
sr0 11:0 1 603M 0 rom
[root@osd-1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
fd0 2:0 1 4K 0 disk
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 500M 0 part /boot
└─sda2 8:2 0 19.5G 0 part
├─centos-root 253:0 0 18.5G 0 lvm /
└─centos-swap 253:1 0 1G 0 lvm [SWAP]
sdb 8:16 0 2T 0 disk
├─sdb1 8:17 0 2T 0 part /var/lib/ceph/osd/ceph-3
└─sdb2 8:18 0 5G 0 part
sdc 8:32 0 2T 0 disk
├─sdc1 8:33 0 2T 0 part /var/lib/ceph/osd/ceph-4
└─sdc2 8:34 0 5G 0 part
sdd 8:48 0 2T 0 disk
├─sdd1 8:49 0 2T 0 part /var/lib/ceph/osd/ceph-5
└─sdd2 8:50 0 5G 0 part
sr0 11:0 1 603M 0 rom
[root@osd-2 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
fd0 2:0 1 4K 0 disk
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 500M 0 part /boot
└─sda2 8:2 0 19.5G 0 part
├─centos-root 253:0 0 18.5G 0 lvm /
└─centos-swap 253:1 0 1G 0 lvm [SWAP]
sdb 8:16 0 2T 0 disk
├─sdb1 8:17 0 2T 0 part /var/lib/ceph/osd/ceph-6
└─sdb2 8:18 0 5G 0 part
sdc 8:32 0 2T 0 disk
├─sdc1 8:33 0 2T 0 part /var/lib/ceph/osd/ceph-7
└─sdc2 8:34 0 5G 0 part
sdd 8:48 0 2T 0 disk
├─sdd1 8:49 0 2T 0 part /var/lib/ceph/osd/ceph-8
└─sdd2 8:50 0 5G 0 part
sr0 11:0 1 603M 0 rom
这里有个WARN,去掉这个WARN只需要增加rbd池的PG就好
[root@mon-1 cluster]# ceph osd pool set rbd pg_num 128
set pool 0 pg_num to 128
[root@mon-1 cluster]# ceph osd pool set rbd pgp_num 128
set pool 0 pgp_num to 128
[root@mon-1 cluster]# ceph -s
cluster 0865fe85-1655-4208-bed6-274cae945746
health HEALTH_OK
monmap e3: 2 mons at {mon-1=192.168.50.123:6789/0,osd-1=192.168.50.124:6789/0}
election epoch 30, quorum 0,1 mon-1,osd-1
osdmap e58: 9 osds: 9 up, 9 in
flags sortbitwise
pgmap v161: 128 pgs, 1 pools, 0 bytes data, 0 objects
310 MB used, 18377 GB / 18378 GB avail
128 active+clean
给各个节点推送config配置
请不要直接修改某个节点的/etc/ceph/ceph.conf文件,而是要去部署节点(此处为mon-1:/root/cluster/ceph.conf)目录下修改。因为节点达到几十个的时候,不可能一个个去修改,采用推送的方式快捷安全!
修改完毕后,执行如下指令,将conf文件推送至各个节点:
[root@mon-1 cluster]# ceph-deploy --overwrite-conf config push mon-1 osd-1 osd-2
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.36): /usr/bin/ceph-deploy config push mon-1 osd-1 osd-2
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x1a34e18>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] client : ['mon-1', 'osd-1', 'osd-2']
[ceph_deploy.cli][INFO ] func : <function config at 0x1964f50>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.config][DEBUG ] Pushing config to mon-1
[mon-1][DEBUG ] connected to host: mon-1
[mon-1][DEBUG ] detect platform information from remote host
[mon-1][DEBUG ] detect machine type
[mon-1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[root@mon-1 cluster]# ceph-deploy --overwrite-conf config push mon-1 osd-1 osd-2
[ceph_deploy.config][DEBUG ] Pushing config to osd-1
[osd-1][DEBUG ] connected to host: osd-1
[osd-1][DEBUG ] detect platform information from remote host
[osd-1][DEBUG ] detect machine type
[osd-1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.config][DEBUG ] Pushing config to osd-2
[osd-2][DEBUG ] connected to host: osd-2
[osd-2][DEBUG ] detect platform information from remote host
[osd-2][DEBUG ] detect machine type
[osd-2][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
此时,需要重启各个节点的monitor服务
mon和osd的启动方式
#mon-1为各个monitor所在节点的主机名。
systemctl start ceph-mon@mon-1.service
systemctl restart ceph-mon@mon-1.service
systemctl stop ceph-mon@mon-1.service
#0为该节点的OSD的id,可以通过`ceph osd tree`查看
systemctl start/stop/restart ceph-osd@0.service
遇到的报错
1. Monitor clock skew detected
http://www.linuxidc.com/Linux/2017-03/141309.htm
1. Monitor clock skew detected
[root@mon-1 cluster]# ceph -s
cluster f25ad2c5-fd2a-4fcc-a522-344eb498fee5
health HEALTH_ERR
clock skew detected on mon.osd-2
64 pgs are stuck inactive for more than 300 seconds
64 pgs stuck inactive
no osds
Monitor clock skew detected
添加配置参数:
vim /etc/ceph/ceph.conf
mon clock drift allowed = 2
mon clock drift warn backoff = 30
同步配置文件
ceph-deploy --overwrite-conf admin osd-1 osd-2
重启mon服务
systemctl restart ceph-mon@osd-2.service
问题总结:
本问题主要是mon节点服务器时间偏差比较大导致。本次遇到问题的是测试环境,通过修改ceph对时间偏差的阈值,规避了告警信息;线上业务环境请注意排查服务器时间同步问题。
本文永久更新链接地址:http://www.linuxidc.com/Linux/2017-03/141308.htm