How to quickly remove all OSDs from a Ceph cluster, wipe the disks' partition tables and data, and re-add them

My Ceph cluster has 4 servers and 16 OSDs. For reasons of our own, none of the data in the cluster is needed any more, but the cluster itself has to be kept for a new application. The hosts and the current OSD layout are as follows:
[root@ceph-host-01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.30.1.221 ceph-host-01
10.30.1.222 ceph-host-02
10.30.1.223 ceph-host-03
10.30.1.224 ceph-host-04
 
[root@ceph-host-01 ceph-cluster]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME             STATUS REWEIGHT PRI-AFF
-1       1.23207 root default
-3       0.30800     host ceph-host-01
 0   hdd 0.07700         osd.0             up  1.00000 1.00000
 4   hdd 0.07700         osd.4             up  1.00000 1.00000
 8   hdd 0.07700         osd.8             up  1.00000 1.00000
12   hdd 0.07700         osd.12            up  1.00000 1.00000
-5       0.30807     host ceph-host-02
 1   hdd 0.07700         osd.1             up  1.00000 1.00000
 5   hdd 0.07700         osd.5             up  1.00000 1.00000
 9   hdd 0.07700         osd.9             up  1.00000 1.00000
15   hdd 0.07709         osd.15            up  1.00000 1.00000
-7       0.30800     host ceph-host-03
 2   hdd 0.07700         osd.2             up  1.00000 1.00000
 6   hdd 0.07700         osd.6             up  1.00000 1.00000
10   hdd 0.07700         osd.10            up  1.00000 1.00000
13   hdd 0.07700         osd.13            up  1.00000 1.00000
-9       0.30800     host ceph-host-04
 3   hdd 0.07700         osd.3             up  1.00000 1.00000
 7   hdd 0.07700         osd.7             up  1.00000 1.00000
11   hdd 0.07700         osd.11            up  1.00000 1.00000
14   hdd 0.07700         osd.14            up  1.00000 1.00000
 
Below is the script that removes all of the OSDs, one loop per host. The ceph commands run on the deploy node (ceph-host-01); the per-host steps are done over ssh.
 
# OSDs on ceph-host-01
for i in 0 4 8 12;do
    ceph osd out osd.${i}                                 # mark the OSD out of the data distribution
    ssh ceph-host-01 systemctl stop ceph-osd@${i}         # stop the OSD daemon
    ssh ceph-host-01 systemctl disable ceph-osd@${i}      # keep it from starting at boot
    ceph osd crush remove osd.${i}                        # remove it from the CRUSH map
    ceph auth del osd.${i}                                # delete its cephx key
    ceph osd rm osd.${i}                                  # remove it from the OSD map
    ssh ceph-host-01 umount /var/lib/ceph/osd/ceph-${i}   # unmount its data directory
done
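
On Luminous and later releases, the ceph osd purge command combines the crush remove, auth del and osd rm steps into a single command. A minimal sketch of the same loop using it (assuming the cluster is on Luminous or newer; otherwise stick with the three separate commands above):

# same teardown for ceph-host-01, using ceph osd purge (Luminous and later)
for i in 0 4 8 12;do
    ceph osd out osd.${i}
    ssh ceph-host-01 systemctl stop ceph-osd@${i}
    ssh ceph-host-01 systemctl disable ceph-osd@${i}
    # purge removes the OSD from the CRUSH map, deletes its key and removes it from the OSD map
    ceph osd purge osd.${i} --yes-i-really-mean-it
    ssh ceph-host-01 umount /var/lib/ceph/osd/ceph-${i}
done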
 
 
for i in 1 5 9 15;do
    ceph osd out osd.${i}
    ssh ceph-host-02 systemctl stop ceph-osd@${i}
    ssh ceph-host-02 systemctl disable ceph-osd@${i}
    ceph osd crush remove osd.${i}
    ceph auth del osd.${i}
    ceph osd rm osd.${i}
    ssh ceph-host-02 umount /var/lib/ceph/osd/ceph-${i}
done
 
 
for i in 2 6 10 13;do
    ceph osd out osd.${i}
    ssh ceph-host-03 systemctl stop "ceph-osd@${i}"
    ssh ceph-host-03 systemctl disable "ceph-osd@${i}"
    ceph osd crush remove osd.${i}
    ceph auth del osd.${i}
    ceph osd rm osd.${i}
    ssh ceph-host-03 umount /var/lib/ceph/osd/ceph-${i}
done
 
 
for i in 3 7 11 14;do
    ceph osd out osd.${i}
    ssh ceph-host-04 systemctl stop ceph-osd@${i}
    ssh ceph-host-04 systemctl disable ceph-osd@${i}
    ceph osd crush remove osd.${i}
    ceph auth del osd.${i}
    ceph osd rm osd.${i}
    ssh ceph-host-04 umount /var/lib/ceph/osd/ceph-${i}
done
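
The four loops differ only in the host name and the list of OSD IDs, so they can also be collapsed into one pass over a host-to-OSD map. A sketch, assuming bash 4+ for the associative array, with the hosts and IDs of this cluster:

declare -A host_osds=(
  [ceph-host-01]="0 4 8 12"
  [ceph-host-02]="1 5 9 15"
  [ceph-host-03]="2 6 10 13"
  [ceph-host-04]="3 7 11 14"
)
for host in "${!host_osds[@]}";do
  for i in ${host_osds[$host]};do
    ceph osd out osd.${i}
    ssh ${host} systemctl stop ceph-osd@${i}
    ssh ${host} systemctl disable ceph-osd@${i}
    ceph osd crush remove osd.${i}
    ceph auth del osd.${i}
    ceph osd rm osd.${i}
    ssh ${host} umount /var/lib/ceph/osd/ceph-${i}
  done
done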
 
Below is the script that wipes all of the disks and re-adds the OSDs to the cluster. For each host it first force-removes the logical volumes in the leftover ceph-* volume groups (so that zapping the underlying devices does not fail), then uses ceph-deploy to zap each data disk (/dev/vdb through /dev/vde) and create a fresh OSD on it. Run it from the ceph-deploy working directory on the deploy node (ceph-cluster here).
for x in `seq 1 4`;do
  # collect the names of the ceph-* volume groups left behind by the old OSDs
  vgname=`ssh ceph-host-0${x} lvdisplay | grep -i 'ceph' | grep -i 'vg' | awk '{print $3}'`
  # force-remove every logical volume in those volume groups
  for s in ${vgname};do
    ssh ceph-host-0${x} lvremove ${s} -f
  done
  # zap each data disk and create a new OSD on it
  for i in b c d e;do
    ceph-deploy disk zap ceph-host-0${x} /dev/vd${i}
    ceph-deploy osd create --data /dev/vd${i} ceph-host-0${x}
  done
done
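
If ceph-deploy is not available, the zap-and-recreate step can also be done per host with ceph-volume directly. This is only a sketch under the same assumptions as above (data disks /dev/vdb through /dev/vde, and a working /etc/ceph/ceph.conf plus bootstrap-osd keyring on every host):

for x in `seq 1 4`;do
  for i in b c d e;do
    # --destroy wipes the LVM metadata and the partition table on the device
    ssh ceph-host-0${x} ceph-volume lvm zap /dev/vd${i} --destroy
    # create a new OSD on the freshly wiped disk
    ssh ceph-host-0${x} ceph-volume lvm create --data /dev/vd${i}
  done
done

Either way, once the loop finishes, ceph osd tree and ceph -s should show all 16 OSDs up and in again.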
 


Reposted from www.cnblogs.com/dexter-wang/p/12320700.html