ceph集群部署
ceph理解:
Ceph是一个分布式存储,可以提供对象存储、块存储和文件存储,其中对象存储和块存储可以很好地和各大云平台集成。其他具体介绍可见官网简介:http://docs.ceph.com/docs/master/start/intro/
三个节点地址及主机名对应:
192.168.10.100 node01
192.168.10.101 node02
192.168.10.102 node03
软件环境
操作系统:CentOS 7.3
Openstack:N
Ceph:Jewel
[root@centos ~]# hostnamectl set-hostname node01
[root@centos ~]# hostnamectl set-hostname node02
[root@centos ~]# hostnamectl set-hostname node03
关闭firewalld、修改selinux
[root@node01 ~]# systemctl stop firewalld
[root@node01 ~]# systemctl disable firewalld
[root@node01 ~]# setenforce 0
[root@node01 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
yum install wget vim -y
[root@node01 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.100 node01
192.168.10.101 node02
192.168.10.102 node03
ssh实现三个节点免密登陆
[root@node01 ~]# ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
[root@node01 ~]# ssh-copy-id root@node01
[root@node01 ~]# ssh-copy-id root@node02
[root@node01 ~]# ssh-copy-id root@node03
三个节点:
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
[root@node01 yum.repos.d]# sed -i '/aliyuncs/d' /etc/yum.repos.d/CentOS-Base.repo
[root@node01 yum.repos.d]# sed -i '/aliyuncs/d' /etc/yum.repos.d/epel.repo
[root@node01 yum.repos.d]# sed -i 's/$releasever/7/g' /etc/yum.repos.d/CentOS-Base.repo
[root@node01 yum.repos.d]# cat ceph.repo
[ceph]
name=ceph
baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/noarch/
gpgcheck=0
[root@node01 yum.repos.d]# yum clean all
[root@node01 yum.repos.d]# yum repolist
[root@node01 yum.repos.d]# scp /etc/yum.repos.d/* node02:/etc/yum.repos.d/
[root@node01 yum.repos.d]# scp /etc/yum.repos.d/* node03:/etc/yum.repos.d/
[root@node01 yum.repos.d]# scp /etc/hosts node02:/etc/
[root@node01 yum.repos.d]# scp /etc/hosts node03:/etc/
安装ntp,在所有ceph节点上执行
[root@node01 yum.repos.d]# yum install ntp ntpdate -y
[root@node01 yum.repos.d]# systemctl start ntpd
[root@node01 yum.repos.d]# systemctl enable ntpd
[root@node01 yum.repos.d]# ntpdate cn.ntp.org.cn(注意:若 ntpd 已在运行,ntpdate 会因 123 端口被占用而失败,应在启动 ntpd 之前执行,或先临时停止 ntpd)
[root@node01 ~]# yum install ceph-deploy ceph -y(三个节点都执行)
创建集群,生成一个新的ceph集群,集群包括ceph配置文件以及monitor的密钥环。
[root@node01 ~]# mkdir my-cluster
[root@node01 ~]# cd my-cluster/
[root@node01 my-cluster]# ceph-deploy new node01 node02 node03
安装ceph软件包(如果安装过程出现问题,可以重新执行得以解决)
yum -y install ceph ceph-radosgw ceph-release
[root@node01 my-cluster]# ceph-deploy install node01 node02 node03
在node01上创建第一个ceph monitor
ceph-deploy mon create-initial
[root@node01 my-cluster]# ceph-deploy mon create-initial
执行完命令后,当前目录会生成如下几个keyring:
• {cluster-name}.client.admin.keyring
• {cluster-name}.bootstrap-osd.keyring
• {cluster-name}.bootstrap-mds.keyring
• {cluster-name}.bootstrap-rgw.keyring
创建集群(注意:此步骤与前文的 ceph-deploy new node01 node02 node03 重复,集群已创建,无需再次执行)
[root@node01 my-cluster]# ceph-deploy new node01 node02 node03
在三个节点上创建osd(以下命令在node01上执行即可,一次性为所有节点创建)
ceph-deploy disk list node01(列出该节点的磁盘)
[root@node01 my-cluster]# ceph-deploy osd create node01:/dev/sdb node01:/dev/sdc node02:/dev/sdb node02:/dev/sdc node03:/dev/sdb node03:/dev/sdc
查看集群状态
[root@node01 my-cluster]# ceph -s
cluster e1923c3b-8b3b-4284-9d18-33539e38b040
health HEALTH_WARN
clock skew detected on mon.node02, mon.node03
Monitor clock skew detected
monmap e1: 3 mons at
{node01=192.168.10.100:6789/0,node02=192.168.10.101:6789/0,node03=192.168.10.102:6789/0}
election epoch 6, quorum 0,1,2 node01,node02,node03
osdmap e29: 6 osds: 6 up, 6 in
flags sortbitwise,require_jewel_osds
pgmap v64: 64 pgs, 1 pools, 0 bytes data, 0 objects
202 MB used, 149 GB / 149 GB avail
64 active+clean