GlusterFS Deployment and Installation

Create two virtual machines and add two extra disks to each.

1. Disable iptables and SELinux
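
A minimal sketch for CentOS 7, assuming firewalld is the active firewall front end (substitute the iptables service if you run it directly):

systemctl stop firewalld && systemctl disable firewalld
setenforce 0                                                          #disable SELinux until reboot
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   #persist across reboots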

2. Edit /etc/hosts and set the hostnames

192.168.79.101 node1
192.168.79.102 node2
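
One way to apply this, run on node1 (use set-hostname node2 on the other machine):

hostnamectl set-hostname node1
cat >> /etc/hosts <<EOF
192.168.79.101 node1
192.168.79.102 node2
EOF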

3. Synchronize time on both virtual machines

yum install ntp -y
ntpdate ntp1.aliyun.com
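
ntpdate is a one-shot sync; to keep the clocks aligned you could run ntpd, or schedule a periodic resync with cron (an optional sketch):

(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1") | crontab -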

4. Install the Gluster repository (centos-release-gluster) and GlusterFS

yum install centos-release-gluster
yum install glusterfs-server
[root@localhost ~]# glusterfs -V
glusterfs 4.1.2
Repository revision: git://git.gluster.org/glusterfs.git
Copyright (c) 2006-2016 Red Hat, Inc. <https://www.gluster.org/>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
It is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3
or later), or the GNU General Public License, version 2 (GPLv2),
in all cases as published by the Free Software Foundation.

5. Start the glusterd service and enable it at boot

[root@localhost ~]# systemctl start glusterd
[root@localhost ~]# systemctl enable glusterd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterd.service to /usr/lib/systemd/system/glusterd.service.

6. Add the storage hosts to the trusted storage pool

[root@localhost ~]# gluster peer probe node2
peer probe: success.
[root@localhost ~]# gluster peer status
Number of Peers: 1

Hostname: node2
Uuid: 6e9671d7-be94-433b-a3fb-46d52db77e43
State: Peer in Cluster (Connected)

==From any one GlusterFS server you can probe every node except the local one; once the probes complete, peer status can be checked from any node.==

7. Format the newly added disks on all nodes

[root@localhost ~]# mkfs.xfs -f /dev/sdb
meta-data=/dev/sdb               isize=512    agcount=4, agsize=131072 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=524288, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@localhost ~]# mkfs.xfs -f /dev/sdc
meta-data=/dev/sdc               isize=512    agcount=4, agsize=131072 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=524288, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

8. Mount the newly added disks (run on every node) and configure them to mount automatically at boot

mkdir -p /storage/brick{1,2}
mount /dev/sdb /storage/brick1
mount /dev/sdc /storage/brick2
[root@localhost ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   17G  1.9G   16G  12% /
devtmpfs                 485M     0  485M   0% /dev
tmpfs                    496M     0  496M   0% /dev/shm
tmpfs                    496M  7.1M  489M   2% /run
tmpfs                    496M     0  496M   0% /sys/fs/cgroup
/dev/sda1               1014M  130M  885M  13% /boot
tmpfs                    100M     0  100M   0% /run/user/0
/dev/sdb                 2.0G   33M  2.0G   2% /storage/brick1
/dev/sdc                 2.0G   33M  2.0G   2% /storage/brick2

echo "/dev/sdb /storage/brick1 xfs defaults 0 0" >> /etc/fstab
echo "/dev/sdc /storage/brick2 xfs defaults 0 0" >> /etc/fstab

[root@localhost ~]# tail -2 /etc/fstab
/dev/sdb /storage/brick1 xfs defaults 0 0
/dev/sdc /storage/brick2 xfs defaults 0 0
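
Device names like /dev/sdb can change across reboots when disks are added or re-enumerated. An optional, more robust variant (a sketch, not from the original post) keys the fstab entries on filesystem UUIDs:

blkid -s UUID -o value /dev/sdb   #print the filesystem UUID
echo "UUID=$(blkid -s UUID -o value /dev/sdb) /storage/brick1 xfs defaults 0 0" >> /etc/fstab
echo "UUID=$(blkid -s UUID -o value /dev/sdc) /storage/brick2 xfs defaults 0 0" >> /etc/fstab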

9. Create volumes and related operations

  1. Distributed: files are spread across the bricks in the volume by a hash algorithm.
  2. Replicated: similar to RAID 1; the replica count must equal the number of storage servers whose bricks make up the volume; provides high availability.
  3. Striped: similar to RAID 0; the stripe count must equal the number of storage servers whose bricks make up the volume; files are split into chunks stored round-robin across the bricks, so concurrency is at chunk granularity and large files perform well.
  4. Distributed Striped: the number of storage servers whose bricks make up the volume must be a multiple of the stripe count; combines distribution and striping.
  5. Distributed Replicated: the number of storage servers whose bricks make up the volume must be a multiple of the replica count; combines distribution and replication.
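
All of the examples below are instances of the same create command; its general shape is roughly:

gluster volume create <VOLNAME> [replica <N>] [stripe <N>] <server>:<brick-path> ... [force]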
  • Create a distributed volume

[root@node1 ~]# gluster volume create gv1 node1:/storage/brick1 node2:/storage/brick1 force
volume create: gv1: success: please start the volume to access data

#Start the newly created volume
[root@node1 ~]# gluster volume start gv1
volume start: gv1: success

#Check on node2
[root@node2 ~]# gluster volume info
Volume Name: gv1
Type: Distribute
Volume ID: 5d0cc54c-cb65-4af9-8541-3b5e8d772e45
Status: Started
Snapshot Count: 0
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: node1:/storage/brick1
Brick2: node2:/storage/brick1
Options Reconfigured:
transport.address-family: inet
nfs.disable: on

#Mount the volume to a directory
[root@node1 ~]# mkdir /mnt/gv1
[root@node1 ~]# mount -t glusterfs 127.0.0.1:/gv1 /mnt/gv1
[root@node1 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   17G  1.9G   16G  12% /
devtmpfs                 485M     0  485M   0% /dev
tmpfs                    496M     0  496M   0% /dev/shm
tmpfs                    496M  7.1M  489M   2% /run
tmpfs                    496M     0  496M   0% /sys/fs/cgroup
/dev/sda1               1014M  130M  885M  13% /boot
tmpfs                    100M     0  100M   0% /run/user/0
/dev/sdb                 2.0G   33M  2.0G   2% /storage/brick1
/dev/sdc                 2.0G   33M  2.0G   2% /storage/brick2
127.0.0.1:/gv1           4.0G  106M  3.9G   3% /mnt/gv1

#Mount using NFS
mount -o mountproto=tcp -t nfs node1:/gv1 /mnt
#The test fails with the following error:
[root@localhost ~]# mount -o mountproto=tcp -t nfs 192.168.79.101:/gv1 /mnt
mount.nfs: requested NFS version or transport protocol is not supported
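
This failure is expected with the configuration shown above: the volume info output lists nfs.disable: on, so Gluster's built-in NFS server (gNFS) is not exporting the volume, and in GlusterFS 4.x gNFS is deprecated in favor of NFS-Ganesha and may not even be installed. If your build still ships gNFS, re-enabling the export would look like this (a sketch; rpcbind must be running):

gluster volume set gv1 nfs.disable off   #re-enable the legacy gNFS export for this volume
showmount -e node1                       #verify the export is now visible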
  • Create a replicated volume

[root@node1 ~]# gluster volume create gv2 replica 2 node1:/storage/brick2 node2:/storage/brick2 force
volume create: gv2: success: please start the volume to access data
[root@node1 ~]# gluster volume start gv2
volume start: gv2: success
[root@node1 ~]# mkdir /mnt/gv2
[root@node1 ~]# mount -t glusterfs 127.0.0.1:/gv2 /mnt/gv2
[root@node1 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   17G  1.9G   16G  12% /
devtmpfs                 485M     0  485M   0% /dev
tmpfs                    496M     0  496M   0% /dev/shm
tmpfs                    496M  7.1M  489M   2% /run
tmpfs                    496M     0  496M   0% /sys/fs/cgroup
/dev/sda1               1014M  130M  885M  13% /boot
tmpfs                    100M     0  100M   0% /run/user/0
/dev/sdb                 2.0G   33M  2.0G   2% /storage/brick1
/dev/sdc                 2.0G   33M  2.0G   2% /storage/brick2
127.0.0.1:/gv1           4.0G  106M  3.9G   3% /mnt/gv1
127.0.0.1:/gv2           2.0G   53M  2.0G   3% /mnt/gv2
  • Create a striped volume

[root@node1 brick1]# gluster volume create gv3 stripe 2 node1:/storage/brick1 node2:/storage/brick1 force
volume create: gv3: success: please start the volume to access data
[root@node1 brick1]# gluster volume start gv3
volume start: gv3: success
[root@node1 brick1]# gluster volume info
Volume Name: gv3
Type: Stripe
Volume ID: e5b33b2f-5572-4518-b116-e24e843085b3
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: node1:/storage/brick1
Brick2: node2:/storage/brick1
Options Reconfigured:
transport.address-family: inet
nfs.disable: on

[root@node1 brick1]# mkdir /mnt/gv3
[root@node1 brick1]# mount -t glusterfs 127.0.0.1:/gv3 /mnt/gv3
[root@node1 brick1]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   17G  1.9G   16G  12% /
devtmpfs                 485M     0  485M   0% /dev
tmpfs                    496M     0  496M   0% /dev/shm
tmpfs                    496M  7.1M  489M   2% /run
tmpfs                    496M     0  496M   0% /sys/fs/cgroup
/dev/sda1               1014M  130M  885M  13% /boot
tmpfs                    100M     0  100M   0% /run/user/0
/dev/sdb                 2.0G   33M  2.0G   2% /storage/brick1
/dev/sdc                 2.0G   33M  2.0G   2% /storage/brick2
127.0.0.1:/gv3           4.0G  106M  3.9G   3% /mnt/gv3

==Error: after running gluster volume delete gv1 and then creating gv3, mounting gv3 and running ls in the mount directory failed with: ls: reading directory .: Stale file handle==

Solution: delete gv3, remove the .glusterfs directory under /storage/brick1, then recreate gv3.
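
A sketch of that cleanup: stop and delete the volume once, then clean the brick directory on every node. The setfattr lines are not in the original post, but they are standard practice when reusing a brick path; they clear the volume ID and gfid that Gluster records in extended attributes:

gluster volume stop gv3
gluster volume delete gv3
rm -rf /storage/brick1/.glusterfs                      #run on each node
setfattr -x trusted.glusterfs.volume-id /storage/brick1
setfattr -x trusted.gfid /storage/brick1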

  • Distributed-replicated volume

# Build on the existing volume by adding replicated bricks
[root@node1 gv1]# gluster volume stop gv1
Stopping volume will make its data inaccessible. Do you want to continue? (y/n) y
volume stop: gv1: success
[root@node1 gv1]# gluster volume add-brick gv1 replica 2 node1:/storage/brick2 node2:/storage/brick2 force
volume add-brick: success

==Note: when adding bricks to a distributed-replicated or distributed-striped volume, the number of bricks added must be a multiple of the replica or stripe count. For example, for a distributed-replicated volume with replica 2, bricks must be added in groups of 2, 4, 6, 8, and so on.==

[root@node1 gv1]# gluster volume start gv1
volume start: gv1: success
[root@node1 gv1]# gluster volume status
Status of volume: gv1
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick node1:/storage/brick1                 49152     0          Y       18828
Brick node2:/storage/brick1                 49152     0          Y       18054
Brick node1:/storage/brick2                 49153     0          Y       19033
Brick node2:/storage/brick2                 49153     0          Y       18156
Self-heal Daemon on localhost               N/A       N/A        Y       19056
Self-heal Daemon on node2                   N/A       N/A        Y       18179

Task Status of Volume gv1
------------------------------------------------------------------------------
There are no active volume tasks

[root@node1 gv1]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   17G  1.9G   16G  12% /
devtmpfs                 485M     0  485M   0% /dev
tmpfs                    496M     0  496M   0% /dev/shm
tmpfs                    496M  7.1M  489M   2% /run
tmpfs                    496M     0  496M   0% /sys/fs/cgroup
/dev/sda1               1014M  130M  885M  13% /boot
/dev/sdb                 2.0G   98M  1.9G   5% /storage/brick1
/dev/sdc                 2.0G   33M  2.0G   2% /storage/brick2
tmpfs                    100M     0  100M   0% /run/user/0
127.0.0.1:/gv1           4.0G  171M  3.9G   5% /mnt/gv1

[root@node1 gv1]# gluster volume info
Volume Name: gv1
Type: Distributed-Replicate
Volume ID: 5ea20894-e15f-4e55-b0f3-17cfbc09c0e2
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: node1:/storage/brick1
Brick2: node2:/storage/brick1
Brick3: node1:/storage/brick2
Brick4: node2:/storage/brick2
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off

==Note: after new bricks are added, GlusterFS needs a rebalance to keep data placement sound; otherwise newly written data keeps following the old hash layout and never lands on the new bricks.==
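
For context, the brick listings below assume a handful of empty test files were written through the mount point beforehand; the original post omits that step, but it would look something like:

touch /mnt/gv1/{1..8}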

[root@node2 gv1]# gluster volume rebalance gv1 start
volume rebalance: gv1: success: Rebalance on gv1 has been started successfully. Use rebalance status command to check status of the rebalance process.
ID: cf72d1b0-599c-40f6-a2a1-c2f9d178da40
[root@node2 gv1]# ll /storage/brick1
total 0
-rw-r--r-- 2 root root 0 Aug 21 15:02 1
-rw-r--r-- 2 root root 0 Aug 21 15:05 5
-rw-r--r-- 2 root root 0 Aug 21 15:05 7
-rw-r--r-- 2 root root 0 Aug 21 15:05 8
[root@node2 gv1]# ll /storage/brick2
total 0
-rw-r--r-- 2 root root 0 Aug 21 15:02 2
-rw-r--r-- 2 root root 0 Aug 21 15:02 3
-rw-r--r-- 2 root root 0 Aug 21 15:02 4
-rw-r--r-- 2 root root 0 Aug 21 15:05 6
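
As the rebalance output suggests, progress can be checked at any time with the status subcommand:

gluster volume rebalance gv1 status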
  • Remove bricks (remove-brick)

[root@node2 gv1]# gluster volume stop gv1
Stopping volume will make its data inaccessible. Do you want to continue? (y/n) y
volume stop: gv1: success
[root@node2 gv1]# gluster volume remove-brick gv1 replica 2 node1:/storage/brick1 node2:/storage/brick1 force
Remove-brick force will not migrate files from the removed bricks, so they will no longer be available on the volume.
Do you want to continue? (y/n) y
volume remove-brick commit force: success
#Data on the removed bricks is no longer available through the volume; the bricks can be added back later
  • Delete a volume

gluster volume stop gv1
gluster volume delete gv1


Reposted from www.cnblogs.com/banyungong666/p/9644917.html