2 GlusterFS servers

For this lab, we disabled both the firewall and SELinux on the two servers :

[root@gluster1 ~]# egrep "^SELINUX=" /etc/sysconfig/selinux
SELINUX=disabled
[root@gluster1 ~]# systemctl is-enabled firewalld
disabled

[root@gluster2 ~]# egrep "^SELINUX=" /etc/sysconfig/selinux
SELINUX=disabled
[root@gluster2 ~]# systemctl is-enabled firewalld
disabled
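
If they are not already disabled, something like the following does it (a sketch; note that “setenforce 0” only switches SELinux to permissive mode until the “disabled” setting takes effect at the next reboot). The same commands apply on “gluster2” :

[root@gluster1 ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
[root@gluster1 ~]# setenforce 0
[root@gluster1 ~]# systemctl stop firewalld && systemctl disable firewalld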

Install the “wget” package :

[root@gluster1 ~]# yum -y install wget
[root@gluster2 ~]# yum -y install wget

EPEL installation

  • Retrieve EPEL repository :
[root@gluster1 ~]# wget http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm
[root@gluster2 ~]# wget http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm
  • Install EPEL :
[root@gluster1 ~]# rpm -ivh epel-release-7-5.noarch.rpm
[root@gluster2 ~]# rpm -ivh epel-release-7-5.noarch.rpm
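
On CentOS 7, the same result can be obtained without “wget”, since the “extras” repository ships an “epel-release” package :

[root@gluster1 ~]# yum -y install epel-release
[root@gluster2 ~]# yum -y install epel-release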

GlusterFS installation

  • Retrieve GlusterFS repository :
[root@gluster1 ~]# wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/RHEL/glusterfs-epel.repo
[root@gluster1 ~]# ls /etc/yum.repos.d/glusterfs-epel.repo
 /etc/yum.repos.d/glusterfs-epel.repo
[root@gluster2 ~]# wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/RHEL/glusterfs-epel.repo
[root@gluster2 ~]# ls /etc/yum.repos.d/glusterfs-epel.repo
 /etc/yum.repos.d/glusterfs-epel.repo
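
As an alternative on CentOS, the Storage SIG provides a release package that configures a GlusterFS repository, avoiding the manual “.repo” download (package available from the CentOS “extras” repository) :

[root@gluster1 ~]# yum -y install centos-release-gluster
[root@gluster2 ~]# yum -y install centos-release-gluster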

Update both servers :

[root@gluster1 ~]# yum -y update
[root@gluster2 ~]# yum -y update

Install the GlusterFS server package :

[root@gluster1 ~]# yum -y install glusterfs-server
[root@gluster2 ~]# yum -y install glusterfs-server

Now, start the service and enable it so that it comes back after a reboot :

[root@gluster1 ~]# systemctl start glusterd && systemctl enable glusterd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterd.service to /usr/lib/systemd/system/glusterd.service.
[root@gluster2 ~]# systemctl start glusterd && systemctl enable glusterd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterd.service to /usr/lib/systemd/system/glusterd.service.
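
A quick check that the daemon is really active and enabled on both nodes :

[root@gluster1 ~]# systemctl is-active glusterd
[root@gluster1 ~]# systemctl is-enabled glusterd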

Update “/etc/hosts” on both servers so that they resolve each other by name :

[root@gluster1 ~]# grep gluster /etc/hosts
192.168.1.115   gluster1        gluster1.argonay.wou
192.168.1.116   gluster2        gluster2.argonay.wou
[root@gluster2 ~]# grep gluster /etc/hosts
192.168.1.115   gluster1        gluster1.argonay.wou
192.168.1.116   gluster2        gluster2.argonay.wou

Probe each server from the other so that they join the same trusted storage pool :

[root@gluster1 ~]# gluster peer probe gluster2
peer probe: success.
[root@gluster2 ~]# gluster peer probe gluster1
peer probe: success. Host gluster1 port 24007 already in peer list

Check the peer status on the 2 servers :

[root@gluster1 ~]# gluster peer status
Number of Peers: 1

Hostname: gluster2
Uuid: 0e57f998-6871-4dd3-9644-e26b095db5b2
State: Peer in Cluster (Connected)
[root@gluster2 ~]# gluster peer status
Number of Peers: 1

Hostname: gluster1
Uuid: 3632b650-02f8-4490-9736-c1dea05b2dc6
State: Peer in Cluster (Connected)

List the storage pool from the 2 servers :

[root@gluster1 ~]# gluster pool list
UUID                                    Hostname        State
0e57f998-6871-4dd3-9644-e26b095db5b2    gluster2        Connected
3632b650-02f8-4490-9736-c1dea05b2dc6    localhost       Connected
[root@gluster2 ~]# gluster pool list
UUID                                    Hostname        State
3632b650-02f8-4490-9736-c1dea05b2dc6    gluster1        Connected
0e57f998-6871-4dd3-9644-e26b095db5b2    localhost       Connected

Create GlusterFS volume

We add a new virtual disk to each server from VMware ESXi (the 2 GlusterFS servers are in fact virtual machines), rescan the SCSI bus so that the new disk appears without a reboot, and format the whole device with XFS (no partition table, which matches the “/etc/fstab” entries below) :

“gluster1” :

[root@gluster1 ~]# for HOST in $(ls /sys/class/scsi_host) ; do echo '- - -'>"/sys/class/scsi_host/$HOST/scan" ; done
[root@gluster1 ~]# lsblk -f
NAME                 FSTYPE      LABEL             UUID                                   MOUNTPOINT
fd0
sda
├─sda1               xfs                           0312525b-f102-4817-a137-ee592d897213   /boot
└─sda2               LVM2_member                   t3Xf0A-EC1u-p4Hz-R8Br-sCrL-juAo-QnV0n4
  ├─centos_pc76-root xfs                           41173beb-d3f9-493a-827f-0c4f4f3a1215   /
  └─centos_pc76-swap swap                          6d0c68fa-7cf6-466e-8f2f-899622839a66   [SWAP]
sdb
sr0                  iso9660     CentOS 7.2 x86_64 2016-02-08-16-01-07-00
[root@gluster1 ~]# mkfs.xfs -f /dev/sdb
meta-data=/dev/sdb               isize=256    agcount=4, agsize=655360 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=0        finobt=0
data     =                       bsize=4096   blocks=2621440, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@gluster1 ~]# mkdir /gluster_volume
[root@gluster1 ~]# grep -i gluster /etc/fstab
# GlusterFS :
/dev/sdb        /gluster_volume         xfs     defaults        0 0
[root@gluster1 ~]# mount /gluster_volume

“gluster2” :

[root@gluster2 ~]# for HOST in $(ls /sys/class/scsi_host) ; do echo '- - -'>"/sys/class/scsi_host/$HOST/scan" ; done
[root@gluster2 ~]# lsblk -f
NAME                 FSTYPE      LABEL             UUID                                   MOUNTPOINT
fd0
sda
├─sda1               xfs                           d823e97f-7d91-437f-b879-7d9c0bdd1282   /boot
└─sda2               LVM2_member                   JqIBnI-ezrT-pNg1-pw9F-0CT7-BOBL-zbXdm2
  ├─centos_pc77-root xfs                           1b226b89-f5f0-453a-aaf3-c1f6186572cc   /
  └─centos_pc77-swap swap                          e0853f94-4998-447d-89ce-972dc6a4fd9f   [SWAP]
sdb
sr0                  iso9660     CentOS 7.2 x86_64 2016-02-08-16-01-07-00
[root@gluster2 ~]# mkfs.xfs -f /dev/sdb
meta-data=/dev/sdb               isize=256    agcount=4, agsize=655360 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=0        finobt=0
data     =                       bsize=4096   blocks=2621440, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@gluster2 ~]# mkdir /gluster_volume
[root@gluster2 ~]# grep -i gluster /etc/fstab
# GlusterFS :
/dev/sdb        /gluster_volume         xfs     defaults        0 0
[root@gluster2 ~]# mount /gluster_volume
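
Note : the “/dev/sdb” name is not guaranteed to be stable across reboots. A more robust “/etc/fstab” entry would reference the filesystem UUID instead; retrieve it with “blkid”, then replace the device with “UUID=<uuid-from-blkid>” in the fstab line shown above :

[root@gluster1 ~]# blkid -s UUID /dev/sdb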

Create a brick directory within the file system on both servers :

[root@gluster1 ~]# mkdir -p /gluster_volume/briks
[root@gluster2 ~]# mkdir -p /gluster_volume/briks

Create the GlusterFS volume, replicated across these 2 bricks (the command only needs to be run on one of the servers) :

[root@gluster1 ~]# gluster volume create my_volume replica 2 gluster1:/gluster_volume/briks gluster2:/gluster_volume/briks
volume create: my_volume: success: please start the volume to access data
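
Note : a pure 2-way replica can end up in split-brain if the 2 servers lose contact with each other. With a recent GlusterFS release and a third machine (a hypothetical “gluster3” here), a small arbiter brick avoids this; a sketch, not used in this lab :

[root@gluster1 ~]# gluster volume create my_volume replica 3 arbiter 1 gluster1:/gluster_volume/briks gluster2:/gluster_volume/briks gluster3:/gluster_volume/briks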

Then, start this volume :

[root@gluster1 ~]# gluster volume start my_volume
volume start: my_volume: success

The GlusterFS volume is now up and running :

  • On “gluster1” :
[root@gluster1 ~]# gluster volume info

Volume Name: my_volume
Type: Replicate
Volume ID: 32e5daf9-6533-42b6-8cb1-58dfa5786c42
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: gluster1:/gluster_volume/briks
Brick2: gluster2:/gluster_volume/briks
Options Reconfigured:
performance.readdir-ahead: on
[root@gluster1 ~]# gluster volume status
Status of volume: my_volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick gluster1:/gluster_volume/briks        49152     0          Y       2029
Brick gluster2:/gluster_volume/briks        49152     0          Y       2023
NFS Server on localhost                     2049      0          Y       2019
Self-heal Daemon on localhost               N/A       N/A        Y       2024
NFS Server on gluster2                      2049      0          Y       2009
Self-heal Daemon on gluster2                N/A       N/A        Y       2015

Task Status of Volume my_volume
------------------------------------------------------------------------------
There are no active volume tasks
  • On “gluster2” :
[root@gluster2 ~]# gluster volume info

Volume Name: my_volume
Type: Replicate
Volume ID: 32e5daf9-6533-42b6-8cb1-58dfa5786c42
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: gluster1:/gluster_volume/briks
Brick2: gluster2:/gluster_volume/briks
Options Reconfigured:
performance.readdir-ahead: on
[root@gluster2 ~]# gluster volume status
Status of volume: my_volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick gluster1:/gluster_volume/briks        49152     0          Y       2029
Brick gluster2:/gluster_volume/briks        49152     0          Y       2023
NFS Server on localhost                     2049      0          Y       2009
Self-heal Daemon on localhost               N/A       N/A        Y       2015
NFS Server on gluster1                      2049      0          Y       2019
Self-heal Daemon on gluster1                N/A       N/A        Y       2024

Task Status of Volume my_volume
------------------------------------------------------------------------------
There are no active volume tasks
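
At any point, the state of the replica can be checked with the self-heal commands, for example :

[root@gluster1 ~]# gluster volume heal my_volume info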

Client side

Both the firewall and SELinux are disabled on the client as well :

[root@gluster-client ~]# egrep "^SELINUX=" /etc/sysconfig/selinux
SELINUX=disabled
[root@gluster-client ~]# systemctl is-enabled firewalld
disabled

Add entries in “/etc/hosts” :

[root@gluster-client ~]# grep gluster /etc/hosts
192.168.1.115   gluster1        gluster1.argonay.wou
192.168.1.116   gluster2        gluster2.argonay.wou

Install the required packages :

[root@gluster-client ~]# yum -y install fuse fuse-libs glusterfs-fuse glusterfs

Mount the GlusterFS volume :

[root@gluster-client ~]# mount.glusterfs gluster1:/my_volume /mnt

Add an entry in “/etc/fstab” so that this GlusterFS file system is mounted at boot :

[root@gluster-client ~]# grep -i gluster /etc/fstab
# GlusterFS
gluster1:/my_volume     /mnt            glusterfs       defaults,_netdev        0 0

The file system is still mounted after a reboot :

[root@gluster-client ~]# df -h /mnt
Filesystem           Size  Used Avail Use% Mounted on
gluster1:/my_volume   10G   33M   10G   1% /mnt
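
Note : “gluster1” is only contacted at mount time to fetch the volume layout; once mounted, the client talks to both bricks directly. However, if the client reboots while “gluster1” is down, the mount fails. Depending on the GlusterFS version, a backup volfile server can be listed in the mount options; a sketch of such an fstab entry :

gluster1:/my_volume     /mnt            glusterfs       defaults,_netdev,backup-volfile-servers=gluster2        0 0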

Replication test

Create a file in this GlusterFS file system :

[root@gluster-client ~]# echo "here is a GlusterFS file" > /mnt/file

The file has been replicated to both servers :

[root@gluster1 ~]# cat /gluster_volume/briks/file
here is a GlusterFS file
[root@gluster2 ~]# cat /gluster_volume/briks/file
here is a GlusterFS file

What happens if we stop one of the servers :

[root@gluster1 ~]# shutdown -h 0

The file is still there (served from “gluster2”) :

[root@gluster-client ~]# cat /mnt/file
here is a GlusterFS file

While “gluster1” is down, we create another file in this GlusterFS file system :

[root@gluster-client ~]# echo "here is a second GlusterFS file" > /mnt/other_file

Then we restart the “gluster1” server and find that the self-heal daemon has copied this file from “gluster2” to “gluster1” :

[root@gluster1 ~]# cat /gluster_volume/briks/other_file
here is a second GlusterFS file
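
The self-heal daemon does this automatically; a heal can also be triggered and monitored by hand :

[root@gluster1 ~]# gluster volume heal my_volume
[root@gluster1 ~]# gluster volume heal my_volume info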
