Install GlusterFS on 5 nodes.

###  run all of the following with sudo (or as root) on every node  ###

# add user to sudo group
/usr/sbin/usermod -aG sudo [username]
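#log out and back in so the new group membership takes effect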

#install drivers for the external Ethernet adapter and graphics (Fujitsu S720)
apt install firmware-realtek
apt install firmware-amd-graphics

#edit /etc/hosts and add the cluster nodes' IPs and FQDNs
nano /etc/hosts
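#example entries (a sketch; IPs and domain are placeholders, use your own):
#   192.168.10.11  storage01.example.lan  storage01
#   192.168.10.12  storage02.example.lan  storage02
#   192.168.10.13  storage03.example.lan  storage03
#   192.168.10.14  storage04.example.lan  storage04
#   192.168.10.15  storage05.example.lan  storage05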
#find the disk or partition to use for GlusterFS (ex. sdb1)
lsblk

## optional ##
     #install parted to partition disk
     apt install parted
     #partition the disk (ex. sdb) -> only recommended for arbiter bricks,
     #which hold metadata only and therefore need far less space than data bricks
     parted /dev/sdb

     (parted) mklabel gpt
     (parted) mkpart primary ext4 0% 50%
     (parted) mkpart primary ext4 50% 100%

     ## check with ##
     (parted) print 
     (parted) align-check optimal 1 
     (parted) align-check optimal 2

     fdisk -l /dev/sdb

#create filesystem on disk/partitions (ex. sdb1)
mkfs.xfs -i size=512 /dev/sdb1
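#(-i size=512 enlarges the inode so Gluster's extended attributes fit inside it,
# as recommended by the upstream GlusterFS docs)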
#create mount folder
mkdir -p /data/brick1
#write mountpoint to /etc/fstab (ex. sdb1)
echo '/dev/sdb1 /data/brick1 xfs defaults 1 2' >> /etc/fstab
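#optionally reference the partition by UUID instead of the device name, in case
#device names change between boots; a sketch (take the UUID from blkid output):
blkid /dev/sdb1
echo 'UUID=<uuid-from-blkid> /data/brick1 xfs defaults 1 2' >> /etc/fstab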

## optional for second partition##
     mkfs.xfs -i size=512 /dev/sdb2
     mkdir -p /data/brick2
     echo '/dev/sdb2 /data/brick2 xfs defaults 1 2' >> /etc/fstab

#mount the new mountpoints and show all mounts
mount -a && mount
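#verify the brick filesystem is actually mounted (output will vary):
df -h /data/brick1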

#check that the mount is ready
cd /data/brick1/
#create the brick directory for the volume
mkdir -p /data/brick1/gv0

#install GlusterFS
apt install glusterfs-server
#start the gluster daemon
service glusterd start           #start it now (one-off)
systemctl enable --now glusterd  #start it now and on every boot
#check status
service glusterd status

###  connect all nodes and build the Gluster cluster  ###
#pick one node as the first node (ex. storage01) and probe all other nodes from there
gluster peer probe storage02
gluster peer probe storage03
gluster peer probe storage04
gluster peer probe storage05

#probe the first node from any other node so it is registered by hostname as well
[from storage02:] gluster peer probe storage01
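#alternatively, list the whole pool at once (should show all 5 nodes as Connected)
gluster pool list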

#check status/info (ex. from storage01)
gluster peer status
gluster volume info
 

#create gluster volume (ex. arbitrated replicated, where storage05 holds both arbiter bricks)
gluster volume create gv0 replica 3 arbiter 1 \
storage01:/data/brick1/gv0 storage02:/data/brick1/gv0 storage05:/data/brick1/gv0 \
storage03:/data/brick1/gv0 storage04:/data/brick1/gv0 storage05:/data/brick2/gv0
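#note: with "replica 3 arbiter 1" every third brick in the list becomes the arbiter,
#so both storage05 bricks above are arbiter bricks (metadata only, no file data)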

#start volume
gluster volume start gv0
#check volume(s)
gluster volume info
gluster volume status gv0
gluster volume status all
gluster volume status gv0 detail
 

#test volume
mkdir /mnt/gv0
mount -t glusterfs storage01:/gv0 /mnt/gv0
cd /mnt/gv0/
mkdir test
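#quick replication check: the test directory should show up inside the brick
#directory on every node (arbiter bricks replicate names/metadata, not file data)
ls /data/brick1/gv0/    #run on each node

#optional sketch to make the client mount permanent (backup-volfile-servers lets
#the client fall back to other nodes if storage01 is down):
echo 'storage01:/gv0 /mnt/gv0 glusterfs defaults,_netdev,backup-volfile-servers=storage02:storage03 0 0' >> /etc/fstab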

#to stop the volume
gluster volume stop gv0
#to remove the volume
gluster volume delete gv0
#to list all volumes
gluster volume list

###  NFS on every node ###
#install NFS-Ganesha so clients can access the volume via NFS
apt install nfs-ganesha-gluster
gluster volume get gv0 nfs.disable
     #if nfs.disable is off, turn it on to disable the legacy built-in Gluster NFS server
     gluster volume set gv0 nfs.disable on
     #if the kernel NFS server is running, disable it (it conflicts with Ganesha)
     systemctl disable --now nfs-server

mv /etc/ganesha/ganesha.conf /etc/ganesha/ganesha.conf.org
nano /etc/ganesha/ganesha.conf
#insert the following text and edit as you like >>

#------------------------------------------
NFS_CORE_PARAM {
    # allow NFSv3 clients to mount via the NFSv4 pseudo path
    mount_path_pseudo = true;
    # NFS protocol
    Protocols = 3,4;
}
EXPORT_DEFAULTS {
    # default access mode
    Access_Type = RW;
}
EXPORT {
    # unique ID
    Export_Id = 101;
    # mount path of Gluster Volume
    Path = "/gv0";
    FSAL {
        # FSAL module to use (must be GLUSTER for Gluster volumes)
        name = GLUSTER;
        # hostname or IP address of this Node
        hostname="storage01";
        # Gluster volume name
        volume="gv0";
    }
    # config for root Squash
    Squash="No_root_squash";
    # NFSv4 Pseudo path
    Pseudo="/gv0";
    # allowed security options
    SecType = "sys";
}
LOG {
    # default log level
    Default_Log_Level = WARN;
}

#------------------------------------------

#restart nfs to load new config
systemctl restart nfs-ganesha
#check mounts
showmount -e localhost
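
#test an NFS client mount (a sketch; run from any client with nfs-common installed):
mkdir -p /mnt/nfs-gv0
mount -t nfs -o vers=4 storage01:/gv0 /mnt/nfs-gv0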