[Gluster-devel] glusterFS on rocks4.3?

Ricardo Garcia Mayoral ricardo at torroja.dmt.upm.es
Thu Apr 24 14:33:30 UTC 2008


OK, it seems we have glusterfs installed on our nodes and frontend, but we are 
having problems getting it to run. Our setup is basically a clustered 
filesystem with AFR and unify. With six nodes, 'compute-7-0' to 'compute-7-5', 
we build three AFR pairs and then unify the three resulting AFR bricks. The 
namespace is served by our frontend, 'vulcano'. The volume specification files 
are as follows:


Server spec at the frontend:
# cat /etc/glusterfs/glusterfs-server.vol
### File: /etc/glusterfs-server.vol - GlusterFS Server Volume Specification

### Export volume "brick-ns" with the contents of "/opt/gluster-ns" 
directory.
volume brick-ns
        type storage/posix
        option directory /opt/gluster-ns
end-volume

### Add network serving capability to above brick.
volume server
        type protocol/server
        option transport-type tcp/server     # For TCP/IP transport
#       option bind-address 10.255.255.253
        option listen-port 6996
        subvolumes brick-ns
        option auth.ip.brick-ns.allow 10.*.*.* # Allow access to "brick-ns" volume
end-volume
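
As a quick sanity check once glusterfsd is started, we can verify that each server is actually listening on port 6996 and is reachable from the clients; generic tools are enough for this, for example:

# netstat -tlnp | grep 6996        # on the server itself
# telnet vulcano 6996              # from a client; likewise for compute-7-0 ... compute-7-5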


Server spec at the nodes:
# cat /etc/glusterfs/glusterfs-server.vol
### File: /etc/glusterfs-server.vol - GlusterFS Server Volume Specification

### Export volume "brick" with the contents of 
"/state/partition1/glfsdir/" directory.
volume brick
        type storage/posix
        option directory /state/partition1/glfsdir/
end-volume

### Add network serving capability to above brick.
volume server
        type protocol/server
        option transport-type tcp/server     # For TCP/IP transport
#       option bind-address 10.255.255.253
        option listen-port 6996
        subvolumes brick
        option auth.ip.brick.allow 10.*.*.* # Allow access to "brick" volume
end-volume


Client spec everywhere:
# cat /etc/glusterfs/glusterfs-client.vol
### File: /etc/glusterfs-client.vol - GlusterFS Client Volume Specification

### Add client feature and attach to remote subvolume of server1
volume brick7-0
        type protocol/client
        option transport-type tcp/client                        # for TCP/IP transport
        option remote-host compute-7-0  # 10.255.255.216        # IP address of the remote brick
        option remote-subvolume brick                           # name of the remote volume
end-volume

volume brick7-1
        type protocol/client
        option transport-type tcp/client
        option remote-host compute-7-1  # 10.255.255.215
        option remote-subvolume brick
end-volume

volume brick7-2
        type protocol/client
        option transport-type tcp/client
        option remote-host compute-7-2  # 10.255.255.214
        option remote-subvolume brick
end-volume

volume brick7-3
        type protocol/client
        option transport-type tcp/client
        option remote-host compute-7-3  # 10.255.255.213
        option remote-subvolume brick
end-volume

volume brick7-4
        type protocol/client
        option transport-type tcp/client
        option remote-host compute-7-4  # 10.255.255.212
        option remote-subvolume brick
end-volume

volume brick7-5
        type protocol/client
        option transport-type tcp/client
        option remote-host compute-7-5  # 10.255.255.211
        option remote-subvolume brick
end-volume


###  Namespace brick
volume local-ns
        type protocol/client
        option transport-type tcp/client                        # for TCP/IP transport
        option remote-host vulcano      # 10.1.1.1              # IP address of the remote brick
        option remote-subvolume brick-ns                        # name of the remote volume
end-volume


###  Automatic File Replication
volume afr7-0_7-3
        type cluster/afr
        subvolumes brick7-0 brick7-3
        option replicate *:2
end-volume

volume afr7-1_7-4
        type cluster/afr
        subvolumes brick7-1 brick7-4
        option replicate *:2
end-volume

volume afr7-2_7-5
        type cluster/afr
        subvolumes brick7-2 brick7-5
        option replicate *:2
end-volume


###  Unify
volume unify
        type cluster/unify
        subvolumes afr7-0_7-3 afr7-1_7-4 afr7-2_7-5
        option namespace local-ns
# ALU scheduler
        option scheduler alu                    # use the ALU scheduler
        option alu.limits.min-free-disk  5%     # Don't create files on a volume with less than 5% free diskspace
#   When deciding where to place a file, first look at the write-usage, then at
#   read-usage, disk-usage, open files, and finally the disk-speed-usage.
        option alu.order write-usage:read-usage:disk-usage:open-files-usage:disk-speed-usage
        option alu.write-usage.entry-threshold 20%   # Kick in when the write-usage discrepancy is 20%
        option alu.write-usage.exit-threshold  15%   # Don't stop until the discrepancy has been reduced to 5%
        option alu.read-usage.entry-threshold  20%   # Kick in when the read-usage discrepancy is 20%
        option alu.read-usage.exit-threshold    4%   # Don't stop until the discrepancy has been reduced to 16% (20% - 4%)
        option alu.disk-usage.entry-threshold 10GB   # Kick in if the discrepancy in disk-usage between volumes is more than 10GB
        option alu.disk-usage.exit-threshold   1GB   # Don't stop writing to the least-used volume until the discrepancy is 9GB
        option alu.open-files-usage.entry-threshold 1024   # Kick in if the discrepancy in open files is 1024
        option alu.open-files-usage.exit-threshold    32   # Don't stop until 992 files have been written in the least-used volume
#       option alu.disk-speed-usage.entry-threshold  # NEVER SET IT. SPEED IS CONSTANT!!!
#       option alu.disk-speed-usage.exit-threshold   # NEVER SET IT. SPEED IS CONSTANT!!!
        option alu.stat-refresh.interval 10sec   # Refresh the statistics used for decision-making every 10 seconds
#       option alu.stat-refresh.num-file-create 10   # Refresh the statistics used for decision-making after creating 10 files
# NUFA scheduler
#       option scheduler nufa
#       option nufa.local-volume-name afr1
end-volume
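
(If the problem turns out to be in the client spec itself, a handy way to rule out the ALU options is a stripped-down unify block using the simpler round-robin scheduler; this is only a debugging variant of the spec above, not what we intend to run:)

volume unify
        type cluster/unify
        subvolumes afr7-0_7-3 afr7-1_7-4 afr7-2_7-5
        option namespace local-ns
        option scheduler rr                     # round-robin instead of alu
end-volume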



We have also run, on the frontend:
# mkdir /opt/gluster-ns
and everywhere:
# mkdir /mnt/glusterfs
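
(The node server spec exports /state/partition1/glfsdir/, so presumably that directory has to exist on every node as well; a quick check over ssh, using the hostnames above, would be:)

# ssh compute-7-0 'ls -ld /state/partition1/glfsdir'   # likewise for compute-7-1 ... compute-7-5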

At the frontend, '/etc/fstab' looks like
# cat /etc/fstab
# This file is edited by fstab-sync - see 'man fstab-sync' for details
LABEL=/1                /                       ext3    defaults        1 1
none                    /dev/pts                devpts  gid=5,mode=620  0 0
none                    /dev/shm                tmpfs   defaults        0 0
LABEL=/export           /export                 ext3    defaults        1 2
none                    /proc                   proc    defaults        0 0
LABEL=/scratch          /scratch                ext3    defaults        1 2
none                    /sys                    sysfs   defaults        0 0
LABEL=/var              /var                    ext3    defaults        1 2
LABEL=SWAP-sda2         swap                    swap    defaults        0 0
# The ram-backed filesystem for ganglia RRD graph databases.
tmpfs /var/lib/ganglia/rrds tmpfs size=2036835000,gid=nobody,uid=nobody,defaults 1 0
/dev/hdb                /media/cdrom            auto    pamconsole,exec,noauto,managed 0 0
/etc/glusterfs/glusterfs-client.vol     /mnt/glusterfs  glusterfs   defaults    0 0

and at the nodes:
# cat /etc/fstab
# This file is edited by fstab-sync - see 'man fstab-sync' for details
LABEL=/                 /                       ext3    defaults        1 1
none                    /dev/pts                devpts  gid=5,mode=620  0 0
none                    /dev/shm                tmpfs   defaults        0 0
none                    /proc                   proc    defaults        0 0
LABEL=/state/partition  /state/partition1       ext3    defaults        1 2
none                    /sys                    sysfs   defaults        0 0
LABEL=/var              /var                    ext3    defaults        1 2
/dev/sda3               swap                    swap    defaults        0 0
/etc/glusterfs/glusterfs-client.vol     /mnt/glusterfs  glusterfs   defaults    0 0
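
(A caveat on mounting through fstab: a glusterfs entry in /etc/fstab relies on a mount helper, typically /sbin/mount.glusterfs from the glusterfs client package; whether our build ships one is an assumption worth checking before counting on 'mount -a':)

# ls -l /sbin/mount.glusterfs
# mount /mnt/glusterfs          # reads the fstab entry and should print an error if something is wrong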

Now, everywhere we do:
# glusterfsd -f /etc/glusterfs/glusterfs-server.vol
# glusterfs -f /etc/glusterfs/glusterfs-client.vol /mnt/glusterfs

But 'df -h' shows no gluster partition. Any ideas?
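
In case it helps to narrow things down, the next step we can think of is a foreground run of the client with a log file and debug logging (assuming this glusterfs build accepts the -N, -l and -L options; 'glusterfs --help' will confirm), plus a check that the fuse kernel module is loaded:

# lsmod | grep fuse
# glusterfs -N -f /etc/glusterfs/glusterfs-client.vol -l /tmp/glusterfs-client.log -L DEBUG /mnt/glusterfs
# tail /tmp/glusterfs-client.log
# mount | grep glusterfs
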
Thanks,

-- 
Ricardo Garcia Mayoral
Computational Fluid Mechanics
ETSI Aeronauticos, Universidad Politecnica de Madrid
Pz Cardenal Cisneros 3, 28040 Madrid, Spain.
Phone: (+34) 913363291  Fax: (+34) 913363295
e-mail: ricardo at torroja.dmt.upm.es





