[Gluster-users] GlusterFS performance is really slow

Todd Wu s2213005 at gmail.com
Thu Sep 2 10:13:27 UTC 2010


Dear all,

I am setting up a large-scale GlusterFS deployment with two servers and about
400 clients.

The problem I keep hitting is that every login on a client takes 10 to 30
seconds, and so does a plain ls (even outside the shared directories).
Could anyone help me improve the performance? Thanks.
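
For reference, this is roughly how I time it (/mnt/gluster stands in for our
actual mount point):

# time a plain listing on the mount
time ls /mnt/gluster
# break the elapsed time down by syscall
strace -c ls /mnt/gluster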

Environment:

Servers:
Two Red Hat 5 x86_64 machines.
Each exports 2 TB of storage, mirrored as RAID 1 for HA.

Clients:
400 Red Hat 5 x86_64 machines with fuse-2.6.3-3.slc5 installed.
I have tried this guide, but it did not help:
http://www.mail-archive.com/gluster-devel@nongnu.org/msg01985.html
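
To confirm which fuse pieces are actually in use, I check roughly like this
(the package name matches our SLC5 build):

rpm -q fuse              # userspace package, fuse-2.6.3-3.slc5 here
modinfo -F version fuse  # version of the loaded kernel module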

Files:
gluster01 already holds about 1 TB of files; gluster02 has not fully synced
yet.
I tried shutting gluster02 down to rule out sync latency, but operations
still take about 5 to 10 seconds.
Most files are smaller than 10 MB but are read very frequently.
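
As I understand it, in this GlusterFS generation replicate self-heal is
triggered by lookups from a client, so to push gluster02 back in sync I would
walk the whole mount, roughly (mount point is an example):

find /mnt/gluster -noleaf -print0 | xargs -0 stat > /dev/null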

Configs:

gluster01-export.vol
## file auto generated by /usr/bin/glusterfs-volgen (export.vol)
# Cmd line:
# $ /usr/bin/glusterfs-volgen --name gluster --raid 1 gluster01:/opt/exp_soft/ gluster02:/opt/exp_soft/

volume posix1
  type storage/posix
  option directory /opt/exp_soft/
end-volume

volume locks1
    type features/locks
    subvolumes posix1
end-volume

volume brick1
    type performance/io-threads
    option thread-count 16
    subvolumes locks1
end-volume

volume server-tcp
    type protocol/server
    option transport-type tcp
    option auth.addr.brick1.allow *
    option transport.socket.bind-address MY-IP-1
    option transport.socket.listen-port 6996
    option transport.socket.nodelay on
    subvolumes brick1
end-volume
====================================================================================
gluster02-export.vol
## file auto generated by /usr/bin/glusterfs-volgen (export.vol)
# Cmd line:
# $ /usr/bin/glusterfs-volgen --name gluster --raid 1 gluster01:/opt/exp_soft/ gluster02:/opt/exp_soft/

volume posix1
  type storage/posix
  option directory /opt/exp_soft/
end-volume

volume locks1
    type features/locks
    subvolumes posix1
end-volume

volume brick1
    type performance/io-threads
    option thread-count 16
    subvolumes locks1
end-volume

volume server-tcp
    type protocol/server
    option transport-type tcp
    option auth.addr.brick1.allow *
    option transport.socket.bind-address MY-IP-2
    option transport.socket.listen-port 6996
    option transport.socket.nodelay on
    subvolumes brick1
end-volume
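
Both export volfiles are started the same way, roughly (the volfile path is
just how our install lays it out):

glusterfsd -f /etc/glusterfs/export.vol -l /var/log/glusterfs/glusterfsd.log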
====================================================================================
Clients: glusterfs.vol
## file auto generated by /usr/bin/glusterfs-volgen (mount.vol)
# Cmd line:
# $ /usr/bin/glusterfs-volgen --name gluster --raid 1 gluster01:/opt/exp_soft/ gluster02:/opt/exp_soft/

# RAID 1
# TRANSPORT-TYPE tcp
volume gluster02-1
    type protocol/client
    option transport-type tcp
    option remote-host gluster02
    option transport.socket.nodelay on
    option transport.remote-port 6996
    option remote-subvolume brick1
end-volume

volume gluster01-1
    type protocol/client
    option transport-type tcp
    option remote-host gluster01
    option transport.socket.nodelay on
    option transport.remote-port 6996
    option remote-subvolume brick1
end-volume

volume mirror-0
    type cluster/replicate
    subvolumes gluster01-1 gluster02-1
end-volume

volume readahead
    type performance/read-ahead
    option page-count 4
    subvolumes mirror-0
end-volume

volume iocache
    type performance/io-cache
    option cache-size `echo $(( $(grep 'MemTotal' /proc/meminfo | sed 's/[^0-9]//g') / 5120 ))`MB
    option cache-timeout 1
    subvolumes readahead
end-volume
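
# (note: glusterfs does not expand shell backticks in a volfile, so the
#  cache-size expression above has to be substituted by a deployment script
#  before the file is installed; as a worked example, a node with 8 GB RAM
#  reports MemTotal ~ 8388608 kB, and 8388608 / 5120 = 1638, so the line
#  becomes "option cache-size 1638MB", i.e. roughly 20% of RAM)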

volume quickread
    type performance/quick-read
    option cache-timeout 1
    option max-file-size 64kB
    subvolumes iocache
end-volume

volume writebehind
    type performance/write-behind
    option cache-size 4MB
    subvolumes quickread
end-volume

volume statprefetch
    type performance/stat-prefetch
    subvolumes writebehind
end-volume
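
For reference, the clients mount this volfile roughly like so (the volfile
path and mount point are examples from our setup):

glusterfs -f /etc/glusterfs/glusterfs.vol /mnt/gluster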


BR,
Todd

