[Gluster-users] Slower than NFS
Greg
greg at easyflirt.com
Wed Apr 1 13:01:08 UTC 2009
Hi,
I have a production setup with clients that are both NFS and GlusterFS
clients. I've run a _very_ quick benchmark (repeated ten times) to
compare read performance between local hard drives, an NFS mount and a
GlusterFS mount.
- a 17 KB file:
cache_c21f969b5f03d33d43e04f8f136e7682_c57d91c82f2ed2e96c13765e33fcd5ae
- a 50 MB file: users0.new.spi
- /nfs is an NFS mount point, backed by a server with SATA drives
- /gluster is a GlusterFS mount point, backed by two servers with SAS drives
php-14:~# for f in \
/var/www/www/cache/cache_c/cache_cf/cache_cfb/cache_c21f969b5f03d33d43e04f8f136e7682_c57d91c82f2ed2e96c13765e33fcd5ae \
/nfs/tmp/cache_c21f969b5f03d33d43e04f8f136e7682_c57d91c82f2ed2e96c13765e33fcd5ae \
/tmp/cache_c21f969b5f03d33d43e04f8f136e7682_c57d91c82f2ed2e96c13765e33fcd5ae \
/gluster/sphinx/users0.new.spi \
/nfs/tmp/users0.new.spi \
/tmp/users0.new.spi; do echo $f; time cat $f >/dev/null; done
/var/www/www/cache/cache_c/cache_cf/cache_cfb/cache_c21f969b5f03d33d43e04f8f136e7682_c57d91c82f2ed2e96c13765e33fcd5ae
real 0m0.011s
user 0m0.000s
sys 0m0.000s
/nfs/tmp/cache_c21f969b5f03d33d43e04f8f136e7682_c57d91c82f2ed2e96c13765e33fcd5ae
real 0m0.002s
user 0m0.000s
sys 0m0.000s
/tmp/cache_c21f969b5f03d33d43e04f8f136e7682_c57d91c82f2ed2e96c13765e33fcd5ae
real 0m0.001s
user 0m0.000s
sys 0m0.000s
/gluster/sphinx/users0.new.spi
real 0m0.081s
user 0m0.000s
sys 0m0.068s
/nfs/tmp/users0.new.spi
real 0m0.037s
user 0m0.000s
sys 0m0.036s
/tmp/users0.new.spi
real 0m0.028s
user 0m0.000s
sys 0m0.028s
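So for the 50 MB file, the read from GlusterFS (0.081s) takes roughly
twice as long as from NFS (0.037s) and about three times as long as from
the local disk (0.028s). Note that a bare `cat` is served mostly from the
page cache on repeated runs; here is a minimal sketch to force cold-cache
reads (assuming root on Linux, paths taken from the test above):

sync
echo 3 > /proc/sys/vm/drop_caches   # drop the client's page/dentry/inode caches
time cat /gluster/sphinx/users0.new.spi >/dev/null
time cat /nfs/tmp/users0.new.spi >/dev/null

(The NFS server and the filers keep their own caches, so this only makes
the client side cold.)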
Configuration:
# file: /etc/glusterfs/glusterfsd.vol
#
# Volumes
#
volume media-small
type storage/posix
option directory /var/local/glusterfs/media_small
end-volume
volume media-medium
type storage/posix
option directory /var/local/glusterfs/media_medium
end-volume
# POSIX locks
volume media-small-locks
type features/posix-locks
option mandatory-locks on
subvolumes media-small
# subvolumes trash # enable this if you need trash can support
# (NOTE: not present in 1.3.0-pre5+ releases)
end-volume
volume media-medium-locks
type features/posix-locks
option mandatory-locks on
subvolumes media-medium
# subvolumes trash # enable this if you need trash can support
# (NOTE: not present in 1.3.0-pre5+ releases)
end-volume
#
# Performance
#
volume media-small-iot
type performance/io-threads
subvolumes media-small-locks
option thread-count 4 # default value is 1
end-volume
volume media-small-wb
type performance/write-behind
option flush-behind on # default is off
subvolumes media-small-iot
end-volume
volume media-medium-iot
type performance/io-threads
subvolumes media-medium-locks
option thread-count 4 # default value is 1
end-volume
volume media-medium-wb
type performance/write-behind
option flush-behind on # default is off
subvolumes media-medium-iot
end-volume
#
# Server
#
volume server
type protocol/server
option transport-type tcp/server
option auth.addr.media-small-wb.allow 10.0.*.*
option auth.addr.media-medium-wb.allow 10.0.*.*
# Autoconfiguration, e.g.:
# glusterfs -l /tmp/glusterfs.log --server=filer-04 ./Cache
option client-volume-filename /etc/glusterfs/glusterfs.vol
subvolumes media-small-wb media-medium-wb # exported volumes
end-volume
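For reference, a minimal sketch of how these two files are wired up
(assuming the usual 2.0-era command lines; /gluster as mount point is
just an example):

# on filer-04 and filer-05
glusterfsd -f /etc/glusterfs/glusterfsd.vol

# on each client, e.g. php-14
glusterfs -f /etc/glusterfs/glusterfs.vol /gluster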
# file: /etc/glusterfs/glusterfs.vol
#
# Clients
#
volume media-small-filer-04
type protocol/client
option transport-type tcp/client
option remote-host filer-04.local
option remote-subvolume media-small-wb
end-volume
volume media-small-filer-05
type protocol/client
option transport-type tcp/client
option remote-host filer-05.local
option remote-subvolume media-small-wb
end-volume
volume media-medium-filer-04
type protocol/client
option transport-type tcp/client
option remote-host filer-04.local
option remote-subvolume media-medium-wb
end-volume
volume media-medium-filer-05
type protocol/client
option transport-type tcp/client
option remote-host filer-05.local
option remote-subvolume media-medium-wb
end-volume
#
# Main volume
#
volume afr-small
# AFR has been renamed to "Replicate" for simplicity.
type cluster/replicate
# Put the server with the least free disk space first:
# "When doing a "df -h" on a client, the AVAILABLE disk space
# will display the maximum disk space of the first AFR sub volume defined
# in the spec file. So if you have two servers with 50 gigs and 100 gigs
# of free disk space, and the server with 100 gigs is listed first, then
# you will see 100 gigs available even though one server only has 50 gigs
# free."
subvolumes media-small-filer-04 media-small-filer-05
end-volume
volume afr-medium
# AFR has been renamed to "Replicate" for simplicity.
type cluster/replicate
subvolumes media-medium-filer-04 media-medium-filer-05
end-volume
#
# Performance
#
volume iot-small
type performance/io-threads
subvolumes afr-small
end-volume
volume readahead-small
type performance/read-ahead
subvolumes iot-small
option page-size 1MB # default is 256KB
option page-count 4 # default is 2 - cache per file = (page-count x page-size)
option force-atime-update no # default is 'no'
end-volume
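With the values above, read-ahead buffers page-count x page-size =
4 x 1 MB = 4 MB per open file.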
volume iocache-small
type performance/io-cache
option cache-size 64MB # default is 32MB
option page-size 256KB # default is 128KB
subvolumes readahead-small
end-volume
volume writeback-small
type performance/write-behind
option block-size 1MB # max 4MB
option flush-behind on # default is off
subvolumes iocache-small
end-volume
volume iot-medium
type performance/io-threads
subvolumes afr-medium
end-volume
volume readahead-medium
type performance/read-ahead
subvolumes iot-medium
option page-size 1MB # default is 256KB
option page-count 4 # default is 2 - cache per file = (page-count x page-size)
option force-atime-update no # default is 'no'
end-volume
volume iocache-medium
type performance/io-cache
option cache-size 64MB # default is 32MB
option page-size 256KB # default is 128KB
option page-count 2
subvolumes readahead-medium
end-volume
volume writeback-medium
type performance/write-behind
option block-size 1MB # max 4MB
option flush-behind on # default is off
subvolumes iocache-medium
end-volume
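Finally, a hypothetical smoke test of the replication (assuming /gluster
mounts the media-medium stack and the backend directories above are
reachable over ssh):

# write through the client mount...
echo afr-check > /gluster/sphinx/afr-check
# ...then check that the file landed on both filers' backends
ssh filer-04.local ls -l /var/local/glusterfs/media_medium/sphinx/afr-check
ssh filer-05.local ls -l /var/local/glusterfs/media_medium/sphinx/afr-check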
Regards,
--
Greg