[Gluster-users] request for help on performance issue on using glusterfs via cifs

prometheus__0 at hotmail.com
Sat Dec 4 11:20:41 UTC 2010


Hi list,

At the end of this mail I'll post my configuration.

What's the environment? My goal is to export four SATA disks as one big storage pool; all disks are attached to the same host. Since Windows users also need to be able to access the data, I created a server.vol and a client.vol, which means I have two glusterfs processes running: one providing the GlusterFS server that exports the disks, and one mounting that export on localhost so I can re-export the mount via Samba.
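
For completeness, the Samba side is just a plain share sitting on top of the GlusterFS client mount, roughly like this (share name and mount path are placeholders, not my exact setup):

[gluster-data]
   # share re-exporting the local GlusterFS client mount
   path = /mnt/glusterfs
   read only = no
   browseable = yes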

Some numbers:
disk access via dd: write 17 MB/s, read 50 MB/s
file transfer from a client straight to a Samba share on the destination disk, without GlusterFS in between: write 7 MB/s, read 12 MB/s
file transfer from a client via Samba and GlusterFS: write 1.8 MB/s, read 8 MB/s
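
(The dd figures are from simple sequential runs roughly of this form; block size and count here are only an example, not my exact invocation:)

# sequential write, ~1 GB in 1 MB blocks
dd if=/dev/zero of=/media/<disk>/testfile bs=1M count=1024 conv=fdatasync
# sequential read of the same file
dd if=/media/<disk>/testfile of=/dev/null bs=1M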

Although these numbers aren't exactly mind-blowing, I really expected to get close to the plain Samba values when using GlusterFS, something like 5 MB/s write and 11 MB/s read, so that I could move on to tuning other things. As it stands, GlusterFS seems to be the main bottleneck.

I'm using GlusterFS 3.0.2 because newer versions seem to have a memory leak with my configuration. In any case, I don't think the performance problem is solved by upgrading: I already tried that and the numbers stayed the same. I also tried FTP on top of GlusterFS and got the same speed as Samba over GlusterFS, so I suspect my configuration is simply bad.

I also read about the possibility of running server and client within a single process, but I didn't find any useful documentation on how to start glusterfs that way or how to write the configuration, which is why I'm running two glusterfs processes.
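
From what I read, a single-process setup would mean the mounting process stacks distribute directly on top of the local storage/posix bricks, so protocol/client and protocol/server would not be needed at all. Something like the sketch below is what I had in mind (untested; disk1..disk4 stand in for the real UUID mount points):

volume posix1
   type storage/posix
   option directory /media/disk1/storage/Data
end-volume

volume locks1
   type features/locks
   subvolumes posix1
end-volume

# ... the same posix + locks pair repeated for disk2, disk3 and disk4 ...

volume distributeData
   type cluster/distribute
   subvolumes locks1 locks2 locks3 locks4
end-volume

volume Data
   type performance/io-threads
   option thread-count 16
   subvolumes distributeData
end-volume

The idea would then be to mount it directly, e.g. "glusterfs -f /etc/glusterfs/single.vol /mnt/glusterfs", and point the Samba share at that mount point.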

Since I don't think the issue is on the hardware side or in Samba itself, I would be glad if someone could suggest a better configuration, and also explain why that suggestion is better.

I also tried quite a few performance translators, but none of them gave much benefit, so I went back to the most basic configuration as a starting point (an example of the kind of stack I tried is below).
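
For the record, the client-side stack I experimented with looked roughly like this; the translator names are the standard 3.0 ones, the cache sizes are only examples:

volume wb
   type performance/write-behind
   option cache-size 4MB
   subvolumes distributeData
end-volume

volume ioc
   type performance/io-cache
   option cache-size 64MB
   subvolumes wb
end-volume

volume Data
   type performance/io-threads
   option thread-count 16
   subvolumes ioc
end-volume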

Hardware: VIA mainboard with a VIA C7 processor at 1800 MHz and 1024 MB RAM.

Thanks in advance,
jd


                                  client.vol

volume node_ab8f19a5-c187-4b7e-bd2a-7781f646b3a8
  type protocol/client
# a unix domain socket isn't faster for this config
# option transport-type unix
# option transport.socket.connect-path /tmp/.glusterfs.server
  option transport-type tcp # for TCP/IP transport
  option remote-host 127.0.0.1
  option remote-port 6996
  option remote-subvolume Data_ab8f19a5-c187-4b7e-bd2a-7781f646b3a8
end-volume


volume node_bf8adbcf-8c6d-48f1-a622-c26cb3792c49
  type protocol/client
# option transport-type unix
# option transport.socket.connect-path /tmp/.glusterfs.server
  option transport-type tcp # for TCP/IP transport
  option remote-host 127.0.0.1
  option remote-port 6996
  option remote-subvolume Data_bf8adbcf-8c6d-48f1-a622-c26cb3792c49
end-volume


volume node_839dc61d-c7df-4630-b375-b1f86ee0ace9
  type protocol/client
# option transport-type unix
# option transport.socket.connect-path /tmp/.glusterfs.server
  option transport-type tcp # for TCP/IP transport
  option remote-host 127.0.0.1
  option remote-port 6996
  option remote-subvolume Data_839dc61d-c7df-4630-b375-b1f86ee0ace9
end-volume


volume node_4c3c37f0-0ebc-46ff-9f37-0fa3dac56560
  type protocol/client
# option transport-type unix
# option transport.socket.connect-path /tmp/.glusterfs.server
  option transport-type tcp # for TCP/IP transport
  option remote-host 127.0.0.1
  option remote-port 6996
  option remote-subvolume Data_4c3c37f0-0ebc-46ff-9f37-0fa3dac56560
end-volume


volume distributeData
   type cluster/distribute
   subvolumes node_ab8f19a5-c187-4b7e-bd2a-7781f646b3a8 node_bf8adbcf-8c6d-48f1-a622-c26cb3792c49 node_839dc61d-c7df-4630-b375-b1f86ee0ace9 node_4c3c37f0-0ebc-46ff-9f37-0fa3dac56560
end-volume
volume Data
   type performance/io-threads
   option thread-count 16
   subvolumes distributeData
end-volume



                server.vol

volume posix_ab8f19a5-c187-4b7e-bd2a-7781f646b3a8
   type storage/posix
   option directory /media/ab8f19a5-c187-4b7e-bd2a-7781f646b3a8/storage/Data
end-volume
volume Data_ab8f19a5-c187-4b7e-bd2a-7781f646b3a8
   type features/locks
   subvolumes posix_ab8f19a5-c187-4b7e-bd2a-7781f646b3a8
end-volume


volume posix_bf8adbcf-8c6d-48f1-a622-c26cb3792c49
   type storage/posix
   option directory /media/bf8adbcf-8c6d-48f1-a622-c26cb3792c49/storage/Data
end-volume
volume Data_bf8adbcf-8c6d-48f1-a622-c26cb3792c49
   type features/locks
   subvolumes posix_bf8adbcf-8c6d-48f1-a622-c26cb3792c49
end-volume


volume posix_839dc61d-c7df-4630-b375-b1f86ee0ace9
   type storage/posix
   option directory /media/839dc61d-c7df-4630-b375-b1f86ee0ace9/storage/Data
end-volume
volume Data_839dc61d-c7df-4630-b375-b1f86ee0ace9
   type features/locks
   subvolumes posix_839dc61d-c7df-4630-b375-b1f86ee0ace9
end-volume


volume posix_4c3c37f0-0ebc-46ff-9f37-0fa3dac56560
   type storage/posix
   option directory /media/4c3c37f0-0ebc-46ff-9f37-0fa3dac56560/storage/Data
end-volume
volume Data_4c3c37f0-0ebc-46ff-9f37-0fa3dac56560
   type features/locks
   subvolumes posix_4c3c37f0-0ebc-46ff-9f37-0fa3dac56560
end-volume


volume server
  type protocol/server
# option transport-type unix
# option transport.socket.listen-path /tmp/.glusterfs.server
  option transport-type tcp
  option transport.socket.listen-port 6996

  option auth.addr.Data_ab8f19a5-c187-4b7e-bd2a-7781f646b3a8.allow *
  option auth.addr.Data_bf8adbcf-8c6d-48f1-a622-c26cb3792c49.allow *
  option auth.addr.Data_839dc61d-c7df-4630-b375-b1f86ee0ace9.allow *
  option auth.addr.Data_4c3c37f0-0ebc-46ff-9f37-0fa3dac56560.allow *
  subvolumes Data_ab8f19a5-c187-4b7e-bd2a-7781f646b3a8 Data_bf8adbcf-8c6d-48f1-a622-c26cb3792c49 Data_839dc61d-c7df-4630-b375-b1f86ee0ace9 Data_4c3c37f0-0ebc-46ff-9f37-0fa3dac56560
end-volume



