[Gluster-users] 1client+2server performance problem

eagleeyes eagleeyes at 126.com
Tue Feb 17 06:14:21 UTC 2009


Hello everybody:
     I have a performance problem with a 1-client + 2-server setup.
     
     The system is AS 5.2 with the 2.6.18-92.el5PAE kernel.
     GlusterFS is 1.3.9, built on Feb 11 2009 16:07:40, repository revision glusterfs--mainline--2.5--patch-770.
     FUSE is fuse-2.7.3glfs10.
======================================================
Each GlusterFS server has 4 GB of memory, no RAID, plain PC hardware.
172.20.22.0    
/dev/sda10            665G  198M  631G   1% /opt
/dev/sda8              24G  373M   23G   2% /var
/dev/sda7              26G  2.6G   22G  11% /usr
/dev/sda9             116G  191M  109G   1% /usr/local
/dev/sda6              22G  173M   21G   1% /tmp
/dev/sda5              28G  173M   26G   1% /home
/dev/sda1             114M   13M   96M  12% /boot
/dev/sdb1             917G  1.8G  869G   1% /disk1
/dev/sdc1             917G  1.7G  869G   1% /disk2
tmpfs                 2.0G     0  2.0G   0% /dev/shm

172.20.22.9
/dev/hda9             510G  199M  483G   1% /opt
/dev/hda5              45G  383M   42G   1% /var
/dev/hda3              25G  2.5G   21G  11% /usr
/dev/hda6             282G  194M  268G   1% /usr/local
/dev/hda2              20G  173M   19G   1% /home
/dev/hda1              99M   13M   81M  14% /boot
tmpfs                 2.0G     0  2.0G   0% /dev/shm
/dev/hdc1             917G  1.8G  869G   1% /disk1
/dev/hdd1             917G  1.7G  869G   1% /disk2
======================================================
GlusterFS client: 4 GB of memory, virtual terminal.
Filesystem            Size  Used Avail Use% Mounted on
/dev/sda3             141G  5.6G  128G   5% /
/dev/sda1             190M   12M  169M   7% /boot
tmpfs                 948M     0  948M   0% /dev/shm
glusterfs             1.8T  3.4G  1.7T   1% /data1   
======================================================
server configuration
volume disk1
  type storage/posix                   # POSIX FS translator
  option directory /disk1/data       # Export this directory
end-volume

volume disk2
  type storage/posix                   # POSIX FS translator
  option directory /disk2/data       # Export this directory
end-volume

volume ns
  type storage/posix                   # POSIX FS translator
  option directory /disk1/export-ns       # Export this directory
end-volume

volume iot1
  type performance/io-threads
  subvolumes disk1 
  option thread-count 16 # default value is 1
  option cache-size 4MB # default is 64MB (this is per thread, so configure it according to your RAM size and thread-count)
end-volume

volume iot2
  type performance/io-threads
  subvolumes disk2
  option thread-count 16 # default value is 1
  option cache-size 4MB # default is 64MB (this is per thread, so configure it according to your RAM size and thread-count)
end-volume
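
For what it's worth, the memory math on these settings (simple arithmetic, assuming cache-size really is per thread as the comment above says):

  16 threads x 4 MB = 64 MB per io-threads volume
  2 io-threads volumes per server = 128 MB

so with 4 GB of RAM per server the io-threads cache is unlikely to be the constraint here.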

volume server
  type protocol/server
  option transport-type tcp/server     # For TCP/IP transport
  subvolumes disk1 disk2 iot1 iot2 ns
  option auth.ip.disk1.allow * # Allow access to "disk1" volume
  option auth.ip.disk2.allow * # Allow access to "disk2" volume
  option auth.ip.ns.allow * # Allow access to "ns" volume
  option auth.ip.iot1.allow * # Allow access to "iot1" volume
  option auth.ip.iot2.allow * # Allow access to "iot2" volume
end-volume
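
Note that this block exports the raw posix volumes (disk1, disk2) alongside their io-threads wrappers, so a client could reach the bricks without going through the thread pool. Since the client configuration below only ever asks for iot1, iot2 and ns, a tighter export list would look like this (a sketch in the same volfile syntax; adjust the auth rules to taste):

volume server
  type protocol/server
  option transport-type tcp/server     # For TCP/IP transport
  subvolumes iot1 iot2 ns
  option auth.ip.iot1.allow * # Allow access to "iot1" volume
  option auth.ip.iot2.allow * # Allow access to "iot2" volume
  option auth.ip.ns.allow * # Allow access to "ns" volume
end-volume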
=====================================================
client configuration

volume client1
  type protocol/client
  option transport-type tcp/client     # for TCP/IP transport
  option remote-host 172.20.22.0         # IP address of the remote brick
  option remote-subvolume iot1        # name of the remote volume
  option transport-timeout 20 
end-volume

volume client2
  type protocol/client
  option transport-type tcp/client     # for TCP/IP transport
  option remote-host 172.20.22.0         # IP address of the remote brick
  option remote-subvolume iot2
  option transport-timeout 20
end-volume

volume client3
  type protocol/client
  option transport-type tcp/client     # for TCP/IP transport
  option remote-host 172.20.22.9         # IP address of the remote brick
  option remote-subvolume iot1
  option transport-timeout 20
end-volume

volume client4
  type protocol/client
  option transport-type tcp/client     # for TCP/IP transport
  option remote-host 172.20.22.9         # IP address of the remote brick
  option remote-subvolume iot2
  option transport-timeout 20
end-volume

volume ns1
  type protocol/client
  option transport-type tcp/client     # for TCP/IP transport
  option remote-host 172.20.22.0        # IP address of the remote brick
  option remote-subvolume ns
  option transport-timeout 20
end-volume

volume ns2
  type protocol/client
  option transport-type tcp/client     # for TCP/IP transport
  option remote-host 172.20.22.9         # IP address of the remote brick
  option remote-subvolume ns
  option transport-timeout 20
end-volume

volume afr1
  type cluster/afr
  subvolumes client1 client3
end-volume

volume afr2
  type cluster/afr
  subvolumes client2 client4
end-volume

volume afr-ns
  type cluster/afr
  subvolumes ns1 ns2
end-volume

volume unify
  type cluster/unify
  option namespace afr-ns
  option scheduler rr
  subvolumes afr1 afr2
end-volume

volume iot
  type performance/io-threads
  option thread-count 32
  option cache-size   4MB
  subvolumes  unify 
end-volume
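
To make the stack easier to follow, this is what the client ends up mounting (reconstructed from the volumes above):

  iot (io-threads, 32 threads)
    unify (scheduler rr, namespace afr-ns)
      afr1 = client1 (172.20.22.0/iot1) + client3 (172.20.22.9/iot1)
      afr2 = client2 (172.20.22.0/iot2) + client4 (172.20.22.9/iot2)
      afr-ns = ns1 (172.20.22.0/ns) + ns2 (172.20.22.9/ns)

One thing that stands out: there are no write-behind or read-ahead translators on the client side, which the 1.3 docs recommend for throughput. A minimal sketch of how they could be stacked between unify and the final io-threads volume (option names as in the 1.3 translator docs; please verify against your build, and treat the sizes as starting points, not tuned values):

volume wb
  type performance/write-behind
  option aggregate-size 128KB       # batch small writes before sending them over the wire
  subvolumes unify
end-volume

volume ra
  type performance/read-ahead
  option page-size 128KB            # read-ahead chunk size
  option page-count 4               # chunks to prefetch per file
  subvolumes wb
end-volume

and then point the iot volume at ra instead of unify (subvolumes ra).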
=======================================================
After writing some data into the Gluster mount /data1 with a dozen dd commands,
I ran "time ll -h" (ll being the ls -l alias) in /data1, with this result:
real    0m8.388s
user    0m0.000s
sys     0m0.000s
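
Most of those 8 seconds are probably metadata, not data: on a unify-over-afr stack, every stat() issued by ls -l fans out over the network to the namespace replicas and the data replicas. Two quick shell checks to separate the readdir cost from the per-entry stat cost (plain coreutils, nothing GlusterFS-specific; the test file name is just an example):

time ls /data1 > /dev/null        # readdir only
time ls -l /data1 > /dev/null     # readdir + one networked stat per entry

dd if=/dev/zero of=/data1/ddtest bs=1M count=256 conv=fdatasync   # raw write path

If plain ls is fast and ls -l is slow while dd throughput is fine, the bottleneck is per-entry metadata latency rather than disk or network bandwidth.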

How can I improve this performance? What should I do? Looking forward to your reply; thanks a lot.

Could someone tell me what your GlusterFS configuration looks like?
2009-02-17 



eagleeyes 