[Gluster-devel] GlusterFS performance tuning

admin at hostyle.it
Thu Feb 12 22:41:20 UTC 2009


Dear All,

I have this scenario:

2 servers running GlusterFS 1.3.12

1 client that mounts the GlusterFS volume
in AFR (replication) mode.

Hardware configuration:

Servers and client (3 machines in total):
Dell PowerEdge 2950, 2 GB of memory,
dual-core Intel Xeon, 2 SATA hard disks in RAID 1
on a dedicated SAS controller, Mellanox IB card (MHEA28)

The InfiniBand links run at SDR, and the switch is a Voltaire ISR 9024-DM.
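
As a baseline, the raw InfiniBand bandwidth can be checked with ib_write_bw
from the perftest package (this assumes perftest is installed on both hosts;
the IP matches the server config below):

# on server 10.0.0.10, start the listener
ib_write_bw
# on the client, connect and report the measured bandwidth in MB/s
ib_write_bw 10.0.0.10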

After finishing the installation and some configuration tuning, I ran this test:
time dd if=/dev/zero of=/mnt/glusterfs/dssdfsdfsf.img bs=1M count=200
which reports about 53.8 MB/s.
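
As a second baseline, the same write run directly against a brick's export
directory (/home/export from the server config below) measures the local
RAID-1 array without GlusterFS in the path; since AFR writes to both servers,
throughput cannot exceed the slower brick. The test file name here is arbitrary:

# on one of the servers, bypassing GlusterFS entirely
time dd if=/dev/zero of=/home/export/localtest.img bs=1M count=200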

How can I modify my settings to get better performance?

Here are my config files:

##### CLIENT #####
volume remote1
  type protocol/client
  option ib-verbs-work-request-send-size 512kB # 2MB 
  option ib-verbs-work-request-send-count 16
  option ib-verbs-work-request-recv-size 256kB 
  option ib-verbs-work-request-recv-count 16
  option transport-type ib-verbs/client # for ib-verbs transport 
  option remote-host 10.0.0.10
  option remote-subvolume brick
end-volume

volume remote2
  type protocol/client
  option ib-verbs-work-request-send-size 512kB # 2MB 
  option ib-verbs-work-request-send-count 16
  option ib-verbs-work-request-recv-size 256kB 
  option ib-verbs-work-request-recv-count 16
  option transport-type ib-verbs/client # for ib-verbs transport 
  option remote-host 10.0.0.11
  option remote-subvolume brick
end-volume

volume mirror0
  type cluster/afr
  subvolumes remote1 remote2
end-volume

volume iot
 type performance/io-threads
  option thread-count 8  # default is 1
  option cache-size 64MB
 subvolumes mirror0
end-volume

volume wb
 type performance/write-behind
  option aggregate-size 512kB   # default is 0 bytes
  option flush-behind on    # default is 'off'
 subvolumes iot
end-volume

volume ra
 type performance/read-ahead
  option page-size  256kB # is the default option
  option page-count 32           # 2 is default option
  option force-atime-update off # default is off
 subvolumes wb
end-volume

volume ioc
 type performance/io-cache
  option cache-size 64MB             # default is 32MB
  option page-size 1MB               #128KB is default option
  #option priority *.h:3,*.html:2,*:1 # default is '*:0'
  option force-revalidate-timeout 2  # default is 1
 subvolumes ra
end-volume
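
For reference, the client volfile above is mounted by pointing glusterfs at the
spec file (the volfile path is an assumption; the mount point is the one used
in the dd test above):

glusterfs -f /etc/glusterfs/glusterfs-client.vol /mnt/glusterfs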


#### SERVER ####
volume brick
  type storage/posix                   # POSIX FS translator
  option directory /home/export        # Export this directory
end-volume

volume posix-locks
  type features/posix-locks
  option mandatory on
  subvolumes brick
end-volume

volume iot
 type performance/io-threads
  option thread-count 8  # default is 1
  option cache-size 64MB
 subvolumes posix-locks
end-volume

volume wb
 type performance/write-behind
  option aggregate-size 512kB
  option flush-behind on
 subvolumes iot
end-volume

volume ra
 type performance/read-ahead
  option page-size  256kB
  option page-count 16
  option force-atime-update off # default is off
 subvolumes wb
end-volume

volume ioc
 type performance/io-cache
  option cache-size 64MB             # default is 32MB
  option page-size 1MB               #128KB is default option
  #option priority *.h:3,*.html:2,*:1 # default is '*:0'
  option force-revalidate-timeout 2  # default is 1
 subvolumes ra
end-volume

volume server
  type protocol/server
  option ib-verbs-work-request-send-size 512kB # 2MB 
  option ib-verbs-work-request-send-count 16
  option ib-verbs-work-request-recv-size 256kB 
  option ib-verbs-work-request-recv-count 16
  option transport-type ib-verbs/server # For ib-verbs transport
  option bind-address 10.0.0.10        # Default is to listen on all interfaces
  option listen-port 6996              # Default is 6996
  subvolumes brick
  option auth.ip.brick.allow * # Allow access to "brick" volume
end-volume
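
The server side is started by pointing glusterfsd at its spec file (again, the
path is an assumption):

glusterfsd -f /etc/glusterfs/glusterfs-server.vol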

#### END OF CONFIG FILES ####

Many Thanks,

Enrico




