[Gluster-users] losing small files attributes with dht

Anand Avati avati at gluster.com
Wed Oct 14 13:34:24 UTC 2009


Are your files present in the backend directories?

Avati

On Wed, Oct 14, 2009 at 6:36 PM, Andre Felipe Machado
<andremachado at techforce.com.br> wrote:
> Hello,
> I am trying to optimize glusterfs (back to debian 2.0.4 official squeeze
> packages installed on lenny) distributed for small files (php sessions).
> But in our last round (noticed after removing the read-ahead translator and setting lookup
> unhashed off) we are struggling with missing files and/or their attributes. A
> simple ls -la shows around 80 files missing and without attributes among 1200
> (by now). They are listed by ls, but they are not reachable anymore. It seems
> they are not being completely deleted...
> Log files do not show anything weird.
> Please, what am I doing wrong?
> Andre Felipe Machado
>
>
> client files:
>
> volume remote1
>  type protocol/client
>  option transport-type tcp
>  option remote-host 10.200.113.170             # IP address of the remote brick
> # option transport.socket.remote-port 6996      # default server port is 6996
>  option ping-timeout 10                        # seconds to wait for a reply
>                                                # from server for each request
>  option transport.socket.nodelay on            # undocumented option for speed
>        # http://gluster.org/pipermail/gluster-users/2009-September/003158.html
>  option remote-subvolume brick                 # name of the remote volume
> end-volume
>
>
>
> volume remote2
>  type protocol/client
>  option transport-type tcp
>  option remote-host 10.200.113.171             # IP address of the remote brick
> # option transport.socket.remote-port 6996      # default server port is 6996
>  option ping-timeout 10                        # seconds to wait for a reply
>                                                # from server for each request
>  option transport.socket.nodelay on            # undocumented option for speed
>         # http://gluster.org/pipermail/gluster-users/2009-September/003158.html
>  option remote-subvolume brick                 # name of the remote volume
> end-volume
>
>
> volume remote3
>  type protocol/client
>  option transport-type tcp
>  option remote-host 10.200.113.172             # IP address of the remote brick
> # option transport.socket.remote-port 6996      # default server port is 6996
>  option ping-timeout 10                        # seconds to wait for a reply
>                                                # from server for each request
>  option transport.socket.nodelay on            # undocumented option for speed
>         # http://gluster.org/pipermail/gluster-users/2009-September/003158.html
>  option remote-subvolume brick                 # name of the remote volume
> end-volume
>
>
> volume remote4
>  type protocol/client
>  option transport-type tcp
>  option remote-host 10.200.113.173             # IP address of the remote brick
> # option transport.socket.remote-port 6996      # default server port is 6996
>  option ping-timeout 10                        # seconds to wait for a reply
>                                                # from server for each request
>  option transport.socket.nodelay on            # undocumented option for speed
>         # http://gluster.org/pipermail/gluster-users/2009-September/003158.html
>  option remote-subvolume brick                 # name of the remote volume
> end-volume
>
>
>
> volume distributed
>  type cluster/distribute
>  option lookup-unhashed off            # off will reduce cpu usage and network traffic
> #  option min-free-disk 20%
>   subvolumes remote1 remote2 remote3 remote4
> end-volume
>
>
>
> ### Performance translators below
>
> ### Add IO-Cache feature
> volume iocache
>  type performance/io-cache
>  option cache-size 1000MB              # default is 32MB
> #  option priority *.h:3,*.html:2,*:1   # default is '*:0'
>  option cache-timeout 1                # default is 1 second
>  subvolumes distributed
> end-volume
>
> ### Add writeback feature
> volume writeback
>  type performance/write-behind
> #  option aggregate-size 2MB    # deprecated option
>  option cache-size 500MB       # default is equal to aggregate-size
>  option flush-behind off       # default is 'off'
>                                # too aggressive and slow background flush!
>                                # do not enable for php sessions behaviour
>  subvolumes iocache
> end-volume
>
>
>
>
>
> servers configuration:
>
>
> ### Export volume "brick" with the contents of /srv/export/php_sessions directory.
> volume posix
>  type storage/posix                            # POSIX FS translator
>  option directory /srv/export/php_sessions     # Export this directory
> end-volume
>
> volume locks
>  type features/locks
>  option mandatory-locks on
>  subvolumes posix
> end-volume
>
> volume iothreads
>  type performance/io-threads
>  option thread-count 16 # default is 16
>  subvolumes locks
> end-volume
>
> volume writebehind
>  type performance/write-behind
>  option cache-size 1000MB      # default is equal to aggregate-size
>  option flush-behind off       # default is 'off'
>                                # too aggressive and slow background flush!
>                                # do not enable for php sessions behaviour
>  subvolumes iothreads
> end-volume
>
> volume brick
>  type performance/io-cache
>  option cache-size 2000MB              # default is 32MB
> #  option priority *.h:3,*.html:2,*:1   # default is '*:0'
>  option cache-timeout 1                # default is 1 second
>  subvolumes writebehind
> end-volume
>
> ### Add network serving capability to above brick.
> volume server
>  type protocol/server
>  option transport-type tcp
>  option transport.socket.nodelay on    # undocumented option for speed
>        # http://gluster.org/pipermail/gluster-users/2009-September/003158.html
>
> # option transport.socket.listen-port 6996              # Default is 6996
>  subvolumes brick
>  option auth.addr.brick.allow * # Allow access to "brick" volume
> end-volume
>
>
>
>
> _______________________________________________
> Gluster-users mailing list
> Gluster-users at gluster.org
> http://gluster.org/cgi-bin/mailman/listinfo/gluster-users
>
>



More information about the Gluster-users mailing list