[Gluster-users] Failsafe replication between two DC - getting trash in files.

Alexander Voronin alexander.voronin at gmail.com
Tue Oct 5 13:11:51 UTC 2010


Hi there. I'm trying to create a failsafe distributed FS using GlusterFS,
following the scheme described in this image -
http://img716.imageshack.us/img716/4896/glusterfs.png

As you can see, any node of the FS may go down and the system will stay stable.
However, I still can't make it work. I'm using the latest git version of
glusterfs. Here are the configs that I'm using on the servers and clients. dfs1,
dfs2, dfs3 and dfs4 are the canonical names for FS1, FS2, FS3 and FS4 in the image,
and of course they have DNS addresses. The difference between the server and client
configs is just the remote-host option in the brick2 and brick3 volumes (see image).

What is really strange is that the system is stable and looks like it is working,
but I'm getting garbage in the files when copying them to the Gluster FS.

#########################################################################
# server config
#########################################################################
# Lowest layer: exports the local directory /storage as the on-disk brick.
volume posix1
  type storage/posix
  option directory /storage
end-volume

# Adds POSIX (fcntl) locking on top of the local posix store. This is the
# volume name that remote servers mount via remote-subvolume.
# NOTE(review): "features/posix-locks" is the legacy name; later GlusterFS
# releases call this translator "features/locks" -- confirm which one the
# git build in use still accepts.
volume brick1
  type features/posix-locks
  subvolumes posix1
end-volume

# --------------------------

# Server-side client volume: mounts "brick1" exported by the server dfs3.
# No remote-port is set, so the connection uses the default GlusterFS port.
# NOTE(review): the mount clients below connect to port 6969 -- confirm the
# servers' actual listening port so this inter-server connection matches it.
volume brick2
   type protocol/client
   option transport-type tcp/client
   option remote-host dfs3
   option remote-subvolume brick1
end-volume

# ---------------------------

# Server-side client volume: mounts "brick1" exported by the server dfs4
# (same structure as brick2, different remote host).
volume brick3
   type protocol/client
   option transport-type tcp/client
   option remote-host dfs4
   option remote-subvolume brick1
end-volume

# ---------------------------

# Server-side AFR: three-way replica over the local brick (brick1) and the
# two remote servers' bricks (brick2 -> dfs3, brick3 -> dfs4). Every write
# arriving at this server is mirrored to all three.
volume replicate
   type cluster/replicate
   subvolumes brick1 brick2 brick3
end-volume

# Export layer. Both "replicate" (mounted by the real clients) and "brick1"
# (mounted by the other servers' brick2/brick3 client volumes) are authorized.
# NOTE(review): the client config connects with remote-port 6969, but this
# stanza sets no transport.socket.listen-port, so the server listens on the
# default port unless glusterfsd is started with a matching port option --
# confirm how the daemon is launched.
volume server
   type protocol/server
   option transport-type tcp/server
   subvolumes replicate
   option auth.addr.brick1.allow 192.168.*,127.0.0.1
   option auth.addr.replicate.allow 192.168.*,127.0.0.1
end-volume

#########################################################################
# client config
#########################################################################

# Client-side connection to server dfs1, mounting its exported "replicate"
# volume on port 6969.
# NOTE(review): the server volfile above does not set a listen-port -- confirm
# the servers really listen on 6969, otherwise this connection cannot be made.
volume fs1-1
    type protocol/client
    option transport-type tcp
    option remote-host dfs1
    option transport.socket.nodelay on
    option transport.socket.remote-port 6969
    option remote-subvolume replicate
end-volume

# Client-side connection to server dfs3, mounting its exported "replicate"
# volume on port 6969 (same structure as fs1-1, different remote host).
volume fs2-1
    type protocol/client
    option transport-type tcp
    option remote-host dfs3
    option transport.socket.nodelay on
    option transport.socket.remote-port 6969
    option remote-subvolume replicate
end-volume

# Client-side AFR over the two servers' "replicate" exports.
# NOTE(review): fs1-1 and fs2-1 each mount a volume that is ALREADY a
# server-side replica spanning the other servers' bricks, so this stacks
# replication on top of replication, with the two layers writing to
# overlapping backend bricks. This double-AFR topology is a plausible cause
# of the corruption ("trash in files") described above -- confirm the
# intended design; normally replication is done on exactly one side.
volume mirror-0
    type cluster/replicate
    subvolumes fs1-1 fs2-1
end-volume

# Performance translator: prefetches up to 4 pages ahead on sequential reads.
volume readahead
    type performance/read-ahead
    option page-count 4
    subvolumes mirror-0
end-volume

# Performance translator: caches read data for up to cache-timeout seconds.
volume iocache
    type performance/io-cache
    # Fixed: volfiles are parsed as plain text, so the shell command
    # substitution the original embedded here (an `echo $(( $(grep 'MemTotal'
    # /proc/meminfo | sed ...` pipeline meant to compute a fraction of RAM)
    # is never evaluated by glusterfs and yields an invalid option value.
    # cache-size must be a literal size string; adjust 64MB to taste.
    option cache-size 64MB
    option cache-timeout 1
    subvolumes readahead
end-volume

# Performance translator: aggregates writes before flushing them downstream.
volume writebehind
    type performance/write-behind
    option cache-size 32MB
    # "#quickread" is a leftover note: a quick-read translator was apparently
    # considered as the subvolume here at some point; iocache is what is used.
    subvolumes iocache #quickread
end-volume

# Top of the client stack: caches stat() metadata from directory listings.
# As the last volume in the file, this is the one the mount uses.
volume statprefetch
    type performance/stat-prefetch
    subvolumes writebehind
end-volume


More information about the Gluster-users mailing list