[Gluster-users] Shared files occasionally unreadable from some nodes

Jonathan nilsson jnilsson.edge at gmail.com
Thu Jun 24 15:52:29 UTC 2010


Hello all,

I am new to Gluster and I've been seeing some inconsistent behavior. When I
write files to the Gluster mount, about 1 in 1000 ends up unreadable on one
node. From that node I can see the file with ls, and ls reports the correct
size, but running cat on it produces no output and vim shows it as full of
^@ (NUL) characters. If I read the same file from another node it is fine.
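
In case it helps with diagnosis, this is the kind of check I can run
directly against the backend bricks. The file path below is just a
placeholder for one of the affected files, and gluster1/gluster2 are the
pair behind mirror-0 in the config further down; my understanding is that a
non-zero trusted.afr.* value would mean that copy is still pending
self-heal, but please correct me if that's wrong.

# Compare the two replicas directly on the bricks (placeholder path):
ssh gluster1 "md5sum /export/warehouse/some/dir/badfile"
ssh gluster2 "md5sum /export/warehouse/some/dir/badfile"

# Dump the replicate (AFR) changelog xattrs for the same file on each brick:
ssh gluster1 "getfattr -d -m trusted.afr -e hex /export/warehouse/some/dir/badfile"
ssh gluster2 "getfattr -d -m trusted.afr -e hex /export/warehouse/some/dir/badfile"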

After some Googling I read that an ls -lR over the mount can fix similar
problems, but it has had no effect for me. Running touch on the affected
file does restore its contents. I am running GlusterFS 3.0.4 on RHEL 5.4,
and I generated the config files with the volgen tool without making any
changes.
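
Concretely, this is roughly what I have been running from a client. The
mount point and file path are only placeholders, and the last command is a
stat-based walk that I believe forces a lookup on every file (and so should
kick off self-heal), though I may be misreading what I found.

# Recursive listing over the mount: no effect, the bad copy stays empty
ls -lR /mnt/warehouse > /dev/null

# Touching the affected file does bring its contents back on that node
touch /mnt/warehouse/some/dir/badfile

# Full walk that stats everything, in case a plain ls -lR isn't enough
find /mnt/warehouse -print0 | xargs --null stat > /dev/null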

Is this a known issue or something that could've happened if I screwed up
the configuration?

Here is my glusterfs.vol
## file auto generated by /usr/bin/glusterfs-volgen (mount.vol)
# Cmd line:
# $ /usr/bin/glusterfs-volgen -n warehouse --raid 1 gluster1:/export/warehouse gluster2:/export/warehouse gluster3:/export/warehouse gluster4:/export/warehouse

# RAID 1
# TRANSPORT-TYPE tcp
volume gluster4-1
    type protocol/client
    option transport-type tcp
    option remote-host gluster4
    option transport.socket.nodelay on
    option transport.remote-port 6996
    option remote-subvolume brick1
end-volume

volume gluster2-1
    type protocol/client
    option transport-type tcp
    option remote-host gluster2
    option transport.socket.nodelay on
    option transport.remote-port 6996
    option remote-subvolume brick1
end-volume

volume gluster3-1
    type protocol/client
    option transport-type tcp
    option remote-host gluster3
    option transport.socket.nodelay on
    option transport.remote-port 6996
    option remote-subvolume brick1
end-volume

volume gluster1-1
    type protocol/client
    option transport-type tcp
    option remote-host gluster1
    option transport.socket.nodelay on
    option transport.remote-port 6996
    option remote-subvolume brick1
end-volume

volume mirror-0
    type cluster/replicate
    subvolumes gluster1-1 gluster2-1
end-volume

volume mirror-1
    type cluster/replicate
    subvolumes gluster3-1 gluster4-1
end-volume

volume distribute
    type cluster/distribute
    subvolumes mirror-0 mirror-1
end-volume

volume readahead
    type performance/read-ahead
    option page-count 4
    subvolumes distribute
end-volume

volume iocache
    type performance/io-cache
    option cache-size `echo $(( $(grep 'MemTotal' /proc/meminfo | sed 's/[^0-9]//g') / 5120 ))`MB
    option cache-timeout 1
    subvolumes readahead
end-volume

volume quickread
    type performance/quick-read
    option cache-timeout 1
    option max-file-size 64kB
    subvolumes iocache
end-volume

volume writebehind
    type performance/write-behind
    option cache-size 4MB
    subvolumes quickread
end-volume

volume statprefetch
    type performance/stat-prefetch
    subvolumes writebehind
end-volume
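
For completeness, the clients mount this volume with the stock 3.0 client;
the volfile location and mount point below are from memory, so treat them
as placeholders rather than my exact setup.

glusterfs -f /etc/glusterfs/glusterfs.vol /mnt/warehouse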

and here is my glusterfsd.vol

## file auto generated by /usr/bin/glusterfs-volgen (export.vol)
# Cmd line:
# $ /usr/bin/glusterfs-volgen -n warehouse --raid 1 gluster1:/export/warehouse gluster2:/export/warehouse gluster3:/export/warehouse gluster4:/export/warehouse

volume posix1
  type storage/posix
  option directory /export/warehouse
end-volume

volume locks1
    type features/locks
    subvolumes posix1
end-volume

volume brick1
    type performance/io-threads
    option thread-count 8
    subvolumes locks1
end-volume

volume server-tcp
    type protocol/server
    option transport-type tcp
    option auth.addr.brick1.allow *
    option transport.socket.listen-port 6996
    option transport.socket.nodelay on
    subvolumes brick1
end-volume
