[Gluster-users] Probable file corruption on a simple setup with Debian Lenny?

Giovanni Toraldo scurippio at gmail.com
Tue May 4 15:31:34 UTC 2010


Hi,

I am evaluating GlusterFS for the first time, and I have a problem with
a pretty simple setup:

I have 1 server and 9 clients, all running Debian Lenny, with glusterfs
installed from backports (2.0.9-3~bpo50+1).

GlusterFS is used to export the /home directory to the clients.

What I see is that, at random, users on the clients get their Iceweasel
profiles locked out: "Another instance of Iceweasel is already running,
try to reboot, etc.". There is no sign of any running iceweasel process,
nor of any segfault. I usually recover from this by running 'rm
.mozilla/firefox/Profiles/*/.parent' in the home of the user reporting
the problem. Today an OpenOffice.org instance also kept crashing on
every launch, and I resolved it with 'rm -fr .openoffice.org'.
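
For reference, here are those recovery steps in one place (USER is just
a placeholder for the affected account; the directory under Profiles/*
varies per user):

# stale Iceweasel profile lock: remove the leftover lock file
rm /home/USER/.mozilla/firefox/Profiles/*/.parent
# broken OpenOffice.org settings: wipe them, they are recreated on next launch
rm -fr /home/USER/.openoffice.org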

I constantly check both the server and client logs, without seeing any
warnings or errors.

Any hints? Is anyone using GlusterFS from Debian backports
successfully?

Thanks.




# mount
/dev/md0 on / type xfs (rw,relatime)
tmpfs on /lib/init/rw type tmpfs (rw,nosuid,mode=0755)
proc on /proc type proc (rw,noexec,nosuid,nodev)
sysfs on /sys type sysfs (rw,noexec,nosuid,nodev)
procbususb on /proc/bus/usb type usbfs (rw)
udev on /dev type tmpfs (rw,mode=0755)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev)
devpts on /dev/pts type devpts (rw,noexec,nosuid,gid=5,mode=620)
fusectl on /sys/fs/fuse/connections type fusectl (rw)
/dev/sda1 on /boot type ext3 (rw,relatime)
/dev/md3 on /home type xfs (rw,relatime)
rpc_pipefs on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
nfsd on /proc/fs/nfsd type nfsd (rw)
server-nfs.lan on /media/gluster type fuse.glusterfs (rw,allow_other,default_permissions,max_read=131072)





# cat /etc/glusterfs/glusterfsd.vol
volume posix
  type storage/posix                   # POSIX FS translator
  option directory /home/nfs_homes/
end-volume

volume locks
  type features/locks
  subvolumes posix
end-volume

volume brick
  type performance/io-threads
  option thread-count 8
  subvolumes locks
end-volume

### Add network serving capability to above brick.
volume server
  type protocol/server
  option transport-type tcp
  subvolumes brick
  option client-volume-filename /etc/glusterfs/glusterfs.vol
  option auth.addr.brick.allow * # Allow access to "brick" volume
end-volume
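
As far as I can tell from the Debian init script, the server side boils
down to running the daemon against this volfile, roughly:

# start the server daemon with the volfile above (log location is the default)
glusterfsd -f /etc/glusterfs/glusterfsd.vol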





# cat /etc/glusterfs/glusterfs.vol
volume remote1
  type protocol/client
  option transport-type tcp
  option remote-host server-nfs.lan    # host name of the remote brick
  option remote-subvolume brick        # name of the remote volume
end-volume
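
This should correspond to the fuse.glusterfs line in the mount output
above; done by hand, the mount would be roughly one of the following
(reconstructed from the mount output, so the exact invocation may
differ):

# fetch the volfile from the server (matches the server-nfs.lan device above)
glusterfs --volfile-server=server-nfs.lan /media/gluster
# or use the local volfile directly
glusterfs -f /etc/glusterfs/glusterfs.vol /media/gluster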



-- 
Giovanni Toraldo


