[Gluster-users] Gluster crashing with posix-locks translator
Will Rouesnel
electricitylikesme at hotmail.com
Mon Jul 7 07:34:21 UTC 2008
I'm trying to run glusterfs with the posix-locks translators on my bricks,
but after a certain amount of time (or number of file accesses) they seem to
bring down the server-side process. My logfiles show the following each time
it goes down:
TLA Repo Revision: glusterfs--mainline--3.0--patch-198
Time : 2008-07-07 17:08:33
Signal Number : 11
glusterfs -f /usr/etc/glusterfs/glusterfs-client.vol -l
/usr/var/log/glusterfs/glusterfs.log -L WARNING /mnt/storage
volume fuse
type mount/fuse
option direct-io-mode 1
option entry-timeout 1
option attr-timeout 1
option mount-point /mnt/storage
subvolumes unify
end-volume
volume unify
type cluster/unify
option alu.limits.min-free-disk 1
option alu.stat-refresh.num-file-create 1
option alu.disk-usage.exit-threshold 4GB
option alu.disk-usage.entry-threshold 10GB
option alu.order disk-usage
option scheduler alu
option namespace brick-ns
subvolumes brick0 brick1 brick2 brick3 brick4 brick5 brick6
end-volume
volume brick-ns
type protocol/client
option version 1.4.0qa22
option remote-subvolume brick-ns-locks
option remote-host 127.0.0.1
option transport-type tcp
end-volume
volume brick6
type protocol/client
option version 1.4.0qa22
option remote-subvolume brick6-locks
option remote-host 127.0.0.1
option transport-type tcp
end-volume
volume brick5
type protocol/client
option version 1.4.0qa22
option remote-subvolume brick5-locks
option remote-host 127.0.0.1
option transport-type tcp
end-volume
volume brick4
type protocol/client
option version 1.4.0qa22
option remote-subvolume brick4-locks
option remote-host 127.0.0.1
option transport-type tcp
end-volume
volume brick3
type protocol/client
option version 1.4.0qa22
option remote-subvolume brick3-locks
option remote-host 127.0.0.1
option transport-type tcp
end-volume
volume brick2
type protocol/client
option version 1.4.0qa22
option remote-subvolume brick2-locks
option remote-host 127.0.0.1
option transport-type tcp
end-volume
volume brick1
type protocol/client
option version 1.4.0qa22
option remote-subvolume brick1-locks
option remote-host 127.0.0.1
option transport-type tcp
end-volume
volume brick0
type protocol/client
option version 1.4.0qa22
option remote-subvolume brick0-locks
option remote-host 127.0.0.1
option transport-type tcp
end-volume
frame : type(1) op(30)
frame : type(2) op(0)
frame : type(2) op(0)
frame : type(2) op(0)
frame : type(2) op(0)
frame : type(2) op(0)
frame : type(2) op(0)
frame : type(2) op(0)
frame : type(2) op(0)
2008-07-07 17:08:33 C [common-utils.c:155:gf_print_bytes] : xfer ==
31617539801, rcvd == 407355746
/lib/libc.so.6[0x2b239b3e0110]
/usr/lib/glusterfs/1.4.0qa22/xlator/protocol/client.so(client_fchmod_cbk+0x3
3e)[0x2aaaaaab87ce]
/usr/lib/glusterfs/1.4.0qa22/xlator/protocol/client.so(protocol_client_inter
pret+0x1ff)[0x2aaaaaab1e4f]
/usr/lib/glusterfs/1.4.0qa22/xlator/protocol/client.so(protocol_client_polli
n+0xbe)[0x2aaaaaab208e]
/usr/lib/glusterfs/1.4.0qa22/xlator/protocol/client.so(notify+0x18f)[0x2aaaa
aab2d3f]
/usr/lib/glusterfs/1.4.0qa22/transport/tcp.so[0x2aaaab01644a]
/usr/lib/libglusterfs.so.0[0x2b239b08da91]
[glusterfs](main+0x66a)[0x402bea]
/lib/libc.so.6(__libc_start_main+0xda)[0x2b239b3cd4ca]
[glusterfs][0x4020aa]
My Server side volume spec looks like this:
### file: glusterfs-server.vol
# Local volumes exported by the server
volume brick0
type storage/posix # POSIX FS translator
option directory /mnt/brick0 # Export this directory
end-volume
volume brick0-locks
type features/posix-locks
subvolumes brick0
option mandatory on
end-volume
volume brick1
type storage/posix # POSIX FS translator
option directory /mnt/brick1 # Export this directory
end-volume
volume brick1-locks
type features/posix-locks
subvolumes brick1
option mandatory on
end-volume
volume brick2
type storage/posix # POSIX FS translator
option directory /mnt/brick2 # Export this directory
end-volume
volume brick2-locks
type features/posix-locks
subvolumes brick2
option mandatory on
end-volume
volume brick3
type storage/posix # POSIX FS translator
option directory /mnt/brick3 # Export this directory
end-volume
volume brick3-locks
type features/posix-locks
subvolumes brick3
option mandatory on
end-volume
volume brick4
type storage/posix # POSIX FS translator
option directory /mnt/brick4 # Export this directory
end-volume
volume brick4-locks
type features/posix-locks
subvolumes brick4
option mandatory on
end-volume
volume brick5
type storage/posix # POSIX FS translator
option directory /mnt/brick5 # Export this directory
end-volume
volume brick5-locks
type features/posix-locks
subvolumes brick5
option mandatory on
end-volume
volume brick6
type storage/posix # POSIX FS translator
option directory /mnt/brick6 # Export this directory
end-volume
volume brick6-locks
type features/posix-locks
subvolumes brick6
option mandatory on
end-volume
volume brick-ns
type storage/posix # POSIX FS translator
option directory /mnt/brick-ns # Export this directory
end-volume
volume brick-ns-locks
type features/posix-locks
subvolumes brick-ns
option mandatory on
end-volume
# Configure the glusterfs server side features
volume server
type protocol/server
option transport-type tcp/server
option client-volume-filename
/usr/etc/glusterfs/glusterfs-client.vol
subvolumes brick0-locks brick1-locks brick2-locks brick3-locks
brick4-locks brick5-locks brick6-locks brick-ns-locks
option auth.ip.brick0-locks.allow 127.0.0.1,192.168.1.1
option auth.ip.brick1-locks.allow 127.0.0.1,192.168.1.1
option auth.ip.brick2-locks.allow 127.0.0.1,192.168.1.1
option auth.ip.brick3-locks.allow 127.0.0.1,192.168.1.1
option auth.ip.brick4-locks.allow 127.0.0.1,192.168.1.1
option auth.ip.brick5-locks.allow 127.0.0.1,192.168.1.1
option auth.ip.brick6-locks.allow 127.0.0.1,192.168.1.1
option auth.ip.brick-ns-locks.allow 127.0.0.1,192.168.1.1
end-volume
Any ideas as to why this is happening? If I run without posix-locks, gluster
kicks out numerous errors recommending I enable them, and I think I need
them, since my Apple clients apparently can't use the cluster because they
cannot modify files on it (neither can anyone else, but that's less of an
issue — still annoying though).
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://supercolony.gluster.org/pipermail/gluster-users/attachments/20080707/c3a5dcb0/attachment.html>
More information about the Gluster-users
mailing list