[Gluster-devel] 1.4.0rc6 glusterfsd crash report

Kevan Benson kbenson at a-1networks.com
Tue Dec 23 07:05:51 UTC 2008


The volume is currently used as a BackupPC back-end, which does a lot of 
hard linking; the linking looks to be related to the crash.  It was 
stable under an older 1.3-series release (quite old, actually).  
Glusterfsd is running on CentOS 4.7, kernel 2.6.9-78.0.8.ELsmp, x86.  
The client is also CentOS 4.7, but with a 2.6.9-78.0.8.ELxenU x86_64 
kernel (a virtual system).  Both are running fuse 2.7.3glfs10.  Let me 
know if any needed information is missing from this, or if I should be 
reporting it elsewhere.
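
For context on the linking: BackupPC pools identical files by 
hard-linking entries in the per-host pc/ tree to checksum-named files 
under cpool/ (the two path families that show up in the log below), so 
the workload is essentially link(2) in a tight loop.  A minimal sketch 
of the pattern follows; this is not BackupPC source, the mount point is 
an assumption, and the two paths are lifted from the log entry below:

#include <stdio.h>
#include <unistd.h>

int main (void)
{
        /* /mnt/backuppc is a hypothetical mount point; the two paths
           come from the server log (pool file and backup-tree name) */
        const char *pool = "/mnt/backuppc/cpool/2/9/3/293ae36a31e549e809002b5ebe260a55";
        const char *tree = "/mnt/backuppc/pc/php5-d/136/f%2f/fusr/fbin/fup2date-nox";

        if (link (pool, tree) == -1)    /* hard link, no data copy */
                perror ("link");
        return 0;
}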

From the glusterfsd log:

2008-12-22 20:14:31 E [server-helpers.c:98:server_loc_fill] backuppc: paths differ for inode(26017826): client path = /pc/php5-d/136/f%2f/fusr/fbin/fup2date-nox. dentry path = /cpool/2/9/3/293ae36a31e549e809002b5ebe260a55
2008-12-22 20:14:32 E [inode.c:860:inode_path] backuppc/inode: no dentry for non-root inode 32376402
2008-12-22 20:14:32 E [server-helpers.c:89:server_loc_fill] backuppc: failed to build path for 32376402: No such file or directory
pending frames:
frame : type(1) op(8)
frame : type(1) op(8)

Signal received: 11
configuration details:argp 1
backtrace 1
dlfcn 1
fdatasync 1
libpthread 1
llistxattr 1
setfsid 1
spinlock 1
epoll.h 1
xattr.h 1
tv_nsec 1
package-string: glusterfs 1.4.0rc6
/lib/tls/libc.so.6[0x991918]
/usr/lib/glusterfs/1.4.0rc6/xlator/protocol/server.so(server_link_resume+0x1d6)[0xce0d17]
/usr/lib/libglusterfs.so.0(call_resume+0x327)[0x161ecd]
/usr/lib/glusterfs/1.4.0rc6/xlator/protocol/server.so(server_stub_resume+0x7b)[0xcd9007]
/usr/lib/glusterfs/1.4.0rc6/xlator/protocol/server.so[0xce4e35]
/usr/lib/libglusterfs.so.0[0x15488a]
/usr/lib/libglusterfs.so.0[0x15488a]
/usr/lib/glusterfs/1.4.0rc6/xlator/storage/posix.so(posix_lookup+0x18b)[0x8a3f24]
/usr/lib/libglusterfs.so.0(default_lookup+0xa6)[0x154938]
/usr/lib/libglusterfs.so.0(default_lookup+0xa6)[0x154938]
/usr/lib/glusterfs/1.4.0rc6/xlator/protocol/server.so(do_path_lookup+0x497)[0xce552e]
/usr/lib/glusterfs/1.4.0rc6/xlator/protocol/server.so(server_link+0x17a)[0xce0eae]
/usr/lib/glusterfs/1.4.0rc6/xlator/protocol/server.so(protocol_server_interpret+0x1e5)[0xce365c]
/usr/lib/glusterfs/1.4.0rc6/xlator/protocol/server.so(protocol_server_pollin+0xad)[0xce48b4]
/usr/lib/glusterfs/1.4.0rc6/xlator/protocol/server.so(notify+0x54)[0xce4b74]
/usr/lib/glusterfs/1.4.0rc6/transport/socket.so[0x6d6f70]
/usr/lib/libglusterfs.so.0[0x1670b7]
/usr/lib/libglusterfs.so.0(event_dispatch+0x21)[0x16740a]
/usr/sbin/glusterfsd(main+0x797)[0x804aca1]
/lib/tls/libc.so.6(__libc_start_main+0xd3)[0x97edf3]
/usr/sbin/glusterfsd[0x8049a01]
---------



Backtrace:

#0  inode_ref (inode=0x0) at inode.c:394
#1  0x00ce0d17 in server_link_resume (frame=0xad016a18, this=0x9a05f28, oldloc=0xad01d0a8, newloc=0x0) at server-protocol.c:5976
#2  0x00161ecd in call_resume (stub=0xad01d090) at call-stub.c:2174
#3  0x00cd9007 in server_stub_resume (stub=0xad01d090, op_ret=0, op_errno=22, inode=0xad001930, parent=0x9a94278) at server-protocol.c:3249
#4  0x00ce4e35 in __do_path_resolve_cbk (frame=0xad011e10, cookie=0xad009310, this=0x9a05f28, op_ret=0, op_errno=22, inode=0xad001930, stbuf=0x0, dict=0x0) at server-dentry.c:265
#5  0x0015488a in default_lookup_cbk (frame=0xae6ff4, cookie=0xad04a110, this=0x9a05d10, op_ret=0, op_errno=0, inode=0x0, buf=0x0, dict=0x0) at defaults.c:46
#6  0x0015488a in default_lookup_cbk (frame=0xae6ff4, cookie=0xad01e2b8, this=0x9a05b78, op_ret=0, op_errno=0, inode=0x0, buf=0x0, dict=0x0) at defaults.c:46
#7  0x008a3f24 in posix_lookup (frame=0xad01e2b8, this=0x9a059c0, loc=0xad02d928, need_xattr=0) at posix.c:277
#8  0x00154938 in default_lookup (frame=0xad04a110, this=0x9a05b78, loc=0x0, need_xattr=0) at defaults.c:61
#9  0x00154938 in default_lookup (frame=0xad009310, this=0x9a05d10, loc=0x0, need_xattr=0) at defaults.c:61
#10 0x00ce552e in do_path_lookup (stub=0xad01d090, loc=0xad01ce90) at server-dentry.c:338
#11 0x00ce0eae in server_link (frame=0xad016a18, this=0x9a05d10, hdr=0xad041698, hdrlen=126, buf=0x0, buflen=0) at server-protocol.c:6056
#12 0x00ce365c in protocol_server_interpret (this=0x9a05f28, trans=0x9a093e8, hdr_p=0xad041698 "", hdrlen=126, buf=0x0, buflen=0) at server-protocol.c:7238
#13 0x00ce48b4 in protocol_server_pollin (this=0x9a05f28, trans=0x9a093e8) at server-protocol.c:7681
#14 0x00ce4b74 in notify (this=0xae6ff4, event=0, data=0xad016a18) at server-protocol.c:7733
#15 0x006d6f70 in socket_event_handler (fd=7, idx=1, data=0x9a093e8, poll_in=1, poll_out=0, poll_err=0) at socket.c:678
#16 0x001670b7 in event_dispatch_epoll (event_pool=0x9a00198) at event.c:804
#17 0x0016740a in event_dispatch (event_pool=0xad016a18) at event.c:975
#18 0x0804aca1 in main (argc=3, argv=0xbfff4bd4) at glusterfsd.c:1068
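
For what it's worth, my reading of the trace: the path build for the 
link target fails (the "failed to build path for 32376402" error 
above), server_link_resume is then resumed with newloc=0x0 (frame #1), 
and inode_ref dereferences that NULL in frame #0.  Below is a 
self-contained sketch of the mechanism, with the kind of guard that 
would fail the op instead of crashing; the names mirror the backtrace, 
but the bodies are stand-ins rather than the actual 1.4.0rc6 source:

#include <stdio.h>
#include <stdlib.h>

typedef struct inode { int ref; } inode_t;
typedef struct loc   { inode_t *inode; } loc_t;

static inode_t *inode_ref (inode_t *inode)
{
        inode->ref++;           /* frame #0: segfaults when inode == 0x0 */
        return inode;
}

static int server_link_resume (loc_t *oldloc, loc_t *newloc)
{
        /* hypothetical guard: unwind with an error when resolution of
           the link target failed, instead of reaching inode_ref () */
        if (newloc == NULL || newloc->inode == NULL) {
                fprintf (stderr, "link target unresolved, failing op\n");
                return -1;
        }
        inode_ref (newloc->inode);
        return 0;
}

int main (void)
{
        inode_t pool_inode = { 1 };
        loc_t   oldloc     = { &pool_inode };

        /* newloc == NULL reproduces the state seen in frame #1 */
        return server_link_resume (&oldloc, NULL) ? EXIT_FAILURE : EXIT_SUCCESS;
}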


server.vol (only the backuppc volume has any real use):

# Default data store volume
volume ds-raw
         type storage/posix
         option directory /data/glusterfs/share
end-volume

volume ds
         type features/posix-locks
         subvolumes ds-raw
end-volume

volume ds-threaded
         type performance/io-threads
         option thread-count 8
         option cache-size 64MB
         subvolumes ds
end-volume

# BackupPC export
volume backuppc-raw
         type storage/posix
         option directory /data/backuppc/
end-volume

volume backuppc-lock
         type features/posix-locks
         subvolumes backuppc-raw
end-volume

volume backuppc
         type performance/io-threads
         option thread-count 8
         option cache-size 64MB
         subvolumes backuppc-lock
end-volume

# Export of volumes
volume server
         type protocol/server
         option transport-type tcp
         subvolumes ds-threaded backuppc
         option auth.addr.ds-threaded.allow *
         option auth.addr.backuppc.allow *
end-volume


client.vol:

volume backuppc
         type protocol/client
         option transport-type tcp
         option transport-timeout 10
         option remote-host 10.142.11.11
         option remote-subvolume backuppc
end-volume


-- 

-Kevan Benson
-A-1 Networks




