[Gluster-users] storage/bdb glusterfs crash at dir creation or file add
Renatas Ulevičius
renaulev at etalink.lt
Sat Mar 21 18:47:36 UTC 2009
Hello, I want to use storage/bdb for a web cluster (many small files). I found no examples for this storage type in the documentation, so I wrote my own config. Unfortunately glusterfs crashes as soon as I try to create a directory or add a file.
Does anyone have a cluster running with storage/bdb?
My config file, identical on both servers:
---
volume bdb
  type storage/bdb
  option directory /cluster_dir
end-volume

volume locks
  type features/locks
  subvolumes bdb
end-volume

volume brick
  type performance/io-threads
  subvolumes locks
end-volume

volume server
  type protocol/server
  option transport-type tcp
  option auth.addr.brick.allow *
# option auth.login.foo-brick.allow foo
# option auth.login.foo.password foo-password
  subvolumes brick
end-volume

volume remote1
  type protocol/client
  option transport-type tcp
  option remote-host 192.168.0.5
  option transport.socket.listen-port 1023
  option remote-subvolume brick
# option username foo
# option password foo-password
end-volume

volume remote2
  type protocol/client
  option transport-type tcp
  option remote-host 192.168.0.230
  option transport.socket.listen-port 1023
  option remote-subvolume brick
# option username foo
# option password foo-password
end-volume

volume replicate
  type cluster/replicate
  subvolumes remote1 remote2
  option scheduler nufa       # I think "scheduler" is a cluster/unify option, not replicate; left in as I had it
# option nufa.local-volume-name brick
end-volume

volume writebehind
  type performance/write-behind
  option aggregate-size 128KB
  option window-size 1MB
  subvolumes replicate
end-volume

volume cache
  type performance/io-cache
  option cache-size 512MB
  subvolumes writebehind
end-volume

volume readahead
  type performance/read-ahead
  option page-size 65536      # unit in bytes
  option page-count 16        # cache per file = (page-count x page-size)
  subvolumes cache            # was "subvolumes writebehind", which left the cache volume dangling
end-volume
---
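Since there are no storage/bdb examples anywhere, maybe the problem is elsewhere in my stack. A minimal single-brick volfile along these lines (an untested sketch; /test_bdb_dir is an assumed empty directory) should show whether storage/bdb alone survives a mkdir, without replicate, locks, or the performance translators in the picture:

---
# minimal sketch: storage/bdb exported bare, no other translators
# /test_bdb_dir is an assumed empty directory
volume bdb-test
  type storage/bdb
  option directory /test_bdb_dir
end-volume
---

Mounting that volfile directly and repeating the mkdir would tell whether bdb itself handles directory creation, or whether the crash only happens with replicate in the stack.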
After running mkdir tt, glusterfs crashes on all servers with:
2009-03-21 18:55:36 D [fuse-bridge.c:457:fuse_lookup] glusterfs-fuse: 49: LOOKUP /tt
2009-03-21 18:55:36 D [inode.c:471:__inode_create] fuse/inode: create inode(0)
2009-03-21 18:55:36 D [inode.c:293:__inode_activate] fuse/inode: activating inode(0), lru=0/0 active=2 purge=0
2009-03-21 18:55:36 D [bdb-ll.c:468:bdb_db_get] bdb-ll: failed to do DB->get() for key: tt. key not found in storage DB
2009-03-21 18:55:36 D [bdb.c:1098:bdb_lookup] bdb: returning ENOENT for /tt
2009-03-21 18:55:36 D [name.c:214:af_inet_client_get_remote_sockaddr] remote2: option remote-port missing in volume remote2. Defaulting to 6996
2009-03-21 18:55:36 D [common-utils.c:85:gf_resolve_ip6] resolver: flushing DNS cache
2009-03-21 18:55:36 D [common-utils.c:92:gf_resolve_ip6] resolver: DNS cache not present, freshly probing hostname: 192.168.0.230
2009-03-21 18:55:36 D [common-utils.c:129:gf_resolve_ip6] resolver: returning ip-192.168.0.230 (port-6996) for hostname: 192.168.0.230 and port: 6996
2009-03-21 18:55:36 D [fuse-bridge.c:408:fuse_entry_cbk] glusterfs-fuse: 49: LOOKUP() /tt => -1 (No such file or directory)
2009-03-21 18:55:36 D [inode.c:336:__inode_retire] fuse/inode: retiring inode(0) lru=0/0 active=1 purge=1
2009-03-21 18:55:36 D [socket.c:175:__socket_disconnect] remote2: shutdown() returned -1. set connection state to -1
2009-03-21 18:55:36 D [client-protocol.c:6046:protocol_client_cleanup] remote2: cleaning up state in transport object 0x805ab30
2009-03-21 18:55:36 D [socket.c:90:__socket_rwv] remote2: EOF from peer
2009-03-21 18:55:36 D [socket.c:561:__socket_proto_state_machine] remote2: read (Transport endpoint is not connected) in state 1 ()
2009-03-21 18:55:36 D [client-protocol.c:6046:protocol_client_cleanup] remote2: cleaning up state in transport object 0x805ab30
2009-03-21 18:55:36 D [inode.c:471:__inode_create] fuse/inode: create inode(0)
2009-03-21 18:55:36 D [inode.c:293:__inode_activate] fuse/inode: activating inode(0), lru=0/0 active=2 purge=0
2009-03-21 18:55:36 D [fuse-bridge.c:1133:fuse_mkdir] glusterfs-fuse: 50: MKDIR /tt
pending frames:
frame : type(1) op(MKDIR)
frame : type(1) op(MKDIR)
patchset: cb602a1d7d41587c24379cb2636961ab91446f86 +
signal received: 11
configuration details:argp 1
backtrace 1
db.h 1
dlfcn 1
fdatasync 1
libpthread 1
llistxattr 1
setfsid 1
spinlock 1
epoll.h 1
xattr.h 1
st_atim.tv_nsec 1
package-string: glusterfs 2.0.0rc4
[0xb7f51420]
/glusterfs/lib/libglusterfs.so.0(default_xattrop+0x117)[0xb7f2e529]
/glusterfs/lib/libglusterfs.so.0(default_xattrop+0x117)[0xb7f2e529]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/protocol/client.so(client_xattrop+0x1cb)[0xb7b5061c]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/cluster/replicate.so(afr_changelog_pre_op+0x88f)[0xb7b2ae46]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/cluster/replicate.so[0xb7b2b20a]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/cluster/replicate.so(afr_lock_cbk+0x172)[0xb7b2aff1]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/protocol/client.so[0xb7b48a6e]
/glusterfs/lib/libglusterfs.so.0[0xb7f2ed09]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/features/locks.so(pl_entrylk+0x32c)[0xb7b8abdb]
/glusterfs/lib/libglusterfs.so.0(default_entrylk+0x11e)[0xb7f2ee2e]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/protocol/client.so(client_entrylk+0x168)[0xb7b54352]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/cluster/replicate.so[0xb7b2bae7]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/cluster/replicate.so(afr_lock+0x2d)[0xb7b2bb29]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/cluster/replicate.so(afr_transaction+0xd1)[0xb7b2bcf4]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/cluster/replicate.so(afr_mkdir+0x36e)[0xb7b1b3ae]
/glusterfs/lib/libglusterfs.so.0(default_mkdir+0x10d)[0xb7f2c700]
/glusterfs/lib/libglusterfs.so.0(default_mkdir+0x10d)[0xb7f2c700]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/mount/fuse.so[0xb7ae78a2]
/usr/local/lib/libfuse.so.2[0xb7aceebf]
/usr/local/lib/libfuse.so.2[0xb7acfc0d]
/usr/local/lib/libfuse.so.2(fuse_session_process+0x26)[0xb7ad14d6]
/glusterfs/lib/glusterfs/2.0.0rc4/xlator/mount/fuse.so[0xb7aed67d]
/lib/tls/libpthread.so.0[0xb7e220bd]
/lib/tls/libc.so.6(__clone+0x5e)[0xb7db701e]
---------
Segmentation fault (core dumped)

Reading the trace, the crash seems to be inside the xattrop call that replicate issues as its changelog pre-op before the MKDIR (afr_changelog_pre_op -> client_xattrop -> default_xattrop), so maybe storage/bdb does not support the extended-attribute operations replicate needs?
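Since it dumped core, I can also pull a fuller trace with gdb if that helps, along these lines (the binary path assumes my /glusterfs install prefix, and the core file name may differ on your distro):

---
# assuming an unstripped binary under the /glusterfs prefix
# and the core file in the current directory
gdb /glusterfs/sbin/glusterfs core
(gdb) thread apply all bt full
---

Happy to post that output if anyone wants it.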