[Gluster-users] glusterfsd crashed
Greg
greg at easyflirt.com
Wed Apr 1 15:36:21 UTC 2009
Hi,
I've just upgraded to 2.0.0-rc7, added the read-ahead + io-cache
translators on the server side, and got this error on both servers:
2009-04-01 17:26:21 N [server-protocol.c:7513:mop_setvolume] server: accepted client from 10.0.8.3:954
2009-04-01 17:26:21 N [server-protocol.c:7513:mop_setvolume] server: accepted client from 10.0.8.3:953
pending frames:
<D4>J5Y<DB>^?
patchset: 4e5c297d7c3480d0d3ab1c0c2a184c6a4fb801ef
signal received: 11
configuration details:argp 1
backtrace 1
bdb->cursor->get 1
db.h 1
dlfcn 1
fdatasync 1
libpthread 1
llistxattr 1
setfsid 1
spinlock 1
epoll.h 1
xattr.h 1
st_atim.tv_nsec 1
package-string: glusterfs 2.0.0rc7
/lib/libc.so.6[0x7fdb58be9f60]
/lib/libpthread.so.0(pthread_spin_lock+0x0)[0x7fdb58f16630]
/usr/lib/libglusterfs.so.0(fd_ref+0x1a)[0x7fdb5935139a]
/usr/lib/libglusterfs.so.0(fop_flush_stub+0x4a)[0x7fdb5934b69a]
/usr/lib/glusterfs/2.0.0rc7/xlator/performance/io-threads.so(iot_flush+0x28)[0x7fdb5859a848]
/usr/lib/libglusterfs.so.0(default_flush+0xaa)[0x7fdb59340eba]
/usr/lib/glusterfs/2.0.0rc7/xlator/performance/write-behind.so(wb_flush+0x268)[0x7fdb5818a088]
/usr/lib/glusterfs/2.0.0rc7/xlator/performance/read-ahead.so(ra_flush+0xe0)[0x7fdb57f7fb30]
/usr/lib/glusterfs/2.0.0rc7/xlator/protocol/server.so(server_release+0xf9)[0x7fdb57d69e79]
/usr/lib/glusterfs/2.0.0rc7/xlator/protocol/server.so(protocol_server_pollin+0xa6)[0x7fdb57d647d6]
/usr/lib/glusterfs/2.0.0rc7/xlator/protocol/server.so(notify+0x38)[0x7fdb57d64818]
/usr/lib/glusterfs/2.0.0rc7/transport/socket.so(socket_event_handler+0xe0)[0x7fdb57b57b80]
/usr/lib/libglusterfs.so.0[0x7fdb593531ef]
/usr/sbin/glusterfsd(main+0xa81)[0x403a21]
/lib/libc.so.6(__libc_start_main+0xe6)[0x7fdb58bd61a6]
/usr/sbin/glusterfsd[0x402519]
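For reference, the two interesting frames can be resolved to source lines
with gdb, assuming the glusterfs debug symbols are installed (the .so paths
and symbol offsets are copied from the backtrace above):

# Where inside ra_flush() the crashing flush call went out:
gdb -batch -ex 'info line *(ra_flush+0xe0)' /usr/lib/glusterfs/2.0.0rc7/xlator/performance/read-ahead.so
# Where fd_ref() took the SIGSEGV (the spinlock at offset 0 suggests a bad fd pointer):
gdb -batch -ex 'info line *(fd_ref+0x1a)' /usr/lib/libglusterfs.so.0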
---------
Configuration:
# file: /etc/glusterfs/glusterfsd.vol
#
# Volumes
#
volume media-small
type storage/posix
option directory /var/local/glusterfs/media_small
end-volume
volume media-medium
type storage/posix
option directory /var/local/glusterfs/media_medium
end-volume
# Lock posix
volume media-small-locks
type features/posix-locks
option mandatory-locks on
subvolumes media-small
# subvolumes trash # enable this if you need trash can support
# (NOTE: not present in 1.3.0-pre5+ releases)
end-volume
volume media-medium-locks
type features/posix-locks
option mandatory-locks on
subvolumes media-medium
# subvolumes trash # enable this if you need trash can support
# (NOTE: not present in 1.3.0-pre5+ releases)
end-volume
#
# Performance
#
volume media-small-iot
type performance/io-threads
subvolumes media-small-locks
option thread-count 4 # default value is 1
end-volume
volume media-small-ioc
type performance/io-cache
option cache-size 128MB # default is 32MB
option page-size 128KB # default is 128KB
subvolumes media-small-iot
end-volume
volume media-small-wb
type performance/write-behind
option flush-behind on # default is off
subvolumes media-small-ioc
end-volume
volume media-small-ra
type performance/read-ahead
subvolumes media-small-wb
option page-size 256KB # default is 256KB
option page-count 4 # default is 2 - cache per file = (page-count x page-size)
option force-atime-update no # default is 'no'
end-volume
volume media-medium-iot
type performance/io-threads
subvolumes media-medium-locks
option thread-count 4 # default value is 1
end-volume
volume media-medium-ioc
type performance/io-cache
option cache-size 128MB # default is 32MB
option page-size 128KB # default is 128KB
subvolumes media-medium-iot
end-volume
volume media-medium-wb
type performance/write-behind
option flush-behind on # default is off
subvolumes media-medium-ioc
end-volume
volume media-medium-ra
type performance/read-ahead
subvolumes media-medium-wb
option page-size 256KB # default is 256KB
option page-count 4 # default is 2 - cache per file = (page-count x page-size)
option force-atime-update no # default is 'no'
end-volume
#
# Server
#
volume server
type protocol/server
option transport-type tcp/server
option auth.addr.media-small-ra.allow 10.0.*.*
option auth.addr.media-medium-ra.allow 10.0.*.*
# Autoconfiguration, e.g.:
# glusterfs -l /tmp/glusterfs.log --server=filer-04 ./Cache
option client-volume-filename /etc/glusterfs/glusterfs.vol
subvolumes media-small-ra media-medium-ra # exported volumes
end-volume
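For what it's worth, each filer loads this server volfile at startup like
so (a sketch; the log path is just an example):

glusterfsd -f /etc/glusterfs/glusterfsd.vol -l /var/log/glusterfs/glusterfsd.log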
# file: /etc/glusterfs/glusterfs.vol
#
# Clients
#
volume media-small-filer-04
type protocol/client
option transport-type tcp/client
option remote-host filer-04.local
option remote-subvolume media-small-ra
end-volume
volume media-small-filer-05
type protocol/client
option transport-type tcp/client
option remote-host filer-05.local
option remote-subvolume media-small-ra
end-volume
volume media-medium-filer-04
type protocol/client
option transport-type tcp/client
option remote-host filer-04.local
option remote-subvolume media-medium-ra
end-volume
volume media-medium-filer-05
type protocol/client
option transport-type tcp/client
option remote-host filer-05.local
option remote-subvolume media-medium-ra
end-volume
#
# Main volume
#
volume afr-small
# AFR has been renamed to "Replicate" for simplicity.
type cluster/replicate
# The server with the least free disk space must be listed first:
# "When doing a "df -h" on a client, the AVAILABLE disk space
# will display the maximum disk space of the first AFR sub volume defined
# in the spec file. So if you have two servers with 50 gigs and 100 gigs
# of free disk space, and the server with 100 gigs is listed first, then
# you will see 100 gigs available even though one server only has 50 gigs
# free."
subvolumes media-small-filer-04 media-small-filer-05
end-volume
volume afr-medium
# AFR has been renamed to "Replicate" for simplicity.
type cluster/replicate
subvolumes media-medium-filer-04 media-medium-filer-05
end-volume
#
# Performance
#
volume iot-small
type performance/io-threads
option thread-count 8 # default is 1
subvolumes afr-small
end-volume
volume readahead-small
type performance/read-ahead
subvolumes iot-small
option page-size 1MB # default is 256KB
option page-count 4 # default is 2 - cache per file = (page-count x page-size)
option force-atime-update no # default is 'no'
end-volume
volume iocache-small
type performance/io-cache
option cache-size 64MB # default is 32MB
option page-size 256KB # default is 128KB
subvolumes readahead-small
end-volume
volume wb-small
type performance/write-behind
option window-size 1MB # max 4MB
option flush-behind on # default is off
subvolumes iocache-small
end-volume
volume iot-medium
type performance/io-threads
option thread-count 8 # default is 1
subvolumes afr-medium
end-volume
volume readahead-medium
type performance/read-ahead
subvolumes iot-medium
option page-size 1MB # default is 256KB
option page-count 4 # default is 2 - cache per file = (page-count x page-size)
option force-atime-update no # default is 'no'
end-volume
volume iocache-medium
type performance/io-cache
option cache-size 64MB # default is 32MB
option page-size 256KB # default is 128KB
subvolumes readahead-medium
end-volume
volume wb-medium
type performance/write-behind
option window-size 1MB # max 4MB
option flush-behind on # default is off
subvolumes iocache-medium
end-volume
The clients mount the afr-* subvolumes directly, without the client-side
performance translators; I'm testing the performance translators on the
server side, as shown in the sketch below.
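Concretely, a client mount looks roughly like this (a sketch; the mount
point is just an example, and if I remember right --volume-name selects
the afr volume instead of the topmost translator in the volfile):

glusterfs -f /etc/glusterfs/glusterfs.vol --volume-name afr-small /mnt/media-small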
Regards,
--
Greg