[Gluster-users] [Gluster-devel] Crash in glusterfs!!!

ABHISHEK PALIWAL abhishpaliwal at gmail.com
Fri Sep 21 10:02:17 UTC 2018


Hi Sanju,

Here is the output of 't a a bt' (thread apply all bt):

(gdb) t a a bt

Thread 7 (LWP 444):
#0  0x00003fff7a4d4ccc in __pthread_cond_timedwait (cond=0x10059a98,
mutex=0x10059a70, abstime=0x3fff77a50670) at pthread_cond_timedwait.c:198
#1  0x00003fff7a5f1e74 in syncenv_task (proc=0x10053eb0) at syncop.c:607
#2  0x00003fff7a5f2cdc in syncenv_processor (thdata=0x10053eb0) at
syncop.c:699
#3  0x00003fff7a4ccb30 in start_thread (arg=0x3fff77a51160) at
pthread_create.c:462
#4  0x00003fff7a4170fc in .__clone () at
../sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S:96

Thread 6 (LWP 447):
#0  0x00003fff7a4d8e38 in __lll_lock_wait (futex=0x100272a8,
private=<optimized out>) at lowlevellock.c:46
#1  0x00003fff7a4cff64 in __GI___pthread_mutex_lock (mutex=0x100272a8) at
../nptl/pthread_mutex_lock.c:81
#2  0x00003fff7a59de8c in _gf_msg (domain=0x3fff70006c90
"c_glusterfs-client-1", file=0x3fff76165de0 "client.c",
    function=0x3fff76165cd8 <__FUNCTION__.18849> "notify", line=<optimized
out>, level=<optimized out>, errnum=<optimized out>, trace=<optimized out>,
msgid=114020,
    fmt=0x3fff76166350 "parent translators are ready, attempting connect on
transport") at logging.c:2058
#3  0x00003fff761394ac in notify (this=0x3fff700061f0, event=<optimized
out>, data=0x3fff70008c50) at client.c:2116
#4  0x00003fff7a599ca0 in xlator_notify (xl=0x3fff700061f0,
event=<optimized out>, data=<optimized out>) at xlator.c:491
#5  0x00003fff7a5b8700 in default_notify (this=0x3fff70008c50,
event=<optimized out>, data=<optimized out>) at defaults.c:2302
#6  0x00003fff760fa22c in afr_notify (this=0x3fff70008c50, event=1,
data=0x3fff7000a4c0, data2=<optimized out>) at afr-common.c:3967
#7  0x00003fff76105994 in notify (this=<optimized out>, event=<optimized
out>, data=<optimized out>) at afr.c:38
#8  0x00003fff7a599ca0 in xlator_notify (xl=0x3fff70008c50,
event=<optimized out>, data=<optimized out>) at xlator.c:491
#9  0x00003fff7a5b8700 in default_notify (this=0x3fff7000a4c0,
event=<optimized out>, data=<optimized out>) at defaults.c:2302
#10 0x00003fff7609c1e4 in notify (this=<optimized out>, event=<optimized
out>, data=<optimized out>) at io-stats.c:3064
#11 0x00003fff7a599ca0 in xlator_notify (xl=0x3fff7000a4c0,
event=<optimized out>, data=<optimized out>) at xlator.c:491
#12 0x00003fff7a5ee560 in glusterfs_graph_parent_up (graph=<optimized out>)
at graph.c:440
#13 0x00003fff7a5eeb90 in glusterfs_graph_activate (graph=0x3fff70000af0,
ctx=0x10027010) at graph.c:688
#14 0x000000001000a49c in glusterfs_process_volfp (ctx=0x10027010,
fp=0x3fff70001cd0) at glusterfsd.c:2221
#15 0x000000001000fd08 in mgmt_getspec_cbk (req=<optimized out>,
iov=<optimized out>, count=<optimized out>, myframe=0x3fff7921b06c) at
glusterfsd-mgmt.c:1561
#16 0x00003fff7a552ec4 in rpc_clnt_handle_reply (clnt=0x10089020,
pollin=0x3fff70001760) at rpc-clnt.c:775
#17 0x00003fff7a5533d0 in rpc_clnt_notify (trans=<optimized out>,
mydata=0x10089050, event=<optimized out>, data=<optimized out>) at
rpc-clnt.c:933
#18 0x00003fff7a54e4fc in rpc_transport_notify (this=<optimized out>,
event=<optimized out>, data=<optimized out>) at rpc-transport.c:546
#19 0x00003fff76a32d44 in socket_event_poll_in (this=this@entry=0x1008ab80)
at socket.c:2236
#20 0x00003fff76a3589c in socket_event_handler (fd=<optimized out>,
idx=<optimized out>, data=0x1008ab80, poll_in=<optimized out>,
poll_out=<optimized out>,
    poll_err=<optimized out>) at socket.c:2349
#21 0x00003fff7a613874 in event_dispatch_epoll_handler
(event=0x3fff7697e6a0, event_pool=0x10045bc0) at event-epoll.c:575
#22 event_dispatch_epoll_worker (data=0x1008bd50) at event-epoll.c:678
#23 0x00003fff7a4ccb30 in start_thread (arg=0x3fff7697f160) at
pthread_create.c:462
#24 0x00003fff7a4170fc in .__clone () at
../sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S:96

Thread 5 (LWP 453):
#0  0x00003fff7a4d8e38 in __lll_lock_wait (futex=0x100272a8,
private=<optimized out>) at lowlevellock.c:46
#1  0x00003fff7a4cff64 in __GI___pthread_mutex_lock (mutex=0x100272a8) at
../nptl/pthread_mutex_lock.c:81
#2  0x00003fff7a59de8c in _gf_msg (domain=0x3fff7a63ae38 "epoll",
file=0x3fff7a63ae28 "event-epoll.c",
    function=0x3fff7a63adb8 <__FUNCTION__.8510>
"event_dispatch_epoll_worker", line=<optimized out>, level=<optimized out>,
errnum=<optimized out>,
    trace=<optimized out>, msgid=101190, fmt=0x3fff7a63af48 "Started thread
with index %d") at logging.c:2058
#3  0x00003fff7a61365c in event_dispatch_epoll_worker (data=0x3fff7002ffd0)
at event-epoll.c:631
#4  0x00003fff7a4ccb30 in start_thread (arg=0x3fff759ae160) at
pthread_create.c:462
#5  0x00003fff7a4170fc in .__clone () at
../sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S:96

Thread 4 (LWP 441):
#0  0x00003fff7a4ce084 in pthread_join (threadid=70366438879584,
thread_return=0x0) at pthread_join.c:90
#1  0x00003fff7a613ca0 in event_dispatch_epoll (event_pool=0x10045bc0) at
event-epoll.c:762
#2  0x00003fff7a5dc7e4 in event_dispatch (event_pool=<optimized out>) at
event.c:128
#3  0x0000000010005ea8 in main (argc=<optimized out>, argv=<optimized out>)
at glusterfsd.c:2380
#4  0x00003fff7a32f318 in generic_start_main (main=0x10025718 <main>,
argc=<optimized out>, argv=0x3fffd839d7a8, auxvec=0x3fffd839d838,
init=<optimized out>,
    rtld_fini=<optimized out>, stack_end=<optimized out>, fini=<optimized
out>) at ../csu/libc-start.c:289
#5  0x00003fff7a32f54c in __libc_start_main (argc=<optimized out>,
argv=<optimized out>, ev=<optimized out>, auxvec=<optimized out>,
rtld_fini=<optimized out>,
    stinfo=<optimized out>, stack_on_entry=<optimized out>) at
../sysdeps/unix/sysv/linux/powerpc/libc-start.c:93
#6  0x0000000000000000 in ?? ()

Thread 3 (LWP 442):
#0  0x00003fff7a4da150 in .__nanosleep () at
../sysdeps/unix/syscall-template.S:84
#1  0x00003fff7a5c4f28 in gf_timer_proc (ctx=0x10027010) at timer.c:205
#2  0x00003fff7a4ccb30 in start_thread (arg=0x3fff78a51160) at
pthread_create.c:462
#3  0x00003fff7a4170fc in .__clone () at
../sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S:96

Thread 2 (LWP 445):
#0  0x00003fff7a4d4ccc in __pthread_cond_timedwait (cond=0x10059a98,
mutex=0x10059a70, abstime=0x3fff77250670) at pthread_cond_timedwait.c:198
#1  0x00003fff7a5f1e74 in syncenv_task (proc=0x10054468) at syncop.c:607
#2  0x00003fff7a5f2cdc in syncenv_processor (thdata=0x10054468) at
syncop.c:699
#3  0x00003fff7a4ccb30 in start_thread (arg=0x3fff77251160) at
pthread_create.c:462
#4  0x00003fff7a4170fc in .__clone () at
../sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S:96

Thread 1 (LWP 443):
#0  0x00003fff7a3953b0 in _IO_unbuffer_all () at genops.c:960
#1  _IO_cleanup () at genops.c:1020
#2  0x00003fff7a34fd00 in __run_exit_handlers (status=<optimized out>,
listp=<optimized out>, run_list_atexit=run_list_atexit@entry=true) at
exit.c:95
#3  0x00003fff7a34fe1c in __GI_exit (status=<optimized out>) at exit.c:104
#4  0x000000001000984c in cleanup_and_exit (signum=<optimized out>) at
glusterfsd.c:1295
#5  0x0000000010009a64 in glusterfs_sigwaiter (arg=<optimized out>) at
glusterfsd.c:2016
#6  0x00003fff7a4ccb30 in start_thread (arg=0x3fff78251160) at
pthread_create.c:462
#7  0x00003fff7a4170fc in .__clone () at
../sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S:96
(gdb)
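
For reference, a minimal non-interactive sketch for collecting the same data with
full locals in one pass (the binary and core file paths below are placeholders;
adjust them to the actual locations on the node):

  # placeholder paths; point gdb at the actual glusterfs binary and core file
  gdb /usr/sbin/glusterfsd /path/to/core -batch \
      -ex "set pagination off" \
      -ex "thread apply all bt full" > gluster_backtrace.txt 2>&1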

On Fri, Sep 21, 2018 at 3:06 PM Sanju Rakonde <srakonde at redhat.com> wrote:

> Hi Abhishek,
>
> Can you please share the output of "t a a bt" with us?
>
> Thanks,
> Sanju
>
> On Fri, Sep 21, 2018 at 2:55 PM, ABHISHEK PALIWAL <abhishpaliwal at gmail.com> wrote:
>
>>
>> We have seen a SIGSEGV crash in the glusterfs process at startup, following a
>> kernel restart.
>>
>> (gdb) bt
>> #0  0x00003fffad4463b0 in _IO_unbuffer_all () at genops.c:960
>> #1  _IO_cleanup () at genops.c:1020
>> #2  0x00003fffad400d00 in __run_exit_handlers (status=<optimized out>,
>> listp=<optimized out>, run_list_atexit=run_list_atexit@entry=true) at
>> exit.c:95
>> #3  0x00003fffad400e1c in __GI_exit (status=<optimized out>) at
>> exit.c:104
>> #4  0x000000001000984c in cleanup_and_exit (signum=<optimized out>) at
>> glusterfsd.c:1295
>> #5  0x0000000010009a64 in glusterfs_sigwaiter (arg=<optimized out>) at
>> glusterfsd.c:2016
>> #6  0x00003fffad57db30 in start_thread (arg=0x3fffab302160) at
>> pthread_create.c:462
>> #7  0x00003fffad4c7cdc in .__clone () at
>> ../sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S:96
>>
>> (gdb) bt full
>> #0  0x00003fffad4463b0 in _IO_unbuffer_all () at genops.c:960
>>     __result = 0
>>     __self = 0x3fffab302160
>>     cnt = 1
>>     fp = 0x3fffa4001f00
>> #1  _IO_cleanup () at genops.c:1020
>>     result = 0
>> #2  0x00003fffad400d00 in __run_exit_handlers (status=<optimized out>,
>> listp=<optimized out>, run_list_atexit=run_list_atexit@entry=true) at
>> exit.c:95
>>     ptr = 0x3fffad557000 <__elf_set___libc_atexit_element__IO_cleanup__>
>> #3  0x00003fffad400e1c in __GI_exit (status=<optimized out>) at
>> exit.c:104
>> No locals.
>> #4  0x000000001000984c in cleanup_and_exit (signum=<optimized out>) at
>> glusterfsd.c:1295
>>     ctx = <optimized out>
>>     trav = <optimized out>
>>     __FUNCTION__ = <error reading variable __FUNCTION__ (Cannot access
>> memory at address 0x10010e38)>
>> #5  0x0000000010009a64 in glusterfs_sigwaiter (arg=<optimized out>) at
>> glusterfsd.c:2016
>>     set = {__val = {18947, 0 <repeats 15 times>}}
>>     ret = <optimized out>
>>     sig = 15
>> #6  0x00003fffad57db30 in start_thread (arg=0x3fffab302160) at
>> pthread_create.c:462
>>     pd = 0x3fffab302160
>>     now = <optimized out>
>>     unwind_buf = {cancel_jmp_buf = {{jmp_buf = {5451414826039278896,
>> 70367357615104, 5451414826003312788, 0, 0, 70367312883712, 70367321268768,
>> 8388608,
>>             70367357575200, 70367913735952, 268595776, 70367357600728,
>> 268588656, 3, 0, 70367357600744, 70367913735600, 70367913735656, 4001536,
>>             70367357576216, 70367321265984, -3187653564, 0 <repeats 42
>> times>}, mask_was_saved = 0}}, priv = {pad = {0x0, 0x0, 0x0, 0x0}, data =
>> {prev = 0x0,
>>           cleanup = 0x0, canceltype = 0}}}
>>     not_first_call = <optimized out>
>>     pagesize_m1 = <optimized out>
>>     sp = <optimized out>
>>     freesize = <optimized out>
>>     __PRETTY_FUNCTION__ = "start_thread"
>> #7  0x00003fffad4c7cdc in .__clone () at
>> ../sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S:96
>> No locals.
>>
>> Can you please help us find the cause of this SIGSEGV?
>> Also, please share your understanding of this issue.
>> --
>> Regards
>> Abhishek Paliwal
>>
>> _______________________________________________
>> Gluster-devel mailing list
>> Gluster-devel at gluster.org
>> https://lists.gluster.org/mailman/listinfo/gluster-devel
>>
>
>
>
> --
> Thanks,
> Sanju
>


-- 
Regards
Abhishek Paliwal