[Gluster-users] Gluster volume quota limit-usage crashes glusterd

符永涛 yongtaofu at gmail.com
Tue Jul 23 15:16:41 UTC 2013


More debug info:

(gdb) f 5
#5  0x0000003182214ff3 in _dict_set (this=0x7f26dd50b80c,
    key=<value optimized out>, value=0x7f26dd34d7c0) at dict.c:248
248 in dict.c
(gdb) p *this
$2 = {is_static = 0 '\000', hash_size = 1, count = 4, refcount = 1,
  members = 0x7f26dd3fb4e4, members_list = 0x7f26dd40e590, extra_free = 0x0,
  extra_stdfree = 0x0, lock = 0}
(gdb) p *this->members_list
$3 = {hash_next = 0x7f26dd40e54c, prev = 0x0, next = 0x7f26dd40e54c,
  value = 0x7f26dd329bc0, key = 0x2edf7e0 "server.allow-insecure"}
(gdb) p *this->members_list->next
$4 = {hash_next = 0x7f26dd409aa8, prev = 0x7f26dd40e590,
  next = 0x7f26dd409aa8, value = 0x7f26dd329b8c,
  key = 0x2fd7330 "features.quota"}
(gdb) p *this->members_list->next->next
$5 = {hash_next = 0x7f26dd3fb528, prev = 0x7f26dd40e54c,
  next = 0x7f26dd3fb528, value = 0x7f26dd329b58,
  key = 0x2fd72f0 "features.quota-timeout"}
(gdb) p *this->members_list->next->next->next
$6 = {hash_next = 0x0, prev = 0x7f26dd409aa8, next = 0x0,
  value = 0x7f26dd34d7c0, key = 0x30dd470 "features.limit-usage"}
(gdb) p *this->members_list->next->next->next->next
Cannot access memory at address 0x0
(gdb) p *unref_data
$7 = {is_static = 0 '\000', is_const = 0 '\000', is_stdalloc = 0 '\000',
  len = 7, data = 0x30c98a0 "/lib64/libgcc_s.so.1", refcount = 0, lock = 1}
(gdb) p *unref_data->data
$8 = 47 '/'
(gdb) p *(*unref_data->data)
Cannot access memory at address 0x2f
(gdb) p *unref_data->data
$9 = 47 '/'
(gdb) p *this->members_list->next->next->next->next->value
Cannot access memory at address 0x18
(gdb) p *this->members_list->next->next->next->next
Cannot access memory at address 0x0
(gdb) p *this->members_list->next
$10 = {hash_next = 0x7f26dd409aa8, prev = 0x7f26dd40e590,
  next = 0x7f26dd409aa8, value = 0x7f26dd329b8c,
  key = 0x2fd7330 "features.quota"}
(gdb) p *this->members_list->next->next
$11 = {hash_next = 0x7f26dd3fb528, prev = 0x7f26dd40e54c,
  next = 0x7f26dd3fb528, value = 0x7f26dd329b58,
  key = 0x2fd72f0 "features.quota-timeout"}
(gdb) p *this->members_list->next->next->next
$12 = {hash_next = 0x0, prev = 0x7f26dd409aa8, next = 0x0,
  value = 0x7f26dd34d7c0, key = 0x30dd470 "features.limit-usage"}
(gdb) p *this->members_list->next->next->next->value
$13 = {is_static = 1 '\001', is_const = 0 '\000', is_stdalloc = 0 '\000',
  len = 8, data = 0x2fc7400 "/:300TB", refcount = 1, lock = 1}
(gdb) p *unref_data
$14 = {is_static = 0 '\000', is_const = 0 '\000', is_stdalloc = 0 '\000',
  len = 7, data = 0x30c98a0 "/lib64/libgcc_s.so.1", refcount = 0, lock = 1}
(gdb) p *this->members_list->next->next->next->value->data
$15 = 47 '/'
(gdb) p *unref_data->data
$16 = 47 '/'
(gdb) p *key
$17 = 102 'f'
(gdb) p *value
$18 = {is_static = 1 '\001', is_const = 0 '\000', is_stdalloc = 0 '\000',
  len = 8, data = 0x2fc7400 "/:300TB", refcount = 1, lock = 1}
(gdb) p *pair
value has been optimized out
(gdb) p *pair->key
value has been optimized out
(gdb) p *pair->value
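
From the dump above, the dict holds four members, the last one being
features.limit-usage, and the value being set is "/:300TB", so the
triggering command was presumably of the form
gluster volume quota <VOLNAME> limit-usage / 300TB (<VOLNAME> is a
placeholder). The suspicious part is unref_data: it is the old data_t
being released by _dict_set(), yet its len is 7 (not 8 as for
"/:300TB") and its data pointer now points at the string
"/lib64/libgcc_s.so.1". That looks like the old value was already
freed once and its memory reused, so the second free() inside
data_destroy() is what makes glibc abort in frames #0-#4.

Below is a minimal sketch of that replace path, assuming the usual
refcounting pattern; the struct and helpers are simplified stand-ins
whose names follow the backtrace, not the exact 3.3 source:

#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for glusterfs' data_t (the real struct has more
 * fields, e.g. a lock taken around refcount updates). */
typedef struct data {
    char  is_static;
    int   len;
    char *data;
    int   refcount;
} data_t;

/* frame #4: frees the payload, then the struct. If d->data was already
 * freed and reused (as the "/lib64/libgcc_s.so.1" contents suggest),
 * free() detects the corruption and glibc aborts via malloc_printerr(),
 * which is frames #0-#3 of the core. */
static void data_destroy(data_t *d)
{
    if (!d->is_static)
        free(d->data);
    free(d);
}

static void data_unref(data_t *d)
{
    if (--d->refcount == 0)
        data_destroy(d);
}

int main(void)
{
    /* stands in for the old features.limit-usage value in the dict */
    data_t *old = malloc(sizeof(*old));
    old->is_static = 0;
    old->len       = 8;
    old->data      = strdup("/:300TB");
    old->refcount  = 1;

    data_unref(old);       /* legitimate last unref: frees cleanly */
    /* data_unref(old); */ /* a second unref on the stale pointer would
                              hand free() freed memory, reproducing the
                              abort under _dict_set() in frames #4-#5 */
    return 0;
}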

Does anyone know about this issue?

Thank you.


2013/7/23 符永涛 <yongtaofu at gmail.com>

> Dear glusterfs experts,
>
> Recently we encountered an issue related to glusterfs quota.
> Running the following command on one of our volumes crashes the glusterd
> process.
> Here's the core:
>
> Missing separate debuginfos, use: debuginfo-install glusterfs-server-3.3.0.5rhs_iqiyi_6-1.el6.x86_64
> (gdb) bt
> #0  0x0000003ef50328a5 in raise () from /lib64/libc.so.6
> #1  0x0000003ef5034085 in abort () from /lib64/libc.so.6
> #2  0x0000003ef506fa37 in __libc_message () from /lib64/libc.so.6
> #3  0x0000003ef5075366 in malloc_printerr () from /lib64/libc.so.6
> #4  0x0000003182214365 in data_destroy (data=0x7f26dd329b24) at dict.c:135
> #5  0x0000003182214ff3 in _dict_set (this=0x7f26dd50b80c,
>     key=<value optimized out>, value=0x7f26dd34d7c0) at dict.c:248
> #6  dict_set (this=0x7f26dd50b80c, key=<value optimized out>,
>     value=0x7f26dd34d7c0) at dict.c:302
> #7  0x00007f26db2e0c9e in glusterd_quota_limit_usage (volinfo=0x32e9170,
>     dict=0x7f26dd517074, op_errstr=0x7fffa6fa8d78) at glusterd-quota.c:593
> #8  0x00007f26db2e1195 in glusterd_op_quota (dict=0x7f26dd517074,
>     op_errstr=0x7fffa6fa8d78) at glusterd-quota.c:724
> #9  0x00007f26db2acb48 in glusterd_op_commit_perform (op=<value optimized out>,
>     dict=0x7f26dd517074, op_errstr=<value optimized out>,
>     rsp_dict=0x7f26dd517020) at glusterd-op-sm.c:3429
> #10 0x00007f26db2adf4d in glusterd_op_ac_commit_op (event=<value optimized out>,
>     ctx=0x33017b0) at glusterd-op-sm.c:3195
> #11 0x00007f26db2abbd6 in glusterd_op_sm () at glusterd-op-sm.c:5014
> #12 0x00007f26db292f5b in glusterd_handle_commit_op (req=<value optimized out>)
>     at glusterd-handler.c:669
> #13 0x0000003182a0a2e3 in rpcsvc_handle_rpc_call (svc=0x2e69d20,
>     trans=<value optimized out>, msg=<value optimized out>) at rpcsvc.c:513
> #14 0x0000003182a0a453 in rpcsvc_notify (trans=0x3318310,
>     mydata=<value optimized out>, event=<value optimized out>,
>     data=<value optimized out>) at rpcsvc.c:612
> #15 0x0000003182a0aeb8 in rpc_transport_notify (this=<value optimized out>,
>     event=<value optimized out>, data=<value optimized out>)
>     at rpc-transport.c:489
> #16 0x00007f26db006784 in socket_event_poll_in (this=0x3318310) at socket.c:1677
> #17 0x00007f26db006867 in socket_event_handler (fd=<value optimized out>,
>     idx=41, data=0x3318310, poll_in=1, poll_out=0,
>     poll_err=<value optimized out>) at socket.c:1792
> #18 0x000000318223e4e4 in event_dispatch_epoll_handler (event_pool=0x2e64e50)
>     at event.c:785
> #19 event_dispatch_epoll (event_pool=0x2e64e50) at event.c:847
> #20 0x0000000000407420 in main ()
> (gdb)
>
>
> Does anyone know about this issue? Thank you very much.
>
>
> --
> 符永涛
>



-- 
符永涛