Re: [Gluster-devel] corrupted hash table

2014-05-21 Thread Emmanuel Dreyfus
Nobody has an idea on this one?

 This is master branch, client side:
 
 Program terminated with signal 11, Segmentation fault.
 #0  uuid_unpack (in=0xffc0 <Address 0xffc0 out of bounds>,
 uu=0xbf7fd7b0) at ../../contrib/uuid/unpack.c:43
 
 warning: Source file is more recent than executable.
 43  tmp = *ptr++;
 (gdb) print tmp
 Cannot access memory at address 0xffc0
 (gdb) bt
 #0  uuid_unpack (in=0xffc0 <Address 0xffc0 out of bounds>,
 uu=0xbf7fd7b0) at ../../contrib/uuid/unpack.c:43
 #1  0xbb788f63 in uuid_compare (
 uu1=0xffc0 <Address 0xffc0 out of bounds>,
 uu2=0xb811f938 k\350_6) at ../../contrib/uuid/compare.c:46
 #2  0xbb769993 in __inode_find (table=0xbb213368, gfid=0xb811f938 k\350_6)
 at inode.c:763
 #3  0xbb769cdc in __inode_link (inode=0x5a70b768, parent=<optimized out>,
 name=0x5a7e3148 conf24746.file, iatt=0xb811f930) at inode.c:831
 #4  0xbb769f3f in inode_link (inode=0x5a70b768, parent=0x5af47728,
 name=0x5a7e3148 conf24746.file, iatt=0xb811f930) at inode.c:892
 #5  0xbb36bdaa in fuse_create_cbk (frame=0xba417c44, cookie=0xbb28cb98,
 this=0xb9cbe018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
 buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
 at fuse-bridge.c:1888
 #6  0xb92a30a0 in io_stats_create_cbk (frame=0xbb28cb98, cookie=0xbb287418,
 this=0xb9df2018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
 buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
 at io-stats.c:1260
 #7  0xb92afd80 in mdc_create_cbk (frame=0xbb287418, cookie=0xbb28d7d8,
 this=0xb9df1018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
 buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
 at md-cache.c:1404
 #8  0xb92c790f in ioc_create_cbk (frame=0xbb28d7d8, cookie=0xbb28b008,
 this=0xb9dee018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
 buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
 at io-cache.c:701
 #9  0xbb3079ba in ra_create_cbk (frame=0xbb28b008, cookie=0xbb287f08,
 this=0xb9dec018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
 buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
 at read-ahead.c:173
 #10 0xb92f66b9 in dht_create_cbk (frame=0xbb287f08, cookie=0xbb28f988,
 this=0xb9cff018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
 stbuf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00,
 xdata=0x5c491028) at dht-common.c:3942
 #11 0xb932fa22 in afr_create_unwind (frame=0xba40439c, this=0xb9cfd018)
 at afr-dir-write.c:397
 #12 0xb9330a02 in __afr_dir_write_cbk (frame=0xba40439c, cookie=0x2,
 this=0xb9cfd018, op_ret=0, op_errno=0, buf=0xbf7fdfe4,
 preparent=0xbf7fdf7c, postparent=0xbf7fdf14, preparent2=0x0,
 postparent2=0x0, xdata=0x5cb61ea8) at afr-dir-write.c:244
 #13 0xb939a401 in client3_3_create_cbk (req=0xb805f028, iov=0xb805f048,
 count=1, myframe=0xbb28e4f8) at client-rpc-fops.c:2211
 #14 0xbb7daecf in rpc_clnt_handle_reply (clnt=0xb9cd93b8, pollin=0x5a7dbe38)
 at rpc-clnt.c:767
 #15 0xbb7db7a4 in rpc_clnt_notify (trans=0xb80a7018, mydata=0xb9cd93d8,
 ---Type <return> to continue, or q <return> to quit---
 event=RPC_TRANSPORT_MSG_RECEIVED, data=0x5a7dbe38) at rpc-clnt.c:895
 #16 0xbb7d7d9c in rpc_transport_notify (this=0xb80a7018,
 event=RPC_TRANSPORT_MSG_RECEIVED, data=0x5a7dbe38) at rpc-transport.c:512
 #17 0xbb3214ab in socket_event_poll_in (this=0xb80a7018) at socket.c:2120
 #18 0xbb3246fc in socket_event_handler (fd=16, idx=4, data=0xb80a7018,
 poll_in=1, poll_out=0, poll_err=0) at socket.c:2233
 #19 0xbb7a4c9a in event_dispatch_poll_handler (i=4, ufds=0xbb285118,
 event_pool=0xbb242098) at event-poll.c:357
 #20 event_dispatch_poll (event_pool=0xbb242098) at event-poll.c:436
 #21 0xbb77a160 in event_dispatch (event_pool=0xbb242098) at event.c:113
 #22 0x08050567 in main (argc=4, argv=0xbf7fe880) at glusterfsd.c:2023
 (gdb) frame 2  
 #2  0xbb769993 in __inode_find (table=0xbb213368, gfid=0xb811f938 k\350_6)
 at inode.c:763
 763 if (uuid_compare (tmp->gfid, gfid) == 0) {
 (gdb) list 
 758 return table->root;
 759 
 760 hash = hash_gfid (gfid, 65536);
 761 
 762 list_for_each_entry (tmp, &table->inode_hash[hash], hash) {
 763 if (uuid_compare (tmp->gfid, gfid) == 0) {
 764 inode = tmp;
 765 break;
 766 }
 767 }
 -- 
 Emmanuel Dreyfus
 http://hcpnet.free.fr/pubz
 m...@netbsd.org
 ___
 Gluster-devel mailing list
 Gluster-devel@gluster.org
 http://supercolony.gluster.org/mailman/listinfo/gluster-devel

-- 
Emmanuel Dreyfus
m...@netbsd.org
___
Gluster-devel mailing list
Gluster-devel@gluster.org

Re: [Gluster-devel] corrupted hash table

2014-05-21 Thread Raghavendra Gowdappa
Hi Emmanuel,

Is it possible to get valgrind reports (or a test case which caused this 
crash)? The inode table is corrupted in this case.

regards,
Raghavendra
- Original Message -
 From: Emmanuel Dreyfus m...@netbsd.org
 To: gluster-devel@gluster.org
 Sent: Wednesday, May 21, 2014 12:43:50 PM
 Subject: Re: [Gluster-devel] corrupted hash table
 
 Nobody has an idea on this one?
 
  This is master branch, client side:
  
  Program terminated with signal 11, Segmentation fault.
  #0  uuid_unpack (in=0xffc0 <Address 0xffc0 out of bounds>,
  uu=0xbf7fd7b0) at ../../contrib/uuid/unpack.c:43
  
  warning: Source file is more recent than executable.
  43  tmp = *ptr++;
  (gdb) print tmp
  Cannot access memory at address 0xffc0
  (gdb) bt
  #0  uuid_unpack (in=0xffc0 <Address 0xffc0 out of bounds>,
  uu=0xbf7fd7b0) at ../../contrib/uuid/unpack.c:43
  #1  0xbb788f63 in uuid_compare (
  uu1=0xffc0 <Address 0xffc0 out of bounds>,
  uu2=0xb811f938 k\350_6) at ../../contrib/uuid/compare.c:46
  #2  0xbb769993 in __inode_find (table=0xbb213368, gfid=0xb811f938
  k\350_6)
  at inode.c:763
  #3  0xbb769cdc in __inode_link (inode=0x5a70b768, parent=<optimized out>,
  name=0x5a7e3148 conf24746.file, iatt=0xb811f930) at inode.c:831
  #4  0xbb769f3f in inode_link (inode=0x5a70b768, parent=0x5af47728,
  name=0x5a7e3148 conf24746.file, iatt=0xb811f930) at inode.c:892
  #5  0xbb36bdaa in fuse_create_cbk (frame=0xba417c44, cookie=0xbb28cb98,
  this=0xb9cbe018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
  buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
  at fuse-bridge.c:1888
  #6  0xb92a30a0 in io_stats_create_cbk (frame=0xbb28cb98, cookie=0xbb287418,
  this=0xb9df2018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
  buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
  at io-stats.c:1260
  #7  0xb92afd80 in mdc_create_cbk (frame=0xbb287418, cookie=0xbb28d7d8,
  this=0xb9df1018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
  buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
  at md-cache.c:1404
  #8  0xb92c790f in ioc_create_cbk (frame=0xbb28d7d8, cookie=0xbb28b008,
  this=0xb9dee018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
  buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
  at io-cache.c:701
  #9  0xbb3079ba in ra_create_cbk (frame=0xbb28b008, cookie=0xbb287f08,
  this=0xb9dec018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
  buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
  at read-ahead.c:173
  #10 0xb92f66b9 in dht_create_cbk (frame=0xbb287f08, cookie=0xbb28f988,
  this=0xb9cff018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
  stbuf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00,
  xdata=0x5c491028) at dht-common.c:3942
  #11 0xb932fa22 in afr_create_unwind (frame=0xba40439c, this=0xb9cfd018)
  at afr-dir-write.c:397
  #12 0xb9330a02 in __afr_dir_write_cbk (frame=0xba40439c, cookie=0x2,
  this=0xb9cfd018, op_ret=0, op_errno=0, buf=0xbf7fdfe4,
  preparent=0xbf7fdf7c, postparent=0xbf7fdf14, preparent2=0x0,
  postparent2=0x0, xdata=0x5cb61ea8) at afr-dir-write.c:244
  #13 0xb939a401 in client3_3_create_cbk (req=0xb805f028, iov=0xb805f048,
  count=1, myframe=0xbb28e4f8) at client-rpc-fops.c:2211
  #14 0xbb7daecf in rpc_clnt_handle_reply (clnt=0xb9cd93b8,
  pollin=0x5a7dbe38)
  at rpc-clnt.c:767
  #15 0xbb7db7a4 in rpc_clnt_notify (trans=0xb80a7018, mydata=0xb9cd93d8,
  ---Type <return> to continue, or q <return> to quit---
  event=RPC_TRANSPORT_MSG_RECEIVED, data=0x5a7dbe38) at rpc-clnt.c:895
  #16 0xbb7d7d9c in rpc_transport_notify (this=0xb80a7018,
  event=RPC_TRANSPORT_MSG_RECEIVED, data=0x5a7dbe38) at
  rpc-transport.c:512
  #17 0xbb3214ab in socket_event_poll_in (this=0xb80a7018) at socket.c:2120
  #18 0xbb3246fc in socket_event_handler (fd=16, idx=4, data=0xb80a7018,
  poll_in=1, poll_out=0, poll_err=0) at socket.c:2233
  #19 0xbb7a4c9a in event_dispatch_poll_handler (i=4, ufds=0xbb285118,
  event_pool=0xbb242098) at event-poll.c:357
  #20 event_dispatch_poll (event_pool=0xbb242098) at event-poll.c:436
  #21 0xbb77a160 in event_dispatch (event_pool=0xbb242098) at event.c:113
  #22 0x08050567 in main (argc=4, argv=0xbf7fe880) at glusterfsd.c:2023
  (gdb) frame 2
  #2  0xbb769993 in __inode_find (table=0xbb213368, gfid=0xb811f938
  k\350_6)
  at inode.c:763
  763 if (uuid_compare (tmp->gfid, gfid) == 0) {
  (gdb) list
  758 return table->root;
  759
  760 hash = hash_gfid (gfid, 65536);
  761
  762 list_for_each_entry (tmp, &table->inode_hash[hash], hash) {
  763 if (uuid_compare (tmp->gfid, gfid) == 0) {
  764 inode = tmp;
  765

Re: [Gluster-devel] corrupted hash table

2014-05-21 Thread Emmanuel Dreyfus
Raghavendra Gowdappa rgowd...@redhat.com wrote:

 Is it possible to get valgrind reports

Still no valgrind for NetBSD

 (or a test case which caused this crash)? 

It happens after some time, but I have no way 
to reproduce it quickly.


-- 
Emmanuel Dreyfus
http://hcpnet.free.fr/pubz
m...@netbsd.org
___
Gluster-devel mailing list
Gluster-devel@gluster.org
http://supercolony.gluster.org/mailman/listinfo/gluster-devel


[Gluster-devel] corrupted hash table

2014-05-17 Thread Emmanuel Dreyfus
This is master branch, client side:

Program terminated with signal 11, Segmentation fault.
#0  uuid_unpack (in=0xffc0 <Address 0xffc0 out of bounds>,
uu=0xbf7fd7b0) at ../../contrib/uuid/unpack.c:43

warning: Source file is more recent than executable.
43  tmp = *ptr++;
(gdb) print tmp
Cannot access memory at address 0xffc0
(gdb) bt
#0  uuid_unpack (in=0xffc0 <Address 0xffc0 out of bounds>,
uu=0xbf7fd7b0) at ../../contrib/uuid/unpack.c:43
#1  0xbb788f63 in uuid_compare (
uu1=0xffc0 <Address 0xffc0 out of bounds>,
uu2=0xb811f938 k\350_6) at ../../contrib/uuid/compare.c:46
#2  0xbb769993 in __inode_find (table=0xbb213368, gfid=0xb811f938 k\350_6)
at inode.c:763
#3  0xbb769cdc in __inode_link (inode=0x5a70b768, parent=<optimized out>,
name=0x5a7e3148 conf24746.file, iatt=0xb811f930) at inode.c:831
#4  0xbb769f3f in inode_link (inode=0x5a70b768, parent=0x5af47728,
name=0x5a7e3148 conf24746.file, iatt=0xb811f930) at inode.c:892
#5  0xbb36bdaa in fuse_create_cbk (frame=0xba417c44, cookie=0xbb28cb98,
this=0xb9cbe018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
at fuse-bridge.c:1888
#6  0xb92a30a0 in io_stats_create_cbk (frame=0xbb28cb98, cookie=0xbb287418,
this=0xb9df2018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
at io-stats.c:1260
#7  0xb92afd80 in mdc_create_cbk (frame=0xbb287418, cookie=0xbb28d7d8,
this=0xb9df1018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
at md-cache.c:1404
#8  0xb92c790f in ioc_create_cbk (frame=0xbb28d7d8, cookie=0xbb28b008,
this=0xb9dee018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
at io-cache.c:701
#9  0xbb3079ba in ra_create_cbk (frame=0xbb28b008, cookie=0xbb287f08,
this=0xb9dec018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
buf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00, xdata=0x0)
at read-ahead.c:173
#10 0xb92f66b9 in dht_create_cbk (frame=0xbb287f08, cookie=0xbb28f988,
this=0xb9cff018, op_ret=0, op_errno=0, fd=0xb799e808, inode=0x5a70b768,
stbuf=0xb811f930, preparent=0xb811f998, postparent=0xb811fa00,
xdata=0x5c491028) at dht-common.c:3942
#11 0xb932fa22 in afr_create_unwind (frame=0xba40439c, this=0xb9cfd018)
at afr-dir-write.c:397
#12 0xb9330a02 in __afr_dir_write_cbk (frame=0xba40439c, cookie=0x2,
this=0xb9cfd018, op_ret=0, op_errno=0, buf=0xbf7fdfe4,
preparent=0xbf7fdf7c, postparent=0xbf7fdf14, preparent2=0x0,
postparent2=0x0, xdata=0x5cb61ea8) at afr-dir-write.c:244
#13 0xb939a401 in client3_3_create_cbk (req=0xb805f028, iov=0xb805f048,
count=1, myframe=0xbb28e4f8) at client-rpc-fops.c:2211
#14 0xbb7daecf in rpc_clnt_handle_reply (clnt=0xb9cd93b8, pollin=0x5a7dbe38)
at rpc-clnt.c:767
#15 0xbb7db7a4 in rpc_clnt_notify (trans=0xb80a7018, mydata=0xb9cd93d8,
---Type <return> to continue, or q <return> to quit---
event=RPC_TRANSPORT_MSG_RECEIVED, data=0x5a7dbe38) at rpc-clnt.c:895
#16 0xbb7d7d9c in rpc_transport_notify (this=0xb80a7018,
event=RPC_TRANSPORT_MSG_RECEIVED, data=0x5a7dbe38) at rpc-transport.c:512
#17 0xbb3214ab in socket_event_poll_in (this=0xb80a7018) at socket.c:2120
#18 0xbb3246fc in socket_event_handler (fd=16, idx=4, data=0xb80a7018,
poll_in=1, poll_out=0, poll_err=0) at socket.c:2233
#19 0xbb7a4c9a in event_dispatch_poll_handler (i=4, ufds=0xbb285118,
event_pool=0xbb242098) at event-poll.c:357
#20 event_dispatch_poll (event_pool=0xbb242098) at event-poll.c:436
#21 0xbb77a160 in event_dispatch (event_pool=0xbb242098) at event.c:113
#22 0x08050567 in main (argc=4, argv=0xbf7fe880) at glusterfsd.c:2023
(gdb) frame 2  
#2  0xbb769993 in __inode_find (table=0xbb213368, gfid=0xb811f938 k\350_6)
at inode.c:763
763 if (uuid_compare (tmp->gfid, gfid) == 0) {
(gdb) list 
758 return table->root;
759 
760 hash = hash_gfid (gfid, 65536);
761 
762 list_for_each_entry (tmp, &table->inode_hash[hash], hash) {
763 if (uuid_compare (tmp->gfid, gfid) == 0) {
764 inode = tmp;
765 break;
766 }
767 }
-- 
Emmanuel Dreyfus
http://hcpnet.free.fr/pubz
m...@netbsd.org
___
Gluster-devel mailing list
Gluster-devel@gluster.org
http://supercolony.gluster.org/mailman/listinfo/gluster-devel