[Bugs] [Bug 1236050] Disperse volume: fuse mount hung after self healing

bugzilla at redhat.com bugzilla at redhat.com
Thu Aug 6 08:49:10 UTC 2015


https://bugzilla.redhat.com/show_bug.cgi?id=1236050



--- Comment #5 from Backer <mdfakkeer at gmail.com> ---
I am getting random test results after disabling and re-enabling the perf
xlators. Please refer to the attachment.
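
For reference, the options listed under "Options Reconfigured" in the volume
info below would have been toggled with "gluster volume set"; the exact
commands are not captured in this comment, but roughly:

gluster volume set vaulttest39 performance.quick-read off
gluster volume set vaulttest39 performance.io-cache off
gluster volume set vaulttest39 performance.write-behind off
gluster volume set vaulttest39 performance.stat-prefetch off
gluster volume set vaulttest39 performance.read-ahead off
gluster volume set vaulttest39 performance.open-behind off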

root@gfs-tst-08:/home/qubevaultadmin# gluster --version
glusterfs 3.7.3 built on Jul 31 2015 17:03:01
Repository revision: git://git.gluster.com/glusterfs.git
Copyright (c) 2006-2011 Gluster Inc. <http://www.gluster.com>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
You may redistribute copies of GlusterFS under the terms of the GNU General
Public License.


root@gfs-tst-08:/home/gfsadmin# gluster volume  info

Volume Name: vaulttest39
Type: Disperse
Volume ID: fcbed6b5-0654-489c-a29e-d18f737ac2f7
Status: Started
Number of Bricks: 1 x (3 + 1) = 4
Transport-type: tcp
Bricks:
Brick1: 10.1.2.238:/media/disk1
Brick2: 10.1.2.238:/media/disk2
Brick3: 10.1.2.238:/media/disk3
Brick4: 10.1.2.238:/media/disk4
Options Reconfigured:
performance.readdir-ahead: on
performance.quick-read: off
performance.io-cache: off
performance.write-behind: off
performance.stat-prefetch: off
performance.read-ahead: off
performance.open-behind: off
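
Note on the geometry: a 1 x (3 + 1) disperse volume stores each file as 3
data fragments plus 1 redundancy fragment, so it tolerates the loss of any
one brick, and each brick holds roughly file-size / 3. For the 2 MiB test
file used below: 2097152 / 3 = 699051 bytes, which is the 683K fragment size
seen in the ls listings further down.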




gfsadmin@gfs-tst-08:~$ sudo gluster volume status
Status of volume: vaulttest39
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 10.1.2.238:/media/disk1               49152     0          Y       1560
Brick 10.1.2.238:/media/disk2               49153     0          Y       1568
Brick 10.1.2.238:/media/disk3               49154     0          Y       1576
Brick 10.1.2.238:/media/disk4               49155     0          Y       1582
NFS Server on localhost                     2049      0          Y       1544

Task Status of Volume vaulttest39
------------------------------------------------------------------------------
There are no active volume tasks

gfsadmin@gfs-tst-08:~$ sudo kill -9 1560
gfsadmin@gfs-tst-08:~$ sudo gluster volume status
Status of volume: vaulttest39
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 10.1.2.238:/media/disk1               N/A       N/A        N       N/A
Brick 10.1.2.238:/media/disk2               49153     0          Y       1568
Brick 10.1.2.238:/media/disk3               49154     0          Y       1576
Brick 10.1.2.238:/media/disk4               49155     0          Y       1582
NFS Server on localhost                     2049      0          Y       1544

Task Status of Volume vaulttest39
------------------------------------------------------------------------------
There are no active volume tasks


root@gfs-tst-09:/mnt/gluster# dd if=/dev/urandom of=2.txt bs=1M count=2
2+0 records in
2+0 records out
2097152 bytes (2.1 MB) copied, 0.226147 s, 9.3 MB/s
root@gfs-tst-09:/mnt/gluster# md5sum 2.txt
cd9db53f9c090958ff8c033161576b95  2.txt


gfsadmin@gfs-tst-08:~$ ls -l -h /media/disk{1..4}
/media/disk1:
total 960K
-rw-r--r-- 2 root root 683K Aug  6 13:58 1.txt

/media/disk2:
total 1.9M
-rw-r--r-- 2 root root 683K Aug  6 13:58 1.txt
-rw-r--r-- 2 root root 683K Aug  6 13:59 2.txt

/media/disk3:
total 1.9M
-rw-r--r-- 2 root root 683K Aug  6 13:58 1.txt
-rw-r--r-- 2 root root 683K Aug  6 13:59 2.txt

/media/disk4:
total 1.9M
-rw-r--r-- 2 root root 683K Aug  6 13:58 1.txt
-rw-r--r-- 2 root root 683K Aug  6 13:59 2.txt
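
As expected, disk1 has no fragment for 2.txt because its brick process was
down when the file was written; the remaining three fragments are enough to
serve reads. To confirm the file is pending heal, one could dump the ec
xattrs on a surviving brick (xattr names per the disperse xlator; output was
not captured here):

getfattr -d -m . -e hex /media/disk2/2.txt

and compare trusted.ec.version across the bricks.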




root@gfs-tst-08:/home/gfsadmin# gluster v start vaulttest39 force
volume start: vaulttest39: success
root@gfs-tst-08:/home/gfsadmin#  gluster v heal  vaulttest39
Launching heal operation to perform index self heal on volume vaulttest39 has
been successful
Use heal info commands to check status
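
heal info showing 0 entries right after launching the heal can mean either
that the index heal already completed or that the file was never queued in
the index; if in doubt, a full heal can be forced with:

gluster v heal vaulttest39 full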
root@gfs-tst-08:/home/gfsadmin# gluster v heal  vaulttest39 info
Brick gfs-tst-08:/media/disk1/
Number of entries: 0

Brick gfs-tst-08:/media/disk2/
Number of entries: 0

Brick gfs-tst-08:/media/disk3/
Number of entries: 0

Brick gfs-tst-08:/media/disk4/
Number of entries: 0

root@gfs-tst-08:/home/gfsadmin# gluster v heal vaulttest39
Launching heal operation to perform index self heal on volume vaulttest39 has
been successful
Use heal info commands to check status
root@gfs-tst-08:/home/gfsadmin# gluster v heal vaulttest39 info
Brick gfs-tst-08:/media/disk1/
Number of entries: 0

Brick gfs-tst-08:/media/disk2/
Number of entries: 0

Brick gfs-tst-08:/media/disk3/
Number of entries: 0

Brick gfs-tst-08:/media/disk4/
Number of entries: 0

root@gfs-tst-08:/home/gfsadmin#  ls -l -h /media/disk{1..4}
/media/disk1:
total 1004K
-rw-r--r-- 2 root root 683K Aug  6 13:58 1.txt
-rw-r--r-- 2 root root 683K Aug  6 13:59 2.txt

/media/disk2:
total 1.9M
-rw-r--r-- 2 root root 683K Aug  6 13:58 1.txt
-rw-r--r-- 2 root root 683K Aug  6 13:59 2.txt

/media/disk3:
total 1.9M
-rw-r--r-- 2 root root 683K Aug  6 13:58 1.txt
-rw-r--r-- 2 root root 683K Aug  6 13:59 2.txt

/media/disk4:
total 1.9M
-rw-r--r-- 2 root root 683K Aug  6 13:58 1.txt
-rw-r--r-- 2 root root 683K Aug  6 13:59 2.txt

root@gfs-tst-08:/home/gfsadmin# gluster volume status
Status of volume: vaulttest39
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 10.1.2.238:/media/disk1               49152     0          Y       1721
Brick 10.1.2.238:/media/disk2               49153     0          Y       1568
Brick 10.1.2.238:/media/disk3               49154     0          Y       1576
Brick 10.1.2.238:/media/disk4               49155     0          Y       1582
NFS Server on localhost                     2049      0          Y       1740

Task Status of Volume vaulttest39
------------------------------------------------------------------------------
There are no active volume tasks


root@gfs-tst-08:/home/gfsadmin# kill -9 1582
root@gfs-tst-08:/home/gfsadmin# gluster volume status
Status of volume: vaulttest39
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 10.1.2.238:/media/disk1               49152     0          Y       1721
Brick 10.1.2.238:/media/disk2               49153     0          Y       1568
Brick 10.1.2.238:/media/disk3               49154     0          Y       1576
Brick 10.1.2.238:/media/disk4               N/A       N/A        N       N/A
NFS Server on localhost                     2049      0          Y       1740

Task Status of Volume vaulttest39
------------------------------------------------------------------------------
There are no active volume tasks


root@gfs-tst-09:/mnt/gluster# md5sum 2.txt
cd9db53f9c090958ff8c033161576b95  2.txt
root@gfs-tst-09:/mnt/gluster# md5sum 2.txt
cd9db53f9c090958ff8c033161576b95  2.txt
root@gfs-tst-09:/mnt/gluster# ls
1.txt  2.txt
root@gfs-tst-09:/mnt/gluster# ls
1.txt  2.txt
root@gfs-tst-09:/mnt/gluster# md5sum 2.txt
70b40a7e3f5dc85345e466968416cde1  2.txt
root@gfs-tst-09:/mnt/gluster# md5sum 2.txt
70b40a7e3f5dc85345e466968416cde1  2.txt
root@gfs-tst-09:/mnt/gluster# md5sum 2.txt
70b40a7e3f5dc85345e466968416cde1  2.txt
root@gfs-tst-09:/mnt/gluster#
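
The changed checksum after killing the disk4 brick suggests reads are now
being reconstructed using the fragment that was healed onto disk1. One way
to rule out client-side caching on the existing fuse mount would be to
re-check from a fresh mount (mount point below is illustrative):

mount -t glusterfs 10.1.2.238:/vaulttest39 /mnt/verify
md5sum /mnt/verify/2.txt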
