[Bugs] [Bug 1238167] memory corruption in the way we maintain migration information in inodes.
bugzilla at redhat.com
Mon Jul 6 13:47:23 UTC 2015
https://bugzilla.redhat.com/show_bug.cgi?id=1238167
Amit Chaurasia <achauras at redhat.com> changed:
What                |Removed        |Added
----------------------------------------------------------------------------
Status              |ON_QA          |ASSIGNED
--- Comment #4 from Amit Chaurasia <achauras at redhat.com> ---
I tried verifying this with the following steps:
1. Begin creating a 10G file on a FUSE mount point.
[root at dht-rhs-19 newtest]# dd if=/dev/urandom of=testfile bs=1024 count=10000000
2. The file is read from another terminal on the same machine.
dd if=testfile of=/dev/null count=10000000 bs=512
3. Another read operation is started; this time the file is read into another file in the same directory.
4. [root at dht-rhs-20 newtest]# dd if=testfile-4 of=newtestfile count=10000000 bs=512
5. Then I begin renaming these files in a loop (a hardened variant of this loop is sketched after step 8):
[root at dht-rhs-20 newtest]# for j in {1..1000}
> do
> for i in `ls`
> do
> mv $i $i-$j
> done
> done
6. The operations were left to run for a while, so the files were being read, written, and renamed concurrently.
7. Suddenly, I get the "Transport endpoint is not connected" error. Attaching strace to the mv process (pid 4179) shows no further activity:
[root at dht-rhs-20 ~]# strace -p 4179
Process 4179 attached
8. Rename is hung:
[root at dht-rhs-20 newtest]# for j in {1..1000}
> do
> for i in `ls`
> do
> mv $i $i-$j
> done
> done
^C
^C^C^C^C^C^C^C^C^C^C^C^C^C^C^C^C^C^C^C^Ccc
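For future re-runs, the same rename workload could be driven with a slightly hardened loop. This is only a sketch of the workload from step 5 (not what was actually run above); it avoids parsing `ls` and tolerates files that another process has already renamed:

#!/bin/bash
# Sketch only: hardened version of the rename loop from step 5.
# - iterates over a glob instead of parsing `ls`
# - quotes names and uses "--" so the growing "-1-2-3..." names stay safe
# - skips files that were already renamed or removed by another loop
cd /mnt/glusterfs/newtest || exit 1
for j in {1..1000}
do
    for f in ./*
    do
        [ -e "$f" ] || continue
        mv -- "$f" "$f-$j" || echo "rename of $f failed at iteration $j" >&2
    done
done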
[root at dht-rhs-19 newtest]# ls
newtestfile-1-1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40-41-42-43
[root at dht-rhs-19 newtest]# ls
newtestfile-1-1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40-41-42-43
[root at dht-rhs-19 newtest]#
[root at dht-rhs-19 newtest]#
[root at dht-rhs-20 ~]# ps -eaf | grep mv
root 4179 20597 0 Jul06 pts/1 00:00:00 mv -i
newtestfile-1-1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40-41-42
newtestfile-1-1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40-41-42-43
root 4883 12061 0 00:25 pts/0 00:00:00 grep mv
[root at dht-rhs-20 ~]#
[root at dht-rhs-20 ~]# ps -eaf | grep strace
root 4557 4286 0 00:03 pts/2 00:00:00 strace -p 4179
root 4898 12061 0 00:26 pts/0 00:00:00 grep strace
[root at dht-rhs-20 ~]#
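To pin down where the rename is stuck, statedumps would probably be the next useful artifact. A minimal sketch of what could be collected (this assumes the default statedump directory /var/run/gluster, that the FUSE client matches the pgrep pattern below, and that pid 4179 is still the hung mv):

# Brick-side statedumps, written to /var/run/gluster on each server
gluster volume statedump testvol

# Client-side statedump: SIGUSR1 makes the glusterfs fuse client dump its state
CLIENT_PID=$(pgrep -f 'glusterfs.*/mnt/glusterfs' | head -n1)
kill -USR1 "$CLIENT_PID"

# Kernel stack of the hung mv, if /proc/<pid>/stack is available on this kernel
cat /proc/4179/stack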
[root at dht-rhs-19 newtest]# dd if=/dev/urandom of=testfile bs=1024 count=10000000
dd: writing `testfile': Transport endpoint is not connected
dd: closing output file `testfile': Transport endpoint is not connected
[root at dht-rhs-19 newtest]# pwd
/mnt/glusterfs/newtest
[root at dht-rhs-19 newtest]# tail -f /var/log/glusterfs/etc-glusterfs-glusterd.vol.log
[2015-07-06 18:44:42.443567] W [socket.c:642:__socket_rwv] 0-management: readv
on /var/run/gluster/88f62090f6b9a3ed397d9b66a512f0af.socket failed (Invalid
argument)
[2015-07-06 18:44:45.444036] W [socket.c:642:__socket_rwv] 0-management: readv
on /var/run/gluster/88f62090f6b9a3ed397d9b66a512f0af.socket failed (Invalid
argument)
[2015-07-06 18:44:48.444705] W [socket.c:642:__socket_rwv] 0-management: readv
on /var/run/gluster/88f62090f6b9a3ed397d9b66a512f0af.socket failed (Invalid
argument)
[2015-07-06 18:44:51.445249] W [socket.c:642:__socket_rwv] 0-management: readv
on /var/run/gluster/88f62090f6b9a3ed397d9b66a512f0af.socket failed (Invalid
argument)
[2015-07-06 18:44:54.445711] W [socket.c:642:__socket_rwv] 0-management: readv
on /var/run/gluster/88f62090f6b9a3ed397d9b66a512f0af.socket failed (Invalid
argument)
======
The brick log files contain:
[root at dht-rhs-19 newtest]# vim /var/log/glusterfs/bricks/bricks-brick1-testvol_brick2.log
brick1/testvol_brick2/.glusterfs/7e/fc/7efc026e-b861-4603-8a21-3c78002856f2
[No such file or directory]
[2015-07-06 17:30:14.002629] E [MSGID: 113104]
[posix-handle.c:154:posix_make_ancestryfromgfid] 0-testvol-posix: could not
read the link from the gfid handle
/bricks/brick1/testvol_brick2/.glusterfs/7e/fc/7efc026e-b861-4603-8a21-3c78002856f2
[No such file or directory]
[2015-07-06 17:40:15.006786] E [MSGID: 113104]
[posix-handle.c:154:posix_make_ancestryfromgfid] 0-testvol-posix: could not
read the link from the gfid handle
/bricks/brick1/testvol_brick2/.glusterfs/7e/fc/7efc026e-b861-4603-8a21-3c78002856f2
[No such file or directory]
[2015-07-06 17:50:16.002135] E [MSGID: 113104]
[posix-handle.c:154:posix_make_ancestryfromgfid] 0-testvol-posix: could not
read the link from the gfid handle
/bricks/brick1/testvol_brick2/.glusterfs/7e/fc/7efc026e-b861-4603-8a21-3c78002856f2
[No such file or directory]
[2015-07-06 18:00:17.002120] E [MSGID: 113104]
[posix-handle.c:154:posix_make_ancestryfromgfid] 0-testvol-posix: could not
read the link from the gfid handle
/bricks/brick1/testvol_brick2/.glusterfs/7e/fc/7efc026e-b861-4603-8a21-3c78002856f2
[No such file or directory]
[2015-07-06 18:02:08.343647] I [MSGID: 115029]
[server-handshake.c:593:server_setvolume] 0-testvol-server: accepted client
from dht-rhs-19-10891-2015/07/06-18:02:08:213514-testvol-client-2-0-0 (version:
3.7.1)
[2015-07-06 18:08:57.693231] W [marker-quota.c:3063:mq_reduce_parent_size_task]
(--> /usr/lib64/libglusterfs.so.0(_gf_log_callingfn+0x1e0)[0x7f69ab163520] (-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_reduce_parent_size_task+0x206)[0x7f69975bd9c6]
(--> /usr/lib64/libglusterfs.so.0(synctask_wrap+0x12)[0x7f69ab1a71f2] (-->
/lib64/libc.so.6(+0x33d5a438f0)[0x7f69a9b0f8f0] )))) 0-testvol-marker: ctx for
the node /newtest/newtestfile-1 is NULL
[2015-07-06 18:09:00.190722] W [marker-quota.c:3063:mq_reduce_parent_size_task]
(--> /usr/lib64/libglusterfs.so.0(_gf_log_callingfn+0x1e0)[0x7f69ab163520] (-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_reduce_parent_size_task+0x206)[0x7f69975bd9c6]
(--> /usr/lib64/libglusterfs.so.0(synctask_wrap+0x12)[0x7f69ab1a71f2] (-->
/lib64/libc.so.6(+0x33d5a438f0)[0x7f69a9b0f8f0] )))) 0-testvol-marker: ctx for
the node /newtest/newtestfile-1-1-2-3-4-5-6-7-8 is NULL
[2015-07-06 18:09:03.452685] E [marker-quota.c:2502:mq_get_metadata] (-->
/usr/lib64/libglusterfs.so.0(_gf_log_callingfn+0x1e0)[0x7f69ab163520] (-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_get_metadata+0x15a)[0x7f69975bef3a]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_get_delta+0x8c)[0x7f69975bf0ec]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_start_quota_txn_v2+0x1b4)[0x7f69975bf3e4]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_initiate_quota_task+0x8c)[0x7f69975bf8dc]
))))) 0-testvol-marker: Failed to get metadata for
/newtest/newtestfile-1-1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16
[2015-07-06 18:09:43.217581] E [marker-quota.c:2502:mq_get_metadata] (-->
/usr/lib64/libglusterfs.so.0(_gf_log_callingfn+0x1e0)[0x7f69ab163520] (-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_get_metadata+0x15a)[0x7f69975bef3a]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_get_delta+0x8c)[0x7f69975bf0ec]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_start_quota_txn_v2+0x1b4)[0x7f69975bf3e4]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_initiate_quota_task+0x8c)[0x7f69975bf8dc]
))))) 0-testvol-marker: Failed to get metadata for
/newtest/newtestfile-1-1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36
[2015-07-06 18:10:18.002187] E [MSGID: 113104]
[posix-handle.c:154:posix_make_ancestryfromgfid] 0-testvol-posix: could not
read the link from the gfid handle
/bricks/brick1/testvol_brick2/.glusterfs/7e/fc/7efc026e-b861-4603-8a21-3c78002856f2
[No such file or directory]
[2015-07-06 18:20:20.002396] E [MSGID: 113104]
[posix-handle.c:154:posix_make_ancestryfromgfid] 0-testvol-posix: could not
read the link from the gfid handle
/bricks/brick1/testvol_brick2/.glusterfs/7e/fc/7efc026e-b861-4603-8a21-3c78002856f2
[No such file or directory]
And the gluster volume status is:
[root at dht-rhs-19 newtest]# gluster v status
Status of volume: testvol
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 10.70.47.98:/bricks/brick0/testvol_br
ick0                                        49648     0          Y       3671
Brick 10.70.47.99:/bricks/brick0/testvol_br
ick1                                        49275     0          Y       12558
Brick 10.70.47.98:/bricks/brick1/testvol_br
ick2                                        N/A       N/A        N       N/A
Brick 10.70.47.99:/bricks/brick1/testvol_br
ick3                                        N/A       N/A        N       N/A
NFS Server on localhost                     2049      0          Y       3709
Self-heal Daemon on localhost               N/A       N/A        Y       3717
Quota Daemon on localhost                   N/A       N/A        Y       3785
NFS Server on 10.70.47.99                   2049      0          Y       12596
Self-heal Daemon on 10.70.47.99             N/A       N/A        Y       12603
Quota Daemon on 10.70.47.99                 N/A       N/A        Y       12712

Task Status of Volume testvol
------------------------------------------------------------------------------
Task                 : Rebalance
ID                   : 28022cc2-181e-41a5-8480-c8a4d118dcbe
Status               : completed
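Brick2 and brick3 report Online: N, consistent with the brick processes having died. A rough sketch of how the crash could be confirmed and the bricks restarted for another attempt (the core-file location is an assumption; it depends on kernel.core_pattern/abrt):

# Crash signature in the log of an offline brick
grep -E 'signal received|crash' /var/log/glusterfs/bricks/bricks-brick1-testvol_brick2.log

# Any core files left behind by the brick process
ls -l /core* 2>/dev/null

# Restart only the bricks that are down, then re-check
gluster volume start testvol force
gluster volume status testvol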
[root at dht-rhs-19 ~]# gluster v info
Volume Name: testvol
Type: Distributed-Replicate
Volume ID: b876b9ff-0af9-48d5-a236-51ecd9e9b061
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 10.70.47.98:/bricks/brick0/testvol_brick0
Brick2: 10.70.47.99:/bricks/brick0/testvol_brick1
Brick3: 10.70.47.98:/bricks/brick1/testvol_brick2
Brick4: 10.70.47.99:/bricks/brick1/testvol_brick3
Options Reconfigured:
cluster.min-free-disk: 40%
features.quota-deem-statfs: on
features.inode-quota: on
features.quota: on
performance.readdir-ahead: on
[root at dht-rhs-19 ~]#
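Since quota and inode-quota are enabled and the marker-quota errors above reference the renamed files, it may also be worth cross-checking the quota state once the bricks are back. A hedged sketch (only useful if limits are actually configured on the volume):

# List configured quota limits and current usage for testvol
gluster volume quota testvol list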
On the other node, the brick log shows similar errors:
[root at dht-rhs-20 ~]# vim /var/log/glusterfs/bricks/bricks-brick1-testvol_brick3.log
brick1/testvol_brick3/.glusterfs/31/07/3107fcc5-8db7-4507-b2aa-1894bdb54a02
[No such file or directory]
[2015-07-06 18:02:07.416457] I [MSGID: 115029]
[server-handshake.c:593:server_setvolume] 0-testvol-server: accepted client
from dht-rhs-19-10891-2015/07/06-18:02:08:213514-testvol-client-3-0-0 (version:
3.7.1)
[2015-07-06 18:08:56.758164] E [marker-quota.c:2502:mq_get_metadata] (-->
/usr/lib64/libglusterfs.so.0(_gf_log_callingfn+0x1e0)[0x7fba413be520] (-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_get_metadata+0x15a)[0x7fba31904f3a]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_get_delta+0x8c)[0x7fba319050ec]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_start_quota_txn_v2+0x1b4)[0x7fba319053e4]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_initiate_quota_task+0x8c)[0x7fba319058dc]
))))) 0-testvol-marker: Failed to get metadata for /newtest/newtestfile-1
[2015-07-06 18:08:59.250377] W [marker-quota.c:3063:mq_reduce_parent_size_task]
(--> /usr/lib64/libglusterfs.so.0(_gf_log_callingfn+0x1e0)[0x7fba413be520] (-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_reduce_parent_size_task+0x206)[0x7fba319039c6]
(--> /usr/lib64/libglusterfs.so.0(synctask_wrap+0x12)[0x7fba414021f2] (-->
/lib64/libc.so.6(+0x3f3f0438f0)[0x7fba3fd6a8f0] )))) 0-testvol-marker: ctx for
the node /newtest/newtestfile-1-1-2-3-4-5-6-7-8 is NULL
[2015-07-06 18:09:02.539936] W [marker-quota.c:3063:mq_reduce_parent_size_task]
(--> /usr/lib64/libglusterfs.so.0(_gf_log_callingfn+0x1e0)[0x7fba413be520] (-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_reduce_parent_size_task+0x206)[0x7fba319039c6]
(--> /usr/lib64/libglusterfs.so.0(synctask_wrap+0x12)[0x7fba414021f2] (-->
/lib64/libc.so.6(+0x3f3f0438f0)[0x7fba3fd6a8f0] )))) 0-testvol-marker: ctx for
the node /newtest/newtestfile-1-1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16 is NULL
[2015-07-06 18:09:24.096474] E [marker-quota.c:2502:mq_get_metadata] (-->
/usr/lib64/libglusterfs.so.0(_gf_log_callingfn+0x1e0)[0x7fba413be520] (-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_get_metadata+0x15a)[0x7fba31904f3a]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_get_delta+0x8c)[0x7fba319050ec]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_start_quota_txn_v2+0x1b4)[0x7fba319053e4]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_initiate_quota_task+0x8c)[0x7fba319058dc]
))))) 0-testvol-marker: Failed to get metadata for
/newtest/newtestfile-1-1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25
[2015-07-06 18:09:25.801226] W [marker-quota.c:3063:mq_reduce_parent_size_task]
(--> /usr/lib64/libglusterfs.so.0(_gf_log_callingfn+0x1e0)[0x7fba413be520] (-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_reduce_parent_size_task+0x206)[0x7fba319039c6]
(--> /usr/lib64/libglusterfs.so.0(synctask_wrap+0x12)[0x7fba414021f2] (-->
/lib64/libc.so.6(+0x3f3f0438f0)[0x7fba3fd6a8f0] )))) 0-testvol-marker: ctx for
the node
/newtest/newtestfile-1-1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28
is NULL
[2015-07-06 18:09:42.258580] E [marker-quota.c:2502:mq_get_metadata] (-->
/usr/lib64/libglusterfs.so.0(_gf_log_callingfn+0x1e0)[0x7fba413be520] (-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_get_metadata+0x15a)[0x7fba31904f3a]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_get_delta+0x8c)[0x7fba319050ec]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_start_quota_txn_v2+0x1b4)[0x7fba319053e4]
(-->
/usr/lib64/glusterfs/3.7.1/xlator/features/marker.so(mq_initiate_quota_task+0x8c)[0x7fba319058dc]
))))) 0-testvol-marker: Failed to get metadata for
/newtest/newtestfile-1-1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36
[2015-07-06 18:10:18.588848] E [MSGID: 113104]
[posix-handle.c:154:posix_make_ancestryfromgfid] 0-testvol-posix: could not
read the link from the gfid handle
/bricks/brick1/testvol_brick3/.glusterfs/31/07/3107fcc5-8db7-4507-b2aa-1894bdb54a02
[No such file or directory]
I have tried this only once so far, but I am confident it can be reproduced.
[root at dht-rhs-20 ~]# gluster v status
Status of volume: testvol
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 10.70.47.98:/bricks/brick0/testvol_br
ick0                                        49648     0          Y       3671
Brick 10.70.47.99:/bricks/brick0/testvol_br
ick1                                        49275     0          Y       12558
Brick 10.70.47.98:/bricks/brick1/testvol_br
ick2                                        N/A       N/A        N       N/A
Brick 10.70.47.99:/bricks/brick1/testvol_br
ick3                                        N/A       N/A        N       N/A
NFS Server on localhost                     2049      0          Y       12596
Self-heal Daemon on localhost               N/A       N/A        Y       12603
Quota Daemon on localhost                   N/A       N/A        Y       12712
NFS Server on 10.70.47.98                   2049      0          Y       3709
Self-heal Daemon on 10.70.47.98             N/A       N/A        Y       3717
Quota Daemon on 10.70.47.98                 N/A       N/A        Y       3785

Task Status of Volume testvol
------------------------------------------------------------------------------
Task                 : Rebalance
ID                   : 28022cc2-181e-41a5-8480-c8a4d118dcbe
Status               : completed
[root at dht-rhs-20 ~]#
[root at dht-rhs-19 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda2 39G 1.7G 35G 5% /
tmpfs 4.0G 0 4.0G 0% /dev/shm
/dev/vda1 477M 32M 420M 8% /boot
/dev/mapper/VG01-LV00
50G 387M 50G 1% /bricks/brick0
/dev/mapper/VG01-LV01
20G 1.8G 19G 9% /bricks/brick1
/dev/mapper/VG01-LV02
10G 33M 10G 1% /bricks/brick2
/dev/mapper/VG01-LV03
10G 33M 10G 1% /bricks/brick3
/dev/mapper/VG01-LV04
10G 33M 10G 1% /bricks/brick4
/dev/mapper/VG01-LV05
10G 33M 10G 1% /bricks/brick5
/dev/mapper/VG01-LV06
10G 33M 10G 1% /bricks/brick6
/dev/mapper/VG01-LV07
10G 33M 10G 1% /bricks/brick7
/dev/mapper/VG01-LV08
10G 33M 10G 1% /bricks/brick8
/dev/mapper/VG01-LV09
10G 33M 10G 1% /bricks/brick9
localhost:testvol 50G 387M 50G 1% /var/run/gluster/testvol
10.70.47.99:/testvol 50G 387M 50G 1% /mnt/glusterfs
[root at dht-rhs-19 ~]#
=======
[root at dht-rhs-20 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda2 34G 1.7G 31G 6% /
tmpfs 4.0G 0 4.0G 0% /dev/shm
/dev/vda1 477M 32M 420M 8% /boot
/dev/mapper/VG01-LV00
50G 280M 50G 1% /bricks/brick0
/dev/mapper/VG01-LV01
20G 1.3G 19G 7% /bricks/brick1
/dev/mapper/VG01-LV02
10G 33M 10G 1% /bricks/brick2
/dev/mapper/VG01-LV03
10G 33M 10G 1% /bricks/brick3
/dev/mapper/VG01-LV04
10G 33M 10G 1% /bricks/brick4
/dev/mapper/VG01-LV05
10G 33M 10G 1% /bricks/brick5
/dev/mapper/VG01-LV06
10G 33M 10G 1% /bricks/brick6
/dev/mapper/VG01-LV07
10G 33M 10G 1% /bricks/brick7
/dev/mapper/VG01-LV08
10G 33M 10G 1% /bricks/brick8
/dev/mapper/VG01-LV09
10G 33M 10G 1% /bricks/brick9
10.70.47.98:testvol 50G 387M 50G 1% /mnt/glusterfs
[root at dht-rhs-20 ~]#
[root at dht-rhs-19 ~]# rpm -qa | grep -i gluster
glusterfs-cli-3.7.1-6.el6rhs.x86_64
gluster-nagios-common-0.2.0-1.el6rhs.noarch
vdsm-gluster-4.16.20-1.2.el6rhs.noarch
glusterfs-3.7.1-6.el6rhs.x86_64
glusterfs-client-xlators-3.7.1-6.el6rhs.x86_64
glusterfs-libs-3.7.1-6.el6rhs.x86_64
glusterfs-api-3.7.1-6.el6rhs.x86_64
glusterfs-fuse-3.7.1-6.el6rhs.x86_64
nfs-ganesha-gluster-2.2.0-3.el6rhs.x86_64
glusterfs-rdma-3.7.1-6.el6rhs.x86_64
python-gluster-3.7.1-6.el6rhs.x86_64
glusterfs-server-3.7.1-6.el6rhs.x86_64
gluster-nagios-addons-0.2.4-2.el6rhs.x86_64
glusterfs-ganesha-3.7.1-6.el6rhs.x86_64
glusterfs-geo-replication-3.7.1-6.el6rhs.x86_64
[root at dht-rhs-19 ~]#
[root at dht-rhs-20 ~]# rpm -qa | grep -i gluster
glusterfs-cli-3.7.1-6.el6rhs.x86_64
gluster-nagios-common-0.2.0-1.el6rhs.noarch
vdsm-gluster-4.16.20-1.2.el6rhs.noarch
glusterfs-3.7.1-6.el6rhs.x86_64
glusterfs-client-xlators-3.7.1-6.el6rhs.x86_64
glusterfs-libs-3.7.1-6.el6rhs.x86_64
glusterfs-api-3.7.1-6.el6rhs.x86_64
glusterfs-fuse-3.7.1-6.el6rhs.x86_64
nfs-ganesha-gluster-2.2.0-3.el6rhs.x86_64
glusterfs-rdma-3.7.1-6.el6rhs.x86_64
python-gluster-3.7.1-6.el6rhs.x86_64
glusterfs-server-3.7.1-6.el6rhs.x86_64
gluster-nagios-addons-0.2.4-2.el6rhs.x86_64
glusterfs-ganesha-3.7.1-6.el6rhs.x86_64
glusterfs-geo-replication-3.7.1-6.el6rhs.x86_64
[root at dht-rhs-20 ~]#
[root at dht-rhs-19 ~]# mount
/dev/vda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw,rootcontext="system_u:object_r:tmpfs_t:s0")
/dev/vda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
/dev/mapper/VG01-LV00 on /bricks/brick0 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV01 on /bricks/brick1 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV02 on /bricks/brick2 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV03 on /bricks/brick3 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV04 on /bricks/brick4 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV05 on /bricks/brick5 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV06 on /bricks/brick6 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV07 on /bricks/brick7 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV08 on /bricks/brick8 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV09 on /bricks/brick9 type xfs
(rw,noatime,nodiratime,inode64)
localhost:testvol on /var/run/gluster/testvol type fuse.glusterfs
(rw,default_permissions,allow_other,max_read=131072)
10.70.47.99:/testvol on /mnt/glusterfs type fuse.glusterfs
(rw,default_permissions,allow_other,max_read=131072)
===========================
[root at dht-rhs-20 ~]# mount
/dev/vda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw,rootcontext="system_u:object_r:tmpfs_t:s0")
/dev/vda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
/dev/mapper/VG01-LV00 on /bricks/brick0 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV01 on /bricks/brick1 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV02 on /bricks/brick2 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV03 on /bricks/brick3 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV04 on /bricks/brick4 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV05 on /bricks/brick5 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV06 on /bricks/brick6 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV07 on /bricks/brick7 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV08 on /bricks/brick8 type xfs
(rw,noatime,nodiratime,inode64)
/dev/mapper/VG01-LV09 on /bricks/brick9 type xfs
(rw,noatime,nodiratime,inode64)
10.70.47.98:testvol on /mnt/glusterfs type fuse.glusterfs
(rw,default_permissions,allow_other,max_read=131072)
[root at dht-rhs-20 ~]#
Hostname to IP mapping:
dht-rhs-19 : 10.70.47.98
dht-rhs-20 : 10.70.47.99