=~=~=~=~=~=~=~=~=~=~=~= PuTTY log 2019.01.22 16:57:37 =~=~=~=~=~=~=~=~=~=~=~=
oc project glusterfs
Already on project "glusterfs" on server "https://master.matrix.orange.lab:8443".
[root@master ~]# oc get po
NAME                                          READY     STATUS    RESTARTS   AGE
glusterblock-storage-provisioner-dc-1-6nwq1   1/1       Running   2          32d
glusterfs-storage-q4ptb                       1/1       Running   0          2h
glusterfs-storage-ssvjp                       1/1       Running   6          34d
glusterfs-storage-vll7x                       1/1       Running   1          34d
heketi-storage-1-zm77z                        1/1       Running   38         32d
[root@master ~]# oc rsh glusterfs-storage-q4ptb
sh-4.2# gluster --version
glusterfs 4.1.6
Repository revision: git://git.gluster.org/glusterfs.git
Copyright (c) 2006-2016 Red Hat, Inc.
GlusterFS comes with ABSOLUTELY NO WARRANTY.
It is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3
or later), or the GNU General Public License, version 2 (GPLv2),
in all cases as published by the Free Software Foundation.
sh-4.2# gluster volume info

Volume Name: glusterfs-registry-volume
Type: Replicate
Volume ID: 4b4580d7-c35c-414d-a95e-192cc1e79c81
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 192.168.3.15:/var/lib/heketi/mounts/vg_462ea199185376b03e4b0317363bb88c/brick_1d82bb0c9fe071b9d1ee218d161f05e1/brick
Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick
Brick3: 192.168.3.5:/var/lib/heketi/mounts/vg_5377cf444a65b645bb0e085e56d00fcf/brick_a4c798f0e7c633c4cf4d5886575b5f65/brick
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off

Volume Name: heketidbstorage
Type: Replicate
Volume ID: dd0c0083-b8d6-4529-a4e4-8520d3257135
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick
Brick2: 192.168.3.5:/var/lib/heketi/mounts/vg_5377cf444a65b645bb0e085e56d00fcf/brick_9d87ed61a9d0aa7024238aad1e869399/brick
Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_56ae93ad9edaf7a878424cf47f277c02/brick
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off

Volume Name: vol_16c57180902d04440ff9bfa202f7abf1
Type: Replicate
Volume ID: 564a16be-782d-49a7-b839-cf5e7422cc91
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 192.168.3.6:/var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick
Brick2: 192.168.3.5:/var/lib/heketi/mounts/vg_19ff564b6fd157df6a734b58fd28455d/brick_62dc1633d74c86234f08260a49a5377c/brick
Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_7c90e2bc40f55cc01418742754fa6de7/brick
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off

Volume Name: vol_1bc2533893950d1f357b4d690882d2c2
Type: Replicate
Volume ID: 3d07685e-7c74-44f4-88c5-f70b77f1894c
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 192.168.3.15:/var/lib/heketi/mounts/vg_462ea199185376b03e4b0317363bb88c/brick_7aa4edca89b48e92a57ebd9abccb46f8/brick
Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick
Brick3:
192.168.3.5:/var/lib/heketi/mounts/vg_5377cf444a65b645bb0e085e56d00fcf/brick_226e300741f3806581bb2aee189b66d7/brick Options Reconfigured: transport.address-family: inet nfs.disable: on performance.client-io-threads: off Volume Name: vol_258b8b659ad3b593dfb5672fae9ab57a Type: Replicate Volume ID: 17ba1f42-5be3-497e-8e6e-b67aeb6c1d17 Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.5:/var/lib/heketi/mounts/vg_48ace24b28b3a8fde08c7575846f24f8/brick_a02f4382cc1bce68936dad1590faa1e9/brick Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_f200b815c7993f02c92b3a215b5754e2/brick Options Reconfigured: performance.client-io-threads: off nfs.disable: on transport.address-family: inet Volume Name: vol_3442e86b6d994a14de73f1b8c82cf0b8 Type: Replicate Volume ID: 15477f36-22e8-4757-a0ce-9000b63fa849 Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick Brick2: 192.168.3.5:/var/lib/heketi/mounts/vg_d5f17487744584e3652d3ca943b0b91b/brick_e15c12cceae12c8ab7782dd57cf5b6c1/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_462ea199185376b03e4b0317363bb88c/brick_1736459d19e8aaa1dcb5a87f48747d04/brick Options Reconfigured: diagnostics.brick-log-level: INFO performance.client-io-threads: off nfs.disable: on transport.address-family: inet Volume Name: vol_3bc80e4effa4fc16b189e156d9f5c75e Type: Replicate Volume ID: d03aa8af-c26d-4347-bc12-2959de50800b Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.5:/var/lib/heketi/mounts/vg_48ace24b28b3a8fde08c7575846f24f8/brick_b59e8ae56d122c59d964065438570185/brick Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_721cbe15a8d6bde8be6a63487d1b823c/brick Options Reconfigured: transport.address-family: inet nfs.disable: on performance.client-io-threads: off Volume Name: vol_59581a21d2113b0c0d4b2fa729a6a932 Type: Replicate Volume ID: 53f6243d-6fce-4bd1-8790-9dbe9f433119 Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_93ffd88e3e362ee02eaf209ad55a94df/brick Brick2: 192.168.3.5:/var/lib/heketi/mounts/vg_5d370cba9af0211a6f7899fb39a768b7/brick_95c2b9519e1f02c1230b5b38d2b282d0/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_462ea199185376b03e4b0317363bb88c/brick_29a0d34a6daa4c0e2223c83b9366d905/brick Options Reconfigured: performance.client-io-threads: off nfs.disable: on transport.address-family: inet Volume Name: vol_6954c7e3a47641f1bd4fbb66d2e8aebe Type: Replicate Volume ID: cfd70ca4-82b5-4e67-bec9-7a63f6568cf1 Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_9689339f41404fe22492e6bf060ac273/brick Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick Brick3: 192.168.3.5:/var/lib/heketi/mounts/vg_5d370cba9af0211a6f7899fb39a768b7/brick_7bd2a10c558321325da0206ca8289a82/brick 
Options Reconfigured: transport.address-family: inet nfs.disable: on performance.client-io-threads: off Volume Name: vol_6d36831a039b04088d29ecb477828ba7 Type: Replicate Volume ID: 9f1657c6-56da-4a78-b604-8a088db20c5a Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_cc66ac0d966b599f82bdbe0147b2fabe/brick Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick Brick3: 192.168.3.5:/var/lib/heketi/mounts/vg_5d370cba9af0211a6f7899fb39a768b7/brick_24faac47c6f300c233d67c5245a3a48e/brick Options Reconfigured: performance.client-io-threads: off nfs.disable: on transport.address-family: inet Volume Name: vol_6d45975bc09622845d3674a36581cb20 Type: Replicate Volume ID: bf70163b-a77f-4bbc-8c12-f77d8918edb0 Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.5:/var/lib/heketi/mounts/vg_4a7379cf157991e4c4b672fed081fc7d/brick_062ad572b1a6c3a7c642f375786f0863/brick Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_f22c2473409756ec3ee1ab3a36ab9eb8/brick Options Reconfigured: performance.client-io-threads: off nfs.disable: on transport.address-family: inet Volume Name: vol_8285c4fc4d6e4064ac1527d035eee2f8 Type: Replicate Volume ID: bc64436b-2e91-40f3-9869-15f611053017 Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.6:/var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_c04b2f3fc2aed6c127e0095bb79a9c2b/brick Brick2: 192.168.3.5:/var/lib/heketi/mounts/vg_48ace24b28b3a8fde08c7575846f24f8/brick_b45a44c0902a9e6fa0bb434f72f8fa39/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_462ea199185376b03e4b0317363bb88c/brick_7ee133d5c95aee0fc0c71e8ce0e0cfcb/brick Options Reconfigured: transport.address-family: inet nfs.disable: on performance.client-io-threads: off Volume Name: vol_87d44d5b3e6923bd42a620cbc838b253 Type: Replicate Volume ID: 29518352-4389-4bb2-a687-4674b68abd73 Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick Brick2: 192.168.3.5:/var/lib/heketi/mounts/vg_4a7379cf157991e4c4b672fed081fc7d/brick_a485c279b7ad15b0b1ed28fa385e3db4/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_bf339a847db6e16612b9c97f9604518e/brick Options Reconfigured: transport.address-family: inet nfs.disable: on performance.client-io-threads: off Volume Name: vol_93cf4b391b72534c5732df0c82206f50 Type: Replicate Volume ID: 2df603c8-dad6-402b-b8be-e454c06b106d Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.5:/var/lib/heketi/mounts/vg_19ff564b6fd157df6a734b58fd28455d/brick_be8530b4eb003c98b4f65bb2b7fa2e8b/brick Brick2: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_d32c89e08b6fa041a08d624121dc8f6d/brick Brick3: 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick Options Reconfigured: transport.address-family: inet nfs.disable: on performance.client-io-threads: off Volume Name: 
vol_bfc74a6caa795f5f1d67b2223de42517 Type: Replicate Volume ID: b55b4cd1-4f04-48f0-8ccc-5a01c8e319c7 Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.6:/var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_299a2bfb4702dfd575ca66c58fece9f9/brick Brick2: 192.168.3.5:/var/lib/heketi/mounts/vg_d5f17487744584e3652d3ca943b0b91b/brick_b288743446d8234f1d6e32f4c09f5029/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_683da433b3d145682f2b92c5d7a936b5/brick Options Reconfigured: transport.address-family: inet nfs.disable: on performance.client-io-threads: off Volume Name: vol_ca1258696cc5eb793a2003d3c3b9e98e Type: Replicate Volume ID: ce275446-8d8d-4267-afdc-21e837169d1b Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.5:/var/lib/heketi/mounts/vg_5377cf444a65b645bb0e085e56d00fcf/brick_fb19adaea0fab795b3eef8bcc75faff5/brick Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_595043e897901aa6057b1227d430b20d/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_d4ad1bac0b813f11a5475245e95fbf55/brick Options Reconfigured: transport.address-family: inet nfs.disable: on performance.client-io-threads: off Volume Name: vol_d936fff4500a9cb9491963d0624214fb Type: Replicate Volume ID: f8a2bd70-5eb8-49d6-8161-c35fac76cc3c Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.15:/var/lib/heketi/mounts/vg_462ea199185376b03e4b0317363bb88c/brick_57b2ef73d93718d544f699683989659b/brick Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick Brick3: 192.168.3.5:/var/lib/heketi/mounts/vg_5d370cba9af0211a6f7899fb39a768b7/brick_89187962355d4e6ed981460fec90291f/brick Options Reconfigured: transport.address-family: inet nfs.disable: on performance.client-io-threads: off Volume Name: vol_e1aa1283d5917485d88c4a742eeff422 Type: Replicate Volume ID: dffd3f96-6b5a-4f4d-8779-38273d07e64d Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.5:/var/lib/heketi/mounts/vg_4a7379cf157991e4c4b672fed081fc7d/brick_5a53164a9194a16fcac0532f243bf9e2/brick Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_8512e67774e641dd49472518d5bfe5b1/brick Options Reconfigured: performance.client-io-threads: off nfs.disable: on transport.address-family: inet Volume Name: vol_f0ed498d7e781d7bb896244175b31f9e Type: Replicate Volume ID: a98add0e-d1d5-4fbd-8297-a3b86877d3d3 Status: Started Snapshot Count: 0 Number of Bricks: 1 x 3 = 3 Transport-type: tcp Bricks: Brick1: 192.168.3.5:/var/lib/heketi/mounts/vg_5377cf444a65b645bb0e085e56d00fcf/brick_101b9522dfb66a222bd080649103e804/brick Brick2: 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_47ed9e0663ad0f6f676ddd6ad7e3dcde/brick Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_0079eb6e88a1a36fbe77fbe0a114e1d8/brick Options Reconfigured: transport.address-family: inet nfs.disable: on performance.client-io-threads: off Volume Name: vol_f387519c9b004ec14e80696db88ef0f8 Type: Replicate Volume ID: 55767962-5c62-47aa-9947-2d02758941bf Status: Started Snapshot Count: 0 Number of 
Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_06ad6c73dfbf6a5fc21334f98c9973c2/brick
Brick2: 192.168.3.5:/var/lib/heketi/mounts/vg_4a7379cf157991e4c4b672fed081fc7d/brick_d39bc4e6008cb1fdd2f88e079ca16d2a/brick
Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_bea9f19217b6019e1ed89f262ec28bed/brick
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off

Volume Name: vol_f8ca343c60e6efe541fe02d16ca02a7d
Type: Replicate
Volume ID: 10a96ce0-a673-491a-8753-f1d470cd7928
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 192.168.3.6:/var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick
Brick2: 192.168.3.5:/var/lib/heketi/mounts/vg_48ace24b28b3a8fde08c7575846f24f8/brick_757f9ce05ffd5fe41e81db48e06c9845/brick
Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_6a81a627e1837aa61d211876d2d22845/brick
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off

Volume Name: vol_fe882e074c0512fd9271fc2ff5a0bfe1
Type: Replicate
Volume ID: 00a34555-f7f9-438d-884d-5763456d19c9
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick
Brick2: 192.168.3.5:/var/lib/heketi/mounts/vg_48ace24b28b3a8fde08c7575846f24f8/brick_823352587197d646bad2be88f32dd4e5/brick
Brick3: 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_09838aaf30ae140768ffc7292eabb0f0/brick
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
sh-4.2# gluster volume status
Status of volume: glusterfs-registry-volume
Gluster process                                       TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 192.168.3.15:/var/lib/heketi/mounts/vg_462ea199185376b03e4b0317363bb88c/brick_1d82bb0c9fe071b9d1ee218d161f05e1/brick   49152  0  Y  198
Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick    49152  0  Y  205
Brick 192.168.3.5:/var/lib/heketi/mounts/vg_5377cf444a65b645bb0e085e56d00fcf/brick_a4c798f0e7c633c4cf4d5886575b5f65/brick    49154  0  Y  7743
Self-heal Daemon on localhost                         N/A       N/A        Y       14077
Self-heal Daemon on matrix1.matrix.orange.lab         N/A       N/A        Y       42407
Self-heal Daemon on 192.168.3.15                      N/A       N/A        Y       83271

Task Status of Volume glusterfs-registry-volume
------------------------------------------------------------------------------
There are no active volume tasks

Status of volume: heketidbstorage
Gluster process                                       TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick    49153  0  Y  214
Brick 192.168.3.5:/var/lib/heketi/mounts/vg_5377cf444a65b645bb0e085e56d00fcf/brick_9d87ed61a9d0aa7024238aad1e869399/brick    49152  0  Y  262
Brick 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_56ae93ad9edaf7a878424cf47f277c02/brick   49153  0  Y  207
Self-heal Daemon on localhost                         N/A       N/A        Y       14077
Self-heal Daemon on matrix1.matrix.orange.lab         N/A       N/A        Y       42407
Self-heal Daemon on 192.168.3.15                      N/A       N/A        Y       83271
Task Status of Volume heketidbstorage ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_16c57180902d04440ff9bfa202f7abf1 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.6:/var/lib/heketi/mounts/vg _12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5 214e9fddec233c4d8b2e227f5a8b8/brick 49154 0 Y 223 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _19ff564b6fd157df6a734b58fd28455d/brick_62d c1633d74c86234f08260a49a5377c/brick N/A N/A N N/A Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_7c 90e2bc40f55cc01418742754fa6de7/brick 49156 0 Y 12714 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_16c57180902d04440ff9bfa202f7abf1 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_1bc2533893950d1f357b4d690882d2c2 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.15:/var/lib/heketi/mounts/v g_462ea199185376b03e4b0317363bb88c/brick_7a a4edca89b48e92a57ebd9abccb46f8/brick 49154 0 Y 7994 Brick 192.168.3.6:/var/lib/heketi/mounts/vg _5ecc3820ac1e5ea87c3721b8bd2d6813/brick_faf bd17e42fe449463a94d3018b8cd93/brick 49155 0 Y 232 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _5377cf444a65b645bb0e085e56d00fcf/brick_226 e300741f3806581bb2aee189b66d7/brick 49156 0 Y 279 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_1bc2533893950d1f357b4d690882d2c2 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_258b8b659ad3b593dfb5672fae9ab57a Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.5:/var/lib/heketi/mounts/vg _48ace24b28b3a8fde08c7575846f24f8/brick_a02 f4382cc1bce68936dad1590faa1e9/brick 49160 0 Y 7752 Brick 192.168.3.6:/var/lib/heketi/mounts/vg _28708570b029e5eff0a996c453a11691/brick_e5c fd20d1c5f06c3ce00f4e1bd523a46/brick 49156 0 Y 240 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_f2 00b815c7993f02c92b3a215b5754e2/brick 49172 0 Y 216 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_258b8b659ad3b593dfb5672fae9ab57a ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_3442e86b6d994a14de73f1b8c82cf0b8 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.6:/var/lib/heketi/mounts/vg _ca57f326195c243be2380ce4e42a4191/brick_952 d75fd193c7209c9a81acbc23a3747/brick 49157 0 Y 250 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _d5f17487744584e3652d3ca943b0b91b/brick_e15 c12cceae12c8ab7782dd57cf5b6c1/brick N/A N/A N N/A Brick 192.168.3.15:/var/lib/heketi/mounts/v g_462ea199185376b03e4b0317363bb88c/brick_17 36459d19e8aaa1dcb5a87f48747d04/brick 49173 0 Y 225 Self-heal Daemon on localhost N/A 
N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_3442e86b6d994a14de73f1b8c82cf0b8 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_3bc80e4effa4fc16b189e156d9f5c75e Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.5:/var/lib/heketi/mounts/vg _48ace24b28b3a8fde08c7575846f24f8/brick_b59 e8ae56d122c59d964065438570185/brick N/A N/A N N/A Brick 192.168.3.6:/var/lib/heketi/mounts/vg _28708570b029e5eff0a996c453a11691/brick_46f 96c044e5eaf3e2d414bd9be2197ff/brick 49158 0 Y 259 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_72 1cbe15a8d6bde8be6a63487d1b823c/brick 49174 0 Y 234 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_3bc80e4effa4fc16b189e156d9f5c75e ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_59581a21d2113b0c0d4b2fa729a6a932 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.6:/var/lib/heketi/mounts/vg _ca57f326195c243be2380ce4e42a4191/brick_93f fd88e3e362ee02eaf209ad55a94df/brick 49159 0 Y 268 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _5d370cba9af0211a6f7899fb39a768b7/brick_95c 2b9519e1f02c1230b5b38d2b282d0/brick 49162 0 Y 7761 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_462ea199185376b03e4b0317363bb88c/brick_29 a0d34a6daa4c0e2223c83b9366d905/brick 49175 0 Y 243 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_59581a21d2113b0c0d4b2fa729a6a932 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_6954c7e3a47641f1bd4fbb66d2e8aebe Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_96 89339f41404fe22492e6bf060ac273/brick 49177 0 Y 261 Brick 192.168.3.6:/var/lib/heketi/mounts/vg _ca57f326195c243be2380ce4e42a4191/brick_868 c984beb58b42ec855a5d29e88be98/brick 49161 0 Y 286 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _5d370cba9af0211a6f7899fb39a768b7/brick_7bd 2a10c558321325da0206ca8289a82/brick N/A N/A N N/A Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_6954c7e3a47641f1bd4fbb66d2e8aebe ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_6d36831a039b04088d29ecb477828ba7 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_cc 66ac0d966b599f82bdbe0147b2fabe/brick 49178 0 Y 270 Brick 192.168.3.6:/var/lib/heketi/mounts/vg _5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f25 5d45da7b18f3a428215a423e18f61/brick 49162 0 Y 295 Brick 
192.168.3.5:/var/lib/heketi/mounts/vg _5d370cba9af0211a6f7899fb39a768b7/brick_24f aac47c6f300c233d67c5245a3a48e/brick 49158 0 Y 297 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_6d36831a039b04088d29ecb477828ba7 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_6d45975bc09622845d3674a36581cb20 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.5:/var/lib/heketi/mounts/vg _4a7379cf157991e4c4b672fed081fc7d/brick_062 ad572b1a6c3a7c642f375786f0863/brick N/A N/A N N/A Brick 192.168.3.6:/var/lib/heketi/mounts/vg _28708570b029e5eff0a996c453a11691/brick_2b9 88fdacdc748a46974fa50fc7f9e50/brick 49163 0 Y 304 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_f2 2c2473409756ec3ee1ab3a36ab9eb8/brick 49179 0 Y 279 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_6d45975bc09622845d3674a36581cb20 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_8285c4fc4d6e4064ac1527d035eee2f8 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.6:/var/lib/heketi/mounts/vg _526f35058433c6b03130bba4e0a7dd87/brick_c04 b2f3fc2aed6c127e0095bb79a9c2b/brick 49176 0 Y 14054 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _48ace24b28b3a8fde08c7575846f24f8/brick_b45 a44c0902a9e6fa0bb434f72f8fa39/brick 49166 0 Y 42384 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_462ea199185376b03e4b0317363bb88c/brick_7e e133d5c95aee0fc0c71e8ce0e0cfcb/brick 49160 0 Y 83248 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_8285c4fc4d6e4064ac1527d035eee2f8 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_87d44d5b3e6923bd42a620cbc838b253 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.6:/var/lib/heketi/mounts/vg _56391bec3c8bfe4fc116de7bddfc2af4/brick_0a2 54839cbdeb13bbacd77acc40ebce9/brick 49164 0 Y 313 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _4a7379cf157991e4c4b672fed081fc7d/brick_a48 5c279b7ad15b0b1ed28fa385e3db4/brick N/A N/A N N/A Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_bf 339a847db6e16612b9c97f9604518e/brick 49180 0 Y 288 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_87d44d5b3e6923bd42a620cbc838b253 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_93cf4b391b72534c5732df0c82206f50 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.5:/var/lib/heketi/mounts/vg _19ff564b6fd157df6a734b58fd28455d/brick_be8 530b4eb003c98b4f65bb2b7fa2e8b/brick 49159 0 Y 
307 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_d3 2c89e08b6fa041a08d624121dc8f6d/brick 49181 0 Y 297 Brick 192.168.3.6:/var/lib/heketi/mounts/vg _ca57f326195c243be2380ce4e42a4191/brick_c1e df8bec1b388c54f0484319e46ee7f/brick 49165 0 Y 322 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_93cf4b391b72534c5732df0c82206f50 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_bfc74a6caa795f5f1d67b2223de42517 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.6:/var/lib/heketi/mounts/vg _12f02e7d735ee9a66b6f746af8eb71e6/brick_299 a2bfb4702dfd575ca66c58fece9f9/brick 49174 0 Y 10329 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _d5f17487744584e3652d3ca943b0b91b/brick_b28 8743446d8234f1d6e32f4c09f5029/brick 49153 0 Y 38181 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_68 3da433b3d145682f2b92c5d7a936b5/brick 49158 0 Y 79532 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_bfc74a6caa795f5f1d67b2223de42517 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_ca1258696cc5eb793a2003d3c3b9e98e Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.5:/var/lib/heketi/mounts/vg _5377cf444a65b645bb0e085e56d00fcf/brick_fb1 9adaea0fab795b3eef8bcc75faff5/brick 49161 0 Y 316 Brick 192.168.3.6:/var/lib/heketi/mounts/vg _56391bec3c8bfe4fc116de7bddfc2af4/brick_595 043e897901aa6057b1227d430b20d/brick 49167 0 Y 339 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_d4 ad1bac0b813f11a5475245e95fbf55/brick 49183 0 Y 315 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Task Status of Volume vol_ca1258696cc5eb793a2003d3c3b9e98e ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_d936fff4500a9cb9491963d0624214fb Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.15:/var/lib/heketi/mounts/v g_462ea199185376b03e4b0317363bb88c/brick_57 b2ef73d93718d544f699683989659b/brick 49157 0 Y 12884 Brick 192.168.3.6:/var/lib/heketi/mounts/vg _5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46e de70d760848493be42438bd8ccd88/brick 49168 0 Y 349 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _5d370cba9af0211a6f7899fb39a768b7/brick_891 87962355d4e6ed981460fec90291f/brick 49163 0 Y 324 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_d936fff4500a9cb9491963d0624214fb ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_e1aa1283d5917485d88c4a742eeff422 Gluster process TCP Port RDMA Port Online Pid 
------------------------------------------------------------------------------ Brick 192.168.3.5:/var/lib/heketi/mounts/vg _4a7379cf157991e4c4b672fed081fc7d/brick_5a5 3164a9194a16fcac0532f243bf9e2/brick 49164 0 Y 7770 Brick 192.168.3.6:/var/lib/heketi/mounts/vg _526f35058433c6b03130bba4e0a7dd87/brick_9e7 c382e5f853d471c347bc5590359af/brick 49169 0 Y 358 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_85 12e67774e641dd49472518d5bfe5b1/brick 49184 0 Y 324 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_e1aa1283d5917485d88c4a742eeff422 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_f0ed498d7e781d7bb896244175b31f9e Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.5:/var/lib/heketi/mounts/vg _5377cf444a65b645bb0e085e56d00fcf/brick_101 b9522dfb66a222bd080649103e804/brick N/A N/A N N/A Brick 192.168.3.6:/var/lib/heketi/mounts/vg _56391bec3c8bfe4fc116de7bddfc2af4/brick_47e d9e0663ad0f6f676ddd6ad7e3dcde/brick 49170 0 Y 367 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_00 79eb6e88a1a36fbe77fbe0a114e1d8/brick 49185 0 Y 333 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_f0ed498d7e781d7bb896244175b31f9e ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_f387519c9b004ec14e80696db88ef0f8 Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.6:/var/lib/heketi/mounts/vg _56391bec3c8bfe4fc116de7bddfc2af4/brick_06a d6c73dfbf6a5fc21334f98c9973c2/brick 49171 0 Y 376 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _4a7379cf157991e4c4b672fed081fc7d/brick_d39 bc4e6008cb1fdd2f88e079ca16d2a/brick 49174 0 Y 334 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_be a9f19217b6019e1ed89f262ec28bed/brick 49186 0 Y 342 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_f387519c9b004ec14e80696db88ef0f8 ------------------------------------------------------------------------------ There are no active volume tasks Status of volume: vol_f8ca343c60e6efe541fe02d16ca02a7d Gluster process TCP Port RDMA Port Online Pid ------------------------------------------------------------------------------ Brick 192.168.3.6:/var/lib/heketi/mounts/vg _526f35058433c6b03130bba4e0a7dd87/brick_525 225f65753b05dfe33aeaeb9c5de39/brick 49172 0 Y 385 Brick 192.168.3.5:/var/lib/heketi/mounts/vg _48ace24b28b3a8fde08c7575846f24f8/brick_757 f9ce05ffd5fe41e81db48e06c9845/brick 49175 0 Y 342 Brick 192.168.3.15:/var/lib/heketi/mounts/v g_8a0e03bf456117514f3a3ca023e9e0cd/brick_6a 81a627e1837aa61d211876d2d22845/brick 49187 0 Y 351 Self-heal Daemon on localhost N/A N/A Y 14077 Self-heal Daemon on matrix1.matrix.orange.l ab N/A N/A Y 42407 Self-heal Daemon on 192.168.3.15 N/A N/A Y 83271 Task Status of Volume vol_f8ca343c60e6efe541fe02d16ca02a7d 
------------------------------------------------------------------------------
There are no active volume tasks

Status of volume: vol_fe882e074c0512fd9271fc2ff5a0bfe1
Gluster process                                       TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick    49173  0  Y  394
Brick 192.168.3.5:/var/lib/heketi/mounts/vg_48ace24b28b3a8fde08c7575846f24f8/brick_823352587197d646bad2be88f32dd4e5/brick    49176  0  Y  351
Brick 192.168.3.15:/var/lib/heketi/mounts/vg_8a0e03bf456117514f3a3ca023e9e0cd/brick_09838aaf30ae140768ffc7292eabb0f0/brick   49155  0  Y  12341
Self-heal Daemon on localhost                         N/A       N/A        Y       14077
Self-heal Daemon on matrix1.matrix.orange.lab         N/A       N/A        Y       42407
Self-heal Daemon on 192.168.3.15                      N/A       N/A        Y       83271

Task Status of Volume vol_fe882e074c0512fd9271fc2ff5a0bfe1
------------------------------------------------------------------------------
There are no active volume tasks

sh-4.2# gluster peer status
Number of Peers: 2

Hostname: matrix1.matrix.orange.lab
Uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f
State: Peer in Cluster (Connected)

Hostname: 192.168.3.15
Uuid: 73389b0f-281b-4e57-b435-216093ee4749
State: Peer in Cluster (Connected)
sh-4.2# ps -ax | grep glusterfsd
205 ? Ssl 0:02 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id glusterfs-registry-volume.192.168.3.6.var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_b8881a7d5f2d4cda27be4fcc835c573e-brick -p /var/run/gluster/vols/glusterfs-registry-volume/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_b8881a7d5f2d4cda27be4fcc835c573e-brick.pid -S /var/run/gluster/1f8c6c6f24948e78.socket --brick-name /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_b8881a7d5f2d4cda27be4fcc835c573e-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49152 --xlator-option glusterfs-registry-volume-server.listen-port=49152
214 ? Ssl 0:01 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id heketidbstorage.192.168.3.6.var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_afc9ecbfbcc96ddfb1935b088dfe7d1e-brick -p /var/run/gluster/vols/heketidbstorage/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_afc9ecbfbcc96ddfb1935b088dfe7d1e-brick.pid -S /var/run/gluster/8e1ed3f94b1ee11a.socket --brick-name /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_afc9ecbfbcc96ddfb1935b088dfe7d1e-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49153 --xlator-option heketidbstorage-server.listen-port=49153
223 ?
Ssl 1:04 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_16c57180902d04440ff9bfa202f7abf1.192.168.3.6.var-lib-heketi-mounts-vg_12f02e7d735ee9a66b6f746af8eb71e6-brick_ac5214e9fddec233c4d8b2e227f5a8b8-brick -p /var/run/gluster/vols/vol_16c57180902d04440ff9bfa202f7abf1/192.168.3.6-var-lib-heketi-mounts-vg_12f02e7d735ee9a66b6f746af8eb71e6-brick_ac5214e9fddec233c4d8b2e227f5a8b8-brick.pid -S /var/run/gluster/f7b69bc33ac01a7a.socket --brick-name /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_12f02e7d735ee9a66b6f746af8eb71e6-brick_ac5214e9fddec233c4d8b2e227f5a8b8-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49154 --xlator-option vol_16c57180902d04440ff9bfa202f7abf1-server.listen-port=49154 232 ? Ssl 0:03 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_1bc2533893950d1f357b4d690882d2c2.192.168.3.6.var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_fafbd17e42fe449463a94d3018b8cd93-brick -p /var/run/gluster/vols/vol_1bc2533893950d1f357b4d690882d2c2/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_fafbd17e42fe449463a94d3018b8cd93-brick.pid -S /var/run/gluster/206211a7ec0778ee.socket --brick-name /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_fafbd17e42fe449463a94d3018b8cd93-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49155 --xlator-option vol_1bc2533893950d1f357b4d690882d2c2-server.listen-port=49155 240 ? Ssl 0:00 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_258b8b659ad3b593dfb5672fae9ab57a.192.168.3.6.var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_e5cfd20d1c5f06c3ce00f4e1bd523a46-brick -p /var/run/gluster/vols/vol_258b8b659ad3b593dfb5672fae9ab57a/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_e5cfd20d1c5f06c3ce00f4e1bd523a46-brick.pid -S /var/run/gluster/f901b0c3d5441cfe.socket --brick-name /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_e5cfd20d1c5f06c3ce00f4e1bd523a46-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49156 --xlator-option vol_258b8b659ad3b593dfb5672fae9ab57a-server.listen-port=49156 250 ? Ssl 5:50 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_3442e86b6d994a14de73f1b8c82cf0b8.192.168.3.6.var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_952d75fd193c7209c9a81acbc23a3747-brick -p /var/run/gluster/vols/vol_3442e86b6d994a14de73f1b8c82cf0b8/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_952d75fd193c7209c9a81acbc23a3747-brick.pid -S /var/run/gluster/5cb15e50398f0885.socket --brick-name /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_952d75fd193c7209c9a81acbc23a3747-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49157 --xlator-option vol_3442e86b6d994a14de73f1b8c82cf0b8-server.listen-port=49157 259 ? 
Ssl 0:44 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_3bc80e4effa4fc16b189e156d9f5c75e.192.168.3.6.var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_46f96c044e5eaf3e2d414bd9be2197ff-brick -p /var/run/gluster/vols/vol_3bc80e4effa4fc16b189e156d9f5c75e/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_46f96c044e5eaf3e2d414bd9be2197ff-brick.pid -S /var/run/gluster/54e6999c2d8df1a3.socket --brick-name /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_46f96c044e5eaf3e2d414bd9be2197ff-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49158 --xlator-option vol_3bc80e4effa4fc16b189e156d9f5c75e-server.listen-port=49158 268 ? Ssl 0:00 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_59581a21d2113b0c0d4b2fa729a6a932.192.168.3.6.var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_93ffd88e3e362ee02eaf209ad55a94df-brick -p /var/run/gluster/vols/vol_59581a21d2113b0c0d4b2fa729a6a932/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_93ffd88e3e362ee02eaf209ad55a94df-brick.pid -S /var/run/gluster/0a3ee02b20b637ae.socket --brick-name /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_93ffd88e3e362ee02eaf209ad55a94df/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_93ffd88e3e362ee02eaf209ad55a94df-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49159 --xlator-option vol_59581a21d2113b0c0d4b2fa729a6a932-server.listen-port=49159 277 ? Ssl 0:00 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_63854b105c40802bdec77290e91858ea.192.168.3.6.var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_909763003002ae54adf8b9c9c368e665-brick -p /var/run/gluster/vols/vol_63854b105c40802bdec77290e91858ea/192.168.3.6-var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_909763003002ae54adf8b9c9c368e665-brick.pid -S /var/run/gluster/2e2cf75807bc4a1c.socket --brick-name /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_909763003002ae54adf8b9c9c368e665/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_909763003002ae54adf8b9c9c368e665-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49160 --xlator-option vol_63854b105c40802bdec77290e91858ea-server.listen-port=49160 286 ? Ssl 1:05 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_6954c7e3a47641f1bd4fbb66d2e8aebe.192.168.3.6.var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_868c984beb58b42ec855a5d29e88be98-brick -p /var/run/gluster/vols/vol_6954c7e3a47641f1bd4fbb66d2e8aebe/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_868c984beb58b42ec855a5d29e88be98-brick.pid -S /var/run/gluster/83fbe45db9af47ce.socket --brick-name /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_868c984beb58b42ec855a5d29e88be98-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49161 --xlator-option vol_6954c7e3a47641f1bd4fbb66d2e8aebe-server.listen-port=49161 295 ? 
Ssl 0:01 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_6d36831a039b04088d29ecb477828ba7.192.168.3.6.var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_f255d45da7b18f3a428215a423e18f61-brick -p /var/run/gluster/vols/vol_6d36831a039b04088d29ecb477828ba7/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_f255d45da7b18f3a428215a423e18f61-brick.pid -S /var/run/gluster/25a93d6dd87500d1.socket --brick-name /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_f255d45da7b18f3a428215a423e18f61-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49162 --xlator-option vol_6d36831a039b04088d29ecb477828ba7-server.listen-port=49162 304 ? Ssl 0:00 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_6d45975bc09622845d3674a36581cb20.192.168.3.6.var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_2b988fdacdc748a46974fa50fc7f9e50-brick -p /var/run/gluster/vols/vol_6d45975bc09622845d3674a36581cb20/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_2b988fdacdc748a46974fa50fc7f9e50-brick.pid -S /var/run/gluster/dac1fd63c2ba531e.socket --brick-name /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_2b988fdacdc748a46974fa50fc7f9e50-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49163 --xlator-option vol_6d45975bc09622845d3674a36581cb20-server.listen-port=49163 313 ? Ssl 29:01 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_87d44d5b3e6923bd42a620cbc838b253.192.168.3.6.var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_0a254839cbdeb13bbacd77acc40ebce9-brick -p /var/run/gluster/vols/vol_87d44d5b3e6923bd42a620cbc838b253/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_0a254839cbdeb13bbacd77acc40ebce9-brick.pid -S /var/run/gluster/d6dac5126a3f4c28.socket --brick-name /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_0a254839cbdeb13bbacd77acc40ebce9-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49164 --xlator-option vol_87d44d5b3e6923bd42a620cbc838b253-server.listen-port=49164 322 ? Ssl 0:01 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_93cf4b391b72534c5732df0c82206f50.192.168.3.6.var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_c1edf8bec1b388c54f0484319e46ee7f-brick -p /var/run/gluster/vols/vol_93cf4b391b72534c5732df0c82206f50/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_c1edf8bec1b388c54f0484319e46ee7f-brick.pid -S /var/run/gluster/b7bb0463a2d142eb.socket --brick-name /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_c1edf8bec1b388c54f0484319e46ee7f-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49165 --xlator-option vol_93cf4b391b72534c5732df0c82206f50-server.listen-port=49165 331 ? 
Ssl 0:00 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_ba5d6d6b945eb60ccddbe44457792a8c.192.168.3.6.var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_f940f52d3cba979166e64e8ac72d224c-brick -p /var/run/gluster/vols/vol_ba5d6d6b945eb60ccddbe44457792a8c/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_f940f52d3cba979166e64e8ac72d224c-brick.pid -S /var/run/gluster/36549be7cfbc16a1.socket --brick-name /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_f940f52d3cba979166e64e8ac72d224c/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_f940f52d3cba979166e64e8ac72d224c-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49166 --xlator-option vol_ba5d6d6b945eb60ccddbe44457792a8c-server.listen-port=49166 339 ? Ssl 0:10 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_ca1258696cc5eb793a2003d3c3b9e98e.192.168.3.6.var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_595043e897901aa6057b1227d430b20d-brick -p /var/run/gluster/vols/vol_ca1258696cc5eb793a2003d3c3b9e98e/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_595043e897901aa6057b1227d430b20d-brick.pid -S /var/run/gluster/cc935c6c14cdb23d.socket --brick-name /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_595043e897901aa6057b1227d430b20d/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_595043e897901aa6057b1227d430b20d-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49167 --xlator-option vol_ca1258696cc5eb793a2003d3c3b9e98e-server.listen-port=49167 349 ? Ssl 1:35 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_d936fff4500a9cb9491963d0624214fb.192.168.3.6.var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_46ede70d760848493be42438bd8ccd88-brick -p /var/run/gluster/vols/vol_d936fff4500a9cb9491963d0624214fb/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_46ede70d760848493be42438bd8ccd88-brick.pid -S /var/run/gluster/9b1c6a225bfc391b.socket --brick-name /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_46ede70d760848493be42438bd8ccd88-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49168 --xlator-option vol_d936fff4500a9cb9491963d0624214fb-server.listen-port=49168 358 ? Ssl 0:00 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_e1aa1283d5917485d88c4a742eeff422.192.168.3.6.var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_9e7c382e5f853d471c347bc5590359af-brick -p /var/run/gluster/vols/vol_e1aa1283d5917485d88c4a742eeff422/192.168.3.6-var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_9e7c382e5f853d471c347bc5590359af-brick.pid -S /var/run/gluster/d79eceff3f7f246c.socket --brick-name /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_9e7c382e5f853d471c347bc5590359af-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49169 --xlator-option vol_e1aa1283d5917485d88c4a742eeff422-server.listen-port=49169 367 ? 
Ssl 33:33 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_f0ed498d7e781d7bb896244175b31f9e.192.168.3.6.var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_47ed9e0663ad0f6f676ddd6ad7e3dcde-brick -p /var/run/gluster/vols/vol_f0ed498d7e781d7bb896244175b31f9e/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_47ed9e0663ad0f6f676ddd6ad7e3dcde-brick.pid -S /var/run/gluster/fb9d342a14ed07e1.socket --brick-name /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_47ed9e0663ad0f6f676ddd6ad7e3dcde/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_47ed9e0663ad0f6f676ddd6ad7e3dcde-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49170 --xlator-option vol_f0ed498d7e781d7bb896244175b31f9e-server.listen-port=49170 376 ? Ssl 0:01 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_f387519c9b004ec14e80696db88ef0f8.192.168.3.6.var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_06ad6c73dfbf6a5fc21334f98c9973c2-brick -p /var/run/gluster/vols/vol_f387519c9b004ec14e80696db88ef0f8/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_06ad6c73dfbf6a5fc21334f98c9973c2-brick.pid -S /var/run/gluster/d052a7450e30a0a7.socket --brick-name /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_06ad6c73dfbf6a5fc21334f98c9973c2/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_06ad6c73dfbf6a5fc21334f98c9973c2-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49171 --xlator-option vol_f387519c9b004ec14e80696db88ef0f8-server.listen-port=49171 385 ? Ssl 0:00 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_f8ca343c60e6efe541fe02d16ca02a7d.192.168.3.6.var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_525225f65753b05dfe33aeaeb9c5de39-brick -p /var/run/gluster/vols/vol_f8ca343c60e6efe541fe02d16ca02a7d/192.168.3.6-var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_525225f65753b05dfe33aeaeb9c5de39-brick.pid -S /var/run/gluster/aad704602eeea9e1.socket --brick-name /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_525225f65753b05dfe33aeaeb9c5de39-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49172 --xlator-option vol_f8ca343c60e6efe541fe02d16ca02a7d-server.listen-port=49172 394 ? Ssl 0:00 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_fe882e074c0512fd9271fc2ff5a0bfe1.192.168.3.6.var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_d4f30d6e465a8544b759a7016fb5aab5-brick -p /var/run/gluster/vols/vol_fe882e074c0512fd9271fc2ff5a0bfe1/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_d4f30d6e465a8544b759a7016fb5aab5-brick.pid -S /var/run/gluster/a5a1d06f226d1359.socket --brick-name /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_d4f30d6e465a8544b759a7016fb5aab5-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49173 --xlator-option vol_fe882e074c0512fd9271fc2ff5a0bfe1-server.listen-port=49173 10329 ? 
Ssl 0:00 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_bfc74a6caa795f5f1d67b2223de42517.192.168.3.6.var-lib-heketi-mounts-vg_12f02e7d735ee9a66b6f746af8eb71e6-brick_299a2bfb4702dfd575ca66c58fece9f9-brick -p /var/run/gluster/vols/vol_bfc74a6caa795f5f1d67b2223de42517/192.168.3.6-var-lib-heketi-mounts-vg_12f02e7d735ee9a66b6f746af8eb71e6-brick_299a2bfb4702dfd575ca66c58fece9f9-brick.pid -S /var/run/gluster/780d2c8462642d3e.socket --brick-name /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_299a2bfb4702dfd575ca66c58fece9f9/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_12f02e7d735ee9a66b6f746af8eb71e6-brick_299a2bfb4702dfd575ca66c58fece9f9-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49174 --xlator-option vol_bfc74a6caa795f5f1d67b2223de42517-server.listen-port=49174
14054 ? Ssl 0:00 /usr/sbin/glusterfsd -s 192.168.3.6 --volfile-id vol_8285c4fc4d6e4064ac1527d035eee2f8.192.168.3.6.var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_c04b2f3fc2aed6c127e0095bb79a9c2b-brick -p /var/run/gluster/vols/vol_8285c4fc4d6e4064ac1527d035eee2f8/192.168.3.6-var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_c04b2f3fc2aed6c127e0095bb79a9c2b-brick.pid -S /var/run/gluster/040c4b26ead00a41.socket --brick-name /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_c04b2f3fc2aed6c127e0095bb79a9c2b/brick -l /var/log/glusterfs/bricks/var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_c04b2f3fc2aed6c127e0095bb79a9c2b-brick.log --xlator-option *-posix.glusterd-uuid=fb376410-85de-4b35-a6ef-39eee8abd460 --process-name brick --brick-port 49176 --xlator-option vol_8285c4fc4d6e4064ac1527d035eee2f8-server.listen-port=49176
15825 pts/4 S+ 0:00 grep glusterfsd
sh-4.2#
=~=~=~=~=~=~=~=~=~=~=~= PuTTY log 2019.01.22 17:11:52 =~=~=~=~=~=~=~=~=~=~=~=
sh-4.2# exit
exit
[root@master ~]# oc get po
NAME                                          READY     STATUS    RESTARTS   AGE
glusterblock-storage-provisioner-dc-1-6nwq1   1/1       Running   2          32d
glusterfs-storage-q4ptb                       1/1       Running   0          2h
glusterfs-storage-ssvjp                       1/1       Running   6          34d
glusterfs-storage-vll7x                       1/1       Running   1          34d
heketi-storage-1-zm77z                        1/1       Running   38         32d
[root@master ~]# oc rsh glusterfs-storage-q4ptb
sh-4.2# cat /var/log/glusterfs/glustershd.log
[2019-01-20 09:57:13.572196] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped
[2019-01-20 09:57:13.572271] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped
[2019-01-20 09:57:13.572304] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed
[2019-01-20 09:57:13.582323] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 31360
[2019-01-20 09:57:14.583339] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped
[2019-01-20 09:57:14.583424] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service
[2019-01-20 09:57:15.592917] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop]
0-management: bitd already stopped [2019-01-20 09:57:15.592977] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-20 09:57:15.593729] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-20 09:57:15.593757] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-20 09:57:15.610641] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 09:57:15.622596] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 09:57:23.397991] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-20 09:57:23.408064] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 97120 [2019-01-20 09:57:24.408417] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-20 09:57:24.408498] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-20 09:57:25.417268] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-20 09:57:25.417317] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-20 09:57:25.417911] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-20 09:57:25.462990] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 09:57:25.474578] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd The message "I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped" repeated 2 times between [2019-01-20 09:57:13.572271] and [2019-01-20 09:57:33.585240] [2019-01-20 
09:57:33.585275] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-20 09:57:33.595422] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 97205 [2019-01-20 09:57:34.595889] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-20 09:57:34.595967] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-20 09:57:35.605129] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-20 09:57:35.605178] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-20 09:57:35.652723] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 09:57:35.665204] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd The message "I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped" repeated 2 times between [2019-01-20 09:57:13.572196] and [2019-01-20 09:57:33.585225] [2019-01-20 09:57:35.605769] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped The message "I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped" repeated 2 times between [2019-01-20 09:57:15.593757] and [2019-01-20 09:57:35.605783] [2019-01-20 09:57:43.761064] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-20 09:57:43.761097] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-20 09:57:43.761129] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-20 09:57:43.771180] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 97280 [2019-01-20 09:57:44.771510] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-20 09:57:44.771617] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-20 09:57:45.780413] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-20 09:57:45.780457] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-20 09:57:45.781047] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-20 
09:57:45.781068] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-20 09:57:45.789734] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 09:57:45.802683] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 10:02:43.309697] W [rpc-clnt.c:1753:rpc_clnt_submit] 0-glustershd: error returned while attempting to connect to host:(null), port:0 [2019-01-20 10:03:33.988281] W [rpc-clnt.c:1753:rpc_clnt_submit] 0-glustershd: error returned while attempting to connect to host:(null), port:0 [2019-01-20 10:20:05.354397] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-20 10:20:05.354440] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-20 10:20:05.354467] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-20 10:20:05.364222] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 97372 [2019-01-20 10:20:06.364528] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-20 10:20:06.364605] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-20 10:20:07.373924] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-20 10:20:07.373984] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-20 10:20:07.374658] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-20 10:20:07.374684] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-20 10:20:07.383919] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 10:20:07.396892] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Failed to execute script: 
/var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 13:28:14.497667] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3bc80e4effa4fc16b189e156d9f5c75e [2019-01-20 13:28:31.140127] I [MSGID: 106533] [glusterd-volume-ops.c:938:__glusterd_handle_cli_heal_volume] 0-management: Received heal vol req for volume vol_3bc80e4effa4fc16b189e156d9f5c75e [2019-01-20 13:28:31.141936] W [rpc-clnt.c:1753:rpc_clnt_submit] 0-glustershd: error returned while attempting to connect to host:(null), port:0 [2019-01-20 13:29:15.971418] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-20 13:29:15.971453] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-20 13:29:15.971475] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-20 13:29:15.981631] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 99089 [2019-01-20 13:29:16.982140] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-20 13:29:16.982220] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-20 13:29:17.992214] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-20 13:29:17.992267] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-20 13:29:17.992852] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-20 13:29:17.992871] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-20 13:29:20.033686] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3bc80e4effa4fc16b189e156d9f5c75e --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 13:29:20.046624] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3bc80e4effa4fc16b189e156d9f5c75e --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 13:29:29.235725] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-20 13:29:29.235748] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-20 13:29:29.246662] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 112967 [2019-01-20 13:29:30.247116] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 
0-management: glustershd service is stopped [2019-01-20 13:29:30.247247] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-20 13:29:31.255725] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-20 13:29:31.255776] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-20 13:29:31.256374] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-20 13:29:33.295804] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3bc80e4effa4fc16b189e156d9f5c75e --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 13:29:33.307768] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3bc80e4effa4fc16b189e156d9f5c75e --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 13:29:29.235711] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-20 13:29:31.256397] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped The message "I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3bc80e4effa4fc16b189e156d9f5c75e" repeated 3 times between [2019-01-20 13:28:14.497667] and [2019-01-20 13:29:36.706157] [2019-01-20 13:29:42.608447] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-20 13:29:42.608475] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-20 13:29:42.608499] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-20 13:29:42.619647] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 113056 [2019-01-20 13:29:43.620141] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-20 13:29:43.620329] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-20 13:29:43.628924] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-20 13:29:43.628960] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-20 13:29:43.629563] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-20 13:29:43.629581] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-20 13:29:45.664879] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] 
-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3bc80e4effa4fc16b189e156d9f5c75e --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 13:29:45.676616] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3bc80e4effa4fc16b189e156d9f5c75e --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-20 13:29:49.663473] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3bc80e4effa4fc16b189e156d9f5c75e [2019-01-21 08:22:32.514146] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-21 08:22:32.514181] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-21 08:22:32.514220] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-21 08:22:32.525291] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 113173 [2019-01-21 08:22:33.525642] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-21 08:22:33.525735] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-21 08:22:34.534993] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-21 08:22:34.535062] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-21 08:22:34.535674] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-21 08:22:34.535699] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-21 08:22:34.588274] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-21 08:22:34.600978] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-21 13:15:25.023810] I [MSGID: 106488] [glusterd-handler.c:1549:__glusterd_handle_cli_get_volume] 0-management: Received get vol req The message "I [MSGID: 106488] 
[glusterd-handler.c:1549:__glusterd_handle_cli_get_volume] 0-management: Received get vol req" repeated 45 times between [2019-01-21 13:15:25.023810] and [2019-01-21 13:15:43.547653] [2019-01-21 13:21:48.846721] E [MSGID: 106061] [glusterd-utils.c:10170:glusterd_max_opversion_use_rsp_dict] 0-management: Maximum supported op-version not set in destination dictionary [2019-01-22 06:18:02.566653] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-22 06:18:02.566696] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-22 06:18:02.566729] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-22 06:18:02.576532] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 65196 [2019-01-22 06:18:03.576970] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-22 06:18:03.577059] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-22 06:18:03.586603] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 06:18:03.586656] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 06:18:03.587258] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 06:18:03.587287] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 06:18:03.602293] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-22 06:18:03.614527] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-22 06:23:07.882171] W [MSGID: 101095] [xlator.c:452:xlator_dynload] 0-xlator: /usr/lib64/glusterfs/4.1.5/xlator/nfs/server.so: cannot open shared object file: No such file or directory [2019-01-22 06:23:07.962889] W [MSGID: 101095] [xlator.c:181:xlator_volopt_dynload] 0-xlator: /usr/lib64/glusterfs/4.1.5/xlator/nfs/server.so: cannot open shared object file: No such file or directory [2019-01-22 06:23:07.992667] I [MSGID: 106599] [glusterd-nfs-svc.c:163:glusterd_nfssvc_reconfigure] 0-management: nfs/server.so xlator is not installed [2019-01-22 06:23:08.028942] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: quotad already stopped [2019-01-22 06:23:08.028974] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: quotad service is stopped [2019-01-22 06:23:08.030354] I [MSGID: 106131] 
[glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 06:23:08.030373] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 06:23:08.031773] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 06:23:08.031794] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 06:23:08.042923] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/set/post/S30samba-set.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 -o diagnostics.brick-log-level=DEBUG --gd-workdir=/var/lib/glusterd [2019-01-22 06:23:08.049399] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 -o diagnostics.brick-log-level=DEBUG --gd-workdir=/var/lib/glusterd [2019-01-22 06:23:21.222746] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-22 06:23:21.222784] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-22 06:23:21.222817] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-22 06:23:21.232477] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 32069 [2019-01-22 06:23:22.232728] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-22 06:23:22.232806] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-22 06:23:22.241715] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 06:23:22.241763] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 06:23:22.242392] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 06:23:22.242415] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 06:23:22.257649] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2605) [0x7f527124a605] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-22 06:23:22.270208] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2b3a) [0x7f527124ab3a] -->/usr/lib64/glusterfs/4.1.5/xlator/mgmt/glusterd.so(+0xe2563) [0x7f527124a563] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f52765800e5] ) 0-management: 
Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-22 07:03:45.496823] W [glusterfsd.c:1514:cleanup_and_exit] (-->/lib64/libpthread.so.0(+0x7e25) [0x7f527538ee25] -->/usr/sbin/glusterd(glusterfs_sigwaiter+0xe5) [0x5565c15aad65] -->/usr/sbin/glusterd(cleanup_and_exit+0x6b) [0x5565c15aab8b] ) 0-: received signum (15), shutting down [2019-01-22 07:25:37.635224] I [MSGID: 100030] [glusterfsd.c:2741:main] 0-/usr/sbin/glusterd: Started running /usr/sbin/glusterd version 4.1.5 (args: /usr/sbin/glusterd -p /var/run/glusterd.pid --log-level INFO) [2019-01-22 07:25:37.642693] I [MSGID: 106478] [glusterd.c:1423:init] 0-management: Maximum allowed open file descriptors set to 65536 [2019-01-22 07:25:37.642727] I [MSGID: 106479] [glusterd.c:1481:init] 0-management: Using /var/lib/glusterd as working directory [2019-01-22 07:25:37.642734] I [MSGID: 106479] [glusterd.c:1486:init] 0-management: Using /var/run/gluster as pid file working directory [2019-01-22 07:25:37.647865] W [MSGID: 103071] [rdma.c:4629:__gf_rdma_ctx_create] 0-rpc-transport/rdma: rdma_cm event channel creation failed [No such device] [2019-01-22 07:25:37.647905] W [MSGID: 103055] [rdma.c:4938:init] 0-rdma.management: Failed to initialize IB Device [2019-01-22 07:25:37.647912] W [rpc-transport.c:351:rpc_transport_load] 0-rpc-transport: 'rdma' initialization failed [2019-01-22 07:25:37.647985] W [rpcsvc.c:1781:rpcsvc_create_listener] 0-rpc-service: cannot create listener, initing the transport failed [2019-01-22 07:25:37.647992] E [MSGID: 106244] [glusterd.c:1764:init] 0-management: creation of 1 listeners failed, continuing with succeeded transport [2019-01-22 07:25:38.459098] I [MSGID: 106513] [glusterd-store.c:2240:glusterd_restore_op_version] 0-glusterd: retrieved op-version: 40100 [2019-01-22 07:25:38.612530] I [MSGID: 106544] [glusterd.c:158:glusterd_uuid_init] 0-management: retrieved UUID: fb376410-85de-4b35-a6ef-39eee8abd460 [2019-01-22 07:25:38.647198] I [MSGID: 106498] [glusterd-handler.c:3614:glusterd_friend_add_from_peerinfo] 0-management: connect returned 0 [2019-01-22 07:25:38.647286] I [MSGID: 106498] [glusterd-handler.c:3614:glusterd_friend_add_from_peerinfo] 0-management: connect returned 0 [2019-01-22 07:25:38.647331] W [MSGID: 106061] [glusterd-handler.c:3408:glusterd_transport_inet_options_build] 0-glusterd: Failed to get tcp-user-timeout [2019-01-22 07:25:38.647356] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:38.651169] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 Final graph: +------------------------------------------------------------------------------+ 1: volume management 2: type mgmt/glusterd 3: option rpc-auth.auth-glusterfs on 4: option rpc-auth.auth-unix on 5: option rpc-auth.auth-null on 6: option rpc-auth-allow-insecure on 7: option transport.listen-backlog 10 8: option event-threads 1 9: option ping-timeout 0 10: option transport.socket.read-fail-log off 11: option transport.socket.keepalive-interval 2 12: option transport.socket.keepalive-time 10 13: option transport-type rdma 14: option working-directory /var/lib/glusterd 15: end-volume 16: +------------------------------------------------------------------------------+ [2019-01-22 07:25:38.651161] W [MSGID: 106061] [glusterd-handler.c:3408:glusterd_transport_inet_options_build] 0-glusterd: 
Failed to get tcp-user-timeout [2019-01-22 07:25:38.653922] I [MSGID: 101190] [event-epoll.c:617:event_dispatch_epoll_worker] 0-epoll: Started thread with index 1 [2019-01-22 07:25:39.672502] I [MSGID: 106493] [glusterd-rpc-ops.c:486:__glusterd_friend_add_cbk] 0-glusterd: Received ACC from uuid: 73389b0f-281b-4e57-b435-216093ee4749, host: 192.168.3.15, port: 0 [2019-01-22 07:25:39.673962] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick [2019-01-22 07:25:39.673989] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick on port 49163 [2019-01-22 07:25:39.674025] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:39.675058] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 07:25:39.675083] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend [2019-01-22 07:25:39.675833] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-nfs: setting frame-timeout to 600 [2019-01-22 07:25:39.675988] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-22 07:25:39.676016] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-22 07:25:39.676042] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-22 07:25:39.676091] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-glustershd: setting frame-timeout to 600 [2019-01-22 07:25:39.690855] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: glustershd already stopped [2019-01-22 07:25:39.690919] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-22 07:25:39.690972] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-22 07:25:40.698156] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-quotad: setting frame-timeout to 600 [2019-01-22 07:25:40.699408] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: quotad already stopped [2019-01-22 07:25:40.699431] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: quotad service is stopped [2019-01-22 07:25:40.699462] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-bitd: setting frame-timeout to 600 [2019-01-22 07:25:40.700148] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 07:25:40.700160] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 07:25:40.700191] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-scrub: setting frame-timeout to 600 [2019-01-22 07:25:40.700839] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 07:25:40.700855] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 07:25:40.705934] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick 
/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick [2019-01-22 07:25:40.705954] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick on port 49164 [2019-01-22 07:25:40.705984] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.710399] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick [2019-01-22 07:25:40.716475] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.720771] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick [2019-01-22 07:25:40.726630] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.731042] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick [2019-01-22 07:25:40.731061] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick on port 49165 [2019-01-22 07:25:40.731096] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.735044] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick [2019-01-22 07:25:40.735060] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick on port 49166 [2019-01-22 07:25:40.735090] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.738524] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick [2019-01-22 07:25:40.744444] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.749405] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_93ffd88e3e362ee02eaf209ad55a94df/brick [2019-01-22 07:25:40.749427] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_93ffd88e3e362ee02eaf209ad55a94df/brick on port 49156 [2019-01-22 07:25:40.749468] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.753587] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_909763003002ae54adf8b9c9c368e665/brick [2019-01-22 07:25:40.753607] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick 
/var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_909763003002ae54adf8b9c9c368e665/brick on port 49157 [2019-01-22 07:25:40.753638] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.757614] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick [2019-01-22 07:25:40.763468] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.768363] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick [2019-01-22 07:25:40.773862] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.777996] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick [2019-01-22 07:25:40.778018] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick on port 49158 [2019-01-22 07:25:40.778056] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.782312] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick [2019-01-22 07:25:40.787988] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.792346] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick [2019-01-22 07:25:40.797549] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.801516] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_f940f52d3cba979166e64e8ac72d224c/brick [2019-01-22 07:25:40.801535] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_f940f52d3cba979166e64e8ac72d224c/brick on port 49159 [2019-01-22 07:25:40.801572] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.805216] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_595043e897901aa6057b1227d430b20d/brick [2019-01-22 07:25:40.805236] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_595043e897901aa6057b1227d430b20d/brick on port 49157 [2019-01-22 07:25:40.805277] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.808993] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick 
/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick [2019-01-22 07:25:40.814988] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.819849] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick [2019-01-22 07:25:40.819870] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick on port 49161 [2019-01-22 07:25:40.819910] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.824129] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_47ed9e0663ad0f6f676ddd6ad7e3dcde/brick [2019-01-22 07:25:40.824149] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_47ed9e0663ad0f6f676ddd6ad7e3dcde/brick on port 49171 [2019-01-22 07:25:40.824196] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.828035] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_06ad6c73dfbf6a5fc21334f98c9973c2/brick [2019-01-22 07:25:40.828054] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_06ad6c73dfbf6a5fc21334f98c9973c2/brick on port 49172 [2019-01-22 07:25:40.828094] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.831949] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick [2019-01-22 07:25:40.837967] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.843165] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick [2019-01-22 07:25:40.849113] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:25:40.853253] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.853420] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.853562] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.853695] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.853820] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.853953] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.854080] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.854229] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 
07:25:40.854378] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.854515] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.854640] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.854779] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.854914] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.855039] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.855168] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.855302] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.855422] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.855544] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.855667] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.855787] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.855904] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.856019] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:25:40.856136] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.856331] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.856499] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.856667] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.856830] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.857005] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.857180] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.857376] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.857539] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.857708] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.857857] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.858012] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.858168] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.858329] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.858486] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.858648] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.858803] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.858968] I 
[rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.859124] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.859280] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.859475] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.859636] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:25:40.868841] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 07:25:40.868946] I [MSGID: 106493] [glusterd-rpc-ops.c:486:__glusterd_friend_add_cbk] 0-glusterd: Received ACC from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f, host: matrix1.matrix.orange.lab, port: 0 [2019-01-22 07:25:40.869927] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick [2019-01-22 07:25:40.869968] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick on port 49163 [2019-01-22 07:25:40.882338] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:25:40.888024] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick has disconnected from glusterd. [2019-01-22 07:25:40.893858] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:25:40.900829] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick has disconnected from glusterd. [2019-01-22 07:25:40.917810] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:25:40.923404] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick has disconnected from glusterd. [2019-01-22 07:25:40.944692] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:25:40.950213] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick has disconnected from glusterd. [2019-01-22 07:25:40.955881] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:25:40.961448] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick has disconnected from glusterd. 
[2019-01-22 07:25:40.972619] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:25:40.978156] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick has disconnected from glusterd. [2019-01-22 07:25:40.983907] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:25:40.989496] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick has disconnected from glusterd. [2019-01-22 07:25:41.008334] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:25:41.014650] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick has disconnected from glusterd. [2019-01-22 07:25:41.039606] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:25:41.045750] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick has disconnected from glusterd. [2019-01-22 07:25:41.052204] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:25:41.058077] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick has disconnected from glusterd. 
[2019-01-22 07:25:41.063351] I [MSGID: 106163] [glusterd-handshake.c:1356:__glusterd_mgmt_hndsk_versions_ack] 0-management: using the op-version 40100 [2019-01-22 07:25:41.064004] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 07:25:41.064256] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 07:25:41.064308] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend [2019-01-22 07:25:41.075550] I [MSGID: 106163] [glusterd-handshake.c:1356:__glusterd_mgmt_hndsk_versions_ack] 0-management: using the op-version 40100 [2019-01-22 07:25:41.076177] I [MSGID: 106490] [glusterd-handler.c:2548:__glusterd_handle_incoming_friend_req] 0-glusterd: Received probe from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 07:25:41.087073] I [MSGID: 106493] [glusterd-handler.c:3811:glusterd_xfer_friend_add_resp] 0-glusterd: Responded to 192.168.3.15 (0), ret: 0, op_ret: 0 [2019-01-22 07:25:41.089188] I [MSGID: 106490] [glusterd-handler.c:2548:__glusterd_handle_incoming_friend_req] 0-glusterd: Received probe from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 07:25:41.101097] I [MSGID: 106493] [glusterd-handler.c:3811:glusterd_xfer_friend_add_resp] 0-glusterd: Responded to matrix1.matrix.orange.lab (0), ret: 0, op_ret: 0 [2019-01-22 07:25:41.102618] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 07:25:41.102667] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend [2019-01-22 07:25:41.103921] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 07:25:41.103956] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 07:25:41.103975] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend [2019-01-22 07:25:41.104512] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 07:25:41.762700] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick on port 49157 [2019-01-22 07:25:41.762772] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick on port 49156 [2019-01-22 07:25:41.783040] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick on port 49158 [2019-01-22 07:25:41.797729] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick on port 49159 [2019-01-22 07:25:41.806882] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: 
adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick on port 49160 [2019-01-22 07:25:41.822778] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick on port 49161 [2019-01-22 07:25:41.834878] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick on port 49162 [2019-01-22 07:25:41.848170] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick on port 49174 [2019-01-22 07:25:41.868516] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick on port 49175 [2019-01-22 07:25:41.878317] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick on port 49176 [2019-01-22 07:27:38.750720] W [glusterfsd.c:1514:cleanup_and_exit] (-->/lib64/libpthread.so.0(+0x7e25) [0x7f63e17b6e25] -->/usr/sbin/glusterd(glusterfs_sigwaiter+0xe5) [0x557543699d65] -->/usr/sbin/glusterd(cleanup_and_exit+0x6b) [0x557543699b8b] ) 0-: received signum (15), shutting down [2019-01-22 07:49:43.724306] I [MSGID: 100030] [glusterfsd.c:2741:main] 0-/usr/sbin/glusterd: Started running /usr/sbin/glusterd version 4.1.5 (args: /usr/sbin/glusterd -p /var/run/glusterd.pid --log-level INFO) [2019-01-22 07:49:43.732557] I [MSGID: 106478] [glusterd.c:1423:init] 0-management: Maximum allowed open file descriptors set to 65536 [2019-01-22 07:49:43.732594] I [MSGID: 106479] [glusterd.c:1481:init] 0-management: Using /var/lib/glusterd as working directory [2019-01-22 07:49:43.732600] I [MSGID: 106479] [glusterd.c:1486:init] 0-management: Using /var/run/gluster as pid file working directory [2019-01-22 07:49:43.737896] W [MSGID: 103071] [rdma.c:4629:__gf_rdma_ctx_create] 0-rpc-transport/rdma: rdma_cm event channel creation failed [No such device] [2019-01-22 07:49:43.737958] W [MSGID: 103055] [rdma.c:4938:init] 0-rdma.management: Failed to initialize IB Device [2019-01-22 07:49:43.737965] W [rpc-transport.c:351:rpc_transport_load] 0-rpc-transport: 'rdma' initialization failed [2019-01-22 07:49:43.738042] W [rpcsvc.c:1781:rpcsvc_create_listener] 0-rpc-service: cannot create listener, initing the transport failed [2019-01-22 07:49:43.738049] E [MSGID: 106244] [glusterd.c:1764:init] 0-management: creation of 1 listeners failed, continuing with succeeded transport [2019-01-22 07:49:44.655669] I [MSGID: 106513] [glusterd-store.c:2240:glusterd_restore_op_version] 0-glusterd: retrieved op-version: 40100 [2019-01-22 07:49:44.803124] I [MSGID: 106544] [glusterd.c:158:glusterd_uuid_init] 0-management: retrieved UUID: fb376410-85de-4b35-a6ef-39eee8abd460 [2019-01-22 07:49:44.837849] I [MSGID: 106498] [glusterd-handler.c:3614:glusterd_friend_add_from_peerinfo] 0-management: connect returned 0 [2019-01-22 07:49:44.837930] I [MSGID: 106498] [glusterd-handler.c:3614:glusterd_friend_add_from_peerinfo] 0-management: connect returned 0 [2019-01-22 07:49:44.837981] W [MSGID: 106061] [glusterd-handler.c:3408:glusterd_transport_inet_options_build] 0-glusterd: Failed to 
get tcp-user-timeout [2019-01-22 07:49:44.838005] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:44.842181] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 Final graph: +------------------------------------------------------------------------------+ 1: volume management 2: type mgmt/glusterd 3: option rpc-auth.auth-glusterfs on 4: option rpc-auth.auth-unix on 5: option rpc-auth.auth-null on 6: option rpc-auth-allow-insecure on 7: option transport.listen-backlog 10 8: option event-threads 1 9: option ping-timeout 0 10: option transport.socket.read-fail-log off 11: option transport.socket.keepalive-interval 2 12: option transport.socket.keepalive-time 10 13: option transport-type rdma 14: option working-directory /var/lib/glusterd 15: end-volume 16: +------------------------------------------------------------------------------+ [2019-01-22 07:49:44.842173] W [MSGID: 106061] [glusterd-handler.c:3408:glusterd_transport_inet_options_build] 0-glusterd: Failed to get tcp-user-timeout [2019-01-22 07:49:44.845383] I [MSGID: 101190] [event-epoll.c:617:event_dispatch_epoll_worker] 0-epoll: Started thread with index 1 [2019-01-22 07:49:45.051423] I [MSGID: 106493] [glusterd-rpc-ops.c:486:__glusterd_friend_add_cbk] 0-glusterd: Received ACC from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f, host: matrix1.matrix.orange.lab, port: 0 [2019-01-22 07:49:45.053402] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 07:49:45.053434] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend [2019-01-22 07:49:45.054156] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-nfs: setting frame-timeout to 600 [2019-01-22 07:49:45.054291] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-22 07:49:45.054318] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-22 07:49:45.054338] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-22 07:49:45.054371] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-glustershd: setting frame-timeout to 600 [2019-01-22 07:49:45.064833] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: glustershd already stopped [2019-01-22 07:49:45.064890] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-22 07:49:45.064933] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-22 07:49:46.070852] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-quotad: setting frame-timeout to 600 [2019-01-22 07:49:46.072213] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: quotad already stopped [2019-01-22 07:49:46.072239] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: quotad service is stopped [2019-01-22 07:49:46.072287] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-bitd: setting frame-timeout to 600 [2019-01-22 07:49:46.073026] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 07:49:46.073043] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 
07:49:46.073077] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-scrub: setting frame-timeout to 600 [2019-01-22 07:49:46.073763] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 07:49:46.073782] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 07:49:46.073878] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick [2019-01-22 07:49:46.073907] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.079188] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick [2019-01-22 07:49:46.084434] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.089353] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick [2019-01-22 07:49:46.094701] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.099566] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick [2019-01-22 07:49:46.099586] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick on port 49157 [2019-01-22 07:49:46.099626] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.103640] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick [2019-01-22 07:49:46.108977] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.113682] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick [2019-01-22 07:49:46.113726] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.117596] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick [2019-01-22 07:49:46.122595] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.127111] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_93ffd88e3e362ee02eaf209ad55a94df/brick [2019-01-22 07:49:46.131975] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.136100] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick 
/var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_909763003002ae54adf8b9c9c368e665/brick [2019-01-22 07:49:46.136124] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_909763003002ae54adf8b9c9c368e665/brick on port 49157 [2019-01-22 07:49:46.136163] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.139957] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick [2019-01-22 07:49:46.145824] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.150476] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick [2019-01-22 07:49:46.155952] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.161062] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick [2019-01-22 07:49:46.161085] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick on port 49158 [2019-01-22 07:49:46.161122] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.165046] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick [2019-01-22 07:49:46.170390] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.175345] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick [2019-01-22 07:49:46.180248] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.185199] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_f940f52d3cba979166e64e8ac72d224c/brick [2019-01-22 07:49:46.185224] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_f940f52d3cba979166e64e8ac72d224c/brick on port 49159 [2019-01-22 07:49:46.185275] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.189311] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_595043e897901aa6057b1227d430b20d/brick [2019-01-22 07:49:46.194914] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.199541] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick 
/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick [2019-01-22 07:49:46.204505] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.209336] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick [2019-01-22 07:49:46.209363] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick on port 49161 [2019-01-22 07:49:46.209404] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.213374] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_47ed9e0663ad0f6f676ddd6ad7e3dcde/brick [2019-01-22 07:49:46.213414] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.216894] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_06ad6c73dfbf6a5fc21334f98c9973c2/brick [2019-01-22 07:49:46.216933] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.220909] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick [2019-01-22 07:49:46.226348] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.230408] I [glusterd-utils.c:6089:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick [2019-01-22 07:49:46.235926] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 07:49:46.240701] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.240905] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.241092] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.241222] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.241362] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.241491] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.241612] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.241738] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.241851] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.241972] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.242090] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.242211] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 
07:49:46.242335] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.242461] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.242583] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.242683] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.242790] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.242901] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.243004] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.243115] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.243219] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.243334] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:46.243445] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.243616] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.243775] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.243938] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.244091] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.244241] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.244408] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.244566] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.244713] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.244861] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.245017] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.245187] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.245342] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.245494] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.245643] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.245792] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.245937] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.246102] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.246247] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.246447] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.246609] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.246775] I 
[rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:46.255878] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 07:49:46.269952] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:49:46.276416] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick has disconnected from glusterd. [2019-01-22 07:49:46.282743] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:49:46.289277] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick has disconnected from glusterd. [2019-01-22 07:49:46.302392] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:49:46.308743] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick has disconnected from glusterd. [2019-01-22 07:49:46.320968] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:49:46.327112] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick has disconnected from glusterd. [2019-01-22 07:49:46.333401] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:49:46.339922] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_93ffd88e3e362ee02eaf209ad55a94df/brick has disconnected from glusterd. [2019-01-22 07:49:46.352764] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:49:46.358910] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick has disconnected from glusterd. [2019-01-22 07:49:46.365364] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:49:46.372303] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick has disconnected from glusterd. [2019-01-22 07:49:46.385520] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 07:49:46.391750] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick has disconnected from glusterd. 
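Note: the "received signum (15), shutting down" entry at 07:27:38 followed by a fresh start of /usr/sbin/glusterd (version 4.1.5) at 07:49:43 indicates glusterd was stopped with SIGTERM and restarted, which in this containerized deployment likely corresponds to the glusterfs pod being restarted (the same pattern repeats later in this log). A rough way to cross-check from the OpenShift side; the pod name below is a placeholder:
    # correlate glusterd restarts with container restarts on the node carrying 192.168.3.6
    oc get po -o wide | grep glusterfs-storage
    oc describe po <glusterfs-storage-pod> | grep -A5 'Last State'   # exit reason/time of the previous container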
[2019-01-22 07:49:46.398251] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now
[2019-01-22 07:49:46.404669] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick has disconnected from glusterd.
[2019-01-22 07:49:46.417403] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now
[2019-01-22 07:49:46.423448] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_595043e897901aa6057b1227d430b20d/brick has disconnected from glusterd.
[2019-01-22 07:49:46.429923] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now
[2019-01-22 07:49:46.436002] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick has disconnected from glusterd.
[2019-01-22 07:49:46.461670] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now
[2019-01-22 07:49:46.467963] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick has disconnected from glusterd.
[2019-01-22 07:49:46.474793] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now
[2019-01-22 07:49:46.481333] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick has disconnected from glusterd.
[2019-01-22 07:49:46.481678] I [MSGID: 106163] [glusterd-handshake.c:1356:__glusterd_mgmt_hndsk_versions_ack] 0-management: using the op-version 40100
[2019-01-22 07:49:46.494524] I [MSGID: 106490] [glusterd-handler.c:2548:__glusterd_handle_incoming_friend_req] 0-glusterd: Received probe from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f
[2019-01-22 07:49:46.496498] I [MSGID: 106009] [glusterd-utils.c:3393:glusterd_compare_friend_volume] 0-management: Version of volume vol_3442e86b6d994a14de73f1b8c82cf0b8 differ. local version = 3, remote version = 4 on peer matrix1.matrix.orange.lab
[2019-01-22 07:49:46.511837] I [MSGID: 106493] [glusterd-handler.c:3811:glusterd_xfer_friend_add_resp] 0-glusterd: Responded to matrix1.matrix.orange.lab (0), ret: 0, op_ret: 0
[2019-01-22 07:49:46.514794] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick
[2019-01-22 07:49:46.514817] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick on port 49166
[2019-01-22 07:49:46.514848] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600
[2019-01-22 07:49:46.551096] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped
[2019-01-22 07:49:46.551126] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped
[2019-01-22 07:49:46.551139] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed
[2019-01-22 07:49:46.561323] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 246
[2019-01-22 07:49:47.561759] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped
[2019-01-22 07:49:47.561847] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service
[2019-01-22 07:49:47.568851] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: quotad already stopped
[2019-01-22 07:49:47.568900] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: quotad service is stopped
[2019-01-22 07:49:47.569572] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped
[2019-01-22 07:49:47.569590] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped
[2019-01-22 07:49:47.570185] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped
[2019-01-22 07:49:47.570204] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped
[2019-01-22 07:49:47.581175] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f
[2019-01-22 07:49:47.581213] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend
[2019-01-22 07:49:47.582140] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f
[2019-01-22 07:49:47.660921] I [MSGID: 106493] [glusterd-rpc-ops.c:486:__glusterd_friend_add_cbk] 0-glusterd: Received ACC from uuid: 73389b0f-281b-4e57-b435-216093ee4749, host: 192.168.3.15, port: 0
[2019-01-22 07:49:47.663161] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick
[2019-01-22 07:49:47.663205] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick
/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick on port 49157 [2019-01-22 07:49:47.663325] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick [2019-01-22 07:49:47.663339] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick on port 49166 [2019-01-22 07:49:47.663472] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_909763003002ae54adf8b9c9c368e665/brick [2019-01-22 07:49:47.663483] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_909763003002ae54adf8b9c9c368e665/brick on port 49157 [2019-01-22 07:49:47.663606] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick [2019-01-22 07:49:47.663617] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick on port 49158 [2019-01-22 07:49:47.663746] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_f940f52d3cba979166e64e8ac72d224c/brick [2019-01-22 07:49:47.663757] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_f940f52d3cba979166e64e8ac72d224c/brick on port 49159 [2019-01-22 07:49:47.663880] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick [2019-01-22 07:49:47.663892] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick on port 49161 [2019-01-22 07:49:47.663955] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_47ed9e0663ad0f6f676ddd6ad7e3dcde/brick [2019-01-22 07:49:47.663964] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_47ed9e0663ad0f6f676ddd6ad7e3dcde/brick on port 49171 [2019-01-22 07:49:47.664030] I [glusterd-utils.c:5994:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_06ad6c73dfbf6a5fc21334f98c9973c2/brick [2019-01-22 07:49:47.664351] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 07:49:47.664922] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 07:49:47.673149] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 07:49:47.673812] I [MSGID: 106492] 
[glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 07:49:47.673839] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend [2019-01-22 07:49:47.674672] I [MSGID: 106163] [glusterd-handshake.c:1356:__glusterd_mgmt_hndsk_versions_ack] 0-management: using the op-version 40100 [2019-01-22 07:49:47.686079] I [MSGID: 106490] [glusterd-handler.c:2548:__glusterd_handle_incoming_friend_req] 0-glusterd: Received probe from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 07:49:47.696865] I [MSGID: 106493] [glusterd-handler.c:3811:glusterd_xfer_friend_add_resp] 0-glusterd: Responded to 192.168.3.15 (0), ret: 0, op_ret: 0 [2019-01-22 07:49:47.699125] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 07:49:47.699155] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend [2019-01-22 07:49:47.699881] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 07:49:49.123148] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick on port 49152 [2019-01-22 07:49:49.131141] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick on port 49153 [2019-01-22 07:49:49.148854] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick on port 49154 [2019-01-22 07:49:49.167209] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick on port 49155 [2019-01-22 07:49:49.168723] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_93ffd88e3e362ee02eaf209ad55a94df/brick on port 49163 [2019-01-22 07:49:49.181759] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick on port 49164 [2019-01-22 07:49:49.191327] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick on port 49165 [2019-01-22 07:49:49.206350] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick on port 49166 [2019-01-22 07:49:49.218324] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick on port 49167 [2019-01-22 07:49:49.233066] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_595043e897901aa6057b1227d430b20d/brick 
on port 49168
[2019-01-22 07:49:49.240019] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick on port 49169
[2019-01-22 07:49:49.261876] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick on port 49170
[2019-01-22 07:49:49.269910] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick on port 49171
[2019-01-22 07:49:49.780479] W [glusterd-handler.c:6124:__glusterd_brick_rpc_notify] 0-management: got disconnect from stale rpc on /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick
[2019-01-22 07:51:44.944280] W [glusterfsd.c:1514:cleanup_and_exit] (-->/lib64/libpthread.so.0(+0x7e25) [0x7f66c3f20e25] -->/usr/sbin/glusterd(glusterfs_sigwaiter+0xe5) [0x55ac41fbbd65] -->/usr/sbin/glusterd(cleanup_and_exit+0x6b) [0x55ac41fbbb8b] ) 0-: received signum (15), shutting down
[2019-01-22 09:20:15.100893] I [MSGID: 100030] [glusterfsd.c:2741:main] 0-/usr/sbin/glusterd: Started running /usr/sbin/glusterd version 4.1.6 (args: /usr/sbin/glusterd -p /var/run/glusterd.pid --log-level INFO)
[2019-01-22 09:20:15.110530] I [MSGID: 106478] [glusterd.c:1423:init] 0-management: Maximum allowed open file descriptors set to 65536
[2019-01-22 09:20:15.110574] I [MSGID: 106479] [glusterd.c:1481:init] 0-management: Using /var/lib/glusterd as working directory
[2019-01-22 09:20:15.110588] I [MSGID: 106479] [glusterd.c:1486:init] 0-management: Using /var/run/gluster as pid file working directory
[2019-01-22 09:20:15.117605] W [MSGID: 103071] [rdma.c:4629:__gf_rdma_ctx_create] 0-rpc-transport/rdma: rdma_cm event channel creation failed [No such device]
[2019-01-22 09:20:15.117656] W [MSGID: 103055] [rdma.c:4938:init] 0-rdma.management: Failed to initialize IB Device
[2019-01-22 09:20:15.117679] W [rpc-transport.c:351:rpc_transport_load] 0-rpc-transport: 'rdma' initialization failed
[2019-01-22 09:20:15.117791] W [rpcsvc.c:1781:rpcsvc_create_listener] 0-rpc-service: cannot create listener, initing the transport failed
[2019-01-22 09:20:15.117802] E [MSGID: 106244] [glusterd.c:1764:init] 0-management: creation of 1 listeners failed, continuing with succeeded transport
[2019-01-22 09:20:15.979226] I [MSGID: 106513] [glusterd-store.c:2240:glusterd_restore_op_version] 0-glusterd: retrieved op-version: 40100
[2019-01-22 09:20:16.116879] I [MSGID: 106544] [glusterd.c:158:glusterd_uuid_init] 0-management: retrieved UUID: fb376410-85de-4b35-a6ef-39eee8abd460
[2019-01-22 09:20:16.166533] I [MSGID: 106498] [glusterd-handler.c:3614:glusterd_friend_add_from_peerinfo] 0-management: connect returned 0
[2019-01-22 09:20:16.166765] I [MSGID: 106498] [glusterd-handler.c:3614:glusterd_friend_add_from_peerinfo] 0-management: connect returned 0
[2019-01-22 09:20:16.166823] W [MSGID: 106061] [glusterd-handler.c:3408:glusterd_transport_inet_options_build] 0-glusterd: Failed to get tcp-user-timeout
[2019-01-22 09:20:16.166849] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600
[2019-01-22 09:20:16.170919] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600
Final graph:
+------------------------------------------------------------------------------+
1: volume management
2: type mgmt/glusterd
3: option rpc-auth.auth-glusterfs on
4: option rpc-auth.auth-unix on
5: option rpc-auth.auth-null on
6: option rpc-auth-allow-insecure on
7: option transport.listen-backlog 10
8: option event-threads 1
9: option ping-timeout 0
10: option transport.socket.read-fail-log off
11: option transport.socket.keepalive-interval 2
12: option transport.socket.keepalive-time 10
13: option transport-type rdma
14: option working-directory /var/lib/glusterd
15: end-volume
16:
+------------------------------------------------------------------------------+
[2019-01-22 09:20:16.170911] W [MSGID: 106061] [glusterd-handler.c:3408:glusterd_transport_inet_options_build] 0-glusterd: Failed to get tcp-user-timeout
[2019-01-22 09:20:16.173646] I [MSGID: 101190] [event-epoll.c:617:event_dispatch_epoll_worker] 0-epoll: Started thread with index 1
[2019-01-22 09:20:16.198271] I [MSGID: 106493] [glusterd-rpc-ops.c:486:__glusterd_friend_add_cbk] 0-glusterd: Received ACC from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f, host: matrix1.matrix.orange.lab, port: 0
[2019-01-22 09:20:16.199122] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f
[2019-01-22 09:20:16.199145] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend
[2019-01-22 09:20:16.199780] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-nfs: setting frame-timeout to 600
[2019-01-22 09:20:16.199916] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped
[2019-01-22 09:20:16.199940] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped
[2019-01-22 09:20:16.199956] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed
[2019-01-22 09:20:16.199989] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-glustershd: setting frame-timeout to 600
[2019-01-22 09:20:16.211546] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: glustershd already stopped
[2019-01-22 09:20:16.211568] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped
[2019-01-22 09:20:16.211592] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service
[2019-01-22 09:20:17.219253] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-quotad: setting frame-timeout to 600
[2019-01-22 09:20:17.220631] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: quotad already stopped
[2019-01-22 09:20:17.220652] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: quotad service is stopped
[2019-01-22 09:20:17.220680] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-bitd: setting frame-timeout to 600
[2019-01-22 09:20:17.221380] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped
[2019-01-22 09:20:17.221393] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped
[2019-01-22 09:20:17.221418] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-scrub: setting frame-timeout to 600
[2019-01-22 09:20:17.222150] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped
[2019-01-22 09:20:17.222162] I [MSGID:
106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 09:20:17.222315] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick [2019-01-22 09:20:17.227155] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.232107] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick [2019-01-22 09:20:17.236924] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.241047] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick [2019-01-22 09:20:17.245829] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.249615] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick [2019-01-22 09:20:17.254351] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.258173] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick [2019-01-22 09:20:17.262856] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.266790] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick [2019-01-22 09:20:17.271478] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.276136] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick [2019-01-22 09:20:17.280965] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.285018] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_93ffd88e3e362ee02eaf209ad55a94df/brick [2019-01-22 09:20:17.291597] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.295701] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_909763003002ae54adf8b9c9c368e665/brick [2019-01-22 09:20:17.300825] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.304545] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick [2019-01-22 09:20:17.309371] I 
[rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.313122] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick [2019-01-22 09:20:17.317910] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.321885] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick [2019-01-22 09:20:17.326571] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.330608] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick [2019-01-22 09:20:17.335080] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.339132] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick [2019-01-22 09:20:17.343692] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.347649] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_f940f52d3cba979166e64e8ac72d224c/brick [2019-01-22 09:20:17.352175] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.356561] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_595043e897901aa6057b1227d430b20d/brick [2019-01-22 09:20:17.361528] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.366830] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick [2019-01-22 09:20:17.371521] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.375317] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick [2019-01-22 09:20:17.380132] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.383923] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_47ed9e0663ad0f6f676ddd6ad7e3dcde/brick [2019-01-22 09:20:17.388605] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.392523] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_06ad6c73dfbf6a5fc21334f98c9973c2/brick [2019-01-22 09:20:17.397166] I 
[rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.400880] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick [2019-01-22 09:20:17.405556] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.409330] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick [2019-01-22 09:20:17.414075] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 09:20:17.418019] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.418226] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.418375] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.418506] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.418636] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.418777] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.418895] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.419015] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.419136] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.419256] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.419376] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.419487] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.419599] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.419711] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.419823] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.419935] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.420047] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.420156] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.420273] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.420387] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.420493] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.420599] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:20:17.420711] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.420875] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.421020] I 
[rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.421178] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.421336] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.421490] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.421644] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.421803] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.421951] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.422100] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.422249] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.422441] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.422599] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.422748] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.422898] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.423065] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.423218] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.423392] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.423555] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.423711] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.423866] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.424024] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 09:20:17.424286] I [MSGID: 106163] [glusterd-handshake.c:1379:__glusterd_mgmt_hndsk_versions_ack] 0-management: using the op-version 40100 [2019-01-22 09:20:17.424840] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 09:20:17.430087] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.433347] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick has disconnected from glusterd. 
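Note: the "Version of volume vol_3442e86b6d994a14de73f1b8c82cf0b8 differ. local version = 3, remote version = 4 on peer matrix1.matrix.orange.lab" message at 07:49:46.496498 above is glusterd noticing that its stored definition of that volume is one revision behind the peer's (the revision counter increases when volume options are changed); the friend handshake is normally where the newer definition gets imported. A quick way to compare the two sides by hand, assuming the usual /var/lib/glusterd on-disk layout (the 'version=' field in the vols/<name>/info file is an assumption about that layout, not something shown in this log):
    # run on each gluster pod and compare
    grep -E '^(version|status)=' /var/lib/glusterd/vols/vol_3442e86b6d994a14de73f1b8c82cf0b8/info
    gluster volume info vol_3442e86b6d994a14de73f1b8c82cf0b8    # 'Options Reconfigured' should end up identical on all peers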
[2019-01-22 09:20:17.433443] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/glusterfs-registry-volume/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_b8881a7d5f2d4cda27be4fcc835c573e-brick.pid [2019-01-22 09:20:17.436879] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.440068] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick has disconnected from glusterd. [2019-01-22 09:20:17.440152] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/heketidbstorage/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_afc9ecbfbcc96ddfb1935b088dfe7d1e-brick.pid [2019-01-22 09:20:17.443310] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.446374] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick has disconnected from glusterd. [2019-01-22 09:20:17.446457] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_16c57180902d04440ff9bfa202f7abf1/192.168.3.6-var-lib-heketi-mounts-vg_12f02e7d735ee9a66b6f746af8eb71e6-brick_ac5214e9fddec233c4d8b2e227f5a8b8-brick.pid [2019-01-22 09:20:17.449664] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.452757] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick has disconnected from glusterd. [2019-01-22 09:20:17.452840] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_1bc2533893950d1f357b4d690882d2c2/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_fafbd17e42fe449463a94d3018b8cd93-brick.pid [2019-01-22 09:20:17.456030] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.459157] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick has disconnected from glusterd. [2019-01-22 09:20:17.459239] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_258b8b659ad3b593dfb5672fae9ab57a/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_e5cfd20d1c5f06c3ce00f4e1bd523a46-brick.pid [2019-01-22 09:20:17.462381] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.465482] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick has disconnected from glusterd. 
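Note: each brick hosted on 192.168.3.6 produces the same three-entry pattern here: an EPOLLERR on its management socket, a "has disconnected from glusterd" notification, and an "Unable to read pidfile" error, most likely because the pid files under /var/run/gluster/vols/<VOLNAME>/ have not been rewritten yet while the brick processes restart. A quick way to verify that the bricks come back afterwards, using a volume name taken from this log:

    ls /var/run/gluster/vols/heketidbstorage/
    gluster volume status heketidbstorage
    ps -ef | grep glusterfsd    # expect one glusterfsd process per local brick

If the status output shows every brick online with a port and PID, the disconnect messages above were transient.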
[2019-01-22 09:20:17.465576] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_3442e86b6d994a14de73f1b8c82cf0b8/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_952d75fd193c7209c9a81acbc23a3747-brick.pid [2019-01-22 09:20:17.468651] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.471620] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick has disconnected from glusterd. [2019-01-22 09:20:17.471702] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_3bc80e4effa4fc16b189e156d9f5c75e/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_46f96c044e5eaf3e2d414bd9be2197ff-brick.pid [2019-01-22 09:20:17.474752] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.477812] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_93ffd88e3e362ee02eaf209ad55a94df/brick has disconnected from glusterd. [2019-01-22 09:20:17.477892] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_59581a21d2113b0c0d4b2fa729a6a932/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_93ffd88e3e362ee02eaf209ad55a94df-brick.pid [2019-01-22 09:20:17.480872] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.483803] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_909763003002ae54adf8b9c9c368e665/brick has disconnected from glusterd. [2019-01-22 09:20:17.483882] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_63854b105c40802bdec77290e91858ea/192.168.3.6-var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_909763003002ae54adf8b9c9c368e665-brick.pid [2019-01-22 09:20:17.486974] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.489950] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick has disconnected from glusterd. [2019-01-22 09:20:17.490030] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_6954c7e3a47641f1bd4fbb66d2e8aebe/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_868c984beb58b42ec855a5d29e88be98-brick.pid [2019-01-22 09:20:17.493090] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.496189] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick has disconnected from glusterd. 
[2019-01-22 09:20:17.496273] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_6d36831a039b04088d29ecb477828ba7/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_f255d45da7b18f3a428215a423e18f61-brick.pid [2019-01-22 09:20:17.499509] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.502560] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick has disconnected from glusterd. [2019-01-22 09:20:17.502653] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_6d45975bc09622845d3674a36581cb20/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_2b988fdacdc748a46974fa50fc7f9e50-brick.pid [2019-01-22 09:20:17.505795] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.508841] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick has disconnected from glusterd. [2019-01-22 09:20:17.508924] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_87d44d5b3e6923bd42a620cbc838b253/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_0a254839cbdeb13bbacd77acc40ebce9-brick.pid [2019-01-22 09:20:17.512039] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.515106] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick has disconnected from glusterd. [2019-01-22 09:20:17.515190] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_93cf4b391b72534c5732df0c82206f50/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_c1edf8bec1b388c54f0484319e46ee7f-brick.pid [2019-01-22 09:20:17.518301] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.521246] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_f940f52d3cba979166e64e8ac72d224c/brick has disconnected from glusterd. [2019-01-22 09:20:17.521340] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_ba5d6d6b945eb60ccddbe44457792a8c/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_f940f52d3cba979166e64e8ac72d224c-brick.pid [2019-01-22 09:20:17.524503] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.527616] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_595043e897901aa6057b1227d430b20d/brick has disconnected from glusterd. 
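Note: the pidfile paths in these errors follow a convention visible in the log itself: /var/run/gluster/vols/<VOLNAME>/<brick-host>-<brick-path-with-slashes-replaced-by-dashes>.pid. The loop below is a small plain-shell sketch that roughly mirrors the check behind MSGID 101012: read the pid from each file, then test whether that process is still alive (kill -0 is used here purely as an illustration):

    for f in /var/run/gluster/vols/*/*.pid; do
        pid=$(cat "$f" 2>/dev/null)
        # kill -0 sends no signal; it only tests that the pid exists
        [ -n "$pid" ] && kill -0 "$pid" 2>/dev/null || echo "missing or stale: $f"
    done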
[2019-01-22 09:20:17.527697] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_ca1258696cc5eb793a2003d3c3b9e98e/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_595043e897901aa6057b1227d430b20d-brick.pid [2019-01-22 09:20:17.530841] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.533876] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick has disconnected from glusterd. [2019-01-22 09:20:17.533960] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_d936fff4500a9cb9491963d0624214fb/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_46ede70d760848493be42438bd8ccd88-brick.pid [2019-01-22 09:20:17.537087] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.540112] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick has disconnected from glusterd. [2019-01-22 09:20:17.540196] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_e1aa1283d5917485d88c4a742eeff422/192.168.3.6-var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_9e7c382e5f853d471c347bc5590359af-brick.pid [2019-01-22 09:20:17.543282] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.546327] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_47ed9e0663ad0f6f676ddd6ad7e3dcde/brick has disconnected from glusterd. [2019-01-22 09:20:17.546408] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_f0ed498d7e781d7bb896244175b31f9e/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_47ed9e0663ad0f6f676ddd6ad7e3dcde-brick.pid [2019-01-22 09:20:17.549459] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.552488] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_06ad6c73dfbf6a5fc21334f98c9973c2/brick has disconnected from glusterd. [2019-01-22 09:20:17.552575] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_f387519c9b004ec14e80696db88ef0f8/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_06ad6c73dfbf6a5fc21334f98c9973c2-brick.pid [2019-01-22 09:20:17.555735] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.558804] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick has disconnected from glusterd. 
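Note: once the bricks re-register (the pmap_registry_bind "adding brick ... on port ..." entries further down) and the glustershd self-heal daemon is restarted, anything written to the surviving replicas while these bricks were unreachable should be healed in the background. A hedged check, again using a volume name from this log:

    gluster volume heal heketidbstorage info

If every brick reports "Number of entries: 0", nothing was left pending.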
[2019-01-22 09:20:17.558888] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_f8ca343c60e6efe541fe02d16ca02a7d/192.168.3.6-var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_525225f65753b05dfe33aeaeb9c5de39-brick.pid [2019-01-22 09:20:17.562043] I [socket.c:2632:socket_event_handler] 0-transport: EPOLLERR - disconnecting now [2019-01-22 09:20:17.565179] I [MSGID: 106005] [glusterd-handler.c:6131:__glusterd_brick_rpc_notify] 0-management: Brick 192.168.3.6:/var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick has disconnected from glusterd. [2019-01-22 09:20:17.565266] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_fe882e074c0512fd9271fc2ff5a0bfe1/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_d4f30d6e465a8544b759a7016fb5aab5-brick.pid [2019-01-22 09:20:17.565509] I [MSGID: 106490] [glusterd-handler.c:2548:__glusterd_handle_incoming_friend_req] 0-glusterd: Received probe from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 09:20:17.567677] I [MSGID: 106009] [glusterd-utils.c:3393:glusterd_compare_friend_volume] 0-management: Version of volume vol_63854b105c40802bdec77290e91858ea differ. local version = 2, remote version = 19 on peer matrix1.matrix.orange.lab [2019-01-22 09:20:17.567946] I [MSGID: 106009] [glusterd-utils.c:3393:glusterd_compare_friend_volume] 0-management: Version of volume vol_ba5d6d6b945eb60ccddbe44457792a8c differ. local version = 2, remote version = 21 on peer matrix1.matrix.orange.lab [2019-01-22 09:20:17.583616] I [MSGID: 106493] [glusterd-handler.c:3811:glusterd_xfer_friend_add_resp] 0-glusterd: Responded to matrix1.matrix.orange.lab (0), ret: 0, op_ret: 0 [2019-01-22 09:20:17.585926] E [MSGID: 106028] [glusterd-utils.c:8222:glusterd_brick_signal] 0-glusterd: Unable to get pid of brick process [2019-01-22 09:20:17.585968] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 09:20:17.585996] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend [2019-01-22 09:20:17.590954] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 955b8b0a-4ba8-4d7d-9f2e-9e525844a98f [2019-01-22 09:20:17.617806] E [MSGID: 106028] [glusterd-utils.c:8222:glusterd_brick_signal] 0-glusterd: Unable to get pid of brick process [2019-01-22 09:20:17.640682] W [socket.c:599:__socket_rwv] 0-socket.management: writev on 192.168.3.15:49055 failed (Broken pipe) [2019-01-22 09:20:17.641025] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-22 09:20:17.641045] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-22 09:20:17.641056] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-22 09:20:17.649628] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/glustershd/glustershd.pid [2019-01-22 09:20:17.649650] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: glustershd already stopped [2019-01-22 09:20:17.649681] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 
0-management: glustershd service is stopped [2019-01-22 09:20:17.649700] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/glustershd/glustershd.pid [2019-01-22 09:20:17.649718] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-22 09:20:17.656872] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: quotad already stopped [2019-01-22 09:20:17.656911] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: quotad service is stopped [2019-01-22 09:20:17.657619] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 09:20:17.657637] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 09:20:17.658331] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 09:20:17.658349] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 09:20:18.261005] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick on port 49153 [2019-01-22 09:20:18.266134] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick on port 49152 [2019-01-22 09:20:18.273614] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick on port 49154 [2019-01-22 09:20:18.280529] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick on port 49155 [2019-01-22 09:20:18.288941] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick on port 49156 [2019-01-22 09:20:18.302917] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick on port 49157 [2019-01-22 09:20:18.307542] I [MSGID: 106493] [glusterd-rpc-ops.c:486:__glusterd_friend_add_cbk] 0-glusterd: Received ACC from uuid: 73389b0f-281b-4e57-b435-216093ee4749, host: 192.168.3.15, port: 0 [2019-01-22 09:20:18.309614] I [glusterd-utils.c:5995:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick [2019-01-22 09:20:18.309637] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_b8881a7d5f2d4cda27be4fcc835c573e/brick on port 49152 [2019-01-22 09:20:18.309706] I [glusterd-utils.c:5995:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick [2019-01-22 09:20:18.309716] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick 
/var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_afc9ecbfbcc96ddfb1935b088dfe7d1e/brick on port 49153 [2019-01-22 09:20:18.309795] I [glusterd-utils.c:5995:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick [2019-01-22 09:20:18.309805] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_ac5214e9fddec233c4d8b2e227f5a8b8/brick on port 49154 [2019-01-22 09:20:18.309879] I [glusterd-utils.c:5995:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick [2019-01-22 09:20:18.309888] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_fafbd17e42fe449463a94d3018b8cd93/brick on port 49155 [2019-01-22 09:20:18.309950] I [glusterd-utils.c:5995:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick [2019-01-22 09:20:18.309959] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_e5cfd20d1c5f06c3ce00f4e1bd523a46/brick on port 49156 [2019-01-22 09:20:18.310023] I [glusterd-utils.c:5995:glusterd_brick_start] 0-management: discovered already-running brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick [2019-01-22 09:20:18.310032] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_952d75fd193c7209c9a81acbc23a3747/brick on port 49157 [2019-01-22 09:20:18.310080] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_3bc80e4effa4fc16b189e156d9f5c75e/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_46f96c044e5eaf3e2d414bd9be2197ff-brick.pid [2019-01-22 09:20:18.310113] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_59581a21d2113b0c0d4b2fa729a6a932/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_93ffd88e3e362ee02eaf209ad55a94df-brick.pid [2019-01-22 09:20:18.310142] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_6954c7e3a47641f1bd4fbb66d2e8aebe/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_868c984beb58b42ec855a5d29e88be98-brick.pid [2019-01-22 09:20:18.310171] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_6d36831a039b04088d29ecb477828ba7/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_f255d45da7b18f3a428215a423e18f61-brick.pid [2019-01-22 09:20:18.310198] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_6d45975bc09622845d3674a36581cb20/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_2b988fdacdc748a46974fa50fc7f9e50-brick.pid [2019-01-22 09:20:18.310225] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: 
/var/run/gluster/vols/vol_87d44d5b3e6923bd42a620cbc838b253/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_0a254839cbdeb13bbacd77acc40ebce9-brick.pid [2019-01-22 09:20:18.310252] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_93cf4b391b72534c5732df0c82206f50/192.168.3.6-var-lib-heketi-mounts-vg_ca57f326195c243be2380ce4e42a4191-brick_c1edf8bec1b388c54f0484319e46ee7f-brick.pid [2019-01-22 09:20:18.310291] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_ca1258696cc5eb793a2003d3c3b9e98e/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_595043e897901aa6057b1227d430b20d-brick.pid [2019-01-22 09:20:18.310320] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_d936fff4500a9cb9491963d0624214fb/192.168.3.6-var-lib-heketi-mounts-vg_5ecc3820ac1e5ea87c3721b8bd2d6813-brick_46ede70d760848493be42438bd8ccd88-brick.pid [2019-01-22 09:20:18.310347] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_e1aa1283d5917485d88c4a742eeff422/192.168.3.6-var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_9e7c382e5f853d471c347bc5590359af-brick.pid [2019-01-22 09:20:18.310373] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_f0ed498d7e781d7bb896244175b31f9e/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_47ed9e0663ad0f6f676ddd6ad7e3dcde-brick.pid [2019-01-22 09:20:18.310399] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_f387519c9b004ec14e80696db88ef0f8/192.168.3.6-var-lib-heketi-mounts-vg_56391bec3c8bfe4fc116de7bddfc2af4-brick_06ad6c73dfbf6a5fc21334f98c9973c2-brick.pid [2019-01-22 09:20:18.310428] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_f8ca343c60e6efe541fe02d16ca02a7d/192.168.3.6-var-lib-heketi-mounts-vg_526f35058433c6b03130bba4e0a7dd87-brick_525225f65753b05dfe33aeaeb9c5de39-brick.pid [2019-01-22 09:20:18.310456] E [MSGID: 101012] [common-utils.c:4010:gf_is_service_running] 0-: Unable to read pidfile: /var/run/gluster/vols/vol_fe882e074c0512fd9271fc2ff5a0bfe1/192.168.3.6-var-lib-heketi-mounts-vg_28708570b029e5eff0a996c453a11691-brick_d4f30d6e465a8544b759a7016fb5aab5-brick.pid [2019-01-22 09:20:18.311462] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 09:20:18.311485] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend [2019-01-22 09:20:18.315486] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 09:20:18.315553] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_46f96c044e5eaf3e2d414bd9be2197ff/brick on port 49158 [2019-01-22 09:20:18.315644] I [MSGID: 106163] [glusterd-handshake.c:1379:__glusterd_mgmt_hndsk_versions_ack] 0-management: using the op-version 40100 [2019-01-22 09:20:18.320454] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick 
/var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_93ffd88e3e362ee02eaf209ad55a94df/brick on port 49159 [2019-01-22 09:20:18.327136] I [MSGID: 106490] [glusterd-handler.c:2548:__glusterd_handle_incoming_friend_req] 0-glusterd: Received probe from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 09:20:18.338143] I [MSGID: 106493] [glusterd-handler.c:3811:glusterd_xfer_friend_add_resp] 0-glusterd: Responded to 192.168.3.15 (0), ret: 0, op_ret: 0 [2019-01-22 09:20:18.340161] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_909763003002ae54adf8b9c9c368e665/brick on port 49160 [2019-01-22 09:20:18.340511] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_868c984beb58b42ec855a5d29e88be98/brick on port 49161 [2019-01-22 09:20:18.340555] I [MSGID: 106492] [glusterd-handler.c:2726:__glusterd_handle_friend_update] 0-glusterd: Received friend update from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 09:20:18.340577] I [MSGID: 106502] [glusterd-handler.c:2771:__glusterd_handle_friend_update] 0-management: Received my uuid as Friend [2019-01-22 09:20:18.341327] I [MSGID: 106493] [glusterd-rpc-ops.c:702:__glusterd_friend_update_cbk] 0-management: Received ACC from uuid: 73389b0f-281b-4e57-b435-216093ee4749 [2019-01-22 09:20:18.357408] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_f255d45da7b18f3a428215a423e18f61/brick on port 49162 [2019-01-22 09:20:18.360237] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_2b988fdacdc748a46974fa50fc7f9e50/brick on port 49163 [2019-01-22 09:20:18.368392] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0a254839cbdeb13bbacd77acc40ebce9/brick on port 49164 [2019-01-22 09:20:18.371411] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_c1edf8bec1b388c54f0484319e46ee7f/brick on port 49165 [2019-01-22 09:20:18.380059] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_ca57f326195c243be2380ce4e42a4191/brick_f940f52d3cba979166e64e8ac72d224c/brick on port 49166 [2019-01-22 09:20:18.391453] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_595043e897901aa6057b1227d430b20d/brick on port 49167 [2019-01-22 09:20:18.400167] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_5ecc3820ac1e5ea87c3721b8bd2d6813/brick_46ede70d760848493be42438bd8ccd88/brick on port 49168 [2019-01-22 09:20:18.407014] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_9e7c382e5f853d471c347bc5590359af/brick on port 49169 [2019-01-22 09:20:18.419040] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_47ed9e0663ad0f6f676ddd6ad7e3dcde/brick on port 49170 [2019-01-22 09:20:18.429976] I [MSGID: 106142] 
[glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_06ad6c73dfbf6a5fc21334f98c9973c2/brick on port 49171 [2019-01-22 09:20:18.430039] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_525225f65753b05dfe33aeaeb9c5de39/brick on port 49172 [2019-01-22 09:20:18.438892] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_28708570b029e5eff0a996c453a11691/brick_d4f30d6e465a8544b759a7016fb5aab5/brick on port 49173 [2019-01-22 09:21:29.522601] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh --volname=vol_63854b105c40802bdec77290e91858ea --last=no [2019-01-22 09:21:29.528796] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/stop/pre/S30samba-stop.sh --volname=vol_63854b105c40802bdec77290e91858ea --last=no [2019-01-22 09:21:29.532839] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:21:29.532991] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-22 09:21:29.533020] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-22 09:21:29.533037] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-22 09:21:29.542583] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 404 [2019-01-22 09:21:29.551059] W [socket.c:599:__socket_rwv] 0-glustershd: readv on /var/run/gluster/6d27e67a556a28fd.socket failed (No data available) [2019-01-22 09:21:30.545123] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-22 09:21:30.545200] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-22 09:21:30.552865] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 09:21:30.552894] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 09:21:30.553588] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 09:21:30.553603] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 09:21:33.773834] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh --volname=vol_ba5d6d6b945eb60ccddbe44457792a8c --last=no [2019-01-22 09:21:33.780178] E 
[run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/stop/pre/S30samba-stop.sh --volname=vol_ba5d6d6b945eb60ccddbe44457792a8c --last=no [2019-01-22 09:21:33.784278] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 09:21:33.784418] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-22 09:21:33.784443] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-22 09:21:33.784456] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-22 09:21:33.794388] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 1106 [2019-01-22 09:21:34.795602] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-22 09:21:34.795672] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-22 09:21:34.803135] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 09:21:34.803164] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 09:21:34.803781] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 09:21:34.803798] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 09:21:36.029974] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/delete/pre/S10selinux-del-fcontext.sh --volname=vol_63854b105c40802bdec77290e91858ea [2019-01-22 09:21:36.102969] I [MSGID: 106495] [glusterd-handler.c:3073:__glusterd_handle_getwd] 0-glusterd: Received getwd req [2019-01-22 09:21:36.108326] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/delete/post/S57glusterfind-delete-post --volname=vol_63854b105c40802bdec77290e91858ea [2019-01-22 09:21:37.167692] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/delete/pre/S10selinux-del-fcontext.sh --volname=vol_ba5d6d6b945eb60ccddbe44457792a8c [2019-01-22 09:21:37.238454] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: 
/var/lib/glusterd/hooks/1/delete/post/S57glusterfind-delete-post --volname=vol_ba5d6d6b945eb60ccddbe44457792a8c [2019-01-22 09:21:37.233580] I [MSGID: 106495] [glusterd-handler.c:3073:__glusterd_handle_getwd] 0-glusterd: Received getwd req [2019-01-22 09:22:09.686124] I [MSGID: 106487] [glusterd-handler.c:1486:__glusterd_handle_cli_list_friends] 0-glusterd: Received cli list req [2019-01-22 09:22:28.516038] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume glusterfs-registry-volume [2019-01-22 09:22:28.520119] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume heketidbstorage [2019-01-22 09:22:28.523550] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_16c57180902d04440ff9bfa202f7abf1 [2019-01-22 09:22:28.526873] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_1bc2533893950d1f357b4d690882d2c2 [2019-01-22 09:22:28.530199] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_258b8b659ad3b593dfb5672fae9ab57a [2019-01-22 09:22:28.533428] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3442e86b6d994a14de73f1b8c82cf0b8 [2019-01-22 09:22:28.536552] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3bc80e4effa4fc16b189e156d9f5c75e [2019-01-22 09:22:28.539909] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_59581a21d2113b0c0d4b2fa729a6a932 [2019-01-22 09:22:28.543131] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6954c7e3a47641f1bd4fbb66d2e8aebe [2019-01-22 09:22:28.546873] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6d36831a039b04088d29ecb477828ba7 [2019-01-22 09:22:28.550452] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6d45975bc09622845d3674a36581cb20 [2019-01-22 09:22:28.554041] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_87d44d5b3e6923bd42a620cbc838b253 [2019-01-22 09:22:28.557607] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_93cf4b391b72534c5732df0c82206f50 [2019-01-22 09:22:28.561115] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_ca1258696cc5eb793a2003d3c3b9e98e [2019-01-22 09:22:28.564748] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_d936fff4500a9cb9491963d0624214fb [2019-01-22 09:22:28.568220] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_e1aa1283d5917485d88c4a742eeff422 [2019-01-22 09:22:28.573058] I [MSGID: 106499] 
[glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f0ed498d7e781d7bb896244175b31f9e [2019-01-22 09:22:28.576289] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f387519c9b004ec14e80696db88ef0f8 [2019-01-22 09:22:28.579673] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f8ca343c60e6efe541fe02d16ca02a7d [2019-01-22 09:22:28.583839] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_fe882e074c0512fd9271fc2ff5a0bfe1 [2019-01-22 09:22:46.453027] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3442e86b6d994a14de73f1b8c82cf0b8 [2019-01-22 09:24:20.574494] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-22 09:24:20.574519] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-22 09:24:20.574532] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-22 09:24:20.583992] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 1175 [2019-01-22 09:24:21.584365] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-22 09:24:21.584441] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-22 09:24:22.592859] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 09:24:22.592900] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 09:24:22.593445] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 09:24:22.593462] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 09:24:24.628152] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-22 09:24:24.639466] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_3442e86b6d994a14de73f1b8c82cf0b8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-22 09:24:27.352996] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3442e86b6d994a14de73f1b8c82cf0b8 [2019-01-22 09:53:33.675590] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: 
Received status volume req for volume vol_f0ed498d7e781d7bb896244175b31f9e [2019-01-22 09:53:54.453359] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-22 09:53:54.453401] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-22 09:53:54.453413] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-22 09:53:54.463204] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 1664 [2019-01-22 09:53:55.463514] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-22 09:53:55.463580] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-22 09:53:56.471584] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 09:53:56.471624] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 09:53:56.472167] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 09:53:56.472201] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 09:53:58.513258] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_f0ed498d7e781d7bb896244175b31f9e --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-22 09:53:58.525314] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_f0ed498d7e781d7bb896244175b31f9e --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-22 09:54:01.114209] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f0ed498d7e781d7bb896244175b31f9e [2019-01-22 09:54:21.769357] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume list [2019-01-22 09:54:21.770923] E [MSGID: 106525] [glusterd-op-sm.c:4347:glusterd_dict_set_volid] 0-management: Volume list does not exist [2019-01-22 09:54:21.770942] E [MSGID: 106289] [glusterd-syncop.c:1967:gd_sync_task_begin] 0-management: Failed to build payload for operation 'Volume Status' [2019-01-22 10:09:54.150479] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume glusterfs-registry-volume [2019-01-22 10:09:54.154573] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume heketidbstorage [2019-01-22 10:09:54.158081] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume 
vol_16c57180902d04440ff9bfa202f7abf1 [2019-01-22 10:09:54.161293] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_1bc2533893950d1f357b4d690882d2c2 [2019-01-22 10:09:54.164674] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_258b8b659ad3b593dfb5672fae9ab57a [2019-01-22 10:09:54.167939] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3442e86b6d994a14de73f1b8c82cf0b8 [2019-01-22 10:09:54.171025] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3bc80e4effa4fc16b189e156d9f5c75e [2019-01-22 10:09:54.174253] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_59581a21d2113b0c0d4b2fa729a6a932 [2019-01-22 10:09:54.177384] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6954c7e3a47641f1bd4fbb66d2e8aebe [2019-01-22 10:09:54.180486] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6d36831a039b04088d29ecb477828ba7 [2019-01-22 10:09:54.183451] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6d45975bc09622845d3674a36581cb20 [2019-01-22 10:09:54.186211] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_87d44d5b3e6923bd42a620cbc838b253 [2019-01-22 10:09:54.189303] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_93cf4b391b72534c5732df0c82206f50 [2019-01-22 10:09:54.192545] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_ca1258696cc5eb793a2003d3c3b9e98e [2019-01-22 10:09:54.196044] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_d936fff4500a9cb9491963d0624214fb [2019-01-22 10:09:54.199218] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_e1aa1283d5917485d88c4a742eeff422 [2019-01-22 10:09:54.202433] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f0ed498d7e781d7bb896244175b31f9e [2019-01-22 10:09:54.205282] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f387519c9b004ec14e80696db88ef0f8 [2019-01-22 10:09:54.208475] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f8ca343c60e6efe541fe02d16ca02a7d [2019-01-22 10:09:54.211824] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_fe882e074c0512fd9271fc2ff5a0bfe1 [2019-01-22 10:11:46.915959] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_16c57180902d04440ff9bfa202f7abf1 [2019-01-22 
10:40:43.122308] W [MSGID: 101095] [xlator.c:181:xlator_volopt_dynload] 0-xlator: /usr/lib64/glusterfs/4.1.6/xlator/nfs/server.so: cannot open shared object file: No such file or directory [2019-01-22 10:40:43.155473] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/create/post/S10selinux-label-brick.sh --volname=vol_bfc74a6caa795f5f1d67b2223de42517 [2019-01-22 10:40:43.436238] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_299a2bfb4702dfd575ca66c58fece9f9/brick [2019-01-22 10:40:43.474254] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_12f02e7d735ee9a66b6f746af8eb71e6/brick_299a2bfb4702dfd575ca66c58fece9f9/brick on port 49174 [2019-01-22 10:40:43.474801] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600 [2019-01-22 10:40:43.479026] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600 [2019-01-22 10:40:43.479283] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600 [2019-01-22 10:40:43.479476] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped [2019-01-22 10:40:43.479496] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped [2019-01-22 10:40:43.479512] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed [2019-01-22 10:40:43.490602] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 5066 [2019-01-22 10:40:44.491412] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped [2019-01-22 10:40:44.491483] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service [2019-01-22 10:40:44.499800] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped [2019-01-22 10:40:44.499845] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped [2019-01-22 10:40:44.500372] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped [2019-01-22 10:40:44.500390] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped [2019-01-22 10:40:45.607960] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_bfc74a6caa795f5f1d67b2223de42517 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd [2019-01-22 10:40:45.619700] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: 
/var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_bfc74a6caa795f5f1d67b2223de42517 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd
[2019-01-22 11:01:38.954856] W [MSGID: 101095] [xlator.c:181:xlator_volopt_dynload] 0-xlator: /usr/lib64/glusterfs/4.1.6/xlator/nfs/server.so: cannot open shared object file: No such file or directory
[2019-01-22 11:01:38.989963] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/create/post/S10selinux-label-brick.sh --volname=vol_6b34aa2cf291756dbbefaae7d51ff331
[2019-01-22 11:01:40.313041] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_46576971634b93fca1e07cf0fa8be0c8/brick
[2019-01-22 11:01:40.350130] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_46576971634b93fca1e07cf0fa8be0c8/brick on port 49175
[2019-01-22 11:01:40.350661] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600
[2019-01-22 11:01:40.354572] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600
[2019-01-22 11:01:40.354836] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600
[2019-01-22 11:01:40.355017] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped
[2019-01-22 11:01:40.355037] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped
[2019-01-22 11:01:40.355053] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed
[2019-01-22 11:01:40.366482] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 10352
[2019-01-22 11:01:41.368946] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped
[2019-01-22 11:01:41.369019] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service
[2019-01-22 11:01:41.377509] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped
[2019-01-22 11:01:41.377550] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped
[2019-01-22 11:01:41.378217] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped
[2019-01-22 11:01:41.378234] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped
[2019-01-22 11:01:41.503388] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_6b34aa2cf291756dbbefaae7d51ff331 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd
[2019-01-22 11:01:41.515555] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_6b34aa2cf291756dbbefaae7d51ff331 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd
[2019-01-22 11:08:51.569810] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh --volname=vol_6b34aa2cf291756dbbefaae7d51ff331 --last=no
[2019-01-22 11:08:51.576168] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/stop/pre/S30samba-stop.sh --volname=vol_6b34aa2cf291756dbbefaae7d51ff331 --last=no
[2019-01-22 11:08:51.576286] I [MSGID: 106542] [glusterd-utils.c:8253:glusterd_brick_signal] 0-glusterd: sending signal 15 to brick with pid 12821
[2019-01-22 11:08:52.576448] I [MSGID: 106143] [glusterd-pmap.c:397:pmap_registry_remove] 0-pmap: removing brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_46576971634b93fca1e07cf0fa8be0c8/brick on port 49175
[2019-01-22 11:08:52.582178] W [glusterd-handler.c:6124:__glusterd_brick_rpc_notify] 0-management: got disconnect from stale rpc on /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_46576971634b93fca1e07cf0fa8be0c8/brick
[2019-01-22 11:08:52.590483] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped
[2019-01-22 11:08:52.590523] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped
[2019-01-22 11:08:52.590537] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed
[2019-01-22 11:08:52.600047] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 12855
[2019-01-22 11:08:53.600372] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped
[2019-01-22 11:08:53.600431] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service
[2019-01-22 11:08:53.608416] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped
[2019-01-22 11:08:53.608458] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped
[2019-01-22 11:08:53.609165] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped
[2019-01-22 11:08:53.609185] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped
[2019-01-22 11:08:55.889485] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/delete/pre/S10selinux-del-fcontext.sh --volname=vol_6b34aa2cf291756dbbefaae7d51ff331
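
Note: the pattern above repeats for every volume glusterd starts or stops in this capture: the S29CTDBsetup / S29CTDB-teardown hooks run, the S30samba-start.sh / S30samba-stop.sh hooks fail, and the nfs/server.so messages show the gNFS xlator is simply not present in this image. A minimal sketch of how one might confirm from inside the glusterfs pod that this is expected noise rather than a storage fault (the chmod line is an optional assumption, only relevant if the hook errors should be silenced):

    ls -l /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh /var/lib/glusterd/hooks/1/stop/pre/S30samba-stop.sh
    command -v smbd || echo "no smbd in this image, so the samba hook failures are expected"
    ls /usr/lib64/glusterfs/4.1.6/xlator/nfs/ 2>/dev/null || echo "gNFS xlator not installed, so the nfs/server.so warnings are expected"
    # optional (assumption): stop glusterd from invoking the Samba hooks at all
    # chmod -x /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh /var/lib/glusterd/hooks/1/stop/pre/S30samba-stop.sh
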
[2019-01-22 11:08:55.951638] I [MSGID: 106495] [glusterd-handler.c:3073:__glusterd_handle_getwd] 0-glusterd: Received getwd req
[2019-01-22 11:08:55.956429] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/delete/post/S57glusterfind-delete-post --volname=vol_6b34aa2cf291756dbbefaae7d51ff331
[2019-01-22 11:09:40.114362] W [MSGID: 101095] [xlator.c:181:xlator_volopt_dynload] 0-xlator: /usr/lib64/glusterfs/4.1.6/xlator/nfs/server.so: cannot open shared object file: No such file or directory
[2019-01-22 11:09:40.144544] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/create/post/S10selinux-label-brick.sh --volname=vol_8285c4fc4d6e4064ac1527d035eee2f8
[2019-01-22 11:09:40.460893] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_c04b2f3fc2aed6c127e0095bb79a9c2b/brick
[2019-01-22 11:09:40.500149] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_c04b2f3fc2aed6c127e0095bb79a9c2b/brick on port 49176
[2019-01-22 11:09:40.500617] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600
[2019-01-22 11:09:40.504879] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600
[2019-01-22 11:09:40.505107] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600
[2019-01-22 11:09:40.505320] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped
[2019-01-22 11:09:40.505325] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed
[2019-01-22 11:09:40.516653] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 13753
[2019-01-22 11:09:41.519185] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped
[2019-01-22 11:09:41.519254] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service
[2019-01-22 11:09:41.527474] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped
[2019-01-22 11:09:41.527515] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped
[2019-01-22 11:09:41.528106] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped
[2019-01-22 11:09:41.528124] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped
[2019-01-22 11:09:42.619389] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_8285c4fc4d6e4064ac1527d035eee2f8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd
[2019-01-22 11:09:42.631109] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_8285c4fc4d6e4064ac1527d035eee2f8 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd
[2019-01-22 11:09:40.505309] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped
[2019-01-22 11:16:59.440544] I [MSGID: 106488] [glusterd-handler.c:1549:__glusterd_handle_cli_get_volume] 0-management: Received get vol req
The message "I [MSGID: 106488] [glusterd-handler.c:1549:__glusterd_handle_cli_get_volume] 0-management: Received get vol req" repeated 22 times between [2019-01-22 11:16:59.440544] and [2019-01-22 11:16:59.447182]
[2019-01-22 11:20:30.561626] I [MSGID: 106488] [glusterd-handler.c:1549:__glusterd_handle_cli_get_volume] 0-management: Received get vol req
[2019-01-22 11:20:45.609366] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume glusterfs-registry-volume
[2019-01-22 11:20:45.614672] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume heketidbstorage
[2019-01-22 11:20:45.618797] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_16c57180902d04440ff9bfa202f7abf1
[2019-01-22 11:20:45.621880] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_1bc2533893950d1f357b4d690882d2c2
[2019-01-22 11:20:45.625938] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_258b8b659ad3b593dfb5672fae9ab57a
[2019-01-22 11:20:45.630666] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3442e86b6d994a14de73f1b8c82cf0b8
[2019-01-22 11:20:45.634830] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3bc80e4effa4fc16b189e156d9f5c75e
[2019-01-22 11:20:45.638699] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_59581a21d2113b0c0d4b2fa729a6a932
[2019-01-22 11:20:45.642299] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6954c7e3a47641f1bd4fbb66d2e8aebe
[2019-01-22 11:20:45.646667] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6d36831a039b04088d29ecb477828ba7
[2019-01-22 11:20:45.650655] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6d45975bc09622845d3674a36581cb20
[2019-01-22 11:20:45.654601] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_8285c4fc4d6e4064ac1527d035eee2f8
[2019-01-22 11:20:45.658937] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_87d44d5b3e6923bd42a620cbc838b253
[2019-01-22 11:20:45.662100] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_93cf4b391b72534c5732df0c82206f50
[2019-01-22 11:20:45.666095] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_bfc74a6caa795f5f1d67b2223de42517
[2019-01-22 11:20:45.669297] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_ca1258696cc5eb793a2003d3c3b9e98e
[2019-01-22 11:20:45.673512] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_d936fff4500a9cb9491963d0624214fb
[2019-01-22 11:20:45.676963] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_e1aa1283d5917485d88c4a742eeff422
[2019-01-22 11:20:45.680801] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f0ed498d7e781d7bb896244175b31f9e
[2019-01-22 11:20:45.684538] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f387519c9b004ec14e80696db88ef0f8
[2019-01-22 11:20:45.687952] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f8ca343c60e6efe541fe02d16ca02a7d
[2019-01-22 11:20:45.690693] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_fe882e074c0512fd9271fc2ff5a0bfe1
The message "I [MSGID: 106488] [glusterd-handler.c:1549:__glusterd_handle_cli_get_volume] 0-management: Received get vol req" repeated 22 times between [2019-01-22 11:20:30.561626] and [2019-01-22 11:20:30.567955]
[2019-01-22 11:23:16.743570] I [MSGID: 106488] [glusterd-handler.c:1549:__glusterd_handle_cli_get_volume] 0-management: Received get vol req
[2019-01-22 11:23:23.726991] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume glusterfs-registry-volume
[2019-01-22 11:23:23.730927] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume heketidbstorage
[2019-01-22 11:23:23.733844] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_16c57180902d04440ff9bfa202f7abf1
[2019-01-22 11:23:23.737006] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_1bc2533893950d1f357b4d690882d2c2
[2019-01-22 11:23:23.740129] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_258b8b659ad3b593dfb5672fae9ab57a
[2019-01-22 11:23:23.743382] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3442e86b6d994a14de73f1b8c82cf0b8
[2019-01-22 11:23:23.746477] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_3bc80e4effa4fc16b189e156d9f5c75e
[2019-01-22 11:23:23.749258] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_59581a21d2113b0c0d4b2fa729a6a932
[2019-01-22 11:23:23.752156] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6954c7e3a47641f1bd4fbb66d2e8aebe
[2019-01-22 11:23:23.755002] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6d36831a039b04088d29ecb477828ba7
[2019-01-22 11:23:23.757868] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_6d45975bc09622845d3674a36581cb20
[2019-01-22 11:23:23.760790] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_8285c4fc4d6e4064ac1527d035eee2f8
[2019-01-22 11:23:23.763695] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_87d44d5b3e6923bd42a620cbc838b253
[2019-01-22 11:23:23.766488] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_93cf4b391b72534c5732df0c82206f50
[2019-01-22 11:23:23.769118] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_bfc74a6caa795f5f1d67b2223de42517
[2019-01-22 11:23:23.771763] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_ca1258696cc5eb793a2003d3c3b9e98e
[2019-01-22 11:23:23.774514] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_d936fff4500a9cb9491963d0624214fb
[2019-01-22 11:23:23.777233] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_e1aa1283d5917485d88c4a742eeff422
[2019-01-22 11:23:23.780033] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f0ed498d7e781d7bb896244175b31f9e
[2019-01-22 11:23:23.782807] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f387519c9b004ec14e80696db88ef0f8
[2019-01-22 11:23:23.785757] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_f8ca343c60e6efe541fe02d16ca02a7d
[2019-01-22 11:23:23.789114] I [MSGID: 106499] [glusterd-handler.c:4314:__glusterd_handle_status_volume] 0-management: Received status volume req for volume vol_fe882e074c0512fd9271fc2ff5a0bfe1
[2019-01-22 11:23:42.454482] I [MSGID: 106487] [glusterd-handler.c:1486:__glusterd_handle_cli_list_friends] 0-glusterd: Received cli list req
The message "I [MSGID: 106488] [glusterd-handler.c:1549:__glusterd_handle_cli_get_volume] 0-management: Received get vol req" repeated 22 times between [2019-01-22 11:23:16.743570] and [2019-01-22 11:23:16.749367]
[2019-01-22 11:24:05.010650] I [MSGID: 106487] [glusterd-handler.c:1486:__glusterd_handle_cli_list_friends] 0-glusterd: Received cli list req
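
Note: the burst of "Received get vol req", "Received status volume req" and "Received cli list req" entries above is glusterd answering routine CLI queries, most likely heketi and/or an administrator polling the cluster, and is not an error condition. A rough sketch of commands that generate the same entries from any gluster pod (the exact mapping of each command to each MSGID is an assumption):

    gluster volume info      # logged as "Received get vol req"
    gluster volume status    # logged as one "Received status volume req" per volume
    gluster peer status      # logged as "Received cli list req"
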
[2019-01-22 11:24:53.877362] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh --volname=vol_8285c4fc4d6e4064ac1527d035eee2f8 --last=no
[2019-01-22 11:24:53.885115] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/stop/pre/S30samba-stop.sh --volname=vol_8285c4fc4d6e4064ac1527d035eee2f8 --last=no
[2019-01-22 11:24:53.885253] I [MSGID: 106542] [glusterd-utils.c:8253:glusterd_brick_signal] 0-glusterd: sending signal 15 to brick with pid 14054
[2019-01-22 11:24:54.898100] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped
[2019-01-22 11:24:54.898134] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped
[2019-01-22 11:24:54.898149] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed
[2019-01-22 11:24:54.908082] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 14077
[2019-01-22 11:24:55.908302] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped
[2019-01-22 11:24:55.908378] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service
[2019-01-22 11:24:55.916703] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped
[2019-01-22 11:24:55.916744] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped
[2019-01-22 11:24:55.917381] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped
[2019-01-22 11:24:55.917409] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped
[2019-01-22 11:24:55.917563] I [MSGID: 106143] [glusterd-pmap.c:397:pmap_registry_remove] 0-pmap: removing brick /var/lib/heketi/mounts/vg_526f35058433c6b03130bba4e0a7dd87/brick_c04b2f3fc2aed6c127e0095bb79a9c2b/brick on port 49176
[2019-01-22 11:24:56.646112] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/delete/pre/S10selinux-del-fcontext.sh --volname=vol_8285c4fc4d6e4064ac1527d035eee2f8
[2019-01-22 11:24:56.723370] I [MSGID: 106495] [glusterd-handler.c:3073:__glusterd_handle_getwd] 0-glusterd: Received getwd req
[2019-01-22 11:24:56.728103] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/delete/post/S57glusterfind-delete-post --volname=vol_8285c4fc4d6e4064ac1527d035eee2f8
[2019-01-22 11:26:47.511746] W [MSGID: 101095] [xlator.c:181:xlator_volopt_dynload] 0-xlator: /usr/lib64/glusterfs/4.1.6/xlator/nfs/server.so: cannot open shared object file: No such file or directory
[2019-01-22 11:26:47.543531] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/create/post/S10selinux-label-brick.sh --volname=vol_04cfd6ac347fc1c37e9f18ebd59dc381
[2019-01-22 11:26:47.864179] I [glusterd-utils.c:6090:glusterd_brick_start] 0-management: starting a fresh brick process for brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0d34ed3d30869da79db26babcc080b60/brick
[2019-01-22 11:26:47.908979] I [MSGID: 106142] [glusterd-pmap.c:297:pmap_registry_bind] 0-pmap: adding brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0d34ed3d30869da79db26babcc080b60/brick on port 49175
[2019-01-22 11:26:47.909444] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-management: setting frame-timeout to 600
[2019-01-22 11:26:47.914435] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-snapd: setting frame-timeout to 600
[2019-01-22 11:26:47.914678] I [rpc-clnt.c:1059:rpc_clnt_connection_init] 0-gfproxyd: setting frame-timeout to 600
[2019-01-22 11:26:47.914866] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped
[2019-01-22 11:26:47.914885] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped
[2019-01-22 11:26:47.914896] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed
[2019-01-22 11:26:47.924677] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 15897
[2019-01-22 11:26:48.927191] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped
[2019-01-22 11:26:48.927255] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service
[2019-01-22 11:26:48.935812] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped
[2019-01-22 11:26:48.935852] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped
[2019-01-22 11:26:48.936551] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped
[2019-01-22 11:26:48.936573] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped
[2019-01-22 11:26:50.026928] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh --volname=vol_04cfd6ac347fc1c37e9f18ebd59dc381 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd
[2019-01-22 11:26:50.039084] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/start/post/S30samba-start.sh --volname=vol_04cfd6ac347fc1c37e9f18ebd59dc381 --first=no --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd
[2019-01-22 11:30:34.307767] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh --volname=vol_04cfd6ac347fc1c37e9f18ebd59dc381 --last=no
[2019-01-22 11:30:34.314478] E [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe26c3) [0x7f301387d6c3] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Failed to execute script: /var/lib/glusterd/hooks/1/stop/pre/S30samba-stop.sh --volname=vol_04cfd6ac347fc1c37e9f18ebd59dc381 --last=no
[2019-01-22 11:30:34.314608] I [MSGID: 106542] [glusterd-utils.c:8253:glusterd_brick_signal] 0-glusterd: sending signal 15 to brick with pid 16315
[2019-01-22 11:30:35.314843] I [MSGID: 106143] [glusterd-pmap.c:397:pmap_registry_remove] 0-pmap: removing brick /var/lib/heketi/mounts/vg_56391bec3c8bfe4fc116de7bddfc2af4/brick_0d34ed3d30869da79db26babcc080b60/brick on port 49175
[2019-01-22 11:30:35.331165] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: nfs already stopped
[2019-01-22 11:30:35.331195] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: nfs service is stopped
[2019-01-22 11:30:35.331212] I [MSGID: 106599] [glusterd-nfs-svc.c:82:glusterd_nfssvc_manager] 0-management: nfs/server.so xlator is not installed
[2019-01-22 11:30:35.341824] I [MSGID: 106568] [glusterd-proc-mgmt.c:87:glusterd_proc_stop] 0-management: Stopping glustershd daemon running in pid: 16338
[2019-01-22 11:30:36.342866] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: glustershd service is stopped
[2019-01-22 11:30:36.342947] I [MSGID: 106567] [glusterd-svc-mgmt.c:203:glusterd_svc_start] 0-management: Starting glustershd service
[2019-01-22 11:30:37.351688] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: bitd already stopped
[2019-01-22 11:30:37.351734] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: bitd service is stopped
[2019-01-22 11:30:37.352364] I [MSGID: 106131] [glusterd-proc-mgmt.c:83:glusterd_proc_stop] 0-management: scrub already stopped
[2019-01-22 11:30:37.352401] I [MSGID: 106568] [glusterd-svc-mgmt.c:235:glusterd_svc_stop] 0-management: scrub service is stopped
[2019-01-22 11:30:39.641198] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0x3a7a5) [0x7f30137d57a5] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/delete/pre/S10selinux-del-fcontext.sh --volname=vol_04cfd6ac347fc1c37e9f18ebd59dc381
[2019-01-22 11:30:39.705421] I [MSGID: 106495] [glusterd-handler.c:3073:__glusterd_handle_getwd] 0-glusterd: Received getwd req
[2019-01-22 11:30:39.710038] I [run.c:241:runner_log] (-->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2c9a) [0x7f301387dc9a] -->/usr/lib64/glusterfs/4.1.6/xlator/mgmt/glusterd.so(+0xe2765) [0x7f301387d765] -->/lib64/libglusterfs.so.0(runner_log+0x115) [0x7f3018bb30f5] ) 0-management: Ran script: /var/lib/glusterd/hooks/1/delete/post/S57glusterfind-delete-post --volname=vol_04cfd6ac347fc1c37e9f18ebd59dc381
sh-4.2#
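
Note: taken as a whole, the log shows the normal life cycle of heketi-provisioned volumes (vol_6b34aa2c..., vol_8285c4fc..., vol_04cfd6ac...): the create/start hooks run, a fresh brick process is registered with the portmapper, and on delete the stop hooks run, the brick is sent signal 15 and removed from the portmap, with glustershd restarted around each change. A sketch of how one might tie a brick or vol_* name from this log back to heketi and an OpenShift PV, assuming GlusterFS-backed PVs and a configured heketi-cli environment:

    gluster volume info | grep -E 'Volume Name|brick_0d34ed3d30869da79db26babcc080b60'   # owning volume, if it still exists
    heketi-cli volume list                                                               # heketi's view of the same vol_* names
    oc get pv -o custom-columns=NAME:.metadata.name,GLUSTERVOL:.spec.glusterfs.path      # PV to gluster volume mapping
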