[Gluster-users] glfs_fallocate() looks completely broken on disperse volumes with sharding enabled
Dmitry Antipov
dmantipov at yandex.ru
Thu Apr 9 08:53:05 UTC 2020
(closely related to https://github.com/gluster/glusterfs/issues/1148)
1) The unmodified test passes on the default replica 2 volume:
# prove -vf ./tests/bugs/shard/zero-flag.t
./tests/bugs/shard/zero-flag.t ..
1..34
ok 1 [ 190/ 1454] < 13> 'glusterd'
ok 2 [ 9/ 5] < 14> 'pidof glusterd'
ok 3 [ 9/ 201] < 15> 'gluster --mode=script --wignore volume create patchy replica 2 localhost.localdomain:/d/backends/patchy0 localhost.localdomain:/d/backends/patchy1 localhost.localdomain:/d/backends/patchy2 localhost.localdomain:/d/backends/patchy3'
ok 4 [ 13/ 155] < 16> 'gluster --mode=script --wignore volume set patchy features.shard on'
ok 5 [ 30/ 198] < 17> 'gluster --mode=script --wignore volume set patchy features.shard-block-size 4MB'
ok 6 [ 30/ 1463] < 18> 'gluster --mode=script --wignore volume start patchy'
ok 7 [ 11/ 57] < 20> '_GFS --attribute-timeout=0 --entry-timeout=0 --volfile-id=patchy --volfile-server=localhost.localdomain /mnt/glusterfs/0'
ok 8 [ 10/ 90] < 21> 'build_tester ./tests/bugs/shard/shard-fallocate.c -lgfapi -Wall -O2'
ok 9 [ 10/ 7] < 25> 'touch /mnt/glusterfs/0/tmp'
ok 10 [ 13/ 1] < 26> ''
ok 11 [ 10/ 7] < 27> 'touch /mnt/glusterfs/0/file1'
ok 12 [ 20/ 11049] < 31> './tests/bugs/shard/shard-fallocate localhost.localdomain patchy 0 0 6291456 /file1 /opt/glusterfs/var/log/glusterfs/glfs-patchy.log'
ok 13 [ 17/ 4] < 33> '6291456 stat -c %s /mnt/glusterfs/0/file1'
ok 14 [ 10/ 2] < 36> 'stat /d/backends/patchy0/.shard'
ok 15 [ 10/ 2] < 37> 'stat /d/backends/patchy1/.shard'
ok 16 [ 9/ 2] < 38> 'stat /d/backends/patchy2/.shard'
ok 17 [ 9/ 2] < 39> 'stat /d/backends/patchy3/.shard'
ok 18 [ 25/ 2] < 41> '2097152 echo 2097152 2097152'
ok 19 [ 9/ 17] < 42> '1 file_all_zeroes /mnt/glusterfs/0/file1'
ok 20 [ 8/ 7] < 47> 'truncate -s 6M /mnt/glusterfs/0/file2'
ok 21 [ 9/ 6] < 48> 'dd if=/mnt/glusterfs/0/tmp of=/mnt/glusterfs/0/file2 bs=1 seek=3145728 count=26 conv=notrunc'
ok 22 [ 32/ 11045] < 51> './tests/bugs/shard/shard-fallocate localhost.localdomain patchy 0 3145728 26 /file2 /opt/glusterfs/var/log/glusterfs/glfs-patchy.log'
ok 23 [ 17/ 4] < 53> '6291456 stat -c %s /mnt/glusterfs/0/file2'
ok 24 [ 27/ 2] < 54> '007d0186a1231a3a874a6aa09a1b7dcf echo 007d0186a1231a3a874a6aa09a1b7dcf'
ok 25 [ 9/ 7] < 59> 'touch /mnt/glusterfs/0/file3'
ok 26 [ 13/ 7] < 63> 'dd if=/mnt/glusterfs/0/tmp of=/mnt/glusterfs/0/file3 bs=1 seek=9437184 count=26 conv=notrunc'
ok 27 [ 10/ 2] < 64> '! stat /d/backends/patchy*/.shard/cfa95fe6-8367-4478-957c-edf8561dab21.1'
ok 28 [ 9/ 2] < 65> 'stat /d/backends/patchy0/.shard/cfa95fe6-8367-4478-957c-edf8561dab21.2 /d/backends/patchy1/.shard/cfa95fe6-8367-4478-957c-edf8561dab21.2'
ok 29 [ 52/ 2] < 67> '1048602 echo 1048602 1048602'
ok 30 [ 15/ 11046] < 69> './tests/bugs/shard/shard-fallocate localhost.localdomain patchy 0 5242880 1048576 /file3 /opt/glusterfs/var/log/glusterfs/glfs-patchy.log'
ok 31 [ 41/ 2] < 70> 'db677843bb19004c18b71597026b2181 echo db677843bb19004c18b71597026b2181'
ok 32 [ 9/ 7] < 72> 'Y force_umount /mnt/glusterfs/0'
ok 33 [ 9/ 5152] < 73> 'gluster --mode=script --wignore volume stop patchy'
ok 34 [ 17/ 580] < 74> 'gluster --mode=script --wignore volume delete patchy'
ok
All tests successful.
Files=1, Tests=34, 43 wallclock secs ( 0.03 usr 0.00 sys + 1.14 cusr 0.67 csys = 1.84 CPU)
Result: PASS
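For reference, the shard-fallocate helper built and invoked above is a thin gfapi wrapper around glfs_fallocate(). A minimal sketch of the same call path (hand-written here for illustration; the host, volume, and sizes mirror the test arguments, the log path is made up, and this is not the helper's exact code):

/* build (assumed): gcc repro.c -lgfapi -o repro */
#include <stdio.h>
#include <fcntl.h>
#include <glusterfs/api/glfs.h>

int main(void)
{
    /* volume and host as used in the test run above */
    glfs_t *fs = glfs_new("patchy");
    if (!fs)
        return 1;

    glfs_set_volfile_server(fs, "tcp", "localhost.localdomain", 24007);
    glfs_set_logging(fs, "/tmp/glfs-repro.log", 7);
    if (glfs_init(fs)) {
        perror("glfs_init");
        return 1;
    }

    glfs_fd_t *fd = glfs_creat(fs, "/file1", O_RDWR, 0644);
    if (!fd) {
        perror("glfs_creat");
        return 1;
    }

    /* keep_size = 0, offset = 0, length = 6 MiB: with a 4 MiB
     * shard-block-size this request spans two shards */
    if (glfs_fallocate(fd, 0, 0, 6291456))
        perror("glfs_fallocate");

    glfs_close(fd);
    glfs_fini(fs);
    return 0;
}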
2) Changing the test to create a disperse 3 redundancy 1 volume instead of replica 2:
diff --git a/tests/bugs/shard/zero-flag.t b/tests/bugs/shard/zero-flag.t
index 1f39787ab..9332a7fc7 100644
--- a/tests/bugs/shard/zero-flag.t
+++ b/tests/bugs/shard/zero-flag.t
@@ -12,7 +12,7 @@ require_fallocate -z -l 512k $M0/file && rm -f $M0/file
TEST glusterd
TEST pidof glusterd
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0,1,2}
TEST $CLI volume set $V0 features.shard on
TEST $CLI volume set $V0 features.shard-block-size 4MB
TEST $CLI volume start $V0
3) With that change, the same test fails: the FUSE mount drops with 'Transport endpoint is not connected' partway through, and 8 of 34 subtests fail:
# prove -vf ./tests/bugs/shard/zero-flag.t
./tests/bugs/shard/zero-flag.t ..
1..34
ok 1 [ 191/ 1435] < 13> 'glusterd'
ok 2 [ 9/ 5] < 14> 'pidof glusterd'
ok 3 [ 9/ 159] < 15> 'gluster --mode=script --wignore volume create patchy disperse 3 redundancy 1 localhost.localdomain:/d/backends/patchy0 localhost.localdomain:/d/backends/patchy1 localhost.localdomain:/d/backends/patchy2'
ok 4 [ 9/ 138] < 16> 'gluster --mode=script --wignore volume set patchy features.shard on'
ok 5 [ 10/ 135] < 17> 'gluster --mode=script --wignore volume set patchy features.shard-block-size 4MB'
ok 6 [ 12/ 1397] < 18> 'gluster --mode=script --wignore volume start patchy'
ok 7 [ 12/ 43] < 20> '_GFS --attribute-timeout=0 --entry-timeout=0 --volfile-id=patchy --volfile-server=localhost.localdomain /mnt/glusterfs/0'
ok 8 [ 10/ 92] < 21> 'build_tester ./tests/bugs/shard/shard-fallocate.c -lgfapi -Wall -O2'
ok 9 [ 10/ 6] < 25> 'touch /mnt/glusterfs/0/tmp'
ok 10 [ 13/ 1] < 26> ''
ok 11 [ 10/ 6] < 27> 'touch /mnt/glusterfs/0/file1'
ok 12 [ 19/ 13044] < 31> './tests/bugs/shard/shard-fallocate localhost.localdomain patchy 0 0 6291456 /file1 /opt/glusterfs/var/log/glusterfs/glfs-patchy.log'
ok 13 [ 16/ 4] < 33> '6291456 stat -c %s /mnt/glusterfs/0/file1'
ok 14 [ 10/ 2] < 36> 'stat /d/backends/patchy0/.shard'
ok 15 [ 11/ 2] < 37> 'stat /d/backends/patchy1/.shard'
ok 16 [ 11/ 2] < 38> 'stat /d/backends/patchy2/.shard'
stat: cannot stat '/d/backends/patchy3/.shard': No such file or directory
not ok 17 [ 11/ 2] < 39> 'stat /d/backends/patchy3/.shard' -> ''
not ok 18 [ 27/ 3] < 41> '2097152 echo 1048576 1048576 1048576' -> 'Got "1048576 1048576 1048576" instead of "2097152"'
ok 19 [ 11/ 22] < 42> '1 file_all_zeroes /mnt/glusterfs/0/file1'
ok 20 [ 11/ 6] < 47> 'truncate -s 6M /mnt/glusterfs/0/file2'
dd: error writing '/mnt/glusterfs/0/file2': Transport endpoint is not connected
dd: closing input file '/mnt/glusterfs/0/tmp': Transport endpoint is not connected
not ok 21 [ 11/ 360] < 48> 'dd if=/mnt/glusterfs/0/tmp of=/mnt/glusterfs/0/file2 bs=1 seek=3145728 count=26 conv=notrunc' -> ''
md5sum: /mnt/glusterfs/0/file2: Transport endpoint is not connected
ok 22 [ 17/ 13054] < 51> './tests/bugs/shard/shard-fallocate localhost.localdomain patchy 0 3145728 26 /file2 /opt/glusterfs/var/log/glusterfs/glfs-patchy.log'
stat: cannot stat '/mnt/glusterfs/0/file2': Transport endpoint is not connected
not ok 23 [ 16/ 3] < 53> '6291456 stat -c %s /mnt/glusterfs/0/file2' -> 'Got "" instead of "6291456"'
md5sum: /mnt/glusterfs/0/file2: Transport endpoint is not connected
ok 24 [ 13/ 2] < 54> ' echo'
touch: cannot touch '/mnt/glusterfs/0/file3': Transport endpoint is not connected
not ok 25 [ 10/ 2] < 59> 'touch /mnt/glusterfs/0/file3' -> ''
dd: failed to open '/mnt/glusterfs/0/tmp': Transport endpoint is not connected
not ok 26 [ 13/ 1] < 63> 'dd if=/mnt/glusterfs/0/tmp of=/mnt/glusterfs/0/file3 bs=1 seek=9437184 count=26 conv=notrunc' -> ''
ok 27 [ 8/ 1] < 64> '! stat /d/backends/patchy*/.shard/.1'
stat: cannot stat '/d/backends/patchy*/.shard/.2': No such file or directory
not ok 28 [ 9/ 2] < 65> 'stat /d/backends/patchy*/.shard/.2' -> ''
md5sum: /mnt/glusterfs/0/file3: Transport endpoint is not connected
stat: missing operand
Try 'stat --help' for more information.
not ok 29 [ 24/ 2] < 67> '1048602 echo' -> 'Got "" instead of "1048602"'
glfs_open: returned NULL
ok 30 [ 13/ 13054] < 69> './tests/bugs/shard/shard-fallocate localhost.localdomain patchy 0 5242880 1048576 /file3 /opt/glusterfs/var/log/glusterfs/glfs-patchy.log'
md5sum: /mnt/glusterfs/0/file3: Transport endpoint is not connected
ok 31 [ 21/ 3] < 70> ' echo'
ok 32 [ 11/ 7] < 72> 'Y force_umount /mnt/glusterfs/0'
ok 33 [ 11/ 4160] < 73> 'gluster --mode=script --wignore volume stop patchy'
ok 34 [ 17/ 586] < 74> 'gluster --mode=script --wignore volume delete patchy'
Failed 8/34 subtests
Test Summary Report
-------------------
./tests/bugs/shard/zero-flag.t (Wstat: 0 Tests: 34 Failed: 8)
Failed tests: 17-18, 21, 23, 25-26, 28-29
Files=1, Tests=34, 48 wallclock secs ( 0.02 usr 0.01 sys + 1.00 cusr 0.64 csys = 1.67 CPU)
Result: FAIL
Dmitry