[Gluster-devel] spurious failure ./tests/bugs/bug-830665.t [22]

Pranith Kumar Karampuri pkarampu at redhat.com
Wed May 28 02:42:55 UTC 2014


I am looking into this.

Patch                     ==> http://review.gluster.com/#/c/7841/2
Author                    ==>  Soumya Koduri skoduri at redhat.com
Build triggered by        ==> amarts
Build-url                 ==> http://build.gluster.org/job/regression/4589/consoleFull
Download-log-at           ==> http://build.gluster.org:443/logs/regression/glusterfs-logs-20140527:18:51:06.tgz
Test written by           ==> Jeff Darcy <jdarcy at redhat.com>

./tests/bugs/bug-830665.t [22]
      #!/bin/bash
      
      . $(dirname $0)/../include.rc
      . $(dirname $0)/../nfs.rc
      
      cleanup;
      
      function recreate {
      	rm -rf $1 && mkdir -p $1
      }
      
    1 TEST glusterd
    2 TEST pidof glusterd
    3 TEST $CLI volume info;
      
      ## Start and create a volume
      recreate ${B0}/${V0}-0
      recreate ${B0}/${V0}-1
    4 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
      
      function volinfo_field()
      {
          local vol=$1;
          local field=$2;
      
          $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
      }
      
      
      ## Verify volume is created
    5 EXPECT "$V0" volinfo_field $V0 'Volume Name';
    6 EXPECT 'Created' volinfo_field $V0 'Status';
      
      ## Make sure stat-prefetch doesn't prevent self-heal checks.
    7 TEST $CLI volume set $V0 performance.stat-prefetch off;
      
      ## Make sure automatic self-heal doesn't perturb our results.
    8 TEST $CLI volume set $V0 cluster.self-heal-daemon off
    9 TEST $CLI volume set $V0 cluster.background-self-heal-count 0
      
      ## Start volume and verify
   10 TEST $CLI volume start $V0;
   11 EXPECT 'Started' volinfo_field $V0 'Status';
      
      
   12 EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
      ## Mount NFS
   13 TEST mount_nfs $H0:/$V0 $N0 nolock;
      
      ## Create some files and directories
      echo "test_data" > $N0/a_file;
      mkdir $N0/a_dir;
      echo "more_test_data" > $N0/a_dir/another_file;
      
      ## Unmount and stop the volume.
   14 TEST umount $N0;
   15 TEST $CLI volume stop $V0;
      
      # Recreate the brick. Note that because of http://review.gluster.org/#change,4202
      # we need to preserve and restore the volume ID or else the brick (and thus the
      # entire not-very-HA-any-more volume) won't start. When that bug is fixed, we can
      # remove the [gs]etxattr calls.
      volid=$(getfattr -e hex -n trusted.glusterfs.volume-id $B0/${V0}-0 2> /dev/null \
      	| grep = | cut -d= -f2)
      rm -rf $B0/${V0}-0;
      mkdir $B0/${V0}-0;
      setfattr -n trusted.glusterfs.volume-id -v $volid $B0/${V0}-0
      
      ## Restart and remount. Note that we use actimeo=0 so that the stat calls
      ## we need for self-heal don't get blocked by the NFS client.
   16 TEST $CLI volume start $V0;
   17 EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
   18 TEST mount_nfs $H0:/$V0 $N0 nolock,actimeo=0;
      
      ## The Linux NFS client has a really charming habit of caching stuff right
      ## after mount, even though we set actimeo=0 above. Life would be much easier
      ## if NFS developers cared as much about correctness as they do about shaving
      ## a few seconds off of benchmarks.
      ls -l $N0 &> /dev/null;
      sleep 5;
      
      ## Force entry self-heal.
   19 TEST $CLI volume set $V0 cluster.self-heal-daemon on
      sleep 1
   20 TEST gluster volume heal $V0 full
      #ls -lR $N0 > /dev/null;
      
      ## Do NOT check through the NFS mount here. That will force a new self-heal
      ## check, but we want to test whether self-heal already happened.
      
      ## Make sure everything's in order on the recreated brick.
   21 EXPECT_WITHIN $HEAL_TIMEOUT 'test_data' cat $B0/${V0}-0/a_file;
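      ## (The next check, test 22, is the EXPECT_WITHIN that timed out in the
      ## regression run reported above.)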
***22 EXPECT_WITHIN $HEAL_TIMEOUT 'more_test_data' cat $B0/${V0}-0/a_dir/another_file;
      
      if [ "$EXIT_EARLY" = "1" ]; then
      	exit 0;
      fi
      
      ## Finish up
   23 TEST umount $N0;
   24 TEST $CLI volume stop $V0;
   25 EXPECT 'Stopped' volinfo_field $V0 'Status';
      
   26 TEST $CLI volume delete $V0;
   27 TEST ! $CLI volume info $V0;
      
      cleanup;
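
Since the failure is intermittent, the simplest way to chase it is to run just
this test in a loop until it trips and keep the logs from the failing run. A
minimal sketch, assuming a glusterfs source tree where the .t tests run under
prove (as run-tests.sh does) and that the daemon logs live under
/var/log/glusterfs; the iteration count and log destination are my own choices:

      #!/bin/bash
      # Re-run the single test until it fails, then save a copy of the logs
      # from the failing iteration for inspection.
      for i in $(seq 1 50); do
          echo "=== run $i ==="
          if ! prove -vf ./tests/bugs/bug-830665.t; then
              echo "failed on run $i, saving logs"
              cp -a /var/log/glusterfs glusterfs-logs-run-$i
              break
          fi
      done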

