From 63cfdd987b1dfbf97486f0f884380faee0ae25d0 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Wed, 4 Sep 2019 11:27:30 +0530
Subject: [PATCH 416/449] tests: fix spurious failure of
 bug-1402841.t-mt-dir-scan-race.t

Upstream patch: https://review.gluster.org/23352

Problem:
Since commit 600ba94183333c4af9b4a09616690994fd528478, shd starts
healing as soon as it is toggled from disabled to enabled. This was
causing the following line in the .t to fail on a 'fast' machine (always
on my laptop and sometimes on the jenkins slaves).

EXPECT_NOT "^0$" get_pending_heal_count $V0

because by the time shd was disabled, the heal was already completed.

Fix:
Increase the no. of files to be healed and make it a variable called
FILE_COUNT, should we need to bump it up further because the machines
become even faster. Also created pending metadata heals to increase the
time taken to heal a file.

>fixes: bz#1748744
>Change-Id: I5a26b08e45b8c19bce3c01ce67bdcc28ed48198d
Signed-off-by: Ravishankar N <ravishankar@redhat.com>

BUG: 1844359
Change-Id: Ie3676c6c2c27e7574b958d2eaac23801dfaed3a9
Reviewed-on: https://code.engineering.redhat.com/gerrit/202481
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
index 6351ba2..a1b9a85 100755
--- a/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
+++ b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
@@ -3,6 +3,8 @@
 . $(dirname $0)/../../volume.rc
 cleanup;
 
+FILE_COUNT=500
+
 TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
@@ -11,15 +13,14 @@ TEST $CLI volume set $V0 cluster.shd-wait-qlength 100
 TEST $CLI volume start $V0
 
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
-touch $M0/file{1..200}
-
+for i in `seq 1 $FILE_COUNT`;  do touch $M0/file$i; done
 TEST kill_brick $V0 $H0 $B0/${V0}1
-for i in {1..200}; do echo hello>$M0/file$i; done
+for i in `seq 1 $FILE_COUNT`; do echo hello>$M0/file$i; chmod -x $M0/file$i; done
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
 
-EXPECT "200" get_pending_heal_count $V0
+EXPECT "$FILE_COUNT" get_pending_heal_count $V0
 TEST $CLI volume set $V0 self-heal-daemon on
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-- 
1.8.3.1