From 63cfdd987b1dfbf97486f0f884380faee0ae25d0 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Wed, 4 Sep 2019 11:27:30 +0530
Subject: [PATCH 416/449] tests: fix spurious failure of
 bug-1402841.t-mt-dir-scan-race.t

Upstream patch: https://review.gluster.org/23352

Problem:
Since commit 600ba94183333c4af9b4a09616690994fd528478, shd starts
healing as soon as it is toggled from disabled to enabled. This was
causing the following line in the .t to fail on a 'fast' machine (always
on my laptop and sometimes on the Jenkins slaves):

EXPECT_NOT "^0$" get_pending_heal_count $V0

because by the time shd was disabled, the heal had already completed.

Fix:
Increase the number of files to be healed and make the count a variable
named FILE_COUNT, so that it can easily be bumped up further should the
machines become even faster. Also create pending metadata heals to
increase the time taken to heal each file.
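As an illustration only (not part of the patch; the exact change is in
the diff below), the loop run while the brick is down now amounts to:

for i in `seq 1 $FILE_COUNT`; do
    echo hello > $M0/file$i   # data change  -> one pending data heal
    chmod -x $M0/file$i       # mode change  -> one pending metadata heal
done

Healing both data and metadata takes longer per file, keeping the
pending-heal count non-zero long enough for the EXPECT_NOT check to run
before healing finishes.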

>fixes: bz#1748744
>Change-Id: I5a26b08e45b8c19bce3c01ce67bdcc28ed48198d
Signed-off-by: Ravishankar N <ravishankar@redhat.com>

BUG: 1844359
Change-Id: Ie3676c6c2c27e7574b958d2eaac23801dfaed3a9
Reviewed-on: https://code.engineering.redhat.com/gerrit/202481
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
index 6351ba2..a1b9a85 100755
--- a/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
+++ b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
@@ -3,6 +3,8 @@
 . $(dirname $0)/../../volume.rc
 cleanup;
 
+FILE_COUNT=500
+
 TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
@@ -11,15 +13,14 @@ TEST $CLI volume set $V0 cluster.shd-wait-qlength 100
 TEST $CLI volume start $V0
 
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
-touch $M0/file{1..200}
-
+for i in `seq 1 $FILE_COUNT`;  do touch $M0/file$i; done
 TEST kill_brick $V0 $H0 $B0/${V0}1
-for i in {1..200}; do echo hello>$M0/file$i; done
+for i in `seq 1 $FILE_COUNT`; do echo hello>$M0/file$i; chmod -x $M0/file$i; done
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
 
-EXPECT "200" get_pending_heal_count $V0
+EXPECT "$FILE_COUNT" get_pending_heal_count $V0
 TEST $CLI volume set $V0 self-heal-daemon on
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-- 
1.8.3.1