From 5f8f80190c154bbb159a3cebbb7d3e12014275ed Mon Sep 17 00:00:00 2001
From: Nigel Babu <nbabu@redhat.com>
Date: Mon, 30 Apr 2018 11:28:06 +0530
Subject: [PATCH 512/529] Test fixes for rhgs-3.4 downstream

This patch includes test fixes and two tests are removed
because they're not supported downstream.

Label: DOWNSTREAM ONLY
Change-Id: I99072130cea4780654980837522c76eab38e79d3
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/162177
---
 tests/basic/bd.t                                   | 142 ---------------------
 tests/basic/ec/ec-1468261.t                        |   1 +
 tests/bugs/cli/bug-1169302.t                       |   4 +-
 tests/bugs/core/multiplex-limit-issue-151.t        |   2 +-
 tests/bugs/distribute/bug-882278.t                 |  73 -----------
 tests/bugs/glusterd/brick-mux-fd-cleanup.t         |   3 +
 .../glusterd/bug-1245045-remove-brick-validation.t |  16 ++-
 .../glusterd/bug-1293414-import-brickinfo-uuid.t   |   1 +
 .../bug-1483058-replace-brick-quorum-validation.t  |   9 +-
 tests/bugs/glusterd/bug-1595320.t                  |   2 +-
 .../df-results-post-replace-brick-operations.t     |   3 +
 tests/bugs/posix/bug-990028.t                      |   2 +-
 tests/bugs/readdir-ahead/bug-1439640.t             |   1 +
 .../replicate/bug-1591193-assign-gfid-and-heal.t   |   5 +-
 .../bug-1637802-arbiter-stale-data-heal-lock.t     |   1 +
 tests/bugs/shard/zero-flag.t                       |   1 +
 tests/cluster.rc                                   |  10 ++
 tests/include.rc                                   |   1 +
 18 files changed, 48 insertions(+), 229 deletions(-)
 delete mode 100755 tests/basic/bd.t
 delete mode 100755 tests/bugs/distribute/bug-882278.t

diff --git a/tests/basic/bd.t b/tests/basic/bd.t
deleted file mode 100755
index 63622ed..0000000
--- a/tests/basic/bd.t
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-function execute()
-{
-        cmd=$1
-        shift
-        ${cmd} $@ >/dev/null 2>&1
-}
-
-function bd_cleanup()
-{
-        execute vgremove -f ${V0}
-        execute pvremove ${ld}
-        execute losetup -d ${ld}
-        execute rm ${BD_DISK}
-        cleanup
-}
-
-function check()
-{
-        if [ $? -ne 0 ]; then
-                echo prerequsite $@ failed
-                bd_cleanup
-                exit
-        fi
-}
-
-SIZE=256 #in MB
-
-bd_cleanup;
-
-## Configure environment needed for BD backend volumes
-## Create a file with configured size and
-## set it as a temporary loop device to create
-## physical volume & VG. These are basic things needed
-## for testing BD xlator if anyone of these steps fail,
-## test script exits
-function configure()
-{
-    GLDIR=`$CLI system:: getwd`
-    BD_DISK=${GLDIR}/bd_disk
-
-    execute truncate -s${SIZE}M ${BD_DISK}
-    check ${BD_DISK} creation
-
-    execute losetup -f
-    check losetup
-    ld=`losetup -f`
-
-    execute losetup ${ld} ${BD_DISK}
-    check losetup ${BD_DISK}
-    execute pvcreate -f ${ld}
-    check pvcreate ${ld}
-    execute vgcreate ${V0} ${ld}
-    check vgcreate ${V0}
-    execute lvcreate --thin ${V0}/pool --size 128M
-}
-
-function volinfo_field()
-{
-    local vol=$1;
-    local field=$2;
-    $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
-}
-
-function volume_type()
-{
-        getfattr -n volume.type $M0/. --only-values --absolute-names -e text
-}
-
-case $OSTYPE in
-NetBSD)
-        echo "Skip test on LVM which is not available on NetBSD" >&2
-        SKIP_TESTS
-        exit 0
-        ;;
-*)      
-        ;;
-esac 
-
-TEST glusterd
-TEST pidof glusterd
-configure
-
-TEST $CLI volume create $V0 ${H0}:/$B0/$V0?${V0}
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-## Start volume and verify
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status'
-
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
-EXPECT '1' volume_type
-
-## Create posix file
-TEST touch $M0/posix
-
-TEST touch $M0/lv
-gfid=`getfattr -n glusterfs.gfid.string $M0/lv --only-values --absolute-names`
-TEST setfattr -n user.glusterfs.bd -v "lv:4MB" $M0/lv
-# Check if LV is created
-TEST stat /dev/$V0/${gfid}
-
-## Create filesystem
-sleep 1
-TEST mkfs.ext4 -qF $M0/lv
-# Cloning
-TEST touch $M0/lv_clone
-gfid=`getfattr -n glusterfs.gfid.string $M0/lv_clone --only-values --absolute-names`
-TEST setfattr -n clone -v ${gfid} $M0/lv
-TEST stat /dev/$V0/${gfid}
-
-sleep 1
-## Check mounting
-TEST mount -o loop $M0/lv $M1
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
-
-# Snapshot
-TEST touch $M0/lv_sn
-gfid=`getfattr -n glusterfs.gfid.string $M0/lv_sn --only-values --absolute-names`
-TEST setfattr -n snapshot -v ${gfid} $M0/lv
-TEST stat /dev/$V0/${gfid}
-
-# Merge
-sleep 1
-TEST setfattr -n merge -v "$M0/lv_sn" $M0/lv_sn
-TEST ! stat $M0/lv_sn
-TEST ! stat /dev/$V0/${gfid}
-
-
-rm $M0/* -f
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop ${V0}
-EXPECT 'Stopped' volinfo_field $V0 'Status';
-TEST $CLI volume delete ${V0}
-
-bd_cleanup
diff --git a/tests/basic/ec/ec-1468261.t b/tests/basic/ec/ec-1468261.t
index d687d7b..b2d92fc 100644
--- a/tests/basic/ec/ec-1468261.t
+++ b/tests/basic/ec/ec-1468261.t
@@ -14,6 +14,7 @@ TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
 TEST $CLI volume set $V0 disperse.optimistic-change-log on
+TEST $CLI volume set $V0 disperse.other-eager-lock on
 TEST $CLI volume start $V0
 
 #Mount the volume
diff --git a/tests/bugs/cli/bug-1169302.t b/tests/bugs/cli/bug-1169302.t
index 24355e5..05c006c 100755
--- a/tests/bugs/cli/bug-1169302.t
+++ b/tests/bugs/cli/bug-1169302.t
@@ -40,7 +40,9 @@ cleanup_statedump
 # hostname or IP-address with the connection from the bug-1169302 executable.
 # In our CI it seems not possible to use $H0, 'localhost', $(hostname --fqdn)
 # or even "127.0.0.1"....
-TEST $CLI_3 volume statedump $V0 client $H1:$GFAPI_PID
+sleep 2
+host=`netstat -nap | grep $GFAPI_PID | grep 24007 |  awk '{print $4}' | cut -d: -f1`
+TEST $CLI_3 volume statedump $V0 client $host:$GFAPI_PID
 EXPECT_WITHIN $STATEDUMP_TIMEOUT "Y" path_exists $statedumpdir/glusterdump.$GFAPI_PID*
 
 kill $GFAPI_PID
diff --git a/tests/bugs/core/multiplex-limit-issue-151.t b/tests/bugs/core/multiplex-limit-issue-151.t
index 9511756..c5bbbda 100644
--- a/tests/bugs/core/multiplex-limit-issue-151.t
+++ b/tests/bugs/core/multiplex-limit-issue-151.t
@@ -50,7 +50,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 7 count_up_bricks
 
 TEST $CLI volume remove-brick $V0 $H0:$B0/brick3 start
-TEST $CLI volume remove-brick $V0 $H0:$B0/brick3 commit
+TEST $CLI volume remove-brick $V0 $H0:$B0/brick3 force
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_processes
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
diff --git a/tests/bugs/distribute/bug-882278.t b/tests/bugs/distribute/bug-882278.t
deleted file mode 100755
index 8cb5147..0000000
--- a/tests/bugs/distribute/bug-882278.t
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-cleanup
-
-# Is there a good reason to require --fqdn elsewhere?  It's worse than useless
-# here.
-H0=$(hostname -s)
-
-function recreate {
-	# The rm is necessary so we don't get fooled by leftovers from old runs.
-	rm -rf $1 && mkdir -p $1
-}
-
-function count_lines {
-	grep "$1" $2/* | wc -l
-}
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-## Start and create a volume
-TEST recreate ${B0}/${V0}-0
-TEST recreate ${B0}/${V0}-1
-TEST $CLI volume create $V0 $H0:$B0/${V0}-{0,1}
-TEST $CLI volume set $V0 cluster.nufa on
-
-function volinfo_field()
-{
-    local vol=$1;
-    local field=$2;
-
-    $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
-}
-
-
-## Verify volume is created
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-## Start volume and verify
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-## Mount native
-special_option="--xlator-option ${V0}-dht.local-volume-name=${V0}-client-1"
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $special_option $M0
-
-## Create a bunch of test files.
-for i in $(seq 0 99); do
-	echo hello > $(printf $M0/file%02d $i)
-done
-
-## Make sure the files went to the right place.  There might be link files in
-## the other brick, but they won't have any contents.
-EXPECT "0" count_lines hello ${B0}/${V0}-0
-EXPECT "100" count_lines hello ${B0}/${V0}-1
-
-if [ "$EXIT_EARLY" = "1" ]; then
-	exit 0;
-fi
-
-## Finish up
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop $V0;
-EXPECT 'Stopped' volinfo_field $V0 'Status';
-
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/brick-mux-fd-cleanup.t b/tests/bugs/glusterd/brick-mux-fd-cleanup.t
index de11c17..2ac7f9c 100644
--- a/tests/bugs/glusterd/brick-mux-fd-cleanup.t
+++ b/tests/bugs/glusterd/brick-mux-fd-cleanup.t
@@ -76,3 +76,6 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
 
 cleanup
+
+#delay-gen is not present downstream
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/glusterd/bug-1245045-remove-brick-validation.t b/tests/bugs/glusterd/bug-1245045-remove-brick-validation.t
index 597c40c..a931d29 100644
--- a/tests/bugs/glusterd/bug-1245045-remove-brick-validation.t
+++ b/tests/bugs/glusterd/bug-1245045-remove-brick-validation.t
@@ -3,12 +3,16 @@
 . $(dirname $0)/../../include.rc
 . $(dirname $0)/../../cluster.rc
 
+function peer_count {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
 cleanup
 
 TEST launch_cluster 3;
 TEST $CLI_1 peer probe $H2;
 TEST $CLI_1 peer probe $H3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
 
 TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
 TEST $CLI_1 volume start $V0
@@ -21,7 +25,9 @@ TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
 TEST start_glusterd 2
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}
 
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
 
 #volume status should work
 TEST $CLI_2 volume status
@@ -36,7 +42,7 @@ TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} commit
 TEST start_glusterd 2
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}
 
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
 
 #volume status should work
 TEST $CLI_2 volume status
@@ -44,12 +50,12 @@ TEST $CLI_2 volume status
 TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} stop
 
 kill_glusterd 3
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
 
 TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
 
 TEST start_glusterd 3
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
 
 TEST $CLI_3 volume status
 
diff --git a/tests/bugs/glusterd/bug-1293414-import-brickinfo-uuid.t b/tests/bugs/glusterd/bug-1293414-import-brickinfo-uuid.t
index 9f67e4c..977276e 100755
--- a/tests/bugs/glusterd/bug-1293414-import-brickinfo-uuid.t
+++ b/tests/bugs/glusterd/bug-1293414-import-brickinfo-uuid.t
@@ -24,6 +24,7 @@ EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
 TEST ! $CLI_3 peer detach $H1
 TEST ! $CLI_3 peer detach $H2
 
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
 
 # peer not hosting bricks should be detachable
 TEST $CLI_3 peer detach $H4
diff --git a/tests/bugs/glusterd/bug-1483058-replace-brick-quorum-validation.t b/tests/bugs/glusterd/bug-1483058-replace-brick-quorum-validation.t
index 3dbe28a..2d9e528 100644
--- a/tests/bugs/glusterd/bug-1483058-replace-brick-quorum-validation.t
+++ b/tests/bugs/glusterd/bug-1483058-replace-brick-quorum-validation.t
@@ -45,7 +45,14 @@ TEST start_glusterd 2
 
 EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
 
+# checking peer_count is not enough to call that quorum is regained as
+# peer_count is based on peerinfo->connected whereas quorum is calculated based
+# on peerinfo->quorum_contrib. To avoid this spurious race of replace brick
+# commit force to execute and fail before the quorum is regained run the command
+# in EXPECT_WITHIN to ensure that with multiple attempts the command goes
+# through once the quorum is regained.
+
 # Now quorum is met. replace-brick will execute successfuly
-TEST  $CLI_1 volume replace-brick $V0 $H2:$B2/${V0}1 $H1:$B1/${V0}1_new commit force
+EXPECT_WITHIN $PEER_SYNC_TIMEOUT 0 attempt_replace_brick 1 $V0 $H2:$B2/${V0}1 $H1:$B1/${V0}1_new
 
 #cleanup;
diff --git a/tests/bugs/glusterd/bug-1595320.t b/tests/bugs/glusterd/bug-1595320.t
index f41df9d..3a289f3 100644
--- a/tests/bugs/glusterd/bug-1595320.t
+++ b/tests/bugs/glusterd/bug-1595320.t
@@ -25,7 +25,7 @@ TEST pidof glusterd
 
 # Create volume and enable brick multiplexing
 TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3
-gluster v set all cluster.brick-multiplex on
+TEST $CLI v set all cluster.brick-multiplex on
 
 # Start the volume
 TEST $CLI volume start $V0
diff --git a/tests/bugs/glusterd/df-results-post-replace-brick-operations.t b/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
index 443911c..04f7588 100644
--- a/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
+++ b/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
@@ -53,6 +53,9 @@ total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
 TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1/brick1 $H0:$B0/${V0}4/brick1 commit force
 TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}2/brick1 $H0:$B0/${V0}5/brick1 commit force
 
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
 # check for the size at mount point, it should be same as previous
 total_space_new=$(df -P $M0 | tail -1 | awk '{ print $2}')
 TEST [ $total_space -eq $total_space_new ]
diff --git a/tests/bugs/posix/bug-990028.t b/tests/bugs/posix/bug-990028.t
index c864214..bef36a8 100755
--- a/tests/bugs/posix/bug-990028.t
+++ b/tests/bugs/posix/bug-990028.t
@@ -78,7 +78,7 @@ function links_across_directories()
     TEST [ $LINES = 2 ]
 
     for i in $(seq 1  2); do
-        HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir$i/file$i 2>&1 | grep "trusted.pgfid" | cut -d$'\n' -f$i | cut -d'=' -f2`
+        HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir$i/file$i 2>&1 | grep "trusted.pgfid" | awk -v n=$i 'NR==n' | cut -d'=' -f2`
         TEST_IN_LOOP [ $HL = "0x00000001" ]
     done
 
diff --git a/tests/bugs/readdir-ahead/bug-1439640.t b/tests/bugs/readdir-ahead/bug-1439640.t
index cc6c829..dcd5407 100755
--- a/tests/bugs/readdir-ahead/bug-1439640.t
+++ b/tests/bugs/readdir-ahead/bug-1439640.t
@@ -8,6 +8,7 @@ cleanup;
 TEST glusterd
 
 TEST $CLI volume create $V0 $H0:$B{0..1}/$V0
+TEST $CLI volume set $V0 readdir-ahead on
 TEST $CLI volume start $V0
 
 TEST ! $CLI volume set $V0 parallel-readdir sdf
diff --git a/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t b/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t
index d3b5f9a..a2abaf6 100644
--- a/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t
+++ b/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t
@@ -108,10 +108,7 @@ TEST stat $M0/file2
 
 # Though file is created on all 3 bricks, lookup will fail as arbiter blames the
 # other 2 bricks and ariter is not 'readable'.
-# TEST ! stat $M0/file3
-# But the checks for failing lookups when quorum is not met is not yet there in
-# rhgs-3.4.0, so stat will succeed.
-TEST  stat $M0/file3
+TEST ! stat $M0/file3
 
 # Launch index heal to complete any pending data/metadata heals.
 TEST $CLI volume heal $V0
diff --git a/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t b/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
index 91ed39b..d7d1f28 100644
--- a/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
+++ b/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
@@ -32,6 +32,7 @@ EXPECT 2 get_pending_heal_count $V0
 # Bring it back up and let heal complete.
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
diff --git a/tests/bugs/shard/zero-flag.t b/tests/bugs/shard/zero-flag.t
index 84cb963..1f39787 100644
--- a/tests/bugs/shard/zero-flag.t
+++ b/tests/bugs/shard/zero-flag.t
@@ -14,6 +14,7 @@ TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
 TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
 TEST $CLI volume start $V0
 
 TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
diff --git a/tests/cluster.rc b/tests/cluster.rc
index c1ff8ab..e258b58 100644
--- a/tests/cluster.rc
+++ b/tests/cluster.rc
@@ -142,6 +142,16 @@ function peer_count() {
     $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
 }
 
+function attempt_replace_brick {
+    local cli_no=$1
+    local vol=$2;
+    local src_brick=$3;
+    local dst_brick=$4;
+
+    eval \$CLI_$cli_no volume replace-brick $vol $src_brick $dst_brick commit force;
+    echo $?
+}
+
 function cluster_rebalance_status_field {
         #The rebalance status can be up to 3 words, (e.g.:'fix-layout in progress'), hence the awk-print $7 thru $9.
         #But if the status is less than 3 words, it also prints the next field i.e the run_time_in_secs.(e.g.:'completed 3.00').
diff --git a/tests/include.rc b/tests/include.rc
index aca4c4a..81146f4 100644
--- a/tests/include.rc
+++ b/tests/include.rc
@@ -74,6 +74,7 @@ PROCESS_UP_TIMEOUT=30
 NFS_EXPORT_TIMEOUT=20
 CHILD_UP_TIMEOUT=20
 PROBE_TIMEOUT=60
+PEER_SYNC_TIMEOUT=20
 REBALANCE_TIMEOUT=360
 REOPEN_TIMEOUT=20
 HEAL_TIMEOUT=80
-- 
1.8.3.1