From 01bb17a0910a638e89a44a6da4b1359123940498 Mon Sep 17 00:00:00 2001
From: Hari Gowtham <hgowtham@redhat.com>
Date: Wed, 17 Apr 2019 12:17:27 +0530
Subject: [PATCH 130/141] tier/test: new-tier-cmds.t fails after a glusterd
 restart

Problem: new-tier-cmds.t restarts the gluster processes, and after the
restart the bricks and the tier process take longer than before to come
online. This causes the detach start to fail.

Fix: Give them enough time to come online after the restart.

label: DOWNSTREAM ONLY

Change-Id: I0f50b0bb77fe49ebd3a0292e190d0350d7994cfe
Signed-off-by: Hari Gowtham <hgowtham@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/168130
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
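Editor's note (placed here in the post-"---" notes area so it stays out of
the commit message; not part of the patch): the fix replaces fixed sleeps
with EXPECT_WITHIN probes, which poll a check function until it prints the
expected value or a timeout expires, so the test waits only as long as it
must. A minimal sketch of that polling idiom, assuming one-second
granularity; expect_within_sketch is a hypothetical stand-in for the real
EXPECT_WITHIN in the test framework (tests/include.rc):

function expect_within_sketch {
        # Poll "$@" until it prints "$expected" or "$timeout" seconds pass.
        local timeout=$1 expected=$2
        shift 2
        local elapsed=0
        while [ "$elapsed" -lt "$timeout" ]; do
                if [ "$("$@")" = "$expected" ]; then
                        return 0        # probe matched; stop waiting early
                fi
                sleep 1
                elapsed=$((elapsed + 1))
        done
        return 1                        # timed out without a match
}

# Example: wait for all three tier daemons instead of sleeping blindly.
# expect_within_sketch $PROCESS_UP_TIMEOUT "3" get_tierd_count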
 tests/basic/tier/new-tier-cmds.t | 45 ++++++++++++++++++++++++++--------------
 tests/volume.rc                  |  8 +++++++
 2 files changed, 37 insertions(+), 16 deletions(-)

diff --git a/tests/basic/tier/new-tier-cmds.t b/tests/basic/tier/new-tier-cmds.t
index b9c9390..92881ac 100644
--- a/tests/basic/tier/new-tier-cmds.t
+++ b/tests/basic/tier/new-tier-cmds.t
@@ -19,14 +19,6 @@ function create_dist_tier_vol () {
         TEST $CLI_1 volume tier $V0 attach replica 2 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3 $H1:$B1/${V0}_h4 $H2:$B2/${V0}_h5 $H3:$B3/${V0}_h6
 }
 
-function tier_daemon_status {
-        local _VAR=CLI_$1
-        local xpath_sel='//node[hostname="Tier Daemon"][path="localhost"]/status'
-        ${!_VAR} --xml volume status $V0 \
-                | xmllint --xpath "$xpath_sel" - \
-                | sed -n '/.*<status>\([0-9]*\).*/s//\1/p'
-}
-
 function detach_xml_status {
         $CLI_1 volume tier $V0 detach status --xml | sed -n \
         '/.*<opErrstr>Detach tier status successful/p' | wc -l
@@ -70,7 +62,20 @@ TEST $glusterd_2;
 EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 
 #after starting detach tier the detach tier status should display the status
-sleep 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b6
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h6
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_tierd_count
 $CLI_1 volume status
 TEST $CLI_1 volume tier $V0 detach start
 
@@ -91,13 +96,21 @@ EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_b2
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_h2
 
-# Parsing normal output doesn't work because of line-wrap issues on our
-# regression machines, and the version of xmllint there doesn't support --xpath
-# so we can't do it that way either.  In short, there's no way for us to detect
-# when we can stop waiting, so we just have to wait the maximum time every time
-# and hope any failures will show up later in the script.
-sleep $PROCESS_UP_TIMEOUT
-#XPECT_WITHIN $PROCESS_UP_TIMEOUT 1 tier_daemon_status 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b6
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h6
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_tierd_count
+$CLI_1 volume status
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status
 
diff --git a/tests/volume.rc b/tests/volume.rc
index 289b197..b326098 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -719,6 +719,14 @@ function get_snapd_count {
         ps auxww | grep glusterfs | grep snapd.pid | grep -v grep | wc -l
 }
 
+function get_tierd_count {
+        ps auxww | grep glusterfs | grep tierd.pid | grep -v grep | wc -l
+}
+
+function get_shd_count {
+        ps auxww | grep glusterfs | grep shd.pid | grep -v grep | wc -l
+}
+
 function drop_cache() {
 	case $OSTYPE in
 	Linux)
-- 
1.8.3.1
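Editor's note (appended after the patch trailer, not part of the patch):
the new get_tierd_count and get_shd_count helpers follow the existing
get_snapd_count pattern, piping ps through grep and filtering out the grep
process itself with "grep -v grep". On hosts where procps pgrep is
available, a hypothetical equivalent could drop that filter, since pgrep
never reports itself as a match:

# Hypothetical pgrep variant of get_tierd_count (not in the tree; the
# test suite standardizes on the portable ps | grep chain shown above).
# pgrep -f matches against the full command line, so this counts
# glusterfs processes whose arguments mention tierd.pid.
function get_tierd_count_pgrep {
        pgrep -f 'glusterfs.*tierd.pid' | wc -l
}

It would be consumed the same way, e.g.
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_tierd_count_pgrep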