From 01bb17a0910a638e89a44a6da4b1359123940498 Mon Sep 17 00:00:00 2001
From: Hari Gowtham <hgowtham@redhat.com>
Date: Wed, 17 Apr 2019 12:17:27 +0530
Subject: [PATCH 130/141] tier/test: new-tier-cmds.t fails after a glusterd
 restart

Problem: new-tier-cmds.t restarts the gluster processes, and after
the restart the bricks and the tier process take longer than before
to come online. This causes the detach start to fail.

Fix: Instead of a fixed sleep, give the bricks, shd, and tierd enough
time to come online after the restart by polling with EXPECT_WITHIN
(see the sketch after the diffstat below).

label: DOWNSTREAM ONLY

Change-Id: I0f50b0bb77fe49ebd3a0292e190d0350d7994cfe
Signed-off-by: Hari Gowtham <hgowtham@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/168130
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 tests/basic/tier/new-tier-cmds.t | 45 ++++++++++++++++++++++++++--------------
 tests/volume.rc                  |  8 +++++++
 2 files changed, 37 insertions(+), 16 deletions(-)
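Note: the change replaces fixed sleeps with EXPECT_WITHIN polls, so
the test waits only as long as the processes actually need to come
online. As a rough illustration, a minimal sketch of that polling
idiom (simplified; the real EXPECT_WITHIN helper lives in
tests/include.rc and also reports the result to the test harness):

    # Re-run a probe command until its output matches the expected
    # value or the timeout expires, instead of sleeping a fixed time.
    expect_within () {
            local timeout=$1 expected=$2
            shift 2
            local deadline=$(( $(date +%s) + timeout ))
            while [ "$(date +%s)" -lt "$deadline" ]; do
                    [ "$("$@")" = "$expected" ] && return 0
                    sleep 1
            done
            return 1
    }

    # Example probe, in the same ps|grep|wc -l shape as the
    # get_tierd_count helper added to tests/volume.rc below:
    get_tierd_count () {
            ps auxww | grep glusterfs | grep tierd.pid | grep -v grep | wc -l
    }

    expect_within 20 3 get_tierd_count || echo "tierd not up on all nodes"

The new get_tierd_count and get_shd_count helpers deliberately mirror
the existing get_snapd_count pattern in tests/volume.rc.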
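(For context on running it: the downstream regression harness drives
these tests with prove via run-tests.sh; assuming a built source tree
and root privileges, this one can typically be run standalone with
something like "prove -vf tests/basic/tier/new-tier-cmds.t".)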
diff --git a/tests/basic/tier/new-tier-cmds.t b/tests/basic/tier/new-tier-cmds.t
index b9c9390..92881ac 100644
--- a/tests/basic/tier/new-tier-cmds.t
+++ b/tests/basic/tier/new-tier-cmds.t
@@ -19,14 +19,6 @@ function create_dist_tier_vol () {
         TEST $CLI_1 volume tier $V0 attach replica 2 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3 $H1:$B1/${V0}_h4 $H2:$B2/${V0}_h5 $H3:$B3/${V0}_h6
 }
 
-function tier_daemon_status {
-        local _VAR=CLI_$1
-        local xpath_sel='//node[hostname="Tier Daemon"][path="localhost"]/status'
-        ${!_VAR} --xml volume status $V0 \
-                | xmllint --xpath "$xpath_sel" - \
-                | sed -n '/.*<status>\([0-9]*\).*/s//\1/p'
-}
-
 function detach_xml_status {
         $CLI_1 volume tier $V0 detach status --xml | sed -n \
         '/.*<opErrstr>Detach tier status successful/p' | wc -l
@@ -70,7 +62,20 @@ TEST $glusterd_2;
 EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 
 #after starting detach tier the detach tier status should display the status
-sleep 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b6
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h6
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_tierd_count
 $CLI_1 volume status
 TEST $CLI_1 volume tier $V0 detach start
 
@@ -91,13 +96,21 @@ EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_b2
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_h2
 
-# Parsing normal output doesn't work because of line-wrap issues on our
-# regression machines, and the version of xmllint there doesn't support --xpath
-# so we can't do it that way either.  In short, there's no way for us to detect
-# when we can stop waiting, so we just have to wait the maximum time every time
-# and hope any failures will show up later in the script.
-sleep $PROCESS_UP_TIMEOUT
-#XPECT_WITHIN $PROCESS_UP_TIMEOUT 1 tier_daemon_status 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b6
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h6
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_tierd_count
+$CLI_1 volume status
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status
 
diff --git a/tests/volume.rc b/tests/volume.rc
index 289b197..b326098 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -719,6 +719,14 @@ function get_snapd_count {
         ps auxww | grep glusterfs | grep snapd.pid | grep -v grep | wc -l
 }
 
+function get_tierd_count {
+        ps auxww | grep glusterfs | grep tierd.pid | grep -v grep | wc -l
+}
+
+function get_shd_count {
+        ps auxww | grep glusterfs | grep shd.pid | grep -v grep | wc -l
+}
+
 function drop_cache() {
 	case $OSTYPE in
 	Linux)
-- 
1.8.3.1