From 01bb17a0910a638e89a44a6da4b1359123940498 Mon Sep 17 00:00:00 2001
From: Hari Gowtham <hgowtham@redhat.com>
Date: Wed, 17 Apr 2019 12:17:27 +0530
Subject: [PATCH 130/141] tier/test: new-tier-cmds.t fails after a glusterd
 restart

Problem: new-tier-cmds.t does a restart of gluster processes and
after the restart the bricks and the tier process takes more
time than before to come online. This causes the detach start to
fail.

Fix: Give it enough time to come online after the restart.

label: DOWNSTREAM ONLY

Change-Id: I0f50b0bb77fe49ebd3a0292e190d0350d7994cfe
Signed-off-by: Hari Gowtham <hgowtham@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/168130
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 tests/basic/tier/new-tier-cmds.t | 45 ++++++++++++++++++++++++++--------------
 tests/volume.rc                  |  8 +++++++
 2 files changed, 37 insertions(+), 16 deletions(-)

diff --git a/tests/basic/tier/new-tier-cmds.t b/tests/basic/tier/new-tier-cmds.t
index b9c9390..92881ac 100644
--- a/tests/basic/tier/new-tier-cmds.t
+++ b/tests/basic/tier/new-tier-cmds.t
@@ -19,14 +19,6 @@ function create_dist_tier_vol () {
         TEST $CLI_1 volume tier $V0 attach replica 2 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3 $H1:$B1/${V0}_h4 $H2:$B2/${V0}_h5 $H3:$B3/${V0}_h6
 }
 
-function tier_daemon_status {
-        local _VAR=CLI_$1
-        local xpath_sel='//node[hostname="Tier Daemon"][path="localhost"]/status'
-        ${!_VAR} --xml volume status $V0 \
-                | xmllint --xpath "$xpath_sel" - \
-                | sed -n '/.*<status>\([0-9]*\).*/s//\1/p'
-}
-
 function detach_xml_status {
         $CLI_1 volume tier $V0 detach status --xml | sed -n \
         '/.*<opErrstr>Detach tier status successful/p' | wc -l
@@ -70,7 +62,20 @@ TEST $glusterd_2;
 EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 
 #after starting detach tier the detach tier status should display the status
-sleep 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b6
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h6
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_tierd_count
 $CLI_1 volume status
 TEST $CLI_1 volume tier $V0 detach start
 
@@ -91,13 +96,21 @@ EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_b2
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_h2
 
-# Parsing normal output doesn't work because of line-wrap issues on our
-# regression machines, and the version of xmllint there doesn't support --xpath
-# so we can't do it that way either.  In short, there's no way for us to detect
-# when we can stop waiting, so we just have to wait the maximum time every time
-# and hope any failures will show up later in the script.
-sleep $PROCESS_UP_TIMEOUT
-#XPECT_WITHIN $PROCESS_UP_TIMEOUT 1 tier_daemon_status 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b6
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h6
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_tierd_count
+$CLI_1 volume status
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status
 
diff --git a/tests/volume.rc b/tests/volume.rc
index 289b197..b326098 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -719,6 +719,14 @@ function get_snapd_count {
         ps auxww | grep glusterfs | grep snapd.pid | grep -v grep | wc -l
 }
 
+function get_tierd_count {
+        ps auxww | grep glusterfs | grep tierd.pid | grep -v grep | wc -l
+}
+
+function get_shd_count {
+        ps auxww | grep glusterfs | grep shd.pid | grep -v grep | wc -l
+}
+
 function drop_cache() {
 	case $OSTYPE in
 	Linux)
-- 
1.8.3.1