From 01bb17a0910a638e89a44a6da4b1359123940498 Mon Sep 17 00:00:00 2001
From: Hari Gowtham <hgowtham@redhat.com>
Date: Wed, 17 Apr 2019 12:17:27 +0530
Subject: [PATCH 130/141] tier/test: new-tier-cmds.t fails after a glusterd
 restart

Problem: new-tier-cmds.t restarts the gluster processes, and after
the restart the bricks and the tier process take more time than
before to come online. This causes the detach start to fail.

Fix: Give them enough time to come online after the restart.

label: DOWNSTREAM ONLY

Change-Id: I0f50b0bb77fe49ebd3a0292e190d0350d7994cfe
Signed-off-by: Hari Gowtham <hgowtham@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/168130
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
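
The fix replaces the fixed sleeps with EXPECT_WITHIN checks, which
poll a status function until it prints the expected value or a
timeout expires, so the test waits only as long as it actually needs
to. A minimal sketch of the idiom (expect_within_sketch is a
hypothetical name; the real EXPECT_WITHIN is provided by the test
harness):

function expect_within_sketch {
        # Poll the check command "$@" until it prints $expected,
        # or give up once $timeout seconds have passed.
        local timeout=$1 expected=$2
        shift 2
        local deadline=$((SECONDS + timeout))
        while [ $SECONDS -lt $deadline ]; do
                if [ "$("$@")" = "$expected" ]; then
                        return 0
                fi
                sleep 1
        done
        return 1
}

# Usage mirroring the test: wait until all three tier daemons are up.
# expect_within_sketch "$PROCESS_UP_TIMEOUT" 3 get_tierd_count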
 tests/basic/tier/new-tier-cmds.t | 45 ++++++++++++++++++++++++++--------------
 tests/volume.rc                  |  8 +++++++
 2 files changed, 37 insertions(+), 16 deletions(-)

diff --git a/tests/basic/tier/new-tier-cmds.t b/tests/basic/tier/new-tier-cmds.t
index b9c9390..92881ac 100644
--- a/tests/basic/tier/new-tier-cmds.t
+++ b/tests/basic/tier/new-tier-cmds.t
@@ -19,14 +19,6 @@ function create_dist_tier_vol () {
         TEST $CLI_1 volume tier $V0 attach replica 2 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3 $H1:$B1/${V0}_h4 $H2:$B2/${V0}_h5 $H3:$B3/${V0}_h6
 }
 
-function tier_daemon_status {
-        local _VAR=CLI_$1
-        local xpath_sel='//node[hostname="Tier Daemon"][path="localhost"]/status'
-        ${!_VAR} --xml volume status $V0 \
-                | xmllint --xpath "$xpath_sel" - \
-                | sed -n '/.*<status>\([0-9]*\).*/s//\1/p'
-}
-
 function detach_xml_status {
         $CLI_1 volume tier $V0 detach status --xml | sed -n \
         '/.*<opErrstr>Detach tier status successful/p' | wc -l
@@ -70,7 +62,20 @@ TEST $glusterd_2;
 EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 
 #after starting detach tier the detach tier status should display the status
-sleep 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b6
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h6
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_tierd_count
 $CLI_1 volume status
 TEST $CLI_1 volume tier $V0 detach start
 
@@ -91,13 +96,21 @@ EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_b2
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_h2
 
-# Parsing normal output doesn't work because of line-wrap issues on our
-# regression machines, and the version of xmllint there doesn't support --xpath
-# so we can't do it that way either.  In short, there's no way for us to detect
-# when we can stop waiting, so we just have to wait the maximum time every time
-# and hope any failures will show up later in the script.
-sleep $PROCESS_UP_TIMEOUT
-#XPECT_WITHIN $PROCESS_UP_TIMEOUT 1 tier_daemon_status 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b6
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h6
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_tierd_count
+$CLI_1 volume status
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status
 
diff --git a/tests/volume.rc b/tests/volume.rc
index 289b197..b326098 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -719,6 +719,14 @@ function get_snapd_count {
         ps auxww | grep glusterfs | grep snapd.pid | grep -v grep | wc -l
 }
 
+function get_tierd_count {
+        ps auxww | grep glusterfs | grep tierd.pid | grep -v grep | wc -l
+}
+
+function get_shd_count {
+        ps auxww | grep glusterfs | grep shd.pid | grep -v grep | wc -l
+}
+
 function drop_cache() {
 	case $OSTYPE in
 	Linux)
-- 
1.8.3.1
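
The new count helpers follow the same pattern as the existing
get_snapd_count: each glusterfs daemon is started with its pid file
path on its command line, so a ps listing carries one line per
running daemon that mentions tierd.pid or shd.pid, along the lines
of (illustrative shape only; the exact arguments vary by version):

  /usr/sbin/glusterfs -s localhost ... -p /var/run/gluster/vols/patchy/tierd.pid ...

The grep -v grep stage keeps the grep processes themselves out of
the count before wc -l tallies the matches.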