From 01bb17a0910a638e89a44a6da4b1359123940498 Mon Sep 17 00:00:00 2001
From: Hari Gowtham <hgowtham@redhat.com>
Date: Wed, 17 Apr 2019 12:17:27 +0530
Subject: [PATCH 130/141] tier/test: new-tier-cmds.t fails after a glusterd
 restart

Problem: new-tier-cmds.t restarts the gluster processes, and after the
restart the bricks and the tier process take more time than before to
come online. This causes the detach start to fail.

Fix: Give the bricks and the tier process enough time to come online
after the restart by polling their status instead of sleeping for a
fixed interval.
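As a rough sketch of the approach (not part of the change itself), the fixed
sleeps are replaced with polled waits built from the test framework's
EXPECT_WITHIN helper and the process counters this patch adds to
tests/volume.rc; the brick path below is only an example:

    # Poll until the example brick reports online, up to $CHILD_UP_TIMEOUT.
    EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
    # Poll until all three tier daemons are counted as running again.
    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_tierd_count

Polling returns as soon as the processes are up, so the test no longer has to
wait for the worst case on every run.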
label: DOWNSTREAM ONLY

Change-Id: I0f50b0bb77fe49ebd3a0292e190d0350d7994cfe
Signed-off-by: Hari Gowtham <hgowtham@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/168130
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 tests/basic/tier/new-tier-cmds.t | 45 ++++++++++++++++++++++++++--------------
 tests/volume.rc                  |  8 +++++++
 2 files changed, 37 insertions(+), 16 deletions(-)

diff --git a/tests/basic/tier/new-tier-cmds.t b/tests/basic/tier/new-tier-cmds.t
index b9c9390..92881ac 100644
--- a/tests/basic/tier/new-tier-cmds.t
+++ b/tests/basic/tier/new-tier-cmds.t
@@ -19,14 +19,6 @@ function create_dist_tier_vol () {
         TEST $CLI_1 volume tier $V0 attach replica 2 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3 $H1:$B1/${V0}_h4 $H2:$B2/${V0}_h5 $H3:$B3/${V0}_h6
 }
 
-function tier_daemon_status {
-        local _VAR=CLI_$1
-        local xpath_sel='//node[hostname="Tier Daemon"][path="localhost"]/status'
-        ${!_VAR} --xml volume status $V0 \
-                | xmllint --xpath "$xpath_sel" - \
-                | sed -n '/.*<status>\([0-9]*\).*/s//\1/p'
-}
-
 function detach_xml_status {
         $CLI_1 volume tier $V0 detach status --xml | sed -n \
         '/.*<opErrstr>Detach tier status successful/p' | wc -l
@@ -70,7 +62,20 @@ TEST $glusterd_2;
 EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 
 #after starting detach tier the detach tier status should display the status
-sleep 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b6
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h6
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_tierd_count
 $CLI_1 volume status
 TEST $CLI_1 volume tier $V0 detach start
 
@@ -91,13 +96,21 @@ EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_b2
 EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_h2
 
-# Parsing normal output doesn't work because of line-wrap issues on our
-# regression machines, and the version of xmllint there doesn't support --xpath
-# so we can't do it that way either.  In short, there's no way for us to detect
-# when we can stop waiting, so we just have to wait the maximum time every time
-# and hope any failures will show up later in the script.
-sleep $PROCESS_UP_TIMEOUT
-#XPECT_WITHIN $PROCESS_UP_TIMEOUT 1 tier_daemon_status 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_b4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H1 $B1/${V0}_h4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_b5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 2 $V0 $H2 $B2/${V0}_h5
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_b6
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 3 $V0 $H3 $B3/${V0}_h6
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" get_shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_tierd_count
+$CLI_1 volume status
 
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status
 
diff --git a/tests/volume.rc b/tests/volume.rc
index 289b197..b326098 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -719,6 +719,14 @@ function get_snapd_count {
         ps auxww | grep glusterfs | grep snapd.pid | grep -v grep | wc -l
 }
 
+function get_tierd_count {
+        ps auxww | grep glusterfs | grep tierd.pid | grep -v grep | wc -l
+}
+
+function get_shd_count {
+        ps auxww | grep glusterfs | grep shd.pid | grep -v grep | wc -l
+}
+
 function drop_cache() {
 	case $OSTYPE in
 	Linux)
-- 
1.8.3.1