From 684a4949552164d3469329b3f959de4369d54faa Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Sun, 14 Jul 2019 08:06:11 +0530
Subject: [PATCH 255/255] Revert "tier/shd/glusterd: with shd mux, the shd
 volfile path have to be updated for tier-heald.t"

This reverts commit 6e7d333625ecd9f7402c2e839338350fa86eaf45.

Updates: bz#1471742
Change-Id: I6c27634999f72b5bbb35d5d13cdebda7af072b01
Reviewed-on: https://code.engineering.redhat.com/gerrit/176017
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 tests/basic/tier/tier-heald.t | 35 +++++++++++++++--------------------
 1 file changed, 15 insertions(+), 20 deletions(-)

diff --git a/tests/basic/tier/tier-heald.t b/tests/basic/tier/tier-heald.t
index 0ec9e43..a8e634f 100644
--- a/tests/basic/tier/tier-heald.t
+++ b/tests/basic/tier/tier-heald.t
@@ -11,7 +11,7 @@ cleanup;
 TEST glusterd
 TEST pidof glusterd
 
-r2_volfile=$(gluster system:: getwd)"/vols/r2/r2-shd.vol"
+volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol"
 
 # Commands should fail when both tiers are not of distribute type.
 # Glustershd shouldn't be running as long as there are no replicate/disperse
@@ -34,56 +34,51 @@ TEST $CLI volume tier r2 attach $H0:$B0/r2_hot
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 TEST $CLI volume heal r2 enable
 EXPECT "enable" volume_option r2 "cluster.self-heal-daemon"
-EXPECT "enable" volgen_volume_option $r2_volfile r2-replicate-0 cluster replicate self-heal-daemon
+EXPECT "enable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 TEST $CLI volume heal r2 disable
 EXPECT "disable" volume_option r2 "cluster.self-heal-daemon"
-EXPECT "disable" volgen_volume_option $r2_volfile r2-replicate-0 cluster replicate self-heal-daemon
+EXPECT "disable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 # Commands should work on disperse volume.
 TEST $CLI volume create ec2 disperse 3 redundancy 1 $H0:$B0/ec2_0 $H0:$B0/ec2_1 $H0:$B0/ec2_2
 TEST $CLI volume start ec2
 
-ec2_volfile=$(gluster system:: getwd)"/vols/ec2/ec2-shd.vol"
-
 TEST $CLI volume tier ec2 attach replica 2 $H0:$B0/ec2_hot{1..4}
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 TEST $CLI volume heal ec2 enable
 EXPECT "enable" volume_option ec2 "cluster.disperse-self-heal-daemon"
-EXPECT "enable" volgen_volume_option $ec2_volfile ec2-disperse-0 cluster disperse self-heal-daemon
+EXPECT "enable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 TEST $CLI volume heal ec2 disable
 EXPECT "disable" volume_option ec2 "cluster.disperse-self-heal-daemon"
-EXPECT "disable" volgen_volume_option $ec2_volfile ec2-disperse-0 cluster disperse self-heal-daemon
+EXPECT "disable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 
 #Check that shd graph is rewritten correctly on volume stop/start
-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
 TEST $CLI volume stop r2
-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
-
-# Has been commented as the validations after stop using volfile dont hold true.
-#EXPECT "N" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+EXPECT "N" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
 TEST $CLI volume stop ec2
 # When both the volumes are stopped glustershd volfile is not modified just the
 # process is stopped
 TEST "[ -z $(get_shd_process_pid) ]"
 
 TEST $CLI volume start r2
-# Has been commented as the validations after stop using volfile dont hold true.
-#EXPECT "N" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate
+EXPECT "N" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
 
 TEST $CLI volume start ec2
 
-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+EXPECT "Y" volgen_volume_exists $volfile ec2-replicate-0 cluster replicate
 
 TEST $CLI volume tier ec2 detach force
 
-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
-EXPECT "N" volgen_volume_exists $ec2_volfile ec2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+EXPECT "N" volgen_volume_exists $volfile ec2-replicate-0 cluster replicate
 
 TEST $CLI volume set r2 self-heal-daemon on
 TEST $CLI volume set r2 cluster.self-heal-daemon off
-- 
1.8.3.1