From 48f7be493588fdf5e99dff0c3b91327e07da05f3 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:48:34 +0530
Subject: [PATCH 242/255] Revert "tests/shd: Add test coverage for shd mux"

This reverts commit b7f832288d2d2e57231d90765afc049ad7cb2f9d.

BUG: 1471742
Change-Id: Ifccac5150f07b98006714e43c77c5a4b1fd38cb8
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175951
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 tests/basic/glusterd-restart-shd-mux.t |  96 ---------------------
 tests/basic/shd-mux.t                  | 149 ---------------------------------
 tests/basic/volume-scale-shd-mux.t     | 112 -------------------------
 tests/volume.rc                        |  15 ----
 4 files changed, 372 deletions(-)
 delete mode 100644 tests/basic/glusterd-restart-shd-mux.t
 delete mode 100644 tests/basic/shd-mux.t
 delete mode 100644 tests/basic/volume-scale-shd-mux.t

diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
deleted file mode 100644
index a50af9d..0000000
--- a/tests/basic/glusterd-restart-shd-mux.t
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TESTS_EXPECTED_IN_LOOP=20
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST $CLI volume set $V0 cluster.eager-lock off
-TEST $CLI volume set $V0 performance.flush-behind off
-TEST $CLI volume start $V0
-
-for i in $(seq 1 3); do
-   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
-   TEST $CLI volume start ${V0}_afr$i
-   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
-   TEST $CLI volume start ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-#Stop glusterd
-TEST pkill glusterd
-#Only glusterd is stopped, so there will still be one shd
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
-TEST glusterd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-#Check the thread count becomes number of volumes*number of ec subvolumes (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count becomes number of volumes*number of afr subvolumes (4*6=24)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-shd_pid=$(get_shd_mux_pid $V0)
-for i in $(seq 1 3); do
-    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
-    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
-    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
-    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
-done
-
-#Reboot a node scenario
-TEST pkill gluster
-#All gluster processes are killed, so there will be no shd
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-TEST glusterd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-#Check the thread count becomes number of volumes*number of ec subvolumes (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count becomes number of volumes*number of afr subvolumes (4*6=24)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-shd_pid=$(get_shd_mux_pid $V0)
-for i in $(seq 1 3); do
-    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
-    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
-    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
-    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
-done
-
-for i in $(seq 1 3); do
-   TEST $CLI volume stop ${V0}_afr$i
-   TEST $CLI volume stop ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}3
-
-TEST touch $M0/foo{1..100}
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0
-
-TEST $CLI volume start ${V0} force
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-
-TEST rm -rf $M0/*
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-
-
-TEST $CLI volume stop ${V0}
-TEST $CLI volume delete ${V0}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count
-
-cleanup
diff --git a/tests/basic/shd-mux.t b/tests/basic/shd-mux.t
deleted file mode 100644
index e42a34a..0000000
--- a/tests/basic/shd-mux.t
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TESTS_EXPECTED_IN_LOOP=16
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST $CLI volume set $V0 cluster.eager-lock off
-TEST $CLI volume set $V0 performance.flush-behind off
-TEST $CLI volume start $V0
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
-
-shd_pid=$(get_shd_mux_pid $V0)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-#Create one more volume
-TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
-TEST $CLI volume start ${V0}_1
-
-#Check whether the shd has multiplexed or not
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
-
-TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
-TEST $CLI volume set ${V0}_1 cluster.eager-lock off
-TEST $CLI volume set ${V0}_1 performance.flush-behind off
-TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}4
-TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
-TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
-
-TEST touch $M0/foo{1..100}
-TEST touch $M1/foo{1..100}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
-
-TEST $CLI volume start ${V0} force
-TEST $CLI volume start ${V0}_1 force
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
-
-TEST rm -rf $M0/*
-TEST rm -rf $M1/*
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
-
-#Stop the volume
-TEST $CLI volume stop ${V0}_1
-TEST $CLI volume delete ${V0}_1
-
-#Check the stop succeeded and detached the volume without restarting it
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-
-#Check the thread count returns to the earlier number after stopping
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-
-#Now create an ec volume and check mux works
-TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
-TEST $CLI volume start ${V0}_2
-
-#Check whether the shd has multiplexed or not
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
-
-TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
-TEST $CLI volume set ${V0}_2 cluster.eager-lock off
-TEST $CLI volume set ${V0}_2 performance.flush-behind off
-TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}4
-TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
-TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
-
-TEST touch $M0/foo{1..100}
-TEST touch $M1/foo{1..100}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
-
-TEST $CLI volume start ${V0} force
-TEST $CLI volume start ${V0}_2 force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
-
-TEST rm -rf $M0/*
-TEST rm -rf $M1/*
-
-
-#Stop the volume
-TEST $CLI volume stop ${V0}_2
-TEST $CLI volume delete ${V0}_2
-
-#Check the stop succeeded and detached the volume without restarting it
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-
-#Check the thread count becomes zero for ec-related threads
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count returns to the earlier number after stopping
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-for i in $(seq 1 3); do
-   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
-   TEST $CLI volume start ${V0}_afr$i
-   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
-   TEST $CLI volume start ${V0}_ec$i
-done
-
-#Check the thread count becomes number of volumes*number of ec subvolumes (3*6=18)
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count becomes number of volumes*number of afr subvolumes (4*6=24)
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-#Delete the volumes
-for i in $(seq 1 3); do
-   TEST $CLI volume stop ${V0}_afr$i
-   TEST $CLI volume stop ${V0}_ec$i
-   TEST $CLI volume delete ${V0}_afr$i
-   TEST $CLI volume delete ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume stop ${V0}
-TEST $CLI volume delete ${V0}
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-cleanup
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
deleted file mode 100644
index dd9cf83..0000000
--- a/tests/basic/volume-scale-shd-mux.t
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TESTS_EXPECTED_IN_LOOP=6
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST $CLI volume set $V0 cluster.eager-lock off
-TEST $CLI volume set $V0 performance.flush-behind off
-TEST $CLI volume start $V0
-
-for i in $(seq 1 2); do
-   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
-   TEST $CLI volume start ${V0}_afr$i
-   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
-   TEST $CLI volume start ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-#Check the thread count becomes number of volumes*number of ec subvolumes (2*6=12)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count becomes number of volumes*number of afr subvolumes (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
-#Check the thread count becomes number of volumes*number of afr subvolumes plus 3 additional threads from the newly added bricks (3*6+3=21)
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-#Remove the brick and check the detach is successful
-$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
-#Check the thread count becomes number of volumes*number of ec subvolumes plus 6 additional threads from the newly added bricks (2*6+6=18)
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-
-#Remove the brick and check the detach is successful
-$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-
-
-for i in $(seq 1 2); do
-   TEST $CLI volume stop ${V0}_afr$i
-   TEST $CLI volume stop ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}4
-
-TEST touch $M0/foo{1..100}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
-
-TEST $CLI volume start ${V0} force
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-
-TEST rm -rf $M0/*
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-shd_pid=$(get_shd_mux_pid $V0)
-TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
-TEST $CLI volume start ${V0}_distribute1
-
-#Creating a non-replicate/non-ec volume should not have any effect on shd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
-
-TEST mkdir $B0/add/
-#Now convert the distributed volume to replicate
-TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-#Scale down the volume
-TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume stop ${V0}
-TEST $CLI volume delete ${V0}
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-TEST rm -rf $B0/add/
-TEST mkdir $B0/add/
-#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
-TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "__afr_shd_healer_wait"
-
-#Now convert the replica volume to distribute again and make sure the shd is now stopped
-TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
-TEST rm -rf $B0/add/
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-cleanup
diff --git a/tests/volume.rc b/tests/volume.rc
index 6a78c37..022d972 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -913,21 +913,6 @@ function volgen_check_ancestry {
         fi
 }
 
-function get_shd_mux_pid {
-   local volume=$1
-   pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
-   echo $pid
-}
-
-function shd_count {
-   ps aux | grep "glustershd" | grep -v grep | wc -l
-}
-
-function number_healer_threads_shd {
-   local pid=$(get_shd_mux_pid $1)
-   pstack $pid | grep $2 | wc -l
-}
-
 function get_mtime {
     local time=$(get-mdata-xattr -m $1)
     if [ $time == "-1" ];
-- 
1.8.3.1