From b7f832288d2d2e57231d90765afc049ad7cb2f9d Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 9 May 2019 14:07:48 +0530
Subject: [PATCH 165/169] tests/shd: Add test coverage for shd mux

This patch adds more test cases for shd mux.
The test cases include:
1) Creating multiple volumes to check the attach and detach
   of self-heal daemon requests.
2) Making sure healing happens in all scenarios.
3) After a volume detach, making sure all threads of the detached
   volume are cleaned up.
4) Repeating all of the above tests for an ec volume.
5) Node reboot case.
6) glusterd restart cases.
7) Add-brick/remove-brick.
8) Converting a distributed volume to a disperse volume.
9) Converting a replicated volume to a distributed volume.

Backport of: https://review.gluster.org/#/c/glusterfs/+/22697/

>Change-Id: I7c317ef9d23a45ffd831157e4890d7c83a8fce7b
>fixes: bz#1708929
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: Ie732ead9413bd32b8c262303468a0720538334fb
BUG: 1704562
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/172634
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
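A short usage sketch (illustrative only, not part of the change) of the three
helpers added to tests/volume.rc below, assuming the standard test-harness
environment ($CLI, $V0 and the timeout variables) sourced from include.rc and
volume.rc; the ${V0}_1 volume name is just an example:

    # One glustershd process should serve all volumes (shd mux).
    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
    # Every replicate/disperse volume should report the same multiplexed shd PID.
    shd_pid=$(get_shd_mux_pid $V0)
    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
    # One healer thread per brick: a 6-brick replica-3 volume shows 6 threads
    # parked in __afr_shd_healer_wait (counted via pstack).
    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"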
 tests/basic/glusterd-restart-shd-mux.t |  96 +++++++++++++++++++++
 tests/basic/shd-mux.t                  | 149 +++++++++++++++++++++++++++++++++
 tests/basic/volume-scale-shd-mux.t     | 112 +++++++++++++++++++++++++
 tests/volume.rc                        |  15 ++++
 4 files changed, 372 insertions(+)
 create mode 100644 tests/basic/glusterd-restart-shd-mux.t
 create mode 100644 tests/basic/shd-mux.t
 create mode 100644 tests/basic/volume-scale-shd-mux.t

diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
new file mode 100644
index 0000000..a50af9d
--- /dev/null
+++ b/tests/basic/glusterd-restart-shd-mux.t
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=20
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 3); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Stop glusterd
+TEST pkill glusterd
+#Only glusterd is stopped, so there should still be one shd
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count equals number of ec volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count equals number of afr volumes * bricks per volume (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+#Simulate a node reboot by killing all gluster processes
+TEST pkill gluster
+#All gluster processes were killed, so the shd count should drop to zero
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Check that the thread count equals number of ec volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count equals number of afr volumes * bricks per volume (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+for i in $(seq 1 3); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}3
+
+TEST touch $M0/foo{1..100}
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/shd-mux.t b/tests/basic/shd-mux.t
new file mode 100644
index 0000000..e42a34a
--- /dev/null
+++ b/tests/basic/shd-mux.t
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=16
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Create one more volume
+TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_1
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_1 cluster.eager-lock off
+TEST $CLI volume set ${V0}_1 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_1 force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_1
+TEST $CLI volume delete ${V0}_1
+
+#Check that the stop succeeded and the volume was detached without restarting the shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the thread count drops back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+
+#Now create an ec volume and check that mux works
+TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_2
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_2 cluster.eager-lock off
+TEST $CLI volume set ${V0}_2 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_2 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_2
+TEST $CLI volume delete ${V0}_2
+
+#Check that the stop succeeded and the volume was detached without restarting the shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the ec healer thread count drops to zero
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count drops back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+for i in $(seq 1 3); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
+#Check that the thread count equals number of ec volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count equals number of afr volumes * bricks per volume (4*6=24)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+#Delete the volumes
+for i in $(seq 1 3); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+   TEST $CLI volume delete ${V0}_afr$i
+   TEST $CLI volume delete ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
new file mode 100644
index 0000000..dd9cf83
--- /dev/null
+++ b/tests/basic/volume-scale-shd-mux.t
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=6
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 2); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count equals number of ec volumes * bricks per volume (2*6=12)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count equals number of afr volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
+#Check that the afr thread count becomes number of afr volumes * bricks per volume plus 3 additional threads from the newly added bricks (3*6+3=21)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Remove the brick and check the detach is successful
+$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
+#Check that the ec thread count becomes number of ec volumes * bricks per volume plus 6 additional threads from the newly added bricks (2*6+6=18)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+#Remove the brick and check the detach is successful
+$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+
+for i in $(seq 1 2); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+
+TEST touch $M0/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+shd_pid=$(get_shd_mux_pid $V0)
+TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
+TEST $CLI volume start ${V0}_distribute1
+
+#Creating a non-replicate/non-ec volume should not have any effect on shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
+
+TEST mkdir $B0/add/
+#Now convert the distributed volume to replicate
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Scale down the volume
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST rm -rf $B0/add/
+TEST mkdir $B0/add/
+#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "__afr_shd_healer_wait"
+
+#Now convert the replica volume to distribute again and make sure the shd is now stopped
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+TEST rm -rf $B0/add/
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/volume.rc b/tests/volume.rc
index a0ea3b8..bb400cc 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -912,3 +912,18 @@ function volgen_check_ancestry {
                 echo "N"
         fi
 }
+
+function get_shd_mux_pid {
+   local volume=$1
+   pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
+   echo $pid
+}
+
+function shd_count {
+   ps aux | grep "glustershd" | grep -v grep | wc -l
+}
+
+function number_healer_threads_shd {
+   local pid=$(get_shd_mux_pid $1)
+   pstack $pid | grep $2 | wc -l
+}
-- 
1.8.3.1