From b7f832288d2d2e57231d90765afc049ad7cb2f9d Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 9 May 2019 14:07:48 +0530
Subject: [PATCH 165/169] tests/shd: Add test coverage for shd mux

This patch adds more test cases for shd mux.
The test cases include:
1) Creating multiple volumes to check the attach and detach
   of self-heal daemon requests.
2) Making sure that healing happens in all scenarios.
3) After a volume detach, making sure that all threads of the
   detached volume are cleaned up.
4) Repeating all the above tests for an ec volume.
5) Node reboot case.
6) glusterd restart cases.
7) Add-brick/remove-brick.
8) Converting a distributed volume to a disperse volume.
9) Converting a replicated volume to a distributed volume.

Backport of: https://review.gluster.org/#/c/glusterfs/+/22697/

>Change-Id: I7c317ef9d23a45ffd831157e4890d7c83a8fce7b
>fixes: bz#1708929
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: Ie732ead9413bd32b8c262303468a0720538334fb
BUG: 1704562
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/172634
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 tests/basic/glusterd-restart-shd-mux.t |  99 ++++++++++++++++++++++
 tests/basic/shd-mux.t                  | 152 +++++++++++++++++++++++++++++++++
 tests/basic/volume-scale-shd-mux.t     | 114 ++++++++++++++++++++++++++
 tests/volume.rc                        |  18 +++++
 4 files changed, 383 insertions(+)
 create mode 100644 tests/basic/glusterd-restart-shd-mux.t
 create mode 100644 tests/basic/shd-mux.t
 create mode 100644 tests/basic/volume-scale-shd-mux.t

diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
new file mode 100644
index 0000000..a50af9d
--- /dev/null
+++ b/tests/basic/glusterd-restart-shd-mux.t
@@ -0,0 +1,99 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=20
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 3); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
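+#All the volumes above should be attached to a single multiplexed shd process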
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Stop glusterd
+TEST pkill glusterd
+#Only glusterd is stopped, so the shd keeps running
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count equals number of volumes * number of ec subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count equals number of volumes * number of afr subvolumes (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
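+#Each volume's shd pidfile should record the pid of the shared shd process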
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+#Reboot a node scenario
+TEST pkill gluster
+#All gluster processes including the shd were killed, so the count drops to zero
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Check that the thread count equals number of volumes * number of ec subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count equals number of volumes * number of afr subvolumes (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+for i in $(seq 1 3); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}3
+
+TEST touch $M0/foo{1..100}
+
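+#Writes landed while two bricks were down, so heals are pending on both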
+EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/shd-mux.t b/tests/basic/shd-mux.t
new file mode 100644
index 0000000..e42a34a
--- /dev/null
+++ b/tests/basic/shd-mux.t
@@ -0,0 +1,152 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=16
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
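+#A replica 3 volume with six bricks should start six afr healer threads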
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Create one more volume
+TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_1
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_1 cluster.eager-lock off
+TEST $CLI volume set ${V0}_1 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
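+#Each volume had two bricks down during the writes, so both report pending heals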
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_1 force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_1
+TEST $CLI volume delete ${V0}_1
+
+#Check that the stop succeeded and detached the volume without restarting the shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the thread count goes back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+
+#Now create an ec volume and check that mux works
+TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_2
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_2 cluster.eager-lock off
+TEST $CLI volume set ${V0}_2 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_2 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_2
+TEST $CLI volume delete ${V0}_2
+
+#Check that the stop succeeded and detached the volume without restarting the shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the ec-related thread count drops to zero
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count goes back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
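+#Scale up: three more afr volumes and three more ec volumes should all attach to the same shd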
+for i in $(seq 1 3); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
+#Check that the thread count equals number of volumes * number of ec subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count equals number of volumes * number of afr subvolumes (4*6=24)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+#Delete the volumes
+for i in $(seq 1 3); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+   TEST $CLI volume delete ${V0}_afr$i
+   TEST $CLI volume delete ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
new file mode 100644
index 0000000..dd9cf83
--- /dev/null
+++ b/tests/basic/volume-scale-shd-mux.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=6
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 2); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
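+#All five volumes should share a single multiplexed shd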
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count equals number of volumes * number of ec subvolumes (2*6=12)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count equals number of volumes * number of afr subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
+#Check that the thread count equals number of volumes * number of afr subvolumes plus 3 additional threads from the newly added bricks (3*6+3=21)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Remove the bricks and check that the detach is successful
+$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
+#Check that the thread count equals number of volumes * number of ec subvolumes plus 6 additional threads from the newly added bricks (2*6+6=18)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+#Remove the bricks and check that the detach is successful
+$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+
+for i in $(seq 1 2); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+
+TEST touch $M0/foo{1..100}
+
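+#Writes landed while two bricks were down, so heals are pending on both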
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+shd_pid=$(get_shd_mux_pid $V0)
+TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
+TEST $CLI volume start ${V0}_distribute1
+
+#Creating a non-replicate/non-ec volume should not have any effect on the shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
+
+TEST mkdir $B0/add/
+#Now convert the distributed volume to replicate
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Scale down the volume
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST rm -rf $B0/add/
+TEST mkdir $B0/add/
+#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "__afr_shd_healer_wait"
+
+#Now convert the replica volume to distribute again and make sure the shd is stopped
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+TEST rm -rf $B0/add/
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/volume.rc b/tests/volume.rc
index a0ea3b8..bb400cc 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -912,3 +912,21 @@ function volgen_check_ancestry {
                 echo "N"
17b94a
         fi
17b94a
 }
17b94a
+
17b94a
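+#Print the pid of the (possibly multiplexed) shd serving the given volume,
+#as reported by 'gluster volume status'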
+function get_shd_mux_pid {
+   local volume=$1
+   pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
+   echo $pid
+}
+
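+#Count the running glustershd processes; with shd mux there should be at most one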
+function shd_count {
+   ps aux | grep "glustershd" | grep -v grep | wc -l
+}
+
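+#Count the shd healer threads waiting in the given function, using pstack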
+function number_healer_threads_shd {
+   local pid=$(get_shd_mux_pid $1)
+   pstack $pid | grep $2 | wc -l
+}
-- 
1.8.3.1