From b7f832288d2d2e57231d90765afc049ad7cb2f9d Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 9 May 2019 14:07:48 +0530
Subject: [PATCH 165/169] tests/shd: Add test coverage for shd mux

This patch adds more test cases for shd mux.
The test cases include:
1) Creating multiple volumes to check the attach and detach
   of self-heal daemon requests.
2) Making sure healing happens in all scenarios.
3) After a volume detach, making sure all threads of the detached
   volume are cleaned up.
4) Repeating all the above tests for an ec volume.
5) Node reboot case.
6) glusterd restart cases.
7) Add-brick/remove-brick.
8) Converting a distributed volume to a disperse volume.
9) Converting a replicated volume to a distributed volume.
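
The thread-count assertions in these tests all follow the same arithmetic:
with shd mux, a single glustershd process is expected to host one healer
thread per brick of every attached replicate/disperse volume. A minimal
sketch of such a check, using the helpers this patch adds to
tests/volume.rc (counts shown assume three 1x(4+2) disperse volumes
attached to the one shd):

    # exactly one glustershd process, hosting 3 volumes * 6 bricks
    # = 18 ec healer threads
    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$"  shd_count
    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"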

Backport of: https://review.gluster.org/#/c/glusterfs/+/22697/

>Change-Id: I7c317ef9d23a45ffd831157e4890d7c83a8fce7b
>fixes: bz#1708929
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: Ie732ead9413bd32b8c262303468a0720538334fb
BUG: 1704562
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/172634
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 tests/basic/glusterd-restart-shd-mux.t |  96 +++++++++++++++++++++
 tests/basic/shd-mux.t                  | 149 +++++++++++++++++++++++++++++++++
 tests/basic/volume-scale-shd-mux.t     | 112 +++++++++++++++++++++++++
 tests/volume.rc                        |  15 ++++
 4 files changed, 372 insertions(+)
 create mode 100644 tests/basic/glusterd-restart-shd-mux.t
 create mode 100644 tests/basic/shd-mux.t
 create mode 100644 tests/basic/volume-scale-shd-mux.t
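
The three helpers added to tests/volume.rc (get_shd_mux_pid, shd_count and
number_healer_threads_shd) are what the new .t files build their assertions
on. A rough usage sketch, as exercised by the tests below:

    shd_pid=$(get_shd_mux_pid $V0)   # PID parsed from 'gluster volume status <vol> shd'
    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count                         # a single glustershd process
    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1  # other volumes report the same pid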

diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
new file mode 100644
index 0000000..a50af9d
--- /dev/null
+++ b/tests/basic/glusterd-restart-shd-mux.t
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=20
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 3); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Stop glusterd
+TEST pkill glusterd
+#Only glusterd is stopped, so the muxed shd should still be running
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count becomes number of ec volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count becomes number of afr volumes * bricks per volume (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+#Reboot a node scenario
+TEST pkill gluster
+#All gluster processes were killed, so no shd should be running
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Check that the thread count becomes number of ec volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count becomes number of afr volumes * bricks per volume (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+for i in $(seq 1 3); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}3
+
+TEST touch $M0/foo{1..100}
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/shd-mux.t b/tests/basic/shd-mux.t
new file mode 100644
index 0000000..e42a34a
--- /dev/null
+++ b/tests/basic/shd-mux.t
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=16
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Create one more volume
+TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_1
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_1 cluster.eager-lock off
+TEST $CLI volume set ${V0}_1 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_1 force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_1
+TEST $CLI volume delete ${V0}_1
+
+#Check that the stop succeeded and detached the volume without restarting shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the thread count goes back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+
+#Now create an ec volume and check that mux works
+TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_2
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_2 cluster.eager-lock off
+TEST $CLI volume set ${V0}_2 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_2 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_2
+TEST $CLI volume delete ${V0}_2
+
+#Check that the stop succeeded and detached the volume without restarting shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the ec-related thread count drops to zero
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count goes back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+for i in $(seq 1 3); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
+#Check that the thread count becomes number of ec volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count becomes number of afr volumes * bricks per volume (4*6=24)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+#Delete the volumes
+for i in $(seq 1 3); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+   TEST $CLI volume delete ${V0}_afr$i
+   TEST $CLI volume delete ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
new file mode 100644
index 0000000..dd9cf83
--- /dev/null
+++ b/tests/basic/volume-scale-shd-mux.t
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=6
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 2); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count becomes number of ec volumes * bricks per volume (2*6=12)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count becomes number of afr volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
+#Check that the thread count becomes number of afr volumes * bricks per volume plus 3 additional threads from the newly added bricks (3*6+3=21)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Remove the bricks and check that the detach is successful
+$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
+#Check that the thread count becomes number of ec volumes * bricks per volume plus 6 additional threads from the newly added bricks (2*6+6=18)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+#Remove the bricks and check that the detach is successful
+$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+
+for i in $(seq 1 2); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+
+TEST touch $M0/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+shd_pid=$(get_shd_mux_pid $V0)
+TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
+TEST $CLI volume start ${V0}_distribute1
+
+#Creating a non-replicate/non-ec volume should not have any effect on shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
+
+TEST mkdir $B0/add/
+#Now convert the distributed volume to replicate
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Scale down the volume
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST rm -rf $B0/add/
+TEST mkdir $B0/add/
+#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "__afr_shd_healer_wait"
+
+#Now convert the replica volume to distribute again and make sure the shd is now stopped
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+TEST rm -rf $B0/add/
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/volume.rc b/tests/volume.rc
index a0ea3b8..bb400cc 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -912,3 +912,18 @@ function volgen_check_ancestry {
                 echo "N"
         fi
 }
+
+function get_shd_mux_pid {
+   local volume=$1
+   pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
+   echo $pid
+}
+
+function shd_count {
+   ps aux | grep "glustershd" | grep -v grep | wc -l
+}
+
+function number_healer_threads_shd {
+   local pid=$(get_shd_mux_pid $1)
+   pstack $pid | grep $2 | wc -l
+}
-- 
1.8.3.1