14f8ab
From b7f832288d2d2e57231d90765afc049ad7cb2f9d Mon Sep 17 00:00:00 2001
14f8ab
From: Mohammed Rafi KC <rkavunga@redhat.com>
14f8ab
Date: Thu, 9 May 2019 14:07:48 +0530
14f8ab
Subject: [PATCH 165/169] tests/shd: Add test coverage for shd mux
14f8ab
14f8ab
This patch adds more test cases for shd mux test cases
14f8ab
The test case includes
14f8ab
1) Creating multiple volumes to check the attach and detach
14f8ab
   of self heal daemon requests.
14f8ab
2) Make sure the healing happens in all scenarios
14f8ab
3) After a volume detach make sure the threads of the detached
14f8ab
   volume are all cleaned.
14f8ab
4) Repeat all the above tests for ec volume
14f8ab
5) Node Reboot case
14f8ab
6) glusterd restart cases
14f8ab
7) Add-brick/remove brick
14f8ab
8) Convert a distributed volume to disperse volume
14f8ab
9) Convert a replicated volume to distributed volume
14f8ab
14f8ab
Backport of: https://review.gluster.org/#/c/glusterfs/+/22697/
14f8ab
14f8ab
>Change-Id: I7c317ef9d23a45ffd831157e4890d7c83a8fce7b
14f8ab
>fixes: bz#1708929
14f8ab
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
14f8ab
14f8ab
Change-Id: Ie732ead9413bd32b8c262303468a0720538334fb
14f8ab
BUG: 1704562
14f8ab
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
14f8ab
Reviewed-on: https://code.engineering.redhat.com/gerrit/172634
14f8ab
Tested-by: RHGS Build Bot <nigelb@redhat.com>
14f8ab
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
14f8ab
---
14f8ab
 tests/basic/glusterd-restart-shd-mux.t |  96 +++++++++++++++++++++
14f8ab
 tests/basic/shd-mux.t                  | 149 +++++++++++++++++++++++++++++++++
14f8ab
 tests/basic/volume-scale-shd-mux.t     | 112 +++++++++++++++++++++++++
14f8ab
 tests/volume.rc                        |  15 ++++
14f8ab
 4 files changed, 372 insertions(+)
14f8ab
 create mode 100644 tests/basic/glusterd-restart-shd-mux.t
14f8ab
 create mode 100644 tests/basic/shd-mux.t
14f8ab
 create mode 100644 tests/basic/volume-scale-shd-mux.t
14f8ab
14f8ab
diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
14f8ab
new file mode 100644
14f8ab
index 0000000..a50af9d
14f8ab
--- /dev/null
14f8ab
+++ b/tests/basic/glusterd-restart-shd-mux.t
14f8ab
@@ -0,0 +1,96 @@
14f8ab
+#!/bin/bash
14f8ab
+
14f8ab
+. $(dirname $0)/../include.rc
14f8ab
+. $(dirname $0)/../volume.rc
14f8ab
+
14f8ab
+cleanup;
14f8ab
+
14f8ab
+TESTS_EXPECTED_IN_LOOP=20
14f8ab
+
14f8ab
+TEST glusterd
14f8ab
+TEST pidof glusterd
14f8ab
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
14f8ab
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
14f8ab
+TEST $CLI volume set $V0 cluster.eager-lock off
14f8ab
+TEST $CLI volume set $V0 performance.flush-behind off
14f8ab
+TEST $CLI volume start $V0
14f8ab
+
14f8ab
+for i in $(seq 1 3); do
14f8ab
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
14f8ab
+   TEST $CLI volume start ${V0}_afr$i
14f8ab
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
14f8ab
+   TEST $CLI volume start ${V0}_ec$i
14f8ab
+done
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+
14f8ab
+#Stop the glusterd
14f8ab
+TEST pkill glusterd
14f8ab
+#Only stopping glusterd, so there will be one shd
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
14f8ab
+TEST glusterd
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+#Check the thread count become to number of volumes*number of ec subvolume (3*6=18)
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
14f8ab
+#Check the thread count become to number of volumes*number of afr subvolume (4*6=24)
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+shd_pid=$(get_shd_mux_pid $V0)
14f8ab
+for i in $(seq 1 3); do
14f8ab
+    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
14f8ab
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
14f8ab
+    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
14f8ab
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
14f8ab
+done
14f8ab
+
14f8ab
+#Reboot a node scenario
14f8ab
+TEST pkill gluster
14f8ab
+#Killed all gluster processes, so there will be no shd
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
14f8ab
+
14f8ab
+TEST glusterd
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+
14f8ab
+#Check the thread count become to number of volumes*number of ec subvolume (3*6=18)
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
14f8ab
+#Check the thread count become to number of volumes*number of afr subvolume (4*6=24)
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+shd_pid=$(get_shd_mux_pid $V0)
14f8ab
+for i in $(seq 1 3); do
14f8ab
+    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
14f8ab
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
14f8ab
+    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
14f8ab
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
14f8ab
+done
14f8ab
+
14f8ab
+for i in $(seq 1 3); do
14f8ab
+   TEST $CLI volume stop ${V0}_afr$i
14f8ab
+   TEST $CLI volume stop ${V0}_ec$i
14f8ab
+done
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
14f8ab
+
14f8ab
+TEST kill_brick $V0 $H0 $B0/${V0}0
14f8ab
+TEST kill_brick $V0 $H0 $B0/${V0}3
14f8ab
+
14f8ab
+TEST touch $M0/foo{1..100}
14f8ab
+
14f8ab
+EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0
14f8ab
+
14f8ab
+TEST $CLI volume start ${V0} force
14f8ab
+
14f8ab
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
14f8ab
+
14f8ab
+TEST rm -rf $M0/*
14f8ab
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
14f8ab
+
14f8ab
+
14f8ab
+TEST $CLI volume stop ${V0}
14f8ab
+TEST $CLI volume delete ${V0}
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count
14f8ab
+
14f8ab
+cleanup
14f8ab
diff --git a/tests/basic/shd-mux.t b/tests/basic/shd-mux.t
14f8ab
new file mode 100644
14f8ab
index 0000000..e42a34a
14f8ab
--- /dev/null
14f8ab
+++ b/tests/basic/shd-mux.t
14f8ab
@@ -0,0 +1,149 @@
14f8ab
+#!/bin/bash
14f8ab
+
14f8ab
+. $(dirname $0)/../include.rc
14f8ab
+. $(dirname $0)/../volume.rc
14f8ab
+
14f8ab
+cleanup;
14f8ab
+
14f8ab
+TESTS_EXPECTED_IN_LOOP=16
14f8ab
+
14f8ab
+TEST glusterd
14f8ab
+TEST pidof glusterd
14f8ab
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
14f8ab
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
14f8ab
+TEST $CLI volume set $V0 cluster.eager-lock off
14f8ab
+TEST $CLI volume set $V0 performance.flush-behind off
14f8ab
+TEST $CLI volume start $V0
14f8ab
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
14f8ab
+
14f8ab
+shd_pid=$(get_shd_mux_pid $V0)
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+#Create a one more volume
14f8ab
+TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
14f8ab
+TEST $CLI volume start ${V0}_1
14f8ab
+
14f8ab
+#Check whether the shd has multiplexed or not
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
14f8ab
+
14f8ab
+TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
14f8ab
+TEST $CLI volume set ${V0}_1 cluster.eager-lock off
14f8ab
+TEST $CLI volume set ${V0}_1 performance.flush-behind off
14f8ab
+TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
14f8ab
+
14f8ab
+TEST kill_brick $V0 $H0 $B0/${V0}0
14f8ab
+TEST kill_brick $V0 $H0 $B0/${V0}4
14f8ab
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
14f8ab
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
14f8ab
+
14f8ab
+TEST touch $M0/foo{1..100}
14f8ab
+TEST touch $M1/foo{1..100}
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
14f8ab
+
14f8ab
+TEST $CLI volume start ${V0} force
14f8ab
+TEST $CLI volume start ${V0}_1 force
14f8ab
+
14f8ab
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
14f8ab
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
14f8ab
+
14f8ab
+TEST rm -rf $M0/*
14f8ab
+TEST rm -rf $M1/*
14f8ab
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
14f8ab
+
14f8ab
+#Stop the volume
14f8ab
+TEST $CLI volume stop ${V0}_1
14f8ab
+TEST $CLI volume delete ${V0}_1
14f8ab
+
14f8ab
+#Check the stop succeeded and detached the volume without restarting it
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
14f8ab
+
14f8ab
+#Check the thread count become to earlier number after stopping
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+
14f8ab
+#Now create an ec volume and check mux works
14f8ab
+TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
14f8ab
+TEST $CLI volume start ${V0}_2
14f8ab
+
14f8ab
+#Check whether the shd has multiplexed or not
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
14f8ab
+
14f8ab
+TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
14f8ab
+TEST $CLI volume set ${V0}_2 cluster.eager-lock off
14f8ab
+TEST $CLI volume set ${V0}_2 performance.flush-behind off
14f8ab
+TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
14f8ab
+
14f8ab
+TEST kill_brick $V0 $H0 $B0/${V0}0
14f8ab
+TEST kill_brick $V0 $H0 $B0/${V0}4
14f8ab
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
14f8ab
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
14f8ab
+
14f8ab
+TEST touch $M0/foo{1..100}
14f8ab
+TEST touch $M1/foo{1..100}
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
14f8ab
+
14f8ab
+TEST $CLI volume start ${V0} force
14f8ab
+TEST $CLI volume start ${V0}_2 force
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
14f8ab
+
14f8ab
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
14f8ab
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
14f8ab
+
14f8ab
+TEST rm -rf $M0/*
14f8ab
+TEST rm -rf $M1/*
14f8ab
+
14f8ab
+
14f8ab
+#Stop the volume
14f8ab
+TEST $CLI volume stop ${V0}_2
14f8ab
+TEST $CLI volume delete ${V0}_2
14f8ab
+
14f8ab
+#Check the stop succeeded and detached the volume without restarting it
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
14f8ab
+
14f8ab
+#Check the thread count become to zero for ec related threads
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
14f8ab
+#Check the thread count become to earlier number after stopping
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+for i in $(seq 1 3); do
14f8ab
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
14f8ab
+   TEST $CLI volume start ${V0}_afr$i
14f8ab
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
14f8ab
+   TEST $CLI volume start ${V0}_ec$i
14f8ab
+done
14f8ab
+
14f8ab
+#Check the thread count become to number of volumes*number of ec subvolume (3*6=18)
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
14f8ab
+#Check the thread count become to number of volumes*number of afr subvolume (4*6=24)
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+#Delete the volumes
14f8ab
+for i in $(seq 1 3); do
14f8ab
+   TEST $CLI volume stop ${V0}_afr$i
14f8ab
+   TEST $CLI volume stop ${V0}_ec$i
14f8ab
+   TEST $CLI volume delete ${V0}_afr$i
14f8ab
+   TEST $CLI volume delete ${V0}_ec$i
14f8ab
+done
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+TEST $CLI volume stop ${V0}
14f8ab
+TEST $CLI volume delete ${V0}
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
14f8ab
+
14f8ab
+cleanup
14f8ab
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
14f8ab
new file mode 100644
14f8ab
index 0000000..dd9cf83
14f8ab
--- /dev/null
14f8ab
+++ b/tests/basic/volume-scale-shd-mux.t
14f8ab
@@ -0,0 +1,112 @@
14f8ab
+#!/bin/bash
14f8ab
+
14f8ab
+. $(dirname $0)/../include.rc
14f8ab
+. $(dirname $0)/../volume.rc
14f8ab
+
14f8ab
+cleanup;
14f8ab
+
14f8ab
+TESTS_EXPECTED_IN_LOOP=6
14f8ab
+
14f8ab
+TEST glusterd
14f8ab
+TEST pidof glusterd
14f8ab
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
14f8ab
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
14f8ab
+TEST $CLI volume set $V0 cluster.eager-lock off
14f8ab
+TEST $CLI volume set $V0 performance.flush-behind off
14f8ab
+TEST $CLI volume start $V0
14f8ab
+
14f8ab
+for i in $(seq 1 2); do
14f8ab
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
14f8ab
+   TEST $CLI volume start ${V0}_afr$i
14f8ab
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
14f8ab
+   TEST $CLI volume start ${V0}_ec$i
14f8ab
+done
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+#Check the thread count become to number of volumes*number of ec subvolume (2*6=12)
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
14f8ab
+#Check the thread count become to number of volumes*number of afr subvolume (3*6=18)
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
14f8ab
+#Check the thread count become to number of volumes*number of afr subvolume plus 3 additional threads from newly added bricks (3*6+3=21)
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+#Remove the brick and check the detach is successful
14f8ab
+$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
14f8ab
+#Check the thread count become to number of volumes*number of ec subvolume plus 6 additional threads from newly added bricks (2*6+6=18)
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
14f8ab
+
14f8ab
+#Remove the brick and check the detach is successful
14f8ab
+$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
14f8ab
+
14f8ab
+
14f8ab
+for i in $(seq 1 2); do
14f8ab
+   TEST $CLI volume stop ${V0}_afr$i
14f8ab
+   TEST $CLI volume stop ${V0}_ec$i
14f8ab
+done
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
14f8ab
+
14f8ab
+TEST kill_brick $V0 $H0 $B0/${V0}0
14f8ab
+TEST kill_brick $V0 $H0 $B0/${V0}4
14f8ab
+
14f8ab
+TEST touch $M0/foo{1..100}
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
14f8ab
+
14f8ab
+TEST $CLI volume start ${V0} force
14f8ab
+
14f8ab
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
14f8ab
+
14f8ab
+TEST rm -rf $M0/*
14f8ab
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
14f8ab
+shd_pid=$(get_shd_mux_pid $V0)
14f8ab
+TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
14f8ab
+TEST $CLI volume start ${V0}_distribute1
14f8ab
+
14f8ab
+#Creating a non-replicate/non-ec volume should not have any effect in shd
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
14f8ab
+
14f8ab
+TEST mkdir $B0/add/
14f8ab
+#Now convert the distributed volume to replicate
14f8ab
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+#scale down the volume
14f8ab
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+TEST $CLI volume stop ${V0}
14f8ab
+TEST $CLI volume delete ${V0}
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
14f8ab
+
14f8ab
+TEST rm -rf $B0/add/
14f8ab
+TEST mkdir $B0/add/
14f8ab
+#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
14f8ab
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
14f8ab
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
14f8ab
+EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "__afr_shd_healer_wait"
14f8ab
+
14f8ab
+#Now convert the replica volume to distribute again and make sure the shd is now stopped
14f8ab
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
14f8ab
+TEST rm -rf $B0/add/
14f8ab
+
14f8ab
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
14f8ab
+
14f8ab
+cleanup
14f8ab
diff --git a/tests/volume.rc b/tests/volume.rc
14f8ab
index a0ea3b8..bb400cc 100644
14f8ab
--- a/tests/volume.rc
14f8ab
+++ b/tests/volume.rc
14f8ab
@@ -912,3 +912,18 @@ function volgen_check_ancestry {
14f8ab
                 echo "N"
14f8ab
         fi
14f8ab
 }
14f8ab
+
14f8ab
+function get_shd_mux_pid {
14f8ab
+   local volume=$1
14f8ab
+   pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
14f8ab
+   echo $pid
14f8ab
+}
14f8ab
+
14f8ab
+function shd_count {
14f8ab
+   ps aux | grep "glustershd" | grep -v grep | wc -l
14f8ab
+}
14f8ab
+
14f8ab
+function number_healer_threads_shd {
14f8ab
+   local pid=$(get_shd_mux_pid $1)
14f8ab
+   pstack $pid | grep $2 | wc -l
14f8ab
+}
14f8ab
-- 
14f8ab
1.8.3.1
14f8ab