From b7f832288d2d2e57231d90765afc049ad7cb2f9d Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 9 May 2019 14:07:48 +0530
Subject: [PATCH 165/169] tests/shd: Add test coverage for shd mux

This patch adds more test cases for the shd mux feature.
The test cases include:
1) Creating multiple volumes to check the attach and detach
   of self-heal daemon requests (sketched below).
2) Make sure healing happens in all scenarios.
3) After a volume detach, make sure the threads of the detached
   volume are all cleaned up.
4) Repeat all of the above tests for an ec volume.
5) Node reboot case.
6) glusterd restart cases.
7) Add-brick/remove-brick.
8) Convert a distributed volume to a disperse volume.
9) Convert a replicated volume to a distributed volume.

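As a quick illustration of the attach checks in (1), the tests assert
multiplexing with a pattern like the following sketch (illustrative, not
part of the patch; get_shd_mux_pid and shd_count are the helpers this
patch adds to tests/volume.rc, and ${V0}_new is a hypothetical volume
name):

  #Record the pid of the existing muxed shd
  shd_pid=$(get_shd_mux_pid $V0)
  #Starting another replicate volume should attach to the same daemon
  TEST $CLI volume create ${V0}_new replica 3 $H0:$B0/${V0}_new{0,1,2}
  TEST $CLI volume start ${V0}_new
  #Still exactly one glustershd process...
  EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
  #...and the new volume reports the same shd pid
  EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_new
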
Backport of: https://review.gluster.org/#/c/glusterfs/+/22697/

>Change-Id: I7c317ef9d23a45ffd831157e4890d7c83a8fce7b
>fixes: bz#1708929
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: Ie732ead9413bd32b8c262303468a0720538334fb
BUG: 1704562
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/172634
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
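Notes (below the fold, so not applied by git am): the new tests can be
run individually with the prove-based runner used by the GlusterFS test
suite; a typical invocation from the source tree root after a build
would be, for example:

  prove -vf tests/basic/shd-mux.t
  prove -vf tests/basic/glusterd-restart-shd-mux.t
  prove -vf tests/basic/volume-scale-shd-mux.t
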
 tests/basic/glusterd-restart-shd-mux.t |  96 +++++++++++++++++++++
 tests/basic/shd-mux.t                  | 149 +++++++++++++++++++++++++++++++++
 tests/basic/volume-scale-shd-mux.t     | 112 +++++++++++++++++++++++++
 tests/volume.rc                        |  15 ++++
 4 files changed, 372 insertions(+)
 create mode 100644 tests/basic/glusterd-restart-shd-mux.t
 create mode 100644 tests/basic/shd-mux.t
 create mode 100644 tests/basic/volume-scale-shd-mux.t

diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
new file mode 100644
index 0000000..a50af9d
--- /dev/null
+++ b/tests/basic/glusterd-restart-shd-mux.t
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=20
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 3); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Stop glusterd
+TEST pkill glusterd
+#Only glusterd is stopped, so the one shd should keep running
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count matches number of ec volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count matches number of afr volumes * bricks per volume (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+#Reboot a node scenario
+TEST pkill gluster
+#All gluster processes are killed, so there should be no shd
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Check that the thread count matches number of ec volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count matches number of afr volumes * bricks per volume (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+    afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+    ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+for i in $(seq 1 3); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}3
+
+TEST touch $M0/foo{1..100}
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/shd-mux.t b/tests/basic/shd-mux.t
new file mode 100644
index 0000000..e42a34a
--- /dev/null
+++ b/tests/basic/shd-mux.t
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=16
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Create one more volume
+TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_1
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_1 cluster.eager-lock off
+TEST $CLI volume set ${V0}_1 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_1 force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_1
+TEST $CLI volume delete ${V0}_1
+
+#Check that the stop succeeded and the volume was detached without restarting the shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the thread count goes back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+
+#Now create an ec volume and check that mux works
+TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_2
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_2 cluster.eager-lock off
+TEST $CLI volume set ${V0}_2 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_2 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_2
+TEST $CLI volume delete ${V0}_2
+
+#Check that the stop succeeded and the volume was detached without restarting the shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the ec healer thread count drops to zero
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count goes back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+for i in $(seq 1 3); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
+#Check that the thread count matches number of ec volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count matches number of afr volumes * bricks per volume (4*6=24)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+#Delete the volumes
+for i in $(seq 1 3); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+   TEST $CLI volume delete ${V0}_afr$i
+   TEST $CLI volume delete ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
new file mode 100644
index 0000000..dd9cf83
--- /dev/null
+++ b/tests/basic/volume-scale-shd-mux.t
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=6
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 2); do
+   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_afr$i
+   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+   TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count matches number of ec volumes * bricks per volume (2*6=12)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+#Check that the thread count matches number of afr volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
+#Check that the thread count matches the afr total plus 3 additional threads from the newly added bricks (3*6+3=21)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Remove the brick and check the detach is successful
+$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
+#Check that the thread count matches the ec total plus 6 additional threads from the newly added bricks (2*6+6=18)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+#Remove the brick and check the detach is successful
+$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
+
+
+for i in $(seq 1 2); do
+   TEST $CLI volume stop ${V0}_afr$i
+   TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+
+TEST touch $M0/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+shd_pid=$(get_shd_mux_pid $V0)
+TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
+TEST $CLI volume start ${V0}_distribute1
+
+#Creating a non-replicate/non-ec volume should not have any effect on the shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
+
+TEST mkdir $B0/add/
+#Now convert the distributed volume to replicate
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+#Scale down the volume
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST rm -rf $B0/add/
+TEST mkdir $B0/add/
+#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "__afr_shd_healer_wait"
+
+#Now convert the replica volume to distribute again and make sure the shd is now stopped
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+TEST rm -rf $B0/add/
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/volume.rc b/tests/volume.rc
index a0ea3b8..bb400cc 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -912,3 +912,18 @@ function volgen_check_ancestry {
                 echo "N"
         fi
 }
+
+function get_shd_mux_pid {
+   local volume=$1
+   pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
+   echo $pid
+}
+
+function shd_count {
+   ps aux | grep "glustershd" | grep -v grep | wc -l
+}
+
+function number_healer_threads_shd {
+   local pid=$(get_shd_mux_pid $1)
+   pstack $pid | grep $2 | wc -l
+}
-- 
1.8.3.1