From 836b89d895539c2c214d145fcd74b25abd598530 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K <pkarampu@redhat.com>
Date: Tue, 30 Jun 2015 23:01:36 +0530
Subject: [PATCH 200/200] cluster/ec: Make background healing optional behavior

        Backport of http://review.gluster.com/11473

Provide options to control the number of active background heals and the
length of the heal-wait queue. Setting background-heals to 0 disables
background healing.
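
For example (values are illustrative; <volname> is a placeholder):

  gluster volume set <volname> disperse.background-heals 16
  gluster volume set <volname> disperse.heal-wait-qlength 256
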
Change-Id: Idc2419219d881f47e7d2e9bbc1dcdd999b372033
BUG: 1230612
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/52256
---
 libglusterfs/src/globals.h                         |   12 +++--
 tests/basic/afr/arbiter.t                          |    2 +-
 tests/basic/afr/client-side-heal.t                 |   10 ++--
 tests/basic/afr/replace-brick-self-heal.t          |    2 +-
 tests/basic/afr/root-squash-self-heal.t            |    2 +-
 tests/basic/afr/self-heal.t                        |   16 +++---
 tests/basic/afr/self-heald.t                       |   24 +++++-----
 tests/basic/afr/sparse-file-self-heal.t            |    4 +-
 tests/basic/afr/split-brain-resolution.t           |    4 +-
 tests/bugs/glusterfs/bug-861015-index.t            |    2 +-
 tests/bugs/quota/afr-quota-xattr-mdata-heal.t      |    2 +-
 tests/bugs/replicate/bug-1180545.t                 |    2 +-
 .../bug-1190069-afr-stale-index-entries.t          |    2 +-
 tests/bugs/replicate/bug-918437-sh-mtime.t         |    2 +-
 tests/volume.rc                                    |    2 +-
 xlators/cluster/ec/src/ec-heal.c                   |   14 ++---
 xlators/cluster/ec/src/ec.c                        |   50 ++++++++++++++++++-
 xlators/cluster/ec/src/ec.h                        |    2 +
 xlators/mgmt/glusterd/src/glusterd-volume-set.c    |    8 +++
 19 files changed, 109 insertions(+), 53 deletions(-)

diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h
index 07185a8..9aca3c3 100644
--- a/libglusterfs/src/globals.h
+++ b/libglusterfs/src/globals.h
@@ -24,7 +24,7 @@
  * RHS-2.1 u5   - 20105
  * RHS-3.0      - 30000
  * RHS-3.0.4    - 30004
- * RHGS-3.1     - 30702
+ * RHGS-3.1     - 30703
  *
  *
  * NOTE:
@@ -42,9 +42,11 @@
  */
 #define GD_OP_VERSION_MIN  1 /* MIN is the fresh start op-version, mostly
                                 should not change */
-#define GD_OP_VERSION_MAX  30702 /* MAX VERSION is the maximum count in VME
-                                    table, should keep changing with
-                                    introduction of newer versions */
+#define GD_OP_VERSION_MAX  GD_OP_VERSION_3_7_3 /* MAX VERSION is the maximum
+                                                  count in VME table, should
+                                                  keep changing with
+                                                  introduction of newer
+                                                  versions */
 
 #define GD_OP_VERSION_RHS_3_0    30000 /* Op-Version of RHS 3.0 */
 
@@ -60,6 +62,8 @@
 
 #define GD_OP_VERSION_3_7_2    30702 /* Op-version for GlusterFS 3.7.2 */
 
+#define GD_OP_VERSION_3_7_3    30703 /* Op-version for GlusterFS 3.7.3 */
+
 #include "xlator.h"
 
 /* THIS */
diff --git a/tests/basic/afr/arbiter.t b/tests/basic/afr/arbiter.t
index 8a983fb..f06fdb1 100644
--- a/tests/basic/afr/arbiter.t
+++ b/tests/basic/afr/arbiter.t
@@ -60,7 +60,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
 TEST $CLI volume heal $V0
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
 
 # I/O can resume again.
 TEST cat $M0/file
diff --git a/tests/basic/afr/client-side-heal.t b/tests/basic/afr/client-side-heal.t
index c9b3e35..18f7626 100644
--- a/tests/basic/afr/client-side-heal.t
+++ b/tests/basic/afr/client-side-heal.t
@@ -33,7 +33,7 @@ TEST chmod +x $M0/mdatafile
 #pending entry heal. Also causes pending metadata/data heals on file{1..5}
 TEST touch $M0/dir/file{1..5}
 
-EXPECT 8 afr_get_pending_heal_count $V0
+EXPECT 8 get_pending_heal_count $V0
 
 #After brick comes back up, access from client should not trigger heals
 TEST $CLI volume start $V0 force
@@ -54,7 +54,7 @@ TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
 TEST ls $M0/dir
 
 #No heal must have happened
-EXPECT 8 afr_get_pending_heal_count $V0
+EXPECT 8 get_pending_heal_count $V0
 
 #Enable heal client side heal options and trigger heals
 TEST $CLI volume set $V0 cluster.data-self-heal on
@@ -63,7 +63,7 @@ TEST $CLI volume set $V0 cluster.entry-self-heal on
 
 #Metadata heal is triggered by lookup without need for inode refresh.
 TEST ls $M0/mdatafile
-EXPECT 7 afr_get_pending_heal_count $V0
+EXPECT 7 get_pending_heal_count $V0
 
 #Inode refresh must trigger data and entry heals.
 #To trigger inode refresh for sure, the volume is unmounted and mounted each time.
@@ -74,7 +74,7 @@ TEST cat $M0/datafile
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
 TEST ls $M0/dir
-EXPECT 5 afr_get_pending_heal_count $V0
+EXPECT 5 get_pending_heal_count $V0
 
 TEST cat  $M0/dir/file1
 TEST cat  $M0/dir/file2
@@ -82,5 +82,5 @@ TEST cat  $M0/dir/file3
 TEST cat  $M0/dir/file4
 TEST cat  $M0/dir/file5
 
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
 cleanup;
diff --git a/tests/basic/afr/replace-brick-self-heal.t b/tests/basic/afr/replace-brick-self-heal.t
index 8ced7df..fef671a 100644
--- a/tests/basic/afr/replace-brick-self-heal.t
+++ b/tests/basic/afr/replace-brick-self-heal.t
@@ -43,7 +43,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
 
 # Wait for heal to complete
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 # Check if entry-heal has happened
 TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1_new | sort)
diff --git a/tests/basic/afr/root-squash-self-heal.t b/tests/basic/afr/root-squash-self-heal.t
index fa9a163..8337432 100644
--- a/tests/basic/afr/root-squash-self-heal.t
+++ b/tests/basic/afr/root-squash-self-heal.t
@@ -20,6 +20,6 @@ echo abc > $M0/a
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
 find $M0 | xargs stat > /dev/null
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 cleanup
diff --git a/tests/basic/afr/self-heal.t b/tests/basic/afr/self-heal.t
index dbd8961..e1ac17c 100644
--- a/tests/basic/afr/self-heal.t
+++ b/tests/basic/afr/self-heal.t
@@ -53,7 +53,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check all files created/deleted on brick1 are also replicated on brick 0
 #(i.e. no reverse heal has happened)
@@ -82,7 +82,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 TEST test -d $B0/brick0/file
@@ -105,7 +105,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 EXPECT "777" stat -c %a $B0/brick0/file
@@ -129,7 +129,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 EXPECT "$NEW_UID$NEW_GID" stat -c %u%g $B0/brick0/file
@@ -160,7 +160,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 EXPECT 0 stat -c %s $B0/brick1/file
@@ -183,7 +183,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 EXPECT "$GFID" gf_get_gfid_xattr $B0/brick0/file
@@ -207,7 +207,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 TEST test -f $B0/brick0/hard_link_to_file
@@ -233,7 +233,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 TEST diff <(echo "user.myattr_1=\"My_attribute_1_modified\"") <(getfattr -n user.myattr_1 $B0/brick1/file|grep user.myattr_1)
 TEST diff <(echo "user.myattr_3=\"My_attribute_3\"") <(getfattr -n user.myattr_3 $B0/brick1/file|grep user.myattr_3)
diff --git a/tests/basic/afr/self-heald.t b/tests/basic/afr/self-heald.t
index ee0afaf..b8bee5c 100644
--- a/tests/basic/afr/self-heald.t
+++ b/tests/basic/afr/self-heald.t
@@ -68,7 +68,7 @@ done
 HEAL_FILES=$(($HEAL_FILES + 3))
 
 cd ~
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
 
 #When bricks are down, it says Transport End point Not connected for them
 EXPECT "3" disconnected_brick_count $V0
@@ -78,12 +78,12 @@ EXPECT "3" disconnected_brick_count $V0
 #replica pair.
 for i in {11..20}; do echo abc > $M0/$i; done
 HEAL_FILES=$(($HEAL_FILES + 10)) #count extra 10 files
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
 #delete the files now, so that stale indices will remain.
 for i in {11..20}; do rm -f $M0/$i; done
 #After deleting files they should not appear in heal info
 HEAL_FILES=$(($HEAL_FILES - 10))
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
 
 
 TEST ! $CLI volume heal $V0
@@ -99,10 +99,10 @@ check_bricks_up $V0
 TEST $CLI volume heal $V0
 sleep 5 #Until the heal-statistics command implementation
 #check that this heals the contents partially
-TEST [ $HEAL_FILES -gt $(afr_get_pending_heal_count $V0) ]
+TEST [ $HEAL_FILES -gt $(get_pending_heal_count $V0) ]
 
 TEST $CLI volume heal $V0 full
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
 
 #Test that ongoing IO is not considered as Pending heal
 (dd if=/dev/zero of=$M0/file1 bs=1k 2>/dev/null 1>/dev/null)&
@@ -115,7 +115,7 @@ back_pid3=$!;
 back_pid4=$!;
 (dd if=/dev/zero of=$M0/file5 bs=1k 2>/dev/null 1>/dev/null)&
 back_pid5=$!;
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
 kill -SIGTERM $back_pid1;
 kill -SIGTERM $back_pid2;
 kill -SIGTERM $back_pid3;
@@ -132,13 +132,13 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
 EXPECT "off" volume_option $V0 cluster.data-self-heal
 kill_multiple_bricks $V0 $H0 $B0
 echo abc > $M0/f
-EXPECT 1 afr_get_pending_heal_count $V0
+EXPECT 1 get_pending_heal_count $V0
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN 20 "Y" glustershd_up_status
 check_bricks_up $V0
 
 TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
 TEST $CLI volume set $V0 cluster.data-self-heal on
 
 #METADATA
@@ -147,13 +147,13 @@ EXPECT "off" volume_option $V0 cluster.metadata-self-heal
 kill_multiple_bricks $V0 $H0 $B0
 
 TEST chmod 777 $M0/f
-EXPECT 1 afr_get_pending_heal_count $V0
+EXPECT 1 get_pending_heal_count $V0
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN 20 "Y" glustershd_up_status
 check_bricks_up $V0
 
 TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
 TEST $CLI volume set $V0 cluster.metadata-self-heal on
 
 #ENTRY
@@ -163,13 +163,13 @@ kill_multiple_bricks $V0 $H0 $B0
 TEST touch $M0/d/a
 # 4 if mtime/ctime is modified for d in bricks without a
 # 2 otherwise
-PENDING=$( afr_get_pending_heal_count $V0 )
+PENDING=$( get_pending_heal_count $V0 )
 TEST test $PENDING -eq 2 -o $PENDING -eq 4
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN 20 "Y" glustershd_up_status
 check_bricks_up $V0
 TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
 TEST $CLI volume set $V0 cluster.entry-self-heal on
 
 #Negative test cases
diff --git a/tests/basic/afr/sparse-file-self-heal.t b/tests/basic/afr/sparse-file-self-heal.t
index 1bc915e..4101e6d 100644
--- a/tests/basic/afr/sparse-file-self-heal.t
+++ b/tests/basic/afr/sparse-file-self-heal.t
@@ -49,7 +49,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST gluster volume heal $V0 full
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 big_md5sum_0=$(md5sum $B0/${V0}0/big | awk '{print $1}')
 small_md5sum_0=$(md5sum $B0/${V0}0/small | awk '{print $1}')
@@ -114,7 +114,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST gluster volume heal $V0 full
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 big_md5sum_0=$(md5sum $B0/${V0}0/big | awk '{print $1}')
 small_md5sum_0=$(md5sum $B0/${V0}0/small | awk '{print $1}')
diff --git a/tests/basic/afr/split-brain-resolution.t b/tests/basic/afr/split-brain-resolution.t
index fa1342e..84b2cc8 100644
--- a/tests/basic/afr/split-brain-resolution.t
+++ b/tests/basic/afr/split-brain-resolution.t
@@ -38,7 +38,7 @@ TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
 
-EXPECT 4 afr_get_pending_heal_count $V0
+EXPECT 4 get_pending_heal_count $V0
 
 TEST ! cat $M0/data-split-brain.txt
 TEST ! getfattr -n user.test $M0/metadata-split-brain.txt
@@ -82,6 +82,6 @@ TEST setfattr -n replica.split-brain-heal-finalize -v $V0-client-1 $M0/data-spli
 EXPECT "brick0" get_text_xattr user.test $M0/metadata-split-brain.txt
 EXPECT "brick1_alive" cat $M0/data-split-brain.txt
 
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
 
 cleanup;
diff --git a/tests/bugs/glusterfs/bug-861015-index.t b/tests/bugs/glusterfs/bug-861015-index.t
index 05f3e8b..74ffc45 100644
--- a/tests/bugs/glusterfs/bug-861015-index.t
+++ b/tests/bugs/glusterfs/bug-861015-index.t
@@ -25,7 +25,7 @@ done
 HEAL_FILES=$(($HEAL_FILES+3)) #count brick root distribute-subvol num of times
 
 cd ~
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
 TEST rm -f $M0/*
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 TEST $CLI volume heal $V0 info
diff --git a/tests/bugs/quota/afr-quota-xattr-mdata-heal.t b/tests/bugs/quota/afr-quota-xattr-mdata-heal.t
index 486b038..6aa2d83 100644
--- a/tests/bugs/quota/afr-quota-xattr-mdata-heal.t
+++ b/tests/bugs/quota/afr-quota-xattr-mdata-heal.t
@@ -55,7 +55,7 @@ TEST $CLI volume set $V0 cluster.self-heal-daemon on
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #Check external xattrs match
 EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo)
diff --git a/tests/bugs/replicate/bug-1180545.t b/tests/bugs/replicate/bug-1180545.t
index 748d5de..e953162 100644
--- a/tests/bugs/replicate/bug-1180545.t
+++ b/tests/bugs/replicate/bug-1180545.t
@@ -43,6 +43,6 @@ TEST $CLI volume heal $V0
 EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/brick0
 EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/brick1
 #Two entries for DIR and two for FILE
-EXPECT_WITHIN $HEAL_TIMEOUT "4" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
 TEST diff <(ls $B0/brick0/DIR) <(ls $B0/brick1/DIR)
 cleanup
diff --git a/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t b/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t
index a476563..2da960f 100644
--- a/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t
+++ b/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t
@@ -50,7 +50,7 @@ TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0
 TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/mdatafile
 
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 EXPECT_WITHIN $HEAL_TIMEOUT '0' echo $(count_sh_entries $B0/$V0"0")
 EXPECT_WITHIN $HEAL_TIMEOUT '0' echo $(count_sh_entries $B0/$V0"1")
 
diff --git a/tests/bugs/replicate/bug-918437-sh-mtime.t b/tests/bugs/replicate/bug-918437-sh-mtime.t
index 04ac02f..6a194b1 100644
--- a/tests/bugs/replicate/bug-918437-sh-mtime.t
+++ b/tests/bugs/replicate/bug-918437-sh-mtime.t
@@ -54,7 +54,7 @@ TEST stat $M0/b
 TEST gluster volume heal $V0 full
 EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick01/a
 EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick02/b
-EXPECT_WITHIN $HEAL_TIMEOUT 0 afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
 
 size=`stat -c '%s' /etc/passwd`
 EXPECT $size stat -c '%s' $B0/gfs0/brick01/a
diff --git a/tests/volume.rc b/tests/volume.rc
index 47ca722..2d8dd72 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -216,7 +216,7 @@ function afr_get_changelog_xattr {
         echo $xval
 }
 
-function afr_get_pending_heal_count {
+function get_pending_heal_count {
         local vol=$1
         gluster volume heal $vol info | grep "Number of entries" | awk '{ sum+=$4} END {print sum}'
 }
diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
index 3168535..6ee1f9e 100644
--- a/xlators/cluster/ec/src/ec-heal.c
+++ b/xlators/cluster/ec/src/ec-heal.c
@@ -26,9 +26,6 @@
 #include "syncop-utils.h"
 #include "cluster-syncop.h"
 
-#define EC_MAX_BACKGROUND_HEALS 8
-#define EC_MAX_HEAL_WAITERS 128
-
 #define alloca0(size) ({void *__ptr; __ptr = alloca(size); memset(__ptr, 0, size); __ptr; })
 #define EC_COUNT(array, max) ({int __i; int __res = 0; for (__i = 0; __i < max; __i++) if (array[__i]) __res++; __res; })
 #define EC_INTERSECT(dst, src1, src2, max) ({int __i; for (__i = 0; __i < max; __i++) dst[__i] = src1[__i] && src2[__i]; })
@@ -2329,10 +2326,9 @@ __ec_dequeue_heals (ec_t *ec)
         if (list_empty (&ec->heal_waiting))
                 goto none;
 
-        if (ec->healers == EC_MAX_BACKGROUND_HEALS)
+        if ((ec->background_heals > 0) && (ec->healers >= ec->background_heals))
                 goto none;
 
-        GF_ASSERT (ec->healers < EC_MAX_BACKGROUND_HEALS);
         fop = list_entry(ec->heal_waiting.next, ec_fop_data_t, healer);
         ec->heal_waiters--;
         list_del_init(&fop->healer);
@@ -2400,12 +2396,14 @@ ec_heal_throttle (xlator_t *this, ec_fop_data_t *fop)
 
                 LOCK (&ec->lock);
                 {
-                        if (ec->heal_waiters >= EC_MAX_HEAL_WAITERS) {
-                                can_heal = _gf_false;
-                        } else {
+                        if ((ec->background_heals > 0) &&
+                            (ec->heal_wait_qlen + ec->background_heals) >
+                                             (ec->heal_waiters + ec->healers)) {
                                 list_add_tail(&fop->healer, &ec->heal_waiting);
                                 ec->heal_waiters++;
                                 fop = __ec_dequeue_heals (ec);
+                        } else {
+                                can_heal = _gf_false;
                         }
                 }
                 UNLOCK (&ec->lock);
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index dd51630..e28f402 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -219,15 +219,35 @@ int32_t mem_acct_init(xlator_t * this)
     return 0;
 }
 
+void
+ec_configure_background_heal_opts (ec_t *ec, int background_heals,
+                                   int heal_wait_qlen)
+{
+        if (background_heals == 0) {
+                ec->heal_wait_qlen = 0;
+        } else {
+                ec->heal_wait_qlen = heal_wait_qlen;
+        }
+        ec->background_heals = background_heals;
+}
+
 int32_t
 reconfigure (xlator_t *this, dict_t *options)
 {
-        ec_t *ec = this->private;
+        ec_t     *ec              = this->private;
+        uint32_t heal_wait_qlen   = 0;
+        uint32_t background_heals = 0;
 
-        GF_OPTION_RECONF ("self-heal-daemon", ec->shd.enabled, options, bool, failed);
+        GF_OPTION_RECONF ("self-heal-daemon", ec->shd.enabled, options, bool,
+                          failed);
         GF_OPTION_RECONF ("iam-self-heal-daemon", ec->shd.iamshd, options,
                           bool, failed);
-
+        GF_OPTION_RECONF ("background-heals", background_heals, options,
+                          uint32, failed);
+        GF_OPTION_RECONF ("heal-wait-qlength", heal_wait_qlen, options,
+                          uint32, failed);
+        ec_configure_background_heal_opts (ec, background_heals,
+                                           heal_wait_qlen);
         return 0;
 failed:
         return -1;
@@ -577,6 +597,10 @@ init (xlator_t *this)
     ec_method_initialize();
     GF_OPTION_INIT ("self-heal-daemon", ec->shd.enabled, bool, failed);
     GF_OPTION_INIT ("iam-self-heal-daemon", ec->shd.iamshd, bool, failed);
+    GF_OPTION_INIT ("background-heals", ec->background_heals, uint32, failed);
+    GF_OPTION_INIT ("heal-wait-qlength", ec->heal_wait_qlen, uint32, failed);
+    ec_configure_background_heal_opts (ec, ec->background_heals,
+                                       ec->heal_wait_qlen);
 
     if (ec->shd.iamshd)
             ec_selfheal_daemon_init (this);
@@ -1188,6 +1212,10 @@ int32_t ec_dump_private(xlator_t *this)
     gf_proc_dump_write("childs_up", "%u", ec->xl_up_count);
     gf_proc_dump_write("childs_up_mask", "%s",
                        ec_bin(tmp, sizeof(tmp), ec->xl_up, ec->nodes));
+    gf_proc_dump_write("background-heals", "%d", ec->background_heals);
+    gf_proc_dump_write("heal-wait-qlength", "%d", ec->heal_wait_qlen);
+    gf_proc_dump_write("healers", "%d", ec->healers);
+    gf_proc_dump_write("heal-waiters", "%d", ec->heal_waiters);
 
     return 0;
 }
@@ -1271,5 +1299,21 @@ struct volume_options options[] =
                      "translator is running as part of self-heal-daemon "
                      "or not."
     },
+    { .key = {"background-heals"},
+      .type = GF_OPTION_TYPE_INT,
+      .min = 0,/*Disabling background heals*/
+      .max = 256,
+      .default_value = "8",
+      .description = "This option can be used to control number of parallel"
+                     " heals",
+    },
+    { .key = {"heal-wait-qlength"},
+      .type = GF_OPTION_TYPE_INT,
+      .min = 0,
+      .max = 65536, /*Around 100MB as of now with sizeof(ec_fop_data_t) at 1800*/
+      .default_value = "128",
+      .description = "This option can be used to control number of heals"
+                     " that can wait",
+    },
     { }
 };
diff --git a/xlators/cluster/ec/src/ec.h b/xlators/cluster/ec/src/ec.h
index 7f14020..f335fd5 100644
--- a/xlators/cluster/ec/src/ec.h
+++ b/xlators/cluster/ec/src/ec.h
@@ -47,6 +47,8 @@ struct _ec
     gf_lock_t         lock;
     gf_timer_t *      timer;
     gf_boolean_t      shutdown;
+    uint32_t          background_heals;
+    uint32_t          heal_wait_qlen;
     struct list_head  pending_fops;
     struct list_head  heal_waiting;
     struct list_head  healing;
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 7bbd729..b536d18 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2015,6 +2015,14 @@ struct volopt_map_entry glusterd_volopt_map[] = {
           .voltype     = "features/upcall",
           .op_version  = GD_OP_VERSION_3_7_0,
         },
+        { .key         = "disperse.background-heals",
+          .voltype     = "cluster/disperse",
+          .op_version  = GD_OP_VERSION_3_7_3,
+        },
+        { .key         = "disperse.heal-wait-qlength",
+          .voltype     = "cluster/disperse",
+          .op_version  = GD_OP_VERSION_3_7_3,
+        },
         { .key         = NULL
         }
 };
-- 
1.7.1