From 56077a61e19d01452f95aaa8406cf24faa55d044 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Fri, 5 Jun 2015 12:20:04 +0530
Subject: [PATCH 13/18] afr: honour selfheal enable/disable volume set options

Patch URL in upstream master branch: http://review.gluster.org/#/c/11012/
Patch URL in upstream 3.7 branch: http://review.gluster.org/#/c/11062/

afr-v1 had the following volume set options that are used to enable/disable
self-heals in the AFR xlator when it is loaded in the client graph:
cluster.metadata-self-heal
cluster.data-self-heal
cluster.entry-self-heal

In afr-v2, these 3 heals can happen from the client if there is an inode
refresh. This patch allows such heals to proceed only if the corresponding
volume set options are set to true.
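
For illustration, these heals can be toggled per volume with the usual
volume set interface (a usage sketch; "testvol" is a placeholder volume
name, and these are the same commands the regression test below issues
via $CLI):

  gluster volume set testvol cluster.data-self-heal off
  gluster volume set testvol cluster.metadata-self-heal off
  gluster volume set testvol cluster.entry-self-heal off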

Change-Id: Iba83102fe5cea109bc4fc8c3b711a711929313f8
BUG: 1228518
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/50094
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Tested-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
---
 tests/basic/afr/client-side-heal.t             | 86 ++++++++++++++++++++++++++
 xlators/cluster/afr/src/afr-common.c           |  3 +
 xlators/cluster/afr/src/afr-self-heal-common.c | 11 +++-
 3 files changed, 97 insertions(+), 3 deletions(-)
 create mode 100644 tests/basic/afr/client-side-heal.t

diff --git a/tests/basic/afr/client-side-heal.t b/tests/basic/afr/client-side-heal.t
new file mode 100644
index 0000000..c9b3e35
--- /dev/null
+++ b/tests/basic/afr/client-side-heal.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+echo "some data" > $M0/datafile
+EXPECT 0 echo $?
+TEST touch $M0/mdatafile
+TEST mkdir $M0/dir
+
+#Kill a brick and perform I/O to have pending heals.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" afr_child_up_status $V0 0
+
+#pending data heal
+echo "some more data" >> $M0/datafile
+EXPECT 0 echo $?
+
+#pending metadata heal
+TEST chmod +x $M0/mdatafile
+
+#pending entry heal. Also causes pending metadata/data heals on file{1..5}
+TEST touch $M0/dir/file{1..5}
+
+EXPECT 8 afr_get_pending_heal_count $V0
+
+#After the brick comes back up, access from the client should not trigger heals
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Metadata heal via explicit lookup must not happen
+TEST ls $M0/mdatafile
+
+#Inode refresh must not trigger data and entry heals.
+#To reliably trigger an inode refresh, the volume is unmounted and remounted each time.
+#Check that data heal does not happen.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST cat $M0/datafile
+#Check that entry heal does not happen.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST ls $M0/dir
+
+#No heal must have happened
+EXPECT 8 afr_get_pending_heal_count $V0
+
+#Enable the client-side heal options and trigger heals
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+#Metadata heal is triggered by lookup, without the need for an inode refresh.
+TEST ls $M0/mdatafile
+EXPECT 7 afr_get_pending_heal_count $V0
+
+#Inode refresh must trigger data and entry heals.
+#To reliably trigger an inode refresh, the volume is unmounted and remounted each time.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST cat $M0/datafile
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST ls $M0/dir
+EXPECT 5 afr_get_pending_heal_count $V0
+
+TEST cat $M0/dir/file1
+TEST cat $M0/dir/file2
+TEST cat $M0/dir/file3
+TEST cat $M0/dir/file4
+TEST cat $M0/dir/file5
+
+EXPECT 0 afr_get_pending_heal_count $V0
+cleanup;
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index f478fea..410d31d 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -1757,6 +1757,9 @@ afr_can_start_metadata_self_heal(call_frame_t *frame, xlator_t *this)
         replies = local->replies;
         priv = this->private;
 
+        if (!priv->metadata_self_heal)
+                return _gf_false;
+
         for (i = 0; i < priv->child_count; i++) {
                 if(!replies[i].valid || replies[i].op_ret == -1)
                         continue;
diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
index f3d1f8b..207e9b9 100644
--- a/xlators/cluster/afr/src/afr-self-heal-common.c
+++ b/xlators/cluster/afr/src/afr-self-heal-common.c
@@ -1308,6 +1308,11 @@ afr_selfheal_do (call_frame_t *frame, xlator_t *this, uuid_t gfid)
 	gf_boolean_t  data_selfheal     = _gf_false;
 	gf_boolean_t  metadata_selfheal = _gf_false;
 	gf_boolean_t  entry_selfheal    = _gf_false;
+        afr_private_t *priv            = NULL;
+        gf_boolean_t dataheal_enabled   = _gf_false;
+
+        priv = this->private;
+        gf_string2boolean (priv->data_self_heal, &dataheal_enabled);
 
 	ret = afr_selfheal_unlocked_inspect (frame, this, gfid, &inode,
 					     &data_selfheal,
@@ -1321,13 +1326,13 @@ afr_selfheal_do (call_frame_t *frame, xlator_t *this, uuid_t gfid)
                 goto out;
         }
 
-	if (data_selfheal)
+	if (data_selfheal && dataheal_enabled)
                 data_ret = afr_selfheal_data (frame, this, inode);
 
-	if (metadata_selfheal)
+	if (metadata_selfheal && priv->metadata_self_heal)
                 metadata_ret = afr_selfheal_metadata (frame, this, inode);
 
-	if (entry_selfheal)
+	if (entry_selfheal && priv->entry_self_heal)
                 entry_ret = afr_selfheal_entry (frame, this, inode);
 
         or_ret = (data_ret | metadata_ret | entry_ret);
-- 
1.9.3