From 56077a61e19d01452f95aaa8406cf24faa55d044 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Fri, 5 Jun 2015 12:20:04 +0530
Subject: [PATCH 13/18] afr: honour self-heal enable/disable volume set options

Patch URL in upstream master branch: http://review.gluster.org/#/c/11012/
Patch URL in upstream 3.7 branch: http://review.gluster.org/#/c/11062/

afr-v1 had the following volume set options that are used to enable/disable
self-heals from happening in the AFR xlator when loaded in the client graph:
cluster.metadata-self-heal
cluster.data-self-heal
cluster.entry-self-heal

In afr-v2, these three heals can happen from the client if there is an inode
refresh. This patch allows such heals to proceed only if the corresponding
volume set options are set to true.
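
For example, these options can be toggled per volume with the gluster CLI
(VOLNAME below is a placeholder volume name):

    # enable/disable client-side self-heals for a volume
    gluster volume set VOLNAME cluster.metadata-self-heal off
    gluster volume set VOLNAME cluster.data-self-heal off
    gluster volume set VOLNAME cluster.entry-self-heal off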
Change-Id: Iba83102fe5cea109bc4fc8c3b711a711929313f8
BUG: 1228518
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/50094
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Tested-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
---
 tests/basic/afr/client-side-heal.t             | 86 ++++++++++++++++++++++++++
 xlators/cluster/afr/src/afr-common.c           |  3 +
 xlators/cluster/afr/src/afr-self-heal-common.c | 11 +++-
 3 files changed, 97 insertions(+), 3 deletions(-)
 create mode 100644 tests/basic/afr/client-side-heal.t
diff --git a/tests/basic/afr/client-side-heal.t b/tests/basic/afr/client-side-heal.t
new file mode 100644
index 0000000..c9b3e35
--- /dev/null
+++ b/tests/basic/afr/client-side-heal.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+echo "some data" > $M0/datafile
+EXPECT 0 echo $?
+TEST touch $M0/mdatafile
+TEST mkdir $M0/dir
+
+#Kill a brick and perform I/O to have pending heals.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" afr_child_up_status $V0 0
+
+#pending data heal
+echo "some more data" >> $M0/datafile
+EXPECT 0 echo $?
+
+#pending metadata heal
+TEST chmod +x $M0/mdatafile
+
+#pending entry heal. Also causes pending metadata/data heals on file{1..5}
+TEST touch $M0/dir/file{1..5}
+
+EXPECT 8 afr_get_pending_heal_count $V0
+
+#After brick comes back up, access from client should not trigger heals
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Metadata heal via explicit lookup must not happen
+TEST ls $M0/mdatafile
+
+#Inode refresh must not trigger data and entry heals.
+#To trigger inode refresh for sure, the volume is unmounted and mounted each time.
+#Check that data heal does not happen.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST cat $M0/datafile
+#Check that entry heal does not happen.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST ls $M0/dir
+
+#No heal must have happened
+EXPECT 8 afr_get_pending_heal_count $V0
+
+#Enable client-side heal options and trigger heals
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+#Metadata heal is triggered by lookup without need for inode refresh.
+TEST ls $M0/mdatafile
+EXPECT 7 afr_get_pending_heal_count $V0
+
+#Inode refresh must trigger data and entry heals.
+#To trigger inode refresh for sure, the volume is unmounted and mounted each time.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST cat $M0/datafile
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST ls $M0/dir
+EXPECT 5 afr_get_pending_heal_count $V0
+
+TEST cat  $M0/dir/file1
+TEST cat  $M0/dir/file2
+TEST cat  $M0/dir/file3
+TEST cat  $M0/dir/file4
+TEST cat  $M0/dir/file5
+
+EXPECT 0 afr_get_pending_heal_count $V0
+cleanup;
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index f478fea..410d31d 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -1757,6 +1757,9 @@ afr_can_start_metadata_self_heal(call_frame_t *frame, xlator_t *this)
         replies = local->replies;
         priv = this->private;
 
+        if (!priv->metadata_self_heal)
+                return _gf_false;
+
         for (i = 0; i < priv->child_count; i++) {
                 if(!replies[i].valid || replies[i].op_ret == -1)
                         continue;
diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
index f3d1f8b..207e9b9 100644
--- a/xlators/cluster/afr/src/afr-self-heal-common.c
+++ b/xlators/cluster/afr/src/afr-self-heal-common.c
@@ -1308,6 +1308,11 @@ afr_selfheal_do (call_frame_t *frame, xlator_t *this, uuid_t gfid)
 	gf_boolean_t  data_selfheal     = _gf_false;
 	gf_boolean_t  metadata_selfheal = _gf_false;
 	gf_boolean_t  entry_selfheal    = _gf_false;
+        afr_private_t *priv            = NULL;
+        gf_boolean_t dataheal_enabled   = _gf_false;
+
+        priv = this->private;
+        gf_string2boolean (priv->data_self_heal, &dataheal_enabled);
 
 	ret = afr_selfheal_unlocked_inspect (frame, this, gfid, &inode,
 					     &data_selfheal,
@@ -1321,13 +1326,13 @@ afr_selfheal_do (call_frame_t *frame, xlator_t *this, uuid_t gfid)
                 goto out;
         }
 
-	if (data_selfheal)
+	if (data_selfheal && dataheal_enabled)
                 data_ret = afr_selfheal_data (frame, this, inode);
 
-	if (metadata_selfheal)
+	if (metadata_selfheal && priv->metadata_self_heal)
                 metadata_ret = afr_selfheal_metadata (frame, this, inode);
 
-	if (entry_selfheal)
+	if (entry_selfheal && priv->entry_self_heal)
                 entry_ret = afr_selfheal_entry (frame, this, inode);
 
         or_ret = (data_ret | metadata_ret | entry_ret);
-- 
1.9.3