From dea12a9b7f1d259ecde00ae1830076f31c00b8b9 Mon Sep 17 00:00:00 2001
From: Krutika Dhananjay <kdhananj@redhat.com>
Date: Fri, 18 Nov 2016 15:38:00 +0530
Subject: [PATCH 200/206] features/index: Delete granular entry indices of
 already healed directories during crawl

        Backport of: http://review.gluster.org/15880

If granular name indices are already in existence for a volume, and
before they are healed, granular entry heal be disabled, a crawl on
indices/xattrop will clear the changelogs on these directories. When
their corresponding entry-changes indices are crawled subsequently,
if it is found that the directories don't need heal anymore, the
granular indices are not cleaned up.
This patch fixes that problem by ensuring that the zero-xattrop
also deletes the stale indices at the level of index translator.

Change-Id: I13bac40e8d215df9fc46440374c9c04c5363bc0c
BUG: 1385474
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/91373
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Tested-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
---
 .../granular-indices-but-non-granular-heal.t       | 76 ++++++++++++++++++++++
 xlators/features/index/src/index.c                 | 23 ++++++-
 2 files changed, 97 insertions(+), 2 deletions(-)
 create mode 100644 tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t

diff --git a/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
new file mode 100644
index 0000000..2da90a9
--- /dev/null
+++ b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 granular-entry-heal on
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create files under root
+for i in {1..2}
+do
+        echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+        TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+# Now disable granular-entry-heal
+TEST $CLI volume set $V0 granular-entry-heal off
+
+# Start the brick that was down
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# Enable shd
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+# Now the indices created are granular but the heal is going to be of the
+# normal kind. We test to make sure that heal still completes fine and that
+# the stale granular indices are going to be deleted
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {1..2}
+do
+        TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+cleanup
diff --git a/xlators/features/index/src/index.c b/xlators/features/index/src/index.c
index 7b8713c..f68dd55 100644
--- a/xlators/features/index/src/index.c
+++ b/xlators/features/index/src/index.c
@@ -648,6 +648,8 @@ index_del (xlator_t *this, uuid_t gfid, const char *subdir, int type)
         index_priv_t *priv = NULL;
         int          ret = 0;
         char         gfid_path[PATH_MAX] = {0};
+        char         rename_dst[PATH_MAX] = {0,};
+        uuid_t uuid;
 
         priv = this->private;
         GF_ASSERT_AND_GOTO_WITH_ERROR (this->name, !gf_uuid_is_null (gfid),
@@ -655,10 +657,27 @@ index_del (xlator_t *this, uuid_t gfid, const char *subdir, int type)
         make_gfid_path (priv->index_basepath, subdir, gfid,
                         gfid_path, sizeof (gfid_path));
 
-        if ((strcmp (subdir, ENTRY_CHANGES_SUBDIR)) == 0)
+        if ((strcmp (subdir, ENTRY_CHANGES_SUBDIR)) == 0) {
                 ret = sys_rmdir (gfid_path);
-        else
+                /* rmdir above could fail with ENOTEMPTY if the indices under
+                 * it were created when granular-entry-heal was enabled, whereas
+                 * the actual heal that happened was non-granular (or full) in
+                 * nature, resulting in name indices getting left out. To
+                 * clean up this directory without it affecting the IO path perf,
+                 * the directory is renamed to a unique name under
+                 * indices/entry-changes. Self-heal will pick up this entry
+                 * during crawl and on lookup into the file system figure that
+                 * the index is stale and subsequently wipe it out using rmdir().
+                 */
+                if ((ret) && (errno == ENOTEMPTY)) {
+                        gf_uuid_generate (uuid);
+                        make_gfid_path (priv->index_basepath, subdir, uuid,
+                                        rename_dst, sizeof (rename_dst));
+                        ret = sys_rename (gfid_path, rename_dst);
+                }
+        } else {
                 ret = sys_unlink (gfid_path);
+        }
 
         if (ret && (errno != ENOENT)) {
                 gf_log (this->name, GF_LOG_ERROR, "%s: failed to delete"
-- 
2.9.3