From 87138f86b8cb98d1c9d1a4c9a2393e7978d20b1d Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Tue, 5 Oct 2021 12:33:01 +0530
Subject: [PATCH 590/610] cluster/afr: Don't check for stale entry-index

Problem:
In every entry index heal there is a check to see if the
index is stale or not.
    1. If a file is created while the brick is down, this leads to an
       extra index lookup because the name is not stale.
    2. If a file is deleted while the brick is down, this also leads to
       an extra index lookup because the name is not stale.
    3. If a file is created and deleted while the brick is down, the
       index is stale and the check saves an entry heal, i.e. 2
       entrylks and 2 lookups.

Since cases 1 and 2 happen significantly more often than case 3, this
check is a bad tradeoff.

Fix:
Let the stale index be removed as part of the normal entry heal, in
the code path that detects that the name is already deleted.

> Upstream patch: https://github.com/gluster/glusterfs/pull/2612
> fixes: gluster#2611
> Change-Id: I29bcc07f2480877a83b30dbd7e2e5631a74df8e8
> Signed-off-by: Pranith Kumar K <pranith.karampuri@phonepe.com>

BUG: 1994593
Change-Id: I29bcc07f2480877a83b30dbd7e2e5631a74df8e8
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/279606
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 xlators/cluster/afr/src/afr-self-heal-entry.c | 46 +++++++--------------------
 1 file changed, 11 insertions(+), 35 deletions(-)

diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
index a17dd93..14b7417 100644
--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
+++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
@@ -933,37 +933,8 @@ afr_selfheal_entry_granular_dirent(xlator_t *subvol, gf_dirent_t *entry,
                                    loc_t *parent, void *data)
 {
     int ret = 0;
-    loc_t loc = {
-        0,
-    };
-    struct iatt iatt = {
-        0,
-    };
     afr_granular_esh_args_t *args = data;
 
-    /* Look up the actual inode associated with entry. If the lookup returns
-     * ESTALE or ENOENT, then it means we have a stale index. Remove it.
-     * This is analogous to the check in afr_shd_index_heal() except that
-     * here it is achieved through LOOKUP and in afr_shd_index_heal() through
-     * a GETXATTR.
-     */
-
-    loc.inode = inode_new(args->xl->itable);
-    loc.parent = inode_ref(args->heal_fd->inode);
-    gf_uuid_copy(loc.pargfid, loc.parent->gfid);
-    loc.name = entry->d_name;
-
-    ret = syncop_lookup(args->xl, &loc, &iatt, NULL, NULL, NULL);
-    if ((ret == -ENOENT) || (ret == -ESTALE)) {
-        /* The name indices under the pgfid index dir are guaranteed
-         * to be regular files. Hence the hardcoding.
-         */
-        afr_shd_entry_purge(subvol, parent->inode, entry->d_name, IA_IFREG);
-        ret = 0;
-        goto out;
-    }
-    /* TBD: afr_shd_zero_xattrop? */
-
     ret = afr_selfheal_entry_dirent(args->frame, args->xl, args->heal_fd,
                                     entry->d_name, parent->inode, subvol,
                                     _gf_false);
@@ -974,8 +945,6 @@ afr_selfheal_entry_granular_dirent(xlator_t *subvol, gf_dirent_t *entry,
     if (ret == -1)
         args->mismatch = _gf_true;
 
-out:
-    loc_wipe(&loc);
     return ret;
 }
 
@@ -1050,7 +1019,9 @@ afr_selfheal_entry_do(call_frame_t *frame, xlator_t *this, fd_t *fd, int source,
     local = frame->local;
 
     gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO,
-           "performing entry selfheal on %s", uuid_utoa(fd->inode->gfid));
+           "performing %s entry selfheal on %s",
+           (local->need_full_crawl ? "full" : "granular"),
+           uuid_utoa(fd->inode->gfid));
 
     for (i = 0; i < priv->child_count; i++) {
         /* Expunge */
@@ -1112,6 +1083,7 @@ __afr_selfheal_entry(call_frame_t *frame, xlator_t *this, fd_t *fd,
     afr_local_t *local = NULL;
     afr_private_t *priv = NULL;
     gf_boolean_t did_sh = _gf_true;
+    char *heal_type = "granular entry";
 
     priv = this->private;
     local = frame->local;
@@ -1194,11 +1166,15 @@ postop_unlock:
     afr_selfheal_unentrylk(frame, this, fd->inode, this->name, NULL,
                            postop_lock, NULL);
 out:
-    if (did_sh)
-        afr_log_selfheal(fd->inode->gfid, this, ret, "entry", source, sources,
+    if (did_sh) {
+        if (local->need_full_crawl) {
+            heal_type = "full entry";
+        }
+        afr_log_selfheal(fd->inode->gfid, this, ret, heal_type, source, sources,
                          healed_sinks);
-    else
+    } else {
         ret = 1;
+    }
 
     if (locked_replies)
         afr_replies_wipe(locked_replies, priv->child_count);
-- 
1.8.3.1