From 547bd2260fba782cb0a7a54589f9b6f75226776a Mon Sep 17 00:00:00 2001
From: Pranith Kumar K <pkarampu@redhat.com>
Date: Mon, 5 Dec 2016 13:20:51 +0530
Subject: [PATCH 232/235] cluster/afr: Remove backward compatibility for locks
 with v1

When we have cascading locks with the same lk-owner there is a possibility of
a deadlock. One example is as follows:

Self-heal takes a lock in data-domain for a big name with 256 chars of "aaaa...a"
and starts heal in a 3-way replication when brick-0 is offline and healing from
brick-1 to brick-2 is in progress. So this lock is active on brick-1 and
brick-2. Now brick-0 comes online and an operation wants to take a full lock;
the lock is granted on brick-0 and it is waiting for the lock on brick-1. As
part of entry healing, self-heal takes full locks on all the available bricks
and then proceeds with healing the entry. Now this lock starts waiting on
brick-0 because some other operation already has a granted lock there. This
leads to a deadlock: the operation is waiting for heal to unlock "aaaa..."
whereas heal is waiting for the operation to unlock brick-0. Initially I
thought this was happening because healing tries to take a lock on all the
available bricks instead of just the bricks that are participating in the
heal, but later realized that the same kind of deadlock can happen if a brick
goes down after the heal starts but comes back before it completes. So the
essential problem is the cascading locks with the same lk-owner, which were
added for backward compatibility with afr-v1 and can be safely removed now
that versions with afr-v1 are already EOL. This patch removes the
compatibility with v1 that requires cascading locks with the same lk-owner.
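
The following is a minimal illustrative sketch, not GlusterFS code: the brick
indices, owner names and the try_full_lock() helper are made up to model the
wait-for cycle described above. A brick grants a full lock when it is free or
already held by the same lk-owner, which is the cascading grant the v1
compatibility relied on; running it prints the circular wait.

#include <stdio.h>
#include <string.h>

#define BRICKS 3

static const char *holder[BRICKS];  /* lk-owner currently holding the lock */
static const char *waiter[BRICKS];  /* lk-owner blocked on this brick */

/* Grant the lock if the brick is free or already held by the same
 * lk-owner (the cascading grant afr-v1 compatibility relied on). */
static int
try_full_lock (int brick, const char *owner)
{
        if (!holder[brick] || strcmp (holder[brick], owner) == 0) {
                holder[brick] = owner;
                return 0;                  /* granted */
        }
        waiter[brick] = owner;             /* blocked behind the holder */
        return -1;
}

int
main (void)
{
        int i;

        /* brick-0 is down: heal locks the two available bricks */
        try_full_lock (1, "heal");
        try_full_lock (2, "heal");

        /* brick-0 comes back: an operation asks for full locks everywhere,
         * gets brick-0 and blocks behind heal on brick-1 and brick-2 */
        for (i = 0; i < BRICKS; i++)
                try_full_lock (i, "op");

        /* entry heal cascades a second round of full locks with the same
         * lk-owner: re-granted on brick-1/brick-2, blocked on brick-0 */
        for (i = 0; i < BRICKS; i++)
                try_full_lock (i, "heal");

        /* op -> heal on brick-1/brick-2 and heal -> op on brick-0: a cycle */
        for (i = 0; i < BRICKS; i++)
                if (waiter[i])
                        printf ("brick-%d: %s waits for %s\n",
                                i, waiter[i], holder[i]);

        return 0;
}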

In the next version we can make the locking-scheme option a dummy and
switch completely to v2.

 >BUG: 1401404
 >Change-Id: Ic9afab8260f5ff4dff5329eb0429811bcb879079
 >Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
 >Reviewed-on: http://review.gluster.org/16024
 >Smoke: Gluster Build System <jenkins@build.gluster.org>
 >Reviewed-by: Ravishankar N <ravishankar@redhat.com>
 >NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
 >CentOS-regression: Gluster Build System <jenkins@build.gluster.org>

BUG: 1398188
Change-Id: Iae73b761f8247af1bb46f2ab82d5c7f7e86b8c17
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/92520
---
 xlators/cluster/afr/src/afr-common.c          | 48 +++++++--------------------
 xlators/cluster/afr/src/afr-self-heal-entry.c | 34 +------------------
 2 files changed, 13 insertions(+), 69 deletions(-)

diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index 55f10e7..b825b19 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -5218,7 +5218,6 @@ afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this,
                                   gf_boolean_t *pflag)
 {
         int ret = -1;
-        unsigned char *locked_on = NULL;
         unsigned char *data_lock = NULL;
         unsigned char *sources = NULL;
         unsigned char *sinks = NULL;
@@ -5227,12 +5226,8 @@ afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this,
         afr_private_t   *priv = NULL;
         fd_t          *fd = NULL;
         struct afr_reply *locked_replies = NULL;
-        gf_boolean_t granular_locks = _gf_false;
 
         priv = this->private;
-	if (strcmp ("granular", priv->locking_scheme) == 0)
-	        granular_locks = _gf_true;
-        locked_on = alloca0 (priv->child_count);
         data_lock = alloca0 (priv->child_count);
         sources = alloca0 (priv->child_count);
         sinks = alloca0 (priv->child_count);
@@ -5253,42 +5248,23 @@ afr_selfheal_locked_data_inspect (call_frame_t *frame, xlator_t *this,
 
         locked_replies = alloca0 (sizeof (*locked_replies) * priv->child_count);
 
-        if (!granular_locks) {
-                ret = afr_selfheal_tryinodelk (frame, this, inode,
-                                              priv->sh_domain, 0, 0, locked_on);
-        }
+        ret = afr_selfheal_inodelk (frame, this, inode, this->name,
+                                    0, 0, data_lock);
         {
-                if (!granular_locks && (ret == 0)) {
+                if (ret == 0) {
                         ret = -afr_final_errno (frame->local, priv);
                         if (ret == 0)
-                                ret = -ENOTCONN;/* all invalid responses */
+                                ret = -ENOTCONN; /* all invalid responses */
                         goto out;
                 }
-                ret = afr_selfheal_inodelk (frame, this, inode, this->name,
-                                            0, 0, data_lock);
-                {
-                        if (ret == 0) {
-                                ret = -afr_final_errno (frame->local, priv);
-                                if (ret == 0)
-                                        ret = -ENOTCONN;
-                                /* all invalid responses */
-                                goto unlock;
-                        }
-                        ret = __afr_selfheal_data_prepare (frame, this, inode,
-                                                           data_lock, sources,
-                                                           sinks, healed_sinks,
-                                                           undid_pending,
-                                                           locked_replies,
-                                                           pflag);
-                        *dsh = afr_decide_heal_info (priv, sources, ret);
-                }
-                afr_selfheal_uninodelk (frame, this, inode, this->name, 0, 0,
-                                        data_lock);
-        }
-unlock:
-        if (!granular_locks)
-                afr_selfheal_uninodelk (frame, this, inode, priv->sh_domain, 0,
-                                        0, locked_on);
+                ret = __afr_selfheal_data_prepare (frame, this, inode,
+                                                   data_lock, sources, sinks,
+                                                   healed_sinks, undid_pending,
+                                                   locked_replies, pflag);
+                *dsh = afr_decide_heal_info (priv, sources, ret);
+        }
+        afr_selfheal_uninodelk (frame, this, inode, this->name, 0, 0,
+                                data_lock);
 out:
         if (locked_replies)
                 afr_replies_wipe (locked_replies, priv->child_count);
diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
index 6b5340a..2efeedf 100644
--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
+++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
@@ -21,11 +21,6 @@
  * this name can ever come, entry-lock with this name is going to prevent
  * self-heals from older versions while the granular entry-self-heal is going
  * on in newer version.*/
-#define LONG_FILENAME "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
-                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
-                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
-                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\
-                      "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
 
 static int
 afr_selfheal_entry_delete (xlator_t *this, inode_t *dir, const char *name,
@@ -1051,21 +1046,16 @@ afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode)
 {
 	afr_private_t *priv = NULL;
 	unsigned char *locked_on = NULL;
-        unsigned char *long_name_locked = NULL;
 	fd_t *fd = NULL;
 	int ret = 0;
-	gf_boolean_t granular_locks = _gf_false;
 
 	priv = this->private;
-	if (strcmp ("granular", priv->locking_scheme) == 0)
-	        granular_locks = _gf_true;
 
 	fd = afr_selfheal_data_opendir (this, inode);
 	if (!fd)
 		return -EIO;
 
 	locked_on = alloca0 (priv->child_count);
-	long_name_locked = alloca0 (priv->child_count);
 
 	ret = afr_selfheal_tie_breaker_entrylk (frame, this, inode,
 	                                        priv->sh_domain, NULL,
@@ -1085,29 +1075,7 @@ afr_selfheal_entry (call_frame_t *frame, xlator_t *this, inode_t *inode)
 			goto unlock;
 		}
 
-                if (!granular_locks) {
-                        ret = afr_selfheal_tryentrylk (frame, this, inode,
-                                                      this->name, LONG_FILENAME,
-                                                      long_name_locked);
-                }
-                {
-                        if (!granular_locks && ret < 1) {
-                                gf_msg_debug (this->name, 0, "%s: Skipping"
-                                              " entry self-heal as only %d "
-                                              "sub-volumes could be "
-                                              "locked in special-filename "
-                                              "domain",
-                                              uuid_utoa (fd->inode->gfid),
-                                              ret);
-                                ret = -ENOTCONN;
-                                goto unlock;
-                        }
-                        ret = __afr_selfheal_entry (frame, this, fd, locked_on);
-                }
-                if (!granular_locks)
-                        afr_selfheal_unentrylk (frame, this, inode, this->name,
-                                               LONG_FILENAME, long_name_locked,
-                                               NULL);
+                ret = __afr_selfheal_entry (frame, this, fd, locked_on);
 	}
 unlock:
 	afr_selfheal_unentrylk (frame, this, inode, priv->sh_domain, NULL,
-- 
2.9.3