From d1b1a5186903cf9166a2aba7a2eadafd83038708 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K <pkarampu@redhat.com>
Date: Thu, 1 Dec 2016 09:42:19 +0530
Subject: [PATCH 229/235] cluster/afr: Serialize conflicting locks on all
 subvols

Problem:
1) When a blocking lock is issued and the parallel lock phase fails
on all subvolumes with EAGAIN, the code does not switch to the
serialized locking phase.
2) When quorum is enabled and locks fail partially, it is better to
return the errno reported by the brick rather than the default
quorum errno.

Fix:
Handled the EAGAIN error case and changed op_errno to reflect the
actual errno in case of a quorum error.
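
Both errno changes in the diff apply the same preference: use a
concrete errno collected from the bricks and fall back to the generic
quorum errno only when none was recorded. Below is a minimal,
self-contained C sketch of that fallback; pick_lock_errno(),
collected_errno and quorum_errno are hypothetical stand-ins for
illustration only (the patch itself uses afr_final_errno() /
int_lock->lock_op_errno and afr_quorum_errno()).

/* Sketch only: illustrates the errno fallback the patch introduces. */
#include <errno.h>
#include <stdio.h>

static int
pick_lock_errno (int collected_errno, int quorum_errno)
{
        /* Prefer the errno a brick actually returned (e.g. EAGAIN from
         * a conflicting lock); only fall back to the quorum errno when
         * no brick reported one. */
        if (collected_errno != 0)
                return collected_errno;
        return quorum_errno;
}

int
main (void)
{
        /* A brick returned EAGAIN: the caller sees EAGAIN. */
        printf ("%d\n", pick_lock_errno (EAGAIN, ENOTCONN));
        /* No brick errno was collected: fall back to the quorum errno. */
        printf ("%d\n", pick_lock_errno (0, ENOTCONN));
        return 0;
}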

 >BUG: 1369077
 >Change-Id: Ifac2e4a13686e9fde601873012700966d56a7f31
 >Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
 >Reviewed-on: http://review.gluster.org/15984
 >Smoke: Gluster Build System <jenkins@build.gluster.org>
 >NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
 >CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
 >Reviewed-by: Ravishankar N <ravishankar@redhat.com>

BUG: 1393694
Change-Id: Id8ab11e633400383097d4487c8dc2f440afe43e7
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/92316
Tested-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
---
 xlators/cluster/afr/src/afr-common.c      | 82 +++++++++++++++++++------------
 xlators/cluster/afr/src/afr-transaction.c |  4 +-
 2 files changed, 53 insertions(+), 33 deletions(-)

diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index a87428a..55f10e7 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -3510,43 +3510,19 @@ afr_fop_lock_wind (call_frame_t *frame, xlator_t *this, int child_index,
         }
 }
 
-static int32_t
-afr_unlock_partial_lock_cbk (call_frame_t *frame, void *cookie,
-                             xlator_t *this, int32_t op_ret,
-                             int32_t op_errno, dict_t *xdata)
-
+void
+afr_fop_lock_proceed (call_frame_t *frame)
 {
         afr_local_t *local = NULL;
         afr_private_t *priv = NULL;
-        int call_count = -1;
-        int child_index = (long)cookie;
-        uuid_t  gfid = {0};
 
         local = frame->local;
-        priv = this->private;
-
-        if (op_ret < 0 && op_errno != ENOTCONN) {
-                if (local->fd)
-                        gf_uuid_copy (gfid, local->fd->inode->gfid);
-                else
-                        loc_gfid (&local->loc, gfid);
-                gf_msg (this->name, GF_LOG_ERROR, op_errno,
-                        AFR_MSG_UNLOCK_FAIL,
-                        "%s: Failed to unlock %s on %s "
-                        "with lk_owner: %s", uuid_utoa (gfid),
-                        gf_fop_list[local->op],
-                        priv->children[child_index]->name,
-                        lkowner_utoa (&frame->root->lk_owner));
-        }
-
-        call_count = afr_frame_return (frame);
-        if (call_count)
-                goto out;
+        priv = frame->this->private;
 
         if (local->fop_lock_state != AFR_FOP_LOCK_PARALLEL) {
                 afr_fop_lock_unwind (frame, local->op, local->op_ret,
                                      local->op_errno, local->xdata_rsp);
-                goto out;
+                return;
         }
         /* At least one child is up */
         /*
@@ -3590,8 +3566,42 @@ afr_unlock_partial_lock_cbk (call_frame_t *frame, void *cookie,
         default:
                 break;
         }
-        afr_serialized_lock_wind (frame, this);
-out:
+        afr_serialized_lock_wind (frame, frame->this);
+}
+
+static int32_t
+afr_unlock_partial_lock_cbk (call_frame_t *frame, void *cookie,
+                             xlator_t *this, int32_t op_ret,
+                             int32_t op_errno, dict_t *xdata)
+
+{
+        afr_local_t *local = NULL;
+        afr_private_t *priv = NULL;
+        int call_count = -1;
+        int child_index = (long)cookie;
+        uuid_t  gfid = {0};
+
+        local = frame->local;
+        priv = this->private;
+
+        if (op_ret < 0 && op_errno != ENOTCONN) {
+                if (local->fd)
+                        gf_uuid_copy (gfid, local->fd->inode->gfid);
+                else
+                        loc_gfid (&local->loc, gfid);
+                gf_msg (this->name, GF_LOG_ERROR, op_errno,
+                        AFR_MSG_UNLOCK_FAIL,
+                        "%s: Failed to unlock %s on %s "
+                        "with lk_owner: %s", uuid_utoa (gfid),
+                        gf_fop_list[local->op],
+                        priv->children[child_index]->name,
+                        lkowner_utoa (&frame->root->lk_owner));
+        }
+
+        call_count = afr_frame_return (frame);
+        if (call_count == 0)
+                afr_fop_lock_proceed (frame);
+
         return 0;
 }
 
@@ -3603,6 +3613,11 @@ afr_unlock_locks_and_proceed (call_frame_t *frame, xlator_t *this,
         afr_private_t *priv = NULL;
         afr_local_t *local = NULL;
 
+        if (call_count == 0) {
+                afr_fop_lock_proceed (frame);
+                goto out;
+        }
+
         local = frame->local;
         priv = this->private;
         local->call_count = call_count;
@@ -3639,6 +3654,7 @@ afr_unlock_locks_and_proceed (call_frame_t *frame, xlator_t *this,
                         break;
         }
 
+out:
         return 0;
 }
 
@@ -3681,7 +3697,7 @@ afr_fop_lock_done (call_frame_t *frame, xlator_t *this)
                 local->op_errno = local->replies[i].op_errno;
         }
 
-        if (afr_fop_lock_is_unlock (frame) || (lock_count == 0))
+        if (afr_fop_lock_is_unlock (frame))
                 goto unwind;
 
         if (afr_is_conflicting_lock_present (local->op_ret, local->op_errno)) {
@@ -3689,7 +3705,9 @@ afr_fop_lock_done (call_frame_t *frame, xlator_t *this)
         } else if (priv->quorum_count && !afr_has_quorum (success, this)) {
                 local->fop_lock_state = AFR_FOP_LOCK_QUORUM_FAILED;
                 local->op_ret = -1;
-                local->op_errno = afr_quorum_errno (priv);
+                local->op_errno = afr_final_errno (local, priv);
+                if (local->op_errno == 0)
+                        local->op_errno = afr_quorum_errno (priv);
                 afr_unlock_locks_and_proceed (frame, this, lock_count);
         } else {
                 goto unwind;
diff --git a/xlators/cluster/afr/src/afr-transaction.c b/xlators/cluster/afr/src/afr-transaction.c
index 479f71f..f397ae7 100644
--- a/xlators/cluster/afr/src/afr-transaction.c
+++ b/xlators/cluster/afr/src/afr-transaction.c
@@ -1666,7 +1666,9 @@ afr_changelog_pre_op (call_frame_t *frame, xlator_t *this)
          * quorum number of nodes.
          */
         if (priv->quorum_count && !afr_has_fop_quorum (frame)) {
-                op_errno = afr_quorum_errno (priv);
+                op_errno = int_lock->lock_op_errno;
+                if (op_errno == 0)
+                        op_errno = afr_quorum_errno (priv);
                 goto err;
         }
 
-- 
2.9.3