From 3008985af3ee25b8be7b6bc95302a867698c597e Mon Sep 17 00:00:00 2001
From: Ashish Pandey <aspandey@redhat.com>
Date: Fri, 4 Mar 2016 13:05:09 +0530
Subject: [PATCH 38/80] cluster/ec: Provide an option to enable/disable eager lock

Problem: If a fop takes a lock and completes its operation,
it waits for 1 second before releasing the lock. However,
if ec finds any lock contention within this time period,
it releases the lock immediately, before the timer expires.
As we take the lock on the first brick, for a few operations,
like read, discovery of lock contention might take a long
time and can degrade performance.

Solution: Provide an option to enable/disable eager lock.
If eager lock is disabled, the lock will be released as soon
as the fop completes.

gluster v set <VOLUME NAME> disperse.eager-lock on
gluster v set <VOLUME NAME> disperse.eager-lock off
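
The configured value can be confirmed, for example, under the
"Options Reconfigured" section in the output of:

gluster v info <VOLUME NAME>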

master -
http://review.gluster.org/#/c/13605/
release-3.7 -
http://review.gluster.org/#/c/13773/

Change-Id: I000985a787eba3c190fdcd5981dfbf04e64af166
BUG: 1320412
Signed-off-by: Ashish Pandey <aspandey@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/70536
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Tested-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
---
 libglusterfs/src/globals.h                      |    2 ++
 tests/bugs/replicate/bug-1297695.t              |    2 +-
 tests/bugs/replicate/bug-821056.t               |    2 +-
 tests/bugs/replicate/bug-921231.t               |    4 ++--
 tests/bugs/replicate/bug-966018.t               |    2 +-
 tests/bugs/replicate/bug-976800.t               |    2 +-
 tests/bugs/replicate/bug-979365.t               |    2 +-
 xlators/cluster/ec/src/ec-common.c              |    9 ++++++---
 xlators/cluster/ec/src/ec.c                     |    9 +++++++++
 xlators/cluster/ec/src/ec.h                     |    1 +
 xlators/mgmt/glusterd/src/glusterd-volume-set.c |    5 +++++
 11 files changed, 30 insertions(+), 10 deletions(-)

diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h
index cc23b6c..ad0aef8 100644
--- a/libglusterfs/src/globals.h
+++ b/libglusterfs/src/globals.h
@@ -72,6 +72,8 @@
 
 #define GD_OP_VERSION_3_7_7    30707 /* Op-version for GlusterFS 3.7.7 */
 
+#define GD_OP_VERSION_3_7_10    30710 /* Op-version for GlusterFS 3.7.10 */
+
 #include "xlator.h"
 
 /* THIS */
diff --git a/tests/bugs/replicate/bug-1297695.t b/tests/bugs/replicate/bug-1297695.t
index e0f4316..d5c1a21 100644
--- a/tests/bugs/replicate/bug-1297695.t
+++ b/tests/bugs/replicate/bug-1297695.t
@@ -13,7 +13,7 @@ TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
 
-TEST $CLI volume set $V0 eager-lock on
+TEST $CLI volume set $V0 cluster.eager-lock on
 TEST $CLI volume set $V0 post-op-delay-secs 3
 TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume set $V0 cluster.data-self-heal off
diff --git a/tests/bugs/replicate/bug-821056.t b/tests/bugs/replicate/bug-821056.t
index 02a9c78..a163300 100644
--- a/tests/bugs/replicate/bug-821056.t
+++ b/tests/bugs/replicate/bug-821056.t
@@ -8,7 +8,7 @@ TEST glusterd
 TEST pidof glusterd
 
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
 TEST $CLI volume set $V0 cluster.self-heal-daemon off
 TEST $CLI volume set $V0 performance.quick-read off
 TEST $CLI volume set $V0 performance.open-behind off
diff --git a/tests/bugs/replicate/bug-921231.t b/tests/bugs/replicate/bug-921231.t
index 93c642b..8150461 100644
--- a/tests/bugs/replicate/bug-921231.t
+++ b/tests/bugs/replicate/bug-921231.t
@@ -3,7 +3,7 @@
 . $(dirname $0)/../../include.rc
 . $(dirname $0)/../../volume.rc
 
-# This test writes to same file with 2 fds and tests that eager-lock is not
+# This test writes to the same file with 2 fds and tests that cluster.eager-lock is not
 # causing extra delay because of post-op-delay-secs
 cleanup;
 
@@ -14,7 +14,7 @@ function write_to_file {
 TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
-TEST $CLI volume set $V0 eager-lock on
+TEST $CLI volume set $V0 cluster.eager-lock on
 TEST $CLI volume set $V0 post-op-delay-secs 3
 TEST $CLI volume set $V0 client-log-level DEBUG
 TEST $CLI volume start $V0
diff --git a/tests/bugs/replicate/bug-966018.t b/tests/bugs/replicate/bug-966018.t
index be4d0b9..ec3beb1 100644
--- a/tests/bugs/replicate/bug-966018.t
+++ b/tests/bugs/replicate/bug-966018.t
@@ -4,7 +4,7 @@
 . $(dirname $0)/../../volume.rc
 . $(dirname $0)/../../nfs.rc
 
-#This tests if eager-lock blocks metadata operations on nfs/fuse mounts.
+#This tests if cluster.eager-lock blocks metadata operations on nfs/fuse mounts.
 #If it is not woken up, INODELK from the next command waits
 #for post-op-delay secs.
 
diff --git a/tests/bugs/replicate/bug-976800.t b/tests/bugs/replicate/bug-976800.t
index 8311734..27f8b27 100644
--- a/tests/bugs/replicate/bug-976800.t
+++ b/tests/bugs/replicate/bug-976800.t
@@ -20,7 +20,7 @@ TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
 TEST $CLI volume set $V0 ensure-durability off
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
 TEST $CLI volume set $V0 flush-behind off
 TEST $CLI volume start $V0
 TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
diff --git a/tests/bugs/replicate/bug-979365.t b/tests/bugs/replicate/bug-979365.t
index b1396c2..c09c7d5 100755
--- a/tests/bugs/replicate/bug-979365.t
+++ b/tests/bugs/replicate/bug-979365.t
@@ -15,7 +15,7 @@ TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
 TEST $CLI volume set $V0 ensure-durability on
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
 TEST $CLI volume start $V0
 TEST $CLI volume profile $V0 start
 TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
index 8c6ff78..de0e597 100644
--- a/xlators/cluster/ec/src/ec-common.c
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -2034,11 +2034,13 @@ void ec_flush_size_version(ec_fop_data_t *fop)
 void ec_lock_reuse(ec_fop_data_t *fop)
 {
     ec_cbk_data_t *cbk;
+    ec_t *ec = NULL;
     int32_t i, count;
     gf_boolean_t release = _gf_false;
-
+    ec = fop->xl->private;
     cbk = fop->answer;
-    if (cbk != NULL) {
+
+    if (ec->eager_lock && cbk != NULL) {
         if (cbk->xdata != NULL) {
             if ((dict_get_int32(cbk->xdata, GLUSTERFS_INODELK_COUNT,
                                 &count) == 0) && (count > 1)) {
@@ -2050,7 +2052,8 @@ void ec_lock_reuse(ec_fop_data_t *fop)
             }
         }
     } else {
-        /* If we haven't get an answer with enough quorum, we always release
+        /* If eager lock is disabled or if we haven't got
+         * an answer with enough quorum, we always release
          * the lock. */
         release = _gf_true;
     }
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index e8acc23..3133962 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -261,6 +261,8 @@ reconfigure (xlator_t *this, dict_t *options)
                           failed);
         GF_OPTION_RECONF ("iam-self-heal-daemon", ec->shd.iamshd, options,
                           bool, failed);
+        GF_OPTION_RECONF ("eager-lock", ec->eager_lock, options,
+                          bool, failed);
         GF_OPTION_RECONF ("background-heals", background_heals, options,
                           uint32, failed);
         GF_OPTION_RECONF ("heal-wait-qlength", heal_wait_qlen, options,
@@ -599,6 +601,7 @@ init (xlator_t *this)
     ec_method_initialize();
     GF_OPTION_INIT ("self-heal-daemon", ec->shd.enabled, bool, failed);
     GF_OPTION_INIT ("iam-self-heal-daemon", ec->shd.iamshd, bool, failed);
+    GF_OPTION_INIT ("eager-lock", ec->eager_lock, bool, failed);
     GF_OPTION_INIT ("background-heals", ec->background_heals, uint32, failed);
     GF_OPTION_INIT ("heal-wait-qlength", ec->heal_wait_qlen, uint32, failed);
     ec_configure_background_heal_opts (ec, ec->background_heals,
@@ -1309,6 +1312,12 @@ struct volume_options options[] =
                      "translator is running as part of self-heal-daemon "
                      "or not."
     },
+    { .key = {"eager-lock"},
+      .type = GF_OPTION_TYPE_BOOL,
+      .default_value = "on",
+      .description = "This option will enable/disable eager lock for "
+                     "disperse volumes."
+    },
     { .key = {"background-heals"},
       .type = GF_OPTION_TYPE_INT,
       .min = 0,/*Disabling background heals*/
diff --git a/xlators/cluster/ec/src/ec.h b/xlators/cluster/ec/src/ec.h
index 480125e..49af5c2 100644
--- a/xlators/cluster/ec/src/ec.h
+++ b/xlators/cluster/ec/src/ec.h
@@ -54,6 +54,7 @@ struct _ec
     gf_lock_t         lock;
     gf_timer_t *      timer;
     gf_boolean_t      shutdown;
+    gf_boolean_t      eager_lock;
     uint32_t          background_heals;
     uint32_t          heal_wait_qlen;
     struct list_head  pending_fops;
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index ec99c8a..f5746c8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1080,6 +1080,11 @@ struct volopt_map_entry glusterd_volopt_map[] = {
           .op_version = 1,
           .flags      = OPT_FLAG_CLIENT_OPT
         },
+        { .key        = "disperse.eager-lock",
+          .voltype    = "cluster/disperse",
+          .op_version = GD_OP_VERSION_3_7_10,
+          .flags      = OPT_FLAG_CLIENT_OPT
+        },
         { .key        = "cluster.quorum-type",
           .voltype    = "cluster/replicate",
           .option     = "quorum-type",
-- 
1.7.1