From f0c3af09fd919e3646aae2821b0d6bfe4e2fd89c Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:45:58 +0530
Subject: [PATCH 237/255] Revert "ec/fini: Fix race between xlator cleanup and
 on going async fop"

This reverts commit 9fd966aa6879ac9867381629f82eca24b950d731.

BUG: 1471742
Change-Id: I557ec138174b01d8b8f8d090acd34c179e2c632d
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175946
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 xlators/cluster/ec/src/ec-common.c | 10 ----------
 xlators/cluster/ec/src/ec-common.h |  2 --
 xlators/cluster/ec/src/ec-data.c   |  4 +---
 xlators/cluster/ec/src/ec-heal.c   | 17 ++---------------
 xlators/cluster/ec/src/ec-types.h  |  1 -
 xlators/cluster/ec/src/ec.c        | 37 ++++++++++++-------------------------
 6 files changed, 15 insertions(+), 56 deletions(-)

diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
index 35c2256..e2e582f 100644
--- a/xlators/cluster/ec/src/ec-common.c
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -2956,13 +2956,3 @@ ec_manager(ec_fop_data_t *fop, int32_t error)
 
     __ec_manager(fop, error);
 }
-
-gf_boolean_t
-__ec_is_last_fop(ec_t *ec)
-{
-    if ((list_empty(&ec->pending_fops)) &&
-        (GF_ATOMIC_GET(ec->async_fop_count) == 0)) {
-        return _gf_true;
-    }
-    return _gf_false;
-}
diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h
index bf6c97d..e948342 100644
--- a/xlators/cluster/ec/src/ec-common.h
+++ b/xlators/cluster/ec/src/ec-common.h
@@ -204,6 +204,4 @@ void
 ec_reset_entry_healing(ec_fop_data_t *fop);
 char *
 ec_msg_str(ec_fop_data_t *fop);
-gf_boolean_t
-__ec_is_last_fop(ec_t *ec);
 #endif /* __EC_COMMON_H__ */
diff --git a/xlators/cluster/ec/src/ec-data.c b/xlators/cluster/ec/src/ec-data.c
index 8d2d9a1..6ef9340 100644
--- a/xlators/cluster/ec/src/ec-data.c
+++ b/xlators/cluster/ec/src/ec-data.c
@@ -202,13 +202,11 @@ ec_handle_last_pending_fop_completion(ec_fop_data_t *fop, gf_boolean_t *notify)
 {
     ec_t *ec = fop->xl->private;
 
-    *notify = _gf_false;
-
     if (!list_empty(&fop->pending_list)) {
         LOCK(&ec->lock);
         {
             list_del_init(&fop->pending_list);
-            *notify = __ec_is_last_fop(ec);
+            *notify = list_empty(&ec->pending_fops);
         }
         UNLOCK(&ec->lock);
     }
diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
index 237fea2..8844c29 100644
--- a/xlators/cluster/ec/src/ec-heal.c
+++ b/xlators/cluster/ec/src/ec-heal.c
@@ -2814,20 +2814,8 @@ int
 ec_replace_heal_done(int ret, call_frame_t *heal, void *opaque)
 {
     ec_t *ec = opaque;
-    gf_boolean_t last_fop = _gf_false;
 
-    if (GF_ATOMIC_DEC(ec->async_fop_count) == 0) {
-        LOCK(&ec->lock);
-        {
-            last_fop = __ec_is_last_fop(ec);
-        }
-        UNLOCK(&ec->lock);
-    }
     gf_msg_debug(ec->xl->name, 0, "getxattr on bricks is done ret %d", ret);
-
-    if (last_fop)
-        ec_pending_fops_completed(ec);
-
     return 0;
 }
 
@@ -2881,15 +2869,14 @@ ec_launch_replace_heal(ec_t *ec)
 {
     int ret = -1;
 
+    if (!ec)
+        return ret;
     ret = synctask_new(ec->xl->ctx->env, ec_replace_brick_heal_wrap,
                        ec_replace_heal_done, NULL, ec);
-
     if (ret < 0) {
         gf_msg_debug(ec->xl->name, 0, "Heal failed for replace brick ret = %d",
                      ret);
-        ec_replace_heal_done(-1, NULL, ec);
     }
-
     return ret;
 }
 
diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h
index 4dbf4a3..1c295c0 100644
--- a/xlators/cluster/ec/src/ec-types.h
+++ b/xlators/cluster/ec/src/ec-types.h
@@ -643,7 +643,6 @@ struct _ec {
     uintptr_t xl_notify;      /* Bit flag representing
                                  notification for bricks. */
     uintptr_t node_mask;
-    gf_atomic_t async_fop_count; /* Number of on going asynchronous fops. */
     xlator_t **xl_list;
     gf_lock_t lock;
     gf_timer_t *timer;
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index f0d58c0..df5912c 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -355,7 +355,6 @@ ec_notify_cbk(void *data)
     ec_t *ec = data;
     glusterfs_event_t event = GF_EVENT_MAXVAL;
     gf_boolean_t propagate = _gf_false;
-    gf_boolean_t launch_heal = _gf_false;
 
     LOCK(&ec->lock);
     {
@@ -385,11 +384,6 @@ ec_notify_cbk(void *data)
              * still bricks DOWN, they will be healed when they
              * come up. */
             ec_up(ec->xl, ec);
-
-            if (ec->shd.iamshd && !ec->shutdown) {
-                launch_heal = _gf_true;
-                GF_ATOMIC_INC(ec->async_fop_count);
-            }
         }
 
         propagate = _gf_true;
@@ -397,12 +391,13 @@ ec_notify_cbk(void *data)
 unlock:
     UNLOCK(&ec->lock);
 
-    if (launch_heal) {
-        /* We have just brought the volume UP, so we trigger
-         * a self-heal check on the root directory. */
-        ec_launch_replace_heal(ec);
-    }
     if (propagate) {
+        if ((event == GF_EVENT_CHILD_UP) && ec->shd.iamshd) {
+            /* We have just brought the volume UP, so we trigger
+             * a self-heal check on the root directory. */
+            ec_launch_replace_heal(ec);
+        }
+
         default_notify(ec->xl, event, NULL);
     }
 }
@@ -430,7 +425,7 @@ ec_disable_delays(ec_t *ec)
 {
     ec->shutdown = _gf_true;
 
-    return __ec_is_last_fop(ec);
+    return list_empty(&ec->pending_fops);
 }
 
 void
@@ -608,10 +603,7 @@ ec_notify(xlator_t *this, int32_t event, void *data, void *data2)
         if (event == GF_EVENT_CHILD_UP) {
             /* We need to trigger a selfheal if a brick changes
              * to UP state. */
-            if (ec_set_up_state(ec, mask, mask) && ec->shd.iamshd &&
-                !ec->shutdown) {
-                needs_shd_check = _gf_true;
-            }
+            needs_shd_check = ec_set_up_state(ec, mask, mask);
         } else if (event == GF_EVENT_CHILD_DOWN) {
             ec_set_up_state(ec, mask, 0);
         }
@@ -641,21 +633,17 @@ ec_notify(xlator_t *this, int32_t event, void *data, void *data2)
             }
         } else {
             propagate = _gf_false;
-            needs_shd_check = _gf_false;
-        }
-
-        if (needs_shd_check) {
-            GF_ATOMIC_INC(ec->async_fop_count);
         }
     }
 unlock:
     UNLOCK(&ec->lock);
 
 done:
-    if (needs_shd_check) {
-        ec_launch_replace_heal(ec);
-    }
     if (propagate) {
+        if (needs_shd_check && ec->shd.iamshd) {
+            ec_launch_replace_heal(ec);
+        }
+
         error = default_notify(this, event, data);
     }
 
@@ -717,7 +705,6 @@ init(xlator_t *this)
     ec->xl = this;
     LOCK_INIT(&ec->lock);
 
-    GF_ATOMIC_INIT(ec->async_fop_count, 0);
     INIT_LIST_HEAD(&ec->pending_fops);
     INIT_LIST_HEAD(&ec->heal_waiting);
     INIT_LIST_HEAD(&ec->healing);
-- 
1.8.3.1