From 998d9b8b5e271f407e1c654c34f45f0db36abc71 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Tue, 21 May 2019 17:15:07 +0530
Subject: [PATCH 172/178] ec/fini: Fix race with ec_fini and ec_notify

During a graph cleanup, we first send a PARENT_DOWN event and wait for
a child down to ultimately free the xlator and the graph.

In the ec xlator, we clean up the threads when we get a PARENT_DOWN event.
But a racing event like CHILD_UP or an xl_op event may trigger healing
threads after the threads have been cleaned up.

So there is a chance that the threads might access a freed private variable.
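The fix follows a simple pattern: before any heal work is (re)started from a
notify path, check whether the owning graph has already begun cleanup. Below
is a minimal caller-side sketch using the xlator_is_cleanup_starting() helper
introduced here; the wrapper name and the elided spawn call are illustrative
only and not part of this patch, and the "glusterfs/xlator.h" include path is
assumed from the in-tree layout:

    #include "glusterfs/xlator.h"

    /* Illustrative sketch: refuse to start new heal work once graph
     * teardown (the PARENT_DOWN path) has set cleanup_starting on the
     * graph's first xlator. */
    static int
    ec_spawn_heal_guarded(xlator_t *this)
    {
        if (xlator_is_cleanup_starting(this))
            return -1; /* graph is being torn down; do not spawn */

        /* ... spawn the healer thread as before ... */
        return 0;
    }

Because the helper reads cleanup_starting from graph->first, the same check
works from any xlator in the graph, which is why ec-heal.c, ec-heald.c and
ec.c can all share it.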
Upstream patch: https://review.gluster.org/#/c/glusterfs/+/22758/

>Change-Id: I252d10181bb67b95900c903d479de707a8489532
>fixes: bz#1703948
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: I84a10352d9fb3e68d4147b3791e3af45ab79050e
BUG: 1703434
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/172285
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 libglusterfs/src/glusterfs/xlator.h |  3 +++
 libglusterfs/src/libglusterfs.sym   |  1 +
 libglusterfs/src/xlator.c           | 21 +++++++++++++++++++++
 xlators/cluster/ec/src/ec-heal.c    |  4 ++++
 xlators/cluster/ec/src/ec-heald.c   |  6 ++++++
 xlators/cluster/ec/src/ec.c         |  3 +++
 6 files changed, 38 insertions(+)

diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h
index 8998976..09e463e 100644
--- a/libglusterfs/src/glusterfs/xlator.h
+++ b/libglusterfs/src/glusterfs/xlator.h
@@ -1092,4 +1092,7 @@ gluster_graph_take_reference(xlator_t *tree);
 
 gf_boolean_t
 mgmt_is_multiplexed_daemon(char *name);
+
+gf_boolean_t
+xlator_is_cleanup_starting(xlator_t *this);
 #endif /* _XLATOR_H */
diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
index ec474e7..7a2edef 100644
--- a/libglusterfs/src/libglusterfs.sym
+++ b/libglusterfs/src/libglusterfs.sym
@@ -1161,3 +1161,4 @@ glusterfs_process_svc_attach_volfp
 glusterfs_mux_volfile_reconfigure
 glusterfs_process_svc_detach
 mgmt_is_multiplexed_daemon
+xlator_is_cleanup_starting
diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
index 022c3ed..fbfbbe2 100644
--- a/libglusterfs/src/xlator.c
+++ b/libglusterfs/src/xlator.c
@@ -1486,3 +1486,24 @@ mgmt_is_multiplexed_daemon(char *name)
     }
     return _gf_false;
 }
+
+gf_boolean_t
+xlator_is_cleanup_starting(xlator_t *this)
+{
+    gf_boolean_t cleanup = _gf_false;
+    glusterfs_graph_t *graph = NULL;
+    xlator_t *xl = NULL;
+
+    if (!this)
+        goto out;
+    graph = this->graph;
+
+    if (!graph)
+        goto out;
+
+    xl = graph->first;
+    if (xl && xl->cleanup_starting)
+        cleanup = _gf_true;
+out:
+    return cleanup;
+}
diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
index 2fa1f11..8844c29 100644
--- a/xlators/cluster/ec/src/ec-heal.c
+++ b/xlators/cluster/ec/src/ec-heal.c
@@ -2855,6 +2855,10 @@ ec_replace_brick_heal_wrap(void *opaque)
         itable = ec->xl->itable;
     else
         goto out;
+
+    if (xlator_is_cleanup_starting(ec->xl))
+        goto out;
+
     ret = ec_replace_heal(ec, itable->root);
 out:
     return ret;
diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c
index edf5e11..91512d7 100644
--- a/xlators/cluster/ec/src/ec-heald.c
+++ b/xlators/cluster/ec/src/ec-heald.c
@@ -444,6 +444,9 @@ unlock:
 int
 ec_shd_full_healer_spawn(xlator_t *this, int subvol)
 {
+    if (xlator_is_cleanup_starting(this))
+        return -1;
+
     return ec_shd_healer_spawn(this, NTH_FULL_HEALER(this, subvol),
                                ec_shd_full_healer);
 }
@@ -451,6 +454,9 @@ ec_shd_full_healer_spawn(xlator_t *this, int subvol)
 int
 ec_shd_index_healer_spawn(xlator_t *this, int subvol)
 {
+    if (xlator_is_cleanup_starting(this))
+        return -1;
+
     return ec_shd_healer_spawn(this, NTH_INDEX_HEALER(this, subvol),
                                ec_shd_index_healer);
 }
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index 264582a..df5912c 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -486,6 +486,9 @@ ec_set_up_state(ec_t *ec, uintptr_t index_mask, uintptr_t new_state)
 {
     uintptr_t current_state = 0;
 
+    if (xlator_is_cleanup_starting(ec->xl))
+        return _gf_false;
+
     if ((ec->xl_notify & index_mask) == 0) {
         ec->xl_notify |= index_mask;
         ec->xl_notify_count++;
-- 
1.8.3.1