From 4d65506ddfa0245dcaa13b14ca13b2ea762df37d Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:48:51 +0530
Subject: [PATCH 243/255] Revert "glusterfsd/cleanup: Protect graph object
 under a lock"

This reverts commit 11b64d494c52004002f900888694d20ef8af6df6.

BUG: 1471742
Change-Id: I2717207d87ad213722de33c24e451502ed4aff48
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175952
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 libglusterfs/src/graph.c                        | 58 ++++++++++---------------
 libglusterfs/src/statedump.c                    | 16 ++-----
 tests/bugs/glusterd/optimized-basic-testcases.t |  4 +-
 3 files changed, 28 insertions(+), 50 deletions(-)

diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index 18fb2d9..4c8b02d 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -1392,12 +1392,8 @@ glusterfs_graph_cleanup(void *arg)
     }
     pthread_mutex_unlock(&ctx->notify_lock);
 
-    pthread_mutex_lock(&ctx->cleanup_lock);
-    {
-        glusterfs_graph_fini(graph);
-        glusterfs_graph_destroy(graph);
-    }
-    pthread_mutex_unlock(&ctx->cleanup_lock);
+    glusterfs_graph_fini(graph);
+    glusterfs_graph_destroy(graph);
 out:
     return NULL;
 }
@@ -1472,37 +1468,31 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)
 
     if (!ctx || !ctx->active || !volfile_obj)
         goto out;
+    parent_graph = ctx->active;
+    graph = volfile_obj->graph;
+    if (!graph)
+        goto out;
+    if (graph->first)
+        xl = graph->first;
 
-    pthread_mutex_lock(&ctx->cleanup_lock);
-    {
-        parent_graph = ctx->active;
-        graph = volfile_obj->graph;
-        if (!graph)
-            goto unlock;
-        if (graph->first)
-            xl = graph->first;
-
-        last_xl = graph->last_xl;
-        if (last_xl)
-            last_xl->next = NULL;
-        if (!xl || xl->cleanup_starting)
-            goto unlock;
+    last_xl = graph->last_xl;
+    if (last_xl)
+        last_xl->next = NULL;
+    if (!xl || xl->cleanup_starting)
+        goto out;
 
-        xl->cleanup_starting = 1;
-        gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED,
-               "detaching child %s", volfile_obj->vol_id);
+    xl->cleanup_starting = 1;
+    gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED,
+           "detaching child %s", volfile_obj->vol_id);
 
-        list_del_init(&volfile_obj->volfile_list);
-        glusterfs_mux_xlator_unlink(parent_graph->top, xl);
-        parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
-        parent_graph->xl_count -= graph->xl_count;
-        parent_graph->leaf_count -= graph->leaf_count;
-        default_notify(xl, GF_EVENT_PARENT_DOWN, xl);
-        parent_graph->id++;
-        ret = 0;
-    }
-unlock:
-    pthread_mutex_unlock(&ctx->cleanup_lock);
+    list_del_init(&volfile_obj->volfile_list);
+    glusterfs_mux_xlator_unlink(parent_graph->top, xl);
+    parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
+    parent_graph->xl_count -= graph->xl_count;
+    parent_graph->leaf_count -= graph->leaf_count;
+    default_notify(xl, GF_EVENT_PARENT_DOWN, xl);
+    parent_graph->id++;
+    ret = 0;
 out:
     if (!ret) {
         list_del_init(&volfile_obj->volfile_list);
diff --git a/libglusterfs/src/statedump.c b/libglusterfs/src/statedump.c
index 0d58f8f..0cf80c0 100644
--- a/libglusterfs/src/statedump.c
+++ b/libglusterfs/src/statedump.c
@@ -805,17 +805,11 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
     int brick_count = 0;
     int len = 0;
 
+    gf_proc_dump_lock();
+
     if (!ctx)
         goto out;
 
-    /*
-     * Multiplexed daemons can change the active graph when attach/detach
-     * is called. So this has to be protected with the cleanup lock.
-     */
-    if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name))
-        pthread_mutex_lock(&ctx->cleanup_lock);
-    gf_proc_dump_lock();
-
     if (!mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name) &&
         (ctx && ctx->active)) {
         top = ctx->active->first;
@@ -929,11 +923,7 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
 out:
     GF_FREE(dump_options.dump_path);
     dump_options.dump_path = NULL;
-    if (ctx) {
-        gf_proc_dump_unlock();
-        if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name))
-            pthread_mutex_unlock(&ctx->cleanup_lock);
-    }
+    gf_proc_dump_unlock();
 
     return;
 }
diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
index 110f1b9..d700b5e 100644
--- a/tests/bugs/glusterd/optimized-basic-testcases.t
+++ b/tests/bugs/glusterd/optimized-basic-testcases.t
@@ -289,9 +289,7 @@ mkdir -p /xyz/var/lib/glusterd/abc
 TEST  $CLI volume create "test" $H0:/xyz/var/lib/glusterd/abc
 EXPECT 'Created' volinfo_field "test" 'Status';
 
-#While taking a statedump, there is a TRY_LOCK on call_frame, which might may cause
-#failure. So Adding a EXPECT_WITHIN
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" generate_statedump_and_check_for_glusterd_info
+EXPECT "1" generate_statedump_and_check_for_glusterd_info
 
 cleanup_statedump `pidof glusterd`
 cleanup
-- 
1.8.3.1
