233933
From ec629963d61c3ec084c95366eec5ee3a976b1213 Mon Sep 17 00:00:00 2001
233933
From: Mohammed Rafi KC <rkavunga@redhat.com>
233933
Date: Thu, 11 Jul 2019 12:57:45 +0530
233933
Subject: [PATCH 250/255] Revert "mgmt/shd: Implement multiplexing in self heal
233933
 daemon"
233933
233933
This reverts commit 2cede2b87fb3e3e0673be9cf67e7d6eec3f7879c.
233933
233933
BUG: 1471742
233933
Change-Id: I3830d9189dfdb567a44935aa97dc963f4594dfdb
233933
fixes: bz#1471742
233933
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
233933
Reviewed-on: https://code.engineering.redhat.com/gerrit/175959
233933
Tested-by: RHGS Build Bot <nigelb@redhat.com>
233933
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
233933
---
233933
 glusterfsd/src/glusterfsd-messages.h               |   2 +-
233933
 glusterfsd/src/glusterfsd-mgmt.c                   | 238 +------
233933
 glusterfsd/src/glusterfsd.c                        |  20 +-
233933
 libglusterfs/src/defaults-tmpl.c                   |  19 +-
233933
 libglusterfs/src/glusterfs/glusterfs.h             |   7 -
233933
 libglusterfs/src/glusterfs/libglusterfs-messages.h |   4 +-
233933
 libglusterfs/src/glusterfs/xlator.h                |   3 -
233933
 libglusterfs/src/graph.c                           | 451 -------------
233933
 libglusterfs/src/graph.y                           |   3 -
233933
 libglusterfs/src/libglusterfs.sym                  |   5 -
233933
 libglusterfs/src/statedump.c                       |   3 +-
233933
 libglusterfs/src/xlator.c                          |  16 -
233933
 rpc/rpc-lib/src/protocol-common.h                  |   2 -
233933
 tests/basic/glusterd/heald.t                       |  49 +-
233933
 .../reset-brick-and-daemons-follow-quorum.t        |   8 +-
233933
 tests/volume.rc                                    |   6 +-
233933
 xlators/mgmt/glusterd/src/Makefile.am              |   6 +-
233933
 xlators/mgmt/glusterd/src/glusterd-brick-ops.c     |   2 +-
233933
 xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c     |  42 --
233933
 xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h     |   4 +-
233933
 xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c  |   3 +-
233933
 xlators/mgmt/glusterd/src/glusterd-handler.c       |  11 +-
233933
 xlators/mgmt/glusterd/src/glusterd-handshake.c     |  21 -
233933
 xlators/mgmt/glusterd/src/glusterd-mem-types.h     |   1 -
233933
 xlators/mgmt/glusterd/src/glusterd-messages.h      |   4 +-
233933
 xlators/mgmt/glusterd/src/glusterd-op-sm.c         |  84 +--
233933
 .../mgmt/glusterd/src/glusterd-shd-svc-helper.c    | 140 ----
233933
 .../mgmt/glusterd/src/glusterd-shd-svc-helper.h    |  45 --
233933
 xlators/mgmt/glusterd/src/glusterd-shd-svc.c       | 540 ++--------------
233933
 xlators/mgmt/glusterd/src/glusterd-shd-svc.h       |  17 +-
233933
 xlators/mgmt/glusterd/src/glusterd-sm.c            |  12 +-
233933
 xlators/mgmt/glusterd/src/glusterd-snapd-svc.c     |   3 +-
233933
 xlators/mgmt/glusterd/src/glusterd-statedump.c     |   3 +
233933
 xlators/mgmt/glusterd/src/glusterd-svc-helper.c    | 715 +--------------------
233933
 xlators/mgmt/glusterd/src/glusterd-svc-helper.h    |  40 +-
233933
 xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c      | 246 ++-----
233933
 xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h      |  27 -
233933
 xlators/mgmt/glusterd/src/glusterd-tier.c          |   3 +-
233933
 xlators/mgmt/glusterd/src/glusterd-tierd-svc.c     |   4 +-
233933
 xlators/mgmt/glusterd/src/glusterd-utils.c         | 137 +---
233933
 xlators/mgmt/glusterd/src/glusterd-utils.h         |   4 -
233933
 xlators/mgmt/glusterd/src/glusterd-volgen.c        |  60 +-
233933
 xlators/mgmt/glusterd/src/glusterd-volgen.h        |  11 +-
233933
 xlators/mgmt/glusterd/src/glusterd-volume-ops.c    |   8 +-
233933
 xlators/mgmt/glusterd/src/glusterd.c               |  12 +-
233933
 xlators/mgmt/glusterd/src/glusterd.h               |  30 +-
233933
 xlators/protocol/client/src/client.c               |  31 +-
233933
 47 files changed, 292 insertions(+), 2810 deletions(-)
233933
 delete mode 100644 xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
233933
 delete mode 100644 xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
233933
233933
diff --git a/glusterfsd/src/glusterfsd-messages.h b/glusterfsd/src/glusterfsd-messages.h
233933
index 280624c..ce6c8ca 100644
233933
--- a/glusterfsd/src/glusterfsd-messages.h
233933
+++ b/glusterfsd/src/glusterfsd-messages.h
233933
@@ -36,6 +36,6 @@ GLFS_MSGID(GLUSTERFSD, glusterfsd_msg_1, glusterfsd_msg_2, glusterfsd_msg_3,
233933
            glusterfsd_msg_31, glusterfsd_msg_32, glusterfsd_msg_33,
233933
            glusterfsd_msg_34, glusterfsd_msg_35, glusterfsd_msg_36,
233933
            glusterfsd_msg_37, glusterfsd_msg_38, glusterfsd_msg_39,
233933
-           glusterfsd_msg_40, glusterfsd_msg_41, glusterfsd_msg_42, glusterfsd_msg_43);
233933
+           glusterfsd_msg_40);
233933
 
233933
 #endif /* !_GLUSTERFSD_MESSAGES_H_ */
233933
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
233933
index 1d2cd1a..15acc10 100644
233933
--- a/glusterfsd/src/glusterfsd-mgmt.c
233933
+++ b/glusterfsd/src/glusterfsd-mgmt.c
233933
@@ -48,20 +48,7 @@ int
233933
 glusterfs_graph_unknown_options(glusterfs_graph_t *graph);
233933
 int
233933
 emancipate(glusterfs_ctx_t *ctx, int ret);
233933
-int
233933
-glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
233933
-                                   char *volfile_id, char *checksum);
233933
-int
233933
-glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
233933
-                                  gf_volfile_t *volfile_obj, char *checksum);
233933
-int
233933
-glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
233933
-                                   char *volfile_id, char *checksum);
233933
-int
233933
-glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj);
233933
 
233933
-gf_boolean_t
233933
-mgmt_is_multiplexed_daemon(char *name);
233933
 int
233933
 mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
233933
 {
233933
@@ -75,96 +62,6 @@ mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
233933
 }
233933
 
233933
 int
233933
-mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id)
233933
-{
233933
-    glusterfs_ctx_t *ctx = NULL;
233933
-    int ret = 0;
233933
-    FILE *tmpfp = NULL;
233933
-    gf_volfile_t *volfile_obj = NULL;
233933
-    gf_volfile_t *volfile_tmp = NULL;
233933
-    char sha256_hash[SHA256_DIGEST_LENGTH] = {
233933
-        0,
233933
-    };
233933
-    int tmp_fd = -1;
233933
-    char template[] = "/tmp/glfs.volfile.XXXXXX";
233933
-
233933
-    glusterfs_compute_sha256((const unsigned char *)volfile, size, sha256_hash);
233933
-    ctx = THIS->ctx;
233933
-    LOCK(&ctx->volfile_lock);
233933
-    {
233933
-        list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
233933
-        {
233933
-            if (!strcmp(volfile_id, volfile_obj->vol_id)) {
233933
-                if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
233933
-                            sizeof(volfile_obj->volfile_checksum))) {
233933
-                    UNLOCK(&ctx->volfile_lock);
233933
-                    gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40,
233933
-                           "No change in volfile, continuing");
233933
-                    goto out;
233933
-                }
233933
-                volfile_tmp = volfile_obj;
233933
-                break;
233933
-            }
233933
-        }
233933
-
233933
-        /* coverity[secure_temp] mkstemp uses 0600 as the mode */
233933
-        tmp_fd = mkstemp(template);
233933
-        if (-1 == tmp_fd) {
233933
-            UNLOCK(&ctx->volfile_lock);
233933
-            gf_msg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
233933
-                   "Unable to create temporary file: %s", template);
233933
-            ret = -1;
233933
-            goto out;
233933
-        }
233933
-
233933
-        /* Calling unlink so that when the file is closed or program
233933
-         * terminates the temporary file is deleted.
233933
-         */
233933
-        ret = sys_unlink(template);
233933
-        if (ret < 0) {
233933
-            gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
233933
-                   "Unable to delete temporary file: %s", template);
233933
-            ret = 0;
233933
-        }
233933
-
233933
-        tmpfp = fdopen(tmp_fd, "w+b");
233933
-        if (!tmpfp) {
233933
-            ret = -1;
233933
-            goto unlock;
233933
-        }
233933
-
233933
-        fwrite(volfile, size, 1, tmpfp);
233933
-        fflush(tmpfp);
233933
-        if (ferror(tmpfp)) {
233933
-            ret = -1;
233933
-            goto unlock;
233933
-        }
233933
-
233933
-        if (!volfile_tmp) {
233933
-            /* There is no checksum in the list, which means simple attach
233933
-             * the volfile
233933
-             */
233933
-            ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id,
233933
-                                                     sha256_hash);
233933
-            goto unlock;
233933
-        }
233933
-        ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj,
233933
-                                                sha256_hash);
233933
-        if (ret < 0) {
233933
-            gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!");
233933
-        }
233933
-    }
233933
-unlock:
233933
-    UNLOCK(&ctx->volfile_lock);
233933
-out:
233933
-    if (tmpfp)
233933
-        fclose(tmpfp);
233933
-    else if (tmp_fd != -1)
233933
-        sys_close(tmp_fd);
233933
-    return ret;
233933
-}
233933
-
233933
-int
233933
 mgmt_cbk_event(struct rpc_clnt *rpc, void *mydata, void *data)
233933
 {
233933
     return 0;
233933
@@ -1069,110 +966,6 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
233933
 }
233933
 
233933
 int
233933
-glusterfs_handle_svc_attach(rpcsvc_request_t *req)
233933
-{
233933
-    int32_t ret = -1;
233933
-    gd1_mgmt_brick_op_req xlator_req = {
233933
-        0,
233933
-    };
233933
-    xlator_t *this = NULL;
233933
-    glusterfs_ctx_t *ctx = NULL;
233933
-
233933
-    GF_ASSERT(req);
233933
-    this = THIS;
233933
-    GF_ASSERT(this);
233933
-
233933
-    ctx = this->ctx;
233933
-    ret = xdr_to_generic(req->msg[0], &xlator_req,
233933
-                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
233933
-
233933
-    if (ret < 0) {
233933
-        /*failed to decode msg;*/
233933
-        req->rpc_err = GARBAGE_ARGS;
233933
-        goto out;
233933
-    }
233933
-    gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41,
233933
-           "received attach "
233933
-           "request for volfile-id=%s",
233933
-           xlator_req.name);
233933
-    ret = 0;
233933
-
233933
-    if (ctx->active) {
233933
-        ret = mgmt_process_volfile(xlator_req.input.input_val,
233933
-                                   xlator_req.input.input_len, xlator_req.name);
233933
-    } else {
233933
-        gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
233933
-               "got attach for %s but no active graph", xlator_req.name);
233933
-    }
233933
-out:
233933
-    if (xlator_req.input.input_val)
233933
-        free(xlator_req.input.input_val);
233933
-    if (xlator_req.name)
233933
-        free(xlator_req.name);
233933
-    glusterfs_translator_info_response_send(req, ret, NULL, NULL);
233933
-    return 0;
233933
-}
233933
-
233933
-int
233933
-glusterfs_handle_svc_detach(rpcsvc_request_t *req)
233933
-{
233933
-    gd1_mgmt_brick_op_req xlator_req = {
233933
-        0,
233933
-    };
233933
-    ssize_t ret;
233933
-    glusterfs_ctx_t *ctx = NULL;
233933
-    gf_volfile_t *volfile_obj = NULL;
233933
-    gf_volfile_t *volfile_tmp = NULL;
233933
-
233933
-    ret = xdr_to_generic(req->msg[0], &xlator_req,
233933
-                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
233933
-    if (ret < 0) {
233933
-        req->rpc_err = GARBAGE_ARGS;
233933
-        return -1;
233933
-    }
233933
-    ctx = glusterfsd_ctx;
233933
-
233933
-    LOCK(&ctx->volfile_lock);
233933
-    {
233933
-        list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
233933
-        {
233933
-            if (!strcmp(xlator_req.name, volfile_obj->vol_id)) {
233933
-                volfile_tmp = volfile_obj;
233933
-                break;
233933
-            }
233933
-        }
233933
-
233933
-        if (!volfile_tmp) {
233933
-            UNLOCK(&ctx->volfile_lock);
233933
-            gf_msg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_41,
233933
-                   "can't detach %s - not found", xlator_req.name);
233933
-            /*
233933
-             * Used to be -ENOENT.  However, the caller asked us to
233933
-             * make sure it's down and if it's already down that's
233933
-             * good enough.
233933
-             */
233933
-            ret = 0;
233933
-            goto out;
233933
-        }
233933
-        ret = glusterfs_process_svc_detach(ctx, volfile_tmp);
233933
-        if (ret) {
233933
-            UNLOCK(&ctx->volfile_lock);
233933
-            gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_41,
233933
-                   "Could not detach "
233933
-                   "old graph. Aborting the reconfiguration operation");
233933
-            goto out;
233933
-        }
233933
-    }
233933
-    UNLOCK(&ctx->volfile_lock);
233933
-out:
233933
-    glusterfs_terminate_response_send(req, ret);
233933
-    free(xlator_req.name);
233933
-    xlator_req.name = NULL;
233933
-
233933
-    return 0;
233933
-}
233933
-
233933
-int
233933
 glusterfs_handle_dump_metrics(rpcsvc_request_t *req)
233933
 {
233933
     int32_t ret = -1;
233933
@@ -2056,13 +1849,6 @@ rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
233933
 
233933
     [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", GLUSTERD_DUMP_METRICS,
233933
                                glusterfs_handle_dump_metrics, NULL, 0, DRC_NA},
233933
-
233933
-    [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", GLUSTERD_SVC_ATTACH,
233933
-                             glusterfs_handle_svc_attach, NULL, 0, DRC_NA},
233933
-
233933
-    [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", GLUSTERD_SVC_DETACH,
233933
-                             glusterfs_handle_svc_detach, NULL, 0, DRC_NA},
233933
-
233933
 };
233933
 
233933
 struct rpcsvc_program glusterfs_mop_prog = {
233933
@@ -2210,17 +1996,14 @@ mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
233933
     }
233933
 
233933
 volfile:
233933
+    ret = 0;
233933
     size = rsp.op_ret;
233933
-    volfile_id = frame->local;
233933
-    if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
233933
-        ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id);
233933
-        goto post_graph_mgmt;
233933
-    }
233933
 
233933
-    ret = 0;
233933
     glusterfs_compute_sha256((const unsigned char *)rsp.spec, size,
233933
                              sha256_hash);
233933
 
233933
+    volfile_id = frame->local;
233933
+
233933
     LOCK(&ctx->volfile_lock);
233933
     {
233933
         locked = 1;
233933
@@ -2322,7 +2105,6 @@ volfile:
233933
             }
233933
 
233933
             INIT_LIST_HEAD(&volfile_tmp->volfile_list);
233933
-            volfile_tmp->graph = ctx->active;
233933
             list_add(&volfile_tmp->volfile_list, &ctx->volfile_list);
233933
             snprintf(volfile_tmp->vol_id, sizeof(volfile_tmp->vol_id), "%s",
233933
                      volfile_id);
233933
@@ -2334,7 +2116,6 @@ volfile:
233933
 
233933
     locked = 0;
233933
 
233933
-post_graph_mgmt:
233933
     if (!is_mgmt_rpc_reconnect) {
233933
         need_emancipate = 1;
233933
         glusterfs_mgmt_pmap_signin(ctx);
233933
@@ -2488,21 +2269,10 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx)
233933
 {
233933
     xlator_t *server_xl = NULL;
233933
     xlator_list_t *trav;
233933
-    gf_volfile_t *volfile_obj = NULL;
233933
-    int ret = 0;
233933
+    int ret;
233933
 
233933
     LOCK(&ctx->volfile_lock);
233933
     {
233933
-        if (ctx->active &&
233933
-            mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
233933
-            list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
233933
-            {
233933
-                ret |= glusterfs_volfile_fetch_one(ctx, volfile_obj->vol_id);
233933
-            }
233933
-            UNLOCK(&ctx->volfile_lock);
233933
-            return ret;
233933
-        }
233933
-
233933
         if (ctx->active) {
233933
             server_xl = ctx->active->first;
233933
             if (strcmp(server_xl->type, "protocol/server") != 0) {
233933
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
233933
index 2172af4..5b5e996 100644
233933
--- a/glusterfsd/src/glusterfsd.c
233933
+++ b/glusterfsd/src/glusterfsd.c
233933
@@ -2593,6 +2593,24 @@ out:
233933
 #endif
233933
 
233933
 int
233933
+glusterfs_graph_fini(glusterfs_graph_t *graph)
233933
+{
233933
+    xlator_t *trav = NULL;
233933
+
233933
+    trav = graph->first;
233933
+
233933
+    while (trav) {
233933
+        if (trav->init_succeeded) {
233933
+            trav->fini(trav);
233933
+            trav->init_succeeded = 0;
233933
+        }
233933
+        trav = trav->next;
233933
+    }
233933
+
233933
+    return 0;
233933
+}
233933
+
233933
+int
233933
 glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp)
233933
 {
233933
     glusterfs_graph_t *graph = NULL;
233933
@@ -2791,7 +2809,7 @@ main(int argc, char *argv[])
233933
 
233933
     /* set brick_mux mode only for server process */
233933
     if ((ctx->process_mode != GF_SERVER_PROCESS) && cmd->brick_mux) {
233933
-        gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_43,
233933
+        gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_40,
233933
                "command line argument --brick-mux is valid only for brick "
233933
                "process");
233933
         goto out;
233933
diff --git a/libglusterfs/src/defaults-tmpl.c b/libglusterfs/src/defaults-tmpl.c
233933
index 82e7f78..5bf64e8 100644
233933
--- a/libglusterfs/src/defaults-tmpl.c
233933
+++ b/libglusterfs/src/defaults-tmpl.c
233933
@@ -127,12 +127,6 @@ default_notify(xlator_t *this, int32_t event, void *data, ...)
233933
     GF_UNUSED int ret = 0;
233933
     xlator_t *victim = data;
233933
 
233933
-    glusterfs_graph_t *graph = NULL;
233933
-
233933
-    GF_VALIDATE_OR_GOTO("notify", this, out);
233933
-    graph = this->graph;
233933
-    GF_VALIDATE_OR_GOTO(this->name, graph, out);
233933
-
233933
     switch (event) {
233933
         case GF_EVENT_PARENT_UP:
233933
         case GF_EVENT_PARENT_DOWN: {
233933
@@ -165,17 +159,6 @@ default_notify(xlator_t *this, int32_t event, void *data, ...)
233933
                     xlator_notify(parent->xlator, event, this, NULL);
233933
                 parent = parent->next;
233933
             }
233933
-
233933
-            if (event == GF_EVENT_CHILD_DOWN &&
233933
-                !(this->ctx && this->ctx->master) && (graph->top == this)) {
233933
-                /* Make sure this is not a daemon with master xlator */
233933
-                pthread_mutex_lock(&graph->mutex);
233933
-                {
233933
-                    graph->used = 0;
233933
-                    pthread_cond_broadcast(&graph->child_down_cond);
233933
-                }
233933
-                pthread_mutex_unlock(&graph->mutex);
233933
-            }
233933
         } break;
233933
         case GF_EVENT_UPCALL: {
233933
             xlator_list_t *parent = this->parents;
233933
@@ -222,7 +205,7 @@ default_notify(xlator_t *this, int32_t event, void *data, ...)
233933
              * nothing to do with readability.
233933
              */
233933
     }
233933
-out:
233933
+
233933
     return 0;
233933
 }
233933
 
233933
diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h
233933
index 9ec2365..2cedf1a 100644
233933
--- a/libglusterfs/src/glusterfs/glusterfs.h
233933
+++ b/libglusterfs/src/glusterfs/glusterfs.h
233933
@@ -597,10 +597,6 @@ struct _glusterfs_graph {
233933
     int used; /* Should be set when fuse gets
233933
                         first CHILD_UP */
233933
     uint32_t volfile_checksum;
233933
-    void *last_xl; /* Stores the last xl of the graph, as of now only populated
233933
-                      in client multiplexed code path */
233933
-    pthread_mutex_t mutex;
233933
-    pthread_cond_t child_down_cond; /* for broadcasting CHILD_DOWN */
233933
 };
233933
 typedef struct _glusterfs_graph glusterfs_graph_t;
233933
 
233933
@@ -743,7 +739,6 @@ typedef struct {
233933
     char volfile_checksum[SHA256_DIGEST_LENGTH];
233933
     char vol_id[NAME_MAX + 1];
233933
     struct list_head volfile_list;
233933
-    glusterfs_graph_t *graph;
233933
 
233933
 } gf_volfile_t;
233933
 
233933
@@ -827,6 +822,4 @@ gf_free_mig_locks(lock_migration_info_t *locks);
233933
 
233933
 int
233933
 glusterfs_read_secure_access_file(void);
233933
-int
233933
-glusterfs_graph_fini(glusterfs_graph_t *graph);
233933
 #endif /* _GLUSTERFS_H */
233933
diff --git a/libglusterfs/src/glusterfs/libglusterfs-messages.h b/libglusterfs/src/glusterfs/libglusterfs-messages.h
233933
index ea2aa60..1b72f6d 100644
233933
--- a/libglusterfs/src/glusterfs/libglusterfs-messages.h
233933
+++ b/libglusterfs/src/glusterfs/libglusterfs-messages.h
233933
@@ -109,8 +109,6 @@ GLFS_MSGID(
233933
     LG_MSG_PTHREAD_ATTR_INIT_FAILED, LG_MSG_INVALID_INODE_LIST,
233933
     LG_MSG_COMPACT_FAILED, LG_MSG_COMPACT_STATUS, LG_MSG_UTIMENSAT_FAILED,
233933
     LG_MSG_PTHREAD_NAMING_FAILED, LG_MSG_SYSCALL_RETURNS_WRONG,
233933
-    LG_MSG_XXH64_TO_GFID_FAILED, LG_MSG_ASYNC_WARNING, LG_MSG_ASYNC_FAILURE,
233933
-    LG_MSG_GRAPH_CLEANUP_FAILED, LG_MSG_GRAPH_SETUP_FAILED,
233933
-    LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED);
233933
+    LG_MSG_XXH64_TO_GFID_FAILED);
233933
 
233933
 #endif /* !_LG_MESSAGES_H_ */
233933
diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h
233933
index 8998976..b78daad 100644
233933
--- a/libglusterfs/src/glusterfs/xlator.h
233933
+++ b/libglusterfs/src/glusterfs/xlator.h
233933
@@ -1089,7 +1089,4 @@ handle_default_options(xlator_t *xl, dict_t *options);
233933
 
233933
 void
233933
 gluster_graph_take_reference(xlator_t *tree);
233933
-
233933
-gf_boolean_t
233933
-mgmt_is_multiplexed_daemon(char *name);
233933
 #endif /* _XLATOR_H */
233933
diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
233933
index a492dd8..bb5e67a 100644
233933
--- a/libglusterfs/src/graph.c
233933
+++ b/libglusterfs/src/graph.c
233933
@@ -114,53 +114,6 @@ out:
233933
     return cert_depth;
233933
 }
233933
 
233933
-xlator_t *
233933
-glusterfs_get_last_xlator(glusterfs_graph_t *graph)
233933
-{
233933
-    xlator_t *trav = graph->first;
233933
-    if (!trav)
233933
-        return NULL;
233933
-
233933
-    while (trav->next)
233933
-        trav = trav->next;
233933
-
233933
-    return trav;
233933
-}
233933
-
233933
-xlator_t *
233933
-glusterfs_mux_xlator_unlink(xlator_t *pxl, xlator_t *cxl)
233933
-{
233933
-    xlator_list_t *unlink = NULL;
233933
-    xlator_list_t *prev = NULL;
233933
-    xlator_list_t **tmp = NULL;
233933
-    xlator_t *next_child = NULL;
233933
-    xlator_t *xl = NULL;
233933
-
233933
-    for (tmp = &pxl->children; *tmp; tmp = &(*tmp)->next) {
233933
-        if ((*tmp)->xlator == cxl) {
233933
-            unlink = *tmp;
233933
-            *tmp = (*tmp)->next;
233933
-            if (*tmp)
233933
-                next_child = (*tmp)->xlator;
233933
-            break;
233933
-        }
233933
-        prev = *tmp;
233933
-    }
233933
-
233933
-    if (!prev)
233933
-        xl = pxl;
233933
-    else if (prev->xlator)
233933
-        xl = prev->xlator->graph->last_xl;
233933
-
233933
-    if (xl)
233933
-        xl->next = next_child;
233933
-    if (next_child)
233933
-        next_child->prev = xl;
233933
-
233933
-    GF_FREE(unlink);
233933
-    return next_child;
233933
-}
233933
-
233933
 int
233933
 glusterfs_xlator_link(xlator_t *pxl, xlator_t *cxl)
233933
 {
233933
@@ -1139,8 +1092,6 @@ glusterfs_graph_destroy_residual(glusterfs_graph_t *graph)
233933
     ret = xlator_tree_free_memacct(graph->first);
233933
 
233933
     list_del_init(&graph->list);
233933
-    pthread_mutex_destroy(&graph->mutex);
233933
-    pthread_cond_destroy(&graph->child_down_cond);
233933
     GF_FREE(graph);
233933
 
233933
     return ret;
233933
@@ -1183,25 +1134,6 @@ out:
233933
 }
233933
 
233933
 int
233933
-glusterfs_graph_fini(glusterfs_graph_t *graph)
233933
-{
233933
-    xlator_t *trav = NULL;
233933
-
233933
-    trav = graph->first;
233933
-
233933
-    while (trav) {
233933
-        if (trav->init_succeeded) {
233933
-            trav->cleanup_starting = 1;
233933
-            trav->fini(trav);
233933
-            trav->init_succeeded = 0;
233933
-        }
233933
-        trav = trav->next;
233933
-    }
233933
-
233933
-    return 0;
233933
-}
233933
-
233933
-int
233933
 glusterfs_graph_attach(glusterfs_graph_t *orig_graph, char *path,
233933
                        glusterfs_graph_t **newgraph)
233933
 {
233933
@@ -1324,386 +1256,3 @@ glusterfs_graph_attach(glusterfs_graph_t *orig_graph, char *path,
233933
 
233933
     return 0;
233933
 }
233933
-int
233933
-glusterfs_muxsvc_cleanup_parent(glusterfs_ctx_t *ctx,
233933
-                                glusterfs_graph_t *parent_graph)
233933
-{
233933
-    if (parent_graph) {
233933
-        if (parent_graph->first) {
233933
-            xlator_destroy(parent_graph->first);
233933
-        }
233933
-        ctx->active = NULL;
233933
-        GF_FREE(parent_graph);
233933
-        parent_graph = NULL;
233933
-    }
233933
-    return 0;
233933
-}
233933
-
233933
-void *
233933
-glusterfs_graph_cleanup(void *arg)
233933
-{
233933
-    glusterfs_graph_t *graph = NULL;
233933
-    glusterfs_ctx_t *ctx = THIS->ctx;
233933
-    int ret = -1;
233933
-    graph = arg;
233933
-
233933
-    if (!graph)
233933
-        return NULL;
233933
-
233933
-    /* To destroy the graph, fitst sent a GF_EVENT_PARENT_DOWN
233933
-     * Then wait for GF_EVENT_CHILD_DOWN to get on the top
233933
-     * xl. Once we have GF_EVENT_CHILD_DOWN event, then proceed
233933
-     * to fini.
233933
-     *
233933
-     * During fini call, this will take a last unref on rpc and
233933
-     * rpc_transport_object.
233933
-     */
233933
-    if (graph->first)
233933
-        default_notify(graph->first, GF_EVENT_PARENT_DOWN, graph->first);
233933
-
233933
-    ret = pthread_mutex_lock(&graph->mutex);
233933
-    if (ret != 0) {
233933
-        gf_msg("glusterfs", GF_LOG_ERROR, EAGAIN, LG_MSG_GRAPH_CLEANUP_FAILED,
233933
-               "Failed to aquire a lock");
233933
-        goto out;
233933
-    }
233933
-    /* check and wait for CHILD_DOWN for top xlator*/
233933
-    while (graph->used) {
233933
-        ret = pthread_cond_wait(&graph->child_down_cond, &graph->mutex);
233933
-        if (ret != 0)
233933
-            gf_msg("glusterfs", GF_LOG_INFO, 0, LG_MSG_GRAPH_CLEANUP_FAILED,
233933
-                   "cond wait failed ");
233933
-    }
233933
-
233933
-    ret = pthread_mutex_unlock(&graph->mutex);
233933
-    if (ret != 0) {
233933
-        gf_msg("glusterfs", GF_LOG_ERROR, EAGAIN, LG_MSG_GRAPH_CLEANUP_FAILED,
233933
-               "Failed to release a lock");
233933
-    }
233933
-
233933
-    /* Though we got a child down on top xlator, we have to wait until
233933
-     * all the notifier to exit. Because there should not be any threads
233933
-     * that access xl variables.
233933
-     */
233933
-    pthread_mutex_lock(&ctx->notify_lock);
233933
-    {
233933
-        while (ctx->notifying)
233933
-            pthread_cond_wait(&ctx->notify_cond, &ctx->notify_lock);
233933
-    }
233933
-    pthread_mutex_unlock(&ctx->notify_lock);
233933
-
233933
-    glusterfs_graph_fini(graph);
233933
-    glusterfs_graph_destroy(graph);
233933
-out:
233933
-    return NULL;
233933
-}
233933
-
233933
-glusterfs_graph_t *
233933
-glusterfs_muxsvc_setup_parent_graph(glusterfs_ctx_t *ctx, char *name,
233933
-                                    char *type)
233933
-{
233933
-    glusterfs_graph_t *parent_graph = NULL;
233933
-    xlator_t *ixl = NULL;
233933
-    int ret = -1;
233933
-    parent_graph = GF_CALLOC(1, sizeof(*parent_graph),
233933
-                             gf_common_mt_glusterfs_graph_t);
233933
-    if (!parent_graph)
233933
-        goto out;
233933
-
233933
-    INIT_LIST_HEAD(&parent_graph->list);
233933
-
233933
-    ctx->active = parent_graph;
233933
-    ixl = GF_CALLOC(1, sizeof(*ixl), gf_common_mt_xlator_t);
233933
-    if (!ixl)
233933
-        goto out;
233933
-
233933
-    ixl->ctx = ctx;
233933
-    ixl->graph = parent_graph;
233933
-    ixl->options = dict_new();
233933
-    if (!ixl->options)
233933
-        goto out;
233933
-
233933
-    ixl->name = gf_strdup(name);
233933
-    if (!ixl->name)
233933
-        goto out;
233933
-
233933
-    ixl->is_autoloaded = 1;
233933
-
233933
-    if (xlator_set_type(ixl, type) == -1) {
233933
-        gf_msg("glusterfs", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED,
233933
-               "%s (%s) set type failed", name, type);
233933
-        goto out;
233933
-    }
233933
-
233933
-    glusterfs_graph_set_first(parent_graph, ixl);
233933
-    parent_graph->top = ixl;
233933
-    ixl = NULL;
233933
-
233933
-    gettimeofday(&parent_graph->dob, NULL);
233933
-    fill_uuid(parent_graph->graph_uuid, 128);
233933
-    parent_graph->id = ctx->graph_id++;
233933
-    ret = 0;
233933
-out:
233933
-    if (ixl)
233933
-        xlator_destroy(ixl);
233933
-
233933
-    if (ret) {
233933
-        glusterfs_muxsvc_cleanup_parent(ctx, parent_graph);
233933
-        parent_graph = NULL;
233933
-    }
233933
-    return parent_graph;
233933
-}
233933
-
233933
-int
233933
-glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)
233933
-{
233933
-    xlator_t *last_xl = NULL;
233933
-    glusterfs_graph_t *graph = NULL;
233933
-    glusterfs_graph_t *parent_graph = NULL;
233933
-    pthread_t clean_graph = {
233933
-        0,
233933
-    };
233933
-    int ret = -1;
233933
-    xlator_t *xl = NULL;
233933
-
233933
-    if (!ctx || !ctx->active || !volfile_obj)
233933
-        goto out;
233933
-    parent_graph = ctx->active;
233933
-    graph = volfile_obj->graph;
233933
-    if (graph && graph->first)
233933
-        xl = graph->first;
233933
-
233933
-    last_xl = graph->last_xl;
233933
-    if (last_xl)
233933
-        last_xl->next = NULL;
233933
-    if (!xl || xl->cleanup_starting)
233933
-        goto out;
233933
-
233933
-    xl->cleanup_starting = 1;
233933
-    gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED,
233933
-           "detaching child %s", volfile_obj->vol_id);
233933
-
233933
-    list_del_init(&volfile_obj->volfile_list);
233933
-    glusterfs_mux_xlator_unlink(parent_graph->top, xl);
233933
-    parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
233933
-    parent_graph->xl_count -= graph->xl_count;
233933
-    parent_graph->leaf_count -= graph->leaf_count;
233933
-    default_notify(xl, GF_EVENT_PARENT_DOWN, xl);
233933
-    parent_graph->id++;
233933
-    ret = 0;
233933
-out:
233933
-    if (!ret) {
233933
-        list_del_init(&volfile_obj->volfile_list);
233933
-        if (graph) {
233933
-            ret = gf_thread_create_detached(
233933
-                &clean_graph, glusterfs_graph_cleanup, graph, "graph_clean");
233933
-            if (ret) {
233933
-                gf_msg("glusterfs", GF_LOG_ERROR, EINVAL,
233933
-                       LG_MSG_GRAPH_CLEANUP_FAILED,
233933
-                       "%s failed to create clean "
233933
-                       "up thread",
233933
-                       volfile_obj->vol_id);
233933
-                ret = 0;
233933
-            }
233933
-        }
233933
-        GF_FREE(volfile_obj);
233933
-    }
233933
-    return ret;
233933
-}
233933
-
233933
-int
233933
-glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
233933
-                                   char *volfile_id, char *checksum)
233933
-{
233933
-    glusterfs_graph_t *graph = NULL;
233933
-    glusterfs_graph_t *parent_graph = NULL;
233933
-    glusterfs_graph_t *clean_graph = NULL;
233933
-    int ret = -1;
233933
-    xlator_t *xl = NULL;
233933
-    xlator_t *last_xl = NULL;
233933
-    gf_volfile_t *volfile_obj = NULL;
233933
-    pthread_t thread_id = {
233933
-        0,
233933
-    };
233933
-
233933
-    if (!ctx)
233933
-        goto out;
233933
-    parent_graph = ctx->active;
233933
-    graph = glusterfs_graph_construct(fp);
233933
-    if (!graph) {
233933
-        gf_msg("glusterfsd", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED,
233933
-               "failed to construct the graph");
233933
-        goto out;
233933
-    }
233933
-    graph->last_xl = glusterfs_get_last_xlator(graph);
233933
-
233933
-    for (xl = graph->first; xl; xl = xl->next) {
233933
-        if (strcmp(xl->type, "mount/fuse") == 0) {
233933
-            gf_msg("glusterfsd", GF_LOG_ERROR, EINVAL,
233933
-                   LG_MSG_GRAPH_ATTACH_FAILED,
233933
-                   "fuse xlator cannot be specified in volume file");
233933
-            goto out;
233933
-        }
233933
-    }
233933
-
233933
-    graph->leaf_count = glusterfs_count_leaves(glusterfs_root(graph));
233933
-    xl = graph->first;
233933
-    /* TODO memory leaks everywhere need to free graph in case of error */
233933
-    if (glusterfs_graph_prepare(graph, ctx, xl->name)) {
233933
-        gf_msg("glusterfsd", GF_LOG_WARNING, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED,
233933
-               "failed to prepare graph for xlator %s", xl->name);
233933
-        ret = -1;
233933
-        goto out;
233933
-    } else if (glusterfs_graph_init(graph)) {
233933
-        gf_msg("glusterfsd", GF_LOG_WARNING, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED,
233933
-               "failed to initialize graph for xlator %s", xl->name);
233933
-        ret = -1;
233933
-        goto out;
233933
-    } else if (glusterfs_graph_parent_up(graph)) {
233933
-        gf_msg("glusterfsd", GF_LOG_WARNING, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED,
233933
-               "failed to link the graphs for xlator %s ", xl->name);
233933
-        ret = -1;
233933
-        goto out;
233933
-    }
233933
-
233933
-    if (!parent_graph) {
233933
-        parent_graph = glusterfs_muxsvc_setup_parent_graph(ctx, "glustershd",
233933
-                                                           "debug/io-stats");
233933
-        if (!parent_graph)
233933
-            goto out;
233933
-        ((xlator_t *)parent_graph->top)->next = xl;
233933
-        clean_graph = parent_graph;
233933
-    } else {
233933
-        last_xl = parent_graph->last_xl;
233933
-        if (last_xl)
233933
-            last_xl->next = xl;
233933
-        xl->prev = last_xl;
233933
-    }
233933
-    parent_graph->last_xl = graph->last_xl;
233933
-
233933
-    ret = glusterfs_xlator_link(parent_graph->top, xl);
233933
-    if (ret) {
233933
-        gf_msg("graph", GF_LOG_ERROR, 0, LG_MSG_EVENT_NOTIFY_FAILED,
233933
-               "parent up notification failed");
233933
-        goto out;
233933
-    }
233933
-    parent_graph->xl_count += graph->xl_count;
233933
-    parent_graph->leaf_count += graph->leaf_count;
233933
-    parent_graph->id++;
233933
-
233933
-    if (!volfile_obj) {
233933
-        volfile_obj = GF_CALLOC(1, sizeof(gf_volfile_t), gf_common_volfile_t);
233933
-        if (!volfile_obj) {
233933
-            ret = -1;
233933
-            goto out;
233933
-        }
233933
-    }
233933
-
233933
-    graph->used = 1;
233933
-    parent_graph->id++;
233933
-    list_add(&graph->list, &ctx->graphs);
233933
-    INIT_LIST_HEAD(&volfile_obj->volfile_list);
233933
-    volfile_obj->graph = graph;
233933
-    snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s",
233933
-             volfile_id);
233933
-    memcpy(volfile_obj->volfile_checksum, checksum,
233933
-           sizeof(volfile_obj->volfile_checksum));
233933
-    list_add_tail(&volfile_obj->volfile_list, &ctx->volfile_list);
233933
-
233933
-    gf_log_dump_graph(fp, graph);
233933
-    graph = NULL;
233933
-
233933
-    ret = 0;
233933
-out:
233933
-    if (ret) {
233933
-        if (graph) {
233933
-            gluster_graph_take_reference(graph->first);
233933
-            ret = gf_thread_create_detached(&thread_id, glusterfs_graph_cleanup,
233933
-                                            graph, "graph_clean");
233933
-            if (ret) {
233933
-                gf_msg("glusterfs", GF_LOG_ERROR, EINVAL,
233933
-                       LG_MSG_GRAPH_CLEANUP_FAILED,
233933
-                       "%s failed to create clean "
233933
-                       "up thread",
233933
-                       volfile_id);
233933
-                ret = 0;
233933
-            }
233933
-        }
233933
-        if (clean_graph)
233933
-            glusterfs_muxsvc_cleanup_parent(ctx, clean_graph);
233933
-    }
233933
-    return ret;
233933
-}
233933
-
233933
-int
233933
-glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
233933
-                                  gf_volfile_t *volfile_obj, char *checksum)
233933
-{
233933
-    glusterfs_graph_t *oldvolfile_graph = NULL;
233933
-    glusterfs_graph_t *newvolfile_graph = NULL;
233933
-
233933
-    int ret = -1;
233933
-
233933
-    if (!ctx) {
233933
-        gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, 0, LG_MSG_CTX_NULL,
233933
-               "ctx is NULL");
233933
-        goto out;
233933
-    }
233933
-
233933
-    /* Change the message id */
233933
-    if (!volfile_obj) {
233933
-        gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, 0, LG_MSG_CTX_NULL,
233933
-               "failed to get volfile object");
233933
-        goto out;
233933
-    }
233933
-
233933
-    oldvolfile_graph = volfile_obj->graph;
233933
-    if (!oldvolfile_graph) {
233933
-        goto out;
233933
-    }
233933
-
233933
-    newvolfile_graph = glusterfs_graph_construct(newvolfile_fp);
233933
-
233933
-    if (!newvolfile_graph) {
233933
-        goto out;
233933
-    }
233933
-    newvolfile_graph->last_xl = glusterfs_get_last_xlator(newvolfile_graph);
233933
-
233933
-    glusterfs_graph_prepare(newvolfile_graph, ctx, newvolfile_graph->first);
233933
-
233933
-    if (!is_graph_topology_equal(oldvolfile_graph, newvolfile_graph)) {
233933
-        ret = glusterfs_process_svc_detach(ctx, volfile_obj);
233933
-        if (ret) {
233933
-            gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL,
233933
-                   LG_MSG_GRAPH_CLEANUP_FAILED,
233933
-                   "Could not detach "
233933
-                   "old graph. Aborting the reconfiguration operation");
233933
-            goto out;
233933
-        }
233933
-        ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp,
233933
-                                                 volfile_obj->vol_id, checksum);
233933
-        goto out;
233933
-    }
233933
-
233933
-    gf_msg_debug("glusterfsd-mgmt", 0,
233933
-                 "Only options have changed in the"
233933
-                 " new graph");
233933
-
233933
-    ret = glusterfs_graph_reconfigure(oldvolfile_graph, newvolfile_graph);
233933
-    if (ret) {
233933
-        gf_msg_debug("glusterfsd-mgmt", 0,
233933
-                     "Could not reconfigure "
233933
-                     "new options in old graph");
233933
-        goto out;
233933
-    }
233933
-    memcpy(volfile_obj->volfile_checksum, checksum,
233933
-           sizeof(volfile_obj->volfile_checksum));
233933
-
233933
-    ret = 0;
233933
-out:
233933
-
233933
-    if (newvolfile_graph)
233933
-        glusterfs_graph_destroy(newvolfile_graph);
233933
-
233933
-    return ret;
233933
-}
233933
diff --git a/libglusterfs/src/graph.y b/libglusterfs/src/graph.y
233933
index c60ff38..5b92985 100644
233933
--- a/libglusterfs/src/graph.y
233933
+++ b/libglusterfs/src/graph.y
233933
@@ -542,9 +542,6 @@ glusterfs_graph_new ()
233933
 
233933
         INIT_LIST_HEAD (&graph->list);
233933
 
233933
-        pthread_mutex_init(&graph->mutex, NULL);
233933
-        pthread_cond_init(&graph->child_down_cond, NULL);
233933
-
233933
         gettimeofday (&graph->dob, NULL);
233933
 
233933
         return graph;
233933
diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
233933
index 05f93b4..4dca7de 100644
233933
--- a/libglusterfs/src/libglusterfs.sym
233933
+++ b/libglusterfs/src/libglusterfs.sym
233933
@@ -1155,8 +1155,3 @@ gf_changelog_register_generic
233933
 gf_gfid_generate_from_xxh64
233933
 find_xlator_option_in_cmd_args_t
233933
 gf_d_type_from_ia_type
233933
-glusterfs_graph_fini
233933
-glusterfs_process_svc_attach_volfp
233933
-glusterfs_mux_volfile_reconfigure
233933
-glusterfs_process_svc_detach
233933
-mgmt_is_multiplexed_daemon
233933
diff --git a/libglusterfs/src/statedump.c b/libglusterfs/src/statedump.c
233933
index 0cf80c0..d18b50f 100644
233933
--- a/libglusterfs/src/statedump.c
233933
+++ b/libglusterfs/src/statedump.c
233933
@@ -810,8 +810,7 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
233933
     if (!ctx)
233933
         goto out;
233933
 
233933
-    if (!mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name) &&
233933
-        (ctx && ctx->active)) {
233933
+    if (ctx && ctx->active) {
233933
         top = ctx->active->first;
233933
         for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
233933
             brick_count++;
233933
diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
233933
index 022c3ed..6bd4f09 100644
233933
--- a/libglusterfs/src/xlator.c
233933
+++ b/libglusterfs/src/xlator.c
233933
@@ -1470,19 +1470,3 @@ gluster_graph_take_reference(xlator_t *tree)
233933
     }
233933
     return;
233933
 }
233933
-
233933
-gf_boolean_t
233933
-mgmt_is_multiplexed_daemon(char *name)
233933
-{
233933
-    const char *mux_daemons[] = {"glustershd", NULL};
233933
-    int i;
233933
-
233933
-    if (!name)
233933
-        return _gf_false;
233933
-
233933
-    for (i = 0; mux_daemons[i]; i++) {
233933
-        if (!strcmp(name, mux_daemons[i]))
233933
-            return _gf_true;
233933
-    }
233933
-    return _gf_false;
233933
-}
233933
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
233933
index 7275d75..779878f 100644
233933
--- a/rpc/rpc-lib/src/protocol-common.h
233933
+++ b/rpc/rpc-lib/src/protocol-common.h
233933
@@ -245,8 +245,6 @@ enum glusterd_brick_procnum {
233933
     GLUSTERD_NODE_BITROT,
233933
     GLUSTERD_BRICK_ATTACH,
233933
     GLUSTERD_DUMP_METRICS,
233933
-    GLUSTERD_SVC_ATTACH,
233933
-    GLUSTERD_SVC_DETACH,
233933
     GLUSTERD_BRICK_MAXVALUE,
233933
 };
233933
 
233933
diff --git a/tests/basic/glusterd/heald.t b/tests/basic/glusterd/heald.t
233933
index 7dae3c3..ca112ad 100644
233933
--- a/tests/basic/glusterd/heald.t
233933
+++ b/tests/basic/glusterd/heald.t
233933
@@ -7,16 +7,11 @@
233933
 # Covers enable/disable at the moment. Will be enhanced later to include
233933
 # the other commands as well.
233933
 
233933
-function is_pid_running {
233933
-    local pid=$1
233933
-    num=`ps auxww | grep glustershd | grep $pid | grep -v grep | wc -l`
233933
-    echo $num
233933
-}
233933
-
233933
 cleanup;
233933
 TEST glusterd
233933
 TEST pidof glusterd
233933
 
233933
+volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol"
233933
 #Commands should fail when volume doesn't exist
233933
 TEST ! $CLI volume heal non-existent-volume enable
233933
 TEST ! $CLI volume heal non-existent-volume disable
233933
@@ -25,55 +20,51 @@ TEST ! $CLI volume heal non-existent-volume disable
233933
 # volumes
233933
 TEST $CLI volume create dist $H0:$B0/dist
233933
 TEST $CLI volume start dist
233933
-TEST "[ -z $(get_shd_process_pid dist)]"
233933
+TEST "[ -z $(get_shd_process_pid)]"
233933
 TEST ! $CLI volume heal dist enable
233933
 TEST ! $CLI volume heal dist disable
233933
 
233933
 # Commands should work on replicate/disperse volume.
233933
 TEST $CLI volume create r2 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
233933
-TEST "[ -z $(get_shd_process_pid r2)]"
233933
+TEST "[ -z $(get_shd_process_pid)]"
233933
 TEST $CLI volume start r2
233933
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid r2
233933
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
233933
 TEST $CLI volume heal r2 enable
233933
 EXPECT "enable" volume_option r2 "cluster.self-heal-daemon"
233933
-volfiler2=$(gluster system:: getwd)"/vols/r2/r2-shd.vol"
233933
-EXPECT "enable" volgen_volume_option $volfiler2 r2-replicate-0 cluster replicate self-heal-daemon
233933
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid r2
233933
-pid=$( get_shd_process_pid r2 )
233933
+EXPECT "enable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
233933
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
233933
 TEST $CLI volume heal r2 disable
233933
 EXPECT "disable" volume_option r2 "cluster.self-heal-daemon"
233933
-EXPECT "disable" volgen_volume_option $volfiler2 r2-replicate-0 cluster replicate self-heal-daemon
233933
-EXPECT "1" is_pid_running $pid
233933
+EXPECT "disable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
233933
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
233933
 
233933
 # Commands should work on disperse volume.
233933
 TEST $CLI volume create ec2 disperse 3 redundancy 1 $H0:$B0/ec2_0 $H0:$B0/ec2_1 $H0:$B0/ec2_2
233933
 TEST $CLI volume start ec2
233933
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid ec2
233933
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
233933
 TEST $CLI volume heal ec2 enable
233933
 EXPECT "enable" volume_option ec2 "cluster.disperse-self-heal-daemon"
233933
-volfileec2=$(gluster system:: getwd)"/vols/ec2/ec2-shd.vol"
233933
-EXPECT "enable" volgen_volume_option $volfileec2 ec2-disperse-0 cluster disperse self-heal-daemon
233933
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid ec2
233933
-pid=$(get_shd_process_pid ec2)
233933
+EXPECT "enable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
233933
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
233933
 TEST $CLI volume heal ec2 disable
233933
 EXPECT "disable" volume_option ec2 "cluster.disperse-self-heal-daemon"
233933
-EXPECT "disable" volgen_volume_option $volfileec2 ec2-disperse-0 cluster disperse self-heal-daemon
233933
-EXPECT "1" is_pid_running $pid
233933
+EXPECT "disable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
233933
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
233933
 
233933
 #Check that shd graph is rewritten correctly on volume stop/start
233933
-EXPECT "Y" volgen_volume_exists $volfileec2 ec2-disperse-0 cluster disperse
233933
-
233933
-EXPECT "Y" volgen_volume_exists $volfiler2 r2-replicate-0 cluster replicate
233933
+EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
233933
+EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
233933
 TEST $CLI volume stop r2
233933
-EXPECT "Y" volgen_volume_exists $volfileec2 ec2-disperse-0 cluster disperse
233933
+EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
233933
+EXPECT "N" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
233933
 TEST $CLI volume stop ec2
233933
 # When both the volumes are stopped glustershd volfile is not modified just the
233933
 # process is stopped
233933
-TEST "[ -z $(get_shd_process_pid dist) ]"
233933
-TEST "[ -z $(get_shd_process_pid ec2) ]"
233933
+TEST "[ -z $(get_shd_process_pid) ]"
233933
 
233933
 TEST $CLI volume start r2
233933
-EXPECT "Y" volgen_volume_exists $volfiler2 r2-replicate-0 cluster replicate
233933
+EXPECT "N" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
233933
+EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
233933
 
233933
 TEST $CLI volume set r2 self-heal-daemon on
233933
 TEST $CLI volume set r2 cluster.self-heal-daemon off
233933
diff --git a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
233933
index e6e65c4..cdb1a33 100644
233933
--- a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
233933
+++ b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
233933
@@ -55,9 +55,9 @@ TEST kill_glusterd 1
233933
 #Bring back 1st glusterd
233933
 TEST $glusterd_1
233933
 
233933
-# We need to wait till PROCESS_UP_TIMEOUT and then check shd service started
233933
-#on node 2, because once glusterd regains quorum, it will restart all volume
233933
-#level daemons
233933
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2
233933
+# We need to wait till PROCESS_UP_TIMEOUT and then check shd service does not
233933
+# come up on node 2
233933
+sleep $PROCESS_UP_TIMEOUT
233933
+EXPECT "N" shd_up_status_2
233933
 
233933
 cleanup;
233933
diff --git a/tests/volume.rc b/tests/volume.rc
233933
index 022d972..76a8fd4 100644
233933
--- a/tests/volume.rc
233933
+++ b/tests/volume.rc
233933
@@ -237,13 +237,11 @@ function ec_child_up_count_shd {
233933
 }
233933
 
233933
 function get_shd_process_pid {
233933
-        local vol=$1
233933
-        ps auxww | grep "process-name\ glustershd" | awk '{print $2}' | head -1
233933
+        ps auxww | grep glusterfs | grep -E "glustershd/glustershd.pid" | awk '{print $2}' | head -1
233933
 }
233933
 
233933
 function generate_shd_statedump {
233933
-        local vol=$1
233933
-        generate_statedump $(get_shd_process_pid $vol)
233933
+        generate_statedump $(get_shd_process_pid)
233933
 }
233933
 
233933
 function generate_nfs_statedump {
233933
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
233933
index 11ae189..5fe5156 100644
233933
--- a/xlators/mgmt/glusterd/src/Makefile.am
233933
+++ b/xlators/mgmt/glusterd/src/Makefile.am
233933
@@ -18,12 +18,11 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
233933
 	glusterd-locks.c glusterd-snapshot.c glusterd-mgmt-handler.c \
233933
 	glusterd-mgmt.c glusterd-peer-utils.c glusterd-statedump.c \
233933
 	glusterd-snapshot-utils.c glusterd-conn-mgmt.c \
233933
-	glusterd-proc-mgmt.c glusterd-svc-mgmt.c \
233933
+	glusterd-proc-mgmt.c glusterd-svc-mgmt.c glusterd-shd-svc.c \
233933
 	glusterd-nfs-svc.c glusterd-quotad-svc.c glusterd-svc-helper.c \
233933
 	glusterd-conn-helper.c glusterd-snapd-svc.c glusterd-snapd-svc-helper.c \
233933
 	glusterd-bitd-svc.c glusterd-scrub-svc.c glusterd-server-quorum.c \
233933
 	glusterd-reset-brick.c glusterd-tierd-svc.c glusterd-tierd-svc-helper.c \
233933
-        glusterd-shd-svc.c glusterd-shd-svc-helper.c \
233933
         glusterd-gfproxyd-svc.c glusterd-gfproxyd-svc-helper.c
233933
 
233933
 
233933
@@ -39,12 +38,11 @@ noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
233933
 	glusterd-mgmt.h glusterd-messages.h glusterd-peer-utils.h \
233933
 	glusterd-statedump.h glusterd-snapshot-utils.h glusterd-geo-rep.h \
233933
 	glusterd-conn-mgmt.h glusterd-conn-helper.h glusterd-proc-mgmt.h \
233933
-	glusterd-svc-mgmt.h glusterd-nfs-svc.h \
233933
+	glusterd-svc-mgmt.h glusterd-shd-svc.h glusterd-nfs-svc.h \
233933
 	glusterd-quotad-svc.h glusterd-svc-helper.h glusterd-snapd-svc.h \
233933
 	glusterd-snapd-svc-helper.h glusterd-rcu.h glusterd-bitd-svc.h \
233933
 	glusterd-scrub-svc.h glusterd-server-quorum.h glusterd-errno.h \
233933
 	glusterd-tierd-svc.h glusterd-tierd-svc-helper.h \
233933
-        glusterd-shd-svc.h glusterd-shd-svc-helper.h \
233933
         glusterd-gfproxyd-svc.h glusterd-gfproxyd-svc-helper.h \
233933
 	$(CONTRIBDIR)/userspace-rcu/rculist-extra.h
233933
 
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
233933
index 042a805..ad9a572 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
233933
@@ -2863,7 +2863,7 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
233933
     }
233933
 
233933
     if (start_remove && volinfo->status == GLUSTERD_STATUS_STARTED) {
233933
-        ret = glusterd_svcs_reconfigure(volinfo);
233933
+        ret = glusterd_svcs_reconfigure();
233933
         if (ret) {
233933
             gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL,
233933
                    "Unable to reconfigure NFS-Server");
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
233933
index 16eefa1..c6d7a00 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
233933
@@ -138,45 +138,3 @@ glusterd_conn_build_socket_filepath(char *rundir, uuid_t uuid, char *socketpath,
233933
     glusterd_set_socket_filepath(sockfilepath, socketpath, len);
233933
     return 0;
233933
 }
233933
-
233933
-int
233933
-__glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
233933
-                                     rpc_clnt_event_t event, void *data)
233933
-{
233933
-    glusterd_conf_t *conf = THIS->private;
233933
-    glusterd_svc_proc_t *mux_proc = mydata;
233933
-    int ret = -1;
233933
-
233933
-    /* Silently ignoring this error, exactly like the current
233933
-     * implementation */
233933
-    if (!mux_proc)
233933
-        return 0;
233933
-
233933
-    if (event == RPC_CLNT_DESTROY) {
233933
-        /*RPC_CLNT_DESTROY will only called after mux_proc detached from the
233933
-         * list. So it is safe to call without lock. Processing
233933
-         * RPC_CLNT_DESTROY under a lock will lead to deadlock.
233933
-         */
233933
-        if (mux_proc->data) {
233933
-            glusterd_volinfo_unref(mux_proc->data);
233933
-            mux_proc->data = NULL;
233933
-        }
233933
-        GF_FREE(mux_proc);
233933
-        ret = 0;
233933
-    } else {
233933
-        pthread_mutex_lock(&conf->attach_lock);
233933
-        {
233933
-            ret = mux_proc->notify(mux_proc, event);
233933
-        }
233933
-        pthread_mutex_unlock(&conf->attach_lock);
233933
-    }
233933
-    return ret;
233933
-}
233933
-
233933
-int
233933
-glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
233933
-                                   rpc_clnt_event_t event, void *data)
233933
-{
233933
-    return glusterd_big_locked_notify(rpc, mydata, event, data,
233933
-                                      __glusterd_muxsvc_conn_common_notify);
233933
-}
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
233933
index d1c4607..602c0ba 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
233933
@@ -43,11 +43,9 @@ glusterd_conn_disconnect(glusterd_conn_t *conn);
233933
 int
233933
 glusterd_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
233933
                             rpc_clnt_event_t event, void *data);
233933
-int
233933
-glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
233933
-                                   rpc_clnt_event_t event, void *data);
233933
 
233933
 int32_t
233933
 glusterd_conn_build_socket_filepath(char *rundir, uuid_t uuid, char *socketpath,
233933
                                     int len);
233933
+
233933
 #endif
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
233933
index b01fd4d..f9c8617 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
233933
@@ -370,7 +370,6 @@ int
233933
 glusterd_gfproxydsvc_restart()
233933
 {
233933
     glusterd_volinfo_t *volinfo = NULL;
233933
-    glusterd_volinfo_t *tmp = NULL;
233933
     int ret = -1;
233933
     xlator_t *this = THIS;
233933
     glusterd_conf_t *conf = NULL;
233933
@@ -381,7 +380,7 @@ glusterd_gfproxydsvc_restart()
233933
     conf = this->private;
233933
     GF_VALIDATE_OR_GOTO(this->name, conf, out);
233933
 
233933
-    cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
233933
+    cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
233933
     {
233933
         /* Start per volume gfproxyd svc */
233933
         if (volinfo->status == GLUSTERD_STATUS_STARTED) {
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
233933
index ac788a0..cb2666b 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
233933
@@ -5940,11 +5940,6 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
233933
 
233933
         GF_FREE(rebal_data);
233933
 
233933
-        fprintf(fp, "Volume%d.shd_svc.online_status: %s\n", count,
233933
-                volinfo->shd.svc.online ? "Online" : "Offline");
233933
-        fprintf(fp, "Volume%d.shd_svc.inited: %s\n", count,
233933
-                volinfo->shd.svc.inited ? "True" : "False");
233933
-
233933
         if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
233933
             ret = glusterd_volume_get_hot_tier_type_str(volinfo,
233933
                                                         &hot_tier_type_str);
233933
@@ -6014,6 +6009,12 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
233933
 
233933
     fprintf(fp, "\n[Services]\n");
233933
 
233933
+    if (priv->shd_svc.inited) {
233933
+        fprintf(fp, "svc%d.name: %s\n", ++count, priv->shd_svc.name);
233933
+        fprintf(fp, "svc%d.online_status: %s\n\n", count,
233933
+                priv->shd_svc.online ? "Online" : "Offline");
233933
+    }
233933
+
233933
     if (priv->nfs_svc.inited) {
233933
         fprintf(fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name);
233933
         fprintf(fp, "svc%d.online_status: %s\n\n", count,
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
233933
index 1ba58c3..5599a63 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
233933
@@ -30,7 +30,6 @@
233933
 #include "rpcsvc.h"
233933
 #include "rpc-common-xdr.h"
233933
 #include "glusterd-gfproxyd-svc-helper.h"
233933
-#include "glusterd-shd-svc-helper.h"
233933
 
233933
 extern struct rpc_clnt_program gd_peer_prog;
233933
 extern struct rpc_clnt_program gd_mgmt_prog;
233933
@@ -329,26 +328,6 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,
233933
         goto out;
233933
     }
233933
 
233933
-    volid_ptr = strstr(volume_id, "shd/");
233933
-    if (volid_ptr) {
233933
-        volid_ptr = strchr(volid_ptr, '/');
233933
-        if (!volid_ptr) {
233933
-            ret = -1;
233933
-            goto out;
233933
-        }
233933
-        volid_ptr++;
233933
-
233933
-        ret = glusterd_volinfo_find(volid_ptr, &volinfo);
233933
-        if (ret == -1) {
233933
-            gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo");
233933
-            goto out;
233933
-        }
233933
-
233933
-        glusterd_svc_build_shd_volfile_path(volinfo, path, path_len);
233933
-        ret = 0;
233933
-        goto out;
233933
-    }
233933
-
233933
     volid_ptr = strstr(volume_id, "/snaps/");
233933
     if (volid_ptr) {
233933
         ret = get_snap_volname_and_volinfo(volid_ptr, &volname, &volinfo);
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-mem-types.h b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
233933
index 17052ce..7a784db 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-mem-types.h
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
233933
@@ -51,7 +51,6 @@ typedef enum gf_gld_mem_types_ {
233933
     gf_gld_mt_missed_snapinfo_t,
233933
     gf_gld_mt_snap_create_args_t,
233933
     gf_gld_mt_glusterd_brick_proc_t,
233933
-    gf_gld_mt_glusterd_svc_proc_t,
233933
     gf_gld_mt_end,
233933
 } gf_gld_mem_types_t;
233933
 #endif
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
233933
index 424e15f..c7b3ca8 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
233933
@@ -298,8 +298,6 @@ GLFS_MSGID(
233933
     GD_MSG_LOCALTIME_LOGGING_ENABLE, GD_MSG_LOCALTIME_LOGGING_DISABLE,
233933
     GD_MSG_PORTS_EXHAUSTED, GD_MSG_CHANGELOG_GET_FAIL,
233933
     GD_MSG_MANAGER_FUNCTION_FAILED, GD_MSG_NFS_GANESHA_DISABLED,
233933
-    GD_MSG_GANESHA_NOT_RUNNING, GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL,
233933
-    GD_MSG_SHD_START_FAIL, GD_MSG_SHD_OBJ_GET_FAIL, GD_MSG_SVC_ATTACH_FAIL,
233933
-    GD_MSG_ATTACH_INFO, GD_MSG_DETACH_INFO, GD_MSG_SVC_DETACH_FAIL);
233933
+    GD_MSG_GANESHA_NOT_RUNNING, GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL);
233933
 
233933
 #endif /* !_GLUSTERD_MESSAGES_H_ */
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
233933
index 9ea695e..0d29de2 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
233933
@@ -44,7 +44,6 @@
233933
 #include "glusterd-snapshot-utils.h"
233933
 #include "glusterd-svc-mgmt.h"
233933
 #include "glusterd-svc-helper.h"
233933
-#include "glusterd-shd-svc-helper.h"
233933
 #include "glusterd-shd-svc.h"
233933
 #include "glusterd-nfs-svc.h"
233933
 #include "glusterd-quotad-svc.h"
233933
@@ -2225,11 +2224,6 @@ glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
233933
     if (ret)
233933
         goto out;
233933
 
233933
-    svc = &(volinfo->shd.svc);
233933
-    ret = svc->reconfigure(volinfo);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
     ret = glusterd_create_volfiles_and_notify_services(volinfo);
233933
     if (ret) {
233933
         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
233933
@@ -2244,7 +2238,7 @@ glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
233933
         goto out;
233933
 
233933
     if (GLUSTERD_STATUS_STARTED == volinfo->status) {
233933
-        ret = glusterd_svcs_reconfigure(volinfo);
233933
+        ret = glusterd_svcs_reconfigure();
233933
         if (ret)
233933
             goto out;
233933
     }
233933
@@ -2700,11 +2694,6 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
233933
                 if (ret)
233933
                     goto out;
233933
 
233933
-                svc = &(volinfo->shd.svc);
233933
-                ret = svc->reconfigure(volinfo);
233933
-                if (ret)
233933
-                    goto out;
233933
-
233933
                 ret = glusterd_create_volfiles_and_notify_services(volinfo);
233933
                 if (ret) {
233933
                     gf_msg(this->name, GF_LOG_ERROR, 0,
233933
@@ -2718,7 +2707,7 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
233933
                 }
233933
             }
233933
             if (svcs_reconfigure) {
233933
-                ret = glusterd_svcs_reconfigure(NULL);
233933
+                ret = glusterd_svcs_reconfigure();
233933
                 if (ret) {
233933
                     gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
233933
                            "Unable to restart "
233933
@@ -3103,11 +3092,6 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
233933
         if (ret)
233933
             goto out;
233933
 
233933
-        svc = &(volinfo->shd.svc);
233933
-        ret = svc->reconfigure(volinfo);
233933
-        if (ret)
233933
-            goto out;
233933
-
233933
         ret = glusterd_create_volfiles_and_notify_services(volinfo);
233933
         if (ret) {
233933
             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
233933
@@ -3123,7 +3107,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
233933
             goto out;
233933
 
233933
         if (GLUSTERD_STATUS_STARTED == volinfo->status) {
233933
-            ret = glusterd_svcs_reconfigure(volinfo);
233933
+            ret = glusterd_svcs_reconfigure();
233933
             if (ret) {
233933
                 gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
233933
                        "Unable to restart services");
233933
@@ -3156,11 +3140,6 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
233933
             if (ret)
233933
                 goto out;
233933
 
233933
-            svc = &(volinfo->shd.svc);
233933
-            ret = svc->reconfigure(volinfo);
233933
-            if (ret)
233933
-                goto out;
233933
-
233933
             ret = glusterd_create_volfiles_and_notify_services(volinfo);
233933
             if (ret) {
233933
                 gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
233933
@@ -3176,7 +3155,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
233933
                 goto out;
233933
 
233933
             if (GLUSTERD_STATUS_STARTED == volinfo->status) {
233933
-                ret = glusterd_svcs_reconfigure(volinfo);
233933
+                ret = glusterd_svcs_reconfigure();
233933
                 if (ret) {
233933
                     gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
233933
                            "Unable to restart services");
233933
@@ -3383,7 +3362,7 @@ glusterd_op_stats_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
233933
         goto out;
233933
 
233933
     if (GLUSTERD_STATUS_STARTED == volinfo->status) {
233933
-        ret = glusterd_svcs_reconfigure(volinfo);
233933
+        ret = glusterd_svcs_reconfigure();
233933
         if (ret)
233933
             goto out;
233933
     }
233933
@@ -3666,6 +3645,14 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
233933
         other_count++;
233933
         node_count++;
233933
 
233933
+    } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
233933
+        ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict, 0,
233933
+                                        vol_opts);
233933
+        if (ret)
233933
+            goto out;
233933
+        other_count++;
233933
+        node_count++;
233933
+
233933
     } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
233933
         ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, 0,
233933
                                         vol_opts);
233933
@@ -3699,12 +3686,6 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
233933
             goto out;
233933
         other_count++;
233933
         node_count++;
233933
-    } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
233933
-        ret = glusterd_add_shd_to_dict(volinfo, rsp_dict, other_index);
233933
-        if (ret)
233933
-            goto out;
233933
-        other_count++;
233933
-        node_count++;
233933
     } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
233933
         ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick);
233933
         if (ret)
233933
@@ -3767,19 +3748,6 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
233933
                 node_count++;
233933
             }
233933
 
233933
-            if (glusterd_is_shd_compatible_volume(volinfo)) {
233933
-                shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
233933
-                if (shd_enabled) {
233933
-                    ret = glusterd_add_shd_to_dict(volinfo, rsp_dict,
233933
-                                                   other_index);
233933
-                    if (ret)
233933
-                        goto out;
233933
-                    other_count++;
233933
-                    other_index++;
233933
-                    node_count++;
233933
-                }
233933
-            }
233933
-
233933
             nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
233933
                                                 _gf_false);
233933
             if (!nfs_disabled) {
233933
@@ -3792,6 +3760,18 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
233933
                 node_count++;
233933
             }
233933
 
233933
+            if (glusterd_is_shd_compatible_volume(volinfo))
233933
+                shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
233933
+            if (shd_enabled) {
233933
+                ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict,
233933
+                                                other_index, vol_opts);
233933
+                if (ret)
233933
+                    goto out;
233933
+                other_count++;
233933
+                node_count++;
233933
+                other_index++;
233933
+            }
233933
+
233933
             if (glusterd_is_volume_quota_enabled(volinfo)) {
233933
                 ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict,
233933
                                                 other_index, vol_opts);
233933
@@ -6904,18 +6884,16 @@ glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
233933
     int ret = -1;
233933
     glusterd_conf_t *priv = NULL;
233933
     xlator_t *this = NULL;
233933
-    glusterd_svc_t *svc = NULL;
233933
 
233933
     this = THIS;
233933
     GF_ASSERT(this);
233933
     priv = this->private;
233933
     GF_ASSERT(priv);
233933
-    svc = &(volinfo->shd.svc);
233933
 
233933
     switch (heal_op) {
233933
         case GF_SHD_OP_INDEX_SUMMARY:
233933
         case GF_SHD_OP_STATISTICS_HEAL_COUNT:
233933
-            if (!svc->online) {
233933
+            if (!priv->shd_svc.online) {
233933
                 if (!rsp_dict) {
233933
                     gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
233933
                            "Received "
233933
@@ -6936,7 +6914,7 @@ glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
233933
             break;
233933
 
233933
         case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
233933
-            if (!svc->online) {
233933
+            if (!priv->shd_svc.online) {
233933
                 if (!rsp_dict) {
233933
                     gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
233933
                            "Received "
233933
@@ -7071,7 +7049,7 @@ glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
233933
         ret = -1;
233933
         goto out;
233933
     } else {
233933
-        pending_node->node = &(volinfo->shd.svc);
233933
+        pending_node->node = &(priv->shd_svc);
233933
         pending_node->type = GD_NODE_SHD;
233933
         cds_list_add_tail(&pending_node->list, selected);
233933
         pending_node = NULL;
233933
@@ -7205,7 +7183,6 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
233933
     glusterd_pending_node_t *pending_node = NULL;
233933
     xlator_t *this = NULL;
233933
     glusterd_conf_t *priv = NULL;
233933
-    glusterd_svc_t *svc = NULL;
233933
 
233933
     GF_ASSERT(dict);
233933
 
233933
@@ -7301,8 +7278,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
233933
 
233933
         ret = 0;
233933
     } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
233933
-        svc = &(volinfo->shd.svc);
233933
-        if (!svc->online) {
233933
+        if (!priv->shd_svc.online) {
233933
             ret = -1;
233933
             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SELF_HEALD_DISABLED,
233933
                    "Self-heal daemon is not running");
233933
@@ -7314,7 +7290,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
233933
             ret = -1;
233933
             goto out;
233933
         }
233933
-        pending_node->node = svc;
233933
+        pending_node->node = &(priv->shd_svc);
233933
         pending_node->type = GD_NODE_SHD;
233933
         pending_node->index = 0;
233933
         cds_list_add_tail(&pending_node->list, selected);
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
233933
deleted file mode 100644
233933
index 9196758..0000000
233933
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
233933
+++ /dev/null
233933
@@ -1,140 +0,0 @@
233933
-/*
233933
-   Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
233933
-   This file is part of GlusterFS.
233933
-
233933
-   This file is licensed to you under your choice of the GNU Lesser
233933
-   General Public License, version 3 or any later version (LGPLv3 or
233933
-   later), or the GNU General Public License, version 2 (GPLv2), in all
233933
-   cases as published by the Free Software Foundation.
233933
-*/
233933
-
233933
-#include "glusterd.h"
233933
-#include "glusterd-utils.h"
233933
-#include "glusterd-shd-svc-helper.h"
233933
-#include "glusterd-messages.h"
233933
-#include "glusterd-volgen.h"
233933
-
233933
-void
233933
-glusterd_svc_build_shd_socket_filepath(glusterd_volinfo_t *volinfo, char *path,
233933
-                                       int path_len)
233933
-{
233933
-    char sockfilepath[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    char rundir[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    int32_t len = 0;
233933
-    glusterd_conf_t *priv = THIS->private;
233933
-
233933
-    if (!priv)
233933
-        return;
233933
-
233933
-    GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
233933
-    len = snprintf(sockfilepath, sizeof(sockfilepath), "%s/run-%s", rundir,
233933
-                   uuid_utoa(MY_UUID));
233933
-    if ((len < 0) || (len >= sizeof(sockfilepath))) {
233933
-        sockfilepath[0] = 0;
233933
-    }
233933
-
233933
-    glusterd_set_socket_filepath(sockfilepath, path, path_len);
233933
-}
233933
-
233933
-void
233933
-glusterd_svc_build_shd_pidfile(glusterd_volinfo_t *volinfo, char *path,
233933
-                               int path_len)
233933
-{
233933
-    char rundir[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    glusterd_conf_t *priv = THIS->private;
233933
-
233933
-    if (!priv)
233933
-        return;
233933
-
233933
-    GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
233933
-
233933
-    snprintf(path, path_len, "%s/%s-shd.pid", rundir, volinfo->volname);
233933
-}
233933
-
233933
-void
233933
-glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
233933
-                                    int path_len)
233933
-{
233933
-    char workdir[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    glusterd_conf_t *priv = THIS->private;
233933
-
233933
-    if (!priv)
233933
-        return;
233933
-
233933
-    GLUSTERD_GET_VOLUME_DIR(workdir, volinfo, priv);
233933
-
233933
-    snprintf(path, path_len, "%s/%s-shd.vol", workdir, volinfo->volname);
233933
-}
233933
-
233933
-void
233933
-glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len)
233933
-{
233933
-    snprintf(logdir, len, "%s/shd/%s", DEFAULT_LOG_FILE_DIRECTORY, volname);
233933
-}
233933
-
233933
-void
233933
-glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len)
233933
-{
233933
-    snprintf(logfile, len, "%s/shd.log", logdir);
233933
-}
233933
-
233933
-void
233933
-glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd)
233933
-{
233933
-    glusterd_svc_proc_t *svc_proc = NULL;
233933
-    glusterd_svc_t *svc = NULL;
233933
-    glusterd_conf_t *conf = NULL;
233933
-    gf_boolean_t need_unref = _gf_false;
233933
-    rpc_clnt_t *rpc = NULL;
233933
-
233933
-    conf = THIS->private;
233933
-    if (!conf)
233933
-        return;
233933
-
233933
-    GF_VALIDATE_OR_GOTO(THIS->name, conf, out);
233933
-    GF_VALIDATE_OR_GOTO(THIS->name, shd, out);
233933
-
233933
-    svc = &shd->svc;
233933
-    shd->attached = _gf_false;
233933
-
233933
-    if (svc->conn.rpc) {
233933
-        rpc_clnt_unref(svc->conn.rpc);
233933
-        svc->conn.rpc = NULL;
233933
-    }
233933
-
233933
-    pthread_mutex_lock(&conf->attach_lock);
233933
-    {
233933
-        svc_proc = svc->svc_proc;
233933
-        svc->svc_proc = NULL;
233933
-        svc->inited = _gf_false;
233933
-        cds_list_del_init(&svc->mux_svc);
233933
-        glusterd_unlink_file(svc->proc.pidfile);
233933
-
233933
-        if (svc_proc && cds_list_empty(&svc_proc->svcs)) {
233933
-            cds_list_del_init(&svc_proc->svc_proc_list);
233933
-            /* We cannot free svc_proc list from here. Because
233933
-             * if there are pending events on the rpc, it will
233933
-             * try to access the corresponding svc_proc, so unrefing
233933
-             * rpc request and then cleaning up the memory is carried
233933
-             * from the notify function upon RPC_CLNT_DESTROY destroy.
233933
-             */
233933
-            need_unref = _gf_true;
233933
-            rpc = svc_proc->rpc;
233933
-            svc_proc->rpc = NULL;
233933
-        }
233933
-    }
233933
-    pthread_mutex_unlock(&conf->attach_lock);
233933
-    /*rpc unref has to be performed outside the lock*/
233933
-    if (need_unref && rpc)
233933
-        rpc_clnt_unref(rpc);
233933
-out:
233933
-    return;
233933
-}
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
233933
deleted file mode 100644
233933
index c70702c..0000000
233933
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
233933
+++ /dev/null
233933
@@ -1,45 +0,0 @@
233933
-/*
233933
-   Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
233933
-   This file is part of GlusterFS.
233933
-
233933
-   This file is licensed to you under your choice of the GNU Lesser
233933
-   General Public License, version 3 or any later version (LGPLv3 or
233933
-   later), or the GNU General Public License, version 2 (GPLv2), in all
233933
-   cases as published by the Free Software Foundation.
233933
-*/
233933
-
233933
-#ifndef _GLUSTERD_SHD_SVC_HELPER_H_
233933
-#define _GLUSTERD_SHD_SVC_HELPER_H_
233933
-
233933
-#include "glusterd.h"
233933
-#include "glusterd-svc-mgmt.h"
233933
-
233933
-void
233933
-glusterd_svc_build_shd_socket_filepath(glusterd_volinfo_t *volinfo, char *path,
233933
-                                       int path_len);
233933
-
233933
-void
233933
-glusterd_svc_build_shd_pidfile(glusterd_volinfo_t *volinfo, char *path,
233933
-                               int path_len);
233933
-
233933
-void
233933
-glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
233933
-                                    int path_len);
233933
-
233933
-void
233933
-glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len);
233933
-
233933
-void
233933
-glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len);
233933
-
233933
-void
233933
-glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd);
233933
-
233933
-int
233933
-glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo,
233933
-                                    glusterd_svc_t *svc, int flags);
233933
-
233933
-int
233933
-glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo);
233933
-
233933
-#endif
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
233933
index 4789843..f5379b0 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
233933
@@ -13,10 +13,9 @@
233933
 #include "glusterd.h"
233933
 #include "glusterd-utils.h"
233933
 #include "glusterd-volgen.h"
233933
+#include "glusterd-svc-mgmt.h"
233933
 #include "glusterd-shd-svc.h"
233933
-#include "glusterd-shd-svc-helper.h"
233933
 #include "glusterd-svc-helper.h"
233933
-#include "glusterd-store.h"
233933
 
233933
 #define GD_SHD_PROCESS_NAME "--process-name"
233933
 char *shd_svc_name = "glustershd";
233933
@@ -24,145 +23,27 @@ char *shd_svc_name = "glustershd";
233933
 void
233933
 glusterd_shdsvc_build(glusterd_svc_t *svc)
233933
 {
233933
-    int ret = -1;
233933
-    ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name);
233933
-    if (ret < 0)
233933
-        return;
233933
-
233933
-    CDS_INIT_LIST_HEAD(&svc->mux_svc);
233933
     svc->manager = glusterd_shdsvc_manager;
233933
     svc->start = glusterd_shdsvc_start;
233933
-    svc->stop = glusterd_shdsvc_stop;
233933
-    svc->reconfigure = glusterd_shdsvc_reconfigure;
233933
+    svc->stop = glusterd_svc_stop;
233933
 }
233933
 
233933
 int
233933
-glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
233933
-                     glusterd_svc_proc_t *mux_svc)
233933
+glusterd_shdsvc_init(glusterd_svc_t *svc)
233933
 {
233933
-    int ret = -1;
233933
-    char rundir[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    char sockpath[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    char pidfile[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    char volfile[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    char logdir[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    char logfile[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    char volfileid[256] = {0};
233933
-    glusterd_svc_t *svc = NULL;
233933
-    glusterd_volinfo_t *volinfo = NULL;
233933
-    glusterd_conf_t *priv = NULL;
233933
-    glusterd_muxsvc_conn_notify_t notify = NULL;
233933
-    xlator_t *this = NULL;
233933
-    char *volfileserver = NULL;
233933
-    int32_t len = 0;
233933
-
233933
-    this = THIS;
233933
-    GF_VALIDATE_OR_GOTO(THIS->name, this, out);
233933
-
233933
-    priv = this->private;
233933
-    GF_VALIDATE_OR_GOTO(this->name, priv, out);
233933
-
233933
-    volinfo = data;
233933
-    GF_VALIDATE_OR_GOTO(this->name, data, out);
233933
-    GF_VALIDATE_OR_GOTO(this->name, mux_svc, out);
233933
-
233933
-    svc = &(volinfo->shd.svc);
233933
-
233933
-    ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name);
233933
-    if (ret < 0)
233933
-        goto out;
233933
-
233933
-    notify = glusterd_muxsvc_common_rpc_notify;
233933
-    glusterd_store_perform_node_state_store(volinfo);
233933
-
233933
-    GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
233933
-    glusterd_svc_create_rundir(rundir);
233933
-
233933
-    glusterd_svc_build_shd_logdir(logdir, volinfo->volname, sizeof(logdir));
233933
-    glusterd_svc_build_shd_logfile(logfile, logdir, sizeof(logfile));
233933
-
233933
-    /* Initialize the connection mgmt */
233933
-    if (mux_conn && mux_svc->rpc) {
233933
-        /* multiplexed svc */
233933
-        svc->conn.frame_timeout = mux_conn->frame_timeout;
233933
-        /* This will be unrefed from glusterd_shd_svcproc_cleanup*/
233933
-        svc->conn.rpc = rpc_clnt_ref(mux_svc->rpc);
233933
-        ret = snprintf(svc->conn.sockpath, sizeof(svc->conn.sockpath), "%s",
233933
-                       mux_conn->sockpath);
233933
-    } else {
233933
-        ret = mkdir_p(logdir, 0755, _gf_true);
233933
-        if ((ret == -1) && (EEXIST != errno)) {
233933
-            gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
233933
-                   "Unable to create logdir %s", logdir);
233933
-            goto out;
233933
-        }
233933
-
233933
-        glusterd_svc_build_shd_socket_filepath(volinfo, sockpath,
233933
-                                               sizeof(sockpath));
233933
-        ret = glusterd_muxsvc_conn_init(&(svc->conn), mux_svc, sockpath, 600,
233933
-                                        notify);
233933
-        if (ret)
233933
-            goto out;
233933
-        /* This will be unrefed when the last svcs is detached from the list */
233933
-        if (!mux_svc->rpc)
233933
-            mux_svc->rpc = rpc_clnt_ref(svc->conn.rpc);
233933
-    }
233933
-
233933
-    /* Initialize the process mgmt */
233933
-    glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile));
233933
-    glusterd_svc_build_shd_volfile_path(volinfo, volfile, PATH_MAX);
233933
-    len = snprintf(volfileid, sizeof(volfileid), "shd/%s", volinfo->volname);
233933
-    if ((len < 0) || (len >= sizeof(volfileid))) {
233933
-        ret = -1;
233933
-        goto out;
233933
-    }
233933
-
233933
-    if (dict_get_strn(this->options, "transport.socket.bind-address",
233933
-                      SLEN("transport.socket.bind-address"),
233933
-                      &volfileserver) != 0) {
233933
-        volfileserver = "localhost";
233933
-    }
233933
-    ret = glusterd_proc_init(&(svc->proc), shd_svc_name, pidfile, logdir,
233933
-                             logfile, volfile, volfileid, volfileserver);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-out:
233933
-    gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
233933
-    return ret;
233933
+    return glusterd_svc_init(svc, shd_svc_name);
233933
 }
233933
 
233933
-int
233933
-glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo)
233933
+static int
233933
+glusterd_shdsvc_create_volfile()
233933
 {
233933
     char filepath[PATH_MAX] = {
233933
         0,
233933
     };
233933
-
233933
     int ret = -1;
233933
+    glusterd_conf_t *conf = THIS->private;
233933
     dict_t *mod_dict = NULL;
233933
 
233933
-    glusterd_svc_build_shd_volfile_path(volinfo, filepath, PATH_MAX);
233933
-    if (!glusterd_is_shd_compatible_volume(volinfo)) {
233933
-        /* If volfile exist, delete it. This case happens when we
233933
-         * change from replica/ec to distribute.
233933
-         */
233933
-        (void)glusterd_unlink_file(filepath);
233933
-        ret = 0;
233933
-        goto out;
233933
-    }
233933
     mod_dict = dict_new();
233933
     if (!mod_dict)
233933
         goto out;
233933
@@ -183,7 +64,9 @@ glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo)
233933
     if (ret)
233933
         goto out;
233933
 
233933
-    ret = glusterd_shdsvc_generate_volfile(volinfo, filepath, mod_dict);
233933
+    glusterd_svc_build_volfile_path(shd_svc_name, conf->workdir, filepath,
233933
+                                    sizeof(filepath));
233933
+    ret = glusterd_create_global_volfile(build_shd_graph, filepath, mod_dict);
233933
     if (ret) {
233933
         gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
233933
                "Failed to create volfile");
233933
@@ -198,89 +81,26 @@ out:
233933
     return ret;
233933
 }
233933
 
233933
-gf_boolean_t
233933
-glusterd_svcs_shd_compatible_volumes_stopped(glusterd_svc_t *svc)
233933
-{
233933
-    glusterd_svc_proc_t *svc_proc = NULL;
233933
-    glusterd_shdsvc_t *shd = NULL;
233933
-    glusterd_svc_t *temp_svc = NULL;
233933
-    glusterd_volinfo_t *volinfo = NULL;
233933
-    gf_boolean_t comp = _gf_false;
233933
-    glusterd_conf_t *conf = THIS->private;
233933
-
233933
-    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
233933
-    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
233933
-    pthread_mutex_lock(&conf->attach_lock);
233933
-    {
233933
-        svc_proc = svc->svc_proc;
233933
-        if (!svc_proc)
233933
-            goto unlock;
233933
-        cds_list_for_each_entry(temp_svc, &svc_proc->svcs, mux_svc)
233933
-        {
233933
-            /* Get volinfo->shd from svc object */
233933
-            shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
233933
-            if (!shd) {
233933
-                gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
233933
-                       "Failed to get shd object "
233933
-                       "from shd service");
233933
-                goto unlock;
233933
-            }
233933
-
233933
-            /* Get volinfo from shd */
233933
-            volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
233933
-            if (!volinfo) {
233933
-                gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
233933
-                       "Failed to get volinfo from "
233933
-                       "from shd");
233933
-                goto unlock;
233933
-            }
233933
-            if (!glusterd_is_shd_compatible_volume(volinfo))
233933
-                continue;
233933
-            if (volinfo->status == GLUSTERD_STATUS_STARTED)
233933
-                goto unlock;
233933
-        }
233933
-        comp = _gf_true;
233933
-    }
233933
-unlock:
233933
-    pthread_mutex_unlock(&conf->attach_lock);
233933
-out:
233933
-    return comp;
233933
-}
233933
-
233933
 int
233933
 glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
233933
 {
233933
-    int ret = -1;
233933
+    int ret = 0;
233933
     glusterd_volinfo_t *volinfo = NULL;
233933
 
233933
-    volinfo = data;
233933
-    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
233933
-    GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
233933
-
233933
-    if (volinfo)
233933
-        glusterd_volinfo_ref(volinfo);
233933
-
233933
-    ret = glusterd_shdsvc_create_volfile(volinfo);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    if (!glusterd_is_shd_compatible_volume(volinfo)) {
233933
-        ret = 0;
233933
-        if (svc->inited) {
233933
-            /* This means glusterd was running for this volume and now
233933
-             * it was converted to a non-shd volume. So just stop the shd
233933
-             */
233933
-            ret = svc->stop(svc, SIGTERM);
233933
+    if (!svc->inited) {
233933
+        ret = glusterd_shdsvc_init(svc);
233933
+        if (ret) {
233933
+            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
233933
+                   "Failed to init shd "
233933
+                   "service");
233933
+            goto out;
233933
+        } else {
233933
+            svc->inited = _gf_true;
233933
+            gf_msg_debug(THIS->name, 0, "shd service initialized");
233933
         }
233933
-        goto out;
233933
     }
233933
 
233933
-    ret = glusterd_shd_svc_mux_init(volinfo, svc);
233933
-    if (ret) {
233933
-        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
233933
-               "Failed to init shd service");
233933
-        goto out;
233933
-    }
233933
+    volinfo = data;
233933
 
233933
     /* If all the volumes are stopped or all shd compatible volumes
233933
      * are stopped then stop the service if:
233933
@@ -290,26 +110,31 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
233933
      * - volinfo is NULL or
233933
      * - volinfo is present and volume is shd compatible
233933
      */
233933
-    if (glusterd_svcs_shd_compatible_volumes_stopped(svc)) {
233933
-        /* TODO
233933
-         * Take a lock and detach all svc's to stop the process
233933
-         * also reset the init flag
233933
-         */
233933
-        ret = svc->stop(svc, SIGTERM);
233933
-    } else if (volinfo) {
233933
-        ret = svc->stop(svc, SIGTERM);
233933
-        if (ret)
233933
-            goto out;
233933
+    if (glusterd_are_all_volumes_stopped() ||
233933
+        glusterd_all_shd_compatible_volumes_stopped()) {
233933
+        if (!(volinfo && !glusterd_is_shd_compatible_volume(volinfo))) {
233933
+            ret = svc->stop(svc, SIGTERM);
233933
+        }
233933
+    } else {
233933
+        if (!(volinfo && !glusterd_is_shd_compatible_volume(volinfo))) {
233933
+            ret = glusterd_shdsvc_create_volfile();
233933
+            if (ret)
233933
+                goto out;
233933
+
233933
+            ret = svc->stop(svc, SIGTERM);
233933
+            if (ret)
233933
+                goto out;
233933
 
233933
-        if (volinfo->status == GLUSTERD_STATUS_STARTED) {
233933
             ret = svc->start(svc, flags);
233933
             if (ret)
233933
                 goto out;
233933
+
233933
+            ret = glusterd_conn_connect(&(svc->conn));
233933
+            if (ret)
233933
+                goto out;
233933
         }
233933
     }
233933
 out:
233933
-    if (volinfo)
233933
-        glusterd_volinfo_unref(volinfo);
233933
     if (ret)
233933
         gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
233933
     gf_msg_debug(THIS->name, 0, "Returning %d", ret);
233933
@@ -318,7 +143,7 @@ out:
233933
 }
233933
 
233933
 int
233933
-glusterd_new_shd_svc_start(glusterd_svc_t *svc, int flags)
233933
+glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
233933
 {
233933
     int ret = -1;
233933
     char glusterd_uuid_option[PATH_MAX] = {0};
233933
@@ -363,136 +188,31 @@ glusterd_new_shd_svc_start(glusterd_svc_t *svc, int flags)
233933
         goto out;
233933
 
233933
     ret = glusterd_svc_start(svc, flags, cmdline);
233933
-    if (ret)
233933
-        goto out;
233933
 
233933
-    ret = glusterd_conn_connect(&(svc->conn));
233933
 out:
233933
     if (cmdline)
233933
         dict_unref(cmdline);
233933
-    return ret;
233933
-}
233933
 
233933
-int
233933
-glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo,
233933
-                                    glusterd_svc_t *svc, int flags)
233933
-{
233933
-    int ret = -1;
233933
-    glusterd_svc_proc_t *mux_proc = NULL;
233933
-    glusterd_conf_t *conf = NULL;
233933
-
233933
-    conf = THIS->private;
233933
-
233933
-    if (!conf || !volinfo || !svc)
233933
-        return -1;
233933
-    glusterd_shd_svcproc_cleanup(&volinfo->shd);
233933
-    mux_proc = glusterd_svcprocess_new();
233933
-    if (!mux_proc) {
233933
-        return -1;
233933
-    }
233933
-    ret = glusterd_shdsvc_init(volinfo, NULL, mux_proc);
233933
-    if (ret)
233933
-        return -1;
233933
-    pthread_mutex_lock(&conf->attach_lock);
233933
-    {
233933
-        cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs);
233933
-        svc->svc_proc = mux_proc;
233933
-        cds_list_del_init(&svc->mux_svc);
233933
-        cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs);
233933
-    }
233933
-    pthread_mutex_unlock(&conf->attach_lock);
233933
-
233933
-    ret = glusterd_new_shd_svc_start(svc, flags);
233933
-    if (!ret) {
233933
-        volinfo->shd.attached = _gf_true;
233933
-    }
233933
-    return ret;
233933
-}
233933
-
233933
-int
233933
-glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
233933
-{
233933
-    int ret = -1;
233933
-    glusterd_shdsvc_t *shd = NULL;
233933
-    glusterd_volinfo_t *volinfo = NULL;
233933
-    glusterd_conf_t *conf = NULL;
233933
-
233933
-    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
233933
-    conf = THIS->private;
233933
-    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
233933
-
233933
-    /* Get volinfo->shd from svc object */
233933
-    shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
233933
-    if (!shd) {
233933
-        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
233933
-               "Failed to get shd object "
233933
-               "from shd service");
233933
-        return -1;
233933
-    }
233933
-
233933
-    /* Get volinfo from shd */
233933
-    volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
233933
-    if (!volinfo) {
233933
-        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
233933
-               "Failed to get volinfo from "
233933
-               "from shd");
233933
-        return -1;
233933
-    }
233933
-
233933
-    if (volinfo->status != GLUSTERD_STATUS_STARTED)
233933
-        return -1;
233933
-
233933
-    glusterd_volinfo_ref(volinfo);
233933
-    if (!svc->inited) {
233933
-        ret = glusterd_shd_svc_mux_init(volinfo, svc);
233933
-        if (ret)
233933
-            goto out;
233933
-    }
233933
-
233933
-    if (shd->attached) {
233933
-        ret = glusterd_attach_svc(svc, volinfo, flags);
233933
-        if (ret) {
233933
-            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
233933
-                   "Failed to attach shd svc(volume=%s) to pid=%d. Starting"
233933
-                   "a new process",
233933
-                   volinfo->volname, glusterd_proc_get_pid(&svc->proc));
233933
-            ret = glusterd_recover_shd_attach_failure(volinfo, svc, flags);
233933
-        }
233933
-        goto out;
233933
-    }
233933
-    ret = glusterd_new_shd_svc_start(svc, flags);
233933
-    if (!ret) {
233933
-        shd->attached = _gf_true;
233933
-    }
233933
-out:
233933
-    if (volinfo)
233933
-        glusterd_volinfo_unref(volinfo);
233933
     gf_msg_debug(THIS->name, 0, "Returning %d", ret);
233933
 
233933
     return ret;
233933
 }
233933
 
233933
 int
233933
-glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo)
233933
+glusterd_shdsvc_reconfigure()
233933
 {
233933
     int ret = -1;
233933
     xlator_t *this = NULL;
233933
+    glusterd_conf_t *priv = NULL;
233933
     gf_boolean_t identical = _gf_false;
233933
-    dict_t *mod_dict = NULL;
233933
-    glusterd_svc_t *svc = NULL;
233933
 
233933
     this = THIS;
233933
     GF_VALIDATE_OR_GOTO("glusterd", this, out);
233933
 
233933
-    if (!volinfo) {
233933
-        /* reconfigure will be called separately*/
233933
-        ret = 0;
233933
-        goto out;
233933
-    }
233933
+    priv = this->private;
233933
+    GF_VALIDATE_OR_GOTO(this->name, priv, out);
233933
 
233933
-    glusterd_volinfo_ref(volinfo);
233933
-    svc = &(volinfo->shd.svc);
233933
-    if (glusterd_svcs_shd_compatible_volumes_stopped(svc))
233933
+    if (glusterd_all_shd_compatible_volumes_stopped())
233933
         goto manager;
233933
 
233933
     /*
233933
@@ -500,42 +220,8 @@ glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo)
233933
      * and cksum i.e. "character-by-character". If YES, then
233933
      * NOTHING has been changed, just return.
233933
      */
233933
-
233933
-    if (!glusterd_is_shd_compatible_volume(volinfo)) {
233933
-        if (svc->inited)
233933
-            goto manager;
233933
-
233933
-        /* Nothing to do if not shd compatible */
233933
-        ret = 0;
233933
-        goto out;
233933
-    }
233933
-    mod_dict = dict_new();
233933
-    if (!mod_dict)
233933
-        goto out;
233933
-
233933
-    ret = dict_set_uint32(mod_dict, "cluster.background-self-heal-count", 0);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    ret = dict_set_str(mod_dict, "cluster.data-self-heal", "on");
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    ret = dict_set_str(mod_dict, "cluster.metadata-self-heal", "on");
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    ret = dict_set_int32(mod_dict, "graph-check", 1);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    ret = dict_set_str(mod_dict, "cluster.entry-self-heal", "on");
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    ret = glusterd_volume_svc_check_volfile_identical(
233933
-        "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile,
233933
-        &identical);
233933
+    ret = glusterd_svc_check_volfile_identical(priv->shd_svc.name,
233933
+                                               build_shd_graph, &identical);
233933
     if (ret)
233933
         goto out;
233933
 
233933
@@ -550,9 +236,8 @@ glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo)
233933
      * changed, then inform the xlator to reconfigure the options.
233933
      */
233933
     identical = _gf_false; /* RESET the FLAG */
233933
-    ret = glusterd_volume_svc_check_topology_identical(
233933
-        "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile,
233933
-        &identical);
233933
+    ret = glusterd_svc_check_topology_identical(priv->shd_svc.name,
233933
+                                                build_shd_graph, &identical);
233933
     if (ret)
233933
         goto out;
233933
 
233933
@@ -560,7 +245,7 @@ glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo)
233933
      * options to shd volfile, so that shd will be reconfigured.
233933
      */
233933
     if (identical) {
233933
-        ret = glusterd_shdsvc_create_volfile(volinfo);
233933
+        ret = glusterd_shdsvc_create_volfile();
233933
         if (ret == 0) { /* Only if above PASSES */
233933
             ret = glusterd_fetchspec_notify(THIS);
233933
         }
233933
@@ -568,129 +253,12 @@ glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo)
233933
     }
233933
 manager:
233933
     /*
233933
-     * shd volfile's topology has been changed. volfile needs
233933
-     * to be RECONFIGURED to ACT on the changed volfile.
233933
+     * shd volfile's topology has been changed. shd server needs
233933
+     * to be RESTARTED to ACT on the changed volfile.
233933
      */
233933
-    ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
233933
+    ret = priv->shd_svc.manager(&(priv->shd_svc), NULL, PROC_START_NO_WAIT);
233933
 
233933
 out:
233933
-    if (volinfo)
233933
-        glusterd_volinfo_unref(volinfo);
233933
-    if (mod_dict)
233933
-        dict_unref(mod_dict);
233933
     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
233933
     return ret;
233933
 }
233933
-
233933
-int
233933
-glusterd_shdsvc_restart()
233933
-{
233933
-    glusterd_volinfo_t *volinfo = NULL;
233933
-    glusterd_volinfo_t *tmp = NULL;
233933
-    int ret = -1;
233933
-    xlator_t *this = THIS;
233933
-    glusterd_conf_t *conf = NULL;
233933
-    glusterd_svc_t *svc = NULL;
233933
-
233933
-    GF_VALIDATE_OR_GOTO("glusterd", this, out);
233933
-
233933
-    conf = this->private;
233933
-    GF_VALIDATE_OR_GOTO(this->name, conf, out);
233933
-
233933
-    pthread_mutex_lock(&conf->volume_lock);
233933
-    cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
233933
-    {
233933
-        glusterd_volinfo_ref(volinfo);
233933
-        pthread_mutex_unlock(&conf->volume_lock);
233933
-        /* Start per volume shd svc */
233933
-        if (volinfo->status == GLUSTERD_STATUS_STARTED) {
233933
-            svc = &(volinfo->shd.svc);
233933
-            ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
233933
-            if (ret) {
233933
-                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SHD_START_FAIL,
233933
-                       "Couldn't start shd for "
233933
-                       "vol: %s on restart",
233933
-                       volinfo->volname);
233933
-                gf_event(EVENT_SVC_MANAGER_FAILED, "volume=%s;svc_name=%s",
233933
-                         volinfo->volname, svc->name);
233933
-                glusterd_volinfo_unref(volinfo);
233933
-                goto out;
233933
-            }
233933
-        }
233933
-        glusterd_volinfo_unref(volinfo);
233933
-        pthread_mutex_lock(&conf->volume_lock);
233933
-    }
233933
-    pthread_mutex_unlock(&conf->volume_lock);
233933
-out:
233933
-    return ret;
233933
-}
233933
-
233933
-int
233933
-glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig)
233933
-{
233933
-    int ret = -1;
233933
-    glusterd_svc_proc_t *svc_proc = NULL;
233933
-    glusterd_shdsvc_t *shd = NULL;
233933
-    glusterd_volinfo_t *volinfo = NULL;
233933
-    gf_boolean_t empty = _gf_false;
233933
-    glusterd_conf_t *conf = NULL;
233933
-    int pid = -1;
233933
-
233933
-    conf = THIS->private;
233933
-    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
233933
-    svc_proc = svc->svc_proc;
233933
-    GF_VALIDATE_OR_GOTO("glusterd", svc_proc, out);
233933
-    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
233933
-
233933
-    /* Get volinfo->shd from svc object */
233933
-    shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
233933
-    if (!shd) {
233933
-        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
233933
-               "Failed to get shd object "
233933
-               "from shd service");
233933
-        return -1;
233933
-    }
233933
-
233933
-    /* Get volinfo from shd */
233933
-    volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
233933
-    if (!volinfo) {
233933
-        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
233933
-               "Failed to get volinfo from "
233933
-               "from shd");
233933
-        return -1;
233933
-    }
233933
-
233933
-    glusterd_volinfo_ref(volinfo);
233933
-    pthread_mutex_lock(&conf->attach_lock);
233933
-    {
233933
-        gf_is_service_running(svc->proc.pidfile, &pid);
233933
-        cds_list_del_init(&svc->mux_svc);
233933
-        empty = cds_list_empty(&svc_proc->svcs);
233933
-    }
233933
-    pthread_mutex_unlock(&conf->attach_lock);
233933
-    if (empty) {
233933
-        /* Unref will happen when destroying the connection */
233933
-        glusterd_volinfo_ref(volinfo);
233933
-        svc_proc->data = volinfo;
233933
-        ret = glusterd_svc_stop(svc, sig);
233933
-    }
233933
-    if (!empty && pid != -1) {
233933
-        ret = glusterd_detach_svc(svc, volinfo, sig);
233933
-        if (ret)
233933
-            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
233933
-                   "shd service is failed to detach volume %s from pid %d",
233933
-                   volinfo->volname, glusterd_proc_get_pid(&svc->proc));
233933
-        else
233933
-            gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_SVC_STOP_SUCCESS,
233933
-                   "Shd service is detached for volume %s from pid %d",
233933
-                   volinfo->volname, glusterd_proc_get_pid(&svc->proc));
233933
-    }
233933
-    svc->online = _gf_false;
233933
-    (void)glusterd_unlink_file((char *)svc->proc.pidfile);
233933
-    glusterd_shd_svcproc_cleanup(shd);
233933
-    ret = 0;
233933
-    glusterd_volinfo_unref(volinfo);
233933
-out:
233933
-    gf_msg_debug(THIS->name, 0, "Returning %d", ret);
233933
-    return ret;
233933
-}
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
233933
index 55b409f..775a9d4 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
233933
@@ -12,20 +12,12 @@
233933
 #define _GLUSTERD_SHD_SVC_H_
233933
 
233933
 #include "glusterd-svc-mgmt.h"
233933
-#include "glusterd.h"
233933
-
233933
-typedef struct glusterd_shdsvc_ glusterd_shdsvc_t;
233933
-struct glusterd_shdsvc_ {
233933
-    glusterd_svc_t svc;
233933
-    gf_boolean_t attached;
233933
-};
233933
 
233933
 void
233933
 glusterd_shdsvc_build(glusterd_svc_t *svc);
233933
 
233933
 int
233933
-glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
233933
-                     glusterd_svc_proc_t *svc_proc);
233933
+glusterd_shdsvc_init(glusterd_svc_t *svc);
233933
 
233933
 int
233933
 glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags);
233933
@@ -35,11 +27,4 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags);
233933
 
233933
 int
233933
 glusterd_shdsvc_reconfigure();
233933
-
233933
-int
233933
-glusterd_shdsvc_restart();
233933
-
233933
-int
233933
-glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig);
233933
-
233933
 #endif
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
233933
index 943b1c6..54a7bd1 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
233933
@@ -748,16 +748,6 @@ glusterd_peer_detach_cleanup(glusterd_conf_t *priv)
233933
                 }
233933
             }
233933
 
233933
-            if (glusterd_is_shd_compatible_volume(volinfo)) {
233933
-                svc = &(volinfo->shd.svc);
233933
-                ret = svc->stop(svc, SIGTERM);
233933
-                if (ret) {
233933
-                    gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
233933
-                           "Failed "
233933
-                           "to stop shd daemon service");
233933
-                }
233933
-            }
233933
-
233933
             if (glusterd_is_gfproxyd_enabled(volinfo)) {
233933
                 svc = &(volinfo->gfproxyd.svc);
233933
                 ret = svc->stop(svc, SIGTERM);
233933
@@ -785,7 +775,7 @@ glusterd_peer_detach_cleanup(glusterd_conf_t *priv)
233933
     }
233933
 
233933
     /*Reconfigure all daemon services upon peer detach*/
233933
-    ret = glusterd_svcs_reconfigure(NULL);
233933
+    ret = glusterd_svcs_reconfigure();
233933
     if (ret) {
233933
         gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
233933
                "Failed to reconfigure all daemon services.");
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
233933
index 1da4076..56bab07 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
233933
@@ -366,7 +366,6 @@ int
233933
 glusterd_snapdsvc_restart()
233933
 {
233933
     glusterd_volinfo_t *volinfo = NULL;
233933
-    glusterd_volinfo_t *tmp = NULL;
233933
     int ret = 0;
233933
     xlator_t *this = THIS;
233933
     glusterd_conf_t *conf = NULL;
233933
@@ -377,7 +376,7 @@ glusterd_snapdsvc_restart()
233933
     conf = this->private;
233933
     GF_ASSERT(conf);
233933
 
233933
-    cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
233933
+    cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
233933
     {
233933
         /* Start per volume snapd svc */
233933
         if (volinfo->status == GLUSTERD_STATUS_STARTED) {
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-statedump.c b/xlators/mgmt/glusterd/src/glusterd-statedump.c
233933
index 69d4cf4..f5ecde7 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-statedump.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-statedump.c
233933
@@ -202,6 +202,9 @@ glusterd_dump_priv(xlator_t *this)
233933
         gf_proc_dump_build_key(key, "glusterd", "ping-timeout");
233933
         gf_proc_dump_write(key, "%d", priv->ping_timeout);
233933
 
233933
+        gf_proc_dump_build_key(key, "glusterd", "shd.online");
233933
+        gf_proc_dump_write(key, "%d", priv->shd_svc.online);
233933
+
233933
         gf_proc_dump_build_key(key, "glusterd", "nfs.online");
233933
         gf_proc_dump_write(key, "%d", priv->nfs_svc.online);
233933
 
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
233933
index e42703c..ca19a75 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
233933
@@ -7,7 +7,6 @@
233933
    later), or the GNU General Public License, version 2 (GPLv2), in all
233933
    cases as published by the Free Software Foundation.
233933
 */
233933
-#include <signal.h>
233933
 
233933
 #include <glusterfs/globals.h>
233933
 #include <glusterfs/run.h>
233933
@@ -21,14 +20,12 @@
233933
 #include "glusterd-bitd-svc.h"
233933
 #include "glusterd-tierd-svc.h"
233933
 #include "glusterd-tierd-svc-helper.h"
233933
-#include "glusterd-shd-svc-helper.h"
233933
 #include "glusterd-scrub-svc.h"
233933
 #include "glusterd-svc-helper.h"
233933
 #include <glusterfs/syscall.h>
233933
-#include "glusterd-snapshot-utils.h"
233933
 
233933
 int
233933
-glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo)
233933
+glusterd_svcs_reconfigure()
233933
 {
233933
     int ret = 0;
233933
     xlator_t *this = THIS;
233933
@@ -46,11 +43,9 @@ glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo)
233933
         goto out;
233933
 
233933
     svc_name = "self-heald";
233933
-    if (volinfo) {
233933
-        ret = glusterd_shdsvc_reconfigure(volinfo);
233933
-        if (ret)
233933
-            goto out;
233933
-    }
233933
+    ret = glusterd_shdsvc_reconfigure();
233933
+    if (ret)
233933
+        goto out;
233933
 
233933
     if (conf->op_version == GD_OP_VERSION_MIN)
233933
         goto out;
233933
@@ -74,7 +69,7 @@ out:
233933
 }
233933
 
233933
 int
233933
-glusterd_svcs_stop(glusterd_volinfo_t *volinfo)
233933
+glusterd_svcs_stop()
233933
 {
233933
     int ret = 0;
233933
     xlator_t *this = NULL;
233933
@@ -90,15 +85,13 @@ glusterd_svcs_stop(glusterd_volinfo_t *volinfo)
233933
     if (ret)
233933
         goto out;
233933
 
233933
-    ret = glusterd_svc_stop(&(priv->quotad_svc), SIGTERM);
233933
+    ret = glusterd_svc_stop(&(priv->shd_svc), SIGTERM);
233933
     if (ret)
233933
         goto out;
233933
 
233933
-    if (volinfo) {
233933
-        ret = glusterd_svc_stop(&(volinfo->shd.svc), PROC_START_NO_WAIT);
233933
-        if (ret)
233933
-            goto out;
233933
-    }
233933
+    ret = glusterd_svc_stop(&(priv->quotad_svc), SIGTERM);
233933
+    if (ret)
233933
+        goto out;
233933
 
233933
     ret = glusterd_svc_stop(&(priv->bitd_svc), SIGTERM);
233933
     if (ret)
233933
@@ -128,6 +121,12 @@ glusterd_svcs_manager(glusterd_volinfo_t *volinfo)
233933
     if (ret)
233933
         goto out;
233933
 
233933
+    ret = conf->shd_svc.manager(&(conf->shd_svc), volinfo, PROC_START_NO_WAIT);
233933
+    if (ret == -EINVAL)
233933
+        ret = 0;
233933
+    if (ret)
233933
+        goto out;
233933
+
233933
     if (conf->op_version == GD_OP_VERSION_MIN)
233933
         goto out;
233933
 
233933
@@ -144,15 +143,6 @@ glusterd_svcs_manager(glusterd_volinfo_t *volinfo)
233933
     if (ret)
233933
         goto out;
233933
 
233933
-    if (volinfo) {
233933
-        ret = volinfo->shd.svc.manager(&(volinfo->shd.svc), volinfo,
233933
-                                       PROC_START_NO_WAIT);
233933
-        if (ret == -EINVAL)
233933
-            ret = 0;
233933
-        if (ret)
233933
-            goto out;
233933
-    }
233933
-
233933
     ret = conf->scrub_svc.manager(&(conf->scrub_svc), NULL, PROC_START_NO_WAIT);
233933
     if (ret == -EINVAL)
233933
         ret = 0;
233933
@@ -279,678 +269,3 @@ out:
233933
         GF_FREE(tmpvol);
233933
     return ret;
233933
 }
233933
-
233933
-int
233933
-glusterd_volume_svc_check_volfile_identical(
233933
-    char *svc_name, dict_t *mode_dict, glusterd_volinfo_t *volinfo,
233933
-    glusterd_vol_graph_builder_t builder, gf_boolean_t *identical)
233933
-{
233933
-    char orgvol[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    char *tmpvol = NULL;
233933
-    xlator_t *this = NULL;
233933
-    int ret = -1;
233933
-    int need_unlink = 0;
233933
-    int tmp_fd = -1;
233933
-
233933
-    this = THIS;
233933
-
233933
-    GF_VALIDATE_OR_GOTO(this->name, this, out);
233933
-    GF_VALIDATE_OR_GOTO(this->name, identical, out);
233933
-
233933
-    /* This builds volfile for volume level dameons */
233933
-    glusterd_volume_svc_build_volfile_path(svc_name, volinfo, orgvol,
233933
-                                           sizeof(orgvol));
233933
-
233933
-    ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name);
233933
-    if (ret < 0) {
233933
-        goto out;
233933
-    }
233933
-
233933
-    /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
233933
-    tmp_fd = mkstemp(tmpvol);
233933
-    if (tmp_fd < 0) {
233933
-        gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
233933
-               "Unable to create temp file"
233933
-               " %s:(%s)",
233933
-               tmpvol, strerror(errno));
233933
-        ret = -1;
233933
-        goto out;
233933
-    }
233933
-
233933
-    need_unlink = 1;
233933
-
233933
-    ret = builder(volinfo, tmpvol, mode_dict);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    ret = glusterd_check_files_identical(orgvol, tmpvol, identical);
233933
-out:
233933
-    if (need_unlink)
233933
-        sys_unlink(tmpvol);
233933
-
233933
-    if (tmpvol != NULL)
233933
-        GF_FREE(tmpvol);
233933
-
233933
-    if (tmp_fd >= 0)
233933
-        sys_close(tmp_fd);
233933
-
233933
-    return ret;
233933
-}
233933
-
233933
-int
233933
-glusterd_volume_svc_check_topology_identical(
233933
-    char *svc_name, dict_t *mode_dict, glusterd_volinfo_t *volinfo,
233933
-    glusterd_vol_graph_builder_t builder, gf_boolean_t *identical)
233933
-{
233933
-    char orgvol[PATH_MAX] = {
233933
-        0,
233933
-    };
233933
-    char *tmpvol = NULL;
233933
-    glusterd_conf_t *conf = NULL;
233933
-    xlator_t *this = THIS;
233933
-    int ret = -1;
233933
-    int tmpclean = 0;
233933
-    int tmpfd = -1;
233933
-
233933
-    if ((!identical) || (!this) || (!this->private))
233933
-        goto out;
233933
-
233933
-    conf = this->private;
233933
-    GF_VALIDATE_OR_GOTO(this->name, conf, out);
233933
-
233933
-    /* This builds volfile for volume level dameons */
233933
-    glusterd_volume_svc_build_volfile_path(svc_name, volinfo, orgvol,
233933
-                                           sizeof(orgvol));
233933
-    /* Create the temporary volfile */
233933
-    ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name);
233933
-    if (ret < 0) {
233933
-        goto out;
233933
-    }
233933
-
233933
-    /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
233933
-    tmpfd = mkstemp(tmpvol);
233933
-    if (tmpfd < 0) {
233933
-        gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
233933
-               "Unable to create temp file"
233933
-               " %s:(%s)",
233933
-               tmpvol, strerror(errno));
233933
-        ret = -1;
233933
-        goto out;
233933
-    }
233933
-
233933
-    tmpclean = 1; /* SET the flag to unlink() tmpfile */
233933
-
233933
-    ret = builder(volinfo, tmpvol, mode_dict);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    /* Compare the topology of volfiles */
233933
-    ret = glusterd_check_topology_identical(orgvol, tmpvol, identical);
233933
-out:
233933
-    if (tmpfd >= 0)
233933
-        sys_close(tmpfd);
233933
-    if (tmpclean)
233933
-        sys_unlink(tmpvol);
233933
-    if (tmpvol != NULL)
233933
-        GF_FREE(tmpvol);
233933
-    return ret;
233933
-}
233933
-
233933
-void *
233933
-__gf_find_compatible_svc(gd_node_type daemon)
233933
-{
233933
-    glusterd_svc_proc_t *svc_proc = NULL;
233933
-    glusterd_svc_proc_t *return_proc = NULL;
233933
-    glusterd_svc_t *parent_svc = NULL;
233933
-    struct cds_list_head *svc_procs = NULL;
233933
-    glusterd_conf_t *conf = NULL;
233933
-    int pid = -1;
233933
-
233933
-    conf = THIS->private;
233933
-    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
233933
-
233933
-    if (daemon == GD_NODE_SHD) {
233933
-        svc_procs = &conf->shd_procs;
233933
-        if (!svc_procs)
233933
-            goto out;
233933
-    }
233933
-
233933
-    cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list)
233933
-    {
233933
-        parent_svc = cds_list_entry(svc_proc->svcs.next, glusterd_svc_t,
233933
-                                    mux_svc);
233933
-        if (!return_proc)
233933
-            return_proc = svc_proc;
233933
-
233933
-        /* If there is an  already running shd daemons, select it. Otehrwise
233933
-         * select the first one.
233933
-         */
233933
-        if (parent_svc && gf_is_service_running(parent_svc->proc.pidfile, &pid))
233933
-            return (void *)svc_proc;
233933
-        /*
233933
-         * Logic to select one process goes here. Currently there is only one
233933
-         * shd_proc. So selecting the first one;
233933
-         */
233933
-    }
233933
-out:
233933
-    return return_proc;
233933
-}
233933
-
233933
-glusterd_svc_proc_t *
233933
-glusterd_svcprocess_new()
233933
-{
233933
-    glusterd_svc_proc_t *new_svcprocess = NULL;
233933
-
233933
-    new_svcprocess = GF_CALLOC(1, sizeof(*new_svcprocess),
233933
-                               gf_gld_mt_glusterd_svc_proc_t);
233933
-
233933
-    if (!new_svcprocess)
233933
-        return NULL;
233933
-
233933
-    CDS_INIT_LIST_HEAD(&new_svcprocess->svc_proc_list);
233933
-    CDS_INIT_LIST_HEAD(&new_svcprocess->svcs);
233933
-    new_svcprocess->notify = glusterd_muxsvc_common_rpc_notify;
233933
-    return new_svcprocess;
233933
-}
233933
-
233933
-int
233933
-glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
233933
-{
233933
-    int ret = -1;
233933
-    glusterd_svc_proc_t *mux_proc = NULL;
233933
-    glusterd_conn_t *mux_conn = NULL;
233933
-    glusterd_conf_t *conf = NULL;
233933
-    glusterd_svc_t *parent_svc = NULL;
233933
-    int pid = -1;
233933
-
233933
-    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
233933
-    GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
233933
-    conf = THIS->private;
233933
-    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
233933
-    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
233933
-
233933
-    pthread_mutex_lock(&conf->attach_lock);
233933
-    {
233933
-        if (!svc->inited) {
233933
-            if (gf_is_service_running(svc->proc.pidfile, &pid)) {
233933
-                /* Just connect is required, but we don't know what happens
233933
-                 * during the disconnect. So better to reattach.
233933
-                 */
233933
-                mux_proc = __gf_find_compatible_svc_from_pid(GD_NODE_SHD, pid);
233933
-            }
233933
-
233933
-            if (!mux_proc) {
233933
-                if (pid != -1 && sys_access(svc->proc.pidfile, R_OK) == 0) {
233933
-                    /* stale pid file, unlink it. */
233933
-                    kill(pid, SIGTERM);
233933
-                    sys_unlink(svc->proc.pidfile);
233933
-                }
233933
-                mux_proc = __gf_find_compatible_svc(GD_NODE_SHD);
233933
-            }
233933
-            if (mux_proc) {
233933
-                /* Take first entry from the process */
233933
-                parent_svc = cds_list_entry(mux_proc->svcs.next, glusterd_svc_t,
233933
-                                            mux_svc);
233933
-                sys_link(parent_svc->proc.pidfile, svc->proc.pidfile);
233933
-                mux_conn = &parent_svc->conn;
233933
-                if (volinfo)
233933
-                    volinfo->shd.attached = _gf_true;
233933
-            } else {
233933
-                mux_proc = glusterd_svcprocess_new();
233933
-                if (!mux_proc) {
233933
-                    ret = -1;
233933
-                    goto unlock;
233933
-                }
233933
-                cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs);
233933
-            }
233933
-            svc->svc_proc = mux_proc;
233933
-            cds_list_del_init(&svc->mux_svc);
233933
-            cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs);
233933
-            ret = glusterd_shdsvc_init(volinfo, mux_conn, mux_proc);
233933
-            if (ret) {
233933
-                pthread_mutex_unlock(&conf->attach_lock);
233933
-                gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
233933
-                       "Failed to init shd "
233933
-                       "service");
233933
-                goto out;
233933
-            }
233933
-            gf_msg_debug(THIS->name, 0, "shd service initialized");
233933
-            svc->inited = _gf_true;
233933
-        }
233933
-        ret = 0;
233933
-    }
233933
-unlock:
233933
-    pthread_mutex_unlock(&conf->attach_lock);
233933
-out:
233933
-    return ret;
233933
-}
233933
-
233933
-void *
233933
-__gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid)
233933
-{
233933
-    glusterd_svc_proc_t *svc_proc = NULL;
233933
-    struct cds_list_head *svc_procs = NULL;
233933
-    glusterd_svc_t *svc = NULL;
233933
-    pid_t mux_pid = -1;
233933
-    glusterd_conf_t *conf = NULL;
233933
-
233933
-    conf = THIS->private;
233933
-    if (!conf)
233933
-        return NULL;
233933
-
233933
-    if (daemon == GD_NODE_SHD) {
233933
-        svc_procs = &conf->shd_procs;
233933
-        if (!svc_proc)
233933
-            return NULL;
233933
-    } /* Can be moved to switch when mux is implemented for other daemon; */
233933
-
233933
-    cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list)
233933
-    {
233933
-        cds_list_for_each_entry(svc, &svc_proc->svcs, mux_svc)
233933
-        {
233933
-            if (gf_is_service_running(svc->proc.pidfile, &mux_pid)) {
233933
-                if (mux_pid == pid) {
233933
-                    /*TODO
233933
-                     * inefficient loop, but at the moment, there is only
233933
-                     * one shd.
233933
-                     */
233933
-                    return svc_proc;
233933
-                }
233933
-            }
233933
-        }
233933
-    }
233933
-    return NULL;
233933
-}
233933
-
233933
-static int32_t
233933
-my_callback(struct rpc_req *req, struct iovec *iov, int count, void *v_frame)
233933
-{
233933
-    call_frame_t *frame = v_frame;
233933
-    xlator_t *this = NULL;
233933
-    glusterd_conf_t *conf = NULL;
233933
-
233933
-    GF_VALIDATE_OR_GOTO("glusterd", frame, out);
233933
-    this = frame->this;
233933
-    GF_VALIDATE_OR_GOTO("glusterd", this, out);
233933
-    conf = this->private;
233933
-    GF_VALIDATE_OR_GOTO(this->name, conf, out);
233933
-
233933
-    GF_ATOMIC_DEC(conf->blockers);
233933
-
233933
-    STACK_DESTROY(frame->root);
233933
-out:
233933
-    return 0;
233933
-}
233933
-
233933
-static int32_t
233933
-glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count,
233933
-                        void *v_frame)
233933
-{
233933
-    call_frame_t *frame = v_frame;
233933
-    glusterd_volinfo_t *volinfo = NULL;
233933
-    glusterd_shdsvc_t *shd = NULL;
233933
-    glusterd_svc_t *svc = frame->cookie;
233933
-    glusterd_svc_t *parent_svc = NULL;
233933
-    glusterd_svc_proc_t *mux_proc = NULL;
233933
-    glusterd_conf_t *conf = NULL;
233933
-    int *flag = (int *)frame->local;
233933
-    xlator_t *this = THIS;
233933
-    int pid = -1;
233933
-    int ret = -1;
233933
-    gf_getspec_rsp rsp = {
233933
-        0,
233933
-    };
233933
-
233933
-    GF_VALIDATE_OR_GOTO("glusterd", this, out);
233933
-    conf = this->private;
233933
-    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
233933
-    GF_VALIDATE_OR_GOTO("glusterd", frame, out);
233933
-    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
233933
-
233933
-    frame->local = NULL;
233933
-    frame->cookie = NULL;
233933
-
233933
-    if (!strcmp(svc->name, "glustershd")) {
233933
-        /* Get volinfo->shd from svc object */
233933
-        shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
233933
-        if (!shd) {
233933
-            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
233933
-                   "Failed to get shd object "
233933
-                   "from shd service");
233933
-            goto out;
233933
-        }
233933
-
233933
-        /* Get volinfo from shd */
233933
-        volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
233933
-        if (!volinfo) {
233933
-            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
233933
-                   "Failed to get volinfo from "
233933
-                   "from shd");
233933
-            goto out;
233933
-        }
233933
-    }
233933
-
233933
-    if (!iov) {
233933
-        gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
233933
-               "iov is NULL");
233933
-        ret = -1;
233933
-        goto out;
233933
-    }
233933
-
233933
-    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
233933
-    if (ret < 0) {
233933
-        gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
233933
-               "XDR decoding error");
233933
-        ret = -1;
233933
-        goto out;
233933
-    }
233933
-
233933
-    if (rsp.op_ret == 0) {
233933
-        pthread_mutex_lock(&conf->attach_lock);
233933
-        {
233933
-            if (!strcmp(svc->name, "glustershd")) {
233933
-                mux_proc = svc->svc_proc;
233933
-                if (mux_proc &&
233933
-                    !gf_is_service_running(svc->proc.pidfile, &pid)) {
233933
-                    /*
233933
-                     * When svc's are restarting, there is a chance that the
233933
-                     * attached svc might not have updated it's pid. Because
233933
-                     * it was at connection stage. So in that case, we need
233933
-                     * to retry the pid file copy.
233933
-                     */
233933
-                    parent_svc = cds_list_entry(mux_proc->svcs.next,
233933
-                                                glusterd_svc_t, mux_svc);
233933
-                    if (parent_svc)
233933
-                        sys_link(parent_svc->proc.pidfile, svc->proc.pidfile);
233933
-                }
233933
-            }
233933
-            svc->online = _gf_true;
233933
-        }
233933
-        pthread_mutex_unlock(&conf->attach_lock);
233933
-        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_ATTACH_FAIL,
233933
-               "svc %s of volume %s attached successfully to pid %d", svc->name,
233933
-               volinfo->volname, glusterd_proc_get_pid(&svc->proc));
233933
-    } else {
233933
-        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
233933
-               "svc %s of volume %s failed to "
233933
-               "attach to pid %d. Starting a new process",
233933
-               svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
233933
-        if (!strcmp(svc->name, "glustershd")) {
233933
-            glusterd_recover_shd_attach_failure(volinfo, svc, *flag);
233933
-        }
233933
-    }
233933
-out:
233933
-    if (flag) {
233933
-        GF_FREE(flag);
233933
-    }
233933
-    GF_ATOMIC_DEC(conf->blockers);
233933
-    STACK_DESTROY(frame->root);
233933
-    return 0;
233933
-}
233933
-
233933
-extern size_t
233933
-build_volfile_path(char *volume_id, char *path, size_t path_len,
233933
-                   char *trusted_str);
233933
-
233933
-int
233933
-__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
233933
-                                  struct rpc_clnt *rpc, char *volfile_id,
233933
-                                  int op)
233933
-{
233933
-    int ret = -1;
233933
-    struct iobuf *iobuf = NULL;
233933
-    struct iobref *iobref = NULL;
233933
-    struct iovec iov = {
233933
-        0,
233933
-    };
233933
-    char path[PATH_MAX] = {
233933
-        '\0',
233933
-    };
233933
-    struct stat stbuf = {
233933
-        0,
233933
-    };
233933
-    int32_t spec_fd = -1;
233933
-    size_t file_len = -1;
233933
-    char *volfile_content = NULL;
233933
-    ssize_t req_size = 0;
233933
-    call_frame_t *frame = NULL;
233933
-    gd1_mgmt_brick_op_req brick_req;
233933
-    void *req = &brick_req;
233933
-    void *errlbl = &&err;
233933
-    struct rpc_clnt_connection *conn;
233933
-    xlator_t *this = THIS;
233933
-    glusterd_conf_t *conf = THIS->private;
233933
-    extern struct rpc_clnt_program gd_brick_prog;
233933
-    fop_cbk_fn_t cbkfn = my_callback;
233933
-
233933
-    if (!rpc) {
233933
-        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_PARAM_NULL,
233933
-               "called with null rpc");
233933
-        return -1;
233933
-    }
233933
-
233933
-    conn = &rpc->conn;
233933
-    if (!conn->connected || conn->disconnected) {
233933
-        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED,
233933
-               "not connected yet");
233933
-        return -1;
233933
-    }
233933
-
233933
-    brick_req.op = op;
233933
-    brick_req.name = volfile_id;
233933
-    brick_req.input.input_val = NULL;
233933
-    brick_req.input.input_len = 0;
233933
-
233933
-    frame = create_frame(this, this->ctx->pool);
233933
-    if (!frame) {
233933
-        goto *errlbl;
233933
-    }
233933
-
233933
-    if (op == GLUSTERD_SVC_ATTACH) {
233933
-        (void)build_volfile_path(volfile_id, path, sizeof(path), NULL);
233933
-
233933
-        ret = sys_stat(path, &stbuf);
233933
-        if (ret < 0) {
233933
-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
233933
-                   "Unable to stat %s (%s)", path, strerror(errno));
233933
-            ret = -EINVAL;
233933
-            goto *errlbl;
233933
-        }
233933
-
233933
-        file_len = stbuf.st_size;
233933
-        volfile_content = GF_MALLOC(file_len + 1, gf_common_mt_char);
233933
-        if (!volfile_content) {
233933
-            ret = -ENOMEM;
233933
-            goto *errlbl;
233933
-        }
233933
-        spec_fd = open(path, O_RDONLY);
233933
-        if (spec_fd < 0) {
233933
-            gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_SVC_ATTACH_FAIL,
233933
-                   "failed to read volfile %s", path);
233933
-            ret = -EIO;
233933
-            goto *errlbl;
233933
-        }
233933
-        ret = sys_read(spec_fd, volfile_content, file_len);
233933
-        if (ret == file_len) {
233933
-            brick_req.input.input_val = volfile_content;
233933
-            brick_req.input.input_len = file_len;
233933
-        } else {
233933
-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
233933
-                   "read failed on path %s. File size=%" GF_PRI_SIZET
233933
-                   "read size=%d",
233933
-                   path, file_len, ret);
233933
-            ret = -EIO;
233933
-            goto *errlbl;
233933
-        }
233933
-
233933
-        frame->cookie = svc;
233933
-        frame->local = GF_CALLOC(1, sizeof(int), gf_gld_mt_int);
233933
-        *((int *)frame->local) = flags;
233933
-        cbkfn = glusterd_svc_attach_cbk;
233933
-    }
233933
-
233933
-    req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
233933
-    iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
233933
-    if (!iobuf) {
233933
-        goto *errlbl;
233933
-    }
233933
-    errlbl = &&maybe_free_iobuf;
233933
-
233933
-    iov.iov_base = iobuf->ptr;
233933
-    iov.iov_len = iobuf_pagesize(iobuf);
233933
-
233933
-    iobref = iobref_new();
233933
-    if (!iobref) {
233933
-        goto *errlbl;
233933
-    }
233933
-    errlbl = &&free_iobref;
233933
-
233933
-    iobref_add(iobref, iobuf);
233933
-    /*
233933
-     * Drop our reference to the iobuf.  The iobref should already have
233933
-     * one after iobref_add, so when we unref that we'll free the iobuf as
233933
-     * well.  This allows us to pass just the iobref as frame->local.
233933
-     */
233933
-    iobuf_unref(iobuf);
233933
-    /* Set the pointer to null so we don't free it on a later error. */
233933
-    iobuf = NULL;
233933
-
233933
-    /* Create the xdr payload */
233933
-    ret = xdr_serialize_generic(iov, req, (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
233933
-    if (ret == -1) {
233933
-        goto *errlbl;
233933
-    }
233933
-    iov.iov_len = ret;
233933
-
233933
-    /* Send the msg */
233933
-    GF_ATOMIC_INC(conf->blockers);
233933
-    ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0,
233933
-                          iobref, frame, NULL, 0, NULL, 0, NULL);
233933
-    GF_FREE(volfile_content);
233933
-    if (spec_fd >= 0)
233933
-        sys_close(spec_fd);
233933
-    return ret;
233933
-
233933
-free_iobref:
233933
-    iobref_unref(iobref);
233933
-maybe_free_iobuf:
233933
-    if (iobuf) {
233933
-        iobuf_unref(iobuf);
233933
-    }
233933
-err:
233933
-    GF_FREE(volfile_content);
233933
-    if (spec_fd >= 0)
233933
-        sys_close(spec_fd);
233933
-    if (frame)
233933
-        STACK_DESTROY(frame->root);
233933
-    return -1;
233933
-}
233933
-
233933
-int
233933
-glusterd_attach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int flags)
233933
-{
233933
-    glusterd_conf_t *conf = THIS->private;
233933
-    int ret = -1;
233933
-    int tries;
233933
-    rpc_clnt_t *rpc = NULL;
233933
-
233933
-    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
233933
-    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
233933
-    GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
233933
-
233933
-    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_ATTACH_INFO,
233933
-           "adding svc %s (volume=%s) to existing "
233933
-           "process with pid %d",
233933
-           svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
233933
-
233933
-    rpc = rpc_clnt_ref(svc->conn.rpc);
233933
-    for (tries = 15; tries > 0; --tries) {
233933
-        if (rpc) {
233933
-            pthread_mutex_lock(&conf->attach_lock);
233933
-            {
233933
-                ret = __glusterd_send_svc_configure_req(
233933
-                    svc, flags, rpc, svc->proc.volfileid, GLUSTERD_SVC_ATTACH);
233933
-            }
233933
-            pthread_mutex_unlock(&conf->attach_lock);
233933
-            if (!ret) {
233933
-                volinfo->shd.attached = _gf_true;
233933
-                goto out;
233933
-            }
233933
-        }
233933
-        /*
233933
-         * It might not actually be safe to manipulate the lock
233933
-         * like this, but if we don't then the connection can
233933
-         * never actually complete and retries are useless.
233933
-         * Unfortunately, all of the alternatives (e.g. doing
233933
-         * all of this in a separate thread) are much more
233933
-         * complicated and risky.
233933
-         * TBD: see if there's a better way
233933
-         */
233933
-        synclock_unlock(&conf->big_lock);
233933
-        sleep(1);
233933
-        synclock_lock(&conf->big_lock);
233933
-    }
233933
-    ret = -1;
233933
-    gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SVC_ATTACH_FAIL,
233933
-           "attach failed for %s(volume=%s)", svc->name, volinfo->volname);
233933
-out:
233933
-    if (rpc)
233933
-        rpc_clnt_unref(rpc);
233933
-    return ret;
233933
-}
233933
-
233933
-int
233933
-glusterd_detach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int sig)
233933
-{
233933
-    glusterd_conf_t *conf = THIS->private;
233933
-    int ret = -1;
233933
-    int tries;
233933
-    rpc_clnt_t *rpc = NULL;
233933
-
233933
-    GF_VALIDATE_OR_GOTO(THIS->name, conf, out);
233933
-    GF_VALIDATE_OR_GOTO(THIS->name, svc, out);
233933
-    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
233933
-
233933
-    gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DETACH_INFO,
233933
-           "removing svc %s (volume=%s) from existing "
233933
-           "process with pid %d",
233933
-           svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
233933
-
233933
-    rpc = rpc_clnt_ref(svc->conn.rpc);
233933
-    for (tries = 15; tries > 0; --tries) {
233933
-        if (rpc) {
233933
-            /*For detach there is no flags, and we are not using sig.*/
233933
-            pthread_mutex_lock(&conf->attach_lock);
233933
-            {
233933
-                ret = __glusterd_send_svc_configure_req(svc, 0, svc->conn.rpc,
233933
-                                                        svc->proc.volfileid,
233933
-                                                        GLUSTERD_SVC_DETACH);
233933
-            }
233933
-            pthread_mutex_unlock(&conf->attach_lock);
233933
-            if (!ret) {
233933
-                goto out;
233933
-            }
233933
-        }
233933
-        /*
233933
-         * It might not actually be safe to manipulate the lock
233933
-         * like this, but if we don't then the connection can
233933
-         * never actually complete and retries are useless.
233933
-         * Unfortunately, all of the alternatives (e.g. doing
233933
-         * all of this in a separate thread) are much more
233933
-         * complicated and risky.
233933
-         * TBD: see if there's a better way
233933
-         */
233933
-        synclock_unlock(&conf->big_lock);
233933
-        sleep(1);
233933
-        synclock_lock(&conf->big_lock);
233933
-    }
233933
-    ret = -1;
233933
-    gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SVC_DETACH_FAIL,
233933
-           "detach failed for %s(volume=%s)", svc->name, volinfo->volname);
233933
-out:
233933
-    if (rpc)
233933
-        rpc_clnt_unref(rpc);
233933
-    return ret;
233933
-}
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
index 5def246..cc98e78 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
@@ -16,10 +16,10 @@
 #include "glusterd-volgen.h"
 
 int
-glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo);
+glusterd_svcs_reconfigure();
 
 int
-glusterd_svcs_stop(glusterd_volinfo_t *vol);
+glusterd_svcs_stop();
 
 int
 glusterd_svcs_manager(glusterd_volinfo_t *volinfo);
@@ -41,41 +41,5 @@ int
 glusterd_svc_check_tier_topology_identical(char *svc_name,
                                            glusterd_volinfo_t *volinfo,
                                            gf_boolean_t *identical);
-int
-glusterd_volume_svc_check_volfile_identical(char *svc_name, dict_t *mode_dict,
-                                            glusterd_volinfo_t *volinfo,
-                                            glusterd_vol_graph_builder_t,
-                                            gf_boolean_t *identical);
-int
-glusterd_volume_svc_check_topology_identical(char *svc_name, dict_t *mode_dict,
-                                             glusterd_volinfo_t *volinfo,
-                                             glusterd_vol_graph_builder_t,
-                                             gf_boolean_t *identical);
-void
-glusterd_volume_svc_build_volfile_path(char *server, glusterd_volinfo_t *vol,
-                                       char *volfile, size_t len);
-void *
-__gf_find_compatible_svc(gd_node_type daemon);
-
-glusterd_svc_proc_t *
-glusterd_svcprocess_new();
-
-int
-glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc);
-
-void *
-__gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid);
-
-int
-glusterd_attach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo,
-                    int flags);
-
-int
-glusterd_detach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int sig);
-
-int
-__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flag,
-                                  struct rpc_clnt *rpc, char *volfile_id,
-                                  int op);
 
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
233933
index f32dafc..4cd4cea 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
233933
@@ -18,7 +18,6 @@
233933
 #include "glusterd-conn-mgmt.h"
233933
 #include "glusterd-messages.h"
233933
 #include <glusterfs/syscall.h>
233933
-#include "glusterd-shd-svc-helper.h"
233933
 
233933
 int
233933
 glusterd_svc_create_rundir(char *rundir)
233933
@@ -168,75 +167,68 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline)
233933
     GF_ASSERT(this);
233933
 
233933
     priv = this->private;
233933
-    GF_VALIDATE_OR_GOTO("glusterd", priv, out);
233933
-    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
233933
-
233933
-    pthread_mutex_lock(&priv->attach_lock);
233933
-    {
233933
-        if (glusterd_proc_is_running(&(svc->proc))) {
233933
-            ret = 0;
233933
-            goto unlock;
233933
-        }
233933
+    GF_ASSERT(priv);
233933
 
233933
-        ret = sys_access(svc->proc.volfile, F_OK);
233933
-        if (ret) {
233933
-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_NOT_FOUND,
233933
-                   "Volfile %s is not present", svc->proc.volfile);
233933
-            goto unlock;
233933
-        }
233933
+    if (glusterd_proc_is_running(&(svc->proc))) {
233933
+        ret = 0;
233933
+        goto out;
233933
+    }
233933
 
233933
-        runinit(&runner);
233933
+    ret = sys_access(svc->proc.volfile, F_OK);
233933
+    if (ret) {
233933
+        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_NOT_FOUND,
233933
+               "Volfile %s is not present", svc->proc.volfile);
233933
+        goto out;
233933
+    }
233933
 
233933
-        if (this->ctx->cmd_args.valgrind) {
233933
-            len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log",
233933
-                           svc->proc.logfile, svc->name);
233933
-            if ((len < 0) || (len >= PATH_MAX)) {
233933
-                ret = -1;
233933
-                goto unlock;
233933
-            }
233933
+    runinit(&runner);
233933
 
233933
-            runner_add_args(&runner, "valgrind", "--leak-check=full",
233933
-                            "--trace-children=yes", "--track-origins=yes",
233933
-                            NULL);
233933
-            runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
233933
+    if (this->ctx->cmd_args.valgrind) {
233933
+        len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log",
233933
+                       svc->proc.logfile, svc->name);
233933
+        if ((len < 0) || (len >= PATH_MAX)) {
233933
+            ret = -1;
233933
+            goto out;
233933
         }
233933
 
233933
-        runner_add_args(&runner, SBIN_DIR "/glusterfs", "-s",
233933
-                        svc->proc.volfileserver, "--volfile-id",
233933
-                        svc->proc.volfileid, "-p", svc->proc.pidfile, "-l",
233933
-                        svc->proc.logfile, "-S", svc->conn.sockpath, NULL);
233933
+        runner_add_args(&runner, "valgrind", "--leak-check=full",
233933
+                        "--trace-children=yes", "--track-origins=yes", NULL);
233933
+        runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
233933
+    }
233933
 
233933
-        if (dict_get_strn(priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
233933
-                          SLEN(GLUSTERD_LOCALTIME_LOGGING_KEY),
233933
-                          &localtime_logging) == 0) {
233933
-            if (strcmp(localtime_logging, "enable") == 0)
233933
-                runner_add_arg(&runner, "--localtime-logging");
233933
-        }
233933
-        if (dict_get_strn(priv->opts, GLUSTERD_DAEMON_LOG_LEVEL_KEY,
233933
-                          SLEN(GLUSTERD_DAEMON_LOG_LEVEL_KEY),
233933
-                          &log_level) == 0) {
233933
-            snprintf(daemon_log_level, 30, "--log-level=%s", log_level);
233933
-            runner_add_arg(&runner, daemon_log_level);
233933
-        }
233933
+    runner_add_args(&runner, SBIN_DIR "/glusterfs", "-s",
233933
+                    svc->proc.volfileserver, "--volfile-id",
233933
+                    svc->proc.volfileid, "-p", svc->proc.pidfile, "-l",
233933
+                    svc->proc.logfile, "-S", svc->conn.sockpath, NULL);
233933
+
233933
+    if (dict_get_strn(priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
233933
+                      SLEN(GLUSTERD_LOCALTIME_LOGGING_KEY),
233933
+                      &localtime_logging) == 0) {
233933
+        if (strcmp(localtime_logging, "enable") == 0)
233933
+            runner_add_arg(&runner, "--localtime-logging");
233933
+    }
233933
+    if (dict_get_strn(priv->opts, GLUSTERD_DAEMON_LOG_LEVEL_KEY,
233933
+                      SLEN(GLUSTERD_DAEMON_LOG_LEVEL_KEY), &log_level) == 0) {
233933
+        snprintf(daemon_log_level, 30, "--log-level=%s", log_level);
233933
+        runner_add_arg(&runner, daemon_log_level);
233933
+    }
233933
 
233933
-        if (cmdline)
233933
-            dict_foreach(cmdline, svc_add_args, (void *)&runner);
233933
+    if (cmdline)
233933
+        dict_foreach(cmdline, svc_add_args, (void *)&runner);
233933
 
233933
-        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_START_SUCCESS,
233933
-               "Starting %s service", svc->name);
233933
+    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_START_SUCCESS,
233933
+           "Starting %s service", svc->name);
233933
 
233933
-        if (flags == PROC_START_NO_WAIT) {
233933
-            ret = runner_run_nowait(&runner);
233933
-        } else {
233933
-            synclock_unlock(&priv->big_lock);
233933
-            {
233933
-                ret = runner_run(&runner);
233933
-            }
233933
-            synclock_lock(&priv->big_lock);
233933
+    if (flags == PROC_START_NO_WAIT) {
233933
+        ret = runner_run_nowait(&runner);
233933
+    } else {
233933
+        synclock_unlock(&priv->big_lock);
233933
+        {
233933
+            ret = runner_run(&runner);
233933
         }
233933
+        synclock_lock(&priv->big_lock);
233933
     }
233933
-unlock:
233933
-    pthread_mutex_unlock(&priv->attach_lock);
233933
+
233933
 out:
233933
     gf_msg_debug(this->name, 0, "Returning %d", ret);
233933
 
233933
@@ -289,8 +281,7 @@ glusterd_svc_build_volfile_path(char *server, char *workdir, char *volfile,
233933
 
233933
     glusterd_svc_build_svcdir(server, workdir, dir, sizeof(dir));
233933
 
233933
-    if (!strcmp(server, "quotad"))
233933
-        /*quotad has different volfile name*/
233933
+    if (!strcmp(server, "quotad")) /*quotad has different volfile name*/
233933
         snprintf(volfile, len, "%s/%s.vol", dir, server);
233933
     else
233933
         snprintf(volfile, len, "%s/%s-server.vol", dir, server);
233933
@@ -375,138 +366,3 @@ glusterd_svc_common_rpc_notify(glusterd_conn_t *conn, rpc_clnt_event_t event)
233933
 
233933
     return ret;
233933
 }
233933
-
233933
-void
233933
-glusterd_volume_svc_build_volfile_path(char *server, glusterd_volinfo_t *vol,
233933
-                                       char *volfile, size_t len)
233933
-{
233933
-    GF_ASSERT(len == PATH_MAX);
233933
-
233933
-    if (!strcmp(server, "glustershd")) {
233933
-        glusterd_svc_build_shd_volfile_path(vol, volfile, len);
233933
-    }
233933
-}
233933
-
233933
-int
233933
-glusterd_muxsvc_common_rpc_notify(glusterd_svc_proc_t *mux_proc,
233933
-                                  rpc_clnt_event_t event)
233933
-{
233933
-    int ret = 0;
233933
-    glusterd_svc_t *svc = NULL;
233933
-    glusterd_svc_t *tmp = NULL;
233933
-    xlator_t *this = NULL;
233933
-    gf_boolean_t need_logging = _gf_false;
233933
-
233933
-    this = THIS;
233933
-    GF_ASSERT(this);
233933
-
233933
-    if (!mux_proc) {
233933
-        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL,
233933
-               "Failed to get the svc proc data");
233933
-        return -1;
233933
-    }
233933
-
233933
-    /* Currently this function was used for shd svc, if this function is
233933
-     * using for another svc, change ths glustershd reference. We can get
233933
-     * the svc name from any of the attached svc's
233933
-     */
233933
-    switch (event) {
233933
-        case RPC_CLNT_CONNECT:
233933
-            gf_msg_debug(this->name, 0,
233933
-                         "glustershd has connected with glusterd.");
233933
-            gf_event(EVENT_SVC_CONNECTED, "svc_name=glustershd");
233933
-            cds_list_for_each_entry_safe(svc, tmp, &mux_proc->svcs, mux_svc)
233933
-            {
233933
-                if (svc->online)
233933
-                    continue;
233933
-                svc->online = _gf_true;
233933
-            }
233933
-            break;
233933
-
233933
-        case RPC_CLNT_DISCONNECT:
233933
-            cds_list_for_each_entry_safe(svc, tmp, &mux_proc->svcs, mux_svc)
233933
-            {
233933
-                if (svc->online) {
233933
-                    if (!need_logging)
233933
-                        need_logging = _gf_true;
233933
-                    svc->online = _gf_false;
233933
-                }
233933
-            }
233933
-            if (need_logging) {
233933
-                gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_NODE_DISCONNECTED,
233933
-                       "glustershd has disconnected from glusterd.");
233933
-                gf_event(EVENT_SVC_DISCONNECTED, "svc_name=glustershd");
233933
-            }
233933
-            break;
233933
-
233933
-        default:
233933
-            gf_msg_trace(this->name, 0, "got some other RPC event %d", event);
233933
-            break;
233933
-    }
233933
-
233933
-    return ret;
233933
-}
233933
-
233933
-int
233933
-glusterd_muxsvc_conn_init(glusterd_conn_t *conn, glusterd_svc_proc_t *mux_proc,
233933
-                          char *sockpath, int frame_timeout,
233933
-                          glusterd_muxsvc_conn_notify_t notify)
233933
-{
233933
-    int ret = -1;
233933
-    dict_t *options = NULL;
233933
-    struct rpc_clnt *rpc = NULL;
233933
-    xlator_t *this = THIS;
233933
-    glusterd_svc_t *svc = NULL;
233933
-
233933
-    options = dict_new();
233933
-    if (!this || !options)
233933
-        goto out;
233933
-
233933
-    svc = cds_list_entry(conn, glusterd_svc_t, conn);
233933
-    if (!svc) {
233933
-        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL,
233933
-               "Failed to get the service");
233933
-        goto out;
233933
-    }
233933
-
233933
-    ret = rpc_transport_unix_options_build(options, sockpath, frame_timeout);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    ret = dict_set_int32n(options, "transport.socket.ignore-enoent",
233933
-                          SLEN("transport.socket.ignore-enoent"), 1);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    /* @options is free'd by rpc_transport when destroyed */
233933
-    rpc = rpc_clnt_new(options, this, (char *)svc->name, 16);
233933
-    if (!rpc) {
233933
-        ret = -1;
233933
-        goto out;
233933
-    }
233933
-
233933
-    ret = rpc_clnt_register_notify(rpc, glusterd_muxsvc_conn_common_notify,
233933
-                                   mux_proc);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    ret = snprintf(conn->sockpath, sizeof(conn->sockpath), "%s", sockpath);
233933
-    if (ret < 0)
233933
-        goto out;
233933
-    else
233933
-        ret = 0;
233933
-
233933
-    conn->frame_timeout = frame_timeout;
233933
-    conn->rpc = rpc;
233933
-    mux_proc->notify = notify;
233933
-out:
233933
-    if (options)
233933
-        dict_unref(options);
233933
-    if (ret) {
233933
-        if (rpc) {
233933
-            rpc_clnt_unref(rpc);
233933
-            rpc = NULL;
233933
-        }
233933
-    }
233933
-    return ret;
233933
-}
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
index fbc5225..c850bfd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
@@ -13,12 +13,9 @@
 
 #include "glusterd-proc-mgmt.h"
 #include "glusterd-conn-mgmt.h"
-#include "glusterd-rcu.h"
 
 struct glusterd_svc_;
-
 typedef struct glusterd_svc_ glusterd_svc_t;
-typedef struct glusterd_svc_proc_ glusterd_svc_proc_t;
 
 typedef void (*glusterd_svc_build_t)(glusterd_svc_t *svc);
 
@@ -28,17 +25,6 @@ typedef int (*glusterd_svc_start_t)(glusterd_svc_t *svc, int flags);
 typedef int (*glusterd_svc_stop_t)(glusterd_svc_t *svc, int sig);
 typedef int (*glusterd_svc_reconfigure_t)(void *data);
 
-typedef int (*glusterd_muxsvc_conn_notify_t)(glusterd_svc_proc_t *mux_proc,
-                                             rpc_clnt_event_t event);
-
-struct glusterd_svc_proc_ {
-    struct cds_list_head svc_proc_list;
-    struct cds_list_head svcs;
-    glusterd_muxsvc_conn_notify_t notify;
-    rpc_clnt_t *rpc;
-    void *data;
-};
-
 struct glusterd_svc_ {
     char name[NAME_MAX];
     glusterd_conn_t conn;
@@ -49,8 +35,6 @@ struct glusterd_svc_ {
     gf_boolean_t online;
     gf_boolean_t inited;
     glusterd_svc_reconfigure_t reconfigure;
-    glusterd_svc_proc_t *svc_proc;
-    struct cds_list_head mux_svc;
 };
 
 int
@@ -85,15 +69,4 @@ glusterd_svc_reconfigure(int (*create_volfile)());
 int
 glusterd_svc_common_rpc_notify(glusterd_conn_t *conn, rpc_clnt_event_t event);
 
-int
-glusterd_muxsvc_common_rpc_notify(glusterd_svc_proc_t *conn,
-                                  rpc_clnt_event_t event);
-
-int
-glusterd_proc_get_pid(glusterd_proc_t *proc);
-
-int
-glusterd_muxsvc_conn_init(glusterd_conn_t *conn, glusterd_svc_proc_t *mux_proc,
-                          char *sockpath, int frame_timeout,
-                          glusterd_muxsvc_conn_notify_t notify);
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-tier.c b/xlators/mgmt/glusterd/src/glusterd-tier.c
index 23a9592..4dc0d44 100644
--- a/xlators/mgmt/glusterd/src/glusterd-tier.c
+++ b/xlators/mgmt/glusterd/src/glusterd-tier.c
@@ -27,7 +27,6 @@
 #include "glusterd-messages.h"
 #include "glusterd-mgmt.h"
 #include "glusterd-syncop.h"
-#include "glusterd-shd-svc-helper.h"
 
 #include <sys/wait.h>
 #include <dlfcn.h>
@@ -616,7 +615,7 @@ glusterd_op_remove_tier_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
 
     if (cmd == GF_DEFRAG_CMD_DETACH_START &&
         volinfo->status == GLUSTERD_STATUS_STARTED) {
-        ret = glusterd_svcs_reconfigure(volinfo);
+        ret = glusterd_svcs_reconfigure();
         if (ret) {
             gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL,
                    "Unable to reconfigure NFS-Server");
diff --git a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
index ab463f1..04ceec5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
@@ -83,6 +83,7 @@ glusterd_tierdsvc_init(void *data)
         goto out;
 
     notify = glusterd_svc_common_rpc_notify;
+    glusterd_store_perform_node_state_store(volinfo);
 
     volinfo->type = GF_CLUSTER_TYPE_TIER;
 
@@ -394,7 +395,6 @@ int
 glusterd_tierdsvc_restart()
 {
     glusterd_volinfo_t *volinfo = NULL;
-    glusterd_volinfo_t *tmp = NULL;
     int ret = 0;
     xlator_t *this = THIS;
     glusterd_conf_t *conf = NULL;
@@ -405,7 +405,7 @@ glusterd_tierdsvc_restart()
     conf = this->private;
     GF_VALIDATE_OR_GOTO(this->name, conf, out);
 
-    cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
+    cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
     {
         /* Start per volume tierd svc */
         if (volinfo->status == GLUSTERD_STATUS_STARTED &&
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
233933
index 4525ec7..2aa975b 100644
233933
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
233933
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
233933
@@ -61,7 +61,6 @@
233933
 #include "glusterd-server-quorum.h"
233933
 #include <glusterfs/quota-common-utils.h>
233933
 #include <glusterfs/common-utils.h>
233933
-#include "glusterd-shd-svc-helper.h"
233933
 
233933
 #include "xdr-generic.h"
233933
 #include <sys/resource.h>
233933
@@ -625,17 +624,13 @@ glusterd_volinfo_t *
233933
 glusterd_volinfo_unref(glusterd_volinfo_t *volinfo)
233933
 {
233933
     int refcnt = -1;
233933
-    glusterd_conf_t *conf = THIS->private;
233933
 
233933
-    pthread_mutex_lock(&conf->volume_lock);
233933
+    pthread_mutex_lock(&volinfo->reflock);
233933
     {
233933
-        pthread_mutex_lock(&volinfo->reflock);
233933
-        {
233933
-            refcnt = --volinfo->refcnt;
233933
-        }
233933
-        pthread_mutex_unlock(&volinfo->reflock);
233933
+        refcnt = --volinfo->refcnt;
233933
     }
233933
-    pthread_mutex_unlock(&conf->volume_lock);
233933
+    pthread_mutex_unlock(&volinfo->reflock);
233933
+
233933
     if (!refcnt) {
233933
         glusterd_volinfo_delete(volinfo);
233933
         return NULL;
233933
@@ -707,7 +702,6 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo)
233933
     glusterd_snapdsvc_build(&new_volinfo->snapd.svc);
233933
     glusterd_tierdsvc_build(&new_volinfo->tierd.svc);
233933
     glusterd_gfproxydsvc_build(&new_volinfo->gfproxyd.svc);
233933
-    glusterd_shdsvc_build(&new_volinfo->shd.svc);
233933
 
233933
     pthread_mutex_init(&new_volinfo->reflock, NULL);
233933
     *volinfo = glusterd_volinfo_ref(new_volinfo);
233933
@@ -1073,11 +1067,11 @@ glusterd_volinfo_delete(glusterd_volinfo_t *volinfo)
233933
     gf_store_handle_destroy(volinfo->snapd.handle);
233933
 
233933
     glusterd_auth_cleanup(volinfo);
233933
-    glusterd_shd_svcproc_cleanup(&volinfo->shd);
233933
 
233933
     pthread_mutex_destroy(&volinfo->reflock);
233933
     GF_FREE(volinfo);
233933
     ret = 0;
233933
+
233933
 out:
233933
     gf_msg_debug(THIS->name, 0, "Returning %d", ret);
233933
     return ret;
233933
@@ -3929,7 +3923,6 @@ glusterd_spawn_daemons(void *opaque)
233933
     ret = glusterd_snapdsvc_restart();
233933
     ret = glusterd_tierdsvc_restart();
233933
     ret = glusterd_gfproxydsvc_restart();
233933
-    ret = glusterd_shdsvc_restart();
233933
     return ret;
233933
 }
233933
 
233933
@@ -4880,9 +4873,6 @@ glusterd_delete_stale_volume(glusterd_volinfo_t *stale_volinfo,
233933
         svc = &(stale_volinfo->snapd.svc);
233933
         (void)svc->manager(svc, stale_volinfo, PROC_START_NO_WAIT);
233933
     }
233933
-    svc = &(stale_volinfo->shd.svc);
233933
-    (void)svc->manager(svc, stale_volinfo, PROC_START_NO_WAIT);
233933
-
233933
     (void)glusterd_volinfo_remove(stale_volinfo);
233933
 
233933
     return 0;
233933
@@ -4997,15 +4987,6 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
233933
         glusterd_volinfo_unref(old_volinfo);
233933
     }
233933
 
233933
-    ret = glusterd_store_volinfo(new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
233933
-    if (ret) {
233933
-        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
233933
-               "Failed to store "
233933
-               "volinfo for volume %s",
233933
-               new_volinfo->volname);
233933
-        goto out;
233933
-    }
233933
-
233933
     if (glusterd_is_volume_started(new_volinfo)) {
233933
         (void)glusterd_start_bricks(new_volinfo);
233933
         if (glusterd_is_snapd_enabled(new_volinfo)) {
233933
@@ -5014,10 +4995,15 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
233933
                 gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
233933
             }
233933
         }
233933
-        svc = &(new_volinfo->shd.svc);
233933
-        if (svc->manager(svc, new_volinfo, PROC_START_NO_WAIT)) {
233933
-            gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
233933
-        }
233933
+    }
233933
+
233933
+    ret = glusterd_store_volinfo(new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
233933
+    if (ret) {
233933
+        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
233933
+               "Failed to store "
233933
+               "volinfo for volume %s",
233933
+               new_volinfo->volname);
233933
+        goto out;
233933
     }
233933
 
233933
     ret = glusterd_create_volfiles_and_notify_services(new_volinfo);
233933
@@ -5521,7 +5507,9 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
233933
     glusterd_svc_build_pidfile_path(server, priv->rundir, pidfile,
233933
                                     sizeof(pidfile));
233933
 
233933
-    if (strcmp(server, priv->nfs_svc.name) == 0)
233933
+    if (strcmp(server, priv->shd_svc.name) == 0)
233933
+        svc = &(priv->shd_svc);
233933
+    else if (strcmp(server, priv->nfs_svc.name) == 0)
233933
         svc = &(priv->nfs_svc);
233933
     else if (strcmp(server, priv->quotad_svc.name) == 0)
233933
         svc = &(priv->quotad_svc);
233933
@@ -5552,6 +5540,9 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
233933
     if (!strcmp(server, priv->nfs_svc.name))
233933
         ret = dict_set_nstrn(dict, key, keylen, "NFS Server",
233933
                              SLEN("NFS Server"));
233933
+    else if (!strcmp(server, priv->shd_svc.name))
233933
+        ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon",
233933
+                             SLEN("Self-heal Daemon"));
233933
     else if (!strcmp(server, priv->quotad_svc.name))
233933
         ret = dict_set_nstrn(dict, key, keylen, "Quota Daemon",
233933
                              SLEN("Quota Daemon"));
233933
@@ -9115,21 +9106,6 @@ glusterd_friend_remove_cleanup_vols(uuid_t uuid)
233933
                            "to stop snapd daemon service");
233933
                 }
233933
             }
233933
-
233933
-            if (glusterd_is_shd_compatible_volume(volinfo)) {
233933
-                /*
233933
-                 * Sending stop request for all volumes. So it is fine
233933
-                 * to send stop for mux shd
233933
-                 */
233933
-                svc = &(volinfo->shd.svc);
233933
-                ret = svc->stop(svc, SIGTERM);
233933
-                if (ret) {
233933
-                    gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
233933
-                           "Failed "
233933
-                           "to stop shd daemon service");
233933
-                }
233933
-            }
233933
-
233933
             if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
233933
                 svc = &(volinfo->tierd.svc);
233933
                 ret = svc->stop(svc, SIGTERM);
233933
@@ -9155,7 +9131,7 @@ glusterd_friend_remove_cleanup_vols(uuid_t uuid)
233933
     }
233933
 
233933
     /* Reconfigure all daemon services upon peer detach */
233933
-    ret = glusterd_svcs_reconfigure(NULL);
233933
+    ret = glusterd_svcs_reconfigure();
233933
     if (ret) {
233933
         gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
233933
                "Failed to reconfigure all daemon services.");
233933
@@ -14746,74 +14722,3 @@ glusterd_is_profile_on(glusterd_volinfo_t *volinfo)
233933
         return _gf_true;
233933
     return _gf_false;
233933
 }
233933
-
233933
-int32_t
233933
-glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
233933
-                         int32_t count)
233933
-{
233933
-    int ret = -1;
233933
-    int32_t pid = -1;
233933
-    int32_t brick_online = -1;
233933
-    char key[64] = {0};
233933
-    int keylen;
233933
-    char *pidfile = NULL;
233933
-    xlator_t *this = NULL;
233933
-    char *uuid_str = NULL;
233933
-
233933
-    this = THIS;
233933
-    GF_VALIDATE_OR_GOTO(THIS->name, this, out);
233933
-
233933
-    GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
233933
-    GF_VALIDATE_OR_GOTO(this->name, dict, out);
233933
-
233933
-    keylen = snprintf(key, sizeof(key), "brick%d.hostname", count);
233933
-    ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon",
233933
-                         SLEN("Self-heal Daemon"));
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    keylen = snprintf(key, sizeof(key), "brick%d.path", count);
233933
-    uuid_str = gf_strdup(uuid_utoa(MY_UUID));
233933
-    if (!uuid_str) {
233933
-        ret = -1;
233933
-        goto out;
233933
-    }
233933
-    ret = dict_set_dynstrn(dict, key, keylen, uuid_str);
233933
-    if (ret)
233933
-        goto out;
233933
-    uuid_str = NULL;
233933
-
233933
-    /* shd doesn't have a port. but the cli needs a port key with
233933
-     * a zero value to parse.
233933
-     * */
233933
-
233933
-    keylen = snprintf(key, sizeof(key), "brick%d.port", count);
233933
-    ret = dict_set_int32n(dict, key, keylen, 0);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    pidfile = volinfo->shd.svc.proc.pidfile;
233933
-
233933
-    brick_online = gf_is_service_running(pidfile, &pid);
233933
-
233933
-    /* If shd is not running, then don't print the pid */
233933
-    if (!brick_online)
233933
-        pid = -1;
233933
-    keylen = snprintf(key, sizeof(key), "brick%d.pid", count);
233933
-    ret = dict_set_int32n(dict, key, keylen, pid);
233933
-    if (ret)
233933
-        goto out;
233933
-
233933
-    keylen = snprintf(key, sizeof(key), "brick%d.status", count);
233933
-    ret = dict_set_int32n(dict, key, keylen, brick_online);
233933
-
233933
-out:
233933
-    if (uuid_str)
233933
-        GF_FREE(uuid_str);
233933
-    if (ret)
233933
-        gf_msg(this ? this->name : "glusterd", GF_LOG_ERROR, 0,
233933
-               GD_MSG_DICT_SET_FAILED,
233933
-               "Returning %d. adding values to dict failed", ret);
233933
-
233933
-    return ret;
233933
-}
233933
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 5c6a453..ead16b2 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -881,8 +881,4 @@ glusterd_is_profile_on(glusterd_volinfo_t *volinfo);
 
 char *
 search_brick_path_from_proc(pid_t brick_pid, char *brickpath);
-
-int32_t
-glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
-                         int32_t count);
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 8b58d40..5e0214e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -36,7 +36,6 @@
 #include "glusterd-svc-mgmt.h"
 #include "glusterd-svc-helper.h"
 #include "glusterd-snapd-svc-helper.h"
-#include "glusterd-shd-svc-helper.h"
 #include "glusterd-gfproxyd-svc-helper.h"
 
 struct gd_validate_reconf_opts {
@@ -4865,7 +4864,7 @@ volgen_get_shd_key(int type)
 static int
 volgen_set_shd_key_enable(dict_t *set_dict, const int type)
 {
-    int ret = 0;
+    int ret = -1;
 
     switch (type) {
         case GF_CLUSTER_TYPE_REPLICATE:
@@ -5156,15 +5155,24 @@ out:
 static int
 build_shd_volume_graph(xlator_t *this, volgen_graph_t *graph,
                        glusterd_volinfo_t *volinfo, dict_t *mod_dict,
-                       dict_t *set_dict, gf_boolean_t graph_check)
+                       dict_t *set_dict, gf_boolean_t graph_check,
+                       gf_boolean_t *valid_config)
 {
     volgen_graph_t cgraph = {0};
     int ret = 0;
     int clusters = -1;
 
+    if (!graph_check && (volinfo->status != GLUSTERD_STATUS_STARTED))
+        goto out;
+
     if (!glusterd_is_shd_compatible_volume(volinfo))
         goto out;
 
+    /* Shd graph is valid only when there is at least one
+     * replica/disperse volume is present
+     */
+    *valid_config = _gf_true;
+
     ret = prepare_shd_volume_options(volinfo, mod_dict, set_dict);
     if (ret)
         goto out;
@@ -5194,16 +5202,19 @@ out:
 }
 
 int
-build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph,
-                dict_t *mod_dict)
+build_shd_graph(volgen_graph_t *graph, dict_t *mod_dict)
 {
+    glusterd_volinfo_t *voliter = NULL;
     xlator_t *this = NULL;
+    glusterd_conf_t *priv = NULL;
     dict_t *set_dict = NULL;
     int ret = 0;
+    gf_boolean_t valid_config = _gf_false;
    xlator_t *iostxl = NULL;
     gf_boolean_t graph_check = _gf_false;
 
     this = THIS;
+    priv = this->private;
 
     set_dict = dict_new();
     if (!set_dict) {
@@ -5213,18 +5224,26 @@ build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph,
 
     if (mod_dict)
         graph_check = dict_get_str_boolean(mod_dict, "graph-check", 0);
-    iostxl = volgen_graph_add_as(graph, "debug/io-stats", volinfo->volname);
+    iostxl = volgen_graph_add_as(graph, "debug/io-stats", "glustershd");
     if (!iostxl) {
         ret = -1;
         goto out;
     }
 
-    ret = build_shd_volume_graph(this, graph, volinfo, mod_dict, set_dict,
-                                 graph_check);
+    cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
+    {
+        ret = build_shd_volume_graph(this, graph, voliter, mod_dict, set_dict,
+                                     graph_check, &valid_config);
+        ret = dict_reset(set_dict);
+        if (ret)
+            goto out;
+    }
 
 out:
     if (set_dict)
         dict_unref(set_dict);
+    if (!valid_config)
+        ret = -EINVAL;
     return ret;
 }
 
@@ -6541,10 +6560,6 @@ glusterd_create_volfiles(glusterd_volinfo_t *volinfo)
     if (ret)
         gf_log(this->name, GF_LOG_ERROR, "Could not generate gfproxy volfiles");
 
-    ret = glusterd_shdsvc_create_volfile(volinfo);
-    if (ret)
-        gf_log(this->name, GF_LOG_ERROR, "Could not generate shd volfiles");
-
     dict_del_sizen(volinfo->dict, "skip-CLIOT");
 
 out:
@@ -6625,7 +6640,7 @@ validate_shdopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
     ret = dict_set_int32_sizen(val_dict, "graph-check", 1);
     if (ret)
         goto out;
-    ret = build_shd_graph(volinfo, &graph, val_dict);
+    ret = build_shd_graph(&graph, val_dict);
     if (!ret)
         ret = graph_reconf_validateopt(&graph.graph, op_errstr);
 
@@ -7002,22 +7017,3 @@ gd_is_boolean_option(char *key)
 
     return _gf_false;
 }
-
-int
-glusterd_shdsvc_generate_volfile(glusterd_volinfo_t *volinfo, char *filename,
-                                 dict_t *mode_dict)
-{
-    int ret = -1;
-    volgen_graph_t graph = {
-        0,
-    };
-
-    graph.type = GF_SHD;
-    ret = build_shd_graph(volinfo, &graph, mode_dict);
-    if (!ret)
-        ret = volgen_write_volfile(&graph, filename);
-
-    volgen_graph_free(&graph);
-
-    return ret;
-}
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.h b/xlators/mgmt/glusterd/src/glusterd-volgen.h
index 897d8fa..f9fc068 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.h
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.h
@@ -66,7 +66,6 @@ typedef enum {
     GF_REBALANCED = 1,
     GF_QUOTAD,
     GF_SNAPD,
-    GF_SHD,
 } glusterd_graph_type_t;
 
 struct volgen_graph {
@@ -78,8 +77,6 @@ typedef struct volgen_graph volgen_graph_t;
 
 typedef int (*glusterd_graph_builder_t)(volgen_graph_t *graph,
                                         dict_t *mod_dict);
-typedef int (*glusterd_vol_graph_builder_t)(glusterd_volinfo_t *,
-                                            char *filename, dict_t *mod_dict);
 
 #define COMPLETE_OPTION(key, completion, ret)                                  \
     do {                                                                       \
@@ -204,8 +201,7 @@ void
 glusterd_get_shd_filepath(char *filename);
 
 int
-build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph,
-                dict_t *mod_dict);
+build_shd_graph(volgen_graph_t *graph, dict_t *mod_dict);
 
 int
 build_nfs_graph(volgen_graph_t *graph, dict_t *mod_dict);
@@ -317,9 +313,4 @@ glusterd_generate_gfproxyd_volfile(glusterd_volinfo_t *volinfo);
 
 int
 glusterd_build_gfproxyd_volfile(glusterd_volinfo_t *volinfo, char *filename);
-
-int
-glusterd_shdsvc_generate_volfile(glusterd_volinfo_t *volinfo, char *filename,
-                                 dict_t *mode_dict);
-
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 4c3ad50..1ea8ba6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1940,7 +1940,7 @@ static int
 glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
                          dict_t *dict, char **op_errstr)
 {
-    glusterd_svc_t *svc = NULL;
+    glusterd_conf_t *priv = NULL;
     gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
     int ret = 0;
     char msg[2408] = {
@@ -1950,6 +1950,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
         "Self-heal daemon is not running. "
         "Check self-heal daemon log file.";
 
+    priv = this->private;
     ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"),
                           (int32_t *)&heal_op);
     if (ret) {
@@ -1958,7 +1959,6 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
         goto out;
     }
 
-    svc = &(volinfo->shd.svc);
     switch (heal_op) {
         case GF_SHD_OP_INVALID:
         case GF_SHD_OP_HEAL_ENABLE: /* This op should be handled in volume-set*/
@@ -1988,7 +1988,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
                 goto out;
             }
 
-            if (!svc->online) {
+            if (!priv->shd_svc.online) {
                 ret = -1;
                 *op_errstr = gf_strdup(offline_msg);
                 goto out;
@@ -2009,7 +2009,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
                 goto out;
             }
 
-            if (!svc->online) {
+            if (!priv->shd_svc.online) {
                 ret = -1;
                 *op_errstr = gf_strdup(offline_msg);
                 goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index c0973cb..d360312 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -1537,6 +1537,14 @@ init(xlator_t *this)
         exit(1);
     }
 
+    ret = glusterd_init_var_run_dirs(this, rundir, GLUSTERD_GLUSTERSHD_RUN_DIR);
+    if (ret) {
+        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CREATE_DIR_FAILED,
+               "Unable to create "
+               "glustershd running directory");
+        exit(1);
+    }
+
     ret = glusterd_init_var_run_dirs(this, rundir, GLUSTERD_NFS_RUN_DIR);
     if (ret) {
         gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CREATE_DIR_FAILED,
@@ -1811,9 +1819,6 @@ init(xlator_t *this)
     CDS_INIT_LIST_HEAD(&conf->snapshots);
     CDS_INIT_LIST_HEAD(&conf->missed_snaps_list);
     CDS_INIT_LIST_HEAD(&conf->brick_procs);
-    CDS_INIT_LIST_HEAD(&conf->shd_procs);
-    pthread_mutex_init(&conf->attach_lock, NULL);
-    pthread_mutex_init(&conf->volume_lock, NULL);
 
     pthread_mutex_init(&conf->mutex, NULL);
     conf->rpc = rpc;
@@ -1894,6 +1899,7 @@ init(xlator_t *this)
     glusterd_mgmt_v3_lock_timer_init();
     glusterd_txn_opinfo_dict_init();
 
+    glusterd_shdsvc_build(&conf->shd_svc);
     glusterd_nfssvc_build(&conf->nfs_svc);
     glusterd_quotadsvc_build(&conf->quotad_svc);
     glusterd_bitdsvc_build(&conf->bitd_svc);
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 0fbc9dd..2be005c 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -28,7 +28,6 @@
 #include "glusterd-sm.h"
 #include "glusterd-snapd-svc.h"
 #include "glusterd-tierd-svc.h"
-#include "glusterd-shd-svc.h"
 #include "glusterd-bitd-svc.h"
 #include "glusterd1-xdr.h"
 #include "protocol-common.h"
@@ -173,6 +172,7 @@ typedef struct {
     char workdir[VALID_GLUSTERD_PATHMAX];
     char rundir[VALID_GLUSTERD_PATHMAX];
     rpcsvc_t *rpc;
+    glusterd_svc_t shd_svc;
     glusterd_svc_t nfs_svc;
     glusterd_svc_t bitd_svc;
     glusterd_svc_t scrub_svc;
@@ -181,7 +181,6 @@ typedef struct {
     struct cds_list_head volumes;
     struct cds_list_head snapshots;   /*List of snap volumes */
     struct cds_list_head brick_procs; /* List of brick processes */
-    struct cds_list_head shd_procs;   /* List of shd processes */
     pthread_mutex_t xprt_lock;
     struct list_head xprt_list;
     pthread_mutex_t import_volumes;
@@ -222,11 +221,6 @@ typedef struct {
     gf_atomic_t blockers;
     uint32_t mgmt_v3_lock_timeout;
     gf_boolean_t restart_bricks;
-    pthread_mutex_t attach_lock; /* Lock can be per process or a common one */
-    pthread_mutex_t volume_lock; /* We release the big_lock from lot of places
-                                    which might lead the modification of volinfo
-                                    list.
-                                 */
     gf_atomic_t thread_count;
 } glusterd_conf_t;
 
@@ -519,7 +513,6 @@ struct glusterd_volinfo_ {
 
     glusterd_snapdsvc_t snapd;
     glusterd_tierdsvc_t tierd;
-    glusterd_shdsvc_t shd;
     glusterd_gfproxydsvc_t gfproxyd;
     int32_t quota_xattr_version;
     gf_boolean_t stage_deleted;         /* volume has passed staging
@@ -646,6 +639,7 @@ typedef enum {
 #define GLUSTERD_DEFAULT_SNAPS_BRICK_DIR "/gluster/snaps"
 #define GLUSTERD_BITD_RUN_DIR "/bitd"
 #define GLUSTERD_SCRUB_RUN_DIR "/scrub"
+#define GLUSTERD_GLUSTERSHD_RUN_DIR "/glustershd"
 #define GLUSTERD_NFS_RUN_DIR "/nfs"
 #define GLUSTERD_QUOTAD_RUN_DIR "/quotad"
 #define GLUSTER_SHARED_STORAGE_BRICK_DIR GLUSTERD_DEFAULT_WORKDIR "/ss_brick"
@@ -701,26 +695,6 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
         }                                                                      \
     } while (0)
 
-#define GLUSTERD_GET_SHD_RUNDIR(path, volinfo, priv)                           \
-    do {                                                                       \
-        int32_t _shd_dir_len;                                                  \
-        _shd_dir_len = snprintf(path, PATH_MAX, "%s/shd/%s", priv->rundir,     \
-                                volinfo->volname);                             \
-        if ((_shd_dir_len < 0) || (_shd_dir_len >= PATH_MAX)) {                \
-            path[0] = 0;                                                       \
-        }                                                                      \
-    } while (0)
-
-#define GLUSTERD_GET_SHD_PID_FILE(path, volinfo, priv)                         \
-    do {                                                                       \
-        int32_t _shd_pid_len;                                                  \
-        _shd_pid_len = snprintf(path, PATH_MAX, "%s/shd/%s-shd.pid",           \
-                                priv->rundir, volinfo->volname);               \
-        if ((_shd_pid_len < 0) || (_shd_pid_len >= PATH_MAX)) {                \
-            path[0] = 0;                                                       \
-        }                                                                      \
-    } while (0)
-
 #define GLUSTERD_GET_VOLUME_PID_DIR(path, volinfo, priv)                       \
     do {                                                                       \
         int32_t _vol_pid_len;                                                  \
diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c
index 532ef35..e156d4d 100644
--- a/xlators/protocol/client/src/client.c
+++ b/xlators/protocol/client/src/client.c
@@ -46,6 +46,7 @@ client_fini_complete(xlator_t *this)
     GF_VALIDATE_OR_GOTO(this->name, this->private, out);
 
     clnt_conf_t *conf = this->private;
+
     if (!conf->destroy)
         return 0;
 
@@ -68,11 +69,6 @@ client_notify_dispatch_uniq(xlator_t *this, int32_t event, void *data, ...)
         return 0;
 
     return client_notify_dispatch(this, event, data);
-
-    /* Please avoid any code that access xlator object here
-     * Because for a child down event, once we do the signal
-     * we will start cleanup.
-     */
 }
 
 int
@@ -109,11 +105,6 @@ client_notify_dispatch(xlator_t *this, int32_t event, void *data, ...)
     }
     pthread_mutex_unlock(&ctx->notify_lock);
 
-    /* Please avoid any code that access xlator object here
-     * Because for a child down event, once we do the signal
-     * we will start cleanup.
-     */
-
     return ret;
 }
 
@@ -2287,7 +2278,6 @@ client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
 {
     xlator_t *this = NULL;
     clnt_conf_t *conf = NULL;
-    gf_boolean_t is_parent_down = _gf_false;
     int ret = 0;
 
     this = mydata;
@@ -2351,19 +2341,6 @@ client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
                     if (conf->portmap_err_logged)
                         conf->disconnect_err_logged = 1;
                 }
-                /*
-                 * Once we complete the child down notification,
-                 * There is a chance that the graph might get freed,
-                 * So it is not safe to access any xlator contens
-                 * So here we are checking whether the parent is down
-                 * or not.
-                 */
-                pthread_mutex_lock(&conf->lock);
-                {
-                    is_parent_down = conf->parent_down;
-                }
-                pthread_mutex_unlock(&conf->lock);
-
                 /* If the CHILD_DOWN event goes to parent xlator
                    multiple times, the logic of parent xlator notify
                    may get screwed up.. (eg. CHILD_MODIFIED event in
@@ -2371,12 +2348,6 @@ client_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
                    to parent are genuine */
                 ret = client_notify_dispatch_uniq(this, GF_EVENT_CHILD_DOWN,
                                                   NULL);
-                if (is_parent_down) {
-                    /* If parent is down, then there should not be any
-                     * operation after a child down.
-                     */
-                    goto out;
-                }
                 if (ret)
                     gf_msg(this->name, GF_LOG_INFO, 0,
                            PC_MSG_CHILD_DOWN_NOTIFY_FAILED,
-- 
1.8.3.1