From 783c36e573a9c937422e63af038bb35648483b9e Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Fri, 30 Nov 2018 16:16:55 +0530
Subject: [PATCH 490/493] glusterd: migrating rebalance commands to mgmt_v3
 framework

The rebalance commands currently use the op-sm framework. Port them to
use the mgmt_v3 framework instead. On clusters operating at an
op-version lower than GD_OP_VERSION_6_0, glusterd falls back to the
op-sm framework.
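
A rough sketch of the resulting dispatch in
__glusterd_handle_defrag_volume() (illustrative only, not verbatim; the
actual hunk is in glusterd-rebalance.c below):

    if (priv->op_version < GD_OP_VERSION_6_0) {
            /* some peers may not support mgmt_v3 for rebalance;
             * stay on the op-sm framework */
            ret = glusterd_op_begin(req, op, dict, msg, sizeof(msg));
            glusterd_friend_sm();
            glusterd_op_sm();
    } else {
            /* take the mgmt_v3 path: lock, pre-validate, brick-op,
             * commit and post-validate phases */
            ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(
                      req, op, dict);
    }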

> Change-Id: I6faf4a6335c2e2f3d54bbde79908a7749e4613e7
> fixes: bz#1655827
> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>

upstream patch: https://review.gluster.org/#/c/glusterfs/+/21762/

Change-Id: I6faf4a6335c2e2f3d54bbde79908a7749e4613e7
BUG: 1652466
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/158917
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
 libglusterfs/src/globals.h                     |   2 +
 xlators/mgmt/glusterd/src/glusterd-handler.c   |   4 +-
 xlators/mgmt/glusterd/src/glusterd-mgmt.c      | 128 ++++++-
 xlators/mgmt/glusterd/src/glusterd-mgmt.h      |   5 +-
 xlators/mgmt/glusterd/src/glusterd-op-sm.h     |   3 +
 xlators/mgmt/glusterd/src/glusterd-rebalance.c | 495 ++++++++++++++++++++++++-
 xlators/mgmt/glusterd/src/glusterd-syncop.c    |   9 +
 xlators/mgmt/glusterd/src/glusterd-utils.c     |   4 +-
 xlators/mgmt/glusterd/src/glusterd.h           |   9 +
 9 files changed, 637 insertions(+), 22 deletions(-)

diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h
index 343263c..5e3b180 100644
--- a/libglusterfs/src/globals.h
+++ b/libglusterfs/src/globals.h
@@ -111,6 +111,8 @@
 
 #define GD_OP_VERSION_3_13_3   31303 /* Op-version for GlusterFS 3.13.3 */
 
+#define GD_OP_VERSION_6_0      60000 /* Op-version for GlusterFS 6.0 */
+
 /* Downstream only change */
 #define GD_OP_VERSION_3_11_2   31102 /* Op-version for RHGS 3.3.1-async */
 #define GD_OP_VERSION_3_13_3   31303 /* Op-version for RHGS-3.4-Batch Update-1*/
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index d40de89..d8e3335 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3120,7 +3120,9 @@ __glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
                 glusterd_friend_sm();
                 glusterd_op_sm();
         } else {
-                ret = glusterd_mgmt_v3_initiate_profile_phases(req, cli_op, dict);
+                ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(req,
+                                                                              cli_op,
+                                                                              dict);
         }
 
 out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index d98c6bc..ef8a2d9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -224,6 +224,16 @@ gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict,
                 }
                 break;
 
+        case GD_OP_REBALANCE:
+        case GD_OP_DEFRAG_BRICK_VOLUME:
+                ret = glusterd_mgmt_v3_op_stage_rebalance(dict, op_errstr);
+                if (ret) {
+                        gf_log(this->name, GF_LOG_WARNING,
+                               "Rebalance Prevalidate Failed");
+                        goto out;
+                }
+                break;
+
         case GD_OP_MAX_OPVERSION:
                 ret = 0;
                 break;
@@ -264,6 +274,8 @@ gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict,
                 break;
         }
         case GD_OP_PROFILE_VOLUME:
+        case GD_OP_REBALANCE:
+        case GD_OP_DEFRAG_BRICK_VOLUME:
         {
                 ret = gd_brick_op_phase(op, rsp_dict, dict, op_errstr);
                 if (ret) {
@@ -438,6 +450,19 @@ gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
                         }
                         break;
                 }
+                case GD_OP_REBALANCE:
+                case GD_OP_DEFRAG_BRICK_VOLUME:
+                {
+                        ret = glusterd_mgmt_v3_op_rebalance(dict, op_errstr,
+                                                            rsp_dict);
+                        if (ret) {
+                                gf_msg(this->name, GF_LOG_ERROR, 0,
+                                       GD_MSG_COMMIT_OP_FAIL,
+                                       "Rebalance Commit Failed");
+                                goto out;
+                        }
+                        break;
+                }
 
                default:
                        break;
@@ -880,6 +905,8 @@ glusterd_pre_validate_aggr_rsp_dict (glusterd_op_t op,
         case GD_OP_TIER_START_STOP:
         case GD_OP_REMOVE_TIER_BRICK:
         case GD_OP_PROFILE_VOLUME:
+        case GD_OP_DEFRAG_BRICK_VOLUME:
+        case GD_OP_REBALANCE:
                 break;
         case GD_OP_MAX_OPVERSION:
                 break;
@@ -1197,6 +1224,7 @@ glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
                 break;
         case GD_OP_START_VOLUME:
         case GD_OP_ADD_BRICK:
+        case GD_OP_DEFRAG_BRICK_VOLUME:
         case GD_OP_REPLACE_BRICK:
         case GD_OP_RESET_BRICK:
         case GD_OP_ADD_TIER_BRICK:
@@ -1221,6 +1249,30 @@ glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
                         dict_copy (dict, req_dict);
                 }
                         break;
+
+        case GD_OP_REBALANCE: {
+                if (gd_set_commit_hash(dict) != 0) {
+                        ret = -1;
+                        goto out;
+                }
+                ret = dict_get_str (dict, "volname", &volname);
+                if (ret) {
+                        gf_msg(this->name, GF_LOG_CRITICAL, errno,
+                               GD_MSG_DICT_GET_FAILED,
+                               "volname is not present in "
+                               "operation ctx");
+                        goto out;
+                }
+
+                if (strcasecmp(volname, "all")) {
+                        ret = glusterd_dict_set_volid(dict, volname, op_errstr);
+                        if (ret)
+                                goto out;
+                }
+                dict_copy(dict, req_dict);
+        }
+                break;
+
         case GD_OP_TIER_START_STOP:
         case GD_OP_REMOVE_TIER_BRICK:
         case GD_OP_DETACH_TIER_STATUS:
@@ -1247,6 +1299,7 @@ gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
         call_frame_t               *frame         = NULL;
         int32_t                     op_ret        = -1;
         int32_t                     op_errno      = -1;
+        dict_t                     *rsp_dict      = NULL;
         xlator_t                   *this          = NULL;
         uuid_t                     *peerid        = NULL;
 
@@ -1278,10 +1331,45 @@ gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
         if (ret < 0)
                 goto out;
 
+        if (rsp.dict.dict_len) {
+                /* Unserialize the dictionary */
+                rsp_dict  = dict_new ();
+
+                ret = dict_unserialize (rsp.dict.dict_val,
+                                        rsp.dict.dict_len,
+                                        &rsp_dict);
+                if (ret < 0) {
+                        free (rsp.dict.dict_val);
+                        goto out;
+                } else {
+                        rsp_dict->extra_stdfree = rsp.dict.dict_val;
+                }
+        }
+
         gf_uuid_copy (args->uuid, rsp.uuid);
+        pthread_mutex_lock (&args->lock_dict);
+        {
+                if (rsp.op == GD_OP_DEFRAG_BRICK_VOLUME)
+                        ret = glusterd_syncop_aggr_rsp_dict (rsp.op, args->dict,
+                                                             rsp_dict);
+        }
+        pthread_mutex_unlock (&args->lock_dict);
 
-        op_ret = rsp.op_ret;
-        op_errno = rsp.op_errno;
+        if (ret) {
+                gf_msg (this->name, GF_LOG_ERROR, 0,
+                        GD_MSG_RESP_AGGR_FAIL, "%s",
+                        "Failed to aggregate response from "
+                        "node/brick");
+                if (!rsp.op_ret)
+                        op_ret = ret;
+                else {
+                        op_ret = rsp.op_ret;
+                        op_errno = rsp.op_errno;
+                }
+        } else {
+                op_ret = rsp.op_ret;
+                op_errno = rsp.op_errno;
+        }
 
 out:
         gd_mgmt_v3_collate_errors (args, op_ret, op_errno, rsp.op_errstr,
@@ -1353,11 +1441,12 @@ out:
 }
 
 int
-glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
+glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
                            char **op_errstr, uint32_t txn_generation)
 {
         int32_t              ret        = -1;
         int32_t              peer_cnt   = 0;
+        dict_t              *rsp_dict   = NULL;
         glusterd_peerinfo_t *peerinfo   = NULL;
         struct syncargs      args       = {0};
         uuid_t               peer_uuid  = {0};
@@ -1372,6 +1461,13 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
         GF_ASSERT (req_dict);
         GF_ASSERT (op_errstr);
 
+        rsp_dict = dict_new();
+        if (!rsp_dict) {
+                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+                       "Failed to create response dictionary");
+                goto out;
+        }
+
         /* Perform brick op on local node */
         ret = gd_mgmt_v3_brick_op_fn (op, req_dict, op_errstr,
                                      rsp_dict);
@@ -1395,9 +1491,21 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
                 }
                 goto out;
         }
+        if (op == GD_OP_DEFRAG_BRICK_VOLUME || op == GD_OP_PROFILE_VOLUME) {
+                ret = glusterd_syncop_aggr_rsp_dict(op, op_ctx, rsp_dict);
+                if (ret) {
+                        gf_log(this->name, GF_LOG_ERROR, "%s",
+                               "Failed to aggregate response from "
+                               "node/brick");
+                        goto out;
+                }
+        }
+
+        dict_unref(rsp_dict);
+        rsp_dict = NULL;
 
         /* Sending brick op req to other nodes in the cluster */
-        gd_syncargs_init (&args, rsp_dict);
+        gd_syncargs_init (&args, op_ctx);
         synctask_barrier_init((&args));
         peer_cnt = 0;
 
@@ -1616,6 +1724,13 @@ glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
         GF_ASSERT (op_errstr);
         GF_VALIDATE_OR_GOTO (this->name, op_errno, out);
 
+        if (op == GD_OP_REBALANCE || op == GD_OP_DEFRAG_BRICK_VOLUME) {
+                ret = glusterd_set_rebalance_id_in_rsp_dict(req_dict, op_ctx);
+                if (ret) {
+                        gf_log(this->name, GF_LOG_WARNING,
+                               "Failed to set rebalance id in dict.");
+                }
+        }
         rsp_dict = dict_new ();
         if (!rsp_dict) {
                 gf_msg (this->name, GF_LOG_ERROR, 0,
@@ -2140,8 +2255,9 @@ out:
 }
 
 int32_t
-glusterd_mgmt_v3_initiate_profile_phases (rpcsvc_request_t *req,
-                                          glusterd_op_t op, dict_t *dict)
+glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase (rpcsvc_request_t *req,
+                                                         glusterd_op_t op,
+                                                         dict_t *dict)
 {
         int32_t                     ret              = -1;
         int32_t                     op_ret           = -1;
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
index eff070d..ef0fe10 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
@@ -37,8 +37,9 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
                                      dict_t *dict);
 
 int32_t
-glusterd_mgmt_v3_initiate_profile_phases(rpcsvc_request_t *req,
-                                         glusterd_op_t op, dict_t *dict);
+glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(rpcsvc_request_t *req,
+                                                        glusterd_op_t op,
+                                                        dict_t *dict);
 
 int32_t
 glusterd_mgmt_v3_initiate_snap_phases(rpcsvc_request_t *req, glusterd_op_t op,
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index e64d368..cf1e61c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -318,4 +318,7 @@ glusterd_op_stats_volume (dict_t *dict, char **op_errstr, dict_t *rsp_dict);
 
 int
 glusterd_op_stage_stats_volume (dict_t *dict, char **op_errstr);
+
+int
+gd_set_commit_hash(dict_t *dict);
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 5ab828c..7ba5f65 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -23,6 +23,7 @@
 #include "glusterd.h"
 #include "glusterd-sm.h"
 #include "glusterd-op-sm.h"
+#include "glusterd-mgmt.h"
 #include "glusterd-utils.h"
 #include "glusterd-messages.h"
 #include "glusterd-store.h"
@@ -501,6 +502,7 @@ __glusterd_handle_defrag_volume (rpcsvc_request_t *req)
         int32_t                 ret       = -1;
         gf_cli_req              cli_req   = {{0,}};
         glusterd_conf_t        *priv      = NULL;
+        int32_t                 op        = GD_OP_NONE;
         dict_t                 *dict      = NULL;
         char                   *volname   = NULL;
         gf_cli_defrag_type      cmd       = 0;
@@ -563,17 +565,25 @@ __glusterd_handle_defrag_volume (rpcsvc_request_t *req)
             (cmd == GF_DEFRAG_CMD_STOP_DETACH_TIER) ||
             (cmd == GF_DEFRAG_CMD_STOP) ||
             (cmd == GF_DEFRAG_CMD_DETACH_STATUS)) {
-                ret = glusterd_op_begin (req, GD_OP_DEFRAG_BRICK_VOLUME,
-                                         dict, msg, sizeof (msg));
+                op = GD_OP_DEFRAG_BRICK_VOLUME;
         } else
-                ret = glusterd_op_begin (req, GD_OP_REBALANCE, dict,
-                                         msg, sizeof (msg));
-
+                op = GD_OP_REBALANCE;
+
+        if (priv->op_version < GD_OP_VERSION_6_0) {
+                gf_msg_debug(this->name, 0,
+                             "The cluster is operating at "
+                             "version less than %d. Falling back "
+                             "to op-sm framework.",
+                             GD_OP_VERSION_6_0);
+                ret = glusterd_op_begin(req, op, dict, msg, sizeof(msg));
+                glusterd_friend_sm();
+                glusterd_op_sm();
+        } else {
+                ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(req,
+                                                                              op,
+                                                                              dict);
+        }
 out:
-
-        glusterd_friend_sm ();
-        glusterd_op_sm ();
-
         if (ret) {
                 if (msg[0] == '\0')
                         snprintf (msg, sizeof (msg), "Operation failed");
@@ -583,8 +593,8 @@ out:
         }
 
         free (cli_req.dict.dict_val);//malloced by xdr
-
-        return 0;
+        gf_msg_debug(this->name, 0, "Returning %d", ret);
+        return ret;
 }
 
 int
@@ -628,6 +638,469 @@ glusterd_brick_validation  (dict_t *dict, char *key, data_t *value,
 }
 
 int
+glusterd_set_rebalance_id_in_rsp_dict(dict_t *req_dict, dict_t *rsp_dict)
+{
+        int                    ret         = -1;
+        int32_t                cmd         = 0;
+        char                  *volname     = NULL;
+        glusterd_volinfo_t    *volinfo     = NULL;
+        char                   msg[2048]   = {0};
+        char                  *task_id_str = NULL;
+        xlator_t              *this        = NULL;
+
+        this = THIS;
+        GF_ASSERT(this);
+
+        GF_ASSERT(rsp_dict);
+        GF_ASSERT(req_dict);
+
+        ret = dict_get_str(rsp_dict, "volname", &volname);
+        if (ret) {
+                gf_msg_debug(this->name, 0, "volname not found");
+                goto out;
+        }
+
+        ret = dict_get_int32(rsp_dict, "rebalance-command", &cmd);
+        if (ret) {
+                gf_msg_debug(this->name, 0, "cmd not found");
+                goto out;
+        }
+
+        ret = glusterd_rebalance_cmd_validate(cmd, volname, &volinfo, msg,
+                                              sizeof(msg));
+        if (ret) {
+                gf_msg_debug(this->name, 0, "failed to validate");
+                goto out;
+        }
+
+        /* The rebalance id is generated in glusterd_mgmt_v3_op_stage_rebalance()
+         * but rsp_dict is unavailable there, so copy it from req_dict to
+         * rsp_dict here so that the cli can display the rebalance id. */
+        if ((cmd == GF_DEFRAG_CMD_START) ||
+            (cmd == GF_DEFRAG_CMD_START_LAYOUT_FIX) ||
+            (cmd == GF_DEFRAG_CMD_START_FORCE) ||
+            (cmd == GF_DEFRAG_CMD_START_TIER)) {
+                if (is_origin_glusterd(rsp_dict)) {
+                        ret = dict_get_str(req_dict, GF_REBALANCE_TID_KEY,
+                                           &task_id_str);
+                        if (ret) {
+                                snprintf(msg, sizeof(msg), "Missing rebalance-id");
+                                gf_msg(this->name, GF_LOG_WARNING, 0,
+                                       GD_MSG_REBALANCE_ID_MISSING, "%s", msg);
+                                ret = 0;
+                        } else {
+                                gf_uuid_parse(task_id_str,
+                                              volinfo->rebal.rebalance_id);
+                                ret = glusterd_copy_uuid_to_dict(
+                                      volinfo->rebal.rebalance_id, rsp_dict,
+                                      GF_REBALANCE_TID_KEY);
+                                if (ret) {
+                                        snprintf(msg, sizeof(msg),
+                                                 "Failed to set rebalance id for volume %s",
+                                                 volname);
+                                        gf_msg(this->name, GF_LOG_WARNING, 0,
+                                               GD_MSG_DICT_SET_FAILED, "%s",
+                                               msg);
+                                }
+                        }
+                }
+        }
+
+        /* Set task-id, if available, in rsp_dict for operations other than
+         * start. This is needed when we want rebalance id in xml output
+         */
+        if (cmd == GF_DEFRAG_CMD_STATUS || cmd == GF_DEFRAG_CMD_STOP ||
+            cmd == GF_DEFRAG_CMD_STATUS_TIER) {
+                if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
+                        if (GD_OP_REMOVE_BRICK == volinfo->rebal.op)
+                                ret = glusterd_copy_uuid_to_dict(
+                                      volinfo->rebal.rebalance_id, rsp_dict,
+                                      GF_REMOVE_BRICK_TID_KEY);
+                        else
+                                ret = glusterd_copy_uuid_to_dict(
+                                      volinfo->rebal.rebalance_id,
+                                      rsp_dict, GF_REBALANCE_TID_KEY);
+                        if (ret) {
+                                gf_msg(this->name, GF_LOG_ERROR, 0,
+                                       GD_MSG_DICT_SET_FAILED,
+                                       "Failed to set task-id for volume %s",
+                                       volname);
+                                goto out;
+                        }
+                }
+        }
+out:
+        return ret;
+}
+
+int
+glusterd_mgmt_v3_op_stage_rebalance(dict_t *dict, char **op_errstr)
+{
+        char               *volname     = NULL;
+        char               *cmd_str     = NULL;
+        int                 ret         = 0;
+        int32_t             cmd         = 0;
+        char                msg[2048]   = {0};
+        glusterd_volinfo_t *volinfo     = NULL;
+        char               *task_id_str = NULL;
+        xlator_t           *this        = NULL;
+        int32_t             is_force    = 0;
+
+        this = THIS;
+        GF_ASSERT(this);
+
+        ret = dict_get_str (dict, "volname", &volname);
+        if (ret) {
+                gf_msg_debug(this->name, 0, "volname not found");
+                goto out;
+        }
+
+        ret = dict_get_int32 (dict, "rebalance-command", &cmd);
+        if (ret) {
+                gf_msg_debug(this->name, 0, "cmd not found");
+                goto out;
+        }
+
+        ret = glusterd_rebalance_cmd_validate(cmd, volname, &volinfo, msg,
+                                              sizeof(msg));
+        if (ret) {
+                gf_msg_debug(this->name, 0, "failed to validate");
+                goto out;
+        }
+        switch (cmd) {
+        case GF_DEFRAG_CMD_START_TIER:
+                ret = dict_get_int32 (dict, "force", &is_force);
+                if (ret)
+                        is_force = 0;
+
+                if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
+                        gf_asprintf(op_errstr,
+                                    "volume %s is not a tier "
+                                    "volume.",
+                                    volinfo->volname);
+                        ret = -1;
+                        goto out;
+                }
+                if ((!is_force) && glusterd_is_tier_daemon_running(volinfo)) {
+                        ret = gf_asprintf(op_errstr,
+                                          "A Tier daemon is "
+                                          "already running on volume %s",
+                                          volname);
+                        ret = -1;
+                        goto out;
+                }
+                /* Fall through */
+        case GF_DEFRAG_CMD_START:
+        case GF_DEFRAG_CMD_START_LAYOUT_FIX:
+                /* Check if the connected clients are all of version
+                 * glusterfs-3.6 and higher. This is needed to prevent some data
+                 * loss issues that could occur when older clients are connected
+                 * when rebalance is run. This check can be bypassed by using
+                 * 'force'
+                 */
+                ret = glusterd_check_client_op_version_support(volname,
+                                                               GD_OP_VERSION_RHS_3_0,
+                                                               NULL);
+                if (ret) {
+                        ret = gf_asprintf(op_errstr,
+                                          "Volume %s has one or "
+                                          "more connected clients of a version"
+                                          " lower than GlusterFS-v3.6.0. "
+                                          "Starting rebalance in this state "
+                                          "could lead to data loss.\nPlease "
+                                          "disconnect those clients before "
+                                          "attempting this command again.",
+                                          volname);
+                        goto out;
+                }
+                /* Fall through */
+        case GF_DEFRAG_CMD_START_FORCE:
+                if (is_origin_glusterd(dict)) {
+                        ret = glusterd_generate_and_set_task_id(dict,
+                                                                GF_REBALANCE_TID_KEY);
+                        if (ret) {
+                                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TASKID_GEN_FAIL,
+                                       "Failed to generate task-id");
+                                goto out;
+                        }
+                } else {
+                        ret = dict_get_str(dict, GF_REBALANCE_TID_KEY, &task_id_str);
+                        if (ret) {
+                                snprintf(msg, sizeof(msg), "Missing rebalance-id");
+                                gf_msg(this->name, GF_LOG_WARNING, 0,
+                                       GD_MSG_REBALANCE_ID_MISSING, "%s", msg);
+                                ret = 0;
+                        }
+                }
+                ret = glusterd_defrag_start_validate(volinfo, msg, sizeof(msg),
+                                                     GD_OP_REBALANCE);
+                if (ret) {
+                        gf_msg_debug(this->name, 0,
+                                     "defrag start validate "
+                                     "failed for volume %s.",
+                                     volinfo->volname);
+                        goto out;
+                }
+                break;
+        case GF_DEFRAG_CMD_STATUS_TIER:
+        case GF_DEFRAG_CMD_STATUS:
+        case GF_DEFRAG_CMD_STOP:
+
+                ret = dict_get_str(dict, "cmd-str", &cmd_str);
+                if (ret) {
+                        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+                               "Failed to get "
+                               "command string");
+                        ret = -1;
+                        goto out;
+                }
+                if ((strstr(cmd_str, "rebalance") != NULL) &&
+                    (volinfo->rebal.op != GD_OP_REBALANCE)) {
+                        snprintf(msg, sizeof(msg),
+                                 "Rebalance not started "
+                                 "for volume %s.", volinfo->volname);
+                        ret = -1;
+                        goto out;
+                }
+
+                if (strstr(cmd_str, "remove-brick") != NULL) {
+                        if (volinfo->rebal.op != GD_OP_REMOVE_BRICK) {
+                                snprintf(msg, sizeof(msg),
+                                         "remove-brick not "
+                                         "started for volume %s.",
+                                         volinfo->volname);
+                                ret = -1;
+                                goto out;
+                        }
+
+                        /* For remove-brick status/stop command check whether
+                         * given input brick is part of volume or not.*/
+
+                        ret = dict_foreach_fnmatch(dict, "brick*",
+                                                   glusterd_brick_validation, volinfo);
+                        if (ret == -1) {
+                                snprintf(msg, sizeof(msg),
+                                         "Incorrect brick for volume %s",
+                                         volinfo->volname);
+                                goto out;
+                        }
+                }
+                if (cmd == GF_DEFRAG_CMD_STATUS_TIER) {
+                        if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
+                                snprintf(msg, sizeof(msg),
+                                         "volume %s is not "
+                                         "a tier volume.",
+                                         volinfo->volname);
+                                ret = -1;
+                                goto out;
+                        }
+                }
+
+                break;
+
+        case GF_DEFRAG_CMD_STOP_DETACH_TIER:
+        case GF_DEFRAG_CMD_DETACH_STATUS:
+                if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
+                        snprintf(msg, sizeof(msg),
+                                 "volume %s is not "
+                                 "a tier volume.",
+                                 volinfo->volname);
+                        ret = -1;
+                        goto out;
+                }
+
+                if (volinfo->rebal.op != GD_OP_REMOVE_BRICK) {
+                        snprintf(msg, sizeof(msg),
+                                 "Detach-tier "
+                                 "not started");
+                        ret = -1;
+                        goto out;
+                }
+                break;
+        default:
+                break;
+        }
+
+        ret = 0;
+out:
+        if (ret && op_errstr && msg[0])
+                *op_errstr = gf_strdup(msg);
+
+        return ret;
+}
+
+int
+glusterd_mgmt_v3_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+{
+        char                 *volname        = NULL;
+        int                   ret            = 0;
+        int32_t               cmd            = 0;
+        char                  msg[2048]      = {0};
+        glusterd_volinfo_t   *volinfo        = NULL;
+        glusterd_brickinfo_t *brickinfo      = NULL;
+        glusterd_brickinfo_t *tmp            = NULL;
+        gf_boolean_t          volfile_update = _gf_false;
+        char                 *task_id_str    = NULL;
+        xlator_t             *this           = NULL;
+        uint32_t              commit_hash;
+        int32_t               is_force       = 0;
+
+        this = THIS;
+        GF_ASSERT(this);
+
+        ret = dict_get_str(dict, "volname", &volname);
+        if (ret) {
+                gf_msg_debug(this->name, 0, "volname not given");
+                goto out;
+        }
+
+        ret = dict_get_int32(dict, "rebalance-command", &cmd);
+        if (ret) {
+                gf_msg_debug(this->name, 0, "command not given");
+                goto out;
+        }
+
+        ret = glusterd_rebalance_cmd_validate(cmd, volname, &volinfo, msg,
+                                              sizeof(msg));
+        if (ret) {
+                gf_msg_debug(this->name, 0, "cmd validate failed");
+                goto out;
+        }
+
+        switch (cmd) {
+        case GF_DEFRAG_CMD_START:
+        case GF_DEFRAG_CMD_START_LAYOUT_FIX:
+        case GF_DEFRAG_CMD_START_FORCE:
+        case GF_DEFRAG_CMD_START_TIER:
+
+                ret = dict_get_int32(dict, "force", &is_force);
+                if (ret)
+                        is_force = 0;
+                if (!is_force) {
+                        /* Reset defrag status to 'NOT STARTED' whenever a
+                         * remove-brick/rebalance command is issued to remove
+                         * stale information from a previous run.
+                         */
+                        volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
+
+                        ret = dict_get_str(dict, GF_REBALANCE_TID_KEY, &task_id_str);
+                        if (ret) {
+                                gf_msg_debug(this->name, 0,
+                                             "Missing rebalance id");
+                                ret = 0;
+                        } else {
+                                gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
+                                volinfo->rebal.op = GD_OP_REBALANCE;
+                        }
+                        if (!gd_should_i_start_rebalance(volinfo)) {
+                                /* Store the rebalance-id and rebalance command
+                                 * even if the peer isn't starting a rebalance
+                                 * process. On peers where a rebalance process
+                                 * is started, glusterd_handle_defrag_start
+                                 * performs the storing.
+                                 * Storing this is needed for having
+                                 * 'volume status' work correctly.
+                                 */
+                                glusterd_store_perform_node_state_store(volinfo);
+                                break;
+                        }
+                        if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) {
+                                volinfo->rebal.commit_hash = commit_hash;
+                        }
+                        ret = glusterd_handle_defrag_start(volinfo, msg, sizeof(msg),
+                                                           cmd, NULL, GD_OP_REBALANCE);
+                        break;
+                } else {
+                        /* Reset defrag status to 'STARTED' so that the
+                         * pid is checked and the process is restarted
+                         * accordingly. If the pid is not running, the
+                         * "NOT_STARTED" case runs and restarts the process.
+                         */
+                        volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_STARTED;
+                        volinfo->rebal.defrag_cmd = cmd;
+                        volinfo->rebal.op = GD_OP_REBALANCE;
+
+                        ret = dict_get_str(dict, GF_REBALANCE_TID_KEY, &task_id_str);
+                        if (ret) {
+                                gf_msg_debug(this->name, 0,
+                                             "Missing rebalance id");
+                                ret = 0;
+                        } else {
+                                gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
+                                volinfo->rebal.op = GD_OP_REBALANCE;
+                        }
+                        if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) {
+                                volinfo->rebal.commit_hash = commit_hash;
+                        }
+                        ret = glusterd_restart_rebalance_for_volume(volinfo);
+                        break;
+                }
+        case GF_DEFRAG_CMD_STOP:
+        case GF_DEFRAG_CMD_STOP_DETACH_TIER:
+                /* Clear task-id only on explicitly stopping rebalance.
+                 * Also clear the stored operation, so it doesn't cause trouble
+                 * with future rebalance/remove-brick starts
+                 */
+                gf_uuid_clear(volinfo->rebal.rebalance_id);
+                volinfo->rebal.op = GD_OP_NONE;
+
+                /* Fall back to the old volume file in case of decommission */
+                cds_list_for_each_entry_safe(brickinfo, tmp, &volinfo->bricks,
+                                             brick_list)
+                {
+                        if (!brickinfo->decommissioned)
+                            continue;
+                        brickinfo->decommissioned = 0;
+                        volfile_update = _gf_true;
+                }
+
+                if (volfile_update == _gf_false) {
+                        ret = 0;
+                        break;
+                }
+
+                ret = glusterd_create_volfiles_and_notify_services(volinfo);
+                if (ret) {
+                        gf_msg(this->name, GF_LOG_WARNING, 0,
+                               GD_MSG_VOLFILE_CREATE_FAIL, "failed to create volfiles");
+                        goto out;
+                }
+
+                ret = glusterd_store_volinfo(volinfo,
+                                             GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+                if (ret) {
+                        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_SET_FAIL,
+                               "failed to store volinfo");
+                        goto out;
+                }
+
+                if (volinfo->type == GF_CLUSTER_TYPE_TIER &&
+                    cmd == GF_OP_CMD_STOP_DETACH_TIER) {
+                        glusterd_defrag_info_set(volinfo, dict,
+                                                 GF_DEFRAG_CMD_START_TIER,
+                                                 GF_DEFRAG_CMD_START, GD_OP_REBALANCE);
+                        glusterd_restart_rebalance_for_volume(volinfo);
+                }
+
+                ret = 0;
+                break;
+
+        case GF_DEFRAG_CMD_START_DETACH_TIER:
+        case GF_DEFRAG_CMD_STATUS:
+        case GF_DEFRAG_CMD_STATUS_TIER:
+                break;
+        default:
+                break;
+        }
+
+out:
+        if (ret && op_errstr && msg[0])
+                *op_errstr = gf_strdup(msg);
+
+        return ret;
+}
+
+int
 glusterd_op_stage_rebalance (dict_t *dict, char **op_errstr)
 {
         char                    *volname     = NULL;
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 9a67d1c..7baef64 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -317,6 +317,15 @@ glusterd_syncop_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp)
                 ret = glusterd_max_opversion_use_rsp_dict (aggr, rsp);
         break;
 
+        case GD_OP_PROFILE_VOLUME:
+                ret = glusterd_profile_volume_use_rsp_dict(aggr, rsp);
+                break;
+
+        case GD_OP_REBALANCE:
+        case GD_OP_DEFRAG_BRICK_VOLUME:
+                ret = glusterd_volume_rebalance_use_rsp_dict(aggr, rsp);
+                break;
+
         case GD_OP_TIER_STATUS:
         case GD_OP_DETACH_TIER_STATUS:
         case GD_OP_REMOVE_TIER_BRICK:
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 6468ecb..0fe56eb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -10884,7 +10884,7 @@ glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
         int                  ret           = 0;
         int32_t              index         = 0;
         int32_t              count         = 0;
-        int32_t              current_index = 2;
+        int32_t              current_index = 1;
         int32_t              value32       = 0;
         uint64_t             value         = 0;
         char                *peer_uuid_str = NULL;
@@ -10925,7 +10925,7 @@ glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
         if (ret)
                 gf_msg ("glusterd", GF_LOG_ERROR, 0,
                         GD_MSG_DICT_GET_FAILED,
-                        "failed to get index");
+                        "failed to get index from rsp dict");
 
         memset (key, 0, 256);
         snprintf (key, 256, "node-uuid-%d", index);
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 42c8821..f1e41be 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -1223,6 +1223,15 @@ int glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr,
                                  dict_t *rsp_dict);
 int glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr);
 
+int
+glusterd_set_rebalance_id_in_rsp_dict(dict_t *req_dict, dict_t *rsp_dict);
+
+int
+glusterd_mgmt_v3_op_stage_rebalance(dict_t *dict, char **op_errstr);
+
+int
+glusterd_mgmt_v3_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
+
 int glusterd_op_stage_rebalance (dict_t *dict, char **op_errstr);
 int glusterd_op_rebalance (dict_t *dict, char **op_errstr, dict_t *rsp_dict);
 
-- 
1.8.3.1