887953
From 783c36e573a9c937422e63af038bb35648483b9e Mon Sep 17 00:00:00 2001
887953
From: Sanju Rakonde <srakonde@redhat.com>
887953
Date: Fri, 30 Nov 2018 16:16:55 +0530
887953
Subject: [PATCH 490/493] glusterd: migrating rebalance commands to mgmt_v3
887953
 framework
887953
887953
Current rebalance commands use the op_state machine framework.
887953
Porting it to use the mgmt_v3 framework.
887953
887953
> Change-Id: I6faf4a6335c2e2f3d54bbde79908a7749e4613e7
887953
> fixes: bz#1655827
887953
> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
887953
887953
upstream patch: https://review.gluster.org/#/c/glusterfs/+/21762/
887953
887953
Change-Id: I6faf4a6335c2e2f3d54bbde79908a7749e4613e7
887953
BUG: 1652466
887953
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
887953
Reviewed-on: https://code.engineering.redhat.com/gerrit/158917
887953
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
887953
Tested-by: RHGS Build Bot <nigelb@redhat.com>
887953
---
887953
 libglusterfs/src/globals.h                     |   2 +
887953
 xlators/mgmt/glusterd/src/glusterd-handler.c   |   4 +-
887953
 xlators/mgmt/glusterd/src/glusterd-mgmt.c      | 128 ++++++-
887953
 xlators/mgmt/glusterd/src/glusterd-mgmt.h      |   5 +-
887953
 xlators/mgmt/glusterd/src/glusterd-op-sm.h     |   3 +
887953
 xlators/mgmt/glusterd/src/glusterd-rebalance.c | 495 ++++++++++++++++++++++++-
887953
 xlators/mgmt/glusterd/src/glusterd-syncop.c    |   9 +
887953
 xlators/mgmt/glusterd/src/glusterd-utils.c     |   4 +-
887953
 xlators/mgmt/glusterd/src/glusterd.h           |   9 +
887953
 9 files changed, 637 insertions(+), 22 deletions(-)
887953
887953
diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h
887953
index 343263c..5e3b180 100644
887953
--- a/libglusterfs/src/globals.h
887953
+++ b/libglusterfs/src/globals.h
887953
@@ -111,6 +111,8 @@
887953
 
887953
 #define GD_OP_VERSION_3_13_3   31303 /* Op-version for GlusterFS 3.13.3 */
887953
 
887953
+#define GD_OP_VERSION_6_0      60000 /* Op-version for GlusterFS 6.0 */
887953
+
887953
 /* Downstream only change */
887953
 #define GD_OP_VERSION_3_11_2   31102 /* Op-version for RHGS 3.3.1-async */
887953
 #define GD_OP_VERSION_3_13_3   31303 /* Op-version for RHGS-3.4-Batch Update-1*/
887953
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
887953
index d40de89..d8e3335 100644
887953
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
887953
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
887953
@@ -3120,7 +3120,9 @@ __glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
887953
                 glusterd_friend_sm();
887953
                 glusterd_op_sm();
887953
         } else {
887953
-                ret = glusterd_mgmt_v3_initiate_profile_phases(req, cli_op, dict);
887953
+                ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(req,
887953
+                                                                              cli_op,
887953
+                                                                              dict);
887953
         }
887953
 
887953
 out:
887953
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
887953
index d98c6bc..ef8a2d9 100644
887953
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
887953
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
887953
@@ -224,6 +224,16 @@ gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict,
887953
                 }
887953
                 break;
887953
 
887953
+        case GD_OP_REBALANCE:
887953
+        case GD_OP_DEFRAG_BRICK_VOLUME:
887953
+                ret = glusterd_mgmt_v3_op_stage_rebalance(dict, op_errstr);
887953
+                if (ret) {
887953
+                        gf_log(this->name, GF_LOG_WARNING,
887953
+                               "Rebalance Prevalidate Failed");
887953
+                        goto out;
887953
+                }
887953
+                break;
887953
+
887953
         case GD_OP_MAX_OPVERSION:
887953
                 ret = 0;
887953
                 break;
887953
@@ -264,6 +274,8 @@ gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict,
887953
                 break;
887953
         }
887953
         case GD_OP_PROFILE_VOLUME:
887953
+        case GD_OP_REBALANCE:
887953
+        case GD_OP_DEFRAG_BRICK_VOLUME:
887953
         {
887953
                 ret = gd_brick_op_phase(op, rsp_dict, dict, op_errstr);
887953
                 if (ret) {
887953
@@ -438,6 +450,19 @@ gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
887953
                         }
887953
                         break;
887953
                 }
887953
+                case GD_OP_REBALANCE:
887953
+                case GD_OP_DEFRAG_BRICK_VOLUME:
887953
+                {
887953
+                        ret = glusterd_mgmt_v3_op_rebalance(dict, op_errstr,
887953
+                                                            rsp_dict);
887953
+                        if (ret) {
887953
+                                gf_msg(this->name, GF_LOG_ERROR, 0,
887953
+                                       GD_MSG_COMMIT_OP_FAIL,
887953
+                                       "Rebalance Commit Failed");
887953
+                                goto out;
887953
+                        }
887953
+                        break;
887953
+                }
887953
 
887953
                default:
887953
                        break;
887953
@@ -880,6 +905,8 @@ glusterd_pre_validate_aggr_rsp_dict (glusterd_op_t op,
887953
         case GD_OP_TIER_START_STOP:
887953
         case GD_OP_REMOVE_TIER_BRICK:
887953
         case GD_OP_PROFILE_VOLUME:
887953
+        case GD_OP_DEFRAG_BRICK_VOLUME:
887953
+        case GD_OP_REBALANCE:
887953
                 break;
887953
         case GD_OP_MAX_OPVERSION:
887953
                 break;
887953
@@ -1197,6 +1224,7 @@ glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
887953
                 break;
887953
         case GD_OP_START_VOLUME:
887953
         case GD_OP_ADD_BRICK:
887953
+        case GD_OP_DEFRAG_BRICK_VOLUME:
887953
         case GD_OP_REPLACE_BRICK:
887953
         case GD_OP_RESET_BRICK:
887953
         case GD_OP_ADD_TIER_BRICK:
887953
@@ -1221,6 +1249,30 @@ glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
887953
                         dict_copy (dict, req_dict);
887953
                 }
887953
                         break;
887953
+
887953
+        case GD_OP_REBALANCE: {
887953
+                if (gd_set_commit_hash(dict) != 0) {
887953
+                        ret = -1;
887953
+                        goto out;
887953
+                }
887953
+                ret = dict_get_str (dict, "volname", &volname);
887953
+                if (ret) {
887953
+                        gf_msg(this->name, GF_LOG_CRITICAL, errno,
887953
+                               GD_MSG_DICT_GET_FAILED,
887953
+                               "volname is not present in "
887953
+                               "operation ctx");
887953
+                        goto out;
887953
+                }
887953
+
887953
+                if (strcasecmp(volname, "all")) {
887953
+                        ret = glusterd_dict_set_volid(dict, volname, op_errstr);
887953
+                        if (ret)
887953
+                                goto out;
887953
+                }
887953
+                dict_copy(dict, req_dict);
887953
+        }
887953
+                break;
887953
+
887953
         case GD_OP_TIER_START_STOP:
887953
         case GD_OP_REMOVE_TIER_BRICK:
887953
         case GD_OP_DETACH_TIER_STATUS:
887953
@@ -1247,6 +1299,7 @@ gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
887953
         call_frame_t               *frame         = NULL;
887953
         int32_t                     op_ret        = -1;
887953
         int32_t                     op_errno      = -1;
887953
+        dict_t                     *rsp_dict      = NULL;
887953
         xlator_t                   *this          = NULL;
887953
         uuid_t                     *peerid        = NULL;
887953
 
887953
@@ -1278,10 +1331,45 @@ gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
887953
         if (ret < 0)
887953
                 goto out;
887953
 
887953
+        if (rsp.dict.dict_len) {
887953
+                /* Unserialize the dictionary */
887953
+                rsp_dict  = dict_new ();
887953
+
887953
+                ret = dict_unserialize (rsp.dict.dict_val,
887953
+                                        rsp.dict.dict_len,
887953
+                                        &rsp_dict);
887953
+                if (ret < 0) {
887953
+                        free (rsp.dict.dict_val);
887953
+                        goto out;
887953
+                } else {
887953
+                        rsp_dict->extra_stdfree = rsp.dict.dict_val;
887953
+                }
887953
+        }
887953
+
887953
         gf_uuid_copy (args->uuid, rsp.uuid);
887953
+        pthread_mutex_lock (&args->lock_dict);
887953
+        {
887953
+                if (rsp.op == GD_OP_DEFRAG_BRICK_VOLUME)
887953
+                        ret = glusterd_syncop_aggr_rsp_dict (rsp.op, args->dict,
887953
+                                                             rsp_dict);
887953
+        }
887953
+        pthread_mutex_unlock (&args->lock_dict);
887953
 
887953
-        op_ret = rsp.op_ret;
887953
-        op_errno = rsp.op_errno;
887953
+        if (ret) {
887953
+                gf_msg (this->name, GF_LOG_ERROR, 0,
887953
+                        GD_MSG_RESP_AGGR_FAIL, "%s",
887953
+                        "Failed to aggregate response from "
887953
+                        " node/brick");
887953
+                if (!rsp.op_ret)
887953
+                        op_ret = ret;
887953
+                else {
887953
+                        op_ret = rsp.op_ret;
887953
+                        op_errno = rsp.op_errno;
887953
+                }
887953
+        } else {
887953
+                op_ret = rsp.op_ret;
887953
+                op_errno = rsp.op_errno;
887953
+        }
887953
 
887953
 out:
887953
         gd_mgmt_v3_collate_errors (args, op_ret, op_errno, rsp.op_errstr,
887953
@@ -1353,11 +1441,12 @@ out:
887953
 }
887953
 
887953
 int
887953
-glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
887953
+glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
887953
                            char **op_errstr, uint32_t txn_generation)
887953
 {
887953
         int32_t              ret        = -1;
887953
         int32_t              peer_cnt   = 0;
887953
+        dict_t              *rsp_dict   = NULL;
887953
         glusterd_peerinfo_t *peerinfo   = NULL;
887953
         struct syncargs      args       = {0};
887953
         uuid_t               peer_uuid  = {0};
887953
@@ -1372,6 +1461,13 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
887953
         GF_ASSERT (req_dict);
887953
         GF_ASSERT (op_errstr);
887953
 
887953
+        rsp_dict = dict_new();
887953
+        if (!rsp_dict) {
887953
+                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
887953
+                       "Failed to create response dictionary");
887953
+                goto out;
887953
+        }
887953
+
887953
         /* Perform brick op on local node */
887953
         ret = gd_mgmt_v3_brick_op_fn (op, req_dict, op_errstr,
887953
                                      rsp_dict);
887953
@@ -1395,9 +1491,21 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
887953
                 }
887953
                 goto out;
887953
         }
887953
+        if (op == GD_OP_DEFRAG_BRICK_VOLUME || op == GD_OP_PROFILE_VOLUME) {
887953
+                ret = glusterd_syncop_aggr_rsp_dict(op, op_ctx, rsp_dict);
887953
+                if (ret) {
887953
+                        gf_log(this->name, GF_LOG_ERROR, "%s",
887953
+                               "Failed to aggregate response from "
887953
+                               " node/brick");
887953
+                        goto out;
887953
+                }
887953
+        }
887953
+
887953
+        dict_unref(rsp_dict);
887953
+        rsp_dict = NULL;
887953
 
887953
         /* Sending brick op req to other nodes in the cluster */
887953
-        gd_syncargs_init (&args, rsp_dict);
887953
+        gd_syncargs_init (&args, op_ctx);
887953
         synctask_barrier_init((&args));
887953
         peer_cnt = 0;
887953
 
887953
@@ -1616,6 +1724,13 @@ glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
887953
         GF_ASSERT (op_errstr);
887953
         GF_VALIDATE_OR_GOTO (this->name, op_errno, out);
887953
 
887953
+        if (op == GD_OP_REBALANCE || op == GD_OP_DEFRAG_BRICK_VOLUME) {
887953
+                ret = glusterd_set_rebalance_id_in_rsp_dict(req_dict, op_ctx);
887953
+                if (ret) {
887953
+                        gf_log(this->name, GF_LOG_WARNING,
887953
+                               "Failed to set rebalance id in dict.");
887953
+                }
887953
+        }
887953
         rsp_dict = dict_new ();
887953
         if (!rsp_dict) {
887953
                 gf_msg (this->name, GF_LOG_ERROR, 0,
887953
@@ -2140,8 +2255,9 @@ out:
887953
 }
887953
 
887953
 int32_t
887953
-glusterd_mgmt_v3_initiate_profile_phases (rpcsvc_request_t *req,
887953
-                                          glusterd_op_t op, dict_t *dict)
887953
+glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase (rpcsvc_request_t *req,
887953
+                                                         glusterd_op_t op,
887953
+                                                         dict_t *dict)
887953
 {
887953
         int32_t                     ret              = -1;
887953
         int32_t                     op_ret           = -1;
887953
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
887953
index eff070d..ef0fe10 100644
887953
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.h
887953
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
887953
@@ -37,8 +37,9 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
887953
                                      dict_t *dict);
887953
 
887953
 int32_t
887953
-glusterd_mgmt_v3_initiate_profile_phases(rpcsvc_request_t *req,
887953
-                                         glusterd_op_t op, dict_t *dict);
887953
+glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(rpcsvc_request_t *req,
887953
+                                                        glusterd_op_t op,
887953
+                                                        dict_t *dict);
887953
 
887953
 int32_t
887953
 glusterd_mgmt_v3_initiate_snap_phases(rpcsvc_request_t *req, glusterd_op_t op,
887953
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
887953
index e64d368..cf1e61c 100644
887953
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
887953
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
887953
@@ -318,4 +318,7 @@ glusterd_op_stats_volume (dict_t *dict, char **op_errstr, dict_t *rsp_dict);
887953
 
887953
 int
887953
 glusterd_op_stage_stats_volume (dict_t *dict, char **op_errstr);
887953
+
887953
+int
887953
+gd_set_commit_hash(dict_t *dict);
887953
 #endif
887953
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
887953
index 5ab828c..7ba5f65 100644
887953
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
887953
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
887953
@@ -23,6 +23,7 @@
887953
 #include "glusterd.h"
887953
 #include "glusterd-sm.h"
887953
 #include "glusterd-op-sm.h"
887953
+#include "glusterd-mgmt.h"
887953
 #include "glusterd-utils.h"
887953
 #include "glusterd-messages.h"
887953
 #include "glusterd-store.h"
887953
@@ -501,6 +502,7 @@ __glusterd_handle_defrag_volume (rpcsvc_request_t *req)
887953
         int32_t                 ret       = -1;
887953
         gf_cli_req              cli_req   = {{0,}};
887953
         glusterd_conf_t        *priv      = NULL;
887953
+        int32_t                 op        = GD_OP_NONE;
887953
         dict_t                 *dict      = NULL;
887953
         char                   *volname   = NULL;
887953
         gf_cli_defrag_type      cmd       = 0;
887953
@@ -563,17 +565,25 @@ __glusterd_handle_defrag_volume (rpcsvc_request_t *req)
887953
             (cmd == GF_DEFRAG_CMD_STOP_DETACH_TIER) ||
887953
             (cmd == GF_DEFRAG_CMD_STOP) ||
887953
             (cmd == GF_DEFRAG_CMD_DETACH_STATUS)) {
887953
-                ret = glusterd_op_begin (req, GD_OP_DEFRAG_BRICK_VOLUME,
887953
-                                         dict, msg, sizeof (msg));
887953
+                op = GD_OP_DEFRAG_BRICK_VOLUME;
887953
         } else
887953
-                ret = glusterd_op_begin (req, GD_OP_REBALANCE, dict,
887953
-                                         msg, sizeof (msg));
887953
-
887953
+                op =  GD_OP_REBALANCE;
887953
+
887953
+        if (priv->op_version < GD_OP_VERSION_6_0) {
887953
+                gf_msg_debug(this->name, 0,
887953
+                             "The cluster is operating at "
887953
+                             "version less than %d. Falling back "
887953
+                             "to op-sm framework.",
887953
+                             GD_OP_VERSION_6_0);
887953
+                ret = glusterd_op_begin(req, op, dict, msg, sizeof(msg));
887953
+                glusterd_friend_sm();
887953
+                glusterd_op_sm();
887953
+        } else {
887953
+                ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(req,
887953
+                                                                              op,
887953
+                                                                              dict);
887953
+        }
887953
 out:
887953
-
887953
-        glusterd_friend_sm ();
887953
-        glusterd_op_sm ();
887953
-
887953
         if (ret) {
887953
                 if (msg[0] == '\0')
887953
                         snprintf (msg, sizeof (msg), "Operation failed");
887953
@@ -583,8 +593,8 @@ out:
887953
         }
887953
 
887953
         free (cli_req.dict.dict_val);//malloced by xdr
887953
-
887953
-        return 0;
887953
+        gf_msg_debug(this->name, 0, "Returning %d", ret);
887953
+        return ret;
887953
 }
887953
 
887953
 int
887953
@@ -628,6 +638,469 @@ glusterd_brick_validation  (dict_t *dict, char *key, data_t *value,
887953
 }
887953
 
887953
 int
887953
+glusterd_set_rebalance_id_in_rsp_dict(dict_t *req_dict, dict_t *rsp_dict)
887953
+{
887953
+        int                    ret         = -1;
887953
+        int32_t                cmd         = 0;
887953
+        char                  *volname     = NULL;
887953
+        glusterd_volinfo_t    *volinfo     = NULL;
887953
+        char                   msg[2048]   = {0};
887953
+        char                  *task_id_str = NULL;
887953
+        xlator_t              *this        = NULL;
887953
+
887953
+        this = THIS;
887953
+        GF_ASSERT(this);
887953
+
887953
+        GF_ASSERT(rsp_dict);
887953
+        GF_ASSERT(req_dict);
887953
+
887953
+        ret = dict_get_str(rsp_dict, "volname", &volname);
887953
+        if (ret) {
887953
+                gf_msg_debug(this->name, 0, "volname not found");
887953
+                goto out;
887953
+        }
887953
+
887953
+        ret = dict_get_int32(rsp_dict, "rebalance-command", &cmd);
887953
+        if (ret) {
887953
+                gf_msg_debug(this->name, 0, "cmd not found");
887953
+                goto out;
887953
+        }
887953
+
887953
+        ret = glusterd_rebalance_cmd_validate(cmd, volname, &volinfo, msg,
887953
+                                              sizeof(msg));
887953
+        if (ret) {
887953
+                gf_msg_debug(this->name, 0, "failed to validate");
887953
+                goto out;
887953
+        }
887953
+
887953
+        /* rebalance id is generated in glusterd_mgmt_v3_op_stage_rebalance(), but
887953
+         * rsp_dict is unavailable there. So copying it to rsp_dict from req_dict
887953
+         * here. So that cli can display the rebalance id.*/
887953
+        if ((cmd == GF_DEFRAG_CMD_START) ||
887953
+            (cmd == GF_DEFRAG_CMD_START_LAYOUT_FIX) ||
887953
+            (cmd == GF_DEFRAG_CMD_START_FORCE) ||
887953
+            (cmd == GF_DEFRAG_CMD_START_TIER)) {
887953
+                if (is_origin_glusterd(rsp_dict)) {
887953
+                        ret = dict_get_str(req_dict, GF_REBALANCE_TID_KEY,
887953
+                                           &task_id_str);
887953
+                        if (ret) {
887953
+                                snprintf(msg, sizeof(msg), "Missing rebalance-id");
887953
+                                gf_msg(this->name, GF_LOG_WARNING, 0,
887953
+                                       GD_MSG_REBALANCE_ID_MISSING, "%s", msg);
887953
+                                ret = 0;
887953
+                        } else {
887953
+                                gf_uuid_parse(task_id_str,
887953
+                                              volinfo->rebal.rebalance_id);
887953
+                                ret = glusterd_copy_uuid_to_dict(
887953
+                                      volinfo->rebal.rebalance_id, rsp_dict,
887953
+                                      GF_REBALANCE_TID_KEY);
887953
+                                if (ret) {
887953
+                                        snprintf(msg, sizeof(msg),
887953
+                                                 "Failed to set rebalance id for volume %s",
887953
+                                                 volname);
887953
+                                        gf_msg(this->name, GF_LOG_WARNING, 0,
887953
+                                               GD_MSG_DICT_SET_FAILED, "%s",
887953
+                                               msg);
887953
+                                }
887953
+                        }
887953
+                }
887953
+        }
887953
+
887953
+        /* Set task-id, if available, in rsp_dict for operations other than
887953
+         * start. This is needed when we want rebalance id in xml output
887953
+         */
887953
+        if (cmd == GF_DEFRAG_CMD_STATUS || cmd == GF_DEFRAG_CMD_STOP ||
887953
+            cmd == GF_DEFRAG_CMD_STATUS_TIER) {
887953
+                if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
887953
+                        if (GD_OP_REMOVE_BRICK == volinfo->rebal.op)
887953
+                                ret = glusterd_copy_uuid_to_dict(
887953
+                                      volinfo->rebal.rebalance_id, rsp_dict,
887953
+                                      GF_REMOVE_BRICK_TID_KEY);
887953
+                        else
887953
+                                ret = glusterd_copy_uuid_to_dict(
887953
+                                      volinfo->rebal.rebalance_id,
887953
+                                      rsp_dict, GF_REBALANCE_TID_KEY);
887953
+                        if (ret) {
887953
+                                gf_msg(this->name, GF_LOG_ERROR, 0,
887953
+                                       GD_MSG_DICT_SET_FAILED,
887953
+                                       "Failed to set task-id for volume %s",
887953
+                                       volname);
887953
+                                goto out;
887953
+                        }
887953
+                }
887953
+        }
887953
+out:
887953
+        return ret;
887953
+}
887953
+
887953
+int
887953
+glusterd_mgmt_v3_op_stage_rebalance(dict_t *dict, char **op_errstr)
887953
+{
887953
+        char               *volname     = NULL;
887953
+        char               *cmd_str     = NULL;
887953
+        int                 ret         = 0;
887953
+        int32_t             cmd         = 0;
887953
+        char                msg[2048]   = {0};
887953
+        glusterd_volinfo_t *volinfo     = NULL;
887953
+        char               *task_id_str = NULL;
887953
+        xlator_t           *this        = 0;
887953
+        int32_t             is_force    = 0;
887953
+
887953
+        this = THIS;
887953
+        GF_ASSERT(this);
887953
+
887953
+        ret = dict_get_str (dict, "volname", &volname);
887953
+        if (ret) {
887953
+                gf_msg_debug(this->name, 0, "volname not found");
887953
+                goto out;
887953
+        }
887953
+
887953
+        ret = dict_get_int32 (dict, "rebalance-command", &cmd);
887953
+        if (ret) {
887953
+                gf_msg_debug(this->name, 0, "cmd not found");
887953
+                goto out;
887953
+        }
887953
+
887953
+        ret = glusterd_rebalance_cmd_validate(cmd, volname, &volinfo, msg,
887953
+                                              sizeof(msg));
887953
+        if (ret) {
887953
+                gf_msg_debug(this->name, 0, "failed to validate");
887953
+                goto out;
887953
+        }
887953
+        switch (cmd) {
887953
+        case GF_DEFRAG_CMD_START_TIER:
887953
+                ret = dict_get_int32 (dict, "force", &is_force);
887953
+                if (ret)
887953
+                        is_force = 0;
887953
+
887953
+                if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
887953
+                        gf_asprintf(op_errstr,
887953
+                                    "volume %s is not a tier "
887953
+                                    "volume.",
887953
+                                    volinfo->volname);
887953
+                        ret = -1;
887953
+                        goto out;
887953
+                }
887953
+                if ((!is_force) && glusterd_is_tier_daemon_running(volinfo)) {
887953
+                        ret = gf_asprintf(op_errstr,
887953
+                                          "A Tier daemon is "
887953
+                                          "already running on volume %s",
887953
+                                          volname);
887953
+                        ret = -1;
887953
+                        goto out;
887953
+                }
887953
+                /* Fall through */
887953
+        case GF_DEFRAG_CMD_START:
887953
+        case GF_DEFRAG_CMD_START_LAYOUT_FIX:
887953
+                /* Check if the connected clients are all of version
887953
+                 * glusterfs-3.6 and higher. This is needed to prevent some data
887953
+                 * loss issues that could occur when older clients are connected
887953
+                 * when rebalance is run. This check can be bypassed by using
887953
+                 * 'force'
887953
+                 */
887953
+                ret = glusterd_check_client_op_version_support(volname,
887953
+                                                               GD_OP_VERSION_RHS_3_0,
887953
+                                                               NULL);
887953
+                if (ret) {
887953
+                        ret = gf_asprintf(op_errstr,
887953
+                                          "Volume %s has one or "
887953
+                                          "more connected clients of a version"
887953
+                                          " lower than GlusterFS-v3.6.0. "
887953
+                                          "Starting rebalance in this state "
887953
+                                          "could lead to data loss.\nPlease "
887953
+                                          "disconnect those clients before "
887953
+                                          "attempting this command again.",
887953
+                                          volname);
887953
+                        goto out;
887953
+                }
887953
+                /* Fall through */
887953
+        case GF_DEFRAG_CMD_START_FORCE:
887953
+                if (is_origin_glusterd(dict)) {
887953
+                        ret = glusterd_generate_and_set_task_id(dict,
887953
+                                                                GF_REBALANCE_TID_KEY);
887953
+                        if (ret) {
887953
+                                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TASKID_GEN_FAIL,
887953
+                                       "Failed to generate task-id");
887953
+                                goto out;
887953
+                        }
887953
+                } else {
887953
+                        ret = dict_get_str(dict, GF_REBALANCE_TID_KEY, &task_id_str);
887953
+                        if (ret) {
887953
+                                snprintf(msg, sizeof(msg), "Missing rebalance-id");
887953
+                                gf_msg(this->name, GF_LOG_WARNING, 0,
887953
+                                       GD_MSG_REBALANCE_ID_MISSING, "%s", msg);
887953
+                                ret = 0;
887953
+                        }
887953
+                }
887953
+                ret = glusterd_defrag_start_validate(volinfo, msg, sizeof(msg),
887953
+                                                     GD_OP_REBALANCE);
887953
+                if (ret) {
887953
+                        gf_msg_debug(this->name, 0,
887953
+                                     "defrag start validate "
887953
+                                     "failed for volume %s.",
887953
+                                     volinfo->volname);
887953
+                        goto out;
887953
+                }
887953
+                break;
887953
+        case GF_DEFRAG_CMD_STATUS_TIER:
887953
+        case GF_DEFRAG_CMD_STATUS:
887953
+        case GF_DEFRAG_CMD_STOP:
887953
+
887953
+                ret = dict_get_str(dict, "cmd-str", &cmd_str);
887953
+                if (ret) {
887953
+                        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
887953
+                               "Failed to get "
887953
+                               "command string");
887953
+                        ret = -1;
887953
+                        goto out;
887953
+                }
887953
+                if ((strstr(cmd_str, "rebalance") != NULL) &&
887953
+                    (volinfo->rebal.op != GD_OP_REBALANCE)) {
887953
+                        snprintf(msg, sizeof(msg),
887953
+                                 "Rebalance not started "
887953
+                                 "for volume %s.", volinfo->volname);
887953
+                        ret = -1;
887953
+                        goto out;
887953
+                }
887953
+
887953
+                if (strstr(cmd_str, "remove-brick") != NULL) {
887953
+                        if (volinfo->rebal.op != GD_OP_REMOVE_BRICK) {
887953
+                                snprintf(msg, sizeof(msg),
887953
+                                         "remove-brick not "
887953
+                                         "started for volume %s.",
887953
+                                         volinfo->volname);
887953
+                                ret = -1;
887953
+                                goto out;
887953
+                        }
887953
+
887953
+                        /* For remove-brick status/stop command check whether
887953
+                         * given input brick is part of volume or not.*/
887953
+
887953
+                        ret = dict_foreach_fnmatch(dict, "brick*",
887953
+                                                   glusterd_brick_validation, volinfo);
887953
+                        if (ret == -1) {
887953
+                                snprintf(msg, sizeof(msg),
887953
+                                         "Incorrect brick for volume %s",
887953
+                                         volinfo->volname);
887953
+                                goto out;
887953
+                        }
887953
+                }
887953
+                if (cmd == GF_DEFRAG_CMD_STATUS_TIER) {
887953
+                        if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
887953
+                                snprintf(msg, sizeof(msg),
887953
+                                         "volume %s is not "
887953
+                                         "a tier volume.",
887953
+                                         volinfo->volname);
887953
+                                ret = -1;
887953
+                                goto out;
887953
+                        }
887953
+                }
887953
+
887953
+                break;
887953
+
887953
+        case GF_DEFRAG_CMD_STOP_DETACH_TIER:
887953
+        case GF_DEFRAG_CMD_DETACH_STATUS:
887953
+                if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
887953
+                        snprintf(msg, sizeof(msg),
887953
+                                 "volume %s is not "
887953
+                                 "a tier volume.",
887953
+                                 volinfo->volname);
887953
+                        ret = -1;
887953
+                        goto out;
887953
+                }
887953
+
887953
+                if (volinfo->rebal.op != GD_OP_REMOVE_BRICK) {
887953
+                        snprintf(msg, sizeof(msg),
887953
+                                 "Detach-tier "
887953
+                                 "not started");
887953
+                        ret = -1;
887953
+                        goto out;
887953
+                }
887953
+                break;
887953
+        default:
887953
+                break;
887953
+        }
887953
+
887953
+        ret = 0;
887953
+out:
887953
+        if (ret && op_errstr && msg[0])
887953
+                *op_errstr = gf_strdup(msg);
887953
+
887953
+        return ret;
887953
+}
887953
+
887953
+int
887953
+glusterd_mgmt_v3_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
887953
+{
887953
+        char                 *volname        = NULL;
887953
+        int                   ret            = 0;
887953
+        int32_t               cmd            = 0;
887953
+        char                  msg[2048]      = {0};
887953
+        glusterd_volinfo_t   *volinfo        = NULL;
887953
+        glusterd_brickinfo_t *brickinfo      = NULL;
887953
+        glusterd_brickinfo_t *tmp            = NULL;
887953
+        gf_boolean_t          volfile_update = _gf_false;
887953
+        char                 *task_id_str    = NULL;
887953
+        xlator_t             *this           = NULL;
887953
+        uint32_t              commit_hash;
887953
+        int32_t               is_force       = 0;
887953
+
887953
+        this = THIS;
887953
+        GF_ASSERT(this);
887953
+
887953
+        ret = dict_get_str(dict, "volname", &volname);
887953
+        if (ret) {
887953
+                gf_msg_debug(this->name, 0, "volname not given");
887953
+                goto out;
887953
+        }
887953
+
887953
+        ret = dict_get_int32(dict, "rebalance-command", &cmd);
887953
+        if (ret) {
887953
+                gf_msg_debug(this->name, 0, "command not given");
887953
+                goto out;
887953
+        }
887953
+
887953
+        ret = glusterd_rebalance_cmd_validate(cmd, volname, &volinfo, msg,
887953
+                                              sizeof(msg));
887953
+        if (ret) {
887953
+                gf_msg_debug(this->name, 0, "cmd validate failed");
887953
+                goto out;
887953
+        }
887953
+
887953
+        switch (cmd) {
887953
+        case GF_DEFRAG_CMD_START:
887953
+        case GF_DEFRAG_CMD_START_LAYOUT_FIX:
887953
+        case GF_DEFRAG_CMD_START_FORCE:
887953
+        case GF_DEFRAG_CMD_START_TIER:
887953
+
887953
+                ret = dict_get_int32(dict, "force", &is_force);
887953
+                if (ret)
887953
+                        is_force = 0;
887953
+                if (!is_force) {
887953
+                        /* Reset defrag status to 'NOT STARTED' whenever a
887953
+                         * remove-brick/rebalance command is issued to remove
887953
+                         * stale information from previous run.
887953
+                         */
887953
+                        volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
887953
+
887953
+                        ret = dict_get_str(dict, GF_REBALANCE_TID_KEY, &task_id_str);
887953
+                        if (ret) {
887953
+                                gf_msg_debug(this->name, 0,
887953
+                                             "Missing rebalance id");
887953
+                                ret = 0;
887953
+                        } else {
887953
+                                gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
887953
+                                volinfo->rebal.op = GD_OP_REBALANCE;
887953
+                        }
887953
+                        if (!gd_should_i_start_rebalance(volinfo)) {
887953
+                                /* Store the rebalance-id and rebalance command
887953
+                                 * even if the peer isn't starting a rebalance
887953
+                                 * process. On peers where a rebalance process
887953
+                                 * is started, glusterd_handle_defrag_start
887953
+                                 * performs the storing.
887953
+                                 * Storing this is needed for having
887953
+                                 * 'volume status' work correctly.
887953
+                                 */
887953
+                                glusterd_store_perform_node_state_store(volinfo);
887953
+                                break;
887953
+                        }
887953
+                        if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) {
887953
+                                volinfo->rebal.commit_hash = commit_hash;
887953
+                        }
887953
+                        ret = glusterd_handle_defrag_start(volinfo, msg, sizeof(msg),
887953
+                                                           cmd, NULL, GD_OP_REBALANCE);
887953
+                        break;
887953
+                } else {
887953
+                        /* Reset defrag status to 'STARTED' so that the
887953
+                         * pid is checked and restarted accordingly.
887953
+                         * If the pid is not running it executes the
887953
+                         * "NOT_STARTED" case and restarts the process
887953
+                         */
887953
+                        volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_STARTED;
887953
+                        volinfo->rebal.defrag_cmd = cmd;
887953
+                        volinfo->rebal.op = GD_OP_REBALANCE;
887953
+
887953
+                        ret = dict_get_str(dict, GF_REBALANCE_TID_KEY, &task_id_str);
887953
+                        if (ret) {
887953
+                                gf_msg_debug(this->name, 0,
887953
+                                             "Missing rebalance id");
887953
+                                ret = 0;
887953
+                        } else {
887953
+                                gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
887953
+                                volinfo->rebal.op = GD_OP_REBALANCE;
887953
+                        }
887953
+                        if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) {
887953
+                                volinfo->rebal.commit_hash = commit_hash;
887953
+                        }
887953
+                        ret = glusterd_restart_rebalance_for_volume(volinfo);
887953
+                        break;
887953
+                }
887953
+        case GF_DEFRAG_CMD_STOP:
887953
+        case GF_DEFRAG_CMD_STOP_DETACH_TIER:
887953
+                /* Clear task-id only on explicitly stopping rebalance.
887953
+                 * Also clear the stored operation, so it doesn't cause trouble
887953
+                 * with future rebalance/remove-brick starts
887953
+                 */
887953
+                gf_uuid_clear(volinfo->rebal.rebalance_id);
887953
+                volinfo->rebal.op = GD_OP_NONE;
887953
+
887953
+                /* Fall back to the old volume file in case of decommission*/
887953
+                cds_list_for_each_entry_safe(brickinfo, tmp, &volinfo->bricks,
887953
+                                             brick_list)
887953
+                {
887953
+                        if (!brickinfo->decommissioned)
887953
+                            continue;
887953
+                        brickinfo->decommissioned = 0;
887953
+                        volfile_update = _gf_true;
887953
+                }
887953
+
887953
+                if (volfile_update == _gf_false) {
887953
+                        ret = 0;
887953
+                        break;
887953
+                }
887953
+
887953
+                ret = glusterd_create_volfiles_and_notify_services(volinfo);
887953
+                if (ret) {
887953
+                        gf_msg(this->name, GF_LOG_WARNING, 0,
887953
+                               GD_MSG_VOLFILE_CREATE_FAIL, "failed to create volfiles");
887953
+                        goto out;
887953
+                }
887953
+
887953
+                ret = glusterd_store_volinfo(volinfo,
887953
+                                             GLUSTERD_VOLINFO_VER_AC_INCREMENT);
887953
+                if (ret) {
887953
+                        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_SET_FAIL,
887953
+                               "failed to store volinfo");
887953
+                        goto out;
887953
+                }
887953
+
887953
+                if (volinfo->type == GF_CLUSTER_TYPE_TIER &&
887953
+                    cmd == GF_OP_CMD_STOP_DETACH_TIER) {
887953
+                        glusterd_defrag_info_set(volinfo, dict,
887953
+                                                 GF_DEFRAG_CMD_START_TIER,
887953
+                                                 GF_DEFRAG_CMD_START, GD_OP_REBALANCE);
887953
+                        glusterd_restart_rebalance_for_volume(volinfo);
887953
+                }
887953
+
887953
+                ret = 0;
887953
+                break;
887953
+
887953
+        case GF_DEFRAG_CMD_START_DETACH_TIER:
887953
+        case GF_DEFRAG_CMD_STATUS:
887953
+        case GF_DEFRAG_CMD_STATUS_TIER:
887953
+                break;
887953
+        default:
887953
+                break;
887953
+        }
887953
+
887953
+out:
887953
+        if (ret && op_errstr && msg[0])
887953
+                *op_errstr = gf_strdup(msg);
887953
+
887953
+        return ret;
887953
+}
887953
+
887953
+int
887953
 glusterd_op_stage_rebalance (dict_t *dict, char **op_errstr)
887953
 {
887953
         char                    *volname     = NULL;
887953
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
887953
index 9a67d1c..7baef64 100644
887953
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
887953
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
887953
@@ -317,6 +317,15 @@ glusterd_syncop_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp)
887953
                 ret = glusterd_max_opversion_use_rsp_dict (aggr, rsp);
887953
         break;
887953
 
887953
+        case GD_OP_PROFILE_VOLUME:
887953
+                ret = glusterd_profile_volume_use_rsp_dict(aggr, rsp);
887953
+                break;
887953
+
887953
+        case GD_OP_REBALANCE:
887953
+        case GD_OP_DEFRAG_BRICK_VOLUME:
887953
+                ret = glusterd_volume_rebalance_use_rsp_dict(aggr, rsp);
887953
+                break;
887953
+
887953
         case GD_OP_TIER_STATUS:
887953
         case GD_OP_DETACH_TIER_STATUS:
887953
         case GD_OP_REMOVE_TIER_BRICK:
887953
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
887953
index 6468ecb..0fe56eb 100644
887953
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
887953
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
887953
@@ -10884,7 +10884,7 @@ glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
887953
         int                  ret           = 0;
887953
         int32_t              index         = 0;
887953
         int32_t              count         = 0;
887953
-        int32_t              current_index = 2;
887953
+        int32_t              current_index = 1;
887953
         int32_t              value32       = 0;
887953
         uint64_t             value         = 0;
887953
         char                *peer_uuid_str = NULL;
887953
@@ -10925,7 +10925,7 @@ glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
887953
         if (ret)
887953
                 gf_msg ("glusterd", GF_LOG_ERROR, 0,
887953
                         GD_MSG_DICT_GET_FAILED,
887953
-                        "failed to get index");
887953
+                        "failed to get index from rsp dict");
887953
 
887953
         memset (key, 0, 256);
887953
         snprintf (key, 256, "node-uuid-%d", index);
887953
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
887953
index 42c8821..f1e41be 100644
887953
--- a/xlators/mgmt/glusterd/src/glusterd.h
887953
+++ b/xlators/mgmt/glusterd/src/glusterd.h
887953
@@ -1223,6 +1223,15 @@ int glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr,
887953
                                  dict_t *rsp_dict);
887953
 int glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr);
887953
 
887953
+int
887953
+glusterd_set_rebalance_id_in_rsp_dict(dict_t *req_dict, dict_t *rsp_dict);
887953
+
887953
+int
887953
+glusterd_mgmt_v3_op_stage_rebalance(dict_t *dict, char **op_errstr);
887953
+
887953
+int
887953
+glusterd_mgmt_v3_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
887953
+
887953
 int glusterd_op_stage_rebalance (dict_t *dict, char **op_errstr);
887953
 int glusterd_op_rebalance (dict_t *dict, char **op_errstr, dict_t *rsp_dict);
887953
 
887953
-- 
887953
1.8.3.1
887953