From e68845ff7018e5d81d7979684b18e6eda449b088 Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Thu, 8 Nov 2018 18:50:18 +0530
Subject: [PATCH 471/493] glusterd: migrating profile commands to mgmt_v3
 framework

The profile commands currently use the op-sm (op state machine) framework.
This patch ports them to the mgmt_v3 framework.

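With this change the mgmt_v3 path is taken only when the cluster op-version
is at least GD_OP_VERSION_6_0 (60000); on older clusters glusterd keeps
falling back to the op-sm code path. As a quick, illustrative check (not
part of this patch), the effective cluster op-version can be queried with:

    gluster volume get all cluster.op-version
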
The following tests were performed on the patch:
case 1:
1. On a 3 node cluster, created and started 3 volumes
2. Mounted all the three volumes and wrote some data
3. Started the profile operation for all the volumes
4. Ran "gluster v status" from N1,
   "gluster v profile <volname1> info" from N2,
   "gluster v profile <volname2> info" from N3 simultaneously in a
   loop for around 10000 times
5. No cores were generated.

case 2:
1. Repeated steps 1, 2 and 3 from case 1.
2. Ran "gluster v status" from N1,
   "gluster v profile <volname1> info" from N2 (terminal 1),
   "gluster v profile <volname2> info" from N2 (terminal 2)
   simultaneously in a loop (see the sketch below).
3. No cores were generated.
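
An illustrative sketch of the kind of loop used in the tests above (the
volume name is a placeholder; the exact test script is not part of this
patch):

    # run on N2/N3 while "gluster v status" loops on N1
    volname=volname1
    for i in $(seq 1 10000); do
            gluster volume profile "$volname" info
    done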

> fixes: bz#1654181
> Change-Id: I83044cf5aee3970ef94066c89fcc41783ed468a6
> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>

upstream patch: https://review.gluster.org/#/c/glusterfs/+/21736/

Change-Id: I83044cf5aee3970ef94066c89fcc41783ed468a6
BUG: 1639476
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/158631
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 libglusterfs/src/globals.h | 2 +
 xlators/mgmt/glusterd/src/glusterd-handler.c | 18 +-
 xlators/mgmt/glusterd/src/glusterd-mgmt.c | 240 +++++++++++++++++++++++++--
 xlators/mgmt/glusterd/src/glusterd-mgmt.h | 6 +-
 xlators/mgmt/glusterd/src/glusterd-op-sm.c | 4 +-
 xlators/mgmt/glusterd/src/glusterd-op-sm.h | 6 +
 6 files changed, 252 insertions(+), 24 deletions(-)

diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h
index 1bede2e..d2b0964 100644
--- a/libglusterfs/src/globals.h
+++ b/libglusterfs/src/globals.h
@@ -111,6 +111,8 @@

#define GD_OP_VERSION_3_13_3 31303 /* Op-version for GlusterFS 3.13.3 */

+#define GD_OP_VERSION_6_0 60000 /* Op-version for GlusterFS 6.0 */
+
/* Downstream only change */
#define GD_OP_VERSION_3_11_2 31102 /* Op-version for RHGS 3.3.1-async */
#define GD_OP_VERSION_3_13_3 31303 /* Op-version for RHGS-3.4-Batch Update-1*/
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 7486f51..90eaa95 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -28,6 +28,7 @@
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
+#include "glusterd-mgmt.h"
#include "glusterd-server-quorum.h"
#include "glusterd-store.h"
#include "glusterd-locks.h"
@@ -3065,10 +3066,13 @@ __glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
int32_t op = 0;
char err_str[2048] = {0,};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;

GF_ASSERT (req);
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);

ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
@@ -3109,12 +3113,18 @@ __glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
goto out;
}

- ret = glusterd_op_begin (req, cli_op, dict, err_str, sizeof (err_str));
+ if (conf->op_version < GD_OP_VERSION_6_0) {
+ gf_msg_debug(this->name, 0, "The cluster is operating at "
+ "version less than %d. Falling back to op-sm "
+ "framework.", GD_OP_VERSION_6_0);
+ ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
+ glusterd_friend_sm();
+ glusterd_op_sm();
+ } else {
+ ret = glusterd_mgmt_v3_initiate_profile_phases(req, cli_op, dict);
+ }

out:
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
free (cli_req.dict.dict_val);

if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index d7da3c1..751d6e4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -19,6 +19,7 @@
#include "glusterd-locks.h"
#include "glusterd-mgmt.h"
#include "glusterd-op-sm.h"
+#include "glusterd-server-quorum.h"
#include "glusterd-volgen.h"
#include "glusterd-store.h"
#include "glusterd-snapshot-utils.h"
@@ -213,6 +214,16 @@ gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict,
}
break;

+ case GD_OP_PROFILE_VOLUME:
+ ret = glusterd_op_stage_stats_volume(dict, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_PRE_VALIDATION_FAIL,
+ "prevalidation failed for profile operation.");
+ goto out;
+ }
+ break;
+
case GD_OP_MAX_OPVERSION:
ret = 0;
break;
@@ -252,6 +263,16 @@ gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict,
}
break;
}
+ case GD_OP_PROFILE_VOLUME:
+ {
+ ret = gd_brick_op_phase(op, rsp_dict, dict, op_errstr);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "%s brickop failed",
+ gd_op_list[op]);
+ goto out;
+ }
+ break;
+ }
default:
break;
}
@@ -406,6 +427,17 @@ gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
break;

}
+ case GD_OP_PROFILE_VOLUME:
+ {
+ ret = glusterd_op_stats_volume(dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_COMMIT_OP_FAIL, "commit failed "
+ "volume profile operation.");
+ goto out;
+ }
+ break;
+ }

default:
break;
@@ -847,6 +879,7 @@ glusterd_pre_validate_aggr_rsp_dict (glusterd_op_t op,
case GD_OP_DETACH_TIER_STATUS:
case GD_OP_TIER_START_STOP:
case GD_OP_REMOVE_TIER_BRICK:
+ case GD_OP_PROFILE_VOLUME:
break;
case GD_OP_MAX_OPVERSION:
break;
@@ -1039,6 +1072,16 @@ glusterd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *req_dict,
goto out;
}

+ if (op == GD_OP_PROFILE_VOLUME) {
+ ret = glusterd_validate_quorum(this, op, req_dict, op_errstr);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SERVER_QUORUM_NOT_MET, "Server quorum "
+ "not met. Rejecting operation.");
+ goto out;
+ }
+ }
+
/* Pre Validation on local node */
ret = gd_mgmt_v3_pre_validate_fn (op, req_dict, op_errstr,
rsp_dict, op_errno);
@@ -1157,6 +1200,7 @@ glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
case GD_OP_REPLACE_BRICK:
case GD_OP_RESET_BRICK:
case GD_OP_ADD_TIER_BRICK:
+ case GD_OP_PROFILE_VOLUME:
{
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
@@ -1309,12 +1353,11 @@ out:
}

int
-glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *req_dict, char **op_errstr,
- uint32_t txn_generation)
+glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
+ char **op_errstr, uint32_t txn_generation)
{
int32_t ret = -1;
int32_t peer_cnt = 0;
- dict_t *rsp_dict = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
uuid_t peer_uuid = {0};
@@ -1329,14 +1372,6 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *req_dict, char **op_errstr,
GF_ASSERT (req_dict);
GF_ASSERT (op_errstr);

- rsp_dict = dict_new ();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to create response dictionary");
- goto out;
- }
-
/* Perform brick op on local node */
ret = gd_mgmt_v3_brick_op_fn (op, req_dict, op_errstr,
rsp_dict);
@@ -1361,11 +1396,8 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *req_dict, char **op_errstr,
goto out;
}

- dict_unref (rsp_dict);
- rsp_dict = NULL;
-
/* Sending brick op req to other nodes in the cluster */
- gd_syncargs_init (&args, NULL);
+ gd_syncargs_init (&args, rsp_dict);
synctask_barrier_init((&args));
peer_cnt = 0;

@@ -2108,6 +2140,180 @@ out:
}

int32_t
+glusterd_mgmt_v3_initiate_profile_phases (rpcsvc_request_t *req,
+ glusterd_op_t op, dict_t *dict)
+{
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ dict_t *req_dict = NULL;
+ dict_t *tmp_dict = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *op_errstr = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_acquired = _gf_false;
+ uuid_t *originator_uuid = NULL;
+ uint32_t txn_generation = 0;
+ uint32_t op_errno = 0;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ /* Save the peer list generation */
+ txn_generation = conf->generation;
+ cmm_smp_rmb ();
+ /* This read memory barrier makes sure that this assignment happens here
+ * only and is not reordered and optimized by either the compiler or the
+ * processor.
+ */
+
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ originator_uuid = GF_CALLOC (1, sizeof(uuid_t),
+ gf_common_mt_uuid_t);
+ if (!originator_uuid) {
+ ret = -1;
+ goto out;
+ }
+
+ gf_uuid_copy (*originator_uuid, MY_UUID);
+ ret = dict_set_bin (dict, "originator_uuid",
+ originator_uuid, sizeof (uuid_t));
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SET_FAILED,
+ "Failed to set originator_uuid.");
+ GF_FREE (originator_uuid);
+ goto out;
+ }
+
+ /* Marking the operation as complete synctasked */
+ ret = dict_set_int32 (dict, "is_synctasked", _gf_true);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SET_FAILED,
+ "Failed to set synctasked flag.");
+ goto out;
+ }
+
+ /* Use a copy at local unlock as cli response will be sent before
+ * the unlock and the volname in the dict might be removed */
+ tmp_dict = dict_new();
+ if (!tmp_dict) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_CREATE_FAIL, "Unable to create dict");
+ goto out;
+ }
+ dict_copy (dict, tmp_dict);
+
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
+ ret = glusterd_mgmt_v3_initiate_lockdown (op, dict, &op_errstr,
+ &op_errno, &is_acquired,
+ txn_generation);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MGMTV3_LOCKDOWN_FAIL,
+ "mgmt_v3 lockdown failed.");
+ goto out;
+ }
+
+ /* BUILD PAYLOAD */
+ ret = glusterd_mgmt_v3_build_payload (&req_dict, &op_errstr, dict, op);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MGMTV3_PAYLOAD_BUILD_FAIL, LOGSTR_BUILD_PAYLOAD,
+ gd_op_list[op]);
+ if (op_errstr == NULL)
+ gf_asprintf (&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+ goto out;
+ }
+
+ /* PRE-COMMIT VALIDATE PHASE */
+ ret = glusterd_mgmt_v3_pre_validate (op, req_dict, &op_errstr,
+ &op_errno, txn_generation);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_PRE_VALIDATION_FAIL, "Pre Validation Failed");
+ goto out;
+ }
+
+ /* BRICK-OPS */
+ ret = glusterd_mgmt_v3_brick_op(op, dict, req_dict, &op_errstr,
+ txn_generation);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR, "Brick Op Failed");
+ goto out;
+ }
+
+ /* COMMIT OP PHASE */
+ ret = glusterd_mgmt_v3_commit (op, dict, req_dict, &op_errstr,
+ &op_errno, txn_generation);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_COMMIT_OP_FAIL, "Commit Op Failed");
+ goto out;
+ }
+
+ /* POST-COMMIT VALIDATE PHASE */
+ /* As of now, post_validate is not trying to cleanup any failed
+ commands. So as of now, I am sending 0 (op_ret as 0).
+ */
+ ret = glusterd_mgmt_v3_post_validate (op, 0, dict, req_dict, &op_errstr,
+ txn_generation);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_POST_VALIDATION_FAIL, "Post Validation Failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ op_ret = ret;
+ /* UNLOCK PHASE FOR PEERS*/
+ (void) glusterd_mgmt_v3_release_peer_locks (op, dict, op_ret,
+ &op_errstr, is_acquired,
+ txn_generation);
+
+ /* LOCAL VOLUME(S) UNLOCK */
+ if (is_acquired) {
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Failed to release mgmt_v3 locks on localhost");
+ op_ret = ret;
+ }
+ }
+
+ if (op_ret && (op_errno == 0))
+ op_errno = EG_INTRNL;
+
+ if (op != GD_OP_MAX_OPVERSION) {
+ /* SEND CLI RESPONSE */
+ glusterd_op_send_cli_response (op, op_ret, op_errno, req,
+ dict, op_errstr);
+ }
+
+ if (req_dict)
+ dict_unref (req_dict);
+
+ if (tmp_dict)
+ dict_unref (tmp_dict);
+
+ if (op_errstr) {
+ GF_FREE (op_errstr);
+ op_errstr = NULL;
+ }
+
+ return 0;
+}
+
+int32_t
glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
dict_t *dict)
{
@@ -2465,7 +2671,7 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}

- ret = glusterd_mgmt_v3_brick_op (op, req_dict, &op_errstr,
+ ret = glusterd_mgmt_v3_brick_op (op, dict, req_dict, &op_errstr,
txn_generation);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
@@ -2526,7 +2732,7 @@ unbarrier:
goto out;
}

- ret = glusterd_mgmt_v3_brick_op (op, req_dict, &op_errstr,
+ ret = glusterd_mgmt_v3_brick_op (op, dict, req_dict, &op_errstr,
txn_generation);

if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
index 2215f17..eff070d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
@@ -37,7 +37,11 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
dict_t *dict);

int32_t
-glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
+glusterd_mgmt_v3_initiate_profile_phases(rpcsvc_request_t *req,
+ glusterd_op_t op, dict_t *dict);
+
+int32_t
+glusterd_mgmt_v3_initiate_snap_phases(rpcsvc_request_t *req, glusterd_op_t op,
dict_t *dict);

int
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 52a3db0..9f76ab3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2032,7 +2032,7 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
return ret;
}

-static int
+int
glusterd_op_stage_stats_volume (dict_t *dict, char **op_errstr)
{
int ret = -1;
@@ -3322,7 +3322,7 @@ glusterd_remove_profile_volume_options (glusterd_volinfo_t *volinfo)
dict_del (volinfo->dict, fd_stats_key);
}

-static int
+int
glusterd_op_stats_volume (dict_t *dict, char **op_errstr,
dict_t *rsp_dict)
{
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index f2aee9c..e64d368 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -312,4 +312,10 @@ glusterd_dict_set_volid (dict_t *dict, char *volname, char **op_errstr);

int32_t
glusterd_tier_op (xlator_t *this, void *data);
+
+int
+glusterd_op_stats_volume (dict_t *dict, char **op_errstr, dict_t *rsp_dict);
+
+int
+glusterd_op_stage_stats_volume (dict_t *dict, char **op_errstr);
#endif
--
1.8.3.1