From 87501a984a96223012d7878ccc689510b2c3d77b Mon Sep 17 00:00:00 2001
From: hari gowtham <hgowtham@redhat.com>
Date: Mon, 16 May 2016 10:55:17 +0530
Subject: [PATCH 165/167] tier/cli : printing a warning instead of skipping the node

Back-port of: http://review.gluster.org/#/c/14347/8
Back-port of: http://review.gluster.org/#/c/14459/

Problem: Skipping the status of the nodes that are down confuses the
user: one might see the status as completed for all nodes, yet a
subsequent detach commit fails because a node is down.

Fix: Display a warning message instead of skipping the node.

Note: When the last node (as per the peer list) is down, the warning
message cannot be displayed, because the total number of peers
participating in the transaction is taken as the total count.

>Change-Id: Ib7afbd1b26df3378e4d537db06f41f5c105ad86e
>BUG: 1324439
>Signed-off-by: hari gowtham <hgowtham@redhat.com>

>Change-Id: Id797bb45433c442f63b189ad16e0e95492a43721
>BUG: 1337908
>Signed-off-by: hari gowtham <hgowtham@redhat.com>

Change-Id: Ifed8767729eaf6835ce4aef4f519d3ea509101a4
BUG: 1322695
Signed-off-by: hari gowtham <hgowtham@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/74790
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
---
 cli/src/cli-rpc-ops.c            | 30 +++++++++++++++++++++++++-----
 tests/basic/tier/new-tier-cmds.t | 16 ++--------------
 tests/tier.rc                    | 13 +++++++++++++
 3 files changed, 40 insertions(+), 19 deletions(-)

diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index a3787c4..ea25b0a 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -1509,7 +1509,8 @@ out:
 }
 
 int
-gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
+gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type,
+                               gf_boolean_t is_tier)
 {
         int ret = -1;
         int count = 0;
@@ -1528,6 +1529,7 @@ gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
         int hrs = 0;
         int min = 0;
         int sec = 0;
+        gf_boolean_t down = _gf_false;
 
         ret = dict_get_int32 (dict, "count", &count);
         if (ret) {
@@ -1562,6 +1564,7 @@ gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
                         gf_log ("cli", GF_LOG_TRACE, "failed to get status");
                         gf_log ("cli", GF_LOG_ERROR, "node down and has failed"
                                 " to set dict");
+                        down = _gf_true;
                         continue;
                         /* skip this node if value not available*/
                 } else if (ret) {
@@ -1650,6 +1653,11 @@ gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
                 }
                 GF_FREE(size_str);
         }
+        if (is_tier && down)
+                cli_out ("WARNING: glusterd might be down on one or more nodes."
+                         " Please check the nodes that are down using \'gluster"
+                         " peer status\' and start the glusterd on those nodes,"
+                         " else tier detach commit might fail!");
 out:
         return ret;
 }
@@ -1667,6 +1675,7 @@ gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type)
         gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED;
         char *status_str = NULL;
         char *size_str = NULL;
+        gf_boolean_t down = _gf_false;
 
         ret = dict_get_int32 (dict, "count", &count);
         if (ret) {
@@ -1695,6 +1704,7 @@ gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type)
                                 "failed to get status", count, i);
                         gf_log ("cli", GF_LOG_ERROR, "node down and has failed"
                                 " to set dict");
+                        down = _gf_true;
                         continue;
                         /*skipping this node as value unavailable*/
                 } else if (ret) {
@@ -1733,8 +1743,11 @@ gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type)
                 status_str = cli_vol_task_status_str[status_rcd];
                 cli_out ("%-20s %-20"PRIu64" %-20"PRIu64" %-20s",
                          node_name, promoted, demoted, status_str);
-
         }
+        if (down)
+                cli_out ("WARNING: glusterd might be down on one or more nodes."
+                         " Please check the nodes that are down using \'gluster"
+                         " peer status\' and start the glusterd on those nodes.");
 out:
         return ret;
 }
@@ -1893,9 +1906,14 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
 
         if (cmd == GF_DEFRAG_CMD_STATUS_TIER)
                 ret = gf_cli_print_tier_status (dict, GF_TASK_TYPE_REBALANCE);
+        else if (cmd == GF_DEFRAG_CMD_DETACH_STATUS)
+                ret = gf_cli_print_rebalance_status (dict,
+                                                     GF_TASK_TYPE_REBALANCE,
+                                                     _gf_true);
         else
                 ret = gf_cli_print_rebalance_status (dict,
-                                                     GF_TASK_TYPE_REBALANCE);
+                                                     GF_TASK_TYPE_REBALANCE,
+                                                     _gf_false);
 
         if (ret)
                 gf_log ("cli", GF_LOG_ERROR,
@@ -2491,7 +2509,8 @@ xml_output:
                 goto out;
         }
 
-        ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK);
+        ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK,
+                                             _gf_true);
         if (ret) {
                 gf_log ("cli", GF_LOG_ERROR, "Failed to print remove-brick "
                         "rebalance status");
@@ -2669,7 +2688,8 @@ xml_output:
                 goto out;
         }
 
-        ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK);
+        ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK,
+                                             _gf_false);
         if (ret) {
                 gf_log ("cli", GF_LOG_ERROR, "Failed to print remove-brick "
                         "rebalance status");
diff --git a/tests/basic/tier/new-tier-cmds.t b/tests/basic/tier/new-tier-cmds.t
index ce8dbae..ad2c84d 100644
--- a/tests/basic/tier/new-tier-cmds.t
+++ b/tests/basic/tier/new-tier-cmds.t
@@ -19,18 +19,6 @@ function create_dist_tier_vol () {
         TEST $CLI_1 volume attach-tier $V0 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3
 }
 
-function tier_detach_commit () {
-        $CLI_1 volume tier $V0 detach commit | grep "success" | wc -l
-}
-
-function tier_detach_status_node_down () {
-        $CLI_1 volume tier $V0 detach status | wc -l
-}
-
-function tier_status_node_down () {
-        $CLI_1 volume tier $V0 status | wc -l
-}
-
 cleanup;
 
 #setup cluster and test volume
@@ -58,10 +46,10 @@ TEST $CLI_1 volume tier $V0 detach status
 TEST kill_node 2
 
 #check if we have the rest of the node available printed in the output of detach status
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" tier_detach_status_node_down
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status_node_down
 
 #check if we have the rest of the node available printed in the output of tier status
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "5" tier_status_node_down
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_status_node_down
 
 TEST $glusterd_2;
 
diff --git a/tests/tier.rc b/tests/tier.rc
index ee37e07..69512c3 100644
--- a/tests/tier.rc
+++ b/tests/tier.rc
@@ -134,3 +134,16 @@ function rebalance_run_time () {
 
         echo $total;
 }
+
+function tier_detach_commit () {
+        $CLI_1 volume tier $V0 detach commit | grep "success" | wc -l
+}
+
+function tier_detach_status_node_down () {
+        $CLI_1 volume tier $V0 detach status | grep "WARNING" | wc -l
+}
+
+function tier_status_node_down () {
+        $CLI_1 volume tier $V0 status | grep "WARNING" | wc -l
+}
+
--
1.7.1
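
A minimal sketch of how the new warning surfaces, assuming a tiered test
volume $V0 and the cluster helpers ($CLI_1, kill_node) used by
tests/basic/tier/new-tier-cmds.t above:

    # stop glusterd on one peer, then query tier status from a surviving node
    TEST kill_node 2
    $CLI_1 volume tier $V0 status        | grep -c "WARNING"   # expect 1
    $CLI_1 volume tier $V0 detach status | grep -c "WARNING"   # expect 1

With glusterd down on a peer, both commands print the "WARNING: glusterd
might be down on one or more nodes." line instead of silently omitting that
node, and a "detach commit" is expected to fail until glusterd is restarted
on the affected node.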