From 852c475040a599ed35798dbb388c6b59c1d0a820 Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Tue, 22 Oct 2019 15:06:29 +0530
Subject: [PATCH 323/335] cli: display detailed rebalance info

Problem: When one of the nodes in the cluster is
down, rebalance status does not display detailed
information.

Cause: In glusterd_volume_rebalance_use_rsp_dict()
we aggregate the rsp from all the nodes into a
dictionary and send it to the cli for printing.
While assigning an index to the keys we consider
all the peers instead of only the peers which are
up. Because of this, the status-1 key may never be
created when a node is down. While parsing the rsp,
the cli cannot find the status-1 key in the
dictionary and returns without printing any
information.

Solution: The simplest fix for this without much
code change is to continue to look for the other
status keys when the status-1 key is not found.

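For illustration only (not part of the applied patch): a minimal
sketch of the same scan, assuming the libglusterfs dict API
(dict_t, dict_get_int32) and the installed header layout; the
helper name find_first_status_index() is hypothetical.

    #include <stdio.h>          /* snprintf */
    #include <glusterfs/dict.h> /* dict_t, dict_get_int32 (assumed path) */

    /* Return the first index i in [1, count] for which a "status-<i>"
     * key exists in the aggregated rsp dict, or -1 if no node
     * reported its status. */
    static int
    find_first_status_index(dict_t *dict, int count)
    {
        char key[64] = {0};
        int32_t status = 0;
        int i = 0;

        for (i = 1; i <= count; i++) {
            snprintf(key, sizeof(key), "status-%d", i);
            if (dict_get_int32(dict, key, &status) == 0)
                return i; /* this node's status made it into the rsp */
        }
        return -1; /* no status-<i> key present; caller should error out */
    }
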
> upstream patch: https://review.gluster.org/#/c/glusterfs/+/23588
> fixes: bz#1764119
> Change-Id: I0062839933c9706119eb85416256eade97e976dc
> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>

BUG: 1761326
Change-Id: I0062839933c9706119eb85416256eade97e976dc
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/185749
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 cli/src/cli-rpc-ops.c                      | 21 ++++++++++++++-------
 tests/bugs/glusterd/rebalance-in-cluster.t |  9 +++++++++
 2 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index b167e26..4e91265 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -1597,13 +1597,20 @@ gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type,
         goto out;
     }
 
-    snprintf(key, sizeof(key), "status-1");
-
-    ret = dict_get_int32(dict, key, (int32_t *)&status_rcd);
-    if (ret) {
-        gf_log("cli", GF_LOG_TRACE, "count %d %d", count, 1);
-        gf_log("cli", GF_LOG_TRACE, "failed to get status");
-        goto out;
+    for (i = 1; i <= count; i++) {
+        snprintf(key, sizeof(key), "status-%d", i);
+        ret = dict_get_int32(dict, key, (int32_t *)&status_rcd);
+        /* If information from a node is missing we should skip
+         * the node and try to fetch information of other nodes.
+         * If information is not found for all nodes, we should
+         * error out.
+         */
+        if (!ret)
+            break;
+        if (ret && i == count) {
+            gf_log("cli", GF_LOG_TRACE, "failed to get status");
+            goto out;
+        }
     }
 
     /* Fix layout will be sent to all nodes for the volume
diff --git a/tests/bugs/glusterd/rebalance-in-cluster.t b/tests/bugs/glusterd/rebalance-in-cluster.t
index 9565fae..469ec6c 100644
--- a/tests/bugs/glusterd/rebalance-in-cluster.t
+++ b/tests/bugs/glusterd/rebalance-in-cluster.t
@@ -4,6 +4,10 @@
 . $(dirname $0)/../../cluster.rc
 . $(dirname $0)/../../volume.rc
 
+function rebalance_status_field_1 {
+        $CLI_1 volume rebalance $1 status | awk '{print $7}' | sed -n 3p
+}
+
 cleanup;
 TEST launch_cluster 2;
 TEST $CLI_1 peer probe $H2;
@@ -29,6 +33,11 @@ TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
 TEST $CLI_1 volume rebalance $V0 start
 EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
 
+#bug-1764119 - rebalance status should display detailed info when any of the nodes is down
+TEST kill_glusterd 2
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field_1 $V0
+
+TEST start_glusterd 2
 #bug-1245142
 
 $CLI_1 volume rebalance $V0 start &
-- 
1.8.3.1