|
|
256ebe |
From d7795a592883cfb01da76b6905a7c9eb1e912bef Mon Sep 17 00:00:00 2001
|
|
|
256ebe |
From: Mohit Agrawal <moagrawa@redhat.com>
|
|
|
256ebe |
Date: Tue, 28 May 2019 08:28:29 +0530
|
|
|
256ebe |
Subject: [PATCH 147/169] glusterd: bulkvoldict thread is not handling all
|
|
|
256ebe |
volumes
|
|
|
256ebe |
|
|
|
256ebe |
Problem: In commit ac70f66c5805e10b3a1072bd467918730c0aeeb4 I
|
|
|
256ebe |
missed one condition to populate volume dictionary in
|
|
|
256ebe |
multiple threads while brick_multiplex is enabled. Due
|
|
|
256ebe |
to that glusterd is not sending volume dictionary for
|
|
|
256ebe |
all volumes to peer.
|
|
|
256ebe |
|
|
|
256ebe |
Solution: Update the condition in the code as well as update the test case
|
|
|
256ebe |
also to avoid the issue
|
|
|
256ebe |
|
|
|
256ebe |
> Change-Id: I06522dbdfee4f7e995d9cc7b7098fdf35340dc52
|
|
|
256ebe |
> fixes: bz#1711250
|
|
|
256ebe |
> Cherry pick from commit 4a5fb52eb1c5387a0fb8bfa1253e5227c7c255e8
|
|
|
256ebe |
> Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/22739/
|
|
|
256ebe |
|
|
|
256ebe |
BUG: 1711249
|
|
|
256ebe |
Change-Id: I06522dbdfee4f7e995d9cc7b7098fdf35340dc52
|
|
|
256ebe |
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
|
|
|
256ebe |
Reviewed-on: https://code.engineering.redhat.com/gerrit/171589
|
|
|
256ebe |
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
|
|
256ebe |
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
|
|
|
256ebe |
---
|
|
|
256ebe |
tests/bugs/glusterd/bug-1699339.t | 16 ++++++++++------
|
|
|
256ebe |
xlators/mgmt/glusterd/src/glusterd-utils.c | 2 +-
|
|
|
256ebe |
2 files changed, 11 insertions(+), 7 deletions(-)
|
|
|
256ebe |
|
|
|
256ebe |
diff --git a/tests/bugs/glusterd/bug-1699339.t b/tests/bugs/glusterd/bug-1699339.t
|
|
|
256ebe |
index 3e950f4..bb8d4f4 100644
|
|
|
256ebe |
--- a/tests/bugs/glusterd/bug-1699339.t
|
|
|
256ebe |
+++ b/tests/bugs/glusterd/bug-1699339.t
|
|
|
256ebe |
@@ -52,18 +52,22 @@ done
|
|
|
256ebe |
|
|
|
256ebe |
TEST kill_glusterd 1
|
|
|
256ebe |
|
|
|
256ebe |
-vol1=$(printf "%s-vol%02d" $V0 1)
|
|
|
256ebe |
+TESTS_EXPECTED_IN_LOOP=4
|
|
|
256ebe |
+for i in `seq 1 3 15`
|
|
|
256ebe |
+do
|
|
|
256ebe |
+vol1=$(printf "%s-vol%02d" $V0 $i)
|
|
|
256ebe |
TEST $CLI_2 volume set $vol1 performance.readdir-ahead on
|
|
|
256ebe |
-vol2=$(printf "%s-vol%02d" $V0 2)
|
|
|
256ebe |
-TEST $CLI_2 volume set $vol2 performance.readdir-ahead on
|
|
|
256ebe |
+done
|
|
|
256ebe |
|
|
|
256ebe |
# Bring back 1st glusterd
|
|
|
256ebe |
TEST $glusterd_1
|
|
|
256ebe |
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
|
|
|
256ebe |
|
|
|
256ebe |
+TESTS_EXPECTED_IN_LOOP=4
|
|
|
256ebe |
+for i in `seq 1 3 15`
|
|
|
256ebe |
+do
|
|
|
256ebe |
+vol1=$(printf "%s-vol%02d" $V0 $i)
|
|
|
256ebe |
EXPECT_WITHIN $PROBE_TIMEOUT "on" volinfo_field_1 $vol1 performance.readdir-ahead
|
|
|
256ebe |
-
|
|
|
256ebe |
-vol_name=$(printf "%s-vol%02d" $V0 2)
|
|
|
256ebe |
-EXPECT_WITHIN $PROBE_TIMEOUT "on" volinfo_field_1 $vol2 performance.readdir-ahead
|
|
|
256ebe |
+done
|
|
|
256ebe |
|
|
|
256ebe |
cleanup
|
|
|
256ebe |
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
|
|
|
256ebe |
index efa5a86..8f1525e 100644
|
|
|
256ebe |
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
|
|
|
256ebe |
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
|
|
|
256ebe |
@@ -3542,7 +3542,7 @@ glusterd_add_volumes_to_export_dict(dict_t **peer_data)
|
|
|
256ebe |
if ((i + 1) != totthread) {
|
|
|
256ebe |
arg->end = ((i + 1) * vol_per_thread_limit);
|
|
|
256ebe |
} else {
|
|
|
256ebe |
- arg->end = ((i * vol_per_thread_limit) + endindex);
|
|
|
256ebe |
+ arg->end = (((i + 1) * vol_per_thread_limit) + endindex);
|
|
|
256ebe |
}
|
|
|
256ebe |
th_ret = gf_thread_create_detached(
|
|
|
256ebe |
&th_id, glusterd_add_bulk_volumes_create_thread, arg,
|
|
|
256ebe |
--
|
|
|
256ebe |
1.8.3.1
|
|
|
256ebe |
|