From da38c139d41c839244cd5acc0464ddf06fa51c78 Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Wed, 3 Oct 2018 23:58:37 +0530
Subject: [PATCH 411/444] glusterd: ensure volinfo->caps is set to correct
value

With commit febf5ed4848, during the volume create op we set
volinfo->caps to 0 only when a brick belongs to the same node
and brickinfo->vg[0] is null.

Previously, volinfo->caps was set to 0 when either the brick
does not belong to the same node or brickinfo->vg[0] is null.

With this patch, volinfo->caps is again set to 0 when either the
brick does not belong to the same node or brickinfo->vg[0] is null,
restoring the behaviour prior to commit febf5ed4848.
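
To illustrate the effect, here is a minimal, standalone sketch of the
caps calculation (illustrative only: CAPS_DEFAULT, compute_caps() and
the brick layout below are made-up placeholders, not the real glusterd
structures in glusterd-volume-ops.c):

    /* Sketch: how each peer computes caps during volume create.
     * Placeholder values; the real code iterates over brickinfo
     * objects and checks gf_uuid_compare() and brickinfo->vg[0]. */
    #include <stdio.h>

    #define CAPS_DEFAULT 0xf   /* placeholder initial caps value */

    static int
    compute_caps (const int brick_is_local[], int nbricks, int fixed)
    {
            int caps = CAPS_DEFAULT;
            int i;

            for (i = 0; i < nbricks; i++) {
                    if (brick_is_local[i]) {
                            /* common non-BD case: vg[0] is null,
                             * so caps drops to 0 */
                            caps = 0;
                    } else if (fixed) {
                            /* the "} else { caps = 0; }" added by
                             * this patch: bricks on other nodes
                             * clear caps too */
                            caps = 0;
                    }
                    /* with the febf5ed4848 regression (fixed == 0) a
                     * peer hosting no brick keeps CAPS_DEFAULT */
            }
            return caps;
    }

    int
    main (void)
    {
            /* one brick, hosted only on peer 1 (as in the .t test) */
            const int on_peer1[] = { 1 };
            const int on_peer2[] = { 0 };

            printf ("regression: peer1=%d peer2=%d\n",
                    compute_caps (on_peer1, 1, 0),
                    compute_caps (on_peer2, 1, 0));
            printf ("fixed:      peer1=%d peer2=%d\n",
                    compute_caps (on_peer1, 1, 1),
                    compute_caps (on_peer2, 1, 1));
            return 0;
    }

With the regression the two peers end up with different caps for the
same volume (0 vs 15), which is why the peer is rejected after a
glusterd restart; with the fix both peers compute 0.
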
> fixes: bz#1635820
> Change-Id: I00a97415786b775fb088ac45566ad52b402f1a49
> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
upstream patch: https://review.gluster.org/#/c/glusterfs/+/21336/
Change-Id: I00a97415786b775fb088ac45566ad52b402f1a49
BUG: 1635136
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/154909
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
.../bug-1636957-peer-reject-on-glusterd-reboot.t | 29 ++++++++++++++++++++++
xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 2 ++
2 files changed, 31 insertions(+)
create mode 100644 tests/bugs/glusterd/bug-1636957-peer-reject-on-glusterd-reboot.t
diff --git a/tests/bugs/glusterd/bug-1636957-peer-reject-on-glusterd-reboot.t b/tests/bugs/glusterd/bug-1636957-peer-reject-on-glusterd-reboot.t
new file mode 100644
index 0000000..b462b38
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1636957-peer-reject-on-glusterd-reboot.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function peer_count {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0
+
+# restart glusterd on the node which doesn't host any brick of the volume;
+# the peer should not go into rejected state
+TEST kill_glusterd 2
+TEST start_glusterd 2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+cleanup
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 36d9bff..87b7acc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -2485,6 +2485,8 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
caps = 0;
}
#endif
+ } else {
+ caps = 0;
}
cds_list_add_tail (&brickinfo->brick_list, &volinfo->bricks);
--
1.8.3.1