From da38c139d41c839244cd5acc0464ddf06fa51c78 Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Wed, 3 Oct 2018 23:58:37 +0530
Subject: [PATCH 411/444] glusterd: ensure volinfo->caps is set to correct
 value

With commit febf5ed4848, during the volume create op we set
volinfo->caps to 0 only when a brick belongs to the same node
and brickinfo->vg[0] is null.
Previously, we set volinfo->caps to 0 when either a brick did
not belong to the same node or brickinfo->vg[0] was null.

With this patch, we again set volinfo->caps to 0 when either a
brick does not belong to the same node or brickinfo->vg[0] is
null, restoring the behaviour prior to commit febf5ed4848.
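
For illustration, a minimal sketch of the per-brick logic this
change restores (the BD-xlator/VG validation inside the local-brick
branch is abbreviated here, so this is not the exact code in
glusterd_op_create_volume()):

    /* Illustrative sketch only -- the real code sits inside the
     * brick loop of glusterd_op_create_volume() and also performs
     * VG validation, abbreviated here. */
    if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) {
            /* brick hosted on this node: keep the BD caps only
             * if a volume group is configured for the brick */
            if (!brickinfo->vg[0])
                    caps = 0;
    } else {
            /* brick hosted on another node (the branch restored
             * by this patch): drop the BD caps */
            caps = 0;
    }

In other words, the capabilities are retained only when every brick
is local and backed by a volume group, as described above.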

> fixes: bz#1635820
> Change-Id: I00a97415786b775fb088ac45566ad52b402f1a49
> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>

upstream patch: https://review.gluster.org/#/c/glusterfs/+/21336/

Change-Id: I00a97415786b775fb088ac45566ad52b402f1a49
BUG: 1635136
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/154909
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 .../bug-1636957-peer-reject-on-glusterd-reboot.t   | 29 ++++++++++++++++++++++
 xlators/mgmt/glusterd/src/glusterd-volume-ops.c    |  2 ++
 2 files changed, 31 insertions(+)
 create mode 100644 tests/bugs/glusterd/bug-1636957-peer-reject-on-glusterd-reboot.t

diff --git a/tests/bugs/glusterd/bug-1636957-peer-reject-on-glusterd-reboot.t b/tests/bugs/glusterd/bug-1636957-peer-reject-on-glusterd-reboot.t
new file mode 100644
index 0000000..b462b38
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1636957-peer-reject-on-glusterd-reboot.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function peer_count {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+
+# rebooting a node which doesn't host bricks for any one volume
+# peer should not go into rejected state
+TEST kill_glusterd 2
+TEST start_glusterd 2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+cleanup
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 36d9bff..87b7acc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -2485,6 +2485,8 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
                                 caps = 0;
                         }
 #endif
+                } else {
+                        caps = 0;
                 }
 
                 cds_list_add_tail (&brickinfo->brick_list, &volinfo->bricks);
-- 
1.8.3.1