From 6e15fca1621b06270983f57ac146f0f8e52f0797 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawal@redhat.com>
Date: Tue, 9 Jun 2020 15:38:12 +0530
Subject: [PATCH 449/449] test: Test case brick-mux-validation-in-cluster.t is
 failing on RHEL-8

Problem: Brick processes are not properly attached on any cluster node
when some volume options are changed on a peer node while glusterd is
down on that specific node.

Solution: At the time of glusterd restart, glusterd receives a friend
update request from a peer node if the peer has pending changes for a
volume. If a brick process is started before the friend update request
is received, brick_mux behaviour does not work properly: all bricks are
attached to the same process even though the volume options are not the
same. To avoid the issue, introduce an atomic flag (volpeerupdate) and
set it when glusterd receives a friend update request from a peer for a
specific volume. While volpeerupdate is 1, glusterd skips starting the
volume's bricks; the volume is started by the
glusterd_import_friend_volume synctask instead.

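In outline, a simplified sketch of the new control flow (identifiers are
the ones introduced by the hunks below; error paths and logging omitted):

    /* In glusterd_compare_friend_volume(): the peer reports a newer
     * version of the volume, so record that an update is pending. */
    GF_ATOMIC_INIT(volinfo->volpeerupdate, 1);

    /* In glusterd_brick_start(): while an update is pending, do not
     * start the brick here; glusterd_import_friend_volume will start
     * the volume once the friend update has been applied. */
    if (brickinfo->status == GF_BRICK_STARTING ||
        brickinfo->start_triggered ||
        GF_ATOMIC_GET(volinfo->volpeerupdate)) {
        /* treat the brick as already being in the starting phase */
        goto out;
    }
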
> Change-Id: I4c026f1e7807ded249153670e6967a2be8d22cb7
> Credit: Sanju Rakonde <srakonde@redhat.com>
> fixes: #1290
> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24540/)
> (Cherry picked from commit 955bfd567329cf7fe63e9c3b89d333a55e5e9a20)

BUG: 1844359
Change-Id: I4c026f1e7807ded249153670e6967a2be8d22cb7
Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/202812
Tested-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-by: Sanju Rakonde <srakonde@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 tests/bugs/glusterd/brick-mux-validation-in-cluster.t | 4 +---
 xlators/mgmt/glusterd/src/glusterd-utils.c            | 7 +++++--
 xlators/mgmt/glusterd/src/glusterd.h                  | 4 ++++
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
index f088dbb..b6af487 100644
--- a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
+++ b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
@@ -100,10 +100,8 @@ $CLI_2 volume set $V0 performance.readdir-ahead on
 $CLI_2 volume set $V1 performance.readdir-ahead on
 
 TEST $glusterd_1;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
 
-sleep 10
-
-EXPECT 4 count_brick_processes
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_brick_pids
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0  count_N/A_brick_pids
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 2eb2a76..6f904ae 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -3758,6 +3758,7 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
                "Version of volume %s differ. local version = %d, "
                "remote version = %d on peer %s",
                volinfo->volname, volinfo->version, version, hostname);
+        GF_ATOMIC_INIT(volinfo->volpeerupdate, 1);
         *status = GLUSTERD_VOL_COMP_UPDATE_REQ;
         goto out;
     } else if (version < volinfo->version) {
@@ -4784,7 +4785,8 @@ glusterd_volinfo_stop_stale_bricks(glusterd_volinfo_t *new_volinfo,
          * or if it's part of the new volume and is pending a snap,
          * then stop the brick process
          */
-        if (ret || (new_brickinfo->snap_status == -1)) {
+        if (ret || (new_brickinfo->snap_status == -1) ||
+            GF_ATOMIC_GET(old_volinfo->volpeerupdate)) {
             /*TODO: may need to switch to 'atomic' flavour of
              * brick_stop, once we make peer rpc program also
              * synctask enabled*/
@@ -6490,7 +6492,8 @@ glusterd_brick_start(glusterd_volinfo_t *volinfo,
      * three different triggers for an attempt to start the brick process
      * due to the quorum handling code in glusterd_friend_sm.
      */
-    if (brickinfo->status == GF_BRICK_STARTING || brickinfo->start_triggered) {
+    if (brickinfo->status == GF_BRICK_STARTING || brickinfo->start_triggered ||
+        GF_ATOMIC_GET(volinfo->volpeerupdate)) {
         gf_msg_debug(this->name, 0,
                      "brick %s is already in starting "
                      "phase",
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 1c6c3b1..f739b5d 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -523,6 +523,10 @@ struct glusterd_volinfo_ {
     pthread_mutex_t store_volinfo_lock; /* acquire lock for
                                          * updating the volinfo
                                          */
+    gf_atomic_t volpeerupdate;
+    /* Flag to indicate that the volume has received
+       an update from a peer
+    */
 };
 
 typedef enum gd_snap_status_ {
-- 
1.8.3.1