From 6e15fca1621b06270983f57ac146f0f8e52f0797 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawal@redhat.com>
Date: Tue, 9 Jun 2020 15:38:12 +0530
Subject: [PATCH 449/449] test: Test case brick-mux-validation-in-cluster.t is
 failing on RHEL-8

Problem: Brick processes are not properly attached on any cluster node
when some volume options are changed on a peer node while glusterd is
down on that specific node.

Solution: At the time of a glusterd restart, glusterd receives a friend
update request from a peer node if that peer has some changes for a
volume. If a brick process is started before the friend update request
is received, the brick_mux behavior does not work properly: all bricks
get attached to the same process even though the volume options are not
the same. To avoid the issue, introduce an atomic flag, volpeerupdate,
and set it when glusterd receives a friend update request from a peer
for a specific volume. If the volpeerupdate flag is 1, the volume is
started by the glusterd_import_friend_volume synctask instead.
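
In short, the flag is set where glusterd detects that the peer carries a
newer definition of the volume, and it is checked before any early brick
start. A simplified sketch assembled from the hunks below (the enclosing
version comparison and the goto/out handling are abbreviated here, not
quoted verbatim from the source):

    /* glusterd_compare_friend_volume(): the peer advertises a newer
     * volume version, so remember that an update for it is pending */
    if (version > volinfo->version) {
        GF_ATOMIC_INIT(volinfo->volpeerupdate, 1);
        *status = GLUSTERD_VOL_COMP_UPDATE_REQ;
        goto out;
    }

    /* glusterd_brick_start(): skip the early start while an update is
     * pending; glusterd_import_friend_volume starts the brick with the
     * refreshed volinfo instead */
    if (brickinfo->status == GF_BRICK_STARTING ||
        brickinfo->start_triggered ||
        GF_ATOMIC_GET(volinfo->volpeerupdate)) {
        goto out;
    }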

> Change-Id: I4c026f1e7807ded249153670e6967a2be8d22cb7
> Credit: Sanju Rakonde <srakonde@redhat.com>
> fixes: #1290
> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/24540/)
> (Cherry pick from commit 955bfd567329cf7fe63e9c3b89d333a55e5e9a20)

BUG: 1844359
Change-Id: I4c026f1e7807ded249153670e6967a2be8d22cb7
Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/202812
Tested-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-by: Sanju Rakonde <srakonde@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 tests/bugs/glusterd/brick-mux-validation-in-cluster.t | 4 +---
 xlators/mgmt/glusterd/src/glusterd-utils.c            | 7 +++++--
 xlators/mgmt/glusterd/src/glusterd.h                  | 4 ++++
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
index f088dbb..b6af487 100644
--- a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
+++ b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
@@ -100,10 +100,8 @@ $CLI_2 volume set $V0 performance.readdir-ahead on
 $CLI_2 volume set $V1 performance.readdir-ahead on
 
 TEST $glusterd_1;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
 
-sleep 10
-
-EXPECT 4 count_brick_processes
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_brick_pids
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0  count_N/A_brick_pids
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 2eb2a76..6f904ae 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -3758,6 +3758,7 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
                "Version of volume %s differ. local version = %d, "
                "remote version = %d on peer %s",
                volinfo->volname, volinfo->version, version, hostname);
+        GF_ATOMIC_INIT(volinfo->volpeerupdate, 1);
         *status = GLUSTERD_VOL_COMP_UPDATE_REQ;
         goto out;
     } else if (version < volinfo->version) {
@@ -4784,7 +4785,8 @@ glusterd_volinfo_stop_stale_bricks(glusterd_volinfo_t *new_volinfo,
          * or if it's part of the new volume and is pending a snap,
          * then stop the brick process
          */
-        if (ret || (new_brickinfo->snap_status == -1)) {
+        if (ret || (new_brickinfo->snap_status == -1) ||
+            GF_ATOMIC_GET(old_volinfo->volpeerupdate)) {
             /*TODO: may need to switch to 'atomic' flavour of
              * brick_stop, once we make peer rpc program also
              * synctask enabled*/
@@ -6490,7 +6492,8 @@ glusterd_brick_start(glusterd_volinfo_t *volinfo,
      * three different triggers for an attempt to start the brick process
      * due to the quorum handling code in glusterd_friend_sm.
      */
-    if (brickinfo->status == GF_BRICK_STARTING || brickinfo->start_triggered) {
+    if (brickinfo->status == GF_BRICK_STARTING || brickinfo->start_triggered ||
+        GF_ATOMIC_GET(volinfo->volpeerupdate)) {
         gf_msg_debug(this->name, 0,
                      "brick %s is already in starting "
                      "phase",
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 1c6c3b1..f739b5d 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -523,6 +523,10 @@ struct glusterd_volinfo_ {
     pthread_mutex_t store_volinfo_lock; /* acquire lock for
                                          * updating the volinfo
                                          */
+    gf_atomic_t volpeerupdate;
+    /* Flag to check about volume has received updates
+       from peer
+    */
 };
 
 typedef enum gd_snap_status_ {
-- 
1.8.3.1