From 646292b4f73bf1b506d034b85787f794963d7196 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Mon, 6 May 2019 23:35:08 +0530
Subject: [PATCH 137/141] shd/glusterd: Serialize shd manager to prevent race
 condition

At the time of a glusterd restart, while doing a handshake,
there is a possibility that multiple shd managers might get
executed. Because of this, there is a chance that multiple
shd processes get spawned during a glusterd restart.
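
The fix serializes shd manager runs behind a flag in glusterd's conf:
each run waits for any in-flight run to clear the flag before it
proceeds, releasing big_lock while it waits so that the in-flight run
can finish. A simplified sketch of the logic this patch adds to
glusterd_shdsvc_manager() (abridged; error handling and the rest of
the function are omitted):

    /* Wait for any in-flight shd manager run to finish; release the
     * big_lock while sleeping so that run can make progress. */
    while (conf->restart_shd) {
        synclock_unlock(&conf->big_lock);
        sleep(2);
        synclock_lock(&conf->big_lock);
    }
    conf->restart_shd = _gf_true;  /* mark a run as in progress */
    shd_restart = _gf_true;        /* remember that we set the flag */

    /* ... regenerate the shd volfile and (re)start or stop the daemon ... */

out:
    if (shd_restart)
        conf->restart_shd = _gf_false;  /* let the next run proceed */
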
> upstream patch : https://review.gluster.org/#/c/glusterfs/+/22667/

>Change-Id: Ie20798441e07d7d7a93b7d38dfb924cea178a920
>fixes: bz#1707081
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

BUG: 1704851
Change-Id: Ie20798441e07d7d7a93b7d38dfb924cea178a920
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/169947
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 .../serialize-shd-manager-glusterd-restart.t       | 54 ++++++++++++++++++++++
 xlators/mgmt/glusterd/src/glusterd-shd-svc.c       | 14 ++++++
 xlators/mgmt/glusterd/src/glusterd.c               |  1 +
 xlators/mgmt/glusterd/src/glusterd.h               |  3 ++
 4 files changed, 72 insertions(+)
 create mode 100644 tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
diff --git a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
new file mode 100644
index 0000000..3a27c2a
--- /dev/null
+++ b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
@@ -0,0 +1,54 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+count=`$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l`
+echo $count
+}
+
+function check_shd {
+ps aux | grep $1 | grep glustershd | wc -l
+}
+
+cleanup
+
+
+TEST launch_cluster 6
+
+TESTS_EXPECTED_IN_LOOP=25
+for i in $(seq 2 6); do
+    hostname="H$i"
+    TEST $CLI_1 peer probe ${!hostname}
+done
+
+
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers;
+for i in $(seq 1 5); do
+
+    TEST $CLI_1 volume create ${V0}_$i replica 3 $H1:$B1/${V0}_$i $H2:$B2/${V0}_$i $H3:$B3/${V0}_$i $H4:$B4/${V0}_$i $H5:$B5/${V0}_$i $H6:$B6/${V0}_$i
+    TEST $CLI_1 volume start ${V0}_$i force
+
+done
+
+#kill a node
+TEST kill_node 3
+
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_shd $H3
+
+for i in $(seq 1 5); do
+
+    TEST $CLI_1 volume stop ${V0}_$i
+    TEST $CLI_1 volume delete ${V0}_$i
+
+done
+
+for i in $(seq 1 6); do
+    hostname="H$i"
+    EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 0 check_shd ${!hostname}
+done
+cleanup
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index a9eab42..75f9a07 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -254,14 +254,26 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
 {
     int ret = -1;
     glusterd_volinfo_t *volinfo = NULL;
+    glusterd_conf_t *conf = NULL;
+    gf_boolean_t shd_restart = _gf_false;
 
+    conf = THIS->private;
     volinfo = data;
+    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
     GF_VALIDATE_OR_GOTO("glusterd", svc, out);
     GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
 
     if (volinfo)
         glusterd_volinfo_ref(volinfo);
 
+    while (conf->restart_shd) {
+        synclock_unlock(&conf->big_lock);
+        sleep(2);
+        synclock_lock(&conf->big_lock);
+    }
+    conf->restart_shd = _gf_true;
+    shd_restart = _gf_true;
+
     ret = glusterd_shdsvc_create_volfile(volinfo);
     if (ret)
         goto out;
@@ -310,6 +322,8 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
         }
     }
 out:
+    if (shd_restart)
+        conf->restart_shd = _gf_false;
     if (volinfo)
         glusterd_volinfo_unref(volinfo);
     if (ret)
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index c0973cb..6d7dd4a 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -1819,6 +1819,7 @@ init(xlator_t *this)
     conf->rpc = rpc;
     conf->uds_rpc = uds_rpc;
     conf->gfs_mgmt = &gd_brick_prog;
+    conf->restart_shd = _gf_false;
     this->private = conf;
     /* conf->workdir and conf->rundir are smaller than PATH_MAX; gcc's
      * snprintf checking will throw an error here if sprintf is used.
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index bd9f509..2ea8560 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -222,6 +222,9 @@ typedef struct {
     gf_atomic_t blockers;
     uint32_t mgmt_v3_lock_timeout;
     gf_boolean_t restart_bricks;
+    gf_boolean_t restart_shd;    /* This flag prevents running two shd managers
+                                    simultaneously
+                                 */
     pthread_mutex_t attach_lock; /* Lock can be per process or a common one */
     pthread_mutex_t volume_lock; /* We release the big_lock from lot of places
                                     which might lead the modification of volinfo
-- 
1.8.3.1