Blob Blame History Raw
From 8d7cb72921b983fbe2ab0a2bf166d5c57036e402 Mon Sep 17 00:00:00 2001
From: Gaurav Kumar Garg <garg.gaurav52@gmail.com>
Date: Wed, 12 Aug 2015 14:27:58 +0530
Subject: [PATCH 247/279] glusterd: Pass NULL in glusterd_svc_manager in glusterd_restart_bricks

On restarting glusterd, the quota daemon is not started when more than one
volume is configured and quota is enabled only on the 2nd volume.
This is because, while restarting, glusterd restarts all the bricks.
During brick restart it starts the respective daemons by passing the volinfo
of the first volume. Passing a volinfo to glusterd_svc_manager implies that
the daemon managers will take action based on that volume's configuration,
which is incorrect for per-node daemons.

The fix is to pass NULL as the volinfo while restarting bricks.

Change-Id: I69f0df80dc56e1d2e2598ab143bf487743042d5d
BUG: 1238071
Signed-off-by: Gaurav Kumar Garg <ggarg@redhat.com>
Backport of: http://review.gluster.org/#/c/11658/
Reviewed-on: https://code.engineering.redhat.com/gerrit/54970
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
---
 .../bug-1242875-do-not-pass-volinfo-quota.t        |   38 ++++++++++++++++++++
 tests/volume.rc                                    |    4 ++
 xlators/mgmt/glusterd/src/glusterd-utils.c         |    2 +-
 3 files changed, 43 insertions(+), 1 deletions(-)
 create mode 100644 tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t

diff --git a/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t b/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t
new file mode 100644
index 0000000..c229d43
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Lets create volume V0 and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+## Lets create volume V1 and start the volume
+TEST $CLI volume create $V1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume start $V1
+
+## Enable quota on 2nd volume
+TEST $CLI volume quota $V1 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+## Killing all gluster process
+pkill gluster;
+
+## There should not be any quota daemon running after killing the gluster processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_quotad_count
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## The quota daemon should start on restarting glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+cleanup;
diff --git a/tests/volume.rc b/tests/volume.rc
index 09a8d51..570d87d 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -543,6 +543,10 @@ function get_scrubd_count {
         ps auxww | grep glusterfs | grep scrub.pid | grep -v grep | wc -l
 }
 
+function get_quotad_count {
+        ps auxww | grep glusterfs | grep quotad.pid | grep -v grep | wc -l
+}
+
 function quota_list_field () {
         local QUOTA_PATH=$1
         local FIELD=$2
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 7781773..5b9d5e3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -4488,7 +4488,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
                         continue;
                 if (start_svcs == _gf_false) {
                         start_svcs = _gf_true;
-                        glusterd_svcs_manager (volinfo);
+                        glusterd_svcs_manager (NULL);
                 }
                 gf_msg_debug (this->name, 0, "starting the volume %s",
                         volinfo->volname);
-- 
1.7.1