From 4bf98e63a481aea6143e8f404aa4650f7a80e317 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Wed, 3 Jan 2018 14:29:51 +0530
Subject: [PATCH 120/128] glusterd: connect to an existing brick process when
 quorum status is NOT_APPLICABLE_QUORUM

First of all, this patch reverts commit 635c1c3, as that change caused a
regression where bricks did not come up in time when a node was rebooted.
This patch instead fixes the problem by just trying to connect to an
existing running brick process when the quorum status is not applicable.

> upstream patch : https://review.gluster.org/#/c/19134/

Change-Id: I0efb5901832824b1c15dcac529bffac85173e097
BUG: 1509102
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/126996
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-brick-ops.c | 2 +-
xlators/mgmt/glusterd/src/glusterd-handshake.c | 2 +-
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 1 +
xlators/mgmt/glusterd/src/glusterd-replace-brick.c | 3 ++-
xlators/mgmt/glusterd/src/glusterd-server-quorum.c | 27 ++++++++++++++++++----
xlators/mgmt/glusterd/src/glusterd-utils.c | 13 +++++++----
xlators/mgmt/glusterd/src/glusterd-utils.h | 3 ++-
xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 3 ++-
8 files changed, 40 insertions(+), 14 deletions(-)
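
Note for reviewers: the standalone sketch below models the control flow added
by the new only_connect argument to glusterd_brick_start(). It is a simplified
illustration, not the patched code itself; the struct brick type and the
printf placeholders stand in for glusterd's real brickinfo handling and for
its RPC-connect and brick-spawn paths.

/* Standalone sketch; types and helpers here are placeholders,
 * not the real glusterd ones. */
#include <stdbool.h>
#include <stdio.h>

struct brick {
        bool running;          /* brick process already up?          */
        bool start_triggered;  /* mirrors brickinfo->start_triggered */
};

/* Models glusterd_brick_start (volinfo, brickinfo, wait, only_connect). */
static int
brick_start (struct brick *b, bool wait, bool only_connect)
{
        if (!only_connect)
                b->start_triggered = true;

        if (b->running) {
                /* Brick process already exists: just (re)connect to it,
                 * whether or not only_connect was requested. */
                printf ("connect to existing brick process\n");
                return 0;
        }

        if (only_connect)
                /* Quorum status unchanged / not applicable: never spawn
                 * a new brick process from this path. */
                return 0;

        /* Normal path: actually start the brick (and optionally wait). */
        printf ("spawn brick process%s\n", wait ? " and wait" : "");
        b->running = true;
        return 0;
}

int
main (void)
{
        struct brick b = { .running = true, .start_triggered = false };

        /* What glusterd_do_volume_quorum_action() now does for each local
         * brick when the volume's quorum status has not changed:
         * only_connect = true. */
        brick_start (&b, false, true);
        return 0;
}

With quorum status unchanged, glusterd_do_volume_quorum_action() calls
glusterd_brick_start() with only_connect set to _gf_true for every local
brick, so a rebooted node re-attaches to bricks that are already running
instead of forcibly restarting them.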

diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index e88fa3f..416412e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -1554,7 +1554,7 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
}
}
ret = glusterd_brick_start (volinfo, brickinfo,
- _gf_true);
+ _gf_true, _gf_false);
if (ret)
goto out;
i++;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 35aeca3..3d1dfb2 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -658,7 +658,7 @@ glusterd_create_missed_snap (glusterd_missed_snap_info *missed_snapinfo,
}

brickinfo->snap_status = 0;
- ret = glusterd_brick_start (snap_vol, brickinfo, _gf_false);
+ ret = glusterd_brick_start (snap_vol, brickinfo, _gf_false, _gf_false);
if (ret) {
gf_msg (this->name, GF_LOG_WARNING, 0,
GD_MSG_BRICK_DISCONNECTED, "starting the "
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 86f18f0..b1a6e06 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2437,6 +2437,7 @@ glusterd_start_bricks (glusterd_volinfo_t *volinfo)
pthread_mutex_lock (&brickinfo->restart_mutex);
{
ret = glusterd_brick_start (volinfo, brickinfo,
+ _gf_false,
_gf_false);
}
pthread_mutex_unlock (&brickinfo->restart_mutex);
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index b11adf1..a037323 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -429,7 +429,8 @@ glusterd_op_perform_replace_brick (glusterd_volinfo_t *volinfo,
goto out;

if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_brick_start (volinfo, new_brickinfo, _gf_false);
+ ret = glusterd_brick_start (volinfo, new_brickinfo, _gf_false,
+ _gf_false);
if (ret)
goto out;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
index 995a568..b01bfaa 100644
--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
+++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
@@ -314,6 +314,7 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo = NULL;
gd_quorum_status_t quorum_status = NOT_APPLICABLE_QUORUM;
gf_boolean_t follows_quorum = _gf_false;
+ gf_boolean_t quorum_status_unchanged = _gf_false;

if (volinfo->status != GLUSTERD_STATUS_STARTED) {
volinfo->quorum_status = NOT_APPLICABLE_QUORUM;
@@ -341,9 +342,10 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo,
* the bricks that are down are brought up again. In this process it
* also brings up the brick that is purposefully taken down.
*/
- if (quorum_status != NOT_APPLICABLE_QUORUM &&
- volinfo->quorum_status == quorum_status)
+ if (volinfo->quorum_status == quorum_status) {
+ quorum_status_unchanged = _gf_true;
goto out;
+ }

if (quorum_status == MEETS_QUORUM) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -368,9 +370,10 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo,
if (!brickinfo->start_triggered) {
pthread_mutex_lock (&brickinfo->restart_mutex);
{
- glusterd_brick_start (volinfo,
- brickinfo,
- _gf_false);
+ ret = glusterd_brick_start (volinfo,
+ brickinfo,
+ _gf_false,
+ _gf_false);
}
pthread_mutex_unlock (&brickinfo->restart_mutex);
}
@@ -392,6 +395,20 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo,
}
}
out:
+ if (quorum_status_unchanged) {
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ if (!glusterd_is_local_brick (this, volinfo, brickinfo))
+ continue;
+ ret = glusterd_brick_start (volinfo, brickinfo,
+ _gf_false, _gf_true);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRICK_DISCONNECTED, "Failed to "
+ "connect to %s:%s", brickinfo->hostname,
+ brickinfo->path);
+ }
+ }
+ }
return;
}

|
|
|
d1681e |
index 1b2cc43..f1b365f 100644
|
|
|
d1681e |
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
|
|
|
d1681e |
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
|
|
|
d1681e |
@@ -5796,7 +5796,8 @@ glusterd_get_sock_from_brick_pid (int pid, char *sockpath, size_t len)
|
|
|
d1681e |
int
|
|
|
d1681e |
glusterd_brick_start (glusterd_volinfo_t *volinfo,
|
|
|
d1681e |
glusterd_brickinfo_t *brickinfo,
|
|
|
d1681e |
- gf_boolean_t wait)
|
|
|
d1681e |
+ gf_boolean_t wait,
|
|
|
d1681e |
+ gf_boolean_t only_connect)
|
|
|
d1681e |
{
|
|
|
d1681e |
int ret = -1;
|
|
|
d1681e |
xlator_t *this = NULL;
|
|
|
d1681e |
@@ -5847,7 +5848,9 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
|
|
|
d1681e |
ret = 0;
|
|
|
d1681e |
goto out;
|
|
|
d1681e |
}
|
|
|
d1681e |
- brickinfo->start_triggered = _gf_true;
|
|
|
d1681e |
+ if (!only_connect)
|
|
|
d1681e |
+ brickinfo->start_triggered = _gf_true;
|
|
|
d1681e |
+
|
|
|
d1681e |
GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, brickinfo, conf);
|
|
|
d1681e |
if (gf_is_service_running (pidfile, &pid)) {
|
|
|
d1681e |
if (brickinfo->status != GF_BRICK_STARTING &&
|
|
|
d1681e |
@@ -5905,6 +5908,8 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
|
|
|
d1681e |
}
|
|
|
d1681e |
return 0;
|
|
|
d1681e |
}
|
|
|
d1681e |
+ if (only_connect)
|
|
|
d1681e |
+ return 0;
|
|
|
d1681e |
|
|
|
d1681e |
run:
|
|
|
d1681e |
ret = _mk_rundir_p (volinfo);
|
|
|
d1681e |
@@ -6032,7 +6037,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
|
|
|
d1681e |
{
|
|
|
d1681e |
glusterd_brick_start
|
|
|
d1681e |
(volinfo, brickinfo,
|
|
|
d1681e |
- _gf_false);
|
|
|
d1681e |
+ _gf_false, _gf_false);
|
|
|
d1681e |
}
|
|
|
d1681e |
pthread_mutex_unlock
|
|
|
d1681e |
(&brickinfo->restart_mutex);
|
|
|
d1681e |
@@ -6081,7 +6086,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
|
|
|
d1681e |
{
|
|
|
d1681e |
glusterd_brick_start
|
|
|
d1681e |
(volinfo, brickinfo,
|
|
|
d1681e |
- _gf_false);
|
|
|
d1681e |
+ _gf_false, _gf_false);
|
|
|
d1681e |
}
|
|
|
d1681e |
pthread_mutex_unlock
|
|
|
d1681e |
(&brickinfo->restart_mutex);
|
|
|
d1681e |
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
|
|
|
d1681e |
index abaec4b..9194da0 100644
|
|
|
d1681e |
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
|
|
|
d1681e |
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
|
|
|
d1681e |
@@ -277,7 +277,8 @@ glusterd_all_volume_cond_check (glusterd_condition_func func, int status,
|
|
|
d1681e |
int
|
|
|
d1681e |
glusterd_brick_start (glusterd_volinfo_t *volinfo,
|
|
|
d1681e |
glusterd_brickinfo_t *brickinfo,
|
|
|
d1681e |
- gf_boolean_t wait);
|
|
|
d1681e |
+ gf_boolean_t wait,
|
|
|
d1681e |
+ gf_boolean_t only_connect);
|
|
|
d1681e |
int
|
|
|
d1681e |
glusterd_brick_stop (glusterd_volinfo_t *volinfo,
|
|
|
d1681e |
glusterd_brickinfo_t *brickinfo,
|
|
|
d1681e |
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
|
|
|
d1681e |
index de97e6a..414f9ba 100644
|
|
|
d1681e |
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
|
|
|
d1681e |
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
|
|
|
d1681e |
@@ -2564,7 +2564,8 @@ glusterd_start_volume (glusterd_volinfo_t *volinfo, int flags,
|
|
|
d1681e |
if (flags & GF_CLI_FLAG_OP_FORCE) {
|
|
|
d1681e |
brickinfo->start_triggered = _gf_false;
|
|
|
d1681e |
}
|
|
|
d1681e |
- ret = glusterd_brick_start (volinfo, brickinfo, wait);
|
|
|
d1681e |
+ ret = glusterd_brick_start (volinfo, brickinfo, wait,
|
|
|
d1681e |
+ _gf_false);
|
|
|
d1681e |
/* If 'force' try to start all bricks regardless of success or
|
|
|
d1681e |
* failure
|
|
|
d1681e |
*/
|
|
|
d1681e |
--
|
|
|
d1681e |
1.8.3.1
|
|
|
d1681e |
|