From d8e094d1bdd2dff5e8f81c0786ca62f6d6dc45ee Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Tue, 31 Jul 2018 12:33:49 +0530
Subject: [PATCH 365/385] glusterd: ignore importing volume which is undergoing
a delete operation
Problem explanation:
In a 3-node cluster, suppose N1 originates a volume delete operation
and, while N1's commit phase completes, the glusterd service on N2 or
N3 gets disconnected from N1 before finishing its own commit phase.
When the peers reconnect, N1 will end up importing the volume, which
is still in-flight for deletion on the other nodes, as a fresh volume,
resulting in an incorrect configuration state.
Fix:
Mark a volume as stage-deleted once a volume delete operation passes
its staging phase, and reset this flag during the unlock phase. If the
same volume reaches other peers through an import during this
intermediate window, it should not be considered for recreation.
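As a minimal, hypothetical C sketch of that life cycle (the struct and
helper names below are illustrative stand-ins, not the actual glusterd
symbols; the real changes follow in the diff):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for glusterd_volinfo_t: only the new flag. */
    typedef struct {
            const char *name;
            bool        stage_deleted;
    } volinfo_model_t;

    /* Delete passed staging on this node: mark the volume. */
    static void
    on_stage_delete (volinfo_model_t *v)
    {
            v->stage_deleted = true;
    }

    /* Unlock path: if the volume still exists here, the delete failed
     * partway, so clear the flag to make the volume importable again. */
    static void
    on_unlock (volinfo_model_t *v)
    {
            v->stage_deleted = false;
    }

    /* Import decision: a peer copy that is mid-delete must not be
     * recreated locally as a fresh volume. */
    static bool
    should_import (const volinfo_model_t *peer_copy)
    {
            return !peer_copy->stage_deleted;
    }

    int
    main (void)
    {
            volinfo_model_t vol = { "test-vol", false };

            on_stage_delete (&vol);
            printf ("import mid-delete?  %s\n",
                    should_import (&vol) ? "yes" : "no");   /* no */

            on_unlock (&vol);   /* delete failed, flag reset */
            printf ("import after reset? %s\n",
                    should_import (&vol) ? "yes" : "no");   /* yes */
            return 0;
    }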
An automated .t is quite tough to implement with the current infra.
Test Case:
1. Keep creating and deleting volumes in a loop on a 3-node cluster
   (a scripted sketch of this loop follows the list).
2. Simulate a network failure between the peers (ifdown followed by
   ifup).
3. Check that the output of 'gluster v list | wc -l' stays the same
   across all 3 nodes throughout steps 1 and 2.
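A rough driver for step 1, as a hypothetical C sketch; the peer names
n1/n2/n3, the brick paths, and the volume name are placeholders for a
real 3-node setup:

    #include <stdio.h>
    #include <stdlib.h>

    int
    main (void)
    {
            int i;

            for (i = 0; i < 100; i++) {
                    /* Peers n1/n2/n3 and brick paths are placeholders. */
                    system ("gluster volume create test-vol replica 3 "
                            "n1:/bricks/b1 n2:/bricks/b1 n3:/bricks/b1 "
                            "force");
                    /* --mode=script skips the interactive confirmation. */
                    system ("gluster --mode=script volume delete test-vol");
            }

            printf ("now compare 'gluster v list | wc -l' on all nodes\n");
            return 0;
    }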
>upstream patch : https://review.gluster.org/#/c/glusterfs/+/20592
>Change-Id: Ifdd5dc39699120258d7fdd42fe2deb9de25c6246
>Fixes: bz#1605077
>Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Change-Id: Ifdd5dc39699120258d7fdd42fe2deb9de25c6246
BUG: 1618221
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/149872
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-locks.c | 13 +++++++++--
xlators/mgmt/glusterd/src/glusterd-utils.c | 30 ++++++++++++++++++++++---
xlators/mgmt/glusterd/src/glusterd-utils.h | 2 +-
xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 2 +-
xlators/mgmt/glusterd/src/glusterd.h | 3 +++
5 files changed, 43 insertions(+), 7 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index 831be20..f4e0225 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -790,6 +790,7 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
int32_t ret = -1;
gf_boolean_t is_valid = _gf_true;
glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL;
uuid_t owner = {0};
xlator_t *this = NULL;
@@ -888,8 +889,7 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
"Lock for %s %s successfully released",
type, name);
- ret = 0;
- /* Release owner refernce which was held during lock */
+ /* Release owner reference which was held during lock */
if (mgmt_lock_timer->timer) {
ret = -1;
mgmt_lock_timer_xl = mgmt_lock_timer->xl;
@@ -906,6 +906,15 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
dict_del (priv->mgmt_v3_lock_timer, key_dup);
mgmt_lock_timer->timer = NULL;
}
+ ret = glusterd_volinfo_find (name, &volinfo);
+ if (volinfo && volinfo->stage_deleted) {
+ /* the volume still exists, which means the delete
+ * operation failed in one of its phases; make sure the
+ * stage_deleted flag is reset to false
+ */
+ volinfo->stage_deleted = _gf_false;
+ }
+ ret = 0;
out:
gf_msg_trace (this->name, 0, "Returning %d", ret);
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 4fd8575..7f52602 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -1699,7 +1699,7 @@ glusterd_volinfo_find_by_volume_id (uuid_t volume_id, glusterd_volinfo_t **volin
}
int32_t
-glusterd_volinfo_find (char *volname, glusterd_volinfo_t **volinfo)
+glusterd_volinfo_find (const char *volname, glusterd_volinfo_t **volinfo)
{
glusterd_volinfo_t *tmp_volinfo = NULL;
int32_t ret = -1;
@@ -2952,6 +2952,11 @@ glusterd_add_volume_to_dict (glusterd_volinfo_t *volinfo,
if (ret)
goto out;
+ snprintf (key, sizeof (key), "%s%d.stage_deleted", prefix, count);
+ ret = dict_set_uint32 (dict, key, (uint32_t)volinfo->stage_deleted);
+ if (ret)
+ goto out;
+
/* tiering related variables */
memset (key, 0, sizeof (key));
@@ -3355,6 +3360,7 @@ glusterd_compare_friend_volume (dict_t *peer_data, int32_t count,
uint32_t cksum = 0;
uint32_t quota_cksum = 0;
uint32_t quota_version = 0;
+ uint32_t stage_deleted = 0;
int32_t version = 0;
xlator_t *this = NULL;
@@ -3370,9 +3376,15 @@ glusterd_compare_friend_volume (dict_t *peer_data, int32_t count,
goto out;
ret = glusterd_volinfo_find (volname, &volinfo);
-
if (ret) {
- *status = GLUSTERD_VOL_COMP_UPDATE_REQ;
+ snprintf (key, sizeof (key), "volume%d.stage_deleted", count);
+ ret = dict_get_uint32 (peer_data, key, &stage_deleted);
+ /* stage_deleted = 1 means the volume is still in the process of
+ * being deleted, so we shouldn't try to create a fresh
+ * volume here, which would lead to a stale entry
+ */
+ if (stage_deleted == 0)
+ *status = GLUSTERD_VOL_COMP_UPDATE_REQ;
ret = 0;
goto out;
}
@@ -3929,6 +3941,7 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
char *rebalance_id_str = NULL;
int op_version = 0;
int client_op_version = 0;
+ uint32_t stage_deleted = 0;
GF_ASSERT (peer_data);
GF_ASSERT (volinfo);
@@ -3941,6 +3954,17 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
goto out;
}
+ snprintf (key, sizeof (key), "%s%d.stage_deleted", prefix, count);
+ ret = dict_get_uint32 (peer_data, key, &stage_deleted);
+ /* stage_deleted = 1 means the volume is still in the process of
+ * being deleted, so we shouldn't try to create a fresh
+ * volume here, which would lead to a stale entry
+ */
+ if (stage_deleted) {
+ ret = 0;
+ goto out;
+ }
+
ret = glusterd_volinfo_new (&new_volinfo);
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 4835728..ffcc636 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -159,7 +159,7 @@ glusterd_brickinfo_new_from_brick (char *brick,
char **op_errstr);
int32_t
-glusterd_volinfo_find (char *volname, glusterd_volinfo_t **volinfo);
+glusterd_volinfo_find (const char *volname, glusterd_volinfo_t **volinfo);
int
glusterd_volinfo_find_by_volume_id (uuid_t volume_id, glusterd_volinfo_t **volinfo);
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 8bb0b6d..94e07cb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1828,7 +1828,7 @@ glusterd_op_stage_delete_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof(msg), "Some of the peers are down");
goto out;
}
-
+ volinfo->stage_deleted = _gf_true;
ret = 0;
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 4ec609f..d4f4f7e 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -475,6 +475,9 @@ struct glusterd_volinfo_ {
glusterd_snapdsvc_t snapd;
glusterd_tierdsvc_t tierd;
int32_t quota_xattr_version;
+ gf_boolean_t stage_deleted; /* volume has passed staging
+ * for a delete operation
+ */
};
typedef enum gd_snap_status_ {
--
1.8.3.1