From 80e2b34a8f87a3d4675f93e62c11f7ef296a9c08 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Thu, 9 Jun 2016 18:22:43 +0530
Subject: [PATCH 194/195] glusterd: fail volume delete if one of the nodes is down

Backport of http://review.gluster.org/14681

Deleting a volume while one of the nodes in the cluster is down is buggy:
once that node comes back up, it resyncs the same volume into the cluster.
Until the soft-delete feature tracked in http://review.gluster.org/12963 is
in place, this change acts as a safeguard by blocking the volume deletion.
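
In essence, the patch adds a glusterd_are_all_peers_up () helper that walks
conf->peers under the RCU read lock and returns _gf_false as soon as it finds
a disconnected peer; glusterd_op_stage_delete_volume () then rejects the
delete when that check fails. A minimal sketch of the staging-side guard as
introduced below (names taken from the diff, error handling trimmed):

        if (!glusterd_are_all_peers_up ()) {
                ret = -1;
                snprintf (msg, sizeof (msg), "Some of the peers are down");
                goto out;
        }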

Change-Id: I9c13869c4a7e7a947f88842c6dc6f231c0eeda6c
BUG: 1344625
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/14681
Smoke: Gluster Build System <jenkins@build.gluster.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-on: https://code.engineering.redhat.com/gerrit/76322
---
 .../bug-1344407-volume-delete-on-node-down.t       |   19 +++++++++++++
 xlators/mgmt/glusterd/src/glusterd-peer-utils.c    |   29 ++++++++++++++++++++
 xlators/mgmt/glusterd/src/glusterd-peer-utils.h    |    3 ++
 xlators/mgmt/glusterd/src/glusterd-volume-ops.c    |    6 ++++
 4 files changed, 57 insertions(+), 0 deletions(-)
 create mode 100755 tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t

diff --git a/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t b/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t
new file mode 100755
index 0000000..5081c37
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0
+
+TEST kill_glusterd 2
+TEST ! $CLI_1 volume delete $V0
+
+cleanup;
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index 155add4..72d6b17 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -394,6 +394,35 @@ gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo)
 }
 
 gf_boolean_t
+glusterd_are_all_peers_up ()
+{
+        glusterd_peerinfo_t *peerinfo = NULL;
+        xlator_t            *this = NULL;
+        glusterd_conf_t     *conf = NULL;
+        gf_boolean_t         peers_up = _gf_false;
+
+        this = THIS;
+        GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+
+        conf = this->private;
+        GF_VALIDATE_OR_GOTO (this->name, conf, out);
+
+        rcu_read_lock ();
+        cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+                if (!peerinfo->connected) {
+                        rcu_read_unlock ();
+                        goto out;
+                }
+        }
+        rcu_read_unlock ();
+
+        peers_up = _gf_true;
+
+out:
+        return peers_up;
+}
+
+gf_boolean_t
 glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
                                struct cds_list_head *peers,
                                char **down_peerstr)
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
index bd30e33..9332cf2 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
@@ -43,6 +43,9 @@ char*
 gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo);
 
 gf_boolean_t
+glusterd_are_all_peers_up ();
+
+gf_boolean_t
 glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
                                struct cds_list_head *peers,
                                char **down_peerstr);
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 50870cc..aabcd34 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1747,6 +1747,12 @@ glusterd_op_stage_delete_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
+        if (!glusterd_are_all_peers_up ()) {
+                ret = -1;
+                snprintf (msg, sizeof(msg), "Some of the peers are down");
+                goto out;
+        }
+
         ret = 0;
 
 out:
-- 
1.7.1