From 80e2b34a8f87a3d4675f93e62c11f7ef296a9c08 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Thu, 9 Jun 2016 18:22:43 +0530
Subject: [PATCH 194/195] glusterd: fail volume delete if one of the nodes is down

Backport of http://review.gluster.org/14681

Deleting a volume on a cluster where one of the nodes is down is buggy:
once that node comes back up, it resyncs the deleted volume back into
the cluster. Until the soft-delete feature tracked in
http://review.gluster.org/12963 is available, this change adds a
safeguard that blocks the volume deletion.

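In outline, glusterd_op_stage_delete_volume () gains a guard built on a
new helper that walks the peer list under an RCU read lock. A condensed
sketch of the two pieces, taken from the hunks below (validation and
declaration details elided):

    /* glusterd-peer-utils.c: _gf_false if any known peer is disconnected */
    rcu_read_lock ();
    cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
            if (!peerinfo->connected) {
                    rcu_read_unlock ();
                    goto out;          /* peers_up stays _gf_false */
            }
    }
    rcu_read_unlock ();
    peers_up = _gf_true;

    /* glusterd-volume-ops.c: fail the stage op when a peer is down */
    if (!glusterd_are_all_peers_up ()) {
            ret = -1;
            snprintf (msg, sizeof (msg), "Some of the peers are down");
            goto out;
    }

With the guard in place, "gluster volume delete <VOLNAME>" run while a
peer is down is expected to fail with "Some of the peers are down"
rather than succeed and later be undone by the resync.
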
Change-Id: I9c13869c4a7e7a947f88842c6dc6f231c0eeda6c
BUG: 1344625
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/14681
Smoke: Gluster Build System <jenkins@build.gluster.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-on: https://code.engineering.redhat.com/gerrit/76322
---
 .../bug-1344407-volume-delete-on-node-down.t       |   19 +++++++++++++
 xlators/mgmt/glusterd/src/glusterd-peer-utils.c    |   29 ++++++++++++++++++++
 xlators/mgmt/glusterd/src/glusterd-peer-utils.h    |    3 ++
 xlators/mgmt/glusterd/src/glusterd-volume-ops.c    |    6 ++++
 4 files changed, 57 insertions(+), 0 deletions(-)
 create mode 100755 tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t

diff --git a/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t b/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t
new file mode 100755
index 0000000..5081c37
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0
+
+TEST kill_glusterd 2
+TEST ! $CLI_1 volume delete $V0
+
+cleanup;
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index 155add4..72d6b17 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -394,6 +394,35 @@ gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo)
 }
 
 gf_boolean_t
+glusterd_are_all_peers_up ()
+{
+        glusterd_peerinfo_t *peerinfo = NULL;
+        xlator_t            *this = NULL;
+        glusterd_conf_t     *conf = NULL;
+        gf_boolean_t         peers_up = _gf_false;
+
+        this = THIS;
+        GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+
+        conf = this->private;
+        GF_VALIDATE_OR_GOTO (this->name, conf, out);
+
+        rcu_read_lock ();
+        cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+                if (!peerinfo->connected) {
+                        rcu_read_unlock ();
+                        goto out;
+                }
+        }
+        rcu_read_unlock ();
+
+        peers_up = _gf_true;
+
+out:
+        return peers_up;
+}
+
+gf_boolean_t
 glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
                                struct cds_list_head *peers,
                                char **down_peerstr)
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
index bd30e33..9332cf2 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
@@ -43,6 +43,9 @@ char*
 gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo);
 
 gf_boolean_t
+glusterd_are_all_peers_up ();
+
+gf_boolean_t
 glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
                                struct cds_list_head *peers,
                                char **down_peerstr);
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 50870cc..aabcd34 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1747,6 +1747,12 @@ glusterd_op_stage_delete_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
+        if (!glusterd_are_all_peers_up ()) {
+                ret = -1;
+                snprintf (msg, sizeof(msg), "Some of the peers are down");
+                goto out;
+        }
+
         ret = 0;
 
 out:
-- 
1.7.1