Blob Blame History Raw
From f8c28163309870048415cf58668c06a6ba2d700d Mon Sep 17 00:00:00 2001
From: Avra Sengupta <asengupt@redhat.com>
Date: Thu, 14 May 2015 15:00:59 +0530
Subject: [PATCH 09/18] glusterd/shared_storage: Provide a volume set option to
 create and mount the shared storage

     Backport of http://review.gluster.org/#/c/10793/

Introducing a global volume set option(cluster.enable-shared-storage)
which helps create and set-up the shared storage meta volume.

gluster volume set all cluster.enable-shared-storage enable

On enabling this option, the system analyzes the number of peers
in the cluster, which are currently connected, and chooses three
such peers(including the node the command is issued from). From these
peers a volume(gluster_shared_storage) is created. Depending on the
number of peers available the volume is either a replica 3
volume (if there are 3 connected peers), or a replica 2 volume (if there
are 2 connected peers). "/var/run/gluster/ss_brick" serves as the
brick path on each node for the shared storage volume. We also mount
the shared storage at "/var/run/gluster/shared_storage" on all the nodes
in the cluster as part of enabling this option. If there is only one node
in the cluster, or if only one node is up, then the command will fail.

Once the volume is created and mounted, the maintenance of the
volume (adding bricks, removing bricks, etc.) is expected to
be the onus of the user.

On disabling the option, we provide the user a warning, and on
affirmation from the user we stop the shared storage volume, and unmount
it from all the nodes in the cluster.

gluster volume set all cluster.enable-shared-storage disable

Change-Id: Idd92d67b93f444244f99ede9f634ef18d2945dbc
BUG: 1223201
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/50103
Reviewed-by: Rajesh Joseph <rjoseph@redhat.com>
Tested-by: Rajesh Joseph <rjoseph@redhat.com>
---
 cli/src/cli-cmd-parser.c                           |  56 ++++--
 cli/src/cli-cmd-volume.c                           |  51 +++++-
 cli/src/cli-cmd.h                                  |   2 +
 cli/src/cli.h                                      |   4 +-
 extras/hook-scripts/set/post/Makefile.am           |   2 +-
 .../set/post/S32gluster_enable_shared_storage.sh   | 124 +++++++++++++
 glusterfs.spec.in                                  |   4 +
 libglusterfs/src/globals.h                         |   2 +
 xlators/mgmt/glusterd/src/glusterd-hooks.c         |  40 ++++-
 xlators/mgmt/glusterd/src/glusterd-op-sm.c         | 196 ++++++++++++++++++++-
 xlators/mgmt/glusterd/src/glusterd-op-sm.h         |   4 +
 xlators/mgmt/glusterd/src/glusterd-rpc-ops.c       |   9 +
 xlators/mgmt/glusterd/src/glusterd-sm.c            |  12 ++
 xlators/mgmt/glusterd/src/glusterd-snapshot.c      |   6 +-
 xlators/mgmt/glusterd/src/glusterd-utils.c         |  31 ++++
 xlators/mgmt/glusterd/src/glusterd-utils.h         |   2 +
 xlators/mgmt/glusterd/src/glusterd-volume-set.c    |   6 +
 xlators/mgmt/glusterd/src/glusterd.c               | 107 +++++++----
 xlators/mgmt/glusterd/src/glusterd.h               |   3 +
 19 files changed, 588 insertions(+), 73 deletions(-)
 create mode 100755 extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh

diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 2390822..caa3191 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1406,17 +1406,19 @@ out:
 }
 
 int32_t
-cli_cmd_volume_set_parse (const char **words, int wordcount, dict_t **options,
-                          char **op_errstr)
+cli_cmd_volume_set_parse (struct cli_state *state, const char **words,
+                          int wordcount, dict_t **options, char **op_errstr)
 {
-        dict_t                  *dict = NULL;
-        char                    *volname = NULL;
-        int                     ret = -1;
-        int                     count = 0;
-        char                    *key = NULL;
-        char                    *value = NULL;
-        int                     i = 0;
-        char                    str[50] = {0,};
+        dict_t                 *dict      = NULL;
+        char                   *volname   = NULL;
+        int                     ret       = -1;
+        int                     count     = 0;
+        char                   *key       = NULL;
+        char                   *value     = NULL;
+        int                     i         = 0;
+        char                    str[50]   = {0,};
+        const char             *question  = NULL;
+        gf_answer_t             answer    = GF_ANSWER_NO;
 
         GF_ASSERT (words);
         GF_ASSERT (options);
@@ -1438,6 +1440,22 @@ cli_cmd_volume_set_parse (const char **words, int wordcount, dict_t **options,
         if (ret)
                 goto out;
 
+        if (!strcmp (volname, "all")) {
+                ret = dict_set_str (dict, "globalname", "All");
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "dict set on global key failed.");
+                        goto out;
+                }
+
+                ret = dict_set_int32 (dict, "hold_global_locks", _gf_true);
+                if (ret) {
+                        gf_log (THIS->name, GF_LOG_ERROR,
+                                "dict set on global key failed.");
+                        goto out;
+                }
+        }
+
         if ((!strcmp (volname, "help") || !strcmp (volname, "help-xml"))
             && wordcount == 3 ) {
                 ret = dict_set_str (dict, volname, volname);
@@ -1507,6 +1525,24 @@ cli_cmd_volume_set_parse (const char **words, int wordcount, dict_t **options,
 
                 if (ret)
                         goto out;
+
+                if ((!strcmp (key, "cluster.enable-shared-storage")) &&
+                    (!strcmp (value, "disable"))) {
+                        question = "Disabling cluster.enable-shared-storage "
+                                   "will delete the shared storage volume"
+                                   "(gluster_shared_storage), which is used "
+                                   "by snapshot scheduler, geo-replication "
+                                   "and NFS-Ganesha. Do you still want to "
+                                   "continue?";
+                        answer = cli_cmd_get_confirmation (state, question);
+                        if (GF_ANSWER_NO == answer) {
+                                gf_log ("cli", GF_LOG_ERROR, "Operation "
+                                        "cancelled, exiting");
+                                *op_errstr = gf_strdup ("Aborted by user.");
+                                ret = -1;
+                                goto out;
+                        }
+                }
         }
 
         ret = dict_set_int32 (dict, "count", wordcount-3);
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index da6603c..6dd3058 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -286,22 +286,28 @@ cli_cmd_volume_delete_cbk (struct cli_state *state, struct cli_cmd_word *word,
                 goto out;
         }
 
-        answer = cli_cmd_get_confirmation (state, question);
-
-        if (GF_ANSWER_NO == answer) {
-                ret = 0;
-                goto out;
-        }
-
         volname = (char *)words[2];
 
         ret = dict_set_str (dict, "volname", volname);
-
         if (ret) {
                 gf_log (THIS->name, GF_LOG_WARNING, "dict set failed");
                 goto out;
         }
 
+        if (!strcmp (volname, GLUSTER_SHARED_STORAGE)) {
+                question = "Deleting the shared storage volume"
+                           "(gluster_shared_storage), will affect features "
+                           "like snapshot scheduler, geo-replication "
+                           "and NFS-Ganesha. Do you still want to "
+                           "continue?";
+        }
+
+        answer = cli_cmd_get_confirmation (state, question);
+        if (GF_ANSWER_NO == answer) {
+                ret = 0;
+                goto out;
+        }
+
         CLI_LOCAL_INIT (local, words, frame, dict);
 
         if (proc->fn) {
@@ -473,6 +479,14 @@ cli_cmd_volume_stop_cbk (struct cli_state *state, struct cli_cmd_word *word,
                 goto out;
         }
 
+        if (!strcmp (volname, GLUSTER_SHARED_STORAGE)) {
+                question = "Stopping the shared storage volume"
+                           "(gluster_shared_storage), will affect features "
+                           "like snapshot scheduler, geo-replication "
+                           "and NFS-Ganesha. Do you still want to "
+                           "continue?";
+        }
+
         if (wordcount == 4) {
                 if (!strcmp("force", words[3])) {
                         flags |= GF_CLI_FLAG_OP_FORCE;
@@ -483,6 +497,7 @@ cli_cmd_volume_stop_cbk (struct cli_state *state, struct cli_cmd_word *word,
                         goto out;
                 }
         }
+
         ret = dict_set_int32 (dict, "flags", flags);
         if (ret) {
                 gf_log (THIS->name, GF_LOG_ERROR,
@@ -732,7 +747,8 @@ cli_cmd_volume_set_cbk (struct cli_state *state, struct cli_cmd_word *word,
         if (!frame)
                 goto out;
 
-        ret = cli_cmd_volume_set_parse (words, wordcount, &options, &op_errstr);
+        ret = cli_cmd_volume_set_parse (state, words, wordcount,
+                                        &options, &op_errstr);
         if (ret) {
                 if (op_errstr) {
                     cli_err ("%s", op_errstr);
@@ -1612,6 +1628,7 @@ cli_cmd_volume_remove_brick_cbk (struct cli_state *state,
         int                     parse_error = 0;
         int                     need_question = 0;
         cli_local_t             *local = NULL;
+        char                    *volname = NULL;
 
         const char *question = "Removing brick(s) can result in data loss. "
                                "Do you want to Continue?";
@@ -1628,6 +1645,22 @@ cli_cmd_volume_remove_brick_cbk (struct cli_state *state,
                 goto out;
         }
 
+        ret = dict_get_str (options, "volname", &volname);
+        if (ret || !volname) {
+                gf_log ("cli", GF_LOG_ERROR, "Failed to fetch volname");
+                ret = -1;
+                goto out;
+        }
+
+        if (!strcmp (volname, GLUSTER_SHARED_STORAGE)) {
+                question = "Removing brick from the shared storage volume"
+                           "(gluster_shared_storage), will affect features "
+                           "like snapshot scheduler, geo-replication "
+                           "and NFS-Ganesha. Do you still want to "
+                           "continue?";
+                need_question = _gf_true;
+        }
+
         if (!(state->mode & GLUSTER_MODE_SCRIPT) && need_question) {
                 /* we need to ask question only in case of 'commit or force' */
                 answer = cli_cmd_get_confirmation (state, question);
diff --git a/cli/src/cli-cmd.h b/cli/src/cli-cmd.h
index 94fa3e9..3a689c4 100644
--- a/cli/src/cli-cmd.h
+++ b/cli/src/cli-cmd.h
@@ -20,6 +20,8 @@
 #include "cli.h"
 #include "list.h"
 
+#define GLUSTER_SHARED_STORAGE      "gluster_shared_storage"
+
 #define CLI_LOCAL_INIT(local, words, frame, dictionary) \
         do {                                                 \
                 local = cli_local_get ();                    \
diff --git a/cli/src/cli.h b/cli/src/cli.h
index c9283b5..6c0fbce 100644
--- a/cli/src/cli.h
+++ b/cli/src/cli.h
@@ -248,8 +248,8 @@ int32_t
 cli_cmd_bitrot_parse (const char **words, int wordcount, dict_t **opt);
 
 int32_t
-cli_cmd_volume_set_parse (const char **words, int wordcount,
-                          dict_t **options, char **op_errstr);
+cli_cmd_volume_set_parse (struct cli_state *state, const char **words,
+                          int wordcount, dict_t **options, char **op_errstr);
 int32_t
 cli_cmd_ganesha_parse (struct cli_state *state, const char **words,
                        int wordcount, dict_t **options, char **op_errstr);
diff --git a/extras/hook-scripts/set/post/Makefile.am b/extras/hook-scripts/set/post/Makefile.am
index 3ec25d9..99dfaa3 100644
--- a/extras/hook-scripts/set/post/Makefile.am
+++ b/extras/hook-scripts/set/post/Makefile.am
@@ -1 +1 @@
-EXTRA_DIST = S30samba-set.sh S31ganesha-set.sh
+EXTRA_DIST = S30samba-set.sh S31ganesha-set.sh S32gluster_enable_shared_storage.sh
diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
new file mode 100755
index 0000000..28fa0e5
--- /dev/null
+++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+key=`echo $3 | cut -d '=' -f 1`
+val=`echo $3 | cut -d '=' -f 2`
+if [ "$key" != "cluster.enable-shared-storage" ]; then
+    exit;
+fi
+if [ "$val" != 'enable' ]; then
+    if [ "$val" != 'disable' ]; then
+        exit;
+    fi
+fi
+
+option=$val
+
+key_val_pair1=`echo $4 | cut -d ',' -f 1`
+key_val_pair2=`echo $4 | cut -d ',' -f 2`
+
+key=`echo $key_val_pair1 | cut -d '=' -f 1`
+val=`echo $key_val_pair1 | cut -d '=' -f 2`
+if [ "$key" != "is_originator" ]; then
+    exit;
+fi
+is_originator=$val;
+
+key=`echo $key_val_pair2 | cut -d '=' -f 1`
+val=`echo $key_val_pair2 | cut -d '=' -f 2`
+if [ "$key" != "local_node_hostname" ]; then
+    exit;
+fi
+local_node_hostname=$val;
+
+# Read gluster peer status to find the peers
+# which are in 'Peer in Cluster' mode and
+# are connected.
+
+number_of_connected_peers=0
+while read -r line
+do
+    # Already got two connected peers. Including the current node
+    # we have 3 peers which is enough to create a shared storage
+    # with replica 3
+    if [ "$number_of_connected_peers" == "2" ]; then
+        break;
+    fi
+
+    key=`echo $line | cut -d ':' -f 1`
+    if [ "$key" == "Hostname" ]; then
+        hostname=`echo $line | cut -d ':' -f 2 | xargs`
+    fi
+
+    if [ "$key" == "State" ]; then
+        peer_state=`echo $line | cut -d ':' -f 2 | cut -d '(' -f 1 | xargs`
+        conn_state=`echo $line | cut -d '(' -f 2 | cut -d ')' -f 1 | xargs`
+
+        if [ "$peer_state" == "Peer in Cluster" ]; then
+            if [ "$conn_state" == "Connected" ]; then
+                ((number_of_connected_peers++))
+                connected_peer[$number_of_connected_peers]=$hostname
+            fi
+        fi
+    fi
+
+done < <(gluster peer status)
+
+# Include current node in connected peer list
+((number_of_connected_peers++))
+connected_peer[$number_of_connected_peers]=$local_node_hostname
+
+# forming the create vol command
+create_cmd="gluster --mode=script --wignore volume create \
+            gluster_shared_storage replica $number_of_connected_peers"
+
+# Adding the brick names in the command
+for i in "${connected_peer[@]}"
+do
+    create_cmd=$create_cmd" "$i:/var/run/gluster/ss_brick
+done
+
+if [ "$option" == "disable" ]; then
+    # Unmount the volume on all the nodes
+    umount /var/run/gluster/shared_storage
+fi
+
+if [ "$is_originator" == 1 ]; then
+    if [ "$option" == "enable" ]; then
+        # Create and start the volume
+        $create_cmd
+        gluster --mode=script --wignore volume start gluster_shared_storage
+    fi
+
+    if [ "$option" == "disable" ]; then
+        # Stop and delete the volume
+        gluster --mode=script --wignore volume stop gluster_shared_storage
+        gluster --mode=script --wignore volume delete gluster_shared_storage
+    fi
+fi
+
+function check_volume_status()
+{
+    status=`gluster volume info gluster_shared_storage  | grep Status | cut -d ':' -f 2 | xargs`
+    echo $status
+}
+
+mount_cmd="mount -t glusterfs "$local_node_hostname":/gluster_shared_storage \
+           /var/run/gluster/shared_storage"
+
+if [ "$option" == "enable" ]; then
+    retry=0;
+    # Wait for volume to start before mounting
+    status=$(check_volume_status)
+    while [ "$status" != "Started" ]; do
+        sleep 5;
+        ((retry++))
+        if [ "$retry" == 3 ]; then
+            break;
+        fi
+        status = check_volume_status;
+    done
+    # Mount the volume on all the nodes
+    umount /var/run/gluster/shared_storage
+    mkdir -p /var/run/gluster/shared_storage
+    $mount_cmd
+fi
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 0ec8201..a5a884f 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1115,6 +1115,7 @@ fi
 %config %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
 %config %{_sharedstatedir}/glusterd/hooks/1/set/post/S30samba-set.sh
 %config %{_sharedstatedir}/glusterd/hooks/1/set/post/S31ganesha-set.sh
+%config %{_sharedstatedir}/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh
 %config %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
 %config %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
 %config %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
@@ -1208,6 +1209,9 @@ fi
 * Sat May 31 2015 Aravinda VK <avishwan@redhat.com>
 - Added stop-all-gluster-processes.sh in glusterfs-server section (#1225331)
 
+* Tue May 19 2015 Avra Sengupta <asengupt@redhat.com>
+- Added S32gluster_enable_shared_storage.sh as volume set hook script (#1222013)
+
 * Fri May 29 2015 Anand Nekkunti <anekkunt@redhat.com>
 - glusterd.socket file cleanup during post run upgrade (#1222869)
 
diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h
index b9eaf8a..ed1bb7c 100644
--- a/libglusterfs/src/globals.h
+++ b/libglusterfs/src/globals.h
@@ -43,6 +43,8 @@
 
 #define GD_OP_VERSION_3_7_0    30700 /* Op-version for GlusterFS 3.7.0 */
 
+#define GD_OP_VERSION_3_7_1    30701 /* Op-version for GlusterFS 3.7.1 */
+
 #define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_3_6_0
 
 #include "xlator.h"
diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.c b/xlators/mgmt/glusterd/src/glusterd-hooks.c
index 569f274..3e3f189 100644
--- a/xlators/mgmt/glusterd/src/glusterd-hooks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-hooks.c
@@ -162,6 +162,34 @@ glusterd_hooks_add_hooks_version (runner_t* runner)
         runner_argprintf (runner, "--version=%d", GLUSTERD_HOOK_VER);
 }
 
+static void
+glusterd_hooks_add_custom_args (dict_t *dict, runner_t *runner)
+{
+        char      *hooks_args     = NULL;
+        int32_t    ret            = -1;
+        xlator_t  *this           = NULL;
+
+        this = THIS;
+        GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+        GF_VALIDATE_OR_GOTO (this->name, dict, out);
+        GF_VALIDATE_OR_GOTO (this->name, runner, out);
+
+        ret = dict_get_str (dict, "hooks_args", &hooks_args);
+        if (ret)
+                gf_log (this->name, GF_LOG_DEBUG,
+                        "No Hooks Arguments.");
+        else
+                gf_log (this->name, GF_LOG_DEBUG,
+                        "Hooks Args = %s", hooks_args);
+
+        if (hooks_args)
+                runner_argprintf (runner, "%s", hooks_args);
+
+out:
+        return;
+}
+
+
 int
 glusterd_hooks_set_volume_args (dict_t *dict, runner_t *runner)
 {
@@ -196,6 +224,8 @@ glusterd_hooks_set_volume_args (dict_t *dict, runner_t *runner)
                 runner_argprintf (runner, "%s=%s", key, value);
         }
 
+        glusterd_hooks_add_custom_args (dict, runner);
+
         ret = 0;
 out:
         return ret;
@@ -263,15 +293,7 @@ glusterd_hooks_add_op_args (runner_t *runner, glusterd_op_t op,
                         break;
 
                 case GD_OP_GSYNC_CREATE:
-                        ret = dict_get_str (op_ctx, "hooks_args", &hooks_args);
-                        if (ret)
-                                gf_log ("", GF_LOG_DEBUG,
-                                        "No Hooks Arguments.");
-                        else
-                                gf_log ("", GF_LOG_DEBUG,
-                                        "Hooks Args = %s", hooks_args);
-                        if (hooks_args)
-                                runner_argprintf (runner, "%s", hooks_args);
+                        glusterd_hooks_add_custom_args (op_ctx, runner);
                         break;
 
                 case GD_OP_ADD_BRICK:
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 326968e..9b9684e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -55,11 +55,36 @@
 #include <signal.h>
 #include <sys/wait.h>
 
+extern char ss_brick_path[PATH_MAX];
+extern char local_node_hostname[PATH_MAX];
+static int
+glusterd_set_shared_storage (dict_t *dict, char *key, char *value,
+                             char **op_errstr);
+
+/* Valid options for all volumes to be listed in the *
+ * valid_all_vol_opts table. To add newer options to *
+ * all volumes, we can just add more entries to this *
+ * table                                             *
+ */
+glusterd_all_vol_opts   valid_all_vol_opts[] = {
+        { GLUSTERD_QUORUM_RATIO_KEY },
+        { GLUSTERD_SHARED_STORAGE_KEY },
+        { NULL },
+};
+
 #define ALL_VOLUME_OPTION_CHECK(volname, key, ret, op_errstr, label)           \
         do {                                                                   \
-                gf_boolean_t    _all = !strcmp ("all", volname);               \
-                gf_boolean_t    _ratio = !strcmp (key,                         \
-                                                  GLUSTERD_QUORUM_RATIO_KEY);  \
+                gf_boolean_t    _all   = !strcmp ("all", volname);             \
+                gf_boolean_t    _ratio = _gf_false;                            \
+                int32_t         i      = 0;                                    \
+                                                                               \
+                for (i = 0; valid_all_vol_opts[i].option; i++) {               \
+                        if (!strcmp (key, valid_all_vol_opts[i].option)) {     \
+                                _ratio = _gf_true;                             \
+                                break;                                         \
+                        }                                                      \
+                }                                                              \
+                                                                               \
                 if (_all && !_ratio) {                                         \
                         ret = -1;                                              \
                         *op_errstr = gf_strdup ("Not a valid option for all "  \
@@ -682,6 +707,71 @@ out:
 }
 
 static int
+glusterd_validate_shared_storage (char *key, char *value, char *errstr)
+{
+        int32_t       ret      = -1;
+        int32_t       exists   = -1;
+        int32_t       count    = -1;
+        xlator_t     *this     = NULL;
+
+        this = THIS;
+        GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+        GF_VALIDATE_OR_GOTO (this->name, key, out);
+        GF_VALIDATE_OR_GOTO (this->name, value, out);
+        GF_VALIDATE_OR_GOTO (this->name, errstr, out);
+
+        ret = 0;
+
+        if (strcmp (key, GLUSTERD_SHARED_STORAGE_KEY)) {
+                goto out;
+        }
+
+        if ((strcmp (value, "enable")) &&
+            (strcmp (value, "disable"))) {
+                snprintf (errstr, PATH_MAX,
+                          "Invalid option(%s). Valid options "
+                          "are 'enable' and 'disable'", value);
+                gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+                ret = -1;
+                goto out;
+        }
+
+        if (strcmp (value, "enable")) {
+                goto out;
+        }
+
+        exists = glusterd_check_volume_exists (GLUSTER_SHARED_STORAGE);
+        if (exists) {
+                snprintf (errstr, PATH_MAX,
+                          "Shared storage volume("GLUSTER_SHARED_STORAGE
+                          ") already exists.");
+                gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+                ret = -1;
+                goto out;
+        }
+
+        ret = glusterd_count_connected_peers (&count);
+        if (ret) {
+                snprintf (errstr, PATH_MAX,
+                          "Failed to calculate number of connected peers.");
+                gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+                goto out;
+        }
+
+        if (count <= 1) {
+                snprintf (errstr, PATH_MAX,
+                          "More than one node should "
+                          "be up/present in the cluster to enable this option");
+                gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+                ret = -1;
+                goto out;
+        }
+
+out:
+        return ret;
+}
+
+static int
 glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
 {
         int                             ret                     = -1;
@@ -696,7 +786,7 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
         int                             trash_path_len          = 0;
         int                             count                   = 0;
         int                             dict_count              = 0;
-        char                            errstr[2048]            = {0, };
+        char                            errstr[PATH_MAX]        = {0, };
         glusterd_volinfo_t              *volinfo                = NULL;
         glusterd_brickinfo_t            *brickinfo              = NULL;
         dict_t                          *val_dict               = NULL;
@@ -1000,6 +1090,14 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
                         }
                 }
 
+                ret = glusterd_validate_shared_storage (key, value, errstr);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Failed to validate shared "
+                                "storage volume options");
+                        goto out;
+                }
+
                 if (!strcmp(key, "features.trash-dir") && trash_enabled) {
                         if (strchr (value, '/')) {
                                 snprintf (errstr, sizeof (errstr),
@@ -1918,7 +2016,8 @@ out:
 }
 
 static int
-glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict)
+glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict,
+                                    char **op_errstr)
 {
         char            *key            = NULL;
         char            *key_fixed      = NULL;
@@ -1942,6 +2041,7 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict)
                         "invalid key,value pair in 'volume set'");
                 goto out;
         }
+
         ret = glusterd_check_option_exists (key, &key_fixed);
         if (ret <= 0) {
                 gf_log (this->name, GF_LOG_ERROR, "Invalid key %s", key);
@@ -1952,6 +2052,13 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict)
         if (key_fixed)
                 key = key_fixed;
 
+        ret = glusterd_set_shared_storage (dict, key, value, op_errstr);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to set shared storage option");
+                goto out;
+        }
+
         /* If the key is cluster.op-version, set conf->op_version to the value
          * if needed and save it.
          */
@@ -2030,6 +2137,82 @@ out:
 }
 
 static int
+glusterd_set_shared_storage (dict_t *dict, char *key, char *value,
+                             char **op_errstr)
+{
+        int32_t       ret                  = -1;
+        int32_t       exists               = -1;
+        int32_t       count                = -1;
+        char          hooks_args[PATH_MAX] = {0, };
+        char          errstr[PATH_MAX]     = {0, };
+        xlator_t     *this                 = NULL;
+
+        this = THIS;
+        GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+        GF_VALIDATE_OR_GOTO (this->name, dict, out);
+        GF_VALIDATE_OR_GOTO (this->name, key, out);
+        GF_VALIDATE_OR_GOTO (this->name, value, out);
+        GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
+
+        ret = 0;
+
+        if (strcmp (key, GLUSTERD_SHARED_STORAGE_KEY)) {
+                goto out;
+        }
+
+        /* Re-create the brick path so as to be *
+         * able to re-use it                    *
+         */
+        ret = recursive_rmdir (ss_brick_path);
+        if (ret) {
+                snprintf (errstr, PATH_MAX,
+                          "Failed to remove shared "
+                          "storage brick(%s). "
+                          "Reason: %s", ss_brick_path,
+                          strerror (errno));
+                gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+                ret = -1;
+                goto out;
+        }
+
+        ret = mkdir_p (ss_brick_path, 0777, _gf_true);
+        if (-1 == ret) {
+                snprintf (errstr, PATH_MAX,
+                          "Failed to create shared "
+                          "storage brick(%s). "
+                          "Reason: %s", ss_brick_path,
+                          strerror (errno));
+                gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+                goto out;
+        }
+
+        if (is_origin_glusterd (dict)) {
+                snprintf(hooks_args, sizeof(hooks_args),
+                         "is_originator=1,local_node_hostname=%s",
+                         local_node_hostname);
+        } else {
+                snprintf(hooks_args, sizeof(hooks_args),
+                         "is_originator=0,local_node_hostname=%s",
+                         local_node_hostname);
+        }
+
+        ret = dict_set_dynstr_with_alloc (dict, "hooks_args", hooks_args);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to set"
+                        " hooks_args in dict.");
+                goto out;
+        }
+
+out:
+        if (ret && strlen(errstr)) {
+                *op_errstr = gf_strdup (errstr);
+        }
+
+        return ret;
+}
+
+
+static int
 glusterd_op_set_volume (dict_t *dict, char **errstr)
 {
         int                                      ret = 0;
@@ -2083,7 +2266,8 @@ glusterd_op_set_volume (dict_t *dict, char **errstr)
         }
 
         if (strcasecmp (volname, "all") == 0) {
-                ret = glusterd_op_set_all_volume_options (this, dict);
+                ret = glusterd_op_set_all_volume_options (this, dict,
+                                                          &op_errstr);
                 goto out;
         }
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 6a1baa4..368bb04 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -168,6 +168,10 @@ typedef enum cli_cmd_type_ {
         ALL_HEAL_XL,
  } cli_cmd_type;
 
+typedef struct glusterd_all_volume_options {
+        char          *option;
+} glusterd_all_vol_opts;
+
 int
 glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
                           glusterd_op_sm_event_t **new_event);
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 415d04f..dcd257c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -1472,6 +1472,15 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
                 goto out;
         }
 
+        ret = dict_set_dynstr_with_alloc (peer_data,
+                                          "hostname_in_cluster",
+                                          peerinfo->hostname);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Unable to add hostname of the peer");
+                goto out;
+        }
+
         if (priv->op_version >= GD_OP_VERSION_3_6_0) {
                 ret = glusterd_add_missed_snaps_to_export_dict (peer_data);
                 if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index f8228b0..0d8654a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -38,6 +38,8 @@
 #include "glusterd-snapshot-utils.h"
 #include "glusterd-server-quorum.h"
 
+char local_node_hostname[PATH_MAX] = {0, };
+
 static struct cds_list_head gd_friend_sm_queue;
 
 static  char *glusterd_friend_sm_state_names[] = {
@@ -733,6 +735,7 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
         int32_t                         op_ret = -1;
         int32_t                         op_errno = 0;
         xlator_t                       *this       = NULL;
+        char                           *hostname   = NULL;
 
         this = THIS;
         GF_ASSERT (this);
@@ -837,6 +840,15 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
 
         new_event->ctx = new_ev_ctx;
 
+        ret = dict_get_str (ev_ctx->vols, "hostname_in_cluster",
+                            &hostname);
+        if (ret || !hostname) {
+                gf_log (this->name, GF_LOG_DEBUG,
+                        "Unable to fetch local hostname from peer");
+        } else
+                strncpy (local_node_hostname, hostname,
+                         sizeof(local_node_hostname) - 1);
+
         glusterd_friend_sm_inject_event (new_event);
         new_event = NULL;
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index fec7494..e5a7cac 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -66,7 +66,7 @@
 
 #include "lvm-defaults.h"
 
-char snap_mount_folder[PATH_MAX];
+char snap_mount_dir[PATH_MAX];
 struct snap_create_args_ {
         xlator_t             *this;
         dict_t               *dict;
@@ -4558,7 +4558,7 @@ glusterd_snap_brick_create (glusterd_volinfo_t *snap_volinfo,
         GF_ASSERT (brickinfo);
 
         snprintf (snap_brick_mount_path, sizeof (snap_brick_mount_path),
-                  "%s/%s/brick%d",  snap_mount_folder, snap_volinfo->volname,
+                  "%s/%s/brick%d",  snap_mount_dir, snap_volinfo->volname,
                   brick_count + 1);
 
         ret = mkdir_p (snap_brick_mount_path, 0777, _gf_true);
@@ -4748,7 +4748,7 @@ glusterd_add_brick_to_snap_volume (dict_t *dict, dict_t *rsp_dict,
          * <snap-uuid>/<original-brick#>/snap-brick-dir *
          */
         snprintf (snap_brick_path, sizeof(snap_brick_path),
-                  "%s/%s/brick%d%s", snap_mount_folder,
+                  "%s/%s/brick%d%s", snap_mount_dir,
                   snap_vol->volname, brick_count+1,
                   snap_brick_dir);
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 86b22f8..0721110 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -9763,3 +9763,34 @@ glusterd_disallow_op_for_tier (glusterd_volinfo_t *volinfo, glusterd_op_t op,
 out:
         return ret;
 }
+
+int32_t
+glusterd_count_connected_peers (int32_t *count)
+{
+        glusterd_peerinfo_t  *peerinfo  = NULL;
+        glusterd_conf_t      *conf      = NULL;
+        int32_t               ret       = -1;
+        xlator_t             *this      = NULL;
+
+        this = THIS;
+        GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+        conf = this->private;
+        GF_VALIDATE_OR_GOTO (this->name, conf, out);
+        GF_VALIDATE_OR_GOTO (this->name, count, out);
+
+        *count = 1;
+
+        rcu_read_lock ();
+        cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+                /* Find peer who is connected and is a friend */
+                if ((peerinfo->connected) &&
+                     (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
+                        (*count)++;
+                }
+        }
+        rcu_read_unlock ();
+
+        ret = 0;
+out:
+        return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 246d3ed..d617f60 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -663,5 +663,7 @@ glusterd_defrag_rpc_put (glusterd_defrag_info_t *defrag);
 int
 glusterd_disallow_op_for_tier (glusterd_volinfo_t *volinfo, glusterd_op_t op,
                                int cmd);
+int32_t
+glusterd_count_connected_peers (int32_t *count);
 
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 02d59ed..b8ca6be 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1740,6 +1740,12 @@ struct volopt_map_entry glusterd_volopt_map[] = {
           .voltype     = "features/trash",
           .op_version  = GD_OP_VERSION_3_7_0,
         },
+        { .key         = GLUSTERD_SHARED_STORAGE_KEY,
+          .voltype     = "mgmt/glusterd",
+          .value       = "disable",
+          .type        = GLOBAL_DOC,
+          .op_version  = GD_OP_VERSION_3_7_1,
+        },
 
 #if USE_GFDB /* no GFDB means tiering is disabled */
         /* tier translator - global tunables */
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 3735c62..54a5fbd 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -66,7 +66,8 @@ extern struct rpcsvc_program gd_svc_cli_trusted_progs;
 extern struct rpc_clnt_program gd_brick_prog;
 extern struct rpcsvc_program glusterd_mgmt_hndsk_prog;
 
-extern char snap_mount_folder[PATH_MAX];
+extern char snap_mount_dir[PATH_MAX];
+char ss_brick_path[PATH_MAX];
 
 rpcsvc_cbk_program_t glusterd_cbk_prog = {
         .progname  = "Gluster Callback",
@@ -1119,18 +1120,18 @@ glusterd_stop_uds_listener (xlator_t *this)
 }
 
 static int
-glusterd_init_snap_folder (xlator_t *this)
+glusterd_find_correct_var_run_dir (xlator_t *this, char *var_run_dir)
 {
         int             ret = -1;
         struct stat     buf = {0,};
 
-        GF_ASSERT (this);
+        GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+        GF_VALIDATE_OR_GOTO (this->name, var_run_dir, out);
 
-        /* Snapshot volumes are mounted under /var/run/gluster/snaps folder.
-         * But /var/run is normally a symbolic link to /run folder, which
+        /* /var/run is normally a symbolic link to /run dir, which
          * creates problems as the entry point in the mtab for the mount point
          * and glusterd maintained entry point will be different. Therefore
-         * identify the correct run folder and use it for snap volume mounting.
+         * identify the correct run dir and use it
          */
         ret = lstat (GLUSTERD_VAR_RUN_DIR, &buf);
         if (ret != 0) {
@@ -1140,20 +1141,38 @@ glusterd_init_snap_folder (xlator_t *this)
                 goto out;
         }
 
-        /* If /var/run is symlink then use /run folder */
+        /* If /var/run is symlink then use /run dir */
         if (S_ISLNK (buf.st_mode)) {
-                strcpy (snap_mount_folder, GLUSTERD_RUN_DIR);
+                strcpy (var_run_dir, GLUSTERD_RUN_DIR);
         } else {
-                strcpy (snap_mount_folder, GLUSTERD_VAR_RUN_DIR);
+                strcpy (var_run_dir, GLUSTERD_VAR_RUN_DIR);
         }
 
-        strcat (snap_mount_folder, GLUSTERD_DEFAULT_SNAPS_BRICK_DIR);
+        ret = 0;
+out:
+        return ret;
+}
+
+static int
+glusterd_init_var_run_dirs (xlator_t *this, char *var_run_dir,
+                            char *dir_to_be_created)
+{
+        int             ret                = -1;
+        struct stat     buf                = {0,};
+        char            abs_path[PATH_MAX] = {0, };
+
+        GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+        GF_VALIDATE_OR_GOTO (this->name, var_run_dir, out);
+        GF_VALIDATE_OR_GOTO (this->name, dir_to_be_created, out);
+
+        snprintf (abs_path, sizeof(abs_path), "%s%s",
+                  var_run_dir, dir_to_be_created);
 
-        ret = stat (snap_mount_folder, &buf);
+        ret = stat (abs_path, &buf);
         if ((ret != 0) && (ENOENT != errno)) {
                 gf_log (this->name, GF_LOG_ERROR,
                         "stat fails on %s, exiting. (errno = %d)",
-                        snap_mount_folder, errno);
+                        abs_path, errno);
                 ret = -1;
                 goto out;
         }
@@ -1161,19 +1180,19 @@ glusterd_init_snap_folder (xlator_t *this)
         if ((!ret) && (!S_ISDIR(buf.st_mode))) {
                 gf_log (this->name, GF_LOG_CRITICAL,
                         "Provided snap path %s is not a directory,"
-                        "exiting", snap_mount_folder);
+                        "exiting", abs_path);
                 ret = -1;
                 goto out;
         }
 
         if ((-1 == ret) && (ENOENT == errno)) {
-                /* Create missing folders */
-                ret = mkdir_p (snap_mount_folder, 0777, _gf_true);
+                /* Create missing dirs */
+                ret = mkdir_p (abs_path, 0777, _gf_true);
 
                 if (-1 == ret) {
                         gf_log (this->name, GF_LOG_CRITICAL,
                                 "Unable to create directory %s"
-                                " ,errno = %d", snap_mount_folder, errno);
+                                " ,errno = %d", abs_path, errno);
                         goto out;
                 }
         }
@@ -1250,21 +1269,22 @@ out:
 int
 init (xlator_t *this)
 {
-        int32_t            ret               = -1;
-        rpcsvc_t          *rpc               = NULL;
-        rpcsvc_t          *uds_rpc           = NULL;
-        glusterd_conf_t   *conf              = NULL;
-        data_t            *dir_data          = NULL;
-        struct stat        buf               = {0,};
-        char               storedir [PATH_MAX] = {0,};
-        char               workdir [PATH_MAX] = {0,};
-        char               cmd_log_filename [PATH_MAX] = {0,};
-        int                first_time        = 0;
-        char              *mountbroker_root  = NULL;
-        int                i                 = 0;
-        int                total_transport   = 0;
-        char              *valgrind_str      = NULL;
-        char              *transport_type    = NULL;
+        int32_t            ret                        = -1;
+        rpcsvc_t          *rpc                        = NULL;
+        rpcsvc_t          *uds_rpc                    = NULL;
+        glusterd_conf_t   *conf                       = NULL;
+        data_t            *dir_data                   = NULL;
+        struct stat        buf                        = {0,};
+        char               storedir[PATH_MAX]         = {0,};
+        char               workdir[PATH_MAX]          = {0,};
+        char               cmd_log_filename[PATH_MAX] = {0,};
+        int                first_time                 = 0;
+        char              *mountbroker_root           = NULL;
+        int                i                          = 0;
+        int                total_transport            = 0;
+        char              *valgrind_str               = NULL;
+        char              *transport_type             = NULL;
+        char               var_run_dir[PATH_MAX]      = {0,};
 
 #ifndef GF_DARWIN_HOST_OS
         {
@@ -1326,14 +1346,35 @@ init (xlator_t *this)
         gf_log (this->name, GF_LOG_INFO, "Using %s as working directory",
                 workdir);
 
-        ret = glusterd_init_snap_folder (this);
+        ret = glusterd_find_correct_var_run_dir (this, var_run_dir);
+        if (ret) {
+                gf_log (this->name, GF_LOG_CRITICAL, "Unable to find "
+                        "the correct var run dir");
+                exit (1);
+        }
+
+        ret = glusterd_init_var_run_dirs (this, var_run_dir,
+                                      GLUSTERD_DEFAULT_SNAPS_BRICK_DIR);
+        if (ret) {
+                gf_log (this->name, GF_LOG_CRITICAL, "Unable to create "
+                        "snap backend dir");
+                exit (1);
+        }
 
+        snprintf (snap_mount_dir, sizeof(snap_mount_dir), "%s%s",
+                  var_run_dir, GLUSTERD_DEFAULT_SNAPS_BRICK_DIR);
+
+        ret = glusterd_init_var_run_dirs (this, var_run_dir,
+                                      GLUSTER_SHARED_STORAGE_BRICK_DIR);
         if (ret) {
                 gf_log (this->name, GF_LOG_CRITICAL, "Unable to create "
-                        "snap backend folder");
+                        "shared storage brick");
                 exit (1);
         }
 
+        snprintf (ss_brick_path, sizeof(ss_brick_path), "%s%s",
+                  var_run_dir, GLUSTER_SHARED_STORAGE_BRICK_DIR);
+
         snprintf (cmd_log_filename, PATH_MAX, "%s/cmd_history.log",
                   DEFAULT_LOG_FILE_DIRECTORY);
         ret = gf_cmd_log_init (cmd_log_filename);
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index ab62952..5b5edfd 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -52,6 +52,8 @@
 #define GEO_CONF_MAX_OPT_VALS           6
 #define GLUSTERD_CREATE_HOOK_SCRIPT     "/hooks/1/gsync-create/post/" \
                                         "S56glusterd-geo-rep-create-post.sh"
+#define GLUSTER_SHARED_STORAGE          "gluster_shared_storage"
+#define GLUSTERD_SHARED_STORAGE_KEY     "cluster.enable-shared-storage"
 
 #define GANESHA_HA_CONF  CONFDIR "/ganesha-ha.conf"
 #define GLUSTERD_SNAPS_MAX_HARD_LIMIT 256
@@ -512,6 +514,7 @@ typedef enum {
 #define GLUSTERD_VOL_SNAP_DIR_PREFIX "snaps"
 
 #define GLUSTERD_DEFAULT_SNAPS_BRICK_DIR        "/gluster/snaps"
+#define GLUSTER_SHARED_STORAGE_BRICK_DIR        "/gluster/ss_brick"
 #define GLUSTERD_VAR_RUN_DIR                    "/var/run"
 #define GLUSTERD_RUN_DIR                        "/run"
 
-- 
1.9.3