From c45cba4e8959cc3224c293423fdc1f33d3e657c8 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Mon, 16 Oct 2017 14:24:29 +0530
Subject: [PATCH 50/74] Revert "glusterd: (storhaug) remove ganesha"
This reverts commit 843e1b04b554ab887ec656ae7b468bb93ee4e2f7.
Change-Id: I06b5450344c33f26da3d94b6f67051d41dfbba17
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
---
cli/src/cli-cmd-global.c | 54 ++
cli/src/cli-cmd-parser.c | 106 +++
cli/src/cli-cmd.c | 3 +-
cli/src/cli-rpc-ops.c | 79 ++
cli/src/cli.h | 3 +
xlators/mgmt/glusterd/src/Makefile.am | 4 +-
xlators/mgmt/glusterd/src/glusterd-errno.h | 2 +-
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 898 +++++++++++++++++++++
xlators/mgmt/glusterd/src/glusterd-handler.c | 77 ++
xlators/mgmt/glusterd/src/glusterd-messages.h | 8 +
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 47 ++
.../mgmt/glusterd/src/glusterd-snapshot-utils.c | 196 +++++
.../mgmt/glusterd/src/glusterd-snapshot-utils.h | 7 +
xlators/mgmt/glusterd/src/glusterd-snapshot.c | 96 +++
xlators/mgmt/glusterd/src/glusterd-store.h | 1 +
xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 34 +
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 6 +
xlators/mgmt/glusterd/src/glusterd.h | 16 +-
18 files changed, 1633 insertions(+), 4 deletions(-)
create mode 100644 xlators/mgmt/glusterd/src/glusterd-ganesha.c
diff --git a/cli/src/cli-cmd-global.c b/cli/src/cli-cmd-global.c
index 9873192..881506b 100644
--- a/cli/src/cli-cmd-global.c
+++ b/cli/src/cli-cmd-global.c
@@ -32,6 +32,8 @@ extern rpc_clnt_prog_t *cli_rpc_prog;
int
cli_cmd_global_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount);
+int cli_cmd_ganesha_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount);
int
cli_cmd_get_state_cbk (struct cli_state *state, struct cli_cmd_word *word,
const char **words, int wordcount);
@@ -46,6 +48,10 @@ struct cli_cmd global_cmds[] = {
cli_cmd_get_state_cbk,
"Get local state representation of mentioned daemon",
},
+ { "nfs-ganesha {enable| disable} ",
+ cli_cmd_ganesha_cbk,
+ "Enable/disable NFS-Ganesha support",
+ },
{NULL, NULL, NULL}
};
@@ -86,6 +92,54 @@ out:
}
+int cli_cmd_ganesha_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+
+{
+ int sent = 0;
+ int parse_error = 0;
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *options = NULL;
+ cli_local_t *local = NULL;
+ char *op_errstr = NULL;
+
+ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GANESHA];
+
+ frame = create_frame (THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
+ ret = cli_cmd_ganesha_parse (state, words, wordcount,
+ &options, &op_errstr);
+ if (ret) {
+ if (op_errstr) {
+ cli_err ("%s", op_errstr);
+ GF_FREE (op_errstr);
+ } else
+ cli_usage_out (word->pattern);
+ parse_error = 1;
+ goto out;
+ }
+
+ CLI_LOCAL_INIT (local, words, frame, options);
+
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, options);
+ }
+
+out:
+ if (ret) {
+ cli_cmd_sent_status_get (&sent);
+ if ((sent == 0) && (parse_error == 0))
+ cli_out ("Setting global option failed");
+ }
+
+ CLI_STACK_DESTROY (frame);
+ return ret;
+}
+
int
cli_cmd_get_state_cbk (struct cli_state *state, struct cli_cmd_word *word,
const char **words, int wordcount)
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 216e050..a4c601b 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -840,6 +840,112 @@ out:
return ret;
}
+/* Parsing global option for NFS-Ganesha config
+ * gluster nfs-ganesha enable/disable */
+
+int32_t
+cli_cmd_ganesha_parse (struct cli_state *state,
+ const char **words, int wordcount,
+ dict_t **options, char **op_errstr)
+{
+ dict_t *dict = NULL;
+ int ret = -1;
+ char *key = NULL;
+ char *value = NULL;
+ char *w = NULL;
+ char *opwords[] = { "enable", "disable", NULL };
+ const char *question = NULL;
+ gf_answer_t answer = GF_ANSWER_NO;
+
+
+ GF_ASSERT (words);
+ GF_ASSERT (options);
+
+ dict = dict_new ();
+
+ if (!dict)
+ goto out;
+
+ if (wordcount != 2)
+ goto out;
+
+ key = (char *) words[0];
+ value = (char *) words[1];
+
+ if (!key || !value) {
+ cli_out ("Usage : nfs-ganesha <enable/disable>");
+ ret = -1;
+ goto out;
+ }
+
+ ret = gf_strip_whitespace (value, strlen (value));
+ if (ret == -1)
+ goto out;
+
+ if (strcmp (key, "nfs-ganesha")) {
+ gf_asprintf (op_errstr, "Global option: error: ' %s '"
+ "is not a valid global option.", key);
+ ret = -1;
+ goto out;
+ }
+
+ w = str_getunamb (value, opwords);
+ if (!w) {
+ cli_out ("Invalid global option \n"
+ "Usage : nfs-ganesha <enable/disable>");
+ ret = -1;
+ goto out;
+ }
+
+ question = "Enabling NFS-Ganesha requires Gluster-NFS to be"
+ " disabled across the trusted pool. Do you "
+ "still want to continue?\n";
+
+ if (strcmp (value, "enable") == 0) {
+ answer = cli_cmd_get_confirmation (state, question);
+ if (GF_ANSWER_NO == answer) {
+ gf_log ("cli", GF_LOG_ERROR, "Global operation "
+ "cancelled, exiting");
+ ret = -1;
+ goto out;
+ }
+ }
+ cli_out ("This will take a few minutes to complete. Please wait ..");
+
+ ret = dict_set_str (dict, "key", key);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "dict set on key failed");
+ goto out;
+ }
+
+ ret = dict_set_str (dict, "value", value);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "dict set on value failed");
+ goto out;
+ }
+
+ ret = dict_set_str (dict, "globalname", "All");
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "dict set on global"
+ " key failed.");
+ goto out;
+ }
+
+ ret = dict_set_int32 (dict, "hold_global_locks", _gf_true);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "dict set on global key "
+ "failed.");
+ goto out;
+ }
+
+ *options = dict;
+out:
+ if (ret)
+ dict_unref (dict);
+
+ return ret;
+}
+
int32_t
cli_cmd_get_state_parse (struct cli_state *state,
const char **words, int wordcount,
diff --git a/cli/src/cli-cmd.c b/cli/src/cli-cmd.c
index 236009b..8a75041 100644
--- a/cli/src/cli-cmd.c
+++ b/cli/src/cli-cmd.c
@@ -369,7 +369,8 @@ cli_cmd_submit (struct rpc_clnt* rpc, void *req, call_frame_t *frame,
unsigned timeout = 0;
if ((GLUSTER_CLI_PROFILE_VOLUME == procnum) ||
- (GLUSTER_CLI_HEAL_VOLUME == procnum))
+ (GLUSTER_CLI_HEAL_VOLUME == procnum) ||
+ (GLUSTER_CLI_GANESHA == procnum))
timeout = cli_ten_minutes_timeout;
else
timeout = cli_default_conn_timeout;
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index eb1ca77..67e29a0 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -2232,6 +2232,60 @@ out:
return ret;
}
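+/* Callback for the GLUSTER_CLI_GANESHA RPC: decodes the response from
+ * glusterd and prints whether the nfs-ganesha global option change
+ * succeeded or failed. */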
+int
+gf_cli_ganesha_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf_cli_rsp rsp = {0,};
+ int ret = -1;
+ dict_t *dict = NULL;
+
+ GF_ASSERT (myframe);
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
+
+ gf_log ("cli", GF_LOG_DEBUG, "Received resp to ganesha");
+
+ dict = dict_new ();
+
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+ if (ret)
+ goto out;
+
+ if (rsp.op_ret) {
+ if (strcmp (rsp.op_errstr, ""))
+ cli_err ("nfs-ganesha: failed: %s", rsp.op_errstr);
+ else
+ cli_err ("nfs-ganesha: failed");
+ }
+
+ else {
+ cli_out("nfs-ganesha : success ");
+ }
+
+ ret = rsp.op_ret;
+
+out:
+ if (dict)
+ dict_unref (dict);
+ cli_cmd_broadcast_response (ret);
+ return ret;
+}
+
char *
is_server_debug_xlator (void *myframe)
{
@@ -4840,6 +4894,30 @@ out:
}
int32_t
+gf_cli_ganesha (call_frame_t *frame, xlator_t *this, void *data)
+{
+ gf_cli_req req = { {0,} } ;
+ int ret = 0;
+ dict_t *dict = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+
+ ret = cli_to_glusterd (&req, frame, gf_cli_ganesha_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_GANESHA, this, cli_rpc_prog,
+ NULL);
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int32_t
gf_cli_set_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
@@ -12008,6 +12086,7 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", gf_cli_sys_exec},
[GLUSTER_CLI_SNAP] = {"SNAP", gf_cli_snapshot},
[GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER VOLUME", gf_cli_barrier_volume},
+ [GLUSTER_CLI_GANESHA] = {"GANESHA", gf_cli_ganesha},
[GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", gf_cli_get_vol_opt},
[GLUSTER_CLI_BITROT] = {"BITROT", gf_cli_bitrot},
[GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", gf_cli_attach_tier},
diff --git a/cli/src/cli.h b/cli/src/cli.h
index 68dcb8c..c9bf93d 100644
--- a/cli/src/cli.h
+++ b/cli/src/cli.h
@@ -255,6 +255,9 @@ cli_cmd_bitrot_parse (const char **words, int wordcount, dict_t **opt);
int32_t
cli_cmd_volume_set_parse (struct cli_state *state, const char **words,
int wordcount, dict_t **options, char **op_errstr);
+int32_t
+cli_cmd_ganesha_parse (struct cli_state *state, const char **words,
+ int wordcount, dict_t **options, char **op_errstr);
int32_t
cli_cmd_get_state_parse (struct cli_state *state, const char **words,
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index 4858dee..23ebf37 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -5,7 +5,7 @@ glusterd_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-op-sm.c glusterd-utils.c glusterd-rpc-ops.c \
glusterd-store.c glusterd-handshake.c glusterd-pmap.c \
- glusterd-volgen.c glusterd-rebalance.c \
+ glusterd-volgen.c glusterd-rebalance.c glusterd-ganesha.c \
glusterd-quota.c glusterd-bitrot.c glusterd-geo-rep.c \
glusterd-replace-brick.c glusterd-log-ops.c glusterd-tier.c \
glusterd-volume-ops.c glusterd-brick-ops.c glusterd-mountbroker.c \
@@ -48,6 +48,8 @@ AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
-I$(CONTRIBDIR)/mount -I$(CONTRIBDIR)/userspace-rcu \
-DSBIN_DIR=\"$(sbindir)\" -DDATADIR=\"$(localstatedir)\" \
-DGSYNCD_PREFIX=\"$(GLUSTERFS_LIBEXECDIR)\" \
+ -DCONFDIR=\"$(localstatedir)/run/gluster/shared_storage/nfs-ganesha\" \
+ -DGANESHA_PREFIX=\"$(libexecdir)/ganesha\" \
-DSYNCDAEMON_COMPILE=$(SYNCDAEMON_COMPILE) $(XML_CPPFLAGS)
diff --git a/xlators/mgmt/glusterd/src/glusterd-errno.h b/xlators/mgmt/glusterd/src/glusterd-errno.h
index bfb56b5..3301e44 100644
--- a/xlators/mgmt/glusterd/src/glusterd-errno.h
+++ b/xlators/mgmt/glusterd/src/glusterd-errno.h
@@ -27,7 +27,7 @@ enum glusterd_op_errno {
EG_ISSNAP = 30813, /* Volume is a snap volume */
EG_GEOREPRUN = 30814, /* Geo-Replication is running */
EG_NOTTHINP = 30815, /* Bricks are not thinly provisioned */
- EG_NOGANESHA = 30816, /* obsolete ganesha is not enabled */
+ EG_NOGANESHA = 30816, /* Global nfs-ganesha is not enabled */
};
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
new file mode 100644
index 0000000..4346bad
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -0,0 +1,898 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+
+
+#include "common-utils.h"
+#include "glusterd.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-store.h"
+#include "glusterd-utils.h"
+#include "glusterd-nfs-svc.h"
+#include "glusterd-volgen.h"
+#include "glusterd-messages.h"
+#include "syscall.h"
+
+#include <ctype.h>
+
+int start_ganesha (char **op_errstr);
+
+
+typedef struct service_command {
+ char *binary;
+ char *service;
+ int (*action) (struct service_command *, char *);
+} service_command;
+
+/* parsing_ganesha_ha_conf allocates the returned string, which must be
+ * freed (GF_FREE) by the caller.
+ * Returns NULL on error or if the key is not found. */
+static char*
+parsing_ganesha_ha_conf(const char *key) {
+#define MAX_LINE 1024
+ char scratch[MAX_LINE * 2] = {0,};
+ char *value = NULL, *pointer = NULL, *end_pointer = NULL;
+ FILE *fp;
+
+ fp = fopen (GANESHA_HA_CONF, "r");
+ if (fp == NULL) {
+ gf_msg (THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_FILE_OP_FAILED, "couldn't open the file %s",
+ GANESHA_HA_CONF);
+ goto end_ret;
+ }
+ while ((pointer = fgets (scratch, MAX_LINE, fp)) != NULL) {
+ /* Read config file until we get matching "^[[:space:]]*key" */
+ if (*pointer == '#') {
+ continue;
+ }
+ while (isblank(*pointer)) {
+ pointer++;
+ }
+ if (strncmp (pointer, key, strlen (key))) {
+ continue;
+ }
+ pointer += strlen (key);
+ /* key found: if we fail to parse, we return an error
+ * rather than trying the next line.
+ * Assumption: the conf file is bash compatible, i.e. no
+ * spaces around the '=' */
+ if (*pointer != '=') {
+ gf_msg (THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_GET_CONFIG_INFO_FAILED,
+ "Parsing %s failed at key %s",
+ GANESHA_HA_CONF, key);
+ goto end_close;
+ }
+ pointer++; /* jump the '=' */
+
+ if (*pointer == '"' || *pointer == '\'') {
+ /* don't include the quote */
+ pointer++;
+ }
+ end_pointer = pointer;
+ /* stop at the next closing quote or blank/newline */
+ do {
+ end_pointer++;
+ } while (!(*end_pointer == '\'' || *end_pointer == '"' ||
+ isspace(*end_pointer) || *end_pointer == '\0'));
+ *end_pointer = '\0';
+
+ /* got it. copy it and return */
+ value = gf_strdup (pointer);
+ break;
+ }
+
+end_close:
+ fclose(fp);
+end_ret:
+ return value;
+}
+
+static int
+sc_systemctl_action (struct service_command *sc, char *command)
+{
+ runner_t runner = {0,};
+
+ runinit (&runner);
+ runner_add_args (&runner, sc->binary, command, sc->service, NULL);
+ return runner_run (&runner);
+}
+
+static int
+sc_service_action (struct service_command *sc, char *command)
+{
+ runner_t runner = {0,};
+
+ runinit (&runner);
+ runner_add_args (&runner, sc->binary, sc->service, command, NULL);
+ return runner_run (&runner);
+}
+
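+/* Pick the first service manager binary present on this node (systemctl,
+ * invoke-rc.d or service) and use it to run the given action on the
+ * nfs-ganesha service. */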
+static int
+manage_service (char *action)
+{
+ struct stat stbuf = {0,};
+ int i = 0;
+ int ret = 0;
+ struct service_command sc_list[] = {
+ { .binary = "/usr/bin/systemctl",
+ .service = "nfs-ganesha",
+ .action = sc_systemctl_action
+ },
+ { .binary = "/sbin/invoke-rc.d",
+ .service = "nfs-ganesha",
+ .action = sc_service_action
+ },
+ { .binary = "/sbin/service",
+ .service = "nfs-ganesha",
+ .action = sc_service_action
+ },
+ { .binary = NULL
+ }
+ };
+
+ while (sc_list[i].binary != NULL) {
+ ret = sys_stat (sc_list[i].binary, &stbuf);
+ if (ret == 0) {
+ gf_msg_debug (THIS->name, 0,
+ "%s found.", sc_list[i].binary);
+ if (strcmp (sc_list[i].binary, "/usr/bin/systemctl") == 0)
+ ret = sc_systemctl_action (&sc_list[i], action);
+ else
+ ret = sc_service_action (&sc_list[i], action);
+
+ return ret;
+ }
+ i++;
+ }
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_UNRECOGNIZED_SVC_MNGR,
+ "Could not %s NFS-Ganesha.Service manager for distro"
+ " not recognized.", action);
+ return ret;
+}
+
+/*
+ * Check if the cluster is a ganesha cluster or not *
+ */
+gf_boolean_t
+glusterd_is_ganesha_cluster () {
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t ret_bool = _gf_false;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO ("ganesha", this, out);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO (this->name, priv, out);
+
+ ret = dict_get_str_boolean (priv->opts,
+ GLUSTERD_STORE_KEY_GANESHA_GLOBAL,
+ _gf_false);
+ if (ret == _gf_true) {
+ ret_bool = _gf_true;
+ gf_msg_debug (this->name, 0,
+ "nfs-ganesha is enabled for the cluster");
+ } else
+ gf_msg_debug (this->name, 0,
+ "nfs-ganesha is disabled for the cluster");
+
+out:
+ return ret_bool;
+
+}
+
+/* Check if ganesha.enable is set to 'on', i.e. whether
+ * the particular volume is exported via NFS-Ganesha */
+gf_boolean_t
+glusterd_check_ganesha_export (glusterd_volinfo_t *volinfo) {
+
+ char *value = NULL;
+ gf_boolean_t is_exported = _gf_false;
+ int ret = 0;
+
+ ret = glusterd_volinfo_get (volinfo, "ganesha.enable", &value);
+ if ((ret == 0) && value) {
+ if (strcmp (value, "on") == 0) {
+ gf_msg_debug (THIS->name, 0, "ganesha.enable set"
+ " to %s", value);
+ is_exported = _gf_true;
+ }
+ }
+ return is_exported;
+}
+
+/* *
+ * The below function is called as part of the commit phase for the volume
+ * set option "ganesha.enable". If the value is "on", it creates the export
+ * configuration file and then exports the volume via a dbus command. In
+ * case of "off", the volume has already been unexported during the stage
+ * phase, so it only removes the conf file from shared storage.
+ */
+int
+glusterd_check_ganesha_cmd (char *key, char *value, char **errstr, dict_t *dict)
+{
+ int ret = 0;
+ char *volname = NULL;
+
+ GF_ASSERT (key);
+ GF_ASSERT (value);
+ GF_ASSERT (dict);
+
+ if ((strcmp (key, "ganesha.enable") == 0)) {
+ if ((strcmp (value, "on")) && (strcmp (value, "off"))) {
+ gf_asprintf (errstr, "Invalid value"
+ " for volume set command. Use on/off only.");
+ ret = -1;
+ goto out;
+ }
+ if (strcmp (value, "on") == 0) {
+ ret = glusterd_handle_ganesha_op (dict, errstr, key,
+ value);
+
+ } else if (is_origin_glusterd (dict)) {
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_msg ("glusterd-ganesha", GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+ ret = manage_export_config (volname, "off", errstr);
+ }
+ }
+out:
+ if (ret) {
+ gf_msg ("glusterd-ganesha", GF_LOG_ERROR, 0,
+ GD_MSG_NFS_GNS_OP_HANDLE_FAIL,
+ "Handling NFS-Ganesha"
+ " op failed.");
+ }
+ return ret;
+}
+
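+/* Stage phase of the global "nfs-ganesha" option: rejects a request that
+ * would not change the current state and starts or stops the nfs-ganesha
+ * service on this node accordingly. */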
+int
+glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr)
+{
+ int ret = -1;
+ int value = -1;
+ gf_boolean_t option = _gf_false;
+ char *str = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT (dict);
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ value = dict_get_str_boolean (dict, "value", _gf_false);
+ if (value == -1) {
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
+ "value not present.");
+ goto out;
+ }
+ /* This dict_get will fail if the user had never set the key before */
+ /* Ignoring the ret value and proceeding */
+ ret = dict_get_str (priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str);
+ if (ret == -1) {
+ gf_msg (this->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_GET_FAILED, "Global dict not present.");
+ ret = 0;
+ goto out;
+ }
+ /* Validity of the value is already checked */
+ ret = gf_string2boolean (str, &option);
+ /* Check if the feature is already enabled, fail in that case */
+ if (value == option) {
+ gf_asprintf (op_errstr, "nfs-ganesha is already %sd.", str);
+ ret = -1;
+ goto out;
+ }
+
+ if (value) {
+ ret = start_ganesha (op_errstr);
+ if (ret) {
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_NFS_GNS_START_FAIL,
+ "Could not start NFS-Ganesha");
+
+ }
+ } else {
+ ret = stop_ganesha (op_errstr);
+ if (ret)
+ gf_msg_debug (THIS->name, 0, "Could not stop "
+ "NFS-Ganesha.");
+ }
+
+out:
+
+ if (ret) {
+ if (!(*op_errstr)) {
+ *op_errstr = gf_strdup ("Error, Validation Failed");
+ gf_msg_debug (this->name, 0,
+ "Error, Cannot Validate option :%s",
+ GLUSTERD_STORE_KEY_GANESHA_GLOBAL);
+ } else {
+ gf_msg_debug (this->name, 0,
+ "Error, Cannot Validate option");
+ }
+ }
+ return ret;
+}
+
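+/* Commit phase of the global "nfs-ganesha" option: performs the ganesha
+ * op via glusterd_handle_ganesha_op, then persists the new value and the
+ * bumped global option version in glusterd's store. */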
+int
+glusterd_op_set_ganesha (dict_t *dict, char **errstr)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ char *key = NULL;
+ char *value = NULL;
+ char *next_version = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+
+ ret = dict_get_str (dict, "key", &key);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
+ "Couldn't get key in global option set");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "value", &value);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
+ "Couldn't get value in global option set");
+ goto out;
+ }
+
+ ret = glusterd_handle_ganesha_op (dict, errstr, key, value);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_NFS_GNS_SETUP_FAIL,
+ "Initial NFS-Ganesha set up failed");
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr_with_alloc (priv->opts,
+ GLUSTERD_STORE_KEY_GANESHA_GLOBAL,
+ value);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set"
+ " nfs-ganesha in dict.");
+ goto out;
+ }
+ ret = glusterd_get_next_global_opt_version_str (priv->opts,
+ &next_version);
+ if (ret) {
+ gf_msg_debug (THIS->name, 0, "Could not fetch "
+ " global op version");
+ goto out;
+ }
+ ret = dict_set_str (priv->opts, GLUSTERD_GLOBAL_OPT_VERSION,
+ next_version);
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_options (this, priv->opts);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STORE_FAIL, "Failed to store options");
+ goto out;
+ }
+
+out:
+ gf_msg_debug (this->name, 0, "returning %d", ret);
+ return ret;
+}
+
+/* The following function parses GANESHA_HA_CONF.
+ * A sample file looks like below:
+ * HA_NAME="ganesha-ha-360"
+ * HA_VOL_NAME="ha-state"
+ * HA_CLUSTER_NODES="server1,server2"
+ * VIP_rhs_1="10.x.x.x"
+ * VIP_rhs_2="10.x.x.x." */
+
+/* Check if the localhost is listed as one of nfs-ganesha nodes */
+gf_boolean_t
+check_host_list (void)
+{
+
+ glusterd_conf_t *priv = NULL;
+ char *hostname, *hostlist;
+ gf_boolean_t ret = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ priv = THIS->private;
+ GF_ASSERT (priv);
+
+ hostlist = parsing_ganesha_ha_conf ("HA_CLUSTER_NODES");
+ if (hostlist == NULL) {
+ gf_msg (this->name, GF_LOG_INFO, errno,
+ GD_MSG_GET_CONFIG_INFO_FAILED,
+ "couldn't get HA_CLUSTER_NODES from file %s",
+ GANESHA_HA_CONF);
+ return _gf_false;
+ }
+
+ /* Hostlist is a comma separated list now */
+ hostname = strtok (hostlist, ",");
+ while (hostname != NULL) {
+ ret = gf_is_local_addr (hostname);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_NFS_GNS_HOST_FOUND,
+ "ganesha host found "
+ "Hostname is %s", hostname);
+ break;
+ }
+ hostname = strtok (NULL, ",");
+ }
+
+ GF_FREE (hostlist);
+ return ret;
+
+}
+
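+/* Create or remove the export config file for a volume by running
+ * create-export-ganesha.sh with the requested value ("on"/"off"). */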
+int
+manage_export_config (char *volname, char *value, char **op_errstr)
+{
+ runner_t runner = {0,};
+ int ret = -1;
+
+ GF_ASSERT(volname);
+ runinit (&runner);
+ runner_add_args (&runner, "sh",
+ GANESHA_PREFIX"/create-export-ganesha.sh",
+ CONFDIR, value, volname, NULL);
+ ret = runner_run(&runner);
+
+ if (ret)
+ gf_asprintf (op_errstr, "Failed to create"
+ " NFS-Ganesha export config file.");
+
+ return ret;
+}
+
+/* Exports and unexports a particular volume via NFS-Ganesha */
+int
+ganesha_manage_export (dict_t *dict, char *value, char **op_errstr)
+{
+ runner_t runner = {0,};
+ int ret = -1;
+ glusterd_volinfo_t *volinfo = NULL;
+ dict_t *vol_opts = NULL;
+ char *volname = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_boolean_t option = _gf_false;
+
+ runinit (&runner);
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+
+ GF_ASSERT (value);
+ GF_ASSERT (dict);
+ GF_ASSERT (priv);
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+ ret = gf_string2boolean (value, &option);
+ if (ret == -1) {
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INVALID_ENTRY, "invalid value.");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_VOL_NOT_FOUND,
+ FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+
+ ret = glusterd_check_ganesha_export (volinfo);
+ if (ret && option) {
+ gf_asprintf (op_errstr, "ganesha.enable "
+ "is already 'on'.");
+ ret = -1;
+ goto out;
+
+ } else if (!option && !ret) {
+ gf_asprintf (op_errstr, "ganesha.enable "
+ "is already 'off'.");
+ ret = -1;
+ goto out;
+ }
+
+ /* Check if global option is enabled, proceed only then */
+ ret = dict_get_str_boolean (priv->opts,
+ GLUSTERD_STORE_KEY_GANESHA_GLOBAL, _gf_false);
+ if (ret == -1) {
+ gf_msg_debug (this->name, 0, "Failed to get "
+ "global option dict.");
+ gf_asprintf (op_errstr, "The option "
+ "nfs-ganesha should be "
+ "enabled before setting ganesha.enable.");
+ goto out;
+ }
+ if (!ret) {
+ gf_asprintf (op_errstr, "The option "
+ "nfs-ganesha should be "
+ "enabled before setting ganesha.enable.");
+ ret = -1;
+ goto out;
+ }
+
+ /* *
+ * Create the export file from the node where ganesha.enable "on"
+ * is executed
+ * */
+ if (option) {
+ ret = manage_export_config (volname, "on", op_errstr);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_EXPORT_FILE_CREATE_FAIL,
+ "Failed to create"
+ "export file for NFS-Ganesha\n");
+ goto out;
+ }
+ }
+
+ if (check_host_list()) {
+ runner_add_args (&runner, "sh", GANESHA_PREFIX"/dbus-send.sh",
+ CONFDIR, value, volname, NULL);
+ ret = runner_run (&runner);
+ if (ret) {
+ gf_asprintf(op_errstr, "Dynamic export"
+ " addition/deletion failed."
+ " Please see log file for details");
+ goto out;
+ }
+ }
+
+ vol_opts = volinfo->dict;
+ ret = dict_set_dynstr_with_alloc (vol_opts,
+ "features.cache-invalidation", value);
+ if (ret)
+ gf_asprintf (op_errstr, "Cache-invalidation could not"
+ " be set to %s.", value);
+ ret = glusterd_store_volinfo (volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ gf_asprintf (op_errstr, "failed to store volinfo for %s"
+ , volinfo->volname);
+
+out:
+ return ret;
+}
+
+int
+tear_down_cluster(gf_boolean_t run_teardown)
+{
+ int ret = 0;
+ runner_t runner = {0,};
+ struct stat st = {0,};
+ DIR *dir = NULL;
+ struct dirent *entry = NULL;
+ struct dirent scratch[2] = {{0,},};
+ char path[PATH_MAX] = {0,};
+
+ if (run_teardown) {
+ runinit (&runner);
+ runner_add_args (&runner, "sh",
+ GANESHA_PREFIX"/ganesha-ha.sh", "teardown",
+ CONFDIR, NULL);
+ ret = runner_run(&runner);
+ /* *
+ * Remove all the entries in CONFDIR except ganesha.conf and
+ * ganesha-ha.conf
+ */
+ dir = sys_opendir (CONFDIR);
+ if (!dir) {
+ gf_msg_debug (THIS->name, 0, "Failed to open directory %s. "
+ "Reason : %s", CONFDIR, strerror (errno));
+ ret = 0;
+ goto out;
+ }
+
+ GF_FOR_EACH_ENTRY_IN_DIR (entry, dir, scratch);
+ while (entry) {
+ snprintf (path, PATH_MAX, "%s/%s", CONFDIR, entry->d_name);
+ ret = sys_lstat (path, &st);
+ if (ret == -1) {
+ gf_msg_debug (THIS->name, 0, "Failed to stat entry %s :"
+ " %s", path, strerror (errno));
+ goto out;
+ }
+
+ if (strcmp(entry->d_name, "ganesha.conf") == 0 ||
+ strcmp(entry->d_name, "ganesha-ha.conf") == 0)
+ gf_msg_debug (THIS->name, 0, " %s is not required"
+ " to remove", path);
+ else if (S_ISDIR (st.st_mode))
+ ret = recursive_rmdir (path);
+ else
+ ret = sys_unlink (path);
+
+ if (ret) {
+ gf_msg_debug (THIS->name, 0, " Failed to remove %s. "
+ "Reason : %s", path, strerror (errno));
+ }
+
+ gf_msg_debug (THIS->name, 0, "%s %s", ret ?
+ "Failed to remove" : "Removed", entry->d_name);
+ GF_FOR_EACH_ENTRY_IN_DIR (entry, dir, scratch);
+ }
+
+ ret = sys_closedir (dir);
+ if (ret) {
+ gf_msg_debug (THIS->name, 0, "Failed to close dir %s. Reason :"
+ " %s", CONFDIR, strerror (errno));
+ }
+ }
+
+out:
+ return ret;
+}
+
+
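+/* Run "ganesha-ha.sh setup" to bring up the NFS-Ganesha HA cluster when
+ * run_setup is set. */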
+int
+setup_cluster(gf_boolean_t run_setup)
+{
+ int ret = 0;
+ runner_t runner = {0,};
+
+ if (run_setup) {
+ runinit (&runner);
+ runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh",
+ "setup", CONFDIR, NULL);
+ ret = runner_run (&runner);
+ }
+ return ret;
+}
+
+
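+/* Tear down the NFS-Ganesha HA cluster (optionally running the full
+ * teardown script), clean up the remaining config and mark every volume
+ * as unexported by resetting ganesha.enable and cache-invalidation. */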
+static int
+teardown (gf_boolean_t run_teardown, char **op_errstr)
+{
+ runner_t runner = {0,};
+ int ret = 1;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ dict_t *vol_opts = NULL;
+
+ priv = THIS->private;
+
+ ret = tear_down_cluster (run_teardown);
+ if (ret == -1) {
+ gf_asprintf (op_errstr, "Cleanup of NFS-Ganesha"
+ " HA config failed.");
+ goto out;
+ }
+
+ runinit (&runner);
+ runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh",
+ "cleanup", CONFDIR, NULL);
+ ret = runner_run (&runner);
+ if (ret)
+ gf_msg_debug (THIS->name, 0, "Could not clean up"
+ " NFS-Ganesha related config");
+
+ cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
+ vol_opts = volinfo->dict;
+ /* All the volumes exported via NFS-Ganesha will be
+ unexported, hence setting the appropriate keys */
+ ret = dict_set_str (vol_opts, "features.cache-invalidation",
+ "off");
+ if (ret)
+ gf_msg (THIS->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_SET_FAILED,
+ "Could not set features.cache-invalidation "
+ "to off for %s", volinfo->volname);
+
+ ret = dict_set_str (vol_opts, "ganesha.enable", "off");
+ if (ret)
+ gf_msg (THIS->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_SET_FAILED,
+ "Could not set ganesha.enable to off for %s",
+ volinfo->volname);
+
+ ret = glusterd_store_volinfo (volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ gf_msg (THIS->name, GF_LOG_WARNING, 0,
+ GD_MSG_VOLINFO_SET_FAIL,
+ "failed to store volinfo for %s",
+ volinfo->volname);
+ }
+out:
+ return ret;
+}
+
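+/* Remove the ganesha.conf symlink from /etc/ganesha and, if this node is
+ * listed in HA_CLUSTER_NODES, stop the nfs-ganesha service. */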
+int
+stop_ganesha (char **op_errstr) {
+
+ int ret = 0;
+ runner_t runner = {0,};
+
+ runinit (&runner);
+ runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh",
+ "--setup-ganesha-conf-files", CONFDIR, "no", NULL);
+ ret = runner_run (&runner);
+ if (ret) {
+ gf_asprintf (op_errstr, "removal of symlink ganesha.conf "
+ "in /etc/ganesha failed");
+ }
+
+ if (check_host_list ()) {
+ ret = manage_service ("stop");
+ if (ret)
+ gf_asprintf (op_errstr, "NFS-Ganesha service could not"
+ "be stopped.");
+ }
+ return ret;
+
+}
+
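+/* Disable Gluster-NFS on every volume, stop the running gluster-nfs
+ * service and, if this node is listed in HA_CLUSTER_NODES, recreate the
+ * ganesha.conf symlink and start the nfs-ganesha service. */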
+int
+start_ganesha (char **op_errstr)
+{
+ int ret = -1;
+ dict_t *vol_opts = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ runner_t runner = {0,};
+
+ priv = THIS->private;
+ GF_ASSERT (priv);
+
+ cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
+ vol_opts = volinfo->dict;
+ /* Gluster-nfs has to be disabled across the trusted pool
+ * before attempting to start nfs-ganesha */
+ ret = dict_set_str (vol_opts, NFS_DISABLE_MAP_KEY, "on");
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_volinfo (volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ *op_errstr = gf_strdup ("Failed to store the "
+ "Volume information");
+ goto out;
+ }
+ }
+
+ /* If the nfs svc is not initialized it means that the service is not
+ * running, hence we can skip the process of stopping gluster-nfs
+ * service
+ */
+ if (priv->nfs_svc.inited) {
+ ret = priv->nfs_svc.stop (&(priv->nfs_svc), SIGKILL);
+ if (ret) {
+ ret = -1;
+ gf_asprintf (op_errstr, "Gluster-NFS service could"
+ "not be stopped, exiting.");
+ goto out;
+ }
+ }
+
+ if (check_host_list()) {
+ runinit (&runner);
+ runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh",
+ "--setup-ganesha-conf-files", CONFDIR, "yes",
+ NULL);
+ ret = runner_run (&runner);
+ if (ret) {
+ gf_asprintf (op_errstr, "creation of symlink ganesha.conf "
+ "in /etc/ganesha failed");
+ goto out;
+ }
+ ret = manage_service ("start");
+ if (ret)
+ gf_asprintf (op_errstr, "NFS-Ganesha failed to start."
+ "Please see log file for details");
+ }
+
+out:
+ return ret;
+}
+
+static int
+pre_setup (gf_boolean_t run_setup, char **op_errstr)
+{
+ int ret = 0;
+
+ ret = check_host_list();
+
+ if (ret) {
+ ret = setup_cluster(run_setup);
+ if (ret == -1)
+ gf_asprintf (op_errstr, "Failed to set up HA "
+ "config for NFS-Ganesha. "
+ "Please check the log file for details");
+ }
+
+ return ret;
+}
+
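+/* Dispatch a ganesha related option change: exports/unexports the volume
+ * for "ganesha.enable" and performs the one-time HA cluster setup or
+ * teardown for the global "nfs-ganesha" key. */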
+int
+glusterd_handle_ganesha_op (dict_t *dict, char **op_errstr,
+ char *key, char *value)
+{
+
+ int32_t ret = -1;
+ gf_boolean_t option = _gf_false;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (key);
+ GF_ASSERT (value);
+
+
+ if (strcmp (key, "ganesha.enable") == 0) {
+ ret = ganesha_manage_export (dict, value, op_errstr);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* It is possible that the key might not be set */
+ ret = gf_string2boolean (value, &option);
+ if (ret == -1) {
+ gf_asprintf (op_errstr, "Invalid value in key-value pair.");
+ goto out;
+ }
+
+ if (strcmp (key, GLUSTERD_STORE_KEY_GANESHA_GLOBAL) == 0) {
+ /* *
+ * The set up/teardown of the pcs cluster should be performed only
+ * once. This is done on the node on which the cli command
+ * 'gluster nfs-ganesha <enable/disable>' was executed, so that
+ * node should be part of the ganesha HA cluster.
+ */
+ if (option) {
+ ret = pre_setup (is_origin_glusterd (dict), op_errstr);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = teardown (is_origin_glusterd (dict), op_errstr);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index c3b9252..a3e1fdc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -1884,6 +1884,82 @@ glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
return ret;
}
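+/* Handler for the GLUSTER_CLI_GANESHA request from the CLI: decodes the
+ * request dictionary and starts the GD_OP_GANESHA operation. */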
+int
+__glusterd_handle_ganesha_cmd (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf_cli_req cli_req = { {0,} } ;
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_GANESHA;
+ char *op_errstr = NULL;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf (err_str, sizeof (err_str), "Failed to decode "
+ "request received from cli");
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf (err_str, sizeof (err_str), "Unable to decode "
+ "the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+ }
+ }
+
+ gf_msg_trace (this->name, 0, "Received global option request");
+
+ ret = glusterd_op_begin_synctask (req, GD_OP_GANESHA, dict);
+out:
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf (err_str, sizeof (err_str),
+ "Operation failed");
+ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
+ dict, err_str);
+ }
+ if (op_errstr)
+ GF_FREE (op_errstr);
+ if (dict)
+ dict_unref(dict);
+
+ return ret;
+}
+
+
+int
+glusterd_handle_ganesha_cmd (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req, __glusterd_handle_ganesha_cmd);
+}
+
static int
__glusterd_handle_reset_volume (rpcsvc_request_t *req)
{
@@ -6470,6 +6546,7 @@ rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA},
[GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot, NULL, 0, DRC_NA},
[GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME", GLUSTER_CLI_BARRIER_VOLUME, glusterd_handle_barrier, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GANESHA] = { "GANESHA" , GLUSTER_CLI_GANESHA, glusterd_handle_ganesha_cmd, NULL, 0, DRC_NA},
[GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", GLUSTER_CLI_GET_VOL_OPT, glusterd_handle_get_vol_opt, NULL, 0, DRC_NA},
[GLUSTER_CLI_BITROT] = {"BITROT", GLUSTER_CLI_BITROT, glusterd_handle_bitrot, NULL, 0, DRC_NA},
[GLUSTER_CLI_GET_STATE] = {"GET_STATE", GLUSTER_CLI_GET_STATE, glusterd_handle_get_state, NULL, 0, DRC_NA},
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index de9ae92..cc7f371 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -4767,6 +4767,14 @@
* @recommendedaction
*
*/
+#define GD_MSG_NFS_GANESHA_DISABLED (GLUSTERD_COMP_BASE + 589)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
#define GD_MSG_TIERD_STOP_FAIL (GLUSTERD_COMP_BASE + 590)
/*!
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 5b8f833..06e9e25 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1126,6 +1126,12 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
if (ret)
goto out;
+ if ((strcmp (key, "ganesha.enable") == 0) &&
+ (strcmp (value, "off") == 0)) {
+ ret = ganesha_manage_export (dict, "off", op_errstr);
+ if (ret)
+ goto out;
+ }
ret = glusterd_check_quota_cmd (key, value, errstr, sizeof (errstr));
if (ret)
goto out;
@@ -1642,6 +1648,21 @@ glusterd_op_stage_reset_volume (dict_t *dict, char **op_errstr)
goto out;
}
+ /* *
+ * If the key ganesha.enable is set, then the volume should be unexported
+ * from the ganesha server. Also it is a volume-level option; perform this
+ * only when the volume name is not equal to "all" (i.e. volinfo != NULL)
+ */
+ if (volinfo && (!strcmp (key, "all") || !strcmp(key, "ganesha.enable"))) {
+ if (glusterd_check_ganesha_export (volinfo)) {
+ ret = ganesha_manage_export (dict, "off", op_errstr);
+ if (ret)
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_NFS_GNS_RESET_FAIL,
+ "Could not reset ganesha.enable key");
+ }
+ }
+
if (strcmp(key, "all")) {
exists = glusterd_check_option_exists (key, &key_fixed);
if (exists == -1) {
@@ -2364,6 +2385,16 @@ glusterd_op_reset_volume (dict_t *dict, char **op_rspstr)
}
}
+ if (!strcmp(key, "ganesha.enable") || !strcmp (key, "all")) {
+ if (glusterd_check_ganesha_export (volinfo)) {
+ ret = manage_export_config (volname, "off", op_rspstr);
+ if (ret)
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_NFS_GNS_RESET_FAIL,
+ "Could not reset ganesha.enable key");
+ }
+ }
+
out:
GF_FREE (key_fixed);
if (quorum_action)
@@ -2960,6 +2991,9 @@ glusterd_op_set_volume (dict_t *dict, char **errstr)
}
}
+ ret = glusterd_check_ganesha_cmd (key, value, errstr, dict);
+ if (ret == -1)
+ goto out;
if (!is_key_glusterd_hooks_friendly (key)) {
ret = glusterd_check_option_exists (key, &key_fixed);
GF_ASSERT (ret);
@@ -4568,6 +4602,12 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)
}
break;
+ case GD_OP_GANESHA:
+ {
+ dict_copy (dict, req_dict);
+ }
+ break;
+
default:
break;
}
@@ -6062,6 +6102,10 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,
ret = glusterd_op_stage_set_volume (dict, op_errstr);
break;
+ case GD_OP_GANESHA:
+ ret = glusterd_op_stage_set_ganesha (dict, op_errstr);
+ break;
+
case GD_OP_RESET_VOLUME:
ret = glusterd_op_stage_reset_volume (dict, op_errstr);
break;
@@ -6195,6 +6239,9 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr,
case GD_OP_SET_VOLUME:
ret = glusterd_op_set_volume (dict, op_errstr);
break;
+ case GD_OP_GANESHA:
+ ret = glusterd_op_set_ganesha (dict, op_errstr);
+ break;
case GD_OP_RESET_VOLUME:
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index 4cbade1..2a0d321 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -3702,6 +3702,146 @@ out:
}
+/* *
+ * Here there are two possibilities: the destination is either a snapshot
+ * or a clone. In the case of a snapshot, the nfs_ganesha export file is
+ * copied to the snap dir. If it is a clone, a new export file is created
+ * for the clone in GANESHA_EXPORT_DIRECTORY, replacing occurrences of
+ * volname with clonename.
+ */
+int
+glusterd_copy_nfs_ganesha_file (glusterd_volinfo_t *src_vol,
+ glusterd_volinfo_t *dest_vol)
+{
+
+ int32_t ret = -1;
+ char snap_dir[PATH_MAX] = {0,};
+ char src_path[PATH_MAX] = {0,};
+ char dest_path[PATH_MAX] = {0,};
+ char buffer[BUFSIZ] = {0,};
+ char *find_ptr = NULL;
+ char *buff_ptr = NULL;
+ char *tmp_ptr = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ struct stat stbuf = {0,};
+ FILE *src = NULL;
+ FILE *dest = NULL;
+
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO ("snapshot", this, out);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO (this->name, priv, out);
+
+ GF_VALIDATE_OR_GOTO (this->name, src_vol, out);
+ GF_VALIDATE_OR_GOTO (this->name, dest_vol, out);
+
+ if (glusterd_check_ganesha_export(src_vol) == _gf_false) {
+ gf_msg_debug (this->name, 0, "%s is not exported via "
+ "NFS-Ganesha. Skipping copy of export conf.",
+ src_vol->volname);
+ ret = 0;
+ goto out;
+ }
+
+ if (src_vol->is_snap_volume) {
+ GLUSTERD_GET_SNAP_DIR (snap_dir, src_vol->snapshot, priv);
+ ret = snprintf (src_path, PATH_MAX, "%s/export.%s.conf",
+ snap_dir, src_vol->snapshot->snapname);
+ } else {
+ ret = snprintf (src_path, PATH_MAX, "%s/export.%s.conf",
+ GANESHA_EXPORT_DIRECTORY, src_vol->volname);
+ }
+ if (ret < 0 || ret >= PATH_MAX)
+ goto out;
+
+ ret = sys_lstat (src_path, &stbuf);
+ if (ret) {
+ /*
+ * This code path is hit only when the src_vol is being *
+ * exported via NFS-Ganesha. So if the conf file is not *
+ * available, we fail the snapshot operation. *
+ */
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_FILE_OP_FAILED,
+ "Stat on %s failed with %s",
+ src_path, strerror (errno));
+ goto out;
+ }
+
+ if (dest_vol->is_snap_volume) {
+ memset (snap_dir, 0 , PATH_MAX);
+ GLUSTERD_GET_SNAP_DIR (snap_dir, dest_vol->snapshot, priv);
+ ret = snprintf (dest_path, sizeof (dest_path),
+ "%s/export.%s.conf", snap_dir,
+ dest_vol->snapshot->snapname);
+ if (ret < 0)
+ goto out;
+
+ ret = glusterd_copy_file (src_path, dest_path);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
+ GD_MSG_NO_MEMORY, "Failed to copy %s in %s",
+ src_path, dest_path);
+ goto out;
+ }
+
+ } else {
+ ret = snprintf (dest_path, sizeof (dest_path),
+ "%s/export.%s.conf", GANESHA_EXPORT_DIRECTORY,
+ dest_vol->volname);
+ if (ret < 0)
+ goto out;
+
+ src = fopen (src_path, "r");
+ dest = fopen (dest_path, "w");
+
+ if (!src || !dest) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_FILE_OP_FAILED,
+ "Failed to open %s",
+ dest ? src_path : dest_path);
+ ret = -1;
+ goto out;
+ }
+
+ /* *
+ * if the source volume is a snapshot, the export conf file
+ * contains the original volname
+ */
+ if (src_vol->is_snap_volume)
+ find_ptr = gf_strdup (src_vol->parent_volname);
+ else
+ find_ptr = gf_strdup (src_vol->volname);
+
+ if (!find_ptr)
+ goto out;
+
+ /* Replacing volname with clonename */
+ while (fgets(buffer, BUFSIZ, src)) {
+ buff_ptr = buffer;
+ while ((tmp_ptr = strstr(buff_ptr, find_ptr))) {
+ while (buff_ptr < tmp_ptr)
+ fputc((int)*buff_ptr++, dest);
+ fputs(dest_vol->volname, dest);
+ buff_ptr += strlen(find_ptr);
+ }
+ fputs(buff_ptr, dest);
+ memset (buffer, 0, BUFSIZ);
+ }
+ }
+out:
+ if (src)
+ fclose (src);
+ if (dest)
+ fclose (dest);
+ if (find_ptr)
+ GF_FREE(find_ptr);
+
+ return ret;
+}
+
int32_t
glusterd_restore_geo_rep_files (glusterd_volinfo_t *snap_vol)
{
@@ -3792,6 +3932,62 @@ out:
return ret;
}
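+/* While restoring a snapshot, copy the export config file saved in the
+ * snap directory back to GANESHA_EXPORT_DIRECTORY for the original
+ * volume. A missing file (ENOENT) is not treated as an error. */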
+int
+glusterd_restore_nfs_ganesha_file (glusterd_volinfo_t *src_vol,
+ glusterd_snap_t *snap)
+{
+
+ int32_t ret = -1;
+ char snap_dir[PATH_MAX] = "";
+ char src_path[PATH_MAX] = "";
+ char dest_path[PATH_MAX] = "";
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ struct stat stbuf = {0,};
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO ("snapshot", this, out);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO (this->name, priv, out);
+
+ GF_VALIDATE_OR_GOTO (this->name, src_vol, out);
+ GF_VALIDATE_OR_GOTO (this->name, snap, out);
+
+ GLUSTERD_GET_SNAP_DIR (snap_dir, snap, priv);
+
+ ret = snprintf (src_path, sizeof (src_path), "%s/export.%s.conf",
+ snap_dir, snap->snapname);
+ if (ret < 0)
+ goto out;
+
+ ret = sys_lstat (src_path, &stbuf);
+ if (ret) {
+ if (errno == ENOENT) {
+ ret = 0;
+ gf_msg_debug (this->name, 0, "%s not found", src_path);
+ } else
+ gf_msg (this->name, GF_LOG_WARNING, errno,
+ GD_MSG_FILE_OP_FAILED,
+ "Stat on %s failed with %s",
+ src_path, strerror (errno));
+ goto out;
+ }
+
+ ret = snprintf (dest_path, sizeof (dest_path), "%s/export.%s.conf",
+ GANESHA_EXPORT_DIRECTORY, src_vol->volname);
+ if (ret < 0)
+ goto out;
+
+ ret = glusterd_copy_file (src_path, dest_path);
+ if (ret)
+ gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
+ GD_MSG_NO_MEMORY, "Failed to copy %s in %s",
+ src_path, dest_path);
+
+out:
+ return ret;
+
+}
/* Snapd functions */
int
glusterd_is_snapd_enabled (glusterd_volinfo_t *volinfo)
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
index b13493d..e050166 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
@@ -99,12 +99,19 @@ glusterd_get_geo_rep_session (char *slave_key, char *origin_volname,
int32_t
glusterd_restore_geo_rep_files (glusterd_volinfo_t *snap_vol);
+int
+glusterd_restore_nfs_ganesha_file (glusterd_volinfo_t *src_vol,
+ glusterd_snap_t *snap);
int32_t
glusterd_copy_quota_files (glusterd_volinfo_t *src_vol,
glusterd_volinfo_t *dest_vol,
gf_boolean_t *conf_present);
int
+glusterd_copy_nfs_ganesha_file (glusterd_volinfo_t *src_vol,
+ glusterd_volinfo_t *dest_vol);
+
+int
glusterd_snap_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict);
int
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 6306d29..c38d2ff 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -904,6 +904,76 @@ out:
return ret;
}
+/*
+ * This function validates the particular snapshot with respect to the current
+ * cluster. If the snapshot has ganesha enabled and the cluster is not an nfs
+ * ganesha cluster, we fail the validation. In the other scenarios, where either
+ * the snapshot does not have ganesha enabled, or it has and the cluster is an
+ * nfs ganesha cluster, we pass the validation.
+ *
+ * @param snap snap object of the snapshot to be validated
+ * @return Negative value on failure and 0 on success
+ */
+int32_t
+glusterd_snapshot_validate_ganesha_conf (glusterd_snap_t *snap,
+ char **op_errstr,
+ uint32_t *op_errno)
+{
+ int ret = -1;
+ glusterd_volinfo_t *snap_vol = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO ("snapshot", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, snap, out);
+ GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
+ GF_VALIDATE_OR_GOTO (this->name, op_errno, out);
+
+ snap_vol = list_entry (snap->volumes.next,
+ glusterd_volinfo_t, vol_list);
+
+ GF_VALIDATE_OR_GOTO (this->name, snap_vol, out);
+
+ /*
+ * Check if the snapshot has ganesha enabled *
+ */
+ if (glusterd_check_ganesha_export(snap_vol) == _gf_false) {
+ /*
+ * If the snapshot has not been exported via ganesha *
+ * then we can proceed. *
+ */
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * At this point we are certain that the snapshot has been exported *
+ * via ganesha. So we check if the cluster is a nfs-ganesha cluster *
+ * If it is a nfs-ganesha cluster, then we proceed. Else we fail. *
+ */
+ if (glusterd_is_ganesha_cluster() != _gf_true) {
+ ret = gf_asprintf (op_errstr, "Snapshot(%s) has a "
+ "nfs-ganesha export conf file. "
+ "cluster.enable-shared-storage and "
+ "nfs-ganesha should be enabled "
+ "before restoring this snapshot.",
+ snap->snapname);
+ *op_errno = EG_NOGANESHA;
+ if (ret < 0) {
+ goto out;
+ }
+
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_NFS_GANESHA_DISABLED, "%s", *op_errstr);
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
/* This function is called before actual restore is taken place. This function
* will validate whether the snapshot volumes are ready to be restored or not.
*
@@ -974,6 +1044,15 @@ glusterd_snapshot_restore_prevalidate (dict_t *dict, char **op_errstr,
goto out;
}
+ ret = glusterd_snapshot_validate_ganesha_conf (snap, op_errstr,
+ op_errno);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SNAPSHOT_OP_FAILED,
+ "ganesha conf validation failed.");
+ goto out;
+ }
+
ret = dict_set_str (rsp_dict, "snapname", snapname);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
@@ -5369,6 +5448,13 @@ glusterd_do_snap_vol (glusterd_volinfo_t *origin_vol, glusterd_snap_t *snap,
}
+ ret = glusterd_copy_nfs_ganesha_file (origin_vol, snap_vol);
+ if (ret < 0) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOL_OP_FAILED, "Failed to copy export "
+ "file for volume %s", origin_vol->volname);
+ goto out;
+ }
glusterd_auth_set_username (snap_vol, username);
glusterd_auth_set_password (snap_vol, password);
@@ -9968,6 +10054,16 @@ gd_restore_snap_volume (dict_t *dict, dict_t *rsp_dict,
snap_vol->snapshot->snapname);
}
+ ret = glusterd_restore_nfs_ganesha_file (orig_vol, snap);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_SNAP_RESTORE_FAIL,
+ "Failed to restore "
+ "nfs-ganesha export file for snap %s",
+ snap_vol->snapshot->snapname);
+ goto out;
+ }
+
/* Need not save cksum, as we will copy cksum file in *
* this function *
*/
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h
index 603151a..bf504e0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.h
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h
@@ -83,6 +83,7 @@ typedef enum glusterd_store_ver_ac_{
#define GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT "snap-max-soft-limit"
#define GLUSTERD_STORE_KEY_SNAPD_PORT "snapd-port"
#define GLUSTERD_STORE_KEY_SNAP_ACTIVATE "snap-activate-on-create"
+#define GLUSTERD_STORE_KEY_GANESHA_GLOBAL "nfs-ganesha"
#define GLUSTERD_STORE_KEY_BRICK_HOSTNAME "hostname"
#define GLUSTERD_STORE_KEY_BRICK_PATH "path"
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index bec5f72..0914fb1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1737,6 +1737,16 @@ glusterd_op_stage_stop_volume (dict_t *dict, char **op_errstr)
ret = -1;
goto out;
}
+ ret = glusterd_check_ganesha_export (volinfo);
+ if (ret) {
+ ret = ganesha_manage_export(dict, "off", op_errstr);
+ if (ret) {
+ gf_msg (THIS->name, GF_LOG_WARNING, 0,
+ GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL, "Could not "
+ "unexport volume via NFS-Ganesha");
+ ret = 0;
+ }
+ }
if (glusterd_is_defrag_on (volinfo)) {
snprintf (msg, sizeof(msg), "rebalance session is "
@@ -2595,6 +2605,8 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
char *brick_mount_dir = NULL;
char key[PATH_MAX] = "";
char *volname = NULL;
+ char *str = NULL;
+ gf_boolean_t option = _gf_false;
int flags = 0;
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
@@ -2657,6 +2669,28 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
}
}
+ ret = dict_get_str (conf->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str);
+ if (ret != 0) {
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_DICT_GET_FAILED, "Global dict not present.");
+ ret = 0;
+
+ } else {
+ ret = gf_string2boolean (str, &option);
+ /* Check if the feature is enabled and set nfs-disable to true */
+ if (option) {
+ gf_msg_debug (this->name, 0, "NFS-Ganesha is enabled");
+ /* Gluster-nfs should not start when NFS-Ganesha is enabled */
+ ret = dict_set_str (volinfo->dict, NFS_DISABLE_MAP_KEY, "on");
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SET_FAILED, "Failed to set nfs.disable for"
+ "volume %s", volname);
+ goto out;
+ }
+ }
+ }
+
ret = glusterd_start_volume (volinfo, flags, _gf_true);
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 2210b82..7fe76e5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -3210,6 +3210,12 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.op_version = GD_OP_VERSION_3_7_0,
.flags = OPT_FLAG_CLIENT_OPT
},
+ { .key = "ganesha.enable",
+ .voltype = "features/ganesha",
+ .value = "off",
+ .option = "ganesha.enable",
+ .op_version = GD_OP_VERSION_3_7_0,
+ },
{ .key = "features.shard",
.voltype = "features/shard",
.value = "off",
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 59b1775..2d8dbb9 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -57,6 +57,8 @@
#define GLUSTERD_BRICKMUX_LIMIT_KEY "cluster.max-bricks-per-process"
#define GLUSTERD_LOCALTIME_LOGGING_KEY "cluster.localtime-logging"
+#define GANESHA_HA_CONF CONFDIR "/ganesha-ha.conf"
+#define GANESHA_EXPORT_DIRECTORY CONFDIR"/exports"
#define GLUSTERD_SNAPS_MAX_HARD_LIMIT 256
#define GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT 90
#define GLUSTERD_SNAPS_MAX_SOFT_LIMIT_PERCENT 100
@@ -117,7 +119,7 @@ typedef enum glusterd_op_ {
GD_OP_GSYNC_CREATE,
GD_OP_SNAP,
GD_OP_BARRIER,
- GD_OP_GANESHA, /* obsolete */
+ GD_OP_GANESHA,
GD_OP_BITROT,
GD_OP_DETACH_TIER,
GD_OP_TIER_MIGRATE,
@@ -1168,8 +1170,20 @@ int glusterd_op_create_volume (dict_t *dict, char **op_errstr);
int glusterd_op_start_volume (dict_t *dict, char **op_errstr);
int glusterd_op_stop_volume (dict_t *dict);
int glusterd_op_delete_volume (dict_t *dict);
+int glusterd_handle_ganesha_op (dict_t *dict, char **op_errstr,
+ char *key, char *value);
+int glusterd_check_ganesha_cmd (char *key, char *value,
+ char **errstr, dict_t *dict);
+int glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr);
+int glusterd_op_set_ganesha (dict_t *dict, char **errstr);
+int ganesha_manage_export (dict_t *dict, char *value, char **op_errstr);
int manage_export_config (char *volname, char *value, char **op_errstr);
+gf_boolean_t
+glusterd_is_ganesha_cluster ();
+gf_boolean_t glusterd_check_ganesha_export (glusterd_volinfo_t *volinfo);
+int stop_ganesha (char **op_errstr);
+int tear_down_cluster (gf_boolean_t run_teardown);
int glusterd_op_add_brick (dict_t *dict, char **op_errstr);
int glusterd_op_add_tier_brick (dict_t *dict, char **op_errstr);
int glusterd_op_remove_brick (dict_t *dict, char **op_errstr);
--
1.8.3.1