From 45aa6c07554a20903ad12ee00b0ed9b6b403d8f0 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Mon, 27 Jun 2016 15:08:25 +0530
Subject: [PATCH 56/86] ganesha/scripts : remove 'HA_VOL_SERVER' from the code

The parameter HA_VOL_SERVER was introduced in ganesha-ha.conf to
specify the gluster server from which to mount the shared data
volume. After a new CLI was introduced for the same purpose, the
parameter became unnecessary, and its continued existence can
confuse users. This patch removes/replaces every instance of
HA_VOL_SERVER in the code.
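
For context, the CLI that now serves this purpose is the one quoted
later in glusterd-ganesha.c. A sketch of the intended flow (hedged:
it assumes shared storage and ganesha-ha.conf are already in place):

    # Run once, from a node that is part of the ganesha HA cluster;
    # that node acts as the origin glusterd and drives the pcs setup.
    gluster nfs-ganesha enable
    # Teardown follows the same origin-node rule.
    gluster nfs-ganesha disable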
Upstream reference:
>Change-Id: I638c61dcd2c21ebdb279bbb141d35bb806bd3ef0
>BUG: 1350371
>Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
>Reviewed-on: http://review.gluster.org/14812
>Tested-by: Kaleb KEITHLEY <kkeithle@redhat.com>
>NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
>CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
>Reviewed-by: soumya k <skoduri@redhat.com>
>Smoke: Gluster Build System <jenkins@build.gluster.org>
>Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
>Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Change-Id: I638c61dcd2c21ebdb279bbb141d35bb806bd3ef0
BUG: 1348954
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/84777
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
---
extras/ganesha/config/ganesha-ha.conf.sample | 3 -
extras/ganesha/scripts/ganesha-ha.sh | 34 +++------------
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 62 +++++++------------------
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 2 +-
xlators/mgmt/glusterd/src/glusterd.h | 2 +-
5 files changed, 26 insertions(+), 77 deletions(-)
diff --git a/extras/ganesha/config/ganesha-ha.conf.sample b/extras/ganesha/config/ganesha-ha.conf.sample
index 2077800..c22892b 100644
--- a/extras/ganesha/config/ganesha-ha.conf.sample
+++ b/extras/ganesha/config/ganesha-ha.conf.sample
@@ -2,9 +2,6 @@
# must be unique within the subnet
HA_NAME="ganesha-ha-360"
#
-# The gluster server from which to mount the shared data volume.
-HA_VOL_SERVER="server1"
-#
# N.B. you may use short names or long names; you may not use IP addrs.
# Once you select one, stay with it as it will be mildly unpleasant to
# clean up if you switch later on. Ensure that all names - short and/or
diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 31c0c39..ada21cb 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -341,35 +341,15 @@ string:"EXPORT(Path=/$VOL)" 2>&1)
copy_export_config ()
{
local new_node=${1}
- local tganesha_conf=$(mktemp)
- local tganesha_exports=$(mktemp -d)
- local short_host=$(hostname -s)
- # avoid prompting for password, even with password-less scp
- # scp $host1:$file $host2:$file prompts for the password
- # Ideally all the existing nodes in the cluster should have same
- # copy of the configuration files. Maybe for sanity check, copy
- # the state from HA_VOL_SERVER?
- if [ "${HA_VOL_SERVER}" == $(hostname) ]
- then
- cp ${GANESHA_CONF} ${tganesha_conf}
- else
- scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${HA_VOL_SERVER}:${GANESHA_CONF} $short_host:${tganesha_conf}
- fi
+
+ # 'add node' must be run from one of the nodes already in the
+ # ganesha cluster, so this node already has the current copy of
+ # the configuration files. Just copy them to the new node.
scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${tganesha_conf} ${new_node}:${GANESHA_CONF}
- rm -f ${tganesha_conf}
+${SECRET_PEM} ${GANESHA_CONF} ${new_node}:${GANESHA_CONF}
- if [ "${HA_VOL_SERVER}" == $(hostname) ]
- then
- cp -r ${HA_CONFDIR}/exports ${tganesha_exports}
- else
- scp -r -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${HA_VOL_SERVER}:${HA_CONFDIR}/exports/ $short_host:${tganesha_exports}
- fi
scp -r -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${tganesha_exports}/exports ${new_node}:${HA_CONFDIR}/
- rm -rf ${tganesha_exports}
+${SECRET_PEM} ${HA_CONFDIR}/exports/ ${new_node}:${HA_CONFDIR}/
}
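
For reference, the scp options used above behave as follows (a sketch
of the equivalent standalone command, using the script's own
variables):

    # -oPasswordAuthentication=no : fail instead of prompting for a password
    # -oStrictHostKeyChecking=no  : do not prompt on unknown host keys
    # -i ${SECRET_PEM}            : use the cluster's passphrase-less key
    scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no \
        -i ${SECRET_PEM} ${GANESHA_CONF} ${new_node}:${GANESHA_CONF}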
@@ -867,8 +847,6 @@ main()
# ignore any comment lines
cfgline=$(grep ^HA_NAME= ${ha_conf})
eval $(echo ${cfgline} | grep -F HA_NAME=)
- cfgline=$(grep ^HA_VOL_SERVER= ${ha_conf})
- eval $(echo ${cfgline} | grep -F HA_VOL_SERVER=)
cfgline=$(grep ^HA_CLUSTER_NODES= ${ha_conf})
eval $(echo ${cfgline} | grep -F HA_CLUSTER_NODES=)
fi
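
The grep/eval pairs in main() simply lift single assignments out of
ganesha-ha.conf; a minimal sketch with the two keys that remain after
this change (values are the sample file's own):

    # ganesha-ha.conf fragment
    HA_NAME="ganesha-ha-360"
    HA_CLUSTER_NODES="server1,server2"
    # grep ^HA_NAME= picks the assignment line; eval then defines the
    # variable in the running script, so ${HA_NAME} is usable below.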
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index 3d9a10e..2406519 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -364,46 +364,14 @@ out:
return ret;
}
-/* Following 2 functions parse GANESHA_HA_CONF
+/* Following function parses GANESHA_HA_CONF
* The sample file looks like below,
* HA_NAME="ganesha-ha-360"
* HA_VOL_NAME="ha-state"
- * HA_VOL_MNT="/mount-point"
- * HA_VOL_SERVER="server1"
* HA_CLUSTER_NODES="server1,server2"
* VIP_rhs_1="10.x.x.x"
* VIP_rhs_2="10.x.x.x." */
-gf_boolean_t
-is_ganesha_host (void)
-{
- char *host_from_file = NULL;
- gf_boolean_t ret = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
-
- host_from_file = parsing_ganesha_ha_conf ("HA_VOL_SERVER");
- if (host_from_file == NULL) {
- gf_msg (this->name, GF_LOG_INFO, errno,
- GD_MSG_GET_CONFIG_INFO_FAILED,
- "couldn't get HA_VOL_SERVER from file %s",
- GANESHA_HA_CONF);
- return _gf_false;
- }
-
- ret = gf_is_local_addr (host_from_file);
- if (ret) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_NFS_GNS_HOST_FOUND,
- "ganesha host found "
- "Hostname is %s", host_from_file);
- }
-
- GF_FREE (host_from_file);
- return ret;
-}
-
/* Check if the localhost is listed as one of nfs-ganesha nodes */
gf_boolean_t
check_host_list (void)
@@ -411,7 +379,7 @@ check_host_list (void)
glusterd_conf_t *priv = NULL;
char *hostname, *hostlist;
- int ret = _gf_false;
+ gf_boolean_t ret = _gf_false;
xlator_t *this = NULL;
this = THIS;
@@ -639,7 +607,7 @@ out:
}
int
-tear_down_cluster(void)
+tear_down_cluster(gf_boolean_t run_teardown)
{
int ret = 0;
runner_t runner = {0,};
@@ -648,7 +616,7 @@ tear_down_cluster(void)
struct dirent *entry = NULL;
char path[PATH_MAX] = {0,};
- if (is_ganesha_host()) {
+ if (run_teardown) {
runinit (&runner);
runner_add_args (&runner, "sh",
GANESHA_PREFIX"/ganesha-ha.sh", "teardown",
@@ -708,12 +676,12 @@ out:
int
-setup_cluster(void)
+setup_cluster(gf_boolean_t run_setup)
{
int ret = 0;
runner_t runner = {0,};
- if (is_ganesha_host()) {
+ if (run_setup) {
runinit (&runner);
runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh",
"setup", CONFDIR, NULL);
@@ -724,7 +692,7 @@ setup_cluster(void)
static int
-teardown (char **op_errstr)
+teardown (gf_boolean_t run_teardown, char **op_errstr)
{
runner_t runner = {0,};
int ret = 1;
@@ -734,7 +702,7 @@ teardown (char **op_errstr)
priv = THIS->private;
- ret = tear_down_cluster();
+ ret = tear_down_cluster (run_teardown);
if (ret == -1) {
gf_asprintf (op_errstr, "Cleanup of NFS-Ganesha"
" HA config failed.");
@@ -872,14 +840,14 @@ out:
}
static int
-pre_setup (char **op_errstr)
+pre_setup (gf_boolean_t run_setup, char **op_errstr)
{
int ret = 0;
ret = check_host_list();
if (ret) {
- ret = setup_cluster();
+ ret = setup_cluster(run_setup);
if (ret == -1)
gf_asprintf (op_errstr, "Failed to set up HA "
"config for NFS-Ganesha. "
@@ -926,12 +894,18 @@ glusterd_handle_ganesha_op (dict_t *dict, char **op_errstr,
}
if (strcmp (key, GLUSTERD_STORE_KEY_GANESHA_GLOBAL) == 0) {
+ /* *
+ * The setup/teardown of the pcs cluster should be performed only
+ * once. It is done on the node where the CLI command
+ * 'gluster nfs-ganesha <enable/disable>' was executed, so that
+ * node must be part of the ganesha HA cluster.
+ */
if (option) {
- ret = pre_setup (op_errstr);
+ ret = pre_setup (is_origin_glusterd (dict), op_errstr);
if (ret < 0)
goto out;
} else {
- ret = teardown (op_errstr);
+ ret = teardown (is_origin_glusterd (dict), op_errstr);
if (ret < 0)
goto out;
}
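
A minimal sketch of the gating this change introduces (function names
are this patch's own; is_origin_glusterd() is the existing glusterd
helper that returns true only on the node where the CLI command was
issued):

    int
    setup_cluster (gf_boolean_t run_setup)
    {
            runner_t runner = {0,};
            int      ret    = 0;

            /* peers skip the pcs setup; only the origin node runs it */
            if (!run_setup)
                    return ret;

            runinit (&runner);
            runner_add_args (&runner, "sh",
                             GANESHA_PREFIX"/ganesha-ha.sh",
                             "setup", CONFDIR, NULL);
            ret = runner_run (&runner);
            return ret;
    }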
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 4b88570..313e3de 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2047,7 +2047,7 @@ glusterd_op_reset_all_volume_options (xlator_t *this, dict_t *dict)
option = dict_get_str_boolean (conf->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL,
_gf_false);
if (option) {
- ret = tear_down_cluster();
+ ret = tear_down_cluster (is_origin_glusterd (dict));
if (ret == -1)
gf_msg (THIS->name, GF_LOG_WARNING, errno,
GD_MSG_DICT_GET_FAILED,
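
The same origin-node rule applies on the reset path; a hedged usage
sketch (assumes nfs-ganesha had been enabled cluster-wide):

    # Resetting all global options also tears down the HA cluster;
    # only the node where this command is run drives the teardown.
    gluster volume reset all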
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index f5e090d..7ca589c 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -1072,7 +1072,7 @@ int ganesha_manage_export (char *volname, char *value, char **op_errstr,
gf_boolean_t reboot);
gf_boolean_t glusterd_check_ganesha_export (glusterd_volinfo_t *volinfo);
int stop_ganesha (char **op_errstr);
-int tear_down_cluster (void);
+int tear_down_cluster (gf_boolean_t run_teardown);
int glusterd_op_add_brick (dict_t *dict, char **op_errstr);
int glusterd_op_remove_brick (dict_t *dict, char **op_errstr);
int glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr,
--
1.7.1