|
|
3604df |
From 45aa6c07554a20903ad12ee00b0ed9b6b403d8f0 Mon Sep 17 00:00:00 2001
|
|
|
3604df |
From: Jiffin Tony Thottan <jthottan@redhat.com>
|
|
|
3604df |
Date: Mon, 27 Jun 2016 15:08:25 +0530
|
|
|
3604df |
Subject: [PATCH 56/86] ganesha/scripts : remove 'HA_VOL_SERVER' from the code
|
|
|
3604df |
|
|
|
3604df |
The parameter HA_VOL_SERVER was initially introduced in ganesha-ha.conf to
|
|
|
3604df |
specify gluster server from which to mount the shared data volume.
|
|
|
3604df |
But after introducing a new cli for the same purpose, it became
|
|
|
3604df |
unnecessary. The existence of that parameter can lead to confusion
|


3604df |
among the users. This patch will remove/replace all the instances of
|
|
|
3604df |
HA_VOL_SERVER from the code
|
|
|
3604df |
|
|
|
3604df |
Upstream reference:
|
|
|
3604df |
>Change-Id: I638c61dcd2c21ebdb279bbb141d35bb806bd3ef0
|
|
|
3604df |
>BUG: 1350371
|
|
|
3604df |
>Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
|
|
|
3604df |
>Reviewed-on: http://review.gluster.org/14812
|
|
|
3604df |
>Tested-by: Kaleb KEITHLEY <kkeithle@redhat.com>
|
|
|
3604df |
>NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
|
|
|
3604df |
>CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
|
|
|
3604df |
>Reviewed-by: soumya k <skoduri@redhat.com>
|
|
|
3604df |
>Smoke: Gluster Build System <jenkins@build.gluster.org>
|
|
|
3604df |
>Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
|
|
|
3604df |
>Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
|
|
|
3604df |
|
|
|
3604df |
Change-Id: I638c61dcd2c21ebdb279bbb141d35bb806bd3ef0
|
|
|
3604df |
BUG: 1348954
|
|
|
3604df |
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
|
|
|
3604df |
Reviewed-on: https://code.engineering.redhat.com/gerrit/84777
|
|
|
3604df |
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
|
|
|
3604df |
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
|
|
|
3604df |
Tested-by: Atin Mukherjee <amukherj@redhat.com>
|
|
|
3604df |
---
|
|
|
3604df |
extras/ganesha/config/ganesha-ha.conf.sample | 3 -
|
|
|
3604df |
extras/ganesha/scripts/ganesha-ha.sh | 34 +++------------
|
|
|
3604df |
xlators/mgmt/glusterd/src/glusterd-ganesha.c | 62 +++++++------------------
|
|
|
3604df |
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 2 +-
|
|
|
3604df |
xlators/mgmt/glusterd/src/glusterd.h | 2 +-
|
|
|
3604df |
5 files changed, 26 insertions(+), 77 deletions(-)
|
|
|
3604df |
|
|
|
3604df |
diff --git a/extras/ganesha/config/ganesha-ha.conf.sample b/extras/ganesha/config/ganesha-ha.conf.sample
|
|
|
3604df |
index 2077800..c22892b 100644
|
|
|
3604df |
--- a/extras/ganesha/config/ganesha-ha.conf.sample
|
|
|
3604df |
+++ b/extras/ganesha/config/ganesha-ha.conf.sample
|
|
|
3604df |
@@ -2,9 +2,6 @@
|
|
|
3604df |
# must be unique within the subnet
|
|
|
3604df |
HA_NAME="ganesha-ha-360"
|
|
|
3604df |
#
|
|
|
3604df |
-# The gluster server from which to mount the shared data volume.
|
|
|
3604df |
-HA_VOL_SERVER="server1"
|
|
|
3604df |
-#
|
|
|
3604df |
# N.B. you may use short names or long names; you may not use IP addrs.
|
|
|
3604df |
# Once you select one, stay with it as it will be mildly unpleasant to
|
|
|
3604df |
# clean up if you switch later on. Ensure that all names - short and/or
|
|
|
3604df |
diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
|
|
|
3604df |
index 31c0c39..ada21cb 100644
|
|
|
3604df |
--- a/extras/ganesha/scripts/ganesha-ha.sh
|
|
|
3604df |
+++ b/extras/ganesha/scripts/ganesha-ha.sh
|
|
|
3604df |
@@ -341,35 +341,15 @@ string:"EXPORT(Path=/$VOL)" 2>&1)
|
|
|
3604df |
copy_export_config ()
|
|
|
3604df |
{
|
|
|
3604df |
local new_node=${1}
|
|
|
3604df |
- local tganesha_conf=$(mktemp)
|
|
|
3604df |
- local tganesha_exports=$(mktemp -d)
|
|
|
3604df |
- local short_host=$(hostname -s)
|
|
|
3604df |
- # avoid prompting for password, even with password-less scp
|
|
|
3604df |
- # scp $host1:$file $host2:$file prompts for the password
|
|
|
3604df |
- # Ideally all the existing nodes in the cluster should have same
|
|
|
3604df |
- # copy of the configuration files. Maybe for sanity check, copy
|
|
|
3604df |
- # the state from HA_VOL_SERVER?
|
|
|
3604df |
- if [ "${HA_VOL_SERVER}" == $(hostname) ]
|
|
|
3604df |
- then
|
|
|
3604df |
- cp ${GANESHA_CONF} ${tganesha_conf}
|
|
|
3604df |
- else
|
|
|
3604df |
- scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
|
|
|
3604df |
-${SECRET_PEM} ${HA_VOL_SERVER}:${GANESHA_CONF} $short_host:${tganesha_conf}
|
|
|
3604df |
- fi
|
|
|
3604df |
+
|
|
|
3604df |
+ # The add node should be executed from one of the nodes in ganesha
|
|
|
3604df |
+ # cluster. So all the configuration file will be available in that
|
|
|
3604df |
+ # node itself. So just copy that to new node
|
|
|
3604df |
scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
|
|
|
3604df |
-${SECRET_PEM} ${tganesha_conf} ${new_node}:${GANESHA_CONF}
|
|
|
3604df |
- rm -f ${tganesha_conf}
|
|
|
3604df |
+${SECRET_PEM} ${GANESHA_CONF} ${new_node}:${GANESHA_CONF}
|
|
|
3604df |
|
|
|
3604df |
- if [ "${HA_VOL_SERVER}" == $(hostname) ]
|
|
|
3604df |
- then
|
|
|
3604df |
- cp -r ${HA_CONFDIR}/exports ${tganesha_exports}
|
|
|
3604df |
- else
|
|
|
3604df |
- scp -r -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
|
|
|
3604df |
-${SECRET_PEM} ${HA_VOL_SERVER}:${HA_CONFDIR}/exports/ $short_host:${tganesha_exports}
|
|
|
3604df |
- fi
|
|
|
3604df |
scp -r -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
|
|
|
3604df |
-${SECRET_PEM} ${tganesha_exports}/exports ${new_node}:${HA_CONFDIR}/
|
|
|
3604df |
- rm -rf ${tganesha_exports}
|
|
|
3604df |
+${SECRET_PEM} ${HA_CONFDIR}/exports/ ${new_node}:${HA_CONFDIR}/
|
|
|
3604df |
}
|
|
|
3604df |
|
|
|
3604df |
|
|
|
3604df |
@@ -867,8 +847,6 @@ main()
|
|
|
3604df |
# ignore any comment lines
|
|
|
3604df |
cfgline=$(grep ^HA_NAME= ${ha_conf})
|
|
|
3604df |
eval $(echo ${cfgline} | grep -F HA_NAME=)
|
|
|
3604df |
- cfgline=$(grep ^HA_VOL_SERVER= ${ha_conf})
|
|
|
3604df |
- eval $(echo ${cfgline} | grep -F HA_VOL_SERVER=)
|
|
|
3604df |
cfgline=$(grep ^HA_CLUSTER_NODES= ${ha_conf})
|
|
|
3604df |
eval $(echo ${cfgline} | grep -F HA_CLUSTER_NODES=)
|
|
|
3604df |
fi
|
|
|
3604df |
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
|
|
|
3604df |
index 3d9a10e..2406519 100644
|
|
|
3604df |
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
|
|
|
3604df |
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
|
|
|
3604df |
@@ -364,46 +364,14 @@ out:
|
|
|
3604df |
return ret;
|
|
|
3604df |
}
|
|
|
3604df |
|
|
|
3604df |
-/* Following 2 functions parse GANESHA_HA_CONF
|
|
|
3604df |
+/* Following function parse GANESHA_HA_CONF
|
|
|
3604df |
* The sample file looks like below,
|
|
|
3604df |
* HA_NAME="ganesha-ha-360"
|
|
|
3604df |
* HA_VOL_NAME="ha-state"
|
|
|
3604df |
- * HA_VOL_MNT="/mount-point"
|
|
|
3604df |
- * HA_VOL_SERVER="server1"
|
|
|
3604df |
* HA_CLUSTER_NODES="server1,server2"
|
|
|
3604df |
* VIP_rhs_1="10.x.x.x"
|
|
|
3604df |
* VIP_rhs_2="10.x.x.x." */
|
|
|
3604df |
|
|
|
3604df |
-gf_boolean_t
|
|
|
3604df |
-is_ganesha_host (void)
|
|
|
3604df |
-{
|
|
|
3604df |
- char *host_from_file = NULL;
|
|
|
3604df |
- gf_boolean_t ret = _gf_false;
|
|
|
3604df |
- xlator_t *this = NULL;
|
|
|
3604df |
-
|
|
|
3604df |
- this = THIS;
|
|
|
3604df |
-
|
|
|
3604df |
- host_from_file = parsing_ganesha_ha_conf ("HA_VOL_SERVER");
|
|
|
3604df |
- if (host_from_file == NULL) {
|
|
|
3604df |
- gf_msg (this->name, GF_LOG_INFO, errno,
|
|
|
3604df |
- GD_MSG_GET_CONFIG_INFO_FAILED,
|
|
|
3604df |
- "couldn't get HA_VOL_SERVER from file %s",
|
|
|
3604df |
- GANESHA_HA_CONF);
|
|
|
3604df |
- return _gf_false;
|
|
|
3604df |
- }
|
|
|
3604df |
-
|
|
|
3604df |
- ret = gf_is_local_addr (host_from_file);
|
|
|
3604df |
- if (ret) {
|
|
|
3604df |
- gf_msg (this->name, GF_LOG_INFO, 0,
|
|
|
3604df |
- GD_MSG_NFS_GNS_HOST_FOUND,
|
|
|
3604df |
- "ganesha host found "
|
|
|
3604df |
- "Hostname is %s", host_from_file);
|
|
|
3604df |
- }
|
|
|
3604df |
-
|
|
|
3604df |
- GF_FREE (host_from_file);
|
|
|
3604df |
- return ret;
|
|
|
3604df |
-}
|
|
|
3604df |
-
|
|
|
3604df |
/* Check if the localhost is listed as one of nfs-ganesha nodes */
|
|
|
3604df |
gf_boolean_t
|
|
|
3604df |
check_host_list (void)
|
|
|
3604df |
@@ -411,7 +379,7 @@ check_host_list (void)
|
|
|
3604df |
|
|
|
3604df |
glusterd_conf_t *priv = NULL;
|
|
|
3604df |
char *hostname, *hostlist;
|
|
|
3604df |
- int ret = _gf_false;
|
|
|
3604df |
+ gf_boolean_t ret = _gf_false;
|
|
|
3604df |
xlator_t *this = NULL;
|
|
|
3604df |
|
|
|
3604df |
this = THIS;
|
|
|
3604df |
@@ -639,7 +607,7 @@ out:
|
|
|
3604df |
}
|
|
|
3604df |
|
|
|
3604df |
int
|
|
|
3604df |
-tear_down_cluster(void)
|
|
|
3604df |
+tear_down_cluster(gf_boolean_t run_teardown)
|
|
|
3604df |
{
|
|
|
3604df |
int ret = 0;
|
|
|
3604df |
runner_t runner = {0,};
|
|
|
3604df |
@@ -648,7 +616,7 @@ tear_down_cluster(void)
|
|
|
3604df |
struct dirent *entry = NULL;
|
|
|
3604df |
char path[PATH_MAX] = {0,};
|
|
|
3604df |
|
|
|
3604df |
- if (is_ganesha_host()) {
|
|
|
3604df |
+ if (run_teardown) {
|
|
|
3604df |
runinit (&runner);
|
|
|
3604df |
runner_add_args (&runner, "sh",
|
|
|
3604df |
GANESHA_PREFIX"/ganesha-ha.sh", "teardown",
|
|
|
3604df |
@@ -708,12 +676,12 @@ out:
|
|
|
3604df |
|
|
|
3604df |
|
|
|
3604df |
int
|
|
|
3604df |
-setup_cluster(void)
|
|
|
3604df |
+setup_cluster(gf_boolean_t run_setup)
|
|
|
3604df |
{
|
|
|
3604df |
int ret = 0;
|
|
|
3604df |
runner_t runner = {0,};
|
|
|
3604df |
|
|
|
3604df |
- if (is_ganesha_host()) {
|
|
|
3604df |
+ if (run_setup) {
|
|
|
3604df |
runinit (&runner);
|
|
|
3604df |
runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh",
|
|
|
3604df |
"setup", CONFDIR, NULL);
|
|
|
3604df |
@@ -724,7 +692,7 @@ setup_cluster(void)
|
|
|
3604df |
|
|
|
3604df |
|
|
|
3604df |
static int
|
|
|
3604df |
-teardown (char **op_errstr)
|
|
|
3604df |
+teardown (gf_boolean_t run_teardown, char **op_errstr)
|
|
|
3604df |
{
|
|
|
3604df |
runner_t runner = {0,};
|
|
|
3604df |
int ret = 1;
|
|
|
3604df |
@@ -734,7 +702,7 @@ teardown (char **op_errstr)
|
|
|
3604df |
|
|
|
3604df |
priv = THIS->private;
|
|
|
3604df |
|
|
|
3604df |
- ret = tear_down_cluster();
|
|
|
3604df |
+ ret = tear_down_cluster (run_teardown);
|
|
|
3604df |
if (ret == -1) {
|
|
|
3604df |
gf_asprintf (op_errstr, "Cleanup of NFS-Ganesha"
|
|
|
3604df |
" HA config failed.");
|
|
|
3604df |
@@ -872,14 +840,14 @@ out:
|
|
|
3604df |
}
|
|
|
3604df |
|
|
|
3604df |
static int
|
|
|
3604df |
-pre_setup (char **op_errstr)
|
|
|
3604df |
+pre_setup (gf_boolean_t run_setup, char **op_errstr)
|
|
|
3604df |
{
|
|
|
3604df |
int ret = 0;
|
|
|
3604df |
|
|
|
3604df |
ret = check_host_list();
|
|
|
3604df |
|
|
|
3604df |
if (ret) {
|
|
|
3604df |
- ret = setup_cluster();
|
|
|
3604df |
+ ret = setup_cluster(run_setup);
|
|
|
3604df |
if (ret == -1)
|
|
|
3604df |
gf_asprintf (op_errstr, "Failed to set up HA "
|
|
|
3604df |
"config for NFS-Ganesha. "
|
|
|
3604df |
@@ -926,12 +894,18 @@ glusterd_handle_ganesha_op (dict_t *dict, char **op_errstr,
|
|
|
3604df |
}
|
|
|
3604df |
|
|
|
3604df |
if (strcmp (key, GLUSTERD_STORE_KEY_GANESHA_GLOBAL) == 0) {
|
|
|
3604df |
+ /* *
|
|
|
3604df |
+ * The set up/teardown of pcs cluster should be performed only
|
|
|
3604df |
+ * once. This will done on the node in which the cli command
|
|
|
3604df |
+ * 'gluster nfs-ganesha <enable/disable>' got executed. So that
|
|
|
3604df |
+ * node should part of ganesha HA cluster
|
|
|
3604df |
+ */
|
|
|
3604df |
if (option) {
|
|
|
3604df |
- ret = pre_setup (op_errstr);
|
|
|
3604df |
+ ret = pre_setup (is_origin_glusterd (dict), op_errstr);
|
|
|
3604df |
if (ret < 0)
|
|
|
3604df |
goto out;
|
|
|
3604df |
} else {
|
|
|
3604df |
- ret = teardown (op_errstr);
|
|
|
3604df |
+ ret = teardown (is_origin_glusterd (dict), op_errstr);
|
|
|
3604df |
if (ret < 0)
|
|
|
3604df |
goto out;
|
|
|
3604df |
}
|
|
|
3604df |
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
|
|
|
3604df |
index 4b88570..313e3de 100644
|
|
|
3604df |
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
|
|
|
3604df |
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
|
|
|
3604df |
@@ -2047,7 +2047,7 @@ glusterd_op_reset_all_volume_options (xlator_t *this, dict_t *dict)
|
|
|
3604df |
option = dict_get_str_boolean (conf->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL,
|
|
|
3604df |
_gf_false);
|
|
|
3604df |
if (option) {
|
|
|
3604df |
- ret = tear_down_cluster();
|
|
|
3604df |
+ ret = tear_down_cluster (is_origin_glusterd (dict));
|
|
|
3604df |
if (ret == -1)
|
|
|
3604df |
gf_msg (THIS->name, GF_LOG_WARNING, errno,
|
|
|
3604df |
GD_MSG_DICT_GET_FAILED,
|
|
|
3604df |
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
|
|
|
3604df |
index f5e090d..7ca589c 100644
|
|
|
3604df |
--- a/xlators/mgmt/glusterd/src/glusterd.h
|
|
|
3604df |
+++ b/xlators/mgmt/glusterd/src/glusterd.h
|
|
|
3604df |
@@ -1072,7 +1072,7 @@ int ganesha_manage_export (char *volname, char *value, char **op_errstr,
|
|
|
3604df |
gf_boolean_t reboot);
|
|
|
3604df |
gf_boolean_t glusterd_check_ganesha_export (glusterd_volinfo_t *volinfo);
|
|
|
3604df |
int stop_ganesha (char **op_errstr);
|
|
|
3604df |
-int tear_down_cluster (void);
|
|
|
3604df |
+int tear_down_cluster (gf_boolean_t run_teardown);
|
|
|
3604df |
int glusterd_op_add_brick (dict_t *dict, char **op_errstr);
|
|
|
3604df |
int glusterd_op_remove_brick (dict_t *dict, char **op_errstr);
|
|
|
3604df |
int glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr,
|
|
|
3604df |
--
|
|
|
3604df |
1.7.1
|
|
|
3604df |
|