From a3470f20525912eaf15a6ef67a9f037c3df5fda2 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Oct 30 2018 04:51:24 +0000 Subject: import glusterfs-3.12.2-18.el7 --- diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..138577f --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/glusterfs-3.12.2.tar.gz diff --git a/.glusterfs.metadata b/.glusterfs.metadata new file mode 100644 index 0000000..91321f9 --- /dev/null +++ b/.glusterfs.metadata @@ -0,0 +1 @@ +561c9bf5aa8fb767dc51fc20a7849c8888a2e5cd SOURCES/glusterfs-3.12.2.tar.gz diff --git a/README.md b/README.md deleted file mode 100644 index 0e7897f..0000000 --- a/README.md +++ /dev/null @@ -1,5 +0,0 @@ -The master branch has no content - -Look at the c7 branch if you are working with CentOS-7, or the c4/c5/c6 branch for CentOS-4, 5 or 6 - -If you find this file in a distro specific branch, it means that no content has been checked in yet diff --git a/SOURCES/0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch b/SOURCES/0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch new file mode 100644 index 0000000..6be9ef0 --- /dev/null +++ b/SOURCES/0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch @@ -0,0 +1,1345 @@ +From 804ac051cc8be7ff0bf0791c0b53db04edce0926 Mon Sep 17 00:00:00 2001 +From: Kaushal M +Date: Thu, 11 Jun 2015 18:21:17 +0530 +Subject: [PATCH 02/74] glusterd: fix op-versions for RHS backwards + compatability + +Backport of https://code.engineering.redhat.com/gerrit/#/c/60485/ + +This change fixes the op-version of different features and checks to maintain +backwards compatability with RHS-3.0 and before. + +Label: DOWNSTREAM ONLY + +Change-Id: Icb282444da179b12fbd6ed9f491514602f1a38c2 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/70348 +--- + libglusterfs/src/globals.h | 45 +++-- + xlators/mgmt/glusterd/src/glusterd-brick-ops.c | 8 +- + xlators/mgmt/glusterd/src/glusterd-handler.c | 14 +- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 16 +- + xlators/mgmt/glusterd/src/glusterd-peer-utils.c | 8 +- + xlators/mgmt/glusterd/src/glusterd-rebalance.c | 4 +- + xlators/mgmt/glusterd/src/glusterd-replace-brick.c | 4 +- + xlators/mgmt/glusterd/src/glusterd-rpc-ops.c | 6 +- + xlators/mgmt/glusterd/src/glusterd-sm.c | 2 +- + .../mgmt/glusterd/src/glusterd-snapshot-utils.c | 12 +- + xlators/mgmt/glusterd/src/glusterd-snapshot.c | 4 +- + xlators/mgmt/glusterd/src/glusterd-store.c | 27 +-- + xlators/mgmt/glusterd/src/glusterd-syncop.c | 2 +- + xlators/mgmt/glusterd/src/glusterd-tier.c | 2 +- + xlators/mgmt/glusterd/src/glusterd-utils.c | 8 +- + xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 12 +- + xlators/mgmt/glusterd/src/glusterd-volume-set.c | 198 +++++++++++---------- + 17 files changed, 193 insertions(+), 179 deletions(-) + +diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h +index 365183d..bd7cffe 100644 +--- a/libglusterfs/src/globals.h ++++ b/libglusterfs/src/globals.h +@@ -18,23 +18,28 @@ + #define GD_MIN_OP_VERSION_KEY "minimum-operating-version" + #define GD_MAX_OP_VERSION_KEY "maximum-operating-version" + +-/* Gluster versions - OP-VERSION mapping ++/* RHS versions - OP-VERSION mapping + * +- * 3.3.x - 1 +- * 3.4.x - 2 +- * 3.5.0 - 3 +- * 3.5.1 - 30501 +- * 3.6.0 - 30600 +- * 3.7.0 - 30700 +- * 3.7.1 - 30701 +- * 3.7.2 - 30702 ++ * RHS-2.0 Z - 1 ++ * RHS-2.1 Z - 2 ++ * RHS-2.1 u5 - 20105 ++ * RHS-3.0 - 30000 ++ * RHS-3.0.4 - 30004 ++ * RHGS-3.1 - 30702 + * +- * Starting with Gluster v3.6, the op-version will be 
multi-digit integer values +- * based on the Glusterfs version, instead of a simply incrementing integer +- * value. The op-version for a given X.Y.Z release will be an integer XYZ, with +- * Y and Z 2 digit always 2 digits wide and padded with 0 when needed. This +- * should allow for some gaps between two Y releases for backports of features +- * in Z releases. ++ * ++ * NOTE: ++ * Starting with RHS-3.0, the op-version will be multi-digit integer values ++ * based on the RHS version, instead of a simply incrementing integer value. The ++ * op-version for a given RHS X(Major).Y(Minor).Z(Update) release will be an ++ * integer with digits XYZ. The Y and Z values will be 2 digits wide always ++ * padded with 0 as needed. This should allow for some gaps between two Y ++ * releases for backports of features in Z releases. ++ * ++ * NOTE: ++ * Starting with RHGS-3.1, the op-version will be the same as the upstream ++ * GlusterFS op-versions. This is to allow proper access to upstream clients of ++ * version 3.7.x or greater, proper access to the RHGS volumes. + */ + #define GD_OP_VERSION_MIN 1 /* MIN is the fresh start op-version, mostly + should not change */ +@@ -44,7 +49,13 @@ + introduction of newer + versions */ + +-#define GD_OP_VERSION_3_6_0 30600 /* Op-Version for GlusterFS 3.6.0 */ ++#define GD_OP_VERSION_RHS_3_0 30000 /* Op-Version of RHS 3.0 */ ++ ++#define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_RHS_3_0 ++ ++#define GD_OP_VERSION_RHS_2_1_5 20105 /* RHS 2.1 update 5 */ ++ ++#define GD_OP_VERSION_RHS_3_0_4 30004 /* Op-Version of RHS 3.0.4 */ + + #define GD_OP_VERSION_3_7_0 30700 /* Op-version for GlusterFS 3.7.0 */ + +@@ -90,8 +101,6 @@ + + #define GD_OP_VERSION_3_12_2 31202 /* Op-version for GlusterFS 3.12.2 */ + +-#define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_3_6_0 +- + #include "xlator.h" + + /* THIS */ +diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c +index 6d17ff4..e88fa3f 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c ++++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c +@@ -1379,7 +1379,7 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count, + /* A bricks mount dir is required only by snapshots which were + * introduced in gluster-3.6.0 + */ +- if (conf->op_version >= GD_OP_VERSION_3_6_0) { ++ if (conf->op_version >= GD_OP_VERSION_RHS_3_0) { + brick_mount_dir = NULL; + + snprintf (key, sizeof(key), "brick%d.mount_dir", i); +@@ -1926,7 +1926,7 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict) + /* A bricks mount dir is required only by snapshots which were + * introduced in gluster-3.6.0 + */ +- if (conf->op_version >= GD_OP_VERSION_3_6_0) { ++ if (conf->op_version >= GD_OP_VERSION_RHS_3_0) { + ret = glusterd_get_brick_mount_dir + (brickinfo->path, brickinfo->hostname, + brickinfo->mount_dir); +@@ -2266,12 +2266,12 @@ glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr) + } + + /* Check if the connected clients are all of version +- * glusterfs-3.6 and higher. This is needed to prevent some data ++ * RHS-2.1u5 and higher. This is needed to prevent some data + * loss issues that could occur when older clients are connected + * when rebalance is run. 
+ */ + ret = glusterd_check_client_op_version_support +- (volname, GD_OP_VERSION_3_6_0, NULL); ++ (volname, GD_OP_VERSION_RHS_2_1_5, NULL); + if (ret) { + ret = gf_asprintf (op_errstr, "Volume %s has one or " + "more connected clients of a version" +diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c +index 185186a..af9a796 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handler.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c +@@ -754,7 +754,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx, + } + + /* Based on the op_version, acquire a cluster or mgmt_v3 lock */ +- if (priv->op_version < GD_OP_VERSION_3_6_0) { ++ if (priv->op_version < GD_OP_VERSION_RHS_3_0) { + ret = glusterd_lock (MY_UUID); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, +@@ -804,7 +804,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx, + local_locking_done: + /* If no volname is given as a part of the command, locks will + * not be held, hence sending stage event. */ +- if (volname || (priv->op_version < GD_OP_VERSION_3_6_0)) ++ if (volname || (priv->op_version < GD_OP_VERSION_RHS_3_0)) + event_type = GD_OP_EVENT_START_LOCK; + else { + txn_op_info.state.state = GD_OP_STATE_LOCK_SENT; +@@ -836,7 +836,7 @@ out: + if (locked && ret) { + /* Based on the op-version, we release the + * cluster or mgmt_v3 lock */ +- if (priv->op_version < GD_OP_VERSION_3_6_0) ++ if (priv->op_version < GD_OP_VERSION_RHS_3_0) + glusterd_unlock (MY_UUID); + else { + ret = glusterd_mgmt_v3_unlock (volname, MY_UUID, +@@ -4313,11 +4313,11 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req) + } + + if ((cmd & GF_CLI_STATUS_SNAPD) && +- (conf->op_version < GD_OP_VERSION_3_6_0)) { ++ (conf->op_version < GD_OP_VERSION_RHS_3_0)) { + snprintf (err_str, sizeof (err_str), "The cluster is operating " + "at a lesser version than %d. Getting the status of " + "snapd is not allowed in this state", +- GD_OP_VERSION_3_6_0); ++ GD_OP_VERSION_RHS_3_0); + ret = -1; + goto out; + } +@@ -4337,7 +4337,7 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req) + snprintf (err_str, sizeof (err_str), "The cluster is operating " + "at a lesser version than %d. 
Getting the status of " + "tierd is not allowed in this state", +- GD_OP_VERSION_3_6_0); ++ GD_OP_VERSION_RHS_3_0); + ret = -1; + goto out; + } +@@ -6287,7 +6287,7 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata, + glusterd_friend_sm_state_name_get (peerinfo->state.state)); + + if (peerinfo->connected) { +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0) { + glusterd_get_lock_owner (&uuid); + if (!gf_uuid_is_null (uuid) && + !gf_uuid_compare (peerinfo->uuid, uuid)) +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 7bb3d53..6d5b8cf 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -191,7 +191,7 @@ glusterd_generate_txn_id (dict_t *dict, uuid_t **txn_id) + if (!*txn_id) + goto out; + +- if (priv->op_version < GD_OP_VERSION_3_6_0) ++ if (priv->op_version < GD_OP_VERSION_RHS_3_0) + gf_uuid_copy (**txn_id, priv->global_txn_id); + else + gf_uuid_generate (**txn_id); +@@ -1824,11 +1824,11 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr) + } + + if ((cmd & GF_CLI_STATUS_SNAPD) && +- (priv->op_version < GD_OP_VERSION_3_6_0)) { ++ (priv->op_version < GD_OP_VERSION_RHS_3_0)) { + snprintf (msg, sizeof (msg), "The cluster is operating at " + "version less than %d. Getting the " + "status of snapd is not allowed in this state.", +- GD_OP_VERSION_3_6_0); ++ GD_OP_VERSION_RHS_3_0); + ret = -1; + goto out; + } +@@ -3872,7 +3872,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx) + continue; + + /* Based on the op_version, acquire a cluster or mgmt_v3 lock */ +- if (priv->op_version < GD_OP_VERSION_3_6_0) { ++ if (priv->op_version < GD_OP_VERSION_RHS_3_0) { + proc = &peerinfo->mgmt->proctable + [GLUSTERD_MGMT_CLUSTER_LOCK]; + if (proc->fn) { +@@ -3983,7 +3983,7 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx) + continue; + /* Based on the op_version, + * release the cluster or mgmt_v3 lock */ +- if (priv->op_version < GD_OP_VERSION_3_6_0) { ++ if (priv->op_version < GD_OP_VERSION_RHS_3_0) { + proc = &peerinfo->mgmt->proctable + [GLUSTERD_MGMT_CLUSTER_UNLOCK]; + if (proc->fn) { +@@ -5010,7 +5010,7 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx) + count = brick_index_max + other_count + 1; + + /* +- * a glusterd lesser than version 3.7 will be sending the ++ * a glusterd lesser than version RHS-3.0.4 will be sending the + * rdma port in older key. Changing that value from here + * to support backward compatibility + */ +@@ -5029,7 +5029,7 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx) + } + } + glusterd_volinfo_find (volname, &volinfo); +- if (conf->op_version < GD_OP_VERSION_3_7_0 && ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0_4 && + volinfo->transport_type == GF_TRANSPORT_RDMA) { + ret = glusterd_op_modify_port_key (op_ctx, + brick_index_max); +@@ -5670,7 +5670,7 @@ glusterd_op_txn_complete (uuid_t *txn_id) + glusterd_op_clear_errstr (); + + /* Based on the op-version, we release the cluster or mgmt_v3 lock */ +- if (priv->op_version < GD_OP_VERSION_3_6_0) { ++ if (priv->op_version < GD_OP_VERSION_RHS_3_0) { + ret = glusterd_unlock (MY_UUID); + /* unlock cant/shouldnt fail here!! 
*/ + if (ret) +diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c +index 31f9e87..592aa16 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c +@@ -554,7 +554,7 @@ out: + * @prefix. All the parameters are compulsory. + * + * The complete address list is added to the dict only if the cluster op-version +- * is >= GD_OP_VERSION_3_6_0 ++ * is >= GD_OP_VERSION_3_7_0 + */ + int + gd_add_friend_to_dict (glusterd_peerinfo_t *friend, dict_t *dict, +@@ -609,7 +609,7 @@ gd_add_friend_to_dict (glusterd_peerinfo_t *friend, dict_t *dict, + goto out; + } + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_3_7_0) { + ret = 0; + goto out; + } +@@ -799,7 +799,7 @@ gd_update_peerinfo_from_dict (glusterd_peerinfo_t *peerinfo, dict_t *dict, + GF_FREE (peerinfo->hostname); + peerinfo->hostname = gf_strdup (hostname); + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_3_7_0) { + ret = 0; + goto out; + } +@@ -913,7 +913,7 @@ gd_add_peer_hostnames_to_dict (glusterd_peerinfo_t *peerinfo, dict_t *dict, + conf = this->private; + GF_VALIDATE_OR_GOTO (this->name, (conf != NULL), out); + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_3_7_0) { + ret = 0; + goto out; + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c +index fcafb5b..76191c4 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c ++++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c +@@ -678,13 +678,13 @@ glusterd_op_stage_rebalance (dict_t *dict, char **op_errstr) + case GF_DEFRAG_CMD_START: + case GF_DEFRAG_CMD_START_LAYOUT_FIX: + /* Check if the connected clients are all of version +- * glusterfs-3.6 and higher. This is needed to prevent some data ++ * RHS-2.1u5 and higher. This is needed to prevent some data + * loss issues that could occur when older clients are connected + * when rebalance is run. 
This check can be bypassed by using + * 'force' + */ + ret = glusterd_check_client_op_version_support +- (volname, GD_OP_VERSION_3_6_0, NULL); ++ (volname, GD_OP_VERSION_RHS_2_1_5, NULL); + if (ret) { + ret = gf_asprintf (op_errstr, "Volume %s has one or " + "more connected clients of a version" +diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c +index 08a6df0..18fc741 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c ++++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c +@@ -305,7 +305,7 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr, + if (ret) + goto out; + +- } else if (priv->op_version >= GD_OP_VERSION_3_6_0) { ++ } else if (priv->op_version >= GD_OP_VERSION_RHS_3_0) { + /* A bricks mount dir is required only by snapshots which were + * introduced in gluster-3.6.0 + */ +@@ -389,7 +389,7 @@ glusterd_op_perform_replace_brick (glusterd_volinfo_t *volinfo, + /* A bricks mount dir is required only by snapshots which were + * introduced in gluster-3.6.0 + */ +- if (conf->op_version >= GD_OP_VERSION_3_6_0) { ++ if (conf->op_version >= GD_OP_VERSION_RHS_3_0) { + ret = dict_get_str (dict, "brick1.mount_dir", &brick_mount_dir); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, errno, +diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c +index ab52e8d..86e1256 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c ++++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c +@@ -302,11 +302,11 @@ __glusterd_probe_cbk (struct rpc_req *req, struct iovec *iov, + * we need to add the new hostname to the peer. + * + * This addition should only be done for cluster op-version >= +- * GD_OP_VERSION_3_6_0 as address lists are only supported from then on. ++ * GD_OP_VERSION_3_7_0 as address lists are only supported from then on. + * Also, this update should only be done when an explicit CLI probe + * command was used to begin the probe process. 
+ */ +- if ((conf->op_version >= GD_OP_VERSION_3_6_0) && ++ if ((conf->op_version >= GD_OP_VERSION_3_7_0) && + (gf_uuid_compare (rsp.uuid, peerinfo->uuid) == 0)) { + ctx = ((call_frame_t *)myframe)->local; + /* Presence of ctx->req implies this probe was started by a cli +@@ -1591,7 +1591,7 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this, + goto out; + } + +- if (priv->op_version >= GD_OP_VERSION_3_6_0) { ++ if (priv->op_version >= GD_OP_VERSION_RHS_3_0) { + ret = glusterd_add_missed_snaps_to_export_dict (peer_data); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, +diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c +index f83e851..6c56837 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-sm.c +@@ -942,7 +942,7 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx) + /* Compare missed_snapshot list with the peer * + * if volume comparison is successful */ + if ((op_ret == 0) && +- (conf->op_version >= GD_OP_VERSION_3_6_0)) { ++ (conf->op_version >= GD_OP_VERSION_RHS_3_0)) { + ret = glusterd_import_friend_missed_snap_list (ev_ctx->vols); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, +diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c +index 9e2a75f..6fb49c3 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c +@@ -468,7 +468,7 @@ gd_add_brick_snap_details_to_dict (dict_t *dict, char *prefix, + GF_VALIDATE_OR_GOTO (this->name, (prefix != NULL), out); + GF_VALIDATE_OR_GOTO (this->name, (brickinfo != NULL), out); + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0) { + ret = 0; + goto out; + } +@@ -550,7 +550,7 @@ gd_add_vol_snap_details_to_dict (dict_t *dict, char *prefix, + GF_VALIDATE_OR_GOTO (this->name, (volinfo != NULL), out); + GF_VALIDATE_OR_GOTO (this->name, (prefix != NULL), out); + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0) { + ret = 0; + goto out; + } +@@ -878,7 +878,7 @@ gd_import_new_brick_snap_details (dict_t *dict, char *prefix, + GF_VALIDATE_OR_GOTO (this->name, (prefix != NULL), out); + GF_VALIDATE_OR_GOTO (this->name, (brickinfo != NULL), out); + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0) { + ret = 0; + goto out; + } +@@ -939,8 +939,8 @@ out: + * Imports the snapshot details of a volume if required and available + * + * Snapshot details will be imported only if cluster.op_version is greater than +- * or equal to GD_OP_VERSION_3_6_0, the op-version from which volume snapshot is +- * supported. ++ * or equal to GD_OP_VERSION_RHS_3_0, the op-version from which volume snapshot ++ * is supported. 
+ */ + int + gd_import_volume_snap_details (dict_t *dict, glusterd_volinfo_t *volinfo, +@@ -962,7 +962,7 @@ gd_import_volume_snap_details (dict_t *dict, glusterd_volinfo_t *volinfo, + GF_VALIDATE_OR_GOTO (this->name, (prefix != NULL), out); + GF_VALIDATE_OR_GOTO (this->name, (volname != NULL), out); + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0) { + ret = 0; + goto out; + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c +index 31f4d95..6306d29 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c ++++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c +@@ -9341,7 +9341,7 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req) + goto out; + } + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0) { + snprintf (err_str, sizeof (err_str), "Cluster operating version" + " is lesser than the supported version " + "for a snapshot"); +@@ -9349,7 +9349,7 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req) + gf_msg (this->name, GF_LOG_ERROR, 0, + GD_MSG_UNSUPPORTED_VERSION, + "%s (%d < %d)", err_str, +- conf->op_version, GD_OP_VERSION_3_6_0); ++ conf->op_version, GD_OP_VERSION_RHS_3_0); + ret = -1; + goto out; + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c +index 4d22b63..229391a 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-store.c ++++ b/xlators/mgmt/glusterd/src/glusterd-store.c +@@ -302,7 +302,7 @@ gd_store_brick_snap_details_write (int fd, glusterd_brickinfo_t *brickinfo) + GF_VALIDATE_OR_GOTO (this->name, (fd > 0), out); + GF_VALIDATE_OR_GOTO (this->name, (brickinfo != NULL), out); + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0) { + ret = 0; + goto out; + } +@@ -799,7 +799,7 @@ glusterd_volume_write_snap_details (int fd, glusterd_volinfo_t *volinfo) + GF_VALIDATE_OR_GOTO (this->name, (fd > 0), out); + GF_VALIDATE_OR_GOTO (this->name, (volinfo != NULL), out); + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0) { + ret = 0; + goto out; + } +@@ -968,7 +968,7 @@ glusterd_volume_exclude_options_write (int fd, glusterd_volinfo_t *volinfo) + goto out; + } + +- if (conf->op_version >= GD_OP_VERSION_3_6_0) { ++ if (conf->op_version >= GD_OP_VERSION_RHS_3_0) { + snprintf (buf, sizeof (buf), "%d", volinfo->disperse_count); + ret = gf_store_save_value (fd, + GLUSTERD_STORE_KEY_VOL_DISPERSE_CNT, +@@ -2339,7 +2339,7 @@ glusterd_store_retrieve_snapd (glusterd_volinfo_t *volinfo) + conf = THIS->private; + GF_ASSERT (volinfo); + +- if (conf->op_version < GD_OP_VERSION_3_6_0) { ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0) { + ret = 0; + goto out; + } +@@ -2347,15 +2347,16 @@ glusterd_store_retrieve_snapd (glusterd_volinfo_t *volinfo) + /* + * This is needed for upgrade situations. Say a volume is created with + * older version of glusterfs and upgraded to a glusterfs version equal +- * to or greater than GD_OP_VERSION_3_6_0. The older glusterd would not +- * have created the snapd.info file related to snapshot daemon for user +- * serviceable snapshots. So as part of upgrade when the new glusterd +- * starts, as part of restore (restoring the volume to be precise), it +- * tries to snapd related info from snapd.info file. But since there was +- * no such file till now, the restore operation fails. 
Thus, to prevent +- * it from happening check whether user serviceable snapshots features +- * is enabled before restoring snapd. If its disbaled, then simply +- * exit by returning success (without even checking for the snapd.info). ++ * to or greater than GD_OP_VERSION_RHS_3_0. The older glusterd would ++ * not have created the snapd.info file related to snapshot daemon for ++ * user serviceable snapshots. So as part of upgrade when the new ++ * glusterd starts, as part of restore (restoring the volume to be ++ * precise), it tries to snapd related info from snapd.info file. But ++ * since there was no such file till now, the restore operation fails. ++ * Thus, to prevent it from happening check whether user serviceable ++ * snapshots features is enabled before restoring snapd. If its ++ * disbaled, then simply exit by returning success (without even ++ * checking for the snapd.info). + */ + + if (!dict_get_str_boolean (volinfo->dict, "features.uss", _gf_false)) { +diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c +index 4be3d97..066c7f9 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c ++++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c +@@ -1863,7 +1863,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req) + goto out; + } + +- if (conf->op_version < GD_OP_VERSION_3_6_0) ++ if (conf->op_version < GD_OP_VERSION_RHS_3_0) + cluster_lock = _gf_true; + + /* Based on the op_version, acquire a cluster or mgmt_v3 lock */ +diff --git a/xlators/mgmt/glusterd/src/glusterd-tier.c b/xlators/mgmt/glusterd/src/glusterd-tier.c +index 45f6ac3..28f02e75 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-tier.c ++++ b/xlators/mgmt/glusterd/src/glusterd-tier.c +@@ -921,7 +921,7 @@ glusterd_op_stage_tier (dict_t *dict, char **op_errstr, dict_t *rsp_dict) + * 'force' + */ + ret = glusterd_check_client_op_version_support +- (volname, GD_OP_VERSION_3_6_0, NULL); ++ (volname, GD_OP_VERSION_RHS_3_0, NULL); + if (ret) { + ret = gf_asprintf (op_errstr, "Volume %s has one or " + "more connected clients of a version" +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c +index e38f963..f34e218 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -11758,10 +11758,10 @@ gd_update_volume_op_versions (glusterd_volinfo_t *volinfo) + } + + if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) { +- if (volinfo->op_version < GD_OP_VERSION_3_6_0) +- volinfo->op_version = GD_OP_VERSION_3_6_0; +- if (volinfo->client_op_version < GD_OP_VERSION_3_6_0) +- volinfo->client_op_version = GD_OP_VERSION_3_6_0; ++ if (volinfo->op_version < GD_OP_VERSION_3_7_0) ++ volinfo->op_version = GD_OP_VERSION_3_7_0; ++ if (volinfo->client_op_version < GD_OP_VERSION_3_7_0) ++ volinfo->client_op_version = GD_OP_VERSION_3_7_0; + } + + return; +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +index 9d34073..834acab 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +@@ -1325,7 +1325,7 @@ glusterd_op_stage_create_volume (dict_t *dict, char **op_errstr, + /* A bricks mount dir is required only by snapshots which were + * introduced in gluster-3.6.0 + */ +- if (priv->op_version >= GD_OP_VERSION_3_6_0) { ++ if (priv->op_version >= GD_OP_VERSION_RHS_3_0) { + ret = glusterd_get_brick_mount_dir + (brick_info->path, brick_info->hostname, + brick_info->mount_dir); +@@ 
-1611,7 +1611,7 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr, + /* A bricks mount dir is required only by snapshots which were + * introduced in gluster-3.6.0 + */ +- if (priv->op_version >= GD_OP_VERSION_3_6_0) { ++ if (priv->op_version >= GD_OP_VERSION_RHS_3_0) { + if (strlen(brickinfo->mount_dir) < 1) { + ret = glusterd_get_brick_mount_dir + (brickinfo->path, brickinfo->hostname, +@@ -2320,10 +2320,10 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr) + "redundancy count for volume %s", volname); + goto out; + } +- if (priv->op_version < GD_OP_VERSION_3_6_0) { ++ if (priv->op_version < GD_OP_VERSION_3_7_0) { + gf_msg (this->name, GF_LOG_ERROR, 0, + GD_MSG_UNSUPPORTED_VERSION, "Disperse volume " +- "needs op-version 3.6.0 or higher"); ++ "needs op-version 30700 or higher"); + ret = -1; + goto out; + } +@@ -2428,7 +2428,7 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr) + /* A bricks mount dir is required only by snapshots which were + * introduced in gluster-3.6.0 + */ +- if (priv->op_version >= GD_OP_VERSION_3_6_0) { ++ if (priv->op_version >= GD_OP_VERSION_RHS_3_0) { + brick_mount_dir = NULL; + snprintf (key, sizeof(key), "brick%d.mount_dir", i); + ret = dict_get_str (dict, key, &brick_mount_dir); +@@ -2623,7 +2623,7 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr) + /* A bricks mount dir is required only by snapshots which were + * introduced in gluster-3.6.0 + */ +- if (conf->op_version >= GD_OP_VERSION_3_6_0) { ++ if (conf->op_version >= GD_OP_VERSION_RHS_3_0) { + cds_list_for_each_entry (brickinfo, &volinfo->bricks, + brick_list) { + brick_count++; +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +index 14b9e21..982275e 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +@@ -1263,7 +1263,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = "cluster.subvols-per-directory", + .voltype = "cluster/distribute", + .option = "directory-layout-spread", +- .op_version = 2, ++ .op_version = 1, + .validate_fn = validate_subvols_per_directory, + .flags = OPT_FLAG_CLIENT_OPT + }, +@@ -1275,27 +1275,27 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = "cluster.rsync-hash-regex", + .voltype = "cluster/distribute", + .type = NO_DOC, +- .op_version = 3, ++ .op_version = 2, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "cluster.extra-hash-regex", + .voltype = "cluster/distribute", + .type = NO_DOC, +- .op_version = 3, ++ .op_version = 2, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "cluster.dht-xattr-name", + .voltype = "cluster/distribute", + .option = "xattr-name", + .type = NO_DOC, +- .op_version = 3, ++ .op_version = 2, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "cluster.randomize-hash-range-by-gfid", + .voltype = "cluster/distribute", + .option = "randomize-hash-range-by-gfid", + .type = NO_DOC, +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_3_7_0, + .flags = OPT_FLAG_CLIENT_OPT, + }, + { .key = "cluster.rebal-throttle", +@@ -1327,12 +1327,12 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "cluster/nufa", + .option = "local-volume-name", + .type = NO_DOC, +- .op_version = 3, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "cluster.weighted-rebalance", + .voltype = "cluster/distribute", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_3_7_0, + }, + + /* Switch xlator options 
(Distribute special case) */ +@@ -1340,14 +1340,14 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "cluster/distribute", + .option = "!switch", + .type = NO_DOC, +- .op_version = 3, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "cluster.switch-pattern", + .voltype = "cluster/switch", + .option = "pattern.switch.case", + .type = NO_DOC, +- .op_version = 3, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + +@@ -1475,18 +1475,18 @@ struct volopt_map_entry glusterd_volopt_map[] = { + }, + { .key = "cluster.readdir-failover", + .voltype = "cluster/replicate", +- .op_version = 2, ++ .op_version = 1, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "cluster.ensure-durability", + .voltype = "cluster/replicate", +- .op_version = 3, ++ .op_version = 2, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "cluster.consistent-metadata", + .voltype = "cluster/replicate", + .type = DOC, +- .op_version = GD_OP_VERSION_3_7_0, ++ .op_version = GD_OP_VERSION_RHS_3_0_4, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "cluster.heal-wait-queue-length", +@@ -1562,45 +1562,45 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = "diagnostics.brick-logger", + .voltype = "debug/io-stats", + .option = "!logger", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "diagnostics.client-logger", + .voltype = "debug/io-stats", + .option = "!logger", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "diagnostics.brick-log-format", + .voltype = "debug/io-stats", + .option = "!log-format", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "diagnostics.client-log-format", + .voltype = "debug/io-stats", + .option = "!log-format", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "diagnostics.brick-log-buf-size", + .voltype = "debug/io-stats", + .option = "!log-buf-size", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "diagnostics.client-log-buf-size", + .voltype = "debug/io-stats", + .option = "!log-buf-size", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "diagnostics.brick-log-flush-timeout", + .voltype = "debug/io-stats", + .option = "!log-flush-timeout", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "diagnostics.client-log-flush-timeout", + .voltype = "debug/io-stats", + .option = "!log-flush-timeout", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "diagnostics.stats-dump-interval", +@@ -1688,6 +1688,10 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "performance/io-threads", + .op_version = 1 + }, ++ { .key = "performance.least-rate-limit", ++ .voltype = "performance/io-threads", ++ .op_version = 1 ++ }, + + /* Other perf xlators' options */ + { .key = "performance.cache-size", +@@ -1705,7 +1709,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = "performance.nfs.flush-behind", + .voltype = "performance/write-behind", + .option = "flush-behind", +- .op_version = 1, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "performance.write-behind-window-size", +@@ -1731,43 +1735,43 @@ struct 
volopt_map_entry glusterd_volopt_map[] = { + { .key = "performance.nfs.write-behind-window-size", + .voltype = "performance/write-behind", + .option = "cache-size", +- .op_version = 1, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "performance.strict-o-direct", + .voltype = "performance/write-behind", + .option = "strict-O_DIRECT", +- .op_version = 2, ++ .op_version = 1, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "performance.nfs.strict-o-direct", + .voltype = "performance/write-behind", + .option = "strict-O_DIRECT", +- .op_version = 2, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "performance.strict-write-ordering", + .voltype = "performance/write-behind", + .option = "strict-write-ordering", +- .op_version = 2, ++ .op_version = 1, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "performance.nfs.strict-write-ordering", + .voltype = "performance/write-behind", + .option = "strict-write-ordering", +- .op_version = 2, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "performance.lazy-open", + .voltype = "performance/open-behind", + .option = "lazy-open", +- .op_version = 3, ++ .op_version = 2, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "performance.read-after-open", + .voltype = "performance/open-behind", + .option = "read-after-open", +- .op_version = 3, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "performance.read-ahead-page-count", +@@ -1815,29 +1819,29 @@ struct volopt_map_entry glusterd_volopt_map[] = { + + /* Crypt xlator options */ + +- { .key = "features.encryption", +- .voltype = "encryption/crypt", +- .option = "!feat", +- .value = "off", +- .op_version = 3, +- .description = "enable/disable client-side encryption for " ++ { .key = "features.encryption", ++ .voltype = "encryption/crypt", ++ .option = "!feat", ++ .value = "off", ++ .op_version = GD_OP_VERSION_RHS_3_0, ++ .description = "enable/disable client-side encryption for " + "the volume.", + .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_XLATOR_OPT + }, + + { .key = "encryption.master-key", + .voltype = "encryption/crypt", +- .op_version = 3, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "encryption.data-key-size", + .voltype = "encryption/crypt", +- .op_version = 3, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "encryption.block-size", + .voltype = "encryption/crypt", +- .op_version = 3, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .flags = OPT_FLAG_CLIENT_OPT + }, + +@@ -1881,7 +1885,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = "network.remote-dio", + .voltype = "protocol/client", + .option = "filter-O_DIRECT", +- .op_version = 2, ++ .op_version = 1, + .flags = OPT_FLAG_CLIENT_OPT + }, + { .key = "client.own-thread", +@@ -1892,7 +1896,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + }, + { .key = "client.event-threads", + .voltype = "protocol/client", +- .op_version = GD_OP_VERSION_3_7_0, ++ .op_version = GD_OP_VERSION_RHS_3_0_4, + }, + { .key = "client.tcp-user-timeout", + .voltype = "protocol/client", +@@ -1960,17 +1964,17 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = "server.root-squash", + .voltype = "protocol/server", + .option = "root-squash", +- .op_version = 2 ++ .op_version = 1 + }, + { .key = "server.anonuid", + .voltype = "protocol/server", + .option = "anonuid", +- .op_version = 3 ++ .op_version = GD_OP_VERSION_RHS_3_0 + }, + 
{ .key = "server.anongid", + .voltype = "protocol/server", + .option = "anongid", +- .op_version = 3 ++ .op_version = GD_OP_VERSION_RHS_3_0 + }, + { .key = "server.statedump-path", + .voltype = "protocol/server", +@@ -1981,7 +1985,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "protocol/server", + .option = "rpc.outstanding-rpc-limit", + .type = GLOBAL_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "features.lock-heal", + .voltype = "protocol/server", +@@ -2007,11 +2011,11 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .option = "!ssl-allow", + .value = "*", + .type = NO_DOC, +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_3_7_0, + }, + { .key = "server.manage-gids", + .voltype = "protocol/server", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "server.dynamic-auth", + .voltype = "protocol/server", +@@ -2020,11 +2024,11 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = "client.send-gids", + .voltype = "protocol/client", + .type = NO_DOC, +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "server.gid-timeout", + .voltype = "protocol/server", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "server.own-thread", + .voltype = "protocol/server", +@@ -2034,7 +2038,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + }, + { .key = "server.event-threads", + .voltype = "protocol/server", +- .op_version = GD_OP_VERSION_3_7_0, ++ .op_version = GD_OP_VERSION_RHS_3_0_4, + }, + { .key = "server.tcp-user-timeout", + .voltype = "protocol/server", +@@ -2095,12 +2099,12 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = SSL_CERT_DEPTH_OPT, + .voltype = "rpc-transport/socket", + .option = "!ssl-cert-depth", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_3_7_0, + }, + { .key = SSL_CIPHER_LIST_OPT, + .voltype = "rpc-transport/socket", + .option = "!ssl-cipher-list", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_3_7_0, + }, + { .key = SSL_DH_PARAM_OPT, + .voltype = "rpc-transport/socket", +@@ -2140,8 +2144,8 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = "performance.readdir-ahead", + .voltype = "performance/readdir-ahead", + .option = "!perf", +- .value = "on", +- .op_version = 3, ++ .value = "off", ++ .op_version = GD_OP_VERSION_RHS_3_0, + .description = "enable/disable readdir-ahead translator in the volume.", + .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_XLATOR_OPT + }, +@@ -2263,7 +2267,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + /* Feature translators */ + { .key = "features.uss", + .voltype = "features/snapview-server", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .value = "off", + .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_XLATOR_OPT, + .validate_fn = validate_uss, +@@ -2273,7 +2277,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + + { .key = "features.snapshot-directory", + .voltype = "features/snapview-client", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .value = ".snaps", + .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_XLATOR_OPT, + .validate_fn = validate_uss_dir, +@@ -2284,7 +2288,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + + { .key = "features.show-snapshot-directory", + .voltype = "features/snapview-client", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .value = "off", + .flags = 
OPT_FLAG_CLIENT_OPT | OPT_FLAG_XLATOR_OPT, + .description = "show entry point in readdir output of " +@@ -2299,35 +2303,35 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "features/cdc", + .option = "!feat", + .value = "off", +- .op_version = 3, ++ .op_version = GD_OP_VERSION_RHS_3_0, + .description = "enable/disable network compression translator", + .flags = OPT_FLAG_XLATOR_OPT + }, + { .key = "network.compression.window-size", + .voltype = "features/cdc", + .option = "window-size", +- .op_version = 3 ++ .op_version = GD_OP_VERSION_RHS_3_0 + }, + { .key = "network.compression.mem-level", + .voltype = "features/cdc", + .option = "mem-level", +- .op_version = 3 ++ .op_version = GD_OP_VERSION_RHS_3_0 + }, + { .key = "network.compression.min-size", + .voltype = "features/cdc", + .option = "min-size", +- .op_version = 3 ++ .op_version = GD_OP_VERSION_RHS_3_0 + }, + { .key = "network.compression.compression-level", + .voltype = "features/cdc", + .option = "compression-level", +- .op_version = 3 ++ .op_version = GD_OP_VERSION_RHS_3_0 + }, + { .key = "network.compression.debug", + .voltype = "features/cdc", + .option = "debug", + .type = NO_DOC, +- .op_version = 3 ++ .op_version = GD_OP_VERSION_RHS_3_0 + }, + #endif + +@@ -2342,25 +2346,25 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "features/quota", + .option = "default-soft-limit", + .type = NO_DOC, +- .op_version = 3, ++ .op_version = 2, + }, + { .key = "features.soft-timeout", + .voltype = "features/quota", + .option = "soft-timeout", + .type = NO_DOC, +- .op_version = 3, ++ .op_version = 2, + }, + { .key = "features.hard-timeout", + .voltype = "features/quota", + .option = "hard-timeout", + .type = NO_DOC, +- .op_version = 3, ++ .op_version = 2, + }, + { .key = "features.alert-time", + .voltype = "features/quota", + .option = "alert-time", + .type = NO_DOC, +- .op_version = 3, ++ .op_version = 2, + }, + { .key = "features.quota-deem-statfs", + .voltype = "features/quota", +@@ -2474,25 +2478,25 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "debug/error-gen", + .option = "failure", + .type = NO_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "debug.error-number", + .voltype = "debug/error-gen", + .option = "error-no", + .type = NO_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "debug.random-failure", + .voltype = "debug/error-gen", + .option = "random-failure", + .type = NO_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "debug.error-fops", + .voltype = "debug/error-gen", + .option = "enable", + .type = NO_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + + +@@ -2543,7 +2547,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "nfs/server", + .option = "rpc.outstanding-rpc-limit", + .type = GLOBAL_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "nfs.port", + .voltype = "nfs/server", +@@ -2619,7 +2623,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "nfs/server", + .option = "nfs.acl", + .type = GLOBAL_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "nfs.mount-udp", + .voltype = "nfs/server", +@@ -2637,13 +2641,13 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "nfs/server", + .option = "nfs.rpc-statd", + .type = NO_DOC, +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "nfs.log-level", + .voltype = "nfs/server", + .option = "nfs.log-level", + .type = NO_DOC, +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, 
+ }, + { .key = "nfs.server-aux-gids", + .voltype = "nfs/server", +@@ -2655,31 +2659,31 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "nfs/server", + .option = "nfs.drc", + .type = GLOBAL_DOC, +- .op_version = 3 ++ .op_version = 1 + }, + { .key = "nfs.drc-size", + .voltype = "nfs/server", + .option = "nfs.drc-size", + .type = GLOBAL_DOC, +- .op_version = 3 ++ .op_version = 1 + }, + { .key = "nfs.read-size", + .voltype = "nfs/server", + .option = "nfs3.read-size", + .type = GLOBAL_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "nfs.write-size", + .voltype = "nfs/server", + .option = "nfs3.write-size", + .type = GLOBAL_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "nfs.readdir-size", + .voltype = "nfs/server", + .option = "nfs3.readdir-size", + .type = GLOBAL_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "nfs.rdirplus", + .voltype = "nfs/server", +@@ -2714,7 +2718,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = "features.read-only", + .voltype = "features/read-only", + .option = "read-only", +- .op_version = 1, ++ .op_version = 2, + .flags = OPT_FLAG_CLIENT_OPT | OPT_FLAG_XLATOR_OPT + }, + { .key = "features.worm", +@@ -2757,15 +2761,15 @@ struct volopt_map_entry glusterd_volopt_map[] = { + }, + { .key = "storage.batch-fsync-mode", + .voltype = "storage/posix", +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "storage.batch-fsync-delay-usec", + .voltype = "storage/posix", +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "storage.xattr-user-namespace-mode", + .voltype = "storage/posix", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "storage.owner-uid", + .voltype = "storage/posix", +@@ -2779,16 +2783,16 @@ struct volopt_map_entry glusterd_volopt_map[] = { + }, + { .key = "storage.node-uuid-pathinfo", + .voltype = "storage/posix", +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "storage.health-check-interval", + .voltype = "storage/posix", +- .op_version = 3 ++ .op_version = 2 + }, + { .option = "update-link-count-parent", + .key = "storage.build-pgfid", + .voltype = "storage/posix", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .option = "gfid2path", + .key = "storage.gfid2path", +@@ -2803,7 +2807,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + }, + { .key = "storage.bd-aio", + .voltype = "storage/bd", +- .op_version = 3 ++ .op_version = GD_OP_VERSION_RHS_3_0 + }, + { .key = "config.memory-accounting", + .voltype = "mgmt/glusterd", +@@ -2819,43 +2823,43 @@ struct volopt_map_entry glusterd_volopt_map[] = { + { .key = GLUSTERD_QUORUM_TYPE_KEY, + .voltype = "mgmt/glusterd", + .value = "off", +- .op_version = 2 ++ .op_version = 1 + }, + { .key = GLUSTERD_QUORUM_RATIO_KEY, + .voltype = "mgmt/glusterd", + .value = "0", +- .op_version = 2 ++ .op_version = 1 + }, + /* changelog translator - global tunables */ + { .key = "changelog.changelog", + .voltype = "features/changelog", + .type = NO_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "changelog.changelog-dir", + .voltype = "features/changelog", + .type = NO_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "changelog.encoding", + .voltype = "features/changelog", + .type = NO_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "changelog.rollover-time", + .voltype = "features/changelog", + .type = NO_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "changelog.fsync-interval", + .voltype = "features/changelog", + .type = 
NO_DOC, +- .op_version = 3 ++ .op_version = 2 + }, + { .key = "changelog.changelog-barrier-timeout", + .voltype = "features/changelog", + .value = BARRIER_TIMEOUT, +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "changelog.capture-del-path", + .voltype = "features/changelog", +@@ -2866,16 +2870,16 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .voltype = "features/barrier", + .value = "disable", + .type = NO_DOC, +- .op_version = GD_OP_VERSION_3_7_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = "features.barrier-timeout", + .voltype = "features/barrier", + .value = BARRIER_TIMEOUT, +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { .key = GLUSTERD_GLOBAL_OP_VERSION_KEY, + .voltype = "mgmt/glusterd", +- .op_version = GD_OP_VERSION_3_6_0, ++ .op_version = GD_OP_VERSION_RHS_3_0, + }, + { + .key = GLUSTERD_MAX_OP_VERSION_KEY, +-- +1.8.3.1 + diff --git a/SOURCES/0003-tier-ctr-sql-Dafault-values-for-sql-cache-and-wal-si.patch b/SOURCES/0003-tier-ctr-sql-Dafault-values-for-sql-cache-and-wal-si.patch new file mode 100644 index 0000000..0de06cd --- /dev/null +++ b/SOURCES/0003-tier-ctr-sql-Dafault-values-for-sql-cache-and-wal-si.patch @@ -0,0 +1,56 @@ +From 8fa58c563cf01934a64773e814f74727ee009b42 Mon Sep 17 00:00:00 2001 +From: Joseph Fernandes +Date: Wed, 30 Dec 2015 16:53:25 +0530 +Subject: [PATCH 03/74] tier/ctr/sql : Dafault values for sql cache and wal + size + +Setting default values for sql cache and wal size +cache : 12500 pages +wal : 25000 pages +1 pages - 4096 bytes + +Porting this downstream 3.1.2 patch to 3.1.3 + +Label: DOWNSTREAM ONLY + +> Change-Id: Iae3927e021af2e3f7617d45f84e81de3b7d93f1c +> BUG: 1282729 +> Signed-off-by: Joseph Fernandes +> Reviewed-on: https://code.engineering.redhat.com/gerrit/64642 +> Reviewed-by: Dan Lambright +> Tested-by: Dan Lambright + +Change-Id: Ib3cd951709dff25157371006637b8c0d881f5d61 +Signed-off-by: Joseph Fernandes +Reviewed-on: https://code.engineering.redhat.com/gerrit/70346 +Reviewed-by: Nithya Balachandran +Tested-by: Atin Mukherjee +--- + xlators/mgmt/glusterd/src/glusterd-volume-set.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +index 982275e..93ef85c 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +@@ -3152,7 +3152,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + "changetimerecorder xlator." + "The input to this option is in pages." + "Each page is 4096 bytes. Default value is 12500 " +- "pages." ++ "pages i.e ~ 49 MB. " + "The max value is 262144 pages i.e 1 GB and " + "the min value is 1000 pages i.e ~ 4 MB. " + }, +@@ -3166,7 +3166,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + " changetimerecorder. " + "The input to this option is in pages. " + "Each page is 4096 bytes. Default value is 25000 " +- "pages." ++ "pages i.e ~ 98 MB." + "The max value is 262144 pages i.e 1 GB and " + "the min value is 1000 pages i.e ~4 MB." 
+ }, +-- +1.8.3.1 + diff --git a/SOURCES/0004-rpc-set-bind-insecure-to-off-by-default.patch b/SOURCES/0004-rpc-set-bind-insecure-to-off-by-default.patch new file mode 100644 index 0000000..8f9feda --- /dev/null +++ b/SOURCES/0004-rpc-set-bind-insecure-to-off-by-default.patch @@ -0,0 +1,51 @@ +From b67f788dfe5855c455c8f4b41fe8159a5b41c4bd Mon Sep 17 00:00:00 2001 +From: Prasanna Kumar Kalever +Date: Mon, 21 Mar 2016 13:54:19 +0530 +Subject: [PATCH 04/74] rpc: set bind-insecure to off by default + +commit 243a5b429f225acb8e7132264fe0a0835ff013d5 turn's 'ON' +allow-insecure and bind-insecure by default. + +Problem: +Now with newer versions we have bind-insecure 'ON' by default. +So, while upgrading subset of nodes from a trusted storage pool, +nodes which have older versions of glusterfs will expect +connection from secure ports only (since they still have +bind-insecure off) thus they reject connection from upgraded +nodes which now have insecure ports. + +Hence we will run into connection issues between peers. + +Solution: +This patch will turn bind-insecure 'OFF' by default to avoid +problem explained above. + +Label: DOWNSTREAM ONLY + +Change-Id: Id7a19b4872399d3b019243b0857c9c7af75472f7 +Signed-off-by: Prasanna Kumar Kalever +Reviewed-on: https://code.engineering.redhat.com/gerrit/70313 +Reviewed-by: Atin Mukherjee +Tested-by: Atin Mukherjee +--- + rpc/rpc-lib/src/rpc-transport.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/rpc/rpc-lib/src/rpc-transport.c b/rpc/rpc-lib/src/rpc-transport.c +index fc26f46..94880f4 100644 +--- a/rpc/rpc-lib/src/rpc-transport.c ++++ b/rpc/rpc-lib/src/rpc-transport.c +@@ -258,8 +258,8 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name) + else + trans->bind_insecure = 0; + } else { +- /* By default allow bind insecure */ +- trans->bind_insecure = 1; ++ /* Turning off bind insecure by default*/ ++ trans->bind_insecure = 0; + } + + ret = dict_get_str (options, "transport-type", &type); +-- +1.8.3.1 + diff --git a/SOURCES/0005-glusterd-spec-fixing-autogen-issue.patch b/SOURCES/0005-glusterd-spec-fixing-autogen-issue.patch new file mode 100644 index 0000000..533dd5a --- /dev/null +++ b/SOURCES/0005-glusterd-spec-fixing-autogen-issue.patch @@ -0,0 +1,47 @@ +From 174ed444ad3b2007ecf55992acc3418455c46893 Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Mon, 21 Mar 2016 17:07:00 +0530 +Subject: [PATCH 05/74] glusterd/spec: fixing autogen issue + +Backport of https://code.engineering.redhat.com/gerrit/#/c/59463/ + +Because of the incorrect build section, autogen.sh wasn't re-run during the rpm +build process. The `extras/Makefile.in` was not regenerated with the changes +made to `extras/Makefile.am` in the firewalld patch. This meant that +`extras/Makefile` was generated without the firewalld changes. 
So the firewalld +config wasn't installed during `make install` and rpmbuild later failed when it +failed to find `/usr/lib/firewalld/glusterfs.xml` + +Label: DOWNSTREAM ONLY + +>Reviewed-on: https://code.engineering.redhat.com/gerrit/59463 + +Change-Id: I498bcceeacbd839640282eb6467c9f1464505697 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/70343 +Reviewed-by: Milind Changire +--- + glusterfs.spec.in | 7 +------ + 1 file changed, 1 insertion(+), 6 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index f68e38f..50db6cb 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -651,12 +651,7 @@ CFLAGS=-DUSE_INSECURE_OPENSSL + export CFLAGS + %endif + +-# RHEL6 and earlier need to manually replace config.guess and config.sub +-%if ( 0%{?rhel} && 0%{?rhel} <= 6 ) +-./autogen.sh +-%endif +- +-%configure \ ++./autogen.sh && %configure \ + %{?_with_cmocka} \ + %{?_with_debug} \ + %{?_with_firewalld} \ +-- +1.8.3.1 + diff --git a/SOURCES/0006-libglusterfs-glusterd-Fix-compilation-errors.patch b/SOURCES/0006-libglusterfs-glusterd-Fix-compilation-errors.patch new file mode 100644 index 0000000..984f34d --- /dev/null +++ b/SOURCES/0006-libglusterfs-glusterd-Fix-compilation-errors.patch @@ -0,0 +1,36 @@ +From 69a19b225dd5bc9fb0279ffd729dc5927548428e Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Mon, 21 Mar 2016 22:31:02 +0530 +Subject: [PATCH 06/74] libglusterfs/glusterd: Fix compilation errors + +1. Removed duplicate definition of GD_OP_VER_PERSISTENT_AFR_XATTRS introduced in +d367a88 where GD_OP_VER_PERSISTENT_AFR_XATTRS was redfined + +2. Fixed incorrect op-version + +Label: DOWNSTREAM ONLY + +Change-Id: Icfa3206e8a41a11875641f57523732b80837f8f6 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/70384 +Reviewed-by: Nithya Balachandran +--- + xlators/mgmt/glusterd/src/glusterd-store.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c +index 229391a..8a662ef 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-store.c ++++ b/xlators/mgmt/glusterd/src/glusterd-store.c +@@ -968,7 +968,7 @@ glusterd_volume_exclude_options_write (int fd, glusterd_volinfo_t *volinfo) + goto out; + } + +- if (conf->op_version >= GD_OP_VERSION_RHS_3_0) { ++ if (conf->op_version >= GD_OP_VERSION_3_7_0) { + snprintf (buf, sizeof (buf), "%d", volinfo->disperse_count); + ret = gf_store_save_value (fd, + GLUSTERD_STORE_KEY_VOL_DISPERSE_CNT, +-- +1.8.3.1 + diff --git a/SOURCES/0007-build-remove-ghost-directory-entries.patch b/SOURCES/0007-build-remove-ghost-directory-entries.patch new file mode 100644 index 0000000..1ae41ce --- /dev/null +++ b/SOURCES/0007-build-remove-ghost-directory-entries.patch @@ -0,0 +1,65 @@ +From 6ed11f5918cf21907df99839c9b76cf1144b2572 Mon Sep 17 00:00:00 2001 +From: "Bala.FA" +Date: Mon, 7 Apr 2014 15:24:10 +0530 +Subject: [PATCH 07/74] build: remove ghost directory entries + +ovirt requires hook directories for gluster management and ghost +directories are no more ghost entries + +Label: DOWNSTREAM ONLY + +Change-Id: Iaf1066ba0655619024f87eaaa039f0010578c567 +Signed-off-by: Bala.FA +Reviewed-on: https://code.engineering.redhat.com/gerrit/60133 +Tested-by: Milind Changire +--- + glusterfs.spec.in | 20 ++++++++++++++++++-- + 1 file changed, 18 insertions(+), 2 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 50db6cb..3be99b6 100644 +--- a/glusterfs.spec.in ++++ 
b/glusterfs.spec.in +@@ -757,14 +757,29 @@ install -D -p -m 0644 extras/glusterfs-logrotate \ + %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs + + %if ( 0%{!?_without_georeplication:1} ) +-# geo-rep ghosts + mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication + touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf + install -D -p -m 0644 extras/glusterfs-georep-logrotate \ + %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep + %endif + +-# the rest of the ghosts ++%if ( 0%{!?_without_syslog:1} ) ++%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 ) ++install -D -p -m 0644 extras/gluster-rsyslog-7.2.conf \ ++ %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example ++%endif ++ ++%if ( 0%{?rhel} && 0%{?rhel} == 6 ) ++install -D -p -m 0644 extras/gluster-rsyslog-5.8.conf \ ++ %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example ++%endif ++ ++%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 ) ++install -D -p -m 0644 extras/logger.conf.example \ ++ %{buildroot}%{_sysconfdir}/glusterfs/logger.conf.example ++%endif ++%endif ++ + touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info + touch %{buildroot}%{_sharedstatedir}/glusterd/options + subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop) +@@ -1262,6 +1277,7 @@ exit 0 + %{_sbindir}/gcron.py + %{_sbindir}/conf.py + ++<<<<<<< 2944c7b6656a36a79551f9f9f24ab7a10467f13a + # /var/lib/glusterd, e.g. hookscripts, etc. + %ghost %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info + %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd +-- +1.8.3.1 + diff --git a/SOURCES/0008-build-add-RHGS-specific-changes.patch b/SOURCES/0008-build-add-RHGS-specific-changes.patch new file mode 100644 index 0000000..17cba78 --- /dev/null +++ b/SOURCES/0008-build-add-RHGS-specific-changes.patch @@ -0,0 +1,878 @@ +From cac41ae2729cffa23a348c4de14486043ef08163 Mon Sep 17 00:00:00 2001 +From: "Bala.FA" +Date: Sat, 11 Nov 2017 10:32:42 +0530 +Subject: [PATCH 08/74] build: add RHGS specific changes + +Label: DOWNSTREAM ONLY + +Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1074947 +Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1097782 +Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1115267 +Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1221743 +Change-Id: I08333334745adf2350e772c6454ffcfe9c08cb89 +Reviewed-on: https://code.engineering.redhat.com/gerrit/24983 +Reviewed-on: https://code.engineering.redhat.com/gerrit/25451 +Reviewed-on: https://code.engineering.redhat.com/gerrit/25518 +Reviewed-on: https://code.engineering.redhat.com/gerrit/25983 +Signed-off-by: Bala.FA +Reviewed-on: https://code.engineering.redhat.com/gerrit/60134 +Tested-by: Milind Changire +--- + glusterfs.spec.in | 605 +++++++++++++++++++++++++++++++++++++++++++++++++++++- + 1 file changed, 597 insertions(+), 8 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 3be99b6..8458e8a 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -80,6 +80,23 @@ + %global _without_tiering --disable-tiering + %endif + ++# if you wish not to build server rpms, compile like this. 
++# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without server ++ ++%global _build_server 1 ++%if "%{?_without_server}" ++%global _build_server 0 ++%endif ++ ++%if ( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" ) ++%global _build_server 1 ++%else ++%global _build_server 0 ++%endif ++ ++%global _without_extra_xlators 1 ++%global _without_regression_tests 1 ++ + ##----------------------------------------------------------------------------- + ## All %%global definitions should be placed here and keep them sorted + ## +@@ -178,7 +195,8 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist} + %else + Name: @PACKAGE_NAME@ + Version: @PACKAGE_VERSION@ +-Release: 0.@PACKAGE_RELEASE@%{?dist} ++Release: @PACKAGE_RELEASE@%{?dist} ++ExclusiveArch: x86_64 aarch64 + %endif + License: GPLv2 or LGPLv3+ + Group: System Environment/Base +@@ -320,7 +338,9 @@ Summary: Development Libraries + Group: Development/Libraries + Requires: %{name}%{?_isa} = %{version}-%{release} + # Needed for the Glupy examples to work +-Requires: %{name}-extra-xlators%{?_isa} = %{version}-%{release} ++%if ( 0%{!?_without_extra_xlators:1} ) ++Requires: %{name}-extra-xlators = %{version}-%{release} ++%endif + + %description devel + GlusterFS is a distributed file-system capable of scaling to several +@@ -333,6 +353,7 @@ is in user space and easily manageable. + + This package provides the development libraries and include files. + ++%if ( 0%{!?_without_extra_xlators:1} ) + %package extra-xlators + Summary: Extra Gluster filesystem Translators + Group: Applications/File +@@ -355,6 +376,7 @@ is in user space and easily manageable. + + This package provides extra filesystem Translators, such as Glupy, + for GlusterFS. ++%endif + + %package fuse + Summary: Fuse client +@@ -381,6 +403,31 @@ is in user space and easily manageable. + This package provides support to FUSE based clients and inlcudes the + glusterfs(d) binary. + ++%if ( 0%{?_build_server} ) ++%package ganesha ++Summary: NFS-Ganesha configuration ++Group: Applications/File ++ ++Requires: %{name}-server%{?_isa} = %{version}-%{release} ++Requires: nfs-ganesha-gluster, pcs, dbus ++%if ( 0%{?rhel} && 0%{?rhel} == 6 ) ++Requires: cman, pacemaker, corosync ++%endif ++ ++%description ganesha ++GlusterFS is a distributed file-system capable of scaling to several ++petabytes. It aggregates various storage bricks over Infiniband RDMA ++or TCP/IP interconnect into one large parallel network file ++system. GlusterFS is one of the most sophisticated file systems in ++terms of features and extensibility. It borrows a powerful concept ++called Translators from GNU Hurd kernel. Much of the code in GlusterFS ++is in user space and easily manageable. ++ ++This package provides the configuration and related files for using ++NFS-Ganesha as the NFS server using GlusterFS ++%endif ++ ++%if ( 0%{?_build_server} ) + %if ( 0%{!?_without_georeplication:1} ) + %package geo-replication + Summary: GlusterFS Geo-replication +@@ -406,6 +453,7 @@ is in userspace and easily manageable. + + This package provides support to geo-replication. + %endif ++%endif + + %if ( 0%{?_with_gnfs:1} ) + %package gnfs +@@ -498,6 +546,8 @@ is in user space and easily manageable. + This package provides support to ib-verbs library. 
+ %endif + ++%if ( 0%{?_build_server} ) ++%if ( 0%{!?_without_regression_tests:1} ) + %package regression-tests + Summary: Development Tools + Group: Development/Tools +@@ -513,7 +563,10 @@ Requires: nfs-utils xfsprogs yajl psmisc bc + %description regression-tests + The Gluster Test Framework, is a suite of scripts used for + regression testing of Gluster. ++%endif ++%endif + ++%if ( 0%{?_build_server} ) + %if ( 0%{!?_without_ocf:1} ) + %package resource-agents + Summary: OCF Resource Agents for GlusterFS +@@ -546,7 +599,9 @@ This package provides the resource agents which plug glusterd into + Open Cluster Framework (OCF) compliant cluster resource managers, + like Pacemaker. + %endif ++%endif + ++%if ( 0%{?_build_server} ) + %package server + Summary: Clustered file-system server + Group: System Environment/Daemons +@@ -602,6 +657,7 @@ called Translators from GNU Hurd kernel. Much of the code in GlusterFS + is in user space and easily manageable. + + This package provides the glusterfs server daemon. ++%endif + + %package client-xlators + Summary: GlusterFS client-side translators +@@ -618,6 +674,7 @@ is in user space and easily manageable. + + This package provides the translators needed on any GlusterFS client. + ++%if ( 0%{?_build_server} ) + %if ( 0%{!?_without_events:1} ) + %package events + Summary: GlusterFS Events +@@ -641,6 +698,7 @@ Requires: python-argparse + GlusterFS Events + + %endif ++%endif + + %prep + %setup -q -n %{name}-%{version}%{?prereltag} +@@ -822,10 +880,12 @@ exit 0 + %post api + /sbin/ldconfig + ++%if ( 0%{?_build_server} ) + %if ( 0%{!?_without_events:1} ) + %post events + %_init_restart glustereventsd + %endif ++%endif + + %if ( 0%{?rhel} == 5 ) + %post fuse +@@ -833,6 +893,7 @@ modprobe fuse + exit 0 + %endif + ++%if ( 0%{?_build_server} ) + %if ( 0%{!?_without_georeplication:1} ) + %post geo-replication + if [ $1 -ge 1 ]; then +@@ -840,10 +901,12 @@ if [ $1 -ge 1 ]; then + fi + exit 0 + %endif ++%endif + + %post libs + /sbin/ldconfig + ++%if ( 0%{?_build_server} ) + %post server + # Legacy server + %_init_enable glusterd +@@ -914,7 +977,7 @@ else + #rpm_script_t context. + rm -f %{_rundir}/glusterd.socket + fi +-exit 0 ++%endif + + ##----------------------------------------------------------------------------- + ## All %%pre should be placed here and keep them sorted +@@ -928,6 +991,7 @@ exit 0 + ##----------------------------------------------------------------------------- + ## All %%preun should be placed here and keep them sorted + ## ++%if ( 0%{?_build_server} ) + %if ( 0%{!?_without_events:1} ) + %preun events + if [ $1 -eq 0 ]; then +@@ -956,7 +1020,7 @@ if [ $1 -ge 1 ]; then + fi + %_init_restart glusterd + fi +-exit 0 ++%endif + + ##----------------------------------------------------------------------------- + ## All %%postun should be placed here and keep them sorted +@@ -986,6 +1050,73 @@ exit 0 + ## All %%files should be placed here and keep them grouped + ## + %files ++# exclude extra-xlators files ++%if ( ! 
0%{!?_without_extra_xlators:1} ) ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/features/template.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so ++%exclude %{python_sitelib}/* ++%endif ++# exclude regression-tests files ++%if ( ! 0%{!?_without_regression_tests:1} ) ++%exclude %{_prefix}/share/glusterfs/run-tests.sh ++%exclude %{_prefix}/share/glusterfs/tests/* ++%endif ++%if ( ! 0%{?_build_server} ) ++# exclude ganesha files ++%exclude %{_prefix}/lib/ocf/* ++# exclude geo-replication files ++%exclude %{_sysconfdir}/logrotate.d/glusterfs-georep ++%exclude %{_libexecdir}/glusterfs/* ++%exclude %{_sbindir}/gfind_missing_files ++%exclude %{_datadir}/glusterfs/scripts/get-gfid.sh ++%exclude %{_datadir}/glusterfs/scripts/slave-upgrade.sh ++%exclude %{_datadir}/glusterfs/scripts/gsync-upgrade.sh ++%exclude %{_datadir}/glusterfs/scripts/generate-gfid-file.sh ++%exclude %{_datadir}/glusterfs/scripts/gsync-sync-gfid ++%exclude %{_sharedstatedir}/glusterd/* ++# exclude server files ++%exclude %{_sysconfdir}/glusterfs ++%exclude %{_sysconfdir}/glusterfs/glusterd.vol ++%exclude %{_sysconfdir}/glusterfs/glusterfs-georep-logrotate ++%exclude %{_sysconfdir}/glusterfs/glusterfs-logrotate ++%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-5.8.conf ++%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-7.2.conf ++%exclude %{_sysconfdir}/glusterfs/group-virt.example ++%exclude %{_sysconfdir}/glusterfs/logger.conf.example ++%exclude %_init_glusterd ++%exclude %{_sysconfdir}/sysconfig/glusterd ++%exclude %{_bindir}/glusterfind ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix* ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota* ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt* ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server* ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage* ++%exclude %{_libdir}/libgfdb.so.* ++%exclude %{_sbindir}/gcron.py ++%exclude %{_sbindir}/glfsheal ++%exclude %{_sbindir}/glusterd ++%exclude %{_sbindir}/snap_scheduler.py ++%exclude 
%{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh ++%if 0%{?_tmpfilesdir:1} ++%exclude %{_tmpfilesdir}/gluster.conf ++%endif ++%endif + %doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS + %{_mandir}/man8/*gluster*.8* + %exclude %{_mandir}/man8/gluster.8* +@@ -1044,6 +1175,11 @@ exit 0 + %if 0%{?_tmpfilesdir:1} + %{_tmpfilesdir}/gluster.conf + %endif ++%if ( ! 0%{?_build_server} ) ++%{_libdir}/pkgconfig/libgfchangelog.pc ++%{_libdir}/pkgconfig/libgfdb.pc ++%{_sbindir}/gluster-setgfid2path ++%endif + + %files api + %exclude %{_libdir}/*.so +@@ -1078,9 +1214,11 @@ exit 0 + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/debug-trace.* + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/helloworld.* + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/negative.* +-%{_libdir}/pkgconfig/libgfchangelog.pc +-%if ( 0%{!?_without_tiering:1} ) +-%{_libdir}/pkgconfig/libgfdb.pc ++%if ( 0%{?_build_server} ) ++%exclude %{_libdir}/pkgconfig/libgfchangelog.pc ++%exclude %{_libdir}/pkgconfig/libgfdb.pc ++%exclude %{_sbindir}/gluster-setgfid2path ++%exclude %{_mandir}/man8/gluster-setgfid2path.8* + %endif + + %files client-xlators +@@ -1090,6 +1228,7 @@ exit 0 + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so + +++%if ( 0%{!?_without_extra_xlators:1} ) + %files extra-xlators + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption +@@ -1106,6 +1245,11 @@ exit 0 + %dir %{python2_sitelib}/gluster + %dir %{python2_sitelib}/gluster/glupy + %{python2_sitelib}/gluster/glupy/* ++# Don't expect a .egg-info file on EL5 ++%if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 ) ) ++%{python_sitelib}/glusterfs_glupy*.egg-info ++%endif ++%endif + + %files fuse + # glusterfs is a symlink to glusterfsd, -server depends on -fuse. 
+@@ -1125,6 +1269,7 @@ exit 0 + %endif + %endif + ++%if ( 0%{?_build_server} ) + %if ( 0%{?_with_gnfs:1} ) + %files gnfs + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator +@@ -1135,7 +1280,13 @@ exit 0 + %ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run + %ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid + %endif ++%endif ++ ++%if ( 0%{?_build_server} ) ++%files ganesha ++%endif + ++%if ( 0%{?_build_server} ) + %if ( 0%{!?_without_georeplication:1} ) + %files geo-replication + %config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep +@@ -1172,6 +1323,7 @@ exit 0 + %{_datadir}/glusterfs/scripts/gsync-sync-gfid + %{_datadir}/glusterfs/scripts/schedule_georep.py* + %endif ++%endif + + %files libs + %{_libdir}/*.so.* +@@ -1194,19 +1346,26 @@ exit 0 + %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma* + %endif + ++%if ( 0%{?_build_server} ) + %files regression-tests + %dir %{_datadir}/glusterfs + %{_datadir}/glusterfs/run-tests.sh + %{_datadir}/glusterfs/tests + %exclude %{_datadir}/glusterfs/tests/vagrant ++%exclude %{_datadir}/share/glusterfs/tests/basic/rpm.t ++%endif + ++%if ( 0%{?_build_server} ) + %if ( 0%{!?_without_ocf:1} ) + %files resource-agents + # /usr/lib is the standard for OCF, also on x86_64 + %{_prefix}/lib/ocf/resource.d/glusterfs + %endif ++%endif + ++%if ( 0%{?_build_server} ) + %files server ++%exclude %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh + %doc extras/clear_xattrs.sh + # sysconf + %config(noreplace) %{_sysconfdir}/glusterfs +@@ -1277,7 +1436,6 @@ exit 0 + %{_sbindir}/gcron.py + %{_sbindir}/conf.py + +-<<<<<<< 2944c7b6656a36a79551f9f9f24ab7a10467f13a + # /var/lib/glusterd, e.g. hookscripts, etc. + %ghost %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info + %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd +@@ -1354,8 +1512,438 @@ exit 0 + %if ( 0%{?_with_firewalld:1} ) + %{_prefix}/lib/firewalld/services/glusterfs.xml + %endif ++%endif ++ ++ ++##----------------------------------------------------------------------------- ++## All %pretrans should be placed here and keep them sorted ++## ++%if 0%{?_build_server} ++%pretrans -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . > /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped." ++ echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!" ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime." ++ echo "WARNING: Ensure to wait for the upgraded server to finish healing before proceeding." 
++ echo "WARNING: Refer upgrade section of install guide for more details" ++ echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;" ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++ ++ ++ ++%pretrans api -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . > /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-api_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++ ++ ++ ++%pretrans api-devel -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . > /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-api-devel_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++ ++ ++ ++%pretrans devel -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . 
> /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-devel_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++ ++ ++ ++%pretrans fuse -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . > /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-fuse_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++ ++ ++ ++%if 0%{?_can_georeplicate} ++%if ( 0%{!?_without_georeplication:1} ) ++%pretrans geo-replication -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . > /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-geo-replication_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++%endif ++%endif ++ ++ ++ ++%pretrans libs -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . 
> /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-libs_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++ ++ ++ ++%if ( 0%{!?_without_rdma:1} ) ++%pretrans rdma -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . > /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-rdma_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++%endif ++ ++ ++ ++%if ( 0%{!?_without_ocf:1} ) ++%pretrans resource-agents -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . > /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-resource-agents_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++%endif ++ ++ ++ ++%pretrans server -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . 
> /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-server_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++%endif + + # Events ++%if ( 0%{?_build_server} ) + %if ( 0%{!?_without_events:1} ) + %files events + %config(noreplace) %{_sysconfdir}/glusterfs/eventsconfig.json +@@ -1373,6 +1961,7 @@ exit 0 + %{_sysconfdir}/init.d/glustereventsd + %endif + %endif ++%endif + + %changelog + * Tue Aug 22 2017 Kaleb S. KEITHLEY +-- +1.8.3.1 + diff --git a/SOURCES/0009-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch b/SOURCES/0009-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch new file mode 100644 index 0000000..0a36a9a --- /dev/null +++ b/SOURCES/0009-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch @@ -0,0 +1,35 @@ +From bfa0315b0437602ff1e568fb16c43d9937703eb4 Mon Sep 17 00:00:00 2001 +From: "Bala.FA" +Date: Thu, 22 May 2014 08:37:27 +0530 +Subject: [PATCH 09/74] secalert: remove setuid bit for fusermount-glusterfs + +glusterfs-fuse: File /usr/bin/fusermount-glusterfs on x86_64 is setuid +root but is not on the setxid whitelist + +Label: DOWNSTREAM ONLY + +Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=989480 +Change-Id: Icf6e5db72ae15ccc60b02be6713fb6c4f4c8a15f +Signed-off-by: Bala.FA +Reviewed-on: https://code.engineering.redhat.com/gerrit/25453 +Signed-off-by: Bala.FA +Reviewed-on: https://code.engineering.redhat.com/gerrit/60135 +Tested-by: Milind Changire +--- + contrib/fuse-util/Makefile.am | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/contrib/fuse-util/Makefile.am b/contrib/fuse-util/Makefile.am +index abbc10e..a071c81 100644 +--- a/contrib/fuse-util/Makefile.am ++++ b/contrib/fuse-util/Makefile.am +@@ -9,6 +9,5 @@ AM_CFLAGS = -Wall $(GF_CFLAGS) + + install-exec-hook: + -chown root $(DESTDIR)$(bindir)/fusermount-glusterfs +- chmod u+s $(DESTDIR)$(bindir)/fusermount-glusterfs + + CLEANFILES = +-- +1.8.3.1 + diff --git a/SOURCES/0010-build-packaging-corrections-for-RHEL-5.patch b/SOURCES/0010-build-packaging-corrections-for-RHEL-5.patch new file mode 100644 index 0000000..ba47ddd --- /dev/null +++ b/SOURCES/0010-build-packaging-corrections-for-RHEL-5.patch @@ -0,0 +1,79 @@ +From b40c05f7c099e860464faddd81722c7a3ab860a4 Mon Sep 17 00:00:00 2001 +From: Niels de Vos +Date: Wed, 10 Jun 2015 16:16:47 +0200 +Subject: [PATCH 10/74] build: packaging corrections for RHEL-5 + +Because the RHEL-5 version of these packages do not contain the -server +bits, some additional changes for the .spec are needed. These changes +are not applicable upstream. 
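For reference, every Lua %pretrans scriptlet added by the patches above implements one and the same guard; reduced to a standalone shell sketch (assuming the standard /var/lib/glusterd layout, where type=0 in a volume's info file denotes plain distribute):

    #!/bin/sh
    # condensed sketch of the %pretrans check: refuse to proceed while
    # glusterfsd is running with a started plain-distribute volume
    pidof glusterfsd >/dev/null 2>&1 || exit 0
    for volume in /var/lib/glusterd/vols/*; do
        [ -f "$volume/info" ] || continue
        vol_type=$(grep '^type=' "$volume/info" | awk -F'=' '{print $2}')
        started=$(grep '^status=' "$volume/info" | awk -F'=' '{print $2}')
        if [ "$vol_type" -eq 0 ] && [ "$started" -eq 1 ]; then
            echo "ERROR: stop distribute volume(s) before upgrading" >&2
            exit 1
        fi
    done
    exit 0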
+ +Label: DOWNSTREAM ONLY + +Change-Id: I3c4237bd986617f42b725efd75d1128a69e5dbe3 +Signed-off-by: Niels de Vos +Reviewed-on: https://code.engineering.redhat.com/gerrit/50447 +Reviewed-by: Balamurugan Arumugam +Reviewed-on: https://code.engineering.redhat.com/gerrit/60136 +Tested-by: Milind Changire +--- + glusterfs.spec.in | 18 ++++++++++-------- + 1 file changed, 10 insertions(+), 8 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 8458e8a..dbdb818 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -92,6 +92,7 @@ + %global _build_server 1 + %else + %global _build_server 0 ++%global _without_georeplication --disable-georeplication + %endif + + %global _without_extra_xlators 1 +@@ -1068,17 +1069,14 @@ exit 0 + %if ( ! 0%{?_build_server} ) + # exclude ganesha files + %exclude %{_prefix}/lib/ocf/* +-# exclude geo-replication files +-%exclude %{_sysconfdir}/logrotate.d/glusterfs-georep ++# exclude incrementalapi + %exclude %{_libexecdir}/glusterfs/* + %exclude %{_sbindir}/gfind_missing_files +-%exclude %{_datadir}/glusterfs/scripts/get-gfid.sh +-%exclude %{_datadir}/glusterfs/scripts/slave-upgrade.sh +-%exclude %{_datadir}/glusterfs/scripts/gsync-upgrade.sh +-%exclude %{_datadir}/glusterfs/scripts/generate-gfid-file.sh +-%exclude %{_datadir}/glusterfs/scripts/gsync-sync-gfid +-%exclude %{_sharedstatedir}/glusterd/* ++%exclude %{_libexecdir}/glusterfs/glusterfind ++%exclude %{_bindir}/glusterfind ++%exclude %{_libexecdir}/glusterfs/peer_add_secret_pub + # exclude server files ++%exclude %{_sharedstatedir}/glusterd/* + %exclude %{_sysconfdir}/glusterfs + %exclude %{_sysconfdir}/glusterfs/glusterd.vol + %exclude %{_sysconfdir}/glusterfs/glusterfs-georep-logrotate +@@ -1093,7 +1091,9 @@ exit 0 + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so ++%if ( 0%{!?_without_tiering:1} ) + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so ++%endif + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so +@@ -1107,7 +1107,9 @@ exit 0 + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server* + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage* ++%if ( 0%{!?_without_tiering:1} ) + %exclude %{_libdir}/libgfdb.so.* ++%endif + %exclude %{_sbindir}/gcron.py + %exclude %{_sbindir}/glfsheal + %exclude %{_sbindir}/glusterd +-- +1.8.3.1 + diff --git a/SOURCES/0011-build-introduce-security-hardening-flags-in-gluster.patch b/SOURCES/0011-build-introduce-security-hardening-flags-in-gluster.patch new file mode 100644 index 0000000..34bfd71 --- /dev/null +++ b/SOURCES/0011-build-introduce-security-hardening-flags-in-gluster.patch @@ -0,0 +1,70 @@ +From ada27d07526acb0ef09f37de7f364fa3dcea0b36 Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Wed, 3 Jun 2015 11:09:21 +0530 +Subject: [PATCH 11/74] build: introduce security hardening flags in gluster + +This patch introduces two of the security hardening compiler flags RELRO & PIE +in gluster codebase. 
Using _hardened_build as 1 doesn't guarantee the existence
+of these flags in the compilation as different versions of RHEL have different
+redhat-rpm-config macros. So the idea is to export these flags at spec file
+level.
+
+Label: DOWNSTREAM ONLY
+
+Change-Id: I0a1a56d0a8f54f110d306ba5e55e39b1b073dc84
+Signed-off-by: Atin Mukherjee
+Reviewed-on: https://code.engineering.redhat.com/gerrit/49780
+Reviewed-by: Balamurugan Arumugam
+Tested-by: Balamurugan Arumugam
+Reviewed-on: https://code.engineering.redhat.com/gerrit/60137
+Tested-by: Milind Changire
+---
+ glusterfs.spec.in | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index dbdb818..458b8bc 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -709,6 +709,24 @@ GlusterFS Events
+ CFLAGS=-DUSE_INSECURE_OPENSSL
+ export CFLAGS
+ %endif
++# In RHEL7 few hardening flags are available by default, however the RELRO
++# default behaviour is partial, convert to full
++%if ( 0%{?rhel} && 0%{?rhel} >= 7 )
++LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
++export LDFLAGS
++%else
++%if ( 0%{?rhel} && 0%{?rhel} == 6 )
++CFLAGS="$RPM_OPT_FLAGS -fPIE -DPIE"
++LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro,-z,now"
++%else
++#It appears that with gcc-4.1.2 in RHEL5 there is an issue using both -fPIC and
++ # -fPIE that makes -z relro not work; -fPIE seems to undo what -fPIC does
++CFLAGS="$CFLAGS $RPM_OPT_FLAGS"
++LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
++%endif
++export CFLAGS
++export LDFLAGS
++%endif
+
+ ./autogen.sh && %configure \
+ %{?_with_cmocka} \
+ %{?_with_debug} \
+ %{?_with_firewalld} \
+@@ -2110,8 +2128,11 @@ end
+ * Fri Jun 12 2015 Aravinda VK
+ - Added rsync as dependency to georeplication rpm (#1231205)
+
+-* Tue Jun 02 2015 Aravinda VK
+-- Added post hook for volume delete as part of glusterfind (#1225465)
++* Thu Jun 11 2015 Atin Mukherjee
++- Security hardening flags inclusion (#1200815)
++
++* Thu Jun 11 2015 Aravinda VK
++- Added post hook for volume delete as part of glusterfind (#1225551)
+
+ * Wed May 27 2015 Aravinda VK
+ - Added stop-all-gluster-processes.sh in glusterfs-server section (#1204641)
+--
+1.8.3.1
+
diff --git a/SOURCES/0012-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch b/SOURCES/0012-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch
new file mode 100644
index 0000000..0d71aeb
--- /dev/null
+++ b/SOURCES/0012-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch
@@ -0,0 +1,100 @@
+From 280eddebd49483343cc08b42c12f26d89f6d51e1 Mon Sep 17 00:00:00 2001
+From: Niels de Vos
+Date: Wed, 22 Apr 2015 15:39:59 +0200
+Subject: [PATCH 12/74] spec: fix/add pre-transaction scripts for geo-rep and
+ cli packages
+
+The cli subpackage never had a %pretrans script; this has been added
+now.
+
+The %pretrans script for geo-replication was never included in the RPM
+package because it was disabled by an undefined macro (_can_georeplicate).
+This macro is not used/set anywhere else and _without_georeplication
+should take care of it anyway.
+
+Note: This is a Red Hat Gluster Storage specific patch. Upstream
+ packaging guidelines do not allow this kind of 'feature'.
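Whether the RELRO and PIE flags from the hardening patch above actually reach the installed binaries can be spot-checked with binutils alone; an illustrative check against glusterfsd (any ELF file from the packages works):

    # full RELRO shows a GNU_RELRO segment plus the BIND_NOW dynamic flag;
    # a PIE executable reports ELF type DYN in its header
    readelf -lW /usr/sbin/glusterfsd | grep GNU_RELRO
    readelf -dW /usr/sbin/glusterfsd | grep NOW
    readelf -hW /usr/sbin/glusterfsd | grep 'Type:'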
+ +Label: DOWNSTREAM ONLY + +Change-Id: I16aab5bba72f1ed178f3bcac47f9d8ef767cfcef +Signed-off-by: Niels de Vos +Signed-off-by: Bala.FA +Reviewed-on: https://code.engineering.redhat.com/gerrit/50491 +Reviewed-on: https://code.engineering.redhat.com/gerrit/60138 +Tested-by: Milind Changire +--- + glusterfs.spec.in | 43 +++++++++++++++++++++++++++++++++++++++++-- + 1 file changed, 41 insertions(+), 2 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 458b8bc..68eba56 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1668,6 +1668,47 @@ end + + + ++%pretrans cli -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . > /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-cli_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++ ++ ++ + %pretrans devel -p + if not posix.access("/bin/bash", "x") then + -- initial installation, no shell, no running glusterfsd +@@ -1750,7 +1791,6 @@ end + + + +-%if 0%{?_can_georeplicate} + %if ( 0%{!?_without_georeplication:1} ) + %pretrans geo-replication -p + if not posix.access("/bin/bash", "x") then +@@ -1791,7 +1831,6 @@ if not (ok == 0) then + error("Detected running glusterfs processes", ok) + end + %endif +-%endif + + + +-- +1.8.3.1 + diff --git a/SOURCES/0013-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch b/SOURCES/0013-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch new file mode 100644 index 0000000..a7a6c98 --- /dev/null +++ b/SOURCES/0013-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch @@ -0,0 +1,84 @@ +From cf8f5a4e4098a6aae9b986dc2da2006eadd4fef1 Mon Sep 17 00:00:00 2001 +From: Niels de Vos +Date: Thu, 18 Jun 2015 12:16:16 +0200 +Subject: [PATCH 13/74] rpm: glusterfs-devel for client-builds should not + depend on -server + +glusterfs-devel for client-side packages should *not* include the +libgfdb.so symlink and libgfdb.pc file or any of the libchangelog +ones. + +Label: DOWNSTREAM ONLY + +Change-Id: Ifb4a9cf48841e5af5dd0a98b6de51e2ee469fc56 +Signed-off-by: Niels de Vos +Reviewed-on: https://code.engineering.redhat.com/gerrit/51019 +Reviewed-by: Balamurugan Arumugam +Tested-by: Balamurugan Arumugam +Reviewed-on: https://code.engineering.redhat.com/gerrit/60139 +Tested-by: Milind Changire +--- + glusterfs.spec.in | 24 +++++++++++++++++++----- + 1 file changed, 19 insertions(+), 5 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 68eba56..b2fb4d5 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1196,9 +1196,10 @@ exit 0 + %{_tmpfilesdir}/gluster.conf + %endif + %if ( ! 
0%{?_build_server} ) +-%{_libdir}/pkgconfig/libgfchangelog.pc +-%{_libdir}/pkgconfig/libgfdb.pc +-%{_sbindir}/gluster-setgfid2path ++%exclude %{_libdir}/pkgconfig/libgfchangelog.pc ++%exclude %{_libdir}/pkgconfig/libgfdb.pc ++%exclude %{_sbindir}/gluster-setgfid2path ++%exclude %{_mandir}/man8/gluster-setgfid2path.8* + %endif + + %files api +@@ -1226,6 +1227,12 @@ exit 0 + %{_includedir}/glusterfs/* + %exclude %{_includedir}/glusterfs/api + %exclude %{_libdir}/libgfapi.so ++%if ( ! 0%{?_build_server} ) ++%exclude %{_libdir}/libgfchangelog.so ++%endif ++%if ( 0%{!?_without_tiering:1} && ! 0%{?_build_server}) ++%exclude %{_libdir}/libgfdb.so ++%endif + %{_libdir}/*.so + # Glupy Translator examples + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator +@@ -1235,10 +1242,14 @@ exit 0 + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/helloworld.* + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/negative.* + %if ( 0%{?_build_server} ) ++%{_libdir}/pkgconfig/libgfchangelog.pc ++%else + %exclude %{_libdir}/pkgconfig/libgfchangelog.pc ++%endif ++%if ( 0%{!?_without_tiering:1} && 0%{?_build_server}) ++%{_libdir}/pkgconfig/libgfdb.pc ++%else + %exclude %{_libdir}/pkgconfig/libgfdb.pc +-%exclude %{_sbindir}/gluster-setgfid2path +-%exclude %{_mandir}/man8/gluster-setgfid2path.8* + %endif + + %files client-xlators +@@ -2161,6 +2172,9 @@ end + * Tue Aug 18 2015 Niels de Vos + - Include missing directories for glusterfind hooks scripts (#1225465) + ++* Thu Jun 18 2015 Niels de Vos ++- glusterfs-devel for client-builds should not depend on -server (#1227029) ++ + * Mon Jun 15 2015 Niels de Vos + - Replace hook script S31ganesha-set.sh by S31ganesha-start.sh (#1231738) + +-- +1.8.3.1 + diff --git a/SOURCES/0014-build-add-pretrans-check.patch b/SOURCES/0014-build-add-pretrans-check.patch new file mode 100644 index 0000000..53b0a55 --- /dev/null +++ b/SOURCES/0014-build-add-pretrans-check.patch @@ -0,0 +1,181 @@ +From 59602f5c55a05b9652247803d37efa85f6e8f526 Mon Sep 17 00:00:00 2001 +From: "Bala.FA" +Date: Wed, 17 Jun 2015 21:34:52 +0530 +Subject: [PATCH 14/74] build: add pretrans check + +This patch adds pretrans check for client-xlators, ganesha and +python-gluster sub-packages. + +Label: DOWNSTREAM ONLY + +Change-Id: I454016319832c11902c0ca79a79fbbcf8ac0a121 +Signed-off-by: Bala.FA +Reviewed-on: https://code.engineering.redhat.com/gerrit/50967 +Reviewed-on: https://code.engineering.redhat.com/gerrit/60140 +Tested-by: Milind Changire +--- + glusterfs.spec.in | 127 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 127 insertions(+) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index b2fb4d5..0d1161d 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1720,6 +1720,47 @@ end + + + ++%pretrans client-xlators -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . 
> /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-client-xlators_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++ ++ ++ + %pretrans devel -p + if not posix.access("/bin/bash", "x") then + -- initial installation, no shell, no running glusterfsd +@@ -1802,6 +1843,47 @@ end + + + ++%pretrans ganesha -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . > /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/glusterfs-ganesha_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++ ++ ++ + %if ( 0%{!?_without_georeplication:1} ) + %pretrans geo-replication -p + if not posix.access("/bin/bash", "x") then +@@ -1886,6 +1968,47 @@ end + + + ++%pretrans -n python-gluster -p ++if not posix.access("/bin/bash", "x") then ++ -- initial installation, no shell, no running glusterfsd ++ return 0 ++end ++ ++-- TODO: move this completely to a lua script ++-- For now, we write a temporary bash script and execute that. ++ ++script = [[#!/bin/sh ++pidof -c -o %PPID -x glusterfsd &>/dev/null ++ ++if [ $? -eq 0 ]; then ++ pushd . > /dev/null 2>&1 ++ for volume in /var/lib/glusterd/vols/*; do cd $volume; ++ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` ++ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` ++ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then ++ exit 1; ++ fi ++ done ++ ++ popd > /dev/null 2>&1 ++ exit 1; ++fi ++]] ++ ++-- rpm in RHEL5 does not have os.tmpname() ++-- io.tmpfile() can not be resolved to a filename to pass to bash :-/ ++tmpname = "/tmp/python-gluster_pretrans_" .. os.date("%s") ++tmpfile = io.open(tmpname, "w") ++tmpfile:write(script) ++tmpfile:close() ++ok, how, val = os.execute("/bin/bash " .. 
tmpname) ++os.remove(tmpname) ++if not (ok == 0) then ++ error("Detected running glusterfs processes", ok) ++end ++ ++ ++ + %if ( 0%{!?_without_rdma:1} ) + %pretrans rdma -p + if not posix.access("/bin/bash", "x") then +@@ -2172,6 +2295,10 @@ end + * Tue Aug 18 2015 Niels de Vos + - Include missing directories for glusterfind hooks scripts (#1225465) + ++* Thu Jun 18 2015 Bala.FA ++- add pretrans check for client-xlators, ganesha and python-gluster ++ sub-packages (#1232641) ++ + * Thu Jun 18 2015 Niels de Vos + - glusterfs-devel for client-builds should not depend on -server (#1227029) + +-- +1.8.3.1 + diff --git a/SOURCES/0015-build-exclude-libgfdb.pc-conditionally.patch b/SOURCES/0015-build-exclude-libgfdb.pc-conditionally.patch new file mode 100644 index 0000000..94f97ae --- /dev/null +++ b/SOURCES/0015-build-exclude-libgfdb.pc-conditionally.patch @@ -0,0 +1,87 @@ +From 444324cfdcd8da750bc0ae04a3a416725489dd06 Mon Sep 17 00:00:00 2001 +From: "Bala.FA" +Date: Fri, 19 Jun 2015 11:09:53 +0530 +Subject: [PATCH 15/74] build: exclude libgfdb.pc conditionally + +This patch fixes rhel-5 build failure where libgfdb.pc is not +applicable. + +Label: DOWNSTREAM ONLY + +Change-Id: Ied3978aa14ff6bd72f25eff9759e501100cb6343 +Signed-off-by: Bala.FA +Reviewed-on: https://code.engineering.redhat.com/gerrit/51099 +Reviewed-on: https://code.engineering.redhat.com/gerrit/60141 +Tested-by: Milind Changire +--- + glusterfs.spec.in | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 0d1161d..f308f37 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1058,12 +1058,14 @@ fi + %postun libs + /sbin/ldconfig + ++%if ( 0%{?_build_server} ) + %postun server + /sbin/ldconfig + %if (0%{?_with_firewalld:1}) + %firewalld_reload + %endif + exit 0 ++%endif + + ##----------------------------------------------------------------------------- + ## All %%files should be placed here and keep them grouped +@@ -1249,8 +1251,10 @@ exit 0 + %if ( 0%{!?_without_tiering:1} && 0%{?_build_server}) + %{_libdir}/pkgconfig/libgfdb.pc + %else ++%if ( 0%{?rhel} && 0%{?rhel} >= 6 ) + %exclude %{_libdir}/pkgconfig/libgfdb.pc + %endif ++%endif + + %files client-xlators + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator +@@ -1259,7 +1263,7 @@ exit 0 + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so + +-+%if ( 0%{!?_without_extra_xlators:1} ) ++%if ( 0%{!?_without_extra_xlators:1} ) + %files extra-xlators + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption +@@ -1378,6 +1382,7 @@ exit 0 + %endif + + %if ( 0%{?_build_server} ) ++%if ( 0%{!?_without_regression_tests:1} ) + %files regression-tests + %dir %{_datadir}/glusterfs + %{_datadir}/glusterfs/run-tests.sh +@@ -1385,6 +1390,7 @@ exit 0 + %exclude %{_datadir}/glusterfs/tests/vagrant + %exclude %{_datadir}/share/glusterfs/tests/basic/rpm.t + %endif ++%endif + + %if ( 0%{?_build_server} ) + %if ( 0%{!?_without_ocf:1} ) +@@ -2295,6 +2301,9 @@ end + * Tue Aug 18 2015 Niels de Vos + - Include missing directories for glusterfind hooks scripts (#1225465) + ++* Fri Jun 19 2015 Bala.FA ++- exclude libgfdb.pc conditionally for rhel-5 (#1233486) ++ + * Thu Jun 18 2015 Bala.FA + - add pretrans check for client-xlators, ganesha and python-gluster + sub-packages (#1232641) +-- +1.8.3.1 + diff --git 
a/SOURCES/0016-build-exclude-glusterfs.xml-on-rhel-7-client-build.patch b/SOURCES/0016-build-exclude-glusterfs.xml-on-rhel-7-client-build.patch
new file mode 100644
index 0000000..4188bac
--- /dev/null
+++ b/SOURCES/0016-build-exclude-glusterfs.xml-on-rhel-7-client-build.patch
@@ -0,0 +1,33 @@
+From 5b117b1f8cf05d645512bb6f07cbe2803119652f Mon Sep 17 00:00:00 2001
+From: Milind Changire
+Date: Thu, 29 Oct 2015 15:55:26 +0530
+Subject: [PATCH 16/74] build: exclude glusterfs.xml on rhel-7 client build
+
+Label: DOWNSTREAM ONLY
+
+Change-Id: Iae1ee01b3aa61d4dd150e17646b330871b948ef3
+Signed-off-by: Milind Changire
+Reviewed-on: https://code.engineering.redhat.com/gerrit/60433
+Reviewed-by: Balamurugan Arumugam
+Tested-by: Balamurugan Arumugam
+---
+ glusterfs.spec.in | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index f308f37..85f7f21 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -1138,6 +1138,9 @@ exit 0
+ %if 0%{?_tmpfilesdir:1}
+ %exclude %{_tmpfilesdir}/gluster.conf
+ %endif
++%if ( 0%{?_with_firewalld:1} )
++%exclude /usr/lib/firewalld/services/glusterfs.xml
++%endif
+ %endif
+ %doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS
+ %{_mandir}/man8/*gluster*.8*
+--
+1.8.3.1
+
diff --git a/SOURCES/0017-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch b/SOURCES/0017-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch
new file mode 100644
index 0000000..4b491be
--- /dev/null
+++ b/SOURCES/0017-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch
@@ -0,0 +1,56 @@
+From 5d3441530f71047483b5973bad7efd2c73ccfff9 Mon Sep 17 00:00:00 2001
+From: anand
+Date: Wed, 18 Nov 2015 16:13:46 +0530
+Subject: [PATCH 17/74] glusterd: fix info file checksum mismatch during
+ upgrade
+
+Peers move to the rejected state when upgrading from RHS2.1 to RHGS3.1.2,
+due to an info file checksum mismatch.
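The mismatch is visible on disk: glusterd keeps each volume's settings in an info file and a checksum of it in a sibling cksum file, so a peer that writes the extra op-version keys computes a different checksum than an older peer that does not. A sketch for comparing two peers, with VOLNAME as a placeholder:

    # run on each peer and diff the output
    grep -E '^(op-version|client-op-version)=' /var/lib/glusterd/vols/VOLNAME/info
    cat /var/lib/glusterd/vols/VOLNAME/cksum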
+
+Label: DOWNSTREAM ONLY
+
+Change-Id: Ifea6b7dfe8477c7f17eefc5ca87ced58aaa21c84
+Signed-off-by: anand
+Reviewed-on: https://code.engineering.redhat.com/gerrit/61774
+Reviewed-by: Atin Mukherjee
+Tested-by: Atin Mukherjee
+---
+ xlators/mgmt/glusterd/src/glusterd-store.c | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
+index 8a662ef..42bb8ce 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-store.c
++++ b/xlators/mgmt/glusterd/src/glusterd-store.c
+@@ -1014,16 +1014,19 @@ glusterd_volume_exclude_options_write (int fd, glusterd_volinfo_t *volinfo)
+ goto out;
+ }
+
+- snprintf (buf, sizeof (buf), "%d", volinfo->op_version);
+- ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_OP_VERSION, buf);
+- if (ret)
+- goto out;
++ if (conf->op_version >= GD_OP_VERSION_RHS_3_0) {
++ snprintf (buf, sizeof (buf), "%d", volinfo->op_version);
++ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_OP_VERSION, buf);
++ if (ret)
++ goto out;
++
++ snprintf (buf, sizeof (buf), "%d", volinfo->client_op_version);
++ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION,
++ buf);
++ if (ret)
++ goto out;
++ }
+
+- snprintf (buf, sizeof (buf), "%d", volinfo->client_op_version);
+- ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION,
+- buf);
+- if (ret)
+- goto out;
+ if (volinfo->caps) {
+ snprintf (buf, sizeof (buf), "%d", volinfo->caps);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_CAPS,
+--
+1.8.3.1
+
diff --git a/SOURCES/0018-build-spec-file-conflict-resolution.patch b/SOURCES/0018-build-spec-file-conflict-resolution.patch
new file mode 100644
index 0000000..7f22b92
--- /dev/null
+++ b/SOURCES/0018-build-spec-file-conflict-resolution.patch
@@ -0,0 +1,61 @@
+From 75d0e5c542c4d1a2df1a49a6f526ccb099f9f53f Mon Sep 17 00:00:00 2001
+From: Milind Changire
+Date: Tue, 22 Mar 2016 23:33:13 +0530
+Subject: [PATCH 18/74] build: spec file conflict resolution
+
+Missed conflict resolution for removing references to
+gluster.conf.example, as mentioned in the patch titled
+"packaging: gratuitous dependencies on rsyslog-mm{count,jsonparse}"
+by Kaleb
+
+References to hook scripts S31ganesha-start.sh and
+S31ganesha-reset.sh got lost in the downstream-only
+patch conflict resolution.
+
+Commented out the blanket reference to %{_sharedstatedir}/glusterd/*
+in section %files server to avoid the rpmbuild warning related to
+multiple references to hook scripts and other files under
+/var/lib/glusterd.
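The warning being silenced here is rpmbuild's duplicate-file diagnostic, which a local build surfaces directly; an illustrative check:

    # duplicate %files entries show up as "File listed twice" warnings
    rpmbuild -bb glusterfs.spec 2>&1 | grep 'listed twice'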
+ +Label: DOWNSTREAM ONLY + +Change-Id: I9d409f1595ab985ed9f79d9d4f4298877609ba17 +Signed-off-by: Milind Changire +Reviewed-on: https://code.engineering.redhat.com/gerrit/70535 +Reviewed-by: Rajesh Joseph +Tested-by: Rajesh Joseph +--- + glusterfs.spec.in | 17 ----------------- + 1 file changed, 17 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 85f7f21..fe566e5 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -840,23 +840,6 @@ install -D -p -m 0644 extras/glusterfs-georep-logrotate \ + %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep + %endif + +-%if ( 0%{!?_without_syslog:1} ) +-%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 ) +-install -D -p -m 0644 extras/gluster-rsyslog-7.2.conf \ +- %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example +-%endif +- +-%if ( 0%{?rhel} && 0%{?rhel} == 6 ) +-install -D -p -m 0644 extras/gluster-rsyslog-5.8.conf \ +- %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example +-%endif +- +-%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 ) +-install -D -p -m 0644 extras/logger.conf.example \ +- %{buildroot}%{_sysconfdir}/glusterfs/logger.conf.example +-%endif +-%endif +- + touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info + touch %{buildroot}%{_sharedstatedir}/glusterd/options + subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop) +-- +1.8.3.1 + diff --git a/SOURCES/0019-build-dependency-error-during-upgrade.patch b/SOURCES/0019-build-dependency-error-during-upgrade.patch new file mode 100644 index 0000000..560b6f0 --- /dev/null +++ b/SOURCES/0019-build-dependency-error-during-upgrade.patch @@ -0,0 +1,36 @@ +From 5c5283f873e72d7305953ca357b709a3ab1919f4 Mon Sep 17 00:00:00 2001 +From: Kaleb S KEITHLEY +Date: Tue, 10 May 2016 12:37:23 -0400 +Subject: [PATCH 19/74] build: dependency error during upgrade + +Not sure who thought config params in the form without_foo were a +good idea. Trying to parse !without_tiering conditionals makes my +head hurt. + +Label: DOWNSTREAM ONLY + +Change-Id: Ie1c43fc60d6f747c27b22e3a1c40539aba3d2cad +Signed-off-by: Kaleb S KEITHLEY +Reviewed-on: https://code.engineering.redhat.com/gerrit/74041 +Reviewed-by: Niels de Vos +--- + glusterfs.spec.in | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index fe566e5..f83ae5e 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1234,6 +1234,9 @@ exit 0 + %else + %exclude %{_libdir}/pkgconfig/libgfchangelog.pc + %endif ++%if ( 0%{!?_without_tiering:1} && ! 0%{?_build_server}) ++%exclude %{_libdir}/libgfdb.so ++%endif + %if ( 0%{!?_without_tiering:1} && 0%{?_build_server}) + %{_libdir}/pkgconfig/libgfdb.pc + %else +-- +1.8.3.1 + diff --git a/SOURCES/0020-eventsapi-Fix-eventtypes.h-header-generation-with-Py.patch b/SOURCES/0020-eventsapi-Fix-eventtypes.h-header-generation-with-Py.patch new file mode 100644 index 0000000..e35b661 --- /dev/null +++ b/SOURCES/0020-eventsapi-Fix-eventtypes.h-header-generation-with-Py.patch @@ -0,0 +1,88 @@ +From a7570af0bc6dc53044dce2cace9a65e96c571da6 Mon Sep 17 00:00:00 2001 +From: Aravinda VK +Date: Mon, 19 Sep 2016 16:59:30 +0530 +Subject: [PATCH 20/74] eventsapi: Fix eventtypes.h header generation with + Python 2.4 + +eventskeygen.py file generates eventtypes.h and eventtypes.py files +during build. If Python version is old(Version 2.4), then Gluster +Client build will fail. 
eventskeygen.py uses "with" statement to +open file, which is introduced in Python 2.5 + +Label: DOWNSTREAM ONLY + +Change-Id: I995e102fad0c7bc66e840b1ab9d53ed564266253 +Signed-off-by: Aravinda VK +Reviewed-on: https://code.engineering.redhat.com/gerrit/85060 +Reviewed-by: Milind Changire +Reviewed-by: Atin Mukherjee +--- + events/eventskeygen.py | 47 +++++++++++++++++++++++++---------------------- + 1 file changed, 25 insertions(+), 22 deletions(-) + +diff --git a/events/eventskeygen.py b/events/eventskeygen.py +index 23dfb47..a9c5573 100644 +--- a/events/eventskeygen.py ++++ b/events/eventskeygen.py +@@ -207,33 +207,36 @@ ERRORS = ( + + if gen_header_type == "C_HEADER": + # Generate eventtypes.h +- with open(eventtypes_h, "w") as f: +- f.write("#ifndef __EVENTTYPES_H__\n") +- f.write("#define __EVENTTYPES_H__\n\n") +- f.write("typedef enum {\n") +- for k in ERRORS: +- f.write(" {0},\n".format(k)) +- f.write("} event_errors_t;\n") ++ f = open(eventtypes_h, "w") ++ f.write("#ifndef __EVENTTYPES_H__\n") ++ f.write("#define __EVENTTYPES_H__\n\n") ++ f.write("typedef enum {\n") ++ for k in ERRORS: ++ f.write(" %s,\n" % k) ++ f.write("} event_errors_t;\n") + +- f.write("\n") ++ f.write("\n") + +- f.write("typedef enum {\n") +- for k in keys: +- f.write(" {0},\n".format(k)) ++ f.write("typedef enum {\n") ++ for k in keys: ++ f.write(" %s,\n" % k) + +- f.write(" {0}\n".format(LAST_EVENT)) +- f.write("} eventtypes_t;\n") +- f.write("\n#endif /* __EVENTTYPES_H__ */\n") ++ f.write(" %s\n" % LAST_EVENT) ++ f.write("} eventtypes_t;\n") ++ f.write("\n#endif /* __EVENTTYPES_H__ */\n") ++ f.close() + + if gen_header_type == "PY_HEADER": + # Generate eventtypes.py +- with open(eventtypes_py, "w") as f: +- f.write("# -*- coding: utf-8 -*-\n") +- f.write("all_events = [\n") +- for ev in keys: +- f.write(' "{0}",\n'.format(ev)) ++ f = open(eventtypes_py, "w") ++ f.write("# -*- coding: utf-8 -*-\n") ++ f.write("all_events = [\n") ++ for ev in keys: ++ f.write(' "%s",\n' % ev) + +- f.write("]\n\n") ++ f.write("]\n\n") + +- for idx, ev in enumerate(keys): +- f.write("{0} = {1}\n".format(ev.replace("EVENT_", ""), idx)) ++ for idx, ev in enumerate(keys): ++ f.write("%s = %s\n" % (ev.replace("EVENT_", ""), idx)) ++ ++ f.close() +-- +1.8.3.1 + diff --git a/SOURCES/0021-syscall-remove-preadv-and-pwritev-sys-wrappers.patch b/SOURCES/0021-syscall-remove-preadv-and-pwritev-sys-wrappers.patch new file mode 100644 index 0000000..239cdda --- /dev/null +++ b/SOURCES/0021-syscall-remove-preadv-and-pwritev-sys-wrappers.patch @@ -0,0 +1,86 @@ +From ab44b5af9915e15dbe679ac5a16a80d7b0ae45cc Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Tue, 20 Sep 2016 03:09:08 +0530 +Subject: [PATCH 21/74] syscall: remove preadv and pwritev sys wrappers + +Commit 76f1680 introduced sys wrappers for preadv and pwritev where these +syscalls are not supported for RHEL5. 
These functions are of actually no use +w.r.t downstream code as sys_pwritev is used only in bd xlator which is not +supported in downstream + +Label: DOWNSTREAM ONLY +Change-Id: Ifdc798f1fa74affd77abb06dd14cf9b51f484fe7 +Signed-off-by: Atin Mukherjee +--- + libglusterfs/src/syscall.c | 14 -------------- + libglusterfs/src/syscall.h | 6 ------ + xlators/storage/bd/src/bd.c | 4 ++-- + 3 files changed, 2 insertions(+), 22 deletions(-) + +diff --git a/libglusterfs/src/syscall.c b/libglusterfs/src/syscall.c +index a7d4402..90ef39a 100644 +--- a/libglusterfs/src/syscall.c ++++ b/libglusterfs/src/syscall.c +@@ -318,20 +318,6 @@ sys_write (int fd, const void *buf, size_t count) + + + ssize_t +-sys_preadv (int fd, const struct iovec *iov, int iovcnt, off_t offset) +-{ +- return preadv (fd, iov, iovcnt, offset); +-} +- +- +-ssize_t +-sys_pwritev (int fd, const struct iovec *iov, int iovcnt, off_t offset) +-{ +- return pwritev (fd, iov, iovcnt, offset); +-} +- +- +-ssize_t + sys_pread (int fd, void *buf, size_t count, off_t offset) + { + return pread (fd, buf, count, offset); +diff --git a/libglusterfs/src/syscall.h b/libglusterfs/src/syscall.h +index 0cb61b6..da816cb 100644 +--- a/libglusterfs/src/syscall.h ++++ b/libglusterfs/src/syscall.h +@@ -208,12 +208,6 @@ int + sys_fallocate(int fd, int mode, off_t offset, off_t len); + + ssize_t +-sys_preadv (int fd, const struct iovec *iov, int iovcnt, off_t offset); +- +-ssize_t +-sys_pwritev (int fd, const struct iovec *iov, int iovcnt, off_t offset); +- +-ssize_t + sys_pread(int fd, void *buf, size_t count, off_t offset); + + ssize_t +diff --git a/xlators/storage/bd/src/bd.c b/xlators/storage/bd/src/bd.c +index 07b7ecd..af3ac84 100644 +--- a/xlators/storage/bd/src/bd.c ++++ b/xlators/storage/bd/src/bd.c +@@ -1782,7 +1782,7 @@ __bd_pwritev (int fd, struct iovec *vector, int count, off_t offset, + if (!vector) + return -EFAULT; + +- retval = sys_pwritev (fd, vector, count, offset); ++ retval = pwritev (fd, vector, count, offset); + if (retval == -1) { + int64_t off = offset; + gf_log (THIS->name, GF_LOG_WARNING, +@@ -1805,7 +1805,7 @@ __bd_pwritev (int fd, struct iovec *vector, int count, off_t offset, + vector[index].iov_len = bd_size - internal_offset; + no_space = 1; + } +- retval = sys_pwritev (fd, vector[index].iov_base, ++ retval = pwritev (fd, vector[index].iov_base, + vector[index].iov_len, internal_offset); + if (retval == -1) { + gf_log (THIS->name, GF_LOG_WARNING, +-- +1.8.3.1 + diff --git a/SOURCES/0022-build-ignore-sbindir-conf.py-for-RHEL-5.patch b/SOURCES/0022-build-ignore-sbindir-conf.py-for-RHEL-5.patch new file mode 100644 index 0000000..6d32957 --- /dev/null +++ b/SOURCES/0022-build-ignore-sbindir-conf.py-for-RHEL-5.patch @@ -0,0 +1,32 @@ +From c5b4f68e24c718dcbc5f4ebe0094dcb900ac5314 Mon Sep 17 00:00:00 2001 +From: Milind Changire +Date: Tue, 20 Sep 2016 12:43:43 +0530 +Subject: [PATCH 22/74] build: ignore %{sbindir}/conf.py* for RHEL-5 + +commit dca6f06 has introduced this file in a very wrong location +for a Python file. And rpmbuild is behaving very differently than +RHEL-6 as regards ignoring .pyc and .pyo files. 
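A related portability and security aside: patch 0023 later in this series replaces the predictable "/tmp/..._" .. os.date("%s") temp-file names used by the %pretrans Lua scriptlets, citing CVE-2015-1795. The safe pattern, sketched here in Python rather than the scriptlets' Lua (editorial illustration only; the prefix is a placeholder):

    import os
    import tempfile

    # A guessable path like /tmp/glusterfs_pretrans_<epoch> can be
    # pre-created or symlinked by a local user before the scriptlet runs;
    # mkstemp() instead creates the file atomically (O_CREAT|O_EXCL)
    # under an unpredictable name, closing that race.
    fd, path = tempfile.mkstemp(prefix="glusterfs_pretrans_")
    try:
        os.write(fd, "#!/bin/sh\nexit 0\n".encode("ascii"))
    finally:
        os.close(fd)
        os.remove(path)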
+ +Label: DOWNSTREAM ONLY + +Change-Id: I574a500586162917102ae8eb32b939885d2b2d4c +Signed-off-by: Milind Changire +--- + glusterfs.spec.in | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index f83ae5e..8f30020 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1118,6 +1118,7 @@ exit 0 + %exclude %{_sbindir}/glusterd + %exclude %{_sbindir}/snap_scheduler.py + %exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh ++%exclude %{_sbindir}/conf.py* + %if 0%{?_tmpfilesdir:1} + %exclude %{_tmpfilesdir}/gluster.conf + %endif +-- +1.8.3.1 + diff --git a/SOURCES/0023-build-randomize-temp-file-names-in-pretrans-scriptle.patch b/SOURCES/0023-build-randomize-temp-file-names-in-pretrans-scriptle.patch new file mode 100644 index 0000000..5366d84 --- /dev/null +++ b/SOURCES/0023-build-randomize-temp-file-names-in-pretrans-scriptle.patch @@ -0,0 +1,248 @@ +From fdf4475ea3598b4287803001932f426f2c58f3b1 Mon Sep 17 00:00:00 2001 +From: Milind Changire +Date: Fri, 14 Oct 2016 12:53:27 +0530 +Subject: [PATCH 23/74] build: randomize temp file names in pretrans scriptlets + +Security issue CVE-2015-1795 mentions about possibility of file name +spoof attack for the %pretrans server scriptlet. +Since %pretrans scriptlets are executed only for server builds, we can +use os.tmpname() to randomize temporary file names for all %pretrans +scriptlets using this mechanism. + +Label: DOWNSTREAM ONLY + +Change-Id: Ic82433897432794b6d311d836355aa4bad886369 +Signed-off-by: Milind Changire +Reviewed-on: https://code.engineering.redhat.com/gerrit/86187 +Reviewed-by: Siddharth Sharma +Reviewed-by: Niels de Vos +Reviewed-by: Atin Mukherjee +--- + glusterfs.spec.in | 106 ++++++++++++++++++++++++++++++++---------------------- + 1 file changed, 64 insertions(+), 42 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 8f30020..ab61688 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1579,9 +1579,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -1620,9 +1621,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-api_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -1661,9 +1663,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-api-devel_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. 
++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -1702,9 +1705,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-cli_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -1743,9 +1747,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-client-xlators_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -1784,9 +1789,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-devel_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -1825,9 +1831,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-fuse_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -1866,9 +1873,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-ganesha_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -1908,9 +1916,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-geo-replication_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -1950,9 +1959,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-libs_pretrans_" .. 
os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -1991,9 +2001,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/python-gluster_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -2033,9 +2044,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-rdma_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -2076,9 +2088,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-resource-agents_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -2118,9 +2131,10 @@ if [ $? -eq 0 ]; then + fi + ]] + +--- rpm in RHEL5 does not have os.tmpname() +--- io.tmpfile() can not be resolved to a filename to pass to bash :-/ +-tmpname = "/tmp/glusterfs-server_pretrans_" .. os.date("%s") ++-- Since we run pretrans scripts only for RPMs built for a server build, ++-- we can now use os.tmpname() since it is available on RHEL6 and later ++-- platforms which are server platforms. ++tmpname = os.tmpname() + tmpfile = io.open(tmpname, "w") + tmpfile:write(script) + tmpfile:close() +@@ -2211,6 +2225,13 @@ end + * Thu Nov 24 2016 Jiffin Tony Thottan + - remove S31ganesha-reset.sh from hooks (#1397795) + ++* Fri Oct 14 2016 Milind Changire ++- Changed pretrans scripts to use os.tmpname() for enhanced security ++ for server builds only (#1362044) ++ ++* Tue Sep 27 2016 Milind Changire ++- Added systemd requirement to glusterfs-server and glusterfs-events packages ++ + * Thu Sep 22 2016 Kaleb S. 
KEITHLEY + - python-ctypes no long exists, now in python stdlib (#1378436) + +@@ -2330,6 +2351,7 @@ end + + * Mon May 18 2015 Milind Changire + - Move file peer_add_secret_pub to the server RPM to support glusterfind (#1221544) ++ + * Sun May 17 2015 Niels de Vos + - Fix building on RHEL-5 based distributions (#1222317) + +-- +1.8.3.1 + diff --git a/SOURCES/0024-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch b/SOURCES/0024-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch new file mode 100644 index 0000000..fcc3532 --- /dev/null +++ b/SOURCES/0024-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch @@ -0,0 +1,80 @@ +From abd66a26f1a6fb998c0b6b60c3004ea8414ffee0 Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Thu, 17 Nov 2016 12:44:38 +0530 +Subject: [PATCH 24/74] glusterd/gNFS : On post upgrade to 3.2, disable gNFS + for all volumes + +Currently on 3.2 gNFS is dsiabled for newly created volumes or old volumes +with default value. There will be volumes which have explicitly turn off +nfs.disable option. This change disable gNFS even for that volume as well. + +label : DOWNSTREAM ONLY + +Change-Id: I4ddeb23690271034b0bbb3fc50b359350b5eae87 +Signed-off-by: Jiffin Tony Thottan +Reviewed-on: https://code.engineering.redhat.com/gerrit/90425 +Reviewed-by: Atin Mukherjee +Tested-by: Atin Mukherjee +--- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 43 +++++++++++++++++------------- + 1 file changed, 25 insertions(+), 18 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 6d5b8cf..09be165 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -2437,26 +2437,33 @@ glusterd_update_volumes_dict (glusterd_volinfo_t *volinfo) + GF_VALIDATE_OR_GOTO (this->name, conf, out); + + /* 3.9.0 onwards gNFS will be disabled by default. In case of an upgrade +- * from anything below than 3.9.0 to 3.9.x the volume's dictionary will +- * not have 'nfs.disable' key set which means the same will not be set +- * to on until explicitly done. setnfs.disable to 'on' at op-version +- * bump up flow is the ideal way here. The same is also applicable for +- * transport.address-family where if the transport type is set to tcp +- * then transport.address-family is defaulted to 'inet'. ++ * from anything below than 3.9.0 to 3.9.x, the value for nfs.disable is ++ * set to 'on' for all volumes even if it is explicitly set to 'off' in ++ * previous version. This change is only applicable to downstream code. ++ * Setting nfs.disable to 'on' at op-version bump up flow is the ideal ++ * way here. The same is also applicable for transport.address-family ++ * where if the transport type is set to tcp then transport.address-family ++ * is defaulted to 'inet'. + */ + if (conf->op_version >= GD_OP_VERSION_3_9_0) { +- if (dict_get_str_boolean (volinfo->dict, NFS_DISABLE_MAP_KEY, +- 1)) { +- ret = dict_set_dynstr_with_alloc (volinfo->dict, +- NFS_DISABLE_MAP_KEY, +- "on"); +- if (ret) { +- gf_msg (this->name, GF_LOG_ERROR, errno, +- GD_MSG_DICT_SET_FAILED, "Failed to set " +- "option ' NFS_DISABLE_MAP_KEY ' on " +- "volume %s", volinfo->volname); +- goto out; +- } ++ if (!(dict_get_str_boolean (volinfo->dict, NFS_DISABLE_MAP_KEY, ++ 0))) { ++ gf_msg (this->name, GF_LOG_INFO, 0, 0, "Gluster NFS is" ++ " being deprecated in favor of NFS-Ganesha, " ++ "hence setting nfs.disable to 'on' for volume " ++ "%s. 
Please re-enable it if requires", ++ volinfo->volname); ++ } ++ ++ ret = dict_set_dynstr_with_alloc (volinfo->dict, ++ NFS_DISABLE_MAP_KEY, ++ "on"); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, errno, ++ GD_MSG_DICT_SET_FAILED, "Failed to set " ++ "option ' NFS_DISABLE_MAP_KEY ' on " ++ "volume %s", volinfo->volname); ++ goto out; + } + ret = dict_get_str (volinfo->dict, "transport.address-family", + &address_family_str); +-- +1.8.3.1 + diff --git a/SOURCES/0025-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch b/SOURCES/0025-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch new file mode 100644 index 0000000..a84a39d --- /dev/null +++ b/SOURCES/0025-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch @@ -0,0 +1,58 @@ +From 867536a4ced38d72a7d980cd34bcbf0ce876206a Mon Sep 17 00:00:00 2001 +From: Soumya Koduri +Date: Fri, 18 Nov 2016 12:47:06 +0530 +Subject: [PATCH 25/74] build: Add dependency on netstat for glusterfs-ganesha + pkg + +portblock resource-agent needs netstat command but this dependency +should have been ideally added to resource-agents package. But the +fixes (bug1395594, bug1395596) are going to be available only +in the future RHEL 6.9 and RHEL 7.4 releases. Hence as an interim +workaround, we agreed to add this dependency for glusterfs-ganesha package. + +label : DOWNSTREAM ONLY + +Change-Id: I6ac1003103755d7534dd079c821bbaacd8dd94b8 +Signed-off-by: Soumya Koduri +Reviewed-on: https://code.engineering.redhat.com/gerrit/90529 +Reviewed-by: Jiffin Thottan +Reviewed-by: Milind Changire +Reviewed-by: Atin Mukherjee +--- + glusterfs.spec.in | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index ab61688..343e88f 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -414,6 +414,11 @@ Requires: nfs-ganesha-gluster, pcs, dbus + %if ( 0%{?rhel} && 0%{?rhel} == 6 ) + Requires: cman, pacemaker, corosync + %endif ++%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 ) ++# we need portblock resource-agent in 3.9.5 and later. ++Requires: resource-agents >= 3.9.5 ++Requires: net-tools ++%endif + + %description ganesha + GlusterFS is a distributed file-system capable of scaling to several +@@ -2225,6 +2230,14 @@ end + * Thu Nov 24 2016 Jiffin Tony Thottan + - remove S31ganesha-reset.sh from hooks (#1397795) + ++* Fri Nov 18 2016 Soumya Koduri ++- As an interim fix add dependency on netstat(/net-tools) for glusterfs-ganesha package (#1395574) ++ ++* Fri Nov 11 2016 Soumya Koduri ++- Add dependency on portblock resource agent for ganesha package (#1278336) ++- Fix incorrect Requires for portblock resource agent (#1278336) ++- Update version checks for portblock resource agent on RHEL (#1278336) ++ + * Fri Oct 14 2016 Milind Changire + - Changed pretrans scripts to use os.tmpname() for enhanced security + for server builds only (#1362044) +-- +1.8.3.1 + diff --git a/SOURCES/0026-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch b/SOURCES/0026-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch new file mode 100644 index 0000000..bd5c2e2 --- /dev/null +++ b/SOURCES/0026-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch @@ -0,0 +1,105 @@ +From 14bfa98824d40ff1f721a905f8e8ffd557f96eef Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Thu, 15 Dec 2016 17:14:01 +0530 +Subject: [PATCH 26/74] glusterd/gNFS : explicitly set "nfs.disable" to "off" + after 3.2 upgrade + +Gluster NFS was enabled by default for all volumes till 3.1. 
But 3.2 onwards
+it will be disabled for new volumes by setting "nfs.disable" to "on".
+This patch will take care of existing volumes in such a way that if the
+option is not configured, it will set "nfs.disable" to "off" during the
+op-version bump up.
+
+Also, this patch removes the warning message shown while enabling gluster
+NFS for a volume.
+
+Label: DOWNSTREAM ONLY
+
+Change-Id: Ib199c3180204f917791b4627c58d846750d18a5a
+Signed-off-by: Jiffin Tony Thottan
+Reviewed-on: https://code.engineering.redhat.com/gerrit/93146
+Reviewed-by: Soumya Koduri
+Reviewed-by: Atin Mukherjee
+---
+ cli/src/cli-cmd-parser.c | 14 --------------
+ xlators/mgmt/glusterd/src/glusterd-op-sm.c | 29 ++++++++++++-----------------
+ 2 files changed, 12 insertions(+), 31 deletions(-)
+
+diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
+index c8ed367..ca4d906 100644
+--- a/cli/src/cli-cmd-parser.c
++++ b/cli/src/cli-cmd-parser.c
+@@ -1621,20 +1621,6 @@ cli_cmd_volume_set_parse (struct cli_state *state, const char **words,
+ goto out;
+ }
+ }
+- if ((!strcmp (key, "nfs.disable")) &&
+- (!strcmp (value, "off"))) {
+- question = "Gluster NFS is being deprecated in favor "
+- "of NFS-Ganesha Enter \"yes\" to continue "
+- "using Gluster NFS";
+- answer = cli_cmd_get_confirmation (state, question);
+- if (GF_ANSWER_NO == answer) {
+- gf_log ("cli", GF_LOG_ERROR, "Operation "
+- "cancelled, exiting");
+- *op_errstr = gf_strdup ("Aborted by user.");
+- ret = -1;
+- goto out;
+- }
+- }
+ }
+
+ ret = dict_set_int32 (dict, "count", wordcount-3);
+diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+index 09be165..0557ad8 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+@@ -2438,9 +2438,9 @@ glusterd_update_volumes_dict (glusterd_volinfo_t *volinfo)
+
+ /* 3.9.0 onwards gNFS will be disabled by default. In case of an upgrade
+ * from anything below than 3.9.0 to 3.9.x, the value for nfs.disable is
+- * set to 'on' for all volumes even if it is explicitly set to 'off' in
++ * set to 'off' for all volumes even if it is not explicitly set in the
+ * previous version. This change is only applicable to downstream code.
+- * Setting nfs.disable to 'on' at op-version bump up flow is the ideal
++ * Setting nfs.disable to 'off' at op-version bump up flow is the ideal
+ * way here. The same is also applicable for transport.address-family
+ * where if the transport type is set to tcp then transport.address-family
+ * is defaulted to 'inet'.
+@@ -2448,23 +2448,18 @@ glusterd_update_volumes_dict (glusterd_volinfo_t *volinfo)
+ if (conf->op_version >= GD_OP_VERSION_3_9_0) {
+ if (!(dict_get_str_boolean (volinfo->dict, NFS_DISABLE_MAP_KEY,
+ 0))) {
+- gf_msg (this->name, GF_LOG_INFO, 0, 0, "Gluster NFS is"
+- " being deprecated in favor of NFS-Ganesha, "
+- "hence setting nfs.disable to 'on' for volume "
+- "%s. 
Please re-enable it if requires", +- volinfo->volname); ++ ret = dict_set_dynstr_with_alloc (volinfo->dict, ++ NFS_DISABLE_MAP_KEY, ++ "off"); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, errno, ++ GD_MSG_DICT_SET_FAILED, "Failed to turn " ++ "off ' NFS_DISABLE_MAP_KEY ' option for " ++ "volume %s", volinfo->volname); ++ goto out; ++ } + } + +- ret = dict_set_dynstr_with_alloc (volinfo->dict, +- NFS_DISABLE_MAP_KEY, +- "on"); +- if (ret) { +- gf_msg (this->name, GF_LOG_ERROR, errno, +- GD_MSG_DICT_SET_FAILED, "Failed to set " +- "option ' NFS_DISABLE_MAP_KEY ' on " +- "volume %s", volinfo->volname); +- goto out; +- } + ret = dict_get_str (volinfo->dict, "transport.address-family", + &address_family_str); + if (ret) { +-- +1.8.3.1 + diff --git a/SOURCES/0027-glusterd-spawn-nfs-daemon-in-op-version-bump-if-nfs..patch b/SOURCES/0027-glusterd-spawn-nfs-daemon-in-op-version-bump-if-nfs..patch new file mode 100644 index 0000000..ae4bab5 --- /dev/null +++ b/SOURCES/0027-glusterd-spawn-nfs-daemon-in-op-version-bump-if-nfs..patch @@ -0,0 +1,132 @@ +From 52798b6934ea584b25b1ade64cb52a7439c1b113 Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Tue, 3 Jan 2017 18:13:29 +0530 +Subject: [PATCH 27/74] glusterd: spawn nfs daemon in op-version bump if + nfs.disable key is absent + +3.2.0 onwards gNFS will be disabled by default. However any cluster +upgraded to 3.2.0 with existing volumes exposed over gNFS should +continue to have gNFS access and hence post upgrade gNFS service should +come up after bumping up the op-version. Although the key nfs.disable +was handled and managed correctly in the upgrade path but gNFS daemon +was never spawned in this case. + +Fix is to spawn gNFS daemon in op-version bump up code path if +nfs.disable option is not set. + +Label : DOWNSTREAM ONLY + +Change-Id: Icac6f3653160f79b271f25f5df0c89690917e702 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/94006 +Reviewed-by: Jiffin Thottan +Reviewed-by: Samikshan Bairagya +--- + xlators/mgmt/glusterd/src/glusterd-messages.h | 8 ++++++ + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 35 ++++++++++++++++++++++++--- + 2 files changed, 40 insertions(+), 3 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h +index 65d4353..8bb4c43 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-messages.h ++++ b/xlators/mgmt/glusterd/src/glusterd-messages.h +@@ -4937,6 +4937,14 @@ + */ + #define GD_MSG_GARBAGE_ARGS (GLUSTERD_COMP_BASE + 611) + ++/*! ++ * @messageid ++ * @diagnosis ++ * @recommendedaction ++ * ++ */ ++#define GD_MSG_SVC_START_FAIL (GLUSTERD_COMP_BASE + 590) ++ + /*------------*/ + + #define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages" +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 0557ad8..4fc719a 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -2423,7 +2423,8 @@ out: + } + + static int +-glusterd_update_volumes_dict (glusterd_volinfo_t *volinfo) ++glusterd_update_volumes_dict (glusterd_volinfo_t *volinfo, ++ gf_boolean_t *start_nfs_svc) + { + int ret = -1; + xlator_t *this = NULL; +@@ -2436,6 +2437,8 @@ glusterd_update_volumes_dict (glusterd_volinfo_t *volinfo) + conf = this->private; + GF_VALIDATE_OR_GOTO (this->name, conf, out); + ++ ret = 0; ++ + /* 3.9.0 onwards gNFS will be disabled by default. 
In case of an upgrade + * from anything below than 3.9.0 to 3.9.x, the value for nfs.disable is + * set to 'off' for all volumes even if it is not explicitly set in the +@@ -2458,6 +2461,12 @@ glusterd_update_volumes_dict (glusterd_volinfo_t *volinfo) + "volume %s", volinfo->volname); + goto out; + } ++ /* If the volume is started then mark start_nfs_svc to ++ * true such that nfs daemon can be spawned up ++ */ ++ if (GLUSTERD_STATUS_STARTED == volinfo->status) ++ *start_nfs_svc = _gf_true; ++ + } + + ret = dict_get_str (volinfo->dict, "transport.address-family", +@@ -2478,9 +2487,12 @@ glusterd_update_volumes_dict (glusterd_volinfo_t *volinfo) + } + } + } ++ ret = glusterd_store_volinfo (volinfo, ++ GLUSTERD_VOLINFO_VER_AC_INCREMENT); ++ if (ret) ++ goto out; ++ + } +- ret = glusterd_store_volinfo (volinfo, +- GLUSTERD_VOLINFO_VER_AC_INCREMENT); + + out: + return ret; +@@ -2529,6 +2541,7 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict, + uint32_t op_version = 0; + glusterd_volinfo_t *volinfo = NULL; + glusterd_svc_t *svc = NULL; ++ gf_boolean_t start_nfs_svc = _gf_false; + + conf = this->private; + ret = dict_get_str (dict, "key1", &key); +@@ -2645,6 +2658,22 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict, + "Failed to store op-version."); + } + } ++ cds_list_for_each_entry (volinfo, &conf->volumes, vol_list) { ++ ret = glusterd_update_volumes_dict (volinfo, ++ &start_nfs_svc); ++ if (ret) ++ goto out; ++ } ++ if (start_nfs_svc) { ++ ret = conf->nfs_svc.manager (&(conf->nfs_svc), NULL, ++ PROC_START_NO_WAIT); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_SVC_START_FAIL, ++ "unable to start nfs service"); ++ goto out; ++ } ++ } + /* No need to save cluster.op-version in conf->opts + */ + goto out; +-- +1.8.3.1 + diff --git a/SOURCES/0028-glusterd-parallel-readdir-Change-the-op-version-of-p.patch b/SOURCES/0028-glusterd-parallel-readdir-Change-the-op-version-of-p.patch new file mode 100644 index 0000000..a40db89 --- /dev/null +++ b/SOURCES/0028-glusterd-parallel-readdir-Change-the-op-version-of-p.patch @@ -0,0 +1,42 @@ +From 91489431c48f6fa9bce3ee6f377bc9702602b18d Mon Sep 17 00:00:00 2001 +From: Poornima G +Date: Wed, 26 Apr 2017 14:07:58 +0530 +Subject: [PATCH 28/74] glusterd, parallel-readdir: Change the op-version of + parallel-readdir to 31100 + +Issue: Downstream 3.2 was released with op-version 31001, parallel-readdir +feature in upstream was released in 3.10 and hence with op-version 31000. +With this, parallel-readdir will be allowed in 3.2 cluster/clients as well. +But 3.2 didn't have parallel-readdir feature backported. 
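The gate that causes the leak can be sketched as follows (an editorial illustration, not project code; option_allowed is a hypothetical name, and the numbers are the op-versions quoted above):

    # An option becomes settable once the cluster op-version reaches
    # the op-version the option is tagged with.
    def option_allowed(cluster_op_version, option_op_version):
        return cluster_op_version >= option_op_version

    assert option_allowed(31001, 31000)      # downstream 3.2: leaks through
    assert not option_allowed(31001, 31100)  # after retagging to 31100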
+ +Fix: +Increase the op-version of parallel-readdir feature only in downstream +to 31100(3.3 highest op-version) + +Label: DOWNSTREAM ONLY + +Change-Id: I2640520985627f3a1cb4fb96e28350f8bb9b146c +Signed-off-by: Poornima G +Reviewed-on: https://code.engineering.redhat.com/gerrit/104403 +Reviewed-by: Atin Mukherjee +Tested-by: Atin Mukherjee +--- + xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +index 93ef85c..9729767 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +@@ -3376,7 +3376,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .option = "parallel-readdir", + .value = "off", + .type = DOC, +- .op_version = GD_OP_VERSION_3_10_0, ++ .op_version = GD_OP_VERSION_3_11_0, + .validate_fn = validate_parallel_readdir, + .description = "If this option is enabled, the readdir operation " + "is performed in parallel on all the bricks, thus " +-- +1.8.3.1 + diff --git a/SOURCES/0029-build-exclude-glusterfssharedstorage.service-and-mou.patch b/SOURCES/0029-build-exclude-glusterfssharedstorage.service-and-mou.patch new file mode 100644 index 0000000..366df42 --- /dev/null +++ b/SOURCES/0029-build-exclude-glusterfssharedstorage.service-and-mou.patch @@ -0,0 +1,68 @@ +From 7562ffbce9d768d5af9d23361cf6dd6ef992bead Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Fri, 10 Nov 2017 23:38:14 +0530 +Subject: [PATCH 29/74] build: exclude glusterfssharedstorage.service and + mount-shared-storage.sh from client builds + +Label: DOWNSTREAM ONLY + +Change-Id: I7d76ba0742b5c6a44505eb883eacda0c91efbe51 +Signed-off-by: Jiffin Tony Thottan +Reviewed-on: https://code.engineering.redhat.com/gerrit/109684 +Reviewed-by: Milind Changire +Tested-by: Milind Changire +Reviewed-by: Atin Mukherjee +--- + glusterfs.spec.in | 22 +++++++++++++++++++++- + 1 file changed, 21 insertions(+), 1 deletion(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 343e88f..4596e3f 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1083,6 +1083,20 @@ exit 0 + %exclude %{_libexecdir}/glusterfs/glusterfind + %exclude %{_bindir}/glusterfind + %exclude %{_libexecdir}/glusterfs/peer_add_secret_pub ++# exclude eventsapi files ++%exclude %{_sysconfdir}/glusterfs/eventsconfig.json ++%exclude %{_sharedstatedir}/glusterd/events ++%exclude %{_libexecdir}/glusterfs/events ++%exclude %{_libexecdir}/glusterfs/peer_eventsapi.py* ++%exclude %{_sbindir}/glustereventsd ++%exclude %{_sbindir}/gluster-eventsapi ++%exclude %{_datadir}/glusterfs/scripts/eventsdash.py* ++%if ( 0%{?_with_systemd:1} ) ++%exclude %{_unitdir}/glustereventsd.service ++%exclude %_init_glusterfssharedstorage ++%else ++%exclude %{_sysconfdir}/init.d/glustereventsd ++%endif + # exclude server files + %exclude %{_sharedstatedir}/glusterd/* + %exclude %{_sysconfdir}/glusterfs +@@ -1123,6 +1137,9 @@ exit 0 + %exclude %{_sbindir}/glusterd + %exclude %{_sbindir}/snap_scheduler.py + %exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh ++%if ( 0%{?_with_systemd:1} ) ++%exclude %{_libexecdir}/glusterfs/mount-shared-storage.sh ++%endif + %exclude %{_sbindir}/conf.py* + %if 0%{?_tmpfilesdir:1} + %exclude %{_tmpfilesdir}/gluster.conf +@@ -2181,7 +2198,10 @@ end + * Thu Jul 13 2017 Kaleb S. 
KEITHLEY + - various directories not owned by any package + +-* Fri Jun 16 2017 Jiffin Tony Thottan ++* Wed Jun 21 2017 Jiffin Tony Thottan ++- Exclude glusterfssharedstorage.service and mount-shared-storage.sh from client builds ++ ++* Tue Jun 20 2017 Jiffin Tony Thottan + - Add glusterfssharedstorage.service systemd file + + * Fri Jun 9 2017 Poornima G +-- +1.8.3.1 + diff --git a/SOURCES/0030-build-make-gf_attach-available-in-glusterfs-server.patch b/SOURCES/0030-build-make-gf_attach-available-in-glusterfs-server.patch new file mode 100644 index 0000000..aa6fd1d --- /dev/null +++ b/SOURCES/0030-build-make-gf_attach-available-in-glusterfs-server.patch @@ -0,0 +1,50 @@ +From 8279b8c5f23cddd1b7db59c56ed2d8896ac49aa7 Mon Sep 17 00:00:00 2001 +From: Milind Changire +Date: Tue, 4 Jul 2017 17:10:27 +0530 +Subject: [PATCH 30/74] build: make gf_attach available in glusterfs-server + +Problem: +gf_attach was erroneously packaged in glusterfs-fuse + +Solution: +move gf_attach listing to server package +add gf_attach to the exclude listing for client builds + +Label: DOWNSTREAM ONLY + +Change-Id: I0de45700badcbab65febf2385f1ac074c44cfa7c +Signed-off-by: Milind Changire +Reviewed-on: https://code.engineering.redhat.com/gerrit/111001 +Reviewed-by: Atin Mukherjee +--- + glusterfs.spec.in | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 4596e3f..600fa6e 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1135,6 +1135,7 @@ exit 0 + %exclude %{_sbindir}/gcron.py + %exclude %{_sbindir}/glfsheal + %exclude %{_sbindir}/glusterd ++%exclude %{_sbindir}/gf_attach + %exclude %{_sbindir}/snap_scheduler.py + %exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh + %if ( 0%{?_with_systemd:1} ) +@@ -2198,6 +2199,12 @@ end + * Thu Jul 13 2017 Kaleb S. KEITHLEY + - various directories not owned by any package + ++* Tue Jul 04 2017 Milind Changire ++- moved %{_sbindir}/gf_attach from glusterfs-fuse to glusterfs-server ++ ++* Fri Jun 23 2017 Kaleb S. KEITHLEY ++- DOWNSTREAM ONLY remove Requires: selinux-policy for puddle generation ++ + * Wed Jun 21 2017 Jiffin Tony Thottan + - Exclude glusterfssharedstorage.service and mount-shared-storage.sh from client builds + +-- +1.8.3.1 + diff --git a/SOURCES/0031-glusterd-Revert-op-version-for-cluster.max-brick-per.patch b/SOURCES/0031-glusterd-Revert-op-version-for-cluster.max-brick-per.patch new file mode 100644 index 0000000..6ee34e5 --- /dev/null +++ b/SOURCES/0031-glusterd-Revert-op-version-for-cluster.max-brick-per.patch @@ -0,0 +1,37 @@ +From e1f21c716b9a9f245e8ad2c679fb12fd86c8655e Mon Sep 17 00:00:00 2001 +From: Samikshan Bairagya +Date: Mon, 10 Jul 2017 11:54:52 +0530 +Subject: [PATCH 31/74] glusterd: Revert op-version for + "cluster.max-brick-per-process" + +The op-version for the "cluster.max-brick-per-process" option was +set to 3.12.0 in the upstream patch and was backported here: +https://code.engineering.redhat.com/gerrit/#/c/111799. This commit +reverts the op-version for this option to 3.11.1 instead. 
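As a reading aid for the hunk below (editorial note): the GD_OP_VERSION_X_Y_Z constants encode version X.Y.Z as X*10000 + Y*100 + Z, so the revert lowers this option's op-version from 31200 to 31101:

    def op_version(major, minor, patch):
        return major * 10000 + minor * 100 + patch

    assert op_version(3, 11, 1) == 31101   # GD_OP_VERSION_3_11_1
    assert op_version(3, 12, 0) == 31200   # GD_OP_VERSION_3_12_0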
+
+Label: DOWNSTREAM ONLY
+
+Change-Id: I23639cef43d41915eea0394d019b1e0796a99d7b
+Signed-off-by: Samikshan Bairagya
+Reviewed-on: https://code.engineering.redhat.com/gerrit/111804
+Reviewed-by: Atin Mukherjee
+---
+ xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+index 9729767..2210b82 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+@@ -3449,7 +3449,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
+ { .key = GLUSTERD_BRICKMUX_LIMIT_KEY,
+ .voltype = "mgmt/glusterd",
+ .value = "0",
+- .op_version = GD_OP_VERSION_3_12_0,
++ .op_version = GD_OP_VERSION_3_11_1,
+ .validate_fn = validate_mux_limit,
+ .type = GLOBAL_DOC,
+ .description = "This option can be used to limit the number of brick "
+--
+1.8.3.1
+

diff --git a/SOURCES/0032-cli-Add-message-for-user-before-modifying-brick-mult.patch b/SOURCES/0032-cli-Add-message-for-user-before-modifying-brick-mult.patch
new file mode 100644
index 0000000..a4faa77
--- /dev/null
+++ b/SOURCES/0032-cli-Add-message-for-user-before-modifying-brick-mult.patch
@@ -0,0 +1,56 @@
+From 472aebd90fb081db85b00491ce7034a9b971f4e1 Mon Sep 17 00:00:00 2001
+From: Samikshan Bairagya
+Date: Wed, 9 Aug 2017 14:32:59 +0530
+Subject: [PATCH 32/74] cli: Add message for user before modifying
+ brick-multiplex option
+
+Users should be notified that the brick-multiplexing feature is
+supported only for container workloads (CNS/CRS). They should also be
+made aware that it is advisable to either have all volumes in stopped
+state or have no bricks running before modifying the "brick-multiplex"
+option. This commit makes sure these messages are displayed to the
+user before brick-multiplexing is enabled or disabled.
+
+Label: DOWNSTREAM ONLY
+
+Change-Id: Ic40294b26c691ea03185c4d1fce840ef23f95718
+Signed-off-by: Samikshan Bairagya
+Reviewed-on: https://code.engineering.redhat.com/gerrit/114793
+Reviewed-by: Atin Mukherjee
+---
+ cli/src/cli-cmd-parser.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
+index ca4d906..216e050 100644
+--- a/cli/src/cli-cmd-parser.c
++++ b/cli/src/cli-cmd-parser.c
+@@ -1621,6 +1621,24 @@ cli_cmd_volume_set_parse (struct cli_state *state, const char **words,
+ goto out;
+ }
+ }
++
++ if ((strcmp (key, "cluster.brick-multiplex") == 0)) {
++ question = "Brick-multiplexing is supported only for "
++ "container workloads (CNS/CRS). Also it is "
++ "advised to make sure that either all "
++ "volumes are in stopped state or no bricks "
++ "are running before this option is modified." 
++ "Do you still want to continue?"; ++ ++ answer = cli_cmd_get_confirmation (state, question); ++ if (GF_ANSWER_NO == answer) { ++ gf_log ("cli", GF_LOG_ERROR, "Operation " ++ "cancelled, exiting"); ++ *op_errstr = gf_strdup ("Aborted by user."); ++ ret = -1; ++ goto out; ++ } ++ } + } + + ret = dict_set_int32 (dict, "count", wordcount-3); +-- +1.8.3.1 + diff --git a/SOURCES/0033-build-launch-glusterd-upgrade-after-all-new-bits-are.patch b/SOURCES/0033-build-launch-glusterd-upgrade-after-all-new-bits-are.patch new file mode 100644 index 0000000..700f1d7 --- /dev/null +++ b/SOURCES/0033-build-launch-glusterd-upgrade-after-all-new-bits-are.patch @@ -0,0 +1,114 @@ +From 1ce0b65090c888b0e2b28cab03731674f4988aeb Mon Sep 17 00:00:00 2001 +From: Milind Changire +Date: Tue, 10 Oct 2017 09:58:24 +0530 +Subject: [PATCH 33/74] build: launch glusterd upgrade after all new bits are + installed + +Problem: +glusterd upgrade mode needs new bits from glusterfs-rdma which +optional and causes the dependency graph to break since it is +not tied into glusterfs-server requirements + +Solution: +Run glusterd upgrade mode after all new bits are installed +i.e. in %posttrans server section + +Label: DOWNSTREAM ONLY + +Change-Id: I356e02d0bf0eaaef43c20ce07b388262f63093a4 +Signed-off-by: Milind Changire +Reviewed-on: https://code.engineering.redhat.com/gerrit/120094 +Reviewed-by: Atin Mukherjee +Tested-by: RHGS Build Bot +Reviewed-by: Raghavendra Talur +--- + glusterfs.spec.in | 56 ++++++++++++++++++++++++++++++++++--------------------- + 1 file changed, 35 insertions(+), 21 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 600fa6e..f4386de 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -963,27 +963,6 @@ fi + %firewalld_reload + %endif + +-pidof -c -o %PPID -x glusterd &> /dev/null +-if [ $? -eq 0 ]; then +- kill -9 `pgrep -f gsyncd.py` &> /dev/null +- +- killall --wait glusterd &> /dev/null +- glusterd --xlator-option *.upgrade=on -N +- +- #Cleaning leftover glusterd socket file which is created by glusterd in +- #rpm_script_t context. +- rm -f %{_rundir}/glusterd.socket +- +- # glusterd _was_ running, we killed it, it exited after *.upgrade=on, +- # so start it again +- %_init_start glusterd +-else +- glusterd --xlator-option *.upgrade=on -N +- +- #Cleaning leftover glusterd socket file which is created by glusterd in +- #rpm_script_t context. +- rm -f %{_rundir}/glusterd.socket +-fi + %endif + + ##----------------------------------------------------------------------------- +@@ -2166,6 +2145,35 @@ os.remove(tmpname) + if not (ok == 0) then + error("Detected running glusterfs processes", ok) + end ++ ++%posttrans server ++pidof -c -o %PPID -x glusterd &> /dev/null ++if [ $? -eq 0 ]; then ++ kill -9 `pgrep -f gsyncd.py` &> /dev/null ++ ++ killall --wait -SIGTERM glusterd &> /dev/null ++ ++ if [ "$?" != "0" ]; then ++ echo "killall failed while killing glusterd" ++ fi ++ ++ glusterd --xlator-option *.upgrade=on -N ++ ++ #Cleaning leftover glusterd socket file which is created by glusterd in ++ #rpm_script_t context. ++ rm -rf /var/run/glusterd.socket ++ ++ # glusterd _was_ running, we killed it, it exited after *.upgrade=on, ++ # so start it again ++ %_init_start glusterd ++else ++ glusterd --xlator-option *.upgrade=on -N ++ ++ #Cleaning leftover glusterd socket file which is created by glusterd in ++ #rpm_script_t context. 
++ rm -rf /var/run/glusterd.socket ++fi ++ + %endif + + # Events +@@ -2190,9 +2198,15 @@ end + %endif + + %changelog ++* Tue Oct 10 2017 Milind Changire ++- DOWNSTREAM ONLY patch - launch glusterd in upgrade mode after all new bits have been installed ++ + * Tue Aug 22 2017 Kaleb S. KEITHLEY + - libibverbs-devel, librdmacm-devel -> rdma-core-devel #1483996 + ++* Fri Aug 04 2017 Kaleb S. KEITHLEY ++- /var/lib/glusterd/options made config(noreplace) to avoid losing shared state info ++ + * Thu Jul 20 2017 Aravinda VK + - Added new tool/binary to set the gfid2path xattr on files + +-- +1.8.3.1 + diff --git a/SOURCES/0034-build-remove-pretrans-script-for-python-gluster.patch b/SOURCES/0034-build-remove-pretrans-script-for-python-gluster.patch new file mode 100644 index 0000000..8f0aa31 --- /dev/null +++ b/SOURCES/0034-build-remove-pretrans-script-for-python-gluster.patch @@ -0,0 +1,76 @@ +From 58e52a8862aff553a883ee8b554f38baa2bda9a6 Mon Sep 17 00:00:00 2001 +From: Milind Changire +Date: Tue, 7 Nov 2017 18:32:59 +0530 +Subject: [PATCH 34/74] build: remove pretrans script for python-gluster + +Remove pretrans scriptlet for python-gluster. + +Label: DOWNSTREAM ONLY + +Change-Id: Iee006354c596aedbd70438a3bdd583de28837190 +Signed-off-by: Milind Changire +Reviewed-on: https://code.engineering.redhat.com/gerrit/122556 +Reviewed-by: Prashanth Pai +Reviewed-by: Aravinda Vishwanathapura Krishna Murthy +Reviewed-by: Atin Mukherjee +Tested-by: RHGS Build Bot +--- + glusterfs.spec.in | 42 ------------------------------------------ + 1 file changed, 42 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index f4386de..8c16477 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1976,48 +1976,6 @@ end + + + +-%pretrans -n python-gluster -p +-if not posix.access("/bin/bash", "x") then +- -- initial installation, no shell, no running glusterfsd +- return 0 +-end +- +--- TODO: move this completely to a lua script +--- For now, we write a temporary bash script and execute that. +- +-script = [[#!/bin/sh +-pidof -c -o %PPID -x glusterfsd &>/dev/null +- +-if [ $? -eq 0 ]; then +- pushd . > /dev/null 2>&1 +- for volume in /var/lib/glusterd/vols/*; do cd $volume; +- vol_type=`grep '^type=' info | awk -F'=' '{print $2}'` +- volume_started=`grep '^status=' info | awk -F'=' '{print $2}'` +- if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then +- exit 1; +- fi +- done +- +- popd > /dev/null 2>&1 +- exit 1; +-fi +-]] +- +--- Since we run pretrans scripts only for RPMs built for a server build, +--- we can now use os.tmpname() since it is available on RHEL6 and later +--- platforms which are server platforms. +-tmpname = os.tmpname() +-tmpfile = io.open(tmpname, "w") +-tmpfile:write(script) +-tmpfile:close() +-ok, how, val = os.execute("/bin/bash " .. 
tmpname) +-os.remove(tmpname) +-if not (ok == 0) then +- error("Detected running glusterfs processes", ok) +-end +- +- +- + %if ( 0%{!?_without_rdma:1} ) + %pretrans rdma -p + if not posix.access("/bin/bash", "x") then +-- +1.8.3.1 + diff --git a/SOURCES/0035-glusterd-regenerate-volfiles-on-op-version-bump-up.patch b/SOURCES/0035-glusterd-regenerate-volfiles-on-op-version-bump-up.patch new file mode 100644 index 0000000..63d2dfe --- /dev/null +++ b/SOURCES/0035-glusterd-regenerate-volfiles-on-op-version-bump-up.patch @@ -0,0 +1,99 @@ +From 88ed6bd3e752a028b5372aa948a191fa49377459 Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Fri, 10 Nov 2017 19:17:27 +0530 +Subject: [PATCH 35/74] glusterd: regenerate volfiles on op-version bump up + +Please note that LOC of downstream patch differs because of a +downstream only fix https://code.engineering.redhat.com/gerrit/94006 + +Label: DOWNSTREAM ONLY + +>Reviewed-on: https://review.gluster.org/16455 +>NetBSD-regression: NetBSD Build System +>Smoke: Gluster Build System +>CentOS-regression: Gluster Build System +>Reviewed-by: Prashanth Pai +>Reviewed-by: Kaushal M + +Change-Id: I2fe7a3ebea19492d52253ad5a1fdd67ac95c71c8 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/96368 +Reviewed-by: Samikshan Bairagya +Reviewed-by: Prashanth Pai +--- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 38 ++++++++++-------------------- + 1 file changed, 13 insertions(+), 25 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 4fc719a..96e0860 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -2612,7 +2612,8 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict, + NULL); + if (ret) + goto out; +- ret = glusterd_update_volumes_dict (volinfo); ++ ret = glusterd_update_volumes_dict ++ (volinfo, &start_nfs_svc); + if (ret) + goto out; + if (!volinfo->is_snap_volume) { +@@ -2622,14 +2623,6 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict, + if (ret) + goto out; + } +- +- if (volinfo->type == GF_CLUSTER_TYPE_TIER) { +- svc = &(volinfo->tierd.svc); +- ret = svc->reconfigure (volinfo); +- if (ret) +- goto out; +- } +- + ret = glusterd_create_volfiles_and_notify_services (volinfo); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, +@@ -2651,6 +2644,17 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict, + } + } + } ++ if (start_nfs_svc) { ++ ret = conf->nfs_svc.manager (&(conf->nfs_svc), ++ NULL, ++ PROC_START_NO_WAIT); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_SVC_START_FAIL, ++ "unable to start nfs service"); ++ goto out; ++ } ++ } + ret = glusterd_store_global_info (this); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, +@@ -2658,22 +2662,6 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict, + "Failed to store op-version."); + } + } +- cds_list_for_each_entry (volinfo, &conf->volumes, vol_list) { +- ret = glusterd_update_volumes_dict (volinfo, +- &start_nfs_svc); +- if (ret) +- goto out; +- } +- if (start_nfs_svc) { +- ret = conf->nfs_svc.manager (&(conf->nfs_svc), NULL, +- PROC_START_NO_WAIT); +- if (ret) { +- gf_msg (this->name, GF_LOG_ERROR, 0, +- GD_MSG_SVC_START_FAIL, +- "unable to start nfs service"); +- goto out; +- } +- } + /* No need to save cluster.op-version in conf->opts + */ + goto out; +-- +1.8.3.1 + diff --git a/SOURCES/0036-mount-fuse-Fix-parsing-of-vol_id-for-snapshot-volume.patch 
b/SOURCES/0036-mount-fuse-Fix-parsing-of-vol_id-for-snapshot-volume.patch
new file mode 100644
index 0000000..5a50372
--- /dev/null
+++ b/SOURCES/0036-mount-fuse-Fix-parsing-of-vol_id-for-snapshot-volume.patch
@@ -0,0 +1,50 @@
+From b5f16e56bd1a9e64fa461f22f24790992fd2c008 Mon Sep 17 00:00:00 2001
+From: Mohammed Rafi KC
+Date: Thu, 12 Oct 2017 14:31:14 +0530
+Subject: [PATCH 36/74] mount/fuse : Fix parsing of vol_id for snapshot volume
+
+To support sub-dir mounts, we changed the volid; anything after a '/'
+in volume_id is now considered a sub-dir path.
+
+But a snapshot volume has a vol_id structure of /snaps// which has to
+be taken into account during the parsing.
+
+Note 1: sub-dir mount is not supported on snapshot volumes
+Note 2: With the sub-dir mount changes, a brick-based mount for quota
+ cannot be executed via the mount command. It has to be a direct
+ call via glusterfs
+
+Backport of:
+>Change-Id: I0d824de0236b803db8a918f683dabb0cb523cb04
+>BUG: 1501235
+>Signed-off-by: Mohammed Rafi KC
+>Upstream patch : https://review.gluster.org/18506
+
+Change-Id: I82903bdd0bfcf8454faef958b38f13d4d95a2346
+Signed-off-by: Mohammed Rafi KC
+Reviewed-on: https://code.engineering.redhat.com/gerrit/120524
+Tested-by: RHGS Build Bot
+Reviewed-by: Atin Mukherjee
+---
+ xlators/mount/fuse/utils/mount.glusterfs.in | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/xlators/mount/fuse/utils/mount.glusterfs.in b/xlators/mount/fuse/utils/mount.glusterfs.in
+index bd6503a..36b60ff 100755
+--- a/xlators/mount/fuse/utils/mount.glusterfs.in
++++ b/xlators/mount/fuse/utils/mount.glusterfs.in
+@@ -675,8 +675,10 @@ main ()
+ [ ${first_char} = '/' ] && {
+ volume_str_temp=$(echo "$volume_str" | cut -c 2-)
+ }
+- [ $(echo $volume_str_temp | grep -c "/") -eq 1 ] && {
+- volume_id=$(echo "$volume_str_temp" | cut -f1 -d '/');
++ volume_id_temp=$(echo "$volume_str_temp" | cut -f1 -d '/');
++ [ $(echo $volume_str_temp | grep -c "/") -eq 1 ] &&
++ [ "$volume_id_temp" != "snaps" ] && {
++ volume_id=$volume_id_temp;
+ subdir_mount=$(echo "$volume_str_temp" | cut -f2- -d '/');
+ }
+ }
+--
+1.8.3.1
+

diff --git a/SOURCES/0037-protocol-auth-use-the-proper-validation-method.patch b/SOURCES/0037-protocol-auth-use-the-proper-validation-method.patch
new file mode 100644
index 0000000..d91733f
--- /dev/null
+++ b/SOURCES/0037-protocol-auth-use-the-proper-validation-method.patch
@@ -0,0 +1,141 @@
+From 6d6e3a4100fcb9333d82618d64e96e49ddddcbf4 Mon Sep 17 00:00:00 2001
+From: Amar Tumballi
+Date: Mon, 16 Oct 2017 11:44:59 +0530
+Subject: [PATCH 37/74] protocol-auth: use the proper validation method
+
+Currently, server protocol's init and glusterd's option
+validation methods are different, causing an issue. 
They +should be same for having consistent behavior + +> Upstream: +> Change-Id: Ibbf9a18c7192b2d77f9b7675ae7da9b8d2fe5de4 +> URL: https://review.gluster.org/#/c/18489/ + +Change-Id: Id595a1032b14233ca8f31d20813dca98476b2468 +Signed-off-by: Amar Tumballi +Reviewed-on: https://code.engineering.redhat.com/gerrit/120558 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + libglusterfs/src/options.c | 4 ++-- + libglusterfs/src/options.h | 5 +++++ + tests/features/subdir-mount.t | 4 ++++ + xlators/protocol/server/src/server.c | 40 +++++++----------------------------- + 4 files changed, 18 insertions(+), 35 deletions(-) + +diff --git a/libglusterfs/src/options.c b/libglusterfs/src/options.c +index f0292ea..a0f04c7 100644 +--- a/libglusterfs/src/options.c ++++ b/libglusterfs/src/options.c +@@ -590,7 +590,7 @@ xlator_option_validate_addr (xlator_t *xl, const char *key, const char *value, + return ret; + } + +-static int ++int + xlator_option_validate_addr_list (xlator_t *xl, const char *key, + const char *value, volume_option_t *opt, + char **op_errstr) +@@ -668,7 +668,7 @@ xlator_option_validate_addr_list (xlator_t *xl, const char *key, + out: + if (ret) { + snprintf (errstr, sizeof (errstr), "option %s %s: '%s' is not " +- "a valid internet-address-list", key, value, value); ++ "a valid internet-address-list", key, value, value); + gf_msg (xl->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY, "%s", + errstr); + if (op_errstr) +diff --git a/libglusterfs/src/options.h b/libglusterfs/src/options.h +index 3154dce..d259d44 100644 +--- a/libglusterfs/src/options.h ++++ b/libglusterfs/src/options.h +@@ -87,6 +87,11 @@ int xlator_options_validate_list (xlator_t *xl, dict_t *options, + int xlator_option_validate (xlator_t *xl, char *key, char *value, + volume_option_t *opt, char **op_errstr); + int xlator_options_validate (xlator_t *xl, dict_t *options, char **errstr); ++ ++int xlator_option_validate_addr_list (xlator_t *xl, const char *key, ++ const char *value, volume_option_t *opt, ++ char **op_errstr); ++ + volume_option_t * + xlator_volume_option_get (xlator_t *xl, const char *key); + +diff --git a/tests/features/subdir-mount.t b/tests/features/subdir-mount.t +index 2fb0be4..ab7ef35 100644 +--- a/tests/features/subdir-mount.t ++++ b/tests/features/subdir-mount.t +@@ -78,6 +78,10 @@ TEST ! $CLI volume set $V0 auth.allow "subdir2\(1.2.3.4\)" + # support subdir inside subdir + TEST $CLI volume set $V0 auth.allow '/subdir1/subdir1.1/subdir1.2/\(1.2.3.4\|::1\),/\(192.168.10.1\|192.168.11.1\),/subdir2\(1.2.3.4\)' + ++TEST $CLI volume stop $V0 ++ ++TEST $CLI volume start $V0 ++ + # /subdir2 has not allowed IP + TEST $GFS --subdir-mount /subdir2 -s $H0 --volfile-id $V0 $M1 + TEST stat $M1 +diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c +index e47acb2..6dc9d0f 100644 +--- a/xlators/protocol/server/src/server.c ++++ b/xlators/protocol/server/src/server.c +@@ -386,9 +386,6 @@ _check_for_auth_option (dict_t *d, char *k, data_t *v, + int ret = 0; + xlator_t *xl = NULL; + char *tail = NULL; +- char *tmp_addr_list = NULL; +- char *addr = NULL; +- char *tmp_str = NULL; + + xl = tmp; + +@@ -417,38 +414,15 @@ _check_for_auth_option (dict_t *d, char *k, data_t *v, + * valid auth.allow. 
+ * Now we verify the ip address + */ +- if (!strcmp (v->data, "*")) { +- ret = 0; +- goto out; +- } +- +- /* TODO-SUBDIR-MOUNT: fix the format */ +- tmp_addr_list = gf_strdup (v->data); +- addr = strtok_r (tmp_addr_list, ",", &tmp_str); +- if (!addr) +- addr = v->data; +- +- while (addr) { +- if (valid_internet_address (addr, _gf_true)) { +- ret = 0; +- } else { +- ret = -1; +- gf_msg (xl->name, GF_LOG_ERROR, 0, +- PS_MSG_INTERNET_ADDR_ERROR, +- "internet address '%s'" +- " does not conform to" +- " standards.", addr); +- goto out; +- } +- if (tmp_str) +- addr = strtok_r (NULL, ",", &tmp_str); +- else +- addr = NULL; +- } ++ ret = xlator_option_validate_addr_list (xl, "auth-*", v->data, ++ NULL, NULL); ++ if (ret) ++ gf_msg (xl->name, GF_LOG_ERROR, 0, ++ PS_MSG_INTERNET_ADDR_ERROR, ++ "internet address '%s' does not conform " ++ "to standards.", v->data); + } + out: +- GF_FREE (tmp_addr_list); +- + return ret; + } + +-- +1.8.3.1 + diff --git a/SOURCES/0038-protocol-server-fix-the-comparision-logic-in-case-of.patch b/SOURCES/0038-protocol-server-fix-the-comparision-logic-in-case-of.patch new file mode 100644 index 0000000..fb8a5fd --- /dev/null +++ b/SOURCES/0038-protocol-server-fix-the-comparision-logic-in-case-of.patch @@ -0,0 +1,111 @@ +From 4fd6388cf08d9c902f20683579d62408847c3766 Mon Sep 17 00:00:00 2001 +From: Amar Tumballi +Date: Mon, 23 Oct 2017 21:17:52 +0200 +Subject: [PATCH 38/74] protocol/server: fix the comparision logic in case of + subdir mount + +without the fix, the stat entry on a file would return inode==1 for +many files, in case of subdir mount + +This happened with the confusion of return value of 'gf_uuid_compare()', +it is more like strcmp, instead of a gf_boolean return value, and hence +resulted in the bug. + +> Upstream: +> URL: https://review.gluster.org/#/c/18558/ +> + +Also fixes the bz1501714 + +Change-Id: I31b8cbd95eaa3af5ff916a969458e8e4020c86bb +Signed-off-by: Amar Tumballi +Reviewed-on: https://code.engineering.redhat.com/gerrit/121726 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + xlators/protocol/server/src/server-common.c | 60 ++++++++++++++--------------- + 1 file changed, 30 insertions(+), 30 deletions(-) + +diff --git a/xlators/protocol/server/src/server-common.c b/xlators/protocol/server/src/server-common.c +index b972918..ce33089 100644 +--- a/xlators/protocol/server/src/server-common.c ++++ b/xlators/protocol/server/src/server-common.c +@@ -12,22 +12,22 @@ + void + server_post_stat (server_state_t *state, gfs3_stat_rsp *rsp, struct iatt *stbuf) + { +- if (state->client->subdir_mount) { +- if (gf_uuid_compare (stbuf->ia_gfid, +- state->client->subdir_gfid)) { +- /* This is very important as when we send iatt of +- root-inode, fuse/client expect the gfid to be 1, +- along with inode number. As for subdirectory mount, +- we use inode table which is shared by everyone, but +- make sure we send fops only from subdir and below, +- we have to alter inode gfid and send it to client */ +- uuid_t gfid = {0,}; +- +- gfid[15] = 1; +- stbuf->ia_ino = 1; +- gf_uuid_copy (stbuf->ia_gfid, gfid); +- } ++ if (state->client->subdir_mount && ++ !gf_uuid_compare (stbuf->ia_gfid, ++ state->client->subdir_gfid)) { ++ /* This is very important as when we send iatt of ++ root-inode, fuse/client expect the gfid to be 1, ++ along with inode number. 
As for subdirectory mount, ++ we use inode table which is shared by everyone, but ++ make sure we send fops only from subdir and below, ++ we have to alter inode gfid and send it to client */ ++ uuid_t gfid = {0,}; ++ ++ gfid[15] = 1; ++ stbuf->ia_ino = 1; ++ gf_uuid_copy (stbuf->ia_gfid, gfid); + } ++ + gf_stat_from_iatt (&rsp->stat, stbuf); + } + +@@ -185,22 +185,22 @@ void + server_post_fstat (server_state_t *state, gfs3_fstat_rsp *rsp, + struct iatt *stbuf) + { +- if (state->client->subdir_mount) { +- if (gf_uuid_compare (stbuf->ia_gfid, +- state->client->subdir_gfid)) { +- /* This is very important as when we send iatt of +- root-inode, fuse/client expect the gfid to be 1, +- along with inode number. As for subdirectory mount, +- we use inode table which is shared by everyone, but +- make sure we send fops only from subdir and below, +- we have to alter inode gfid and send it to client */ +- uuid_t gfid = {0,}; +- +- gfid[15] = 1; +- stbuf->ia_ino = 1; +- gf_uuid_copy (stbuf->ia_gfid, gfid); +- } ++ if (state->client->subdir_mount && ++ !gf_uuid_compare (stbuf->ia_gfid, ++ state->client->subdir_gfid)) { ++ /* This is very important as when we send iatt of ++ root-inode, fuse/client expect the gfid to be 1, ++ along with inode number. As for subdirectory mount, ++ we use inode table which is shared by everyone, but ++ make sure we send fops only from subdir and below, ++ we have to alter inode gfid and send it to client */ ++ uuid_t gfid = {0,}; ++ ++ gfid[15] = 1; ++ stbuf->ia_ino = 1; ++ gf_uuid_copy (stbuf->ia_gfid, gfid); + } ++ + gf_stat_from_iatt (&rsp->stat, stbuf); + } + +-- +1.8.3.1 + diff --git a/SOURCES/0039-protocol-client-handle-the-subdir-handshake-properly.patch b/SOURCES/0039-protocol-client-handle-the-subdir-handshake-properly.patch new file mode 100644 index 0000000..97d9ccf --- /dev/null +++ b/SOURCES/0039-protocol-client-handle-the-subdir-handshake-properly.patch @@ -0,0 +1,108 @@ +From 0f3a3c9ed32fec80f1b88cc649a98bcdcc234b6a Mon Sep 17 00:00:00 2001 +From: Amar Tumballi +Date: Sun, 22 Oct 2017 12:41:38 +0530 +Subject: [PATCH 39/74] protocol/client: handle the subdir handshake properly + for add-brick + +There should be different way we handle handshake in case of subdir +mount for the first time, and in case of subsequent graph changes. 
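The two cases are told apart by the graph id: the check below treats
graph ids up to 1 as the initial mount, anything higher as a regraph.
A condensed sketch of the check as it lands in client_setvolume_cbk ()
in the hunk below (surrounding error handling omitted):

        /* Only the very first graph, i.e. a fresh mount, may turn a
         * missing subdir into an auth failure; on later graphs
         * (add-brick etc.) the client must keep retrying instead. */
        if ((op_errno == ENOENT) && ctx->cmd_args.subdir_mount &&
            (ctx->graph_id <= 1)) {
                auth_fail = _gf_true;
                op_ret = 0;
        }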
+ +> Upstream +> URL: https://review.gluster.org/#/c/18550/ +> + +Change-Id: I2a7ba836433bb0a0f4a861809e2bb0d7fbc4da54 +Signed-off-by: Amar Tumballi +Reviewed-on: https://code.engineering.redhat.com/gerrit/121725 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + tests/features/subdir-mount.t | 31 +++++++++++++++++++++----- + xlators/protocol/client/src/client-handshake.c | 10 ++++++++- + 2 files changed, 35 insertions(+), 6 deletions(-) + +diff --git a/tests/features/subdir-mount.t b/tests/features/subdir-mount.t +index ab7ef35..1742f86 100644 +--- a/tests/features/subdir-mount.t ++++ b/tests/features/subdir-mount.t +@@ -82,17 +82,38 @@ TEST $CLI volume stop $V0 + + TEST $CLI volume start $V0 + +-# /subdir2 has not allowed IP +-TEST $GFS --subdir-mount /subdir2 -s $H0 --volfile-id $V0 $M1 +-TEST stat $M1 +- + TEST $GFS --subdir-mount /subdir1/subdir1.1/subdir1.2 -s $H0 --volfile-id $V0 $M2 + TEST stat $M2 + ++# mount shouldn't fail even after add-brick ++TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}{5,6}; ++ ++# Give time for client process to get notified and use the new ++# volfile after add-brick ++sleep 1 ++ ++# Existing mount should still be active ++mount_inode=$(stat --format "%i" "$M2") ++TEST test "$mount_inode" == "1" ++ ++TEST umount $M2 ++ ++# because the subdir is not yet 'healed', below should fail. ++TEST $GFS --subdir-mount /subdir2 -s $H0 --volfile-id $V0 $M2 ++mount_inode=$(stat --format "%i" "$M2") ++TEST test "$mount_inode" != "1" ++ ++# Allow the heal to complete ++TEST stat $M0/subdir1/subdir1.1/subdir1.2/subdir1.2_file; ++TEST stat $M0/subdir2/ ++ ++# Now the mount should succeed ++TEST $GFS --subdir-mount /subdir2 -s $H0 --volfile-id $V0 $M1 ++TEST stat $M1 ++ + # umount $M1 / $M2 + TEST umount $M0 + TEST umount $M1 +-TEST umount $M2 + + + TEST $CLI volume stop $V0; +diff --git a/xlators/protocol/client/src/client-handshake.c b/xlators/protocol/client/src/client-handshake.c +index b6dc079..aee6b3a 100644 +--- a/xlators/protocol/client/src/client-handshake.c ++++ b/xlators/protocol/client/src/client-handshake.c +@@ -1079,10 +1079,14 @@ client_setvolume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *m + int32_t op_errno = 0; + gf_boolean_t auth_fail = _gf_false; + uint32_t lk_ver = 0; ++ glusterfs_ctx_t *ctx = NULL; + + frame = myframe; + this = frame->this; + conf = this->private; ++ GF_VALIDATE_OR_GOTO (this->name, conf, out); ++ ctx = this->ctx; ++ GF_VALIDATE_OR_GOTO (this->name, ctx, out); + + if (-1 == req->rpc_status) { + gf_msg (frame->this->name, GF_LOG_WARNING, ENOTCONN, +@@ -1145,9 +1149,13 @@ client_setvolume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *m + auth_fail = _gf_true; + op_ret = 0; + } +- if ((op_errno == ENOENT) && this->ctx->cmd_args.subdir_mount) { ++ if ((op_errno == ENOENT) && this->ctx->cmd_args.subdir_mount && ++ (ctx->graph_id <= 1)) { + /* A case of subdir not being present at the moment, + ride on auth_fail framework to notify the error */ ++ /* Make sure this case is handled only in the new ++ graph, so mount may fail in this case. 
In case ++ of 'add-brick' etc, we need to continue retry */ + auth_fail = _gf_true; + op_ret = 0; + } +-- +1.8.3.1 + diff --git a/SOURCES/0040-glusterd-delete-source-brick-only-once-in-reset-bric.patch b/SOURCES/0040-glusterd-delete-source-brick-only-once-in-reset-bric.patch new file mode 100644 index 0000000..46a133a --- /dev/null +++ b/SOURCES/0040-glusterd-delete-source-brick-only-once-in-reset-bric.patch @@ -0,0 +1,69 @@ +From 8fb2496f67b1170595144eecb9a3b8f3be35044e Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Mon, 30 Oct 2017 15:55:32 +0530 +Subject: [PATCH 40/74] glusterd: delete source brick only once in reset-brick + commit force + +While stopping the brick which is to be reset and replaced delete_brick +flag was passed as true which resulted glusterd to free up to source +brick before the actual operation. This results commit force to fail +failing to find the source brickinfo. + +>upstream patch : https://review.gluster.org/#/c/18581 + +Change-Id: I1aa7508eff7cc9c9b5d6f5163f3bb92736d6df44 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/121876 +Tested-by: RHGS Build Bot +--- + .../bug-1507466-reset-brick-commit-force.t | 24 ++++++++++++++++++++++ + xlators/mgmt/glusterd/src/glusterd-reset-brick.c | 2 +- + 2 files changed, 25 insertions(+), 1 deletion(-) + create mode 100644 tests/bugs/glusterd/bug-1507466-reset-brick-commit-force.t + +diff --git a/tests/bugs/glusterd/bug-1507466-reset-brick-commit-force.t b/tests/bugs/glusterd/bug-1507466-reset-brick-commit-force.t +new file mode 100644 +index 0000000..764399d +--- /dev/null ++++ b/tests/bugs/glusterd/bug-1507466-reset-brick-commit-force.t +@@ -0,0 +1,24 @@ ++#!/bin/bash ++. $(dirname $0)/../../include.rc ++. $(dirname $0)/../../cluster.rc ++cleanup; ++ ++function check_peers { ++ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l ++} ++ ++TEST launch_cluster 3 ++TEST $CLI_1 peer probe $H2; ++EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers ++ ++TEST $CLI_1 volume create $V0 replica 2 $H1:$B0/${V0} $H2:$B0/${V0} ++TEST $CLI_1 volume start $V0 ++ ++# Negative case with brick not killed && volume-id xattrs present ++TEST ! 
$CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force ++ ++TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} start ++# Now test if reset-brick commit force works ++TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force ++ ++cleanup; +diff --git a/xlators/mgmt/glusterd/src/glusterd-reset-brick.c b/xlators/mgmt/glusterd/src/glusterd-reset-brick.c +index c127d64..abb44e0 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-reset-brick.c ++++ b/xlators/mgmt/glusterd/src/glusterd-reset-brick.c +@@ -343,7 +343,7 @@ glusterd_op_reset_brick (dict_t *dict, dict_t *rsp_dict) + gf_msg_debug (this->name, 0, "I AM THE DESTINATION HOST"); + ret = glusterd_volume_stop_glusterfs (volinfo, + src_brickinfo, +- _gf_true); ++ _gf_false); + if (ret) { + gf_msg (this->name, GF_LOG_CRITICAL, 0, + GD_MSG_BRICK_STOP_FAIL, +-- +1.8.3.1 + diff --git a/SOURCES/0041-glusterd-persist-brickinfo-s-port-change-into-gluste.patch b/SOURCES/0041-glusterd-persist-brickinfo-s-port-change-into-gluste.patch new file mode 100644 index 0000000..736ebe3 --- /dev/null +++ b/SOURCES/0041-glusterd-persist-brickinfo-s-port-change-into-gluste.patch @@ -0,0 +1,193 @@ +From 548895f0333a0706ec9475efc3b28456d591f093 Mon Sep 17 00:00:00 2001 +From: Gaurav Yadav +Date: Fri, 27 Oct 2017 16:04:46 +0530 +Subject: [PATCH 41/74] glusterd: persist brickinfo's port change into + glusterd's store + +Problem: +Consider a case where node reboot is performed and prior to reboot +brick was listening to 49153. Post reboot glusterd assigned 49152 +to brick and started the brick process but the new port was never +persisted. Now when glusterd restarts glusterd always read the port +from its persisted store i.e 49153 however pmap signin happens with +the correct port i.e 49152. + +Fix: +Make sure when glusterd_brick_start is called, glusterd_store_volinfo is +eventually invoked. 
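A condensed sketch of the pattern the hunks below apply at every brick
(re)start path (error handling trimmed; GLUSTERD_VOLINFO_VER_AC_NONE,
going by its name, stores the volinfo without bumping its version):

        /* the brick may come up on a different port than last time */
        ret = glusterd_brick_start (volinfo, brickinfo, _gf_false);
        if (ret)
                goto out;
        /* persist the (possibly new) port so a later glusterd restart
         * reads the same port the brick actually signed in with */
        ret = glusterd_store_volinfo (volinfo,
                                      GLUSTERD_VOLINFO_VER_AC_NONE);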
+ +>upstream mainline patch : https://review.gluster.org/#/c/18579/ + +Change-Id: Ic0efbd48c51d39729ed951a42922d0e59f7115a1 +Signed-off-by: Gaurav Yadav +Reviewed-on: https://code.engineering.redhat.com/gerrit/121878 +Reviewed-by: Atin Mukherjee +Tested-by: Atin Mukherjee +Tested-by: RHGS Build Bot +--- + xlators/mgmt/glusterd/src/glusterd-handshake.c | 18 +++++++++--------- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 9 ++++++++- + xlators/mgmt/glusterd/src/glusterd-server-quorum.c | 16 ++++++++++++++++ + xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c | 10 ++++++++++ + xlators/mgmt/glusterd/src/glusterd-utils.c | 19 +++++++++++++++++++ + 5 files changed, 62 insertions(+), 10 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c +index c7e419c..8dfb528 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c +@@ -658,6 +658,15 @@ glusterd_create_missed_snap (glusterd_missed_snap_info *missed_snapinfo, + } + + brickinfo->snap_status = 0; ++ ret = glusterd_brick_start (snap_vol, brickinfo, _gf_false); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_WARNING, 0, ++ GD_MSG_BRICK_DISCONNECTED, "starting the " ++ "brick %s:%s for the snap %s failed", ++ brickinfo->hostname, brickinfo->path, ++ snap->snapname); ++ goto out; ++ } + ret = glusterd_store_volinfo (snap_vol, + GLUSTERD_VOLINFO_VER_AC_NONE); + if (ret) { +@@ -668,15 +677,6 @@ glusterd_create_missed_snap (glusterd_missed_snap_info *missed_snapinfo, + goto out; + } + +- ret = glusterd_brick_start (snap_vol, brickinfo, _gf_false); +- if (ret) { +- gf_msg (this->name, GF_LOG_WARNING, 0, +- GD_MSG_BRICK_DISCONNECTED, "starting the " +- "brick %s:%s for the snap %s failed", +- brickinfo->hostname, brickinfo->path, +- snap->snapname); +- goto out; +- } + out: + if (device) + GF_FREE (device); +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 96e0860..9641b4f 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -2415,8 +2415,15 @@ glusterd_start_bricks (glusterd_volinfo_t *volinfo) + brickinfo->path); + goto out; + } +- } + ++ } ++ ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_NONE); ++ if (ret) { ++ gf_msg (THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL, ++ "Failed to write volinfo for volume %s", ++ volinfo->volname); ++ goto out; ++ } + ret = 0; + out: + return ret; +diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c +index a4637f8..659ff9d 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c ++++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c +@@ -12,6 +12,7 @@ + #include "glusterd-utils.h" + #include "glusterd-messages.h" + #include "glusterd-server-quorum.h" ++#include "glusterd-store.h" + #include "glusterd-syncop.h" + #include "glusterd-op-sm.h" + +@@ -309,6 +310,7 @@ void + glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, + gf_boolean_t meets_quorum) + { ++ int ret = -1; + glusterd_brickinfo_t *brickinfo = NULL; + gd_quorum_status_t quorum_status = NOT_APPLICABLE_QUORUM; + gf_boolean_t follows_quorum = _gf_false; +@@ -365,6 +367,20 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, + glusterd_brick_start (volinfo, brickinfo, _gf_false); + } + volinfo->quorum_status = quorum_status; ++ if (quorum_status == MEETS_QUORUM) { ++ 
/* bricks might have been restarted and so as the port change ++ * might have happened ++ */ ++ ret = glusterd_store_volinfo (volinfo, ++ GLUSTERD_VOLINFO_VER_AC_NONE); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_VOLINFO_STORE_FAIL, ++ "Failed to write volinfo for volume %s", ++ volinfo->volname); ++ goto out; ++ } ++ } + out: + return; + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c +index 6fb49c3..4cbade1 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c +@@ -1680,6 +1680,16 @@ glusterd_import_friend_snap (dict_t *peer_data, int32_t snap_count, + } + if (glusterd_is_volume_started (snap_vol)) { + (void) glusterd_start_bricks (snap_vol); ++ ret = glusterd_store_volinfo ++ (snap_vol, ++ GLUSTERD_VOLINFO_VER_AC_NONE); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_VOLINFO_STORE_FAIL, "Failed to " ++ "write volinfo for volume %s", ++ snap_vol->volname); ++ goto out; ++ } + } else { + (void) glusterd_stop_bricks(snap_vol); + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c +index f34e218..bb236df 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -6003,6 +6003,15 @@ glusterd_restart_bricks (glusterd_conf_t *conf) + glusterd_brick_start (volinfo, brickinfo, + _gf_false); + } ++ ret = glusterd_store_volinfo ++ (volinfo, GLUSTERD_VOLINFO_VER_AC_NONE); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_VOLINFO_STORE_FAIL, "Failed to " ++ "write volinfo for volume %s", ++ volinfo->volname); ++ goto out; ++ } + } + } + +@@ -6034,6 +6043,16 @@ glusterd_restart_bricks (glusterd_conf_t *conf) + glusterd_brick_start (volinfo, brickinfo, + _gf_false); + } ++ ret = glusterd_store_volinfo ++ (volinfo, GLUSTERD_VOLINFO_VER_AC_NONE); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_VOLINFO_STORE_FAIL, "Failed to " ++ "write volinfo for volume %s", ++ volinfo->volname); ++ goto out; ++ } ++ + } + } + ret = 0; +-- +1.8.3.1 + diff --git a/SOURCES/0042-glusterd-restart-the-brick-if-qorum-status-is-NOT_AP.patch b/SOURCES/0042-glusterd-restart-the-brick-if-qorum-status-is-NOT_AP.patch new file mode 100644 index 0000000..42fd2a4 --- /dev/null +++ b/SOURCES/0042-glusterd-restart-the-brick-if-qorum-status-is-NOT_AP.patch @@ -0,0 +1,39 @@ +From 4ea251b0a23ae8fc0740abc2c5d85c09c31e0c70 Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Mon, 6 Nov 2017 13:23:32 +0530 +Subject: [PATCH 42/74] glusterd: restart the brick if qorum status is + NOT_APPLICABLE_QUORUM + +If a volume is not having server quorum enabled and in a trusted storage +pool all the glusterd instances from other peers are down, on restarting +glusterd the brick start trigger doesn't happen resulting into the +brick not coming up. 
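The fix below is a one-line relaxation of the early-return guard in
glusterd_do_volume_quorum_action ():

        /* Skip the action only when a real quorum state is unchanged;
         * for NOT_APPLICABLE_QUORUM fall through, so the bricks still
         * get started on a fresh glusterd restart. */
        if (quorum_status != NOT_APPLICABLE_QUORUM &&
            volinfo->quorum_status == quorum_status)
                goto out;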
+ +> mainline patch : https://review.gluster.org/18669 + +Change-Id: If1458e03b50a113f1653db553bb2350d11577539 +BUG: 1509102 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/123055 +Reviewed-by: Gaurav Yadav +--- + xlators/mgmt/glusterd/src/glusterd-server-quorum.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c +index 659ff9d..4706403 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c ++++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c +@@ -341,7 +341,8 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, + * the bricks that are down are brought up again. In this process it + * also brings up the brick that is purposefully taken down. + */ +- if (volinfo->quorum_status == quorum_status) ++ if (quorum_status != NOT_APPLICABLE_QUORUM && ++ volinfo->quorum_status == quorum_status) + goto out; + + if (quorum_status == MEETS_QUORUM) { +-- +1.8.3.1 + diff --git a/SOURCES/0043-glusterd-clean-up-portmap-on-brick-disconnect.patch b/SOURCES/0043-glusterd-clean-up-portmap-on-brick-disconnect.patch new file mode 100644 index 0000000..ac60763 --- /dev/null +++ b/SOURCES/0043-glusterd-clean-up-portmap-on-brick-disconnect.patch @@ -0,0 +1,173 @@ +From 385b61f9a6f818c2810cc0a2223c9d71340cd345 Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Tue, 17 Oct 2017 21:32:44 +0530 +Subject: [PATCH 43/74] glusterd: clean up portmap on brick disconnect + +GlusterD's portmap entry for a brick is cleaned up when a PMAP_SIGNOUT event is +initiated by the brick process at the shutdown. But if the brick process crashes +or gets killed through SIGKILL then this event is not initiated and glusterd +ends up with a stale port. Since GlusterD's portmap traversal happens both ways, +forward for allocation and backward for registry search, there is a possibility +that glusterd might end up running with a stale port for a brick which +eventually will end up with clients to fail to connect to the bricks. + +Solution is to clean up the port entry in case the process is down as +part of the brick disconnect event. Although with this the handling +PMAP_SIGNOUT event becomes redundant in most of the cases, but this is +the safeguard method to avoid glusterd getting into the stale port +issues. 
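A condensed sketch of the cleanup the handler hunk below adds on a
brick's RPC_CLNT_DISCONNECT (variable setup and logging trimmed):

        /* a crashed or SIGKILLed brick never sends PMAP_SIGNOUT, so
         * drop its port entry once the process is confirmed dead */
        GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, brickinfo, conf);
        if (!gf_is_service_running (pidfile, &pid)) {
                ret = pmap_registry_remove (THIS, brickinfo->port,
                                            brickinfo->path,
                                            GF_PMAP_PORT_BRICKSERVER,
                                            NULL, _gf_true);
        }

The new last argument (brick_disconnect) makes pmap_registry_remove ()
skip the per-brick name check and clean up every brick on the port,
which matters when brick multiplexing is enabled.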
+ +>mainline patch : https://review.gluster.org/#/c/18541 + +Change-Id: I04c5be6d11e772ee4de16caf56dbb37d5c944303 +BUG: 1503244 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/123057 +Reviewed-by: Gaurav Yadav +--- + xlators/mgmt/glusterd/src/glusterd-handler.c | 25 +++++++++++++++++++++++++ + xlators/mgmt/glusterd/src/glusterd-pmap.c | 26 +++++++++++++++++--------- + xlators/mgmt/glusterd/src/glusterd-pmap.h | 3 ++- + xlators/mgmt/glusterd/src/glusterd.c | 3 ++- + 4 files changed, 46 insertions(+), 11 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c +index af9a796..34e751c 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handler.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c +@@ -5974,8 +5974,10 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, + glusterd_volinfo_t *volinfo = NULL; + xlator_t *this = NULL; + int temp = 0; ++ int32_t pid = -1; + glusterd_brickinfo_t *brickinfo_tmp = NULL; + glusterd_brick_proc_t *brick_proc = NULL; ++ char pidfile[PATH_MAX] = {0}; + + brickid = mydata; + if (!brickid) +@@ -6074,6 +6076,29 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, + "peer=%s;volume=%s;brick=%s", + brickinfo->hostname, volinfo->volname, + brickinfo->path); ++ /* In case of an abrupt shutdown of a brick PMAP_SIGNOUT ++ * event is not received by glusterd which can lead to a ++ * stale port entry in glusterd, so forcibly clean up ++ * the same if the process is not running ++ */ ++ GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, ++ brickinfo, conf); ++ if (!gf_is_service_running (pidfile, &pid)) { ++ ret = pmap_registry_remove ( ++ THIS, brickinfo->port, ++ brickinfo->path, ++ GF_PMAP_PORT_BRICKSERVER, ++ NULL, _gf_true); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_WARNING, ++ GD_MSG_PMAP_REGISTRY_REMOVE_FAIL, ++ 0, "Failed to remove pmap " ++ "registry for port %d for " ++ "brick %s", brickinfo->port, ++ brickinfo->path); ++ ret = 0; ++ } ++ } + } + + if (is_brick_mx_enabled()) { +diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.c b/xlators/mgmt/glusterd/src/glusterd-pmap.c +index 2a75476..1b547e7 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-pmap.c ++++ b/xlators/mgmt/glusterd/src/glusterd-pmap.c +@@ -239,7 +239,8 @@ pmap_assign_port (xlator_t *this, int old_port, const char *path) + + if (old_port) { + ret = pmap_registry_remove (this, 0, path, +- GF_PMAP_PORT_BRICKSERVER, NULL); ++ GF_PMAP_PORT_BRICKSERVER, NULL, ++ _gf_false); + if (ret) { + gf_msg (this->name, GF_LOG_WARNING, + GD_MSG_PMAP_REGISTRY_REMOVE_FAIL, 0, "Failed to" +@@ -342,7 +343,8 @@ pmap_registry_extend (xlator_t *this, int port, const char *brickname) + + int + pmap_registry_remove (xlator_t *this, int port, const char *brickname, +- gf_pmap_port_type_t type, void *xprt) ++ gf_pmap_port_type_t type, void *xprt, ++ gf_boolean_t brick_disconnect) + { + struct pmap_registry *pmap = NULL; + int p = 0; +@@ -389,11 +391,16 @@ remove: + * can delete the entire entry. 
+ */ + if (!pmap->ports[p].xprt) { +- brick_str = pmap->ports[p].brickname; +- if (brick_str) { +- while (*brick_str != '\0') { +- if (*(brick_str++) != ' ') { +- goto out; ++ /* If the signout call is being triggered by brick disconnect ++ * then clean up all the bricks (in case of brick mux) ++ */ ++ if (!brick_disconnect) { ++ brick_str = pmap->ports[p].brickname; ++ if (brick_str) { ++ while (*brick_str != '\0') { ++ if (*(brick_str++) != ' ') { ++ goto out; ++ } + } + } + } +@@ -548,14 +555,15 @@ __gluster_pmap_signout (rpcsvc_request_t *req) + goto fail; + } + rsp.op_ret = pmap_registry_remove (THIS, args.port, args.brick, +- GF_PMAP_PORT_BRICKSERVER, req->trans); ++ GF_PMAP_PORT_BRICKSERVER, req->trans, ++ _gf_false); + + ret = glusterd_get_brickinfo (THIS, args.brick, args.port, &brickinfo); + if (args.rdma_port) { + snprintf(brick_path, PATH_MAX, "%s.rdma", args.brick); + rsp.op_ret = pmap_registry_remove (THIS, args.rdma_port, + brick_path, GF_PMAP_PORT_BRICKSERVER, +- req->trans); ++ req->trans, _gf_false); + } + /* Update portmap status on brickinfo */ + if (brickinfo) +diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.h b/xlators/mgmt/glusterd/src/glusterd-pmap.h +index 9965a95..253b4cc 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-pmap.h ++++ b/xlators/mgmt/glusterd/src/glusterd-pmap.h +@@ -42,7 +42,8 @@ int pmap_registry_bind (xlator_t *this, int port, const char *brickname, + gf_pmap_port_type_t type, void *xprt); + int pmap_registry_extend (xlator_t *this, int port, const char *brickname); + int pmap_registry_remove (xlator_t *this, int port, const char *brickname, +- gf_pmap_port_type_t type, void *xprt); ++ gf_pmap_port_type_t type, void *xprt, ++ gf_boolean_t brick_disconnect); + int pmap_registry_search (xlator_t *this, const char *brickname, + gf_pmap_port_type_t type, gf_boolean_t destroy); + struct pmap_registry *pmap_registry_get (xlator_t *this); +diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c +index 4887ff4..81a3206 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.c ++++ b/xlators/mgmt/glusterd/src/glusterd.c +@@ -424,7 +424,8 @@ glusterd_rpcsvc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event, + pthread_mutex_lock (&priv->xprt_lock); + list_del (&xprt->list); + pthread_mutex_unlock (&priv->xprt_lock); +- pmap_registry_remove (this, 0, NULL, GF_PMAP_PORT_ANY, xprt); ++ pmap_registry_remove (this, 0, NULL, GF_PMAP_PORT_ANY, xprt, ++ _gf_false); + break; + } + +-- +1.8.3.1 + diff --git a/SOURCES/0044-glusterd-fix-brick-restart-parallelism.patch b/SOURCES/0044-glusterd-fix-brick-restart-parallelism.patch new file mode 100644 index 0000000..920801f --- /dev/null +++ b/SOURCES/0044-glusterd-fix-brick-restart-parallelism.patch @@ -0,0 +1,283 @@ +From 938ee38c02cce2a743c672f9c03798ebcbb1e348 Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Thu, 26 Oct 2017 14:26:30 +0530 +Subject: [PATCH 44/74] glusterd: fix brick restart parallelism + +glusterd's brick restart logic is not always sequential as there is +atleast three different ways how the bricks are restarted. +1. through friend-sm and glusterd_spawn_daemons () +2. through friend-sm and handling volume quorum action +3. through friend handshaking when there is a mimatch on quorum on +friend import. 
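All three of these paths can race with one another. A condensed sketch
of the guard this patch wraps around each of them (details in the
hunks below):

        /* start_triggered stays true until the brick dies or is
         * killed, so repeated triggers become idempotent; the mutex
         * serialises callers racing on the same brick */
        if (!brickinfo->start_triggered) {
                pthread_mutex_lock (&brickinfo->restart_mutex);
                {
                        ret = glusterd_brick_start (volinfo, brickinfo,
                                                    _gf_false);
                }
                pthread_mutex_unlock (&brickinfo->restart_mutex);
        }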
+ +In a brick multiplexing setup, glusterd ended up trying to spawn the +same brick process couple of times as almost in fraction of milliseconds +two threads hit glusterd_brick_start () because of which glusterd didn't +have any choice of rejecting any one of them as for both the case brick +start criteria met. + +As a solution, it'd be better to control this madness by two different +flags, one is a boolean called start_triggered which indicates a brick +start has been triggered and it continues to be true till a brick dies +or killed, the second is a mutex lock to ensure for a particular brick +we don't end up getting into glusterd_brick_start () more than once at +same point of time. + +>mainline patch : https://review.gluster.org/#/c/18577 + +Change-Id: I292f1e58d6971e111725e1baea1fe98b890b43e2 +BUG: 1505363 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/123056 +Reviewed-by: Gaurav Yadav +--- + xlators/mgmt/glusterd/src/glusterd-handler.c | 24 ++++++++----- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 31 ++++++++++------- + xlators/mgmt/glusterd/src/glusterd-server-quorum.c | 15 +++++++-- + xlators/mgmt/glusterd/src/glusterd-utils.c | 39 +++++++++++++++++----- + xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 8 +++++ + xlators/mgmt/glusterd/src/glusterd.h | 2 ++ + 6 files changed, 87 insertions(+), 32 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c +index 34e751c..c3b9252 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handler.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c +@@ -5946,16 +5946,22 @@ glusterd_mark_bricks_stopped_by_proc (glusterd_brick_proc_t *brick_proc) { + int ret = -1; + + cds_list_for_each_entry (brickinfo, &brick_proc->bricks, brick_list) { +- ret = glusterd_get_volinfo_from_brick (brickinfo->path, &volinfo); ++ ret = glusterd_get_volinfo_from_brick (brickinfo->path, ++ &volinfo); + if (ret) { +- gf_msg (THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, +- "Failed to get volinfo from brick(%s)", +- brickinfo->path); ++ gf_msg (THIS->name, GF_LOG_ERROR, 0, ++ GD_MSG_VOLINFO_GET_FAIL, "Failed to get volinfo" ++ " from brick(%s)", brickinfo->path); + goto out; + } +- cds_list_for_each_entry (brickinfo_tmp, &volinfo->bricks, brick_list) { +- if (strcmp (brickinfo->path, brickinfo_tmp->path) == 0) +- glusterd_set_brick_status (brickinfo_tmp, GF_BRICK_STOPPED); ++ cds_list_for_each_entry (brickinfo_tmp, &volinfo->bricks, ++ brick_list) { ++ if (strcmp (brickinfo->path, ++ brickinfo_tmp->path) == 0) { ++ glusterd_set_brick_status (brickinfo_tmp, ++ GF_BRICK_STOPPED); ++ brickinfo_tmp->start_triggered = _gf_false; ++ } + } + } + return 0; +@@ -6129,8 +6135,10 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, + if (temp == 1) + break; + } +- } else ++ } else { + glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED); ++ brickinfo->start_triggered = _gf_false; ++ } + break; + + case RPC_CLNT_DESTROY: +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 9641b4f..5b8f833 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -2402,18 +2402,25 @@ glusterd_start_bricks (glusterd_volinfo_t *volinfo) + GF_ASSERT (volinfo); + + cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { +- ret = glusterd_brick_start (volinfo, brickinfo, _gf_false); +- if (ret) { +- gf_msg (THIS->name, GF_LOG_ERROR, 0, +- 
GD_MSG_BRICK_DISCONNECTED, +- "Failed to start %s:%s for %s", +- brickinfo->hostname, brickinfo->path, +- volinfo->volname); +- gf_event (EVENT_BRICK_START_FAILED, +- "peer=%s;volume=%s;brick=%s", +- brickinfo->hostname, volinfo->volname, +- brickinfo->path); +- goto out; ++ if (!brickinfo->start_triggered) { ++ pthread_mutex_lock (&brickinfo->restart_mutex); ++ { ++ ret = glusterd_brick_start (volinfo, brickinfo, ++ _gf_false); ++ } ++ pthread_mutex_unlock (&brickinfo->restart_mutex); ++ if (ret) { ++ gf_msg (THIS->name, GF_LOG_ERROR, 0, ++ GD_MSG_BRICK_DISCONNECTED, ++ "Failed to start %s:%s for %s", ++ brickinfo->hostname, brickinfo->path, ++ volinfo->volname); ++ gf_event (EVENT_BRICK_START_FAILED, ++ "peer=%s;volume=%s;brick=%s", ++ brickinfo->hostname, volinfo->volname, ++ brickinfo->path); ++ goto out; ++ } + } + + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c +index 4706403..995a568 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c ++++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c +@@ -362,10 +362,19 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, + list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { + if (!glusterd_is_local_brick (this, volinfo, brickinfo)) + continue; +- if (quorum_status == DOESNT_MEET_QUORUM) ++ if (quorum_status == DOESNT_MEET_QUORUM) { + glusterd_brick_stop (volinfo, brickinfo, _gf_false); +- else +- glusterd_brick_start (volinfo, brickinfo, _gf_false); ++ } else { ++ if (!brickinfo->start_triggered) { ++ pthread_mutex_lock (&brickinfo->restart_mutex); ++ { ++ glusterd_brick_start (volinfo, ++ brickinfo, ++ _gf_false); ++ } ++ pthread_mutex_unlock (&brickinfo->restart_mutex); ++ } ++ } + } + volinfo->quorum_status = quorum_status; + if (quorum_status == MEETS_QUORUM) { +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c +index bb236df..18de517 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -1084,7 +1084,7 @@ glusterd_brickinfo_new (glusterd_brickinfo_t **brickinfo) + goto out; + + CDS_INIT_LIST_HEAD (&new_brickinfo->brick_list); +- ++ pthread_mutex_init (&new_brickinfo->restart_mutex, NULL); + *brickinfo = new_brickinfo; + + ret = 0; +@@ -2481,7 +2481,7 @@ glusterd_volume_stop_glusterfs (glusterd_volinfo_t *volinfo, + (void) sys_unlink (pidfile); + + brickinfo->status = GF_BRICK_STOPPED; +- ++ brickinfo->start_triggered = _gf_false; + if (del_brick) + glusterd_delete_brick (volinfo, brickinfo); + out: +@@ -5817,13 +5817,14 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo, + * three different triggers for an attempt to start the brick process + * due to the quorum handling code in glusterd_friend_sm. 
+ */ +- if (brickinfo->status == GF_BRICK_STARTING) { ++ if (brickinfo->status == GF_BRICK_STARTING || ++ brickinfo->start_triggered) { + gf_msg_debug (this->name, 0, "brick %s is already in starting " + "phase", brickinfo->path); + ret = 0; + goto out; + } +- ++ brickinfo->start_triggered = _gf_true; + GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, brickinfo, conf); + if (gf_is_service_running (pidfile, &pid)) { + if (brickinfo->status != GF_BRICK_STARTING && +@@ -5936,6 +5937,9 @@ run: + } + + out: ++ if (ret && brickinfo) { ++ brickinfo->start_triggered = _gf_false; ++ } + gf_msg_debug (this->name, 0, "returning %d ", ret); + return ret; + } +@@ -5997,11 +6001,19 @@ glusterd_restart_bricks (glusterd_conf_t *conf) + start_svcs = _gf_true; + glusterd_svcs_manager (NULL); + } +- + cds_list_for_each_entry (brickinfo, &volinfo->bricks, + brick_list) { +- glusterd_brick_start (volinfo, brickinfo, +- _gf_false); ++ if (!brickinfo->start_triggered) { ++ pthread_mutex_lock ++ (&brickinfo->restart_mutex); ++ { ++ glusterd_brick_start ++ (volinfo, brickinfo, ++ _gf_false); ++ } ++ pthread_mutex_unlock ++ (&brickinfo->restart_mutex); ++ } + } + ret = glusterd_store_volinfo + (volinfo, GLUSTERD_VOLINFO_VER_AC_NONE); +@@ -6040,8 +6052,17 @@ glusterd_restart_bricks (glusterd_conf_t *conf) + "volume %s", volinfo->volname); + cds_list_for_each_entry (brickinfo, &volinfo->bricks, + brick_list) { +- glusterd_brick_start (volinfo, brickinfo, +- _gf_false); ++ if (!brickinfo->start_triggered) { ++ pthread_mutex_lock ++ (&brickinfo->restart_mutex); ++ { ++ glusterd_brick_start ++ (volinfo, brickinfo, ++ _gf_false); ++ } ++ pthread_mutex_unlock ++ (&brickinfo->restart_mutex); ++ } + } + ret = glusterd_store_volinfo + (volinfo, GLUSTERD_VOLINFO_VER_AC_NONE); +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +index 834acab..bec5f72 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +@@ -2545,6 +2545,14 @@ glusterd_start_volume (glusterd_volinfo_t *volinfo, int flags, + GF_ASSERT (volinfo); + + cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { ++ /* Mark start_triggered to false so that in case if this brick ++ * was brought down through gf_attach utility, the ++ * brickinfo->start_triggered wouldn't have been updated to ++ * _gf_false ++ */ ++ if (flags & GF_CLI_FLAG_OP_FORCE) { ++ brickinfo->start_triggered = _gf_false; ++ } + ret = glusterd_brick_start (volinfo, brickinfo, wait); + /* If 'force' try to start all bricks regardless of success or + * failure +diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h +index 722d2f8..d4bb236 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.h ++++ b/xlators/mgmt/glusterd/src/glusterd.h +@@ -240,6 +240,8 @@ struct glusterd_brickinfo { + uint64_t statfs_fsid; + uint32_t fs_share_count; + gf_boolean_t port_registered; ++ gf_boolean_t start_triggered; ++ pthread_mutex_t restart_mutex; + }; + + typedef struct glusterd_brickinfo glusterd_brickinfo_t; +-- +1.8.3.1 + diff --git a/SOURCES/0045-glusterd-introduce-max-port-range.patch b/SOURCES/0045-glusterd-introduce-max-port-range.patch new file mode 100644 index 0000000..b9079d4 --- /dev/null +++ b/SOURCES/0045-glusterd-introduce-max-port-range.patch @@ -0,0 +1,265 @@ +From b027d2fdd184d2ee2b2c4236603200be344156f8 Mon Sep 17 00:00:00 2001 +From: Atin Mukherjee +Date: Thu, 10 Aug 2017 18:31:55 +0530 +Subject: [PATCH 45/74] glusterd: introduce max-port 
range + +glusterd.vol file always had an option (commented out) to indicate the +base-port to start the portmapper allocation. This patch brings in the +max-port configuration where one can limit the range of ports which +gluster can be allowed to bind. + +>Fixes: #305 +>Change-Id: Id7a864f818227b9530a07e13d605138edacd9aa9 +>Signed-off-by: Atin Mukherjee +>Reviewed-on: https://review.gluster.org/18016 +>Smoke: Gluster Build System +>Reviewed-by: Prashanth Pai +>Reviewed-by: Niels de Vos +>CentOS-regression: Gluster Build System +>Reviewed-by: Gaurav Yadav + +Change-Id: Id7a864f818227b9530a07e13d605138edacd9aa9 +BUG: 1474745 +Signed-off-by: Atin Mukherjee +Reviewed-on: https://code.engineering.redhat.com/gerrit/123060 +Reviewed-by: Gaurav Yadav +--- + extras/glusterd.vol.in | 1 + + xlators/mgmt/glusterd/src/glusterd-messages.h | 10 +++++++++- + xlators/mgmt/glusterd/src/glusterd-pmap.c | 20 +++++++++++--------- + xlators/mgmt/glusterd/src/glusterd-pmap.h | 3 ++- + xlators/mgmt/glusterd/src/glusterd-snapd-svc.c | 8 ++++++++ + xlators/mgmt/glusterd/src/glusterd-utils.c | 18 +++++++++++++++++- + xlators/mgmt/glusterd/src/glusterd.c | 17 +++++++++++++++-- + xlators/mgmt/glusterd/src/glusterd.h | 1 + + 8 files changed, 64 insertions(+), 14 deletions(-) + +diff --git a/extras/glusterd.vol.in b/extras/glusterd.vol.in +index 957b277..0152996 100644 +--- a/extras/glusterd.vol.in ++++ b/extras/glusterd.vol.in +@@ -9,4 +9,5 @@ volume management + option event-threads 1 + # option transport.address-family inet6 + # option base-port 49152 ++# option max-port 65535 + end-volume +diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h +index 8bb4c43..de9ae92 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-messages.h ++++ b/xlators/mgmt/glusterd/src/glusterd-messages.h +@@ -41,7 +41,7 @@ + + #define GLUSTERD_COMP_BASE GLFS_MSGID_GLUSTERD + +-#define GLFS_NUM_MESSAGES 611 ++#define GLFS_NUM_MESSAGES 612 + + #define GLFS_MSGID_END (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1) + /* Messaged with message IDs */ +@@ -4945,6 +4945,14 @@ + */ + #define GD_MSG_SVC_START_FAIL (GLUSTERD_COMP_BASE + 590) + ++/*! 
++ * @messageid ++ * @diagnosis ++ * @recommendedaction ++ * ++ */ ++#define GD_MSG_PORTS_EXHAUSTED (GLUSTERD_COMP_BASE + 612) ++ + /*------------*/ + + #define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages" +diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.c b/xlators/mgmt/glusterd/src/glusterd-pmap.c +index 1b547e7..4f045ab 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-pmap.c ++++ b/xlators/mgmt/glusterd/src/glusterd-pmap.c +@@ -61,8 +61,8 @@ pmap_registry_new (xlator_t *this) + + pmap->base_port = pmap->last_alloc = + ((glusterd_conf_t *)(this->private))->base_port; +- +- for (i = pmap->base_port; i <= GF_PORT_MAX; i++) { ++ pmap->max_port = ((glusterd_conf_t *)(this->private))->max_port; ++ for (i = pmap->base_port; i <= pmap->max_port; i++) { + if (pmap_port_isfree (i)) + pmap->ports[i].type = GF_PMAP_PORT_FREE; + else +@@ -184,10 +184,12 @@ pmap_registry_search_by_xprt (xlator_t *this, void *xprt, + static char * + pmap_registry_search_by_port (xlator_t *this, int port) + { +- struct pmap_registry *pmap = NULL; +- char *brickname = NULL; ++ struct pmap_registry *pmap = NULL; ++ char *brickname = NULL; ++ int max_port = 0; + +- if (port > GF_PORT_MAX) ++ max_port = ((glusterd_conf_t *)(this->private))->max_port; ++ if (port > max_port) + goto out; + + pmap = pmap_registry_get (this); +@@ -209,7 +211,7 @@ pmap_registry_alloc (xlator_t *this) + + pmap = pmap_registry_get (this); + +- for (p = pmap->base_port; p <= GF_PORT_MAX; p++) { ++ for (p = pmap->base_port; p <= pmap->max_port; p++) { + /* GF_PMAP_PORT_FOREIGN may be freed up ? */ + if ((pmap->ports[p].type == GF_PMAP_PORT_FREE) || + (pmap->ports[p].type == GF_PMAP_PORT_FOREIGN)) { +@@ -261,7 +263,7 @@ pmap_registry_bind (xlator_t *this, int port, const char *brickname, + + pmap = pmap_registry_get (this); + +- if (port > GF_PORT_MAX) ++ if (port > pmap->max_port) + goto out; + + p = port; +@@ -297,7 +299,7 @@ pmap_registry_extend (xlator_t *this, int port, const char *brickname) + + pmap = pmap_registry_get (this); + +- if (port > GF_PORT_MAX) { ++ if (port > pmap->max_port) { + return -1; + } + +@@ -357,7 +359,7 @@ pmap_registry_remove (xlator_t *this, int port, const char *brickname, + goto out; + + if (port) { +- if (port > GF_PORT_MAX) ++ if (port > pmap->max_port) + goto out; + + p = port; +diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.h b/xlators/mgmt/glusterd/src/glusterd-pmap.h +index 253b4cc..f642d66 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-pmap.h ++++ b/xlators/mgmt/glusterd/src/glusterd-pmap.h +@@ -31,8 +31,9 @@ struct pmap_port_status { + + struct pmap_registry { + int base_port; ++ int max_port; + int last_alloc; +- struct pmap_port_status ports[65536]; ++ struct pmap_port_status ports[GF_PORT_MAX + 1]; + }; + + int pmap_assign_port (xlator_t *this, int port, const char *path); +diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c +index 59d8fbd..5621852 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c +@@ -300,6 +300,14 @@ glusterd_snapdsvc_start (glusterd_svc_t *svc, int flags) + "-S", svc->conn.sockpath, NULL); + + snapd_port = pmap_assign_port (THIS, volinfo->snapd.port, snapd_id); ++ if (!snapd_port) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_PORTS_EXHAUSTED, ++ "All the ports in the range are exhausted, can't start " ++ "snapd for volume %s", volinfo->volname); ++ ret = -1; ++ goto out; ++ } ++ + volinfo->snapd.port = snapd_port; + + runner_add_arg 
(&runner, "--brick-port"); +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c +index 18de517..55c4fa7 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -2002,7 +2002,14 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo, + } + + port = pmap_assign_port (THIS, brickinfo->port, brickinfo->path); +- ++ if (!port) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_PORTS_EXHAUSTED, ++ "All the ports in the range are exhausted, can't start " ++ "brick %s for volume %s", brickinfo->path, ++ volinfo->volname); ++ ret = -1; ++ goto out; ++ } + /* Build the exp_path, before starting the glusterfsd even in + valgrind mode. Otherwise all the glusterfsd processes start + writing the valgrind log to the same file. +@@ -2076,6 +2083,15 @@ retry: + brickinfo->path); + rdma_port = pmap_assign_port (THIS, brickinfo->rdma_port, + rdma_brick_path); ++ if (!rdma_port) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_PORTS_EXHAUSTED, "All rdma ports in the " ++ "range are exhausted, can't start brick %s for " ++ "volume %s", rdma_brick_path, ++ volinfo->volname); ++ ret = -1; ++ goto out; ++ } + runner_argprintf (&runner, "%d,%d", port, rdma_port); + runner_add_arg (&runner, "--xlator-option"); + runner_argprintf (&runner, "%s-server.transport.rdma.listen-port=%d", +diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c +index 81a3206..68d3e90 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.c ++++ b/xlators/mgmt/glusterd/src/glusterd.c +@@ -1824,12 +1824,20 @@ init (xlator_t *this) + if (ret) + goto out; + +- conf->base_port = GF_IANA_PRIV_PORTS_START; +- if (dict_get_uint32(this->options, "base-port", &conf->base_port) == 0) { ++ conf->base_port = GF_IANA_PRIV_PORTS_START; ++ if (dict_get_uint32 (this->options, "base-port", ++ &conf->base_port) == 0) { + gf_msg (this->name, GF_LOG_INFO, 0, + GD_MSG_DICT_SET_FAILED, + "base-port override: %d", conf->base_port); + } ++ conf->max_port = GF_PORT_MAX; ++ if (dict_get_uint32 (this->options, "max-port", ++ &conf->max_port) == 0) { ++ gf_msg (this->name, GF_LOG_INFO, 0, ++ GD_MSG_DICT_SET_FAILED, ++ "max-port override: %d", conf->max_port); ++ } + + /* Set option to run bricks on valgrind if enabled in glusterd.vol */ + this->ctx->cmd_args.valgrind = valgrind; +@@ -2135,6 +2143,11 @@ struct volume_options options[] = { + .type = GF_OPTION_TYPE_INT, + .description = "Sets the base port for portmap query" + }, ++ { .key = {"max-port"}, ++ .type = GF_OPTION_TYPE_INT, ++ .max = GF_PORT_MAX, ++ .description = "Sets the max port for portmap query" ++ }, + { .key = {"snap-brick-path"}, + .type = GF_OPTION_TYPE_STR, + .description = "directory where the bricks for the snapshots will be created" +diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h +index d4bb236..291f2f7 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.h ++++ b/xlators/mgmt/glusterd/src/glusterd.h +@@ -187,6 +187,7 @@ typedef struct { + gf_boolean_t restart_done; + rpcsvc_t *uds_rpc; /* RPCSVC for the unix domain socket */ + uint32_t base_port; ++ uint32_t max_port; + char *snap_bricks_directory; + gf_store_handle_t *missed_snaps_list_shandle; + struct cds_list_head missed_snaps_list; +-- +1.8.3.1 + diff --git a/SOURCES/0046-Revert-build-conditionally-build-legacy-gNFS-server-.patch b/SOURCES/0046-Revert-build-conditionally-build-legacy-gNFS-server-.patch new file mode 100644 index 0000000..eb696b5 --- /dev/null +++ 
b/SOURCES/0046-Revert-build-conditionally-build-legacy-gNFS-server-.patch @@ -0,0 +1,424 @@ +From 538b92ebe180186d84e3f5288f168c404e8957d4 Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Mon, 13 Nov 2017 18:41:58 +0530 +Subject: [PATCH 46/74] Revert "build: conditionally build legacy gNFS server + and associated sub-packaging" + +This reverts commit 83abcba6b42f94eb5a6495a634d4055362a9d79d. + +Conflicts: + glusterfs.spec.in + xlators/Makefile.am + xlators/mgmt/glusterd/src/glusterd-messages.h +--- + configure.ac | 12 ----- + extras/LinuxRPM/Makefile.am | 4 +- + glusterfs.spec.in | 65 +++++++-------------------- + xlators/Makefile.am | 6 +-- + xlators/mgmt/glusterd/src/Makefile.am | 4 +- + xlators/mgmt/glusterd/src/glusterd-nfs-svc.c | 28 ++++++------ + xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h | 1 + + xlators/mgmt/glusterd/src/glusterd-utils.c | 7 ++- + xlators/mgmt/glusterd/src/glusterd.c | 35 ++++++++++++--- + 9 files changed, 68 insertions(+), 94 deletions(-) + +diff --git a/configure.ac b/configure.ac +index 3841959..dfccd40 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -1345,17 +1345,6 @@ if test "x$enable_glupy" = "xyes"; then + fi + dnl end glupy section + +-dnl gnfs section +-BUILD_GNFS="no" +-AC_ARG_ENABLE([gnfs], +- AC_HELP_STRING([--enable-gnfs], +- [Enable legacy gnfs server xlator.])) +-if test "x$enable_gnfs" = "xyes"; then +- BUILD_GNFS="yes" +-fi +-AM_CONDITIONAL([BUILD_GNFS], [test x$BUILD_GNFS = xyes]) +-dnl end gnfs section +- + dnl Check for userspace-rcu + PKG_CHECK_MODULES([URCU], [liburcu-bp], [], + [AC_CHECK_HEADERS([urcu-bp.h], +@@ -1590,5 +1579,4 @@ echo "Events : $BUILD_EVENTS" + echo "EC dynamic support : $EC_DYNAMIC_SUPPORT" + echo "Use memory pools : $USE_MEMPOOL" + echo "Nanosecond m/atimes : $BUILD_NANOSECOND_TIMESTAMPS" +-echo "Legacy gNFS server : $BUILD_GNFS" + echo +diff --git a/extras/LinuxRPM/Makefile.am b/extras/LinuxRPM/Makefile.am +index f028537..61fd6da 100644 +--- a/extras/LinuxRPM/Makefile.am ++++ b/extras/LinuxRPM/Makefile.am +@@ -18,7 +18,7 @@ autogen: + cd ../.. && \ + rm -rf autom4te.cache && \ + ./autogen.sh && \ +- ./configure --enable-gnfs --with-previous-options ++ ./configure --with-previous-options + + prep: + $(MAKE) -C ../.. dist; +@@ -36,7 +36,7 @@ srcrpm: + mv rpmbuild/SRPMS/* . + + rpms: +- rpmbuild --define '_topdir $(shell pwd)/rpmbuild' --with gnfs -bb rpmbuild/SPECS/glusterfs.spec ++ rpmbuild --define '_topdir $(shell pwd)/rpmbuild' -bb rpmbuild/SPECS/glusterfs.spec + mv rpmbuild/RPMS/*/* . + + # EPEL-5 does not like new versions of rpmbuild and requires some +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 8c16477..10339fe 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -47,10 +47,6 @@ + %global _without_georeplication --disable-georeplication + %endif + +-# if you wish to compile an rpm with the legacy gNFS server xlator +-# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with gnfs +-%{?_with_gnfs:%global _with_gnfs --enable-gnfs} +- + # if you wish to compile an rpm without the OCF resource agents... 
+ # rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without ocf + %{?_without_ocf:%global _without_ocf --without-ocf} +@@ -122,7 +118,7 @@ + %endif + + # From https://fedoraproject.org/wiki/Packaging:Python#Macros +-%if ( 0%{?rhel} && 0%{?rhel} <= 6 ) ++%if ( 0%{?rhel} && 0%{?rhel} <= 5 ) + %{!?python2_sitelib: %global python2_sitelib %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")} + %{!?python2_sitearch: %global python2_sitearch %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")} + %global _rundir %{_localstatedir}/run +@@ -461,26 +457,6 @@ This package provides support to geo-replication. + %endif + %endif + +-%if ( 0%{?_with_gnfs:1} ) +-%package gnfs +-Summary: GlusterFS gNFS server +-Group: System Environment/Daemons +-Requires: %{name}%{?_isa} = %{version}-%{release} +-Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release} +-Requires: nfs-utils +- +-%description gnfs +-GlusterFS is a distributed file-system capable of scaling to several +-petabytes. It aggregates various storage bricks over Infiniband RDMA +-or TCP/IP interconnect into one large parallel network file +-system. GlusterFS is one of the most sophisticated file systems in +-terms of features and extensibility. It borrows a powerful concept +-called Translators from GNU Hurd kernel. Much of the code in GlusterFS +-is in user space and easily manageable. +- +-This package provides the glusterfs legacy gNFS server xlator +-%endif +- + %package libs + Summary: GlusterFS common libraries + Group: Applications/File +@@ -621,6 +597,7 @@ Requires: %{name}-api%{?_isa} = %{version}-%{release} + Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release} + # lvm2 for snapshot, and nfs-utils and rpcbind/portmap for gnfs server + Requires: lvm2 ++Requires: nfs-utils + %if ( 0%{?_with_systemd:1} ) + %{?systemd_requires} + %else +@@ -736,19 +713,18 @@ export LDFLAGS + ./autogen.sh && %configure \ + %{?_with_cmocka} \ + %{?_with_debug} \ +- %{?_with_firewalld} \ +- %{?_with_gnfs} \ +- %{?_with_tmpfilesdir} \ + %{?_with_valgrind} \ ++ %{?_with_tmpfilesdir} \ + %{?_without_bd} \ + %{?_without_epoll} \ +- %{?_without_events} \ + %{?_without_fusermount} \ + %{?_without_georeplication} \ ++ %{?_with_firewalld} \ + %{?_without_ocf} \ + %{?_without_rdma} \ + %{?_without_syslog} \ +- %{?_without_tiering} ++ %{?_without_tiering} \ ++ %{?_without_events} + + # fix hardening and remove rpath in shlibs + %if ( 0%{?fedora} && 0%{?fedora} > 17 ) || ( 0%{?rhel} && 0%{?rhel} > 6 ) +@@ -1105,6 +1081,7 @@ exit 0 + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt* ++%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs* + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server* + %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage* +@@ -1297,19 +1274,6 @@ exit 0 + %endif + + %if ( 0%{?_build_server} ) +-%if ( 0%{?_with_gnfs:1} ) +-%files gnfs +-%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator +-%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs +- %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs/server.so +-%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs +-%ghost %attr(0600,-,-) 
%{_sharedstatedir}/glusterd/nfs/nfs-server.vol +-%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run +-%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid +-%endif +-%endif +- +-%if ( 0%{?_build_server} ) + %files ganesha + %endif + +@@ -1399,11 +1363,6 @@ exit 0 + # sysconf + %config(noreplace) %{_sysconfdir}/glusterfs + %exclude %{_sysconfdir}/glusterfs/eventsconfig.json +-%exclude %{_sharedstatedir}/glusterd/nfs/nfs-server.vol +-%exclude %{_sharedstatedir}/glusterd/nfs/run/nfs.pid +-%if ( 0%{?_with_gnfs:1} ) +-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs/* +-%endif + %config(noreplace) %{_sysconfdir}/sysconfig/glusterd + %if ( 0%{_for_fedora_koji_builds} ) + %config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd +@@ -1450,6 +1409,7 @@ exit 0 + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so ++ %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs* + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt + %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so + %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol +@@ -1517,7 +1477,11 @@ exit 0 + %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre + %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh + %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh +-%config(noreplace) %ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/options ++%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs ++%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol ++%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run ++%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid ++%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/options + %ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/peers + %ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/quotad + %ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/scrub +@@ -2156,6 +2120,9 @@ fi + %endif + + %changelog ++* Mon Nov 13 2017 Jiffin Tony Thottan ++- DOWNSTREAM ONLY - revert of 83abcb(gnfs in an optional subpackage) ++ + * Tue Oct 10 2017 Milind Changire + - DOWNSTREAM ONLY patch - launch glusterd in upgrade mode after all new bits have been installed + +diff --git a/xlators/Makefile.am b/xlators/Makefile.am +index 29549db..c3c9cf2 100644 +--- a/xlators/Makefile.am ++++ b/xlators/Makefile.am +@@ -1,12 +1,8 @@ +-if BUILD_GNFS +- GNFS_DIR = nfs +-endif +- + DIST_SUBDIRS = cluster storage protocol performance debug features encryption \ + mount nfs mgmt system playground meta + + SUBDIRS = cluster storage protocol performance debug features encryption \ +- mount ${GNFS_DIR} mgmt system playground meta ++ mount nfs mgmt system playground meta + + EXTRA_DIST = xlator.sym + +diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am +index b0f5a9b..4858dee 100644 +--- a/xlators/mgmt/glusterd/src/Makefile.am ++++ b/xlators/mgmt/glusterd/src/Makefile.am +@@ -1,8 +1,6 @@ + xlator_LTLIBRARIES = glusterd.la + xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/mgmt +-glusterd_la_CPPFLAGS = $(AM_CPPFLAGS) \ +- -DFILTERDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/filter\" \ +- -DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\" ++glusterd_la_CPPFLAGS = $(AM_CPPFLAGS) 
"-DFILTERDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/filter\"" + glusterd_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS) + glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \ + glusterd-op-sm.c glusterd-utils.c glusterd-rpc-ops.c \ +diff --git a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c +index 32b1064..eab9746 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c ++++ b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c +@@ -10,7 +10,6 @@ + + #include "globals.h" + #include "run.h" +-#include "syscall.h" + #include "glusterd.h" + #include "glusterd-utils.h" + #include "glusterd-volgen.h" +@@ -18,6 +17,8 @@ + #include "glusterd-messages.h" + #include "glusterd-svc-helper.h" + ++static char *nfs_svc_name = "nfs"; ++ + static gf_boolean_t + glusterd_nfssvc_need_start () + { +@@ -40,13 +41,19 @@ glusterd_nfssvc_need_start () + return start; + } + ++int ++glusterd_nfssvc_init (glusterd_svc_t *svc) ++{ ++ return glusterd_svc_init (svc, nfs_svc_name); ++} ++ + static int + glusterd_nfssvc_create_volfile () + { + char filepath[PATH_MAX] = {0,}; + glusterd_conf_t *conf = THIS->private; + +- glusterd_svc_build_volfile_path (conf->nfs_svc.name, conf->workdir, ++ glusterd_svc_build_volfile_path (nfs_svc_name, conf->workdir, + filepath, sizeof (filepath)); + return glusterd_create_global_volfile (build_nfs_graph, + filepath, NULL); +@@ -58,16 +65,15 @@ glusterd_nfssvc_manager (glusterd_svc_t *svc, void *data, int flags) + int ret = -1; + + if (!svc->inited) { +- ret = glusterd_svc_init (svc, "nfs"); ++ ret = glusterd_nfssvc_init (svc); + if (ret) { + gf_msg (THIS->name, GF_LOG_ERROR, 0, +- GD_MSG_FAILED_INIT_NFSSVC, +- "Failed to init nfs service"); ++ GD_MSG_FAILED_INIT_NFSSVC, "Failed to init nfs " ++ "service"); + goto out; + } else { + svc->inited = _gf_true; +- gf_msg_debug (THIS->name, 0, +- "nfs service initialized"); ++ gf_msg_debug (THIS->name, 0, "nfs service initialized"); + } + } + +@@ -75,14 +81,6 @@ glusterd_nfssvc_manager (glusterd_svc_t *svc, void *data, int flags) + if (ret) + goto out; + +- /* not an error, or a (very) soft error at best */ +- if (sys_access (XLATORDIR "/nfs/server.so", R_OK) != 0) { +- gf_msg (THIS->name, GF_LOG_INFO, 0, +- GD_MSG_GNFS_XLATOR_NOT_INSTALLED, +- "nfs/server.so xlator is not installed"); +- goto out; +- } +- + ret = glusterd_nfssvc_create_volfile (); + if (ret) + goto out; +diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h +index 8b70a62..c505d1e 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h ++++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h +@@ -29,6 +29,7 @@ struct glusterd_svc_ { + char name[PATH_MAX]; + glusterd_conn_t conn; + glusterd_proc_t proc; ++ glusterd_svc_build_t build; + glusterd_svc_manager_t manager; + glusterd_svc_start_t start; + glusterd_svc_stop_t stop; +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c +index 55c4fa7..f611fbb 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -668,8 +668,11 @@ glusterd_volinfo_new (glusterd_volinfo_t **volinfo) + + new_volinfo->xl = THIS; + +- glusterd_snapdsvc_build (&new_volinfo->snapd.svc); +- glusterd_tierdsvc_build (&new_volinfo->tierd.svc); ++ new_volinfo->snapd.svc.build = glusterd_snapdsvc_build; ++ new_volinfo->snapd.svc.build (&(new_volinfo->snapd.svc)); ++ ++ new_volinfo->tierd.svc.build = glusterd_tierdsvc_build; ++ 
new_volinfo->tierd.svc.build (&(new_volinfo->tierd.svc)); + + pthread_mutex_init (&new_volinfo->reflock, NULL); + *volinfo = glusterd_volinfo_ref (new_volinfo); +diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c +index 68d3e90..6ce4156 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.c ++++ b/xlators/mgmt/glusterd/src/glusterd.c +@@ -1330,6 +1330,34 @@ out: + return ret; + } + ++static void ++glusterd_svcs_build () ++{ ++ xlator_t *this = NULL; ++ glusterd_conf_t *priv = NULL; ++ ++ this = THIS; ++ GF_ASSERT (this); ++ ++ priv = this->private; ++ GF_ASSERT (priv); ++ ++ priv->shd_svc.build = glusterd_shdsvc_build; ++ priv->shd_svc.build (&(priv->shd_svc)); ++ ++ priv->nfs_svc.build = glusterd_nfssvc_build; ++ priv->nfs_svc.build (&(priv->nfs_svc)); ++ ++ priv->quotad_svc.build = glusterd_quotadsvc_build; ++ priv->quotad_svc.build (&(priv->quotad_svc)); ++ ++ priv->bitd_svc.build = glusterd_bitdsvc_build; ++ priv->bitd_svc.build (&(priv->bitd_svc)); ++ ++ priv->scrub_svc.build = glusterd_scrubsvc_build; ++ priv->scrub_svc.build (&(priv->scrub_svc)); ++} ++ + static int + is_upgrade (dict_t *options, gf_boolean_t *upgrade) + { +@@ -1864,12 +1892,7 @@ init (xlator_t *this) + this->private = conf; + glusterd_mgmt_v3_lock_init (); + glusterd_txn_opinfo_dict_init (); +- +- glusterd_shdsvc_build (&conf->shd_svc); +- glusterd_nfssvc_build (&conf->nfs_svc); +- glusterd_quotadsvc_build (&conf->quotad_svc); +- glusterd_bitdsvc_build (&conf->bitd_svc); +- glusterd_scrubsvc_build (&conf->scrub_svc); ++ glusterd_svcs_build (); + + /* Make install copies few of the hook-scripts by creating hooks + * directory. Hence purposefully not doing the check for the presence of +-- +1.8.3.1 + diff --git a/SOURCES/0047-Revert-glusterd-skip-nfs-svc-reconfigure-if-nfs-xlat.patch b/SOURCES/0047-Revert-glusterd-skip-nfs-svc-reconfigure-if-nfs-xlat.patch new file mode 100644 index 0000000..bd323a5 --- /dev/null +++ b/SOURCES/0047-Revert-glusterd-skip-nfs-svc-reconfigure-if-nfs-xlat.patch @@ -0,0 +1,34 @@ +From 7dd54e4e500a41105f375b2aa3620fcd619d5148 Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Mon, 13 Nov 2017 18:43:00 +0530 +Subject: [PATCH 47/74] Revert "glusterd: skip nfs svc reconfigure if nfs + xlator is not installed" + +This reverts commit 316e3300cfaa646b7fa45fcc7f57b81c7bb15a0e. 
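+
+With gNFS built and shipped unconditionally again in this downstream
+package (see the earlier spec and Makefile changes), glusterd no longer
+needs to probe for the nfs/server.so xlator before reconfiguring the
+NFS service. The guard this revert removes followed the pattern below
+(a sketch reconstructed from the hunk that follows; XLATORDIR is the
+configured xlator install directory):
+
+    /* skip the reconfigure, rather than fail, when the optional
+     * gnfs xlator is not installed */
+    if (sys_access (XLATORDIR "/nfs/server.so", R_OK) != 0) {
+            ret = 0;
+            goto out;
+    }
+
+Once the xlator is always present, the probe can never legitimately
+fail, so it is dropped.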
+
+---
+ xlators/mgmt/glusterd/src/glusterd-nfs-svc.c | 9 ---------
+ 1 file changed, 9 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
+index eab9746..da34342 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
++++ b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
+@@ -154,15 +154,6 @@ glusterd_nfssvc_reconfigure ()
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO (this->name, priv, out);
+
+- /* not an error, or a (very) soft error at best */
+- if (sys_access (XLATORDIR "/nfs/server.so", R_OK) != 0) {
+- gf_msg (THIS->name, GF_LOG_INFO, 0,
+- GD_MSG_GNFS_XLATOR_NOT_INSTALLED,
+- "nfs/server.so xlator is not installed");
+- ret = 0;
+- goto out;
+- }
+-
+ cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
+ if (GLUSTERD_STATUS_STARTED == volinfo->status) {
+ vol_started = _gf_true;
+--
+1.8.3.1
+
diff --git a/SOURCES/0048-glusterd-introduce-timer-in-mgmt_v3_lock.patch b/SOURCES/0048-glusterd-introduce-timer-in-mgmt_v3_lock.patch
new file mode 100644
index 0000000..2575bd2
--- /dev/null
+++ b/SOURCES/0048-glusterd-introduce-timer-in-mgmt_v3_lock.patch
@@ -0,0 +1,473 @@
+From f37a409a8c0fa683ad95a61bf71e949f215e2f81 Mon Sep 17 00:00:00 2001
+From: Gaurav Yadav
+Date: Thu, 5 Oct 2017 23:44:46 +0530
+Subject: [PATCH 48/74] glusterd : introduce timer in mgmt_v3_lock
+
+Problem:
+In a multinode environment, if two op-sm transactions are
+initiated on one of the receiver nodes at the same time,
+there is a possibility that glusterd may end up holding a
+stale lock.
+
+Solution:
+During mgmt_v3_lock a registration is made with gf_timer_call_after,
+which releases the lock after a certain period of time.
+
+>mainline patch : https://review.gluster.org/#/c/18437
+
+Change-Id: I16cc2e5186a2e8a5e35eca2468b031811e093843
+BUG: 1442983
+Signed-off-by: Gaurav Yadav
+Reviewed-on: https://code.engineering.redhat.com/gerrit/123069
+Reviewed-by: Atin Mukherjee
+---
+ extras/glusterd.vol.in | 1 +
+ libglusterfs/src/common-utils.h | 2 +-
+ libglusterfs/src/mem-types.h | 1 +
+ xlators/mgmt/glusterd/src/glusterd-locks.c | 219 +++++++++++++++++++++++++++--
+ xlators/mgmt/glusterd/src/glusterd-locks.h | 13 ++
+ xlators/mgmt/glusterd/src/glusterd.c | 28 +++-
+ xlators/mgmt/glusterd/src/glusterd.h | 2 +
+ 7 files changed, 246 insertions(+), 20 deletions(-)
+
+diff --git a/extras/glusterd.vol.in b/extras/glusterd.vol.in
+index 0152996..fe413a9 100644
+--- a/extras/glusterd.vol.in
++++ b/extras/glusterd.vol.in
+@@ -7,6 +7,7 @@ volume management
+ option transport.socket.read-fail-log off
+ option ping-timeout 0
+ option event-threads 1
++# option lock-timer 180
+ # option transport.address-family inet6
+ # option base-port 49152
+ # option max-port 65535
+diff --git a/libglusterfs/src/common-utils.h b/libglusterfs/src/common-utils.h
+index e1c5f66..0131070 100644
+--- a/libglusterfs/src/common-utils.h
++++ b/libglusterfs/src/common-utils.h
+@@ -102,7 +102,7 @@ void trap (void);
+ #define GF_CLNT_INSECURE_PORT_CEILING (GF_IANA_PRIV_PORTS_START - 1)
+ #define GF_PORT_MAX 65535
+ #define GF_PORT_ARRAY_SIZE ((GF_PORT_MAX + 7) / 8)
+-
++#define GF_LOCK_TIMER 180
+ #define GF_MINUTE_IN_SECONDS 60
+ #define GF_HOUR_IN_SECONDS (60*60)
+ #define GF_DAY_IN_SECONDS (24*60*60)
+diff --git a/libglusterfs/src/mem-types.h b/libglusterfs/src/mem-types.h
+index d244fb5..85cb5d2 100644
+--- a/libglusterfs/src/mem-types.h
++++ b/libglusterfs/src/mem-types.h
+@@ -177,6 +177,7 @@ enum gf_common_mem_types_ {
+ gf_common_mt_pthread_t,
+ 
gf_common_ping_local_t, + gf_common_volfile_t, ++ gf_common_mt_mgmt_v3_lock_timer_t, + gf_common_mt_end + }; + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c +index 146092d..bd73b37 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-locks.c ++++ b/xlators/mgmt/glusterd/src/glusterd-locks.c +@@ -94,6 +94,50 @@ glusterd_mgmt_v3_lock_fini () + dict_unref (priv->mgmt_v3_lock); + } + ++/* Initialize the global mgmt_v3_timer lock list(dict) when ++ * glusterd is spawned */ ++int32_t ++glusterd_mgmt_v3_lock_timer_init () ++{ ++ int32_t ret = -1; ++ xlator_t *this = NULL; ++ glusterd_conf_t *priv = NULL; ++ ++ this = THIS; ++ GF_VALIDATE_OR_GOTO ("glusterd", this, out); ++ ++ priv = this->private; ++ GF_VALIDATE_OR_GOTO (this->name, priv, out); ++ ++ priv->mgmt_v3_lock_timer = dict_new (); ++ if (!priv->mgmt_v3_lock_timer) ++ goto out; ++ ++ ret = 0; ++out: ++ return ret; ++} ++ ++/* Destroy the global mgmt_v3_timer lock list(dict) when ++ * glusterd cleanup is performed */ ++void ++glusterd_mgmt_v3_lock_timer_fini () ++{ ++ xlator_t *this = NULL; ++ glusterd_conf_t *priv = NULL; ++ ++ this = THIS; ++ GF_VALIDATE_OR_GOTO ("glusterd", this, out); ++ ++ priv = this->private; ++ GF_VALIDATE_OR_GOTO (this->name, priv, out); ++ ++ if (priv->mgmt_v3_lock_timer) ++ dict_unref (priv->mgmt_v3_lock_timer); ++out: ++ return; ++} ++ + int32_t + glusterd_get_mgmt_v3_lock_owner (char *key, uuid_t *uuid) + { +@@ -513,17 +557,23 @@ int32_t + glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, uint32_t *op_errno, + char *type) + { +- char key[PATH_MAX] = ""; +- int32_t ret = -1; +- glusterd_mgmt_v3_lock_obj *lock_obj = NULL; +- glusterd_conf_t *priv = NULL; +- gf_boolean_t is_valid = _gf_true; +- uuid_t owner = {0}; +- xlator_t *this = NULL; +- char *bt = NULL; ++ char key[PATH_MAX] = ""; ++ int32_t ret = -1; ++ glusterd_mgmt_v3_lock_obj *lock_obj = NULL; ++ glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL; ++ glusterd_conf_t *priv = NULL; ++ gf_boolean_t is_valid = _gf_true; ++ uuid_t owner = {0}; ++ xlator_t *this = NULL; ++ char *bt = NULL; ++ struct timespec delay = {0}; ++ char *key_dup = NULL; ++ glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL; ++ xlator_t *mgmt_lock_timer_xl = NULL; + + this = THIS; + GF_ASSERT (this); ++ + priv = this->private; + GF_ASSERT (priv); + +@@ -594,6 +644,42 @@ glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, uint32_t *op_errno, + goto out; + } + ++ mgmt_lock_timer = GF_CALLOC (1, sizeof(glusterd_mgmt_v3_lock_timer), ++ gf_common_mt_mgmt_v3_lock_timer_t); ++ ++ if (!mgmt_lock_timer) { ++ ret = -1; ++ goto out; ++ } ++ ++ mgmt_lock_timer->xl = THIS; ++ key_dup = gf_strdup (key); ++ delay.tv_sec = priv->mgmt_v3_lock_timeout; ++ delay.tv_nsec = 0; ++ ++ ret = -1; ++ mgmt_lock_timer_xl = mgmt_lock_timer->xl; ++ GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_xl, out); ++ ++ mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx; ++ GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_ctx, out); ++ ++ mgmt_lock_timer->timer = gf_timer_call_after ++ (mgmt_lock_timer_ctx, delay, ++ gd_mgmt_v3_unlock_timer_cbk, ++ key_dup); ++ ++ ret = dict_set_bin (priv->mgmt_v3_lock_timer, key, mgmt_lock_timer, ++ sizeof (glusterd_mgmt_v3_lock_timer)); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_DICT_SET_FAILED, ++ "Unable to set timer in mgmt_v3 lock"); ++ GF_FREE (mgmt_lock_timer); ++ goto out; ++ } ++ ++ + /* Saving the backtrace into the pre-allocated buffer, ctx->btbuf*/ + if ((bt = gf_backtrace_save (NULL))) { + snprintf 
(key, sizeof (key), "debug.last-success-bt-%s-%s", +@@ -617,18 +703,98 @@ out: + return ret; + } + ++/* ++ * This call back will ensure to unlock the lock_obj, in case we hit a situation ++ * where unlocking failed and stale lock exist*/ ++void ++gd_mgmt_v3_unlock_timer_cbk (void *data) ++{ ++ xlator_t *this = NULL; ++ glusterd_conf_t *conf = NULL; ++ glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL; ++ char *key = NULL; ++ char *type = NULL; ++ char bt_key[PATH_MAX] = ""; ++ char name[PATH_MAX] = ""; ++ int32_t ret = -1; ++ glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL; ++ xlator_t *mgmt_lock_timer_xl = NULL; ++ ++ this = THIS; ++ GF_VALIDATE_OR_GOTO ("glusterd", this, out); ++ ++ conf = this->private; ++ GF_VALIDATE_OR_GOTO (this->name, conf, out); ++ ++ gf_log (THIS->name, GF_LOG_INFO, "In gd_mgmt_v3_unlock_timer_cbk"); ++ GF_ASSERT (NULL != data); ++ key = (char *)data; ++ ++ dict_del (conf->mgmt_v3_lock, key); ++ ++ type = strrchr (key, '_'); ++ strncpy (name, key, strlen (key) - strlen (type) - 1); ++ ++ ret = snprintf (bt_key, PATH_MAX, "debug.last-success-bt-%s-%s", ++ name, type + 1); ++ if (ret != strlen ("debug.last-success-bt-") + strlen (name) + ++ strlen (type)) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_CREATE_KEY_FAIL, "Unable to create backtrace " ++ "key"); ++ goto out; ++ } ++ ++ dict_del (conf->mgmt_v3_lock, bt_key); ++ ++ ret = dict_get_bin (conf->mgmt_v3_lock_timer, key, ++ (void **)&mgmt_lock_timer); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_DICT_SET_FAILED, ++ "Unable to get lock owner in mgmt_v3 lock"); ++ goto out; ++ } ++ ++out: ++ if (mgmt_lock_timer->timer) { ++ mgmt_lock_timer_xl = mgmt_lock_timer->xl; ++ GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_xl, ++ ret_function); ++ ++ mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx; ++ GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_ctx, ++ ret_function); ++ ++ gf_timer_call_cancel (mgmt_lock_timer_ctx, ++ mgmt_lock_timer->timer); ++ GF_FREE(key); ++ dict_del (conf->mgmt_v3_lock_timer, bt_key); ++ mgmt_lock_timer->timer = NULL; ++ } ++ ++ret_function: ++ ++ return; ++} ++ + int32_t + glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type) + { +- char key[PATH_MAX] = ""; +- int32_t ret = -1; +- gf_boolean_t is_valid = _gf_true; +- glusterd_conf_t *priv = NULL; +- uuid_t owner = {0}; +- xlator_t *this = NULL; ++ char key[PATH_MAX] = ""; ++ char key_dup[PATH_MAX] = ""; ++ int32_t ret = -1; ++ gf_boolean_t is_valid = _gf_true; ++ glusterd_conf_t *priv = NULL; ++ glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL; ++ uuid_t owner = {0}; ++ xlator_t *this = NULL; ++ glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL; ++ xlator_t *mgmt_lock_timer_xl = NULL; + + this = THIS; + GF_ASSERT (this); ++ + priv = this->private; + GF_ASSERT (priv); + +@@ -657,6 +823,7 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type) + ret = -1; + goto out; + } ++ strncpy (key_dup, key, strlen(key)); + + gf_msg_debug (this->name, 0, + "Trying to release lock of %s %s for %s as %s", +@@ -690,6 +857,15 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type) + /* Removing the mgmt_v3 lock from the global list */ + dict_del (priv->mgmt_v3_lock, key); + ++ ret = dict_get_bin (priv->mgmt_v3_lock_timer, key, ++ (void **)&mgmt_lock_timer); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_DICT_SET_FAILED, ++ "Unable to get mgmt lock key in mgmt_v3 lock"); ++ goto out; ++ } ++ + /* Remove the backtrace key as well */ + ret = snprintf (key, sizeof(key), 
"debug.last-success-bt-%s-%s", name, + type); +@@ -708,7 +884,22 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type) + type, name); + + ret = 0; ++ /* Release owner refernce which was held during lock */ ++ if (mgmt_lock_timer->timer) { ++ ret = -1; ++ mgmt_lock_timer_xl = mgmt_lock_timer->xl; ++ GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_xl, out); ++ ++ mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx; ++ GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_ctx, out); ++ ret = 0; ++ gf_timer_call_cancel (mgmt_lock_timer_ctx, ++ mgmt_lock_timer->timer); ++ dict_del (priv->mgmt_v3_lock_timer, key_dup); ++ mgmt_lock_timer->timer = NULL; ++ } + out: ++ + gf_msg_trace (this->name, 0, "Returning %d", ret); + return ret; + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h +index 437053d..226d5c6 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-locks.h ++++ b/xlators/mgmt/glusterd/src/glusterd-locks.h +@@ -14,6 +14,11 @@ typedef struct glusterd_mgmt_v3_lock_object_ { + uuid_t lock_owner; + } glusterd_mgmt_v3_lock_obj; + ++typedef struct glusterd_mgmt_v3_lock_timer_ { ++ gf_timer_t *timer; ++ xlator_t *xl; ++} glusterd_mgmt_v3_lock_timer; ++ + typedef struct glusterd_mgmt_v3_lock_valid_entities { + char *type; /* Entity type like vol, snap */ + gf_boolean_t default_value; /* The default value that * +@@ -29,6 +34,12 @@ void + glusterd_mgmt_v3_lock_fini (); + + int32_t ++glusterd_mgmt_v3_lock_timer_init (); ++ ++void ++glusterd_mgmt_v3_lock_timer_fini (); ++ ++int32_t + glusterd_get_mgmt_v3_lock_owner (char *volname, uuid_t *uuid); + + int32_t +@@ -44,4 +55,6 @@ glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid, uint32_t *op_errno); + int32_t + glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid); + ++void ++gd_mgmt_v3_unlock_timer_cbk(void *data); + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c +index 6ce4156..ed01b93 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.c ++++ b/xlators/mgmt/glusterd/src/glusterd.c +@@ -1858,14 +1858,22 @@ init (xlator_t *this) + gf_msg (this->name, GF_LOG_INFO, 0, + GD_MSG_DICT_SET_FAILED, + "base-port override: %d", conf->base_port); +- } +- conf->max_port = GF_PORT_MAX; +- if (dict_get_uint32 (this->options, "max-port", +- &conf->max_port) == 0) { ++ } ++ conf->max_port = GF_PORT_MAX; ++ if (dict_get_uint32 (this->options, "max-port", ++ &conf->max_port) == 0) { + gf_msg (this->name, GF_LOG_INFO, 0, + GD_MSG_DICT_SET_FAILED, + "max-port override: %d", conf->max_port); +- } ++ } ++ ++ conf->mgmt_v3_lock_timeout = GF_LOCK_TIMER; ++ if (dict_get_uint32 (this->options, "lock-timer", ++ &conf->mgmt_v3_lock_timeout) == 0) { ++ gf_msg (this->name, GF_LOG_INFO, 0, ++ GD_MSG_DICT_SET_FAILED, ++ "lock-timer override: %d", conf->mgmt_v3_lock_timeout); ++ } + + /* Set option to run bricks on valgrind if enabled in glusterd.vol */ + this->ctx->cmd_args.valgrind = valgrind; +@@ -1891,6 +1899,7 @@ init (xlator_t *this) + + this->private = conf; + glusterd_mgmt_v3_lock_init (); ++ glusterd_mgmt_v3_lock_timer_init(); + glusterd_txn_opinfo_dict_init (); + glusterd_svcs_build (); + +@@ -2048,6 +2057,7 @@ fini (xlator_t *this) + gf_store_handle_destroy (conf->handle); + glusterd_sm_tr_log_delete (&conf->op_sm_log); + glusterd_mgmt_v3_lock_fini (); ++ glusterd_mgmt_v3_lock_timer_fini (); + glusterd_txn_opinfo_dict_fini (); + GF_FREE (conf); + +@@ -2171,6 +2181,14 @@ struct volume_options options[] = { + .max = GF_PORT_MAX, + .description = "Sets the 
max port for portmap query"
+ },
++ { .key = {"mgmt-v3-lock-timeout"},
++ .type = GF_OPTION_TYPE_INT,
++ .max = 600,
++ .description = "Sets the mgmt-v3-lock-timeout for transactions. "
++ "Specifies the default timeout value after which "
++ "a lock acquired while performing a transaction "
++ "will be released."
++ },
+ { .key = {"snap-brick-path"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "directory where the bricks for the snapshots will be created"
+diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
+index 291f2f7..59b1775 100644
+--- a/xlators/mgmt/glusterd/src/glusterd.h
++++ b/xlators/mgmt/glusterd/src/glusterd.h
+@@ -174,6 +174,7 @@ typedef struct {
+ * cluster with no
+ * transaction ids */
+
++ dict_t *mgmt_v3_lock_timer;
+ struct cds_list_head mount_specs;
+ pthread_t brick_thread;
+ void *hooks_priv;
+@@ -195,6 +196,7 @@ typedef struct {
+ uint32_t generation;
+ int32_t workers;
+ uint32_t blockers;
++ uint32_t mgmt_v3_lock_timeout;
+ } glusterd_conf_t;
+
+
+--
+1.8.3.1
+
diff --git a/SOURCES/0049-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch b/SOURCES/0049-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch
new file mode 100644
index 0000000..c112911
--- /dev/null
+++ b/SOURCES/0049-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch
@@ -0,0 +1,514 @@
+From 2278782dddf80611c7305ed982532647e38b5664 Mon Sep 17 00:00:00 2001
+From: Jiffin Tony Thottan
+Date: Mon, 16 Oct 2017 14:18:31 +0530
+Subject: [PATCH 49/74] Revert "packaging: (ganesha) remove glusterfs-ganesha
+ subpackage and related files)"
+
+This reverts commit 0cf2963f12a8b540a7042605d8c79f638fdf6cee.
+
+Change-Id: Id6e7585021bd4dd78a59580cfa4838bdd4e539a0
+Signed-off-by: Jiffin Tony Thottan
+---
+ configure.ac | 3 +
+ extras/Makefile.am | 2 +-
+ extras/ganesha/Makefile.am | 2 +
+ extras/ganesha/config/Makefile.am | 4 +
+ extras/ganesha/config/ganesha-ha.conf.sample | 19 ++++
+ extras/ganesha/scripts/Makefile.am | 4 +
+ extras/ganesha/scripts/create-export-ganesha.sh | 91 +++++++++++++++
+ extras/ganesha/scripts/dbus-send.sh | 61 +++++++++++
+ extras/ganesha/scripts/generate-epoch.py | 48 ++++++++
+ extras/hook-scripts/start/post/Makefile.am | 2 +-
+ extras/hook-scripts/start/post/S31ganesha-start.sh | 122 +++++++++++++++++++++
+ glusterfs.spec.in | 10 +-
+ 12 files changed, 362 insertions(+), 6 deletions(-)
+ create mode 100644 extras/ganesha/Makefile.am
+ create mode 100644 extras/ganesha/config/Makefile.am
+ create mode 100644 extras/ganesha/config/ganesha-ha.conf.sample
+ create mode 100644 extras/ganesha/scripts/Makefile.am
+ create mode 100755 extras/ganesha/scripts/create-export-ganesha.sh
+ create mode 100755 extras/ganesha/scripts/dbus-send.sh
+ create mode 100755 extras/ganesha/scripts/generate-epoch.py
+ create mode 100755 extras/hook-scripts/start/post/S31ganesha-start.sh
+
+diff --git a/configure.ac b/configure.ac
+index dfccd40..c8e6e44 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -207,6 +207,9 @@ AC_CONFIG_FILES([Makefile
+ extras/init.d/glustereventsd-Debian
+ extras/init.d/glustereventsd-Redhat
+ extras/init.d/glustereventsd-FreeBSD
++ extras/ganesha/Makefile
++ extras/ganesha/config/Makefile
++ extras/ganesha/scripts/Makefile
+ extras/systemd/Makefile
+ extras/systemd/glusterd.service
+ extras/systemd/glustereventsd.service
+diff --git a/extras/Makefile.am b/extras/Makefile.am
+index 6863772..2812a4c 100644
+--- a/extras/Makefile.am
++++ b/extras/Makefile.am
+@@ -8,7 +8,7 @@ EditorModedir = $(docdir)
+ EditorMode_DATA = 
glusterfs-mode.el glusterfs.vim + + SUBDIRS = init.d systemd benchmarking hook-scripts $(OCF_SUBDIR) LinuxRPM \ +- $(GEOREP_EXTRAS_SUBDIR) snap_scheduler firewalld cliutils ++ $(GEOREP_EXTRAS_SUBDIR) ganesha snap_scheduler firewalld cliutils + + confdir = $(sysconfdir)/glusterfs + conf_DATA = glusterfs-logrotate gluster-rsyslog-7.2.conf gluster-rsyslog-5.8.conf \ +diff --git a/extras/ganesha/Makefile.am b/extras/ganesha/Makefile.am +new file mode 100644 +index 0000000..542de68 +--- /dev/null ++++ b/extras/ganesha/Makefile.am +@@ -0,0 +1,2 @@ ++SUBDIRS = scripts config ++CLEANFILES = +diff --git a/extras/ganesha/config/Makefile.am b/extras/ganesha/config/Makefile.am +new file mode 100644 +index 0000000..c729273 +--- /dev/null ++++ b/extras/ganesha/config/Makefile.am +@@ -0,0 +1,4 @@ ++EXTRA_DIST= ganesha-ha.conf.sample ++ ++confdir = $(sysconfdir)/ganesha ++conf_DATA = ganesha-ha.conf.sample +diff --git a/extras/ganesha/config/ganesha-ha.conf.sample b/extras/ganesha/config/ganesha-ha.conf.sample +new file mode 100644 +index 0000000..c22892b +--- /dev/null ++++ b/extras/ganesha/config/ganesha-ha.conf.sample +@@ -0,0 +1,19 @@ ++# Name of the HA cluster created. ++# must be unique within the subnet ++HA_NAME="ganesha-ha-360" ++# ++# N.B. you may use short names or long names; you may not use IP addrs. ++# Once you select one, stay with it as it will be mildly unpleasant to ++# clean up if you switch later on. Ensure that all names - short and/or ++# long - are in DNS or /etc/hosts on all machines in the cluster. ++# ++# The subset of nodes of the Gluster Trusted Pool that form the ganesha ++# HA cluster. Hostname is specified. ++HA_CLUSTER_NODES="server1,server2,..." ++#HA_CLUSTER_NODES="server1.lab.redhat.com,server2.lab.redhat.com,..." ++# ++# Virtual IPs for each of the nodes specified above. ++VIP_server1="10.0.2.1" ++VIP_server2="10.0.2.2" ++#VIP_server1_lab_redhat_com="10.0.2.1" ++#VIP_server2_lab_redhat_com="10.0.2.2" +diff --git a/extras/ganesha/scripts/Makefile.am b/extras/ganesha/scripts/Makefile.am +new file mode 100644 +index 0000000..9ee8867 +--- /dev/null ++++ b/extras/ganesha/scripts/Makefile.am +@@ -0,0 +1,4 @@ ++EXTRA_DIST= create-export-ganesha.sh generate-epoch.py dbus-send.sh ++ ++scriptsdir = $(libexecdir)/ganesha ++scripts_SCRIPTS = create-export-ganesha.sh generate-epoch.py +diff --git a/extras/ganesha/scripts/create-export-ganesha.sh b/extras/ganesha/scripts/create-export-ganesha.sh +new file mode 100755 +index 0000000..1ffba42 +--- /dev/null ++++ b/extras/ganesha/scripts/create-export-ganesha.sh +@@ -0,0 +1,91 @@ ++#!/bin/bash ++ ++#This script is called by glusterd when the user ++#tries to export a volume via NFS-Ganesha. ++#An export file specific to a volume ++#is created in GANESHA_DIR/exports. ++ ++# Try loading the config from any of the distro ++# specific configuration locations ++if [ -f /etc/sysconfig/ganesha ] ++ then ++ . /etc/sysconfig/ganesha ++fi ++if [ -f /etc/conf.d/ganesha ] ++ then ++ . /etc/conf.d/ganesha ++fi ++if [ -f /etc/default/ganesha ] ++ then ++ . /etc/default/ganesha ++fi ++ ++GANESHA_DIR=${1%/} ++OPTION=$2 ++VOL=$3 ++CONF=$GANESHA_DIR"/ganesha.conf" ++declare -i EXPORT_ID ++ ++function check_cmd_status() ++{ ++ if [ "$1" != "0" ] ++ then ++ rm -rf $GANESHA_DIR/exports/export.$VOL.conf ++ sed -i /$VOL.conf/d $CONF ++ exit 1 ++ fi ++} ++ ++ ++if [ ! 
-d "$GANESHA_DIR/exports" ]; ++ then ++ mkdir $GANESHA_DIR/exports ++ check_cmd_status `echo $?` ++fi ++ ++function write_conf() ++{ ++echo -e "# WARNING : Using Gluster CLI will overwrite manual ++# changes made to this file. To avoid it, edit the ++# file and run ganesha-ha.sh --refresh-config." ++ ++echo "EXPORT{" ++echo " Export_Id = 2;" ++echo " Path = \"/$VOL\";" ++echo " FSAL {" ++echo " name = "GLUSTER";" ++echo " hostname=\"localhost\";" ++echo " volume=\"$VOL\";" ++echo " }" ++echo " Access_type = RW;" ++echo " Disable_ACL = true;" ++echo ' Squash="No_root_squash";' ++echo " Pseudo=\"/$VOL\";" ++echo ' Protocols = "3", "4" ;' ++echo ' Transports = "UDP","TCP";' ++echo ' SecType = "sys";' ++echo " }" ++} ++if [ "$OPTION" = "on" ]; ++then ++ if ! (cat $CONF | grep $VOL.conf\"$ ) ++ then ++ write_conf $@ > $GANESHA_DIR/exports/export.$VOL.conf ++ echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF ++ count=`ls -l $GANESHA_DIR/exports/*.conf | wc -l` ++ if [ "$count" = "1" ] ; then ++ EXPORT_ID=2 ++ else ++ EXPORT_ID=`cat $GANESHA_DIR/.export_added` ++ check_cmd_status `echo $?` ++ EXPORT_ID=EXPORT_ID+1 ++ sed -i s/Export_Id.*/"Export_Id= $EXPORT_ID ;"/ \ ++ $GANESHA_DIR/exports/export.$VOL.conf ++ check_cmd_status `echo $?` ++ fi ++ echo $EXPORT_ID > $GANESHA_DIR/.export_added ++ fi ++else ++ rm -rf $GANESHA_DIR/exports/export.$VOL.conf ++ sed -i /$VOL.conf/d $CONF ++fi +diff --git a/extras/ganesha/scripts/dbus-send.sh b/extras/ganesha/scripts/dbus-send.sh +new file mode 100755 +index 0000000..c071d03 +--- /dev/null ++++ b/extras/ganesha/scripts/dbus-send.sh +@@ -0,0 +1,61 @@ ++#!/bin/bash ++ ++# Try loading the config from any of the distro ++# specific configuration locations ++if [ -f /etc/sysconfig/ganesha ] ++ then ++ . /etc/sysconfig/ganesha ++fi ++if [ -f /etc/conf.d/ganesha ] ++ then ++ . /etc/conf.d/ganesha ++fi ++if [ -f /etc/default/ganesha ] ++ then ++ . /etc/default/ganesha ++fi ++ ++GANESHA_DIR=${1%/} ++OPTION=$2 ++VOL=$3 ++CONF=$GANESHA_DIR"/ganesha.conf" ++ ++function check_cmd_status() ++{ ++ if [ "$1" != "0" ] ++ then ++ logger "dynamic export failed on node :${hostname -s}" ++ fi ++} ++ ++#This function keeps track of export IDs and increments it with every new entry ++function dynamic_export_add() ++{ ++ dbus-send --system \ ++--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \ ++org.ganesha.nfsd.exportmgr.AddExport string:$GANESHA_DIR/exports/export.$VOL.conf \ ++string:"EXPORT(Path=/$VOL)" ++ check_cmd_status `echo $?` ++} ++ ++#This function removes an export dynamically(uses the export_id of the export) ++function dynamic_export_remove() ++{ ++ removed_id=`cat $GANESHA_DIR/exports/export.$VOL.conf |\ ++grep Export_Id | awk -F"[=,;]" '{print$2}'| tr -d '[[:space:]]'` ++ dbus-send --print-reply --system \ ++--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \ ++org.ganesha.nfsd.exportmgr.RemoveExport uint16:$removed_id ++ check_cmd_status `echo $?` ++} ++ ++if [ "$OPTION" = "on" ]; ++then ++ dynamic_export_add $@ ++fi ++ ++if [ "$OPTION" = "off" ]; ++then ++ dynamic_export_remove $@ ++fi ++ +diff --git a/extras/ganesha/scripts/generate-epoch.py b/extras/ganesha/scripts/generate-epoch.py +new file mode 100755 +index 0000000..5db5e56 +--- /dev/null ++++ b/extras/ganesha/scripts/generate-epoch.py +@@ -0,0 +1,48 @@ ++#!/usr/bin/python ++# ++# Copyright (c) 2016 Red Hat, Inc. ++# This file is part of GlusterFS. 
++# ++# This file is licensed to you under your choice of the GNU Lesser ++# General Public License, version 3 or any later version (LGPLv3 or ++# later), or the GNU General Public License, version 2 (GPLv2), in all ++# cases as published by the Free Software Foundation. ++# ++# Generates unique epoch value on each gluster node to be used by ++# nfs-ganesha service on that node. ++# ++# Configure 'EPOCH_EXEC' option to this script path in ++# '/etc/sysconfig/ganesha' file used by nfs-ganesha service. ++# ++# Construct epoch as follows - ++# first 32-bit contains the now() time ++# rest 32-bit value contains the local glusterd node uuid ++ ++import time ++import binascii ++ ++# Calculate the now() time into a 64-bit integer value ++def epoch_now(): ++ epoch_time = int(time.mktime(time.localtime())) << 32 ++ return epoch_time ++ ++# Read glusterd UUID and extract first 32-bit of it ++def epoch_uuid(): ++ file_name = '/var/lib/glusterd/glusterd.info' ++ ++ for line in open(file_name): ++ if "UUID" in line: ++ glusterd_uuid = line.split('=')[1].strip() ++ ++ uuid_bin = binascii.unhexlify(glusterd_uuid.replace("-","")) ++ ++ epoch_uuid = int(uuid_bin.encode('hex'), 32) & 0xFFFF0000 ++ return epoch_uuid ++ ++# Construct epoch as follows - ++# first 32-bit contains the now() time ++# rest 32-bit value contains the local glusterd node uuid ++epoch = (epoch_now() | epoch_uuid()) ++print str(epoch) ++ ++exit(0) +diff --git a/extras/hook-scripts/start/post/Makefile.am b/extras/hook-scripts/start/post/Makefile.am +index 384a582..03bb300 100644 +--- a/extras/hook-scripts/start/post/Makefile.am ++++ b/extras/hook-scripts/start/post/Makefile.am +@@ -1,4 +1,4 @@ +-EXTRA_DIST = S29CTDBsetup.sh S30samba-start.sh ++EXTRA_DIST = S29CTDBsetup.sh S30samba-start.sh S31ganesha-start.sh + + hookdir = $(GLUSTERD_WORKDIR)/hooks/1/start/post/ + hook_SCRIPTS = $(EXTRA_DIST) +diff --git a/extras/hook-scripts/start/post/S31ganesha-start.sh b/extras/hook-scripts/start/post/S31ganesha-start.sh +new file mode 100755 +index 0000000..90ba6bc +--- /dev/null ++++ b/extras/hook-scripts/start/post/S31ganesha-start.sh +@@ -0,0 +1,122 @@ ++#!/bin/bash ++PROGNAME="Sganesha-start" ++OPTSPEC="volname:,gd-workdir:" ++VOL= ++declare -i EXPORT_ID ++ganesha_key="ganesha.enable" ++GANESHA_DIR="/var/run/gluster/shared_storage/nfs-ganesha" ++CONF1="$GANESHA_DIR/ganesha.conf" ++GLUSTERD_WORKDIR= ++ ++function parse_args () ++{ ++ ARGS=$(getopt -l $OPTSPEC -o "o" -name $PROGNAME $@) ++ eval set -- "$ARGS" ++ ++ while true; do ++ case $1 in ++ --volname) ++ shift ++ VOL=$1 ++ ;; ++ --gd-workdir) ++ shift ++ GLUSTERD_WORKDIR=$1 ++ ;; ++ *) ++ shift ++ break ++ ;; ++ esac ++ shift ++ done ++} ++ ++ ++ ++#This function generates a new export entry as export.volume_name.conf ++function write_conf() ++{ ++echo -e "# WARNING : Using Gluster CLI will overwrite manual ++# changes made to this file. To avoid it, edit the ++# file, copy it over to all the NFS-Ganesha nodes ++# and run ganesha-ha.sh --refresh-config." 
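++# The stanza emitted below is a minimal per-volume EXPORT block.
++# Export_Id = 2 is only a placeholder: the caller rewrites it with
++# sed, using the next free ID tracked in $GANESHA_DIR/.export_added,
++# before asking NFS-Ganesha to pick the export up over D-Bus.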
++ ++echo "EXPORT{" ++echo " Export_Id = 2;" ++echo " Path = \"/$VOL\";" ++echo " FSAL {" ++echo " name = \"GLUSTER\";" ++echo " hostname=\"localhost\";" ++echo " volume=\"$VOL\";" ++echo " }" ++echo " Access_type = RW;" ++echo " Disable_ACL = true;" ++echo " Squash=\"No_root_squash\";" ++echo " Pseudo=\"/$VOL\";" ++echo " Protocols = \"3\", \"4\" ;" ++echo " Transports = \"UDP\",\"TCP\";" ++echo " SecType = \"sys\";" ++echo "}" ++} ++ ++#It adds the export dynamically by sending dbus signals ++function export_add() ++{ ++ dbus-send --print-reply --system --dest=org.ganesha.nfsd \ ++/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport \ ++string:$GANESHA_DIR/exports/export.$VOL.conf string:"EXPORT(Export_Id=$EXPORT_ID)" ++ ++} ++ ++# based on src/scripts/ganeshactl/Ganesha/export_mgr.py ++function is_exported() ++{ ++ local volume="${1}" ++ ++ dbus-send --type=method_call --print-reply --system \ ++ --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \ ++ org.ganesha.nfsd.exportmgr.ShowExports \ ++ | grep -w -q "/${volume}" ++ ++ return $? ++} ++ ++# Check the info file (contains the volume options) to see if Ganesha is ++# enabled for this volume. ++function ganesha_enabled() ++{ ++ local volume="${1}" ++ local info_file="${GLUSTERD_WORKDIR}/vols/${VOL}/info" ++ local enabled="off" ++ ++ enabled=$(grep -w ${ganesha_key} ${info_file} | cut -d"=" -f2) ++ ++ [ "${enabled}" == "on" ] ++ ++ return $? ++} ++ ++parse_args $@ ++ ++if ganesha_enabled ${VOL} && ! is_exported ${VOL} ++then ++ if [ ! -e ${GANESHA_DIR}/exports/export.${VOL}.conf ] ++ then ++ #Remove export entry from nfs-ganesha.conf ++ sed -i /$VOL.conf/d $CONF1 ++ write_conf ${VOL} > ${GANESHA_DIR}/exports/export.${VOL}.conf ++ EXPORT_ID=`cat $GANESHA_DIR/.export_added` ++ EXPORT_ID=EXPORT_ID+1 ++ echo $EXPORT_ID > $GANESHA_DIR/.export_added ++ sed -i s/Export_Id.*/"Export_Id=$EXPORT_ID;"/ \ ++ $GANESHA_DIR/exports/export.$VOL.conf ++ echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF1 ++ else ++ EXPORT_ID=$(grep ^[[:space:]]*Export_Id $GANESHA_DIR/exports/export.$VOL.conf |\ ++ awk -F"[=,;]" '{print $2}' | tr -d '[[:space:]]') ++ fi ++ export_add $VOL ++fi ++ ++exit 0 +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 10339fe..6e710e5 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -262,7 +262,6 @@ Obsoletes: hekafs + Obsoletes: %{name}-common < %{version}-%{release} + Obsoletes: %{name}-core < %{version}-%{release} + Obsoletes: %{name}-ufo +-Obsoletes: %{name}-ganesha + Provides: %{name}-common = %{version}-%{release} + Provides: %{name}-core = %{version}-%{release} + +@@ -1275,6 +1274,9 @@ exit 0 + + %if ( 0%{?_build_server} ) + %files ganesha ++%{_sysconfdir}/ganesha/* ++%{_libexecdir}/ganesha/* ++%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh + %endif + + %if ( 0%{?_build_server} ) +@@ -2121,6 +2123,9 @@ fi + + %changelog + * Mon Nov 13 2017 Jiffin Tony Thottan ++- Adding ganesha bits back in gluster repository #1499784 ++ ++* Mon Nov 13 2017 Jiffin Tony Thottan + - DOWNSTREAM ONLY - revert of 83abcb(gnfs in an optional subpackage) + + * Tue Oct 10 2017 Milind Changire +@@ -2178,9 +2183,6 @@ fi + * Thu Feb 16 2017 Niels de Vos + - Obsolete and Provide python-gluster for upgrading from glusterfs < 3.10 + +-* Tue Feb 7 2017 Kaleb S. 
KEITHLEY +-- remove ganesha (#1418417) +- + * Wed Feb 1 2017 Poornima G + - Install /var/lib/glusterd/groups/metadata-cache by default + +-- +1.8.3.1 + diff --git a/SOURCES/0050-Revert-glusterd-storhaug-remove-ganesha.patch b/SOURCES/0050-Revert-glusterd-storhaug-remove-ganesha.patch new file mode 100644 index 0000000..8a53d04 --- /dev/null +++ b/SOURCES/0050-Revert-glusterd-storhaug-remove-ganesha.patch @@ -0,0 +1,2012 @@ +From c45cba4e8959cc3224c293423fdc1f33d3e657c8 Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Mon, 16 Oct 2017 14:24:29 +0530 +Subject: [PATCH 50/74] Revert "glusterd: (storhaug) remove ganesha" + +This reverts commit 843e1b04b554ab887ec656ae7b468bb93ee4e2f7. + +Change-Id: I06b5450344c33f26da3d94b6f67051d41dfbba17 +Signed-off-by: Jiffin Tony Thottan +--- + cli/src/cli-cmd-global.c | 54 ++ + cli/src/cli-cmd-parser.c | 106 +++ + cli/src/cli-cmd.c | 3 +- + cli/src/cli-rpc-ops.c | 79 ++ + cli/src/cli.h | 3 + + xlators/mgmt/glusterd/src/Makefile.am | 4 +- + xlators/mgmt/glusterd/src/glusterd-errno.h | 2 +- + xlators/mgmt/glusterd/src/glusterd-ganesha.c | 898 +++++++++++++++++++++ + xlators/mgmt/glusterd/src/glusterd-handler.c | 77 ++ + xlators/mgmt/glusterd/src/glusterd-messages.h | 8 + + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 47 ++ + .../mgmt/glusterd/src/glusterd-snapshot-utils.c | 196 +++++ + .../mgmt/glusterd/src/glusterd-snapshot-utils.h | 7 + + xlators/mgmt/glusterd/src/glusterd-snapshot.c | 96 +++ + xlators/mgmt/glusterd/src/glusterd-store.h | 1 + + xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 34 + + xlators/mgmt/glusterd/src/glusterd-volume-set.c | 6 + + xlators/mgmt/glusterd/src/glusterd.h | 16 +- + 18 files changed, 1633 insertions(+), 4 deletions(-) + create mode 100644 xlators/mgmt/glusterd/src/glusterd-ganesha.c + +diff --git a/cli/src/cli-cmd-global.c b/cli/src/cli-cmd-global.c +index 9873192..881506b 100644 +--- a/cli/src/cli-cmd-global.c ++++ b/cli/src/cli-cmd-global.c +@@ -32,6 +32,8 @@ extern rpc_clnt_prog_t *cli_rpc_prog; + int + cli_cmd_global_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word, + const char **words, int wordcount); ++int cli_cmd_ganesha_cbk (struct cli_state *state, struct cli_cmd_word *word, ++ const char **words, int wordcount); + int + cli_cmd_get_state_cbk (struct cli_state *state, struct cli_cmd_word *word, + const char **words, int wordcount); +@@ -46,6 +48,10 @@ struct cli_cmd global_cmds[] = { + cli_cmd_get_state_cbk, + "Get local state representation of mentioned daemon", + }, ++ { "nfs-ganesha {enable| disable} ", ++ cli_cmd_ganesha_cbk, ++ "Enable/disable NFS-Ganesha support", ++ }, + {NULL, NULL, NULL} + }; + +@@ -86,6 +92,54 @@ out: + + } + ++int cli_cmd_ganesha_cbk (struct cli_state *state, struct cli_cmd_word *word, ++ const char **words, int wordcount) ++ ++{ ++ int sent = 0; ++ int parse_error = 0; ++ int ret = -1; ++ rpc_clnt_procedure_t *proc = NULL; ++ call_frame_t *frame = NULL; ++ dict_t *options = NULL; ++ cli_local_t *local = NULL; ++ char *op_errstr = NULL; ++ ++ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GANESHA]; ++ ++ frame = create_frame (THIS, THIS->ctx->pool); ++ if (!frame) ++ goto out; ++ ++ ret = cli_cmd_ganesha_parse (state, words, wordcount, ++ &options, &op_errstr); ++ if (ret) { ++ if (op_errstr) { ++ cli_err ("%s", op_errstr); ++ GF_FREE (op_errstr); ++ } else ++ cli_usage_out (word->pattern); ++ parse_error = 1; ++ goto out; ++ } ++ ++ CLI_LOCAL_INIT (local, words, frame, options); ++ ++ if (proc->fn) { ++ ret = proc->fn (frame, THIS, options); ++ } ++ ++out: ++ 
if (ret) { ++ cli_cmd_sent_status_get (&sent); ++ if ((sent == 0) && (parse_error == 0)) ++ cli_out ("Setting global option failed"); ++ } ++ ++ CLI_STACK_DESTROY (frame); ++ return ret; ++} ++ + int + cli_cmd_get_state_cbk (struct cli_state *state, struct cli_cmd_word *word, + const char **words, int wordcount) +diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c +index 216e050..a4c601b 100644 +--- a/cli/src/cli-cmd-parser.c ++++ b/cli/src/cli-cmd-parser.c +@@ -840,6 +840,112 @@ out: + return ret; + } + ++/* Parsing global option for NFS-Ganesha config ++ * gluster nfs-ganesha enable/disable */ ++ ++int32_t ++cli_cmd_ganesha_parse (struct cli_state *state, ++ const char **words, int wordcount, ++ dict_t **options, char **op_errstr) ++{ ++ dict_t *dict = NULL; ++ int ret = -1; ++ char *key = NULL; ++ char *value = NULL; ++ char *w = NULL; ++ char *opwords[] = { "enable", "disable", NULL }; ++ const char *question = NULL; ++ gf_answer_t answer = GF_ANSWER_NO; ++ ++ ++ GF_ASSERT (words); ++ GF_ASSERT (options); ++ ++ dict = dict_new (); ++ ++ if (!dict) ++ goto out; ++ ++ if (wordcount != 2) ++ goto out; ++ ++ key = (char *) words[0]; ++ value = (char *) words[1]; ++ ++ if (!key || !value) { ++ cli_out ("Usage : nfs-ganesha "); ++ ret = -1; ++ goto out; ++ } ++ ++ ret = gf_strip_whitespace (value, strlen (value)); ++ if (ret == -1) ++ goto out; ++ ++ if (strcmp (key, "nfs-ganesha")) { ++ gf_asprintf (op_errstr, "Global option: error: ' %s '" ++ "is not a valid global option.", key); ++ ret = -1; ++ goto out; ++ } ++ ++ w = str_getunamb (value, opwords); ++ if (!w) { ++ cli_out ("Invalid global option \n" ++ "Usage : nfs-ganesha "); ++ ret = -1; ++ goto out; ++ } ++ ++ question = "Enabling NFS-Ganesha requires Gluster-NFS to be" ++ " disabled across the trusted pool. Do you " ++ "still want to continue?\n"; ++ ++ if (strcmp (value, "enable") == 0) { ++ answer = cli_cmd_get_confirmation (state, question); ++ if (GF_ANSWER_NO == answer) { ++ gf_log ("cli", GF_LOG_ERROR, "Global operation " ++ "cancelled, exiting"); ++ ret = -1; ++ goto out; ++ } ++ } ++ cli_out ("This will take a few minutes to complete. 
Please wait .."); ++ ++ ret = dict_set_str (dict, "key", key); ++ if (ret) { ++ gf_log (THIS->name, GF_LOG_ERROR, "dict set on key failed"); ++ goto out; ++ } ++ ++ ret = dict_set_str (dict, "value", value); ++ if (ret) { ++ gf_log (THIS->name, GF_LOG_ERROR, "dict set on value failed"); ++ goto out; ++ } ++ ++ ret = dict_set_str (dict, "globalname", "All"); ++ if (ret) { ++ gf_log (THIS->name, GF_LOG_ERROR, "dict set on global" ++ " key failed."); ++ goto out; ++ } ++ ++ ret = dict_set_int32 (dict, "hold_global_locks", _gf_true); ++ if (ret) { ++ gf_log (THIS->name, GF_LOG_ERROR, "dict set on global key " ++ "failed."); ++ goto out; ++ } ++ ++ *options = dict; ++out: ++ if (ret) ++ dict_unref (dict); ++ ++ return ret; ++} ++ + int32_t + cli_cmd_get_state_parse (struct cli_state *state, + const char **words, int wordcount, +diff --git a/cli/src/cli-cmd.c b/cli/src/cli-cmd.c +index 236009b..8a75041 100644 +--- a/cli/src/cli-cmd.c ++++ b/cli/src/cli-cmd.c +@@ -369,7 +369,8 @@ cli_cmd_submit (struct rpc_clnt* rpc, void *req, call_frame_t *frame, + unsigned timeout = 0; + + if ((GLUSTER_CLI_PROFILE_VOLUME == procnum) || +- (GLUSTER_CLI_HEAL_VOLUME == procnum)) ++ (GLUSTER_CLI_HEAL_VOLUME == procnum) || ++ (GLUSTER_CLI_GANESHA == procnum)) + timeout = cli_ten_minutes_timeout; + else + timeout = cli_default_conn_timeout; +diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c +index eb1ca77..67e29a0 100644 +--- a/cli/src/cli-rpc-ops.c ++++ b/cli/src/cli-rpc-ops.c +@@ -2232,6 +2232,60 @@ out: + return ret; + } + ++int ++gf_cli_ganesha_cbk (struct rpc_req *req, struct iovec *iov, ++ int count, void *myframe) ++{ ++ gf_cli_rsp rsp = {0,}; ++ int ret = -1; ++ dict_t *dict = NULL; ++ ++ GF_ASSERT (myframe); ++ ++ if (-1 == req->rpc_status) { ++ goto out; ++ } ++ ++ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); ++ if (ret < 0) { ++ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR, ++ "Failed to decode xdr response"); ++ goto out; ++ } ++ ++ gf_log ("cli", GF_LOG_DEBUG, "Received resp to ganesha"); ++ ++ dict = dict_new (); ++ ++ if (!dict) { ++ ret = -1; ++ goto out; ++ } ++ ++ ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); ++ if (ret) ++ goto out; ++ ++ if (rsp.op_ret) { ++ if (strcmp (rsp.op_errstr, "")) ++ cli_err ("nfs-ganesha: failed: %s", rsp.op_errstr); ++ else ++ cli_err ("nfs-ganesha: failed"); ++ } ++ ++ else { ++ cli_out("nfs-ganesha : success "); ++ } ++ ++ ret = rsp.op_ret; ++ ++out: ++ if (dict) ++ dict_unref (dict); ++ cli_cmd_broadcast_response (ret); ++ return ret; ++} ++ + char * + is_server_debug_xlator (void *myframe) + { +@@ -4840,6 +4894,30 @@ out: + } + + int32_t ++gf_cli_ganesha (call_frame_t *frame, xlator_t *this, void *data) ++{ ++ gf_cli_req req = { {0,} } ; ++ int ret = 0; ++ dict_t *dict = NULL; ++ ++ if (!frame || !this || !data) { ++ ret = -1; ++ goto out; ++ } ++ ++ dict = data; ++ ++ ret = cli_to_glusterd (&req, frame, gf_cli_ganesha_cbk, ++ (xdrproc_t) xdr_gf_cli_req, dict, ++ GLUSTER_CLI_GANESHA, this, cli_rpc_prog, ++ NULL); ++out: ++ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); ++ ++ return ret; ++} ++ ++int32_t + gf_cli_set_volume (call_frame_t *frame, xlator_t *this, + void *data) + { +@@ -12008,6 +12086,7 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = { + [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", gf_cli_sys_exec}, + [GLUSTER_CLI_SNAP] = {"SNAP", gf_cli_snapshot}, + [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER VOLUME", gf_cli_barrier_volume}, ++ [GLUSTER_CLI_GANESHA] = {"GANESHA", 
gf_cli_ganesha}, + [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", gf_cli_get_vol_opt}, + [GLUSTER_CLI_BITROT] = {"BITROT", gf_cli_bitrot}, + [GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", gf_cli_attach_tier}, +diff --git a/cli/src/cli.h b/cli/src/cli.h +index 68dcb8c..c9bf93d 100644 +--- a/cli/src/cli.h ++++ b/cli/src/cli.h +@@ -255,6 +255,9 @@ cli_cmd_bitrot_parse (const char **words, int wordcount, dict_t **opt); + int32_t + cli_cmd_volume_set_parse (struct cli_state *state, const char **words, + int wordcount, dict_t **options, char **op_errstr); ++int32_t ++cli_cmd_ganesha_parse (struct cli_state *state, const char **words, ++ int wordcount, dict_t **options, char **op_errstr); + + int32_t + cli_cmd_get_state_parse (struct cli_state *state, const char **words, +diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am +index 4858dee..23ebf37 100644 +--- a/xlators/mgmt/glusterd/src/Makefile.am ++++ b/xlators/mgmt/glusterd/src/Makefile.am +@@ -5,7 +5,7 @@ glusterd_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS) + glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \ + glusterd-op-sm.c glusterd-utils.c glusterd-rpc-ops.c \ + glusterd-store.c glusterd-handshake.c glusterd-pmap.c \ +- glusterd-volgen.c glusterd-rebalance.c \ ++ glusterd-volgen.c glusterd-rebalance.c glusterd-ganesha.c \ + glusterd-quota.c glusterd-bitrot.c glusterd-geo-rep.c \ + glusterd-replace-brick.c glusterd-log-ops.c glusterd-tier.c \ + glusterd-volume-ops.c glusterd-brick-ops.c glusterd-mountbroker.c \ +@@ -48,6 +48,8 @@ AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \ + -I$(CONTRIBDIR)/mount -I$(CONTRIBDIR)/userspace-rcu \ + -DSBIN_DIR=\"$(sbindir)\" -DDATADIR=\"$(localstatedir)\" \ + -DGSYNCD_PREFIX=\"$(GLUSTERFS_LIBEXECDIR)\" \ ++ -DCONFDIR=\"$(localstatedir)/run/gluster/shared_storage/nfs-ganesha\" \ ++ -DGANESHA_PREFIX=\"$(libexecdir)/ganesha\" \ + -DSYNCDAEMON_COMPILE=$(SYNCDAEMON_COMPILE) $(XML_CPPFLAGS) + + +diff --git a/xlators/mgmt/glusterd/src/glusterd-errno.h b/xlators/mgmt/glusterd/src/glusterd-errno.h +index bfb56b5..3301e44 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-errno.h ++++ b/xlators/mgmt/glusterd/src/glusterd-errno.h +@@ -27,7 +27,7 @@ enum glusterd_op_errno { + EG_ISSNAP = 30813, /* Volume is a snap volume */ + EG_GEOREPRUN = 30814, /* Geo-Replication is running */ + EG_NOTTHINP = 30815, /* Bricks are not thinly provisioned */ +- EG_NOGANESHA = 30816, /* obsolete ganesha is not enabled */ ++ EG_NOGANESHA = 30816, /* Global nfs-ganesha is not enabled */ + }; + + #endif +diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +new file mode 100644 +index 0000000..4346bad +--- /dev/null ++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +@@ -0,0 +1,898 @@ ++/* ++ Copyright (c) 2015 Red Hat, Inc. ++ This file is part of GlusterFS. ++ ++ This file is licensed to you under your choice of the GNU Lesser ++ General Public License, version 3 or any later version (LGPLv3 or ++ later), or the GNU General Public License, version 2 (GPLv2), in all ++ cases as published by the Free Software Foundation. 
++*/ ++ ++ ++ ++#include "common-utils.h" ++#include "glusterd.h" ++#include "glusterd-op-sm.h" ++#include "glusterd-store.h" ++#include "glusterd-utils.h" ++#include "glusterd-nfs-svc.h" ++#include "glusterd-volgen.h" ++#include "glusterd-messages.h" ++#include "syscall.h" ++ ++#include ++ ++int start_ganesha (char **op_errstr); ++ ++ ++typedef struct service_command { ++ char *binary; ++ char *service; ++ int (*action) (struct service_command *, char *); ++} service_command; ++ ++/* parsing_ganesha_ha_conf will allocate the returned string ++ * to be freed (GF_FREE) by the caller ++ * return NULL if error or not found */ ++static char* ++parsing_ganesha_ha_conf(const char *key) { ++#define MAX_LINE 1024 ++ char scratch[MAX_LINE * 2] = {0,}; ++ char *value = NULL, *pointer = NULL, *end_pointer = NULL; ++ FILE *fp; ++ ++ fp = fopen (GANESHA_HA_CONF, "r"); ++ if (fp == NULL) { ++ gf_msg (THIS->name, GF_LOG_ERROR, errno, ++ GD_MSG_FILE_OP_FAILED, "couldn't open the file %s", ++ GANESHA_HA_CONF); ++ goto end_ret; ++ } ++ while ((pointer = fgets (scratch, MAX_LINE, fp)) != NULL) { ++ /* Read config file until we get matching "^[[:space:]]*key" */ ++ if (*pointer == '#') { ++ continue; ++ } ++ while (isblank(*pointer)) { ++ pointer++; ++ } ++ if (strncmp (pointer, key, strlen (key))) { ++ continue; ++ } ++ pointer += strlen (key); ++ /* key found : if we fail to parse, we'll return an error ++ * rather than trying next one ++ * - supposition : conf file is bash compatible : no space ++ * around the '=' */ ++ if (*pointer != '=') { ++ gf_msg (THIS->name, GF_LOG_ERROR, errno, ++ GD_MSG_GET_CONFIG_INFO_FAILED, ++ "Parsing %s failed at key %s", ++ GANESHA_HA_CONF, key); ++ goto end_close; ++ } ++ pointer++; /* jump the '=' */ ++ ++ if (*pointer == '"' || *pointer == '\'') { ++ /* dont get the quote */ ++ pointer++; ++ } ++ end_pointer = pointer; ++ /* stop at the next closing quote or blank/newline */ ++ do { ++ end_pointer++; ++ } while (!(*end_pointer == '\'' || *end_pointer == '"' || ++ isspace(*end_pointer) || *end_pointer == '\0')); ++ *end_pointer = '\0'; ++ ++ /* got it. 
copy it and return */ ++ value = gf_strdup (pointer); ++ break; ++ } ++ ++end_close: ++ fclose(fp); ++end_ret: ++ return value; ++} ++ ++static int ++sc_systemctl_action (struct service_command *sc, char *command) ++{ ++ runner_t runner = {0,}; ++ ++ runinit (&runner); ++ runner_add_args (&runner, sc->binary, command, sc->service, NULL); ++ return runner_run (&runner); ++} ++ ++static int ++sc_service_action (struct service_command *sc, char *command) ++{ ++ runner_t runner = {0,}; ++ ++ runinit (&runner); ++ runner_add_args (&runner, sc->binary, sc->service, command, NULL); ++ return runner_run (&runner); ++} ++ ++static int ++manage_service (char *action) ++{ ++ struct stat stbuf = {0,}; ++ int i = 0; ++ int ret = 0; ++ struct service_command sc_list[] = { ++ { .binary = "/usr/bin/systemctl", ++ .service = "nfs-ganesha", ++ .action = sc_systemctl_action ++ }, ++ { .binary = "/sbin/invoke-rc.d", ++ .service = "nfs-ganesha", ++ .action = sc_service_action ++ }, ++ { .binary = "/sbin/service", ++ .service = "nfs-ganesha", ++ .action = sc_service_action ++ }, ++ { .binary = NULL ++ } ++ }; ++ ++ while (sc_list[i].binary != NULL) { ++ ret = sys_stat (sc_list[i].binary, &stbuf); ++ if (ret == 0) { ++ gf_msg_debug (THIS->name, 0, ++ "%s found.", sc_list[i].binary); ++ if (strcmp (sc_list[i].binary, "/usr/bin/systemctl") == 0) ++ ret = sc_systemctl_action (&sc_list[i], action); ++ else ++ ret = sc_service_action (&sc_list[i], action); ++ ++ return ret; ++ } ++ i++; ++ } ++ gf_msg (THIS->name, GF_LOG_ERROR, 0, ++ GD_MSG_UNRECOGNIZED_SVC_MNGR, ++ "Could not %s NFS-Ganesha.Service manager for distro" ++ " not recognized.", action); ++ return ret; ++} ++ ++/* ++ * Check if the cluster is a ganesha cluster or not * ++ */ ++gf_boolean_t ++glusterd_is_ganesha_cluster () { ++ int ret = -1; ++ glusterd_conf_t *priv = NULL; ++ xlator_t *this = NULL; ++ gf_boolean_t ret_bool = _gf_false; ++ ++ this = THIS; ++ GF_VALIDATE_OR_GOTO ("ganesha", this, out); ++ priv = this->private; ++ GF_VALIDATE_OR_GOTO (this->name, priv, out); ++ ++ ret = dict_get_str_boolean (priv->opts, ++ GLUSTERD_STORE_KEY_GANESHA_GLOBAL, ++ _gf_false); ++ if (ret == _gf_true) { ++ ret_bool = _gf_true; ++ gf_msg_debug (this->name, 0, ++ "nfs-ganesha is enabled for the cluster"); ++ } else ++ gf_msg_debug (this->name, 0, ++ "nfs-ganesha is disabled for the cluster"); ++ ++out: ++ return ret_bool; ++ ++} ++ ++/* Check if ganesha.enable is set to 'on', that checks if ++ * a particular volume is exported via NFS-Ganesha */ ++gf_boolean_t ++glusterd_check_ganesha_export (glusterd_volinfo_t *volinfo) { ++ ++ char *value = NULL; ++ gf_boolean_t is_exported = _gf_false; ++ int ret = 0; ++ ++ ret = glusterd_volinfo_get (volinfo, "ganesha.enable", &value); ++ if ((ret == 0) && value) { ++ if (strcmp (value, "on") == 0) { ++ gf_msg_debug (THIS->name, 0, "ganesha.enable set" ++ " to %s", value); ++ is_exported = _gf_true; ++ } ++ } ++ return is_exported; ++} ++ ++/* * ++ * The below function is called as part of commit phase for volume set option ++ * "ganesha.enable". If the value is "on", it creates export configuration file ++ * and then export the volume via dbus command. 
Incase of "off", the volume ++ * will be already unexported during stage phase, so it will remove the conf ++ * file from shared storage ++ */ ++int ++glusterd_check_ganesha_cmd (char *key, char *value, char **errstr, dict_t *dict) ++{ ++ int ret = 0; ++ char *volname = NULL; ++ ++ GF_ASSERT (key); ++ GF_ASSERT (value); ++ GF_ASSERT (dict); ++ ++ if ((strcmp (key, "ganesha.enable") == 0)) { ++ if ((strcmp (value, "on")) && (strcmp (value, "off"))) { ++ gf_asprintf (errstr, "Invalid value" ++ " for volume set command. Use on/off only."); ++ ret = -1; ++ goto out; ++ } ++ if (strcmp (value, "on") == 0) { ++ ret = glusterd_handle_ganesha_op (dict, errstr, key, ++ value); ++ ++ } else if (is_origin_glusterd (dict)) { ++ ret = dict_get_str (dict, "volname", &volname); ++ if (ret) { ++ gf_msg ("glusterd-ganesha", GF_LOG_ERROR, errno, ++ GD_MSG_DICT_GET_FAILED, ++ "Unable to get volume name"); ++ goto out; ++ } ++ ret = manage_export_config (volname, "off", errstr); ++ } ++ } ++out: ++ if (ret) { ++ gf_msg ("glusterd-ganesha", GF_LOG_ERROR, 0, ++ GD_MSG_NFS_GNS_OP_HANDLE_FAIL, ++ "Handling NFS-Ganesha" ++ " op failed."); ++ } ++ return ret; ++} ++ ++int ++glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr) ++{ ++ int ret = -1; ++ int value = -1; ++ gf_boolean_t option = _gf_false; ++ char *str = NULL; ++ glusterd_conf_t *priv = NULL; ++ xlator_t *this = NULL; ++ ++ GF_ASSERT (dict); ++ this = THIS; ++ GF_ASSERT (this); ++ priv = this->private; ++ GF_ASSERT (priv); ++ ++ value = dict_get_str_boolean (dict, "value", _gf_false); ++ if (value == -1) { ++ gf_msg (this->name, GF_LOG_ERROR, errno, ++ GD_MSG_DICT_GET_FAILED, ++ "value not present."); ++ goto out; ++ } ++ /* This dict_get will fail if the user had never set the key before */ ++ /*Ignoring the ret value and proceeding */ ++ ret = dict_get_str (priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str); ++ if (ret == -1) { ++ gf_msg (this->name, GF_LOG_WARNING, errno, ++ GD_MSG_DICT_GET_FAILED, "Global dict not present."); ++ ret = 0; ++ goto out; ++ } ++ /* Validity of the value is already checked */ ++ ret = gf_string2boolean (str, &option); ++ /* Check if the feature is already enabled, fail in that case */ ++ if (value == option) { ++ gf_asprintf (op_errstr, "nfs-ganesha is already %sd.", str); ++ ret = -1; ++ goto out; ++ } ++ ++ if (value) { ++ ret = start_ganesha (op_errstr); ++ if (ret) { ++ gf_msg (THIS->name, GF_LOG_ERROR, 0, ++ GD_MSG_NFS_GNS_START_FAIL, ++ "Could not start NFS-Ganesha"); ++ ++ } ++ } else { ++ ret = stop_ganesha (op_errstr); ++ if (ret) ++ gf_msg_debug (THIS->name, 0, "Could not stop " ++ "NFS-Ganesha."); ++ } ++ ++out: ++ ++ if (ret) { ++ if (!(*op_errstr)) { ++ *op_errstr = gf_strdup ("Error, Validation Failed"); ++ gf_msg_debug (this->name, 0, ++ "Error, Cannot Validate option :%s", ++ GLUSTERD_STORE_KEY_GANESHA_GLOBAL); ++ } else { ++ gf_msg_debug (this->name, 0, ++ "Error, Cannot Validate option"); ++ } ++ } ++ return ret; ++} ++ ++int ++glusterd_op_set_ganesha (dict_t *dict, char **errstr) ++{ ++ int ret = 0; ++ xlator_t *this = NULL; ++ glusterd_conf_t *priv = NULL; ++ char *key = NULL; ++ char *value = NULL; ++ char *next_version = NULL; ++ ++ this = THIS; ++ GF_ASSERT (this); ++ GF_ASSERT (dict); ++ ++ priv = this->private; ++ GF_ASSERT (priv); ++ ++ ++ ret = dict_get_str (dict, "key", &key); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, errno, ++ GD_MSG_DICT_GET_FAILED, ++ "Couldn't get key in global option set"); ++ goto out; ++ } ++ ++ ret = dict_get_str (dict, "value", &value); ++ if (ret) 
{ ++ gf_msg (this->name, GF_LOG_ERROR, errno, ++ GD_MSG_DICT_GET_FAILED, ++ "Couldn't get value in global option set"); ++ goto out; ++ } ++ ++ ret = glusterd_handle_ganesha_op (dict, errstr, key, value); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_NFS_GNS_SETUP_FAIL, ++ "Initial NFS-Ganesha set up failed"); ++ ret = -1; ++ goto out; ++ } ++ ret = dict_set_dynstr_with_alloc (priv->opts, ++ GLUSTERD_STORE_KEY_GANESHA_GLOBAL, ++ value); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_WARNING, errno, ++ GD_MSG_DICT_SET_FAILED, "Failed to set" ++ " nfs-ganesha in dict."); ++ goto out; ++ } ++ ret = glusterd_get_next_global_opt_version_str (priv->opts, ++ &next_version); ++ if (ret) { ++ gf_msg_debug (THIS->name, 0, "Could not fetch " ++ "global op version"); ++ goto out; ++ } ++ ret = dict_set_str (priv->opts, GLUSTERD_GLOBAL_OPT_VERSION, ++ next_version); ++ if (ret) ++ goto out; ++ ++ ret = glusterd_store_options (this, priv->opts); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_STORE_FAIL, "Failed to store options"); ++ goto out; ++ } ++ ++out: ++ gf_msg_debug (this->name, 0, "returning %d", ret); ++ return ret; ++} ++ ++/* The following function parses GANESHA_HA_CONF. ++ * A sample file looks like this: ++ * HA_NAME="ganesha-ha-360" ++ * HA_VOL_NAME="ha-state" ++ * HA_CLUSTER_NODES="server1,server2" ++ * VIP_rhs_1="10.x.x.x" ++ * VIP_rhs_2="10.x.x.x." */ ++ ++/* Check if the localhost is listed as one of the nfs-ganesha nodes */ ++gf_boolean_t ++check_host_list (void) ++{ ++ ++ glusterd_conf_t *priv = NULL; ++ char *hostname, *hostlist; ++ gf_boolean_t ret = _gf_false; ++ xlator_t *this = NULL; ++ ++ this = THIS; ++ priv = THIS->private; ++ GF_ASSERT (priv); ++ ++ hostlist = parsing_ganesha_ha_conf ("HA_CLUSTER_NODES"); ++ if (hostlist == NULL) { ++ gf_msg (this->name, GF_LOG_INFO, errno, ++ GD_MSG_GET_CONFIG_INFO_FAILED, ++ "couldn't get HA_CLUSTER_NODES from file %s", ++ GANESHA_HA_CONF); ++ return _gf_false; ++ } ++ ++ /* Hostlist is a comma separated list now */ ++ hostname = strtok (hostlist, ","); ++ while (hostname != NULL) { ++ ret = gf_is_local_addr (hostname); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_INFO, 0, ++ GD_MSG_NFS_GNS_HOST_FOUND, ++ "ganesha host found. " ++ "Hostname is %s", hostname); ++ break; ++ } ++ hostname = strtok (NULL, ","); ++ } ++ ++ GF_FREE (hostlist); ++ return ret; ++ ++}
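++ ++/* For illustration (values here are examples, not from this patch): ++ * assuming CONFDIR is built as /etc/ganesha and GANESHA_PREFIX as ++ * /usr/libexec/ganesha, enabling the export of a volume "testvol" makes ++ * manage_export_config() below run, roughly, ++ * sh /usr/libexec/ganesha/create-export-ganesha.sh /etc/ganesha on testvol ++ * which generates export.testvol.conf under GANESHA_EXPORT_DIRECTORY ++ * (CONFDIR/exports). */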
++int ++manage_export_config (char *volname, char *value, char **op_errstr) ++{ ++ runner_t runner = {0,}; ++ int ret = -1; ++ ++ GF_ASSERT(volname); ++ runinit (&runner); ++ runner_add_args (&runner, "sh", ++ GANESHA_PREFIX"/create-export-ganesha.sh", ++ CONFDIR, value, volname, NULL); ++ ret = runner_run(&runner); ++ ++ if (ret) ++ gf_asprintf (op_errstr, "Failed to create" ++ " NFS-Ganesha export config file."); ++ ++ return ret; ++} ++ ++/* Exports and unexports a particular volume via NFS-Ganesha */ ++int ++ganesha_manage_export (dict_t *dict, char *value, char **op_errstr) ++{ ++ runner_t runner = {0,}; ++ int ret = -1; ++ glusterd_volinfo_t *volinfo = NULL; ++ dict_t *vol_opts = NULL; ++ char *volname = NULL; ++ xlator_t *this = NULL; ++ glusterd_conf_t *priv = NULL; ++ gf_boolean_t option = _gf_false; ++ ++ runinit (&runner); ++ this = THIS; ++ GF_ASSERT (this); ++ priv = this->private; ++ ++ GF_ASSERT (value); ++ GF_ASSERT (dict); ++ GF_ASSERT (priv); ++ ++ ret = dict_get_str (dict, "volname", &volname); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, errno, ++ GD_MSG_DICT_GET_FAILED, ++ "Unable to get volume name"); ++ goto out; ++ } ++ ret = gf_string2boolean (value, &option); ++ if (ret == -1) { ++ gf_msg (this->name, GF_LOG_ERROR, EINVAL, ++ GD_MSG_INVALID_ENTRY, "invalid value."); ++ goto out; ++ } ++ ++ ret = glusterd_volinfo_find (volname, &volinfo); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, EINVAL, ++ GD_MSG_VOL_NOT_FOUND, ++ FMTSTR_CHECK_VOL_EXISTS, volname); ++ goto out; ++ } ++ ++ ret = glusterd_check_ganesha_export (volinfo); ++ if (ret && option) { ++ gf_asprintf (op_errstr, "ganesha.enable " ++ "is already 'on'."); ++ ret = -1; ++ goto out; ++ ++ } else if (!option && !ret) { ++ gf_asprintf (op_errstr, "ganesha.enable " ++ "is already 'off'."); ++ ret = -1; ++ goto out; ++ } ++ ++ /* Check if global option is enabled, proceed only then */ ++ ret = dict_get_str_boolean (priv->opts, ++ GLUSTERD_STORE_KEY_GANESHA_GLOBAL, _gf_false); ++ if (ret == -1) { ++ gf_msg_debug (this->name, 0, "Failed to get " ++ "global option dict."); ++ gf_asprintf (op_errstr, "The option " ++ "nfs-ganesha should be " ++ "enabled before setting ganesha.enable."); ++ goto out; ++ } ++ if (!ret) { ++ gf_asprintf (op_errstr, "The option " ++ "nfs-ganesha should be " ++ "enabled before setting ganesha.enable."); ++ ret = -1; ++ goto out; ++ } ++ ++ /* * ++ * Create the export file from the node where ganesha.enable "on" ++ * is executed ++ * */ ++ if (option) { ++ ret = manage_export_config (volname, "on", op_errstr); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_EXPORT_FILE_CREATE_FAIL, ++ "Failed to create " ++ "export file for NFS-Ganesha\n"); ++ goto out; ++ } ++ } ++ ++ if (check_host_list()) { ++ runner_add_args (&runner, "sh", GANESHA_PREFIX"/dbus-send.sh", ++ CONFDIR, value, volname, NULL); ++ ret = runner_run (&runner); ++ if (ret) { ++ gf_asprintf(op_errstr, "Dynamic export" ++ " addition/deletion failed." ++ " Please see log file for details"); ++ goto out; ++ } ++ } ++ ++ vol_opts = volinfo->dict; ++ ret = dict_set_dynstr_with_alloc (vol_opts, ++ "features.cache-invalidation", value); ++ if (ret) ++ gf_asprintf (op_errstr, "Cache-invalidation could not" ++ " be set to %s.", value); ++ ret = glusterd_store_volinfo (volinfo, ++ GLUSTERD_VOLINFO_VER_AC_INCREMENT); ++ if (ret) ++ gf_asprintf (op_errstr, "failed to store volinfo for %s" ++ , volinfo->volname); ++ ++out: ++ return ret; ++}
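++ ++/* For reference: dbus-send.sh itself is not part of this patch. Judging by ++ * the UpdateExport call that refresh-config issues in ganesha-ha.sh, the ++ * dynamic export addition above boils down to a DBus call of roughly this ++ * shape (volume name and conf path are illustrative only): ++ * dbus-send --print-reply --system --dest=org.ganesha.nfsd \ ++ * /org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport \ ++ * string:/etc/ganesha/exports/export.testvol.conf \ ++ * string:"EXPORT(Path=/testvol)" ++ */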
" ++ "Reason : %s", CONFDIR, strerror (errno)); ++ ret = 0; ++ goto out; ++ } ++ ++ GF_FOR_EACH_ENTRY_IN_DIR (entry, dir, scratch); ++ while (entry) { ++ snprintf (path, PATH_MAX, "%s/%s", CONFDIR, entry->d_name); ++ ret = sys_lstat (path, &st); ++ if (ret == -1) { ++ gf_msg_debug (THIS->name, 0, "Failed to stat entry %s :" ++ " %s", path, strerror (errno)); ++ goto out; ++ } ++ ++ if (strcmp(entry->d_name, "ganesha.conf") == 0 || ++ strcmp(entry->d_name, "ganesha-ha.conf") == 0) ++ gf_msg_debug (THIS->name, 0, " %s is not required" ++ " to remove", path); ++ else if (S_ISDIR (st.st_mode)) ++ ret = recursive_rmdir (path); ++ else ++ ret = sys_unlink (path); ++ ++ if (ret) { ++ gf_msg_debug (THIS->name, 0, " Failed to remove %s. " ++ "Reason : %s", path, strerror (errno)); ++ } ++ ++ gf_msg_debug (THIS->name, 0, "%s %s", ret ? ++ "Failed to remove" : "Removed", entry->d_name); ++ GF_FOR_EACH_ENTRY_IN_DIR (entry, dir, scratch); ++ } ++ ++ ret = sys_closedir (dir); ++ if (ret) { ++ gf_msg_debug (THIS->name, 0, "Failed to close dir %s. Reason :" ++ " %s", CONFDIR, strerror (errno)); ++ } ++ } ++ ++out: ++ return ret; ++} ++ ++ ++int ++setup_cluster(gf_boolean_t run_setup) ++{ ++ int ret = 0; ++ runner_t runner = {0,}; ++ ++ if (run_setup) { ++ runinit (&runner); ++ runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh", ++ "setup", CONFDIR, NULL); ++ ret = runner_run (&runner); ++ } ++ return ret; ++} ++ ++ ++static int ++teardown (gf_boolean_t run_teardown, char **op_errstr) ++{ ++ runner_t runner = {0,}; ++ int ret = 1; ++ glusterd_volinfo_t *volinfo = NULL; ++ glusterd_conf_t *priv = NULL; ++ dict_t *vol_opts = NULL; ++ ++ priv = THIS->private; ++ ++ ret = tear_down_cluster (run_teardown); ++ if (ret == -1) { ++ gf_asprintf (op_errstr, "Cleanup of NFS-Ganesha" ++ " HA config failed."); ++ goto out; ++ } ++ ++ runinit (&runner); ++ runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh", ++ "cleanup", CONFDIR, NULL); ++ ret = runner_run (&runner); ++ if (ret) ++ gf_msg_debug (THIS->name, 0, "Could not clean up" ++ " NFS-Ganesha related config"); ++ ++ cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) { ++ vol_opts = volinfo->dict; ++ /* All the volumes exported via NFS-Ganesha will be ++ unexported, hence setting the appropriate keys */ ++ ret = dict_set_str (vol_opts, "features.cache-invalidation", ++ "off"); ++ if (ret) ++ gf_msg (THIS->name, GF_LOG_WARNING, errno, ++ GD_MSG_DICT_SET_FAILED, ++ "Could not set features.cache-invalidation " ++ "to off for %s", volinfo->volname); ++ ++ ret = dict_set_str (vol_opts, "ganesha.enable", "off"); ++ if (ret) ++ gf_msg (THIS->name, GF_LOG_WARNING, errno, ++ GD_MSG_DICT_SET_FAILED, ++ "Could not set ganesha.enable to off for %s", ++ volinfo->volname); ++ ++ ret = glusterd_store_volinfo (volinfo, ++ GLUSTERD_VOLINFO_VER_AC_INCREMENT); ++ if (ret) ++ gf_msg (THIS->name, GF_LOG_WARNING, 0, ++ GD_MSG_VOLINFO_SET_FAIL, ++ "failed to store volinfo for %s", ++ volinfo->volname); ++ } ++out: ++ return ret; ++} ++ ++int ++stop_ganesha (char **op_errstr) { ++ ++ int ret = 0; ++ runner_t runner = {0,}; ++ ++ runinit (&runner); ++ runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh", ++ "--setup-ganesha-conf-files", CONFDIR, "no", NULL); ++ ret = runner_run (&runner); ++ if (ret) { ++ gf_asprintf (op_errstr, "removal of symlink ganesha.conf " ++ "in /etc/ganesha failed"); ++ } ++ ++ if (check_host_list ()) { ++ ret = manage_service ("stop"); ++ if (ret) ++ gf_asprintf (op_errstr, "NFS-Ganesha service could not" ++ "be 
stopped."); ++ } ++ return ret; ++ ++} ++ ++int ++start_ganesha (char **op_errstr) ++{ ++ int ret = -1; ++ dict_t *vol_opts = NULL; ++ glusterd_volinfo_t *volinfo = NULL; ++ glusterd_conf_t *priv = NULL; ++ runner_t runner = {0,}; ++ ++ priv = THIS->private; ++ GF_ASSERT (priv); ++ ++ cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) { ++ vol_opts = volinfo->dict; ++ /* Gluster-nfs has to be disabled across the trusted pool */ ++ /* before attempting to start nfs-ganesha */ ++ ret = dict_set_str (vol_opts, NFS_DISABLE_MAP_KEY, "on"); ++ if (ret) ++ goto out; ++ ++ ret = glusterd_store_volinfo (volinfo, ++ GLUSTERD_VOLINFO_VER_AC_INCREMENT); ++ if (ret) { ++ *op_errstr = gf_strdup ("Failed to store the " ++ "Volume information"); ++ goto out; ++ } ++ } ++ ++ /* If the nfs svc is not initialized it means that the service is not ++ * running, hence we can skip the process of stopping gluster-nfs ++ * service ++ */ ++ if (priv->nfs_svc.inited) { ++ ret = priv->nfs_svc.stop (&(priv->nfs_svc), SIGKILL); ++ if (ret) { ++ ret = -1; ++ gf_asprintf (op_errstr, "Gluster-NFS service could" ++ "not be stopped, exiting."); ++ goto out; ++ } ++ } ++ ++ if (check_host_list()) { ++ runinit (&runner); ++ runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh", ++ "--setup-ganesha-conf-files", CONFDIR, "yes", ++ NULL); ++ ret = runner_run (&runner); ++ if (ret) { ++ gf_asprintf (op_errstr, "creation of symlink ganesha.conf " ++ "in /etc/ganesha failed"); ++ goto out; ++ } ++ ret = manage_service ("start"); ++ if (ret) ++ gf_asprintf (op_errstr, "NFS-Ganesha failed to start." ++ "Please see log file for details"); ++ } ++ ++out: ++ return ret; ++} ++ ++static int ++pre_setup (gf_boolean_t run_setup, char **op_errstr) ++{ ++ int ret = 0; ++ ++ ret = check_host_list(); ++ ++ if (ret) { ++ ret = setup_cluster(run_setup); ++ if (ret == -1) ++ gf_asprintf (op_errstr, "Failed to set up HA " ++ "config for NFS-Ganesha. " ++ "Please check the log file for details"); ++ } ++ ++ return ret; ++} ++ ++int ++glusterd_handle_ganesha_op (dict_t *dict, char **op_errstr, ++ char *key, char *value) ++{ ++ ++ int32_t ret = -1; ++ gf_boolean_t option = _gf_false; ++ ++ GF_ASSERT (dict); ++ GF_ASSERT (op_errstr); ++ GF_ASSERT (key); ++ GF_ASSERT (value); ++ ++ ++ if (strcmp (key, "ganesha.enable") == 0) { ++ ret = ganesha_manage_export (dict, value, op_errstr); ++ if (ret < 0) ++ goto out; ++ } ++ ++ /* It is possible that the key might not be set */ ++ ret = gf_string2boolean (value, &option); ++ if (ret == -1) { ++ gf_asprintf (op_errstr, "Invalid value in key-value pair."); ++ goto out; ++ } ++ ++ if (strcmp (key, GLUSTERD_STORE_KEY_GANESHA_GLOBAL) == 0) { ++ /* * ++ * The set up/teardown of pcs cluster should be performed only ++ * once. This will done on the node in which the cli command ++ * 'gluster nfs-ganesha ' got executed. 
So that ++ * node should be part of the ganesha HA cluster ++ */ ++ if (option) { ++ ret = pre_setup (is_origin_glusterd (dict), op_errstr); ++ if (ret < 0) ++ goto out; ++ } else { ++ ret = teardown (is_origin_glusterd (dict), op_errstr); ++ if (ret < 0) ++ goto out; ++ } ++ } ++ ++out: ++ return ret; ++} ++ +diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c +index c3b9252..a3e1fdc 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handler.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handler.c +@@ -1884,6 +1884,82 @@ glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx, + return ret; + } + ++int ++__glusterd_handle_ganesha_cmd (rpcsvc_request_t *req) ++{ ++ int32_t ret = -1; ++ gf_cli_req cli_req = { {0,} }; ++ dict_t *dict = NULL; ++ glusterd_op_t cli_op = GD_OP_GANESHA; ++ char *op_errstr = NULL; ++ char err_str[2048] = {0,}; ++ xlator_t *this = NULL; ++ ++ this = THIS; ++ GF_ASSERT (this); ++ ++ GF_ASSERT (req); ++ ++ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); ++ if (ret < 0) { ++ snprintf (err_str, sizeof (err_str), "Failed to decode " ++ "request received from cli"); ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_REQ_DECODE_FAIL, "%s", err_str); ++ req->rpc_err = GARBAGE_ARGS; ++ goto out; ++ } ++ ++ if (cli_req.dict.dict_len) { ++ /* Unserialize the dictionary */ ++ dict = dict_new (); ++ if (!dict) { ++ ret = -1; ++ goto out; ++ } ++ ++ ret = dict_unserialize (cli_req.dict.dict_val, ++ cli_req.dict.dict_len, ++ &dict); ++ if (ret < 0) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_DICT_UNSERIALIZE_FAIL, ++ "failed to " ++ "unserialize req-buffer to dictionary"); ++ snprintf (err_str, sizeof (err_str), "Unable to decode " ++ "the command"); ++ goto out; ++ } else { ++ dict->extra_stdfree = cli_req.dict.dict_val; ++ } ++ } ++ ++ gf_msg_trace (this->name, 0, "Received global option request"); ++ ++ ret = glusterd_op_begin_synctask (req, GD_OP_GANESHA, dict); ++out: ++ if (ret) { ++ if (err_str[0] == '\0') ++ snprintf (err_str, sizeof (err_str), ++ "Operation failed"); ++ ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, ++ dict, err_str); ++ } ++ if (op_errstr) ++ GF_FREE (op_errstr); ++ if (dict) ++ dict_unref(dict); ++ ++ return ret; ++} ++ ++ ++int ++glusterd_handle_ganesha_cmd (rpcsvc_request_t *req) ++{ ++ return glusterd_big_locked_handler (req, __glusterd_handle_ganesha_cmd); ++} ++ + static int + __glusterd_handle_reset_volume (rpcsvc_request_t *req) + { +@@ -6470,6 +6546,7 @@ rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = { + [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA}, + [GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot, NULL, 0, DRC_NA}, + [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME", GLUSTER_CLI_BARRIER_VOLUME, glusterd_handle_barrier, NULL, 0, DRC_NA}, ++ [GLUSTER_CLI_GANESHA] = { "GANESHA" , GLUSTER_CLI_GANESHA, glusterd_handle_ganesha_cmd, NULL, 0, DRC_NA}, + [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", GLUSTER_CLI_GET_VOL_OPT, glusterd_handle_get_vol_opt, NULL, 0, DRC_NA}, + [GLUSTER_CLI_BITROT] = {"BITROT", GLUSTER_CLI_BITROT, glusterd_handle_bitrot, NULL, 0, DRC_NA}, + [GLUSTER_CLI_GET_STATE] = {"GET_STATE", GLUSTER_CLI_GET_STATE, glusterd_handle_get_state, NULL, 0, DRC_NA},
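For context, a sketch of how these handlers get exercised once the series is applied (hedged: the CLI-side wiring for GLUSTER_CLI_GANESHA comes from companion patches in this series, not from the hunk above):

 # global enable/disable of the NFS-Ganesha service
 gluster nfs-ganesha enable
 # per-volume export toggle, routed through the volume-set path
 gluster volume set testvol ganesha.enable on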
+diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h +index de9ae92..cc7f371 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-messages.h ++++ b/xlators/mgmt/glusterd/src/glusterd-messages.h +@@ -4767,6 +4767,14 @@ + * @recommendedaction + * + */ ++#define GD_MSG_NFS_GANESHA_DISABLED (GLUSTERD_COMP_BASE + 589) ++ ++/*! ++ * @messageid ++ * @diagnosis ++ * @recommendedaction ++ * ++ */ + #define GD_MSG_TIERD_STOP_FAIL (GLUSTERD_COMP_BASE + 590) + + /*! +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 5b8f833..06e9e25 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -1126,6 +1126,12 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr) + if (ret) + goto out; + ++ if ((strcmp (key, "ganesha.enable") == 0) && ++ (strcmp (value, "off") == 0)) { ++ ret = ganesha_manage_export (dict, "off", op_errstr); ++ if (ret) ++ goto out; ++ } + ret = glusterd_check_quota_cmd (key, value, errstr, sizeof (errstr)); + if (ret) + goto out; +@@ -1642,6 +1648,21 @@ glusterd_op_stage_reset_volume (dict_t *dict, char **op_errstr) + goto out; + } + ++ /* * ++ * If the key is ganesha.enable, the volume should be unexported from ++ * the ganesha server. Also, since it is a volume-level option, perform ++ * this only when the volume name is not "all" (in other words, when ++ * volinfo != NULL) ++ */ ++ if (volinfo && (!strcmp (key, "all") || !strcmp(key, "ganesha.enable"))) { ++ if (glusterd_check_ganesha_export (volinfo)) { ++ ret = ganesha_manage_export (dict, "off", op_errstr); ++ if (ret) ++ gf_msg (this->name, GF_LOG_WARNING, 0, ++ GD_MSG_NFS_GNS_RESET_FAIL, ++ "Could not reset ganesha.enable key"); ++ } ++ } ++ + if (strcmp(key, "all")) { + exists = glusterd_check_option_exists (key, &key_fixed); + if (exists == -1) { +@@ -2364,6 +2385,16 @@ glusterd_op_reset_volume (dict_t *dict, char **op_rspstr) + } + } + ++ if (!strcmp(key, "ganesha.enable") || !strcmp (key, "all")) { ++ if (glusterd_check_ganesha_export (volinfo)) { ++ ret = manage_export_config (volname, "off", op_rspstr); ++ if (ret) ++ gf_msg (this->name, GF_LOG_WARNING, 0, ++ GD_MSG_NFS_GNS_RESET_FAIL, ++ "Could not reset ganesha.enable key"); ++ } ++ } ++ + out: + GF_FREE (key_fixed); + if (quorum_action) +@@ -2960,6 +2991,9 @@ glusterd_op_set_volume (dict_t *dict, char **errstr) + } + } + ++ ret = glusterd_check_ganesha_cmd (key, value, errstr, dict); ++ if (ret == -1) ++ goto out; + if (!is_key_glusterd_hooks_friendly (key)) { + ret = glusterd_check_option_exists (key, &key_fixed); + GF_ASSERT (ret); +@@ -4568,6 +4602,12 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx) + } + break; + ++ case GD_OP_GANESHA: ++ { ++ dict_copy (dict, req_dict); ++ } ++ break; ++ + default: + break; + } +@@ -6062,6 +6102,10 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr, + ret = glusterd_op_stage_set_volume (dict, op_errstr); + break; + ++ case GD_OP_GANESHA: ++ ret = glusterd_op_stage_set_ganesha (dict, op_errstr); ++ break; ++ + case GD_OP_RESET_VOLUME: + ret = glusterd_op_stage_reset_volume (dict, op_errstr); + break; +@@ -6195,6 +6239,9 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr, + case GD_OP_SET_VOLUME: + ret = glusterd_op_set_volume (dict, op_errstr); + break; ++ case GD_OP_GANESHA: ++ ret = glusterd_op_set_ganesha (dict, op_errstr); ++ break; + + + case GD_OP_RESET_VOLUME: +diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c +index 4cbade1..2a0d321 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c ++++ 
b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c +@@ -3702,6 +3702,146 @@ out: + + } + ++/* * ++ * Here there are two possibilities: either the destination is a snapshot or ++ * a clone. In the case of a snapshot, the nfs_ganesha export file will be copied ++ * to the snap dir. If it is a clone, then a new export file will be created for ++ * the clone in the GANESHA_EXPORT_DIRECTORY, replacing occurrences of the ++ * volname with the clonename ++ */ ++int ++glusterd_copy_nfs_ganesha_file (glusterd_volinfo_t *src_vol, ++ glusterd_volinfo_t *dest_vol) ++{ ++ ++ int32_t ret = -1; ++ char snap_dir[PATH_MAX] = {0,}; ++ char src_path[PATH_MAX] = {0,}; ++ char dest_path[PATH_MAX] = {0,}; ++ char buffer[BUFSIZ] = {0,}; ++ char *find_ptr = NULL; ++ char *buff_ptr = NULL; ++ char *tmp_ptr = NULL; ++ xlator_t *this = NULL; ++ glusterd_conf_t *priv = NULL; ++ struct stat stbuf = {0,}; ++ FILE *src = NULL; ++ FILE *dest = NULL; ++ ++ ++ this = THIS; ++ GF_VALIDATE_OR_GOTO ("snapshot", this, out); ++ priv = this->private; ++ GF_VALIDATE_OR_GOTO (this->name, priv, out); ++ ++ GF_VALIDATE_OR_GOTO (this->name, src_vol, out); ++ GF_VALIDATE_OR_GOTO (this->name, dest_vol, out); ++ ++ if (glusterd_check_ganesha_export(src_vol) == _gf_false) { ++ gf_msg_debug (this->name, 0, "%s is not exported via " ++ "NFS-Ganesha. Skipping copy of export conf.", ++ src_vol->volname); ++ ret = 0; ++ goto out; ++ } ++ ++ if (src_vol->is_snap_volume) { ++ GLUSTERD_GET_SNAP_DIR (snap_dir, src_vol->snapshot, priv); ++ ret = snprintf (src_path, PATH_MAX, "%s/export.%s.conf", ++ snap_dir, src_vol->snapshot->snapname); ++ } else { ++ ret = snprintf (src_path, PATH_MAX, "%s/export.%s.conf", ++ GANESHA_EXPORT_DIRECTORY, src_vol->volname); ++ } ++ if (ret < 0 || ret >= PATH_MAX) ++ goto out; ++ ++ ret = sys_lstat (src_path, &stbuf); ++ if (ret) { ++ /* ++ * This code path is hit, only when the src_vol is being * ++ * exported via NFS-Ganesha. So if the conf file is not * ++ * available, we fail the snapshot operation. * ++ */ ++ gf_msg (this->name, GF_LOG_ERROR, errno, ++ GD_MSG_FILE_OP_FAILED, ++ "Stat on %s failed with %s", ++ src_path, strerror (errno)); ++ goto out; ++ } ++ ++ if (dest_vol->is_snap_volume) { ++ memset (snap_dir, 0 , PATH_MAX); ++ GLUSTERD_GET_SNAP_DIR (snap_dir, dest_vol->snapshot, priv); ++ ret = snprintf (dest_path, sizeof (dest_path), ++ "%s/export.%s.conf", snap_dir, ++ dest_vol->snapshot->snapname); ++ if (ret < 0) ++ goto out; ++ ++ ret = glusterd_copy_file (src_path, dest_path); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, ENOMEM, ++ GD_MSG_NO_MEMORY, "Failed to copy %s in %s", ++ src_path, dest_path); ++ goto out; ++ } ++ ++ } else { ++ ret = snprintf (dest_path, sizeof (dest_path), ++ "%s/export.%s.conf", GANESHA_EXPORT_DIRECTORY, ++ dest_vol->volname); ++ if (ret < 0) ++ goto out; ++ ++ src = fopen (src_path, "r"); ++ dest = fopen (dest_path, "w"); ++ ++ if (!src || !dest) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_FILE_OP_FAILED, ++ "Failed to open %s", ++ dest ? 
src_path : dest_path); ++ ret = -1; ++ goto out; ++ } ++ ++ /* * ++ * if the source volume is a snapshot, the export conf file ++ * contains the original volname ++ */ ++ if (src_vol->is_snap_volume) ++ find_ptr = gf_strdup (src_vol->parent_volname); ++ else ++ find_ptr = gf_strdup (src_vol->volname); ++ ++ if (!find_ptr) ++ goto out; ++ ++ /* Replacing volname with clonename */ ++ while (fgets(buffer, BUFSIZ, src)) { ++ buff_ptr = buffer; ++ while ((tmp_ptr = strstr(buff_ptr, find_ptr))) { ++ while (buff_ptr < tmp_ptr) ++ fputc((int)*buff_ptr++, dest); ++ fputs(dest_vol->volname, dest); ++ buff_ptr += strlen(find_ptr); ++ } ++ fputs(buff_ptr, dest); ++ memset (buffer, 0, BUFSIZ); ++ } ++ } ++out: ++ if (src) ++ fclose (src); ++ if (dest) ++ fclose (dest); ++ if (find_ptr) ++ GF_FREE(find_ptr); ++ ++ return ret; ++} ++ + int32_t + glusterd_restore_geo_rep_files (glusterd_volinfo_t *snap_vol) + { +@@ -3792,6 +3932,62 @@ out: + return ret; + } + ++int ++glusterd_restore_nfs_ganesha_file (glusterd_volinfo_t *src_vol, ++ glusterd_snap_t *snap) ++{ ++ ++ int32_t ret = -1; ++ char snap_dir[PATH_MAX] = ""; ++ char src_path[PATH_MAX] = ""; ++ char dest_path[PATH_MAX] = ""; ++ xlator_t *this = NULL; ++ glusterd_conf_t *priv = NULL; ++ struct stat stbuf = {0,}; ++ ++ this = THIS; ++ GF_VALIDATE_OR_GOTO ("snapshot", this, out); ++ priv = this->private; ++ GF_VALIDATE_OR_GOTO (this->name, priv, out); ++ ++ GF_VALIDATE_OR_GOTO (this->name, src_vol, out); ++ GF_VALIDATE_OR_GOTO (this->name, snap, out); ++ ++ GLUSTERD_GET_SNAP_DIR (snap_dir, snap, priv); ++ ++ ret = snprintf (src_path, sizeof (src_path), "%s/export.%s.conf", ++ snap_dir, snap->snapname); ++ if (ret < 0) ++ goto out; ++ ++ ret = sys_lstat (src_path, &stbuf); ++ if (ret) { ++ if (errno == ENOENT) { ++ ret = 0; ++ gf_msg_debug (this->name, 0, "%s not found", src_path); ++ } else ++ gf_msg (this->name, GF_LOG_WARNING, errno, ++ GD_MSG_FILE_OP_FAILED, ++ "Stat on %s failed with %s", ++ src_path, strerror (errno)); ++ goto out; ++ } ++ ++ ret = snprintf (dest_path, sizeof (dest_path), "%s/export.%s.conf", ++ GANESHA_EXPORT_DIRECTORY, src_vol->volname); ++ if (ret < 0) ++ goto out; ++ ++ ret = glusterd_copy_file (src_path, dest_path); ++ if (ret) ++ gf_msg (this->name, GF_LOG_ERROR, ENOMEM, ++ GD_MSG_NO_MEMORY, "Failed to copy %s in %s", ++ src_path, dest_path); ++ ++out: ++ return ret; ++ ++} + /* Snapd functions */ + int + glusterd_is_snapd_enabled (glusterd_volinfo_t *volinfo) +diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h +index b13493d..e050166 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h ++++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h +@@ -99,12 +99,19 @@ glusterd_get_geo_rep_session (char *slave_key, char *origin_volname, + int32_t + glusterd_restore_geo_rep_files (glusterd_volinfo_t *snap_vol); + ++int ++glusterd_restore_nfs_ganesha_file (glusterd_volinfo_t *src_vol, ++ glusterd_snap_t *snap); + int32_t + glusterd_copy_quota_files (glusterd_volinfo_t *src_vol, + glusterd_volinfo_t *dest_vol, + gf_boolean_t *conf_present); + + int ++glusterd_copy_nfs_ganesha_file (glusterd_volinfo_t *src_vol, ++ glusterd_volinfo_t *dest_vol); ++ ++int + glusterd_snap_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict); + + int
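The volname-to-clonename rewrite in glusterd_copy_nfs_ganesha_file() above has, in effect, the following shell equivalent (a sketch with made-up volume and clone names, assuming the usual /etc/ganesha CONFDIR; the C code streams the file with fgets/strstr rather than shelling out):

 sed 's/oldvol/newclone/g' \
     /etc/ganesha/exports/export.oldvol.conf \
     > /etc/ganesha/exports/export.newclone.conf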
+diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c +index 6306d29..c38d2ff 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c ++++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c +@@ -904,6 +904,76 @@ out: + return ret; + } + ++/* ++ * This function validates the particular snapshot with respect to the current ++ * cluster. If the snapshot has ganesha enabled, and the cluster is not an nfs- ++ * ganesha cluster, we fail the validation. In the other scenarios, where either ++ * the snapshot does not have ganesha enabled or it has and the cluster is an ++ * nfs-ganesha cluster, we pass the validation ++ * ++ * @param snap snap object of the snapshot to be validated ++ * @return Negative value on failure and 0 on success ++ */ ++int32_t ++glusterd_snapshot_validate_ganesha_conf (glusterd_snap_t *snap, ++ char **op_errstr, ++ uint32_t *op_errno) ++{ ++ int ret = -1; ++ glusterd_volinfo_t *snap_vol = NULL; ++ xlator_t *this = NULL; ++ ++ this = THIS; ++ GF_VALIDATE_OR_GOTO ("snapshot", this, out); ++ GF_VALIDATE_OR_GOTO (this->name, snap, out); ++ GF_VALIDATE_OR_GOTO (this->name, op_errstr, out); ++ GF_VALIDATE_OR_GOTO (this->name, op_errno, out); ++ ++ snap_vol = list_entry (snap->volumes.next, ++ glusterd_volinfo_t, vol_list); ++ ++ GF_VALIDATE_OR_GOTO (this->name, snap_vol, out); ++ ++ /* ++ * Check if the snapshot has ganesha enabled * ++ */ ++ if (glusterd_check_ganesha_export(snap_vol) == _gf_false) { ++ /* ++ * If the snapshot has not been exported via ganesha * ++ * then we can proceed. * ++ */ ++ ret = 0; ++ goto out; ++ } ++ ++ /* ++ * At this point we are certain that the snapshot has been exported * ++ * via ganesha. So we check if the cluster is an nfs-ganesha cluster * ++ * If it is an nfs-ganesha cluster, then we proceed. Else we fail. * ++ */ ++ if (glusterd_is_ganesha_cluster() != _gf_true) { ++ ret = gf_asprintf (op_errstr, "Snapshot(%s) has a " ++ "nfs-ganesha export conf file. " ++ "cluster.enable-shared-storage and " ++ "nfs-ganesha should be enabled " ++ "before restoring this snapshot.", ++ snap->snapname); ++ *op_errno = EG_NOGANESHA; ++ if (ret < 0) { ++ goto out; ++ } ++ ++ gf_msg (this->name, GF_LOG_ERROR, EINVAL, ++ GD_MSG_NFS_GANESHA_DISABLED, "%s", *op_errstr); ++ ret = -1; ++ goto out; ++ } ++ ++ ret = 0; ++out: ++ return ret; ++} ++ + /* This function is called before the actual restore takes place. This function + * will validate whether the snapshot volumes are ready to be restored or not. 
+ * +@@ -974,6 +1044,15 @@ glusterd_snapshot_restore_prevalidate (dict_t *dict, char **op_errstr, + goto out; + } + ++ ret = glusterd_snapshot_validate_ganesha_conf (snap, op_errstr, ++ op_errno); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_SNAPSHOT_OP_FAILED, ++ "ganesha conf validation failed."); ++ goto out; ++ } ++ + ret = dict_set_str (rsp_dict, "snapname", snapname); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, +@@ -5369,6 +5448,13 @@ glusterd_do_snap_vol (glusterd_volinfo_t *origin_vol, glusterd_snap_t *snap, + + } + ++ ret = glusterd_copy_nfs_ganesha_file (origin_vol, snap_vol); ++ if (ret < 0) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_VOL_OP_FAILED, "Failed to copy export " ++ "file for volume %s", origin_vol->volname); ++ goto out; ++ } + glusterd_auth_set_username (snap_vol, username); + glusterd_auth_set_password (snap_vol, password); + +@@ -9968,6 +10054,16 @@ gd_restore_snap_volume (dict_t *dict, dict_t *rsp_dict, + snap_vol->snapshot->snapname); + } + ++ ret = glusterd_restore_nfs_ganesha_file (orig_vol, snap); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_WARNING, 0, ++ GD_MSG_SNAP_RESTORE_FAIL, ++ "Failed to restore " ++ "nfs-ganesha export file for snap %s", ++ snap_vol->snapshot->snapname); ++ goto out; ++ } ++ + /* Need not save cksum, as we will copy cksum file in * + * this function * + */ +diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h +index 603151a..bf504e0 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-store.h ++++ b/xlators/mgmt/glusterd/src/glusterd-store.h +@@ -83,6 +83,7 @@ typedef enum glusterd_store_ver_ac_{ + #define GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT "snap-max-soft-limit" + #define GLUSTERD_STORE_KEY_SNAPD_PORT "snapd-port" + #define GLUSTERD_STORE_KEY_SNAP_ACTIVATE "snap-activate-on-create" ++#define GLUSTERD_STORE_KEY_GANESHA_GLOBAL "nfs-ganesha" + + #define GLUSTERD_STORE_KEY_BRICK_HOSTNAME "hostname" + #define GLUSTERD_STORE_KEY_BRICK_PATH "path" +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +index bec5f72..0914fb1 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +@@ -1737,6 +1737,16 @@ glusterd_op_stage_stop_volume (dict_t *dict, char **op_errstr) + ret = -1; + goto out; + } ++ ret = glusterd_check_ganesha_export (volinfo); ++ if (ret) { ++ ret = ganesha_manage_export(dict, "off", op_errstr); ++ if (ret) { ++ gf_msg (THIS->name, GF_LOG_WARNING, 0, ++ GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL, "Could not " ++ "unexport volume via NFS-Ganesha"); ++ ret = 0; ++ } ++ } + + if (glusterd_is_defrag_on (volinfo)) { + snprintf (msg, sizeof(msg), "rebalance session is " +@@ -2595,6 +2605,8 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr) + char *brick_mount_dir = NULL; + char key[PATH_MAX] = ""; + char *volname = NULL; ++ char *str = NULL; ++ gf_boolean_t option = _gf_false; + int flags = 0; + glusterd_volinfo_t *volinfo = NULL; + glusterd_brickinfo_t *brickinfo = NULL; +@@ -2657,6 +2669,28 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr) + } + } + ++ ret = dict_get_str (conf->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str); ++ if (ret != 0) { ++ gf_msg (this->name, GF_LOG_INFO, 0, ++ GD_MSG_DICT_GET_FAILED, "Global dict not present."); ++ ret = 0; ++ ++ } else { ++ ret = gf_string2boolean (str, &option); ++ /* Check if the feature is enabled and set nfs-disable to true */ ++ if (option) { ++ gf_msg_debug (this->name, 0, 
"NFS-Ganesha is enabled"); ++ /* Gluster-nfs should not start when NFS-Ganesha is enabled*/ ++ ret = dict_set_str (volinfo->dict, NFS_DISABLE_MAP_KEY, "on"); ++ if (ret) { ++ gf_msg (this->name, GF_LOG_ERROR, 0, ++ GD_MSG_DICT_SET_FAILED, "Failed to set nfs.disable for" ++ "volume %s", volname); ++ goto out; ++ } ++ } ++ } ++ + ret = glusterd_start_volume (volinfo, flags, _gf_true); + if (ret) + goto out; +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +index 2210b82..7fe76e5 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c +@@ -3210,6 +3210,12 @@ struct volopt_map_entry glusterd_volopt_map[] = { + .op_version = GD_OP_VERSION_3_7_0, + .flags = OPT_FLAG_CLIENT_OPT + }, ++ { .key = "ganesha.enable", ++ .voltype = "features/ganesha", ++ .value = "off", ++ .option = "ganesha.enable", ++ .op_version = GD_OP_VERSION_3_7_0, ++ }, + { .key = "features.shard", + .voltype = "features/shard", + .value = "off", +diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h +index 59b1775..2d8dbb9 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.h ++++ b/xlators/mgmt/glusterd/src/glusterd.h +@@ -57,6 +57,8 @@ + #define GLUSTERD_BRICKMUX_LIMIT_KEY "cluster.max-bricks-per-process" + #define GLUSTERD_LOCALTIME_LOGGING_KEY "cluster.localtime-logging" + ++#define GANESHA_HA_CONF CONFDIR "/ganesha-ha.conf" ++#define GANESHA_EXPORT_DIRECTORY CONFDIR"/exports" + #define GLUSTERD_SNAPS_MAX_HARD_LIMIT 256 + #define GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT 90 + #define GLUSTERD_SNAPS_MAX_SOFT_LIMIT_PERCENT 100 +@@ -117,7 +119,7 @@ typedef enum glusterd_op_ { + GD_OP_GSYNC_CREATE, + GD_OP_SNAP, + GD_OP_BARRIER, +- GD_OP_GANESHA, /* obsolete */ ++ GD_OP_GANESHA, + GD_OP_BITROT, + GD_OP_DETACH_TIER, + GD_OP_TIER_MIGRATE, +@@ -1168,8 +1170,20 @@ int glusterd_op_create_volume (dict_t *dict, char **op_errstr); + int glusterd_op_start_volume (dict_t *dict, char **op_errstr); + int glusterd_op_stop_volume (dict_t *dict); + int glusterd_op_delete_volume (dict_t *dict); ++int glusterd_handle_ganesha_op (dict_t *dict, char **op_errstr, ++ char *key, char *value); ++int glusterd_check_ganesha_cmd (char *key, char *value, ++ char **errstr, dict_t *dict); ++int glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr); ++int glusterd_op_set_ganesha (dict_t *dict, char **errstr); ++int ganesha_manage_export (dict_t *dict, char *value, char **op_errstr); + int manage_export_config (char *volname, char *value, char **op_errstr); + ++gf_boolean_t ++glusterd_is_ganesha_cluster (); ++gf_boolean_t glusterd_check_ganesha_export (glusterd_volinfo_t *volinfo); ++int stop_ganesha (char **op_errstr); ++int tear_down_cluster (gf_boolean_t run_teardown); + int glusterd_op_add_brick (dict_t *dict, char **op_errstr); + int glusterd_op_add_tier_brick (dict_t *dict, char **op_errstr); + int glusterd_op_remove_brick (dict_t *dict, char **op_errstr); +-- +1.8.3.1 + diff --git a/SOURCES/0051-Revert-storhaug-HA-first-step-remove-resource-agents.patch b/SOURCES/0051-Revert-storhaug-HA-first-step-remove-resource-agents.patch new file mode 100644 index 0000000..ab69430 --- /dev/null +++ b/SOURCES/0051-Revert-storhaug-HA-first-step-remove-resource-agents.patch @@ -0,0 +1,1878 @@ +From 5883eed6d1480d178205d4de42d023c8d144a4ea Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Mon, 16 Oct 2017 16:58:28 +0530 +Subject: [PATCH 51/74] Revert "storhaug HA: first step, remove resource agents + 
and setup script" + +This reverts commit c822e354e16646adf18bbc5123798663faa543b2. + +Change-Id: Idd50fe1a5be5a3258d560518d810f9ec4c57621a +Signed-off-by: Jiffin Tony Thottan +--- + configure.ac | 1 + + extras/ganesha/Makefile.am | 2 +- + extras/ganesha/ocf/Makefile.am | 12 + + extras/ganesha/ocf/ganesha_grace | 222 +++++++ + extras/ganesha/ocf/ganesha_mon | 235 +++++++ + extras/ganesha/ocf/ganesha_nfsd | 168 +++++ + extras/ganesha/scripts/Makefile.am | 6 +- + extras/ganesha/scripts/ganesha-ha.sh | 1126 ++++++++++++++++++++++++++++++++++ + glusterfs.spec.in | 4 +- + 9 files changed, 1772 insertions(+), 4 deletions(-) + create mode 100644 extras/ganesha/ocf/Makefile.am + create mode 100644 extras/ganesha/ocf/ganesha_grace + create mode 100644 extras/ganesha/ocf/ganesha_mon + create mode 100644 extras/ganesha/ocf/ganesha_nfsd + create mode 100644 extras/ganesha/scripts/ganesha-ha.sh + +diff --git a/configure.ac b/configure.ac +index c8e6e44..c9a1cde 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -210,6 +210,7 @@ AC_CONFIG_FILES([Makefile + extras/ganesha/Makefile + extras/ganesha/config/Makefile + extras/ganesha/scripts/Makefile ++ extras/ganesha/ocf/Makefile + extras/systemd/Makefile + extras/systemd/glusterd.service + extras/systemd/glustereventsd.service +diff --git a/extras/ganesha/Makefile.am b/extras/ganesha/Makefile.am +index 542de68..9eaa401 100644 +--- a/extras/ganesha/Makefile.am ++++ b/extras/ganesha/Makefile.am +@@ -1,2 +1,2 @@ +-SUBDIRS = scripts config ++SUBDIRS = scripts config ocf + CLEANFILES = +diff --git a/extras/ganesha/ocf/Makefile.am b/extras/ganesha/ocf/Makefile.am +new file mode 100644 +index 0000000..6aed954 +--- /dev/null ++++ b/extras/ganesha/ocf/Makefile.am +@@ -0,0 +1,12 @@ ++EXTRA_DIST= ganesha_grace ganesha_mon ganesha_nfsd ++ ++# The root of the OCF resource agent hierarchy ++# Per the OCF standard, it's always "lib", ++# not "lib64" (even on 64-bit platforms). ++ocfdir = $(prefix)/lib/ocf ++ ++# The provider directory ++radir = $(ocfdir)/resource.d/heartbeat ++ ++ra_SCRIPTS = ganesha_grace ganesha_mon ganesha_nfsd ++ +diff --git a/extras/ganesha/ocf/ganesha_grace b/extras/ganesha/ocf/ganesha_grace +new file mode 100644 +index 0000000..cb6dcc4 +--- /dev/null ++++ b/extras/ganesha/ocf/ganesha_grace +@@ -0,0 +1,222 @@ ++#!/bin/bash ++# ++# Copyright (c) 2014 Anand Subramanian anands@redhat.com ++# Copyright (c) 2015 Red Hat Inc. ++# All Rights Reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++# ++ ++# Initialization: ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++if [ -n "$OCF_DEBUG_LIBRARY" ]; then ++ . $OCF_DEBUG_LIBRARY ++else ++ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++fi ++ ++OCF_RESKEY_grace_active_default="grace-active" ++: ${OCF_RESKEY_grace_active=${OCF_RESKEY_grace_active_default}} ++ ++ganesha_meta_data() { ++ cat <<END ++ ++ ++1.0 ++ ++ ++This Linux-specific resource agent acts as a dummy ++resource agent for nfs-ganesha. ++ ++ ++Manages the user-space nfs-ganesha NFS server ++ ++ ++ ++NFS-Ganesha grace active attribute ++NFS-Ganesha grace active attribute ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++ ++return ${OCF_SUCCESS} ++} ++ ++ganesha_grace_usage() { ++ echo "ganesha.nfsd USAGE" ++} ++ ++# Make sure meta-data and usage always succeed ++case $__OCF_ACTION in ++ meta-data) ganesha_meta_data ++ exit ${OCF_SUCCESS} ++ ;; ++ usage|help) ganesha_grace_usage ++ exit ${OCF_SUCCESS} ++ ;; ++ *) ++ ;; ++esac ++ ++ganesha_grace_start() ++{ ++ local rc=${OCF_ERR_GENERIC} ++ local host=$(hostname -s) ++ ++ ocf_log debug "ganesha_grace_start()" ++ # give ganesha_mon RA a chance to set the crm_attr first ++ # I mislike the sleep, but it's not clear that looping ++ # with a small sleep is necessarily better ++ # start has a 40sec timeout, so a 5sec sleep here is okay ++ sleep 5 ++ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null) ++ if [ $? -ne 0 ]; then ++ host=$(hostname) ++ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null ) ++ if [ $? -ne 0 ]; then ++ ocf_log info "grace start: crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} failed" ++ fi ++ fi ++ ++ # Three possibilities: ++ # 1. There is no attribute at all and attr_updater returns ++ # a zero length string. This happens when ++ # ganesha_mon::monitor hasn't run at least once to set ++ # the attribute. The assumption here is that the system ++ # is coming up. We pretend, for now, that the node is ++ # healthy, to allow the system to continue coming up. ++ # It will cure itself in a few seconds ++ # 2. There is an attribute, and it has the value "1"; this ++ # node is healthy. ++ # 3. There is an attribute, but it has no value or the value ++ # "0"; this node is not healthy. ++ ++ # case 1 ++ if [[ -z "${attr}" ]]; then ++ return ${OCF_SUCCESS} ++ fi ++ ++ # case 2 ++ if [[ "${attr}" = *"value=1" ]]; then ++ return ${OCF_SUCCESS} ++ fi ++ ++ # case 3 ++ return ${OCF_NOT_RUNNING} ++} ++ ++ganesha_grace_stop() ++{ ++ ++ ocf_log debug "ganesha_grace_stop()" ++ return ${OCF_SUCCESS} ++} ++ ++ganesha_grace_notify() ++{ ++ # since this is a clone RA we should only ever see pre-start ++ # or post-stop ++ mode="${OCF_RESKEY_CRM_meta_notify_type}-${OCF_RESKEY_CRM_meta_notify_operation}" ++ case "${mode}" in ++ pre-start | post-stop) ++ dbus-send --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.grace string:${OCF_RESKEY_CRM_meta_notify_stop_uname} ++ if [ $? -ne 0 ]; then ++ ocf_log info "dbus-send --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.grace string:${OCF_RESKEY_CRM_meta_notify_stop_uname} failed" ++ fi ++ ;; ++ esac ++ ++ return ${OCF_SUCCESS} ++} ++ ++ganesha_grace_monitor() ++{ ++ local host=$(hostname -s) ++ ++ ocf_log debug "monitor" ++ ++ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null) ++ if [ $? 
-ne 0 ]; then ++ host=$(hostname) ++ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null) ++ if [ $? -ne 0 ]; then ++ ocf_log info "crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} failed" ++ fi ++ fi ++ ++ # if there is no attribute (yet), maybe it's because ++ # this RA started before ganesha_mon (nfs-mon) has had ++ # a chance to create it. In which case we'll pretend ++ # everything is okay this time around ++ if [[ -z "${attr}" ]]; then ++ return ${OCF_SUCCESS} ++ fi ++ ++ if [[ "${attr}" = *"value=1" ]]; then ++ return ${OCF_SUCCESS} ++ fi ++ ++ return ${OCF_NOT_RUNNING} ++} ++ ++ganesha_grace_validate() ++{ ++ return ${OCF_SUCCESS} ++} ++ ++ganesha_grace_validate ++ ++# Translate each action into the appropriate function call ++case $__OCF_ACTION in ++start) ganesha_grace_start ++ ;; ++stop) ganesha_grace_stop ++ ;; ++status|monitor) ganesha_grace_monitor ++ ;; ++notify) ganesha_grace_notify ++ ;; ++*) ganesha_grace_usage ++ exit ${OCF_ERR_UNIMPLEMENTED} ++ ;; ++esac ++ ++rc=$? ++ ++# The resource agent may optionally log a debug message ++ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc" ++exit $rc ++ +diff --git a/extras/ganesha/ocf/ganesha_mon b/extras/ganesha/ocf/ganesha_mon +new file mode 100644 +index 0000000..7d2c268 +--- /dev/null ++++ b/extras/ganesha/ocf/ganesha_mon +@@ -0,0 +1,235 @@ ++#!/bin/bash ++# ++# Copyright (c) 2014 Anand Subramanian anands@redhat.com ++# Copyright (c) 2015 Red Hat Inc. ++# All Rights Reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++# ++ ++# Initialization: ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++if [ -n "${OCF_DEBUG_LIBRARY}" ]; then ++ . ${OCF_DEBUG_LIBRARY} ++else ++ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++fi ++ ++# Defaults ++OCF_RESKEY_ganesha_active_default="ganesha-active" ++OCF_RESKEY_grace_active_default="grace-active" ++OCF_RESKEY_grace_delay_default="5" ++ ++: ${OCF_RESKEY_ganesha_active=${OCF_RESKEY_ganesha_active_default}} ++: ${OCF_RESKEY_grace_active=${OCF_RESKEY_grace_active_default}} ++: ${OCF_RESKEY_grace_delay=${OCF_RESKEY_grace_delay_default}} ++ ++ganesha_meta_data() { ++ cat <<END ++ ++ ++1.0 ++ ++ ++This Linux-specific resource agent acts as a dummy ++resource agent for nfs-ganesha. 
++ ++ ++Manages the user-space nfs-ganesha NFS server ++ ++ ++ ++NFS-Ganesha daemon active attribute ++NFS-Ganesha daemon active attribute ++ ++ ++ ++NFS-Ganesha grace active attribute ++NFS-Ganesha grace active attribute ++ ++ ++ ++ ++NFS-Ganesha grace delay. ++When changing this, adjust the ganesha_grace RA's monitor interval to match. ++ ++NFS-Ganesha grace delay ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++ ++return ${OCF_SUCCESS} ++} ++ ++ganesha_mon_usage() { ++ echo "ganesha.nfsd USAGE" ++} ++ ++# Make sure meta-data and usage always succeed ++case ${__OCF_ACTION} in ++ meta-data) ganesha_meta_data ++ exit ${OCF_SUCCESS} ++ ;; ++ usage|help) ganesha_mon_usage ++ exit ${OCF_SUCCESS} ++ ;; ++ *) ++ ;; ++esac ++ ++ganesha_mon_start() ++{ ++ ocf_log debug "ganesha_mon_start" ++ ganesha_mon_monitor ++ return $OCF_SUCCESS ++} ++ ++ganesha_mon_stop() ++{ ++ ocf_log debug "ganesha_mon_stop" ++ return $OCF_SUCCESS ++} ++ ++ganesha_mon_monitor() ++{ ++ local host=$(hostname -s) ++ local pid_file="/var/run/ganesha.pid" ++ local rhel6_pid_file="/var/run/ganesha.nfsd.pid" ++ local proc_pid="/proc/" ++ ++ # RHEL6 /etc/init.d/nfs-ganesha adds -p /var/run/ganesha.nfsd.pid ++ # RHEL7 systemd does not. Would be nice if all distros used the ++ # same pid file. ++ if [ -e ${rhel6_pid_file} ]; then ++ pid_file=${rhel6_pid_file} ++ fi ++ if [ -e ${pid_file} ]; then ++ proc_pid="${proc_pid}$(cat ${pid_file})" ++ fi ++ ++ if [ "x${proc_pid}" != "x/proc/" -a -d ${proc_pid} ]; then ++ ++ attrd_updater -n ${OCF_RESKEY_ganesha_active} -v 1 ++ if [ $? -ne 0 ]; then ++ ocf_log info "warning: attrd_updater -n ${OCF_RESKEY_ganesha_active} -v 1 failed" ++ fi ++ ++ # ganesha_grace (nfs-grace) RA follows grace-active attr ++ # w/ constraint location ++ attrd_updater -n ${OCF_RESKEY_grace_active} -v 1 ++ if [ $? -ne 0 ]; then ++ ocf_log info "warning: attrd_updater -n ${OCF_RESKEY_grace_active} -v 1 failed" ++ fi ++ ++ # ganesha_mon (nfs-mon) and ganesha_grace (nfs-grace) ++ # track grace-active crm_attr (attr != crm_attr) ++ # we can't just use the attr as there's no way to query ++ # its value in RHEL6 pacemaker ++ ++ crm_attribute --node=${host} --lifetime=forever --name=${OCF_RESKEY_grace_active} --update=1 2> /dev/null ++ if [ $? 
-ne 0 ]; then ++ host=$(hostname) ++ crm_attribute --node=${host} --lifetime=forever --name=${OCF_RESKEY_grace_active} --update=1 2> /dev/null ++ if [ $? -ne 0 ]; then ++ ocf_log info "mon monitor warning: crm_attribute --node=${host} --lifetime=forever --name=${OCF_RESKEY_grace_active} --update=1 failed" ++ fi ++ fi ++ ++ return ${OCF_SUCCESS} ++ fi ++ ++ # VIP fail-over is triggered by clearing the ++ # ganesha-active node attribute on this node. ++ # ++ # Meanwhile the ganesha_grace notify() runs when its ++ # nfs-grace resource is disabled on a node; which ++ # is triggered by clearing the grace-active attribute ++ # on this node. ++ # ++ # We need to allow time for it to run and put ++ # the remaining ganesha.nfsds into grace before ++ # initiating the VIP fail-over. ++ ++ attrd_updater -D -n ${OCF_RESKEY_grace_active} ++ if [ $? -ne 0 ]; then ++ ocf_log info "warning: attrd_updater -D -n ${OCF_RESKEY_grace_active} failed" ++ fi ++ ++ host=$(hostname -s) ++ crm_attribute --node=${host} --name=${OCF_RESKEY_grace_active} --update=0 2> /dev/null ++ if [ $? -ne 0 ]; then ++ host=$(hostname) ++ crm_attribute --node=${host} --name=${OCF_RESKEY_grace_active} --update=0 2> /dev/null ++ if [ $? -ne 0 ]; then ++ ocf_log info "mon monitor warning: crm_attribute --node=${host} --name=${OCF_RESKEY_grace_active} --update=0 failed" ++ fi ++ fi ++ ++ sleep ${OCF_RESKEY_grace_delay} ++ ++ attrd_updater -D -n ${OCF_RESKEY_ganesha_active} ++ if [ $? -ne 0 ]; then ++ ocf_log info "warning: attrd_updater -D -n ${OCF_RESKEY_ganesha_active} failed" ++ fi ++ ++ return ${OCF_SUCCESS} ++} ++ ++ganesha_mon_validate() ++{ ++ return ${OCF_SUCCESS} ++} ++ ++ganesha_mon_validate ++ ++# Translate each action into the appropriate function call ++case ${__OCF_ACTION} in ++start) ganesha_mon_start ++ ;; ++stop) ganesha_mon_stop ++ ;; ++status|monitor) ganesha_mon_monitor ++ ;; ++*) ganesha_mon_usage ++ exit ${OCF_ERR_UNIMPLEMENTED} ++ ;; ++esac ++ ++rc=$? ++ ++# The resource agent may optionally log a debug message ++ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc" ++exit $rc ++ +diff --git a/extras/ganesha/ocf/ganesha_nfsd b/extras/ganesha/ocf/ganesha_nfsd +new file mode 100644 +index 0000000..29e333c +--- /dev/null ++++ b/extras/ganesha/ocf/ganesha_nfsd +@@ -0,0 +1,168 @@ ++#!/bin/bash ++# ++# Copyright (c) 2014 Anand Subramanian anands@redhat.com ++# Copyright (c) 2015 Red Hat Inc. ++# All Rights Reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of version 2 of the GNU General Public License as ++# published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it would be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++# ++# Further, this software is distributed without any warranty that it is ++# free of the rightful claim of any third person regarding infringement ++# or the like. Any license provided herein, whether implied or ++# otherwise, applies only to this software file. Patent licenses, if ++# any, provided herein do not apply to combinations of this program with ++# other software, or any other product whatsoever. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write the Free Software Foundation, ++# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++# ++# ++ ++# Initialization: ++: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++ ++if [ -n "${OCF_DEBUG_LIBRARY}" ]; then ++ . ${OCF_DEBUG_LIBRARY} ++else ++ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} ++ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ++fi ++ ++OCF_RESKEY_ha_vol_mnt_default="/var/run/gluster/shared_storage" ++: ${OCF_RESKEY_ha_vol_mnt=${OCF_RESKEY_ha_vol_mnt_default}} ++ ++ganesha_meta_data() { ++ cat <<END ++ ++ ++1.0 ++ ++ ++This Linux-specific resource agent acts as a dummy ++resource agent for nfs-ganesha. ++ ++ ++Manages the user-space nfs-ganesha NFS server ++ ++ ++ ++HA State Volume Mount Point ++HA_State Volume Mount Point ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++END ++ ++return ${OCF_SUCCESS} ++} ++ ++ganesha_nfsd_usage() { ++ echo "ganesha.nfsd USAGE" ++} ++ ++# Make sure meta-data and usage always succeed ++case $__OCF_ACTION in ++ meta-data) ganesha_meta_data ++ exit ${OCF_SUCCESS} ++ ;; ++ usage|help) ganesha_nfsd_usage ++ exit ${OCF_SUCCESS} ++ ;; ++ *) ++ ;; ++esac ++ ++ganesha_nfsd_start() ++{ ++ local long_host=$(hostname) ++ ++ if [[ -d /var/lib/nfs ]]; then ++ mv /var/lib/nfs /var/lib/nfs.backup ++ if [ $? 
-ne 0 ]; then ++ ocf_log notice "mv /var/lib/nfs /var/lib/nfs.backup failed" ++ fi ++ ln -s ${OCF_RESKEY_ha_vol_mnt}/nfs-ganesha/${long_host}/nfs /var/lib/nfs ++ if [ $? -ne 0 ]; then ++ ocf_log notice "ln -s ${OCF_RESKEY_ha_vol_mnt}/nfs-ganesha/${long_host}/nfs /var/lib/nfs failed" ++ fi ++ fi ++ ++ return ${OCF_SUCCESS} ++} ++ ++ganesha_nfsd_stop() ++{ ++ ++ if [ -L /var/lib/nfs -a -d /var/lib/nfs.backup ]; then ++ rm -f /var/lib/nfs ++ if [ $? -ne 0 ]; then ++ ocf_log notice "rm -f /var/lib/nfs failed" ++ fi ++ mv /var/lib/nfs.backup /var/lib/nfs ++ if [ $? -ne 0 ]; then ++ ocf_log notice "mv /var/lib/nfs.backup /var/lib/nfs failed" ++ fi ++ fi ++ ++ return ${OCF_SUCCESS} ++} ++ ++ganesha_nfsd_monitor() ++{ ++ # pacemaker checks to see if RA is already running before starting it. ++ # if we return success, then it's presumed it's already running and ++ # doesn't need to be started, i.e. invoke the start action. ++ # return something other than success to make pacemaker invoke the ++ # start action ++ if [[ -L /var/lib/nfs ]]; then ++ return ${OCF_SUCCESS} ++ fi ++ return ${OCF_NOT_RUNNING} ++} ++ ++ganesha_nfsd_validate() ++{ ++ return ${OCF_SUCCESS} ++} ++ ++ganesha_nfsd_validate ++ ++# ocf_log notice "ganesha_nfsd ${OCF_RESOURCE_INSTANCE} $__OCF_ACTION" ++ ++# Translate each action into the appropriate function call ++case $__OCF_ACTION in ++start) ganesha_nfsd_start ++ ;; ++stop) ganesha_nfsd_stop ++ ;; ++status|monitor) ganesha_nfsd_monitor ++ ;; ++*) ganesha_nfsd_usage ++ exit ${OCF_ERR_UNIMPLEMENTED} ++ ;; ++esac ++ ++rc=$? ++ ++# The resource agent may optionally log a debug message ++ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc" ++exit $rc ++ +diff --git a/extras/ganesha/scripts/Makefile.am b/extras/ganesha/scripts/Makefile.am +index 9ee8867..224ed26 100644 +--- a/extras/ganesha/scripts/Makefile.am ++++ b/extras/ganesha/scripts/Makefile.am +@@ -1,4 +1,6 @@ +-EXTRA_DIST= create-export-ganesha.sh generate-epoch.py dbus-send.sh ++EXTRA_DIST= ganesha-ha.sh dbus-send.sh create-export-ganesha.sh \ ++ generate-epoch.py + + scriptsdir = $(libexecdir)/ganesha +-scripts_SCRIPTS = create-export-ganesha.sh generate-epoch.py ++scripts_SCRIPTS = create-export-ganesha.sh dbus-send.sh ganesha-ha.sh \ ++ generate-epoch.py +diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh +new file mode 100644 +index 0000000..e4135ba +--- /dev/null ++++ b/extras/ganesha/scripts/ganesha-ha.sh +@@ -0,0 +1,1126 @@ ++#!/bin/bash ++ ++# Copyright 2015-2016 Red Hat Inc. All Rights Reserved ++# ++# Pacemaker+Corosync High Availability for NFS-Ganesha ++# ++# setup, teardown, add, delete, refresh-config, and status ++# ++# Each participating node in the cluster is assigned a virtual IP (VIP) ++# which fails over to another node when its associated ganesha.nfsd dies ++# for any reason. After the VIP is moved to another node all the ++# ganesha.nfsds are sent a signal using DBUS to put them into NFS GRACE. ++# ++# There are six resource agent types used: ganesha_mon, ganesha_grace, ++# ganesha_nfsd, portblock, IPaddr, and Dummy. ganesha_mon is used to monitor ++# the ganesha.nfsd. ganesha_grace is used to send the DBUS signal to put ++# the remaining ganesha.nfsds into grace. ganesha_nfsd is used to start ++# and stop the ganesha.nfsd during setup and teardown. IPaddr manages ++# the VIP. A Dummy resource named $hostname-trigger_ip-1 is used to ++# ensure that the NFS GRACE DBUS signal is sent after the VIP moves to ++# the new host. 
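++# ++# For illustration, the GRACE signal mentioned above is the DBus call ++# issued by ganesha_grace_notify() in the ganesha_grace RA; the host name ++# argument is a placeholder for whichever node just stopped: ++# ++# dbus-send --print-reply --system --dest=org.ganesha.nfsd \ ++# /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.grace \ ++# string:${stopped_host}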
++ ++HA_NUM_SERVERS=0 ++HA_SERVERS="" ++HA_VOL_NAME="gluster_shared_storage" ++HA_VOL_MNT="/var/run/gluster/shared_storage" ++HA_CONFDIR=$HA_VOL_MNT"/nfs-ganesha" ++SERVICE_MAN="DISTRO_NOT_FOUND" ++ ++RHEL6_PCS_CNAME_OPTION="--name" ++SECRET_PEM="/var/lib/glusterd/nfs/secret.pem" ++ ++# UNBLOCK RA uses shared_storage which may become unavailable ++# during any of the nodes reboot. Hence increase timeout value. ++PORTBLOCK_UNBLOCK_TIMEOUT="60s" ++ ++# Try loading the config from any of the distro ++# specific configuration locations ++if [ -f /etc/sysconfig/ganesha ] ++ then ++ . /etc/sysconfig/ganesha ++fi ++if [ -f /etc/conf.d/ganesha ] ++ then ++ . /etc/conf.d/ganesha ++fi ++if [ -f /etc/default/ganesha ] ++ then ++ . /etc/default/ganesha ++fi ++ ++GANESHA_CONF= ++ ++function find_rhel7_conf ++{ ++ while [[ $# > 0 ]] ++ do ++ key="$1" ++ case $key in ++ -f) ++ CONFFILE="$2" ++ break; ++ ;; ++ *) ++ ;; ++ esac ++ shift ++ done ++} ++ ++if [ -z $CONFFILE ] ++ then ++ find_rhel7_conf $OPTIONS ++ ++fi ++ ++GANESHA_CONF=${CONFFILE:-/etc/ganesha/ganesha.conf} ++ ++usage() { ++ ++ echo "Usage : add|delete|refresh-config|status" ++ echo "Add-node : ganesha-ha.sh --add <HA_CONF_DIR> \ ++<NODE-HOSTNAME> <NODE-VIP>" ++ echo "Delete-node: ganesha-ha.sh --delete <HA_CONF_DIR> \ ++<NODE-HOSTNAME>" ++ echo "Refresh-config : ganesha-ha.sh --refresh-config <HA_CONFDIR> \ ++<volume>" ++ echo "Status : ganesha-ha.sh --status <HA_CONFDIR>" ++}
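++ ++# Illustrative invocations with placeholder host name and VIP (the conf ++# dir below is HA_CONFDIR as set above): ++# ganesha-ha.sh --add /var/run/gluster/shared_storage/nfs-ganesha \ ++# new-node 10.0.0.5 ++# ganesha-ha.sh --status /var/run/gluster/shared_storage/nfs-ganesha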
servers=${3} ++ local unclean="" ++ local quorum_policy="stop" ++ ++ logger "setting up cluster ${name} with the following ${servers}" ++ ++ pcs cluster auth ${servers} ++ # pcs cluster setup --name ${name} ${servers} ++ pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --transport udpu ${servers} ++ if [ $? -ne 0 ]; then ++ logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} ${servers} failed" ++ exit 1; ++ fi ++ pcs cluster start --all ++ if [ $? -ne 0 ]; then ++ logger "pcs cluster start failed" ++ exit 1; ++ fi ++ ++ sleep 1 ++ # wait for the cluster to elect a DC before querying or writing ++ # to the CIB. BZ 1334092 ++ crmadmin --dc_lookup --timeout=5000 > /dev/null 2>&1 ++ while [ $? -ne 0 ]; do ++ crmadmin --dc_lookup --timeout=5000 > /dev/null 2>&1 ++ done ++ ++ unclean=$(pcs status | grep -u "UNCLEAN") ++ while [[ "${unclean}X" = "UNCLEANX" ]]; do ++ sleep 1 ++ unclean=$(pcs status | grep -u "UNCLEAN") ++ done ++ sleep 1 ++ ++ if [ ${num_servers} -lt 3 ]; then ++ quorum_policy="ignore" ++ fi ++ pcs property set no-quorum-policy=${quorum_policy} ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs property set no-quorum-policy=${quorum_policy} failed" ++ fi ++ ++ pcs property set stonith-enabled=false ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs property set stonith-enabled=false failed" ++ fi ++} ++ ++ ++setup_finalize_ha() ++{ ++ local cibfile=${1} ++ local stopped="" ++ ++ stopped=$(pcs status | grep -u "Stopped") ++ while [[ "${stopped}X" = "StoppedX" ]]; do ++ sleep 1 ++ stopped=$(pcs status | grep -u "Stopped") ++ done ++} ++ ++ ++refresh_config () ++{ ++ local short_host=$(hostname -s) ++ local VOL=${1} ++ local HA_CONFDIR=${2} ++ local short_host=$(hostname -s) ++ ++ local export_id=$(grep ^[[:space:]]*Export_Id $HA_CONFDIR/exports/export.$VOL.conf |\ ++ awk -F"[=,;]" '{print $2}' | tr -d '[[:space:]]') ++ ++ ++ if [ -e ${SECRET_PEM} ]; then ++ while [[ ${3} ]]; do ++ current_host=`echo ${3} | cut -d "." -f 1` ++ if [ ${short_host} != ${current_host} ]; then ++ output=$(ssh -oPasswordAuthentication=no \ ++-oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \ ++"dbus-send --print-reply --system --dest=org.ganesha.nfsd \ ++/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.UpdateExport \ ++string:$HA_CONFDIR/exports/export.$VOL.conf \ ++string:\"EXPORT(Export_Id=$export_id)\" 2>&1") ++ ret=$? ++ logger <<< "${output}" ++ if [ ${ret} -ne 0 ]; then ++ echo "Error: refresh-config failed on ${current_host}." ++ exit 1 ++ else ++ echo "Refresh-config completed on ${current_host}." ++ fi ++ ++ fi ++ shift ++ done ++ else ++ echo "Error: refresh-config failed. Passwordless ssh is not enabled." ++ exit 1 ++ fi ++ ++ # Run the same command on the localhost, ++ output=$(dbus-send --print-reply --system --dest=org.ganesha.nfsd \ ++/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.UpdateExport \ ++string:$HA_CONFDIR/exports/export.$VOL.conf \ ++string:"EXPORT(Export_Id=$export_id)" 2>&1) ++ ret=$? ++ logger <<< "${output}" ++ if [ ${ret} -ne 0 ] ; then ++ echo "Error: refresh-config failed on localhost." ++ exit 1 ++ else ++ echo "Success: refresh-config completed." ++ fi ++} ++ ++ ++teardown_cluster() ++{ ++ local name=${1} ++ ++ for server in ${HA_SERVERS} ; do ++ if [[ ${HA_CLUSTER_NODES} != *${server}* ]]; then ++ logger "info: ${server} is not in config, removing" ++ ++ pcs cluster stop ${server} --force ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs cluster stop ${server} failed" ++ fi ++ ++ pcs cluster node remove ${server} ++ if [ $? 
-ne 0 ]; then ++ logger "warning: pcs cluster node remove ${server} failed" ++ fi ++ fi ++ done ++ ++ # BZ 1193433 - pcs doesn't reload cluster.conf after modification ++ # after teardown completes, a subsequent setup will appear to have ++ # 'remembered' the deleted node. You can work around this by ++ # issuing another `pcs cluster node remove $node`, ++ # `crm_node -f -R $server`, or ++ # `cibadmin --delete --xml-text '' ++ ++ pcs cluster stop --all ++ if [ $? -ne 0 ]; then ++ logger "warning pcs cluster stop --all failed" ++ fi ++ ++ pcs cluster destroy ++ if [ $? -ne 0 ]; then ++ logger "error pcs cluster destroy failed" ++ exit 1 ++ fi ++} ++ ++ ++cleanup_ganesha_config () ++{ ++ rm -f /etc/corosync/corosync.conf ++ rm -rf /etc/cluster/cluster.conf* ++ rm -rf /var/lib/pacemaker/cib/* ++} ++ ++do_create_virt_ip_constraints() ++{ ++ local cibfile=${1}; shift ++ local primary=${1}; shift ++ local weight="1000" ++ ++ # first a constraint location rule that says the VIP must be where ++ # there's a ganesha.nfsd running ++ pcs -f ${cibfile} constraint location ${primary}-group rule score=-INFINITY ganesha-active ne 1 ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs constraint location ${primary}-group rule score=-INFINITY ganesha-active ne 1 failed" ++ fi ++ ++ # then a set of constraint location prefers to set the prefered order ++ # for where a VIP should move ++ while [[ ${1} ]]; do ++ pcs -f ${cibfile} constraint location ${primary}-group prefers ${1}=${weight} ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs constraint location ${primary}-group prefers ${1}=${weight} failed" ++ fi ++ weight=$(expr ${weight} + 1000) ++ shift ++ done ++ # and finally set the highest preference for the VIP to its home node ++ # default weight when created is/was 100. ++ # on Fedora setting appears to be additive, so to get the desired ++ # value we adjust the weight ++ # weight=$(expr ${weight} - 100) ++ pcs -f ${cibfile} constraint location ${primary}-group prefers ${primary}=${weight} ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs constraint location ${primary}-group prefers ${primary}=${weight} failed" ++ fi ++} ++ ++ ++wrap_create_virt_ip_constraints() ++{ ++ local cibfile=${1}; shift ++ local primary=${1}; shift ++ local head="" ++ local tail="" ++ ++ # build a list of peers, e.g. for a four node cluster, for node1, ++ # the result is "node2 node3 node4"; for node2, "node3 node4 node1" ++ # and so on. ++ while [[ ${1} ]]; do ++ if [ "${1}" = "${primary}" ]; then ++ shift ++ while [[ ${1} ]]; do ++ tail=${tail}" "${1} ++ shift ++ done ++ else ++ head=${head}" "${1} ++ fi ++ shift ++ done ++ do_create_virt_ip_constraints ${cibfile} ${primary} ${tail} ${head} ++} ++ ++ ++create_virt_ip_constraints() ++{ ++ local cibfile=${1}; shift ++ ++ while [[ ${1} ]]; do ++ wrap_create_virt_ip_constraints ${cibfile} ${1} ${HA_SERVERS} ++ shift ++ done ++} ++ ++ ++setup_create_resources() ++{ ++ local cibfile=$(mktemp -u) ++ ++ # fixup /var/lib/nfs ++ logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone" ++ pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone failed" ++ fi ++ ++ pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone ++ if [ $? 
-ne 0 ]; then ++ logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone failed" ++ fi ++ ++ # see comment in (/usr/lib/ocf/resource.d/heartbeat/ganesha_grace ++ # start method. Allow time for ganesha_mon to start and set the ++ # ganesha-active crm_attribute ++ sleep 5 ++ ++ pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone meta notify=true ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone failed" ++ fi ++ ++ pcs constraint location nfs-grace-clone rule score=-INFINITY grace-active ne 1 ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs constraint location nfs-grace-clone rule score=-INFINITY grace-active ne 1" ++ fi ++ ++ pcs cluster cib ${cibfile} ++ ++ while [[ ${1} ]]; do ++ ++ # this is variable indirection ++ # from a nvs like 'VIP_host1=10.7.6.5' or 'VIP_host1="10.7.6.5"' ++ # (or VIP_host-1=..., or VIP_host-1.my.domain.name=...) ++ # a variable 'clean_name' is created (e.g. w/ value 'VIP_host_1') ++ # and a clean nvs (e.g. w/ value 'VIP_host_1="10_7_6_5"') ++ # after the `eval ${clean_nvs}` there is a variable VIP_host_1 ++ # with the value '10_7_6_5', and the following \$$ magic to ++ # reference it, i.e. `eval tmp_ipaddr=\$${clean_name}` gives us ++ # ${tmp_ipaddr} with 10_7_6_5 and then convert the _s back to .s ++ # to give us ipaddr="10.7.6.5". whew! ++ name="VIP_${1}" ++ clean_name=${name//[-.]/_} ++ nvs=$(grep "^${name}=" ${HA_CONFDIR}/ganesha-ha.conf) ++ clean_nvs=${nvs//[-.]/_} ++ eval ${clean_nvs} ++ eval tmp_ipaddr=\$${clean_name} ++ ipaddr=${tmp_ipaddr//_/.} ++ ++ pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \ ++ portno=2049 action=block ip=${ipaddr} --group ${1}-group ++ if [ $? -ne 0 ]; then ++ logger "warning pcs resource create ${1}-nfs_block failed" ++ fi ++ pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \ ++ cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block ++ if [ $? -ne 0 ]; then ++ logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \ ++ cidr_netmask=32 op monitor interval=15s failed" ++ fi ++ ++ pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1 ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed" ++ fi ++ ++ pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \ ++ portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \ ++ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \ ++ op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \ ++ op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} ++ if [ $? -ne 0 ]; then ++ logger "warning pcs resource create ${1}-nfs_unblock failed" ++ fi ++ ++ ++ shift ++ done ++ ++ create_virt_ip_constraints ${cibfile} ${HA_SERVERS} ++ ++ pcs cluster cib-push ${cibfile} ++ if [ $? -ne 0 ]; then ++ logger "warning pcs cluster cib-push ${cibfile} failed" ++ fi ++ rm -f ${cibfile} ++} ++ ++ ++teardown_resources() ++{ ++ # local mntpt=$(grep ha-vol-mnt ${HA_CONFIG_FILE} | cut -d = -f 2) ++ ++ # restore /var/lib/nfs ++ logger "notice: pcs resource delete nfs_setup-clone" ++ pcs resource delete nfs_setup-clone ++ if [ $? 
-ne 0 ]; then ++ logger "warning: pcs resource delete nfs_setup-clone failed" ++ fi ++ ++ # delete -clone resource agents ++ # in particular delete the ganesha monitor so we don't try to ++ # trigger anything when we shut down ganesha next. ++ pcs resource delete nfs-mon-clone ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs resource delete nfs-mon-clone failed" ++ fi ++ ++ pcs resource delete nfs-grace-clone ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs resource delete nfs-grace-clone failed" ++ fi ++ ++ while [[ ${1} ]]; do ++ pcs resource delete ${1}-group ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs resource delete ${1}-group failed" ++ fi ++ shift ++ done ++ ++} ++ ++ ++recreate_resources() ++{ ++ local cibfile=${1}; shift ++ ++ while [[ ${1} ]]; do ++ # this is variable indirection ++ # see the comment on the same a few lines up ++ name="VIP_${1}" ++ clean_name=${name//[-.]/_} ++ nvs=$(grep "^${name}=" ${HA_CONFDIR}/ganesha-ha.conf) ++ clean_nvs=${nvs//[-.]/_} ++ eval ${clean_nvs} ++ eval tmp_ipaddr=\$${clean_name} ++ ipaddr=${tmp_ipaddr//_/.} ++ ++ pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \ ++ portno=2049 action=block ip=${ipaddr} --group ${1}-group ++ if [ $? -ne 0 ]; then ++ logger "warning pcs resource create ${1}-nfs_block failed" ++ fi ++ pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \ ++ cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block ++ if [ $? -ne 0 ]; then ++ logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \ ++ cidr_netmask=32 op monitor interval=15s failed" ++ fi ++ ++ pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1 ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed" ++ fi ++ ++ pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \ ++ portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \ ++ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \ ++ op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \ ++ op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} ++ if [ $? -ne 0 ]; then ++ logger "warning pcs resource create ${1}-nfs_unblock failed" ++ fi ++ ++ shift ++ done ++} ++ ++ ++addnode_recreate_resources() ++{ ++ local cibfile=${1}; shift ++ local add_node=${1}; shift ++ local add_vip=${1}; shift ++ ++ recreate_resources ${cibfile} ${HA_SERVERS} ++ ++ pcs -f ${cibfile} resource create ${add_node}-nfs_block ocf:heartbeat:portblock \ ++ protocol=tcp portno=2049 action=block ip=${add_vip} --group ${add_node}-group ++ if [ $? -ne 0 ]; then ++ logger "warning pcs resource create ${add_node}-nfs_block failed" ++ fi ++ pcs -f ${cibfile} resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \ ++ ip=${add_vip} cidr_netmask=32 op monitor interval=15s --group ${add_node}-group \ ++ --after ${add_node}-nfs_block ++ if [ $? -ne 0 ]; then ++ logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \ ++ ip=${add_vip} cidr_netmask=32 op monitor interval=15s failed" ++ fi ++ ++ pcs -f ${cibfile} constraint order nfs-grace-clone then ${add_node}-cluster_ip-1 ++ if [ $? 
-ne 0 ]; then ++ logger "warning: pcs constraint order nfs-grace-clone then ${add_node}-cluster_ip-1 failed" ++ fi ++ pcs -f ${cibfile} resource create ${add_node}-nfs_unblock ocf:heartbeat:portblock \ ++ protocol=tcp portno=2049 action=unblock ip=${add_vip} reset_local_on_unblock_stop=true \ ++ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${add_node}-group --after \ ++ ${add_node}-cluster_ip-1 op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start \ ++ timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op monitor interval=10s \ ++ timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} ++ if [ $? -ne 0 ]; then ++ logger "warning pcs resource create ${add_node}-nfs_unblock failed" ++ fi ++} ++ ++ ++clear_resources() ++{ ++ local cibfile=${1}; shift ++ ++ while [[ ${1} ]]; do ++ pcs -f ${cibfile} resource delete ${1}-group ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs -f ${cibfile} resource delete ${1}-group" ++ fi ++ ++ shift ++ done ++} ++ ++ ++addnode_create_resources() ++{ ++ local add_node=${1}; shift ++ local add_vip=${1}; shift ++ local cibfile=$(mktemp -u) ++ ++ # start HA on the new node ++ pcs cluster start ${add_node} ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs cluster start ${add_node} failed" ++ fi ++ ++ pcs cluster cib ${cibfile} ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs cluster cib ${cibfile} failed" ++ fi ++ ++ # delete all the -cluster_ip-1 resources, clearing ++ # their constraints, then create them again so we can ++ # recompute their constraints ++ clear_resources ${cibfile} ${HA_SERVERS} ++ addnode_recreate_resources ${cibfile} ${add_node} ${add_vip} ++ ++ HA_SERVERS="${HA_SERVERS} ${add_node}" ++ create_virt_ip_constraints ${cibfile} ${HA_SERVERS} ++ ++ pcs cluster cib-push ${cibfile} ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs cluster cib-push ${cibfile} failed" ++ fi ++ rm -f ${cibfile} ++} ++ ++ ++deletenode_delete_resources() ++{ ++ local node=${1}; shift ++ local ha_servers=$(echo "${HA_SERVERS}" | sed s/${node}//) ++ local cibfile=$(mktemp -u) ++ ++ pcs cluster cib ${cibfile} ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs cluster cib ${cibfile} failed" ++ fi ++ ++ # delete all the -cluster_ip-1 and -trigger_ip-1 resources, ++ # clearing their constraints, then create them again so we can ++ # recompute their constraints ++ clear_resources ${cibfile} ${HA_SERVERS} ++ recreate_resources ${cibfile} ${ha_servers} ++ HA_SERVERS=$(echo "${ha_servers}" | sed -e "s/ / /") ++ ++ create_virt_ip_constraints ${cibfile} ${HA_SERVERS} ++ ++ pcs cluster cib-push ${cibfile} ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs cluster cib-push ${cibfile} failed" ++ fi ++ rm -f ${cibfile} ++ ++} ++ ++ ++deletenode_update_haconfig() ++{ ++ local name="VIP_${1}" ++ local clean_name=${name//[-.]/_} ++ ++ ha_servers=$(echo ${HA_SERVERS} | sed -e "s/ /,/") ++ sed -i -e "s/^HA_CLUSTER_NODES=.*$/HA_CLUSTER_NODES=\"${ha_servers// /,}\"/" -e "s/^${name}=.*$//" -e "/^$/d" ${HA_CONFDIR}/ganesha-ha.conf ++} ++ ++ ++setup_state_volume() ++{ ++ local mnt=${HA_VOL_MNT} ++ local longname="" ++ local shortname="" ++ local dname="" ++ local dirname="" ++ ++ longname=$(hostname) ++ dname=${longname#$(hostname -s)} ++ ++ while [[ ${1} ]]; do ++ ++ if [[ ${1} == *${dname} ]]; then ++ dirname=${1} ++ else ++ dirname=${1}${dname} ++ fi ++ ++ if [ ! -d ${mnt}/nfs-ganesha/tickle_dir ]; then ++ mkdir ${mnt}/nfs-ganesha/tickle_dir ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname} ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname} ++ fi ++ if [ ! 
-d ${mnt}/nfs-ganesha/${dirname}/nfs ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd ++ fi ++ if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then ++ touch ${mnt}/nfs-ganesha/${dirname}/nfs/state ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ++ fi ++ if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then ++ touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ++ fi ++ for server in ${HA_SERVERS} ; do ++ if [ ${server} != ${dirname} ]; then ++ ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server} ++ ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server} ++ fi ++ done ++ shift ++ done ++ ++} ++ ++ ++addnode_state_volume() ++{ ++ local newnode=${1}; shift ++ local mnt=${HA_VOL_MNT} ++ local longname="" ++ local dname="" ++ local dirname="" ++ ++ longname=$(hostname) ++ dname=${longname#$(hostname -s)} ++ ++ if [[ ${newnode} == *${dname} ]]; then ++ dirname=${newnode} ++ else ++ dirname=${newnode}${dname} ++ fi ++ ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname} ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname} ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd ++ fi ++ if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then ++ touch ${mnt}/nfs-ganesha/${dirname}/nfs/state ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ++ fi ++ if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then ++ mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ++ fi ++ if [ ! 
-e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then ++ touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ++ fi ++ ++ for server in ${HA_SERVERS} ; do ++ ++ if [[ ${server} != ${dirname} ]]; then ++ ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server} ++ ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server} ++ ++ ln -s ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ${mnt}/nfs-ganesha/${server}/nfs/ganesha/${dirname} ++ ln -s ${mnt}/nfs-ganesha/${dirname}/nfs/statd ${mnt}/nfs-ganesha/${server}/nfs/statd/${dirname} ++ fi ++ done ++ ++} ++ ++ ++delnode_state_volume() ++{ ++ local delnode=${1}; shift ++ local mnt=${HA_VOL_MNT} ++ local longname="" ++ local dname="" ++ local dirname="" ++ ++ longname=$(hostname) ++ dname=${longname#$(hostname -s)} ++ ++ if [[ ${delnode} == *${dname} ]]; then ++ dirname=${delnode} ++ else ++ dirname=${delnode}${dname} ++ fi ++ ++ rm -rf ${mnt}/nfs-ganesha/${dirname} ++ ++ for server in ${HA_SERVERS} ; do ++ if [[ "${server}" != "${dirname}" ]]; then ++ rm -f ${mnt}/nfs-ganesha/${server}/nfs/ganesha/${dirname} ++ rm -f ${mnt}/nfs-ganesha/${server}/nfs/statd/${dirname} ++ fi ++ done ++} ++ ++ ++status() ++{ ++ local scratch=$(mktemp) ++ local regex_str="^${1}-cluster_ip-1" ++ local healthy=0 ++ local index=1 ++ local nodes ++ ++ # change tabs to spaces, strip leading spaces ++ pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*//" > ${scratch} ++ ++ nodes[0]=${1}; shift ++ ++ # make a regex of the configured nodes ++ # and initalize the nodes array for later ++ while [[ ${1} ]]; do ++ ++ regex_str="${regex_str}|^${1}-cluster_ip-1" ++ nodes[${index}]=${1} ++ ((index++)) ++ shift ++ done ++ ++ # print the nodes that are expected to be online ++ grep -E "^Online:" ${scratch} ++ ++ echo ++ ++ # print the VIPs and which node they are on ++ grep -E "${regex_str}" < ${scratch} | cut -d ' ' -f 1,4 ++ ++ echo ++ ++ # check if the VIP and port block/unblock RAs are on the expected nodes ++ for n in ${nodes[*]}; do ++ ++ grep -E -x "${n}-nfs_block \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch} ++ result=$? ++ ((healthy+=${result})) ++ grep -E -x "${n}-cluster_ip-1 \(ocf::heartbeat:IPaddr\): Started ${n}" > /dev/null 2>&1 ${scratch} ++ result=$? ++ ((healthy+=${result})) ++ grep -E -x "${n}-nfs_unblock \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch} ++ result=$? ++ ((healthy+=${result})) ++ done ++ ++ grep -E "\):\ Stopped|FAILED" > /dev/null 2>&1 ${scratch} ++ result=$? ++ ++ if [ ${result} -eq 0 ]; then ++ echo "Cluster HA Status: BAD" ++ elif [ ${healthy} -eq 0 ]; then ++ echo "Cluster HA Status: HEALTHY" ++ else ++ echo "Cluster HA Status: FAILOVER" ++ fi ++ ++ rm -f ${scratch} ++} ++ ++create_ganesha_conf_file() ++{ ++ if [ $1 == "yes" ]; ++ then ++ if [ -e $GANESHA_CONF ]; ++ then ++ rm -rf $GANESHA_CONF ++ fi ++ # The symlink /etc/ganesha/ganesha.conf need to be ++ # created using ganesha conf file mentioned in the ++ # shared storage. Every node will only have this ++ # link and actual file will stored in shared storage, ++ # so that ganesha conf editing of ganesha conf will ++ # be easy as well as it become more consistent. 
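++ # For example (illustrative, assuming the default shared-storage
++ # mount point used by this script), the result is a symlink like:
++ #   /etc/ganesha/ganesha.conf -> /var/run/gluster/shared_storage/nfs-ganesha/ganesha.conf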
++ ++ ln -s $HA_CONFDIR/ganesha.conf $GANESHA_CONF ++ else ++ # Restoring previous file ++ rm -rf $GANESHA_CONF ++ cp $HA_CONFDIR/ganesha.conf $GANESHA_CONF ++ sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' $GANESHA_CONF ++ fi ++} ++ ++set_quorum_policy() ++{ ++ local quorum_policy="stop" ++ local num_servers=${1} ++ ++ if [ ${num_servers} -lt 3 ]; then ++ quorum_policy="ignore" ++ fi ++ pcs property set no-quorum-policy=${quorum_policy} ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs property set no-quorum-policy=${quorum_policy} failed" ++ fi ++} ++ ++main() ++{ ++ ++ local cmd=${1}; shift ++ if [[ ${cmd} == *help ]]; then ++ usage ++ exit 0 ++ fi ++ HA_CONFDIR=${1%/}; shift ++ local ha_conf=${HA_CONFDIR}/ganesha-ha.conf ++ local node="" ++ local vip="" ++ ++ # ignore any comment lines ++ cfgline=$(grep ^HA_NAME= ${ha_conf}) ++ eval $(echo ${cfgline} | grep -F HA_NAME=) ++ cfgline=$(grep ^HA_CLUSTER_NODES= ${ha_conf}) ++ eval $(echo ${cfgline} | grep -F HA_CLUSTER_NODES=) ++ ++ case "${cmd}" in ++ ++ setup | --setup) ++ logger "setting up ${HA_NAME}" ++ ++ check_cluster_exists ${HA_NAME} ++ ++ determine_servers "setup" ++ ++ if [ "X${HA_NUM_SERVERS}X" != "X1X" ]; then ++ ++ setup_cluster ${HA_NAME} ${HA_NUM_SERVERS} "${HA_SERVERS}" ++ ++ setup_create_resources ${HA_SERVERS} ++ ++ setup_finalize_ha ++ ++ setup_state_volume ${HA_SERVERS} ++ ++ else ++ ++ logger "insufficient servers for HA, aborting" ++ fi ++ ;; ++ ++ teardown | --teardown) ++ logger "tearing down ${HA_NAME}" ++ ++ determine_servers "teardown" ++ ++ teardown_resources ${HA_SERVERS} ++ ++ teardown_cluster ${HA_NAME} ++ ++ cleanup_ganesha_config ${HA_CONFDIR} ++ ;; ++ ++ cleanup | --cleanup) ++ cleanup_ganesha_config ${HA_CONFDIR} ++ ;; ++ ++ add | --add) ++ node=${1}; shift ++ vip=${1}; shift ++ ++ logger "adding ${node} with ${vip} to ${HA_NAME}" ++ ++ determine_service_manager ++ ++ manage_service "start" ${node} ++ ++ determine_servers "add" ++ ++ pcs cluster node add ${node} ++ if [ $? -ne 0 ]; then ++ logger "warning: pcs cluster node add ${node} failed" ++ fi ++ ++ addnode_create_resources ${node} ${vip} ++ # Subsequent add-node recreates resources for all the nodes ++ # that already exist in the cluster. The nodes are picked up ++ # from the entries in the ganesha-ha.conf file. Adding the ++ # newly added node to the file so that the resources specfic ++ # to this node is correctly recreated in the future. ++ clean_node=${node//[-.]/_} ++ echo "VIP_${node}=\"${vip}\"" >> ${HA_CONFDIR}/ganesha-ha.conf ++ ++ NEW_NODES="$HA_CLUSTER_NODES,${node}" ++ ++ sed -i s/HA_CLUSTER_NODES.*/"HA_CLUSTER_NODES=\"$NEW_NODES\""/ \ ++$HA_CONFDIR/ganesha-ha.conf ++ ++ addnode_state_volume ${node} ++ ++ # addnode_create_resources() already appended ${node} to ++ # HA_SERVERS, so only need to increment HA_NUM_SERVERS ++ # and set quorum policy ++ HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} + 1) ++ set_quorum_policy ${HA_NUM_SERVERS} ++ ;; ++ ++ delete | --delete) ++ node=${1}; shift ++ ++ logger "deleting ${node} from ${HA_NAME}" ++ ++ determine_servers "delete" ++ ++ deletenode_delete_resources ${node} ++ ++ pcs cluster node remove ${node} ++ if [ $? 
-ne 0 ]; then ++ logger "warning: pcs cluster node remove ${node} failed" ++ fi ++ ++ deletenode_update_haconfig ${node} ++ ++ delnode_state_volume ${node} ++ ++ determine_service_manager ++ ++ manage_service "stop" ${node} ++ ++ HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} - 1) ++ set_quorum_policy ${HA_NUM_SERVERS} ++ ;; ++ ++ status | --status) ++ determine_servers "status" ++ ++ status ${HA_SERVERS} ++ ;; ++ ++ refresh-config | --refresh-config) ++ VOL=${1} ++ ++ determine_servers "refresh-config" ++ ++ refresh_config ${VOL} ${HA_CONFDIR} ${HA_SERVERS} ++ ;; ++ ++ setup-ganesha-conf-files | --setup-ganesha-conf-files) ++ ++ create_ganesha_conf_file ${1} ++ ;; ++ ++ *) ++ # setup and teardown are not intended to be used by a ++ # casual user ++ usage ++ logger "Usage: ganesha-ha.sh add|delete|status" ++ ;; ++ ++ esac ++} ++ ++main $* ++ +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 6e710e5..0bad6cf 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -405,7 +405,8 @@ Summary: NFS-Ganesha configuration + Group: Applications/File + + Requires: %{name}-server%{?_isa} = %{version}-%{release} +-Requires: nfs-ganesha-gluster, pcs, dbus ++Requires: nfs-ganesha-gluster >= 2.4.1 ++Requires: pcs, dbus + %if ( 0%{?rhel} && 0%{?rhel} == 6 ) + Requires: cman, pacemaker, corosync + %endif +@@ -1276,6 +1277,7 @@ exit 0 + %files ganesha + %{_sysconfdir}/ganesha/* + %{_libexecdir}/ganesha/* ++%{_prefix}/lib/ocf/resource.d/heartbeat/* + %{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh + %endif + +-- +1.8.3.1 + diff --git a/SOURCES/0052-common-ha-fixes-for-Debian-based-systems.patch b/SOURCES/0052-common-ha-fixes-for-Debian-based-systems.patch new file mode 100644 index 0000000..914204d --- /dev/null +++ b/SOURCES/0052-common-ha-fixes-for-Debian-based-systems.patch @@ -0,0 +1,235 @@ +From a4d7f6c1999f2c7837aaa674177edca9298d1c00 Mon Sep 17 00:00:00 2001 +From: "Kaleb S. KEITHLEY" +Date: Fri, 7 Apr 2017 09:09:29 -0400 +Subject: [PATCH 52/74] common-ha: fixes for Debian-based systems + +1) Debian-based systems don't have /usr/libexec/... and there is +a hard-coded invocation of /usr/libexec/ganesha/ganesha-ha.sh within +ganesha-ha.sh itself. +Fix: save $0 and use it instead for further invocations of self. + +2) default shell is /bin/dash (not /bin/bash). Various runner_run() +invocations for ganesha used what amounts to + exec("sh /usr/$libexec/ganesha/ganesha-ha.sh ...); +which executes the script using the default shell, but there are +some bash-specific idioms that don't work if the shell is dash. +Fix: change to exec("/usr/$libexec/ganesha/ganesha-ha.sh ...); so that +the shebang forces the use of /bin/bash + +3) Fedora and RHEL7 have merged /bin/ and /usr/bin, /bin is a symlink +to /usr/bin. Debian-based systems are not merged, and systemd systems +have /bin/systemctl. The logic to find .../bin/systemctl is backwards. +If the logic looks for /usr/bin/systemctl it will not find it on +Debian-based systems; if it looks for /bin/systemctl it will find it +on Fedora and RHEL by virtue of the symlink. (RHEL6 and others will +find their respective init regardless.) +Fix: change the logic to look for /bin/systemctl instead. + +4) The logic for deciding to run systemctl (or not) is a bit silly. +Fix: simply invoke the found method via the function pointer in the +table. + +Change-Id: I33681b296a73aebb078bda6ac0d3a1d3b9770a21 +Signed-off-by: Kaleb S. 
KEITHLEY +Reviewed-on: https://review.gluster.org/17013 +Smoke: Gluster Build System +Reviewed-by: Niels de Vos +NetBSD-regression: NetBSD Build System +CentOS-regression: Gluster Build System +Reviewed-by: jiffin tony Thottan +--- + extras/ganesha/scripts/ganesha-ha.sh | 21 ++++++------- + xlators/mgmt/glusterd/src/glusterd-ganesha.c | 44 +++++++++++++--------------- + 2 files changed, 32 insertions(+), 33 deletions(-) + +diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh +index e4135ba..d4844e4 100644 +--- a/extras/ganesha/scripts/ganesha-ha.sh ++++ b/extras/ganesha/scripts/ganesha-ha.sh +@@ -20,6 +20,7 @@ + # ensure that the NFS GRACE DBUS signal is sent after the VIP moves to + # the new host. + ++GANESHA_HA_SH=$(realpath $0) + HA_NUM_SERVERS=0 + HA_SERVERS="" + HA_VOL_NAME="gluster_shared_storage" +@@ -68,9 +69,9 @@ function find_rhel7_conf + done + } + +-if [ -z $CONFFILE ] ++if [ -z ${CONFFILE} ] + then +- find_rhel7_conf $OPTIONS ++ find_rhel7_conf ${OPTIONS} + + fi + +@@ -90,9 +91,9 @@ usage() { + + determine_service_manager () { + +- if [ -e "/usr/bin/systemctl" ]; ++ if [ -e "/bin/systemctl" ]; + then +- SERVICE_MAN="/usr/bin/systemctl" ++ SERVICE_MAN="/bin/systemctl" + elif [ -e "/sbin/invoke-rc.d" ]; + then + SERVICE_MAN="/sbin/invoke-rc.d" +@@ -100,7 +101,7 @@ determine_service_manager () { + then + SERVICE_MAN="/sbin/service" + fi +- if [ "$SERVICE_MAN" == "DISTRO_NOT_FOUND" ] ++ if [ "${SERVICE_MAN}" == "DISTRO_NOT_FOUND" ] + then + echo "Service manager not recognized, exiting" + exit 1 +@@ -113,21 +114,21 @@ manage_service () + local new_node=${2} + local option= + +- if [ "$action" == "start" ]; then ++ if [ "${action}" == "start" ]; then + option="yes" + else + option="no" + fi + ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \ +-${SECRET_PEM} root@${new_node} "/usr/libexec/ganesha/ganesha-ha.sh --setup-ganesha-conf-files $HA_CONFDIR $option" ++${SECRET_PEM} root@${new_node} "${GANESHA_HA_SH} --setup-ganesha-conf-files $HA_CONFDIR $option" + +- if [ "$SERVICE_MAN" == "/usr/bin/systemctl" ] ++ if [ "${SERVICE_MAN}" == "/bin/systemctl" ] + then + ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \ +-${SECRET_PEM} root@${new_node} "$SERVICE_MAN ${action} nfs-ganesha" ++${SECRET_PEM} root@${new_node} "${SERVICE_MAN} ${action} nfs-ganesha" + else + ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \ +-${SECRET_PEM} root@${new_node} "$SERVICE_MAN nfs-ganesha ${action}" ++${SECRET_PEM} root@${new_node} "${SERVICE_MAN} nfs-ganesha ${action}" + fi + } + +diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +index 4346bad..c16dd72 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c ++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +@@ -119,11 +119,10 @@ sc_service_action (struct service_command *sc, char *command) + static int + manage_service (char *action) + { +- struct stat stbuf = {0,}; + int i = 0; + int ret = 0; + struct service_command sc_list[] = { +- { .binary = "/usr/bin/systemctl", ++ { .binary = "/bin/systemctl", + .service = "nfs-ganesha", + .action = sc_systemctl_action + }, +@@ -140,16 +139,11 @@ manage_service (char *action) + }; + + while (sc_list[i].binary != NULL) { +- ret = sys_stat (sc_list[i].binary, &stbuf); ++ ret = sys_access (sc_list[i].binary, X_OK); + if (ret == 0) { + gf_msg_debug (THIS->name, 0, +- "%s found.", sc_list[i].binary); +- if (strcmp (sc_list[i].binary, "/usr/bin/systemctl") == 0) +- ret = 
sc_systemctl_action (&sc_list[i], action); +- else +- ret = sc_service_action (&sc_list[i], action); +- +- return ret; ++ "%s found.", sc_list[i].binary); ++ return sc_list[i].action (&sc_list[i], action); + } + i++; + } +@@ -465,9 +459,9 @@ manage_export_config (char *volname, char *value, char **op_errstr) + + GF_ASSERT(volname); + runinit (&runner); +- runner_add_args (&runner, "sh", +- GANESHA_PREFIX"/create-export-ganesha.sh", +- CONFDIR, value, volname, NULL); ++ runner_add_args (&runner, ++ GANESHA_PREFIX"/create-export-ganesha.sh", ++ CONFDIR, value, volname, NULL); + ret = runner_run(&runner); + + if (ret) +@@ -570,8 +564,9 @@ ganesha_manage_export (dict_t *dict, char *value, char **op_errstr) + } + + if (check_host_list()) { +- runner_add_args (&runner, "sh", GANESHA_PREFIX"/dbus-send.sh", +- CONFDIR, value, volname, NULL); ++ runner_add_args (&runner, ++ GANESHA_PREFIX"/dbus-send.sh", ++ CONFDIR, value, volname, NULL); + ret = runner_run (&runner); + if (ret) { + gf_asprintf(op_errstr, "Dynamic export" +@@ -610,9 +605,9 @@ tear_down_cluster(gf_boolean_t run_teardown) + + if (run_teardown) { + runinit (&runner); +- runner_add_args (&runner, "sh", +- GANESHA_PREFIX"/ganesha-ha.sh", "teardown", +- CONFDIR, NULL); ++ runner_add_args (&runner, ++ GANESHA_PREFIX"/ganesha-ha.sh", "teardown", ++ CONFDIR, NULL); + ret = runner_run(&runner); + /* * + * Remove all the entries in CONFDIR expect ganesha.conf and +@@ -675,7 +670,8 @@ setup_cluster(gf_boolean_t run_setup) + + if (run_setup) { + runinit (&runner); +- runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh", ++ runner_add_args (&runner, ++ GANESHA_PREFIX"/ganesha-ha.sh", + "setup", CONFDIR, NULL); + ret = runner_run (&runner); + } +@@ -702,8 +698,9 @@ teardown (gf_boolean_t run_teardown, char **op_errstr) + } + + runinit (&runner); +- runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh", +- "cleanup", CONFDIR, NULL); ++ runner_add_args (&runner, ++ GANESHA_PREFIX"/ganesha-ha.sh", ++ "cleanup", CONFDIR, NULL); + ret = runner_run (&runner); + if (ret) + gf_msg_debug (THIS->name, 0, "Could not clean up" +@@ -747,7 +744,8 @@ stop_ganesha (char **op_errstr) { + runner_t runner = {0,}; + + runinit (&runner); +- runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh", ++ runner_add_args (&runner, ++ GANESHA_PREFIX"/ganesha-ha.sh", + "--setup-ganesha-conf-files", CONFDIR, "no", NULL); + ret = runner_run (&runner); + if (ret) { +@@ -810,7 +808,7 @@ start_ganesha (char **op_errstr) + + if (check_host_list()) { + runinit (&runner); +- runner_add_args (&runner, "sh", GANESHA_PREFIX"/ganesha-ha.sh", ++ runner_add_args (&runner, GANESHA_PREFIX"/ganesha-ha.sh", + "--setup-ganesha-conf-files", CONFDIR, "yes", + NULL); + ret = runner_run (&runner); +-- +1.8.3.1 + diff --git a/SOURCES/0053-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch b/SOURCES/0053-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch new file mode 100644 index 0000000..3c0ad6d --- /dev/null +++ b/SOURCES/0053-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch @@ -0,0 +1,33 @@ +From bfad2cafc9cbb58161386ee71dd086f01176558e Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Wed, 22 Feb 2017 14:37:04 +0530 +Subject: [PATCH 53/74] ganesha/scripts : Remove export entries from + ganesha.conf during cleanup + +Change-Id: I288f7c9ced23d258a7ce1242d8efe03a4bf6f746 +Signed-off-by: Jiffin Tony Thottan +Reviewed-on: https://review.gluster.org/16708 +Smoke: Gluster Build System +NetBSD-regression: NetBSD Build System 
+CentOS-regression: Gluster Build System +Reviewed-by: soumya k +Reviewed-by: Kaleb KEITHLEY +--- + extras/ganesha/scripts/ganesha-ha.sh | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh +index d4844e4..2d6b06f 100644 +--- a/extras/ganesha/scripts/ganesha-ha.sh ++++ b/extras/ganesha/scripts/ganesha-ha.sh +@@ -342,6 +342,7 @@ cleanup_ganesha_config () + rm -f /etc/corosync/corosync.conf + rm -rf /etc/cluster/cluster.conf* + rm -rf /var/lib/pacemaker/cib/* ++ sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' $HA_CONFDIR/ganesha.conf + } + + do_create_virt_ip_constraints() +-- +1.8.3.1 + diff --git a/SOURCES/0054-glusterd-ganesha-During-volume-delete-remove-the-gan.patch b/SOURCES/0054-glusterd-ganesha-During-volume-delete-remove-the-gan.patch new file mode 100644 index 0000000..8b6c8e2 --- /dev/null +++ b/SOURCES/0054-glusterd-ganesha-During-volume-delete-remove-the-gan.patch @@ -0,0 +1,55 @@ +From 1a9717eb9fb0c6ebd4fce5aa85e3ed53bdd69d59 Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Wed, 22 Feb 2017 14:20:41 +0530 +Subject: [PATCH 54/74] glusterd/ganesha : During volume delete remove the + ganesha export configuration file + +Change-Id: I0363e7f4d7cefd3f1b3c4f91e495767ec52e230e +Signed-off-by: Jiffin Tony Thottan +Reviewed-on: https://review.gluster.org/16707 +Smoke: Gluster Build System +NetBSD-regression: NetBSD Build System +CentOS-regression: Gluster Build System +Reviewed-by: soumya k +Reviewed-by: Kaleb KEITHLEY +Signed-off-by: Jiffin Tony Thottan +--- + xlators/mgmt/glusterd/src/glusterd-ganesha.c | 2 +- + xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 8 ++++++++ + 2 files changed, 9 insertions(+), 1 deletion(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +index c16dd72..e176df9 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c ++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +@@ -464,7 +464,7 @@ manage_export_config (char *volname, char *value, char **op_errstr) + CONFDIR, value, volname, NULL); + ret = runner_run(&runner); + +- if (ret) ++ if (ret && !(*op_errstr)) + gf_asprintf (op_errstr, "Failed to create" + " NFS-Ganesha export config file."); + +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +index 0914fb1..92db458 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +@@ -2853,6 +2853,14 @@ glusterd_op_delete_volume (dict_t *dict) + volname); + goto out; + } ++ if (glusterd_check_ganesha_export (volinfo)) { ++ ret = manage_export_config (volname, "off", NULL); ++ if (ret) ++ gf_msg (this->name, GF_LOG_WARNING, 0, 0, ++ "Could not delete ganesha export conf file " ++ "for %s", volname); ++ } ++ + + ret = glusterd_delete_volume (volinfo); + out: +-- +1.8.3.1 + diff --git a/SOURCES/0055-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch b/SOURCES/0055-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch new file mode 100644 index 0000000..d98316c --- /dev/null +++ b/SOURCES/0055-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch @@ -0,0 +1,125 @@ +From 6683f3c2702f635a95623c427f343385a1ad8c63 Mon Sep 17 00:00:00 2001 +From: jiffin tony thottan +Date: Mon, 7 Dec 2015 14:38:54 +0530 +Subject: [PATCH 55/74] glusterd/ganesha : throw proper error for "gluster + nfs-ganesha disable" + +For first time or if "gluster nfs-ganesha enable" fails the global option 
+"nfs-ganesha" won't be stored in glusterd's dictionary. In both cases the +"gluster nfs-ganesha disable" throws following error : +"nfs-ganesha: failed: nfs-ganesha is already (null)d." + +Also this patch provides the missing prompt for nfs-ganesha disable in 3.10 + +Change-Id: I7c9fd6dabedc0cfb14c5190b3554bc63a6bc0340 +Signed-off-by: Jiffin Tony Thottan +Reviewed-on: https://review.gluster.org/16791 +Smoke: Gluster Build System +NetBSD-regression: NetBSD Build System +CentOS-regression: Gluster Build System +Reviewed-by: soumya k +Reviewed-by: Kaleb KEITHLEY +--- + cli/src/cli-cmd-parser.c | 30 ++++++++++++++++++---------- + xlators/mgmt/glusterd/src/glusterd-ganesha.c | 28 +++++++++----------------- + 2 files changed, 28 insertions(+), 30 deletions(-) + +diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c +index a4c601b..a35fc74 100644 +--- a/cli/src/cli-cmd-parser.c ++++ b/cli/src/cli-cmd-parser.c +@@ -897,18 +897,26 @@ cli_cmd_ganesha_parse (struct cli_state *state, + goto out; + } + +- question = "Enabling NFS-Ganesha requires Gluster-NFS to be" +- " disabled across the trusted pool. Do you " +- "still want to continue?\n"; +- + if (strcmp (value, "enable") == 0) { +- answer = cli_cmd_get_confirmation (state, question); +- if (GF_ANSWER_NO == answer) { +- gf_log ("cli", GF_LOG_ERROR, "Global operation " +- "cancelled, exiting"); +- ret = -1; +- goto out; +- } ++ question = "Enabling NFS-Ganesha requires Gluster-NFS to be " ++ "disabled across the trusted pool. Do you " ++ "still want to continue?\n"; ++ ++ } else if (strcmp (value, "disable") == 0) { ++ question = "Disabling NFS-Ganesha will tear down the entire " ++ "ganesha cluster across the trusted pool. Do you " ++ "still want to continue?\n"; ++ } else { ++ ret = -1; ++ goto out; ++ } ++ ++ answer = cli_cmd_get_confirmation (state, question); ++ if (GF_ANSWER_NO == answer) { ++ gf_log ("cli", GF_LOG_ERROR, "Global operation " ++ "cancelled, exiting"); ++ ret = -1; ++ goto out; + } + cli_out ("This will take a few minutes to complete. Please wait .."); + +diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +index e176df9..5c582cd 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c ++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +@@ -258,8 +258,7 @@ int + glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr) + { + int ret = -1; +- int value = -1; +- gf_boolean_t option = _gf_false; ++ char *value = NULL; + char *str = NULL; + glusterd_conf_t *priv = NULL; + xlator_t *this = NULL; +@@ -270,8 +269,8 @@ glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr) + priv = this->private; + GF_ASSERT (priv); + +- value = dict_get_str_boolean (dict, "value", _gf_false); +- if (value == -1) { ++ ret = dict_get_str (dict, "value", &value); ++ if (value == NULL) { + gf_msg (this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_GET_FAILED, + "value not present."); +@@ -280,22 +279,13 @@ glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr) + /* This dict_get will fail if the user had never set the key before */ + /*Ignoring the ret value and proceeding */ + ret = dict_get_str (priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str); +- if (ret == -1) { +- gf_msg (this->name, GF_LOG_WARNING, errno, +- GD_MSG_DICT_GET_FAILED, "Global dict not present."); +- ret = 0; +- goto out; ++ /* Check if the feature is already enabled/disable, fail in that case */ ++ if (str ? 
strcmp (value, str) == 0 : strcmp (value, "disable") == 0) {
++ gf_asprintf (op_errstr, "nfs-ganesha is already %sd.", value);
++ ret = -1;
++ goto out;
+ }
+- /* Validity of the value is already checked */
+- ret = gf_string2boolean (str, &option);
+- /* Check if the feature is already enabled, fail in that case */
+- if (value == option) {
+- gf_asprintf (op_errstr, "nfs-ganesha is already %sd.", str);
+- ret = -1;
+- goto out;
+- }
+-
+- if (value) {
++ if (strcmp (value, "enable") == 0) {
+ ret = start_ganesha (op_errstr);
+ if (ret) {
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+--
+1.8.3.1
+
diff --git a/SOURCES/0056-ganesha-scripts-Stop-ganesha-process-on-all-nodes-if.patch b/SOURCES/0056-ganesha-scripts-Stop-ganesha-process-on-all-nodes-if.patch
new file mode 100644
index 0000000..675b65b
--- /dev/null
+++ b/SOURCES/0056-ganesha-scripts-Stop-ganesha-process-on-all-nodes-if.patch
@@ -0,0 +1,54 @@
+From 72869da97db070bc00cc0934aebdb8f247b05b55 Mon Sep 17 00:00:00 2001
+From: Jiffin Tony Thottan
+Date: Thu, 2 Mar 2017 12:22:30 +0530
+Subject: [PATCH 56/74] ganesha/scripts : Stop ganesha process on all nodes if
+ cluster setup fails
+
+During the staging phase of the volume option "nfs-ganesha", the symlink
+"ganesha.conf" is created and the ganesha process is started. The cluster
+setup happens during the commit phase of that option, so if the cluster
+setup fails, the ganesha process is left running on all cluster nodes.
+
+Change-Id: Ib2cb85364b7ef5b702acb4826ffdf8e6f31a2acd
+Signed-off-by: Jiffin Tony Thottan
+Reviewed-on: https://review.gluster.org/16823
+Smoke: Gluster Build System
+Tested-by: Kaleb KEITHLEY
+Reviewed-by: soumya k
+Reviewed-by: Kaleb KEITHLEY
+NetBSD-regression: NetBSD Build System
+CentOS-regression: Gluster Build System
+---
+ extras/ganesha/scripts/ganesha-ha.sh | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index 2d6b06f..df4f0b8 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -175,6 +175,13 @@ determine_servers()
+ fi
+ }
+
++stop_ganesha_all()
++{
++ local serverlist=${1}
++ for node in ${serverlist} ; do
++ manage_service "stop" ${node}
++ done
++}
+
+ setup_cluster()
+ {
+@@ -191,6 +198,8 @@ setup_cluster()
+ pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --transport udpu ${servers}
+ if [ $? -ne 0 ]; then
+ logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} ${servers} failed"
++ # setup failed: stop all ganesha processes and clean up symlinks in cluster
++ stop_ganesha_all ${servers}
+ exit 1;
+ fi
+ pcs cluster start --all
+--
+1.8.3.1
+
diff --git a/SOURCES/0057-ganesha-allow-refresh-config-and-volume-export-unexp.patch b/SOURCES/0057-ganesha-allow-refresh-config-and-volume-export-unexp.patch
new file mode 100644
index 0000000..81e892c
--- /dev/null
+++ b/SOURCES/0057-ganesha-allow-refresh-config-and-volume-export-unexp.patch
@@ -0,0 +1,117 @@
+From 17b75c3bf216c53b4303a9c59adaf89d3da328ea Mon Sep 17 00:00:00 2001
+From: Jiffin Tony Thottan
+Date: Wed, 19 Apr 2017 16:12:10 +0530
+Subject: [PATCH 57/74] ganesha : allow refresh-config and volume
+ export/unexport in failover state
+
+If ganesha is not running on one of the nodes in the HA cluster, then all
+dbus commands sent to that ganesha server will fail. This results in both
+refresh-config and volume export/unexport failure. This change will
+gracefully handle those scenarios.
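+
+As a sketch (illustrative only; the authoritative change is the
+glusterd-ganesha.c hunk below), the export path now guards the dbus
+call with a service check:
+
+    if (manage_service ("status")) {
+            /* NFS-Ganesha is not running on this node: log a warning
+             * instead of failing the whole operation */
+    } else {
+            /* run dbus-send.sh to export/unexport as before */
+    }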
+ +Change-Id: I3f1b7b7ca98e54c273c266e56357d8e24dd1b14b +Signed-off-by: Jiffin Tony Thottan +Reviewed-on: https://review.gluster.org/17081 +Smoke: Gluster Build System +NetBSD-regression: NetBSD Build System +CentOS-regression: Gluster Build System +Reviewed-by: soumya k +Reviewed-by: Kaleb KEITHLEY +Signed-off-by: Jiffin Tony Thottan +--- + extras/ganesha/scripts/ganesha-ha.sh | 6 ++---- + xlators/mgmt/glusterd/src/glusterd-ganesha.c | 25 ++++++++++++++++--------- + xlators/mgmt/glusterd/src/glusterd-messages.h | 10 +++++++++- + 3 files changed, 27 insertions(+), 14 deletions(-) + +diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh +index df4f0b8..db2fa54 100644 +--- a/extras/ganesha/scripts/ganesha-ha.sh ++++ b/extras/ganesha/scripts/ganesha-ha.sh +@@ -275,8 +275,7 @@ string:\"EXPORT(Export_Id=$export_id)\" 2>&1") + ret=$? + logger <<< "${output}" + if [ ${ret} -ne 0 ]; then +- echo "Error: refresh-config failed on ${current_host}." +- exit 1 ++ echo "Refresh-config failed on ${current_host}" + else + echo "Refresh-config completed on ${current_host}." + fi +@@ -297,8 +296,7 @@ string:"EXPORT(Export_Id=$export_id)" 2>&1) + ret=$? + logger <<< "${output}" + if [ ${ret} -ne 0 ] ; then +- echo "Error: refresh-config failed on localhost." +- exit 1 ++ echo "Refresh-config failed on localhost." + else + echo "Success: refresh-config completed." + fi +diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +index 5c582cd..38fa378 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c ++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +@@ -554,15 +554,22 @@ ganesha_manage_export (dict_t *dict, char *value, char **op_errstr) + } + + if (check_host_list()) { +- runner_add_args (&runner, +- GANESHA_PREFIX"/dbus-send.sh", +- CONFDIR, value, volname, NULL); +- ret = runner_run (&runner); +- if (ret) { +- gf_asprintf(op_errstr, "Dynamic export" +- " addition/deletion failed." +- " Please see log file for details"); +- goto out; ++ /* Check whether ganesha is running on this node */ ++ if (manage_service ("status")) { ++ gf_msg (this->name, GF_LOG_WARNING, 0, ++ GD_MSG_GANESHA_NOT_RUNNING, ++ "Export failed, NFS-Ganesha is not running"); ++ } else { ++ runner_add_args (&runner, ++ GANESHA_PREFIX"/dbus-send.sh", ++ CONFDIR, value, volname, NULL); ++ ret = runner_run (&runner); ++ if (ret) { ++ gf_asprintf(op_errstr, "Dynamic export" ++ " addition/deletion failed." ++ " Please see log file for details"); ++ goto out; ++ } + } + } + +diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h +index cc7f371..fb2079f 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-messages.h ++++ b/xlators/mgmt/glusterd/src/glusterd-messages.h +@@ -41,7 +41,7 @@ + + #define GLUSTERD_COMP_BASE GLFS_MSGID_GLUSTERD + +-#define GLFS_NUM_MESSAGES 612 ++#define GLFS_NUM_MESSAGES 613 + + #define GLFS_MSGID_END (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1) + /* Messaged with message IDs */ +@@ -4961,6 +4961,14 @@ + */ + #define GD_MSG_PORTS_EXHAUSTED (GLUSTERD_COMP_BASE + 612) + ++#define GD_MSG_GANESHA_NOT_RUNNING (GLUSTERD_COMP_BASE + 613) ++/*! 
++ * @messageid
++ * @diagnosis
++ * @recommendedaction
++ *
++ */
++
+ /*------------*/
+
+ #define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
+--
+1.8.3.1
+
diff --git a/SOURCES/0058-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch b/SOURCES/0058-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch
new file mode 100644
index 0000000..7d7388b
--- /dev/null
+++ b/SOURCES/0058-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch
@@ -0,0 +1,54 @@
+From 06a24c4e6ec4d876bb5c9216a6b5f5364d58dc10 Mon Sep 17 00:00:00 2001
+From: Jiffin Tony Thottan
+Date: Fri, 28 Apr 2017 17:27:46 +0530
+Subject: [PATCH 58/74] glusterd/ganesha : perform removal of ganesha.conf on
+ nodes only in ganesha cluster
+
+Change-Id: I864ecd9391adf80fb1fa6ad2f9891a9ce77135e7
+Signed-off-by: Jiffin Tony Thottan
+Reviewed-on: https://review.gluster.org/17138
+Smoke: Gluster Build System
+Reviewed-by: soumya k
+NetBSD-regression: NetBSD Build System
+CentOS-regression: Gluster Build System
+Reviewed-by: Kaleb KEITHLEY
+---
+ xlators/mgmt/glusterd/src/glusterd-ganesha.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+index 38fa378..2392341 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+@@ -740,17 +740,18 @@ stop_ganesha (char **op_errstr) {
+ int ret = 0;
+ runner_t runner = {0,};
+
+- runinit (&runner);
+- runner_add_args (&runner,
+- GANESHA_PREFIX"/ganesha-ha.sh",
+- "--setup-ganesha-conf-files", CONFDIR, "no", NULL);
+- ret = runner_run (&runner);
+- if (ret) {
+- gf_asprintf (op_errstr, "removal of symlink ganesha.conf "
+- "in /etc/ganesha failed");
+- }
+-
+ if (check_host_list ()) {
++ runinit (&runner);
++ runner_add_args (&runner,
++ GANESHA_PREFIX"/ganesha-ha.sh",
++ "--setup-ganesha-conf-files", CONFDIR,
++ "no", NULL);
++ ret = runner_run (&runner);
++ if (ret) {
++ gf_asprintf (op_errstr, "removal of symlink ganesha.conf "
++ "in /etc/ganesha failed");
++ }
++
+ ret = manage_service ("stop");
+ if (ret)
+ gf_asprintf (op_errstr, "NFS-Ganesha service could not"
+--
+1.8.3.1
+
diff --git a/SOURCES/0059-glusterd-ganesha-update-cache-invalidation-properly-.patch b/SOURCES/0059-glusterd-ganesha-update-cache-invalidation-properly-.patch
new file mode 100644
index 0000000..e86575e
--- /dev/null
+++ b/SOURCES/0059-glusterd-ganesha-update-cache-invalidation-properly-.patch
@@ -0,0 +1,134 @@
+From 2cd1f86d0bd47f93f6e278530fc76a1e44aa9333 Mon Sep 17 00:00:00 2001
+From: Jiffin Tony Thottan
+Date: Tue, 25 Apr 2017 16:36:40 +0530
+Subject: [PATCH 59/74] glusterd/ganesha : update cache invalidation properly
+ during volume stop
+
+As per the current code, during volume stop for a ganesha-enabled volume
+the feature.cache-invalidation option was turned "off" in
+ganesha_manage_export(), and it was never turned back "on" when the
+volume was started. It is not desirable to modify volume options during
+stop; this patch fixes the above-mentioned issue.
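+
+Illustratively, after this change the call sites (taken from the hunks
+below) pass the new update_cache_invalidation flag as:
+
+    /* volume set/reset paths: also toggle features.cache-invalidation */
+    ganesha_manage_export (dict, "off", _gf_true, op_errstr);
+
+    /* volume stop path: leave the volume option untouched */
+    ganesha_manage_export (dict, "off", _gf_false, op_errstr);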
+ +Change-Id: Iea9c62e5cda4f54805b41ea6055cf0c3652a634c +Signed-off-by: Jiffin Tony Thottan +Reviewed-on: https://review.gluster.org/17111 +Smoke: Gluster Build System +NetBSD-regression: NetBSD Build System +CentOS-regression: Gluster Build System +Reviewed-by: Kaleb KEITHLEY +Reviewed-by: Raghavendra Talur +--- + xlators/mgmt/glusterd/src/glusterd-ganesha.c | 30 ++++++++++++++----------- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 4 ++-- + xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 2 +- + xlators/mgmt/glusterd/src/glusterd.h | 3 ++- + 4 files changed, 22 insertions(+), 17 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +index 2392341..5d6144a 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c ++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +@@ -463,7 +463,8 @@ manage_export_config (char *volname, char *value, char **op_errstr) + + /* Exports and unexports a particular volume via NFS-Ganesha */ + int +-ganesha_manage_export (dict_t *dict, char *value, char **op_errstr) ++ganesha_manage_export (dict_t *dict, char *value, ++ gf_boolean_t update_cache_invalidation, char **op_errstr) + { + runner_t runner = {0,}; + int ret = -1; +@@ -573,17 +574,20 @@ ganesha_manage_export (dict_t *dict, char *value, char **op_errstr) + } + } + +- vol_opts = volinfo->dict; +- ret = dict_set_dynstr_with_alloc (vol_opts, +- "features.cache-invalidation", value); +- if (ret) +- gf_asprintf (op_errstr, "Cache-invalidation could not" +- " be set to %s.", value); +- ret = glusterd_store_volinfo (volinfo, +- GLUSTERD_VOLINFO_VER_AC_INCREMENT); +- if (ret) +- gf_asprintf (op_errstr, "failed to store volinfo for %s" +- , volinfo->volname); ++ if (update_cache_invalidation) { ++ vol_opts = volinfo->dict; ++ ret = dict_set_dynstr_with_alloc (vol_opts, ++ "features.cache-invalidation", ++ value); ++ if (ret) ++ gf_asprintf (op_errstr, "Cache-invalidation could not" ++ " be set to %s.", value); ++ ret = glusterd_store_volinfo (volinfo, ++ GLUSTERD_VOLINFO_VER_AC_INCREMENT); ++ if (ret) ++ gf_asprintf (op_errstr, "failed to store volinfo for %s" ++ , volinfo->volname); ++ } + + out: + return ret; +@@ -858,7 +862,7 @@ glusterd_handle_ganesha_op (dict_t *dict, char **op_errstr, + + + if (strcmp (key, "ganesha.enable") == 0) { +- ret = ganesha_manage_export (dict, value, op_errstr); ++ ret = ganesha_manage_export (dict, value, _gf_true, op_errstr); + if (ret < 0) + goto out; + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 06e9e25..86f18f0 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -1128,7 +1128,7 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr) + + if ((strcmp (key, "ganesha.enable") == 0) && + (strcmp (value, "off") == 0)) { +- ret = ganesha_manage_export (dict, "off", op_errstr); ++ ret = ganesha_manage_export (dict, "off", _gf_true, op_errstr); + if (ret) + goto out; + } +@@ -1655,7 +1655,7 @@ glusterd_op_stage_reset_volume (dict_t *dict, char **op_errstr) + */ + if (volinfo && (!strcmp (key, "all") || !strcmp(key, "ganesha.enable"))) { + if (glusterd_check_ganesha_export (volinfo)) { +- ret = ganesha_manage_export (dict, "off", op_errstr); ++ ret = ganesha_manage_export (dict, "off", _gf_true, op_errstr); + if (ret) + gf_msg (this->name, GF_LOG_WARNING, 0, + GD_MSG_NFS_GNS_RESET_FAIL, +diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c 
b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +index 92db458..725d194 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c ++++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +@@ -1739,7 +1739,7 @@ glusterd_op_stage_stop_volume (dict_t *dict, char **op_errstr) + } + ret = glusterd_check_ganesha_export (volinfo); + if (ret) { +- ret = ganesha_manage_export(dict, "off", op_errstr); ++ ret = ganesha_manage_export(dict, "off", _gf_false, op_errstr); + if (ret) { + gf_msg (THIS->name, GF_LOG_WARNING, 0, + GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL, "Could not " +diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h +index 2d8dbb9..3ad5ed6 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.h ++++ b/xlators/mgmt/glusterd/src/glusterd.h +@@ -1176,7 +1176,8 @@ int glusterd_check_ganesha_cmd (char *key, char *value, + char **errstr, dict_t *dict); + int glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr); + int glusterd_op_set_ganesha (dict_t *dict, char **errstr); +-int ganesha_manage_export (dict_t *dict, char *value, char **op_errstr); ++int ganesha_manage_export (dict_t *dict, char *value, ++ gf_boolean_t update_cache_invalidation, char **op_errstr); + int manage_export_config (char *volname, char *value, char **op_errstr); + + gf_boolean_t +-- +1.8.3.1 + diff --git a/SOURCES/0060-glusterd-ganesha-return-proper-value-in-pre_setup.patch b/SOURCES/0060-glusterd-ganesha-return-proper-value-in-pre_setup.patch new file mode 100644 index 0000000..50f8012 --- /dev/null +++ b/SOURCES/0060-glusterd-ganesha-return-proper-value-in-pre_setup.patch @@ -0,0 +1,44 @@ +From 0ae0579f3c92ecf6270eea308905518ce75efb7b Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Wed, 22 Feb 2017 18:26:30 +0530 +Subject: [PATCH 60/74] glusterd/ganesha : return proper value in pre_setup() + +Change-Id: I6f7ce82488904c7d418ee078162f26f1ec81e9d9 +Signed-off-by: Jiffin Tony Thottan +Reviewed-on: https://review.gluster.org/16733 +Smoke: Gluster Build System +Reviewed-by: Atin Mukherjee +Reviewed-by: Raghavendra Talur +Tested-by: Raghavendra Talur +NetBSD-regression: NetBSD Build System +CentOS-regression: Gluster Build System +--- + xlators/mgmt/glusterd/src/glusterd-ganesha.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +index 5d6144a..cd591df 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c ++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c +@@ -834,15 +834,14 @@ pre_setup (gf_boolean_t run_setup, char **op_errstr) + { + int ret = 0; + +- ret = check_host_list(); +- +- if (ret) { ++ if (check_host_list()) { + ret = setup_cluster(run_setup); + if (ret == -1) + gf_asprintf (op_errstr, "Failed to set up HA " + "config for NFS-Ganesha. 
" + "Please check the log file for details"); +- } ++ } else ++ ret = -1; + + return ret; + } +-- +1.8.3.1 + diff --git a/SOURCES/0061-ganesha-scripts-remove-dependency-over-export-config.patch b/SOURCES/0061-ganesha-scripts-remove-dependency-over-export-config.patch new file mode 100644 index 0000000..e1b2e22 --- /dev/null +++ b/SOURCES/0061-ganesha-scripts-remove-dependency-over-export-config.patch @@ -0,0 +1,51 @@ +From e3dd661b5c8fce818a8e8b601d30bf1af8c3466e Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Thu, 23 Feb 2017 16:21:52 +0530 +Subject: [PATCH 61/74] ganesha/scripts : remove dependency over export + configuration file for unexport + +Currently unexport is performed by reading export id from volume configuration +file. So unexport has dependency over that file. This patch will unexport with +help of dbus command ShowExport. And it will only unexport the share which is +added via cli. + +Change-Id: I6f3c9b2bb48f0328b18e9cc0e4b9356174afd596 +Signed-off-by: Jiffin Tony Thottan +Reviewed-on: https://review.gluster.org/16771 +Smoke: Gluster Build System +NetBSD-regression: NetBSD Build System +CentOS-regression: Gluster Build System +Reviewed-by: Kaleb KEITHLEY +Reviewed-by: Raghavendra Talur +--- + extras/ganesha/scripts/dbus-send.sh | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +diff --git a/extras/ganesha/scripts/dbus-send.sh b/extras/ganesha/scripts/dbus-send.sh +index c071d03..a602cd4 100755 +--- a/extras/ganesha/scripts/dbus-send.sh ++++ b/extras/ganesha/scripts/dbus-send.sh +@@ -41,8 +41,18 @@ string:"EXPORT(Path=/$VOL)" + #This function removes an export dynamically(uses the export_id of the export) + function dynamic_export_remove() + { +- removed_id=`cat $GANESHA_DIR/exports/export.$VOL.conf |\ +-grep Export_Id | awk -F"[=,;]" '{print$2}'| tr -d '[[:space:]]'` ++ # Below bash fetch all the export from ShowExport command and search ++ # export entry based on path and then get its export entry. ++ # There are two possiblities for path, either entire volume will be ++ # exported or subdir. It handles both cases. 
But it removes only the first
++ # entry from the list, based on the assumption that the entry exported
++ # via the CLI has the lowest export id value.
++ removed_id=$(dbus-send --type=method_call --print-reply --system \
++ --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
++ org.ganesha.nfsd.exportmgr.ShowExports | grep -B 1 -we \
++ "/"$VOL -e "/"$VOL"/" | grep uint16 | awk '{print $2}' \
++ | head -1)
++
+ dbus-send --print-reply --system \
+ --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+ org.ganesha.nfsd.exportmgr.RemoveExport uint16:$removed_id
+--
+1.8.3.1
+
diff --git a/SOURCES/0062-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch b/SOURCES/0062-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch
new file mode 100644
index 0000000..2d769b5
--- /dev/null
+++ b/SOURCES/0062-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch
@@ -0,0 +1,34 @@
+From 05c63817ac715fdcf9065568d796b93d97c0be7f Mon Sep 17 00:00:00 2001
+From: Jiffin Tony Thottan
+Date: Tue, 2 May 2017 14:06:00 +0530
+Subject: [PATCH 62/74] glusterd/ganesha : add proper NULL check in
+ manage_export_config
+
+Change-Id: I872b2b6b027f04e61f60ad85588f50e1ef2f988c
+Signed-off-by: Jiffin Tony Thottan
+Reviewed-on: https://review.gluster.org/17150
+Smoke: Gluster Build System
+Reviewed-by: soumya k
+NetBSD-regression: NetBSD Build System
+Reviewed-by: Kaleb KEITHLEY
+CentOS-regression: Gluster Build System
+---
+ xlators/mgmt/glusterd/src/glusterd-ganesha.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+index cd591df..7ba25ee 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
++++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+@@ -454,7 +454,7 @@ manage_export_config (char *volname, char *value, char **op_errstr)
+ CONFDIR, value, volname, NULL);
+ ret = runner_run(&runner);
+
+- if (ret && !(*op_errstr))
++ if (ret && op_errstr)
+ gf_asprintf (op_errstr, "Failed to create"
+ " NFS-Ganesha export config file.");
+
+--
+1.8.3.1
+
diff --git a/SOURCES/0063-ganesha-minor-improvments-for-commit-e91cdf4-17081.patch b/SOURCES/0063-ganesha-minor-improvments-for-commit-e91cdf4-17081.patch
new file mode 100644
index 0000000..1a96b75
--- /dev/null
+++ b/SOURCES/0063-ganesha-minor-improvments-for-commit-e91cdf4-17081.patch
@@ -0,0 +1,34 @@
+From 1212ea61f7c2e04529ec6fa40bba447fc2bd0fe8 Mon Sep 17 00:00:00 2001
+From: Jiffin Tony Thottan
+Date: Wed, 3 May 2017 12:47:14 +0530
+Subject: [PATCH 63/74] ganesha : minor improvements for commit e91cdf4 (17081)
+
+Change-Id: I3af13e081c5e46cc6f2c132e7a5106ac3355c850
+Signed-off-by: Jiffin Tony Thottan
+Reviewed-on: https://review.gluster.org/17152
+Smoke: Gluster Build System
+Reviewed-by: soumya k
+NetBSD-regression: NetBSD Build System
+CentOS-regression: Gluster Build System
+Reviewed-by: Kaleb KEITHLEY
+Signed-off-by: Jiffin Tony Thottan
+---
+ extras/ganesha/scripts/ganesha-ha.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index db2fa54..0e4d23a 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -275,7 +275,7 @@ string:\"EXPORT(Export_Id=$export_id)\" 2>&1")
+ ret=$?
+ logger <<< "${output}"
+ if [ ${ret} -ne 0 ]; then
+- echo "Refresh-config failed on ${current_host}"
++ echo "Refresh-config failed on ${current_host}. 
Please check logs on ${current_host}"
+ else
+ echo "Refresh-config completed on ${current_host}."
+ fi
+--
+1.8.3.1
+
diff --git a/SOURCES/0064-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch b/SOURCES/0064-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch
new file mode 100644
index 0000000..ab90a1a
--- /dev/null
+++ b/SOURCES/0064-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch
@@ -0,0 +1,51 @@
+From 16d3a7d636d115c44516dc415b26d2c6d0d17424 Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY"
+Date: Tue, 13 Jun 2017 07:36:50 -0400
+Subject: [PATCH 64/74] common-ha: surviving ganesha.nfsd not put in grace on
+ fail-over
+
+A behavior change is seen in the new HA in RHEL 7.4 Beta. Up to now clone
+RAs have been created with "pcs resource create ... meta notify=true".
+Their notify method is invoked with pre-start or post-stop when one of
+the clone RAs is started or stopped.
+
+In 7.4 Beta we observe that the notify method is not invoked when one of
+the clones is stopped (or started).
+
+Ken Gaillot, one of the pacemaker devs, wrote:
+ With the above command, pcs puts the notify=true meta-attribute
+ on the primitive instead of the clone. Looking at the pcs help,
+ that seems expected (--clone notify=true would put it on the clone,
+ meta notify=true puts it on the primitive). If you drop the "meta"
+ above, I think it will work again.
+
+And indeed his suggested fix does work on both RHEL 7.4 Beta and RHEL
+7.3 and presumably Fedora.
+
+Change-Id: Idbb539f1366df6d39f77431c357dff4e53a2df6d
+Signed-off-by: Kaleb S. KEITHLEY
+Reviewed-on: https://review.gluster.org/17534
+Smoke: Gluster Build System
+Reviewed-by: soumya k
+NetBSD-regression: NetBSD Build System
+CentOS-regression: Gluster Build System
+---
+ extras/ganesha/scripts/ganesha-ha.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index 0e4d23a..ce5ff20 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -445,7 +445,7 @@ setup_create_resources()
+ # ganesha-active crm_attribute
+ sleep 5
+
+- pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone meta notify=true
++ pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone notify=true
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone failed"
+ fi
+--
+1.8.3.1
+
diff --git a/SOURCES/0065-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch b/SOURCES/0065-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
new file mode 100644
index 0000000..42536be
--- /dev/null
+++ b/SOURCES/0065-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
@@ -0,0 +1,85 @@
+From f16ba446e30197ff1724a5e257b35fb41330835d Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY"
+Date: Wed, 21 Jun 2017 10:01:20 -0400
+Subject: [PATCH 65/74] common-ha: enable and disable selinux
+ ganesha_use_fusefs
+
+Starting in Fedora 26 and RHEL 7.4 there are new targeted policies
+in selinux which include a tuneable to allow ganesha.nfsd to access
+the gluster (FUSE) shared_storage volume where ganesha maintains its
+state.
+
+N.B. rpm doesn't have a way to distinguish between RHEL 7.3 or 7.4
+so it can't be enabled for RHEL at this time. /usr/sbin/semanage is
+in policycoreutils-python in RHEL (versus policycoreutils-python-utils
+in Fedora.) Once RHEL 7.4 GAs we may also wish to specify the version
+for RHEL 7 explicitly, i.e. 
+ Requires: selinux-policy >= 3.13.1-160.
+But beware, the corresponding version in Fedora 26 seems to be
+selinux-policy-3.13.1.258 or so. (Maybe earlier versions, but that's
+what's currently in the F26 beta.)
+
+release-3.10 is the upstream master branch for glusterfs-ganesha. For
+release-3.11 and later storhaug needs a similar change, which is
+tracked by https://github.com/linux-ha-storage/storhaug/issues/11
+
+Maybe at some point we would want to consider migrating the targeted
+policies for glusterfs (and nfs-ganesha) from selinux-policy to a
+glusterfs-selinux (and nfs-ganesha-selinux) subpackage?
+
+Change-Id: I04a5443edd00636cbded59a2baddfa98095bf7ac
+Signed-off-by: Kaleb S. KEITHLEY
+Reviewed-on: https://review.gluster.org/17597
+Smoke: Gluster Build System
+Reviewed-by: Niels de Vos
+Reviewed-by: jiffin tony Thottan
+CentOS-regression: Gluster Build System
+Signed-off-by: Jiffin Tony Thottan
+---
+ glusterfs.spec.in | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/glusterfs.spec.in b/glusterfs.spec.in
+index 0bad6cf..17f814b 100644
+--- a/glusterfs.spec.in
++++ b/glusterfs.spec.in
+@@ -410,6 +410,10 @@ Requires: pcs, dbus
+ %if ( 0%{?rhel} && 0%{?rhel} == 6 )
+ Requires: cman, pacemaker, corosync
+ %endif
++%if ( 0%{?fedora} && 0%{?fedora} > 25 )
++Requires(post): policycoreutils-python-utils
++Requires(postun): policycoreutils-python-utils
++%endif
+ %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 )
+ # we need portblock resource-agent in 3.9.5 and later.
+ Requires: resource-agents >= 3.9.5
+@@ -876,6 +880,12 @@ modprobe fuse
+ exit 0
+ %endif
+
++%if ( 0%{?fedora} && 0%{?fedora} > 25 )
++%post ganesha
++semanage boolean -m ganesha_use_fusefs --on
++exit 0
++%endif
++
+ %if ( 0%{?_build_server} )
+ %if ( 0%{!?_without_georeplication:1} )
+ %post geo-replication
+@@ -998,6 +1008,12 @@ fi
+ %postun api
+ /sbin/ldconfig
+
++%if ( 0%{?fedora} && 0%{?fedora} > 25 )
++%postun ganesha
++semanage boolean -m ganesha_use_fusefs --off
++exit 0
++%endif
++
+ %postun libs
+ /sbin/ldconfig
+
+--
+1.8.3.1
+
diff --git a/SOURCES/0066-packaging-glusterfs-ganesha-update-sometimes-fails-s.patch b/SOURCES/0066-packaging-glusterfs-ganesha-update-sometimes-fails-s.patch
new file mode 100644
index 0000000..541d5da
--- /dev/null
+++ b/SOURCES/0066-packaging-glusterfs-ganesha-update-sometimes-fails-s.patch
@@ -0,0 +1,66 @@
+From f472b5db12723f1a478ad5886fac82958a04e131 Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY"
+Date: Wed, 12 Jul 2017 07:43:51 -0400
+Subject: [PATCH 66/74] packaging: glusterfs-ganesha update sometimes fails
+ semanage
+
+Depending on how dnf orders updates, the updated version of
+selinux-policy-targeted with ganesha_use_fusefs may not be updated
+before the glusterfs-ganesha update executes its %post scriptlet
+containing the `semanage ganesha_use_fusefs ...` command. In such
+situations the semanage command (silently) fails.
+
+Use a %trigger (and %triggerun) to run the scriptlet (again) after
+selinux-policy-targeted with ganesha_use_fusefs has been installed
+or updated.
+
+Note: the %triggerun is probably unnecessary, but it doesn't hurt.
+
+The release-3.10 branch is the "upstream master" for the glusterfs-
+ganesha subpackage.
+
+Note: to be merged after https://review.gluster.org/17806
+
+Change-Id: I1ad06d79fa1711e4abf038baf9f0a5b7bb665934
+Signed-off-by: Kaleb S. 
KEITHLEY +Reviewed-on: https://review.gluster.org/17756 +Smoke: Gluster Build System +CentOS-regression: Gluster Build System +Reviewed-by: Niels de Vos +Signed-off-by: Jiffin Tony Thottan +--- + glusterfs.spec.in | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 17f814b..e6e2ba3 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -1027,6 +1027,24 @@ exit 0 + %endif + + ##----------------------------------------------------------------------------- ++## All %%trigger should be placed here and keep them sorted ++## ++%if ( 0%{?fedora} && 0%{?fedora} > 25 ) ++%trigger ganesha -- selinux-policy-targeted ++semanage boolean -m ganesha_use_fusefs --on ++exit 0 ++%endif ++ ++##----------------------------------------------------------------------------- ++## All %%triggerun should be placed here and keep them sorted ++## ++%if ( 0%{?fedora} && 0%{?fedora} > 25 ) ++%triggerun ganesha -- selinux-policy-targeted ++semanage boolean -m ganesha_use_fusefs --off ++exit 0 ++%endif ++ ++##----------------------------------------------------------------------------- + ## All %%files should be placed here and keep them grouped + ## + %files +-- +1.8.3.1 + diff --git a/SOURCES/0067-packaging-own-files-in-var-run-gluster-shared_storag.patch b/SOURCES/0067-packaging-own-files-in-var-run-gluster-shared_storag.patch new file mode 100644 index 0000000..ccaf613 --- /dev/null +++ b/SOURCES/0067-packaging-own-files-in-var-run-gluster-shared_storag.patch @@ -0,0 +1,70 @@ +From aa20e41c3087f9ec68d0a7890ed953e5bc7aa096 Mon Sep 17 00:00:00 2001 +From: "Kaleb S. KEITHLEY" +Date: Wed, 26 Jul 2017 10:36:11 -0400 +Subject: [PATCH 67/74] packaging: own files in + (/var)/run/gluster/shared_storage/nfs-ganesha + +The nfs-ganesha rpm owns /etc/ganesha and /etc/ganesha/ganesha.conf, +but gluster-ganesha installs a ganesha-ha.conf.sample in /etc/ganesha/ + +Ganesha HA expects to find the config files in /var/run/.../nfs-ganesha +and for there to be symlinks from /etc/ganesha/* to /var/run/... + +As exports are created the ganesha export files are written to +/var/run/gluster/shared_storage/nfs-ganesha/exports/* + +This change creates rpm %ghosts for most of these files to manage +ownership within rpm and ensure they are not deleted or overwritten +during package install/upgrade/removal. The name of the exports varies, +so it's not possible AFAIK to create wildcard %ghosts for them. + +Change-Id: Ic4389291c0af0bd72c22fa742cdfa2011b9286f3 +Signed-off-by: Kaleb S. 
KEITHLEY +Reviewed-on: https://review.gluster.org/17883 +Smoke: Gluster Build System +CentOS-regression: Gluster Build System +Reviewed-by: Niels de Vos +Signed-off-by: Jiffin Tony Thottan +--- + glusterfs.spec.in | 15 ++++++++++++++- + 1 file changed, 14 insertions(+), 1 deletion(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index e6e2ba3..cbc77c3 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -818,6 +818,13 @@ sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sha + install -D -p -m 0644 extras/glusterfs-logrotate \ + %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs + ++# ganesha ghosts ++mkdir -p %{buildroot}%{_sysconfdir}/ganesha ++touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf ++mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/exports ++touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf ++touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf ++ + %if ( 0%{!?_without_georeplication:1} ) + mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication + touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf +@@ -1309,10 +1316,16 @@ exit 0 + + %if ( 0%{?_build_server} ) + %files ganesha +-%{_sysconfdir}/ganesha/* ++%dir %{_libexecdir}/ganesha + %{_libexecdir}/ganesha/* + %{_prefix}/lib/ocf/resource.d/heartbeat/* + %{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh ++%{_sysconfdir}/ganesha/ganesha-ha.conf.sample ++%ghost %config(noreplace) %{_sysconfdir}/ganesha/ganesha-ha.conf ++%ghost %dir %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha ++%ghost %dir %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/exports ++%ghost %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf ++%ghost %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf + %endif + + %if ( 0%{?_build_server} ) +-- +1.8.3.1 + diff --git a/SOURCES/0068-common-ha-enable-and-disable-selinux-gluster_use_exe.patch b/SOURCES/0068-common-ha-enable-and-disable-selinux-gluster_use_exe.patch new file mode 100644 index 0000000..f537ebb --- /dev/null +++ b/SOURCES/0068-common-ha-enable-and-disable-selinux-gluster_use_exe.patch @@ -0,0 +1,59 @@ +From c90038f9a3e01d07f1e797c613b0863a43e06d35 Mon Sep 17 00:00:00 2001 +From: "Kaleb S. KEITHLEY" +Date: Mon, 17 Jul 2017 11:07:40 -0400 +Subject: [PATCH 68/74] common-ha: enable and disable selinux + gluster_use_execmem + +Starting in Fedora 26 and RHEL 7.4 there are new targeted policies in +selinux which include a tuneable to allow glusterd->ganesha-ha.sh->pcs +to access the pcs config, i.e. gluster-use-execmem. + +Note. rpm doesn't have a way to distinguish between RHEL 7.3 or 7.4 +or between 3.13.1-X and 3.13.1-Y so it can't be enabled for RHEL at +this time. + +/usr/sbin/semanage is in policycoreutils-python in RHEL (versus +policycoreutils-python-utils in Fedora.) + +Requires selinux-policy >= 3.13.1-160 in RHEL7. The corresponding +version in Fedora 26 seems to be selinux-policy-3.13.1-259 or so. (Maybe +earlier versions, but that's what was in F26 when I checked.) + +Change-Id: Ic474b3f7739ff5be1e99d94d00b55caae4ceb5a0 +Signed-off-by: Kaleb S. 
KEITHLEY
+Reviewed-on: https://review.gluster.org/17806
+Smoke: Gluster Build System
+CentOS-regression: Gluster Build System
+Reviewed-by: soumya k
+Reviewed-by: Atin Mukherjee
+---
+ extras/ganesha/scripts/ganesha-ha.sh | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index ce5ff20..0b7642d 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -984,6 +984,9 @@ main()
+ usage
+ exit 0
+ fi
++
++ semanage boolean -m gluster_use_execmem --on
++
+ HA_CONFDIR=${1%/}; shift
+ local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
+ local node=""
+@@ -1129,6 +1132,9 @@ $HA_CONFDIR/ganesha-ha.conf
+ ;;
+
+ esac
++
++ semanage boolean -m gluster_use_execmem --off
++
+ }
+
+ main $*
+--
+1.8.3.1
+
diff --git a/SOURCES/0069-ganesha-ha-don-t-set-SELinux-booleans-if-SELinux-is-.patch b/SOURCES/0069-ganesha-ha-don-t-set-SELinux-booleans-if-SELinux-is-.patch
new file mode 100644
index 0000000..912f9d1
--- /dev/null
+++ b/SOURCES/0069-ganesha-ha-don-t-set-SELinux-booleans-if-SELinux-is-.patch
@@ -0,0 +1,53 @@
+From 9b299dff3811a48978a76cbdd5f4f66f0cf68d78 Mon Sep 17 00:00:00 2001
+From: Ambarish
+Date: Tue, 12 Sep 2017 18:34:29 +0530
+Subject: [PATCH 69/74] ganesha-ha: don't set SELinux booleans if SELinux is
+ disabled
+
+semanage commands inside the ganesha-ha.sh script will fail if SELinux
+is disabled. This patch introduces a check for whether SELinux is
+enabled, and subsequently runs the semanage commands only on
+SELinux-enabled systems.
+
+Change-Id: Ibee61cbb1d51a73e6c326b49bac5c7ce06feb310
+Signed-off-by: Ambarish
+Reviewed-on: https://review.gluster.org/18264
+Reviewed-by: Niels de Vos
+Smoke: Gluster Build System
+Reviewed-by: Kaleb KEITHLEY
+Reviewed-by: jiffin tony Thottan
+Reviewed-by: Daniel Gryniewicz
+CentOS-regression: Gluster Build System
+---
+ extras/ganesha/scripts/ganesha-ha.sh | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
+index 0b7642d..3a18a1a 100644
+--- a/extras/ganesha/scripts/ganesha-ha.sh
++++ b/extras/ganesha/scripts/ganesha-ha.sh
+@@ -985,7 +985,9 @@ main()
+ exit 0
+ fi
+
+- semanage boolean -m gluster_use_execmem --on
++ if (selinuxenabled) ;then
++ semanage boolean -m gluster_use_execmem --on
++ fi
+
+ HA_CONFDIR=${1%/}; shift
+ local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
+@@ -1133,8 +1135,9 @@ $HA_CONFDIR/ganesha-ha.conf
+
+ esac
+
+- semanage boolean -m gluster_use_execmem --off
+-
++ if (selinuxenabled) ;then
++ semanage boolean -m gluster_use_execmem --off
++ fi
+ }
+
+ main $*
+--
+1.8.3.1
+
diff --git a/SOURCES/0070-build-remove-ganesha-dependency-on-selinux-policy.patch b/SOURCES/0070-build-remove-ganesha-dependency-on-selinux-policy.patch
new file mode 100644
index 0000000..f55f1d7
--- /dev/null
+++ b/SOURCES/0070-build-remove-ganesha-dependency-on-selinux-policy.patch
@@ -0,0 +1,41 @@
+From 1d9693e542fb63ad985a6d6647c0c613f2e677b6 Mon Sep 17 00:00:00 2001
+From: "Kaleb S. KEITHLEY"
+Date: Fri, 23 Jun 2017 20:43:16 +0530
+Subject: [PATCH 70/74] build: remove ganesha dependency on selinux-policy
+
+Problem:
+Puddle creation fails with unresolved dependencies
+ unresolved deps:
+*** selinux-policy >= 0:3.13.1-160
+
+Solution:
+We know a priori that the version in RHEL 7.4 is already the desired
+version, so removing this explicit dependency *should* not be a gluster
+test blocker. 
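+
+A quick sanity check on a RHEL 7.4 host, as a sketch (the commands are
+standard rpm/policycoreutils usage; the exact version string shown is the
+assumption stated above):
+
+  rpm -q selinux-policy                          # expect >= 3.13.1-160
+  semanage boolean -l | grep ganesha_use_fusefs  # boolean should be listed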
+ +Label: DOWNSTREAM ONLY + +Change-Id: Id53ac0e41adc14704932787ee0dd3143e6615aaf +Signed-off-by: Milind Changire +Reviewed-on: https://code.engineering.redhat.com/gerrit/109945 +Reviewed-by: Kaleb Keithley +Signed-off-by: Jiffin Tony Thottan +--- + glusterfs.spec.in | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index cbc77c3..05eff07 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -411,6 +411,7 @@ Requires: pcs, dbus + Requires: cman, pacemaker, corosync + %endif + %if ( 0%{?fedora} && 0%{?fedora} > 25 ) ++Requires: selinux-policy >= 3.13.1-160 + Requires(post): policycoreutils-python-utils + Requires(postun): policycoreutils-python-utils + %endif +-- +1.8.3.1 + diff --git a/SOURCES/0071-common-ha-enable-pacemaker-at-end-of-setup.patch b/SOURCES/0071-common-ha-enable-pacemaker-at-end-of-setup.patch new file mode 100644 index 0000000..9725c05 --- /dev/null +++ b/SOURCES/0071-common-ha-enable-pacemaker-at-end-of-setup.patch @@ -0,0 +1,62 @@ +From 50b87b03d6460b7c5d733924f611efdac134f37d Mon Sep 17 00:00:00 2001 +From: "Kaleb S. KEITHLEY" +Date: Wed, 7 Jun 2017 08:15:48 -0400 +Subject: [PATCH 71/74] common-ha: enable pacemaker at end of setup + +Label: DOWNSTREAM ONLY +Change-Id: I3ccd59b67ed364bfc5d27e88321ab5b9f8d471fd +Signed-off-by: Kaleb S. KEITHLEY +Reviewed-on: https://code.engineering.redhat.com/gerrit/108431 +Reviewed-by: Soumya Koduri +--- + extras/ganesha/scripts/ganesha-ha.sh | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + +diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh +index 3a18a1a..b252818 100644 +--- a/extras/ganesha/scripts/ganesha-ha.sh ++++ b/extras/ganesha/scripts/ganesha-ha.sh +@@ -787,6 +787,22 @@ setup_state_volume() + } + + ++enable_pacemaker() ++{ ++ while [[ ${1} ]]; do ++ if [ "${SERVICE_MAN}" == "/usr/bin/systemctl" ]; then ++${SECRET_PEM} root@${1} ${SERVICE_MAN} enable pacemaker" ++ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \ ++${SECRET_PEM} root@${1} "${SERVICE_MAN} enable pacemaker" ++ else ++ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \ ++${SECRET_PEM} root@${1} "${SERVICE_MAN} pacemaker enable" ++ fi ++ shift ++ done ++} ++ ++ + addnode_state_volume() + { + local newnode=${1}; shift +@@ -1011,6 +1027,8 @@ main() + + if [ "X${HA_NUM_SERVERS}X" != "X1X" ]; then + ++ determine_service_manager ++ + setup_cluster ${HA_NAME} ${HA_NUM_SERVERS} "${HA_SERVERS}" + + setup_create_resources ${HA_SERVERS} +@@ -1019,6 +1037,8 @@ main() + + setup_state_volume ${HA_SERVERS} + ++ enable_pacemaker ${HA_SERVERS} ++ + else + + logger "insufficient servers for HA, aborting" +-- +1.8.3.1 + diff --git a/SOURCES/0072-common-ha-Fix-an-incorrect-syntax-during-setup.patch b/SOURCES/0072-common-ha-Fix-an-incorrect-syntax-during-setup.patch new file mode 100644 index 0000000..54d8d63 --- /dev/null +++ b/SOURCES/0072-common-ha-Fix-an-incorrect-syntax-during-setup.patch @@ -0,0 +1,39 @@ +From eeaf040d3231c30d6d559a70c5e8ae36098d3a9a Mon Sep 17 00:00:00 2001 +From: Soumya Koduri +Date: Wed, 14 Jun 2017 15:20:22 +0530 +Subject: [PATCH 72/74] common-ha: Fix an incorrect syntax during setup + +There was an invalid line introduced as part of +https://code.engineering.redhat.com/gerrit/#/c/108431/ + +Detected by rpmdiff - + https://errata.devel.redhat.com/rpmdiff/show/175336?result_id=4796901 + +This change is to fix the same. 
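+
+A minimal post-setup check, as a sketch: it assumes the same HA_SERVERS
+and SECRET_PEM variables that ganesha-ha.sh already defines:
+
+  for node in ${HA_SERVERS}; do
+      ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+  ${SECRET_PEM} root@${node} "systemctl is-enabled pacemaker"
+  done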
+ +Label: DOWNSTREAM ONLY + +Change-Id: I55cdd7d866cb175fb620dbbd2d02c36eab291a74 +Signed-off-by: Soumya Koduri +Reviewed-on: https://code.engineering.redhat.com/gerrit/109017 +Reviewed-by: Kaleb Keithley +Tested-by: Kaleb Keithley +--- + extras/ganesha/scripts/ganesha-ha.sh | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh +index b252818..623fb64 100644 +--- a/extras/ganesha/scripts/ganesha-ha.sh ++++ b/extras/ganesha/scripts/ganesha-ha.sh +@@ -791,7 +791,6 @@ enable_pacemaker() + { + while [[ ${1} ]]; do + if [ "${SERVICE_MAN}" == "/usr/bin/systemctl" ]; then +-${SECRET_PEM} root@${1} ${SERVICE_MAN} enable pacemaker" + ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \ + ${SECRET_PEM} root@${1} "${SERVICE_MAN} enable pacemaker" + else +-- +1.8.3.1 + diff --git a/SOURCES/0073-Fix-build-issues-related-to-nfs-ganesha-package.patch b/SOURCES/0073-Fix-build-issues-related-to-nfs-ganesha-package.patch new file mode 100644 index 0000000..d9f3c9a --- /dev/null +++ b/SOURCES/0073-Fix-build-issues-related-to-nfs-ganesha-package.patch @@ -0,0 +1,103 @@ +From 77c5fda269bffe16d8564a5d26ed838ad9b6fcea Mon Sep 17 00:00:00 2001 +From: Jiffin Tony Thottan +Date: Tue, 14 Nov 2017 12:43:29 +0530 +Subject: [PATCH 73/74] Fix build issues related to nfs-ganesha package + +Signed-off-by: Jiffin Tony Thottan +--- + glusterfs.spec.in | 20 +++++++++++++------- + 1 file changed, 13 insertions(+), 7 deletions(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index 05eff07..b6ba91c 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -822,9 +822,6 @@ install -D -p -m 0644 extras/glusterfs-logrotate \ + # ganesha ghosts + mkdir -p %{buildroot}%{_sysconfdir}/ganesha + touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf +-mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/exports +-touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf +-touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf + + %if ( 0%{!?_without_georeplication:1} ) + mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication +@@ -888,11 +885,13 @@ modprobe fuse + exit 0 + %endif + ++%if ( 0%{?_build_server} ) + %if ( 0%{?fedora} && 0%{?fedora} > 25 ) + %post ganesha + semanage boolean -m ganesha_use_fusefs --on + exit 0 + %endif ++%endif + + %if ( 0%{?_build_server} ) + %if ( 0%{!?_without_georeplication:1} ) +@@ -1016,11 +1015,13 @@ fi + %postun api + /sbin/ldconfig + ++%if ( 0%{?_build_server} ) + %if ( 0%{?fedora} && 0%{?fedora} > 25 ) + %postun ganesha + semanage boolean -m ganesha_use_fusefs --off + exit 0 + %endif ++%endif + + %postun libs + /sbin/ldconfig +@@ -1037,20 +1038,24 @@ exit 0 + ##----------------------------------------------------------------------------- + ## All %%trigger should be placed here and keep them sorted + ## ++%if ( 0%{?_build_server} ) + %if ( 0%{?fedora} && 0%{?fedora} > 25 ) + %trigger ganesha -- selinux-policy-targeted + semanage boolean -m ganesha_use_fusefs --on + exit 0 + %endif ++%endif + + ##----------------------------------------------------------------------------- + ## All %%triggerun should be placed here and keep them sorted + ## ++%if ( 0%{?_build_server} ) + %if ( 0%{?fedora} && 0%{?fedora} > 25 ) + %triggerun ganesha -- selinux-policy-targeted + semanage boolean -m ganesha_use_fusefs --off + exit 0 + %endif ++%endif + + 
##----------------------------------------------------------------------------- + ## All %%files should be placed here and keep them grouped +@@ -1074,6 +1079,11 @@ exit 0 + %if ( ! 0%{?_build_server} ) + # exclude ganesha files + %exclude %{_prefix}/lib/ocf/* ++%exclude %{_libexecdir}/ganesha/* ++%exclude %{_prefix}/lib/ocf/resource.d/heartbeat/* ++%exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample ++%exclude %{_sysconfdir}/ganesha/ganesha-ha.conf ++ + # exclude incrementalapi + %exclude %{_libexecdir}/glusterfs/* + %exclude %{_sbindir}/gfind_missing_files +@@ -1323,10 +1333,6 @@ exit 0 + %{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh + %{_sysconfdir}/ganesha/ganesha-ha.conf.sample + %ghost %config(noreplace) %{_sysconfdir}/ganesha/ganesha-ha.conf +-%ghost %dir %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha +-%ghost %dir %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/exports +-%ghost %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf +-%ghost %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf + %endif + + %if ( 0%{?_build_server} ) +-- +1.8.3.1 + diff --git a/SOURCES/0074-build-make-var-run-available-on-RHEL-6.patch b/SOURCES/0074-build-make-var-run-available-on-RHEL-6.patch new file mode 100644 index 0000000..91a44a8 --- /dev/null +++ b/SOURCES/0074-build-make-var-run-available-on-RHEL-6.patch @@ -0,0 +1,31 @@ +From c59e78a5e8b7a1802f1f7caa1eab480a3395ba91 Mon Sep 17 00:00:00 2001 +From: Milind Changire +Date: Wed, 15 Nov 2017 12:18:57 +0530 +Subject: [PATCH 74/74] build: make /var/run available on RHEL-6 + +make /var/run available on RHEL-6 as well + +Label: DOWNSTREAM ONLY + +Change-Id: Iec79478e2233bd3194030a2c75273fc2ba3d17bb +Signed-off-by: Milind Changire +--- + glusterfs.spec.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/glusterfs.spec.in b/glusterfs.spec.in +index b6ba91c..da8a3e5 100644 +--- a/glusterfs.spec.in ++++ b/glusterfs.spec.in +@@ -118,7 +118,7 @@ + %endif + + # From https://fedoraproject.org/wiki/Packaging:Python#Macros +-%if ( 0%{?rhel} && 0%{?rhel} <= 5 ) ++%if ( 0%{?rhel} && 0%{?rhel} <= 6 ) + %{!?python2_sitelib: %global python2_sitelib %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")} + %{!?python2_sitearch: %global python2_sitearch %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")} + %global _rundir %{_localstatedir}/run +-- +1.8.3.1 + diff --git a/SOURCES/0075-cli-gluster-help-changes.patch b/SOURCES/0075-cli-gluster-help-changes.patch new file mode 100644 index 0000000..98d47a5 --- /dev/null +++ b/SOURCES/0075-cli-gluster-help-changes.patch @@ -0,0 +1,749 @@ +From fb84f6c69385e35f3a62504dfebc11b21ff4082a Mon Sep 17 00:00:00 2001 +From: N Balachandran +Date: Mon, 6 Nov 2017 09:30:54 +0530 +Subject: [PATCH 075/128] cli: gluster help changes + +gluster cli help now shows only the top level +help commands. gluster help will now show +help commands for . 
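+
+For example (these are real gluster CLI invocations; the exact grouping of
+the output is an assumption based on the help strings this patch adds):
+
+  gluster help               # top-level command classes only
+  gluster peer help          # help for peer commands
+  gluster volume help        # help for volume commands
+  gluster volume quota help  # help for quota commands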
+ +> BUG: 1474768 +> Signed-off-by: N Balachandran +> BUG: 1509786 +> https://review.gluster.org/#/c/18666/ +> Signed-off-by: N Balachandran + +(cherry picked from commit 89dc54f50c9f800ca4446ea8fe736e4860588845) +Change-Id: I263f53a0870d80ef4cfaad455fdaa47e2ac4423b +BUG: 1498730 +Signed-off-by: N Balachandran +Reviewed-on: https://code.engineering.redhat.com/gerrit/123525 +Tested-by: RHGS Build Bot +Reviewed-by: Atin Mukherjee +--- + cli/src/cli-cmd-global.c | 3 + + cli/src/cli-cmd-misc.c | 77 ++++++++-- + cli/src/cli-cmd-parser.c | 20 ++- + cli/src/cli-cmd-peer.c | 9 +- + cli/src/cli-cmd-snapshot.c | 5 + + cli/src/cli-cmd-volume.c | 347 ++++++++++++++++++++++++++++++++++++++------- + cli/src/cli.c | 2 +- + 7 files changed, 387 insertions(+), 76 deletions(-) + +diff --git a/cli/src/cli-cmd-global.c b/cli/src/cli-cmd-global.c +index 881506b..1f9cb54 100644 +--- a/cli/src/cli-cmd-global.c ++++ b/cli/src/cli-cmd-global.c +@@ -68,11 +68,14 @@ cli_cmd_global_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word, + count = (sizeof (global_cmds) / sizeof (struct cli_cmd)); + cli_cmd_sort (cmd, count); + ++ cli_out ("\ngluster global commands"); ++ cli_out ("========================\n"); + for (global_cmd = cmd; global_cmd->pattern; global_cmd++) + if (_gf_false == global_cmd->disable) + cli_out ("%s - %s", global_cmd->pattern, + global_cmd->desc); + ++ cli_out ("\n"); + GF_FREE (cmd); + return 0; + } +diff --git a/cli/src/cli-cmd-misc.c b/cli/src/cli-cmd-misc.c +index 9f8c159..c887515 100644 +--- a/cli/src/cli-cmd-misc.c ++++ b/cli/src/cli-cmd-misc.c +@@ -23,6 +23,9 @@ extern struct rpc_clnt *global_rpc; + extern rpc_clnt_prog_t *cli_rpc_prog; + + extern struct cli_cmd volume_cmds[]; ++extern struct cli_cmd bitrot_cmds[]; ++extern struct cli_cmd quota_cmds[]; ++extern struct cli_cmd tier_cmds[]; + extern struct cli_cmd cli_probe_cmds[]; + extern struct cli_cmd cli_log_cmds[]; + extern struct cli_cmd cli_system_cmds[]; +@@ -38,37 +41,76 @@ cli_cmd_quit_cbk (struct cli_state *state, struct cli_cmd_word *word, + exit (0); + } + ++ ++static gf_boolean_t ++cli_is_help_command (const char *pattern) ++{ ++ /* FixFixFix ++ * This is not the best way to determine whether ++ * this is a help command ++ */ ++ if (strstr (pattern, "help")) ++ return _gf_true; ++ ++ return _gf_false; ++} ++ ++ + int + cli_cmd_display_help (struct cli_state *state, struct cli_cmd_word *in_word, + const char **words, int wordcount) + { +- struct cli_cmd *cmd[] = {volume_cmds, cli_probe_cmds, +- cli_misc_cmds, snapshot_cmds, +- global_cmds, NULL}; +- struct cli_cmd *cmd_ind = NULL; +- int i = 0; ++ struct cli_cmd *cmd[] = {cli_misc_cmds, cli_probe_cmds, ++ volume_cmds, bitrot_cmds, quota_cmds, ++#if !defined(__NetBSD__) ++ tier_cmds, ++#endif ++ snapshot_cmds, global_cmds, NULL}; ++ struct cli_cmd *cmd_ind = NULL; ++ int i = 0; ++ gf_boolean_t list_all = _gf_false; + + /* cli_system_cmds commands for internal usage + they are not exposed + */ +- for (i=0; cmd[i]!=NULL; i++) +- for (cmd_ind = cmd[i]; cmd_ind->pattern; cmd_ind++) +- if (_gf_false == cmd_ind->disable) +- cli_out ("%s - %s", cmd_ind->pattern, +- cmd_ind->desc); + ++ /* If "help all" */ ++ if (wordcount == 2) ++ list_all = _gf_true; ++ ++ for (i = 0; cmd[i] != NULL; i++) { ++ for (cmd_ind = cmd[i]; cmd_ind->pattern; cmd_ind++) { ++ if ((_gf_false == cmd_ind->disable) && ++ cli_is_help_command (cmd_ind->pattern)) { ++ if (list_all && (cmd_ind->cbk)) { ++ cmd_ind->cbk (state, in_word, words, ++ wordcount); ++ } else { ++ cli_out (" %-25s- %s", 
cmd_ind->pattern, ++ cmd_ind->desc); ++ } ++ } ++ } ++ } ++ ++ cli_out ("\n"); + return 0; + } + ++ ++struct cli_cmd cli_help_cmds[] = { ++ { "help [all]", ++ cli_cmd_display_help, ++ "display help for command classes"}, ++ ++ { NULL, NULL, NULL } ++}; ++ ++ + struct cli_cmd cli_misc_cmds[] = { + { "quit", + cli_cmd_quit_cbk, + "quit"}, +- +- { "help", +- cli_cmd_display_help, +- "display command options"}, +- + { "exit", + cli_cmd_quit_cbk, + "exit"}, +@@ -84,7 +126,12 @@ cli_cmd_misc_register (struct cli_state *state) + struct cli_cmd *cmd = NULL; + + for (cmd = cli_misc_cmds; cmd->pattern; cmd++) { ++ ret = cli_cmd_register (&state->tree, cmd); ++ if (ret) ++ goto out; ++ } + ++ for (cmd = cli_help_cmds; cmd->pattern; cmd++) { + ret = cli_cmd_register (&state->tree, cmd); + if (ret) + goto out; +diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c +index a35fc74..c95b262 100644 +--- a/cli/src/cli-cmd-parser.c ++++ b/cli/src/cli-cmd-parser.c +@@ -1189,8 +1189,13 @@ cli_cmd_quota_parse (const char **words, int wordcount, dict_t **options) + goto out; + } + +- if (wordcount < 4) ++ if (wordcount < 4) { ++ ++ if ((wordcount == 3) && !(strcmp (words[2], "help"))) { ++ ret = 1; ++ } + goto out; ++ } + + volname = (char *)words[2]; + if (!volname) { +@@ -5588,15 +5593,22 @@ cli_cmd_bitrot_parse (const char **words, int wordcount, dict_t **options) + GF_ASSERT (words); + GF_ASSERT (options); + +- dict = dict_new (); +- if (!dict) +- goto out; ++ ++ /* Hack to print out bitrot help properly */ ++ if ((wordcount == 3) && !(strcmp (words[2], "help"))) { ++ ret = 1; ++ return ret; ++ } + + if (wordcount < 4 || wordcount > 5) { + gf_log ("cli", GF_LOG_ERROR, "Invalid syntax"); + goto out; + } + ++ dict = dict_new (); ++ if (!dict) ++ goto out; ++ + volname = (char *)words[2]; + if (!volname) { + ret = -1; +diff --git a/cli/src/cli-cmd-peer.c b/cli/src/cli-cmd-peer.c +index 4802f71..7df60bc 100644 +--- a/cli/src/cli-cmd-peer.c ++++ b/cli/src/cli-cmd-peer.c +@@ -264,7 +264,7 @@ struct cli_cmd cli_probe_cmds[] = { + + { "peer help", + cli_cmd_peer_help_cbk, +- "Help command for peer "}, ++ "display help for peer commands"}, + + { "pool list", + cli_cmd_pool_list_cbk, +@@ -281,17 +281,20 @@ cli_cmd_peer_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word, + struct cli_cmd *probe_cmd = NULL; + int count = 0; + ++ cli_out ("\ngluster peer commands"); ++ cli_out ("======================\n"); ++ + cmd = GF_CALLOC (1, sizeof (cli_probe_cmds), cli_mt_cli_cmd); + memcpy (cmd, cli_probe_cmds, sizeof (cli_probe_cmds)); + count = (sizeof (cli_probe_cmds) / sizeof (struct cli_cmd)); + cli_cmd_sort (cmd, count); + +- +- + for (probe_cmd = cmd; probe_cmd->pattern; probe_cmd++) + cli_out ("%s - %s", probe_cmd->pattern, probe_cmd->desc); + + GF_FREE (cmd); ++ ++ cli_out ("\n"); + return 0; + } + +diff --git a/cli/src/cli-cmd-snapshot.c b/cli/src/cli-cmd-snapshot.c +index e79128c..88b4737 100644 +--- a/cli/src/cli-cmd-snapshot.c ++++ b/cli/src/cli-cmd-snapshot.c +@@ -140,9 +140,14 @@ cli_cmd_snapshot_help_cbk (struct cli_state *state, + count = (sizeof (snapshot_cmds) / sizeof (struct cli_cmd)); + cli_cmd_sort (cmd, count); + ++ cli_out ("\ngluster snapshot commands"); ++ cli_out ("=========================\n"); ++ + for (snap_cmd = cmd; snap_cmd->pattern; snap_cmd++) + if (_gf_false == snap_cmd->disable) + cli_out ("%s - %s", snap_cmd->pattern, snap_cmd->desc); ++ cli_out ("\n"); ++ + GF_FREE (cmd); + return 0; + } +diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c +index 
ca9da0a..a1e5c51 100644 +--- a/cli/src/cli-cmd-volume.c ++++ b/cli/src/cli-cmd-volume.c +@@ -36,7 +36,19 @@ extern rpc_clnt_prog_t cli_quotad_clnt; + + int + cli_cmd_volume_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word, +- const char **words, int wordcount); ++ const char **words, int wordcount); ++ ++int ++cli_cmd_bitrot_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word, ++ const char **words, int wordcount); ++ ++int ++cli_cmd_quota_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word, ++ const char **words, int wordcount); ++ ++int ++cli_cmd_tier_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word, ++ const char **words, int wordcount); + + int + cli_cmd_volume_info_cbk (struct cli_state *state, struct cli_cmd_word *word, +@@ -1293,9 +1305,12 @@ cli_cmd_volume_tier_cbk (struct cli_state *state, + + + if (wordcount < 4) { +- cli_usage_out (word->pattern); +- if (wordcount == 3 && !strcmp(words[2], "help")) ++ if (wordcount == 3 && !strcmp(words[2], "help")) { ++ cli_cmd_tier_help_cbk (state, word, words, wordcount); + ret = 0; ++ } else { ++ cli_usage_out (word->pattern); ++ } + goto out; + } + +@@ -1719,6 +1734,8 @@ out: + return ret; + } + ++ ++ + int + cli_cmd_bitrot_cbk (struct cli_state *state, struct cli_cmd_word *word, + const char **words, int wordcount) +@@ -1746,6 +1763,13 @@ cli_cmd_bitrot_cbk (struct cli_state *state, struct cli_cmd_word *word, + goto out; + } + ++ if (ret == 1) { ++ /* this is 'volume bitrot help' */ ++ cli_cmd_bitrot_help_cbk (state, word, words, wordcount); ++ ret = 0; ++ goto out2; ++ } ++ + frame = create_frame (THIS, THIS->ctx->pool); + if (!frame) { + ret = -1; +@@ -1834,7 +1858,7 @@ out: + #endif + + CLI_STACK_DESTROY (frame); +- ++out2: + return ret; + } + +@@ -1866,6 +1890,12 @@ cli_cmd_quota_cbk (struct cli_state *state, struct cli_cmd_word *word, + } + } else { + ret = cli_cmd_quota_parse (words, wordcount, &options); ++ ++ if (ret == 1) { ++ cli_cmd_quota_help_cbk (state, word, words, wordcount); ++ ret = 0; ++ goto out; ++ } + if (ret < 0) { + cli_usage_out (word->pattern); + parse_err = 1; +@@ -3157,7 +3187,159 @@ out: + return ret; + } + ++ ++/* This is a bit of a hack to display the help. The current bitrot cmd ++ * format does not work well when registering the cmds. ++ * Ideally the should have been of the form ++ * gluster volume bitrot ... ++ */ ++ ++struct cli_cmd bitrot_cmds[] = { ++ ++ {"volume bitrot help", ++ cli_cmd_bitrot_help_cbk, ++ "display help for volume bitrot commands" ++ }, ++ ++ {"volume bitrot {enable|disable}", ++ NULL, /*cli_cmd_bitrot_cbk,*/ ++ "Enable/disable bitrot for volume " ++ }, ++ ++ {"volume bitrot scrub-throttle {lazy|normal|aggressive}", ++ NULL, /*cli_cmd_bitrot_cbk,*/ ++ "Set the speed of the scrubber for volume " ++ }, ++ ++ {"volume bitrot scrub-frequency {hourly|daily|weekly|biweekly" ++ "|monthly}", ++ NULL, /*cli_cmd_bitrot_cbk,*/ ++ "Set the frequency of the scrubber for volume " ++ }, ++ ++ {"volume bitrot scrub {pause|resume|status|ondemand}", ++ NULL, /*cli_cmd_bitrot_cbk,*/ ++ "Pause/resume the scrubber for . Status displays the status of " ++ "the scrubber. ondemand starts the scrubber immediately." 
++ }, ++ ++ {"volume bitrot {enable|disable}\n" ++ "volume bitrot scrub-throttle {lazy|normal|aggressive}\n" ++ "volume bitrot scrub-frequency {hourly|daily|weekly|biweekly" ++ "|monthly}\n" ++ "volume bitrot scrub {pause|resume|status|ondemand}", ++ cli_cmd_bitrot_cbk, ++ NULL ++ }, ++ ++ { NULL, NULL, NULL } ++}; ++ ++ ++struct cli_cmd quota_cmds[] = { ++ ++ /* Quota commands */ ++ {"volume quota help", ++ cli_cmd_quota_help_cbk, ++ "display help for volume quota commands" ++ }, ++ ++ {"volume quota {enable|disable|list [ ...]| " ++ "list-objects [ ...] | remove | remove-objects | " ++ "default-soft-limit }", ++ cli_cmd_quota_cbk, ++ "Enable/disable and configure quota for " ++ }, ++ ++ {"volume quota {limit-usage []}", ++ cli_cmd_quota_cbk, ++ "Set maximum size for for " ++ }, ++ ++ {"volume quota {limit-objects []}", ++ cli_cmd_quota_cbk, ++ "Set the maximum number of entries allowed in for " ++ }, ++ ++ {"volume quota {alert-time|soft-timeout|hard-timeout} {