From aff18f761ef64d55635daa9a1d2140fe35632820 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawal@redhat.com>
Date: Fri, 29 Mar 2019 11:48:32 +0530
Subject: [PATCH 109/124] glusterd: Optimize glusterd handshaking code path

Problem: During handshaking, glusterd populates volume data in a
dictionary. When more than 1500 volumes are configured, glusterd takes
more than 10 minutes to generate the data; because of that delay, RPC
requests time out and RPC starts bailing out call frames.

Solution: To optimize this code path, the following changes are made:
1) Spawn multiple threads that populate the volume data in bulk into
   separate dictionaries, and introduce an option
   glusterd.vol_count_per_thread to configure how many volumes each
   thread handles (a sketch of the intended split follows below).
2) Populate tier data only when the volume type is tier.
3) Compare snapshot data only when snap_count is non-zero.

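To make the intended split concrete, the following is a minimal standalone
sketch (not part of the patch) of how volumes numbered 1..volcnt are
partitioned into per-thread ranges; the helper name compute_thread_ranges
and the sample inputs are hypothetical, while the real computation is done
by glusterd_add_volumes_to_export_dict() in the glusterd-utils.c hunk below:

#include <stdio.h>

/* Hypothetical helper: split volumes numbered 1..volcnt into ranges of
 * at most vol_per_thread_limit volumes each, one range per worker. */
static void
compute_thread_ranges(int volcnt, int vol_per_thread_limit)
{
    int totthread = volcnt / vol_per_thread_limit;
    int start = 1;
    int i;

    if (volcnt % vol_per_thread_limit)
        totthread++; /* one extra thread covers the remainder */

    for (i = 0; i < totthread; i++) {
        int end = start + vol_per_thread_limit - 1;
        if (end > volcnt)
            end = volcnt;
        printf("thread %d: volumes %d..%d\n", i + 1, start, end);
        start = end + 1;
    }
}

int
main(void)
{
    /* With the default limit of 100, 1530 volumes yield 16 threads,
     * the last covering the 30-volume remainder. */
    compute_thread_ranges(1530, 100);
    return 0;
}
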
> Fixes: bz#1699339
> Change-Id: I38dc71970c049217f9d1a06fc0aaf4c26eab18f5
> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
> (Cherry picked from commit 26a19d9da3ab5604db02d4ca02ce868fb57193a4)
> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/22556/)

Bug: 1652461
Change-Id: Ia81671a7e1f173bcb32da9dc439be9e61c18bde1
Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167981
Tested-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
 libglusterfs/src/glusterfs/globals.h            |   4 +-
 tests/bugs/glusterd/bug-1699339.t               |  69 ++++++
 xlators/mgmt/glusterd/src/glusterd-op-sm.c      |   1 +
 .../mgmt/glusterd/src/glusterd-snapshot-utils.c |   3 +
 xlators/mgmt/glusterd/src/glusterd-utils.c      | 269 +++++++++++++++++----
 xlators/mgmt/glusterd/src/glusterd-volume-set.c |  55 +++++
 xlators/mgmt/glusterd/src/glusterd.h            |  10 +
 7 files changed, 362 insertions(+), 49 deletions(-)
 create mode 100644 tests/bugs/glusterd/bug-1699339.t

diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index 6642ba0..e45db14 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -50,7 +50,7 @@
     1 /* MIN is the fresh start op-version, mostly \
          should not change */
 #define GD_OP_VERSION_MAX                                                     \
-    GD_OP_VERSION_6_0 /* MAX VERSION is the maximum                           \
+    GD_OP_VERSION_7_0 /* MAX VERSION is the maximum                           \
                          count in VME table, should                           \
                          keep changing with                                   \
                          introduction of newer                                \
@@ -134,6 +134,8 @@
 
 #define GD_OP_VERSION_6_0 60000 /* Op-version for GlusterFS 6.0 */
 
+#define GD_OP_VERSION_7_0 70000 /* Op-version for GlusterFS 7.0 */
+
 #include "glusterfs/xlator.h"
 #include "glusterfs/options.h"
 
diff --git a/tests/bugs/glusterd/bug-1699339.t b/tests/bugs/glusterd/bug-1699339.t
new file mode 100644
index 0000000..3e950f4
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1699339.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+NUM_VOLS=15
+
+
+get_brick_base () {
+	printf "%s/vol%02d" $B0 $1
+}
+
+function count_up_bricks {
+        vol=$1;
+        $CLI_1 --xml volume status $vol | grep '<status>1' | wc -l
+}
+
+create_volume () {
+
+	local vol_name=$(printf "%s-vol%02d" $V0 $1)
+
+	TEST $CLI_1 volume create $vol_name replica 3 $H1:$B1/${vol_name} $H2:$B2/${vol_name} $H3:$B3/${vol_name}
+	TEST $CLI_1 volume start $vol_name
+}
+
+TEST launch_cluster 3
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+
+# The option accepts values in the range from 5 to 200
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 210
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 4
+
+TEST $CLI_1 volume set all glusterd.vol_count_per_thread 5
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Our infrastructure can't handle an arithmetic expression here. The formula
+# is (NUM_VOLS-1)*2 because it sees each TEST/EXPECT once but needs the other
+# NUM_VOLS-1, and there are 2 such statements in each iteration.
+TESTS_EXPECTED_IN_LOOP=28
+for i in $(seq 1 $NUM_VOLS); do
+        starttime="$(date +%s)";
+        create_volume $i
+done
+
+TEST kill_glusterd 1
+
+vol1=$(printf "%s-vol%02d" $V0 1)
+TEST $CLI_2 volume set $vol1 performance.readdir-ahead on
+vol2=$(printf "%s-vol%02d" $V0 2)
+TEST $CLI_2 volume set $vol2 performance.readdir-ahead on
+
+# Bring back 1st glusterd
+TEST $glusterd_1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+EXPECT_WITHIN $PROBE_TIMEOUT "on" volinfo_field_1 $vol1 performance.readdir-ahead
+
+vol_name=$(printf "%s-vol%02d" $V0 2)
+EXPECT_WITHIN $PROBE_TIMEOUT "on" volinfo_field_1 $vol2 performance.readdir-ahead
+
+cleanup
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 95f9707..94a5e1f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -87,6 +87,7 @@ glusterd_all_vol_opts valid_all_vol_opts[] = {
      * TBD: Discuss the default value for this. Maybe this should be a
      * dynamic value depending on the memory specifications per node */
     {GLUSTERD_BRICKMUX_LIMIT_KEY, GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE},
+    {GLUSTERD_VOL_CNT_PER_THRD, GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE},
     /*{GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},*/
     {GLUSTERD_DAEMON_LOG_LEVEL_KEY, "INFO"},
     {NULL},
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index b3c4158..d225854 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -2099,6 +2099,9 @@ glusterd_compare_friend_snapshots(dict_t *peer_data, char *peername,
         goto out;
     }
 
+    if (!snap_count)
+        goto out;
+
     for (i = 1; i <= snap_count; i++) {
         /* Compare one snapshot from peer_data at a time */
         ret = glusterd_compare_snap(peer_data, i, peername, peerid);
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index fdd7d91..ff6102b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -155,6 +155,47 @@ out:
     return ret;
 }
 
+int
+get_gd_vol_thread_limit(int *thread_limit)
+{
+    char *value = NULL;
+    int ret = -1;
+    int vol_per_thread_limit = 0;
+    xlator_t *this = NULL;
+    glusterd_conf_t *priv = NULL;
+
+    this = THIS;
+    GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+    priv = this->private;
+    GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+    if (!is_brick_mx_enabled()) {
+        vol_per_thread_limit = 1;
+        ret = 0;
+        goto out;
+    }
+
+    ret = dict_get_strn(priv->opts, GLUSTERD_VOL_CNT_PER_THRD,
+                        SLEN(GLUSTERD_VOL_CNT_PER_THRD), &value);
+    if (ret) {
+        value = GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE;
+    }
+    ret = gf_string2int(value, &vol_per_thread_limit);
+    if (ret)
+        goto out;
+
+out:
+    *thread_limit = vol_per_thread_limit;
+
+    gf_msg_debug("glusterd", 0,
+                 "Per-thread volume limit set to %d; glusterd will populate "
+                 "dict data in parallel",
+                 *thread_limit);
+
+    return ret;
+}
+
 extern struct volopt_map_entry glusterd_volopt_map[];
 extern glusterd_all_vol_opts valid_all_vol_opts[];
 
@@ -3070,50 +3111,55 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
 
     /* tiering related variables */
 
-    snprintf(key, sizeof(key), "%s%d.cold_brick_count", prefix, count);
-    ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_brick_count);
-    if (ret)
-        goto out;
+    if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+        snprintf(key, sizeof(key), "%s%d.cold_brick_count", prefix, count);
+        ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_brick_count);
+        if (ret)
+            goto out;
 
-    snprintf(key, sizeof(key), "%s%d.cold_type", prefix, count);
-    ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_type);
-    if (ret)
-        goto out;
+        snprintf(key, sizeof(key), "%s%d.cold_type", prefix, count);
+        ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_type);
+        if (ret)
+            goto out;
 
-    snprintf(key, sizeof(key), "%s%d.cold_replica_count", prefix, count);
-    ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_replica_count);
-    if (ret)
-        goto out;
+        snprintf(key, sizeof(key), "%s%d.cold_replica_count", prefix, count);
+        ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_replica_count);
+        if (ret)
+            goto out;
 
-    snprintf(key, sizeof(key), "%s%d.cold_disperse_count", prefix, count);
-    ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_disperse_count);
-    if (ret)
-        goto out;
+        snprintf(key, sizeof(key), "%s%d.cold_disperse_count", prefix, count);
+        ret = dict_set_uint32(dict, key,
+                              volinfo->tier_info.cold_disperse_count);
+        if (ret)
+            goto out;
 
-    snprintf(key, sizeof(key), "%s%d.cold_redundancy_count", prefix, count);
-    ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_redundancy_count);
-    if (ret)
-        goto out;
+        snprintf(key, sizeof(key), "%s%d.cold_redundancy_count", prefix, count);
+        ret = dict_set_uint32(dict, key,
+                              volinfo->tier_info.cold_redundancy_count);
+        if (ret)
+            goto out;
 
-    snprintf(key, sizeof(key), "%s%d.cold_dist_count", prefix, count);
-    ret = dict_set_uint32(dict, key, volinfo->tier_info.cold_dist_leaf_count);
-    if (ret)
-        goto out;
+        snprintf(key, sizeof(key), "%s%d.cold_dist_count", prefix, count);
+        ret = dict_set_uint32(dict, key,
+                              volinfo->tier_info.cold_dist_leaf_count);
+        if (ret)
+            goto out;
 
-    snprintf(key, sizeof(key), "%s%d.hot_brick_count", prefix, count);
-    ret = dict_set_uint32(dict, key, volinfo->tier_info.hot_brick_count);
-    if (ret)
-        goto out;
+        snprintf(key, sizeof(key), "%s%d.hot_brick_count", prefix, count);
+        ret = dict_set_uint32(dict, key, volinfo->tier_info.hot_brick_count);
+        if (ret)
+            goto out;
 
-    snprintf(key, sizeof(key), "%s%d.hot_type", prefix, count);
-    ret = dict_set_uint32(dict, key, volinfo->tier_info.hot_type);
-    if (ret)
-        goto out;
+        snprintf(key, sizeof(key), "%s%d.hot_type", prefix, count);
+        ret = dict_set_uint32(dict, key, volinfo->tier_info.hot_type);
+        if (ret)
+            goto out;
 
-    snprintf(key, sizeof(key), "%s%d.hot_replica_count", prefix, count);
-    ret = dict_set_uint32(dict, key, volinfo->tier_info.hot_replica_count);
-    if (ret)
-        goto out;
+        snprintf(key, sizeof(key), "%s%d.hot_replica_count", prefix, count);
+        ret = dict_set_uint32(dict, key, volinfo->tier_info.hot_replica_count);
+        if (ret)
+            goto out;
+    }
 
     snprintf(key, sizeof(key), "%s%d", prefix, count);
     ret = gd_add_vol_snap_details_to_dict(dict, key, volinfo);
@@ -3363,33 +3409,40 @@ out:
     return ret;
 }
 
-int32_t
-glusterd_add_volumes_to_export_dict(dict_t **peer_data)
+void *
+glusterd_add_bulk_volumes_create_thread(void *data)
 {
     int32_t ret = -1;
-    dict_t *dict = NULL;
     glusterd_conf_t *priv = NULL;
     glusterd_volinfo_t *volinfo = NULL;
     int32_t count = 0;
-    glusterd_dict_ctx_t ctx = {0};
     xlator_t *this = NULL;
+    glusterd_add_dict_args_t *arg = NULL;
+    dict_t *dict = NULL;
+    int start = 0;
+    int end = 0;
 
-    this = THIS;
-    GF_ASSERT(this);
+    GF_ASSERT(data);
+
+    arg = data;
+    dict = arg->voldict;
+    start = arg->start;
+    end = arg->end;
+    this = arg->this;
+    THIS = arg->this;
     priv = this->private;
     GF_ASSERT(priv);
 
-    dict = dict_new();
-    if (!dict)
-        goto out;
-
     cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
     {
         count++;
+        if ((count < start) || (count > end))
+            continue;
+
         ret = glusterd_add_volume_to_dict(volinfo, dict, count, "volume");
         if (ret)
             goto out;
-        if (!glusterd_is_volume_quota_enabled(volinfo))
+        if (!dict_get_sizen(volinfo->dict, VKEY_FEATURES_QUOTA))
             continue;
         ret = glusterd_vol_add_quota_conf_to_dict(volinfo, dict, count,
                                                   "volume");
@@ -3397,7 +3450,122 @@ glusterd_add_volumes_to_export_dict(dict_t **peer_data)
             goto out;
     }
 
-    ret = dict_set_int32n(dict, "count", SLEN("count"), count);
+out:
+    GF_ATOMIC_DEC(priv->thread_count);
+    free(arg);
+    return NULL;
+}
+
+int32_t
+glusterd_add_volumes_to_export_dict(dict_t **peer_data)
+{
+    int32_t ret = -1;
+    dict_t *dict = NULL;
+    dict_t *dict_arr[128] = {
+        0,
+    };
+    glusterd_conf_t *priv = NULL;
+    glusterd_volinfo_t *volinfo = NULL;
+    int32_t count = 0;
+    glusterd_dict_ctx_t ctx = {0};
+    xlator_t *this = NULL;
+    int totthread = 0;
+    int volcnt = 0;
+    int start = 1;
+    int endindex = 0;
+    int vol_per_thread_limit = 0;
+    glusterd_add_dict_args_t *arg = NULL;
+    pthread_t th_id = {
+        0,
+    };
+    int th_ret = 0;
+    int i = 0;
+
+    this = THIS;
+    GF_ASSERT(this);
+    priv = this->private;
+    GF_ASSERT(priv);
+
+    dict = dict_new();
+    if (!dict)
+        goto out;
+
+    /* Count the total number of volumes */
+    cds_list_for_each_entry(volinfo, &priv->volumes, vol_list) volcnt++;
+
+    get_gd_vol_thread_limit(&vol_per_thread_limit);
+
+    if ((vol_per_thread_limit == 1) || (vol_per_thread_limit > 100)) {
+        totthread = 0;
+    } else {
+        totthread = volcnt / vol_per_thread_limit;
+        endindex = volcnt % vol_per_thread_limit;
+        if (endindex)
+            totthread++;
+    }
+
+    if (totthread == 0) {
+        cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+        {
+            count++;
+            ret = glusterd_add_volume_to_dict(volinfo, dict, count, "volume");
+            if (ret)
+                goto out;
+
+            if (!dict_get_sizen(volinfo->dict, VKEY_FEATURES_QUOTA))
+                continue;
+
+            ret = glusterd_vol_add_quota_conf_to_dict(volinfo, dict, count,
+                                                      "volume");
+            if (ret)
+                goto out;
+        }
+    } else {
+        for (i = 0; i < totthread; i++) {
+            arg = calloc(1, sizeof(*arg));
+            dict_arr[i] = dict_new();
+            arg->this = this;
+            arg->voldict = dict_arr[i];
+            arg->start = start;
+            if (!endindex) {
+                arg->end = ((i + 1) * vol_per_thread_limit);
+            } else {
+                arg->end = (start + endindex);
+            }
+            th_ret = gf_thread_create_detached(
+                &th_id, glusterd_add_bulk_volumes_create_thread, arg,
+                "bulkvoldict");
+            if (th_ret) {
+                gf_log(this->name, GF_LOG_ERROR,
+                       "glusterd_add_bulk_volume %s"
+                       " thread creation failed",
+                       "bulkvoldict");
+                free(arg);
+                goto out;
+            }
+
+            start = start + vol_per_thread_limit;
+            GF_ATOMIC_INC(priv->thread_count);
+            gf_log(this->name, GF_LOG_INFO,
+                   "Created thread %d to populate dict data for volumes; "
+                   "start index is %d, end index is %d",
+                   (i + 1), arg->start, arg->end);
+        }
+        while (GF_ATOMIC_GET(priv->thread_count)) {
+            sleep(1);
+        }
+
+        gf_log(this->name, GF_LOG_INFO,
+               "Finished dictionary population in all threads");
+        for (i = 0; i < totthread; i++) {
+            dict_copy_with_ref(dict_arr[i], dict);
+            dict_unref(dict_arr[i]);
+        }
+        gf_log(this->name, GF_LOG_INFO,
+               "Finished merging all dictionaries into a single one");
+    }
+
+    ret = dict_set_int32n(dict, "count", SLEN("count"), volcnt);
     if (ret)
         goto out;
 
@@ -3499,6 +3667,9 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
         goto out;
     }
 
+    if (!dict_get_sizen(volinfo->dict, VKEY_FEATURES_QUOTA))
+        goto skip_quota;
+
     snprintf(key, sizeof(key), "volume%d.quota-version", count);
     ret = dict_get_uint32(peer_data, key, &quota_version);
     if (ret) {
@@ -3550,6 +3721,8 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
             goto out;
         }
     }
+
+skip_quota:
     *status = GLUSTERD_VOL_COMP_SCS;
 
 out:
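
The new export path above waits for its detached workers by polling an
atomic counter instead of joining them, then merges the per-thread
dictionaries with dict_copy_with_ref(). Here is a minimal self-contained
sketch of that spawn/wait/merge pattern, assuming plain pthreads and C11
atomics in place of glusterd's gf_thread_create_detached() and GF_ATOMIC_*
wrappers (all names here are illustrative, not glusterd API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static atomic_int thread_count; /* stands in for priv->thread_count */

struct range_args {
    int start;
    int end;
};

/* Worker: the real code populates a private dict for volumes
 * [start, end] here. */
static void *
worker(void *data)
{
    struct range_args *arg = data;

    printf("populating volumes %d..%d\n", arg->start, arg->end);
    free(arg);
    atomic_fetch_sub(&thread_count, 1); /* signal completion */
    return NULL;
}

int
main(void)
{
    pthread_attr_t attr;
    pthread_t tid;
    int i;

    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

    for (i = 0; i < 4; i++) {
        struct range_args *arg = malloc(sizeof(*arg));

        arg->start = i * 100 + 1;
        arg->end = (i + 1) * 100;
        atomic_fetch_add(&thread_count, 1);
        pthread_create(&tid, &attr, worker, arg);
    }

    /* Same one-second polling loop as the patch: wait until every
     * worker has decremented the counter, then merge results. */
    while (atomic_load(&thread_count))
        sleep(1);

    pthread_attr_destroy(&attr);
    printf("all workers done; per-thread dicts would be merged here\n");
    return 0;
}
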
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 42ca9bb..10aa2ae 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1058,6 +1058,51 @@ out:
 }
 
 static int
+validate_volume_per_thread_limit(glusterd_volinfo_t *volinfo, dict_t *dict,
+                                 char *key, char *value, char **op_errstr)
+{
+    xlator_t *this = NULL;
+    uint val = 0;
+    int ret = -1;
+
+    this = THIS;
+    GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+    if (!is_brick_mx_enabled()) {
+        gf_asprintf(op_errstr,
+                    "Brick-multiplexing is not enabled. "
+                    "Please enable brick multiplexing before trying "
+                    "to set this option.");
+        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_WRONG_OPTS_SETTING, "%s",
+               *op_errstr);
+        goto out;
+    }
+
+    ret = gf_string2uint(value, &val);
+    if (ret) {
+        gf_asprintf(op_errstr,
+                    "%s is not a valid count. "
+                    "%s expects an unsigned integer.",
+                    value, key);
+        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s",
+               *op_errstr);
+    }
+
+    if ((val < 5) || (val > 200)) {
+        gf_asprintf(
+            op_errstr,
+            "Please set this option to a value between 5 and 200 to "
+            "optimize the dict generated when many volumes are configured");
+        ret = -1;
+        goto out;
+    }
+out:
+    gf_msg_debug("glusterd", 0, "Returning %d", ret);
+
+    return ret;
+}
+
+static int
 validate_boolean(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
                  char *value, char **op_errstr)
 {
@@ -3520,6 +3565,16 @@ struct volopt_map_entry glusterd_volopt_map[] = {
      "brick multiplexing. Brick multiplexing ensures that "
      "compatible brick instances can share one single "
      "brick process."},
+    {.key = GLUSTERD_VOL_CNT_PER_THRD,
+     .voltype = "mgmt/glusterd",
+     .value = GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE,
+     .op_version = GD_OP_VERSION_7_0,
+     .validate_fn = validate_volume_per_thread_limit,
+     .type = GLOBAL_NO_DOC,
+     .description =
+         "This option can be used to limit the number of volumes "
+         "handled per thread to populate peer data. The option accepts "
+         "values in the range of 5 to 200."},
     {.key = GLUSTERD_BRICKMUX_LIMIT_KEY,
     .voltype = "mgmt/glusterd",
     .value = GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE,
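
As a standalone illustration of the parse-and-range-check logic enforced by
validate_volume_per_thread_limit() above, here is a hedged sketch that uses
plain strtoul in place of gluster's gf_string2uint; the helper name and
error reporting are illustrative only:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns 0 if `value` parses as an unsigned integer in [5, 200],
 * mirroring the range accepted by glusterd.vol_count_per_thread. */
static int
check_vol_count_per_thread(const char *value)
{
    char *end = NULL;
    unsigned long val;

    errno = 0;
    val = strtoul(value, &end, 10);
    if (errno || end == value || *end != '\0') {
        fprintf(stderr, "%s is not a valid count\n", value);
        return -1;
    }
    if (val < 5 || val > 200) {
        fprintf(stderr, "value must be in the range of 5 to 200\n");
        return -1;
    }
    return 0;
}

int
main(void)
{
    /* The same boundary cases bug-1699339.t exercises via the CLI. */
    printf("210 -> %d\n", check_vol_count_per_thread("210")); /* rejected */
    printf("4   -> %d\n", check_vol_count_per_thread("4"));   /* rejected */
    printf("5   -> %d\n", check_vol_count_per_thread("5"));   /* accepted */
    return 0;
}
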
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 0ac6e63..bd9f509 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -57,8 +57,10 @@
 #define GLUSTER_SHARED_STORAGE "gluster_shared_storage"
 #define GLUSTERD_SHARED_STORAGE_KEY "cluster.enable-shared-storage"
 #define GLUSTERD_BRICK_MULTIPLEX_KEY "cluster.brick-multiplex"
+#define GLUSTERD_VOL_CNT_PER_THRD "glusterd.vol_count_per_thread"
 #define GLUSTERD_BRICKMUX_LIMIT_KEY "cluster.max-bricks-per-process"
 #define GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE "250"
+#define GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE "100"
 #define GLUSTERD_LOCALTIME_LOGGING_KEY "cluster.localtime-logging"
 #define GLUSTERD_DAEMON_LOG_LEVEL_KEY "cluster.daemon-log-level"
 
@@ -225,8 +227,16 @@ typedef struct {
                            which might lead the modification of volinfo
                            list.
                          */
+    gf_atomic_t thread_count;
 } glusterd_conf_t;
 
+typedef struct glusterd_add_dict_args {
+    xlator_t *this;
+    dict_t *voldict;
+    int start;
+    int end;
+} glusterd_add_dict_args_t;
+
 typedef enum gf_brick_status {
     GF_BRICK_STOPPED,
     GF_BRICK_STARTED,
-- 
1.8.3.1