From fad234b5a62df48b7abc726549f2abb6b0af7c04 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Tue, 16 Oct 2018 07:50:47 +0530
Subject: [PATCH 402/404] core: glusterfsd keeping fd open in index xlator

Problem: While processing GF_EVENT_PARENT_DOWN, the brick xlator
forwards the event to the next xlator only after it has ensured
that no stub is in progress. The io-threads xlator, however,
decrements its counter at dequeue time, before the stub is actually
processed, so the event can be notified to the next xlator while a
stub is still running.

Solution: Introduce a new counter (stub_cnt) in the io-threads
xlator and decrement it only after the stub has been processed
completely. To avoid a brick crash at the time xlator_mem_cleanup
is called, move only the brick xlator, and only if the detached
brick name is found in the graph.

Note: Thanks to Pranith for sharing a simple reproducer for the
issue.
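
The core of the fix, as an illustrative pseudo-C sketch of the
iot_worker loop (a sketch only, not part of the diff below):

    stub = __iot_dequeue (conf, &pri);  /* queue_size-- at dequeue */
    call_resume (stub);
    GF_ATOMIC_DEC (conf->stub_cnt);     /* only now may the PARENT_DOWN
                                           waiter in notify() proceed */

Previously notify() waited on queue_size, which drops to zero at
dequeue time, i.e. before call_resume() has finished.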

> fixes bz#1637934
> Change-Id: I1a694a001f7a5417e8771e3adf92c518969b6baa
> (Cherry picked from commit 7bf95631b52bd05b06122180f8bd4aa62c70b1a9)
> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/21379/)

Change-Id: I54b8ebb19819f9bbcbdd1448474ab084c0fd2eb6
BUG: 1631372
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/152908
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 glusterfsd/src/glusterfsd-mgmt.c                | 15 +----
 tests/bugs/glusterd/brick-mux-fd-cleanup.t      | 78 +++++++++++++++++++++++++
 xlators/performance/io-threads/src/io-threads.c | 23 ++++----
 xlators/performance/io-threads/src/io-threads.h |  3 +-
 4 files changed, 94 insertions(+), 25 deletions(-)
 create mode 100644 tests/bugs/glusterd/brick-mux-fd-cleanup.t

diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index cbd436a..e3fceeb 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -270,6 +270,7 @@ xlator_mem_cleanup (xlator_t *this) {
                 top = glusterfsd_ctx->active->first;
                 LOCK (&ctx->volfile_lock);
                 /* TODO here we have leak for xlator node in a graph */
+                /* Need to move only top xlator from a graph */
                 for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
                         victim = (*trav_p)->xlator;
                         if (victim->call_cleanup && !strcmp (victim->name, this->name)) {
@@ -277,20 +278,6 @@ xlator_mem_cleanup (xlator_t *this) {
                                 break;
                         }
                 }
-                /* TODO Sometime brick xlator is not moved from graph so followed below
-                   approach to move brick xlator from a graph, will move specific brick
-                   xlator from graph only while inode table and mem_acct are cleaned up
-                */
-                trav_p = &top->children;
-                while (*trav_p) {
-                        victim = (*trav_p)->xlator;
-                        if (victim->call_cleanup && !victim->itable && !victim->mem_acct) {
-                                (*trav_p) = (*trav_p)->next;
-                        } else {
-                                trav_p = &(*trav_p)->next;
-                        }
-                }
-                UNLOCK (&ctx->volfile_lock);
         }
 }
 
diff --git a/tests/bugs/glusterd/brick-mux-fd-cleanup.t b/tests/bugs/glusterd/brick-mux-fd-cleanup.t
new file mode 100644
index 0000000..de11c17
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux-fd-cleanup.t
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This .t tests that the fds from the client are closed on the brick when
+#"gluster volume stop" is executed in a brick-mux setup.
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+function keep_fd_open {
+#This function has to be run as a background job because opening the fds in
+#the foreground and running commands leads to flush calls on these fds,
+#which makes it very difficult to create the race where fds are left open
+#even after the brick dies.
+    exec 5>$M1/a
+    exec 6>$M1/b
+    while [ -f $M0/a ]; do sleep 1; done
+}
+
+function count_open_files {
+    local brick_pid="$1"
+    local pattern="$2"
+    ls -l /proc/$brick_pid/fd | grep -i "$pattern" | wc -l
+}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume create $V1 replica 2 $H0:$B0/${V1}{2,3}
+#Have the same configuration on both volumes so that the bricks are multiplexed
+#Delay the flush fop for a second
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume heal $V1 disable
+TEST $CLI volume set $V0 delay-gen posix
+TEST $CLI volume set $V0 delay-gen.enable flush
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.delay-duration 1000000
+TEST $CLI volume set $V1 delay-gen posix
+TEST $CLI volume set $V1 delay-gen.enable flush
+TEST $CLI volume set $V1 delay-gen.delay-percentage 100
+TEST $CLI volume set $V1 delay-gen.delay-duration 1000000
+
+TEST $CLI volume start $V0
+TEST $CLI volume start $V1
+
+TEST $GFS -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0
+TEST $GFS -s $H0 --volfile-id=$V1 --direct-io-mode=enable $M1
+
+TEST touch $M0/a
+keep_fd_open &
+TEST $CLI volume profile $V1 start
+brick_pid=$(get_brick_pid $V1 $H0 $B0/${V1}2)
+TEST count_open_files $brick_pid "$B0/${V1}2/a"
+TEST count_open_files $brick_pid "$B0/${V1}2/b"
+TEST count_open_files $brick_pid "$B0/${V1}3/a"
+TEST count_open_files $brick_pid "$B0/${V1}3/b"
+
+#If any flush fops other than the one issued at cleanup are introduced into
+#the system they interfere with the race, so check that none have occurred
+EXPECT "^0$" echo "$($CLI volume profile $V1 info incremental | grep -i flush | wc -l)"
+#Stop the volume
+TEST $CLI volume stop $V1
+
+#Wait for the resources of volume $V1 to be cleaned up
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}2/a"
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}2/b"
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}3/a"
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}3/b"
+
+TEST rm -f $M0/a #Exit keep_fd_open()
+wait
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+cleanup
diff --git a/xlators/performance/io-threads/src/io-threads.c b/xlators/performance/io-threads/src/io-threads.c
index 41d48ab..2944c7d 100644
--- a/xlators/performance/io-threads/src/io-threads.c
+++ b/xlators/performance/io-threads/src/io-threads.c
@@ -120,7 +120,7 @@ __iot_dequeue (iot_conf_t *conf, int *pri)
         if (!stub)
                 return NULL;
 
-        GF_ATOMIC_DEC(conf->queue_size);
+        conf->queue_size--;
         conf->queue_sizes[*pri]--;
 
         return stub;
@@ -153,7 +153,8 @@ __iot_enqueue (iot_conf_t *conf, call_stub_t *stub, int pri)
         }
         list_add_tail (&stub->list, &ctx->reqs);
 
-        GF_ATOMIC_INC(conf->queue_size);
+        conf->queue_size++;
+        GF_ATOMIC_INC(conf->stub_cnt);
         conf->queue_sizes[pri]++;
 }
 
@@ -182,7 +183,7 @@ iot_worker (void *data)
                                 conf->ac_iot_count[pri]--;
                                 pri = -1;
                         }
-                        while (GF_ATOMIC_GET(conf->queue_size) == 0) {
+                        while (conf->queue_size == 0) {
                                 if (conf->down) {
                                         bye = _gf_true;/*Avoid sleep*/
                                         break;
@@ -220,8 +221,10 @@ iot_worker (void *data)
                 }
                 pthread_mutex_unlock (&conf->mutex);
 
-                if (stub) /* guard against spurious wakeups */
+                if (stub) { /* guard against spurious wakeups */
                         call_resume (stub);
+                        GF_ATOMIC_DEC(conf->stub_cnt);
+                }
                 stub = NULL;
 
                 if (bye)
@@ -816,7 +819,7 @@ __iot_workers_scale (iot_conf_t *conf)
                         gf_msg_debug (conf->this->name, 0,
                                       "scaled threads to %d (queue_size=%d/%d)",
                                       conf->curr_count,
-                                      GF_ATOMIC_GET(conf->queue_size), scale);
+                                      conf->queue_size, scale);
                 } else {
                         break;
                 }
@@ -1030,7 +1033,7 @@ init (xlator_t *this)
                            bool, out);
 
         conf->this = this;
-        GF_ATOMIC_INIT(conf->queue_size, 0);
+        GF_ATOMIC_INIT(conf->stub_cnt, 0);
 
         for (i = 0; i < IOT_PRI_MAX; i++) {
                 INIT_LIST_HEAD (&conf->clients[i]);
@@ -1075,7 +1078,7 @@ notify (xlator_t *this, int32_t event, void *data, ...)
 {
         iot_conf_t *conf = this->private;
         xlator_t *victim = data;
-        uint64_t queue_size = 0;
+        uint64_t stub_cnt = 0;
         struct timespec sleep_till = {0, };
 
         if (GF_EVENT_PARENT_DOWN == event) {
@@ -1083,14 +1086,14 @@ notify (xlator_t *this, int32_t event, void *data, ...)
                 clock_gettime(CLOCK_REALTIME, &sleep_till);
                 sleep_till.tv_sec += 1;
                 /* Wait for draining stub from queue before notify PARENT_DOWN */
-                queue_size = GF_ATOMIC_GET(conf->queue_size);
+                stub_cnt = GF_ATOMIC_GET(conf->stub_cnt);
 
                 pthread_mutex_lock(&conf->mutex);
                 {
-                        while (queue_size) {
+                        while (stub_cnt) {
                                 (void)pthread_cond_timedwait(&conf->cond, &conf->mutex,
                                                              &sleep_till);
-                                queue_size = GF_ATOMIC_GET(conf->queue_size);
+                                stub_cnt = GF_ATOMIC_GET(conf->stub_cnt);
                         }
                 }
                 pthread_mutex_unlock(&conf->mutex);
diff --git a/xlators/performance/io-threads/src/io-threads.h b/xlators/performance/io-threads/src/io-threads.h
index 7a6973c..57a136e 100644
--- a/xlators/performance/io-threads/src/io-threads.h
+++ b/xlators/performance/io-threads/src/io-threads.h
@@ -75,7 +75,8 @@ struct iot_conf {
         int32_t              ac_iot_limit[IOT_PRI_MAX];
         int32_t              ac_iot_count[IOT_PRI_MAX];
         int                  queue_sizes[IOT_PRI_MAX];
-        gf_atomic_t          queue_size;
+        int32_t              queue_size;
+        gf_atomic_t          stub_cnt;
         pthread_attr_t       w_attr;
         gf_boolean_t         least_priority; /*Enable/Disable least-priority */
 
-- 
1.8.3.1
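
(Postscript, not part of the patch: to exercise the backport locally,
something like the following should work, assuming a glusterfs source
tree and the patch saved under its hypothetical format-patch name:

    git am 0402-core-glusterfsd-keeping-fd-open-in-index-xlator.patch
    ./run-tests.sh tests/bugs/glusterd/brick-mux-fd-cleanup.t
)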