From ba20326a937421c50a775907dc9ac726bb9a9b50 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Wed, 11 Sep 2013 16:42:35 +0200
Subject: [PATCH] coroutine: add ./configure --disable-coroutine-pool

The 'gthread' coroutine backend was written before the freelist (aka
pool) existed in qemu-coroutine.c.

This means that every thread is expected to exit when its coroutine
terminates. It is not possible to reuse threads from a pool.

This patch automatically disables the pool when 'gthread' is used. This
allows the 'gthread' backend to work again (for example,
tests/test-coroutine completes successfully instead of hanging).

I considered implementing thread reuse but I don't want quirks like CPU
affinity differences due to coroutine threads being recycled. The
'gthread' backend is a reference backend and it's therefore okay to skip
the pool optimization.

Note this patch also makes it easy to toggle the pool for benchmarking
purposes:

  ./configure --with-coroutine-backend=ucontext \
              --disable-coroutine-pool

Reported-by: Gabriel Kerneis <gabriel@kerneis.info>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Gabriel Kerneis <gabriel@kerneis.info>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 70c60c089fdc6bf8a79324e492c13e8c08d55942)

Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
---
 configure        | 24 ++++++++++++++++++++++++
 qemu-coroutine.c | 34 +++++++++++++++++++---------------
 2 files changed, 43 insertions(+), 15 deletions(-)

diff --git a/configure b/configure
index 18fa608..4cf672d 100755
--- a/configure
+++ b/configure
@@ -235,6 +235,7 @@ guest_agent=""
 want_tools="yes"
 libiscsi=""
 coroutine=""
+coroutine_pool=""
 seccomp=""
 glusterfs=""
 glusterfs_discard="no"
@@ -871,6 +872,10 @@ for opt do
   ;;
   --with-coroutine=*) coroutine="$optarg"
   ;;
+  --disable-coroutine-pool) coroutine_pool="no"
+  ;;
+  --enable-coroutine-pool) coroutine_pool="yes"
+  ;;
   --disable-docs) docs="no"
   ;;
   --enable-docs) docs="yes"
@@ -1152,6 +1157,8 @@ echo "  --disable-seccomp        disable seccomp support"
 echo "  --enable-seccomp         enables seccomp support"
 echo "  --with-coroutine=BACKEND coroutine backend. Supported options:"
 echo "                           gthread, ucontext, sigaltstack, windows"
+echo "  --disable-coroutine-pool disable coroutine freelist (worse performance)"
+echo "  --enable-coroutine-pool  enable coroutine freelist (better performance)"
 echo "  --enable-glusterfs       enable GlusterFS backend"
 echo "  --disable-glusterfs      disable GlusterFS backend"
 echo "  --enable-gcov            enable test coverage analysis with gcov"
@@ -3240,6 +3247,17 @@ else
   esac
 fi
 
+if test "$coroutine_pool" = ""; then
+  if test "$coroutine" = "gthread"; then
+    coroutine_pool=no
+  else
+    coroutine_pool=yes
+  fi
+fi
+if test "$coroutine" = "gthread" -a "$coroutine_pool" = "yes"; then
+  error_exit "'gthread' coroutine backend does not support pool (use --disable-coroutine-pool)"
+fi
+
 ##########################################
 # check if we have open_by_handle_at
 
@@ -3605,6 +3623,7 @@ echo "libiscsi support  $libiscsi"
 echo "build guest agent $guest_agent"
 echo "seccomp support   $seccomp"
 echo "coroutine backend $coroutine"
+echo "coroutine pool    $coroutine_pool"
 echo "GlusterFS support $glusterfs"
 echo "virtio-blk-data-plane $virtio_blk_data_plane"
 echo "gcov              $gcov_tool"
@@ -3954,6 +3973,11 @@ if test "$rbd" = "yes" ; then
 fi
 
 echo "CONFIG_COROUTINE_BACKEND=$coroutine" >> $config_host_mak
+if test "$coroutine_pool" = "yes" ; then
+  echo "CONFIG_COROUTINE_POOL=1" >> $config_host_mak
+else
+  echo "CONFIG_COROUTINE_POOL=0" >> $config_host_mak
+fi
 
 if test "$open_by_handle_at" = "yes" ; then
   echo "CONFIG_OPEN_BY_HANDLE=y" >> $config_host_mak
diff --git a/qemu-coroutine.c b/qemu-coroutine.c
index 423430d..4708521 100644
--- a/qemu-coroutine.c
+++ b/qemu-coroutine.c
@@ -30,15 +30,17 @@ static unsigned int pool_size;
 
 Coroutine *qemu_coroutine_create(CoroutineEntry *entry)
 {
-    Coroutine *co;
-
-    qemu_mutex_lock(&pool_lock);
-    co = QSLIST_FIRST(&pool);
-    if (co) {
-        QSLIST_REMOVE_HEAD(&pool, pool_next);
-        pool_size--;
+    Coroutine *co = NULL;
+
+    if (CONFIG_COROUTINE_POOL) {
+        qemu_mutex_lock(&pool_lock);
+        co = QSLIST_FIRST(&pool);
+        if (co) {
+            QSLIST_REMOVE_HEAD(&pool, pool_next);
+            pool_size--;
+        }
+        qemu_mutex_unlock(&pool_lock);
     }
-    qemu_mutex_unlock(&pool_lock);
 
     if (!co) {
         co = qemu_coroutine_new();
@@ -51,15 +53,17 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry)
 
 static void coroutine_delete(Coroutine *co)
 {
-    qemu_mutex_lock(&pool_lock);
-    if (pool_size < POOL_MAX_SIZE) {
-        QSLIST_INSERT_HEAD(&pool, co, pool_next);
-        co->caller = NULL;
-        pool_size++;
+    if (CONFIG_COROUTINE_POOL) {
+        qemu_mutex_lock(&pool_lock);
+        if (pool_size < POOL_MAX_SIZE) {
+            QSLIST_INSERT_HEAD(&pool, co, pool_next);
+            co->caller = NULL;
+            pool_size++;
+            qemu_mutex_unlock(&pool_lock);
+            return;
+        }
         qemu_mutex_unlock(&pool_lock);
-        return;
     }
-    qemu_mutex_unlock(&pool_lock);
 
     qemu_coroutine_delete(co);
 }
|