SOURCES/kvm-job-Move-coroutine-and-related-code-to-Job.patch

From 0a25884271e9d6a99f0775c3b0c631d006ba2fc2 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Tue, 26 Jun 2018 09:48:09 +0200
Subject: [PATCH 40/89] job: Move coroutine and related code to Job

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <20180626094856.6924-27-kwolf@redhat.com>
Patchwork-id: 81113
O-Subject: [RHV-7.6 qemu-kvm-rhev PATCH v2 26/73] job: Move coroutine and related code to Job
Bugzilla: 1513543
RH-Acked-by: Jeffrey Cody <jcody@redhat.com>
RH-Acked-by: Max Reitz <mreitz@redhat.com>
RH-Acked-by: Fam Zheng <famz@redhat.com>

This commit moves some core functions for dealing with the job coroutine
from BlockJob to Job. This includes primarily entering the coroutine
(both for the first and reentering) and yielding explicitly and at pause
points.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
(cherry picked from commit da01ff7f38f52791f93fc3ca59afcfbb220f15af)
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 block/backup.c               |   2 +-
 block/commit.c               |   4 +-
 block/mirror.c               |  22 ++---
 block/replication.c          |   2 +-
 block/stream.c               |   4 +-
 blockdev.c                   |   8 +-
 blockjob.c                   | 219 ++++++++-----------------------------------
 include/block/blockjob.h     |  40 --------
 include/block/blockjob_int.h |  26 -----
 include/qemu/job.h           |  76 +++++++++++++++
 job.c                        | 137 +++++++++++++++++++++++++++
 tests/test-bdrv-drain.c      |  38 ++++----
 tests/test-blockjob-txn.c    |  12 +--
 tests/test-blockjob.c        |  14 +--
 14 files changed, 305 insertions(+), 299 deletions(-)
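
Illustrative sketch only (not part of the upstream change): after this patch a
block job driver registers its coroutine entry point on the embedded JobDriver
rather than on BlockJobDriver, and callers start the job through job_start()
on the embedded Job. BlockJobDriver, JobDriver, block_job_free() and
job_start() are the interfaces touched by the diff below; MyBlockJob,
my_job_run and my_job_kick_off are hypothetical placeholders.

    /* Hypothetical driver, assuming the QEMU block job headers. */
    #include "qemu/osdep.h"
    #include "block/blockjob_int.h"
    #include "qemu/job.h"

    typedef struct MyBlockJob {
        BlockJob common;
    } MyBlockJob;

    static void coroutine_fn my_job_run(void *opaque)
    {
        MyBlockJob *s = opaque;  /* the job itself is passed as opaque */

        /* job proper; call job_pause_point(&s->common.job) between requests */
    }

    static const BlockJobDriver my_job_driver = {
        .job_driver = {
            .instance_size = sizeof(MyBlockJob),
            .free          = block_job_free,
            .start         = my_job_run, /* was BlockJobDriver.start before this patch */
        },
    };

    /* Starting the job: was block_job_start(&s->common) before this patch. */
    static void my_job_kick_off(MyBlockJob *s)
    {
        job_start(&s->common.job);
    }
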

diff --git a/block/backup.c b/block/backup.c
index 22dd368..7d9aad9 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -528,8 +528,8 @@ static const BlockJobDriver backup_job_driver = {
         .instance_size          = sizeof(BackupBlockJob),
         .job_type               = JOB_TYPE_BACKUP,
         .free                   = block_job_free,
+        .start                  = backup_run,
     },
-    .start                  = backup_run,
     .commit                 = backup_commit,
     .abort                  = backup_abort,
     .clean                  = backup_clean,
diff --git a/block/commit.c b/block/commit.c
index d326766..2fbc310 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -220,8 +220,8 @@ static const BlockJobDriver commit_job_driver = {
         .instance_size = sizeof(CommitBlockJob),
         .job_type      = JOB_TYPE_COMMIT,
         .free          = block_job_free,
+        .start         = commit_run,
     },
-    .start         = commit_run,
 };
 
 static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs,
@@ -371,7 +371,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
     s->on_error = on_error;
 
     trace_commit_start(bs, base, top, s);
-    block_job_start(&s->common);
+    job_start(&s->common.job);
     return;
 
 fail:
diff --git a/block/mirror.c b/block/mirror.c
index 90d4ac9..95fc807 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -126,7 +126,7 @@ static void mirror_iteration_done(MirrorOp *op, int ret)
     g_free(op);
 
     if (s->waiting_for_io) {
-        qemu_coroutine_enter(s->common.co);
+        qemu_coroutine_enter(s->common.job.co);
     }
 }
 
@@ -345,7 +345,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
         mirror_wait_for_io(s);
     }
 
-    block_job_pause_point(&s->common);
+    job_pause_point(&s->common.job);
 
     /* Find the number of consective dirty chunks following the first dirty
      * one, and wait for in flight requests in them. */
@@ -597,7 +597,7 @@ static void mirror_throttle(MirrorBlockJob *s)
         s->last_pause_ns = now;
         block_job_sleep_ns(&s->common, 0);
     } else {
-        block_job_pause_point(&s->common);
+        job_pause_point(&s->common.job);
     }
 }
 
@@ -786,7 +786,7 @@ static void coroutine_fn mirror_run(void *opaque)
             goto immediate_exit;
         }
 
-        block_job_pause_point(&s->common);
+        job_pause_point(&s->common.job);
 
         cnt = bdrv_get_dirty_count(s->dirty_bitmap);
         /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
@@ -957,9 +957,9 @@ static void mirror_complete(BlockJob *job, Error **errp)
     block_job_enter(&s->common);
 }
 
-static void mirror_pause(BlockJob *job)
+static void mirror_pause(Job *job)
 {
-    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
 
     mirror_wait_for_all_io(s);
 }
@@ -991,10 +991,10 @@ static const BlockJobDriver mirror_job_driver = {
         .instance_size          = sizeof(MirrorBlockJob),
         .job_type               = JOB_TYPE_MIRROR,
         .free                   = block_job_free,
+        .start                  = mirror_run,
+        .pause                  = mirror_pause,
     },
-    .start                  = mirror_run,
     .complete               = mirror_complete,
-    .pause                  = mirror_pause,
     .attached_aio_context   = mirror_attached_aio_context,
     .drain                  = mirror_drain,
 };
@@ -1004,10 +1004,10 @@ static const BlockJobDriver commit_active_job_driver = {
         .instance_size          = sizeof(MirrorBlockJob),
         .job_type               = JOB_TYPE_COMMIT,
         .free                   = block_job_free,
+        .start                  = mirror_run,
+        .pause                  = mirror_pause,
     },
-    .start                  = mirror_run,
     .complete               = mirror_complete,
-    .pause                  = mirror_pause,
     .attached_aio_context   = mirror_attached_aio_context,
     .drain                  = mirror_drain,
 };
@@ -1244,7 +1244,7 @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs,
     }
 
     trace_mirror_start(bs, s, opaque);
-    block_job_start(&s->common);
+    job_start(&s->common.job);
     return;
 
 fail:
diff --git a/block/replication.c b/block/replication.c
index 6c0c718..3f7500e 100644
--- a/block/replication.c
+++ b/block/replication.c
@@ -574,7 +574,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
             aio_context_release(aio_context);
             return;
         }
-        block_job_start(job);
+        job_start(&job->job);
         break;
     default:
         aio_context_release(aio_context);
diff --git a/block/stream.c b/block/stream.c
index 0bba816..6d8b7b6 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -213,8 +213,8 @@ static const BlockJobDriver stream_job_driver = {
         .instance_size = sizeof(StreamBlockJob),
         .job_type      = JOB_TYPE_STREAM,
         .free          = block_job_free,
+        .start         = stream_run,
     },
-    .start         = stream_run,
 };
 
 void stream_start(const char *job_id, BlockDriverState *bs,
@@ -262,7 +262,7 @@ void stream_start(const char *job_id, BlockDriverState *bs,
 
     s->on_error = on_error;
     trace_stream_start(bs, base, s);
-    block_job_start(&s->common);
+    job_start(&s->common.job);
     return;
 
 fail:
diff --git a/blockdev.c b/blockdev.c
index 96a89cc..efb83c4 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -1952,7 +1952,7 @@ static void drive_backup_commit(BlkActionState *common)
     aio_context_acquire(aio_context);
 
     assert(state->job);
-    block_job_start(state->job);
+    job_start(&state->job->job);
 
     aio_context_release(aio_context);
 }
@@ -2050,7 +2050,7 @@ static void blockdev_backup_commit(BlkActionState *common)
     aio_context_acquire(aio_context);
 
     assert(state->job);
-    block_job_start(state->job);
+    job_start(&state->job->job);
 
     aio_context_release(aio_context);
 }
@@ -3472,7 +3472,7 @@ void qmp_drive_backup(DriveBackup *arg, Error **errp)
     BlockJob *job;
     job = do_drive_backup(arg, NULL, errp);
     if (job) {
-        block_job_start(job);
+        job_start(&job->job);
     }
 }
 
@@ -3560,7 +3560,7 @@ void qmp_blockdev_backup(BlockdevBackup *arg, Error **errp)
     BlockJob *job;
     job = do_blockdev_backup(arg, NULL, errp);
     if (job) {
-        block_job_start(job);
+        job_start(&job->job);
     }
 }
 
diff --git a/blockjob.c b/blockjob.c
index 3ede511..313b1ff 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -36,30 +36,9 @@
 #include "qemu/coroutine.h"
 #include "qemu/timer.h"
 
-/* Right now, this mutex is only needed to synchronize accesses to job->busy
- * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
- * block_job_enter. */
-static QemuMutex block_job_mutex;
-
-static void block_job_lock(void)
-{
-    qemu_mutex_lock(&block_job_mutex);
-}
-
-static void block_job_unlock(void)
-{
-    qemu_mutex_unlock(&block_job_mutex);
-}
-
-static void __attribute__((__constructor__)) block_job_init(void)
-{
-    qemu_mutex_init(&block_job_mutex);
-}
-
 static void block_job_event_cancelled(BlockJob *job);
 static void block_job_event_completed(BlockJob *job, const char *msg);
 static int block_job_event_pending(BlockJob *job);
-static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));
 
 /* Transactional group of block jobs */
 struct BlockJobTxn {
@@ -161,33 +140,27 @@ static void block_job_txn_del_job(BlockJob *job)
     }
 }
 
-/* Assumes the block_job_mutex is held */
-static bool block_job_timer_pending(BlockJob *job)
-{
-    return timer_pending(&job->sleep_timer);
-}
-
-/* Assumes the block_job_mutex is held */
-static bool block_job_timer_not_pending(BlockJob *job)
+/* Assumes the job_mutex is held */
+static bool job_timer_not_pending(Job *job)
 {
-    return !block_job_timer_pending(job);
+    return !timer_pending(&job->sleep_timer);
 }
 
 static void block_job_pause(BlockJob *job)
 {
-    job->pause_count++;
+    job->job.pause_count++;
 }
 
 static void block_job_resume(BlockJob *job)
 {
-    assert(job->pause_count > 0);
-    job->pause_count--;
-    if (job->pause_count) {
+    assert(job->job.pause_count > 0);
+    job->job.pause_count--;
+    if (job->job.pause_count) {
         return;
     }
 
     /* kick only if no timer is pending */
-    block_job_enter_cond(job, block_job_timer_not_pending);
+    job_enter_cond(&job->job, job_timer_not_pending);
 }
 
 static void block_job_attached_aio_context(AioContext *new_context,
@@ -208,7 +181,7 @@ void block_job_free(Job *job)
                                     block_job_detach_aio_context, bjob);
     blk_unref(bjob->blk);
     error_free(bjob->blocker);
-    assert(!timer_pending(&bjob->sleep_timer));
+    assert(!timer_pending(&bjob->job.sleep_timer));
 }
 
 static void block_job_attached_aio_context(AioContext *new_context,
@@ -226,7 +199,7 @@ static void block_job_attached_aio_context(AioContext *new_context,
 
 static void block_job_drain(BlockJob *job)
 {
-    /* If job is !job->busy this kicks it into the next pause point. */
+    /* If job is !job->job.busy this kicks it into the next pause point. */
     block_job_enter(job);
 
     blk_drain(job->blk);
@@ -244,7 +217,7 @@ static void block_job_detach_aio_context(void *opaque)
 
     block_job_pause(job);
 
-    while (!job->paused && !job->completed) {
+    while (!job->job.paused && !job->completed) {
         block_job_drain(job);
     }
 
@@ -312,29 +285,11 @@ bool block_job_is_internal(BlockJob *job)
     return (job->job.id == NULL);
 }
 
-static bool block_job_started(BlockJob *job)
-{
-    return job->co;
-}
-
 const BlockJobDriver *block_job_driver(BlockJob *job)
 {
     return job->driver;
 }
 
-/**
- * All jobs must allow a pause point before entering their job proper. This
- * ensures that jobs can be paused prior to being started, then resumed later.
- */
-static void coroutine_fn block_job_co_entry(void *opaque)
-{
-    BlockJob *job = opaque;
-
-    assert(job && job->driver && job->driver->start);
-    block_job_pause_point(job);
-    job->driver->start(job);
-}
-
 static void block_job_sleep_timer_cb(void *opaque)
 {
     BlockJob *job = opaque;
@@ -342,24 +297,12 @@ static void block_job_sleep_timer_cb(void *opaque)
     block_job_enter(job);
 }
 
-void block_job_start(BlockJob *job)
-{
-    assert(job && !block_job_started(job) && job->paused &&
-           job->driver && job->driver->start);
-    job->co = qemu_coroutine_create(block_job_co_entry, job);
-    job->pause_count--;
-    job->busy = true;
-    job->paused = false;
-    job_state_transition(&job->job, JOB_STATUS_RUNNING);
-    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
-}
-
 static void block_job_decommission(BlockJob *job)
 {
     assert(job);
     job->completed = true;
-    job->busy = false;
-    job->paused = false;
+    job->job.busy = false;
+    job->job.paused = false;
     job->job.deferred_to_main_loop = true;
     block_job_txn_del_job(job);
     job_state_transition(&job->job, JOB_STATUS_NULL);
@@ -374,7 +317,7 @@ static void block_job_do_dismiss(BlockJob *job)
 static void block_job_conclude(BlockJob *job)
 {
     job_state_transition(&job->job, JOB_STATUS_CONCLUDED);
-    if (job->auto_dismiss || !block_job_started(job)) {
+    if (job->auto_dismiss || !job_started(&job->job)) {
         block_job_do_dismiss(job);
     }
 }
@@ -439,7 +382,7 @@ static int block_job_finalize_single(BlockJob *job)
     }
 
     /* Emit events only if we actually started */
-    if (block_job_started(job)) {
+    if (job_started(&job->job)) {
         if (job_is_cancelled(&job->job)) {
             block_job_event_cancelled(job);
         } else {
@@ -464,7 +407,7 @@ static void block_job_cancel_async(BlockJob *job, bool force)
     if (job->user_paused) {
         /* Do not call block_job_enter here, the caller will handle it.  */
         job->user_paused = false;
-        job->pause_count--;
+        job->job.pause_count--;
     }
     job->job.cancelled = true;
     /* To prevent 'force == false' overriding a previous 'force == true' */
@@ -615,6 +558,12 @@ static void block_job_completed_txn_success(BlockJob *job)
     }
 }
 
+/* Assumes the job_mutex is held */
+static bool job_timer_pending(Job *job)
+{
+    return timer_pending(&job->sleep_timer);
+}
+
 void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
 {
     int64_t old_speed = job->speed;
@@ -635,7 +584,7 @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
     }
 
     /* kick only if a timer is pending */
-    block_job_enter_cond(job, block_job_timer_pending);
+    job_enter_cond(&job->job, job_timer_pending);
 }
 
 int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
@@ -654,7 +603,7 @@ void block_job_complete(BlockJob *job, Error **errp)
     if (job_apply_verb(&job->job, JOB_VERB_COMPLETE, errp)) {
         return;
     }
-    if (job->pause_count || job_is_cancelled(&job->job) ||
+    if (job->job.pause_count || job_is_cancelled(&job->job) ||
         !job->driver->complete)
     {
         error_setg(errp, "The active block job '%s' cannot be completed",
@@ -708,7 +657,7 @@ bool block_job_user_paused(BlockJob *job)
 void block_job_user_resume(BlockJob *job, Error **errp)
 {
     assert(job);
-    if (!job->user_paused || job->pause_count <= 0) {
+    if (!job->user_paused || job->job.pause_count <= 0) {
         error_setg(errp, "Can't resume a job that was not paused");
         return;
     }
@@ -727,7 +676,7 @@ void block_job_cancel(BlockJob *job, bool force)
         return;
     }
     block_job_cancel_async(job, force);
-    if (!block_job_started(job)) {
+    if (!job_started(&job->job)) {
         block_job_completed(job, -ECANCELED);
     } else if (job->job.deferred_to_main_loop) {
         block_job_completed_txn_abort(job);
@@ -797,8 +746,8 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
     info->type      = g_strdup(job_type_str(&job->job));
     info->device    = g_strdup(job->job.id);
     info->len       = job->len;
-    info->busy      = atomic_read(&job->busy);
-    info->paused    = job->pause_count > 0;
+    info->busy      = atomic_read(&job->job.busy);
+    info->paused    = job->job.pause_count > 0;
     info->offset    = job->offset;
     info->speed     = job->speed;
     info->io_status = job->iostatus;
@@ -915,12 +864,9 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     job->blk           = blk;
     job->cb            = cb;
     job->opaque        = opaque;
-    job->busy          = false;
-    job->paused        = true;
-    job->pause_count   = 1;
     job->auto_finalize = !(flags & BLOCK_JOB_MANUAL_FINALIZE);
     job->auto_dismiss  = !(flags & BLOCK_JOB_MANUAL_DISMISS);
-    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
+    aio_timer_init(qemu_get_aio_context(), &job->job.sleep_timer,
                    QEMU_CLOCK_REALTIME, SCALE_NS,
                    block_job_sleep_timer_cb, job);
 
@@ -980,128 +926,41 @@ void block_job_completed(BlockJob *job, int ret)
     }
 }
 
-static bool block_job_should_pause(BlockJob *job)
-{
-    return job->pause_count > 0;
-}
-
-/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
- * Reentering the job coroutine with block_job_enter() before the timer has
- * expired is allowed and cancels the timer.
- *
- * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
- * called explicitly. */
-static void block_job_do_yield(BlockJob *job, uint64_t ns)
-{
-    block_job_lock();
-    if (ns != -1) {
-        timer_mod(&job->sleep_timer, ns);
-    }
-    job->busy = false;
-    block_job_unlock();
-    qemu_coroutine_yield();
-
-    /* Set by block_job_enter before re-entering the coroutine.  */
-    assert(job->busy);
-}
-
-void coroutine_fn block_job_pause_point(BlockJob *job)
-{
-    assert(job && block_job_started(job));
-
-    if (!block_job_should_pause(job)) {
-        return;
-    }
-    if (job_is_cancelled(&job->job)) {
-        return;
-    }
-
-    if (job->driver->pause) {
-        job->driver->pause(job);
-    }
-
-    if (block_job_should_pause(job) && !job_is_cancelled(&job->job)) {
-        JobStatus status = job->job.status;
-        job_state_transition(&job->job, status == JOB_STATUS_READY
-                                        ? JOB_STATUS_STANDBY
-                                        : JOB_STATUS_PAUSED);
-        job->paused = true;
-        block_job_do_yield(job, -1);
-        job->paused = false;
-        job_state_transition(&job->job, status);
-    }
-
-    if (job->driver->resume) {
-        job->driver->resume(job);
-    }
-}
-
-/*
- * Conditionally enter a block_job pending a call to fn() while
- * under the block_job_lock critical section.
- */
-static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
-{
-    if (!block_job_started(job)) {
-        return;
-    }
-    if (job->job.deferred_to_main_loop) {
-        return;
-    }
-
-    block_job_lock();
-    if (job->busy) {
-        block_job_unlock();
-        return;
-    }
-
-    if (fn && !fn(job)) {
-        block_job_unlock();
-        return;
-    }
-
-    assert(!job->job.deferred_to_main_loop);
-    timer_del(&job->sleep_timer);
-    job->busy = true;
-    block_job_unlock();
-    aio_co_wake(job->co);
-}
-
 void block_job_enter(BlockJob *job)
 {
-    block_job_enter_cond(job, NULL);
+    job_enter_cond(&job->job, NULL);
 }
 
 void block_job_sleep_ns(BlockJob *job, int64_t ns)
 {
-    assert(job->busy);
+    assert(job->job.busy);
 
     /* Check cancellation *before* setting busy = false, too!  */
     if (job_is_cancelled(&job->job)) {
         return;
     }
 
-    if (!block_job_should_pause(job)) {
-        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
+    if (!job_should_pause(&job->job)) {
+        job_do_yield(&job->job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
     }
 
-    block_job_pause_point(job);
+    job_pause_point(&job->job);
 }
 
 void block_job_yield(BlockJob *job)
 {
-    assert(job->busy);
+    assert(job->job.busy);
 
     /* Check cancellation *before* setting busy = false, too!  */
     if (job_is_cancelled(&job->job)) {
         return;
     }
 
-    if (!block_job_should_pause(job)) {
-        block_job_do_yield(job, -1);
+    if (!job_should_pause(&job->job)) {
+        job_do_yield(&job->job, -1);
     }
 
-    block_job_pause_point(job);
+    job_pause_point(&job->job);
 }
 
 void block_job_iostatus_reset(BlockJob *job)
@@ -1109,7 +968,7 @@ void block_job_iostatus_reset(BlockJob *job)
     if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
         return;
     }
-    assert(job->user_paused && job->pause_count > 0);
+    assert(job->user_paused && job->job.pause_count > 0);
     job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
 }
 
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index 2a9e865..b60d919 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -51,43 +51,18 @@ typedef struct BlockJob {
     BlockBackend *blk;
 
     /**
-     * The coroutine that executes the job.  If not NULL, it is
-     * reentered when busy is false and the job is cancelled.
-     */
-    Coroutine *co;
-
-    /**
      * Set to true if the job should abort immediately without waiting
      * for data to be in sync.
      */
     bool force;
 
     /**
-     * Counter for pause request. If non-zero, the block job is either paused,
-     * or if busy == true will pause itself as soon as possible.
-     */
-    int pause_count;
-
-    /**
      * Set to true if the job is paused by user.  Can be unpaused with the
      * block-job-resume QMP command.
      */
     bool user_paused;
 
     /**
-     * Set to false by the job while the coroutine has yielded and may be
-     * re-entered by block_job_enter().  There may still be I/O or event loop
-     * activity pending.  Accessed under block_job_mutex (in blockjob.c).
-     */
-    bool busy;
-
-    /**
-     * Set to true by the job while it is in a quiescent state, where
-     * no I/O or event loop activity is pending.
-     */
-    bool paused;
-
-    /**
      * Set to true when the job is ready to be completed.
      */
     bool ready;
@@ -125,12 +100,6 @@ typedef struct BlockJob {
     /** ret code passed to block_job_completed. */
     int ret;
 
-    /**
-     * Timer that is used by @block_job_sleep_ns. Accessed under
-     * block_job_mutex (in blockjob.c).
-     */
-    QEMUTimer sleep_timer;
-
     /** True if this job should automatically finalize itself */
     bool auto_finalize;
 
@@ -208,15 +177,6 @@ void block_job_remove_all_bdrv(BlockJob *job);
 void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp);
 
 /**
- * block_job_start:
- * @job: A job that has not yet been started.
- *
- * Begins execution of a block job.
- * Takes ownership of one reference to the job object.
- */
-void block_job_start(BlockJob *job);
-
-/**
  * block_job_cancel:
  * @job: The job to be canceled.
  * @force: Quit a job without waiting for data to be in sync.
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
index 0c2f8de..0a614a8 100644
--- a/include/block/blockjob_int.h
+++ b/include/block/blockjob_int.h
@@ -38,9 +38,6 @@ struct BlockJobDriver {
     /** Generic JobDriver callbacks and settings */
     JobDriver job_driver;
 
-    /** Mandatory: Entrypoint for the Coroutine. */
-    CoroutineEntry *start;
-
     /**
      * Optional callback for job types whose completion must be triggered
      * manually.
@@ -85,20 +82,6 @@ struct BlockJobDriver {
      */
     void (*clean)(BlockJob *job);
 
-    /**
-     * If the callback is not NULL, it will be invoked when the job transitions
-     * into the paused state.  Paused jobs must not perform any asynchronous
-     * I/O or event loop activity.  This callback is used to quiesce jobs.
-     */
-    void coroutine_fn (*pause)(BlockJob *job);
-
-    /**
-     * If the callback is not NULL, it will be invoked when the job transitions
-     * out of the paused state.  Any asynchronous I/O or event loop activity
-     * should be restarted from this callback.
-     */
-    void coroutine_fn (*resume)(BlockJob *job);
-
     /*
      * If the callback is not NULL, it will be invoked before the job is
      * resumed in a new AioContext.  This is the place to move any resources
@@ -196,15 +179,6 @@ void block_job_early_fail(BlockJob *job);
 void block_job_completed(BlockJob *job, int ret);
 
 /**
- * block_job_pause_point:
- * @job: The job that is ready to pause.
- *
- * Pause now if block_job_pause() has been called.  Block jobs that perform
- * lots of I/O must call this between requests so that the job can be paused.
- */
-void coroutine_fn block_job_pause_point(BlockJob *job);
-
-/**
  * block_job_enter:
  * @job: The job to enter.
  *
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 933e0ab..9dcff12 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -28,6 +28,7 @@
 
 #include "qapi/qapi-types-block-core.h"
 #include "qemu/queue.h"
+#include "qemu/coroutine.h"
 
 typedef struct JobDriver JobDriver;
 
@@ -51,6 +52,37 @@ typedef struct Job {
     AioContext *aio_context;
 
     /**
+     * The coroutine that executes the job.  If not NULL, it is reentered when
+     * busy is false and the job is cancelled.
+     */
+    Coroutine *co;
+
+    /**
+     * Timer that is used by @block_job_sleep_ns. Accessed under job_mutex (in
+     * job.c).
+     */
+    QEMUTimer sleep_timer;
+
+    /**
+     * Counter for pause request. If non-zero, the block job is either paused,
+     * or if busy == true will pause itself as soon as possible.
+     */
+    int pause_count;
+
+    /**
+     * Set to false by the job while the coroutine has yielded and may be
+     * re-entered by block_job_enter().  There may still be I/O or event loop
+     * activity pending.  Accessed under block_job_mutex (in blockjob.c).
+     */
+    bool busy;
+
+    /**
+     * Set to true by the job while it is in a quiescent state, where
+     * no I/O or event loop activity is pending.
+     */
+    bool paused;
+
+    /**
      * Set to true if the job should cancel itself.  The flag must
      * always be tested just before toggling the busy flag from false
      * to true.  After a job has been cancelled, it should only yield
@@ -75,6 +107,23 @@ struct JobDriver {
     /** Enum describing the operation */
     JobType job_type;
 
+    /** Mandatory: Entrypoint for the Coroutine. */
+    CoroutineEntry *start;
+
+    /**
+     * If the callback is not NULL, it will be invoked when the job transitions
+     * into the paused state.  Paused jobs must not perform any asynchronous
+     * I/O or event loop activity.  This callback is used to quiesce jobs.
+     */
+    void coroutine_fn (*pause)(Job *job);
+
+    /**
+     * If the callback is not NULL, it will be invoked when the job transitions
+     * out of the paused state.  Any asynchronous I/O or event loop activity
+     * should be restarted from this callback.
+     */
+    void coroutine_fn (*resume)(Job *job);
+
     /** Called when the job is freed */
     void (*free)(Job *job);
 };
@@ -103,6 +152,30 @@ void job_ref(Job *job);
  */
 void job_unref(Job *job);
 
+/**
+ * Conditionally enter the job coroutine if the job is ready to run, not
+ * already busy and fn() returns true. fn() is called while under the job_lock
+ * critical section.
+ */
+void job_enter_cond(Job *job, bool(*fn)(Job *job));
+
+/**
+ * @job: A job that has not yet been started.
+ *
+ * Begins execution of a job.
+ * Takes ownership of one reference to the job object.
+ */
+void job_start(Job *job);
+
+/**
+ * @job: The job that is ready to pause.
+ *
+ * Pause now if job_pause() has been called. Jobs that perform lots of I/O
+ * must call this between requests so that the job can be paused.
+ */
+void coroutine_fn job_pause_point(Job *job);
+
+
 /** Returns the JobType of a given Job. */
 JobType job_type(const Job *job);
 
@@ -153,5 +226,8 @@ void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque);
 
 /* TODO To be removed from the public interface */
 void job_state_transition(Job *job, JobStatus s1);
+void coroutine_fn job_do_yield(Job *job, uint64_t ns);
+bool job_should_pause(Job *job);
+bool job_started(Job *job);
 
 #endif
diff --git a/job.c b/job.c
index c5a37fb..78497fd 100644
--- a/job.c
+++ b/job.c
@@ -60,6 +60,26 @@ bool JobVerbTable[JOB_VERB__MAX][JOB_STATUS__MAX] = {
     [JOB_VERB_DISMISS]              = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
 };
 
+/* Right now, this mutex is only needed to synchronize accesses to job->busy
+ * and job->sleep_timer, such as concurrent calls to job_do_yield and
+ * job_enter. */
+static QemuMutex job_mutex;
+
+static void job_lock(void)
+{
+    qemu_mutex_lock(&job_mutex);
+}
+
+static void job_unlock(void)
+{
+    qemu_mutex_unlock(&job_mutex);
+}
+
+static void __attribute__((__constructor__)) job_init(void)
+{
+    qemu_mutex_init(&job_mutex);
+}
+
 /* TODO Make static once the whole state machine is in job.c */
 void job_state_transition(Job *job, JobStatus s1)
 {
@@ -101,6 +121,16 @@ bool job_is_cancelled(Job *job)
     return job->cancelled;
 }
 
+bool job_started(Job *job)
+{
+    return job->co;
+}
+
+bool job_should_pause(Job *job)
+{
+    return job->pause_count > 0;
+}
+
 Job *job_next(Job *job)
 {
     if (!job) {
@@ -143,6 +173,9 @@ void *job_create(const char *job_id, const JobDriver *driver, AioContext *ctx,
     job->id            = g_strdup(job_id);
     job->refcnt        = 1;
     job->aio_context   = ctx;
+    job->busy          = false;
+    job->paused        = true;
+    job->pause_count   = 1;
 
     job_state_transition(job, JOB_STATUS_CREATED);
 
@@ -172,6 +205,110 @@ void job_unref(Job *job)
     }
 }
 
+void job_enter_cond(Job *job, bool(*fn)(Job *job))
+{
+    if (!job_started(job)) {
+        return;
+    }
+    if (job->deferred_to_main_loop) {
+        return;
+    }
+
+    job_lock();
+    if (job->busy) {
+        job_unlock();
+        return;
+    }
+
+    if (fn && !fn(job)) {
+        job_unlock();
+        return;
+    }
+
+    assert(!job->deferred_to_main_loop);
+    timer_del(&job->sleep_timer);
+    job->busy = true;
+    job_unlock();
+    aio_co_wake(job->co);
+}
+
+/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
+ * Reentering the job coroutine with block_job_enter() before the timer has
+ * expired is allowed and cancels the timer.
+ *
+ * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
+ * called explicitly. */
+void coroutine_fn job_do_yield(Job *job, uint64_t ns)
+{
+    job_lock();
+    if (ns != -1) {
+        timer_mod(&job->sleep_timer, ns);
+    }
+    job->busy = false;
+    job_unlock();
+    qemu_coroutine_yield();
+
+    /* Set by job_enter_cond() before re-entering the coroutine.  */
+    assert(job->busy);
+}
+
+void coroutine_fn job_pause_point(Job *job)
+{
+    assert(job && job_started(job));
+
+    if (!job_should_pause(job)) {
+        return;
+    }
+    if (job_is_cancelled(job)) {
+        return;
+    }
+
+    if (job->driver->pause) {
+        job->driver->pause(job);
+    }
+
+    if (job_should_pause(job) && !job_is_cancelled(job)) {
+        JobStatus status = job->status;
+        job_state_transition(job, status == JOB_STATUS_READY
+                                  ? JOB_STATUS_STANDBY
+                                  : JOB_STATUS_PAUSED);
+        job->paused = true;
+        job_do_yield(job, -1);
+        job->paused = false;
+        job_state_transition(job, status);
+    }
+
+    if (job->driver->resume) {
+        job->driver->resume(job);
+    }
+}
+
+/**
+ * All jobs must allow a pause point before entering their job proper. This
+ * ensures that jobs can be paused prior to being started, then resumed later.
+ */
+static void coroutine_fn job_co_entry(void *opaque)
+{
+    Job *job = opaque;
+
+    assert(job && job->driver && job->driver->start);
+    job_pause_point(job);
+    job->driver->start(job);
+}
+
+
+void job_start(Job *job)
+{
+    assert(job && !job_started(job) && job->paused &&
+           job->driver && job->driver->start);
+    job->co = qemu_coroutine_create(job_co_entry, job);
+    job->pause_count--;
+    job->busy = true;
+    job->paused = false;
+    job_state_transition(job, JOB_STATUS_RUNNING);
+    aio_co_enter(job->aio_context, job->co);
+}
+
 typedef struct {
     Job *job;
     JobDeferToMainLoopFn *fn;
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 4f8cba8..c9f2f9b 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -524,8 +524,8 @@ BlockJobDriver test_job_driver = {
     .job_driver = {
         .instance_size  = sizeof(TestBlockJob),
         .free           = block_job_free,
+        .start          = test_job_start,
     },
-    .start          = test_job_start,
     .complete       = test_job_complete,
 };
 
@@ -549,47 +549,47 @@ static void test_blockjob_common(enum drain_type drain_type)
     job = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
     block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
-    block_job_start(job);
+    job_start(&job->job);
 
-    g_assert_cmpint(job->pause_count, ==, 0);
-    g_assert_false(job->paused);
-    g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
+    g_assert_cmpint(job->job.pause_count, ==, 0);
+    g_assert_false(job->job.paused);
+    g_assert_false(job->job.busy); /* We're in block_job_sleep_ns() */
 
     do_drain_begin(drain_type, src);
 
     if (drain_type == BDRV_DRAIN_ALL) {
         /* bdrv_drain_all() drains both src and target */
-        g_assert_cmpint(job->pause_count, ==, 2);
+        g_assert_cmpint(job->job.pause_count, ==, 2);
     } else {
-        g_assert_cmpint(job->pause_count, ==, 1);
+        g_assert_cmpint(job->job.pause_count, ==, 1);
     }
     /* XXX We don't wait until the job is actually paused. Is this okay? */
-    /* g_assert_true(job->paused); */
-    g_assert_false(job->busy); /* The job is paused */
+    /* g_assert_true(job->job.paused); */
+    g_assert_false(job->job.busy); /* The job is paused */
 
     do_drain_end(drain_type, src);
 
-    g_assert_cmpint(job->pause_count, ==, 0);
-    g_assert_false(job->paused);
-    g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
+    g_assert_cmpint(job->job.pause_count, ==, 0);
+    g_assert_false(job->job.paused);
+    g_assert_false(job->job.busy); /* We're in block_job_sleep_ns() */
 
     do_drain_begin(drain_type, target);
 
     if (drain_type == BDRV_DRAIN_ALL) {
         /* bdrv_drain_all() drains both src and target */
-        g_assert_cmpint(job->pause_count, ==, 2);
+        g_assert_cmpint(job->job.pause_count, ==, 2);
     } else {
-        g_assert_cmpint(job->pause_count, ==, 1);
+        g_assert_cmpint(job->job.pause_count, ==, 1);
     }
     /* XXX We don't wait until the job is actually paused. Is this okay? */
-    /* g_assert_true(job->paused); */
-    g_assert_false(job->busy); /* The job is paused */
+    /* g_assert_true(job->job.paused); */
+    g_assert_false(job->job.busy); /* The job is paused */
 
     do_drain_end(drain_type, target);
 
-    g_assert_cmpint(job->pause_count, ==, 0);
-    g_assert_false(job->paused);
-    g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
+    g_assert_cmpint(job->job.pause_count, ==, 0);
+    g_assert_false(job->job.paused);
+    g_assert_false(job->job.busy); /* We're in block_job_sleep_ns() */
 
     ret = block_job_complete_sync(job, &error_abort);
     g_assert_cmpint(ret, ==, 0);
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
index c03f966..323e154 100644
--- a/tests/test-blockjob-txn.c
+++ b/tests/test-blockjob-txn.c
@@ -78,8 +78,8 @@ static const BlockJobDriver test_block_job_driver = {
     .job_driver = {
         .instance_size = sizeof(TestBlockJob),
         .free          = block_job_free,
+        .start         = test_block_job_run,
     },
-    .start = test_block_job_run,
 };
 
 /* Create a block job that completes with a given return code after a given
@@ -125,7 +125,7 @@ static void test_single_job(int expected)
 
     txn = block_job_txn_new();
     job = test_block_job_start(1, true, expected, &result, txn);
-    block_job_start(job);
+    job_start(&job->job);
 
     if (expected == -ECANCELED) {
         block_job_cancel(job, false);
@@ -165,8 +165,8 @@ static void test_pair_jobs(int expected1, int expected2)
     txn = block_job_txn_new();
     job1 = test_block_job_start(1, true, expected1, &result1, txn);
     job2 = test_block_job_start(2, true, expected2, &result2, txn);
-    block_job_start(job1);
-    block_job_start(job2);
+    job_start(&job1->job);
+    job_start(&job2->job);
 
     /* Release our reference now to trigger as many nice
      * use-after-free bugs as possible.
@@ -227,8 +227,8 @@ static void test_pair_jobs_fail_cancel_race(void)
     txn = block_job_txn_new();
     job1 = test_block_job_start(1, true, -ECANCELED, &result1, txn);
     job2 = test_block_job_start(2, false, 0, &result2, txn);
-    block_job_start(job1);
-    block_job_start(job2);
+    job_start(&job1->job);
+    job_start(&job2->job);
 
     block_job_cancel(job1, false);
 
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index 5f43bd7..1d18325 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -199,8 +199,8 @@ static const BlockJobDriver test_cancel_driver = {
     .job_driver = {
         .instance_size = sizeof(CancelJob),
         .free          = block_job_free,
+        .start         = cancel_job_start,
     },
-    .start         = cancel_job_start,
     .complete      = cancel_job_complete,
 };
 
@@ -254,7 +254,7 @@ static void test_cancel_running(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
+    job_start(&job->job);
     assert(job->job.status == JOB_STATUS_RUNNING);
 
     cancel_common(s);
@@ -267,7 +267,7 @@ static void test_cancel_paused(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
+    job_start(&job->job);
     assert(job->job.status == JOB_STATUS_RUNNING);
 
     block_job_user_pause(job, &error_abort);
@@ -284,7 +284,7 @@ static void test_cancel_ready(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
+    job_start(&job->job);
     assert(job->job.status == JOB_STATUS_RUNNING);
 
     s->should_converge = true;
@@ -301,7 +301,7 @@ static void test_cancel_standby(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
+    job_start(&job->job);
     assert(job->job.status == JOB_STATUS_RUNNING);
 
     s->should_converge = true;
@@ -322,7 +322,7 @@ static void test_cancel_pending(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
+    job_start(&job->job);
     assert(job->job.status == JOB_STATUS_RUNNING);
 
     s->should_converge = true;
@@ -346,7 +346,7 @@ static void test_cancel_concluded(void)
 
     s = create_common(&job);
 
-    block_job_start(job);
+    job_start(&job->job);
     assert(job->job.status == JOB_STATUS_RUNNING);
 
     s->should_converge = true;
-- 
1.8.3.1