yeahuh / rpms / qemu-kvm

Forked from rpms/qemu-kvm 2 years ago
Clone

Blame SOURCES/kvm-test-bdrv-drain-Test-draining-job-source-child-and-p.patch

ae23c9
From f90f3f9a94cb09c3d568fbc9dc338b5f8c5ea17c Mon Sep 17 00:00:00 2001
ae23c9
From: Kevin Wolf <kwolf@redhat.com>
ae23c9
Date: Wed, 10 Oct 2018 20:22:13 +0100
ae23c9
Subject: [PATCH 47/49] test-bdrv-drain: Test draining job source child and
ae23c9
 parent
ae23c9
ae23c9
RH-Author: Kevin Wolf <kwolf@redhat.com>
ae23c9
Message-id: <20181010202213.7372-35-kwolf@redhat.com>
ae23c9
Patchwork-id: 82624
ae23c9
O-Subject: [RHEL-8 qemu-kvm PATCH 44/44] test-bdrv-drain: Test draining job source child and parent
ae23c9
Bugzilla: 1637976
ae23c9
RH-Acked-by: Max Reitz <mreitz@redhat.com>
ae23c9
RH-Acked-by: John Snow <jsnow@redhat.com>
ae23c9
RH-Acked-by: Thomas Huth <thuth@redhat.com>
ae23c9
ae23c9
For the block job drain test, don't only test draining the source and
ae23c9
the target node, but create a backing chain for the source
ae23c9
(source_backing <- source <- source_overlay) and test draining each of
ae23c9
the nodes in it.
ae23c9
ae23c9
When using iothreads, the source node (and therefore the job) is in a
ae23c9
different AioContext than the drain, which happens from the main
ae23c9
thread. This way, the main thread waits in AIO_WAIT_WHILE() for the
ae23c9
iothread to make progress and aio_wait_kick() is required to notify it.
ae23c9
The test validates that calling bdrv_wakeup() for a child or a parent
ae23c9
node will actually notify AIO_WAIT_WHILE() instead of letting it hang.
ae23c9
ae23c9
Increase the sleep time a bit (to 1 ms) because the test case is racy
ae23c9
and with the shorter sleep, it didn't reproduce the bug it is supposed
ae23c9
to test for me under 'rr record -n'.
ae23c9
ae23c9
This was because bdrv_drain_invoke_entry() (in the main thread) was only
ae23c9
called after the job had already reached the pause point, so we got a
ae23c9
bdrv_dec_in_flight() from the main thread and the additional
ae23c9
aio_wait_kick() when the job becomes idle (that we really wanted to test
ae23c9
here) wasn't even necessary any more to make progress.
ae23c9
ae23c9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
ae23c9
Reviewed-by: Eric Blake <eblake@redhat.com>
ae23c9
Reviewed-by: Max Reitz <mreitz@redhat.com>
ae23c9
(cherry picked from commit d8b3afd597d54e496809b05ac39ac29a5799664f)
ae23c9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
ae23c9
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
ae23c9
---
ae23c9
 tests/test-bdrv-drain.c | 77 ++++++++++++++++++++++++++++++++++++++++++++-----
ae23c9
 1 file changed, 69 insertions(+), 8 deletions(-)
ae23c9
ae23c9
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
ae23c9
index 7e7ba9b..8641b54 100644
ae23c9
--- a/tests/test-bdrv-drain.c
ae23c9
+++ b/tests/test-bdrv-drain.c
ae23c9
@@ -786,6 +786,7 @@ typedef struct TestBlockJob {
ae23c9
     BlockJob common;
ae23c9
     int run_ret;
ae23c9
     int prepare_ret;
ae23c9
+    bool running;
ae23c9
     bool should_complete;
ae23c9
 } TestBlockJob;
ae23c9
 
ae23c9
@@ -818,12 +819,17 @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
ae23c9
 {
ae23c9
     TestBlockJob *s = container_of(job, TestBlockJob, common.job);
ae23c9
 
ae23c9
+    /* We are running the actual job code past the pause point in
ae23c9
+     * job_co_entry(). */
ae23c9
+    s->running = true;
ae23c9
+
ae23c9
     job_transition_to_ready(&s->common.job);
ae23c9
     while (!s->should_complete) {
ae23c9
         /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
ae23c9
          * emulate some actual activity (probably some I/O) here so that drain
ae23c9
          * has to wait for this activity to stop. */
ae23c9
-        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
ae23c9
+        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);
ae23c9
+
ae23c9
         job_pause_point(&s->common.job);
ae23c9
     }
ae23c9
 
ae23c9
@@ -856,11 +862,19 @@ enum test_job_result {
ae23c9
     TEST_JOB_FAIL_PREPARE,
ae23c9
 };
ae23c9
 
ae23c9
-static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
ae23c9
-                                 enum test_job_result result)
ae23c9
+enum test_job_drain_node {
ae23c9
+    TEST_JOB_DRAIN_SRC,
ae23c9
+    TEST_JOB_DRAIN_SRC_CHILD,
ae23c9
+    TEST_JOB_DRAIN_SRC_PARENT,
ae23c9
+};
ae23c9
+
ae23c9
+static void test_blockjob_common_drain_node(enum drain_type drain_type,
ae23c9
+                                            bool use_iothread,
ae23c9
+                                            enum test_job_result result,
ae23c9
+                                            enum test_job_drain_node drain_node)
ae23c9
 {
ae23c9
     BlockBackend *blk_src, *blk_target;
ae23c9
-    BlockDriverState *src, *target;
ae23c9
+    BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
ae23c9
     BlockJob *job;
ae23c9
     TestBlockJob *tjob;
ae23c9
     IOThread *iothread = NULL;
ae23c9
@@ -869,8 +883,32 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
ae23c9
 
ae23c9
     src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
ae23c9
                                &error_abort);
ae23c9
+    src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
ae23c9
+                                       BDRV_O_RDWR, &error_abort);
ae23c9
+    src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
ae23c9
+                                       BDRV_O_RDWR, &error_abort);
ae23c9
+
ae23c9
+    bdrv_set_backing_hd(src_overlay, src, &error_abort);
ae23c9
+    bdrv_unref(src);
ae23c9
+    bdrv_set_backing_hd(src, src_backing, &error_abort);
ae23c9
+    bdrv_unref(src_backing);
ae23c9
+
ae23c9
     blk_src = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
ae23c9
-    blk_insert_bs(blk_src, src, &error_abort);
ae23c9
+    blk_insert_bs(blk_src, src_overlay, &error_abort);
ae23c9
+
ae23c9
+    switch (drain_node) {
ae23c9
+    case TEST_JOB_DRAIN_SRC:
ae23c9
+        drain_bs = src;
ae23c9
+        break;
ae23c9
+    case TEST_JOB_DRAIN_SRC_CHILD:
ae23c9
+        drain_bs = src_backing;
ae23c9
+        break;
ae23c9
+    case TEST_JOB_DRAIN_SRC_PARENT:
ae23c9
+        drain_bs = src_overlay;
ae23c9
+        break;
ae23c9
+    default:
ae23c9
+        g_assert_not_reached();
ae23c9
+    }
ae23c9
 
ae23c9
     if (use_iothread) {
ae23c9
         iothread = iothread_new();
ae23c9
@@ -906,11 +944,21 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
ae23c9
     job_start(&job->job);
ae23c9
     aio_context_release(ctx);
ae23c9
 
ae23c9
+    if (use_iothread) {
ae23c9
+        /* job_co_entry() is run in the I/O thread, wait for the actual job
ae23c9
+         * code to start (we don't want to catch the job in the pause point in
ae23c9
+         * job_co_entry(). */
ae23c9
+        while (!tjob->running) {
ae23c9
+            aio_poll(qemu_get_aio_context(), false);
ae23c9
+        }
ae23c9
+    }
ae23c9
+
ae23c9
     g_assert_cmpint(job->job.pause_count, ==, 0);
ae23c9
     g_assert_false(job->job.paused);
ae23c9
+    g_assert_true(tjob->running);
ae23c9
     g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
ae23c9
 
ae23c9
-    do_drain_begin_unlocked(drain_type, src);
ae23c9
+    do_drain_begin_unlocked(drain_type, drain_bs);
ae23c9
 
ae23c9
     if (drain_type == BDRV_DRAIN_ALL) {
ae23c9
         /* bdrv_drain_all() drains both src and target */
ae23c9
@@ -921,7 +969,7 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
ae23c9
     g_assert_true(job->job.paused);
ae23c9
     g_assert_false(job->job.busy); /* The job is paused */
ae23c9
 
ae23c9
-    do_drain_end_unlocked(drain_type, src);
ae23c9
+    do_drain_end_unlocked(drain_type, drain_bs);
ae23c9
 
ae23c9
     if (use_iothread) {
ae23c9
         /* paused is reset in the I/O thread, wait for it */
ae23c9
@@ -969,7 +1017,7 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
ae23c9
 
ae23c9
     blk_unref(blk_src);
ae23c9
     blk_unref(blk_target);
ae23c9
-    bdrv_unref(src);
ae23c9
+    bdrv_unref(src_overlay);
ae23c9
     bdrv_unref(target);
ae23c9
 
ae23c9
     if (iothread) {
ae23c9
@@ -977,6 +1025,19 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
ae23c9
     }
ae23c9
 }
ae23c9
 
ae23c9
+static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
ae23c9
+                                 enum test_job_result result)
ae23c9
+{
ae23c9
+    test_blockjob_common_drain_node(drain_type, use_iothread, result,
ae23c9
+                                    TEST_JOB_DRAIN_SRC);
ae23c9
+    test_blockjob_common_drain_node(drain_type, use_iothread, result,
ae23c9
+                                    TEST_JOB_DRAIN_SRC_CHILD);
ae23c9
+    if (drain_type == BDRV_SUBTREE_DRAIN) {
ae23c9
+        test_blockjob_common_drain_node(drain_type, use_iothread, result,
ae23c9
+                                        TEST_JOB_DRAIN_SRC_PARENT);
ae23c9
+    }
ae23c9
+}
ae23c9
+
ae23c9
 static void test_blockjob_drain_all(void)
ae23c9
 {
ae23c9
     test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
ae23c9
-- 
ae23c9
1.8.3.1
ae23c9