From fdd1f3bf672ad8bb0a6db896ec8cbc797c31da1f Mon Sep 17 00:00:00 2001
From: Sergio Lopez Pascual <slp@redhat.com>
Date: Wed, 24 Jun 2020 13:24:53 -0400
Subject: [PATCH 11/12] virtio-blk: On restart, process queued requests in the
 proper context

RH-Author: Sergio Lopez Pascual <slp@redhat.com>
Message-id: <20200624132453.111276-3-slp@redhat.com>
Patchwork-id: 97798
O-Subject: [RHEL-AV-8.2.1 qemu-kvm PATCH 2/2] virtio-blk: On restart, process queued requests in the proper context
Bugzilla: 1812765
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Michael S. Tsirkin <mst@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>

On restart, we were scheduling a BH to process queued requests, which
would run before starting up the data plane, leading to those requests
being assigned and started on coroutines on the main context.

This could cause requests to be wrongly processed in parallel from
different threads (the main thread and the iothread managing the data
plane), potentially leading to multiple issues.
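
To picture the race, here is a minimal, self-contained pthreads sketch
(hypothetical code, not QEMU's: req, pop and drain are made-up
stand-ins for the VirtIOBlock->rq list):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
    int id;
    struct req *next;
};

static struct req *rq;          /* stand-in for VirtIOBlock->rq */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach one request from the list; NULL when empty. */
static struct req *pop(void)
{
    pthread_mutex_lock(&lock);
    struct req *r = rq;
    if (r) {
        rq = r->next;
    }
    pthread_mutex_unlock(&lock);
    return r;
}

/* Both threads run this: each request is started in whichever
 * context happens to grab it first. */
static void *drain(void *name)
{
    struct req *r;
    while ((r = pop()) != NULL) {
        printf("%s context starts request %d\n", (char *)name, r->id);
        free(r);
    }
    return NULL;
}

int main(void)
{
    for (int i = 0; i < 8; i++) {
        struct req *r = malloc(sizeof(*r));
        r->id = i;
        r->next = rq;
        rq = r;
    }

    pthread_t main_ctx, iothread;
    pthread_create(&main_ctx, NULL, drain, "main");
    pthread_create(&iothread, NULL, drain, "iothread");
    pthread_join(main_ctx, NULL);
    pthread_join(iothread, NULL);
    return 0;
}

The toy mutex keeps the list itself consistent; what it illustrates is
that the requests get split between the two contexts, while virtio-blk
expects a single AioContext to own all of them.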

For example, stopping and resuming a VM multiple times while the guest
is generating I/O on a virtio_blk device can trigger a crash with a
stack trace looking like this one:

<------>
 Thread 2 (Thread 0x7ff736765700 (LWP 1062503)):
 #0  0x00005567a13b99d6 in iov_memset
    (iov=0x6563617073206f4e, iov_cnt=1717922848, offset=516096, fillc=0, bytes=7018105756081554803)
    at util/iov.c:69
 #1  0x00005567a13bab73 in qemu_iovec_memset
    (qiov=0x7ff73ec99748, offset=516096, fillc=0, bytes=7018105756081554803) at util/iov.c:530
 #2  0x00005567a12f411c in qemu_laio_process_completion (laiocb=0x7ff6512ee6c0) at block/linux-aio.c:86
 #3  0x00005567a12f42ff in qemu_laio_process_completions (s=0x7ff7182e8420) at block/linux-aio.c:217
 #4  0x00005567a12f480d in ioq_submit (s=0x7ff7182e8420) at block/linux-aio.c:323
 #5  0x00005567a12f43d9 in qemu_laio_process_completions_and_submit (s=0x7ff7182e8420)
    at block/linux-aio.c:236
 #6  0x00005567a12f44c2 in qemu_laio_poll_cb (opaque=0x7ff7182e8430) at block/linux-aio.c:267
 #7  0x00005567a13aed83 in run_poll_handlers_once (ctx=0x5567a2b58c70, timeout=0x7ff7367645f8)
    at util/aio-posix.c:520
 #8  0x00005567a13aee9f in run_poll_handlers (ctx=0x5567a2b58c70, max_ns=16000, timeout=0x7ff7367645f8)
    at util/aio-posix.c:562
 #9  0x00005567a13aefde in try_poll_mode (ctx=0x5567a2b58c70, timeout=0x7ff7367645f8)
    at util/aio-posix.c:597
 #10 0x00005567a13af115 in aio_poll (ctx=0x5567a2b58c70, blocking=true) at util/aio-posix.c:639
 #11 0x00005567a109acca in iothread_run (opaque=0x5567a2b29760) at iothread.c:75
 #12 0x00005567a13b2790 in qemu_thread_start (args=0x5567a2b694c0) at util/qemu-thread-posix.c:519
 #13 0x00007ff73eedf2de in start_thread () at /lib64/libpthread.so.0
 #14 0x00007ff73ec10e83 in clone () at /lib64/libc.so.6

 Thread 1 (Thread 0x7ff743986f00 (LWP 1062500)):
 #0  0x00005567a13b99d6 in iov_memset
    (iov=0x6563617073206f4e, iov_cnt=1717922848, offset=516096, fillc=0, bytes=7018105756081554803)
    at util/iov.c:69
 #1  0x00005567a13bab73 in qemu_iovec_memset
    (qiov=0x7ff73ec99748, offset=516096, fillc=0, bytes=7018105756081554803) at util/iov.c:530
 #2  0x00005567a12f411c in qemu_laio_process_completion (laiocb=0x7ff6512ee6c0) at block/linux-aio.c:86
 #3  0x00005567a12f42ff in qemu_laio_process_completions (s=0x7ff7182e8420) at block/linux-aio.c:217
 #4  0x00005567a12f480d in ioq_submit (s=0x7ff7182e8420) at block/linux-aio.c:323
 #5  0x00005567a12f4a2f in laio_do_submit (fd=19, laiocb=0x7ff5f4ff9ae0, offset=472363008, type=2)
    at block/linux-aio.c:375
 #6  0x00005567a12f4af2 in laio_co_submit
    (bs=0x5567a2b8c460, s=0x7ff7182e8420, fd=19, offset=472363008, qiov=0x7ff5f4ff9ca0, type=2)
    at block/linux-aio.c:394
 #7  0x00005567a12f1803 in raw_co_prw
    (bs=0x5567a2b8c460, offset=472363008, bytes=20480, qiov=0x7ff5f4ff9ca0, type=2)
    at block/file-posix.c:1892
 #8  0x00005567a12f1941 in raw_co_pwritev
    (bs=0x5567a2b8c460, offset=472363008, bytes=20480, qiov=0x7ff5f4ff9ca0, flags=0)
    at block/file-posix.c:1925
 #9  0x00005567a12fe3e1 in bdrv_driver_pwritev
    (bs=0x5567a2b8c460, offset=472363008, bytes=20480, qiov=0x7ff5f4ff9ca0, qiov_offset=0, flags=0)
    at block/io.c:1183
 #10 0x00005567a1300340 in bdrv_aligned_pwritev
    (child=0x5567a2b5b070, req=0x7ff5f4ff9db0, offset=472363008, bytes=20480, align=512, qiov=0x7ff72c0425b8, qiov_offset=0, flags=0) at block/io.c:1980
 #11 0x00005567a1300b29 in bdrv_co_pwritev_part
    (child=0x5567a2b5b070, offset=472363008, bytes=20480, qiov=0x7ff72c0425b8, qiov_offset=0, flags=0)
    at block/io.c:2137
 #12 0x00005567a12baba1 in qcow2_co_pwritev_task
    (bs=0x5567a2b92740, file_cluster_offset=472317952, offset=487305216, bytes=20480, qiov=0x7ff72c0425b8, qiov_offset=0, l2meta=0x0) at block/qcow2.c:2444
 #13 0x00005567a12bacdb in qcow2_co_pwritev_task_entry (task=0x5567a2b48540) at block/qcow2.c:2475
 #14 0x00005567a13167d8 in aio_task_co (opaque=0x5567a2b48540) at block/aio_task.c:45
 #15 0x00005567a13cf00c in coroutine_trampoline (i0=738245600, i1=32759) at util/coroutine-ucontext.c:115
 #16 0x00007ff73eb622e0 in __start_context () at /lib64/libc.so.6
 #17 0x00007ff6626f1350 in ()
 #18 0x0000000000000000 in ()
<------>

This is also known to cause crashes with this message (assertion
failed):

aio_co_schedule: Co-routine was already scheduled in 'aio_co_schedule'
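
That assertion comes from aio_co_schedule() refusing to schedule a
coroutine that is already scheduled, which is what happens when two
contexts re-schedule the same request coroutine. A self-contained toy
version of that check (hypothetical struct and names, modeled on the
cmpxchg-on-a-marker pattern QEMU uses for this check):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct coroutine {
    /* Which function scheduled this coroutine, or NULL if none did. */
    _Atomic(const char *) scheduled;
};

static void schedule(struct coroutine *co, const char *caller)
{
    const char *expected = NULL;
    /* Atomically claim the coroutine; abort if someone already did. */
    if (!atomic_compare_exchange_strong(&co->scheduled, &expected, caller)) {
        fprintf(stderr, "%s: Co-routine was already scheduled in '%s'\n",
                caller, expected);
        abort();
    }
}

int main(void)
{
    struct coroutine co = { .scheduled = NULL };
    schedule(&co, "aio_co_schedule");
    schedule(&co, "aio_co_schedule");  /* second schedule trips the check */
    return 0;
}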

RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1812765
Signed-off-by: Sergio Lopez <slp@redhat.com>
Message-Id: <20200603093240.40489-3-slp@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 49b44549ace7890fffdf027fd3695218ee7f1121)
Signed-off-by: Sergio Lopez <slp@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 hw/block/dataplane/virtio-blk.c |  8 ++++++++
 hw/block/virtio-blk.c           | 18 ++++++++++++------
 include/hw/virtio/virtio-blk.h  |  2 +-
 3 files changed, 21 insertions(+), 7 deletions(-)
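
A note on the new is_bh flag in the hunks below:
virtio_blk_dma_restart_cb() takes an in-flight reference
(blk_inc_in_flight()) when it schedules the BH, so only the BH path may
drop that reference again. A minimal, hypothetical C sketch of this
accounting (plain functions stand in for QEMU's BH and in-flight
machinery; these are not the real APIs):

#include <stdbool.h>
#include <stdio.h>

static int in_flight;    /* stand-in for the BlockBackend in-flight counter */
static bool bh_pending;  /* stand-in for s->bh being scheduled */

static void process_queued_requests(bool is_bh)
{
    printf("processing queued requests (is_bh=%d)\n", is_bh);
    if (is_bh) {
        in_flight--;     /* only the BH path holds a reference to drop */
    }
}

/* BH path, used when ioeventfd is disabled. */
static void dma_restart_bh(void)
{
    bh_pending = false;
    process_queued_requests(true);
}

/* Data-plane start path, used when ioeventfd is enabled: no reference
 * was taken, so is_bh=false must leave the counter alone. */
static void data_plane_start(void)
{
    process_queued_requests(false);
}

static void dma_restart_cb(bool ioeventfd_enabled)
{
    if (!bh_pending && !ioeventfd_enabled) {
        bh_pending = true;
        in_flight++;     /* dropped again inside the BH */
    }
}

int main(void)
{
    dma_restart_cb(true);    /* ioeventfd on: no BH is scheduled... */
    data_plane_start();      /* ...the data-plane start drains the queue */

    dma_restart_cb(false);   /* ioeventfd off: BH path takes a reference */
    if (bh_pending) {
        dma_restart_bh();    /* ...and drops it when the BH runs */
    }

    printf("in_flight = %d\n", in_flight);  /* balanced: prints 0 */
    return 0;
}

Either way the counter ends up balanced, because the only path that
decrements it is the one that incremented it.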

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 119906a5fe..ac495fd72a 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -220,6 +220,9 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
         goto fail_guest_notifiers;
     }
 
+    /* Process queued requests before the ones in vring */
+    virtio_blk_process_queued_requests(vblk, false);
+
     /* Kick right away to begin processing requests already in vring */
     for (i = 0; i < nvqs; i++) {
         VirtQueue *vq = virtio_get_queue(s->vdev, i);
@@ -239,6 +242,11 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     return 0;
 
   fail_guest_notifiers:
+    /*
+     * If we failed to set up the guest notifiers queued requests will be
+     * processed on the main context.
+     */
+    virtio_blk_process_queued_requests(vblk, false);
     vblk->dataplane_disabled = true;
     s->starting = false;
     vblk->dataplane_started = true;
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 6ff29a05d6..493a263fa6 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -819,7 +819,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
     virtio_blk_handle_output_do(s, vq);
 }
 
-void virtio_blk_process_queued_requests(VirtIOBlock *s)
+void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
 {
     VirtIOBlockReq *req = s->rq;
     MultiReqBuffer mrb = {};
@@ -847,7 +847,9 @@ void virtio_blk_process_queued_requests(VirtIOBlock *s)
     if (mrb.num_reqs) {
         virtio_blk_submit_multireq(s->blk, &mrb);
     }
-    blk_dec_in_flight(s->conf.conf.blk);
+    if (is_bh) {
+        blk_dec_in_flight(s->conf.conf.blk);
+    }
     aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
@@ -858,21 +860,25 @@ static void virtio_blk_dma_restart_bh(void *opaque)
     qemu_bh_delete(s->bh);
     s->bh = NULL;
 
-    virtio_blk_process_queued_requests(s);
+    virtio_blk_process_queued_requests(s, true);
 }
 
 static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                       RunState state)
 {
     VirtIOBlock *s = opaque;
+    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
+    VirtioBusState *bus = VIRTIO_BUS(qbus);
 
     if (!running) {
         return;
     }
 
-    if (!s->bh) {
-        /* FIXME The data plane is not started yet, so these requests are
-         * processed in the main thread. */
+    /*
+     * If ioeventfd is enabled, don't schedule the BH here as queued
+     * requests will be processed while starting the data plane.
+     */
+    if (!s->bh && !virtio_bus_ioeventfd_enabled(bus)) {
         s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk),
                            virtio_blk_dma_restart_bh, s);
         blk_inc_in_flight(s->conf.conf.blk);
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index cf8eea2f58..e77f0db3b0 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -84,6 +84,6 @@ typedef struct MultiReqBuffer {
 } MultiReqBuffer;
 
 bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
-void virtio_blk_process_queued_requests(VirtIOBlock *s);
+void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh);
 
 #endif
-- 
2.27.0