SOURCES/kvm-libvhost-user-Fix-some-memtable-remap-cases.patch

From ee360b70f179cf540faebe7e55b34e323e2bb179 Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Mon, 27 Jan 2020 19:02:09 +0100
Subject: [PATCH 098/116] libvhost-user: Fix some memtable remap cases
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20200127190227.40942-95-dgilbert@redhat.com>
Patchwork-id: 93548
O-Subject: [RHEL-AV-8.2 qemu-kvm PATCH 094/112] libvhost-user: Fix some memtable remap cases
Bugzilla: 1694164
RH-Acked-by: Philippe Mathieu-Daudé <philmd@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Sergio Lopez Pascual <slp@redhat.com>

From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>

If a new setmemtable command comes in once the vhost threads are
running, it will remap the guest's address space and the threads
will now be looking in the wrong place.

Fortunately we're running this command under lock, so we can
update the queue mappings so that the threads will look in the new,
correct place.

Note: This doesn't fix things that the threads might be doing
without a lock (e.g. a readv/writev!). That's for another time.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 49e9ec749d4db62ae51f76354143cee183912a1d)
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 contrib/libvhost-user/libvhost-user.c | 33 +++++++++++++++++++++++++--------
 contrib/libvhost-user/libvhost-user.h |  3 +++
 2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 63e4106..b89bf18 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -565,6 +565,21 @@ vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
 }
 
 static bool
+map_ring(VuDev *dev, VuVirtq *vq)
+{
+    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
+    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
+    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);
+
+    DPRINT("Setting virtq addresses:\n");
+    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
+    DPRINT("    vring_used  at %p\n", vq->vring.used);
+    DPRINT("    vring_avail at %p\n", vq->vring.avail);
+
+    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
+}
+
+static bool
 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 {
     int i;
@@ -767,6 +782,14 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
         close(vmsg->fds[i]);
     }
 
+    for (i = 0; i < dev->max_queues; i++) {
+        if (dev->vq[i].vring.desc) {
+            if (map_ring(dev, &dev->vq[i])) {
+                vu_panic(dev, "remaping queue %d during setmemtable", i);
+            }
+        }
+    }
+
     return false;
 }
 
@@ -853,18 +876,12 @@ vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
     DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", vra->avail_user_addr);
     DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", vra->log_guest_addr);
 
+    vq->vra = *vra;
     vq->vring.flags = vra->flags;
-    vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
-    vq->vring.used = qva_to_va(dev, vra->used_user_addr);
-    vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
     vq->vring.log_guest_addr = vra->log_guest_addr;
 
-    DPRINT("Setting virtq addresses:\n");
-    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
-    DPRINT("    vring_used  at %p\n", vq->vring.used);
-    DPRINT("    vring_avail at %p\n", vq->vring.avail);
 
-    if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
+    if (map_ring(dev, vq)) {
         vu_panic(dev, "Invalid vring_addr message");
         return false;
     }
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index 1844b6f..5cb7708 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -327,6 +327,9 @@ typedef struct VuVirtq {
     int err_fd;
     unsigned int enable;
     bool started;
+
+    /* Guest addresses of our ring */
+    struct vhost_vring_addr vra;
 } VuVirtq;
 
 enum VuWatchCondtion {
-- 
1.8.3.1
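
The core idea of the patch, stated outside the diff: the back-end remembers the QEMU user-space (qva) ring addresses it was given in SET_VRING_ADDR, and whenever a new memory table replaces the mappings it re-runs the qva-to-local-pointer translation for every live queue while the message lock is held. The stand-alone C sketch below illustrates that flow only; Region, Ring and this qva_to_va() are simplified stand-ins for illustration, not libvhost-user's real types or API.

/*
 * Stand-alone sketch (not part of the patch): why ring pointers must be
 * re-translated after a new memory table arrives. Types are simplified
 * stand-ins for libvhost-user's VuDevRegion/VuVirtq.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Region {
    uint64_t qva_start;   /* start of the range in QEMU's user address space */
    uint64_t size;        /* length of the range */
    uint8_t *mmap_addr;   /* where this process currently maps that memory */
} Region;

typedef struct Ring {
    /* QEMU user-space addresses, remembered from SET_VRING_ADDR */
    uint64_t desc_qva, used_qva, avail_qva;
    /* local pointers, recomputed whenever the memory table changes */
    void *desc, *used, *avail;
} Ring;

/* Translate a QEMU user-space address into a local pointer. */
static void *qva_to_va(Region *regions, int n, uint64_t qva)
{
    for (int i = 0; i < n; i++) {
        Region *r = &regions[i];
        if (qva >= r->qva_start && qva < r->qva_start + r->size) {
            return r->mmap_addr + (qva - r->qva_start);
        }
    }
    return NULL; /* address not covered by the current table */
}

/* Analogue of the patch's map_ring(): returns non-zero on failure. */
static int map_ring(Region *regions, int n, Ring *ring)
{
    ring->desc  = qva_to_va(regions, n, ring->desc_qva);
    ring->used  = qva_to_va(regions, n, ring->used_qva);
    ring->avail = qva_to_va(regions, n, ring->avail_qva);
    return !(ring->desc && ring->used && ring->avail);
}

int main(void)
{
    uint8_t *old_map = malloc(0x1000), *new_map = malloc(0x1000);
    Region table[1] = { { 0x7f0000000000ULL, 0x1000, old_map } };
    Ring ring = { .desc_qva  = 0x7f0000000000ULL,
                  .used_qva  = 0x7f0000000100ULL,
                  .avail_qva = 0x7f0000000200ULL };

    map_ring(table, 1, &ring);          /* initial SET_VRING_ADDR */
    printf("desc before remap: %p\n", ring.desc);

    table[0].mmap_addr = new_map;       /* a new SET_MEM_TABLE remaps memory */
    if (map_ring(table, 1, &ring)) {    /* stale pointers must be redone */
        fprintf(stderr, "remap failed\n");
        return 1;
    }
    printf("desc after remap:  %p\n", ring.desc);

    free(old_map);
    free(new_map);
    return 0;
}

Keeping the original user addresses around (the new vra field in VuVirtq) is what makes the second translation possible: without them, only the already-stale local pointers would survive the remap.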