From ee360b70f179cf540faebe7e55b34e323e2bb179 Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Mon, 27 Jan 2020 19:02:09 +0100
Subject: [PATCH 098/116] libvhost-user: Fix some memtable remap cases
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20200127190227.40942-95-dgilbert@redhat.com>
Patchwork-id: 93548
O-Subject: [RHEL-AV-8.2 qemu-kvm PATCH 094/112] libvhost-user: Fix some memtable remap cases
Bugzilla: 1694164
RH-Acked-by: Philippe Mathieu-Daudé <philmd@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Sergio Lopez Pascual <slp@redhat.com>

From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>

If a new setmemtable command comes in once the vhost threads are
running, it will remap the guest's address space and the threads
will now be looking in the wrong place.

Fortunately we're running this command under lock, so we can
update the queue mappings so that the threads will look in the
new, correct place.

Note: This doesn't fix accesses that the threads might be doing
without a lock (e.g. a readv/writev); that's for another time.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 49e9ec749d4db62ae51f76354143cee183912a1d)
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 contrib/libvhost-user/libvhost-user.c | 33 +++++++++++++++++++++++++--------
 contrib/libvhost-user/libvhost-user.h |  3 +++
 2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 63e4106..b89bf18 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -565,6 +565,21 @@ vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
 }
 
 static bool
+map_ring(VuDev *dev, VuVirtq *vq)
+{
+    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
+    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
+    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);
+
+    DPRINT("Setting virtq addresses:\n");
+    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
+    DPRINT("    vring_used  at %p\n", vq->vring.used);
+    DPRINT("    vring_avail at %p\n", vq->vring.avail);
+
+    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
+}
+
+static bool
 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 {
     int i;
@@ -767,6 +782,14 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
         close(vmsg->fds[i]);
     }
 
+    for (i = 0; i < dev->max_queues; i++) {
+        if (dev->vq[i].vring.desc) {
+            if (map_ring(dev, &dev->vq[i])) {
+                vu_panic(dev, "remaping queue %d during setmemtable", i);
+            }
+        }
+    }
+
     return false;
 }
 
@@ -853,18 +876,12 @@ vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
     DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", vra->avail_user_addr);
     DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", vra->log_guest_addr);
 
+    vq->vra = *vra;
     vq->vring.flags = vra->flags;
-    vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
-    vq->vring.used = qva_to_va(dev, vra->used_user_addr);
-    vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
     vq->vring.log_guest_addr = vra->log_guest_addr;
 
-    DPRINT("Setting virtq addresses:\n");
-    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
-    DPRINT("    vring_used  at %p\n", vq->vring.used);
-    DPRINT("    vring_avail at %p\n", vq->vring.avail);
 
-    if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
+    if (map_ring(dev, vq)) {
         vu_panic(dev, "Invalid vring_addr message");
         return false;
     }
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index 1844b6f..5cb7708 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -327,6 +327,9 @@ typedef struct VuVirtq {
     int err_fd;
     unsigned int enable;
    bool started;
+
+    /* Guest addresses of our ring */
+    struct vhost_vring_addr vra;
 } VuVirtq;
 
 enum VuWatchCondtion {
-- 
1.8.3.1
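
As background for how the fix works, here is a self-contained sketch of the same pattern outside libvhost-user: cache the guest-side ring addresses next to the translated pointers, and re-run the translation whenever the memory table is replaced. All names below (DevRegion, DevMem, Ring, dev_qva_to_va, remap_ring) are hypothetical stand-ins invented for illustration and are not the libvhost-user API; this is a minimal sketch assuming a flat in-memory region table rather than real mmap'd, file-backed regions.

/* Minimal, self-contained sketch (hypothetical names, not the libvhost-user API). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t qva_start;   /* start of the region in guest (QEMU virtual) addresses */
    uint64_t size;        /* region size in bytes */
    uint8_t *mmap_addr;   /* where the region is currently mapped in this process */
} DevRegion;

typedef struct {
    DevRegion regions[8];
    unsigned nregions;
} DevMem;

typedef struct {
    /* Guest addresses of the ring, cached so we can re-translate later. */
    uint64_t desc_qva, avail_qva, used_qva;
    /* Translated local pointers; these go stale when the table changes. */
    void *desc, *avail, *used;
} Ring;

/* Translate a guest virtual address into a local pointer, or NULL if unmapped. */
static void *dev_qva_to_va(DevMem *mem, uint64_t qva)
{
    for (unsigned i = 0; i < mem->nregions; i++) {
        DevRegion *r = &mem->regions[i];
        if (qva >= r->qva_start && qva - r->qva_start < r->size) {
            return r->mmap_addr + (qva - r->qva_start);
        }
    }
    return NULL;
}

/* Re-translate the cached guest addresses; returns true on failure,
 * following the map_ring() convention used in the patch. */
static bool remap_ring(DevMem *mem, Ring *rq)
{
    rq->desc  = dev_qva_to_va(mem, rq->desc_qva);
    rq->avail = dev_qva_to_va(mem, rq->avail_qva);
    rq->used  = dev_qva_to_va(mem, rq->used_qva);
    return !(rq->desc && rq->avail && rq->used);
}

int main(void)
{
    static uint8_t backing_a[0x2000], backing_b[0x2000];

    DevMem mem = { .nregions = 1 };
    mem.regions[0] = (DevRegion){ .qva_start = 0x10000, .size = 0x2000,
                                  .mmap_addr = backing_a };

    Ring rq = { .desc_qva = 0x10000, .avail_qva = 0x10800, .used_qva = 0x11000 };
    if (remap_ring(&mem, &rq)) {
        fprintf(stderr, "initial ring mapping failed\n");
        return 1;
    }

    /* A SET_MEM_TABLE-like update: the same guest range is now backed at a
     * different local address, so the pointers cached above are stale. */
    mem.regions[0].mmap_addr = backing_b;

    /* The fix from the patch, in miniature: re-translate every mapped ring
     * while the update is still being processed (i.e. under the same lock). */
    if (remap_ring(&mem, &rq)) {
        fprintf(stderr, "remapping the ring failed\n");
        return 1;
    }
    printf("desc now at %p (inside backing_b)\n", rq.desc);
    return 0;
}

The design point mirrored from the patch is that remap_ring() depends only on the cached guest addresses, so it can be called again after every memory-table update, under the same lock that serializes the update, without waiting for another SET_VRING_ADDR message.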