From c33bc0b7f2b5cfa330a6d89d60ee94de129c65c1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= <eperezma@redhat.com>
Date: Thu, 21 Jul 2022 16:05:38 +0200
Subject: [PATCH 23/32] vdpa: manual forward CVQ buffers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Eugenio Pérez <eperezma@redhat.com>
RH-MergeRequest: 108: Net Control Virtqueue shadow Support
RH-Commit: [23/27] ce128d5152be7eebf87e186eb8b58c2ed95aff6d (eperezmartin/qemu-kvm)
RH-Bugzilla: 1939363
RH-Acked-by: Stefano Garzarella <sgarzare@redhat.com>
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>

Bugzilla: https://bugzilla.redhat.com/1939363

Upstream Status: git://git.qemu.org/qemu.git

commit bd907ae4b00ebedad5e586af05ea3d6490318d45
Author: Eugenio Pérez <eperezma@redhat.com>
Date:   Wed Jul 20 08:59:42 2022 +0200

    vdpa: manual forward CVQ buffers

    Do a simple forwarding of CVQ buffers, the same work SVQ could do but
    through callbacks. No functional change intended.

    Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
    Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
    Signed-off-by: Jason Wang <jasowang@redhat.com>

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-vdpa.c         |  3 +-
 include/hw/virtio/vhost-vdpa.h |  3 ++
 net/vhost-vdpa.c               | 58 ++++++++++++++++++++++++++++++++++
 3 files changed, 63 insertions(+), 1 deletion(-)

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 14b02fe079..49effe5462 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -417,7 +417,8 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
     for (unsigned n = 0; n < hdev->nvqs; ++n) {
         g_autoptr(VhostShadowVirtqueue) svq;
 
-        svq = vhost_svq_new(v->iova_tree, NULL, NULL);
+        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
+                            v->shadow_vq_ops_opaque);
         if (unlikely(!svq)) {
             error_setg(errp, "Cannot create svq %u", n);
             return -1;
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 7214eb47dc..1111d85643 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -15,6 +15,7 @@
 #include <gmodule.h>
 
 #include "hw/virtio/vhost-iova-tree.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
 #include "hw/virtio/virtio.h"
 #include "standard-headers/linux/vhost_types.h"
 
@@ -35,6 +36,8 @@ typedef struct vhost_vdpa {
     /* IOVA mapping used by the Shadow Virtqueue */
     VhostIOVATree *iova_tree;
     GPtrArray *shadow_vqs;
+    const VhostShadowVirtqueueOps *shadow_vq_ops;
+    void *shadow_vq_ops_opaque;
     struct vhost_dev *dev;
     VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
 } VhostVDPA;
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index df1e69ee72..2e3b6b10d8 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -11,11 +11,14 @@
 
 #include "qemu/osdep.h"
 #include "clients.h"
+#include "hw/virtio/virtio-net.h"
 #include "net/vhost_net.h"
 #include "net/vhost-vdpa.h"
 #include "hw/virtio/vhost-vdpa.h"
 #include "qemu/config-file.h"
 #include "qemu/error-report.h"
+#include "qemu/log.h"
+#include "qemu/memalign.h"
 #include "qemu/option.h"
 #include "qapi/error.h"
 #include <linux/vhost.h>
@@ -187,6 +190,57 @@ static NetClientInfo net_vhost_vdpa_info = {
         .check_peer_type = vhost_vdpa_check_peer_type,
 };
 
+/**
+ * Forward buffer for the moment.
+ */
+static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
+                                            VirtQueueElement *elem,
+                                            void *opaque)
+{
+    unsigned int n = elem->out_num + elem->in_num;
+    g_autofree struct iovec *dev_buffers = g_new(struct iovec, n);
+    size_t in_len, dev_written;
+    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+    int r;
+
+    memcpy(dev_buffers, elem->out_sg, elem->out_num);
+    memcpy(dev_buffers + elem->out_num, elem->in_sg, elem->in_num);
+
+    r = vhost_svq_add(svq, &dev_buffers[0], elem->out_num, &dev_buffers[1],
+                      elem->in_num, elem);
+    if (unlikely(r != 0)) {
+        if (unlikely(r == -ENOSPC)) {
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
+                          __func__);
+        }
+        goto out;
+    }
+
+    /*
+     * We can poll here since we've had BQL from the time we sent the
+     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
+     * when BQL is released
+     */
+    dev_written = vhost_svq_poll(svq);
+    if (unlikely(dev_written < sizeof(status))) {
+        error_report("Insufficient written data (%zu)", dev_written);
+    }
+
+out:
+    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
+                          sizeof(status));
+    if (unlikely(in_len < sizeof(status))) {
+        error_report("Bad device CVQ written length");
+    }
+    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
+    g_free(elem);
+    return r;
+}
+
+static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
+    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
+};
+
 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                            const char *device,
                                            const char *name,
@@ -211,6 +265,10 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
 
     s->vhost_vdpa.device_fd = vdpa_device_fd;
     s->vhost_vdpa.index = queue_pair_index;
+    if (!is_datapath) {
+        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
+        s->vhost_vdpa.shadow_vq_ops_opaque = s;
+    }
     ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
     if (ret) {
         qemu_del_net_client(nc);
-- 
2.31.1