From 0c19fb7c4a22a30830152b224b2e66963f829a7a Mon Sep 17 00:00:00 2001
From: Greg Kurz <groug@kaod.org>
Date: Thu, 19 Jan 2023 18:24:24 +0100
Subject: [PATCH 19/20] Revert "vhost-user: Introduce nested event loop in
 vhost_user_read()"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Laurent Vivier <lvivier@redhat.com>
RH-MergeRequest: 146: Fix vhost-user with dpdk
RH-Bugzilla: 2155173
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Greg Kurz (RH) <gkurz@redhat.com>
RH-Acked-by: Eugenio Pérez <eperezma@redhat.com>
RH-Commit: [2/2] 9b67041f92f29f70b7ccb41d8087801e4e4e38af (lvivier/qemu-kvm-centos)

This reverts commit a7f523c7d114d445c5d83aecdba3efc038e5a692.

The nested event loop is broken by design. Its only user was removed.
Drop the code as well so that nobody ever tries to use it again.

I had to fix a couple of trivial conflicts around return values because
of 025faa872bcf ("vhost-user: stick to -errno error return convention").

Signed-off-by: Greg Kurz <groug@kaod.org>
Message-Id: <20230119172424.478268-3-groug@kaod.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 4382138f642f69fdbc79ebf4e93d84be8061191f)
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 hw/virtio/vhost-user.c | 65 ++++--------------------------------------
 1 file changed, 5 insertions(+), 60 deletions(-)

diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 0ac00eb901..7cb49c50f9 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -305,19 +305,8 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
     return 0;
 }
 
-struct vhost_user_read_cb_data {
-    struct vhost_dev *dev;
-    VhostUserMsg *msg;
-    GMainLoop *loop;
-    int ret;
-};
-
-static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
-                                   gpointer opaque)
+static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
 {
-    struct vhost_user_read_cb_data *data = opaque;
-    struct vhost_dev *dev = data->dev;
-    VhostUserMsg *msg = data->msg;
     struct vhost_user *u = dev->opaque;
     CharBackend *chr = u->user->chr;
     uint8_t *p = (uint8_t *) msg;
@@ -325,8 +314,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
 
     r = vhost_user_read_header(dev, msg);
     if (r < 0) {
-        data->ret = r;
-        goto end;
+        return r;
     }
 
     /* validate message size is sane */
@@ -334,8 +322,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
         error_report("Failed to read msg header."
                " Size %d exceeds the maximum %zu.", msg->hdr.size,
                VHOST_USER_PAYLOAD_SIZE);
-        data->ret = -EPROTO;
-        goto end;
+        return -EPROTO;
     }
 
     if (msg->hdr.size) {
@@ -346,53 +333,11 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
             int saved_errno = errno;
             error_report("Failed to read msg payload."
                          " Read %d instead of %d.", r, msg->hdr.size);
-            data->ret = r < 0 ? -saved_errno : -EIO;
-            goto end;
+            return r < 0 ? -saved_errno : -EIO;
         }
     }
 
-end:
-    g_main_loop_quit(data->loop);
-    return G_SOURCE_REMOVE;
-}
-
-static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
-{
-    struct vhost_user *u = dev->opaque;
-    CharBackend *chr = u->user->chr;
-    GMainContext *prev_ctxt = chr->chr->gcontext;
-    GMainContext *ctxt = g_main_context_new();
-    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
-    struct vhost_user_read_cb_data data = {
-        .dev = dev,
-        .loop = loop,
-        .msg = msg,
-        .ret = 0
-    };
-
-    /*
-     * We want to be able to monitor the slave channel fd while waiting
-     * for chr I/O. This requires an event loop, but we can't nest the
-     * one to which chr is currently attached : its fd handlers might not
-     * be prepared for re-entrancy. So we create a new one and switch chr
-     * to use it.
-     */
-    qemu_chr_be_update_read_handlers(chr->chr, ctxt);
-    qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);
-
-    g_main_loop_run(loop);
-
-    /*
-     * Restore the previous event loop context. This also destroys/recreates
-     * event sources : this guarantees that all pending events in the original
-     * context that have been processed by the nested loop are purged.
-     */
-    qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
-
-    g_main_loop_unref(loop);
-    g_main_context_unref(ctxt);
-
-    return data.ret;
+    return 0;
 }
 
 static int process_message_reply(struct vhost_dev *dev,
-- 
2.31.1