From c9b51d54530c526f14ca0f3b9fc0bfa0b60d45ee Mon Sep 17 00:00:00 2001
From: Si-Wei Liu <si-wei.liu@oracle.com>
Date: Fri, 6 May 2022 19:28:18 -0700
Subject: [PATCH 20/24] virtio-net: don't handle mq request in userspace
 handler for vhost-vdpa
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Jason Wang <jasowang@redhat.com>
RH-MergeRequest: 187: Multiqueue fixes for vhost-vDPA
RH-Commit: [7/7] 0e6684d12e42752deae8f5ebc56456fed174e0ed
RH-Bugzilla: 2069946
RH-Acked-by: Eugenio Pérez <eperezma@redhat.com>
RH-Acked-by: Cindy Lu <lulu@redhat.com>
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>

virtio_queue_host_notifier_read() tends to read pending events
left behind on the ioeventfd in the vhost_net_stop() path, and
attempts to handle the outstanding kick through the userspace
vq handler. However, in the ctrl_vq handler,
virtio_net_handle_mq() makes a recursive call into
virtio_net_set_status(), which may lead to a segmentation
fault, as shown in the stack trace below:

0  0x000055f800df1780 in qdev_get_parent_bus (dev=0x0) at ../hw/core/qdev.c:376
1  0x000055f800c68ad8 in virtio_bus_device_iommu_enabled (vdev=vdev@entry=0x0) at ../hw/virtio/virtio-bus.c:331
2  0x000055f800d70d7f in vhost_memory_unmap (dev=<optimized out>) at ../hw/virtio/vhost.c:318
3  0x000055f800d70d7f in vhost_memory_unmap (dev=<optimized out>, buffer=0x7fc19bec5240, len=2052, is_write=1, access_len=2052) at ../hw/virtio/vhost.c:336
4  0x000055f800d71867 in vhost_virtqueue_stop (dev=dev@entry=0x55f8037ccc30, vdev=vdev@entry=0x55f8044ec590, vq=0x55f8037cceb0, idx=0) at ../hw/virtio/vhost.c:1241
5  0x000055f800d7406c in vhost_dev_stop (hdev=hdev@entry=0x55f8037ccc30, vdev=vdev@entry=0x55f8044ec590) at ../hw/virtio/vhost.c:1839
6  0x000055f800bf00a7 in vhost_net_stop_one (net=0x55f8037ccc30, dev=0x55f8044ec590) at ../hw/net/vhost_net.c:315
7  0x000055f800bf0678 in vhost_net_stop (dev=dev@entry=0x55f8044ec590, ncs=0x55f80452bae0, data_queue_pairs=data_queue_pairs@entry=7, cvq=cvq@entry=1)
   at ../hw/net/vhost_net.c:423
8  0x000055f800d4e628 in virtio_net_set_status (status=<optimized out>, n=0x55f8044ec590) at ../hw/net/virtio-net.c:296
9  0x000055f800d4e628 in virtio_net_set_status (vdev=vdev@entry=0x55f8044ec590, status=15 '\017') at ../hw/net/virtio-net.c:370
10 0x000055f800d534d8 in virtio_net_handle_ctrl (iov_cnt=<optimized out>, iov=<optimized out>, cmd=0 '\000', n=0x55f8044ec590) at ../hw/net/virtio-net.c:1408
11 0x000055f800d534d8 in virtio_net_handle_ctrl (vdev=0x55f8044ec590, vq=0x7fc1a7e888d0) at ../hw/net/virtio-net.c:1452
12 0x000055f800d69f37 in virtio_queue_host_notifier_read (vq=0x7fc1a7e888d0) at ../hw/virtio/virtio.c:2331
13 0x000055f800d69f37 in virtio_queue_host_notifier_read (n=n@entry=0x7fc1a7e8894c) at ../hw/virtio/virtio.c:3575
14 0x000055f800c688e6 in virtio_bus_cleanup_host_notifier (bus=<optimized out>, n=n@entry=14) at ../hw/virtio/virtio-bus.c:312
15 0x000055f800d73106 in vhost_dev_disable_notifiers (hdev=hdev@entry=0x55f8035b51b0, vdev=vdev@entry=0x55f8044ec590)
   at ../../../include/hw/virtio/virtio-bus.h:35
16 0x000055f800bf00b2 in vhost_net_stop_one (net=0x55f8035b51b0, dev=0x55f8044ec590) at ../hw/net/vhost_net.c:316
17 0x000055f800bf0678 in vhost_net_stop (dev=dev@entry=0x55f8044ec590, ncs=0x55f80452bae0, data_queue_pairs=data_queue_pairs@entry=7, cvq=cvq@entry=1)
   at ../hw/net/vhost_net.c:423
18 0x000055f800d4e628 in virtio_net_set_status (status=<optimized out>, n=0x55f8044ec590) at ../hw/net/virtio-net.c:296
19 0x000055f800d4e628 in virtio_net_set_status (vdev=0x55f8044ec590, status=15 '\017') at ../hw/net/virtio-net.c:370
20 0x000055f800d6c4b2 in virtio_set_status (vdev=0x55f8044ec590, val=<optimized out>) at ../hw/virtio/virtio.c:1945
21 0x000055f800d11d9d in vm_state_notify (running=running@entry=false, state=state@entry=RUN_STATE_SHUTDOWN) at ../softmmu/runstate.c:333
22 0x000055f800d04e7a in do_vm_stop (state=state@entry=RUN_STATE_SHUTDOWN, send_stop=send_stop@entry=false) at ../softmmu/cpus.c:262
23 0x000055f800d04e99 in vm_shutdown () at ../softmmu/cpus.c:280
24 0x000055f800d126af in qemu_cleanup () at ../softmmu/runstate.c:812
25 0x000055f800ad5b13 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at ../softmmu/main.c:51

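For readability, the frames above boil down to the following
reentrancy cycle (a condensed view of the same trace, read
bottom-up; no new information is added here):

  vm_shutdown()
  -> virtio_net_set_status()            /* outer stop path */
  -> vhost_net_stop()
  -> vhost_dev_disable_notifiers()
  -> virtio_queue_host_notifier_read()  /* reads leftover kick */
  -> virtio_net_handle_ctrl()           /* userspace ctrl_vq handler */
  -> virtio_net_handle_mq()
  -> virtio_net_set_status()            /* recursive entry */
  -> vhost_net_stop()                   /* vhost state already torn down;
                                           NULL dev dereference, frames 0-1 */
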
For now, temporarily disable handling the MQ request from the
ctrl_vq userspace handler to avoid the recursive
virtio_net_set_status() call. Some rework is needed to allow
changing the number of queues without going through a full
virtio_net_set_status() cycle, particularly for the vhost-vdpa
backend.

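Note that with this change, a VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET
command from the guest is simply answered with VIRTIO_NET_ERR
when the peer is a vhost-vdpa backend. In practice this should
mean that changing the number of combined channels from inside
the guest, e.g. "ethtool -L eth0 combined 4" (interface name
illustrative), fails cleanly instead of crashing the qemu
process; the exact error surfaced to the guest depends on the
guest driver.
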
This patch will need to be reverted as soon as future patches
that handle the #queues change in userspace are merged.

Fixes: 402378407db ("vhost-vdpa: multiqueue support")
Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <1651890498-24478-8-git-send-email-si-wei.liu@oracle.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 2a7888cc3aa31faee839fa5dddad354ff8941f4c)
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/net/virtio-net.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index f118379bb4..7e172ef829 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1373,6 +1373,7 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
     uint16_t queue_pairs;
+    NetClientState *nc = qemu_get_queue(n->nic);
 
     virtio_net_disable_rss(n);
     if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) {
@@ -1404,6 +1405,18 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
         return VIRTIO_NET_ERR;
     }
 
+    /* Avoid changing the number of queue_pairs for vdpa device in
+     * userspace handler. A future fix is needed to handle the mq
+     * change in userspace handler with vhost-vdpa. Let's disable
+     * the mq handling from userspace for now and only allow it to
+     * be done through the kernel. Ripples may be seen when falling
+     * back to userspace, but without this the qemu process would
+     * crash on a recursive entry to virtio_net_set_status().
+     */
+    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
+        return VIRTIO_NET_ERR;
+    }
+
     n->curr_queue_pairs = queue_pairs;
     /* stop the backend before changing the number of queue_pairs to avoid handling a
      * disabled queue */
-- 
2.35.3