From 38282d996cde61261211160577b366b83cad8012 Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Mon, 27 Jan 2020 19:01:00 +0100
Subject: [PATCH 029/116] virtiofsd: Start queue threads
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20200127190227.40942-26-dgilbert@redhat.com>
Patchwork-id: 93479
O-Subject: [RHEL-AV-8.2 qemu-kvm PATCH 025/112] virtiofsd: Start queue threads
Bugzilla: 1694164
RH-Acked-by: Philippe Mathieu-Daudé <philmd@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Sergio Lopez Pascual <slp@redhat.com>

From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>

Start a thread for each queue when we get notified it's been started.
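
The thread body itself is still a stub at this point in the series
(fv_queue_thread() below just spins on a TODO).  For orientation only,
a minimal sketch of what a per-queue thread conventionally does with
the kick eventfd it is handed (block in read(2), then service the
vring) might look like the following; example_queue_thread() is
hypothetical and not part of this patch:

    /* Hypothetical sketch -- not part of this patch.  A per-queue
     * thread typically blocks on the queue's kick eventfd and then
     * services the vring; eventfd reads always transfer a uint64_t. */
    #include <stdint.h>
    #include <unistd.h>

    static void *example_queue_thread(void *opaque)
    {
        struct fv_QueueInfo *qi = opaque;
        uint64_t n;

        for (;;) {
            /* Wait until the guest kicks the queue */
            if (read(qi->kick_fd, &n, sizeof(n)) != sizeof(n)) {
                break;      /* fd closed or error: leave the thread */
            }
            /* ...pop and process requests for queue qi->qidx here... */
        }
        return NULL;
    }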

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
fix by:
Signed-off-by: Jun Piao <piaojun@huawei.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit e4c55a3c144493b436e40031e2eed61a84eca47b)

Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
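
Note: fv_queue_set_started() recovers the outer fv_VuDev from the
embedded VuDev that libvhost-user hands back, using the classic
container_of() idiom.  A self-contained illustration, with invented
stand-in types (QEMU ships its own definition, which differs in
detail):

    #include <stddef.h>

    /* Classic definition: recover the enclosing struct's address from
     * a pointer to one of its members by subtracting that member's
     * offset within the struct. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dev { int id; };            /* stand-in for VuDev */

    struct vdev {                      /* stand-in for fv_VuDev */
        struct dev dev;                /* embedded member */
        int extra_state;
    };

    /* Given the embedded member, get back to its container */
    static struct vdev *vdev_of(struct dev *d)
    {
        return container_of(d, struct vdev, dev);
    }
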
 tools/virtiofsd/fuse_virtio.c | 89 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)

diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
index 4819e56..2a94bb3 100644
--- a/tools/virtiofsd/fuse_virtio.c
+++ b/tools/virtiofsd/fuse_virtio.c
@@ -11,6 +11,7 @@
  * See the file COPYING.LIB
  */
 
+#include "qemu/osdep.h"
 #include "fuse_virtio.h"
 #include "fuse_i.h"
 #include "standard-headers/linux/fuse.h"
@@ -30,6 +31,15 @@
 
 #include "contrib/libvhost-user/libvhost-user.h"
 
+struct fv_QueueInfo {
+    pthread_t thread;
+    struct fv_VuDev *virtio_dev;
+
+    /* Our queue index, corresponds to array position */
+    int qidx;
+    int kick_fd;
+};
+
 /*
  * We pass the dev element into libvhost-user
  * and then use it to get back to the outer
@@ -38,6 +48,13 @@
 struct fv_VuDev {
     VuDev dev;
     struct fuse_session *se;
+
+    /*
+     * The following pair of fields are only accessed in the main
+     * virtio_loop
+     */
+    size_t nqueues;
+    struct fv_QueueInfo **qi;
 };
 
 /* From spec */
@@ -83,6 +100,75 @@ static void fv_panic(VuDev *dev, const char *err)
     exit(EXIT_FAILURE);
 }
 
+static void *fv_queue_thread(void *opaque)
+{
+    struct fv_QueueInfo *qi = opaque;
+    fuse_log(FUSE_LOG_INFO, "%s: Start for queue %d kick_fd %d\n", __func__,
+             qi->qidx, qi->kick_fd);
+    while (1) {
+        /* TODO */
+    }
+
+    return NULL;
+}
+
+/* Callback from libvhost-user on start or stop of a queue */
+static void fv_queue_set_started(VuDev *dev, int qidx, bool started)
+{
+    struct fv_VuDev *vud = container_of(dev, struct fv_VuDev, dev);
+    struct fv_QueueInfo *ourqi;
+
+    fuse_log(FUSE_LOG_INFO, "%s: qidx=%d started=%d\n", __func__, qidx,
+             started);
+    assert(qidx >= 0);
+
+    /*
+     * Ignore additional request queues for now. passthrough_ll.c must be
+     * audited for thread-safety issues first. It was written with a
+     * well-behaved client in mind and may not protect against all types of
+     * races yet.
+     */
+    if (qidx > 1) {
+        fuse_log(FUSE_LOG_ERR,
+                 "%s: multiple request queues not yet implemented, please only "
+                 "configure 1 request queue\n",
+                 __func__);
+        exit(EXIT_FAILURE);
+    }
+
+    if (started) {
+        /* Fire up a thread to watch this queue */
+        if (qidx >= vud->nqueues) {
+            vud->qi = realloc(vud->qi, (qidx + 1) * sizeof(vud->qi[0]));
+            assert(vud->qi);
+            memset(vud->qi + vud->nqueues, 0,
+                   sizeof(vud->qi[0]) * (1 + (qidx - vud->nqueues)));
+            vud->nqueues = qidx + 1;
+        }
+        if (!vud->qi[qidx]) {
+            vud->qi[qidx] = calloc(sizeof(struct fv_QueueInfo), 1);
+            assert(vud->qi[qidx]);
+            vud->qi[qidx]->virtio_dev = vud;
+            vud->qi[qidx]->qidx = qidx;
+        } else {
+            /* Shouldn't have been started */
+            assert(vud->qi[qidx]->kick_fd == -1);
+        }
+        ourqi = vud->qi[qidx];
+        ourqi->kick_fd = dev->vq[qidx].kick_fd;
+        if (pthread_create(&ourqi->thread, NULL, fv_queue_thread, ourqi)) {
+            fuse_log(FUSE_LOG_ERR, "%s: Failed to create thread for queue %d\n",
+                     __func__, qidx);
+            assert(0);
+        }
+    } else {
+        /* TODO: Kill the thread */
+        assert(qidx < vud->nqueues);
+        ourqi = vud->qi[qidx];
+        ourqi->kick_fd = -1;
+    }
+}
+
 static bool fv_queue_order(VuDev *dev, int qidx)
 {
     return false;
@@ -92,6 +178,9 @@ static const VuDevIface fv_iface = {
     .get_features = fv_get_features,
     .set_features = fv_set_features,
 
+    /* Don't need process message, we've not got any at vhost-user level */
+    .queue_set_started = fv_queue_set_started,
+
     .queue_is_processed_in_order = fv_queue_order,
 };
 
-- 
1.8.3.1