SOURCES/kvm-multifd-Implement-zero-copy-write-in-multifd-migrati.patch

From 7a7e2191f1ac4114380248cbd3c6ab7425250747 Mon Sep 17 00:00:00 2001
From: Leonardo Bras <leobras@redhat.com>
Date: Wed, 18 May 2022 02:52:25 -0300
Subject: [PATCH 23/37] multifd: Implement zero copy write in multifd migration
 (multifd-zero-copy)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Leonardo Brás <leobras@redhat.com>
RH-MergeRequest: 191: MSG_ZEROCOPY + Multifd @ rhel8.7
RH-Commit: [23/26] 904ce3909cfef62dd84cc7d3c6a3482e7e6f28e9
RH-Bugzilla: 2072049
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: Daniel P. Berrangé <berrange@redhat.com>
RH-Acked-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

Implement zero copy send on nocomp_send_write() by making use of the
QIOChannel writev + flags & flush interface.
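
In code terms, the pattern is roughly the following (a sketch for
illustration only, not part of the patch; error handling is trimmed and
"p" stands for a connected MultiFDSendParams):

    Error *local_err = NULL;

    /* Queue the pages; with QIO_CHANNEL_WRITE_FLAG_ZERO_COPY the kernel
     * pins them and transmits without copying into socket buffers. */
    qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL, 0,
                                QIO_CHANNEL_WRITE_FLAG_ZERO_COPY, &local_err);

    /* Later, wait until the kernel has completed every queued zero-copy
     * write; only then is it safe to reuse or free those pages. */
    qio_channel_flush(p->c, &local_err);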

Change multifd_send_sync_main() so flush_zero_copy() can be called
after each iteration in order to make sure all dirty pages are sent before
a new iteration is started. It will also flush at the beginning and at the
end of migration.
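
Condensed to its shape (a sketch only; "channels"/"nchannels" are
hypothetical stand-ins for multifd_send_state, and the real change is in
the multifd.c hunk below), the per-iteration flush works like this:

    static int flush_all_channels(QIOChannel **channels, int nchannels)
    {
        int i;

        for (i = 0; i < nchannels; i++) {
            Error *err = NULL;

            if (qio_channel_flush(channels[i], &err) < 0) {
                error_report_err(err);
                return -1; /* caller cancels the migration */
            }
        }
        return 0;
    }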

Also make it return -1 if flush_zero_copy() fails, in order to cancel
the migration process and avoid resuming the guest on the target host
before it has received all of the current RAM.

This works fine for RAM migration because RAM pages are not usually
freed, and it is no problem if a page's contents change between
writev_zero_copy() and the actual sending of the buffer: such a change
dirties the page and causes it to be re-sent in a later iteration anyway.

A lot of locked memory may be needed in order to use multifd migration
with zero-copy enabled, so it may be necessary to disable the feature for
low-privileged users trying to perform multifd migrations.
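
For instance (a sketch only, assuming the pinned pages are charged
against RLIMIT_MEMLOCK, as MSG_ZEROCOPY does on Linux; the helper name
is hypothetical), a management layer could check the limit up front:

    #include <stdbool.h>
    #include <sys/resource.h>

    static bool memlock_allows_zero_copy(size_t needed_bytes)
    {
        struct rlimit lim;

        if (getrlimit(RLIMIT_MEMLOCK, &lim) != 0) {
            return false;
        }
        return lim.rlim_cur == RLIM_INFINITY || lim.rlim_cur >= needed_bytes;
    }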

Signed-off-by: Leonardo Bras <leobras@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20220513062836.965425-9-leobras@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 5b1d9bab2da4fca3a3caee97c430e5709cb32b7b)
Signed-off-by: Leonardo Bras <leobras@redhat.com>
---
 migration/migration.c | 11 ++++++++++-
 migration/multifd.c   | 37 +++++++++++++++++++++++++++++++++++--
 migration/multifd.h   |  2 ++
 migration/socket.c    |  5 +++--
 4 files changed, 50 insertions(+), 5 deletions(-)

diff --git a/migration/migration.c b/migration/migration.c
index 8e28f2ee41..5357efd348 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1471,7 +1471,16 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp)
         error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: ");
         return false;
     }
-
+#ifdef CONFIG_LINUX
+    if (params->zero_copy_send &&
+        (!migrate_use_multifd() ||
+         params->multifd_compression != MULTIFD_COMPRESSION_NONE ||
+         (params->tls_creds && *params->tls_creds))) {
+        error_setg(errp,
+                   "Zero copy only available for non-compressed non-TLS multifd migration");
+        return false;
+    }
+#endif
     return true;
 }
 
diff --git a/migration/multifd.c b/migration/multifd.c
index 193f70cdba..90ab4c4346 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -576,6 +576,7 @@ void multifd_save_cleanup(void)
 int multifd_send_sync_main(QEMUFile *f)
 {
     int i;
+    bool flush_zero_copy;
 
     if (!migrate_use_multifd()) {
         return 0;
@@ -586,6 +587,20 @@ int multifd_send_sync_main(QEMUFile *f)
             return -1;
         }
     }
+
+    /*
+     * When using zero-copy, it's necessary to flush the pages before any of
+     * the pages can be sent again, so we'll make sure the new version of the
+     * pages will always arrive _later_ than the old pages.
+     *
+     * Currently we achieve this by flushing the zero-page requested writes
+     * per ram iteration, but in the future we could potentially optimize it
+     * to be less frequent, e.g. only after we finished one whole scanning of
+     * all the dirty bitmaps.
+     */
+
+    flush_zero_copy = migrate_use_zero_copy_send();
+
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
 
@@ -607,6 +622,17 @@ int multifd_send_sync_main(QEMUFile *f)
         ram_counters.transferred += p->packet_len;
         qemu_mutex_unlock(&p->mutex);
         qemu_sem_post(&p->sem);
+
+        if (flush_zero_copy && p->c) {
+            int ret;
+            Error *err = NULL;
+
+            ret = qio_channel_flush(p->c, &err);
+            if (ret < 0) {
+                error_report_err(err);
+                return -1;
+            }
+        }
     }
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
@@ -691,8 +717,8 @@ static void *multifd_send_thread(void *opaque)
                 p->iov[0].iov_base = p->packet;
             }
 
-            ret = qio_channel_writev_all(p->c, p->iov, p->iovs_num,
-                                         &local_err);
+            ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
+                                              0, p->write_flags, &local_err);
             if (ret != 0) {
                 break;
             }
@@ -933,6 +959,13 @@ int multifd_save_setup(Error **errp)
         /* We need one extra place for the packet header */
         p->iov = g_new0(struct iovec, page_count + 1);
         p->normal = g_new0(ram_addr_t, page_count);
+
+        if (migrate_use_zero_copy_send()) {
+            p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
+        } else {
+            p->write_flags = 0;
+        }
+
         socket_send_channel_create(multifd_new_send_channel_async, p);
     }
 
diff --git a/migration/multifd.h b/migration/multifd.h
index 92de878155..11d5e273e6 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -95,6 +95,8 @@ typedef struct {
     uint32_t packet_len;
     /* pointer to the packet */
     MultiFDPacket_t *packet;
+    /* multifd flags for sending ram */
+    int write_flags;
     /* multifd flags for each packet */
     uint32_t flags;
     /* size of the next packet that contains pages */
diff --git a/migration/socket.c b/migration/socket.c
index 3754d8f72c..4fd5e85f50 100644
--- a/migration/socket.c
+++ b/migration/socket.c
@@ -79,8 +79,9 @@ static void socket_outgoing_migration(QIOTask *task,
 
     trace_migration_socket_outgoing_connected(data->hostname);
 
-    if (migrate_use_zero_copy_send()) {
-        error_setg(&err, "Zero copy send not available in migration");
+    if (migrate_use_zero_copy_send() &&
+        !qio_channel_has_feature(sioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
+        error_setg(&err, "Zero copy send feature not detected in host kernel");
     }
 
 out:
-- 
2.35.3