thebeanogamer / rpms / qemu-kvm

Forked from rpms/qemu-kvm 5 months ago
Clone

Blame SOURCES/kvm-migration-Make-sure-that-we-don-t-call-write-in-case.patch

From 71b05ab5782aa1e38c016be6264a14f5650d2a87 Mon Sep 17 00:00:00 2001
From: Juan Quintela <quintela@redhat.com>
Date: Tue, 3 Mar 2020 14:51:35 +0000
Subject: [PATCH 03/18] migration: Make sure that we don't call write() in case
 of error

RH-Author: Juan Quintela <quintela@redhat.com>
Message-id: <20200303145143.149290-3-quintela@redhat.com>
Patchwork-id: 94113
O-Subject: [RHEL-AV-8.2.0 qemu-kvm PATCH v2 02/10] migration: Make sure that we don't call write() in case of error
Bugzilla: 1738451
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

If we are exiting due to an error/finish/.... Just don't try to even
touch the channel with one IO operation.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
(cherry picked from commit 4d65a6216bfc44891ac298b74a6921d479805131)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 migration/ram.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/migration/ram.c b/migration/ram.c
index 65580e3..8c783b3 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -899,6 +899,12 @@ struct {
     uint64_t packet_num;
     /* send channels ready */
     QemuSemaphore channels_ready;
+    /*
+     * Have we already run terminate threads.  There is a race when it
+     * happens that we got one error while we are exiting.
+     * We will use atomic operations.  Only valid values are 0 and 1.
+     */
+    int exiting;
 } *multifd_send_state;
 
 /*
@@ -927,6 +933,10 @@ static int multifd_send_pages(RAMState *rs)
     MultiFDPages_t *pages = multifd_send_state->pages;
     uint64_t transferred;
 
+    if (atomic_read(&multifd_send_state->exiting)) {
+        return -1;
+    }
+
     qemu_sem_wait(&multifd_send_state->channels_ready);
     for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
         p = &multifd_send_state->params[i];
@@ -1008,6 +1018,16 @@ static void multifd_send_terminate_threads(Error *err)
         }
     }
 
+    /*
+     * We don't want to exit each threads twice.  Depending on where
+     * we get the error, or if there are two independent errors in two
+     * threads at the same time, we can end calling this function
+     * twice.
+     */
+    if (atomic_xchg(&multifd_send_state->exiting, 1)) {
+        return;
+    }
+
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
 
@@ -1117,6 +1137,10 @@ static void *multifd_send_thread(void *opaque)
 
     while (true) {
         qemu_sem_wait(&p->sem);
+
+        if (atomic_read(&multifd_send_state->exiting)) {
+            break;
+        }
         qemu_mutex_lock(&p->mutex);
 
         if (p->pending_job) {
@@ -1225,6 +1249,7 @@ int multifd_save_setup(void)
     multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
     multifd_send_state->pages = multifd_pages_init(page_count);
     qemu_sem_init(&multifd_send_state->channels_ready, 0);
+    atomic_set(&multifd_send_state->exiting, 0);
 
     for (i = 0; i < thread_count; i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
-- 
1.8.3.1