From 600b6421fd97881aad2471ea0a6f465a2d55e9d6 Mon Sep 17 00:00:00 2001
From: Dr. David Alan Gilbert (git) <dgilbert@redhat.com>
Date: Thu, 8 May 2014 10:58:39 +0200
Subject: [PATCH 04/31] XBZRLE: Fix qemu crash when resize the xbzrle cache

RH-Author: Dr. David Alan Gilbert (git) <dgilbert@redhat.com>
Message-id: <1399546722-6350-2-git-send-email-dgilbert@redhat.com>
Patchwork-id: 58741
O-Subject: [RHEL7.1/RHEL7.0.z qemu-kvm PATCH 1/4] XBZRLE: Fix qemu crash when resize the xbzrle cache
Bugzilla: 1066338
RH-Acked-by: Juan Quintela <quintela@redhat.com>
RH-Acked-by: Markus Armbruster <armbru@redhat.com>
RH-Acked-by: Amit Shah <amit.shah@redhat.com>

From: Gonglei <arei.gonglei@huawei.com>

Resizing the xbzrle cache during migration causes a qemu crash,
because the main thread and the migration thread modify the xbzrle
cache concurrently without lock protection.

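For readers who want the locking scheme in isolation: the sketch below is a
minimal, standalone C illustration of the pattern the patch applies in
xbzrle_cache_resize(), i.e. allocate the new cache outside the lock, swap
only the pointer while holding the lock, and free the replaced cache after
dropping the lock. It uses a plain pthread mutex rather than QemuMutex, and
the cache_t type with cache_new()/cache_free() are placeholders standing in
for qemu's PageCache, cache_init() and cache_fini(); it is not qemu code.

/*
 * Standalone sketch of the swap-under-lock pattern (placeholder types,
 * not qemu code).
 */
#include <pthread.h>
#include <stdlib.h>

typedef struct cache_t {
    size_t num_pages;
} cache_t;

/* Shared between the "main" thread (resize) and the "migration" thread (use). */
static cache_t *cache;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static cache_t *cache_new(size_t num_pages)
{
    cache_t *c = calloc(1, sizeof(*c));

    if (c) {
        c->num_pages = num_pages;
    }
    return c;
}

static void cache_free(cache_t *c)
{
    free(c);
}

/* Called from the main thread while the migration thread may be using 'cache'. */
static int cache_resize(size_t new_num_pages)
{
    /* The potentially slow allocation happens outside the lock. */
    cache_t *new_cache = cache_new(new_num_pages);
    cache_t *old_cache;

    if (!new_cache) {
        return -1;
    }

    /*
     * Only the pointer swap is done under the lock, so the other thread
     * can never observe a half-built or already-freed cache.
     */
    pthread_mutex_lock(&cache_lock);
    if (cache) {
        old_cache = cache;
        cache = new_cache;
    } else {
        /* The cache was torn down meanwhile; discard the new one instead. */
        old_cache = new_cache;
    }
    pthread_mutex_unlock(&cache_lock);

    /* Freeing also happens outside the lock. */
    cache_free(old_cache);
    return 0;
}
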
Signed-off-by: ChenLiang <chenliang88@huawei.com>
Signed-off-by: Gonglei <arei.gonglei@huawei.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
(cherry picked from commit fd8cec932c2ddc687e2da954978954b46a926f90)
---
 arch_init.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 3 deletions(-)

Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 arch_init.c |   52 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 49 insertions(+), 3 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index f5d521a..8641afa 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -164,8 +164,9 @@ static struct {
     uint8_t *encoded_buf;
     /* buffer for storing page content */
     uint8_t *current_buf;
-    /* Cache for XBZRLE */
+    /* Cache for XBZRLE, Protected by lock. */
     PageCache *cache;
+    QemuMutex lock;
 } XBZRLE = {
     .encoded_buf = NULL,
     .current_buf = NULL,
@@ -174,16 +175,52 @@ static struct {
 /* buffer used for XBZRLE decoding */
 static uint8_t *xbzrle_decoded_buf;
 
+static void XBZRLE_cache_lock(void)
+{
+    if (migrate_use_xbzrle())
+        qemu_mutex_lock(&XBZRLE.lock);
+}
+
+static void XBZRLE_cache_unlock(void)
+{
+    if (migrate_use_xbzrle())
+        qemu_mutex_unlock(&XBZRLE.lock);
+}
+
 int64_t xbzrle_cache_resize(int64_t new_size)
 {
+    PageCache *new_cache, *cache_to_free;
+
     if (new_size < TARGET_PAGE_SIZE) {
         return -1;
     }
 
+    /* no need to lock, the current thread holds qemu big lock */
     if (XBZRLE.cache != NULL) {
-        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
-            TARGET_PAGE_SIZE;
+        /* check XBZRLE.cache again later */
+        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
+            return pow2floor(new_size);
+        }
+        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
+                                        TARGET_PAGE_SIZE);
+        if (!new_cache) {
+            DPRINTF("Error creating cache\n");
+            return -1;
+        }
+
+        XBZRLE_cache_lock();
+        /* the XBZRLE.cache may have be destroyed, check it again */
+        if (XBZRLE.cache != NULL) {
+            cache_to_free = XBZRLE.cache;
+            XBZRLE.cache = new_cache;
+        } else {
+            cache_to_free = new_cache;
+        }
+        XBZRLE_cache_unlock();
+
+        cache_fini(cache_to_free);
     }
+
     return pow2floor(new_size);
 }
 
@@ -539,6 +576,8 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
             ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_sent);
 
+            XBZRLE_cache_lock();
+
             current_addr = block->offset + offset;
             if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                 if (ret != RAM_SAVE_CONTROL_DELAYED) {
@@ -587,6 +626,7 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
                 acct_info.norm_pages++;
             }
 
+            XBZRLE_cache_unlock();
             /* if page is unmodified, continue to the next */
             if (bytes_sent > 0) {
                 last_sent_block = block;
@@ -654,6 +694,7 @@ static void migration_end(void)
         migration_bitmap = NULL;
     }
 
+    XBZRLE_cache_lock();
     if (XBZRLE.cache) {
         cache_fini(XBZRLE.cache);
         g_free(XBZRLE.cache);
@@ -663,6 +704,7 @@ static void migration_end(void)
         XBZRLE.encoded_buf = NULL;
         XBZRLE.current_buf = NULL;
     }
+    XBZRLE_cache_unlock();
 }
 
 static void ram_migration_cancel(void *opaque)
@@ -693,13 +735,17 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     dirty_rate_high_cnt = 0;
 
     if (migrate_use_xbzrle()) {
+        qemu_mutex_lock_iothread();
         XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                   TARGET_PAGE_SIZE,
                                   TARGET_PAGE_SIZE);
         if (!XBZRLE.cache) {
+            qemu_mutex_unlock_iothread();
             DPRINTF("Error creating cache\n");
             return -1;
         }
+        qemu_mutex_init(&XBZRLE.lock);
+        qemu_mutex_unlock_iothread();
 
         /* We prefer not to abort if there is no memory */
         XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
-- 
1.7.1