From ccff61f6316a815aa4a538799e089dec7ce754c5 Mon Sep 17 00:00:00 2001
From: Dr. David Alan Gilbert (git) <dgilbert@redhat.com>
Date: Thu, 27 Feb 2014 14:53:37 +0100
Subject: [PATCH 3/6] Fix two XBZRLE corruption issues

RH-Author: Dr. David Alan Gilbert (git) <dgilbert@redhat.com>
Message-id: <1393512817-21040-1-git-send-email-dgilbert@redhat.com>
Patchwork-id: 57921
O-Subject: [RHEL-7.0 qemu-kvm PATCH 1/1] Fix two XBZRLE corruption issues
Bugzilla: 1063417
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Juan Quintela <quintela@redhat.com>
RH-Acked-by: Amit Shah <amit.shah@redhat.com>

From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>

Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1063417
Brew: https://brewweb.devel.redhat.com/taskinfo?taskID=7115209
Upstream: 6d3cb1f970ee85361618f7ff02869180394e012d

Push zero'd pages into the XBZRLE cache
    A page that was cached by XBZRLE, zero'd and then XBZRLE'd again
    was being compared against a stale cache value
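
    To see that failure in isolation, here is a minimal self-contained C
    sketch of the encode/apply cycle (a toy model with made-up names, not
    qemu's actual XBZRLE code):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define PAGE_SIZE 16

        static uint8_t cache_copy[PAGE_SIZE]; /* source-side XBZRLE cache */
        static uint8_t dest_copy[PAGE_SIZE];  /* what the destination has */

        /* Toy XBZRLE step: transmit only bytes that differ from the
         * cached copy; the destination keeps its own byte for the rest. */
        static void send_xbzrle(const uint8_t *src)
        {
            for (int i = 0; i < PAGE_SIZE; i++) {
                if (src[i] != cache_copy[i]) {
                    dest_copy[i] = src[i];
                }
            }
            memcpy(cache_copy, src, PAGE_SIZE);
        }

        int main(void)
        {
            uint8_t page[PAGE_SIZE] = { 0 };

            page[0] = 0xAA;
            send_xbzrle(page);               /* 1. page cached and sent  */

            memset(page, 0, PAGE_SIZE);      /* 2. page zeroed and sent as
                                              *    a zero page: only the
                                              *    destination is zeroed;
                                              *    the cache keeps the
                                              *    stale 0xAA (the bug)  */
            memset(dest_copy, 0, PAGE_SIZE);

            page[0] = 0xAA;                  /* 3. guest restores byte 0 */
            page[1] = 0x55;                  /*    and writes a new byte */
            send_xbzrle(page);               /* byte 0 equals the stale
                                              * cached value, so it is not
                                              * re-sent; the destination
                                              * keeps its zero           */

            printf("dest[0]=0x%02x src[0]=0x%02x -> %s\n", dest_copy[0],
                   page[0], dest_copy[0] == page[0] ? "ok" : "CORRUPT");
            return 0;
        }

    Inserting the zero page into the cache at step 2, as this patch does,
    makes step 3 re-send byte 0, and the output reads "ok".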

Don't use 'qemu_put_buffer_async' to put pages from the XBZRLE cache
    Since the cache might change before the data hits the wire
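
    A minimal toy model of that ordering hazard (made-up helper names,
    not the real QEMUFile API; the point is that an async put queues the
    caller's pointer and reads the bytes only at flush time):

        #include <stdio.h>
        #include <string.h>

        #define PAGE 8

        static const char *queued;  /* async put records a pointer only */

        static void put_buffer_async(const char *p) { queued = p; }
        static void put_buffer(char *wire, const char *p)
        {
            memcpy(wire, p, PAGE);  /* synchronous put copies right away */
        }
        static void flush(char *wire) { memcpy(wire, queued, PAGE); }

        int main(void)
        {
            char cache_page[PAGE] = "AAAAAAA"; /* page owned by the cache */
            char sync_wire[PAGE], async_wire[PAGE];

            put_buffer(sync_wire, cache_page); /* bytes captured here     */
            put_buffer_async(cache_page);      /* pointer queued only     */

            cache_page[0] = 'B';               /* cache page updated...   */
            flush(async_wire);                 /* ...before the queue is
                                                * drained: 'B' goes out   */

            printf("sync:  %.7s\n", sync_wire);  /* AAAAAAA (correct)   */
            printf("async: %.7s\n", async_wire); /* BAAAAAA (corrupted) */
            return 0;
        }

    Hence the patch falls back to the synchronous qemu_put_buffer() for
    any page that is sent out of the XBZRLE cache.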

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
(cherry picked from commit 6d3cb1f970ee85361618f7ff02869180394e012d)
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 arch_init.c                    |   64 ++++++++++++++++++++++++++++++---------
 include/migration/page_cache.h |    2 +-
 page_cache.c                   |    2 +-
 3 files changed, 51 insertions(+), 17 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index 31bf690..f5d521a 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -122,7 +122,6 @@ static void check_guest_throttling(void);
 #define RAM_SAVE_FLAG_XBZRLE   0x40
 /* 0x80 is reserved in migration.h start with 0x100 next */
 
-
 static struct defconfig_file {
     const char *filename;
     /* Indicates it is an user config file (disabled by -no-user-config) */
@@ -133,6 +132,7 @@ static struct defconfig_file {
     { NULL }, /* end of list */
 };
 
+static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
 
 int qemu_read_default_config_files(bool userconfig)
 {
@@ -273,6 +273,34 @@ static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
     return size;
 }
 
+/* This is the last block that we have visited searching for dirty pages
+ */
+static RAMBlock *last_seen_block;
+/* This is the last block from where we have sent data */
+static RAMBlock *last_sent_block;
+static ram_addr_t last_offset;
+static unsigned long *migration_bitmap;
+static uint64_t migration_dirty_pages;
+static uint32_t last_version;
+static bool ram_bulk_stage;
+
+/* Update the xbzrle cache to reflect a page that's been sent as all 0.
+ * The important thing is that a stale (not-yet-0'd) page be replaced
+ * by the new data.
+ * As a bonus, if the page wasn't in the cache it gets added so that
+ * when a small write is made into the 0'd page it gets XBZRLE sent
+ */
+static void xbzrle_cache_zero_page(ram_addr_t current_addr)
+{
+    if (ram_bulk_stage || !migrate_use_xbzrle()) {
+        return;
+    }
+
+    /* We don't care if this fails to allocate a new cache page
+     * as long as it updated an old one */
+    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
+}
+
 #define ENCODING_FLAG_XBZRLE 0x1
 
 static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
@@ -329,18 +357,6 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
     return bytes_sent;
 }
 
-
-/* This is the last block that we have visited serching for dirty pages
- */
-static RAMBlock *last_seen_block;
-/* This is the last block from where we have sent data */
-static RAMBlock *last_sent_block;
-static ram_addr_t last_offset;
-static unsigned long *migration_bitmap;
-static uint64_t migration_dirty_pages;
-static uint32_t last_version;
-static bool ram_bulk_stage;
-
 static inline
 ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                  ram_addr_t start)
@@ -512,6 +528,7 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
         } else {
             int ret;
             uint8_t *p;
+            bool send_async = true;
             int cont = (block == last_sent_block) ?
                 RAM_SAVE_FLAG_CONTINUE : 0;
 
@@ -522,6 +539,7 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
             ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_sent);
 
+            current_addr = block->offset + offset;
             if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                 if (ret != RAM_SAVE_CONTROL_DELAYED) {
                     if (bytes_sent > 0) {
@@ -536,19 +554,35 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
                                             RAM_SAVE_FLAG_COMPRESS);
                 qemu_put_byte(f, 0);
                 bytes_sent++;
+                /* Must let xbzrle know, otherwise a previous (now 0'd) cached
+                 * page would be stale
+                 */
+                xbzrle_cache_zero_page(current_addr);
             } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
-                current_addr = block->offset + offset;
                 bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                               offset, cont, last_stage);
                 if (!last_stage) {
+                    /* We must send exactly what's in the xbzrle cache
+                     * even if the page wasn't xbzrle compressed, so that
+                     * it's right next time.
+                     */
                     p = get_cached_data(XBZRLE.cache, current_addr);
+
+                    /* Can't send this cached data async, since the cache page
+                     * might get updated before it gets to the wire
+                     */
+                    send_async = false;
                 }
             }
 
             /* XBZRLE overflow or normal page */
             if (bytes_sent == -1) {
                 bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
-                qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
+                if (send_async) {
+                    qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
+                } else {
+                    qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
+                }
                 bytes_sent += TARGET_PAGE_SIZE;
                 acct_info.norm_pages++;
             }
diff --git a/include/migration/page_cache.h b/include/migration/page_cache.h
index d156f0d..2d5ce2d 100644
--- a/include/migration/page_cache.h
+++ b/include/migration/page_cache.h
@@ -66,7 +66,7 @@ uint8_t *get_cached_data(const PageCache *cache, uint64_t addr);
  * @addr: page address
  * @pdata: pointer to the page
  */
-int cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata);
+int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata);
 
 /**
  * cache_resize: resize the page cache. In case of size reduction the extra
diff --git a/page_cache.c b/page_cache.c
index 250772d..5a763f9 100644
--- a/page_cache.c
+++ b/page_cache.c
@@ -151,7 +151,7 @@ uint8_t *get_cached_data(const PageCache *cache, uint64_t addr)
     return cache_get_by_addr(cache, addr)->it_data;
 }
 
-int cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata)
+int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata)
 {
 
     CacheItem *it = NULL;
-- 
1.7.1