From 35790a371aa43b6cc357bc78ee07dd20db16ec4b Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Wed, 1 Aug 2018 13:55:10 +0100
Subject: [PATCH 06/21] migration: move some code to ram_save_host_page

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20180801135522.11658-7-dgilbert@redhat.com>
Patchwork-id: 81570
O-Subject: [qemu-kvm RHEL8/virt212 PATCH 06/18] migration: move some code to ram_save_host_page
Bugzilla: 1594384
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Juan Quintela <quintela@redhat.com>

From: Xiao Guangrong <xiaoguangrong@tencent.com>

Move some code from ram_save_target_page() to ram_save_host_page()
to make it be more readable for later patches that dramatically
clean ram_save_target_page() up

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180330075128.26919-7-xiaoguangrong@tencent.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 1faa5665c0f1df2eff291454a3a85625a3bc93dd)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
ae23c9
 migration/ram.c | 43 +++++++++++++++++++------------------------
ae23c9
 1 file changed, 19 insertions(+), 24 deletions(-)
ae23c9
ae23c9
diff --git a/migration/ram.c b/migration/ram.c
index 8dc98a5..106fcf1 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1484,38 +1484,23 @@ err:
  * Returns the number of pages written
  *
  * @rs: current RAM state
- * @ms: current migration state
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
  */
 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                 bool last_stage)
 {
-    int res = 0;
-
-    /* Check the pages is dirty and if it is send it */
-    if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
-        /*
-         * If xbzrle is on, stop using the data compression after first
-         * round of migration even if compression is enabled. In theory,
-         * xbzrle can do better than compression.
-         */
-        if (migrate_use_compression() &&
-            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
-            res = ram_save_compressed_page(rs, pss, last_stage);
-        } else {
-            res = ram_save_page(rs, pss, last_stage);
-        }
-
-        if (res < 0) {
-            return res;
-        }
-        if (pss->block->unsentmap) {
-            clear_bit(pss->page, pss->block->unsentmap);
-        }
+    /*
+     * If xbzrle is on, stop using the data compression after first
+     * round of migration even if compression is enabled. In theory,
+     * xbzrle can do better than compression.
+     */
+    if (migrate_use_compression() &&
+        (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
+        return ram_save_compressed_page(rs, pss, last_stage);
     }
 
-    return res;
+    return ram_save_page(rs, pss, last_stage);
 }
 
 /**
@@ -1544,12 +1529,22 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
 
     do {
+        /* Check the pages is dirty and if it is send it */
+        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
+            pss->page++;
+            continue;
+        }
+
         tmppages = ram_save_target_page(rs, pss, last_stage);
         if (tmppages < 0) {
             return tmppages;
         }
 
         pages += tmppages;
+        if (pss->block->unsentmap) {
+            clear_bit(pss->page, pss->block->unsentmap);
+        }
+
         pss->page++;
     } while ((pss->page & (pagesize_bits - 1)) &&
              offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
-- 
1.8.3.1