From c378bddd4b750773a7e8e4987806d08248bc239d Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Wed, 1 Aug 2018 13:55:11 +0100
Subject: [PATCH 07/21] migration: move calling control_save_page to the common
 place

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20180801135522.11658-8-dgilbert@redhat.com>
Patchwork-id: 81580
O-Subject: [qemu-kvm RHEL8/virt212 PATCH 07/18] migration: move calling control_save_page to the common place
Bugzilla: 1594384
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Juan Quintela <quintela@redhat.com>

From: Xiao Guangrong <xiaoguangrong@tencent.com>

The function is called by both ram_save_page and ram_save_compressed_page,
so move it to their common caller, ram_save_target_page, to clean up the code.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180330075128.26919-8-xiaoguangrong@tencent.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit a8ec91f941c5f83123796331c09333d3557eb5fc)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 migration/ram.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

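Note: the sketch below is a standalone, simplified illustration of the refactoring
described above, not QEMU code. MockState and the mock_* helpers are invented
stand-ins for RAMState, control_save_page() and the per-format save routines; it
only shows the shape of the change, with the fast-path check performed once in
the common caller instead of being repeated in each save path.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for RAMState; only models whether a control
     * channel (e.g. RDMA) can take over saving the page. */
    typedef struct {
        bool control_channel_active;
    } MockState;

    /* Analogue of control_save_page(): returns true when the control channel
     * handled the page itself, storing the number of pages sent in *pages. */
    static bool mock_control_save_page(MockState *s, long offset, int *pages)
    {
        (void)offset;
        if (s->control_channel_active) {
            *pages = 1;
            return true;
        }
        return false;
    }

    /* Analogues of ram_save_page()/ram_save_compressed_page(): after the
     * patch they no longer each repeat the control-channel check. */
    static int mock_save_normal_page(long offset)     { (void)offset; return 1; }
    static int mock_save_compressed_page(long offset) { (void)offset; return 1; }

    /* Analogue of ram_save_target_page(): the single common caller now owns
     * the fast-path check before dispatching to a specific save routine. */
    static int mock_save_target_page(MockState *s, long offset, bool compress)
    {
        int res;

        if (mock_control_save_page(s, offset, &res)) {
            return res;
        }
        return compress ? mock_save_compressed_page(offset)
                        : mock_save_normal_page(offset);
    }

    int main(void)
    {
        MockState s = { .control_channel_active = false };

        printf("pages sent: %d\n", mock_save_target_page(&s, 0, false));
        return 0;
    }

The point of the common placement is that the check is written once, and every
save path dispatched from ram_save_target_page inherits it.
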
diff --git a/migration/ram.c b/migration/ram.c
index 106fcf1..9d6c41c 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1038,10 +1038,6 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
     p = block->host + offset;
     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
 
-    if (control_save_page(rs, block, offset, &pages)) {
-        return pages;
-    }
-
     XBZRLE_cache_lock();
     pages = save_zero_page(rs, block, offset);
     if (pages > 0) {
@@ -1199,10 +1195,6 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
 
     p = block->host + offset;
 
-    if (control_save_page(rs, block, offset, &pages)) {
-        return pages;
-    }
-
     /* When starting the process of a new block, the first page of
      * the block should be sent out before other pages in the same
      * block, and all the pages in last block should have been sent
@@ -1490,6 +1482,14 @@ err:
 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                 bool last_stage)
 {
+    RAMBlock *block = pss->block;
+    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
+    int res;
+
+    if (control_save_page(rs, block, offset, &res)) {
+        return res;
+    }
+
     /*
      * If xbzrle is on, stop using the data compression after first
      * round of migration even if compression is enabled. In theory,
-- 
1.8.3.1