From 35790a371aa43b6cc357bc78ee07dd20db16ec4b Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Wed, 1 Aug 2018 13:55:10 +0100
Subject: [PATCH 06/21] migration: move some code to ram_save_host_page

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20180801135522.11658-7-dgilbert@redhat.com>
Patchwork-id: 81570
O-Subject: [qemu-kvm RHEL8/virt212 PATCH 06/18] migration: move some code to ram_save_host_page
Bugzilla: 1594384
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: John Snow <jsnow@redhat.com>
RH-Acked-by: Juan Quintela <quintela@redhat.com>

From: Xiao Guangrong <xiaoguangrong@tencent.com>

Move some code from ram_save_target_page() to ram_save_host_page()
to make it more readable for later patches that dramatically clean
up ram_save_target_page().

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180330075128.26919-7-xiaoguangrong@tencent.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 1faa5665c0f1df2eff291454a3a85625a3bc93dd)
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 migration/ram.c | 43 +++++++++++++++++++------------------------
 1 file changed, 19 insertions(+), 24 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 8dc98a5..106fcf1 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1484,38 +1484,23 @@ err:
  * Returns the number of pages written
  *
  * @rs: current RAM state
- * @ms: current migration state
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
  */
 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                 bool last_stage)
 {
-    int res = 0;
-
-    /* Check the pages is dirty and if it is send it */
-    if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
-        /*
-         * If xbzrle is on, stop using the data compression after first
-         * round of migration even if compression is enabled. In theory,
-         * xbzrle can do better than compression.
-         */
-        if (migrate_use_compression() &&
-            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
-            res = ram_save_compressed_page(rs, pss, last_stage);
-        } else {
-            res = ram_save_page(rs, pss, last_stage);
-        }
-
-        if (res < 0) {
-            return res;
-        }
-        if (pss->block->unsentmap) {
-            clear_bit(pss->page, pss->block->unsentmap);
-        }
+    /*
+     * If xbzrle is on, stop using the data compression after first
+     * round of migration even if compression is enabled. In theory,
+     * xbzrle can do better than compression.
+     */
+    if (migrate_use_compression() &&
+        (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
+        return ram_save_compressed_page(rs, pss, last_stage);
     }
 
-    return res;
+    return ram_save_page(rs, pss, last_stage);
 }
 
 /**
@@ -1544,12 +1529,22 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
 
     do {
+        /* Check the pages is dirty and if it is send it */
+        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
+            pss->page++;
+            continue;
+        }
+
         tmppages = ram_save_target_page(rs, pss, last_stage);
         if (tmppages < 0) {
             return tmppages;
         }
 
         pages += tmppages;
+        if (pss->block->unsentmap) {
+            clear_bit(pss->page, pss->block->unsentmap);
+        }
+
         pss->page++;
     } while ((pss->page & (pagesize_bits - 1)) &&
              offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
-- 
1.8.3.1
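
Editor's note: for quick reference, below is a condensed sketch of how the two
functions read once this patch is applied, assembled from the hunks above. It
is an excerpt of QEMU's migration/ram.c at this point in the series, not a
standalone, compilable example; all identifiers come from that file.

/*
 * After the move, the dirty-bitmap check and the unsentmap update live in
 * ram_save_host_page()'s loop, so ram_save_target_page() only decides how
 * to send a page that is already known to be dirty.
 */
static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                bool last_stage)
{
    /* Compress only in the bulk stage, or when xbzrle is not in use. */
    if (migrate_use_compression() &&
        (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
        return ram_save_compressed_page(rs, pss, last_stage);
    }

    return ram_save_page(rs, pss, last_stage);
}

/* Inner loop of ram_save_host_page() after the move: */
    do {
        /* Skip pages that are not dirty. */
        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
            pss->page++;
            continue;
        }

        tmppages = ram_save_target_page(rs, pss, last_stage);
        if (tmppages < 0) {
            return tmppages;
        }

        pages += tmppages;
        /* The page has been sent, so drop it from the unsent map. */
        if (pss->block->unsentmap) {
            clear_bit(pss->page, pss->block->unsentmap);
        }

        pss->page++;
    } while ((pss->page & (pagesize_bits - 1)) &&
             offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));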