From 0319afed69c1b3206fce51fd286c3351c6fd6958 Mon Sep 17 00:00:00 2001
From: "plai@redhat.com" <plai@redhat.com>
Date: Fri, 31 Aug 2018 16:25:51 +0200
Subject: [PATCH 09/29] migration: discard non-migratable RAMBlocks
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: plai@redhat.com
Message-id: <1535732759-22481-2-git-send-email-plai@redhat.com>
Patchwork-id: 82012
O-Subject: [RHEL7.6 PATCH BZ 1539280 1/9] migration: discard non-migratable RAMBlocks
Bugzilla: 1539280
RH-Acked-by: Michael S. Tsirkin <mst@redhat.com>
RH-Acked-by: Pankaj Gupta <pagupta@redhat.com>
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>

From: Cédric Le Goater <clg@kaod.org>

On the POWER9 processor, the XIVE interrupt controller can control
interrupt sources using MMIO to trigger events, to EOI or to turn off
the sources. Priority management and interrupt acknowledgment is also
controlled by MMIO in the presenter sub-engine.

These MMIO regions are exposed to guests in QEMU with a set of 'ram
device' memory mappings, similarly to VFIO, and the VMAs are populated
dynamically with the appropriate pages using a fault handler.

But, these regions are an issue for migration. We need to discard the
associated RAMBlocks from the RAM state on the source VM and let the
destination VM rebuild the memory mappings on the new host in the
post_load() operation just before resuming the system.

To achieve this goal, the following introduces a new RAMBlock flag
RAM_MIGRATABLE which is updated in the vmstate_register_ram() and
vmstate_unregister_ram() routines. This flag is then used by the
migration to identify RAMBlocks to discard on the source. Some checks
are also performed on the destination to make sure nothing invalid was
sent.

This change impacts the boston, malta and jazz mips boards for which
migration compatibility is broken.

Signed-off-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
(cherry picked from commit b895de502717b83b4e5f089df617cb23530c4d2d)
Signed-off-by: Paul Lai <plai@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 exec.c                    | 38 ++++++++++++++++++++++++++++++++++++++
 include/exec/cpu-common.h |  4 ++++
 migration/postcopy-ram.c  | 12 ++++++------
 migration/ram.c           | 46 ++++++++++++++++++++++++++++++++++------------
 migration/savevm.c        |  2 ++
 5 files changed, 84 insertions(+), 18 deletions(-)

diff --git a/exec.c b/exec.c
index 02b1efe..7323d39 100644
--- a/exec.c
+++ b/exec.c
@@ -104,6 +104,9 @@ static MemoryRegion io_mem_unassigned;
  * (Set during postcopy)
  */
 #define RAM_UF_ZEROPAGE (1 << 3)
+
+/* RAM can be migrated */
+#define RAM_MIGRATABLE (1 << 4)
 #endif
 
 #ifdef TARGET_PAGE_BITS_VARY
@@ -1807,6 +1810,21 @@ void qemu_ram_set_uf_zeroable(RAMBlock *rb)
     rb->flags |= RAM_UF_ZEROPAGE;
 }
 
+bool qemu_ram_is_migratable(RAMBlock *rb)
+{
+    return rb->flags & RAM_MIGRATABLE;
+}
+
+void qemu_ram_set_migratable(RAMBlock *rb)
+{
+    rb->flags |= RAM_MIGRATABLE;
+}
+
+void qemu_ram_unset_migratable(RAMBlock *rb)
+{
+    rb->flags &= ~RAM_MIGRATABLE;
+}
+
 /* Called with iothread lock held. */
 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
 {
@@ -3750,6 +3768,26 @@ int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
     return ret;
 }
 
+int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque)
+{
+    RAMBlock *block;
+    int ret = 0;
+
+    rcu_read_lock();
+    RAMBLOCK_FOREACH(block) {
+        if (!qemu_ram_is_migratable(block)) {
+            continue;
+        }
+        ret = func(block->idstr, block->host, block->offset,
+                   block->used_length, opaque);
+        if (ret) {
+            break;
+        }
+    }
+    rcu_read_unlock();
+    return ret;
+}
+
 /*
  * Unmap pages of memory from start to start+length such that
  * they a) read as 0, b) Trigger whatever fault mechanism
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 24d335f..0b58e26 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -75,6 +75,9 @@ const char *qemu_ram_get_idstr(RAMBlock *rb);
 bool qemu_ram_is_shared(RAMBlock *rb);
 bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
 void qemu_ram_set_uf_zeroable(RAMBlock *rb);
+bool qemu_ram_is_migratable(RAMBlock *rb);
+void qemu_ram_set_migratable(RAMBlock *rb);
+void qemu_ram_unset_migratable(RAMBlock *rb);
 
 size_t qemu_ram_pagesize(RAMBlock *block);
 size_t qemu_ram_pagesize_largest(void);
@@ -119,6 +122,7 @@ typedef int (RAMBlockIterFunc)(const char *block_name, void *host_addr,
     ram_addr_t offset, ram_addr_t length, void *opaque);
 
 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
+int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque);
 int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
 
 #endif
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 4a0b33b..001b041 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -264,7 +264,7 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
     }
 
     /* We don't support postcopy with shared RAM yet */
-    if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
+    if (qemu_ram_foreach_migratable_block(test_ramblock_postcopiable, NULL)) {
         goto out;
     }
 
@@ -392,7 +392,7 @@ static int cleanup_range(const char *block_name, void *host_addr,
  */
 int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
 {
-    if (qemu_ram_foreach_block(init_range, NULL)) {
+    if (qemu_ram_foreach_migratable_block(init_range, NULL)) {
         return -1;
     }
 
@@ -414,7 +414,7 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
             return -1;
         }
 
-        if (qemu_ram_foreach_block(cleanup_range, mis)) {
+        if (qemu_ram_foreach_migratable_block(cleanup_range, mis)) {
             return -1;
         }
         /* Let the fault thread quit */
@@ -480,7 +480,7 @@ static int nhp_range(const char *block_name, void *host_addr,
  */
 int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
 {
-    if (qemu_ram_foreach_block(nhp_range, mis)) {
+    if (qemu_ram_foreach_migratable_block(nhp_range, mis)) {
         return -1;
     }
 
@@ -491,7 +491,7 @@ int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
 
 /*
  * Mark the given area of RAM as requiring notification to unwritten areas
- * Used as a callback on qemu_ram_foreach_block.
+ * Used as a callback on qemu_ram_foreach_migratable_block.
  * host_addr: Base of area to mark
  * offset: Offset in the whole ram arena
  * length: Length of the section
@@ -793,7 +793,7 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
     mis->have_fault_thread = true;
 
     /* Mark so that we get notified of accesses to unwritten areas */
-    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
+    if (qemu_ram_foreach_migratable_block(ram_block_enable_notify, mis)) {
         return -1;
     }
 
diff --git a/migration/ram.c b/migration/ram.c
index bd563b5..04b5df5 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -153,11 +153,16 @@ out:
     return ret;
 }
 
+/* Should be holding either ram_list.mutex, or the RCU lock. */
+#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
+    RAMBLOCK_FOREACH(block) \
+        if (!qemu_ram_is_migratable(block)) {} else
+
 static void ramblock_recv_map_init(void)
 {
     RAMBlock *rb;
 
-    RAMBLOCK_FOREACH(rb) {
+    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
         assert(!rb->receivedmap);
         rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
     }
@@ -813,6 +818,10 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
     unsigned long *bitmap = rb->bmap;
     unsigned long next;
 
+    if (!qemu_ram_is_migratable(rb)) {
+        return size;
+    }
+
     if (rs->ram_bulk_stage && start > 0) {
         next = start + 1;
     } else {
@@ -858,7 +867,7 @@ uint64_t ram_pagesize_summary(void)
     RAMBlock *block;
     uint64_t summary = 0;
 
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         summary |= block->page_size;
     }
 
@@ -882,7 +891,7 @@ static void migration_bitmap_sync(RAMState *rs)
 
     qemu_mutex_lock(&rs->bitmap_mutex);
     rcu_read_lock();
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         migration_bitmap_sync_range(rs, block, 0, block->used_length);
     }
     ram_counters.remaining = ram_bytes_remaining();
@@ -1522,6 +1531,11 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
     size_t pagesize_bits =
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
 
+    if (!qemu_ram_is_migratable(pss->block)) {
+        error_report("block %s should not be migrated !", pss->block->idstr);
+        return 0;
+    }
+
     do {
         /* Check the pages is dirty and if it is send it */
         if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
@@ -1620,7 +1634,7 @@ uint64_t ram_bytes_total(void)
     uint64_t total = 0;
 
     rcu_read_lock();
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         total += block->used_length;
     }
     rcu_read_unlock();
@@ -1675,7 +1689,7 @@ static void ram_save_cleanup(void *opaque)
      */
     memory_global_dirty_log_stop();
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         g_free(block->bmap);
         block->bmap = NULL;
         g_free(block->unsentmap);
@@ -1738,7 +1752,7 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
 {
     struct RAMBlock *block;
 
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         unsigned long *bitmap = block->bmap;
         unsigned long range = block->used_length >> TARGET_PAGE_BITS;
         unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
@@ -1816,7 +1830,7 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
     struct RAMBlock *block;
     int ret;
 
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         PostcopyDiscardState *pds =
             postcopy_discard_send_init(ms, block->idstr);
 
@@ -2024,7 +2038,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     rs->last_sent_block = NULL;
     rs->last_page = 0;
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
         unsigned long *bitmap = block->bmap;
         unsigned long *unsentmap = block->unsentmap;
@@ -2183,7 +2197,7 @@ static void ram_list_init_bitmaps(void)
 
     /* Skip setting bitmap if there is no RAM */
     if (ram_bytes_total()) {
-        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        RAMBLOCK_FOREACH_MIGRATABLE(block) {
             pages = block->max_length >> TARGET_PAGE_BITS;
             block->bmap = bitmap_new(pages);
             bitmap_set(block->bmap, 0, pages);
@@ -2264,7 +2278,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
 
     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
 
-    RAMBLOCK_FOREACH(block) {
+    RAMBLOCK_FOREACH_MIGRATABLE(block) {
         qemu_put_byte(f, strlen(block->idstr));
         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
         qemu_put_be64(f, block->used_length);
@@ -2508,6 +2522,11 @@ static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
         return NULL;
     }
 
+    if (!qemu_ram_is_migratable(block)) {
+        error_report("block %s should not be migrated !", id);
+        return NULL;
+    }
+
     return block;
 }
 
@@ -2750,7 +2769,7 @@ static int ram_load_cleanup(void *opaque)
     xbzrle_load_cleanup();
     compress_threads_load_cleanup();
 
-    RAMBLOCK_FOREACH(rb) {
+    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
         g_free(rb->receivedmap);
         rb->receivedmap = NULL;
     }
@@ -3012,7 +3031,10 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                 length = qemu_get_be64(f);
 
                 block = qemu_ram_block_by_name(id);
-                if (block) {
+                if (block && !qemu_ram_is_migratable(block)) {
+                    error_report("block %s should not be migrated !", id);
+                    ret = -EINVAL;
+                } else if (block) {
                     if (length != block->used_length) {
                         Error *local_err = NULL;
 
diff --git a/migration/savevm.c b/migration/savevm.c
index 56c9feb..b975d3a 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2510,11 +2510,13 @@ void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
 {
     qemu_ram_set_idstr(mr->ram_block,
                        memory_region_name(mr), dev);
+    qemu_ram_set_migratable(mr->ram_block);
 }
 
 void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
 {
     qemu_ram_unset_idstr(mr->ram_block);
+    qemu_ram_unset_migratable(mr->ram_block);
 }
 
 void vmstate_register_ram_global(MemoryRegion *mr)
-- 
1.8.3.1
