Blame SOURCES/kvm-qcow2-Discard-VM-state-in-active-L1-after-creating-s.patch

From 1ebfe2d9807a9ba4bfbec3104c6c8df50c89414d Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Fri, 29 May 2015 17:05:12 +0200
Subject: [PATCH 2/8] qcow2: Discard VM state in active L1 after creating
 snapshot

Message-id: <1432919112-18076-3-git-send-email-kwolf@redhat.com>
Patchwork-id: 65149
O-Subject: [RHEL-7.2 qemu-kvm PATCH 2/2] qcow2: Discard VM state in active L1 after creating snapshot
Bugzilla: 1208808
RH-Acked-by: Fam Zheng <famz@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>

During savevm, the VM state is written to the active L1 of the image and
then a snapshot is taken. After that, the VM state isn't needed any more
in the active L1 and should be discarded. This is implemented by this
patch.

The impact of not discarding the VM state is that a snapshot can never
become smaller than any previous snapshot (because it would be padded
with old VM state), and more importantly that future savevm operations
cause unnecessary COWs (with associated flushes), which makes subsequent
snapshots much slower.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit 1ebf561c11302f4fbe4afdd82758fe053cf1d5fc)
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>

Conflicts:
	block/qcow2.h

Context-only conflict in qcow2.h.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/qcow2-snapshot.c | 7 +++++++
 block/qcow2.c          | 5 -----
 block/qcow2.h          | 5 +++++
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c
index 84481be..6081482 100644
--- a/block/qcow2-snapshot.c
+++ b/block/qcow2-snapshot.c
@@ -407,6 +407,13 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
 
     g_free(old_snapshot_list);
 
+    /* The VM state isn't needed any more in the active L1 table; in fact, it
+     * hurts by causing expensive COW for the next snapshot. */
+    qcow2_discard_clusters(bs, qcow2_vm_state_offset(s),
+                           align_offset(sn->vm_state_size, s->cluster_size)
+                                >> BDRV_SECTOR_BITS,
+                           QCOW2_DISCARD_NEVER);
+
 #ifdef DEBUG_ALLOC
     {
       BdrvCheckResult result = {0};
diff --git a/block/qcow2.c b/block/qcow2.c
index babcb4b..6026f8a 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -2036,11 +2036,6 @@ static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
     return 0;
 }
 
-static int64_t qcow2_vm_state_offset(BDRVQcowState *s)
-{
-	return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
-}
-
 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
 {
     BDRVQcowState *s = bs->opaque;
diff --git a/block/qcow2.h b/block/qcow2.h
index 9ad8aad..e958ab4 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -417,6 +417,11 @@ static inline int64_t align_offset(int64_t offset, int n)
     return offset;
 }
 
+static inline int64_t qcow2_vm_state_offset(BDRVQcowState *s)
+{
+    return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
+}
+
 static inline uint64_t qcow2_max_refcount_clusters(BDRVQcowState *s)
 {
     return QCOW_MAX_REFTABLE_SIZE >> s->cluster_bits;
-- 
1.8.3.1