26ba25
From aa3254bca93fb1702f0aa236b70d705ee8bf121c Mon Sep 17 00:00:00 2001
26ba25
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
26ba25
Date: Wed, 1 Aug 2018 13:55:08 +0100
26ba25
Subject: [PATCH 04/21] migration: detect compression and decompression errors
26ba25
26ba25
RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
26ba25
Message-id: <20180801135522.11658-5-dgilbert@redhat.com>
26ba25
Patchwork-id: 81583
26ba25
O-Subject: [qemu-kvm RHEL8/virt212 PATCH 04/18] migration: detect compression and decompression errors
26ba25
Bugzilla: 1594384
26ba25
RH-Acked-by: Peter Xu <peterx@redhat.com>
26ba25
RH-Acked-by: John Snow <jsnow@redhat.com>
26ba25
RH-Acked-by: Juan Quintela <quintela@redhat.com>
26ba25
26ba25
From: Xiao Guangrong <xiaoguangrong@tencent.com>
26ba25
26ba25
Currently the page being compressed is allowed to be updated by
26ba25
the VM on the source QEMU, correspondingly the destination QEMU
26ba25
just ignores the decompression error. However, we completely miss
26ba25
the chance to catch real errors, then the VM is corrupted silently
26ba25
26ba25
To make the migration more robust, we copy the page to a buffer
26ba25
first to avoid it being written by the VM, then detect and handle the
26ba25
errors of both compression and decompression properly
26ba25
26ba25
Reviewed-by: Peter Xu <peterx@redhat.com>
26ba25
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
26ba25
Message-Id: <20180330075128.26919-5-xiaoguangrong@tencent.com>
26ba25
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
26ba25
(cherry picked from commit 34ab9e9743aeaf265929d930747f101fa5c76fea)
26ba25
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
26ba25
---
26ba25
 migration/qemu-file.c |  4 ++--
26ba25
 migration/ram.c       | 56 +++++++++++++++++++++++++++++++++++----------------
26ba25
 2 files changed, 41 insertions(+), 19 deletions(-)
26ba25
26ba25
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
26ba25
index bafe3a0..0463f4c 100644
26ba25
--- a/migration/qemu-file.c
26ba25
+++ b/migration/qemu-file.c
26ba25
@@ -710,9 +710,9 @@ ssize_t qemu_put_compression_data(QEMUFile *f, z_stream *stream,
26ba25
     blen = qemu_compress_data(stream, f->buf + f->buf_index + sizeof(int32_t),
26ba25
                               blen, p, size);
26ba25
     if (blen < 0) {
26ba25
-        error_report("Compress Failed!");
26ba25
-        return 0;
26ba25
+        return -1;
26ba25
     }
26ba25
+
26ba25
     qemu_put_be32(f, blen);
26ba25
     if (f->ops->writev_buffer) {
26ba25
         add_to_iovec(f, f->buf + f->buf_index, blen, false);
26ba25
diff --git a/migration/ram.c b/migration/ram.c
26ba25
index be89cd8..cd6d98a 100644
26ba25
--- a/migration/ram.c
26ba25
+++ b/migration/ram.c
26ba25
@@ -269,7 +269,10 @@ struct CompressParam {
26ba25
     QemuCond cond;
26ba25
     RAMBlock *block;
26ba25
     ram_addr_t offset;
26ba25
+
26ba25
+    /* internally used fields */
26ba25
     z_stream stream;
26ba25
+    uint8_t *originbuf;
26ba25
 };
26ba25
 typedef struct CompressParam CompressParam;
26ba25
 
26ba25
@@ -296,13 +299,14 @@ static QemuCond comp_done_cond;
26ba25
 /* The empty QEMUFileOps will be used by file in CompressParam */
26ba25
 static const QEMUFileOps empty_ops = { };
26ba25
 
26ba25
+static QEMUFile *decomp_file;
26ba25
 static DecompressParam *decomp_param;
26ba25
 static QemuThread *decompress_threads;
26ba25
 static QemuMutex decomp_done_lock;
26ba25
 static QemuCond decomp_done_cond;
26ba25
 
26ba25
 static int do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
26ba25
-                                ram_addr_t offset);
26ba25
+                                ram_addr_t offset, uint8_t *source_buf);
26ba25
 
26ba25
 static void *do_data_compress(void *opaque)
26ba25
 {
26ba25
@@ -318,7 +322,8 @@ static void *do_data_compress(void *opaque)
26ba25
             param->block = NULL;
26ba25
             qemu_mutex_unlock(&param->mutex);
26ba25
 
26ba25
-            do_compress_ram_page(param->file, &param->stream, block, offset);
26ba25
+            do_compress_ram_page(param->file, &param->stream, block, offset,
26ba25
+                                 param->originbuf);
26ba25
 
26ba25
             qemu_mutex_lock(&comp_done_lock);
26ba25
             param->done = true;
26ba25
@@ -370,6 +375,7 @@ static void compress_threads_save_cleanup(void)
26ba25
         qemu_mutex_destroy(&comp_param[i].mutex);
26ba25
         qemu_cond_destroy(&comp_param[i].cond);
26ba25
         deflateEnd(&comp_param[i].stream);
26ba25
+        g_free(comp_param[i].originbuf);
26ba25
         qemu_fclose(comp_param[i].file);
26ba25
         comp_param[i].file = NULL;
26ba25
     }
26ba25
@@ -394,8 +400,14 @@ static int compress_threads_save_setup(void)
26ba25
     qemu_cond_init(&comp_done_cond);
26ba25
     qemu_mutex_init(&comp_done_lock);
26ba25
     for (i = 0; i < thread_count; i++) {
26ba25
+        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
26ba25
+        if (!comp_param[i].originbuf) {
26ba25
+            goto exit;
26ba25
+        }
26ba25
+
26ba25
         if (deflateInit(&comp_param[i].stream,
26ba25
                         migrate_compress_level()) != Z_OK) {
26ba25
+            g_free(comp_param[i].originbuf);
26ba25
             goto exit;
26ba25
         }
26ba25
 
26ba25
@@ -1054,7 +1066,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
26ba25
 }
26ba25
 
26ba25
 static int do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
26ba25
-                                ram_addr_t offset)
26ba25
+                                ram_addr_t offset, uint8_t *source_buf)
26ba25
 {
26ba25
     RAMState *rs = ram_state;
26ba25
     int bytes_sent, blen;
26ba25
@@ -1062,7 +1074,14 @@ static int do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
26ba25
 
26ba25
     bytes_sent = save_page_header(rs, f, block, offset |
26ba25
                                   RAM_SAVE_FLAG_COMPRESS_PAGE);
26ba25
-    blen = qemu_put_compression_data(f, stream, p, TARGET_PAGE_SIZE);
26ba25
+
26ba25
+    /*
26ba25
+     * copy it to a internal buffer to avoid it being modified by VM
26ba25
+     * so that we can catch up the error during compression and
26ba25
+     * decompression
26ba25
+     */
26ba25
+    memcpy(source_buf, p, TARGET_PAGE_SIZE);
26ba25
+    blen = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
26ba25
     if (blen < 0) {
26ba25
         bytes_sent = 0;
26ba25
         qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
26ba25
@@ -2556,7 +2575,7 @@ static void *do_data_decompress(void *opaque)
26ba25
     DecompressParam *param = opaque;
26ba25
     unsigned long pagesize;
26ba25
     uint8_t *des;
26ba25
-    int len;
26ba25
+    int len, ret;
26ba25
 
26ba25
     qemu_mutex_lock(&param->mutex);
26ba25
     while (!param->quit) {
26ba25
@@ -2567,13 +2586,13 @@ static void *do_data_decompress(void *opaque)
26ba25
             qemu_mutex_unlock(&param->mutex);
26ba25
 
26ba25
             pagesize = TARGET_PAGE_SIZE;
26ba25
-            /* qemu_uncompress_data() will return failed in some case,
26ba25
-             * especially when the page is dirtied when doing the compression,
26ba25
-             * it's not a problem because the dirty page will be retransferred
26ba25
-             * and uncompress() won't break the data in other pages.
26ba25
-             */
26ba25
-            qemu_uncompress_data(&param->stream, des, pagesize, param->compbuf,
26ba25
-                                 len);
26ba25
+
26ba25
+            ret = qemu_uncompress_data(&param->stream, des, pagesize,
26ba25
+                                       param->compbuf, len);
26ba25
+            if (ret < 0) {
26ba25
+                error_report("decompress data failed");
26ba25
+                qemu_file_set_error(decomp_file, ret);
26ba25
+            }
26ba25
 
26ba25
             qemu_mutex_lock(&decomp_done_lock);
26ba25
             param->done = true;
26ba25
@@ -2590,12 +2609,12 @@ static void *do_data_decompress(void *opaque)
26ba25
     return NULL;
26ba25
 }
26ba25
 
26ba25
-static void wait_for_decompress_done(void)
26ba25
+static int wait_for_decompress_done(void)
26ba25
 {
26ba25
     int idx, thread_count;
26ba25
 
26ba25
     if (!migrate_use_compression()) {
26ba25
-        return;
26ba25
+        return 0;
26ba25
     }
26ba25
 
26ba25
     thread_count = migrate_decompress_threads();
26ba25
@@ -2606,6 +2625,7 @@ static void wait_for_decompress_done(void)
26ba25
         }
26ba25
     }
26ba25
     qemu_mutex_unlock(&decomp_done_lock);
26ba25
+    return qemu_file_get_error(decomp_file);
26ba25
 }
26ba25
 
26ba25
 static void compress_threads_load_cleanup(void)
26ba25
@@ -2646,9 +2666,10 @@ static void compress_threads_load_cleanup(void)
26ba25
     g_free(decomp_param);
26ba25
     decompress_threads = NULL;
26ba25
     decomp_param = NULL;
26ba25
+    decomp_file = NULL;
26ba25
 }
26ba25
 
26ba25
-static int compress_threads_load_setup(void)
26ba25
+static int compress_threads_load_setup(QEMUFile *f)
26ba25
 {
26ba25
     int i, thread_count;
26ba25
 
26ba25
@@ -2661,6 +2682,7 @@ static int compress_threads_load_setup(void)
26ba25
     decomp_param = g_new0(DecompressParam, thread_count);
26ba25
     qemu_mutex_init(&decomp_done_lock);
26ba25
     qemu_cond_init(&decomp_done_cond);
26ba25
+    decomp_file = f;
26ba25
     for (i = 0; i < thread_count; i++) {
26ba25
         if (inflateInit(&decomp_param[i].stream) != Z_OK) {
26ba25
             goto exit;
26ba25
@@ -2720,7 +2742,7 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
26ba25
  */
26ba25
 static int ram_load_setup(QEMUFile *f, void *opaque)
26ba25
 {
26ba25
-    if (compress_threads_load_setup()) {
26ba25
+    if (compress_threads_load_setup(f)) {
26ba25
         return -1;
26ba25
     }
26ba25
 
26ba25
@@ -3075,7 +3097,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
26ba25
         }
26ba25
     }
26ba25
 
26ba25
-    wait_for_decompress_done();
26ba25
+    ret |= wait_for_decompress_done();
26ba25
     rcu_read_unlock();
26ba25
     trace_ram_load_complete(ret, seq_iter);
26ba25
     return ret;
26ba25
-- 
26ba25
1.8.3.1
26ba25