From b835044f9cbe56874b199c6091784cc9c51c7dd5 Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Fri, 22 Jun 2018 18:59:50 +0200
Subject: [PATCH 11/57] migration: stop decompression to allocate and free
 memory frequently

RH-Author: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-id: <20180622190005.21297-4-dgilbert@redhat.com>
Patchwork-id: 80995
O-Subject: [RHEL7.6 qemu-kvm-rhev PATCH 03/18] migration: stop decompression to allocate and free memory frequently
Bugzilla: 1584139
RH-Acked-by: Juan Quintela <quintela@redhat.com>
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>

From: Xiao Guangrong <xiaoguangrong@tencent.com>

The current code uses uncompress() to decompress memory, and that
function manages its working memory internally. As a result, large
buffers are allocated and freed very frequently; worse, frequently
returning memory to the kernel flushes TLBs.

So, maintain the memory ourselves and reuse it for each
decompression.
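
In other words, keep one z_stream per decompress thread: call
inflateInit() once at setup, inflateReset() plus inflate() for every
compressed page, and inflateEnd() only at cleanup, so zlib's window
and state buffers are allocated a single time. A minimal standalone
sketch of that reuse pattern (plain zlib; the stream_* names below are
illustrative only, not the helpers this patch adds):

    #include <stdint.h>
    #include <string.h>
    #include <zlib.h>

    /* One-time setup: allocate zlib's internal state once per thread. */
    static int stream_setup(z_stream *stream)
    {
        memset(stream, 0, sizeof(*stream));
        return inflateInit(stream) == Z_OK ? 0 : -1;
    }

    /* Per-buffer decompression: only resets state, no malloc/free. */
    static int stream_uncompress(z_stream *stream, uint8_t *dest,
                                 size_t dest_len, const uint8_t *src,
                                 size_t src_len)
    {
        if (inflateReset(stream) != Z_OK) {
            return -1;
        }
        stream->avail_in = src_len;
        stream->next_in = (uint8_t *)src;
        stream->avail_out = dest_len;
        stream->next_out = dest;
        return inflate(stream, Z_NO_FLUSH) == Z_STREAM_END
               ? (int)stream->total_out : -1;
    }

    /* Teardown: release zlib's internal buffers once, at the end. */
    static void stream_cleanup(z_stream *stream)
    {
        inflateEnd(stream);
    }

uncompress(), by contrast, performs the equivalent of the setup and
teardown steps on every single call.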

Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jiang Biao <jiang.biao2@zte.com.cn>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180330075128.26919-4-xiaoguangrong@tencent.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
(cherry picked from commit 797ca154b4c68dbd8e93382f714388ab311f672d)
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 migration/ram.c | 112 +++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 82 insertions(+), 30 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 7d3b1da..be89cd8 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -281,6 +281,7 @@ struct DecompressParam {
     void *des;
     uint8_t *compbuf;
     int len;
+    z_stream stream;
 };
 typedef struct DecompressParam DecompressParam;
 
@@ -2525,6 +2526,31 @@ void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
     }
 }
 
+/* return the size after decompression, or negative value on error */
+static int
+qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
+                     const uint8_t *source, size_t source_len)
+{
+    int err;
+
+    err = inflateReset(stream);
+    if (err != Z_OK) {
+        return -1;
+    }
+
+    stream->avail_in = source_len;
+    stream->next_in = (uint8_t *)source;
+    stream->avail_out = dest_len;
+    stream->next_out = dest;
+
+    err = inflate(stream, Z_NO_FLUSH);
+    if (err != Z_STREAM_END) {
+        return -1;
+    }
+
+    return stream->total_out;
+}
+
 static void *do_data_decompress(void *opaque)
 {
     DecompressParam *param = opaque;
@@ -2541,13 +2567,13 @@ static void *do_data_decompress(void *opaque)
             qemu_mutex_unlock(&param->mutex);
 
             pagesize = TARGET_PAGE_SIZE;
-            /* uncompress() will return failed in some case, especially
-             * when the page is dirted when doing the compression, it's
-             * not a problem because the dirty page will be retransferred
+            /* qemu_uncompress_data() will return failed in some case,
+             * especially when the page is dirtied when doing the compression,
+             * it's not a problem because the dirty page will be retransferred
              * and uncompress() won't break the data in other pages.
              */
-            uncompress((Bytef *)des, &pagesize,
-                       (const Bytef *)param->compbuf, len);
+            qemu_uncompress_data(&param->stream, des, pagesize, param->compbuf,
+                                 len);
 
             qemu_mutex_lock(&decomp_done_lock);
             param->done = true;
@@ -2582,30 +2608,6 @@ static void wait_for_decompress_done(void)
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
-static void compress_threads_load_setup(void)
-{
-    int i, thread_count;
-
-    if (!migrate_use_compression()) {
-        return;
-    }
-    thread_count = migrate_decompress_threads();
-    decompress_threads = g_new0(QemuThread, thread_count);
-    decomp_param = g_new0(DecompressParam, thread_count);
-    qemu_mutex_init(&decomp_done_lock);
-    qemu_cond_init(&decomp_done_cond);
-    for (i = 0; i < thread_count; i++) {
-        qemu_mutex_init(&decomp_param[i].mutex);
-        qemu_cond_init(&decomp_param[i].cond);
-        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
-        decomp_param[i].done = true;
-        decomp_param[i].quit = false;
-        qemu_thread_create(decompress_threads + i, "decompress",
-                           do_data_decompress, decomp_param + i,
-                           QEMU_THREAD_JOINABLE);
-    }
-}
-
 static void compress_threads_load_cleanup(void)
 {
     int i, thread_count;
@@ -2615,16 +2617,30 @@ static void compress_threads_load_cleanup(void)
     }
     thread_count = migrate_decompress_threads();
     for (i = 0; i < thread_count; i++) {
+        /*
+         * we use it as a indicator which shows if the thread is
+         * properly init'd or not
+         */
+        if (!decomp_param[i].compbuf) {
+            break;
+        }
+
         qemu_mutex_lock(&decomp_param[i].mutex);
         decomp_param[i].quit = true;
         qemu_cond_signal(&decomp_param[i].cond);
         qemu_mutex_unlock(&decomp_param[i].mutex);
     }
     for (i = 0; i < thread_count; i++) {
+        if (!decomp_param[i].compbuf) {
+            break;
+        }
+
         qemu_thread_join(decompress_threads + i);
         qemu_mutex_destroy(&decomp_param[i].mutex);
         qemu_cond_destroy(&decomp_param[i].cond);
+        inflateEnd(&decomp_param[i].stream);
         g_free(decomp_param[i].compbuf);
+        decomp_param[i].compbuf = NULL;
     }
     g_free(decompress_threads);
     g_free(decomp_param);
@@ -2632,6 +2648,39 @@ static void compress_threads_load_cleanup(void)
     decomp_param = NULL;
 }
 
+static int compress_threads_load_setup(void)
+{
+    int i, thread_count;
+
+    if (!migrate_use_compression()) {
+        return 0;
+    }
+
+    thread_count = migrate_decompress_threads();
+    decompress_threads = g_new0(QemuThread, thread_count);
+    decomp_param = g_new0(DecompressParam, thread_count);
+    qemu_mutex_init(&decomp_done_lock);
+    qemu_cond_init(&decomp_done_cond);
+    for (i = 0; i < thread_count; i++) {
+        if (inflateInit(&decomp_param[i].stream) != Z_OK) {
+            goto exit;
+        }
+
+        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
+        qemu_mutex_init(&decomp_param[i].mutex);
+        qemu_cond_init(&decomp_param[i].cond);
+        decomp_param[i].done = true;
+        decomp_param[i].quit = false;
+        qemu_thread_create(decompress_threads + i, "decompress",
+                           do_data_decompress, decomp_param + i,
+                           QEMU_THREAD_JOINABLE);
+    }
+    return 0;
+exit:
+    compress_threads_load_cleanup();
+    return -1;
+}
+
 static void decompress_data_with_multi_threads(QEMUFile *f,
                                                void *host, int len)
 {
@@ -2671,8 +2720,11 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
  */
 static int ram_load_setup(QEMUFile *f, void *opaque)
 {
+    if (compress_threads_load_setup()) {
+        return -1;
+    }
+
     xbzrle_load_setup();
-    compress_threads_load_setup();
     ramblock_recv_map_init();
     return 0;
 }
-- 
1.8.3.1