From 8e8f421cce99543081f225acf46541312cfbc371 Mon Sep 17 00:00:00 2001
From: Laurent Vivier <lvivier@redhat.com>
Date: Tue, 17 Mar 2020 17:05:18 +0000
Subject: [PATCH 1/2] migration: Rate limit inside host pages

RH-Author: Laurent Vivier <lvivier@redhat.com>
Message-id: <20200317170518.9303-1-lvivier@redhat.com>
Patchwork-id: 94374
O-Subject: [RHEL-AV-8.2.0 qemu-kvm PATCH] migration: Rate limit inside host pages
Bugzilla: 1814336
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: Juan Quintela <quintela@redhat.com>
RH-Acked-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>

When using hugepages, rate limiting is necessary within each huge
page, since a 1G huge page can take a significant time to send, so
you end up with bursty behaviour.

Fixes: 4c011c37ecb3 ("postcopy: Send whole huge pages")
Reported-by: Lin Ma <LMa@suse.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
(cherry picked from commit 97e1e06780e70f6e98a0d2df881e0c0927d3aeb6)
Signed-off-by: Laurent Vivier <lvivier@redhat.com>

BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1814336
BRANCH: rhel-av-8.2.0
UPSTREAM: Merged
BREW: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=27283241
TESTED: Tested that the migration abort doesn't trigger an error message in
        the kernel logs on P9

Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 migration/migration.c  | 57 ++++++++++++++++++++++++++++----------------------
 migration/migration.h  |  1 +
 migration/ram.c        |  2 ++
 migration/trace-events |  4 ++--
 4 files changed, 37 insertions(+), 27 deletions(-)

diff --git a/migration/migration.c b/migration/migration.c
index ed18c59..e31d0f5 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -3253,6 +3253,37 @@ void migration_consume_urgent_request(void)
     qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
 }
 
+/* Returns true if the rate limiting was broken by an urgent request */
+bool migration_rate_limit(void)
+{
+    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+    MigrationState *s = migrate_get_current();
+
+    bool urgent = false;
+    migration_update_counters(s, now);
+    if (qemu_file_rate_limit(s->to_dst_file)) {
+        /*
+         * Wait for a delay to do rate limiting OR
+         * something urgent to post the semaphore.
+         */
+        int ms = s->iteration_start_time + BUFFER_DELAY - now;
+        trace_migration_rate_limit_pre(ms);
+        if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
+            /*
+             * We were woken by one or more urgent things but
+             * the timedwait will have consumed one of them.
+             * The service routine for the urgent wake will dec
+             * the semaphore itself for each item it consumes,
+             * so add this one we just eat back.
+             */
+            qemu_sem_post(&s->rate_limit_sem);
+            urgent = true;
+        }
+        trace_migration_rate_limit_post(urgent);
+    }
+    return urgent;
+}
+
 /*
  * Master migration thread on the source VM.
  * It drives the migration and pumps the data down the outgoing channel.
@@ -3319,8 +3350,6 @@ static void *migration_thread(void *opaque)
     trace_migration_thread_setup_complete();
 
     while (migration_is_active(s)) {
-        int64_t current_time;
-
         if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
             MigIterateState iter_state = migration_iteration_run(s);
             if (iter_state == MIG_ITERATE_SKIP) {
@@ -3347,29 +3376,7 @@ static void *migration_thread(void *opaque)
             update_iteration_initial_status(s);
         }
 
-        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
-
-        migration_update_counters(s, current_time);
-
-        urgent = false;
-        if (qemu_file_rate_limit(s->to_dst_file)) {
-            /* Wait for a delay to do rate limiting OR
-             * something urgent to post the semaphore.
-             */
-            int ms = s->iteration_start_time + BUFFER_DELAY - current_time;
-            trace_migration_thread_ratelimit_pre(ms);
-            if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
-                /* We were worken by one or more urgent things but
-                 * the timedwait will have consumed one of them.
-                 * The service routine for the urgent wake will dec
-                 * the semaphore itself for each item it consumes,
-                 * so add this one we just eat back.
-                 */
-                qemu_sem_post(&s->rate_limit_sem);
-                urgent = true;
-            }
-            trace_migration_thread_ratelimit_post(urgent);
-        }
+        urgent = migration_rate_limit();
     }
 
     trace_migration_thread_after_loop();
diff --git a/migration/migration.h b/migration/migration.h
index a2b2336..a15e8d8 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -347,5 +347,6 @@ extern bool migrate_pre_2_2;
 
 void migration_make_urgent_request(void);
 void migration_consume_urgent_request(void);
+bool migration_rate_limit(void);
 
 #endif
diff --git a/migration/ram.c b/migration/ram.c
index 3891eff..5344c7d 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2661,6 +2661,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
 
         pages += tmppages;
         pss->page++;
+        /* Allow rate limiting to happen in the middle of huge pages */
+        migration_rate_limit();
     } while ((pss->page & (pagesize_bits - 1)) &&
              offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
 
diff --git a/migration/trace-events b/migration/trace-events
index 6dee7b5..2f9129e 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -138,12 +138,12 @@ migrate_send_rp_recv_bitmap(char *name, int64_t size) "block '%s' size 0x%"PRIi6
 migration_completion_file_err(void) ""
 migration_completion_postcopy_end(void) ""
 migration_completion_postcopy_end_after_complete(void) ""
+migration_rate_limit_pre(int ms) "%d ms"
+migration_rate_limit_post(int urgent) "urgent: %d"
 migration_return_path_end_before(void) ""
 migration_return_path_end_after(int rp_error) "%d"
 migration_thread_after_loop(void) ""
 migration_thread_file_err(void) ""
-migration_thread_ratelimit_pre(int ms) "%d ms"
-migration_thread_ratelimit_post(int urgent) "urgent: %d"
 migration_thread_setup_complete(void) ""
 open_return_path_on_source(void) ""
 open_return_path_on_source_continue(void) ""
-- 
1.8.3.1