SOURCES/kvm-memory-syncronize-kvm-bitmap-using-bitmaps-operation.patch

From c18e8f4bf7a628949f0d79facf91ddf6d07401e9 Mon Sep 17 00:00:00 2001
From: Juan Quintela <quintela@redhat.com>
Date: Tue, 14 Jan 2014 15:07:49 +0100
Subject: [PATCH 38/40] memory: syncronize kvm bitmap using bitmaps operations

RH-Author: Juan Quintela <quintela@redhat.com>
Message-id: <1389712071-23303-39-git-send-email-quintela@redhat.com>
Patchwork-id: 56693
O-Subject: [RHEL7 qemu-kvm PATCH 38/40] memory: syncronize kvm bitmap using bitmaps operations
Bugzilla: 997559
RH-Acked-by: Paolo Bonzini <pbonzini@redhat.com>
RH-Acked-by: Orit Wasserman <owasserm@redhat.com>
RH-Acked-by: Dr. David Alan Gilbert (git) <dgilbert@redhat.com>

If the bitmaps are properly aligned, use bitmap operations.  If they are
not, fall back to the old bit-at-a-time code.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Orit Wasserman <owasserm@redhat.com>
(cherry picked from commit ae2810c4bb3b383176e8e1b33931b16c01483aab)
Signed-off-by: Juan Quintela <quintela@trasno.org>
---
 include/exec/ram_addr.h | 54 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 18 deletions(-)

Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 include/exec/ram_addr.h |   54 +++++++++++++++++++++++++++++++---------------
 1 files changed, 36 insertions(+), 18 deletions(-)
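The hunk below adds a fast path that ORs whole words of the KVM dirty bitmap
into the destination dirty bitmaps when the start address is word aligned, and
keeps the old bit-at-a-time walk as the fallback.  As a rough, self-contained
illustration of that idea (this is not QEMU code and not part of the patch;
sync_dirty_bitmap(), dst and src are invented names, and byte swapping of the
source words is skipped by assuming a little-endian host):

#define _GNU_SOURCE            /* for ffsl() in <string.h> on glibc */
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* OR the source bitmap into dst, starting at bit offset start_bit. */
static void sync_dirty_bitmap(unsigned long *dst, const unsigned long *src,
                              unsigned long start_bit, unsigned long nbits)
{
    unsigned long nwords = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
    unsigned long i;

    if (start_bit % BITS_PER_LONG == 0) {
        /* Fast path: destination is word aligned, OR whole words at once. */
        unsigned long word = start_bit / BITS_PER_LONG;

        for (i = 0; i < nwords; i++) {
            dst[word + i] |= src[i];
        }
    } else {
        /* Slow path: visit each set bit individually, as the old code did. */
        for (i = 0; i < nwords; i++) {
            unsigned long c = src[i];

            while (c != 0) {
                int j = ffsl(c) - 1;
                unsigned long bit = start_bit + i * BITS_PER_LONG + j;

                c &= ~(1ul << j);
                dst[bit / BITS_PER_LONG] |= 1ul << (bit % BITS_PER_LONG);
            }
        }
    }
}

int main(void)
{
    unsigned long dst[4] = { 0 };
    unsigned long src[2] = { 0x5, 0x80 };

    sync_dirty_bitmap(dst, src, 0, 2 * BITS_PER_LONG);  /* aligned path   */
    sync_dirty_bitmap(dst, src, 3, 2 * BITS_PER_LONG);  /* unaligned path */
    printf("%lx %lx %lx\n", dst[0], dst[1], dst[2]);
    return 0;
}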
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 9962e12..080a8b1 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -82,29 +82,47 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                           ram_addr_t start,
                                                           ram_addr_t pages)
 {
-    unsigned int i, j;
+    unsigned long i, j;
     unsigned long page_number, c;
     hwaddr addr;
     ram_addr_t ram_addr;
-    unsigned int len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
+    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
     unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
+    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
 
-    /*
-     * bitmap-traveling is faster than memory-traveling (for addr...)
-     * especially when most of the memory is not dirty.
-     */
-    for (i = 0; i < len; i++) {
-        if (bitmap[i] != 0) {
-            c = leul_to_cpu(bitmap[i]);
-            do {
-                j = ffsl(c) - 1;
-                c &= ~(1ul << j);
-                page_number = (i * HOST_LONG_BITS + j) * hpratio;
-                addr = page_number * TARGET_PAGE_SIZE;
-                ram_addr = start + addr;
-                cpu_physical_memory_set_dirty_range(ram_addr,
-                                                    TARGET_PAGE_SIZE * hpratio);
-            } while (c != 0);
+    /* start address is aligned at the start of a word? */
+    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
+        long k;
+        long nr = BITS_TO_LONGS(pages);
+
+        for (k = 0; k < nr; k++) {
+            if (bitmap[k]) {
+                unsigned long temp = leul_to_cpu(bitmap[k]);
+
+                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
+                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
+                ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
+            }
+        }
+        xen_modified_memory(start, pages);
+    } else {
+        /*
+         * bitmap-traveling is faster than memory-traveling (for addr...)
+         * especially when most of the memory is not dirty.
+         */
+        for (i = 0; i < len; i++) {
+            if (bitmap[i] != 0) {
+                c = leul_to_cpu(bitmap[i]);
+                do {
+                    j = ffsl(c) - 1;
+                    c &= ~(1ul << j);
+                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
+                    addr = page_number * TARGET_PAGE_SIZE;
+                    ram_addr = start + addr;
+                    cpu_physical_memory_set_dirty_range(ram_addr,
+                                       TARGET_PAGE_SIZE * hpratio);
+                } while (c != 0);
+            }
         }
     }
 }
-- 
1.7.1