From 0e125906f0e8bd7015569e7c76b687e4aacb3cca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= <marcandre.lureau@redhat.com>
Date: Wed, 13 Dec 2017 13:39:05 +0100
Subject: [PATCH 34/41] scripts/dump-guest-memory.py: Make methods functions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-id: <20171213133912.26176-35-marcandre.lureau@redhat.com>
Patchwork-id: 78384
O-Subject: [RHEL-7.5 qemu-kvm PATCH v3 34/41] scripts/dump-guest-memory.py: Make methods functions
Bugzilla: 1411490
RH-Acked-by: Laszlo Ersek <lersek@redhat.com>
RH-Acked-by: Michael S. Tsirkin <mst@redhat.com>
RH-Acked-by: Miroslav Rezanina <mrezanin@redhat.com>

From: Janosch Frank <frankja@linux.vnet.ibm.com>

The functions dealing with qemu components rarely used parts of the
class, so they were moved out of the class.

As the uintptr_t variable is needed both within and outside the class,
it was made a constant and moved to the top.

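In short, the new layout looks like this (a condensed sketch: names are
taken from the diff below, the remaining helpers are elided, and it only
runs inside gdb's embedded Python with QEMU symbols loaded, which is the
same assumption the real script makes):

    import gdb

    # needed both inside and outside the class, so it becomes a module constant
    UINTPTR_T = gdb.lookup_type("uintptr_t")

    def int128_get64(val):
        # formerly a method; it never used any DumpGuestMemory state
        assert val["hi"] == 0
        return val["lo"]

    class DumpGuestMemory(gdb.Command):
        def __init__(self):
            super(DumpGuestMemory, self).__init__("dump-guest-memory",
                                                  gdb.COMMAND_DATA,
                                                  gdb.COMPLETE_FILENAME)
            # no per-instance gdb.lookup_type() any more; callers use the
            # module-level helpers and UINTPTR_T directly
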
Reviewed-by: Laszlo Ersek <lersek@redhat.com>
Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
Message-Id: <1453464520-3882-3-git-send-email-frankja@linux.vnet.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

(cherry picked from commit 47890203842de8b29716bdffb406ca851e70829d)

RHEL: conflicts due to qtailq->qlist, used_length->length

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Signed-off-by: Miroslav Rezanina <mrezanin@redhat.com>
---
 scripts/dump-guest-memory.py | 184 ++++++++++++++++++++++---------------------
 1 file changed, 93 insertions(+), 91 deletions(-)

diff --git a/scripts/dump-guest-memory.py b/scripts/dump-guest-memory.py
index 29f7c5b..7d93d86 100644
--- a/scripts/dump-guest-memory.py
+++ b/scripts/dump-guest-memory.py
@@ -17,6 +17,8 @@
 
 import struct
 
+UINTPTR_T = gdb.lookup_type("uintptr_t")
+
 TARGET_PAGE_SIZE = 0x1000
 TARGET_PAGE_MASK = 0xFFFFFFFFFFFFF000
 
@@ -66,6 +68,94 @@ ELF64_PHDR = ("I"  # p_type
               "Q"  # p_align
           )
 
+def int128_get64(val):
+    assert (val["hi"] == 0)
+    return val["lo"]
+
+def qtailq_foreach(head, field_str):
+    var_p = head["tqh_first"]
+    while (var_p != 0):
+        var = var_p.dereference()
+        yield var
+        var_p = var[field_str]["tqe_next"]
+
+def qemu_get_ram_block(ram_addr):
+    ram_blocks = gdb.parse_and_eval("ram_list.blocks")
+    for block in qtailq_foreach(ram_blocks, "next"):
+        if (ram_addr - block["offset"] < block["length"]):
+            return block
+    raise gdb.GdbError("Bad ram offset %x" % ram_addr)
+
+def qemu_get_ram_ptr(ram_addr):
+    block = qemu_get_ram_block(ram_addr)
+    return block["host"] + (ram_addr - block["offset"])
+
+def memory_region_get_ram_ptr(mr):
+    if (mr["alias"] != 0):
+        return (memory_region_get_ram_ptr(mr["alias"].dereference()) +
+                mr["alias_offset"])
+    return qemu_get_ram_ptr(mr["ram_addr"] & TARGET_PAGE_MASK)
+
+def get_guest_phys_blocks():
+    guest_phys_blocks = []
+    print "guest RAM blocks:"
+    print ("target_start     target_end       host_addr        message "
+           "count")
+    print ("---------------- ---------------- ---------------- ------- "
+           "-----")
+
+    current_map_p = gdb.parse_and_eval("address_space_memory.current_map")
+    current_map = current_map_p.dereference()
+    for cur in range(current_map["nr"]):
+        flat_range   = (current_map["ranges"] + cur).dereference()
+        mr           = flat_range["mr"].dereference()
+
+        # we only care about RAM
+        if (not mr["ram"]):
+            continue
+
+        section_size = int128_get64(flat_range["addr"]["size"])
+        target_start = int128_get64(flat_range["addr"]["start"])
+        target_end   = target_start + section_size
+        host_addr    = (memory_region_get_ram_ptr(mr) +
+                        flat_range["offset_in_region"])
+        predecessor = None
+
+        # find continuity in guest physical address space
+        if (len(guest_phys_blocks) > 0):
+            predecessor = guest_phys_blocks[-1]
+            predecessor_size = (predecessor["target_end"] -
+                                predecessor["target_start"])
+
+            # the memory API guarantees monotonically increasing
+            # traversal
+            assert (predecessor["target_end"] <= target_start)
+
+            # we want continuity in both guest-physical and
+            # host-virtual memory
+            if (predecessor["target_end"] < target_start or
+                predecessor["host_addr"] + predecessor_size != host_addr):
+                predecessor = None
+
+        if (predecessor is None):
+            # isolated mapping, add it to the list
+            guest_phys_blocks.append({"target_start": target_start,
+                                      "target_end"  : target_end,
+                                      "host_addr"   : host_addr})
+            message = "added"
+        else:
+            # expand predecessor until @target_end; predecessor's
+            # start doesn't change
+            predecessor["target_end"] = target_end
+            message = "joined"
+
+        print ("%016x %016x %016x %-7s %5u" %
+               (target_start, target_end, host_addr.cast(UINTPTR_T),
+                message, len(guest_phys_blocks)))
+
+    return guest_phys_blocks
+
+
 class DumpGuestMemory(gdb.Command):
     """Extract guest vmcore from qemu process coredump.
 
@@ -100,96 +190,9 @@ shape and this command should mostly work."""
         super(DumpGuestMemory, self).__init__("dump-guest-memory",
                                               gdb.COMMAND_DATA,
                                               gdb.COMPLETE_FILENAME)
-        self.uintptr_t     = gdb.lookup_type("uintptr_t")
         self.elf64_ehdr_le = struct.Struct("<%s" % ELF64_EHDR)
         self.elf64_phdr_le = struct.Struct("<%s" % ELF64_PHDR)
-
-    def int128_get64(self, val):
-        assert (val["hi"] == 0)
-        return val["lo"]
-
-    def qtailq_foreach(self, head, field_str):
-        var_p = head["tqh_first"]
-        while (var_p != 0):
-            var = var_p.dereference()
-            yield var
-            var_p = var[field_str]["tqe_next"]
-
-    def qemu_get_ram_block(self, ram_addr):
-        ram_blocks = gdb.parse_and_eval("ram_list.blocks")
-        for block in self.qtailq_foreach(ram_blocks, "next"):
-            if (ram_addr - block["offset"] < block["length"]):
-                return block
-        raise gdb.GdbError("Bad ram offset %x" % ram_addr)
-
-    def qemu_get_ram_ptr(self, ram_addr):
-        block = self.qemu_get_ram_block(ram_addr)
-        return block["host"] + (ram_addr - block["offset"])
-
-    def memory_region_get_ram_ptr(self, mr):
-        if (mr["alias"] != 0):
-            return (self.memory_region_get_ram_ptr(mr["alias"].dereference()) +
-                    mr["alias_offset"])
-        return self.qemu_get_ram_ptr(mr["ram_addr"] & TARGET_PAGE_MASK)
-
-    def guest_phys_blocks_init(self):
-        self.guest_phys_blocks = []
-
-    def guest_phys_blocks_append(self):
-        print "guest RAM blocks:"
-        print ("target_start     target_end       host_addr        message "
-               "count")
-        print ("---------------- ---------------- ---------------- ------- "
-               "-----")
-
-        current_map_p = gdb.parse_and_eval("address_space_memory.current_map")
-        current_map = current_map_p.dereference()
-        for cur in range(current_map["nr"]):
-            flat_range   = (current_map["ranges"] + cur).dereference()
-            mr           = flat_range["mr"].dereference()
-
-            # we only care about RAM
-            if (not mr["ram"]):
-                continue
-
-            section_size = self.int128_get64(flat_range["addr"]["size"])
-            target_start = self.int128_get64(flat_range["addr"]["start"])
-            target_end   = target_start + section_size
-            host_addr    = (self.memory_region_get_ram_ptr(mr) +
-                            flat_range["offset_in_region"])
-            predecessor = None
-
-            # find continuity in guest physical address space
-            if (len(self.guest_phys_blocks) > 0):
-                predecessor = self.guest_phys_blocks[-1]
-                predecessor_size = (predecessor["target_end"] -
-                                    predecessor["target_start"])
-
-                # the memory API guarantees monotonically increasing
-                # traversal
-                assert (predecessor["target_end"] <= target_start)
-
-                # we want continuity in both guest-physical and
-                # host-virtual memory
-                if (predecessor["target_end"] < target_start or
-                    predecessor["host_addr"] + predecessor_size != host_addr):
-                    predecessor = None
-
-            if (predecessor is None):
-                # isolated mapping, add it to the list
-                self.guest_phys_blocks.append({"target_start": target_start,
-                                               "target_end"  : target_end,
-                                               "host_addr"   : host_addr})
-                message = "added"
-            else:
-                # expand predecessor until @target_end; predecessor's
-                # start doesn't change
-                predecessor["target_end"] = target_end
-                message = "joined"
-
-            print ("%016x %016x %016x %-7s %5u" %
-                   (target_start, target_end, host_addr.cast(self.uintptr_t),
-                    message, len(self.guest_phys_blocks)))
+        self.guest_phys_blocks = None
 
     def cpu_get_dump_info(self):
         # We can't synchronize the registers with KVM post-mortem, and
@@ -263,8 +266,7 @@ shape and this command should mostly work."""
                                 len(name) + 1, len(desc), type, name, desc)
 
     def dump_init(self):
-        self.guest_phys_blocks_init()
-        self.guest_phys_blocks_append()
+        self.guest_phys_blocks = get_guest_phys_blocks()
         self.cpu_get_dump_info()
         # we have no way to retrieve the VCPU status from KVM
         # post-mortem
@@ -310,7 +312,7 @@ shape and this command should mostly work."""
             cur  = block["host_addr"]
             left = block["target_end"] - block["target_start"]
             print ("dumping range at %016x for length %016x" %
-                   (cur.cast(self.uintptr_t), left))
+                   (cur.cast(UINTPTR_T), left))
             while (left > 0):
                 chunk_size = min(TARGET_PAGE_SIZE, left)
                 chunk = qemu_core.read_memory(cur, chunk_size)
-- 
1.8.3.1