SOURCES/0001-arm64-deduce-the-start-address-of-kernel-code-based-.patch

From e3bdc32aab5d8fe09b679cf394da8ba8826e207f Mon Sep 17 00:00:00 2001
From: Pingfan Liu <piliu@redhat.com>
Date: Thu, 24 Feb 2022 11:52:12 +0800
Subject: [PATCH] arm64: deduce the start address of kernel code, based on
 kernel version

After kernel commit e2a073dde921 ("arm64: omit [_text, _stext) from
permanent kernel mapping"), the range [_text, _stext) is reclaimed. But
the current crash code still assumes that the kernel starts at "_text".

This change only affects the vmalloced area on arm64 and may cause
arm64_IS_VMALLOC_ADDR() to return an incorrect result.

Since the vmcore carries no extra information about this trivial
change, the kernel start address can only be deduced from the kernel
version, which means ms->kimage_text cannot be correctly initialized
until kernel_init() finishes. Here on arm64, it can be done at the
point of machdep_init(POST_GDB). This is fine since there is no access
to the vmalloced area at this stage.

Signed-off-by: Pingfan Liu <piliu@redhat.com>
---
 arm64.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/arm64.c b/arm64.c
index de1038a..3ab8489 100644
--- a/arm64.c
+++ b/arm64.c
@@ -92,6 +92,20 @@ static void arm64_calc_VA_BITS(void);
 static int arm64_is_uvaddr(ulong, struct task_context *);
 static void arm64_calc_KERNELPACMASK(void);
 
+static void arm64_calc_kernel_start(void)
+{
+	struct machine_specific *ms = machdep->machspec;
+	struct syment *sp;
+
+	if (THIS_KERNEL_VERSION >= LINUX(5,11,0))
+		sp = kernel_symbol_search("_stext");
+	else
+		sp = kernel_symbol_search("_text");
+
+	ms->kimage_text = (sp ? sp->value : 0);
+	sp = kernel_symbol_search("_end");
+	ms->kimage_end = (sp ? sp->value : 0);
+}
 
 /*
  * Do all necessary machine-specific setup here. This is called several times
@@ -241,6 +255,7 @@ arm64_init(int when)
 		if (machdep->flags & NEW_VMEMMAP) {
 			struct syment *sp;
 
+			/* It is finally decided in arm64_calc_kernel_start() */
 			sp = kernel_symbol_search("_text");
 			ms->kimage_text = (sp ? sp->value : 0);
 			sp = kernel_symbol_search("_end");
@@ -387,6 +402,8 @@ arm64_init(int when)
 		break;
 
 	case POST_GDB:
+		/* Rely on kernel version to decide the kernel start address */
+		arm64_calc_kernel_start();
 		arm64_calc_virtual_memory_ranges();
 		arm64_get_section_size_bits();
 
-- 
2.31.1