diff --git a/.crash.metadata b/.crash.metadata
index 761b71a..44d67c9 100644
--- a/.crash.metadata
+++ b/.crash.metadata
@@ -1 +1 @@
-355afc05e5564ffceec6425eb992f461a353ac35 SOURCES/crash-7.1.9.tar.gz
+0179af8d08a36269d5ae41205ec98e0a6957ab5f SOURCES/crash-7.2.0.tar.gz
diff --git a/.gitignore b/.gitignore
index 732da4a..b708ec5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1 @@
-SOURCES/crash-7.1.9.tar.gz
+SOURCES/crash-7.2.0.tar.gz
diff --git a/SOURCES/github_1e488cfe_to_1160ba19.patch b/SOURCES/github_1e488cfe_to_1160ba19.patch
new file mode 100644
index 0000000..c018be5
--- /dev/null
+++ b/SOURCES/github_1e488cfe_to_1160ba19.patch
@@ -0,0 +1,319 @@
+commit 1e488cfefa1d9ca4ca626bc2a308b39f7404f5db
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Tue Jan 23 16:35:41 2018 -0500
+
+    Fix for the "bt" command and the "ps -s" option for zombie tasks
+    whose kernel stacks have been freed/detached.  Without the patch,
+    the "bt" command indicates "bt: invalid kernel virtual address: 0
+    type: stack contents" and "bt: read of stack at 0 failed"; it will
+    be changed to display "(no stack)".  The "ps -s" option would fail
+    prematurely upon reaching such a task, indicating "ps: invalid kernel
+    virtual address: 0  type: stack contents" and "ps: read of stack at 0
+    failed".
+    (anderson@redhat.com)
+
+diff --git a/kernel.c b/kernel.c
+index 4638495..1bf6251 100644
+--- a/kernel.c
++++ b/kernel.c
+@@ -1,8 +1,8 @@
+ /* kernel.c - core analysis suite
+  *
+  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
+- * Copyright (C) 2002-2017 David Anderson
+- * Copyright (C) 2002-2017 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2002-2018 David Anderson
++ * Copyright (C) 2002-2018 Red Hat, Inc. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+@@ -2890,6 +2890,11 @@ back_trace(struct bt_info *bt)
+ 			return;
+  	}
+ 
++	if (bt->stackbase == 0) {
++		fprintf(fp, "(no stack)\n");
++		return;
++	}
++
+ 	fill_stackbuf(bt);
+ 
+ 	if (CRASHDEBUG(4)) {
+diff --git a/task.c b/task.c
+index b303ef7..db05ab4 100644
+--- a/task.c
++++ b/task.c
+@@ -1,8 +1,8 @@
+ /* task.c - core analysis suite
+  *
+  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
+- * Copyright (C) 2002-2017 David Anderson
+- * Copyright (C) 2002-2017 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2002-2018 David Anderson
++ * Copyright (C) 2002-2018 Red Hat, Inc. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+@@ -4182,12 +4182,14 @@ task_pointer_string(struct task_context *tc, ulong do_kstackp, char *buf)
+ 				KVADDR, &bt->stkptr, sizeof(void *),
+                 		"thread_struct ksp", FAULT_ON_ERROR);
+ 		} else {
+-               		bt->task = tc->task;
+-               		bt->tc = tc;
+-               		bt->stackbase = GET_STACKBASE(tc->task);
+-               		bt->stacktop = GET_STACKTOP(tc->task);
+-			bt->flags |= BT_KSTACKP;
+-			back_trace(bt);
++			if ((bt->stackbase = GET_STACKBASE(tc->task))) {
++				bt->stacktop = GET_STACKTOP(tc->task);
++				bt->task = tc->task;
++				bt->tc = tc;
++				bt->flags |= BT_KSTACKP;
++				back_trace(bt);
++			} else
++				bt->stkptr = 0;
+ 		}
+ 
+ 		if (bt->stkptr)
+
+commit 693e0fa8ea8b2791329a4197fafd8700afa14c3b
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Thu Jan 25 14:52:54 2018 -0500
+
+    Fix for running on live systems on 4.15-rc2 and later kernels that
+    are configured with CONFIG_RANDOMIZE_BASE and contain kernel commit
+    668533dc0764b30c9dd2baf3ca800156f688326b, titled "kallsyms: take
+    advantage of the new '%px' format".  Without the patch, a live crash
+    session does not show the "WARNING: kernel relocated ..." message
+    expected with KASLR, and then displays the message "crash: cannot set
+    context for pid: <pid>" prior to generating a SIGSEGV.
+    (anderson@redhat.com)
+
+diff --git a/symbols.c b/symbols.c
+index 2372887..9a3763c 100644
+--- a/symbols.c
++++ b/symbols.c
+@@ -1,8 +1,8 @@
+ /* symbols.c - core analysis suite
+  *
+  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
+- * Copyright (C) 2002-2017 David Anderson
+- * Copyright (C) 2002-2017 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2002-2018 David Anderson
++ * Copyright (C) 2002-2018 Red Hat, Inc. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+@@ -1004,10 +1004,9 @@ symbol_value_from_proc_kallsyms(char *symname)
+ 
+ 	found = FALSE;
+ 	while (!found && fgets(buf, BUFSIZE, kp) &&
+-	    (parse_line(buf, kallsyms) == 3) && 
+-	    hexadecimal(kallsyms[0], 0)) {
+-
+-		if (STREQ(kallsyms[2], symname)) {
++	    (parse_line(buf, kallsyms) == 3)) {
++		if (hexadecimal(kallsyms[0], 0) && 
++		    STREQ(kallsyms[2], symname)) {
+ 			kallsym = htol(kallsyms[0], RETURN_ON_ERROR, NULL);
+ 			found = TRUE;
+ 			break;
+
+commit 1af7813e0552ac93b39a44abffffc04600d3ed4c
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Thu Jan 25 15:17:26 2018 -0500
+
+    Fix for 4.15-rc5 and later x86_64 kernels that contain kernel commit
+    c482feefe1aeb150156248ba0fd3e029bc886605, titled "x86/entry/64: Make
+    cpu_entry_area.tss read-only".  Without the patch, the addresses and
+    sizes of the x86_64 exception stacks cannot be determined; therefore
+    if a backtrace starts on one of the exception stacks, then the "bt"
+    command will fail.
+    (anderson@redhat.com)
+
+diff --git a/x86_64.c b/x86_64.c
+index e924ca9..467b5d7 100644
+--- a/x86_64.c
++++ b/x86_64.c
+@@ -1245,8 +1245,10 @@ x86_64_ist_init(void)
+ 	struct syment *boot_sp, *tss_sp, *ist_sp;
+ 
+         ms = machdep->machspec;
+-	if (!(tss_sp = per_cpu_symbol_search("per_cpu__init_tss")))
+-		tss_sp = per_cpu_symbol_search("per_cpu__cpu_tss");
++	if (!(tss_sp = per_cpu_symbol_search("per_cpu__init_tss"))) {
++		if (!(tss_sp = per_cpu_symbol_search("per_cpu__cpu_tss")))
++			tss_sp = per_cpu_symbol_search("per_cpu__cpu_tss_rw");
++	}
+ 	ist_sp = per_cpu_symbol_search("per_cpu__orig_ist");
+ 
+ 	x86_64_exception_stacks_init();
+
+commit 1160ba19884fed4420c334394cde7a40b113e09c
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Fri Jan 26 11:06:48 2018 -0500
+
+    Additional fix for support of KASLR-enabled kernels captured by the
+    SADUMP dumpfile facility; this patch fixes a problem when Page
+    Table Isolation (PTI) is enabled.  When PTI is enabled, bit 12 of
+    the CR3 register is used to split user space and kernel space, and
+    bits 11:0 are used for Process Context IDentifiers (PCID).  To open
+    an SADUMP dumpfile, the value of CR3 is used to calculate the KASLR
+    offset and phys_base; this patch masks the CR3 register value
+    correctly for a PTI-enabled kernel.
+    (indou.takao@jp.fujitsu.com)
+
+diff --git a/defs.h b/defs.h
+index 4d2fb2f..92341d2 100644
+--- a/defs.h
++++ b/defs.h
+@@ -2605,6 +2605,8 @@ struct symbol_table_data {
+ 	ulong divide_error_vmlinux;
+ 	ulong idt_table_vmlinux;
+ 	ulong saved_command_line_vmlinux;
++	ulong pti_init_vmlinux;
++	ulong kaiser_init_vmlinux;
+ };
+ 
+ /* flags for st */
+diff --git a/sadump.c b/sadump.c
+index 6b912d4..25cefe9 100644
+--- a/sadump.c
++++ b/sadump.c
+@@ -1749,7 +1749,7 @@ static ulong memparse(char *ptr, char **retptr)
+  * of elfcorehdr.
+  */
+ static ulong
+-get_elfcorehdr(ulong cr3, ulong kaslr_offset)
++get_elfcorehdr(ulong kaslr_offset)
+ {
+ 	char cmdline[BUFSIZE], *ptr;
+ 	ulong cmdline_vaddr;
+@@ -1906,7 +1906,7 @@ get_vmcoreinfo(ulong elfcorehdr, ulong *addr, int *len)
+  *    using "elfcorehdr=" and retrieve kaslr_offset/phys_base from vmcoreinfo.
+  */
+ static int
+-get_kaslr_offset_from_vmcoreinfo(ulong cr3, ulong orig_kaslr_offset,
++get_kaslr_offset_from_vmcoreinfo(ulong orig_kaslr_offset,
+ 		                 ulong *kaslr_offset, ulong *phys_base)
+ {
+ 	ulong elfcorehdr_addr = 0;
+@@ -1916,7 +1916,7 @@ get_kaslr_offset_from_vmcoreinfo(ulong cr3, ulong orig_kaslr_offset,
+ 	int ret = FALSE;
+ 
+ 	/* Find "elfcorehdr=" in the kernel boot parameter */
+-	elfcorehdr_addr = get_elfcorehdr(cr3, orig_kaslr_offset);
++	elfcorehdr_addr = get_elfcorehdr(orig_kaslr_offset);
+ 	if (!elfcorehdr_addr)
+ 		return FALSE;
+ 
+@@ -1973,8 +1973,8 @@ quit:
+  * 1) Get IDTR and CR3 value from the dump header.
+  * 2) Get a virtual address of IDT from IDTR value
+  *    --- (A)
+- * 3) Translate (A) to physical address using CR3, which points a top of
+- *    page table.
++ * 3) Translate (A) to a physical address using CR3, the upper 52 bits
++ *    of which point to the top of the page table.
+  *    --- (B)
+  * 4) Get an address of vector0 (Devide Error) interrupt handler from
+  *    IDT, which are pointed by (B).
+@@ -2023,12 +2023,15 @@ quit:
+  *    kernel. Retrieve vmcoreinfo from address of "elfcorehdr=" and
+  *    get kaslr_offset and phys_base from vmcoreinfo.
+  */
++#define PTI_USER_PGTABLE_BIT	PAGE_SHIFT
++#define PTI_USER_PGTABLE_MASK	(1 << PTI_USER_PGTABLE_BIT)
++#define CR3_PCID_MASK		0xFFFull
+ int
+ sadump_calc_kaslr_offset(ulong *kaslr_offset)
+ {
+ 	ulong phys_base = 0;
+ 	struct sadump_smram_cpu_state scs;
+-	uint64_t idtr = 0, cr3 = 0, idtr_paddr;
++	uint64_t idtr = 0, pgd = 0, idtr_paddr;
+ 	ulong divide_error_vmcore;
+ 	ulong kaslr_offset_kdump, phys_base_kdump;
+ 	int ret = FALSE;
+@@ -2039,7 +2042,10 @@ sadump_calc_kaslr_offset(ulong *kaslr_offset)
+ 
+ 	memset(&scs, 0, sizeof(scs));
+ 	get_sadump_smram_cpu_state_any(&scs);
+-	cr3 = scs.Cr3;
++	if (st->pti_init_vmlinux || st->kaiser_init_vmlinux)
++		pgd = scs.Cr3 & ~(CR3_PCID_MASK|PTI_USER_PGTABLE_MASK);
++	else
++		pgd = scs.Cr3 & ~CR3_PCID_MASK;
+ 	idtr = ((uint64_t)scs.IdtUpper)<<32 | (uint64_t)scs.IdtLower;
+ 
+ 	/*
+@@ -2050,12 +2056,12 @@ sadump_calc_kaslr_offset(ulong *kaslr_offset)
+ 	 *
+ 	 * TODO: XEN and 5-level is not supported
+ 	 */
+-	vt->kernel_pgd[0] = cr3;
++	vt->kernel_pgd[0] = pgd;
+ 	machdep->machspec->last_pml4_read = vt->kernel_pgd[0];
+ 	machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_2_6;
+ 	machdep->machspec->pgdir_shift = PGDIR_SHIFT;
+-	if (!readmem(cr3, PHYSADDR, machdep->machspec->pml4, PAGESIZE(),
+-			"cr3", RETURN_ON_ERROR))
++	if (!readmem(pgd, PHYSADDR, machdep->machspec->pml4, PAGESIZE(),
++			"pgd", RETURN_ON_ERROR))
+ 		goto quit;
+ 
+ 	/* Convert virtual address of IDT table to physical address */
+@@ -2070,7 +2076,7 @@ sadump_calc_kaslr_offset(ulong *kaslr_offset)
+ 
+ 	if (CRASHDEBUG(1)) {
+ 		fprintf(fp, "calc_kaslr_offset: idtr=%lx\n", idtr);
+-		fprintf(fp, "calc_kaslr_offset: cr3=%lx\n", cr3);
++		fprintf(fp, "calc_kaslr_offset: pgd=%lx\n", pgd);
+ 		fprintf(fp, "calc_kaslr_offset: idtr(phys)=%lx\n", idtr_paddr);
+ 		fprintf(fp, "calc_kaslr_offset: divide_error(vmlinux): %lx\n",
+ 			st->divide_error_vmlinux);
+@@ -2084,9 +2090,12 @@ sadump_calc_kaslr_offset(ulong *kaslr_offset)
+ 	 * from vmcoreinfo
+ 	 */
+ 	if (get_kaslr_offset_from_vmcoreinfo(
+-		cr3, *kaslr_offset, &kaslr_offset_kdump, &phys_base_kdump)) {
++		*kaslr_offset, &kaslr_offset_kdump, &phys_base_kdump)) {
+ 		*kaslr_offset =  kaslr_offset_kdump;
+ 		phys_base =  phys_base_kdump;
++	} else if (CRASHDEBUG(1)) {
++		fprintf(fp, "sadump: failed to determine which kernel was running at crash,\n");
++		fprintf(fp, "sadump: asssuming the kdump 1st kernel.\n");
+ 	}
+ 
+ 	if (CRASHDEBUG(1)) {
+diff --git a/symbols.c b/symbols.c
+index 9a3763c..4db9af7 100644
+--- a/symbols.c
++++ b/symbols.c
+@@ -3071,10 +3071,14 @@ dump_symbol_table(void)
+ 		fprintf(fp, "divide_error_vmlinux: %lx\n", st->divide_error_vmlinux);
+ 		fprintf(fp, "   idt_table_vmlinux: %lx\n", st->idt_table_vmlinux);
+ 		fprintf(fp, "saved_command_line_vmlinux: %lx\n", st->saved_command_line_vmlinux);
++		fprintf(fp, "    pti_init_vmlinux: %lx\n", st->pti_init_vmlinux);
++		fprintf(fp, " kaiser_init_vmlinux: %lx\n", st->kaiser_init_vmlinux);
+ 	} else {
+ 		fprintf(fp, "divide_error_vmlinux: (unused)\n");
+ 		fprintf(fp, "   idt_table_vmlinux: (unused)\n");
+ 		fprintf(fp, "saved_command_line_vmlinux: (unused)\n");
++		fprintf(fp, "    pti_init_vmlinux: (unused)\n");
++		fprintf(fp, " kaiser_init_vmlinux: (unused)\n");
+ 	}
+ 
+         fprintf(fp, "    symval_hash[%d]: %lx\n", SYMVAL_HASH,
+@@ -12305,6 +12309,11 @@ numeric_forward(const void *P_x, const void *P_y)
+ 			st->saved_command_line_vmlinux = valueof(x);
+ 		else if (STREQ(y->name, "saved_command_line"))
+ 			st->saved_command_line_vmlinux = valueof(y);
++
++		if (STREQ(x->name, "pti_init"))
++			st->pti_init_vmlinux = valueof(x);
++		else if (STREQ(y->name, "kaiser_init"))
++			st->kaiser_init_vmlinux = valueof(y);
+ 	}
+ 
+   	xs = bfd_get_section(x);
diff --git a/SOURCES/github_494a796e_to_63419fb9.patch b/SOURCES/github_494a796e_to_63419fb9.patch
new file mode 100644
index 0000000..88a808a
--- /dev/null
+++ b/SOURCES/github_494a796e_to_63419fb9.patch
@@ -0,0 +1,1084 @@
+commit 494a796e112869cf5df482dc7618868eca7cf2d5
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Tue Nov 28 09:24:39 2017 -0500
+
+    Fix to support Linux 4.15 and later kernels that contain kernel
+    commit e8cfbc245e24887e3c30235f71e9e9405e0cfc39, titled "pid: remove
+    pidhash".  The kernel's traditional usage of a pid_hash[] array to
+    store PIDs has been replaced by an IDR radix tree, requiring a new
+    crash plug-in function to gather the system's task set.  Without the
+    patch, the crash session fails during initialization with the error
+    message "crash: cannot resolve init_task_union".
+    (anderson@redhat.com)
+
+diff --git a/defs.h b/defs.h
+index 9132075..ba9abad 100644
+--- a/defs.h
++++ b/defs.h
+@@ -845,6 +845,8 @@ struct task_table {                      /* kernel/local task table data */
+ 	long anonpages;
+ 	ulong stack_end_magic;
+ 	ulong pf_kthread;
++	ulong pid_radix_tree;
++	int callbacks;
+ };
+ 
+ #define TASK_INIT_DONE       (0x1)
+@@ -864,6 +866,7 @@ struct task_table {                      /* kernel/local task table data */
+ #define ACTIVE_ONLY       (0x4000)
+ #define START_TIME_NSECS  (0x8000)
+ #define THREAD_INFO_IN_TASK (0x10000)
++#define PID_RADIX_TREE   (0x20000)
+ 
+ #define TASK_SLUSH (20)
+ 
+@@ -1996,6 +1999,8 @@ struct offset_table {                    /* stash of commonly-used offsets */
+ 	long mod_arch_specific_orc_unwind;
+ 	long task_struct_policy;
+ 	long kmem_cache_random;
++	long pid_namespace_idr;
++	long idr_idr_rt;
+ };
+ 
+ struct size_table {         /* stash of commonly-used sizes */
+@@ -2146,6 +2151,7 @@ struct size_table {         /* stash of commonly-used sizes */
+ 	long sk_buff_len;
+ 	long orc_entry;
+ 	long task_struct_policy;
++	long pid;
+ };
+ 
+ struct array_table {
+diff --git a/filesys.c b/filesys.c
+index f9a7797..1b44ad5 100644
+--- a/filesys.c
++++ b/filesys.c
+@@ -2083,38 +2083,6 @@ vfs_init(void)
+ 	if (!(ft->inode_cache = (char *)malloc(SIZE(inode)*INODE_CACHE)))
+ 		error(FATAL, "cannot malloc inode cache\n");
+ 
+-	if (symbol_exists("height_to_maxindex") ||
+-	    symbol_exists("height_to_maxnodes")) {
+-		int newver = symbol_exists("height_to_maxnodes");
+-		int tmp ATTRIBUTE_UNUSED;
+-		if (!newver) {
+-			if (LKCD_KERNTYPES())
+-				ARRAY_LENGTH_INIT_ALT(tmp, "height_to_maxindex",
+-					"radix_tree_preload.nodes", NULL, 0);
+-			else
+-				ARRAY_LENGTH_INIT(tmp, height_to_maxindex,
+-					"height_to_maxindex", NULL, 0);
+-		} else {
+-			if (LKCD_KERNTYPES())
+-				ARRAY_LENGTH_INIT_ALT(tmp, "height_to_maxnodes",
+-					"radix_tree_preload.nodes", NULL, 0);
+-			else
+-				ARRAY_LENGTH_INIT(tmp, height_to_maxnodes,
+-					"height_to_maxnodes", NULL, 0);
+-		}
+-		STRUCT_SIZE_INIT(radix_tree_root, "radix_tree_root");
+-		STRUCT_SIZE_INIT(radix_tree_node, "radix_tree_node");
+-		MEMBER_OFFSET_INIT(radix_tree_root_height, 
+-			"radix_tree_root","height");
+-		MEMBER_OFFSET_INIT(radix_tree_root_rnode, 
+-			"radix_tree_root","rnode");
+-		MEMBER_OFFSET_INIT(radix_tree_node_slots, 
+-			"radix_tree_node","slots");
+-		MEMBER_OFFSET_INIT(radix_tree_node_height, 
+-			"radix_tree_node","height");
+-		MEMBER_OFFSET_INIT(radix_tree_node_shift,
+-			"radix_tree_node","shift");
+-	}
+ 	MEMBER_OFFSET_INIT(rb_root_rb_node, 
+ 		"rb_root","rb_node");
+ 	MEMBER_OFFSET_INIT(rb_node_rb_left, 
+diff --git a/symbols.c b/symbols.c
+index 0d85ff7..2372887 100644
+--- a/symbols.c
++++ b/symbols.c
+@@ -8608,6 +8608,10 @@ dump_offset_table(char *spec, ulong makestruct)
+ 	fprintf(fp, "            mnt_namespace_list: %ld\n",
+ 		OFFSET(mnt_namespace_list));
+ 
++	fprintf(fp, "             pid_namespace_idr: %ld\n",
++		OFFSET(pid_namespace_idr));
++	fprintf(fp, "                    idr_idr_rt: %ld\n",
++		OFFSET(idr_idr_rt));
+         fprintf(fp, "                  pid_link_pid: %ld\n",
+                 OFFSET(pid_link_pid));
+         fprintf(fp, "                pid_hash_chain: %ld\n",
+@@ -10349,6 +10353,8 @@ dump_offset_table(char *spec, ulong makestruct)
+ 		SIZE(pid_link));
+ 	fprintf(fp, "                          upid: %ld\n", 
+ 		SIZE(upid));
++	fprintf(fp, "                           pid: %ld\n",
++		SIZE(pid));
+ 	fprintf(fp, "                  unwind_table: %ld\n", 
+ 		SIZE(unwind_table));
+ 	fprintf(fp, "                        rlimit: %ld\n", 
+diff --git a/task.c b/task.c
+index 724532d..b303ef7 100644
+--- a/task.c
++++ b/task.c
+@@ -30,6 +30,8 @@ static void refresh_hlist_task_table(void);
+ static void refresh_hlist_task_table_v2(void);
+ static void refresh_hlist_task_table_v3(void);
+ static void refresh_active_task_table(void);
++static int radix_tree_task_callback(ulong);
++static void refresh_radix_tree_task_table(void);
+ static struct task_context *store_context(struct task_context *, ulong, char *);
+ static void refresh_context(ulong, ulong);
+ static ulong parent_of(ulong);
+@@ -439,12 +441,55 @@ task_init(void)
+ 	    	((len = SIZE(thread_union)) != STACKSIZE())) 
+ 		machdep->stacksize = len;
+ 
++	MEMBER_OFFSET_INIT(pid_namespace_idr, "pid_namespace", "idr");
++	MEMBER_OFFSET_INIT(idr_idr_rt, "idr", "idr_rt");
++
++	if (symbol_exists("height_to_maxindex") ||
++	    symbol_exists("height_to_maxnodes")) {
++		int newver = symbol_exists("height_to_maxnodes");
++		int tmp ATTRIBUTE_UNUSED;
++		if (!newver) {
++			if (LKCD_KERNTYPES())
++				ARRAY_LENGTH_INIT_ALT(tmp, "height_to_maxindex",
++					"radix_tree_preload.nodes", NULL, 0);
++			else
++				ARRAY_LENGTH_INIT(tmp, height_to_maxindex,
++					"height_to_maxindex", NULL, 0);
++		} else {
++			if (LKCD_KERNTYPES())
++				ARRAY_LENGTH_INIT_ALT(tmp, "height_to_maxnodes",
++					"radix_tree_preload.nodes", NULL, 0);
++			else
++				ARRAY_LENGTH_INIT(tmp, height_to_maxnodes,
++					"height_to_maxnodes", NULL, 0);
++		}
++		STRUCT_SIZE_INIT(radix_tree_root, "radix_tree_root");
++		STRUCT_SIZE_INIT(radix_tree_node, "radix_tree_node");
++		MEMBER_OFFSET_INIT(radix_tree_root_height,
++			"radix_tree_root","height");
++		MEMBER_OFFSET_INIT(radix_tree_root_rnode,
++			"radix_tree_root","rnode");
++		MEMBER_OFFSET_INIT(radix_tree_node_slots,
++			"radix_tree_node","slots");
++		MEMBER_OFFSET_INIT(radix_tree_node_height,
++			"radix_tree_node","height");
++		MEMBER_OFFSET_INIT(radix_tree_node_shift,
++			"radix_tree_node","shift");
++	}
++
+ 	if (symbol_exists("pidhash") && symbol_exists("pid_hash") &&
+ 	    !symbol_exists("pidhash_shift"))
+ 		error(FATAL, 
+         "pidhash and pid_hash both exist -- cannot distinquish between them\n");
+ 
+-	if (symbol_exists("pid_hash") && symbol_exists("pidhash_shift")) {
++	if (VALID_MEMBER(pid_namespace_idr)) {
++		STRUCT_SIZE_INIT(pid, "pid");
++		tt->refresh_task_table = refresh_radix_tree_task_table;
++		tt->pid_radix_tree = symbol_value("init_pid_ns") +
++			OFFSET(pid_namespace_idr) + OFFSET(idr_idr_rt);
++		tt->flags |= PID_RADIX_TREE;
++
++	} else if (symbol_exists("pid_hash") && symbol_exists("pidhash_shift")) {
+ 		int pidhash_shift;
+ 
+ 	   	if (get_symbol_type("PIDTYPE_PID", NULL, &req) != 
+@@ -2271,6 +2316,233 @@ chain_next:
+ 	tt->retries = MAX(tt->retries, retries);
+ }
+ 
++/*
++ *  Linux 4.15: pid_hash[] replaced by IDR/radix_tree
++ */
++static int
++radix_tree_task_callback(ulong task)
++{
++	ulong *tlp;
++
++	if (tt->callbacks < tt->max_tasks) {
++		tlp = (ulong *)tt->task_local;
++		tlp += tt->callbacks++;
++		*tlp = task;
++	}
++
++	return TRUE;
++}
++
++static void
++refresh_radix_tree_task_table(void)
++{
++	int i, cnt;
++	ulong count, retries, next, curtask, curpid, upid_ns, pid_tasks_0, task;
++	ulong *tlp;
++	char *tp;
++	struct radix_tree_pair rtp;
++	struct task_context *tc;
++	char *pidbuf;
++
++	if (DUMPFILE() && (tt->flags & TASK_INIT_DONE))   /* impossible */
++		return;
++
++	if (DUMPFILE()) {                                 /* impossible */
++		please_wait("gathering task table data");
++		if (!symbol_exists("panic_threads"))
++			tt->flags |= POPULATE_PANIC;
++	}
++
++	if (ACTIVE() && !(tt->flags & TASK_REFRESH))
++		return;
++
++	curpid = NO_PID;
++	curtask = NO_TASK;
++
++	/*
++	 *  The current task's task_context entry may change,
++	 *  or the task may not even exist anymore.
++	 */
++	if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) {
++		curtask = CURRENT_TASK();
++		curpid = CURRENT_PID();
++	}
++
++	count = do_radix_tree(tt->pid_radix_tree, RADIX_TREE_COUNT, NULL);
++	if (CRASHDEBUG(1))
++		console("do_radix_tree: count: %ld\n", count);
++
++	retries = 0;
++	pidbuf = GETBUF(SIZE(pid));
++
++retry_radix_tree:
++	if (retries && DUMPFILE())
++		error(FATAL,
++			"\ncannot gather a stable task list via radix tree\n");
++
++	if ((retries == MAX_UNLIMITED_TASK_RETRIES) &&
++	    !(tt->flags & TASK_INIT_DONE))
++		error(FATAL,
++		    "\ncannot gather a stable task list via radix tree (%d retries)\n",
++			retries);
++
++	if (count > tt->max_tasks) {
++		tt->max_tasks = count + TASK_SLUSH;
++		allocate_task_space(tt->max_tasks);
++	}
++
++	BZERO(tt->task_local, tt->max_tasks * sizeof(void *));
++	tt->callbacks = 0;
++	rtp.index = 0;
++	rtp.value = (void *)&radix_tree_task_callback;
++	count = do_radix_tree(tt->pid_radix_tree, RADIX_TREE_DUMP_CB, &rtp);
++	if (CRASHDEBUG(1))
++		console("do_radix_tree: count: %ld  tt->callbacks: %d\n", count, tt->callbacks);
++
++	if (count > tt->max_tasks) {
++		retries++;
++		goto retry_radix_tree;
++	}
++
++	if (!hq_open()) {
++		error(INFO, "cannot hash task_struct entries\n");
++		if (!(tt->flags & TASK_INIT_DONE))
++			clean_exit(1);
++		error(INFO, "using stale task_structs\n");
++		return;
++       }
++
++	/*
++	 *  Get the idle threads first.
++	 */
++	cnt = 0;
++	for (i = 0; i < kt->cpus; i++) {
++		if (!tt->idle_threads[i])
++			continue;
++		if (hq_enter(tt->idle_threads[i]))
++			cnt++;
++		else
++			error(WARNING, "%sduplicate idle tasks?\n",
++				DUMPFILE() ? "\n" : "");
++	}
++
++	for (i = 0; i < tt->max_tasks; i++) {
++		tlp = (ulong *)tt->task_local;
++		tlp += i;
++		if ((next = *tlp) == 0)
++			break;
++
++		/*
++		 *  Translate radix tree contents to PIDTYPE_PID task.
++		 *  - the radix tree contents are struct pid pointers
++		 *  - upid is contained in pid.numbers[0]
++		 *  - upid.ns should point to init->init_pid_ns
++		 *  - pid->tasks[0] is first hlist_node in task->pids[3]
++		 *  - get task from address of task->pids[0]
++		 */
++		if (!readmem(next, KVADDR, pidbuf,
++		    SIZE(pid), "pid", RETURN_ON_ERROR|QUIET)) {
++			error(INFO, "\ncannot read pid struct from radix tree\n");
++			if (DUMPFILE())
++				continue;
++			hq_close();
++			retries++;
++			goto retry_radix_tree;
++		}
++
++		upid_ns = ULONG(pidbuf + OFFSET(pid_numbers) + OFFSET(upid_ns));
++		if (upid_ns != tt->init_pid_ns)
++			continue;
++		pid_tasks_0 = ULONG(pidbuf + OFFSET(pid_tasks));
++		if (!pid_tasks_0)
++			continue;
++		task = pid_tasks_0 - OFFSET(task_struct_pids);
++
++		if (CRASHDEBUG(1))
++			console("pid: %lx  ns: %lx  tasks[0]: %lx task: %lx\n",
++				next, upid_ns, pid_tasks_0, task);
++
++		if (is_idle_thread(task))
++			continue;
++
++		if (!IS_TASK_ADDR(task)) {
++			error(INFO, "%s: IDR radix tree: invalid task address: %lx\n",
++				DUMPFILE() ? "\n" : "", task);
++			if (DUMPFILE())
++				break;
++			hq_close();
++			retries++;
++			goto retry_radix_tree;
++		}
++
++		if (!hq_enter(task)) {
++			error(INFO, "%s: IDR radix tree: duplicate task: %lx\n",
++				DUMPFILE() ? "\n" : "", task);
++			if (DUMPFILE())
++				break;
++			hq_close();
++			retries++;
++			goto retry_radix_tree;
++		}
++
++		cnt++;
++	}
++
++	BZERO(tt->task_local, tt->max_tasks * sizeof(void *));
++	cnt = retrieve_list((ulong *)tt->task_local, cnt);
++	hq_close();
++
++	clear_task_cache();
++
++        for (i = 0, tlp = (ulong *)tt->task_local,
++             tt->running_tasks = 0, tc = tt->context_array;
++             i < tt->max_tasks; i++, tlp++) {
++		if (!(*tlp))
++			continue;
++
++		if (!IS_TASK_ADDR(*tlp)) {
++			error(WARNING,
++		            "%sinvalid task address found in task list: %lx\n",
++				DUMPFILE() ? "\n" : "", *tlp);
++			if (DUMPFILE())
++				continue;
++			retries++;
++			goto retry_radix_tree;
++		}
++
++		if (task_exists(*tlp)) {
++			error(WARNING,
++		           "%sduplicate task address found in task list: %lx\n",
++				DUMPFILE() ? "\n" : "", *tlp);
++			if (DUMPFILE())
++				continue;
++			retries++;
++			goto retry_radix_tree;
++		}
++
++		if (!(tp = fill_task_struct(*tlp))) {
++			if (DUMPFILE())
++				continue;
++			retries++;
++			goto retry_radix_tree;
++		}
++
++		if (store_context(tc, *tlp, tp)) {
++			tc++;
++			tt->running_tasks++;
++		}
++	}
++
++	FREEBUF(pidbuf);
++
++	please_wait_done();
++
++	if (ACTIVE() && (tt->flags & TASK_INIT_DONE))
++		refresh_context(curtask, curpid);
++
++	tt->retries = MAX(tt->retries, retries);
++}
++
+ static void
+ refresh_active_task_table(void)
+ {
+@@ -7054,6 +7326,8 @@ dump_task_table(int verbose)
+                 fprintf(fp, "refresh_hlist_task_table_v3()\n");
+         else if (tt->refresh_task_table == refresh_active_task_table)
+                 fprintf(fp, "refresh_active_task_table()\n");
++        else if (tt->refresh_task_table == refresh_radix_tree_task_table)
++                fprintf(fp, "refresh_radix_tree_task_table()\n");
+ 	else
+ 		fprintf(fp, "%lx\n", (ulong)tt->refresh_task_table);
+ 
+@@ -7090,6 +7364,9 @@ dump_task_table(int verbose)
+         if (tt->flags & PID_HASH)
+                 sprintf(&buf[strlen(buf)], 
+ 			"%sPID_HASH", others++ ? "|" : "");
++	if (tt->flags & PID_RADIX_TREE)
++		sprintf(&buf[strlen(buf)],
++			"%sPID_RADIX_TREE", others++ ? "|" : "");
+         if (tt->flags & THREAD_INFO)
+                 sprintf(&buf[strlen(buf)], 
+ 			"%sTHREAD_INFO", others++ ? "|" : "");
+@@ -7122,6 +7399,8 @@ dump_task_table(int verbose)
+ 	fprintf(fp, "          task_end: %lx\n",  tt->task_end);
+ 	fprintf(fp, "        task_local: %lx\n",  (ulong)tt->task_local);
+ 	fprintf(fp, "         max_tasks: %d\n", tt->max_tasks);
++	fprintf(fp, "    pid_radix_tree: %lx\n", tt->pid_radix_tree);
++	fprintf(fp, "         callbacks: %d\n", tt->callbacks);
+ 	fprintf(fp, "        nr_threads: %d\n", tt->nr_threads);
+ 	fprintf(fp, "     running_tasks: %ld\n", tt->running_tasks);
+ 	fprintf(fp, "           retries: %ld\n", tt->retries);
+
+commit a94b86e9eb3faa963a7c20517574d230837a4292
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Tue Nov 28 12:12:50 2017 -0500
+
+    Fix for the "net" command when the network device listing has an
+    unusually large number of IP addresses.  In that case, without the
+    patch, the command may generate a segmentation violation.
+    (k-hagio@ab.jp.nec.com)
+
+diff --git a/net.c b/net.c
+index bb86963..4199091 100644
+--- a/net.c
++++ b/net.c
+@@ -70,7 +70,7 @@ static void show_net_devices_v3(ulong);
+ static void print_neighbour_q(ulong, int);
+ static void get_netdev_info(ulong, struct devinfo *);
+ static void get_device_name(ulong, char *);
+-static void get_device_address(ulong, char *);
++static long get_device_address(ulong, char **, long);
+ static void get_sock_info(ulong, char *);
+ static void dump_arp(void);
+ static void arp_state_to_flags(unsigned char);
+@@ -441,7 +441,8 @@ show_net_devices(ulong task)
+ {
+ 	ulong next;
+ 	long flen;
+-	char buf[BUFSIZE];
++	char *buf;
++	long buflen = BUFSIZE;
+ 
+ 	if (symbol_exists("dev_base_head")) {
+ 		show_net_devices_v2(task);
+@@ -459,6 +460,7 @@ show_net_devices(ulong task)
+ 	if (!net->netdevice || !next)
+ 		return;
+ 
++	buf = GETBUF(buflen);
+ 	flen = MAX(VADDR_PRLEN, strlen(net->netdevice));
+ 
+ 	fprintf(fp, "%s  NAME   IP ADDRESS(ES)\n",
+@@ -472,12 +474,14 @@ show_net_devices(ulong task)
+ 		get_device_name(next, buf);
+ 		fprintf(fp, "%-6s ", buf);
+ 
+-		get_device_address(next, buf);
++		buflen = get_device_address(next, &buf, buflen);
+ 		fprintf(fp, "%s\n", buf);
+ 
+         	readmem(next+net->dev_next, KVADDR, &next, 
+ 			sizeof(void *), "(net_)device.next", FAULT_ON_ERROR);
+ 	} while (next);
++
++	FREEBUF(buf);
+ }
+ 
+ static void
+@@ -485,13 +489,15 @@ show_net_devices_v2(ulong task)
+ {
+ 	struct list_data list_data, *ld;
+ 	char *net_device_buf;
+-	char buf[BUFSIZE];
++	char *buf;
++	long buflen = BUFSIZE;
+ 	int ndevcnt, i;
+ 	long flen;
+ 
+ 	if (!net->netdevice) /* initialized in net_init() */
+ 		return;
+ 
++	buf = GETBUF(buflen);
+ 	flen = MAX(VADDR_PRLEN, strlen(net->netdevice));
+ 
+ 	fprintf(fp, "%s  NAME   IP ADDRESS(ES)\n",
+@@ -521,12 +527,13 @@ show_net_devices_v2(ulong task)
+ 		get_device_name(ld->list_ptr[i], buf);
+ 		fprintf(fp, "%-6s ", buf);
+ 
+-		get_device_address(ld->list_ptr[i], buf);
++		buflen = get_device_address(ld->list_ptr[i], &buf, buflen);
+ 		fprintf(fp, "%s\n", buf);
+ 	}
+ 	
+ 	FREEBUF(ld->list_ptr);
+ 	FREEBUF(net_device_buf);
++	FREEBUF(buf);
+ }
+ 
+ static void
+@@ -535,13 +542,15 @@ show_net_devices_v3(ulong task)
+ 	ulong nsproxy_p, net_ns_p;
+ 	struct list_data list_data, *ld;
+ 	char *net_device_buf;
+-	char buf[BUFSIZE];
++	char *buf;
++	long buflen = BUFSIZE;
+ 	int ndevcnt, i;
+ 	long flen;
+ 
+ 	if (!net->netdevice) /* initialized in net_init() */
+ 		return;
+ 
++	buf = GETBUF(buflen);
+ 	flen = MAX(VADDR_PRLEN, strlen(net->netdevice));
+ 
+ 	fprintf(fp, "%s  NAME   IP ADDRESS(ES)\n",
+@@ -581,12 +590,13 @@ show_net_devices_v3(ulong task)
+ 		get_device_name(ld->list_ptr[i], buf);
+ 		fprintf(fp, "%-6s ", buf);
+ 
+-		get_device_address(ld->list_ptr[i], buf);
++		buflen = get_device_address(ld->list_ptr[i], &buf, buflen);
+ 		fprintf(fp, "%s\n", buf);
+ 	}
+ 	
+ 	FREEBUF(ld->list_ptr);
+ 	FREEBUF(net_device_buf);
++	FREEBUF(buf);
+ }
+ 
+ /*
+@@ -869,19 +879,24 @@ get_device_name(ulong devaddr, char *buf)
+  *  in_ifaddr->ifa_next points to the next in_ifaddr in the list (if any).
+  * 
+  */
+-static void
+-get_device_address(ulong devaddr, char *buf)
++static long
++get_device_address(ulong devaddr, char **bufp, long buflen)
+ {
+ 	ulong ip_ptr, ifa_list;
+ 	struct in_addr ifa_address;
++	char *buf;
++	char buf2[BUFSIZE];
++	long pos = 0;
+ 
+-	BZERO(buf, BUFSIZE);
++	buf = *bufp;
++	BZERO(buf, buflen);
++	BZERO(buf2, BUFSIZE);
+ 
+         readmem(devaddr + net->dev_ip_ptr, KVADDR,
+         	&ip_ptr, sizeof(ulong), "ip_ptr", FAULT_ON_ERROR);
+ 
+ 	if (!ip_ptr)
+-		return;
++		return buflen;
+ 
+         readmem(ip_ptr + OFFSET(in_device_ifa_list), KVADDR,
+         	&ifa_list, sizeof(ulong), "ifa_list", FAULT_ON_ERROR);
+@@ -891,13 +906,20 @@ get_device_address(ulong devaddr, char *buf)
+         		&ifa_address, sizeof(struct in_addr), "ifa_address", 
+ 			FAULT_ON_ERROR);
+ 
+-		sprintf(&buf[strlen(buf)], "%s%s", 
+-			strlen(buf) ? ", " : "",
+-			inet_ntoa(ifa_address));
++		sprintf(buf2, "%s%s", pos ? ", " : "", inet_ntoa(ifa_address));
++		if (pos + strlen(buf2) >= buflen) {
++			RESIZEBUF(*bufp, buflen, buflen * 2);
++			buf = *bufp;
++			BZERO(buf + buflen, buflen);
++			buflen *= 2;
++		}
++		BCOPY(buf2, &buf[pos], strlen(buf2));
++		pos += strlen(buf2);
+ 
+         	readmem(ifa_list + OFFSET(in_ifaddr_ifa_next), KVADDR,
+         		&ifa_list, sizeof(ulong), "ifa_next", FAULT_ON_ERROR);
+ 	}
++	return buflen;
+ }
+ 
+ /*
+
+commit 264f22dafe9f37780c4113fd08e8d5b2138edbce
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Wed Nov 29 15:28:41 2017 -0500
+
+    Fix for Linux 4.15 and later kernels that are configured with
+    CONFIG_SPARSEMEM_EXTREME, and that contain kernel commit
+    83e3c48729d9ebb7af5a31a504f3fd6aff0348c4, titled "mm/sparsemem:
+    Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y".
+    In such kernels, the data type of "mem_section" has changed from
+    an array to a pointer for SPARSEMEM_EXTREME configurations; without
+    the patch, this leads to errors in commands such as "kmem -p",
+    "kmem -n", "kmem -s", and any other command that translates a
+    physical address to its page struct address.
+    (anderson@redhat.com)
+
+diff --git a/memory.c b/memory.c
+index 7537c43..0df8ecc 100644
+--- a/memory.c
++++ b/memory.c
+@@ -16928,7 +16928,7 @@ sparse_mem_init(void)
+ {
+ 	ulong addr;
+ 	ulong mem_section_size;
+-	int len, dimension;
++	int len, dimension, mem_section_is_ptr;
+ 
+ 	if (!IS_SPARSEMEM())
+ 		return;
+@@ -16940,8 +16940,19 @@ sparse_mem_init(void)
+ 		error(FATAL, 
+ 		    "CONFIG_SPARSEMEM kernels not supported for this architecture\n");
+ 
++	/*
++	 *  The kernel's mem_section changed from array to pointer in this commit:
++	 *
++	 *   commit 83e3c48729d9ebb7af5a31a504f3fd6aff0348c4
++	 *   mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y
++	 */
++	mem_section_is_ptr = 
++		get_symbol_type("mem_section", NULL, NULL) == TYPE_CODE_PTR ? 
++			TRUE : FALSE;
++
+ 	if (((len = get_array_length("mem_section", &dimension, 0)) ==
+-	    (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) || !dimension)
++	    (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) || 
++	    mem_section_is_ptr || !dimension)
+ 		vt->flags |= SPARSEMEM_EX;
+ 
+ 	if (IS_SPARSEMEM_EX()) {
+@@ -16960,7 +16971,7 @@ sparse_mem_init(void)
+ 		fprintf(fp, "SECTIONS_PER_ROOT = %ld\n", SECTIONS_PER_ROOT() );
+ 		fprintf(fp, "SECTION_ROOT_MASK = 0x%lx\n", SECTION_ROOT_MASK());
+ 		fprintf(fp, "PAGES_PER_SECTION = %ld\n", PAGES_PER_SECTION());
+-		if (IS_SPARSEMEM_EX() && !len)
++		if (!mem_section_is_ptr && IS_SPARSEMEM_EX() && !len)
+ 			error(WARNING, "SPARSEMEM_EX: questionable section values\n");
+ 	}
+ 
+@@ -16969,8 +16980,12 @@ sparse_mem_init(void)
+ 	if (!(vt->mem_section = (char *)malloc(SIZE(mem_section))))
+ 		error(FATAL, "cannot malloc mem_section cache\n");
+ 
+-	addr = symbol_value("mem_section");
+-	readmem(addr, KVADDR,vt->mem_sec ,mem_section_size,
++	if (mem_section_is_ptr)
++		get_symbol_data("mem_section", sizeof(void *), &addr);
++	else
++		addr = symbol_value("mem_section");
++
++	readmem(addr, KVADDR, vt->mem_sec, mem_section_size,
+ 		"memory section root table", FAULT_ON_ERROR);
+ }
+ 
+
+commit ed2abb47be9846be7a47d769c420ee3992cc0196
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Wed Dec 13 11:18:05 2017 -0500
+
+    With the latest PPC64 NMI IPI changes, crash_ipi_callback is found
+    multiple times on the stack of active non-panic tasks.  Ensure that
+    the symbol reference relates to an actual backtrace stack frame.
+    (hbathini@linux.vnet.ibm.com)
+
+diff --git a/ppc64.c b/ppc64.c
+index 672ee60..0b04187 100644
+--- a/ppc64.c
++++ b/ppc64.c
+@@ -2337,6 +2337,14 @@ retry:
+                         *nip = *up;
+                         *ksp = bt->stackbase + 
+ 				((char *)(up) - 16 - bt->stackbuf);
++			/*
++			 * Check whether this symbol relates to a
++			 * backtrace or not
++			 */
++			ur_ksp =  *(ulong *)&bt->stackbuf[(*ksp) - bt->stackbase];
++			if (!INSTACK(ur_ksp, bt))
++				continue;
++
+                         return TRUE;
+                 }
+ 	}
+
+commit b6c0fc74fa58d48a0b6801de790e86db1130d22f
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Tue Jan 2 10:07:18 2018 -0500
+
+    Update the starting virtual address of vmalloc space for kernels
+    configured with CONFIG_X86_5LEVEL.
+    (douly.fnst@cn.fujitsu.com)
+
+diff --git a/defs.h b/defs.h
+index ba9abad..4cd07b8 100644
+--- a/defs.h
++++ b/defs.h
+@@ -3316,7 +3316,7 @@ struct arm64_stackframe {
+ 
+ #define USERSPACE_TOP_5LEVEL       0x0100000000000000
+ #define PAGE_OFFSET_5LEVEL         0xff10000000000000
+-#define VMALLOC_START_ADDR_5LEVEL  0xff92000000000000
++#define VMALLOC_START_ADDR_5LEVEL  0xffa0000000000000
+ #define VMALLOC_END_5LEVEL         0xffd1ffffffffffff
+ #define MODULES_VADDR_5LEVEL       0xffffffffa0000000
+ #define MODULES_END_5LEVEL         0xffffffffff5fffff
+
+commit cff3f2076ab52b9d6bae2b516fe3de32cccdb830
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Tue Jan 2 10:29:54 2018 -0500
+
+    Update the X86_64 VSYSCALL_END address to reflect that it only
+    contains 1 page.
+    (douly.fnst@cn.fujitsu.com)
+
+diff --git a/defs.h b/defs.h
+index 4cd07b8..97738a6 100644
+--- a/defs.h
++++ b/defs.h
+@@ -3324,7 +3324,7 @@ struct arm64_stackframe {
+ #define VMEMMAP_END_5LEVEL         0xffd5ffffffffffff
+ 
+ #define VSYSCALL_START             0xffffffffff600000
+-#define VSYSCALL_END               0xffffffffffe00000
++#define VSYSCALL_END               0xffffffffff601000
+ 
+ #define PTOV(X)               ((unsigned long)(X)+(machdep->kvbase))
+ #define VTOP(X)               x86_64_VTOP((ulong)(X))
+
+commit 3fe2663be5c6a5b160025a5a65b655a570e7e79e
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Thu Jan 4 12:54:19 2018 -0500
+
+    Prevent the X86_64 FILL_PML4() macro from updating the internal
+    machdep->machspec->last_pml4_read address every time a vmalloc'd
+    kernel virtual address is translated.
+    (douly.fnst@cn.fujitsu.com)
+
+diff --git a/defs.h b/defs.h
+index 97738a6..9a33b41 100644
+--- a/defs.h
++++ b/defs.h
+@@ -3344,7 +3344,7 @@ struct arm64_stackframe {
+ #define PTRS_PER_P4D         512
+ 
+ #define __PGDIR_SHIFT  (machdep->machspec->pgdir_shift)
+- 
++
+ #define pml4_index(address) (((address) >> PML4_SHIFT) & (PTRS_PER_PML4-1))
+ #define p4d_index(address)  (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+ #define pgd_index(address)  (((address) >> __PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+@@ -3353,26 +3353,24 @@ struct arm64_stackframe {
+ 
+ #define IS_LAST_PML4_READ(pml4) ((ulong)(pml4) == machdep->machspec->last_pml4_read)
+ 
+-#define FILL_PML4() { \
+-	if (!(pc->flags & RUNTIME) || ACTIVE()) { \
+-		if (!IS_LAST_PML4_READ(vt->kernel_pgd[0])) \
+-                    readmem(vt->kernel_pgd[0], KVADDR, machdep->machspec->pml4, \
+-                        PAGESIZE(), "init_level4_pgt", FAULT_ON_ERROR); \
+-                machdep->machspec->last_pml4_read = (ulong)(vt->kernel_pgd[0]); \
+-	} \
+-}
++#define FILL_PML4() 									\
++	if (!(pc->flags & RUNTIME) || ACTIVE()) { 					\
++		if (!IS_LAST_PML4_READ(vt->kernel_pgd[0])) { 				\
++			readmem(vt->kernel_pgd[0], KVADDR, machdep->machspec->pml4, 	\
++					PAGESIZE(), "init_level4_pgt", FAULT_ON_ERROR); \
++			machdep->machspec->last_pml4_read = (ulong)(vt->kernel_pgd[0]); \
++		} 									\
++	}
+ 
+-#define FILL_PML4_HYPER() { \
+-	if (!machdep->machspec->last_pml4_read) { \
+-		unsigned long idle_pg_table = \
+-		    symbol_exists("idle_pg_table_4") ? symbol_value("idle_pg_table_4") : \
+-			symbol_value("idle_pg_table"); \
+-		readmem(idle_pg_table, KVADDR, \
+-			machdep->machspec->pml4, PAGESIZE(), "idle_pg_table", \
+-			FAULT_ON_ERROR); \
+-		machdep->machspec->last_pml4_read = idle_pg_table; \
+-	}\
+-}
++#define FILL_PML4_HYPER() 								\
++	if (!machdep->machspec->last_pml4_read) { 					\
++		unsigned long idle_pg_table = symbol_exists("idle_pg_table_4") ? 	\
++						symbol_value("idle_pg_table_4") : 	\
++						symbol_value("idle_pg_table"); 	\
++		readmem(idle_pg_table, KVADDR, machdep->machspec->pml4, PAGESIZE(), 	\
++				"idle_pg_table", FAULT_ON_ERROR); 			\
++		machdep->machspec->last_pml4_read = idle_pg_table; 			\
++	}
+ 
+ #define IS_LAST_UPML_READ(pml) ((ulong)(pml) == machdep->machspec->last_upml_read)
+ 
+
+commit 63419fb9a535732082ae7b542ebb2399e6a3ccc9
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Wed Jan 10 14:11:27 2018 -0500
+
+    Fix for the "bt" command in x86_64 kernels that contain, or have
+    backports of, kernel commit 4950d6d48a0c43cc61d0bbb76fb10e0214b79c66,
+    titled "x86/dumpstack: Remove 64-byte gap at end of irq stack".
+    Without the patch, backtraces fail to transition from the IRQ stack
+    back to the process stack, showing an error message such as
+    "bt: cannot transition exception stack to IRQ stack to current
+    process stack".
+    (anderson@redhat.com)
+
+diff --git a/crash.8 b/crash.8
+index 4b53f44..33af024 100644
+--- a/crash.8
++++ b/crash.8
+@@ -258,6 +258,7 @@ required in very rare circumstances:
+ X86_64:
+   phys_base=<physical-address>
+   irq_eframe_link=<value>
++  irq_stack_gap=<value>
+   max_physmem_bits=<value>
+   kernel_image_size=<value>
+   vm=orig       (pre-2.6.11 virtual memory address ranges)
+diff --git a/defs.h b/defs.h
+index 9a33b41..dcd6c26 100644
+--- a/defs.h
++++ b/defs.h
+@@ -1,8 +1,8 @@
+ /* defs.h - core analysis suite
+  *
+  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
+- * Copyright (C) 2002-2017 David Anderson
+- * Copyright (C) 2002-2017 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2002-2018 David Anderson
++ * Copyright (C) 2002-2018 Red Hat, Inc. All rights reserved.
+  * Copyright (C) 2002 Silicon Graphics, Inc.
+  *
+  * This program is free software; you can redistribute it and/or modify
+@@ -5768,6 +5768,7 @@ struct machine_specific {
+         char *p4d;
+ 	ulong last_p4d_read;
+ 	struct ORC_data orc;
++	ulong irq_stack_gap;
+ };
+ 
+ #define KSYMS_START    (0x1)
+diff --git a/help.c b/help.c
+index e017b03..5b04b09 100644
+--- a/help.c
++++ b/help.c
+@@ -1,8 +1,8 @@
+ /* help.c - core analysis suite
+  *
+  * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
+- * Copyright (C) 2002-2017 David Anderson
+- * Copyright (C) 2002-2017 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2002-2018 David Anderson
++ * Copyright (C) 2002-2018 Red Hat, Inc. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+@@ -159,6 +159,7 @@ char *program_usage_info[] = {
+     "    X86_64:",
+     "      phys_base=<physical-address>",
+     "      irq_eframe_link=<value>",
++    "      irq_stack_gap=<value>",
+     "      max_physmem_bits=<value>",
+     "      kernel_image_size=<value>",
+     "      vm=orig       (pre-2.6.11 virtual memory address ranges)",
+diff --git a/x86_64.c b/x86_64.c
+index 7d01140..d8fade4 100644
+--- a/x86_64.c
++++ b/x86_64.c
+@@ -1,7 +1,7 @@
+ /* x86_64.c -- core analysis suite
+  *
+- * Copyright (C) 2004-2017 David Anderson
+- * Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2004-2018 David Anderson
++ * Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+@@ -83,6 +83,7 @@ static void x86_64_init_kernel_pgd(void);
+ static void x86_64_cpu_pda_init(void);
+ static void x86_64_per_cpu_init(void);
+ static void x86_64_ist_init(void);
++static void x86_64_irq_stack_gap_init(void);
+ static void x86_64_post_init(void);
+ static void parse_cmdline_args(void);
+ static void x86_64_clear_machdep_cache(void);
+@@ -181,6 +182,7 @@ x86_64_init(int when)
+ 		machdep->flags |= MACHDEP_BT_TEXT;
+ 		machdep->flags |= FRAMESIZE_DEBUG;
+ 		machdep->machspec->irq_eframe_link = UNINITIALIZED;
++		machdep->machspec->irq_stack_gap = UNINITIALIZED;
+ 		machdep->get_kvaddr_ranges = x86_64_get_kvaddr_ranges;
+                 if (machdep->cmdline_args[0])
+                         parse_cmdline_args();
+@@ -638,6 +640,7 @@ x86_64_init(int when)
+ 				MEMBER_OFFSET("cpu_user_regs", "cs") - sizeof(ulong);
+                 }
+ 		x86_64_irq_eframe_link_init();
++		x86_64_irq_stack_gap_init();
+ 		x86_64_framepointer_init();
+ 		x86_64_ORC_init();
+ 		x86_64_thread_return_init();
+@@ -857,8 +860,6 @@ x86_64_dump_machdep_table(ulong arg)
+ 		fprintf(fp, "            last_p4d_read: (unused)\n");
+ 	}
+ 
+-	fprintf(fp, "                 irqstack: %lx\n", (ulong)ms->irqstack);
+-	fprintf(fp, "          irq_eframe_link: %ld\n", ms->irq_eframe_link);
+ 	fprintf(fp, "                 ORC_data: %s", machdep->flags & ORC ? "\n" : "(unused)\n");
+ 	if (machdep->flags & ORC) {
+ 		fprintf(fp, "                    module_ORC: %s\n", ms->orc.module_ORC ? "TRUE" : "FALSE");
+@@ -931,6 +932,9 @@ x86_64_dump_machdep_table(ulong arg)
+ 	fprintf(fp, "            thread_return: %lx\n", ms->thread_return); 
+ 	fprintf(fp, "            page_protnone: %lx\n", ms->page_protnone); 
+ 
++	fprintf(fp, "                 irqstack: %lx\n", (ulong)ms->irqstack);
++	fprintf(fp, "          irq_eframe_link: %ld\n", ms->irq_eframe_link);
++	fprintf(fp, "            irq_stack_gap: %ld\n", ms->irq_stack_gap);
+ 	fprintf(fp, "                  stkinfo: isize: %d\n", 
+ 		ms->stkinfo.isize);
+ 	fprintf(fp, "                           esize[%d]: %d,%d,%d,%d,%d,%d,%d%s\n", 
+@@ -1338,6 +1342,71 @@ x86_64_ist_init(void)
+ 	}
+ }
+ 
++/*
++ *  Determine whether the unused gap at the top of the IRQ stack exists,
++ *  and store its size (either 0 or 64 bytes).
++ */
++static void 
++x86_64_irq_stack_gap_init(void)
++{
++	int c, cpus;
++	struct syment *sp;
++	ulong irq_stack_ptr;
++	struct machine_specific *ms = machdep->machspec;
++	
++	if (ms->irq_stack_gap != UNINITIALIZED)
++		return;
++
++	if (THIS_KERNEL_VERSION >= LINUX(4,9,0)) {
++		ms->irq_stack_gap = 0;
++		return;
++	}
++
++	ms->irq_stack_gap = 64;
++
++	/*
++	 *  Check for backports of this commit:
++	 *
++	 *    commit 4950d6d48a0c43cc61d0bbb76fb10e0214b79c66
++	 *    Author: Josh Poimboeuf <jpoimboe@redhat.com>
++	 *    Date:   Thu Aug 18 10:59:08 2016 -0500
++	 *
++	 *        x86/dumpstack: Remove 64-byte gap at end of irq stack
++	 */
++
++	if (!(sp = per_cpu_symbol_search("per_cpu__irq_stack_ptr")))
++		return;
++
++	/*
++	 *  CONFIG_SMP=n
++	 */
++	if (!(kt->flags & PER_CPU_OFF)) {
++		get_symbol_data(sp->name, sizeof(ulong), &irq_stack_ptr);
++		if ((irq_stack_ptr & 0xfff) == 0)
++			ms->irq_stack_gap = 0;
++		return;
++	}
++
++	/*
++	 *  Check the per-cpu irq_stack_ptr of the first possible cpu.
++	 */
++	if (!cpu_map_addr("possible"))
++		return;
++
++	cpus = kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS;
++	for (c = 0; c < cpus; c++) {
++		if (!in_cpu_map(POSSIBLE, c))
++			continue;
++		if (readmem(sp->value + kt->__per_cpu_offset[c],
++		    KVADDR, &irq_stack_ptr, sizeof(void *), "irq_stack_ptr",
++		    QUIET|RETURN_ON_ERROR)) {
++			if ((irq_stack_ptr & 0xfff) == 0)
++				ms->irq_stack_gap = 0;
++			break;
++		}
++	}
++}
++
+ static void 
+ x86_64_post_init(void)
+ { 
+@@ -3352,7 +3421,7 @@ in_exception_stack:
+                     	error(FATAL, "read of IRQ stack at %lx failed\n",
+ 				bt->stackbase);
+ 
+-		stacktop = bt->stacktop - 64; /* from kernel code */
++		stacktop = bt->stacktop - ms->irq_stack_gap; 
+ 
+ 		bt->flags &= ~BT_FRAMESIZE_DISABLE;
+ 
+@@ -3829,7 +3898,7 @@ in_exception_stack:
+                     	error(FATAL, "read of IRQ stack at %lx failed\n",
+ 				bt->stackbase);
+ 
+-		stacktop = bt->stacktop - 64; /* from kernel code */
++		stacktop = bt->stacktop - ms->irq_stack_gap;
+ 
+ 		if (!done) {
+ 			level = dwarf_backtrace(bt, level, stacktop);
+@@ -5575,6 +5644,10 @@ x86_64_compiler_warning_stub(void)
+  *
+  *   --machdep irq_eframe_link=<offset>
+  *
++ *  Force the IRQ stack gap size via:
++ *
++ *   --machdep irq_stack_gap=<size>
++ *
+  *  Force max_physmem_bits via:
+  *
+  *   --machdep max_physmem_bits=<count>
+@@ -5704,6 +5777,15 @@ parse_cmdline_args(void)
+ 						continue;
+ 					}
+ 				}
++	                } else if (STRNEQ(arglist[i], "irq_stack_gap=")) {
++	                        p = arglist[i] + strlen("irq_stack_gap=");
++				if (strlen(p)) {
++					value = stol(p, RETURN_ON_ERROR|QUIET, &errflag);
++					if (!errflag) {
++						machdep->machspec->irq_stack_gap = value;
++						continue;
++					}
++				}
+ 			} else if (STRNEQ(arglist[i], "max_physmem_bits=")) {
+ 	                        p = arglist[i] + strlen("max_physmem_bits=");
+ 				if (strlen(p)) {
diff --git a/SOURCES/github_87179026_to_ad3b8476.patch b/SOURCES/github_87179026_to_ad3b8476.patch
deleted file mode 100644
index ed80966..0000000
--- a/SOURCES/github_87179026_to_ad3b8476.patch
+++ /dev/null
@@ -1,274 +0,0 @@
-commit 8717902685706faf48d2c27eb943822ae8829ccc
-Author: Dave Anderson <anderson@redhat.com>
-Date:   Mon May 1 15:14:36 2017 -0400
-
-    Fix for the "snap.so" extension module to pass the KASLR relocation
-    offset value in the dumpfile header for kernels that are compiled
-    with CONFIG_RANDOMIZE_BASE.  Without the patch, it is necessary to
-    use the "--kaslr=<offset>" command line option, or the session
-    fails with the message "WARNING: cannot read linux_banner string",
-    followed by "crash: vmlinux and vmcore do not match!".
-    (anderson@redhat.com)
-
-diff --git a/extensions/snap.c b/extensions/snap.c
-index 91af859..7c94618 100644
---- a/extensions/snap.c
-+++ b/extensions/snap.c
-@@ -1,7 +1,7 @@
- /* snap.c - capture live memory into a kdump or netdump dumpfile
-  *
-- * Copyright (C) 2009, 2013 David Anderson
-- * Copyright (C) 2009, 2013 Red Hat, Inc. All rights reserved.
-+ * Copyright (C) 2009, 2013, 2014, 2017 David Anderson
-+ * Copyright (C) 2009, 2013, 2014, 2017 Red Hat, Inc. All rights reserved.
-  *
-  * This program is free software; you can redistribute it and/or modify
-  * it under the terms of the GNU General Public License as published by
-@@ -423,7 +423,10 @@ generate_elf_header(int type, int fd, char *filename)
- 	ushort e_machine;
- 	int num_segments;
- 	struct node_table *nt;
--	ulonglong task_struct;
-+	struct SNAP_info {
-+		ulonglong task_struct;
-+		ulonglong relocate;
-+	} SNAP_info;
- 
- 	num_segments = vt->numnodes;
- 
-@@ -606,9 +609,10 @@ generate_elf_header(int type, int fd, char *filename)
- 	notes->p_filesz += len;
- 
-   	/* NT_TASKSTRUCT note */
--	task_struct = CURRENT_TASK();
-+	SNAP_info.task_struct = CURRENT_TASK();
-+	SNAP_info.relocate = kt->relocate;
- 	len = dump_elf_note (ptr, NT_TASKSTRUCT, "SNAP",
--		(char *)&task_struct, sizeof(ulonglong));
-+		(char *)&SNAP_info, sizeof(struct SNAP_info));
- 	offset += len;
- 	ptr += len;
- 	notes->p_filesz += len;
-diff --git a/netdump.c b/netdump.c
-index 409bc43..0772e02 100644
---- a/netdump.c
-+++ b/netdump.c
-@@ -1172,8 +1172,9 @@ netdump_memory_dump(FILE *fp)
- 	netdump_print("            nt_prpsinfo: %lx\n", nd->nt_prpsinfo);
- 	netdump_print("          nt_taskstruct: %lx\n", nd->nt_taskstruct);
- 	netdump_print("            task_struct: %lx\n", nd->task_struct);
--	netdump_print("              page_size: %d\n", nd->page_size);
-+	netdump_print("               relocate: %lx\n", nd->relocate);
- 	netdump_print("           switch_stack: %lx\n", nd->switch_stack);
-+	netdump_print("              page_size: %d\n", nd->page_size);
- 	dump_xen_kdump_data(fp);
- 	netdump_print("     num_prstatus_notes: %d\n", nd->num_prstatus_notes);
- 	netdump_print("         num_qemu_notes: %d\n", nd->num_qemu_notes);
-@@ -1912,8 +1913,6 @@ dump_Elf32_Nhdr(Elf32_Off offset, int store)
- 		if (store) {
- 			nd->nt_taskstruct = (void *)note;
- 			nd->task_struct = *((ulong *)(ptr + note->n_namesz));
--			nd->switch_stack = *((ulong *)
--				(ptr + note->n_namesz + sizeof(ulong)));
- 		}
- 		break;
-         case NT_DISKDUMP:
-@@ -2160,8 +2159,19 @@ dump_Elf64_Nhdr(Elf64_Off offset, int store)
- 		if (store) {
- 			nd->nt_taskstruct = (void *)note;
- 			nd->task_struct = *((ulong *)(ptr + note->n_namesz));
--                        nd->switch_stack = *((ulong *)
--                                (ptr + note->n_namesz + sizeof(ulong)));
-+			if (pc->flags2 & SNAP) {
-+				if (note->n_descsz == 16) {
-+					nd->relocate = *((ulong *)
-+						(ptr + note->n_namesz + sizeof(ulong)));
-+					if (nd->relocate) {
-+						kt->relocate = nd->relocate;
-+						kt->flags |= RELOC_SET;
-+						kt->flags2 |= KASLR;
-+					}
-+				}
-+			} else if (machine_type("IA64"))
-+				nd->switch_stack = *((ulong *)
-+					(ptr + note->n_namesz + sizeof(ulong)));
- 		}
- 		break;
-         case NT_DISKDUMP:
-diff --git a/netdump.h b/netdump.h
-index b63eed7..ec6691c 100644
---- a/netdump.h
-+++ b/netdump.h
-@@ -1,7 +1,7 @@
- /* netdump.h
-  *
-- * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson
-- * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved.
-+ * Copyright (C) 2002-2009, 2017 David Anderson
-+ * Copyright (C) 2002-2009, 2017 Red Hat, Inc. All rights reserved.
-  *
-  * This program is free software; you can redistribute it and/or modify
-  * it under the terms of the GNU General Public License as published by
-@@ -77,6 +77,7 @@ struct vmcore_data {
- 	ulonglong backup_src_start;
- 	ulong backup_src_size;
- 	ulonglong backup_offset;
-+	ulong relocate;
- };
- 
- #define DUMP_ELF_INCOMPLETE  0x1   /* dumpfile is incomplete */
-
-commit c85a70ba752ac31e729a753a03b836dc5591714b
-Author: Dave Anderson <anderson@redhat.com>
-Date:   Mon May 1 15:40:21 2017 -0400
-
-    The native gdb "disassemble" command fails if the kernel has been
-    compiled with CONFIG_RANDOMIZE_BASE because the embedded gdb module
-    still operates under the assumption that the (non-relocated) text
-    locations in the vmlinux file are correct.  The error message that
-    is issued is somewhat confusing, indicating "No function contains
-    specified address".  This patch simply clarifies the error message
-    to indicate "crash: the gdb "disassemble" command is prohibited
-    because the kernel text was relocated by KASLR; use the crash "dis"
-    command instead."
-    (anderson@redhat.com)
-
-diff --git a/gdb_interface.c b/gdb_interface.c
-index 2f7f30d..873787b 100644
---- a/gdb_interface.c
-+++ b/gdb_interface.c
-@@ -737,6 +737,13 @@ is_restricted_command(char *cmd, ulong flags)
- 				newline, newline, pc->program_name);
- 		}
- 	}
-+
-+	if (kt->relocate && 
-+	    STRNEQ("disassemble", cmd) && STRNEQ(cmd, "disas"))
-+               	error(FATAL, 
-+		    "the gdb \"disassemble\" command is prohibited because the kernel text\n"
-+		    "%swas relocated%s; use the crash \"dis\" command instead.\n",
-+			space(strlen(pc->curcmd)+2), kt->flags2 & KASLR ? " by KASLR" : "");
- 	
- 	return FALSE;
- }
-
-commit 14cbcd58c14cbb34912ebce75c99e8bf35d39aef
-Author: Dave Anderson <anderson@redhat.com>
-Date:   Tue May 2 15:45:23 2017 -0400
-
-    Fix for the "mach -m" command in Linux 4.9 and later kernels that
-    contain commit 475339684ef19e46f4702e2d185a869a5c454688, titled
-    "x86/e820: Prepare e280 code for switch to dynamic storage", in
-    which the "e820" symbol was changed from a static e820map structure
-    to a pointer to an e820map structure.  Without the patch, the
-    command either displays just the header, or the header with several
-    nonsensical entries.
-    (anderson@redhat.com)
-
-diff --git a/x86_64.c b/x86_64.c
-index fbef125..74a0268 100644
---- a/x86_64.c
-+++ b/x86_64.c
-@@ -5332,7 +5332,10 @@ x86_64_display_memmap(void)
-         ulonglong addr, size;
-         uint type;
- 
--        e820 = symbol_value("e820");
-+	if (get_symbol_type("e820", NULL, NULL) == TYPE_CODE_PTR)
-+		get_symbol_data("e820", sizeof(void *), &e820);
-+	else
-+		e820 = symbol_value("e820");
- 	if (CRASHDEBUG(1))
- 		dump_struct("e820map", e820, RADIX(16));
-         buf = (char *)GETBUF(SIZE(e820map));
-
-commit a4a538caca140a8e948bbdae2be311168db7a1eb
-Author: Dave Anderson <anderson@redhat.com>
-Date:   Tue May 2 16:51:53 2017 -0400
-
-    Fix for Linux 4.10 and later kdump dumpfiles, or kernels that have
-    backported commit 401721ecd1dcb0a428aa5d6832ee05ffbdbffbbe, titled
-    "kexec: export the value of phys_base instead of symbol address".
-    Without the patch, if the x86_64 "phys_base" value in the VMCOREINFO
-    note is a negative negative decimal number, the crash session fails
-    during session intialization with a "page excluded" or "seek error"
-    when reading "page_offset_base".
-    (anderson@redhat.com)
-
-diff --git a/x86_64.c b/x86_64.c
-index 74a0268..04364f9 100644
---- a/x86_64.c
-+++ b/x86_64.c
-@@ -6219,11 +6219,14 @@ x86_64_calc_phys_base(void)
- 	 * Linux 4.10 exports it in VMCOREINFO (finally).
- 	 */
- 	if ((p1 = pc->read_vmcoreinfo("NUMBER(phys_base)"))) {
--		machdep->machspec->phys_base = dtol(p1, QUIET, NULL);
--		free(p1);
-+		if (*p1 == '-')
-+			machdep->machspec->phys_base = dtol(p1+1, QUIET, NULL) * -1;
-+		else
-+			machdep->machspec->phys_base = dtol(p1, QUIET, NULL);
- 		if (CRASHDEBUG(1))
--			fprintf(fp, "VMCOREINFO: phys_base: %lx\n", 
--				machdep->machspec->phys_base);
-+			fprintf(fp, "VMCOREINFO: NUMBER(phys_base): %s -> %lx\n", 
-+				p1, machdep->machspec->phys_base);
-+		free(p1);
- 		return;
- 	}
- 
-
-commit ad3b84766beefedcfaa191dfd597f136f660a5b6
-Author: Dave Anderson <anderson@redhat.com>
-Date:   Wed May 3 10:29:37 2017 -0400
-
-    Fix for the PPC64 "pte" command.  Without the patch, if the target
-    PTE references a present page, the physical address is incorrect.
-    (anderson@redhat.com)
-
-diff --git a/ppc64.c b/ppc64.c
-index 15025d5..84cec09 100644
---- a/ppc64.c
-+++ b/ppc64.c
-@@ -1,7 +1,7 @@
- /* ppc64.c -- core analysis suite
-  *
-- * Copyright (C) 2004-2015 David Anderson
-- * Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
-+ * Copyright (C) 2004-2015,2017 David Anderson
-+ * Copyright (C) 2004-2015,2017 Red Hat, Inc. All rights reserved.
-  * Copyright (C) 2004, 2006 Haren Myneni, IBM Corporation
-  *
-  * This program is free software; you can redistribute it and/or modify
-@@ -1507,6 +1507,8 @@ ppc64_translate_pte(ulong pte, void *physaddr, ulonglong pte_rpn_shift)
-         char *arglist[MAXARGS];
-         ulong paddr;
- 
-+	if (STREQ(pc->curcmd, "pte"))
-+		pte_rpn_shift = machdep->machspec->pte_rpn_shift;
-         paddr =  PTOB(pte >> pte_rpn_shift);
-         page_present = !!(pte & _PAGE_PRESENT);
- 
-@@ -1517,12 +1519,12 @@ ppc64_translate_pte(ulong pte, void *physaddr, ulonglong pte_rpn_shift)
- 
-         sprintf(ptebuf, "%lx", pte);
-         len1 = MAX(strlen(ptebuf), strlen("PTE"));
--        fprintf(fp, "%s  ", mkstring(buf, len1, CENTER|LJUST, "PTE"));
- 
-         if (!page_present && pte) {
-                 swap_location(pte, buf);
-                 if ((c = parse_line(buf, arglist)) != 3)
-                         error(FATAL, "cannot determine swap location\n");
-+                fprintf(fp, "%s  ", mkstring(buf2, len1, CENTER|LJUST, "PTE"));
- 
-                 len2 = MAX(strlen(arglist[0]), strlen("SWAP"));
-                 len3 = MAX(strlen(arglist[2]), strlen("OFFSET"));
-@@ -1541,6 +1543,7 @@ ppc64_translate_pte(ulong pte, void *physaddr, ulonglong pte_rpn_shift)
-                 return page_present;
-         }
- 
-+        fprintf(fp, "%s  ", mkstring(buf, len1, CENTER|LJUST, "PTE"));
-         sprintf(physbuf, "%lx", paddr);
-         len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
-         fprintf(fp, "%s  ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL"));
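
Of the hunks dropped above (presumably because the corresponding fixes are already folded into the crash-7.2.0 tarball), the phys_base one is worth restating: Linux 4.10+ exports NUMBER(phys_base) in VMCOREINFO, and the value can be a negative decimal string, so the sign has to be handled before conversion. A tiny sketch of the same parsing, using strtol() in place of crash's dtol() (the function name is illustrative):

    #include <stdlib.h>

    /* Sketch: parse a VMCOREINFO "NUMBER(phys_base)" string that may be
     * a negative decimal number, as the dropped hunk above did with dtol(). */
    static long
    parse_phys_base(const char *p)
    {
            if (*p == '-')
                    return -strtol(p + 1, NULL, 10);
            return strtol(p, NULL, 10);
    }
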
diff --git a/SOURCES/github_a38e3ec4_machine_kexec.patch b/SOURCES/github_a38e3ec4_machine_kexec.patch
new file mode 100644
index 0000000..1662ca4
--- /dev/null
+++ b/SOURCES/github_a38e3ec4_machine_kexec.patch
@@ -0,0 +1,170 @@
+commit a38e3ec4cb2692205d3af7483192077b1acfb199
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Fri Feb 9 14:58:34 2018 -0500
+
+    Fix for the ARM64 "bt" command running against Linux 4.14 and
+    later kernels.  Without the patch, the backtraces of the active
+    tasks in a kdump-generated dumpfile are truncated.  Without the
+    patch, the panic task will just show the "crash_kexec" frame
+    and the kernel-entry user-space exception frame; the non-panic
+    tasks will show their backtraces starting from the stackframe
+    addresses captured in the per-cpu NT_PRSTATUS notes, and will
+    not display the exception frame generated by the NMI callback,
+    nor any stackframes on the IRQ stack.
+    (anderson@redhat.com)
+
+
+--- crash-7.2.0/defs.h.orig
++++ crash-7.2.0/defs.h
+@@ -3137,6 +3137,8 @@ struct machine_specific {
+ 	ulong user_eframe_offset;
+ 	/* for v4.14 or later */
+ 	ulong kern_eframe_offset;
++	ulong machine_kexec_start;
++	ulong machine_kexec_end;
+ };
+ 
+ struct arm64_stackframe {
+--- crash-7.2.0/arm64.c.orig
++++ crash-7.2.0/arm64.c
+@@ -1,8 +1,8 @@
+ /*
+  * arm64.c - core analysis suite
+  *
+- * Copyright (C) 2012-2017 David Anderson
+- * Copyright (C) 2012-2017 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2012-2018 David Anderson
++ * Copyright (C) 2012-2018 Red Hat, Inc. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+@@ -633,6 +633,8 @@ arm64_dump_machdep_table(ulong arg)
+         	fprintf(fp, "%lx\n", ms->__SWP_OFFSET_MASK);
+ 	else
+ 		fprintf(fp, "(unused)\n");
++	fprintf(fp, "   machine_kexec_start: %lx\n", ms->machine_kexec_start);
++	fprintf(fp, "     machine_kexec_end: %lx\n", ms->machine_kexec_end);
+ 	fprintf(fp, "     crash_kexec_start: %lx\n", ms->crash_kexec_start);
+ 	fprintf(fp, "       crash_kexec_end: %lx\n", ms->crash_kexec_end);
+ 	fprintf(fp, "  crash_save_cpu_start: %lx\n", ms->crash_save_cpu_start);
+@@ -1336,7 +1338,7 @@ arm64_irq_stack_init(void)
+ 	struct syment *sp;
+ 	struct gnu_request request, *req;
+ 	struct machine_specific *ms = machdep->machspec;
+-	ulong p;
++	ulong p, sz;
+ 	req = &request;
+ 
+ 	if (symbol_exists("irq_stack") &&
+@@ -1384,7 +1386,26 @@ arm64_irq_stack_init(void)
+ 
+ 		if (!(ms->irq_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong)))))
+ 			error(FATAL, "cannot malloc irq_stack addresses\n");
+-		ms->irq_stack_size = ARM64_IRQ_STACK_SIZE;
++
++		/*
++		 *  Determining the IRQ_STACK_SIZE is tricky, but for now
++		 *  4.14 kernel has:
++		 *
++		 *    #define IRQ_STACK_SIZE          THREAD_SIZE
++		 *
++		 *  and finding a solid usage of THREAD_SIZE is hard, but:   
++		 *
++		 *    union thread_union {
++		 *            ... 
++	         *            unsigned long stack[THREAD_SIZE/sizeof(long)];
++		 *    };
++		 */
++		if (MEMBER_EXISTS("thread_union", "stack")) { 
++			if ((sz = MEMBER_SIZE("thread_union", "stack")) > 0)
++				ms->irq_stack_size = sz;
++		} else
++			ms->irq_stack_size = ARM64_IRQ_STACK_SIZE;
++
+ 		machdep->flags |= IRQ_STACKS;
+ 
+ 		for (i = 0; i < kt->cpus; i++) {
+@@ -1404,7 +1425,7 @@ arm64_stackframe_init(void)
+ 	long task_struct_thread;
+ 	long thread_struct_cpu_context;
+ 	long context_sp, context_pc, context_fp;
+-	struct syment *sp1, *sp1n, *sp2, *sp2n;
++	struct syment *sp1, *sp1n, *sp2, *sp2n, *sp3, *sp3n;
+ 
+ 	STRUCT_SIZE_INIT(note_buf, "note_buf_t");
+ 	STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus");
+@@ -1441,11 +1462,15 @@ arm64_stackframe_init(void)
+ 	if ((sp1 = kernel_symbol_search("crash_kexec")) &&
+ 	    (sp1n = next_symbol(NULL, sp1)) && 
+ 	    (sp2 = kernel_symbol_search("crash_save_cpu")) &&
+-	    (sp2n = next_symbol(NULL, sp2))) {
++	    (sp2n = next_symbol(NULL, sp2)) &&
++	    (sp3 = kernel_symbol_search("machine_kexec")) &&
++	    (sp3n = next_symbol(NULL, sp3))) {
+ 		machdep->machspec->crash_kexec_start = sp1->value;
+ 		machdep->machspec->crash_kexec_end = sp1n->value;
+ 		machdep->machspec->crash_save_cpu_start = sp2->value;
+ 		machdep->machspec->crash_save_cpu_end = sp2n->value;
++		machdep->machspec->machine_kexec_start = sp3->value;
++		machdep->machspec->machine_kexec_end = sp3n->value;
+ 		machdep->flags |= KDUMP_ENABLED;
+ 	}
+ 
+@@ -2592,6 +2617,7 @@ arm64_in_kdump_text(struct bt_info *bt,
+ {
+ 	ulong *ptr, *start, *base;
+ 	struct machine_specific *ms;
++	ulong crash_kexec_frame;
+ 
+ 	if (!(machdep->flags & KDUMP_ENABLED))
+ 		return FALSE;
+@@ -2606,6 +2632,7 @@ arm64_in_kdump_text(struct bt_info *bt,
+ 			start = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(bt->stacktop))];
+ 	}
+ 
++	crash_kexec_frame = 0;
+ 	ms = machdep->machspec;
+ 	for (ptr = start - 8; ptr >= base; ptr--) {
+ 		if (bt->flags & BT_OPT_BACK_TRACE) {
+@@ -2628,13 +2655,27 @@ arm64_in_kdump_text(struct bt_info *bt,
+ 				return TRUE;
+ 			}
+ 		} else {
+-			if ((*ptr >= ms->crash_kexec_start) && (*ptr < ms->crash_kexec_end)) {
++			if ((*ptr >= ms->machine_kexec_start) && (*ptr < ms->machine_kexec_end)) {
+ 				bt->bptr = ((ulong)ptr - (ulong)base)
+ 					   + task_to_stackbase(bt->tc->task);
+ 				if (CRASHDEBUG(1))
+-					fprintf(fp, "%lx: %lx (crash_kexec)\n", bt->bptr, *ptr);
++					fprintf(fp, "%lx: %lx (machine_kexec)\n", bt->bptr, *ptr);
+ 				return TRUE;
+ 			}
++			if ((*ptr >= ms->crash_kexec_start) && (*ptr < ms->crash_kexec_end)) {
++				/*
++				 *  Stash the first crash_kexec frame in case the machine_kexec
++				 *  frame is not found.
++				 */
++				if (!crash_kexec_frame) {
++					crash_kexec_frame = ((ulong)ptr - (ulong)base)
++						+ task_to_stackbase(bt->tc->task);
++					if (CRASHDEBUG(1))
++						fprintf(fp, "%lx: %lx (crash_kexec)\n", 
++							bt->bptr, *ptr);
++				}
++				continue;
++			}
+ 			if ((*ptr >= ms->crash_save_cpu_start) && (*ptr < ms->crash_save_cpu_end)) {
+ 				bt->bptr = ((ulong)ptr - (ulong)base)
+ 					   + task_to_stackbase(bt->tc->task);
+@@ -2645,6 +2686,11 @@ arm64_in_kdump_text(struct bt_info *bt,
+ 		}
+ 	} 
+ 
++	if (crash_kexec_frame) {
++		bt->bptr = crash_kexec_frame;
++		return TRUE;
++	}
++
+ 	return FALSE;
+ }
+ 
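
The heart of the arm64 change above is the order of preference when scanning an active task's saved stack words: a return address inside machine_kexec() is taken immediately, while the first crash_kexec() address is only remembered as a fallback. A minimal, self-contained sketch of that selection (the function name and the array-based interface are illustrative; the real code walks bt->stackbuf and converts the hit back into a stack address):

    /* Sketch: scan saved stack words for a return address inside
     * machine_kexec(); fall back to the first crash_kexec() address.
     * Returns the index of the chosen word, or -1 if neither is found. */
    static long
    pick_kdump_frame(const unsigned long *word, long n,
                     unsigned long mk_start, unsigned long mk_end,
                     unsigned long ck_start, unsigned long ck_end)
    {
            long i, crash_kexec_idx = -1;

            for (i = n - 1; i >= 0; i--) {
                    if (word[i] >= mk_start && word[i] < mk_end)
                            return i;                /* best: machine_kexec frame */
                    if (word[i] >= ck_start && word[i] < ck_end && crash_kexec_idx < 0)
                            crash_kexec_idx = i;     /* remember first crash_kexec hit only */
            }
            return crash_kexec_idx;                  /* may still be -1 */
    }

The same patch also sizes the IRQ stack from MEMBER_SIZE("thread_union", "stack") when that member exists, since 4.14 defines IRQ_STACK_SIZE as THREAD_SIZE.
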
diff --git a/SOURCES/github_d833432f_kpti_trampoline.patch b/SOURCES/github_d833432f_kpti_trampoline.patch
new file mode 100644
index 0000000..6d15641
--- /dev/null
+++ b/SOURCES/github_d833432f_kpti_trampoline.patch
@@ -0,0 +1,216 @@
+commit d833432f1ed2d7f507c05d3b6c3e6aa732c49e56
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Fri Jan 19 14:17:53 2018 -0500
+
+    Initial pass for support of kernel page table isolation.  The x86_64
+    "bt" command may indicate "bt: cannot transition from exception stack
+    to current process stack" if the crash callback NMI occurred while an
+    active task was running on the new entry trampoline stack.  This has
+    only been tested on the RHEL7 backport of the upstream patch because
+    as of this commit, crash does not run on 4.15-rc kernels.  Further
+    changes may be required for upstream kernels, and distributions that
+    implement the kernel changes differently than upstream.
+    (anderson@redhat.com)
+
+diff --git a/defs.h b/defs.h
+index dcd6c26..4d2fb2f 100644
+--- a/defs.h
++++ b/defs.h
+@@ -5769,6 +5769,8 @@ struct machine_specific {
+ 	ulong last_p4d_read;
+ 	struct ORC_data orc;
+ 	ulong irq_stack_gap;
++	ulong kpti_entry_stack;
++	ulong kpti_entry_stack_size;
+ };
+ 
+ #define KSYMS_START    (0x1)
+@@ -5786,6 +5788,7 @@ struct machine_specific {
+ #define RANDOMIZED  (0x1000)
+ #define VM_5LEVEL   (0x2000)
+ #define ORC         (0x4000)
++#define KPTI        (0x8000)
+ 
+ #define VM_FLAGS (VM_ORIG|VM_2_6_11|VM_XEN|VM_XEN_RHEL4|VM_5LEVEL)
+ 
+diff --git a/x86_64.c b/x86_64.c
+index d8fade4..e924ca9 100644
+--- a/x86_64.c
++++ b/x86_64.c
+@@ -48,6 +48,7 @@ static void x86_64_back_trace_cmd(struct bt_info *);
+ static ulong x86_64_in_exception_stack(struct bt_info *, int *);
+ static ulong x86_64_in_irqstack(struct bt_info *);
+ static int x86_64_in_alternate_stack(int, ulong);
++static ulong x86_64_in_kpti_entry_stack(int, ulong);
+ static ulong __schedule_frame_adjust(ulong, struct bt_info *);
+ static void x86_64_low_budget_back_trace_cmd(struct bt_info *);
+ static void x86_64_dwarf_back_trace_cmd(struct bt_info *);
+@@ -84,6 +85,7 @@ static void x86_64_cpu_pda_init(void);
+ static void x86_64_per_cpu_init(void);
+ static void x86_64_ist_init(void);
+ static void x86_64_irq_stack_gap_init(void);
++static void x86_64_entry_trampoline_init(void);
+ static void x86_64_post_init(void);
+ static void parse_cmdline_args(void);
+ static void x86_64_clear_machdep_cache(void);
+@@ -641,6 +643,7 @@ x86_64_init(int when)
+                 }
+ 		x86_64_irq_eframe_link_init();
+ 		x86_64_irq_stack_gap_init();
++		x86_64_entry_trampoline_init();
+ 		x86_64_framepointer_init();
+ 		x86_64_ORC_init();
+ 		x86_64_thread_return_init();
+@@ -722,6 +725,8 @@ x86_64_dump_machdep_table(ulong arg)
+ 		fprintf(fp, "%sNESTED_NMI", others++ ? "|" : "");
+ 	if (machdep->flags & RANDOMIZED)
+ 		fprintf(fp, "%sRANDOMIZED", others++ ? "|" : "");
++	if (machdep->flags & KPTI)
++		fprintf(fp, "%sKPTI", others++ ? "|" : "");
+         fprintf(fp, ")\n");
+ 
+ 	fprintf(fp, "             kvbase: %lx\n", machdep->kvbase);
+@@ -973,7 +978,18 @@ x86_64_dump_machdep_table(ulong arg)
+ 			fprintf(fp, "\n   ");
+ 		fprintf(fp, "%016lx ", ms->stkinfo.ibase[c]);
+ 	}
+-	fprintf(fp, "\n");
++	fprintf(fp, "\n                 kpti_entry_stack_size: %ld", ms->kpti_entry_stack_size);
++	fprintf(fp, "\n                      kpti_entry_stack: ");
++	if (machdep->flags & KPTI) {
++		fprintf(fp, "%lx\n   ", ms->kpti_entry_stack);
++		for (c = 0; c < cpus; c++) {
++			if (c && !(c%4))
++				fprintf(fp, "\n   ");
++			fprintf(fp, "%016lx ", ms->kpti_entry_stack + kt->__per_cpu_offset[c]);
++		}
++		fprintf(fp, "\n");
++	} else
++		fprintf(fp, "(unused)\n");
+ }
+ 
+ /*
+@@ -3147,7 +3163,7 @@ x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in)
+ 	struct syment *sp, *spt;
+ 	FILE *ofp;
+ 	ulong estack, irqstack;
+-	ulong irq_eframe;
++	ulong irq_eframe, kpti_eframe;
+ 	struct bt_info bt_local, *bt;
+ 	struct machine_specific *ms;
+ 	ulong last_process_stack_eframe;
+@@ -3493,6 +3509,16 @@ in_exception_stack:
+ 		bt->stacktop = GET_STACKTOP(bt->tc->task);
+ 
+ 		if (!INSTACK(rsp, bt)) {
++			/*
++			 *  If the exception occurred while on the KPTI entry trampoline stack,
++			 *  just print the entry exception frame and bail out.
++			 */
++			if ((kpti_eframe = x86_64_in_kpti_entry_stack(bt->tc->processor, rsp))) {
++				x86_64_exception_frame(EFRAME_PRINT, kpti_eframe, 0, bt, ofp);
++				fprintf(fp, "--- <entry trampoline stack> ---\n");
++				return;
++			}
++
+ 			switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))
+ 			{
+ 			case (BT_EXCEPTION_STACK|BT_IRQSTACK):
+@@ -3720,7 +3746,7 @@ x86_64_dwarf_back_trace_cmd(struct bt_info *bt_in)
+ 	struct syment *sp;
+ 	FILE *ofp;
+ 	ulong estack, irqstack;
+-	ulong irq_eframe;
++	ulong irq_eframe, kpti_eframe;
+ 	struct bt_info bt_local, *bt;
+ 	struct machine_specific *ms;
+ 	ulong last_process_stack_eframe;
+@@ -3940,6 +3966,16 @@ in_exception_stack:
+ 		bt->stacktop = GET_STACKTOP(bt->tc->task);
+ 
+ 		if (!INSTACK(rsp, bt)) {
++			/*
++			 *  If the exception occurred while on the KPTI entry trampoline stack,
++			 *  just print the entry exception frame and bail out.
++			 */
++			if ((kpti_eframe = x86_64_in_kpti_entry_stack(bt->tc->processor, rsp))) {
++				x86_64_exception_frame(EFRAME_PRINT, kpti_eframe, 0, bt, ofp);
++				fprintf(fp, "--- <ENTRY TRAMPOLINE stack> ---\n");
++				return;
++			}
++
+ 			switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))
+ 			{
+ 			case (BT_EXCEPTION_STACK|BT_IRQSTACK):
+@@ -8661,4 +8697,71 @@ next_in_func:
+ 			goto next_in_func;
+ }
+ 
++/*
++ *  KPTI entry stack initialization.  May vary significantly
++ *  between upstream and distribution backports.
++ */
++static void 
++x86_64_entry_trampoline_init(void)
++{
++	struct machine_specific *ms;
++	struct syment *sp;
++
++	ms = machdep->machspec;
++
++	if (!kernel_symbol_exists("pti_init") &&
++	    !kernel_symbol_exists("kaiser_init"))
++		return;
++
++	/*
++	 *  4.15
++	 */
++	if (MEMBER_EXISTS("entry_stack", "words") && 
++	    MEMBER_EXISTS("entry_stack_page", "stack") &&
++	    (sp = per_cpu_symbol_search("per_cpu__entry_stack_storage"))) {
++		ms->kpti_entry_stack = sp->value + MEMBER_OFFSET("entry_stack_page", "stack");
++		ms->kpti_entry_stack_size = MEMBER_SIZE("entry_stack", "words");
++		machdep->flags |= KPTI;
++		return;
++	}
++
++	/* 
++	 *  RHEL
++	 */
++	if (MEMBER_EXISTS("tss_struct", "stack")) {
++		if (!(sp = per_cpu_symbol_search("per_cpu__init_tss")))
++			sp = per_cpu_symbol_search("per_cpu__cpu_tss");
++		ms->kpti_entry_stack = sp->value + MEMBER_OFFSET("tss_struct", "stack");
++		ms->kpti_entry_stack_size = MEMBER_SIZE("tss_struct", "stack");
++		machdep->flags |= KPTI;
++		return;
++	}
++}
++
++static ulong
++x86_64_in_kpti_entry_stack(int cpu, ulong rsp)
++{
++	ulong stack_base, stack_end;
++	struct machine_specific *ms;
++
++	if (!(machdep->flags & KPTI))
++		return 0;
++
++	ms = machdep->machspec;
++
++	if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
++		if (kt->__per_cpu_offset[cpu] == 0)
++			return 0;
++		stack_base = ms->kpti_entry_stack + kt->__per_cpu_offset[cpu];
++	} else
++		stack_base = ms->kpti_entry_stack; 
++
++	stack_end = stack_base + 
++		(ms->kpti_entry_stack_size > 0 ? ms->kpti_entry_stack_size : 512);
++
++	if ((rsp >= stack_base) && (rsp < stack_end))
++		return(stack_end - SIZE(pt_regs));
++
++	return 0;
++}
+ #endif  /* X86_64 */ 
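
The trampoline-stack handling above reduces to a range check plus a fixed location for the saved registers: if the saved RSP lies inside a CPU's entry stack, the kernel-entry pt_regs sit at the very top of that stack. A self-contained sketch, assuming the caller has already resolved the per-cpu stack base (the 512-byte fallback mirrors the patch; the function name is illustrative):

    /* Sketch: return the address of the pt_regs at the top of the KPTI
     * entry trampoline stack if rsp lies inside it, or 0 otherwise. */
    static unsigned long
    kpti_entry_eframe(unsigned long rsp, unsigned long stack_base,
                      unsigned long stack_size, unsigned long sizeof_pt_regs)
    {
            unsigned long stack_end;

            if (stack_size == 0)
                    stack_size = 512;                /* fallback used by the patch */
            stack_end = stack_base + stack_size;

            if (rsp >= stack_base && rsp < stack_end)
                    return stack_end - sizeof_pt_regs;   /* entry exception frame */

            return 0;
    }

When the check fires, the backtracer prints that exception frame and bails out, exactly as the comment in the hunk above describes.
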
diff --git a/SOURCES/github_da9bd35a_to_e2efacdd.patch b/SOURCES/github_da9bd35a_to_e2efacdd.patch
new file mode 100644
index 0000000..0c8bb82
--- /dev/null
+++ b/SOURCES/github_da9bd35a_to_e2efacdd.patch
@@ -0,0 +1,2334 @@
+commit da9bd35afc2269529b029dd22815e04362e89e5b
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Wed Oct 11 11:17:30 2017 -0400
+
+    Fix for the "runq" command on Linux 4.14 and later kernels that
+    contain commit cd9e61ed1eebbcd5dfad59475d41ec58d9b64b6a, titled
+    "rbtree: cache leftmost node internally".  Without the patch,
+    the command fails with the error message "runq: invalid structure
+    member offset: cfs_rq_rb_leftmost".
+    (anderson@redhat.com)
+
+diff --git a/task.c b/task.c
+index 88706bf..2b12af0 100644
+--- a/task.c
++++ b/task.c
+@@ -8765,10 +8765,15 @@ cfs_rq_offset_init(void)
+ 		MEMBER_OFFSET_INIT(sched_rt_entity_my_q, "sched_rt_entity",
+ 			"my_q");
+ 		MEMBER_OFFSET_INIT(sched_entity_on_rq, "sched_entity", "on_rq");
+-		MEMBER_OFFSET_INIT(cfs_rq_rb_leftmost, "cfs_rq", "rb_leftmost");
+-		MEMBER_OFFSET_INIT(cfs_rq_nr_running, "cfs_rq", "nr_running");
+ 		MEMBER_OFFSET_INIT(cfs_rq_tasks_timeline, "cfs_rq", 
+ 			"tasks_timeline");
++		MEMBER_OFFSET_INIT(cfs_rq_rb_leftmost, "cfs_rq", "rb_leftmost");
++		if (INVALID_MEMBER(cfs_rq_rb_leftmost) && 
++		    VALID_MEMBER(cfs_rq_tasks_timeline) &&
++		    MEMBER_EXISTS("rb_root_cached", "rb_leftmost"))
++			ASSIGN_OFFSET(cfs_rq_rb_leftmost) = OFFSET(cfs_rq_tasks_timeline) + 
++				MEMBER_OFFSET("rb_root_cached", "rb_leftmost");
++		MEMBER_OFFSET_INIT(cfs_rq_nr_running, "cfs_rq", "nr_running");
+ 		MEMBER_OFFSET_INIT(cfs_rq_curr, "cfs_rq", "curr");
+ 		MEMBER_OFFSET_INIT(rt_rq_active, "rt_rq", "active");
+                 MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct",
+
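
The offset arithmetic in the hunk above follows directly from the kernel layout change: cfs_rq.tasks_timeline went from a plain rb_root to an rb_root_cached, whose first member is the rb_root and whose second is the cached leftmost pointer, so the old cfs_rq_rb_leftmost offset can be recomposed as OFFSET(tasks_timeline) plus the rb_leftmost offset inside rb_root_cached. A compilable illustration with abbreviated struct definitions (cfs_rq reduced to the members that matter here):

    #include <stddef.h>
    #include <stdio.h>

    struct rb_node { unsigned long dummy[3]; };      /* abridged */
    struct rb_root { struct rb_node *rb_node; };
    struct rb_root_cached {                          /* Linux 4.14+ cached-leftmost rbtree */
            struct rb_root rb_root;
            struct rb_node *rb_leftmost;
    };
    struct cfs_rq_abridged {
            unsigned int nr_running;
            struct rb_root_cached tasks_timeline;
    };

    int main(void)
    {
            size_t off = offsetof(struct cfs_rq_abridged, tasks_timeline)
                       + offsetof(struct rb_root_cached, rb_leftmost);
            printf("recomposed cfs_rq_rb_leftmost offset: %zu\n", off);
            return 0;
    }
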
+commit 9e5255af26233e7ef051ebdd8bdccbd15d0d9256
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Wed Oct 11 16:11:34 2017 -0400
+
+    Fix to prevent a useless message during session initialization.
+    Without the patch, if the highest possible node bit in the
+    node_states[N_ONLINE] multi-word bitmask is set, then a message
+    such as "crash: next_online_node: 256 is too large!" will be
+    displayed.
+    (anderson@redhat.com)
+
+diff --git a/memory.c b/memory.c
+index 8efe0b2..9c9a40d 100644
+--- a/memory.c
++++ b/memory.c
+@@ -17200,10 +17200,8 @@ next_online_node(int first)
+ 	int i, j, node;
+ 	ulong mask, *maskptr;
+ 
+-	if ((first/BITS_PER_LONG) >= vt->node_online_map_len) {
+-		error(INFO, "next_online_node: %d is too large!\n", first);
++	if ((first/BITS_PER_LONG) >= vt->node_online_map_len)
+ 		return -1;
+-	}
+ 
+ 	maskptr = (ulong *)vt->node_online_map;
+ 	for (i = node = 0; i <  vt->node_online_map_len; i++, maskptr++) {
+
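
The next_online_node() change above is only about recognizing a normal termination condition: node_states[N_ONLINE] is a multi-word bitmask, and asking for the next node past its last word simply means the iteration is done. A self-contained sketch of the same word-at-a-time scan (array-based; the real code walks vt->node_online_map):

    #define BITS_PER_LONG_SKETCH (8 * (int)sizeof(unsigned long))

    /* Sketch: return the first set bit >= 'first', or -1 when 'first' is
     * already past the end of the map -- quietly, as the fix above does. */
    static int
    next_online_node_sketch(const unsigned long *map, int nwords, int first)
    {
            int i, bit, node = 0;

            if (first / BITS_PER_LONG_SKETCH >= nwords)
                    return -1;

            for (i = 0; i < nwords; i++)
                    for (bit = 0; bit < BITS_PER_LONG_SKETCH; bit++, node++)
                            if ((map[i] & (1UL << bit)) && node >= first)
                                    return node;
            return -1;
    }
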
+commit 2b93c036edf2a5cc21a06a14f377cd9b365f858a
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Tue Oct 17 15:40:17 2017 -0400
+
+    Additional fixes for the ARM64 "bt" command for Linux 4.14 kernels.
+    The patch corrects the contents of in-kernel exception frame register
+    dumps, and properly transitions the backtrace from the IRQ stack
+    to the process stack.
+    (takahiro.akashi@linaro.org)
+
+diff --git a/arm64.c b/arm64.c
+index 20c5d34..c75669b 100644
+--- a/arm64.c
++++ b/arm64.c
+@@ -72,6 +72,7 @@ static void arm64_cmd_mach(void);
+ static void arm64_display_machine_stats(void);
+ static int arm64_get_smp_cpus(void);
+ static void arm64_clear_machdep_cache(void);
++static int arm64_on_process_stack(struct bt_info *, ulong);
+ static int arm64_in_alternate_stack(int, ulong);
+ static int arm64_on_irq_stack(int, ulong);
+ static void arm64_set_irq_stack(struct bt_info *);
+@@ -1333,34 +1334,64 @@ arm64_irq_stack_init(void)
+ 	int i;
+ 	struct syment *sp;
+ 	struct gnu_request request, *req;
+-	req = &request;
+ 	struct machine_specific *ms = machdep->machspec;
++	ulong p;
++	req = &request;
+ 
+-	if (!symbol_exists("irq_stack") ||
+-	    !(sp = per_cpu_symbol_search("irq_stack")) ||
+-	    !get_symbol_type("irq_stack", NULL, req) ||
+-	    (req->typecode != TYPE_CODE_ARRAY) ||
+-	    (req->target_typecode != TYPE_CODE_INT))
+-		return;
+-
+-	if (CRASHDEBUG(1)) {
+-		fprintf(fp, "irq_stack: \n");
+-		fprintf(fp, "  type: %s\n", 
+-			(req->typecode == TYPE_CODE_ARRAY) ? "TYPE_CODE_ARRAY" : "other");
+-		fprintf(fp, "  target_typecode: %s\n", 
+-			req->target_typecode == TYPE_CODE_INT ? "TYPE_CODE_INT" : "other");
+-		fprintf(fp, "  target_length: %ld\n", req->target_length);
+-		fprintf(fp, "  length: %ld\n", req->length);
+-	}
+-
+-	ms->irq_stack_size = req->length;
+-	if (!(ms->irq_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong)))))
+-		error(FATAL, "cannot malloc irq_stack addresses\n");
++	if (symbol_exists("irq_stack") &&
++	    (sp = per_cpu_symbol_search("irq_stack")) &&
++	    get_symbol_type("irq_stack", NULL, req)) {
++		/* before v4.14 or CONFIG_VMAP_STACK disabled */
++		if (CRASHDEBUG(1)) {
++			fprintf(fp, "irq_stack: \n");
++			fprintf(fp, "  type: %s\n",
++				(req->typecode == TYPE_CODE_ARRAY) ?
++						"TYPE_CODE_ARRAY" : "other");
++			fprintf(fp, "  target_typecode: %s\n",
++				req->target_typecode == TYPE_CODE_INT ?
++						"TYPE_CODE_INT" : "other");
++			fprintf(fp, "  target_length: %ld\n",
++						req->target_length);
++			fprintf(fp, "  length: %ld\n", req->length);
++		}
++
++		if (!(ms->irq_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong)))))
++			error(FATAL, "cannot malloc irq_stack addresses\n");
++		ms->irq_stack_size = req->length;
++		machdep->flags |= IRQ_STACKS;
+ 
+-	for (i = 0; i < kt->cpus; i++)
+-		ms->irq_stacks[i] = kt->__per_cpu_offset[i] + sp->value;
++		for (i = 0; i < kt->cpus; i++)
++			ms->irq_stacks[i] = kt->__per_cpu_offset[i] + sp->value;
++	} else if (symbol_exists("irq_stack_ptr") &&
++	    (sp = per_cpu_symbol_search("irq_stack_ptr")) &&
++	    get_symbol_type("irq_stack_ptr", NULL, req)) {
++		/* v4.14 and later with CONFIG_VMAP_STACK enabled */
++		if (CRASHDEBUG(1)) {
++			fprintf(fp, "irq_stack_ptr: \n");
++			fprintf(fp, "  type: %x, %s\n",
++				(int)req->typecode,
++				(req->typecode == TYPE_CODE_PTR) ?
++						"TYPE_CODE_PTR" : "other");
++			fprintf(fp, "  target_typecode: %x, %s\n",
++				(int)req->target_typecode,
++				req->target_typecode == TYPE_CODE_INT ?
++						"TYPE_CODE_INT" : "other");
++			fprintf(fp, "  target_length: %ld\n",
++						req->target_length);
++			fprintf(fp, "  length: %ld\n", req->length);
++		}
++
++		if (!(ms->irq_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong)))))
++			error(FATAL, "cannot malloc irq_stack addresses\n");
++		ms->irq_stack_size = 16384;
++		machdep->flags |= IRQ_STACKS;
+ 
+-	machdep->flags |= IRQ_STACKS;
++		for (i = 0; i < kt->cpus; i++) {
++			p = kt->__per_cpu_offset[i] + sp->value;
++			readmem(p, KVADDR, &(ms->irq_stacks[i]), sizeof(ulong),
++			    "IRQ stack pointer", RETURN_ON_ERROR);
++		}
++	} 
+ }
+ 
+ /*
+@@ -1750,11 +1781,20 @@ arm64_display_full_frame(struct bt_info *bt, ulong sp)
+ 	if (bt->frameptr == sp)
+ 		return;
+ 
+-	if (!INSTACK(sp, bt) || !INSTACK(bt->frameptr, bt)) {
+-		if (sp == 0)
+-			sp = bt->stacktop - USER_EFRAME_OFFSET;
+-		else
+-			return;
++	if (INSTACK(bt->frameptr, bt)) {
++		if (INSTACK(sp, bt)) {
++			; /* normal case */
++		} else {
++			if (sp == 0)
++				/* interrupt in user mode */
++				sp = bt->stacktop - USER_EFRAME_OFFSET;
++			else
++				/* interrupt in kernel mode */
++				sp = bt->stacktop;
++		}
++	} else { 
++		/* IRQ exception frame */
++		return;
+ 	}
+ 
+ 	words = (sp - bt->frameptr) / sizeof(ulong);
+@@ -1860,6 +1900,9 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
+ 	if ((frame->fp == 0) && (frame->pc == 0))
+ 		return FALSE;
+ 
++	if (!(machdep->flags & IRQ_STACKS))
++		return TRUE;
++
+ 	/*
+ 	 * The kernel's manner of determining the end of the IRQ stack:
+ 	 *
+@@ -1872,7 +1915,25 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
+ 	 *  irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id());
+ 	 *  orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);   (pt_regs pointer on process stack)
+ 	 */
+-	if (machdep->flags & IRQ_STACKS) {
++	if (machdep->flags & UNW_4_14) {
++		if ((bt->flags & BT_IRQSTACK) &&
++		    !arm64_on_irq_stack(bt->tc->processor, frame->fp)) {
++			if (arm64_on_process_stack(bt, frame->fp)) {
++				arm64_set_process_stack(bt);
++
++				frame->sp = frame->fp - SIZE(pt_regs) + 16;
++				/* for switch_stack */
++				/* fp still points to irq stack */
++				bt->bptr = fp;
++				/* for display_full_frame */
++				/* sp points to process stack */
++				bt->frameptr = frame->sp;
++			} else {
++				/* irq -> user */
++				return FALSE;
++			}
++		}
++	} else { /* !UNW_4_14 */
+ 		ms = machdep->machspec;
+ 		irq_stack_ptr = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size - 16;
+ 
+@@ -1896,7 +1957,7 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
+ 				return FALSE;
+ 			}
+ 		}
+-	}
++	} /* UNW_4_14 */
+ 
+ 	return TRUE;
+ }
+@@ -2086,10 +2147,17 @@ arm64_unwind_frame_v2(struct bt_info *bt, struct arm64_stackframe *frame,
+ 			 * We are on process stack. Just add a faked frame
+ 			 */
+ 
+-			if (!arm64_on_irq_stack(bt->tc->processor, ext_frame.fp))
+-				frame->sp = ext_frame.fp
+-					    - sizeof(struct arm64_pt_regs);
+-			else {
++			if (!arm64_on_irq_stack(bt->tc->processor, ext_frame.fp)) {
++				if (MEMBER_EXISTS("pt_regs", "stackframe")) {
++					frame->sp = ext_frame.fp
++						    - sizeof(struct arm64_pt_regs) - 16;
++					frame->fp = ext_frame.fp;
++				} else {
++					frame->sp = ext_frame.fp
++						    - sizeof(struct arm64_pt_regs);
++					frame->fp = frame->sp;
++				}
++			} else {
+ 				/*
+ 				 * FIXME: very exceptional case
+ 				 * We are already back on process stack, but
+@@ -2109,10 +2177,10 @@ arm64_unwind_frame_v2(struct bt_info *bt, struct arm64_stackframe *frame,
+ 				 * Really ugly
+ 				 */
+ 				frame->sp = frame->fp + 0x20;
++				frame->fp = frame->sp;
+ 				fprintf(ofp, " (Next exception frame might be wrong)\n");
+ 			}
+ 
+-			frame->fp = frame->sp;
+ 		} else {
+ 			/* We are on IRQ stack */
+ 
+@@ -2122,9 +2190,15 @@ arm64_unwind_frame_v2(struct bt_info *bt, struct arm64_stackframe *frame,
+ 			if (ext_frame.fp != irq_stack_ptr) {
+ 				/* (2) Just add a faked frame */
+ 
+-				frame->sp = ext_frame.fp
+-					    - sizeof(struct arm64_pt_regs);
+-				frame->fp = frame->sp;
++				if (MEMBER_EXISTS("pt_regs", "stackframe")) {
++					frame->sp = ext_frame.fp
++						    - sizeof(struct arm64_pt_regs);
++					frame->fp = ext_frame.fp;
++				} else {
++					frame->sp = ext_frame.fp
++						    - sizeof(struct arm64_pt_regs) - 16;
++					frame->fp = frame->sp;
++				}
+ 			} else {
+ 				/*
+ 				 * (3)
+@@ -2303,12 +2377,17 @@ arm64_back_trace_cmd(struct bt_info *bt)
+ 
+ 		if (arm64_in_exception_text(bt->instptr) && INSTACK(stackframe.fp, bt)) {
+ 			if (!(bt->flags & BT_IRQSTACK) ||
+-			    (((stackframe.sp + SIZE(pt_regs)) < bt->stacktop)))
+-				exception_frame = stackframe.fp - SIZE(pt_regs);
++			    (((stackframe.sp + SIZE(pt_regs)) < bt->stacktop))) {
++				if (MEMBER_EXISTS("pt_regs", "stackframe"))
++					/* v4.14 or later */
++					exception_frame = stackframe.fp - SIZE(pt_regs) + 16;
++				else
++					exception_frame = stackframe.fp - SIZE(pt_regs);
++			}
+ 		}
+ 
+ 		if ((bt->flags & BT_IRQSTACK) &&
+-		    !arm64_on_irq_stack(bt->tc->processor, stackframe.sp)) {
++		    !arm64_on_irq_stack(bt->tc->processor, stackframe.fp)) {
+ 			bt->flags &= ~BT_IRQSTACK;
+ 			if (arm64_switch_stack(bt, &stackframe, ofp) == USER_MODE)
+ 				break;
+@@ -2424,6 +2503,8 @@ user_space:
+ 		 * otherwise show an exception frame.
+ 		 * Since exception entry code doesn't have a real
+ 		 * stackframe, we fake a dummy frame here.
++		 * Note: Since we have a real stack frame in pt_regs,
++		 * We no longer need a dummy frame on v4.14 or later.
+ 		 */
+ 		if (!arm64_in_exp_entry(stackframe.pc))
+ 			continue;
+@@ -2669,7 +2750,9 @@ arm64_switch_stack(struct bt_info *bt, struct arm64_stackframe *frame, FILE *ofp
+ 	if (frame->fp == 0)
+ 		return USER_MODE;
+ 
+-	arm64_print_exception_frame(bt, frame->sp, KERNEL_MODE, ofp);
++	if (!(machdep->flags & UNW_4_14))
++		arm64_print_exception_frame(bt, frame->sp, KERNEL_MODE, ofp);
++
+ 	return KERNEL_MODE;
+ }
+ 
+@@ -3363,6 +3446,20 @@ arm64_clear_machdep_cache(void) {
+ }
+ 
+ static int
++arm64_on_process_stack(struct bt_info *bt, ulong stkptr)
++{
++	ulong stackbase, stacktop;
++
++	stackbase = GET_STACKBASE(bt->task);
++	stacktop = GET_STACKTOP(bt->task);
++
++	if ((stkptr >= stackbase) && (stkptr < stacktop))
++		return TRUE;
++
++	return FALSE;
++}
++
++static int
+ arm64_on_irq_stack(int cpu, ulong stkptr)
+ {
+ 	return arm64_in_alternate_stack(cpu, stkptr);
+
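
The part of the arm64 rework above that is easy to miss: before 4.14 the per-cpu symbol is the irq_stack array itself, so its per-cpu address is the stack, whereas with CONFIG_VMAP_STACK the per-cpu symbol is irq_stack_ptr, a pointer that has to be dereferenced to find each CPU's IRQ stack. A sketch of the second path with crash's readmem() reduced to a caller-supplied callback (only the per-cpu-offset arithmetic is taken from the patch; the rest is illustrative):

    /* Sketch: resolve each CPU's IRQ stack base from a per-cpu pointer symbol. */
    typedef int (*read_kvaddr_t)(unsigned long addr, void *buf, unsigned long size);

    static int
    resolve_irq_stacks(unsigned long irq_stack_ptr_sym,      /* symbol value           */
                       const unsigned long *per_cpu_offset,  /* kt->__per_cpu_offset[] */
                       int cpus, unsigned long *irq_stacks,  /* output, one per cpu    */
                       read_kvaddr_t read_kvaddr)
    {
            int i;

            for (i = 0; i < cpus; i++) {
                    unsigned long p = per_cpu_offset[i] + irq_stack_ptr_sym;
                    if (!read_kvaddr(p, &irq_stacks[i], sizeof(unsigned long)))
                            return 0;                /* unreadable per-cpu pointer */
            }
            return 1;
    }
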
+commit 30950ba8885fb39a1ed7b071cdb225e3ec38e7b3
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Tue Oct 17 16:20:19 2017 -0400
+
+    Implemented a new "search -T" option, which is identical to the
+    "search -t" option, except that the search is restricted to the
+    kernel stacks of active tasks.
+    (atomlin@redhat.com)
+
+diff --git a/help.c b/help.c
+index 2d80202..a9aab37 100644
+--- a/help.c
++++ b/help.c
+@@ -2862,7 +2862,7 @@ NULL
+ char *help_search[] = {
+ "search",
+ "search memory",
+-"[-s start] [ -[kKV] | -u | -p | -t ] [-e end | -l length] [-m mask]\n"
++"[-s start] [ -[kKV] | -u | -p | -t | -T ] [-e end | -l length] [-m mask]\n"
+ "         [-x count] -[cwh] [value | (expression) | symbol | string] ...",
+ "  This command searches for a given value within a range of user virtual, kernel",
+ "  virtual, or physical memory space.  If no end nor length value is entered, ",
+@@ -2893,6 +2893,7 @@ char *help_search[] = {
+ "          -t  Search only the kernel stack pages of every task.  If one or more",
+ "              matches are found in a task's kernel stack, precede the output",
+ "              with a task-identifying header.",
++"          -T  Same as -t, except only the active task(s) are considered.",
+ "      -e end  Stop the search at this hexadecimal user or kernel virtual",
+ "              address, kernel symbol, or physical address.  The end address",
+ "              must be appropriate for the memory type specified.",
+diff --git a/memory.c b/memory.c
+index 9c9a40d..fb534e8 100644
+--- a/memory.c
++++ b/memory.c
+@@ -13882,7 +13882,7 @@ cmd_search(void)
+ 	ulong value, mask, len;
+ 	ulong uvaddr_start, uvaddr_end;
+ 	ulong kvaddr_start, kvaddr_end, range_end;
+-	int sflag, Kflag, Vflag, pflag, tflag;
++	int sflag, Kflag, Vflag, pflag, Tflag, tflag;
+ 	struct searchinfo searchinfo;
+ 	struct syment *sp;
+ 	struct node_table *nt;
+@@ -13896,7 +13896,7 @@ cmd_search(void)
+ 
+ 	context = max = 0;
+ 	start = end = 0;
+-	value = mask = sflag = pflag = Kflag = Vflag = memtype = len = tflag = 0;
++	value = mask = sflag = pflag = Kflag = Vflag = memtype = len = Tflag = tflag = 0;
+ 	kvaddr_start = kvaddr_end = 0;
+ 	uvaddr_start = UNINITIALIZED;
+ 	uvaddr_end = COMMON_VADDR_SPACE() ? (ulong)(-1) : machdep->kvbase;
+@@ -13933,7 +13933,7 @@ cmd_search(void)
+ 
+ 	searchinfo.mode = SEARCH_ULONG;	/* default search */
+ 
+-        while ((c = getopt(argcnt, args, "tl:ukKVps:e:v:m:hwcx:")) != EOF) {
++        while ((c = getopt(argcnt, args, "Ttl:ukKVps:e:v:m:hwcx:")) != EOF) {
+                 switch(c)
+                 {
+ 		case 'u':
+@@ -14038,12 +14038,19 @@ cmd_search(void)
+ 			context = dtoi(optarg, FAULT_ON_ERROR, NULL);
+ 			break;
+ 
++		case 'T':
+ 		case 't':
+ 			if (XEN_HYPER_MODE())
+ 				error(FATAL, 
+- 			 	    "-t option is not applicable to the "
+-				    "Xen hypervisor\n");
+-			tflag++;
++ 			 	    "-%c option is not applicable to the "
++				    "Xen hypervisor\n", c);
++			if (c == 'T')
++				Tflag++;
++			else if (c == 't')
++				tflag++;
++			if (tflag && Tflag)
++				error(FATAL, 
++				    "-t and -T options are mutually exclusive\n");
+ 			break;
+ 
+                 default:
+@@ -14052,10 +14059,11 @@ cmd_search(void)
+                 }
+         }
+ 
+-	if (tflag && (memtype || start || end || len)) 
++	if ((tflag || Tflag) && (memtype || start || end || len)) 
+ 		error(FATAL, 
+-		    "-t option cannot be used with other "
+-		    "memory-selection options\n");
++		    "-%c option cannot be used with other "
++		    "memory-selection options\n",
++		    tflag ? 't' : 'T');
+ 
+ 	if (XEN_HYPER_MODE()) {
+ 		memtype = KVADDR;
+@@ -14328,10 +14336,12 @@ cmd_search(void)
+ 			break;
+ 		}
+ 
+-		if (tflag) {
++		if (tflag || Tflag) {
+ 			searchinfo.tasks_found = 0;
+ 			tc = FIRST_CONTEXT();
+ 			for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
++				if (Tflag && !is_task_active(tc->task))
++					continue;
+ 				searchinfo.vaddr_start = GET_STACKBASE(tc->task); 
+ 				searchinfo.vaddr_end = GET_STACKTOP(tc->task);
+ 				searchinfo.task_context = tc;
+
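
A short usage note for the new flag: "search -T" behaves exactly like "search -t" except that only tasks active on a CPU are scanned, and combining the two flags is rejected. Roughly, with an arbitrary search value:

    crash> search -T deadbeef
    crash> search -t -T deadbeef
    search: -t and -T options are mutually exclusive

As with -t, any matches are preceded by a task-identifying header, per the help text added above.
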
+commit 090bf28907782549ba980c588979372061764aa7
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Fri Oct 20 14:23:36 2017 -0400
+
+    Removal of the ARM64 "bt -o" option for Linux 4.14 and later kernels,
+    along with several cleanups/readability improvements.
+    (takahiro.akashi@linaro.org)
+
+diff --git a/arm64.c b/arm64.c
+index c75669b..7904f65 100644
+--- a/arm64.c
++++ b/arm64.c
+@@ -612,6 +612,7 @@ arm64_dump_machdep_table(ulong arg)
+ 	fprintf(fp, "        exp_entry2_end: %lx\n", ms->exp_entry2_end);
+ 	fprintf(fp, "       panic_task_regs: %lx\n", (ulong)ms->panic_task_regs);
+ 	fprintf(fp, "    user_eframe_offset: %ld\n", ms->user_eframe_offset);
++	fprintf(fp, "    kern_eframe_offset: %ld\n", ms->kern_eframe_offset);
+ 	fprintf(fp, "         PTE_PROT_NONE: %lx\n", ms->PTE_PROT_NONE);
+ 	fprintf(fp, "              PTE_FILE: ");
+ 	if (ms->PTE_FILE)
+@@ -1383,7 +1384,7 @@ arm64_irq_stack_init(void)
+ 
+ 		if (!(ms->irq_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong)))))
+ 			error(FATAL, "cannot malloc irq_stack addresses\n");
+-		ms->irq_stack_size = 16384;
++		ms->irq_stack_size = ARM64_IRQ_STACK_SIZE;
+ 		machdep->flags |= IRQ_STACKS;
+ 
+ 		for (i = 0; i < kt->cpus; i++) {
+@@ -1410,10 +1411,13 @@ arm64_stackframe_init(void)
+ 	MEMBER_OFFSET_INIT(elf_prstatus_pr_pid, "elf_prstatus", "pr_pid");
+ 	MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg");
+ 
+-	if (MEMBER_EXISTS("pt_regs", "stackframe")) 
++	if (MEMBER_EXISTS("pt_regs", "stackframe")) {
+ 		machdep->machspec->user_eframe_offset = SIZE(pt_regs);
+-	else
++		machdep->machspec->kern_eframe_offset = SIZE(pt_regs) - 16;
++	} else {
+ 		machdep->machspec->user_eframe_offset = SIZE(pt_regs) + 16;
++		machdep->machspec->kern_eframe_offset = SIZE(pt_regs);
++	}
+ 
+ 	machdep->machspec->__exception_text_start = 
+ 		symbol_value("__exception_text_start");
+@@ -1503,6 +1507,7 @@ arm64_stackframe_init(void)
+ #define USER_MODE   (2)
+ 
+ #define USER_EFRAME_OFFSET (machdep->machspec->user_eframe_offset)
++#define KERN_EFRAME_OFFSET (machdep->machspec->kern_eframe_offset)
+ 
+ /*
+  * PSR bits
+@@ -1793,7 +1798,7 @@ arm64_display_full_frame(struct bt_info *bt, ulong sp)
+ 				sp = bt->stacktop;
+ 		}
+ 	} else { 
+-		/* IRQ exception frame */
++		/* This is a transition case from irq to process stack. */
+ 		return;
+ 	}
+ 
+@@ -1903,61 +1908,73 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
+ 	if (!(machdep->flags & IRQ_STACKS))
+ 		return TRUE;
+ 
+-	/*
+-	 * The kernel's manner of determining the end of the IRQ stack:
+-	 *
+-	 *  #define THREAD_SIZE        16384
+-	 *  #define THREAD_START_SP    (THREAD_SIZE - 16)
+-	 *  #define IRQ_STACK_START_SP THREAD_START_SP
+-	 *  #define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
+-	 *  #define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
+-	 *
+-	 *  irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id());
+-	 *  orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);   (pt_regs pointer on process stack)
+-	 */
++	if (!(machdep->flags & IRQ_STACKS))
++		return TRUE;
++
+ 	if (machdep->flags & UNW_4_14) {
+ 		if ((bt->flags & BT_IRQSTACK) &&
+ 		    !arm64_on_irq_stack(bt->tc->processor, frame->fp)) {
+ 			if (arm64_on_process_stack(bt, frame->fp)) {
+ 				arm64_set_process_stack(bt);
+ 
+-				frame->sp = frame->fp - SIZE(pt_regs) + 16;
+-				/* for switch_stack */
+-				/* fp still points to irq stack */
++				frame->sp = frame->fp - KERN_EFRAME_OFFSET;
++				/*
++				 * for switch_stack
++				 * fp still points to irq stack
++				 */
+ 				bt->bptr = fp;
+-				/* for display_full_frame */
+-				/* sp points to process stack */
+-				bt->frameptr = frame->sp;
++				/*
++				 * for display_full_frame
++				 * sp points to process stack
++				 *
++				 * If we want to see pt_regs,
++				 * comment out the below.
++				 * bt->frameptr = frame->sp;
++				 */
+ 			} else {
+ 				/* irq -> user */
+ 				return FALSE;
+ 			}
+ 		}
+-	} else { /* !UNW_4_14 */
+-		ms = machdep->machspec;
+-		irq_stack_ptr = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size - 16;
+-
+-		if (frame->sp == irq_stack_ptr) {
+-			orig_sp = GET_STACK_ULONG(irq_stack_ptr - 8);
+-			arm64_set_process_stack(bt);
+-			if (INSTACK(orig_sp, bt) && (INSTACK(frame->fp, bt) || (frame->fp == 0))) {
+-				ptregs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(orig_sp))];
+-				frame->sp = orig_sp;
+-				frame->pc = ptregs->pc;
+-				bt->bptr = fp;
+-				if (CRASHDEBUG(1))
+-					error(INFO, 
+-					    "arm64_unwind_frame: switch stacks: fp: %lx sp: %lx  pc: %lx\n",
+-						frame->fp, frame->sp, frame->pc);
+-			} else {
+-				error(WARNING, 
+-				    "arm64_unwind_frame: on IRQ stack: oriq_sp: %lx%s fp: %lx%s\n",
+-					orig_sp, INSTACK(orig_sp, bt) ? "" : " (?)",
+-					frame->fp, INSTACK(frame->fp, bt) ? "" : " (?)");
+-				return FALSE;
+-			}
++
++		return TRUE;
++	}
++
++	/*
++	 * The kernel's manner of determining the end of the IRQ stack:
++	 *
++	 *  #define THREAD_SIZE        16384
++	 *  #define THREAD_START_SP    (THREAD_SIZE - 16)
++	 *  #define IRQ_STACK_START_SP THREAD_START_SP
++	 *  #define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
++	 *  #define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
++	 *
++	 *  irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id());
++	 *  orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);   (pt_regs pointer on process stack)
++	 */
++	ms = machdep->machspec;
++	irq_stack_ptr = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size - 16;
++
++	if (frame->sp == irq_stack_ptr) {
++		orig_sp = GET_STACK_ULONG(irq_stack_ptr - 8);
++		arm64_set_process_stack(bt);
++		if (INSTACK(orig_sp, bt) && (INSTACK(frame->fp, bt) || (frame->fp == 0))) {
++			ptregs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(orig_sp))];
++			frame->sp = orig_sp;
++			frame->pc = ptregs->pc;
++			bt->bptr = fp;
++			if (CRASHDEBUG(1))
++				error(INFO,
++				    "arm64_unwind_frame: switch stacks: fp: %lx sp: %lx  pc: %lx\n",
++					frame->fp, frame->sp, frame->pc);
++		} else {
++			error(WARNING,
++			    "arm64_unwind_frame: on IRQ stack: oriq_sp: %lx%s fp: %lx%s\n",
++				orig_sp, INSTACK(orig_sp, bt) ? "" : " (?)",
++				frame->fp, INSTACK(frame->fp, bt) ? "" : " (?)");
++			return FALSE;
+ 		}
+-	} /* UNW_4_14 */
++	}
+ 
+ 	return TRUE;
+ }
+@@ -2147,17 +2164,10 @@ arm64_unwind_frame_v2(struct bt_info *bt, struct arm64_stackframe *frame,
+ 			 * We are on process stack. Just add a faked frame
+ 			 */
+ 
+-			if (!arm64_on_irq_stack(bt->tc->processor, ext_frame.fp)) {
+-				if (MEMBER_EXISTS("pt_regs", "stackframe")) {
+-					frame->sp = ext_frame.fp
+-						    - sizeof(struct arm64_pt_regs) - 16;
+-					frame->fp = ext_frame.fp;
+-				} else {
+-					frame->sp = ext_frame.fp
+-						    - sizeof(struct arm64_pt_regs);
+-					frame->fp = frame->sp;
+-				}
+-			} else {
++			if (!arm64_on_irq_stack(bt->tc->processor, ext_frame.fp))
++				frame->sp = ext_frame.fp
++					    - sizeof(struct arm64_pt_regs);
++			else {
+ 				/*
+ 				 * FIXME: very exceptional case
+ 				 * We are already back on process stack, but
+@@ -2177,10 +2187,10 @@ arm64_unwind_frame_v2(struct bt_info *bt, struct arm64_stackframe *frame,
+ 				 * Really ugly
+ 				 */
+ 				frame->sp = frame->fp + 0x20;
+-				frame->fp = frame->sp;
+ 				fprintf(ofp, " (Next exception frame might be wrong)\n");
+ 			}
+ 
++			frame->fp = frame->sp;
+ 		} else {
+ 			/* We are on IRQ stack */
+ 
+@@ -2190,15 +2200,9 @@ arm64_unwind_frame_v2(struct bt_info *bt, struct arm64_stackframe *frame,
+ 			if (ext_frame.fp != irq_stack_ptr) {
+ 				/* (2) Just add a faked frame */
+ 
+-				if (MEMBER_EXISTS("pt_regs", "stackframe")) {
+-					frame->sp = ext_frame.fp
+-						    - sizeof(struct arm64_pt_regs);
+-					frame->fp = ext_frame.fp;
+-				} else {
+-					frame->sp = ext_frame.fp
+-						    - sizeof(struct arm64_pt_regs) - 16;
+-					frame->fp = frame->sp;
+-				}
++				frame->sp = ext_frame.fp
++					    - sizeof(struct arm64_pt_regs);
++				frame->fp = frame->sp;
+ 			} else {
+ 				/*
+ 				 * (3)
+@@ -2285,6 +2289,11 @@ arm64_back_trace_cmd(struct bt_info *bt)
+ 	FILE *ofp;
+ 
+ 	if (bt->flags & BT_OPT_BACK_TRACE) {
++		if (machdep->flags & UNW_4_14) {
++			option_not_supported('o');
++			return;
++		}
++
+ 		arm64_back_trace_cmd_v2(bt);
+ 		return;
+ 	}
+@@ -2346,7 +2355,7 @@ arm64_back_trace_cmd(struct bt_info *bt)
+ 			goto complete_user;
+ 
+ 		if (DUMPFILE() && is_task_active(bt->task)) {
+-			exception_frame = stackframe.fp - SIZE(pt_regs);
++			exception_frame = stackframe.fp - KERN_EFRAME_OFFSET;
+ 			if (arm64_is_kernel_exception_frame(bt, exception_frame))
+ 				arm64_print_exception_frame(bt, exception_frame, 
+ 					KERNEL_MODE, ofp);
+@@ -2377,13 +2386,8 @@ arm64_back_trace_cmd(struct bt_info *bt)
+ 
+ 		if (arm64_in_exception_text(bt->instptr) && INSTACK(stackframe.fp, bt)) {
+ 			if (!(bt->flags & BT_IRQSTACK) ||
+-			    (((stackframe.sp + SIZE(pt_regs)) < bt->stacktop))) {
+-				if (MEMBER_EXISTS("pt_regs", "stackframe"))
+-					/* v4.14 or later */
+-					exception_frame = stackframe.fp - SIZE(pt_regs) + 16;
+-				else
+-					exception_frame = stackframe.fp - SIZE(pt_regs);
+-			}
++			    (((stackframe.sp + SIZE(pt_regs)) < bt->stacktop)))
++				exception_frame = stackframe.fp - KERN_EFRAME_OFFSET;
+ 		}
+ 
+ 		if ((bt->flags & BT_IRQSTACK) &&
+@@ -2503,8 +2507,6 @@ user_space:
+ 		 * otherwise show an exception frame.
+ 		 * Since exception entry code doesn't have a real
+ 		 * stackframe, we fake a dummy frame here.
+-		 * Note: Since we have a real stack frame in pt_regs,
+-		 * We no longer need a dummy frame on v4.14 or later.
+ 		 */
+ 		if (!arm64_in_exp_entry(stackframe.pc))
+ 			continue;
+diff --git a/defs.h b/defs.h
+index 7768895..a694a66 100644
+--- a/defs.h
++++ b/defs.h
+@@ -3038,6 +3038,7 @@ typedef signed int s32;
+ #define ARM64_VMEMMAP_END    (ARM64_VMEMMAP_VADDR + GIGABYTES(8UL) - 1)
+ 
+ #define ARM64_STACK_SIZE   (16384)
++#define ARM64_IRQ_STACK_SIZE   ARM64_STACK_SIZE
+ 
+ #define _SECTION_SIZE_BITS      30
+ #define _MAX_PHYSMEM_BITS       40
+@@ -3117,6 +3118,8 @@ struct machine_specific {
+ 	ulong kimage_text;
+ 	ulong kimage_end;
+ 	ulong user_eframe_offset;
++	/* for v4.14 or later */
++	ulong kern_eframe_offset;
+ };
+ 
+ struct arm64_stackframe {
+diff --git a/help.c b/help.c
+index a9aab37..f9c5792 100644
+--- a/help.c
++++ b/help.c
+@@ -1799,7 +1799,8 @@ char *help_bt[] = {
+ "           It does so by verifying the thread_info.task pointer, ensuring that",
+ "           the thread_info.cpu is a valid cpu number, and checking the end of ",
+ "           the stack for the STACK_END_MAGIC value.",
+-"       -o  arm64: use optional backtrace method.",
++"       -o  arm64: use optional backtrace method; not supported on Linux 4.14 or",
++"           later kernels.",
+ "           x86: use old backtrace method, permissible only on kernels that were",
+ "           compiled without the -fomit-frame_pointer.",
+ "           x86_64: use old backtrace method, which dumps potentially stale",
+diff --git a/task.c b/task.c
+index 2b12af0..362822c 100644
+--- a/task.c
++++ b/task.c
+@@ -6750,6 +6750,8 @@ panic_search(void)
+ 	fd->keyword_array[0] = FOREACH_BT; 
+ 	if (machine_type("S390X"))
+ 		fd->flags |= FOREACH_o_FLAG;
++	else if (machine_type("ARM64"))
++		fd->flags |= FOREACH_t_FLAG;
+ 	else
+ 		fd->flags |= (FOREACH_t_FLAG|FOREACH_o_FLAG);
+ 
+
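
The offset bookkeeping spread through the commit above can be summarized in one place: 4.14 added a 16-byte stackframe record to the end of pt_regs (the "stackframe" member checked above), which shifts where a kernel-entry exception frame sits relative to the frame pointer, while the user-entry frame remains at the very top of the stack. A sketch mirroring the selection made in arm64_stackframe_init() (the helper name is illustrative):

    /* Sketch: choose the exception-frame offsets used by the arm64 backtracer. */
    static void
    pick_eframe_offsets(int pt_regs_has_stackframe, unsigned long sizeof_pt_regs,
                        unsigned long *user_off, unsigned long *kern_off)
    {
            if (pt_regs_has_stackframe) {            /* Linux 4.14 or later */
                    *user_off = sizeof_pt_regs;
                    *kern_off = sizeof_pt_regs - 16;
            } else {                                 /* older kernels */
                    *user_off = sizeof_pt_regs + 16;
                    *kern_off = sizeof_pt_regs;
            }
    }
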
+commit 45b74b89530d611b3fa95a1041e158fbb865fa84
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Mon Oct 23 11:15:39 2017 -0400
+
+    Fix for support of KASLR enabled kernels captured by the SADUMP
+    dumpfile facility. SADUMP dumpfile headers do not contain phys_base
+    or VMCOREINFO notes, so without this patch, the crash session fails
+    during initialization with the message "crash: seek error: kernel
+    virtual address: <address>  type: "page_offset_base".  This patch
+    calculates the phys_base value and the KASLR offset using the IDTR
+    and CR3 registers from the dumpfile header.
+    (indou.takao@jp.fujitsu.com)
+
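
Before wading into the diff, the arithmetic the patch performs once IDTR and CR3 have been pulled out of the SADUMP header is short enough to state up front; the following is a worked example with made-up addresses (only the two formulas and the __START_KERNEL_map constant carry over):

    #include <stdio.h>

    int main(void)
    {
            /* Inputs: illustrative values only. */
            unsigned long START_KERNEL_map     = 0xffffffff80000000UL; /* __START_KERNEL_map */
            unsigned long divide_error_vmlinux = 0xffffffff81a00bd0UL; /* from the vmlinux   */
            unsigned long idt_table_vmlinux    = 0xffffffff81e09000UL; /* from the vmlinux   */
            unsigned long divide_error_vmcore  = 0xffffffff91a00bd0UL; /* read via IDT[0]    */
            unsigned long idtr_paddr           = 0x000000006be09000UL; /* kvtop(IDTR)        */

            unsigned long kaslr_offset = divide_error_vmcore - divide_error_vmlinux;
            unsigned long phys_base    = idtr_paddr -
                    (idt_table_vmlinux + kaslr_offset - START_KERNEL_map);

            printf("kaslr_offset = %lx\n", kaslr_offset);  /* 0x10000000 with these inputs */
            printf("phys_base    = %lx\n", phys_base);     /* 0x5a000000 with these inputs */
            return 0;
    }

If "elfcorehdr=" turns up on the saved command line, the dump was taken by the kdump (second) kernel, and both values are instead read from the VMCOREINFO note that elfcorehdr= points at, exactly as the long comment in sadump.c below spells out.
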
+diff --git a/defs.h b/defs.h
+index a694a66..76e5512 100644
+--- a/defs.h
++++ b/defs.h
+@@ -2591,6 +2591,9 @@ struct symbol_table_data {
+ 	ulong last_section_end;
+ 	ulong _stext_vmlinux;
+ 	struct downsized downsized;
++	ulong divide_error_vmlinux;
++	ulong idt_table_vmlinux;
++	ulong saved_command_line_vmlinux;
+ };
+ 
+ /* flags for st */
+@@ -6312,6 +6315,7 @@ void sadump_set_zero_excluded(void);
+ void sadump_unset_zero_excluded(void);
+ struct sadump_data;
+ struct sadump_data *get_sadump_data(void);
++int sadump_calc_kaslr_offset(ulong *);
+ 
+ /*
+  * qemu.c
+diff --git a/sadump.c b/sadump.c
+index a96ba9c..2ccfa82 100644
+--- a/sadump.c
++++ b/sadump.c
+@@ -1558,12 +1558,17 @@ sadump_display_regs(int cpu, FILE *ofp)
+  */
+ int sadump_phys_base(ulong *phys_base)
+ {
+-	if (SADUMP_VALID()) {
++	if (SADUMP_VALID() && !sd->phys_base) {
+ 		if (CRASHDEBUG(1))
+ 			error(NOTE, "sadump: does not save phys_base.\n");
+ 		return FALSE;
+ 	}
+ 
++	if (sd->phys_base) {
++		*phys_base = sd->phys_base;
++		return TRUE;
++	}
++
+ 	return FALSE;
+ }
+ 
+@@ -1649,3 +1654,461 @@ get_sadump_data(void)
+ {
+ 	return sd;
+ }
++
++#ifdef X86_64
++static int
++get_sadump_smram_cpu_state_any(struct sadump_smram_cpu_state *smram)
++{
++	ulong offset;
++	struct sadump_header *sh = sd->dump_header;
++	int apicid;
++	struct sadump_smram_cpu_state scs, zero;
++
++	offset = sd->sub_hdr_offset + sizeof(uint32_t) +
++		 sd->dump_header->nr_cpus * sizeof(struct sadump_apic_state);
++
++	memset(&zero, 0, sizeof(zero));
++
++	for (apicid = 0; apicid < sh->nr_cpus; ++apicid) {
++		if (!read_device(&scs, sizeof(scs), &offset)) {
++			error(INFO, "sadump: cannot read sub header "
++			      "cpu_state\n");
++			return FALSE;
++		}
++		if (memcmp(&scs, &zero, sizeof(scs)) != 0) {
++			*smram = scs;
++			return TRUE;
++		}
++	}
++
++	return FALSE;
++}
++
++/*
++ * Get the address of the vector0 interrupt handler (Divide Error) from the Interrupt
++ * Descriptor Table.
++ */
++static ulong
++get_vec0_addr(ulong idtr)
++{
++	struct gate_struct64 {
++		uint16_t offset_low;
++		uint16_t segment;
++		uint32_t ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
++		uint16_t offset_middle;
++		uint32_t offset_high;
++		uint32_t zero1;
++	} __attribute__((packed)) gate;
++
++	readmem(idtr, PHYSADDR, &gate, sizeof(gate), "idt_table", FAULT_ON_ERROR);
++
++	return ((ulong)gate.offset_high << 32)
++		+ ((ulong)gate.offset_middle << 16)
++		+ gate.offset_low;
++}
++
++/*
++ * Parse a string of [size[KMG] ]offset[KMG]
++ * Imported from the Linux kernel (lib/cmdline.c)
++ */
++static ulong memparse(char *ptr, char **retptr)
++{
++	char *endptr;
++
++	unsigned long long ret = strtoull(ptr, &endptr, 0);
++
++	switch (*endptr) {
++	case 'E':
++	case 'e':
++		ret <<= 10;
++	case 'P':
++	case 'p':
++		ret <<= 10;
++	case 'T':
++	case 't':
++		ret <<= 10;
++	case 'G':
++	case 'g':
++		ret <<= 10;
++	case 'M':
++	case 'm':
++		ret <<= 10;
++	case 'K':
++	case 'k':
++		ret <<= 10;
++		endptr++;
++	default:
++		break;
++	}
++
++	if (retptr)
++		*retptr = endptr;
++
++	return ret;
++}
++
++/*
++ * Find "elfcorehdr=" in the boot parameter of kernel and return the address
++ * of elfcorehdr.
++ */
++static ulong
++get_elfcorehdr(ulong cr3, ulong kaslr_offset)
++{
++	char cmdline[BUFSIZE], *ptr;
++	ulong cmdline_vaddr;
++	ulong cmdline_paddr;
++	ulong buf_vaddr, buf_paddr;
++	char *end;
++	ulong elfcorehdr_addr = 0, elfcorehdr_size = 0;
++	int verbose = CRASHDEBUG(1)? 1: 0;
++
++	cmdline_vaddr = st->saved_command_line_vmlinux + kaslr_offset;
++	if (!kvtop(NULL, cmdline_vaddr, &cmdline_paddr, verbose))
++		return 0;
++
++	if (CRASHDEBUG(1)) {
++		fprintf(fp, "cmdline vaddr=%lx\n", cmdline_vaddr);
++		fprintf(fp, "cmdline paddr=%lx\n", cmdline_paddr);
++	}
++
++	if (!readmem(cmdline_paddr, PHYSADDR, &buf_vaddr, sizeof(ulong),
++		     "saved_command_line", RETURN_ON_ERROR))
++		return 0;
++
++	if (!kvtop(NULL, buf_vaddr, &buf_paddr, verbose))
++		return 0;
++
++	if (CRASHDEBUG(1)) {
++		fprintf(fp, "cmdline buffer vaddr=%lx\n", buf_vaddr);
++		fprintf(fp, "cmdline buffer paddr=%lx\n", buf_paddr);
++	}
++
++	memset(cmdline, 0, BUFSIZE);
++	if (!readmem(buf_paddr, PHYSADDR, cmdline, BUFSIZE,
++		     "saved_command_line", RETURN_ON_ERROR))
++		return 0;
++
++	ptr = strstr(cmdline, "elfcorehdr=");
++	if (!ptr)
++		return 0;
++
++	if (CRASHDEBUG(1))
++		fprintf(fp, "2nd kernel detected\n");
++
++	ptr += strlen("elfcorehdr=");
++	elfcorehdr_addr = memparse(ptr, &end);
++	if (*end == '@') {
++		elfcorehdr_size = elfcorehdr_addr;
++		elfcorehdr_addr = memparse(end + 1, &end);
++	}
++
++	if (CRASHDEBUG(1)) {
++		fprintf(fp, "elfcorehdr_addr=%lx\n", elfcorehdr_addr);
++		fprintf(fp, "elfcorehdr_size=%lx\n", elfcorehdr_size);
++	}
++
++	return elfcorehdr_addr;
++}
++
++ /*
++  * Get vmcoreinfo from elfcorehdr.
++  * Some codes are imported from Linux kernel(fs/proc/vmcore.c)
++  */
++static int
++get_vmcoreinfo(ulong elfcorehdr, ulong *addr, int *len)
++{
++	unsigned char e_ident[EI_NIDENT];
++	Elf64_Ehdr ehdr;
++	Elf64_Phdr phdr;
++	Elf64_Nhdr nhdr;
++	ulong ptr;
++	ulong nhdr_offset = 0;
++	int i;
++
++	if (!readmem(elfcorehdr, PHYSADDR, e_ident, EI_NIDENT,
++		     "EI_NIDENT", RETURN_ON_ERROR))
++		return FALSE;
++
++	if (e_ident[EI_CLASS] != ELFCLASS64) {
++		error(INFO, "Only ELFCLASS64 is supportd\n");
++		return FALSE;
++	}
++
++	if (!readmem(elfcorehdr, PHYSADDR, &ehdr, sizeof(ehdr),
++			"Elf64_Ehdr", RETURN_ON_ERROR))
++		return FALSE;
++
++	/* Sanity Check */
++	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
++		(ehdr.e_type != ET_CORE) ||
++		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
++		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
++		ehdr.e_version != EV_CURRENT ||
++		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
++		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
++		ehdr.e_phnum == 0) {
++		error(INFO, "Invalid elf header\n");
++		return FALSE;
++	}
++
++	ptr = elfcorehdr + ehdr.e_phoff;
++	for (i = 0; i < ehdr.e_phnum; i++) {
++		ulong offset;
++		char name[16];
++
++		if (!readmem(ptr, PHYSADDR, &phdr, sizeof(phdr),
++				"Elf64_Phdr", RETURN_ON_ERROR))
++			return FALSE;
++
++		ptr += sizeof(phdr);
++		if (phdr.p_type != PT_NOTE)
++			continue;
++
++		offset = phdr.p_offset;
++		if (!readmem(offset, PHYSADDR, &nhdr, sizeof(nhdr),
++				"Elf64_Nhdr", RETURN_ON_ERROR))
++			return FALSE;
++
++		offset += DIV_ROUND_UP(sizeof(Elf64_Nhdr), sizeof(Elf64_Word))*
++			  sizeof(Elf64_Word);
++		memset(name, 0, sizeof(name));
++		if (!readmem(offset, PHYSADDR, name, sizeof(name),
++				"Elf64_Nhdr name", RETURN_ON_ERROR))
++			return FALSE;
++
++		if(!strcmp(name, "VMCOREINFO")) {
++			nhdr_offset = offset;
++			break;
++		}
++	}
++
++	if (!nhdr_offset)
++		return FALSE;
++
++	*addr = nhdr_offset +
++		DIV_ROUND_UP(nhdr.n_namesz, sizeof(Elf64_Word))*
++		sizeof(Elf64_Word);
++	*len = nhdr.n_descsz;
++
++	if (CRASHDEBUG(1)) {
++		fprintf(fp, "vmcoreinfo addr=%lx\n", *addr);
++		fprintf(fp, "vmcoreinfo len=%d\n", *len);
++	}
++
++	return TRUE;
++}
++
++/*
++ * Check if current kaslr_offset/phys_base is for 1st kernel or 2nd kernel.
++ * If we are in 2nd kernel, get kaslr_offset/phys_base from vmcoreinfo.
++ *
++ * 1. Get command line and try to retrieve "elfcorehdr=" boot parameter
++ * 2. If "elfcorehdr=" is not found in command line, we are in 1st kernel.
++ *    There is nothing to do.
++ * 3. If "elfcorehdr=" is found, we are in 2nd kernel. Find vmcoreinfo
++ *    using "elfcorehdr=" and retrieve kaslr_offset/phys_base from vmcoreinfo.
++ */
++static int
++get_kaslr_offset_from_vmcoreinfo(ulong cr3, ulong orig_kaslr_offset,
++		                 ulong *kaslr_offset, ulong *phys_base)
++{
++	ulong elfcorehdr_addr = 0;
++	ulong vmcoreinfo_addr;
++	int vmcoreinfo_len;
++	char *buf, *pos;
++	int ret = FALSE;
++
++	/* Find "elfcorehdr=" in the kernel boot parameter */
++	elfcorehdr_addr = get_elfcorehdr(cr3, orig_kaslr_offset);
++	if (!elfcorehdr_addr)
++		return FALSE;
++
++	/* Get vmcoreinfo from the address of "elfcorehdr=" */
++	if (!get_vmcoreinfo(elfcorehdr_addr, &vmcoreinfo_addr, &vmcoreinfo_len))
++		return FALSE;
++
++	if (!vmcoreinfo_len)
++		return FALSE;
++
++	if (CRASHDEBUG(1))
++		fprintf(fp, "Find vmcoreinfo in kdump memory\n");
++
++	buf = GETBUF(vmcoreinfo_len);
++	if (!readmem(vmcoreinfo_addr, PHYSADDR, buf, vmcoreinfo_len,
++			"vmcoreinfo", RETURN_ON_ERROR))
++		goto quit;
++
++	/* Get phys_base from vmcoreinfo */
++	pos = strstr(buf, "NUMBER(phys_base)=");
++	if (!pos)
++		goto quit;
++	*phys_base  = strtoull(pos + strlen("NUMBER(phys_base)="), NULL, 0);
++
++	/* Get kaslr_offset from vmcoreinfo */
++	pos = strstr(buf, "KERNELOFFSET=");
++	if (!pos)
++		goto quit;
++	*kaslr_offset = strtoull(pos + strlen("KERNELOFFSET="), NULL, 16);
++
++	ret = TRUE;
++
++quit:
++	FREEBUF(buf);
++	return ret;
++}
++
++/*
++ * Calculate kaslr_offset and phys_base
++ *
++ * kaslr_offset:
++ *   The difference between original address in System.map or vmlinux and
++ *   actual address placed randomly by kaslr feature. To be more accurate,
++ *   kaslr_offset = actual address  - original address
++ *
++ * phys_base:
++ *   Physical address where the kernel is placed. In other words, it's a
++ *   physical address of __START_KERNEL_map. This is also decided randomly by
++ *   kaslr.
++ *
++ * kaslr offset and phys_base are calculated as follows:
++ *
++ * kaslr_offset:
++ * 1) Get IDTR and CR3 value from the dump header.
++ * 2) Get a virtual address of IDT from IDTR value
++ *    --- (A)
++ * 3) Translate (A) to physical address using CR3, which points a top of
++ *    page table.
++ *    --- (B)
++ * 4) Get the address of the vector0 (Divide Error) interrupt handler from
++ *    the IDT, which is pointed to by (B).
++ *    --- (C)
++ * 5) Get the address of the symbol "divide_error" from vmlinux
++ *    --- (D)
++ *
++ * Now we have two addresses:
++ * (C)-> Actual address of "divide_error"
++ * (D)-> Original address of "divide_error" in the vmlinux
++ *
++ * kaslr_offset can be calculated by the difference between these two
++ * value.
++ *
++ * phys_base;
++ * 1) Get IDT virtual address from vmlinux
++ *    --- (E)
++ *
++ * So phys_base can be calculated using relationship of directly mapped
++ * address.
++ *
++ * phys_base =
++ *   Physical address(B) -
++ *   (Virtual address(E) + kaslr_offset - __START_KERNEL_map)
++ *
++ * Note that the address (A) cannot be used instead of (E) because (A) is
++ * not direct map address, it's a fixed map address.
++ *
++ * This solution works in almost every case, but does not work in the
++ * following cases.
++ *
++ * 1) If the dump is captured at an early stage of kernel boot, IDTR points
++ *    to the early IDT table (early_idts) instead of the normal IDT (idt_table).
++ * 2) If the dump is captured while kdump is working, IDTR points to the
++ *    IDT table of the 2nd kernel, not the 1st kernel.
++ *
++ * The current implementation does not support case 1); it needs
++ * enhancement in the future. For case 2), get kaslr_offset and
++ * phys_base as follows.
++ *
++ * 1) Get kaslr_offset and phys_base using the above solution.
++ * 2) Get kernel boot parameter from "saved_command_line"
++ * 3) If "elfcorehdr=" is not included in boot parameter, we are in the
++ *    first kernel, nothing to do any more.
++ * 4) If "elfcorehdr=" is included in boot parameter, we are in the 2nd
++ *    kernel. Retrieve vmcoreinfo from address of "elfcorehdr=" and
++ *    get kaslr_offset and phys_base from vmcoreinfo.
++ */
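A short worked example may help make the two formulas above concrete. All numbers are hypothetical, chosen only to illustrate the arithmetic, and __START_KERNEL_map is taken to be ffffffff80000000 as on x86_64:

    (D) divide_error in vmlinux:          ffffffff81040000
    (C) divide_error read via the IDT:    ffffffff8d040000
        kaslr_offset = (C) - (D)        =          c000000

    (E) idt_table in vmlinux:             ffffffff81e00000
    (B) physical address of the IDT:              4de00000
        phys_base = (B) - ((E) + kaslr_offset - __START_KERNEL_map)
                  = 4de00000 - de00000
                  = 40000000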
++int
++sadump_calc_kaslr_offset(ulong *kaslr_offset)
++{
++	ulong phys_base = 0;
++	struct sadump_smram_cpu_state scs;
++	uint64_t idtr = 0, cr3 = 0, idtr_paddr;
++	ulong divide_error_vmcore;
++	ulong kaslr_offset_kdump, phys_base_kdump;
++	int ret = FALSE;
++	int verbose = CRASHDEBUG(1)? 1: 0;
++
++	if (!machine_type("X86_64"))
++		return FALSE;
++
++	memset(&scs, 0, sizeof(scs));
++	get_sadump_smram_cpu_state_any(&scs);
++	cr3 = scs.Cr3;
++	idtr = ((uint64_t)scs.IdtUpper)<<32 | (uint64_t)scs.IdtLower;
++
++	/*
++	 * Set up for kvtop.
++	 *
++	 * calc_kaslr_offset() is called before machdep_init(PRE_GDB), so some
++	 * variables are not initialized yet. Set them up here to call kvtop().
++	 *
++	 * TODO: XEN and 5-level paging are not supported
++	 */
++	vt->kernel_pgd[0] = cr3;
++	machdep->machspec->last_pml4_read = vt->kernel_pgd[0];
++	machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_2_6;
++	machdep->machspec->pgdir_shift = PGDIR_SHIFT;
++	if (!readmem(cr3, PHYSADDR, machdep->machspec->pml4, PAGESIZE(),
++			"cr3", RETURN_ON_ERROR))
++		goto quit;
++
++	/* Convert virtual address of IDT table to physical address */
++	if (!kvtop(NULL, idtr, &idtr_paddr, verbose))
++		goto quit;
++
++	/* Now we can calculate kaslr_offset and phys_base */
++	divide_error_vmcore = get_vec0_addr(idtr_paddr);
++	*kaslr_offset = divide_error_vmcore - st->divide_error_vmlinux;
++	phys_base = idtr_paddr -
++		(st->idt_table_vmlinux + *kaslr_offset - __START_KERNEL_map);
++
++	if (CRASHDEBUG(1)) {
++		fprintf(fp, "calc_kaslr_offset: idtr=%lx\n", idtr);
++		fprintf(fp, "calc_kaslr_offset: cr3=%lx\n", cr3);
++		fprintf(fp, "calc_kaslr_offset: idtr(phys)=%lx\n", idtr_paddr);
++		fprintf(fp, "calc_kaslr_offset: divide_error(vmlinux): %lx\n",
++			st->divide_error_vmlinux);
++		fprintf(fp, "calc_kaslr_offset: divide_error(vmcore): %lx\n",
++			divide_error_vmcore);
++	}
++
++	/*
++	 * Check if the current kaslr_offset/phys_base is for the 1st kernel or
++	 * the 2nd kernel. If we are in the 2nd kernel, get kaslr_offset and
++	 * phys_base from vmcoreinfo.
++	 */
++	if (get_kaslr_offset_from_vmcoreinfo(
++		cr3, *kaslr_offset, &kaslr_offset_kdump, &phys_base_kdump)) {
++		*kaslr_offset =  kaslr_offset_kdump;
++		phys_base =  phys_base_kdump;
++	}
++
++	if (CRASHDEBUG(1)) {
++		fprintf(fp, "calc_kaslr_offset: kaslr_offset=%lx\n",
++			*kaslr_offset);
++		fprintf(fp, "calc_kaslr_offset: phys_base=%lx\n", phys_base);
++	}
++
++	sd->phys_base = phys_base;
++	ret = TRUE;
++quit:
++	vt->kernel_pgd[0] = 0;
++	machdep->machspec->last_pml4_read = 0;
++	return ret;
++}
++#else
++int
++sadump_calc_kaslr_offset(ulong *kaslr_offset)
++{
++	return FALSE;
++}
++#endif /* X86_64 */
+diff --git a/sadump.h b/sadump.h
+index 7f8e384..681f5e4 100644
+--- a/sadump.h
++++ b/sadump.h
+@@ -219,6 +219,7 @@ struct sadump_data {
+ 	ulonglong backup_offset;
+ 
+ 	uint64_t max_mapnr;
++	ulong phys_base;
+ };
+ 
+ struct sadump_data *sadump_get_sadump_data(void);
+diff --git a/symbols.c b/symbols.c
+index 02cb34e..b2f2796 100644
+--- a/symbols.c
++++ b/symbols.c
+@@ -624,6 +624,9 @@ kaslr_init(void)
+ 			st->_stext_vmlinux = UNINITIALIZED;
+ 		}
+ 	}
++
++	if (SADUMP_DUMPFILE())
++		kt->flags2 |= KASLR_CHECK;
+ }
+ 
+ /*
+@@ -637,6 +640,19 @@ derive_kaslr_offset(bfd *abfd, int dynamic, bfd_byte *start, bfd_byte *end,
+ 	unsigned long relocate;
+ 	ulong _stext_relocated;
+ 
++	if (SADUMP_DUMPFILE()) {
++		ulong kaslr_offset = 0;
++
++		sadump_calc_kaslr_offset(&kaslr_offset);
++
++		if (kaslr_offset) {
++			kt->relocate = kaslr_offset * -1;
++			kt->flags |= RELOC_SET;
++		}
++
++		return;
++	}
++
+ 	if (ACTIVE()) {
+ 		_stext_relocated = symbol_value_from_proc_kallsyms("_stext");
+ 		if (_stext_relocated == BADVAL)
+@@ -3052,6 +3068,16 @@ dump_symbol_table(void)
+ 	else
+ 		fprintf(fp, "\n");
+ 
++	if (SADUMP_DUMPFILE()) {
++		fprintf(fp, "divide_error_vmlinux: %lx\n", st->divide_error_vmlinux);
++		fprintf(fp, "   idt_table_vmlinux: %lx\n", st->idt_table_vmlinux);
++		fprintf(fp, "saved_command_line_vmlinux: %lx\n", st->saved_command_line_vmlinux);
++	} else {
++		fprintf(fp, "divide_error_vmlinux: (unused)\n");
++		fprintf(fp, "   idt_table_vmlinux: (unused)\n");
++		fprintf(fp, "saved_command_line_vmlinux: (unused)\n");
++	}
++
+         fprintf(fp, "    symval_hash[%d]: %lx\n", SYMVAL_HASH,
+                 (ulong)&st->symval_hash[0]);
+ 
+@@ -12246,6 +12272,24 @@ numeric_forward(const void *P_x, const void *P_y)
+ 		}
+ 	}
+ 
++	if (SADUMP_DUMPFILE()) {
++		/* Needed for kaslr_offset and phys_base */
++		if (STREQ(x->name, "divide_error"))
++			st->divide_error_vmlinux = valueof(x);
++		else if (STREQ(y->name, "divide_error"))
++			st->divide_error_vmlinux = valueof(y);
++
++		if (STREQ(x->name, "idt_table"))
++			st->idt_table_vmlinux = valueof(x);
++		else if (STREQ(y->name, "idt_table"))
++			st->idt_table_vmlinux = valueof(y);
++
++		if (STREQ(x->name, "saved_command_line"))
++			st->saved_command_line_vmlinux = valueof(x);
++		else if (STREQ(y->name, "saved_command_line"))
++			st->saved_command_line_vmlinux = valueof(y);
++	}
++
+   	xs = bfd_get_section(x);
+   	ys = bfd_get_section(y);
+ 
+diff --git a/x86_64.c b/x86_64.c
+index 6e60dda..2f9e6db 100644
+--- a/x86_64.c
++++ b/x86_64.c
+@@ -194,6 +194,9 @@ x86_64_init(int when)
+ 			machdep->machspec->kernel_image_size = dtol(string, QUIET, NULL);
+ 			free(string);
+ 		}
++		if (SADUMP_DUMPFILE())
++			/* Needed for calculating kaslr_offset and phys_base */
++			machdep->kvtop = x86_64_kvtop;
+ 		break;
+ 
+ 	case PRE_GDB:
+@@ -2019,6 +2022,22 @@ x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbo
+ 	ulong pte;
+ 	physaddr_t physpage;
+ 
++	if (SADUMP_DUMPFILE() && !(machdep->flags & KSYMS_START)) {
++		/*
++		 * In the case of sadump, to calculate kaslr_offset and
++		 * phys_base, kvtop is called during symtab_init(). At this
++		 * stage phys_base is not initialized yet and x86_64_VTOP()
++		 * does not work. Jump to the page table translation code.
++		 */
++		FILL_PML4();
++		pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);
++		if (verbose) {
++			fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]);
++			fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4);
++		}
++		goto start_vtop_with_pagetable;
++	}
++
+         if (!IS_KVADDR(kvaddr))
+                 return FALSE;
+ 
+@@ -2065,6 +2084,8 @@ x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbo
+                		fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4);
+ 		}
+ 	}
++
++start_vtop_with_pagetable:
+ 	if (!(*pml4) & _PAGE_PRESENT)
+ 		goto no_kpage;
+ 	pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK;
+
+commit 4550bf32a5ec1d9b7b6d5099aaee6e8e363a7827
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Wed Oct 25 11:04:53 2017 -0400
+
+    Implemented a new "ps -y policy" option to filter the task display
+    by scheduling policy.  Applicable to standalone ps invocation as
+    well as via foreach.
+    (oleksandr@redhat.com)
+
+diff --git a/defs.h b/defs.h
+index 76e5512..4b4e331 100644
+--- a/defs.h
++++ b/defs.h
+@@ -1139,6 +1139,7 @@ extern struct machdep_table *machdep;
+ #define FOREACH_a_FLAG   (0x4000000)
+ #define FOREACH_G_FLAG   (0x8000000)
+ #define FOREACH_F_FLAG2 (0x10000000)
++#define FOREACH_y_FLAG  (0x20000000)
+ 
+ #define FOREACH_PS_EXCLUSIVE \
+   (FOREACH_g_FLAG|FOREACH_a_FLAG|FOREACH_t_FLAG|FOREACH_c_FLAG|FOREACH_p_FLAG|FOREACH_l_FLAG|FOREACH_r_FLAG|FOREACH_m_FLAG)
+@@ -1162,6 +1163,7 @@ struct foreach_data {
+ 	int comms;
+ 	int args;
+ 	int regexs;
++	int policy;
+ };
+ 
+ struct reference {       
+@@ -1992,6 +1994,7 @@ struct offset_table {                    /* stash of commonly-used offsets */
+ 	long mod_arch_specific_num_orcs;
+ 	long mod_arch_specific_orc_unwind_ip;
+ 	long mod_arch_specific_orc_unwind;
++	long task_struct_policy;
+ };
+ 
+ struct size_table {         /* stash of commonly-used sizes */
+@@ -2141,6 +2144,7 @@ struct size_table {         /* stash of commonly-used sizes */
+ 	long sk_buff_head_qlen;
+ 	long sk_buff_len;
+ 	long orc_entry;
++	long task_struct_policy;
+ };
+ 
+ struct array_table {
+@@ -4576,6 +4580,13 @@ enum type_code {
+  */
+ #define PF_EXITING 0x00000004  /* getting shut down */
+ #define PF_KTHREAD 0x00200000  /* I am a kernel thread */
++#define SCHED_NORMAL	0
++#define SCHED_FIFO	1
++#define SCHED_RR	2
++#define SCHED_BATCH	3
++#define SCHED_ISO	4
++#define SCHED_IDLE	5
++#define SCHED_DEADLINE	6
+ 
+ extern long _ZOMBIE_;
+ #define IS_ZOMBIE(task)   (task_state(task) & _ZOMBIE_)
+@@ -4603,6 +4614,7 @@ extern long _ZOMBIE_;
+ #define PS_NO_HEADER  (0x10000)
+ #define PS_MSECS      (0x20000)
+ #define PS_SUMMARY    (0x40000)
++#define PS_POLICY     (0x80000)
+ 
+ #define PS_EXCLUSIVE (PS_TGID_LIST|PS_ARGV_ENVP|PS_TIMES|PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN|PS_RLIMIT|PS_MSECS|PS_SUMMARY)
+ 
+@@ -4620,6 +4632,7 @@ struct psinfo {
+ 	} regex_data[MAX_PS_ARGS];
+ 	int regexs;
+ 	ulong *cpus;
++	int policy;
+ };
+ 
+ #define IS_A_NUMBER(X)      (decimal(X, 0) || hexadecimal(X, 0))
+@@ -4823,7 +4836,7 @@ char *strip_ending_char(char *, char);
+ char *strip_beginning_char(char *, char);
+ char *strip_comma(char *);
+ char *strip_hex(char *);
+-char *upper_case(char *, char *);
++char *upper_case(const char *, char *);
+ char *first_nonspace(char *);
+ char *first_space(char *);
+ char *replace_string(char *, char *, char);
+diff --git a/help.c b/help.c
+index f9c5792..efa55e0 100644
+--- a/help.c
++++ b/help.c
+@@ -844,7 +844,7 @@ char *help_foreach[] = {
+ "             net  run the \"net\" command  (optional flags: -s -S -R -d -x)",
+ "             set  run the \"set\" command",
+ "              ps  run the \"ps\" command  (optional flags: -G -s -p -c -t -l -a",
+-"                  -g -r)",
++"                  -g -r -y)",
+ "             sig  run the \"sig\" command (optional flag: -g)",
+ "            vtop  run the \"vtop\" command  (optional flags: -c -u -k)\n",
+ "     flag  Pass this optional flag to the command selected.",
+@@ -1250,7 +1250,7 @@ NULL
+ char *help_ps[] = {
+ "ps",
+ "display process status information",
+-"[-k|-u|-G] [-s] [-p|-c|-t|-[l|m][-C cpu]|-a|-g|-r|-S]\n     [pid | task | command] ...",
++"[-k|-u|-G|-y policy] [-s] [-p|-c|-t|-[l|m][-C cpu]|-a|-g|-r|-S]\n     [pid | task | command] ...",
+ "  This command displays process status for selected, or all, processes" ,
+ "  in the system.  If no arguments are entered, the process data is",
+ "  is displayed for all processes.  Specific processes may be selected",
+@@ -1267,6 +1267,16 @@ char *help_ps[] = {
+ "        -k  restrict the output to only kernel threads.",
+ "        -u  restrict the output to only user tasks.",
+ "        -G  display only the thread group leader in a thread group.",
++" -y policy  restrict the output to tasks having a specified scheduling policy",
++"            expressed by its integer value or by its (case-insensitive) name;",
++"            multiple policies may be entered in a comma-separated list:",
++"              0 or NORMAL",
++"              1 or FIFO",
++"              2 or RR",
++"              3 or BATCH",
++"              4 or ISO",
++"              5 or IDLE",
++"              6 or DEADLINE",
+ " ",
+ "  The process identifier types may be mixed.  For each task, the following",
+ "  items are displayed:",
+diff --git a/symbols.c b/symbols.c
+index b2f2796..f7599e8 100644
+--- a/symbols.c
++++ b/symbols.c
+@@ -8584,6 +8584,8 @@ dump_offset_table(char *spec, ulong makestruct)
+                 OFFSET(task_struct_prio));
+         fprintf(fp, "             task_struct_on_rq: %ld\n",
+                 OFFSET(task_struct_on_rq));
++        fprintf(fp, "            task_struct_policy: %ld\n",
++                OFFSET(task_struct_policy));
+ 
+ 	fprintf(fp, "              thread_info_task: %ld\n",
+                 OFFSET(thread_info_task));
+@@ -10211,6 +10213,7 @@ dump_offset_table(char *spec, ulong makestruct)
+         fprintf(fp, "                       pt_regs: %ld\n", SIZE(pt_regs));
+         fprintf(fp, "                   task_struct: %ld\n", SIZE(task_struct));
+         fprintf(fp, "             task_struct_flags: %ld\n", SIZE(task_struct_flags));
++        fprintf(fp, "            task_struct_policy: %ld\n", SIZE(task_struct_policy));
+         fprintf(fp, "                   thread_info: %ld\n", SIZE(thread_info));
+         fprintf(fp, "                 softirq_state: %ld\n", 
+ 		SIZE(softirq_state));
+diff --git a/task.c b/task.c
+index 362822c..5754159 100644
+--- a/task.c
++++ b/task.c
+@@ -109,6 +109,24 @@ static void show_ps_summary(ulong);
+ static void irqstacks_init(void);
+ static void parse_task_thread(int argcnt, char *arglist[], struct task_context *);
+ static void stack_overflow_check_init(void);
++static int has_sched_policy(ulong, ulong);
++static ulong task_policy(ulong);
++static ulong sched_policy_bit_from_str(const char *);
++static ulong make_sched_policy(const char *);
++
++static struct sched_policy_info {
++	ulong value;
++	char *name;
++} sched_policy_info[] = {
++	{ SCHED_NORMAL,		"NORMAL" },
++	{ SCHED_FIFO,		"FIFO" },
++	{ SCHED_RR,		"RR" },
++	{ SCHED_BATCH,		"BATCH" },
++	{ SCHED_ISO,		"ISO" },
++	{ SCHED_IDLE,		"IDLE" },
++	{ SCHED_DEADLINE,	"DEADLINE" },
++	{ ULONG_MAX,		NULL }
++};
+ 
+ /*
+  *  Figure out how much space will be required to hold the task context
+@@ -273,6 +291,8 @@ task_init(void)
+ 	MEMBER_OFFSET_INIT(task_struct_next_run, "task_struct", "next_run");
+ 	MEMBER_OFFSET_INIT(task_struct_flags, "task_struct", "flags");
+ 	MEMBER_SIZE_INIT(task_struct_flags, "task_struct", "flags");
++	MEMBER_OFFSET_INIT(task_struct_policy, "task_struct", "policy");
++	MEMBER_SIZE_INIT(task_struct_policy, "task_struct", "policy");
+         MEMBER_OFFSET_INIT(task_struct_pidhash_next,
+                 "task_struct", "pidhash_next");
+ 	MEMBER_OFFSET_INIT(task_struct_pgrp, "task_struct", "pgrp");
+@@ -2974,7 +2994,7 @@ cmd_ps(void)
+ 	cpuspec = NULL;
+ 	flag = 0;
+ 
+-        while ((c = getopt(argcnt, args, "SgstcpkuGlmarC:")) != EOF) {
++        while ((c = getopt(argcnt, args, "SgstcpkuGlmarC:y:")) != EOF) {
+                 switch(c)
+ 		{
+ 		case 'k':
+@@ -3075,6 +3095,11 @@ cmd_ps(void)
+ 			make_cpumask(cpuspec, psinfo.cpus, FAULT_ON_ERROR, NULL);
+ 			break;
+ 
++		case 'y':
++			flag |= PS_POLICY;
++			psinfo.policy = make_sched_policy(optarg);
++			break;
++
+ 		default:
+ 			argerrs++;
+ 			break;
+@@ -3218,6 +3243,8 @@ show_ps_data(ulong flag, struct task_context *tc, struct psinfo *psi)
+ 		return;
+ 	if ((flag & PS_KERNEL) && !is_kernel_thread(tc->task))
+ 		return;
++	if ((flag & PS_POLICY) && !has_sched_policy(tc->task, psi->policy))
++		return;
+ 	if (flag & PS_GROUP) {
+ 		if (flag & (PS_LAST_RUN|PS_MSECS))
+ 			error(FATAL, "-G not supported with -%c option\n",
+@@ -3336,7 +3363,7 @@ show_ps(ulong flag, struct psinfo *psi)
+ 
+ 		tc = FIRST_CONTEXT();
+ 		for (i = 0; i < RUNNING_TASKS(); i++, tc++)
+-			show_ps_data(flag, tc, NULL);
++			show_ps_data(flag, tc, psi);
+ 		
+ 		return;
+ 	}
+@@ -3391,7 +3418,7 @@ show_ps(ulong flag, struct psinfo *psi)
+ 				if (flag & PS_TIMES) 
+ 					show_task_times(tc, flag);
+ 				else
+-					show_ps_data(flag, tc, NULL);
++					show_ps_data(flag, tc, psi);
+ 			}
+ 		}
+ 	}
+@@ -3546,7 +3573,7 @@ show_milliseconds(struct task_context *tc, struct psinfo *psi)
+ 	sprintf(format, "[%c%dll%c] ", '%', c, 
+ 		pc->output_radix == 10 ? 'u' : 'x');
+ 
+-	if (psi) {
++	if (psi && psi->cpus) {
+ 		for (c = others = 0; c < kt->cpus; c++) {
+ 			if (!NUM_IN_BITMAP(psi->cpus, c))
+ 				continue;
+@@ -5366,6 +5393,27 @@ task_flags(ulong task)
+ }
+ 
+ /*
++ * Return task's policy as bitmask bit.
++ */
++static ulong
++task_policy(ulong task)
++{
++	ulong policy = 0;
++
++	fill_task_struct(task);
++
++	if (!tt->last_task_read)
++		return policy;
++
++	if (SIZE(task_struct_policy) == sizeof(unsigned int))
++		policy = 1 << UINT(tt->task_struct + OFFSET(task_struct_policy));
++	else
++		policy = 1 << ULONG(tt->task_struct + OFFSET(task_struct_policy));
++
++	return policy;
++}
++
++/*
+  *  Return a task's tgid.
+  */
+ ulong
+@@ -5797,7 +5845,7 @@ cmd_foreach(void)
+ 	BZERO(&foreach_data, sizeof(struct foreach_data));
+ 	fd = &foreach_data;
+ 
+-        while ((c = getopt(argcnt, args, "R:vomlgersStTpukcfFxhdaG")) != EOF) {
++        while ((c = getopt(argcnt, args, "R:vomlgersStTpukcfFxhdaGy:")) != EOF) {
+                 switch(c)
+ 		{
+ 		case 'R':
+@@ -5892,6 +5940,11 @@ cmd_foreach(void)
+ 			fd->flags |= FOREACH_G_FLAG;
+ 			break;
+ 
++		case 'y':
++			fd->flags |= FOREACH_y_FLAG;
++			fd->policy = make_sched_policy(optarg);
++			break;
++
+ 		default:
+ 			argerrs++;
+ 			break;
+@@ -6554,6 +6607,10 @@ foreach(struct foreach_data *fd)
+ 					cmdflags |= PS_GROUP;
+ 				if (fd->flags & FOREACH_s_FLAG)
+ 					cmdflags |= PS_KSTACKP;
++				if (fd->flags & FOREACH_y_FLAG) {
++					cmdflags |= PS_POLICY;
++					psinfo.policy = fd->policy;
++				}
+ 				/*
+ 				 * mutually exclusive flags
+ 				 */ 
+@@ -7389,6 +7446,82 @@ is_kernel_thread(ulong task)
+ }
+ 
+ /*
++ * Checks if task policy corresponds to given mask.
++ */
++static int
++has_sched_policy(ulong task, ulong policy)
++{
++	return !!(task_policy(task) & policy);
++}
++
++/*
++ * Converts sched policy name into mask bit.
++ */
++static ulong
++sched_policy_bit_from_str(const char *policy_str)
++{
++	struct sched_policy_info *info = NULL;
++	ulong policy = 0;
++	int found = 0;
++	char *upper = NULL;
++	/*
++	 * Once kernel gets more than 10 scheduling policies,
++	 * sizes of these arrays should be adjusted
++	 */
++	char digit[2] = { 0, 0 };
++	char hex[4] = { 0, 0, 0, 0 };
++
++	upper = GETBUF(strlen(policy_str) + 1);
++	upper_case(policy_str, upper);
++
++	for (info = sched_policy_info; info->name; info++) {
++		snprintf(digit, sizeof digit, "%lu", info->value);
++		/*
++		 * Not using %#lX format here since "0X" prefix
++		 * is not prepended if 0 value is given
++		 */
++		snprintf(hex, sizeof hex, "0X%lX", info->value);
++		if (strncmp(upper, info->name, strlen(info->name)) == 0 ||
++			strncmp(upper, digit, sizeof digit) == 0 ||
++			strncmp(upper, hex, sizeof hex) == 0) {
++			policy = 1 << info->value;
++			found = 1;
++			break;
++		}
++	}
++
++	FREEBUF(upper);
++
++	if (!found)
++		error(FATAL,
++			"%s: invalid scheduling policy\n", policy_str);
++
++	return policy;
++}
++
++/*
++ * Converts sched policy string set into bitmask.
++ */
++static ulong
++make_sched_policy(const char *policy_str)
++{
++	ulong policy = 0;
++	char *iter = NULL;
++	char *orig = NULL;
++	char *cur = NULL;
++
++	iter = STRDUPBUF(policy_str);
++	orig = iter;
++
++	while ((cur = strsep(&iter, ",")))
++		policy |= sched_policy_bit_from_str(cur);
++
++	FREEBUF(orig);
++
++	return policy;
++}
++
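To make the policy mask handling concrete, here is a sketch using the SCHED_* values defined in defs.h; the input string is hypothetical:

    /*
     * "FIFO,RR" -> (1 << SCHED_FIFO) | (1 << SCHED_RR) = 0x2 | 0x4 = 0x6.
     * A task whose task_struct.policy is SCHED_RR (2) then matches because
     * has_sched_policy() tests (1 << 2) & 0x6, which is non-zero.
     */

This is the same mask that cmd_ps() stores in psinfo.policy when "-y FIFO,RR" is given on the command line.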
++/*
+  *  Gather an arry of pointers to the per-cpu idle tasks.  The tasklist
+  *  argument must be at least the size of ulong[NR_CPUS].  There may be
+  *  junk in everything after the first entry on a single CPU box, so the
+diff --git a/tools.c b/tools.c
+index 886d7fb..186b703 100644
+--- a/tools.c
++++ b/tools.c
+@@ -423,9 +423,10 @@ strip_hex(char *line)
+  *  Turn a string into upper-case.
+  */
+ char *
+-upper_case(char *s, char *buf)
++upper_case(const char *s, char *buf)
+ {
+-	char *p1, *p2;
++	const char *p1;
++	char *p2;
+ 
+ 	p1 = s;
+ 	p2 = buf;
+
+commit d3909692e9f64e4a1ac440afa81e9efd6e9ea0b4
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Thu Oct 26 16:54:28 2017 -0400
+
+    Fix for the "kmem -[sS]" options on Linux 4.14 and later kernels that
+    contain commit 2482ddec670fb83717d129012bc558777cb159f7, titled
+    "mm: add SLUB free list pointer obfuscation".  Without the patch,
+    there will be numerous error messages of the type "kmem: <cache name>
+    slab: <address> invalid freepointer: <obfuscated address>".
+    (anderson@redhat.com)
+
+diff --git a/defs.h b/defs.h
+index 4b4e331..967fce0 100644
+--- a/defs.h
++++ b/defs.h
+@@ -1995,6 +1995,7 @@ struct offset_table {                    /* stash of commonly-used offsets */
+ 	long mod_arch_specific_orc_unwind_ip;
+ 	long mod_arch_specific_orc_unwind;
+ 	long task_struct_policy;
++	long kmem_cache_random;
+ };
+ 
+ struct size_table {         /* stash of commonly-used sizes */
+diff --git a/memory.c b/memory.c
+index fb534e8..9926199 100644
+--- a/memory.c
++++ b/memory.c
+@@ -75,7 +75,7 @@ struct meminfo {           /* general purpose memory information structure */
+ 	ulong container;
+ 	int *freelist;
+ 	int freelist_index_size;
+-
++	ulong random;
+ };
+ 
+ /*
+@@ -293,6 +293,7 @@ static void dump_per_cpu_offsets(void);
+ static void dump_page_flags(ulonglong);
+ static ulong kmem_cache_nodelists(ulong);
+ static void dump_hstates(void);
++static ulong freelist_ptr(struct meminfo *, ulong, ulong);
+ 
+ /*
+  *  Memory display modes specific to this file.
+@@ -726,6 +727,7 @@ vm_init(void)
+ 		MEMBER_OFFSET_INIT(kmem_cache_red_left_pad, "kmem_cache", "red_left_pad");
+ 		MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name");
+ 		MEMBER_OFFSET_INIT(kmem_cache_flags, "kmem_cache", "flags");
++		MEMBER_OFFSET_INIT(kmem_cache_random, "kmem_cache", "random");
+ 		MEMBER_OFFSET_INIT(kmem_cache_cpu_freelist, "kmem_cache_cpu", "freelist");
+ 		MEMBER_OFFSET_INIT(kmem_cache_cpu_page, "kmem_cache_cpu", "page");
+ 		MEMBER_OFFSET_INIT(kmem_cache_cpu_node, "kmem_cache_cpu", "node");
+@@ -18000,6 +18002,9 @@ dump_kmem_cache_slub(struct meminfo *si)
+ 		si->slabsize = (PAGESIZE() << order);
+ 		si->inuse = si->num_slabs = 0;
+ 		si->slab_offset = offset;
++		si->random = VALID_MEMBER(kmem_cache_random) ?
++			ULONG(si->cache_buf + OFFSET(kmem_cache_random)) : 0;
++
+ 		if (!get_kmem_cache_slub_data(GET_SLUB_SLABS, si) ||
+ 		    !get_kmem_cache_slub_data(GET_SLUB_OBJECTS, si))
+ 			si->flags |= SLAB_GATHER_FAILURE;
+@@ -18587,6 +18592,15 @@ count_free_objects(struct meminfo *si, ulong freelist)
+ 	return c;
+ }
+ 
++static ulong
++freelist_ptr(struct meminfo *si, ulong ptr, ulong ptr_addr)
++{
++	if (si->random)
++		/* CONFIG_SLAB_FREELIST_HARDENED */
++		return (ptr ^ si->random ^ ptr_addr);
++	else
++		return ptr;
++}
+ 
+ static ulong
+ get_freepointer(struct meminfo *si, void *object)
+@@ -18601,7 +18615,7 @@ get_freepointer(struct meminfo *si, void *object)
+ 		return BADADDR;
+ 	}
+ 
+-	return nextfree;
++	return (freelist_ptr(si, nextfree, vaddr));
+ }
+ 
+ static void
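As a quick illustration of the hardened-freelist decoding above (all values hypothetical): if kmem_cache.random is 0xaaaa, the address of the free pointer within the object is 0x5555, and the raw value read from memory is 0xf0f0, then the real next-free pointer is 0xf0f0 ^ 0xaaaa ^ 0x5555 = 0x0f0f. When the kernel was built without CONFIG_SLAB_FREELIST_HARDENED, the kmem_cache has no "random" member, si->random stays 0, and freelist_ptr() returns the pointer unchanged.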
+diff --git a/symbols.c b/symbols.c
+index f7599e8..8a4c878 100644
+--- a/symbols.c
++++ b/symbols.c
+@@ -9378,6 +9378,8 @@ dump_offset_table(char *spec, ulong makestruct)
+                 OFFSET(kmem_cache_cpu_cache));
+         fprintf(fp, "                 kmem_cache_oo: %ld\n",
+                 OFFSET(kmem_cache_oo));
++        fprintf(fp, "             kmem_cache_random: %ld\n",
++                OFFSET(kmem_cache_random));
+ 
+         fprintf(fp, "    kmem_cache_node_nr_partial: %ld\n",
+                 OFFSET(kmem_cache_node_nr_partial));
+
+commit e81db08bc69fb1a7a7e48f892c2038d992a71f6d
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Fri Oct 27 14:10:43 2017 -0400
+
+    Fix for the validation of the bits located in the least significant
+    bits of mem_section.section_mem_map pointers.  Without the patch,
+    the validation functions always returned valid, due to a coding
+    error found by clang.  However, it was never really a problem
+    because it is extremely unlikely that an existing mem_section would
+    ever be invalid.
+    (oleksandr@redhat.com, anderson@redhat.com)
+
+diff --git a/memory.c b/memory.c
+index 9926199..60594a4 100644
+--- a/memory.c
++++ b/memory.c
+@@ -17003,8 +17003,8 @@ valid_section(ulong addr)
+ 
+ 	if ((mem_section = read_mem_section(addr)))
+         	return (ULONG(mem_section + 
+-			OFFSET(mem_section_section_mem_map)) && 
+-			SECTION_MARKED_PRESENT);
++			OFFSET(mem_section_section_mem_map))
++			& SECTION_MARKED_PRESENT);
+ 	return 0;
+ }
+ 
+@@ -17012,11 +17012,17 @@ int
+ section_has_mem_map(ulong addr)
+ {
+ 	char *mem_section;
++	ulong kernel_version_bit;
++
++	if (THIS_KERNEL_VERSION >= LINUX(2,6,24))
++		kernel_version_bit = SECTION_HAS_MEM_MAP;
++	else
++		kernel_version_bit = SECTION_MARKED_PRESENT;
+ 
+ 	if ((mem_section = read_mem_section(addr)))
+ 		return (ULONG(mem_section + 
+ 			OFFSET(mem_section_section_mem_map))
+-			&& SECTION_HAS_MEM_MAP);
++			& kernel_version_bit);
+ 	return 0;
+ }
+ 
+
+commit 0f40db8fbac538ea448bbb2beb44912e4c43a54a
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Mon Oct 30 14:20:41 2017 -0400
+
+    Fix for the x86_64 kernel virtual address to physical address
+    translation mechanism.  Without the patch, when verifying that the
+    PAGE_PRESENT bit is set in the top-level page table, it would always
+    test positively, and the translation would continue parsing the
+    remainder of the page tables.  This would virtually never be a
+    problem in practice because if the top-level page table entry
+    existed, its PAGE_PRESENT bit would be set.
+    (oleksandr@redhat.com, anderson@redhat.com)
+
+diff --git a/x86_64.c b/x86_64.c
+index 2f9e6db..7d01140 100644
+--- a/x86_64.c
++++ b/x86_64.c
+@@ -2086,7 +2086,7 @@ x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbo
+ 	}
+ 
+ start_vtop_with_pagetable:
+-	if (!(*pml4) & _PAGE_PRESENT)
++	if (!(*pml4 & _PAGE_PRESENT))
+ 		goto no_kpage;
+ 	pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK;
+ 	FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE());
+@@ -2187,7 +2187,7 @@ x86_64_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, i
+ 		fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]);
+                 fprintf(fp, "PAGE DIRECTORY: %lx [machine]\n", *pml4);
+ 	}
+-	if (!(*pml4) & _PAGE_PRESENT)
++	if (!(*pml4 & _PAGE_PRESENT))
+ 		goto no_kpage;
+ 	pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK;
+ 	pgd_paddr = xen_m2p(pgd_paddr);
+
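The coding error fixed above is an operator-precedence issue: unary ! binds more tightly than binary &, so the old test evaluated (!*pml4) first. A minimal sketch with a hypothetical non-present entry:

    ulong pml4e = 0x62;          /* _PAGE_PRESENT (bit 0) is clear         */

    !(pml4e) & _PAGE_PRESENT     /* old: (0 & 1) == 0, never branches to   */
                                 /* no_kpage, entry looks "present"        */
    !(pml4e & _PAGE_PRESENT)     /* fixed: !(0) == 1, correctly branches   */
                                 /* to no_kpage                            */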
+commit 9339874f3764fe99a408aec1a814b19c77f5dfe1
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Mon Oct 30 14:33:15 2017 -0400
+
+    Removed a check for a negative block_size value in the SADUMP header
+    parsing function; block_size is unsigned and always non-negative.
+    (oleksandr@redhat.com)
+
+diff --git a/sadump.c b/sadump.c
+index 2ccfa82..6b912d4 100644
+--- a/sadump.c
++++ b/sadump.c
+@@ -157,9 +157,6 @@ read_dump_header(char *file)
+ 	}
+ 
+ restart:
+-	if (block_size < 0)
+-		return FALSE;
+-
+ 	if (!read_device(sph, block_size, &offset)) {
+ 		error(INFO, "sadump: cannot read partition header\n");
+ 		goto err;
+
+commit b2d1bba766118fddf43235f0bed483dff32ac6e0
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Mon Oct 30 14:46:32 2017 -0400
+
+    Removed a check for an impossible negative value when calculating
+    the beginning address used for the context value specified by
+    the "search -x <count>" option.
+    (oleksandr@redhat.com)
+
+diff --git a/memory.c b/memory.c
+index 60594a4..ebd671a 100644
+--- a/memory.c
++++ b/memory.c
+@@ -14467,7 +14467,7 @@ display_with_pre_and_post(void *bufptr, ulonglong addr, struct searchinfo *si)
+ 	}
+ 
+ 	amount = ctx * t;
+-	addr_d = addr - amount < 0 ? 0 : addr - amount;
++	addr_d = addr - amount;
+ 
+ 	display_memory(addr_d, ctx, flag, memtype, NULL);
+ 
+
+commit e2efacdd9b7b229747a78c743b2acc6d15280a8a
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Mon Oct 30 16:49:48 2017 -0400
+
+    Implemented a new "timer -C <cpu-specifier>" option that restricts
+    the timer or hrtimer output to the timer queue data associated with
+    one or more cpus.  For multiple cpus, the cpu-specifier uses the
+    standard comma or dash separated list format.
+    (oleksandr@redhat.com)
+
+diff --git a/help.c b/help.c
+index efa55e0..f7f61a1 100644
+--- a/help.c
++++ b/help.c
+@@ -2387,7 +2387,7 @@ NULL
+ char *help_timer[] = {
+ "timer",
+ "timer queue data",
+-"[-r]",
++"[-r][-C cpu]",
+ "  This command displays the timer queue entries, both old- and new-style,",
+ "  in chronological order.  In the case of the old-style timers, the",
+ "  timer_table array index is shown; in the case of the new-style timers, ",
+@@ -2397,6 +2397,8 @@ char *help_timer[] = {
+ "        chronological order.  In the case of the old-style hrtimers, the",
+ "        expiration time is a single value; in the new-style hrtimers, the",
+ "        expiration time is a range.",
++" -C cpu Restrict the output to one or more CPUs, where multiple cpu[s] can", 
++"        be specified, for example, as \"1,3,5\", \"1-3\", or \"1,3,5-7,10\".",
+ "\nEXAMPLES",
+ "    %s> timer",
+ "    JIFFIES",
+diff --git a/kernel.c b/kernel.c
+index 8e95573..4638495 100644
+--- a/kernel.c
++++ b/kernel.c
+@@ -38,18 +38,18 @@ static void display_bh_1(void);
+ static void display_bh_2(void);
+ static void display_bh_3(void);
+ static void display_bh_4(void);
+-static void dump_hrtimer_data(void);
++static void dump_hrtimer_data(const ulong *cpus);
+ static void dump_hrtimer_clock_base(const void *, const int);
+ static void dump_hrtimer_base(const void *, const int);
+ static void dump_active_timers(const void *, ulonglong);
+ static int get_expires_len(const int, const ulong *, const int);
+ static void print_timer(const void *);
+ static ulonglong ktime_to_ns(const void *);
+-static void dump_timer_data(void);
+-static void dump_timer_data_tvec_bases_v1(void);
+-static void dump_timer_data_tvec_bases_v2(void);
+-static void dump_timer_data_tvec_bases_v3(void);
+-static void dump_timer_data_timer_bases(void);
++static void dump_timer_data(const ulong *cpus);
++static void dump_timer_data_tvec_bases_v1(const ulong *cpus);
++static void dump_timer_data_tvec_bases_v2(const ulong *cpus);
++static void dump_timer_data_tvec_bases_v3(const ulong *cpus);
++static void dump_timer_data_timer_bases(const ulong *cpus);
+ struct tv_range;
+ static void init_tv_ranges(struct tv_range *, int, int, int);
+ static int do_timer_list(ulong,int, ulong *, void *,ulong *,struct tv_range *);
+@@ -7353,16 +7353,24 @@ cmd_timer(void)
+ {
+         int c;
+ 	int rflag;
++	char *cpuspec;
++	ulong *cpus = NULL;
+ 
+ 	rflag = 0;
+ 
+-        while ((c = getopt(argcnt, args, "r")) != EOF) {
++        while ((c = getopt(argcnt, args, "rC:")) != EOF) {
+                 switch(c)
+                 {
+ 		case 'r':
+ 			rflag = 1;
+ 			break;
+ 
++		case 'C':
++			cpuspec = optarg;
++			cpus = get_cpumask_buf();
++			make_cpumask(cpuspec, cpus, FAULT_ON_ERROR, NULL);
++			break;
++
+                 default:
+                         argerrs++;
+                         break;
+@@ -7373,15 +7381,18 @@ cmd_timer(void)
+                 cmd_usage(pc->curcmd, SYNOPSIS);
+ 
+ 	if (rflag)
+-		dump_hrtimer_data();
++		dump_hrtimer_data(cpus);
+ 	else
+-		dump_timer_data();
++		dump_timer_data(cpus);
++
++	if (cpus)
++		FREEBUF(cpus);
+ }
+ 
+ static void
+-dump_hrtimer_data(void)
++dump_hrtimer_data(const ulong *cpus)
+ {
+-	int i, j;
++	int i, j, k = 0;
+ 	int hrtimer_max_clock_bases, max_hrtimer_bases;
+ 	struct syment * hrtimer_bases;
+ 
+@@ -7405,7 +7416,10 @@ dump_hrtimer_data(void)
+ 	hrtimer_bases = per_cpu_symbol_search("hrtimer_bases");
+ 
+ 	for (i = 0; i < kt->cpus; i++) {
+-		if (i)
++		if (cpus && !NUM_IN_BITMAP(cpus, i))
++			continue;
++
++		if (k++)
+ 			fprintf(fp, "\n");
+ 
+ 		if (hide_offline_cpu(i)) {
+@@ -7752,7 +7766,7 @@ struct tv_range {
+ #define TVN (6)
+ 
+ static void
+-dump_timer_data(void)
++dump_timer_data(const ulong *cpus)
+ {
+ 	int i;
+ 	ulong timer_active;
+@@ -7773,16 +7787,16 @@ dump_timer_data(void)
+         struct tv_range tv[TVN];
+ 
+ 	if (kt->flags2 & TIMER_BASES) {
+-		dump_timer_data_timer_bases();
++		dump_timer_data_timer_bases(cpus);
+ 		return;
+ 	} else if (kt->flags2 & TVEC_BASES_V3) {
+-		dump_timer_data_tvec_bases_v3();
++		dump_timer_data_tvec_bases_v3(cpus);
+ 		return;
+ 	} else if (kt->flags & TVEC_BASES_V2) {
+-		dump_timer_data_tvec_bases_v2();
++		dump_timer_data_tvec_bases_v2(cpus);
+ 		return;
+ 	} else if (kt->flags & TVEC_BASES_V1) {
+-		dump_timer_data_tvec_bases_v1();
++		dump_timer_data_tvec_bases_v1(cpus);
+ 		return;
+ 	}
+ 		
+@@ -7924,7 +7938,7 @@ dump_timer_data(void)
+  */
+ 
+ static void
+-dump_timer_data_tvec_bases_v1(void)
++dump_timer_data_tvec_bases_v1(const ulong *cpus)
+ {
+ 	int i, cpu, tdx, flen;
+         struct timer_data *td;
+@@ -7947,6 +7961,11 @@ dump_timer_data_tvec_bases_v1(void)
+ 	cpu = 0;
+ 
+ next_cpu:
++	if (cpus && !NUM_IN_BITMAP(cpus, cpu)) {
++		if (++cpu < kt->cpus)
++			goto next_cpu;
++		return;
++	}
+ 
+         count = 0;
+         td = (struct timer_data *)NULL;
+@@ -8039,7 +8058,7 @@ next_cpu:
+  */
+ 
+ static void
+-dump_timer_data_tvec_bases_v2(void)
++dump_timer_data_tvec_bases_v2(const ulong *cpus)
+ {
+ 	int i, cpu, tdx, flen;
+         struct timer_data *td;
+@@ -8073,6 +8092,11 @@ dump_timer_data_tvec_bases_v2(void)
+ 	cpu = 0;
+ 
+ next_cpu:
++	if (cpus && !NUM_IN_BITMAP(cpus, cpu)) {
++		if (++cpu < kt->cpus)
++			goto next_cpu;
++		return;
++	}
+ 	/*
+ 	 * hide data of offline cpu and goto next cpu
+ 	 */
+@@ -8185,7 +8209,7 @@ next_cpu:
+  *  Linux 4.2 timers use new tvec_root, tvec and timer_list structures
+  */
+ static void
+-dump_timer_data_tvec_bases_v3(void)
++dump_timer_data_tvec_bases_v3(const ulong *cpus)
+ {
+ 	int i, cpu, tdx, flen;
+ 	struct timer_data *td;
+@@ -8216,6 +8240,11 @@ dump_timer_data_tvec_bases_v3(void)
+ 	cpu = 0;
+ 
+ next_cpu:
++	if (cpus && !NUM_IN_BITMAP(cpus, cpu)) {
++		if (++cpu < kt->cpus)
++			goto next_cpu;
++		return;
++	}
+ 	/*
+ 	 * hide data of offline cpu and goto next cpu
+ 	 */
+@@ -8758,9 +8787,9 @@ do_timer_list_v4(struct timer_bases_data *data)
+  *  Linux 4.8 timers use new timer_bases[][]
+  */
+ static void
+-dump_timer_data_timer_bases(void)
++dump_timer_data_timer_bases(const ulong *cpus)
+ {
+-	int i, cpu, flen, base, nr_bases, found, display;
++	int i, cpu, flen, base, nr_bases, found, display, j = 0;
+ 	struct syment *sp;
+ 	ulong timer_base, jiffies, function;
+ 	struct timer_bases_data data;
+@@ -8785,6 +8814,11 @@ dump_timer_data_timer_bases(void)
+ 		RJUST|LONG_DEC,MKSTR(jiffies)));
+ 
+ next_cpu:
++	if (cpus && !NUM_IN_BITMAP(cpus, cpu)) {
++		if (++cpu < kt->cpus)
++			goto next_cpu;
++		goto done;
++	}
+ 	/*
+ 	 * hide data of offline cpu and goto next cpu
+ 	 */
+@@ -8803,7 +8837,7 @@ next_cpu:
+ 	else
+ 		timer_base = sp->value;
+ 
+-	if (cpu)
++	if (j++)
+ 		fprintf(fp, "\n");
+ next_base:
+ 
diff --git a/SOURCES/github_ddace972_exception_frame.patch b/SOURCES/github_ddace972_exception_frame.patch
new file mode 100644
index 0000000..116d86b
--- /dev/null
+++ b/SOURCES/github_ddace972_exception_frame.patch
@@ -0,0 +1,30 @@
+commit ddace9720fe7582cd2c92000f75f1f261daa53fd
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Fri Feb 9 16:26:27 2018 -0500
+
+    Fix for the ARM64 "bt" command in kernels that contain commit
+    30d88c0e3ace625a92eead9ca0ad94093a8f59fe, titled "arm64: entry:
+    Apply BP hardening for suspicious interrupts from EL0".  Without
+    the patch, there may be invalid kernel kernel exception frames
+    displayed on an active task's kernel stack, often below a stackframe
+    of the "do_el0_ia_bp_hardening" function; the address translation
+    of the PC and LR values in the the bogus exception frame will
+    display "[unknown or invalid address]".
+    (anderson@redhat.com)
+
+
+--- crash-7.2.0/arm64.c.orig
++++ crash-7.2.0/arm64.c
+@@ -2411,8 +2411,10 @@ arm64_back_trace_cmd(struct bt_info *bt)
+ 
+ 		if (arm64_in_exception_text(bt->instptr) && INSTACK(stackframe.fp, bt)) {
+ 			if (!(bt->flags & BT_IRQSTACK) ||
+-			    (((stackframe.sp + SIZE(pt_regs)) < bt->stacktop)))
+-				exception_frame = stackframe.fp - KERN_EFRAME_OFFSET;
++			    ((stackframe.sp + SIZE(pt_regs)) < bt->stacktop)) {
++				if (arm64_is_kernel_exception_frame(bt, stackframe.fp - KERN_EFRAME_OFFSET))
++					exception_frame = stackframe.fp - KERN_EFRAME_OFFSET;
++			}
+ 		}
+ 
+ 		if ((bt->flags & BT_IRQSTACK) &&
diff --git a/SOURCES/github_f852f5ce_to_03a3e57b.patch b/SOURCES/github_f852f5ce_to_03a3e57b.patch
new file mode 100644
index 0000000..36fde99
--- /dev/null
+++ b/SOURCES/github_f852f5ce_to_03a3e57b.patch
@@ -0,0 +1,785 @@
+commit f852f5ce4d28f88308f0e555c067e63e3edd7f37
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Fri Nov 3 09:21:22 2017 -0400
+
+    Fix for a "ps -l" regression caused by the new "ps -y" option
+    introduced above.  Without the patch, the -l option generates a
+    segmentation violation if not accompanied by a -C cpu specifier
+    option.
+    (vinayakm.list@gmail.com)
+
+diff --git a/task.c b/task.c
+index 5754159..f2628b7 100644
+--- a/task.c
++++ b/task.c
+@@ -3485,7 +3485,7 @@ show_last_run(struct task_context *tc, struct psinfo *psi)
+ 	sprintf(format, "[%c%dll%c] ", '%', c, 
+ 		pc->output_radix == 10 ? 'u' : 'x');
+ 
+-	if (psi) {
++	if (psi && psi->cpus) {
+ 		for (c = others = 0; c < kt->cpus; c++) {
+ 			if (!NUM_IN_BITMAP(psi->cpus, c))
+ 				continue;
+
+commit 7ac1368cdca0fc2013bb3963456fcd2574c7cdd7
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Mon Nov 6 10:48:40 2017 -0500
+
+    Fix for the "kmem -i" and "kmem -V" options in Linux 4.8 and later
+    kernels containing commit 75ef7184053989118d3814c558a9af62e7376a58,
+    titled "mm, vmstat: add infrastructure for per-node vmstats".
+    Without the patch, the CACHED line of "kmem -i" shows 0, and the
+    VM_STAT section of "kmem -V" is missing entirely.
+    (vinayakm.list@gmail.com)
+
+diff --git a/memory.c b/memory.c
+index ebd671a..3097558 100644
+--- a/memory.c
++++ b/memory.c
+@@ -17340,30 +17340,43 @@ vm_stat_init(void)
+ 	int c ATTRIBUTE_UNUSED;
+         struct gnu_request *req;
+ 	char *start;
+-	long enum_value;
++	long enum_value, zc = -1;
++	int split_vmstat = 0, ni = 0;
+ 
+ 	if (vt->flags & VM_STAT)
+ 		return TRUE;
+ 
+-	if ((vt->nr_vm_stat_items == -1) || !symbol_exists("vm_stat"))
++	if ((vt->nr_vm_stat_items == -1) ||
++		(!symbol_exists("vm_stat") && !symbol_exists("vm_zone_stat")))
+ 		goto bailout;
+ 
+         /*
+          *  look for type: type = atomic_long_t []
+          */
+ 	if (LKCD_KERNTYPES()) {
+-        	if (!symbol_exists("vm_stat"))
++		if ((!symbol_exists("vm_stat") &&
++				!symbol_exists("vm_zone_stat")))
+ 			goto bailout;
+ 		/* 
+ 		 *  Just assume that vm_stat is an array; there is
+ 		 *  no symbol info in a kerntypes file. 
+ 		 */
+ 	} else {
+-		if (!symbol_exists("vm_stat") ||
+-		    get_symbol_type("vm_stat", NULL, NULL) != TYPE_CODE_ARRAY)
++		if (symbol_exists("vm_stat") &&
++		    get_symbol_type("vm_stat", NULL, NULL) == TYPE_CODE_ARRAY) {
++			vt->nr_vm_stat_items =
++				get_array_length("vm_stat", NULL, 0);
++		} else if (symbol_exists("vm_zone_stat") &&
++			get_symbol_type("vm_zone_stat",
++			NULL, NULL) == TYPE_CODE_ARRAY) {
++			vt->nr_vm_stat_items =
++				get_array_length("vm_zone_stat", NULL, 0)
++				+ get_array_length("vm_node_stat", NULL, 0);
++			split_vmstat = 1;
++			enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zc);
++		} else {
+ 			goto bailout;
+-
+-		vt->nr_vm_stat_items = get_array_length("vm_stat", NULL, 0);
++		}
+ 	}
+ 
+         open_tmpfile();
+@@ -17372,6 +17385,14 @@ vm_stat_init(void)
+         req->name = "zone_stat_item";
+         req->flags = GNU_PRINT_ENUMERATORS;
+         gdb_interface(req);
++
++	if (split_vmstat) {
++		req->command = GNU_GET_DATATYPE;
++		req->name = "node_stat_item";
++		req->flags = GNU_PRINT_ENUMERATORS;
++		gdb_interface(req);
++	}
++
+         FREEBUF(req);
+ 
+ 	stringlen = 1;
+@@ -17383,11 +17404,17 @@ vm_stat_init(void)
+ 			continue;
+ 		clean_line(buf);
+ 		c = parse_line(buf, arglist);
+-		if (STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) {
++		if ((!split_vmstat &&
++			STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) ||
++			(split_vmstat &&
++			STREQ(arglist[0], "NR_VM_NODE_STAT_ITEMS"))) {
+ 			if (LKCD_KERNTYPES())
+ 				vt->nr_vm_stat_items = 
+ 					MAX(atoi(arglist[2]), count);
+ 			break;
++		} else if (split_vmstat &&
++			STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) {
++			continue;
+ 		} else {
+ 			stringlen += strlen(arglist[0]);
+ 			count++;
+@@ -17409,18 +17436,24 @@ vm_stat_init(void)
+                 if (strstr(buf, "{") || strstr(buf, "}"))
+                         continue;
+ 		c = parse_line(buf, arglist);
+-		if (enumerator_value(arglist[0], &enum_value))
+-			i = enum_value;
+-		else {
++		if (!enumerator_value(arglist[0], &enum_value)) {
+ 			close_tmpfile();
+ 			goto bailout;
+ 		}
++
++		i = ni + enum_value;
++		if (!ni && (enum_value == zc)) {
++			ni = zc;
++			continue;
++		}
++
+ 		if (i < vt->nr_vm_stat_items) {
+ 			vt->vm_stat_items[i] = start;
+ 			strcpy(start, arglist[0]);
+ 			start += strlen(arglist[0]) + 1;
+ 		}
+         }
++
+ 	close_tmpfile();
+ 
+ 	vt->flags |= VM_STAT;
+@@ -17443,39 +17476,61 @@ dump_vm_stat(char *item, long *retval, ulong zone)
+ 	ulong *vp;
+ 	ulong location;
+ 	int i, maxlen, len;
++	long tc, zc = 0, nc = 0;
++	int split_vmstat = 0;
+ 
+ 	if (!vm_stat_init()) {
+ 		if (!item)
+ 			if (CRASHDEBUG(1))
+-				error(INFO, 
++				error(INFO,
+ 			    	    "vm_stat not available in this kernel\n");
+ 		return FALSE;
+ 	}
+ 
+ 	buf = GETBUF(sizeof(ulong) * vt->nr_vm_stat_items);
+ 
+-	location = zone ? zone : symbol_value("vm_stat");
+-
+-	readmem(location, KVADDR, buf, 
+-	    sizeof(ulong) * vt->nr_vm_stat_items, 
+-	    "vm_stat", FAULT_ON_ERROR);
++	if (symbol_exists("vm_node_stat") && symbol_exists("vm_zone_stat"))
++		split_vmstat = 1;
++	else
++		location = zone ? zone : symbol_value("vm_stat");
++
++	if (split_vmstat) {
++		enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zc);
++		location = zone ? zone : symbol_value("vm_zone_stat");
++		readmem(location, KVADDR, buf,
++			sizeof(ulong) * zc,
++			"vm_zone_stat", FAULT_ON_ERROR);
++		if (!zone) {
++			location = symbol_value("vm_node_stat");
++			enumerator_value("NR_VM_NODE_STAT_ITEMS", &nc);
++			readmem(location, KVADDR, buf + (sizeof(ulong) * zc),
++				sizeof(ulong) * nc,
++				"vm_node_stat", FAULT_ON_ERROR);
++		}
++		tc = zc + nc;
++	} else {
++		readmem(location, KVADDR, buf,
++			sizeof(ulong) * vt->nr_vm_stat_items,
++			"vm_stat", FAULT_ON_ERROR);
++		tc = vt->nr_vm_stat_items;
++	}
+ 
+ 	if (!item) {
+ 		if (!zone)
+ 			fprintf(fp, "  VM_STAT:\n");
+-		for (i = maxlen = 0; i < vt->nr_vm_stat_items; i++)
++		for (i = maxlen = 0; i < tc; i++)
+ 			if ((len = strlen(vt->vm_stat_items[i])) > maxlen)
+ 				maxlen = len;
+ 		vp = (ulong *)buf;
+-		for (i = 0; i < vt->nr_vm_stat_items; i++)
+-			fprintf(fp, "%s%s: %ld\n", 
++		for (i = 0; i < tc; i++)
++			fprintf(fp, "%s%s: %ld\n",
+ 				space(maxlen - strlen(vt->vm_stat_items[i])),
+ 				 vt->vm_stat_items[i], vp[i]);
+ 		return TRUE;
+ 	}
+ 
+ 	vp = (ulong *)buf;
+-	for (i = 0; i < vt->nr_vm_stat_items; i++) {
++	for (i = 0; i < tc; i++) {
+ 		if (STREQ(vt->vm_stat_items[i], item)) {
+ 			*retval = vp[i];
+ 			return TRUE;
+
+commit 333df037bc72aa81faf0904aaea29d43be2c724d
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Mon Nov 6 11:01:45 2017 -0500
+
+    Fix for Linux 4.11 and later kernels that contain kernel commit
+    4b3ef9daa4fc0bba742a79faecb17fdaaead083b, titled "mm/swap: split
+    swap cache into 64MB trunks".  Without the patch, the CACHED line
+    of "kmem -i" may show nonsensical data.
+    (vinayakm.list@gmail.com)
+
+diff --git a/memory.c b/memory.c
+index 3097558..7537c43 100644
+--- a/memory.c
++++ b/memory.c
+@@ -8236,7 +8236,44 @@ dump_kmeminfo(void)
+ 		char *swapper_space = GETBUF(SIZE(address_space));
+ 
+ 		swapper_space_nrpages = 0;
+-		if (symbol_exists("swapper_spaces") && 
++		if (symbol_exists("nr_swapper_spaces") &&
++			(len = get_array_length("nr_swapper_spaces",
++				NULL, 0))) {
++			char *nr_swapper_space =
++				GETBUF(len * sizeof(unsigned int));
++			readmem(symbol_value("nr_swapper_spaces"), KVADDR,
++				nr_swapper_space,  len * sizeof(unsigned int),
++				"nr_swapper_space", RETURN_ON_ERROR);
++			for (i = 0; i < len; i++) {
++				int j;
++				unsigned long sa;
++				unsigned int banks = UINT(nr_swapper_space +
++					(i * sizeof(unsigned int)));
++
++				if (!banks)
++					continue;
++
++				readmem(symbol_value("swapper_spaces") +
++					(i * sizeof(void *)),KVADDR,
++					&sa, sizeof(void *),
++					"swapper_space", RETURN_ON_ERROR);
++
++				if (!sa)
++					continue;
++
++				for (j = 0; j < banks; j++) {
++					readmem(sa + j * SIZE(address_space),
++						KVADDR, swapper_space,
++						SIZE(address_space),
++						"swapper_space",
++						RETURN_ON_ERROR);
++					swapper_space_nrpages +=
++						ULONG(swapper_space +
++						OFFSET(address_space_nrpages));
++				}
++			}
++			FREEBUF(nr_swapper_space);
++		} else if (symbol_exists("swapper_spaces") &&
+ 		    (len = get_array_length("swapper_spaces", NULL, 0))) {
+ 			for (i = 0; i < len; i++) {
+ 		    		if (!readmem(symbol_value("swapper_spaces") + 
+@@ -8253,7 +8290,7 @@ dump_kmeminfo(void)
+ 		    RETURN_ON_ERROR))
+ 			swapper_space_nrpages = ULONG(swapper_space + 
+ 				OFFSET(address_space_nrpages));
+-			
++
+ 		page_cache_size = nr_file_pages - swapper_space_nrpages -
+ 			buffer_pages;
+ 		FREEBUF(swapper_space);
+
+commit 613e5c7d6998c61880498537b4f288ef095cbe14
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Mon Nov 6 15:12:59 2017 -0500
+
+    Implemented a new "dev -D" option that is the same as "dev -d", but
+    filters out the display of disks that have no I/O in progress.
+    (oleksandr@redhat.com)
+
+diff --git a/dev.c b/dev.c
+index e46081e..3db898a 100644
+--- a/dev.c
++++ b/dev.c
+@@ -31,7 +31,7 @@ static const char *pci_strclass (uint, char *);
+ static const char *pci_strvendor(uint, char *); 
+ static const char *pci_strdev(uint, uint, char *); 
+ 
+-static void diskio_option(void);
++static void diskio_option(ulong flags);
+  
+ static struct dev_table {
+         ulong flags;
+@@ -42,6 +42,9 @@ struct dev_table *dt = &dev_table;
+ #define DEV_INIT    0x1
+ #define DISKIO_INIT 0x2
+ 
++#define DIOF_ALL	1 << 0
++#define DIOF_NONZERO	1 << 1
++
+ void
+ dev_init(void)
+ {
+@@ -93,11 +96,15 @@ cmd_dev(void)
+ 
+ 	flags = 0;
+ 
+-        while ((c = getopt(argcnt, args, "dpi")) != EOF) {
++        while ((c = getopt(argcnt, args, "dDpi")) != EOF) {
+                 switch(c)
+                 {
+ 		case 'd':
+-			diskio_option();
++			diskio_option(DIOF_ALL);
++			return;
++
++		case 'D':
++			diskio_option(DIOF_NONZERO);
+ 			return;
+ 
+ 		case 'i':
+@@ -4002,7 +4009,7 @@ init_iter(struct iter *i)
+ }
+ 
+ static void 
+-display_one_diskio(struct iter *i, unsigned long gendisk)
++display_one_diskio(struct iter *i, unsigned long gendisk, ulong flags)
+ {
+ 	char disk_name[BUFSIZE + 1];
+ 	char buf0[BUFSIZE];
+@@ -4028,6 +4035,10 @@ display_one_diskio(struct iter *i, unsigned long gendisk)
+ 		"gen_disk.major", FAULT_ON_ERROR);
+ 	i->get_diskio(queue_addr, &io);
+ 
++	if ((flags & DIOF_NONZERO)
++		&& (io.read + io.write == 0))
++		return;
++
+ 	fprintf(fp, "%s%s%s  %s%s%s%s  %s%5d%s%s%s%s%s",
+ 		mkstring(buf0, 5, RJUST|INT_DEC, (char *)(unsigned long)major),
+ 		space(MINSPACE),
+@@ -4055,7 +4066,7 @@ display_one_diskio(struct iter *i, unsigned long gendisk)
+ }
+ 
+ static void 
+-display_all_diskio(void)
++display_all_diskio(ulong flags)
+ {
+ 	struct iter i;
+ 	unsigned long gendisk;
+@@ -4089,7 +4100,7 @@ display_all_diskio(void)
+ 		mkstring(buf5, 5, RJUST, "DRV"));
+ 
+ 	while ((gendisk = i.next_disk(&i)) != 0)
+-		display_one_diskio(&i, gendisk);
++		display_one_diskio(&i, gendisk, flags);
+ }
+ 
+ static 
+@@ -4149,8 +4160,8 @@ void diskio_init(void)
+ }
+ 
+ static void 
+-diskio_option(void)
++diskio_option(ulong flags)
+ {
+ 	diskio_init();
+-	display_all_diskio();
++	display_all_diskio(flags);
+ }
+diff --git a/help.c b/help.c
+index f7f61a1..fa01bfb 100644
+--- a/help.c
++++ b/help.c
+@@ -2722,7 +2722,7 @@ NULL
+ char *help_dev[] = {
+ "dev",
+ "device data",
+-"[-i | -p | -d]",
++"[-i | -p | -d | -D]",
+ "  If no argument is entered, this command dumps character and block",
+ "  device data.\n",
+ "    -i  display I/O port usage; on 2.4 kernels, also display I/O memory usage.",
+@@ -2736,6 +2736,7 @@ char *help_dev[] = {
+ "           DRV: I/O requests that are in-flight in the device driver.",
+ "                If the device driver uses blk-mq interface, this field",
+ "                shows N/A(MQ).",
++"    -D  same as -d, but filter out disks with no in-progress I/O requests.",
+ "\nEXAMPLES",
+ "  Display character and block device data:\n",
+ "    %s> dev",
+
+commit 57eaba59bff54ab3158d3a909e9f64551e27accf
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Wed Nov 8 14:22:16 2017 -0500
+
+    If a line number request for a module text address initially fails,
+    force the embedded gdb module to complete its two-stage strategy
+    used for reading debuginfo symbol tables from module object files,
+    and then retry the line number extraction.  This automatically does
+    what the "mod -r" or "crash --readnow" options accomplish.
+    (anderson@redhat.com)
+
+diff --git a/defs.h b/defs.h
+index 967fce0..18f36b3 100644
+--- a/defs.h
++++ b/defs.h
+@@ -2688,6 +2688,7 @@ struct load_module {
+ 	struct syment *mod_init_symend;
+ 	ulong mod_percpu;
+ 	ulong mod_percpu_size;
++	struct objfile *loaded_objfile;
+ };
+ 
+ #define IN_MODULE(A,L) \
+@@ -4479,6 +4480,7 @@ struct gnu_request {
+     		struct symbol *sym;
+     		struct objfile *obj;
+   	} global_iterator;
++	struct load_module *lm;
+ };
+ 
+ /*
+diff --git a/gdb-7.6.patch b/gdb-7.6.patch
+index 094f01a..6aeffda 100644
+--- a/gdb-7.6.patch
++++ b/gdb-7.6.patch
+@@ -2323,3 +2323,72 @@ diff -up gdb-7.6/opcodes/configure.orig gdb-7.6/opcodes/configure
+      NO_WERROR="-Wno-error"
+  fi
+  
++--- gdb-7.6/gdb/symtab.c.orig
+++++ gdb-7.6/gdb/symtab.c
++@@ -5266,6 +5266,7 @@ gdb_get_line_number(struct gnu_request *
++ {
++         struct symtab_and_line sal;
++ 	struct symbol *sym;
+++	struct objfile *objfile;
++         CORE_ADDR pc;
++ 
++ #define LASTCHAR(s)      (s[strlen(s)-1])
++@@ -5281,8 +5282,22 @@ gdb_get_line_number(struct gnu_request *
++         sal = find_pc_line(pc, 0);
++ 
++ 	if (!sal.symtab) {
++-		req->buf[0] = '\0';
++-		return;
+++		/*
+++		 *  If a module address line number can't be found, it's typically
+++		 *  due to its addrmap still containing offset values because its 
+++		 *  objfile doesn't have full symbols loaded.
+++		 */
+++		if (req->lm) {
+++			objfile = req->lm->loaded_objfile;
+++			if (!objfile_has_full_symbols(objfile) && objfile->sf) { 
+++				objfile->sf->qf->expand_all_symtabs(objfile);
+++				sal = find_pc_line(pc, 0);
+++			}
+++		}
+++		if (!sal.symtab) {
+++			req->buf[0] = '\0';
+++			return;
+++		}
++ 	}
++ 
++         if (sal.symtab->filename && sal.symtab->dirname) {
++@@ -5557,7 +5572,6 @@ struct load_module *gdb_current_load_mod
++ static void 
++ gdb_add_symbol_file(struct gnu_request *req)
++ {
++-	register struct objfile *loaded_objfile = NULL;
++ 	register struct objfile *objfile;
++ 	register struct minimal_symbol *m;
++ 	struct load_module *lm;
++@@ -5576,6 +5590,7 @@ gdb_add_symbol_file(struct gnu_request *
++ 
++ 	req->name = lm->mod_namelist;
++ 	gdb_delete_symbol_file(req);
+++	lm->loaded_objfile = NULL;
++ 
++ 	if ((lm->mod_flags & MOD_NOPATCH) == 0) {
++ 	        for (i = 0 ; i < lm->mod_sections; i++) {
++@@ -5623,12 +5638,15 @@ gdb_add_symbol_file(struct gnu_request *
++ 
++         ALL_OBJFILES(objfile) {
++ 		if (same_file(objfile->name, lm->mod_namelist)) {
++-                        loaded_objfile = objfile;
+++			if (objfile->separate_debug_objfile)
+++				lm->loaded_objfile = objfile->separate_debug_objfile;
+++			else
+++				lm->loaded_objfile = objfile;
++ 			break;
++ 		}
++         }
++ 
++-	if (!loaded_objfile)
+++	if (!lm->loaded_objfile)
++                 req->flags |= GNU_COMMAND_FAILED;
++ }
++ 
+diff --git a/symbols.c b/symbols.c
+index 8a4c878..0d85ff7 100644
+--- a/symbols.c
++++ b/symbols.c
+@@ -3284,6 +3284,8 @@ dump_symbol_table(void)
+ 				lm->mod_section_data[s].size);
+ 		}
+ 
++		fprintf(fp, "        loaded_objfile: %lx\n", (ulong)lm->loaded_objfile);
++
+ 		if (CRASHDEBUG(1)) {
+         		for (sp = lm->mod_load_symtable; 
+ 			     sp < lm->mod_load_symend; sp++) {
+@@ -4100,6 +4102,7 @@ get_line_number(ulong addr, char *buf, int reserved)
+ 	struct load_module *lm;
+ 
+ 	buf[0] = NULLCHAR;
++	lm = NULL;
+ 
+ 	if (NO_LINE_NUMBERS() || !is_kernel_text(addr))
+ 		return(buf);
+@@ -4129,6 +4132,8 @@ get_line_number(ulong addr, char *buf, int reserved)
+ 		req->command = GNU_GET_LINE_NUMBER;
+ 		req->addr = addr;
+ 		req->buf = buf;
++		if (lm && lm->loaded_objfile)
++			req->lm = lm;
+ 		if ((sp = value_search(addr, NULL)))
+ 			req->name = sp->name;
+ 		gdb_interface(req);
+@@ -12025,6 +12030,7 @@ delete_load_module(ulong base_addr)
+ 			if (lm->mod_section_data)
+ 				free(lm->mod_section_data);
+ 			lm->mod_section_data = (struct mod_section_data *)0;
++			lm->loaded_objfile = NULL;
+ 		}
+ 		st->flags &= ~LOAD_MODULE_SYMS;
+ 		return;
+@@ -12061,6 +12067,7 @@ delete_load_module(ulong base_addr)
+ 			if (lm->mod_section_data)
+ 				free(lm->mod_section_data);
+ 			lm->mod_section_data = (struct mod_section_data *)0;
++			lm->loaded_objfile = NULL;
+                 } else if (lm->mod_flags & MOD_LOAD_SYMS)
+ 			st->flags |= LOAD_MODULE_SYMS;
+         }
+
+commit c8178eca9c74f81a7f803a58d339635cc152e8d9
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Thu Nov 9 11:39:05 2017 -0500
+
+    Update for support of Linux 4.14 and later PPC64 kernels where the
+    hash page table geometry accommodates a larger virtual address range.
+    Without the patch, the virtual-to-physical translation of user space
+    virtual addresses by "vm -p", "vtop", and "rd -u" may generate an
+    invalid translation or otherwise fail.
+    (hbathini@linux.vnet.ibm.com)
+
+diff --git a/defs.h b/defs.h
+index 18f36b3..9132075 100644
+--- a/defs.h
++++ b/defs.h
+@@ -3915,6 +3915,9 @@ struct efi_memory_desc_t {
+ #define PGD_INDEX_SIZE_L4_64K_3_10  12
+ #define PMD_INDEX_SIZE_L4_64K_4_6  5
+ #define PUD_INDEX_SIZE_L4_64K_4_6  5
++#define PMD_INDEX_SIZE_L4_64K_4_12 10
++#define PUD_INDEX_SIZE_L4_64K_4_12 7
++#define PGD_INDEX_SIZE_L4_64K_4_12 8
+ #define PTE_INDEX_SIZE_RADIX_64K  5
+ #define PMD_INDEX_SIZE_RADIX_64K  9
+ #define PUD_INDEX_SIZE_RADIX_64K  9
+diff --git a/ppc64.c b/ppc64.c
+index 84cec09..672ee60 100644
+--- a/ppc64.c
++++ b/ppc64.c
+@@ -447,10 +447,16 @@ ppc64_init(int when)
+ 				} else if (!(machdep->flags & BOOK3E) &&
+ 				    (THIS_KERNEL_VERSION >= LINUX(4,6,0))) {
+ 					m->l1_index_size = PTE_INDEX_SIZE_L4_64K_3_10;
+-					m->l2_index_size = PMD_INDEX_SIZE_L4_64K_4_6;
+-					m->l3_index_size = PUD_INDEX_SIZE_L4_64K_4_6;
+-					m->l4_index_size = PGD_INDEX_SIZE_L4_64K_3_10;
+ 
++					if (THIS_KERNEL_VERSION >= LINUX(4,12,0)) {
++						m->l2_index_size = PMD_INDEX_SIZE_L4_64K_4_12;
++						m->l3_index_size = PUD_INDEX_SIZE_L4_64K_4_12;
++						m->l4_index_size = PGD_INDEX_SIZE_L4_64K_4_12;
++					} else {
++						m->l2_index_size = PMD_INDEX_SIZE_L4_64K_4_6;
++						m->l3_index_size = PUD_INDEX_SIZE_L4_64K_4_6;
++						m->l4_index_size = PGD_INDEX_SIZE_L4_64K_3_10;
++					}
+ 				} else if (THIS_KERNEL_VERSION >= LINUX(3,10,0)) {
+ 					m->l1_index_size = PTE_INDEX_SIZE_L4_64K_3_10;
+ 					m->l2_index_size = PMD_INDEX_SIZE_L4_64K_3_10;
+
+commit 03a3e57b9ad849314e262cac37787604a9fe8362
+Author: Dave Anderson <anderson@redhat.com>
+Date:   Thu Nov 9 14:04:08 2017 -0500
+
+    Implemented a new "runq -T" option that displays the time lag of each
+    CPU relative to the most recent runqueue timestamp.
+    (oleksandr@redhat.com)
+
+diff --git a/help.c b/help.c
+index fa01bfb..e017b03 100644
+--- a/help.c
++++ b/help.c
+@@ -2532,7 +2532,7 @@ NULL
+ char *help_runq[] = {
+ "runq",
+ "run queue",
+-"[-t] [-m] [-g] [-c cpu(s)]",
++"[-t] [-T] [-m] [-g] [-c cpu(s)]",
+ "  With no argument, this command displays the tasks on the run queues",
+ "  of each cpu.",
+ " ",
+@@ -2541,6 +2541,8 @@ char *help_runq[] = {
+ "         whichever applies; following each cpu timestamp is the last_run or ",
+ "         timestamp value of the active task on that cpu, whichever applies, ",
+ "         along with the task identification.",
++"     -T  Display the time lag of each CPU relative to the most recent runqueue",
++"         timestamp.",
+ "     -m  Display the amount of time that the active task on each cpu has been",
+ "         running, expressed in a format consisting of days, hours, minutes, ",
+ "         seconds and milliseconds.",
+diff --git a/task.c b/task.c
+index f2628b7..724532d 100644
+--- a/task.c
++++ b/task.c
+@@ -55,6 +55,7 @@ static long rq_idx(int);
+ static long cpu_idx(int);
+ static void dump_runq(void);
+ static void dump_on_rq_timestamp(void);
++static void dump_on_rq_lag(void);
+ static void dump_on_rq_milliseconds(void);
+ static void dump_runqueues(void);
+ static void dump_prio_array(int, ulong, char *);
+@@ -8045,10 +8046,11 @@ cmd_runq(void)
+ 	ulong *cpus = NULL;
+ 	int sched_debug = 0;
+ 	int dump_timestamp_flag = 0;
++	int dump_lag_flag = 0;
+ 	int dump_task_group_flag = 0;
+ 	int dump_milliseconds_flag = 0;
+ 
+-        while ((c = getopt(argcnt, args, "dtgmc:")) != EOF) {
++        while ((c = getopt(argcnt, args, "dtTgmc:")) != EOF) {
+                 switch(c)
+                 {
+ 		case 'd':
+@@ -8057,6 +8059,9 @@ cmd_runq(void)
+ 		case 't':
+ 			dump_timestamp_flag = 1;
+ 			break;
++		case 'T':
++			dump_lag_flag = 1;
++			break;
+ 		case 'm':
+ 			dump_milliseconds_flag = 1;
+ 			break;
+@@ -8092,6 +8097,8 @@ cmd_runq(void)
+ 
+ 	if (dump_timestamp_flag)
+                 dump_on_rq_timestamp();
++	else if (dump_lag_flag)
++		dump_on_rq_lag();
+ 	else if (dump_milliseconds_flag)
+                 dump_on_rq_milliseconds();
+ 	else if (sched_debug)
+@@ -8177,6 +8184,90 @@ dump_on_rq_timestamp(void)
+ }
+ 
+ /*
++ * Runqueue timestamp struct for dump_on_rq_lag().
++ */
++struct runq_ts_info {
++	int cpu;
++	ulonglong ts;
++};
++
++/*
++ * Comparison function for dump_on_rq_lag().
++ * Sorts runqueue timestamps in descending order.
++ */
++static int
++compare_runq_ts(const void *p1, const void *p2)
++{
++	const struct runq_ts_info *ts1 = p1;
++	const struct runq_ts_info *ts2 = p2;
++
++	if (ts1->ts > ts2->ts)
++		return -1;
++
++	if (ts1->ts < ts2->ts)
++		return 1;
++
++	return 0;
++}
++
++/*
++ * Returns the number of decimal digits in x (used to right-align the CPU column).
++ */
++static ulong
++__log10ul(ulong x)
++{
++	ulong ret = 1;
++
++	while (x > 9) {
++		ret++;
++		x /= 10;
++	}
++
++	return ret;
++}
++
++/*
++ * Displays relative CPU lag.
++ */
++static void
++dump_on_rq_lag(void)
++{
++	struct syment *rq_sp;
++	int cpu;
++	ulong runq;
++	ulonglong timestamp;
++	struct runq_ts_info runq_ts[kt->cpus];
++
++	if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues")))
++		error(FATAL, "per-cpu runqueues do not exist\n");
++	if (INVALID_MEMBER(rq_timestamp))
++		option_not_supported('T');
++
++	for (cpu = 0; cpu < kt->cpus; cpu++) {
++		if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
++			runq = rq_sp->value + kt->__per_cpu_offset[cpu];
++		else
++			runq = rq_sp->value;
++
++		readmem(runq + OFFSET(rq_timestamp), KVADDR, &timestamp,
++				sizeof(ulonglong), "per-cpu rq timestamp",
++				FAULT_ON_ERROR);
++
++		runq_ts[cpu].cpu = cpu;
++		runq_ts[cpu].ts = timestamp;
++	}
++
++	qsort(runq_ts, (size_t)kt->cpus, sizeof(struct runq_ts_info), compare_runq_ts);
++
++	for (cpu = 0; cpu < kt->cpus; cpu++) {
++		fprintf(fp, "%sCPU %d: %.2lf secs\n",
++			space(2 + __log10ul(kt->cpus) - __log10ul(runq_ts[cpu].cpu)),
++			runq_ts[cpu].cpu,
++			((double)runq_ts[0].ts - (double)runq_ts[cpu].ts) / 1000000000.0);
++	}
++}
++
++/*
+  *  Displays the runqueue and active task timestamps of each cpu.
+  */
+ static void
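
The new dump_on_rq_lag() above sorts the sampled per-CPU runqueue timestamps in descending order and prints each CPU's distance, in seconds, behind the newest timestamp. A minimal stand-alone sketch of that computation (not part of the patch), with made-up nanosecond timestamps in place of the per-cpu readmem() plumbing:

/* rq_lag_sketch.c - the timestamp values below are invented for illustration */
#include <stdio.h>
#include <stdlib.h>

struct ts { int cpu; unsigned long long ns; };

/* Descending comparator, mirroring compare_runq_ts() above */
static int cmp_desc(const void *a, const void *b)
{
	const struct ts *x = a, *y = b;
	return (x->ns < y->ns) - (x->ns > y->ns);
}

int main(void)
{
	struct ts s[] = { {0, 4416409500612ULL}, {1, 4416409494376ULL},
			  {2, 4416409501234ULL}, {3, 4416406081113ULL} };
	int i, n = sizeof(s) / sizeof(s[0]);

	qsort(s, n, sizeof(s[0]), cmp_desc);
	for (i = 0; i < n; i++)      /* s[0] holds the most recent timestamp */
		printf("CPU %d: %.2f secs\n", s[i].cpu,
		       (double)(s[0].ns - s[i].ns) / 1000000000.0);
	return 0;
}
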
diff --git a/SOURCES/next_online_node.patch b/SOURCES/next_online_node.patch
deleted file mode 100644
index 638b4bd..0000000
--- a/SOURCES/next_online_node.patch
+++ /dev/null
@@ -1,14 +0,0 @@
---- crash-7.1.9/memory.c.orig
-+++ crash-7.1.9/memory.c
-@@ -17198,10 +17198,8 @@ next_online_node(int first)
- 	int i, j, node;
- 	ulong mask, *maskptr;
- 
--	if ((first/BITS_PER_LONG) >= vt->node_online_map_len) {
--		error(INFO, "next_online_node: %d is too large!\n", first);
-+	if ((first/BITS_PER_LONG) >= vt->node_online_map_len)
- 		return -1;
--	}
- 
- 	maskptr = (ulong *)vt->node_online_map;
- 	for (i = node = 0; i <  vt->node_online_map_len; i++, maskptr++) {
diff --git a/SOURCES/ppc64le_vmalloc.patch b/SOURCES/ppc64le_vmalloc.patch
deleted file mode 100644
index 01f4ea0..0000000
--- a/SOURCES/ppc64le_vmalloc.patch
+++ /dev/null
@@ -1,14 +0,0 @@
---- crash-7.1.9/defs.h.orig
-+++ crash-7.1.9/defs.h
-@@ -3884,8 +3884,9 @@ struct efi_memory_desc_t {
- #define PMD_MASKED_BITS_64K_4_6  0xc0000000000000ffUL
- 
- #define PTE_RPN_MASK_DEFAULT  0xffffffffffffffffUL
--#define PTE_RPN_SIZE_L4_4_6   (PAGESIZE() == PPC64_64K_PAGE_SIZE ? 41 : 45)
--#define PTE_RPN_MASK_L4_4_6   (((1UL << PTE_RPN_SIZE_L4_4_6) - 1) << PAGESHIFT())
-+#define PAGE_PA_MAX_L4_4_6    (THIS_KERNEL_VERSION >= LINUX(4,11,0) ? 53 : 57)
-+#define PTE_RPN_MASK_L4_4_6   \
-+	(((1UL << PAGE_PA_MAX_L4_4_6) - 1) & ~((1UL << PAGESHIFT()) - 1))
- #define PTE_RPN_SHIFT_L4_4_6  PAGESHIFT()
- 
- #define PGD_MASKED_BITS_4_7  0xc0000000000000ffUL
diff --git a/SPECS/crash.spec b/SPECS/crash.spec
index 9b1945a..8c5171f 100644
--- a/SPECS/crash.spec
+++ b/SPECS/crash.spec
@@ -3,8 +3,8 @@
 #
 Summary: Kernel analysis utility for live systems, netdump, diskdump, kdump, LKCD or mcore dumpfiles
 Name: crash
-Version: 7.1.9
-Release: 2.p2%{?dist}
+Version: 7.2.0
+Release: 6%{?dist}
 License: GPLv3
 Group: Development/Debuggers
 Source: http://people.redhat.com/anderson/crash-%{version}.tar.gz
@@ -15,9 +15,13 @@ Buildroot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot-%(%{__id_u} -n)
 BuildRequires: ncurses-devel zlib-devel lzo-devel bison snappy-devel
 Requires: binutils
 Patch0: lzo_snappy.patch
-Patch1: github_87179026_to_ad3b8476.patch
-Patch2: ppc64le_vmalloc.patch
-Patch3: next_online_node.patch
+Patch1: github_da9bd35a_to_e2efacdd.patch
+Patch2: github_f852f5ce_to_03a3e57b.patch
+Patch3: github_494a796e_to_63419fb9.patch
+Patch4: github_d833432f_kpti_trampoline.patch
+Patch5: github_1e488cfe_to_1160ba19.patch
+Patch6: github_a38e3ec4_machine_kexec.patch
+Patch7: github_ddace972_exception_frame.patch
 
 %description
 The core analysis suite is a self-contained tool that can be used to
@@ -40,8 +44,12 @@ offered by Mission Critical Linux, or the LKCD kernel patch.
 %setup -n %{name}-%{version} -q
 %patch0 -p1 -b lzo_snappy.patch
 %patch1 -p1 -b github_87179026_to_ad3b8476.patch
-%patch2 -p1 -b ppc64le_vmalloc.patch
-%patch3 -p1 -b next_online_node.patch
+%patch2 -p1 -b github_f852f5ce_to_03a3e57b.patch
+%patch3 -p1 -b github_494a796e_to_63419fb9.patch
+%patch4 -p1 -b github_d833432f_kpti_trampoline.patch
+%patch5 -p1 -b github_1e488cfe_to_1160ba19.patch
+%patch6 -p1 -b github_a38e3ec4_machine_kexec.patch
+%patch7 -p1 -b github_ddace972_exception_frame.patch
 
 %build
 make RPMPKG="%{version}-%{release}" CFLAGS="%{optflags}"
@@ -70,13 +78,37 @@ rm -rf %{buildroot}
 %{_includedir}/*
 
 %changelog
-* Thu Oct 12 2017 Dave Anderson <anderson@redhat.com> - 7.1.9-2.p2
-- Remove harmless/useless message from next_online_node().
-  Resolves: rhbz#1442738
-
-* Fri Aug 25 2017 Dave Anderson <anderson@redhat.com> - 7.1.9-2.p1
-- Fix ppc64le vmalloc address translation
-  Resolves: rhbz#1483934
+* Mon Feb 12 2018 Dave Anderson <anderson@redhat.com> - 7.2.0-6
+- Fix arm64 backtrace issues seen in Linux 4.14
+  Resolves: rhbz#1542312
+
+* Fri Jan 26 2018 Dave Anderson <anderson@redhat.com> - 7.2.0-5
+- Additional support for analyzing an SADUMP dumpfile if KASLR
+  and KPTI are both enabled
+  Resolves: rhbz#1504467
+
+* Mon Jan 22 2018 Dave Anderson <anderson@redhat.com> - 7.2.0-4
+- Add support for KPTI entry trampoline stack
+  Resolves: rhbz#1534308
+ 
+* Thu Jan 11 2018 Dave Anderson <anderson@redhat.com> - 7.2.0-3
+- Rebase to github commits 494a796e to 63419fb9
+  Resolves: rhbz#1497316
+- Fix IRQ stack transition failure due to kernel's removal of 64-byte gap
+  Resolves: rhbz#1530887
+
+* Tue Nov 21 2017 Dave Anderson <anderson@redhat.com> - 7.2.0-2
+- Rebase to github commits f852f5ce to 03a3e57b
+  Resolves: rhbz#1497316
+
+* Wed Nov  1 2017 Dave Anderson <anderson@redhat.com> - 7.2.0-1
+- Rebase to upstream version 7.2.0
+- Rebase to github commits da9bd35a to e2efacdd
+  Resolves: rhbz#1497316
+- ppc64le: fix for "WARNING: cannot access vmalloc'd module memory"
+  Resolves: rhbz#1485391
+- Support for analyzing an SADUMP crash dump if KASLR is enabled
+  Resolves: rhbz#1504467
 
 * Wed May  3 2017 Dave Anderson <anderson@redhat.com> - 7.1.9-2
 - Rebase to github commits 87179026 to ad3b8476