diff -up ./arch/x86/include/asm/processor.h.gsi ./arch/x86/include/asm/processor.h
--- ./arch/x86/include/asm/processor.h.gsi	2018-03-22 06:40:12.000000000 +0900
+++ ./arch/x86/include/asm/processor.h	2018-04-16 21:12:06.000000000 +0900
@@ -527,6 +527,15 @@ struct stack_canary {
 };
 DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
+/*
+ * per-CPU IRQ handling stacks
+ */
+struct irq_stack {
+	u32                     stack[THREAD_SIZE/sizeof(u32)];
+} __aligned(THREAD_SIZE);
+
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
+DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
 #endif	/* X86_64 */
 
 extern unsigned int xstate_size;
diff -up ./arch/x86/include/asm/thread_info.h.gsi ./arch/x86/include/asm/thread_info.h
--- ./arch/x86/include/asm/thread_info.h.gsi	2018-03-22 06:40:12.000000000 +0900
+++ ./arch/x86/include/asm/thread_info.h	2018-04-16 23:01:00.000000000 +0900
@@ -9,6 +9,7 @@
 
 #include <linux/compiler.h>
 #include <asm/page.h>
+#include <asm/percpu.h>
 #include <asm/types.h>
 
 /*
@@ -34,9 +35,6 @@ struct thread_info {
 	struct restart_block    restart_block;
 	void __user		*sysenter_return;
 #ifdef CONFIG_X86_32
-	unsigned long           previous_esp;   /* ESP of the previous stack in
-						   case of nested (IRQ) stacks
-						*/
 	__u8			supervisor_stack[0];
 #endif
 	unsigned int		sig_on_uaccess_error:1;
@@ -159,9 +157,9 @@ struct thread_info {
 
 #define PREEMPT_ACTIVE		0x10000000
 
-#ifdef CONFIG_X86_32
+#define STACK_WARN		(THREAD_SIZE/8)
+#define KERNEL_STACK_OFFSET	(5*(BITS_PER_LONG/8))
 
-#define STACK_WARN	(THREAD_SIZE/8)
 /*
  * macros/functions for gaining access to the thread information structure
  *
@@ -173,30 +171,9 @@ struct thread_info {
 /* how to get the current stack pointer from C */
 register unsigned long current_stack_pointer asm("esp") __used;
 
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-	return (struct thread_info *)
-		(current_stack_pointer & ~(THREAD_SIZE - 1));
-}
-
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg)	 \
-	movl $-THREAD_SIZE, reg; \
-	andl %esp, reg
-
-/* use this one if reg already contains %esp */
-#define GET_THREAD_INFO_WITH_ESP(reg) \
-	andl $-THREAD_SIZE, reg
-
 #endif
 
-#else /* X86_32 */
-
 #include <asm/percpu.h>
-#define KERNEL_STACK_OFFSET (5*8)
 
 /*
  * macros/functions for gaining access to the thread information structure
@@ -220,8 +197,8 @@ static inline struct thread_info *curren
 
 /* how to get the thread information struct from ASM */
 #define GET_THREAD_INFO(reg) \
-	movq PER_CPU_VAR(kernel_stack),reg ; \
-	subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+	_ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
+	_ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
 
 /*
  * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
@@ -231,8 +208,6 @@ static inline struct thread_info *curren
 
 #endif
 
-#endif /* !X86_32 */
-
 /*
  * Thread-synchronous status.
  *
diff -up ./arch/x86/kernel/cpu/common.c.gsi ./arch/x86/kernel/cpu/common.c
--- ./arch/x86/kernel/cpu/common.c.gsi	2018-04-15 20:28:45.000000000 +0900
+++ ./arch/x86/kernel/cpu/common.c	2018-04-16 23:48:12.000000000 +0900
@@ -1286,6 +1286,13 @@ static __init int setup_clearcpuid(char
 }
 __setup("clearcpuid=", setup_clearcpuid);
 
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+DEFINE_PER_CPU(unsigned long, __kernel_stack_70__) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE - 8192;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
+EXPORT_PER_CPU_SYMBOL(__kernel_stack_70__);
+
 #ifdef CONFIG_X86_64
 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
 struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
@@ -1302,12 +1309,6 @@ DEFINE_PER_CPU(struct task_struct *, cur
 	&init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
-DEFINE_PER_CPU(unsigned long, kernel_stack) =
-	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
-DEFINE_PER_CPU(unsigned long, __kernel_stack_70__) =
-	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE - 8192;
-EXPORT_PER_CPU_SYMBOL(kernel_stack);
-EXPORT_PER_CPU_SYMBOL(__kernel_stack_70__);
 
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE;
diff -up ./arch/x86/kernel/dumpstack_32.c.gsi ./arch/x86/kernel/dumpstack_32.c
--- ./arch/x86/kernel/dumpstack_32.c.gsi	2018-04-16 21:12:06.000000000 +0900
+++ ./arch/x86/kernel/dumpstack_32.c	2018-04-17 19:43:45.000000000 +0900
@@ -17,6 +17,95 @@
 #include <asm/stacktrace.h>
 
 
+void stack_type_str(enum stack_type type, const char **begin, const char **end)
+{
+	switch (type) {
+	case STACK_TYPE_IRQ:
+		*begin = "IRQ";
+		*end   = "EOI";
+		break;
+	case STACK_TYPE_SOFTIRQ:
+		*begin = "SOFTIRQ";
+		*end   = "EOI";
+		break;
+#if 0 /*{*/
+	case STACK_TYPE_ENTRY:
+		*begin = "ENTRY_TRAMPOLINE";
+		*end   = "EOE";
+#endif /*}*/
+	default:
+		*begin = NULL;
+		*end   = NULL;
+	}
+}
+
+static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
+{
+	unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack);
+	unsigned long *end   = begin + (THREAD_SIZE / sizeof(long));
+
+	if (stack < begin || stack >= end)
+		return false;
+
+	info->type	= STACK_TYPE_IRQ;
+	info->begin	= begin;
+	info->end	= end;
+
+	/*
+	 * See irq_32.c -- the next stack pointer is stored at the beginning of
+	 * the stack.
+	 */
+	info->next_sp	= (unsigned long *)*begin;
+
+	return true;
+}
+
+static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
+{
+	unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack);
+	unsigned long *end   = begin + (THREAD_SIZE / sizeof(long));
+
+	if (stack < begin || stack >= end)
+		return false;
+
+	info->type	= STACK_TYPE_SOFTIRQ;
+	info->begin	= begin;
+	info->end	= end;
+
+	/*
+	 * The next stack pointer is stored at the beginning of the stack.
+	 * See irq_32.c.
+	 */
+	info->next_sp	= (unsigned long *)*begin;
+
+	return true;
+}
+
+int get_stack_info(unsigned long *stack, struct task_struct *task,
+		   struct stack_info *info, unsigned long *visit_mask)
+{
+	if (!stack)
+		goto unknown;
+
+	task = task ? : current;
+
+	if (in_task_stack(stack, task, info))
+		return 0;
+
+	if (task != current)
+		goto unknown;
+
+	if (in_hardirq_stack(stack, info))
+		return 0;
+
+	if (in_softirq_stack(stack, info))
+		return 0;
+
+unknown:
+	info->type = STACK_TYPE_UNKNOWN;
+	return -EINVAL;
+}
+
 void show_regs(struct pt_regs *regs)
 {
 	int i;
diff -up ./arch/x86/kernel/irq_32.c.gsi ./arch/x86/kernel/irq_32.c
--- ./arch/x86/kernel/irq_32.c.gsi	2018-04-15 20:28:44.000000000 +0900
+++ ./arch/x86/kernel/irq_32.c	2018-04-16 23:09:01.000000000 +0900
@@ -58,16 +58,8 @@ static inline int check_stack_overflow(v
 static inline void print_stack_overflow(void) { }
 #endif
 
-/*
- * per-CPU IRQ handling contexts (thread information and stack)
- */
-union irq_ctx {
-	struct thread_info      tinfo;
-	u32                     stack[THREAD_SIZE/sizeof(u32)];
-} __attribute__((aligned(THREAD_SIZE)));
-
-static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
-static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
+DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
 
 static void call_on_stack(void *func, void *stack)
 {
@@ -80,14 +72,22 @@ static void call_on_stack(void *func, vo
 		     : "memory", "cc", "edx", "ecx", "eax");
 }
 
+/* how to get the current stack pointer from C */
+register unsigned long current_stack_pointer asm("esp") __used;
+
+static inline void *current_stack(void)
+{
+	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+}
+
 static inline int
 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 {
-	union irq_ctx *curctx, *irqctx;
-	u32 *isp, arg1, arg2;
+	struct irq_stack *curstk, *irqstk;
+ 	u32 *isp, *prev_esp, arg1, arg2;
 
-	curctx = (union irq_ctx *) current_thread_info();
-	irqctx = __this_cpu_read(hardirq_ctx);
+	curstk = (struct irq_stack *) current_stack();
+	irqstk = __this_cpu_read(hardirq_stack);
 
 	/*
 	 * this is where we switch to the IRQ stack. However, if we are
@@ -95,16 +95,14 @@ execute_on_irq_stack(int overflow, struc
 	 * handler) we can't do that and just have to keep using the
 	 * current stack (which is the irq stack already after all)
 	 */
-	if (unlikely(curctx == irqctx))
+	if (unlikely(curstk == irqstk))
 		return 0;
 
-	/* build the stack frame on the IRQ stack */
-	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
-	irqctx->tinfo.task = curctx->tinfo.task;
-	irqctx->tinfo.previous_esp = current_stack_pointer;
+	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
 
-	/* Copy the preempt_count so that the [soft]irq checks work. */
-	irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
+	/* Save the next esp at the bottom of the stack */
+	prev_esp = (u32 *)irqstk;
+ 	*prev_esp = current_stack_pointer;
 
 	if (unlikely(overflow))
 		call_on_stack(print_stack_overflow, isp);
@@ -124,40 +122,31 @@ execute_on_irq_stack(int overflow, struc
  */
 void irq_ctx_init(int cpu)
 {
-	union irq_ctx *irqctx;
+	struct irq_stack *irqstk;
 
-	if (per_cpu(hardirq_ctx, cpu))
+	if (per_cpu(hardirq_stack, cpu))
 		return;
 
-	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
 					       THREADINFO_GFP,
 					       THREAD_SIZE_ORDER));
-	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-	irqctx->tinfo.cpu		= cpu;
-	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
-	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
-
-	per_cpu(hardirq_ctx, cpu) = irqctx;
+	per_cpu(hardirq_stack, cpu) = irqstk;
 
-	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
 					       THREADINFO_GFP,
 					       THREAD_SIZE_ORDER));
-	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-	irqctx->tinfo.cpu		= cpu;
-	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
-
-	per_cpu(softirq_ctx, cpu) = irqctx;
+	per_cpu(softirq_stack, cpu) = irqstk;
 
 	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-	       cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
+	       cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
 }
 
 asmlinkage void do_softirq(void)
 {
 	unsigned long flags;
-	struct thread_info *curctx;
-	union irq_ctx *irqctx;
-	u32 *isp;
+	struct thread_info *curstk;
+	struct irq_stack *irqstk;
+ 	u32 *isp, *prev_esp;
 
 	if (in_interrupt())
 		return;
@@ -165,13 +154,14 @@ asmlinkage void do_softirq(void)
 	local_irq_save(flags);
 
 	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = __this_cpu_read(softirq_ctx);
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_esp = current_stack_pointer;
+		curstk = current_stack();
+		irqstk = __this_cpu_read(softirq_stack);
 
 		/* build the stack frame on the softirq stack */
-		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+		isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
+
+		prev_esp = (u32 *)irqstk;
+		*prev_esp = current_stack_pointer;
 
 		call_on_stack(__do_softirq, isp);
 		/*
diff -up ./arch/x86/kernel/process_32.c.gsi ./arch/x86/kernel/process_32.c
--- ./arch/x86/kernel/process_32.c.gsi	2018-03-22 06:40:12.000000000 +0900
+++ ./arch/x86/kernel/process_32.c	2018-04-16 21:12:06.000000000 +0900
@@ -307,6 +307,10 @@ __switch_to(struct task_struct *prev_p,
 	 */
 	arch_end_context_switch(next_p);
 
+	this_cpu_write(kernel_stack,
+		  (unsigned long)task_stack_page(next_p) +
+		  THREAD_SIZE - KERNEL_STACK_OFFSET);
+
 	/*
 	 * Restore %gs if needed (which is common)
 	 */
diff -up ./arch/x86/kernel/ptrace.c.gsi ./arch/x86/kernel/ptrace.c
--- ./arch/x86/kernel/ptrace.c.gsi	2018-04-16 21:12:06.000000000 +0900
+++ ./arch/x86/kernel/ptrace.c	2018-04-16 23:39:44.000000000 +0900
@@ -184,14 +184,15 @@ unsigned long kernel_stack_pointer(struc
 {
 	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
 	unsigned long sp = (unsigned long)&regs->sp;
-	struct thread_info *tinfo;
+	u32 *prev_esp;
 
 	if (context == (sp & ~(THREAD_SIZE - 1)))
 		return sp;
 
-	tinfo = (struct thread_info *)context;
-	if (tinfo->previous_esp)
-		return tinfo->previous_esp;
+	prev_esp = (u32 *)(context);
+	if (*prev_esp)
+		return (unsigned long)*prev_esp;
+
 
 	return (unsigned long)regs;
 }
diff -up ./arch/x86/kernel/smpboot.c.gsi ./arch/x86/kernel/smpboot.c
--- ./arch/x86/kernel/smpboot.c.gsi	2018-04-16 21:12:06.000000000 +0900
+++ ./arch/x86/kernel/smpboot.c	2018-04-16 21:46:54.000000000 +0900
@@ -898,13 +898,13 @@ static int do_boot_cpu(int apicid, int c
 #else
 	clear_tsk_thread_flag(idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
+#endif
 	per_cpu(kernel_stack, cpu) =
 		(unsigned long)task_stack_page(idle) -
 		KERNEL_STACK_OFFSET + THREAD_SIZE;
	per_cpu(__kernel_stack_70__, cpu) =
 		(unsigned long)task_stack_page(idle) -
 		KERNEL_STACK_OFFSET + THREAD_SIZE - 8192;
-#endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
 	initial_stack  = idle->thread.sp;