From 0a2d7d9b1e63dd28baf6c8e1416b64a33f89c900 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Tue, 23 Feb 2016 14:58:52 -0800
Subject: x86: fix SMAP in 32-bit environments

commit de9e478b9d49f3a0214310d921450cf5bb4a21e6 upstream.

In commit 11f1a4b9755f ("x86: reorganize SMAP handling in user space
accesses") I changed how the stac/clac instructions were generated
around the user space accesses, which then made it possible to do
batched accesses efficiently for user string copies etc.

However, in doing so, I completely spaced out, and didn't even think
about the 32-bit case.  And nobody really even seemed to notice, because
SMAP doesn't even exist until modern Skylake processors, and you'd have
to be crazy to run 32-bit kernels on a modern CPU.

Which brings us to Andy Lutomirski.

He actually tested the 32-bit kernel on new hardware, and noticed that
it doesn't work.  My bad.  The trivial fix is to add the required
uaccess begin/end markers around the raw accesses in <asm/uaccess_32.h>.

I feel a bit bad about this patch, just because that header file really
should be cleaned up to avoid all the duplicated code in it, and this
commit just expands on the problem.  But this just fixes the bug without
any bigger cleanup surgery.

Reported-and-tested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bwh: Backported to 3.16: There's no 'case 8' in __copy_to_user_inatomic()]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 arch/x86/include/asm/uaccess_32.h | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 3c03a5de64d3..b25d109fb95a 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,16 +48,22 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 
 		switch (n) {
 		case 1:
+			__uaccess_begin();
 			__put_user_size(*(u8 *)from, (u8 __user *)to,
 					1, ret, 1);
+			__uaccess_end();
 			return ret;
 		case 2:
+			__uaccess_begin();
 			__put_user_size(*(u16 *)from, (u16 __user *)to,
 					2, ret, 2);
+			__uaccess_end();
 			return ret;
 		case 4:
+			__uaccess_begin();
 			__put_user_size(*(u32 *)from, (u32 __user *)to,
 					4, ret, 4);
+			__uaccess_end();
 			return ret;
 		}
 	}
@@ -98,13 +104,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 
 		switch (n) {
 		case 1:
+			__uaccess_begin();
 			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			__uaccess_end();
 			return ret;
 		case 2:
+			__uaccess_begin();
 			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			__uaccess_end();
 			return ret;
 		case 4:
+			__uaccess_begin();
 			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			__uaccess_end();
 			return ret;
 		}
 	}
@@ -142,13 +154,19 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 
 		switch (n) {
 		case 1:
+			__uaccess_begin();
 			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			__uaccess_end();
 			return ret;
 		case 2:
+			__uaccess_begin();
 			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			__uaccess_end();
 			return ret;
 		case 4:
+			__uaccess_begin();
 			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			__uaccess_end();
 			return ret;
 		}
 	}
@@ -164,13 +182,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
 		switch (n) {
 		case 1:
+			__uaccess_begin();
 			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			__uaccess_end();
 			return ret;
 		case 2:
+			__uaccess_begin();
 			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			__uaccess_end();
 			return ret;
 		case 4:
+			__uaccess_begin();
 			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			__uaccess_end();
 			return ret;
 		}
 	}
-- 
cgit 1.2-0.3.lf.el7
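
Every hunk above follows the same pattern: each raw __put_user_size()/__get_user_size() call is bracketed by __uaccess_begin()/__uaccess_end(), which on SMAP-capable CPUs expand to the STAC and CLAC instructions that temporarily allow, and then re-forbid, kernel access to user pages. The standalone C sketch below shows the shape of that bracketing; the macros are illustrative stubs that only log where STAC/CLAC would be issued, not the kernel's definitions (those live in arch/x86/include/asm/uaccess.h and asm/smap.h).

#include <stdio.h>
#include <string.h>

/* Stubs: in the kernel these expand to the STAC/CLAC instructions. */
#define __uaccess_begin() printf("stac (allow access to user memory)\n")
#define __uaccess_end()   printf("clac (forbid access to user memory)\n")

/* Stand-in for the kernel's __put_user_size(x, ptr, size, retval, errret). */
#define __put_user_size(x, ptr, size, retval, errret) \
	do { memcpy((ptr), &(x), (size)); (retval) = 0; (void)(errret); } while (0)

static unsigned long copy_to_user_sketch(void *to, const void *from, unsigned long n)
{
	int ret = 0;

	switch (n) {
	case 1:
		__uaccess_begin();	/* open the SMAP window ... */
		__put_user_size(*(const unsigned char *)from, (unsigned char *)to,
				1, ret, 1);
		__uaccess_end();	/* ... and close it right after the access */
		return ret;
	default:
		return n;		/* sizes this fast path does not handle */
	}
}

int main(void)
{
	unsigned char src = 0x42, dst = 0;

	copy_to_user_sketch(&dst, &src, 1);
	printf("copied byte: 0x%x\n", dst);
	return 0;
}

Keeping the window this narrow is the whole point: on SMAP hardware, any kernel access to user memory outside a begin/end pair faults, which is exactly what was observed when the unbracketed 32-bit fast paths were exercised.
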
From 98ce99aa23b43c3dc736cd0354537fca029d69cb Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams@intel.com>
Date: Mon, 29 Jan 2018 17:02:49 -0800
Subject: x86/uaccess: Use __uaccess_begin_nospec() and uaccess_try_nospec

commit 304ec1b050310548db33063e567123fae8fd0301 upstream.

Quoting Linus:

    I do think that it would be a good idea to very expressly document
    the fact that it's not that the user access itself is unsafe. I do
    agree that things like "get_user()" want to be protected, but not
    because of any direct bugs or problems with get_user() and friends,
    but simply because get_user() is an excellent source of a pointer
    that is obviously controlled from a potentially attacking user
    space. So it's a prime candidate for then finding _subsequent_
    accesses that can then be used to perturb the cache.

__uaccess_begin_nospec() covers __get_user() and copy_from_iter() where the
limit check is far away from the user pointer de-reference. In those cases
a barrier_nospec() prevents speculation with a potential pointer to
privileged memory. uaccess_try_nospec covers get_user_try.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Suggested-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Kees Cook <keescook@chromium.org>
Cc: kernel-hardening@lists.openwall.com
Cc: gregkh@linuxfoundation.org
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: alan@linux.intel.com
Link: https://lkml.kernel.org/r/151727416953.33451.10508284228526170604.stgit@dwillia2-desk3.amr.corp.intel.com
[bwh: Backported to 3.16:
 - Convert several more functions to use __uaccess_begin_nospec(), that
   are just wrappers in mainline
 - There's no 'case 8' in __copy_to_user_inatomic()
 - Adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 arch/x86/include/asm/uaccess.h    |  6 +++---
 arch/x86/include/asm/uaccess_32.h | 24 ++++++++++++------------
 arch/x86/include/asm/uaccess_64.h | 20 ++++++++++----------
 arch/x86/lib/usercopy_32.c        | 10 +++++-----
 4 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index b25d109fb95a..c803818cedfb 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,19 +48,19 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 
 		switch (n) {
 		case 1:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__put_user_size(*(u8 *)from, (u8 __user *)to,
 					1, ret, 1);
 			__uaccess_end();
 			return ret;
 		case 2:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__put_user_size(*(u16 *)from, (u16 __user *)to,
 					2, ret, 2);
 			__uaccess_end();
 			return ret;
 		case 4:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__put_user_size(*(u32 *)from, (u32 __user *)to,
 					4, ret, 4);
 			__uaccess_end();
@@ -104,17 +104,17 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 
 		switch (n) {
 		case 1:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__get_user_size(*(u8 *)to, from, 1, ret, 1);
 			__uaccess_end();
 			return ret;
 		case 2:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__get_user_size(*(u16 *)to, from, 2, ret, 2);
 			__uaccess_end();
 			return ret;
 		case 4:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__get_user_size(*(u32 *)to, from, 4, ret, 4);
 			__uaccess_end();
 			return ret;
@@ -154,17 +154,17 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 
 		switch (n) {
 		case 1:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__get_user_size(*(u8 *)to, from, 1, ret, 1);
 			__uaccess_end();
 			return ret;
 		case 2:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__get_user_size(*(u16 *)to, from, 2, ret, 2);
 			__uaccess_end();
 			return ret;
 		case 4:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__get_user_size(*(u32 *)to, from, 4, ret, 4);
 			__uaccess_end();
 			return ret;
@@ -182,17 +182,17 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
 		switch (n) {
 		case 1:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__get_user_size(*(u8 *)to, from, 1, ret, 1);
 			__uaccess_end();
 			return ret;
 		case 2:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__get_user_size(*(u16 *)to, from, 2, ret, 2);
 			__uaccess_end();
 			return ret;
 		case 4:
-			__uaccess_begin();
+			__uaccess_begin_nospec();
 			__get_user_size(*(u32 *)to, from, 4, ret, 4);
 			__uaccess_end();
 			return ret;
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 7f6e7dd46215..de848bc95a9f 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -570,7 +570,7 @@ do {									\
 unsigned long __copy_to_user_ll(void __user *to, const void *from,
 				unsigned long n)
 {
-	__uaccess_begin();
+	__uaccess_begin_nospec();
 	if (movsl_is_ok(to, from, n))
 		__copy_user(to, from, n);
 	else
-- 
cgit 1.2-0.3.lf.el7
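
The second patch changes which helper opens the access window: in paths where the limit check is far from the actual dereference, __uaccess_begin_nospec() is used instead of __uaccess_begin(), issuing a speculation barrier (barrier_nospec(), typically an LFENCE on x86) before STAC so the CPU cannot speculatively dereference a user-supplied pointer that has not yet been architecturally bounds-checked. The standalone C sketch below illustrates that ordering only; the macros are stubs rather than the kernel's definitions (see arch/x86/include/asm/uaccess.h and asm/barrier.h), and a simple array index stands in for the user pointer so the example stays runnable.

#include <stdint.h>
#include <stdio.h>

/* Stubs: in the kernel these expand to LFENCE/STAC/CLAC via alternatives. */
#define barrier_nospec()  printf("lfence (block speculation past the bounds check)\n")
#define stac()            printf("stac   (allow access to user memory)\n")
#define clac()            printf("clac   (forbid access to user memory)\n")

/* Plain variant: just opens the SMAP window. */
#define __uaccess_begin()        stac()
/* Hardened variant: speculation barrier first, then open the window. */
#define __uaccess_begin_nospec() do { barrier_nospec(); stac(); } while (0)
#define __uaccess_end()          clac()

static uint32_t user_buf[16];	/* stands in for user-space memory */

/* Toy __get_user(): bounds check, then hardened window, then the copy. */
static int get_user_sketch(uint32_t *dst, size_t idx)
{
	if (idx >= 16)
		return -14;		/* -EFAULT: architectural bounds check */

	__uaccess_begin_nospec();	/* barrier + STAC: no speculative
					   out-of-bounds load slips through */
	*dst = user_buf[idx];		/* stands in for __get_user_size() */
	__uaccess_end();		/* CLAC */
	return 0;
}

int main(void)
{
	uint32_t out = 0;

	user_buf[3] = 0xdeadbeef;
	if (get_user_sketch(&out, 3) == 0)
		printf("fetched: 0x%x\n", (unsigned int)out);
	return 0;
}

As the commit message notes, the access itself is not the problem; the barrier matters because the value fetched under speculation could otherwise seed subsequent, cache-perturbing accesses.
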