4.9-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Dan Williams <dan.j.willi...@intel.com>

x86/usercopy: Replace open coded stac/clac with __uaccess_{begin, end}

(cherry picked from commit b5c4ae4f35325d520b230bab6eb3310613b72ac1)

In preparation for converting some __uaccess_begin() instances to
__uaccess_begin_nospec(), make sure all 'from user' uaccess paths are
using the _begin(), _end() helpers rather than open-coded stac() and
clac().

No functional changes.
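For context, a minimal sketch of what these helpers boil down to on x86 at
this point in the series (simplified from arch/x86/include/asm/uaccess.h;
the _nospec variant is introduced by a follow-up patch, so treat this as an
illustration rather than the exact upstream definitions):

  /* Simplified sketch; see arch/x86/include/asm/uaccess.h for the real thing. */
  #define __uaccess_begin()	stac()	/* set EFLAGS.AC, open the user access window */
  #define __uaccess_end()	clac()	/* clear EFLAGS.AC, close the window */

  /*
   * The planned follow-up conversion swaps selected __uaccess_begin() calls
   * for a variant that also halts speculation past the access_ok() check,
   * roughly:
   */
  #define __uaccess_begin_nospec()	\
  ({					\
  	stac();				\
  	barrier_nospec();		\
  })

With every 'from user' path going through the helpers, that later conversion
becomes a one-line change per call site.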

Suggested-by: Ingo Molnar <mi...@redhat.com>
Signed-off-by: Dan Williams <dan.j.willi...@intel.com>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Cc: linux-a...@vger.kernel.org
Cc: Tom Lendacky <thomas.lenda...@amd.com>
Cc: Kees Cook <keesc...@chromium.org>
Cc: kernel-harden...@lists.openwall.com
Cc: gre...@linuxfoundation.org
Cc: Al Viro <v...@zeniv.linux.org.uk>
Cc: torva...@linux-foundation.org
Cc: a...@linux.intel.com
Link: https://lkml.kernel.org/r/151727416438.33451.17309465232057176966.st...@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: David Woodhouse <d...@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
---
 arch/x86/lib/usercopy_32.c |    8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -570,12 +570,12 @@ do {							\
 unsigned long __copy_to_user_ll(void __user *to, const void *from,
                                unsigned long n)
 {
-       stac();
+       __uaccess_begin();
        if (movsl_is_ok(to, from, n))
                __copy_user(to, from, n);
        else
                n = __copy_user_intel(to, from, n);
-       clac();
+       __uaccess_end();
        return n;
 }
 EXPORT_SYMBOL(__copy_to_user_ll);
@@ -627,7 +627,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocach
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
                                        unsigned long n)
 {
-       stac();
+       __uaccess_begin();
 #ifdef CONFIG_X86_INTEL_USERCOPY
        if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
                n = __copy_user_intel_nocache(to, from, n);
@@ -636,7 +636,7 @@ unsigned long __copy_from_user_ll_nocach
 #else
        __copy_user(to, from, n);
 #endif
-       clac();
+       __uaccess_end();
        return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);

