Commit-ID:  ff47ab4ff3cddfa7bc1b25b990e24abe2ae474ff
Gitweb:     http://git.kernel.org/tip/ff47ab4ff3cddfa7bc1b25b990e24abe2ae474ff
Author:     Andi Kleen <a...@linux.intel.com>
AuthorDate: Fri, 16 Aug 2013 14:17:19 -0700
Committer:  H. Peter Anvin <h...@linux.intel.com>
CommitDate: Tue, 10 Sep 2013 15:27:43 -0700

x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic

The 64bit __copy_{from,to}_user_inatomic always called
copy_user_generic, but skipped the special optimizations for 1/2/4/8
byte accesses.

This especially hurts the futex code, which accesses the 4-byte futex
user value with a complicated fast-string operation in a function call
instead of a single movl.

Use __copy_{from,to}_user for _inatomic instead to get the same
optimizations. The only problem was the might_fault() in those functions,
so move it into new wrapper functions and call
__copy_{from,to}_user_nocheck() from the *_inatomic variants directly.

32bit already did this correctly by duplicating the code.
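
For illustration only (this is not kernel code; copy_generic() and
copy_nocheck() are made-up placeholder names), a minimal user-space
sketch of what the __builtin_constant_p() size dispatch buys: with
optimization enabled, the constant 4-byte case inlines down to a single
32-bit move instead of a call into a generic copy routine, which is
what the futex fast path wants.

/* Illustrative sketch only -- not the kernel's code.  copy_generic() and
 * copy_nocheck() are placeholder names.  Shows how a __builtin_constant_p()
 * size dispatch lets a constant 4-byte copy inline to a single 32-bit move
 * instead of calling an out-of-line generic copy routine.
 */
#include <stdint.h>
#include <string.h>

/* stand-in for the out-of-line fast-string copy (copy_user_generic()) */
static int copy_generic(void *dst, const void *src, unsigned size)
{
        memcpy(dst, src, size);
        return 0;
}

static inline __attribute__((always_inline))
int copy_nocheck(void *dst, const void *src, unsigned size)
{
        if (!__builtin_constant_p(size))
                return copy_generic(dst, src, size);
        switch (size) {
        case 1: *(uint8_t  *)dst = *(const uint8_t  *)src; return 0;
        case 2: *(uint16_t *)dst = *(const uint16_t *)src; return 0;
        case 4: *(uint32_t *)dst = *(const uint32_t *)src; return 0; /* movl */
        case 8: *(uint64_t *)dst = *(const uint64_t *)src; return 0; /* movq */
        default:
                return copy_generic(dst, src, size);
        }
}

int main(void)
{
        uint32_t uval = 42, kval;

        /* futex-style fetch: size is a constant 4, so only one load survives */
        copy_nocheck(&kval, &uval, sizeof(uval));
        return kval == 42 ? 0 : 1;
}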

Signed-off-by: Andi Kleen <a...@linux.intel.com>
Link: http://lkml.kernel.org/r/1376687844-19857-2-git-send-email-a...@firstfloor.org
Signed-off-by: H. Peter Anvin <h...@linux.intel.com>
---
 arch/x86/include/asm/uaccess_64.h | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 4f7923d..64476bb 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -77,11 +77,10 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
        int ret = 0;
 
-       might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
@@ -121,11 +120,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+       might_fault();
+       return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
        int ret = 0;
 
-       might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
@@ -165,6 +170,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+       might_fault();
+       return __copy_to_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
        int ret = 0;
@@ -220,13 +232,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
-       return copy_user_generic(dst, (__force const void *)src, size);
+       return __copy_from_user_nocheck(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
-       return copy_user_generic((__force void *)dst, src, size);
+       return __copy_to_user_nocheck((__force void *)dst, src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,
--