Re: [PATCH] x86/copy_user: Remove 64-bit asm _copy_*_user variants

2016-11-01 Thread Linus Torvalds
On Mon, Oct 31, 2016 at 9:10 AM, Borislav Petkov  wrote:
> From: Borislav Petkov 
>
> We already have the same functionality in usercopy_32.c. Share it with
> 64-bit and get rid of some more asm glue which is not needed anymore.

I see this already made it into -tip, but it looks good to me. I think
Al may have some of this in generic code already in his uaccess
cleanup stuff, but I doubt this makes for any huge merge conflict
issues.

 Linus


[PATCH] x86/copy_user: Remove 64-bit asm _copy_*_user variants

2016-10-31 Thread Borislav Petkov
From: Borislav Petkov 

We already have the same functionality in usercopy_32.c. Share it with
64-bit and get rid of some more asm glue which is not needed anymore.

Signed-off-by: Borislav Petkov 
---

Guys, please double-check me on this, but I think the asm checks and
the access_ok() macro are equivalent, so we can get rid of the asm gunk
and redirect to the __copy_*_user variants after checking...
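
To make the claimed equivalence concrete, here is a simplified C sketch
(illustrative only, not the literal macro expansion): both the removed
asm and access_ok() reject a user range that either wraps around or
ends past the task's addr_limit.

/*
 * Simplified sketch of the check both variants perform; the name and
 * shape are illustrative, not the kernel's actual implementation.
 */
static inline bool user_range_ok(unsigned long addr, unsigned long size,
				 unsigned long limit)
{
	unsigned long end = addr + size;

	/* Wraparound check, like the asm's "jc bad_*_user". */
	if (end < addr)
		return false;

	/* Limit check, like "cmpq TASK_addr_limit(%rax),%rcx; ja bad". */
	return end <= limit;
}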

 arch/x86/lib/copy_user_64.S | 47 ---
 arch/x86/lib/usercopy.c | 49 +
 arch/x86/lib/usercopy_32.c  | 49 -
 3 files changed, 49 insertions(+), 96 deletions(-)

diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index d376e4b48f88..c5959576c315 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -16,53 +16,6 @@
 #include 
 #include 
 
-/* Standard copy_to_user with segment limit checking */
-ENTRY(_copy_to_user)
-   mov PER_CPU_VAR(current_task), %rax
-   movq %rdi,%rcx
-   addq %rdx,%rcx
-   jc bad_to_user
-   cmpq TASK_addr_limit(%rax),%rcx
-   ja bad_to_user
-   ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
- "jmp copy_user_generic_string",   \
- X86_FEATURE_REP_GOOD, \
- "jmp copy_user_enhanced_fast_string", \
- X86_FEATURE_ERMS
-ENDPROC(_copy_to_user)
-EXPORT_SYMBOL(_copy_to_user)
-
-/* Standard copy_from_user with segment limit checking */
-ENTRY(_copy_from_user)
-   mov PER_CPU_VAR(current_task), %rax
-   movq %rsi,%rcx
-   addq %rdx,%rcx
-   jc bad_from_user
-   cmpq TASK_addr_limit(%rax),%rcx
-   ja bad_from_user
-   ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
- "jmp copy_user_generic_string",   \
- X86_FEATURE_REP_GOOD, \
- "jmp copy_user_enhanced_fast_string", \
- X86_FEATURE_ERMS
-ENDPROC(_copy_from_user)
-EXPORT_SYMBOL(_copy_from_user)
-
-
-   .section .fixup,"ax"
-   /* must zero dest */
-ENTRY(bad_from_user)
-bad_from_user:
-   movl %edx,%ecx
-   xorl %eax,%eax
-   rep
-   stosb
-bad_to_user:
-   movl %edx,%eax
-   ret
-ENDPROC(bad_from_user)
-   .previous
-
 /*
  * copy_user_generic_unrolled - memory copy with exception handling.
  * This version is for CPUs like P4 that don't have efficient micro
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index b4908789484e..c074799bddae 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -34,3 +34,52 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return ret;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
+
+/**
+ * copy_to_user: - Copy a block of data into user space.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ *  enabled.
+ *
+ * Copy data from kernel space to user space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
+{
+   if (access_ok(VERIFY_WRITE, to, n))
+   n = __copy_to_user(to, from, n);
+   return n;
+}
+EXPORT_SYMBOL(_copy_to_user);
+
+/**
+ * copy_from_user: - Copy a block of data from user space.
+ * @to:   Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ *  enabled.
+ *
+ * Copy data from user space to kernel space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
+{
+   if (access_ok(VERIFY_READ, from, n))
+   n = __copy_from_user(to, from, n);
+   else
+   memset(to, 0, n);
+   return n;
+}
+EXPORT_SYMBOL(_copy_from_user);
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 3bc7baf2a711..0b281217c890 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -640,52 +640,3 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long n)
return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *  enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
-{
-   if (access_ok(VERIFY_WRITE, to, n))
-   n = __copy_to_user(to, from, n);
-   return n;
-}
-EXPORT_SYMBOL(_copy_to_user);
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *  enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
-{
-   if (access_ok(VERIFY_READ, from, n))
-   n = __copy_from_user(to, from, n);
-   else
-   memset(to, 0, n);
-   return n;
-}
-EXPORT_SYMBOL(_copy_from_user);
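
For reference, the calling convention these functions preserve is the
usual one: the return value is the number of bytes that could not be
copied, and callers turn any nonzero result into -EFAULT. A minimal,
hypothetical caller (the struct and function names are illustrative,
not from this patch):

/* Hypothetical caller, for illustration only. */
struct example_stats {
	unsigned long rx;
	unsigned long tx;
};

static long example_read_stats(struct example_stats __user *ubuf,
			       const struct example_stats *kstats)
{
	/*
	 * copy_to_user() returns the number of bytes it could NOT copy;
	 * any nonzero return conventionally becomes -EFAULT.
	 */
	if (copy_to_user(ubuf, kstats, sizeof(*kstats)))
		return -EFAULT;
	return 0;
}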
