x86_64 specific version of memset() for user space, memset_user()

In the __wr_after_init scenario, write-rare variables have:
- a primary read-only mapping in kernel memory space
- an alternate, writable mapping, implemented as user-space mapping

The write rare implementation expects the arch code to provide a
memset_user() function, which is currently missing.

The implementation of memset_user() is modeled on the existing clear_user().

Signed-off-by: Igor Stoppa <igor.sto...@huawei.com>

CC: Andy Lutomirski <l...@amacapital.net>
CC: Nadav Amit <nadav.a...@gmail.com>
CC: Matthew Wilcox <wi...@infradead.org>
CC: Peter Zijlstra <pet...@infradead.org>
CC: Kees Cook <keesc...@chromium.org>
CC: Dave Hansen <dave.han...@linux.intel.com>
CC: Mimi Zohar <zo...@linux.vnet.ibm.com>
CC: Thiago Jung Bauermann <bauer...@linux.ibm.com>
CC: Ahmed Soliman <ahmedsoli...@mena.vt.edu>
CC: linux-integr...@vger.kernel.org
CC: kernel-harden...@lists.openwall.com
CC: linux...@kvack.org
CC: linux-kernel@vger.kernel.org
---
 arch/x86/include/asm/uaccess_64.h |  6 ++++
 arch/x86/lib/usercopy_64.c        | 51 +++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)

diff --git a/arch/x86/include/asm/uaccess_64.h 
b/arch/x86/include/asm/uaccess_64.h
index a9d637bc301d..f194bfce4866 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -213,4 +213,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len);
 unsigned long
 mcsafe_handle_tail(char *to, char *from, unsigned len);
 
+unsigned long __must_check /* both: set len bytes at mem to (u8)c; return bytes NOT set (0 on success) */
+memset_user(void __user *mem, int c, unsigned long len); /* checked variant: verifies access_ok() first */
+
+unsigned long __must_check
+__memset_user(void __user *mem, int c, unsigned long len); /* unchecked variant: caller must validate mem */
+
 #endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index ee42bb0cbeb3..e61963585354 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -9,6 +9,57 @@
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
 
+/*
+ * Memset Userspace: fill a range of user-space memory with a byte value.
+ */
+
+unsigned long __memset_user(void __user *addr, int c, unsigned long size)
+{
+       long __d0; /* dummy output: tells gcc that %rdi is clobbered */
+       unsigned long  pattern = 0x0101010101010101UL * (0xFFUL & c); /* low byte of c replicated into all 8 bytes */
+
+       might_fault(); /* may fault on user memory; annotates that this can sleep */
+       /* no memory constraint: gcc doesn't know about this (user-space) memory */
+       stac(); /* open the SMAP window for user-space accesses */
+       asm volatile(
+               "       movq %[pattern], %%rdx\n" /* fill pattern -> rdx */
+               "       testq  %[size8],%[size8]\n" /* any whole qwords to store? */
+               "       jz     4f\n"
+               "0:     mov %%rdx,(%[dst])\n" /* qword store loop, count in rcx */
+               "       addq   $8,%[dst]\n"
+               "       decl %%ecx ; jnz   0b\n"
+               "4:     movq  %[size1],%%rcx\n" /* then the 0..7 trailing bytes */
+               "       testl %%ecx,%%ecx\n"
+               "       jz     2f\n"
+               "1:     movb   %%dl,(%[dst])\n" /* byte store loop */
+               "       incq   %[dst]\n"
+               "       decl %%ecx ; jnz  1b\n"
+               "2:\n"
+               ".section .fixup,\"ax\"\n"
+               "3:     lea 0(%[size1],%[size8],8),%[size8]\n" /* fault in 0b: left = size1 + 8*qwords_left */
+               "       jmp 2b\n"
+               ".previous\n"
+               _ASM_EXTABLE_UA(0b, 3b)
+               _ASM_EXTABLE_UA(1b, 2b) /* fault in 1b: rcx already holds bytes left */
+               : [size8] "=&c"(size), [dst] "=&D" (__d0)
+               : [size1] "r" (size & 7), "[size8]" (size / 8),
+                 "[dst]" (addr), [pattern] "r" (pattern)
+               : "rdx");
+
+       clac(); /* close the SMAP window again */
+       return size; /* 0 on success, else number of bytes NOT set */
+}
+EXPORT_SYMBOL(__memset_user);
+
+unsigned long memset_user(void __user *to, int c, unsigned long n)
+{
+       if (!access_ok(to, n)) /* range not fully in user space: touch nothing */
+               return n;
+       return __memset_user(to, c, n); /* bytes left unset, 0 on success */
+}
+EXPORT_SYMBOL(memset_user);
+
+
 /*
  * Zero Userspace
  */
-- 
2.19.1

Reply via email to