In epoll_wait(2), ep_check_params() performs a bulk check for the passed user address:
	if (!access_ok(evs, maxevents * sizeof(struct epoll_event)))

and later, epoll_put_uevent() uses __put_user() twice to copy two
values into the region.

unsafe_put_user() could be used to save a stac/clac pair, but
masked_user_access_begin() or user_access_begin() would introduce
unnecessary address masking or another access_ok() check.

Add a low-level helper for such a use case.

Signed-off-by: Kuniyuki Iwashima <[email protected]>
---
 arch/arm64/include/asm/uaccess.h   |  1 +
 arch/powerpc/include/asm/uaccess.h | 13 ++++++++++---
 arch/riscv/include/asm/uaccess.h   |  1 +
 arch/x86/include/asm/uaccess.h     |  1 +
 include/linux/uaccess.h            |  1 +
 5 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 1aa4ecb73429..30726ce182cb 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -422,6 +422,7 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
 }
 #define user_access_begin(a,b)	user_access_begin(a,b)
 #define user_access_end()	uaccess_ttbr0_disable()
+#define __user_write_access_begin(a,b)	uaccess_ttbr0_enable()
 #define unsafe_put_user(x, ptr, label) \
 	__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
 #define unsafe_get_user(x, ptr, label) \
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4f5a46a77fa2..910bf469128d 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -437,15 +437,22 @@ user_read_access_begin(const void __user *ptr, size_t len)
 #define user_read_access_begin user_read_access_begin
 #define user_read_access_end prevent_current_read_from_user
 
+static __always_inline void
+__user_write_access_begin(const void __user *ptr, size_t len)
+{
+	might_fault();
+
+	allow_write_to_user((void __user *)ptr, len);
+}
+#define __user_write_access_begin __user_write_access_begin
+
 static __must_check __always_inline bool
 user_write_access_begin(const void __user *ptr, size_t len)
 {
 	if (unlikely(!access_ok(ptr, len)))
 		return false;
 
-	might_fault();
-
-	allow_write_to_user((void __user *)ptr, len);
+	__user_write_access_begin(ptr, len);
 	return true;
 }
 #define user_write_access_begin user_write_access_begin
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index f5f4f7f85543..9adc8f0dd1c8 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -452,6 +452,7 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
 }
 #define user_access_begin user_access_begin
 #define user_access_end __disable_user_access
+#define __user_write_access_begin(a,b) __enable_user_access()
 
 static inline unsigned long user_access_save(void) { return 0UL; }
 static inline void user_access_restore(unsigned long enabled) { }
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 91a3fb8ae7ff..23edbaef9f71 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -524,6 +524,7 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
 }
 #define user_access_begin(a,b)	user_access_begin(a,b)
 #define user_access_end()	__uaccess_end()
+#define __user_write_access_begin(a,b)	__uaccess_begin()
 
 #define user_access_save()	smap_save()
 #define user_access_restore(x)	smap_restore(x)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 1beb5b395d81..a6e32784e6cd 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -552,6 +552,7 @@ do {								\
 #ifndef user_access_begin
 #define user_access_begin(ptr,len) access_ok(ptr, len)
 #define user_access_end() do { } while (0)
+#define __user_write_access_begin(ptr,len) do { } while (0)
 #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
 #define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
 #define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
-- 
2.51.1.814.gb8fa24458f-goog
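
For context, a hypothetical sketch (not part of this patch) of how
epoll_put_uevent() could be converted on top of the new helper.  The
pairing with user_write_access_end() and the efault label are
assumptions following the existing unsafe_put_user() conventions; the
point is that ep_check_params() has already done the bulk access_ok(),
so only the user-access window (the stac/clac pair on x86) is opened
here, with no re-masking or second access_ok():

/*
 * Hypothetical caller sketch: the surrounding access_ok() was done in
 * ep_check_params(), so open the write window directly.
 */
static inline struct epoll_event __user *
epoll_put_uevent(__poll_t revents, __u64 data,
		 struct epoll_event __user *uevent)
{
	__user_write_access_begin(uevent, sizeof(*uevent));

	unsafe_put_user(revents, &uevent->events, efault);
	unsafe_put_user(data, &uevent->data, efault);

	user_write_access_end();
	return uevent + 1;

efault:
	user_write_access_end();
	return NULL;
}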
