The userfaultfd fault path was by default killable even if the caller
did not pass FAULT_FLAG_KILLABLE.  That made sense before, because the
gup code did not set FAULT_FLAG_KILLABLE properly.  Now that the
previous patch applies FAULT_FLAG_KILLABLE to the gup code as well, it
makes sense to let userfaultfd honor FAULT_FLAG_KILLABLE too.

Because the gup code currently sets FAULT_FLAG_KILLABLE
unconditionally, this patch should have no functional change: for gup
faults the new helpers still resolve to TASK_KILLABLE and
fatal_signal_pending(), exactly as before.  It also cleans up the code
a little by introducing two small helpers.

Signed-off-by: Peter Xu <pet...@redhat.com>
---
 fs/userfaultfd.c | 36 ++++++++++++++++++++++++++++--------
 1 file changed, 28 insertions(+), 8 deletions(-)
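
Note: for reference, here is a condensed sketch of how the two new
helpers are meant to pair up in the wait loop of handle_userfault()
below.  It is an illustration distilled from the hunks in this patch
(locking and wake-up bookkeeping omitted), not literal kernel code:

	/* Pick the deepest sleep state the caller's flags allow. */
	long blocking_state = userfaultfd_get_blocking_state(vmf->flags);

	set_current_state(blocking_state);
	while (!READ_ONCE(uwq.waken) &&
	       !READ_ONCE(ctx->released) &&
	       /* true only for signals that may interrupt this state */
	       !userfaultfd_signal_pending(vmf->flags)) {
		schedule();
		set_current_state(blocking_state);
	}
	__set_current_state(TASK_RUNNING);

In other words: FAULT_FLAG_INTERRUPTIBLE sleeps in TASK_INTERRUPTIBLE
and is woken by any signal_pending(); FAULT_FLAG_KILLABLE sleeps in
TASK_KILLABLE and is woken only by fatal_signal_pending(); with
neither flag the task sleeps in TASK_UNINTERRUPTIBLE and ignores
signals.  Since gup currently always passes FAULT_FLAG_KILLABLE, gup
faults keep the old TASK_KILLABLE behavior.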

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 2b3b48e94ae4..8c5863ccbf0e 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -334,6 +334,30 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
        return ret;
 }
 
+/* Should pair with userfaultfd_signal_pending() */
+static inline long userfaultfd_get_blocking_state(unsigned int flags)
+{
+       if (flags & FAULT_FLAG_INTERRUPTIBLE)
+               return TASK_INTERRUPTIBLE;
+
+       if (flags & FAULT_FLAG_KILLABLE)
+               return TASK_KILLABLE;
+
+       return TASK_UNINTERRUPTIBLE;
+}
+
+/* Should pair with userfaultfd_get_blocking_state() */
+static inline bool userfaultfd_signal_pending(unsigned int flags)
+{
+       if (flags & FAULT_FLAG_INTERRUPTIBLE)
+               return signal_pending(current);
+
+       if (flags & FAULT_FLAG_KILLABLE)
+               return fatal_signal_pending(current);
+
+       return false;
+}
+
 /*
  * The locking rules involved in returning VM_FAULT_RETRY depending on
  * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
@@ -355,7 +379,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
        struct userfaultfd_ctx *ctx;
        struct userfaultfd_wait_queue uwq;
        vm_fault_t ret = VM_FAULT_SIGBUS;
-       bool must_wait, return_to_userland;
+       bool must_wait;
        long blocking_state;
 
        /*
@@ -462,9 +486,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
        uwq.ctx = ctx;
        uwq.waken = false;
 
-       return_to_userland = vmf->flags & FAULT_FLAG_INTERRUPTIBLE;
-       blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
-                        TASK_KILLABLE;
+       blocking_state = userfaultfd_get_blocking_state(vmf->flags);
 
        spin_lock_irq(&ctx->fault_pending_wqh.lock);
        /*
@@ -490,8 +512,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
        up_read(&mm->mmap_sem);
 
        if (likely(must_wait && !READ_ONCE(ctx->released) &&
-                  (return_to_userland ? !signal_pending(current) :
-                   !fatal_signal_pending(current)))) {
+                  !userfaultfd_signal_pending(vmf->flags))) {
                wake_up_poll(&ctx->fd_wqh, EPOLLIN);
                schedule();
                ret |= VM_FAULT_MAJOR;
@@ -513,8 +534,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
                        set_current_state(blocking_state);
                        if (READ_ONCE(uwq.waken) ||
                            READ_ONCE(ctx->released) ||
-                           (return_to_userland ? signal_pending(current) :
-                            fatal_signal_pending(current)))
+                           userfaultfd_signal_pending(vmf->flags))
                                break;
                        schedule();
                }
-- 
2.21.0
