This is the gup counterpart of the change that allows VM_FAULT_RETRY to
happen more than once: when the fault handler returns VM_FAULT_RETRY again
on an address we are already retrying, __get_user_pages_locked() now
retakes mmap_sem and repeats the fault instead of forbidding a second
retry.
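
For reference, a sketch (not part of the patch; the flag names are the
real kernel ones, the sequence is simplified) of the fault flags the
handler sees across attempts:

        unsigned int fault_flags;

        /* First attempt: retry is allowed, nothing tried yet. */
        fault_flags = FAULT_FLAG_ALLOW_RETRY;

        /*
         * Each retry after VM_FAULT_RETRY passes FOLL_TRIED, which now
         * adds FAULT_FLAG_TRIED while keeping FAULT_FLAG_ALLOW_RETRY,
         * so the handler may legitimately return VM_FAULT_RETRY again.
         */
        fault_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_TRIED;

        /*
         * Before this patch the retry ran without FAULT_FLAG_ALLOW_RETRY
         * (and faultin_page() warned if it was still set), so at most
         * one VM_FAULT_RETRY per address was possible.
         */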

Signed-off-by: Peter Xu <pet...@redhat.com>
---
 mm/gup.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index a40111c742ba..7c3b3ab6be88 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -528,7 +528,10 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
        if (*flags & FOLL_NOWAIT)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
        if (*flags & FOLL_TRIED) {
-               VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
+               /*
+                * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
+                * can co-exist
+                */
                fault_flags |= FAULT_FLAG_TRIED;
        }
 
@@ -944,17 +947,23 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
                /* VM_FAULT_RETRY triggered, so seek to the faulting offset */
                pages += ret;
                start += ret << PAGE_SHIFT;
+               lock_dropped = true;
 
+retry:
                /*
                 * Repeat on the address that fired VM_FAULT_RETRY
-                * without FAULT_FLAG_ALLOW_RETRY but with
+                * with both FAULT_FLAG_ALLOW_RETRY and
                 * FAULT_FLAG_TRIED.
                 */
                *locked = 1;
-               lock_dropped = true;
                down_read(&mm->mmap_sem);
                ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
-                                      pages, NULL, NULL);
+                                      pages, NULL, locked);
+               if (!*locked) {
+                       /* Continue to retry until we succeed */
+                       BUG_ON(ret != 0);
+                       goto retry;
+               }
                if (ret != 1) {
                        BUG_ON(ret > 1);
                        if (!pages_done)
-- 
2.17.1
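
The caller-visible contract is unchanged; below is a minimal,
hypothetical caller sketch, assuming the get_user_pages_locked()
signature of this tree (pre-mmap_lock, hence mmap_sem; "start" stands
in for a valid user address):

        unsigned long start = 0;        /* assumed-valid user address */
        struct page *page;
        int locked = 1;
        long ret;

        down_read(&current->mm->mmap_sem);
        /*
         * get_user_pages_locked() may drop and retake mmap_sem
         * internally -- with this patch, possibly several times.
         * "locked" reports whether the caller still holds the lock
         * on return.
         */
        ret = get_user_pages_locked(start, 1, FOLL_WRITE, &page, &locked);
        if (locked)
                up_read(&current->mm->mmap_sem);
        if (ret == 1) {
                /* ... access the page ... */
                put_page(page);
        }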
