Since lock_page_or_retry() may release the mmap_sem, it must be told
which lock range is held so that, when range locks are in use, the
correct range can be released.

This patch adds a new range parameter to __lock_page_or_retry() and
updates all of its callers accordingly.

Signed-off-by: Laurent Dufour <[email protected]>
---
 include/linux/pagemap.h | 17 +++++++++++++++++
 mm/filemap.c            |  9 +++++++--
 mm/memory.c             |  3 ++-
 3 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 316a19f6b635..efc62200d527 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -433,8 +433,13 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 
 extern void __lock_page(struct page *page);
 extern int __lock_page_killable(struct page *page);
+#ifdef CONFIG_MEM_RANGE_LOCK
+extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+                               unsigned int flags, struct range_lock *range);
+#else
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
+#endif
 extern void unlock_page(struct page *page);
 
 static inline int trylock_page(struct page *page)
@@ -473,12 +478,24 @@ static inline int lock_page_killable(struct page *page)
  * Return value and mmap_sem implications depend on flags; see
  * __lock_page_or_retry().
  */
+#ifdef CONFIG_MEM_RANGE_LOCK
 static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
+                                    unsigned int flags,
+                                    struct range_lock *range)
+{
+       might_sleep();
+       return trylock_page(page) || __lock_page_or_retry(page, mm, flags,
+                                                         range);
+}
+#else
+static inline int _lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
 {
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
 }
+#define lock_page_or_retry(p, m, f, r) _lock_page_or_retry(p, m, f)
+#endif /* CONFIG_MEM_RANGE_LOCK */
 
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
diff --git a/mm/filemap.c b/mm/filemap.c
index 6f1be573a5e6..adb7c15b8aa4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1053,7 +1053,11 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
  * with the page locked and the mmap_sem unperturbed.
  */
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
-                        unsigned int flags)
+                        unsigned int flags
+#ifdef CONFIG_MEM_RANGE_LOCK
+                        , struct range_lock *range
+#endif
+       )
 {
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                /*
@@ -2234,7 +2238,8 @@ int filemap_fault(struct vm_fault *vmf)
                        goto no_cached_page;
        }
 
-       if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
+       if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags,
+                               vmf->lockrange)) {
                put_page(page);
                return ret | VM_FAULT_RETRY;
        }
diff --git a/mm/memory.c b/mm/memory.c
index aa080e9814d4..99f62156616e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2737,7 +2737,8 @@ int do_swap_page(struct vm_fault *vmf)
        }
 
        swapcache = page;
-       locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
+       locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags,
+                                   vmf->lockrange);
 
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
        if (!locked) {
-- 
2.7.4

Reply via email to