Add a new do_handle_mm_fault() function, which extends the existing
handle_mm_fault() API by taking an additional mmap sequence count
argument, to be used in the FAULT_FLAG_SPECULATIVE case.
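
As an illustration only (not part of this patch), here is a sketch of
how an arch fault handler might eventually drive the speculative path;
mmap_seq_read() and find_vma_speculative() are hypothetical placeholder
names for machinery that later patches would have to provide:

	/*
	 * Hypothetical caller sketch (assumption, not in this series).
	 * The usual VM_FAULT_RETRY / signal handling is elided.
	 */
	static vm_fault_t do_user_fault(struct mm_struct *mm,
			unsigned long address, unsigned int flags,
			struct pt_regs *regs)
	{
		struct vm_area_struct *vma;
		vm_fault_t fault = VM_FAULT_RETRY;
		unsigned long seq;

		seq = mmap_seq_read(mm);	/* placeholder: snapshot count */
		vma = find_vma_speculative(mm, address); /* placeholder lookup */
		if (vma)
			fault = do_handle_mm_fault(vma, address,
					flags | FAULT_FLAG_SPECULATIVE,
					seq, regs);
		if (fault & VM_FAULT_RETRY) {
			/* Speculation declined (always, for now): fall back. */
			mmap_read_lock(mm);
			vma = find_vma(mm, address);
			if (vma)
				fault = handle_mm_fault(vma, address,
						flags, regs);
			mmap_read_unlock(mm);
		}
		return fault;
	}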

In this initial implementation, FAULT_FLAG_SPECULATIVE always fails:
do_handle_mm_fault() immediately returns VM_FAULT_RETRY, sending the
caller back to the classic, mmap_lock protected fault path.

The existing handle_mm_fault() API is kept as an inline wrapper around
do_handle_mm_fault(), passing a sequence count of 0, so that we do not
have to immediately update every handle_mm_fault() call site.
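
For example, a typical arch fault handler call site keeps compiling
as-is, with the wrapper supplying the sequence count:

	/* Unchanged call site; now resolves to
	 * do_handle_mm_fault(vma, address, flags, 0, regs). */
	fault = handle_mm_fault(vma, address, flags, regs);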

Signed-off-by: Michel Lespinasse <mic...@lespinasse.org>
---
 include/linux/mm.h | 12 +++++++++---
 mm/memory.c        | 10 +++++++---
 2 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 021fdab5b721..d5988e78e6ab 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1724,9 +1724,15 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
-extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
-                                 unsigned long address, unsigned int flags,
-                                 struct pt_regs *regs);
+extern vm_fault_t do_handle_mm_fault(struct vm_area_struct *vma,
+               unsigned long address, unsigned int flags,
+               unsigned long seq, struct pt_regs *regs);
+static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
+               unsigned long address, unsigned int flags,
+               struct pt_regs *regs)
+{
+       return do_handle_mm_fault(vma, address, flags, 0, regs);
+}
 extern int fixup_user_fault(struct mm_struct *mm,
                            unsigned long address, unsigned int fault_flags,
                            bool *unlocked);
diff --git a/mm/memory.c b/mm/memory.c
index 477c98bfdd9d..3691be1f1319 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4509,11 +4509,15 @@ static inline void mm_account_fault(struct pt_regs *regs,
  * The mmap_lock may have been released depending on flags and our
  * return value.  See filemap_fault() and __lock_page_or_retry().
  */
-vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
-                          unsigned int flags, struct pt_regs *regs)
+vm_fault_t do_handle_mm_fault(struct vm_area_struct *vma,
+               unsigned long address, unsigned int flags,
+               unsigned long seq, struct pt_regs *regs)
 {
        vm_fault_t ret;
 
+       if (flags & FAULT_FLAG_SPECULATIVE)
+               return VM_FAULT_RETRY;
+
        __set_current_state(TASK_RUNNING);
 
        count_vm_event(PGFAULT);
@@ -4555,7 +4559,7 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(handle_mm_fault);
+EXPORT_SYMBOL_GPL(do_handle_mm_fault);
 
 #ifndef __PAGETABLE_P4D_FOLDED
 /*
-- 
2.20.1
