In the x86 fault handler, only attempt speculative page faults (SPF) if the vma is anonymous.

In do_handle_mm_fault(), let speculative page faults proceed as long
as they fall within anonymous vmas. This enables the speculative
handling code in __handle_mm_fault() and do_anonymous_page().

In handle_pte_fault(), if vmf->pte is set (the original pte was not
pte_none), catch speculative faults and return VM_FAULT_RETRY as
those cases are not implemented yet. Also assert that do_fault()
is not reached in the speculative case.
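
For reference, the resulting speculative path on the arch side is
structured roughly as follows (a simplified sketch of
do_user_addr_fault(); the sequence-count read and the exact
do_handle_mm_fault() signature are assumed from earlier patches in
this series):

	seq = mmap_seq_read_start(mm);	/* assumed helper name */
	if (seq & 1)
		goto spf_abort;
	rcu_read_lock();
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address || !vma_is_anonymous(vma)) {
		rcu_read_unlock();
		goto spf_abort;
	}
	/* assumed signature: (vma, address, flags, seq, regs) */
	fault = do_handle_mm_fault(vma, address,
				   flags | FAULT_FLAG_SPECULATIVE, seq, regs);
	rcu_read_unlock();
	if (!(fault & VM_FAULT_RETRY))
		goto done;
spf_abort:
	/* fall back to taking mmap_lock and handling the fault again */

Any case the speculative path does not handle comes back as
VM_FAULT_RETRY, so the bailouts added below simply push the
unimplemented cases onto this existing non-speculative fallback.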

Signed-off-by: Michel Lespinasse <mic...@lespinasse.org>
---
 arch/x86/mm/fault.c |  2 +-
 mm/memory.c         | 16 ++++++++++++----
 2 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f8c8e325af77..fbf265f56a06 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1324,7 +1324,7 @@ void do_user_addr_fault(struct pt_regs *regs,
                goto spf_abort;
        rcu_read_lock();
        vma = find_vma(mm, address);
-       if (!vma || vma->vm_start > address) {
+       if (!vma || vma->vm_start > address || !vma_is_anonymous(vma)) {
                rcu_read_unlock();
                goto spf_abort;
        }
diff --git a/mm/memory.c b/mm/memory.c
index fd84576f9c01..a2c5bf29f989 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4103,6 +4103,8 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
        struct mm_struct *vm_mm = vma->vm_mm;
        vm_fault_t ret;
 
+       VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
+
        /*
         * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
         */
@@ -4340,6 +4342,11 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
                        return do_fault(vmf);
        }
 
+       if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+               pte_unmap(vmf->pte);
+               return VM_FAULT_RETRY;
+       }
+
        if (!pte_present(vmf->orig_pte))
                return do_swap_page(vmf);
 
@@ -4668,8 +4675,7 @@ vm_fault_t do_handle_mm_fault(struct vm_area_struct *vma,
 {
        vm_fault_t ret;
 
-       if (flags & FAULT_FLAG_SPECULATIVE)
-               return VM_FAULT_RETRY;
+       VM_BUG_ON((flags & FAULT_FLAG_SPECULATIVE) && !vma_is_anonymous(vma));
 
        __set_current_state(TASK_RUNNING);
 
@@ -4691,10 +4697,12 @@ vm_fault_t do_handle_mm_fault(struct vm_area_struct *vma,
        if (flags & FAULT_FLAG_USER)
                mem_cgroup_enter_user_fault();
 
-       if (unlikely(is_vm_hugetlb_page(vma)))
+       if (unlikely(is_vm_hugetlb_page(vma))) {
+               VM_BUG_ON(flags & FAULT_FLAG_SPECULATIVE);
                ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
-       else
+       } else {
                ret = __handle_mm_fault(vma, address, flags, seq);
+       }
 
        if (flags & FAULT_FLAG_USER) {
                mem_cgroup_exit_user_fault();
-- 
2.20.1
