Introduce pte_spinlock() to fetch and take the page table lock. This is
needed because handle_pte_fault() calls pte_offset_map() and then
fetches and spin-locks vmf->ptl as a separate step; this fetch-and-lock
sequence was previously embedded in the call to pte_offset_map_lock().

The helper returns a bool so that its callers can bail out with
VM_FAULT_RETRY should a later implementation fail to take the lock;
the current implementation always succeeds.

Signed-off-by: Laurent Dufour <[email protected]>
---
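Note: for reference, pte_offset_map_lock() combines the map and the lock
in a single helper; this is a rough paraphrase of its shape in
include/linux/mm.h (the real version is a macro and details may differ
slightly in this tree):

    #define pte_offset_map_lock(mm, pmd, address, ptlp) \
    ({ \
            spinlock_t *__ptl = pte_lockptr(mm, pmd);    /* fetch the lock */ \
            pte_t *__pte = pte_offset_map(pmd, address); /* map the PTE */ \
            *(ptlp) = __ptl; \
            spin_lock(__ptl);                            /* take the lock */ \
            __pte; \
    })

pte_spinlock() factors out only the pte_lockptr() + spin_lock() pair for
the call sites that already have the PTE mapped, leaving the combined
map-and-lock variant pte_map_lock() for the others.
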
 mm/memory.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index c61042cf8b52..3cbce3762d17 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2240,6 +2240,13 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
        pte_unmap_unlock(vmf->pte, vmf->ptl);
 }
 
+static bool pte_spinlock(struct vm_fault *vmf)
+{
+       vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+       spin_lock(vmf->ptl);
+       return true;
+}
+
 static bool pte_map_lock(struct vm_fault *vmf)
 {
        vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl);
@@ -3554,8 +3561,8 @@ static int do_numa_page(struct vm_fault *vmf)
         * validation through pte_unmap_same(). It's of NUMA type but
         * the pfn may be screwed if the read is non atomic.
         */
-       vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
-       spin_lock(vmf->ptl);
+       if (!pte_spinlock(vmf))
+               return VM_FAULT_RETRY;
        if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
                pte_unmap_unlock(vmf->pte, vmf->ptl);
                goto out;
@@ -3747,8 +3754,8 @@ static int handle_pte_fault(struct vm_fault *vmf)
        if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
                return do_numa_page(vmf);
 
-       vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-       spin_lock(vmf->ptl);
+       if (!pte_spinlock(vmf))
+               return VM_FAULT_RETRY;
        entry = vmf->orig_pte;
        if (unlikely(!pte_same(*vmf->pte, entry)))
                goto unlock;
-- 
2.7.4
