When pte_map_lock() fails to lock the pte, or when the VMA is no longer valid, the fault entry's fields must not be set, so that the caller won't try to unmap and unlock a pte that was never acquired.
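For context, the caller side of this contract looks roughly as follows. This is a minimal sketch, assuming a caller along the lines of handle_pte_fault() that bails out when the lock cannot be taken; the caller code itself is not part of this patch and the VM_FAULT_RETRY return is illustrative:

	if (!pte_map_lock(vmf))
		return VM_FAULT_RETRY;	/* vmf->pte/vmf->ptl were never set, nothing to undo */
	/* ... inspect or update the pte ... */
	pte_unmap_unlock(vmf->pte, vmf->ptl);

Working on locals and publishing them to vmf only on full success keeps this invariant trivially true.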
Signed-off-by: Laurent Dufour <lduf...@linux.vnet.ibm.com>
---
 mm/memory.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index f8afd52f0d34..3b28de5838c7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2135,6 +2135,8 @@ static bool pte_spinlock(struct vm_fault *vmf)
 static bool pte_map_lock(struct vm_fault *vmf)
 {
 	bool ret = false;
+	pte_t *pte;
+	spinlock_t *ptl;
 
 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
@@ -2159,18 +2161,20 @@ static bool pte_map_lock(struct vm_fault *vmf)
 	 * to invalidate TLB but this CPU has irq disabled.
 	 * Since we are in a speculative path, accept it could fail
 	 */
-	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-	vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
-	if (unlikely(!spin_trylock(vmf->ptl))) {
-		pte_unmap(vmf->pte);
+	ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+	pte = pte_offset_map(vmf->pmd, vmf->address);
+	if (unlikely(!spin_trylock(ptl))) {
+		pte_unmap(pte);
 		goto out;
 	}
 
 	if (vma_is_dead(vmf->vma, vmf->sequence)) {
-		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		pte_unmap_unlock(pte, ptl);
 		goto out;
 	}
 
+	vmf->pte = pte;
+	vmf->ptl = ptl;
 	ret = true;
 out:
 	local_irq_enable();
--
2.7.4
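A note on the design choice: the speculative path uses spin_trylock() rather than spin_lock() because it runs with IRQs disabled, and spinning on the ptl could deadlock against a CPU that holds the lock while waiting for an IPI-driven TLB flush to be acknowledged. Schematically, this is the hazard named in the code comment above, not code from this series:

	/*
	 * CPU0 (speculative fault,          CPU1 (holds ptl, flushing TLB)
	 *       irqs disabled)
	 *
	 * spin_lock(ptl);  <- would spin    flush_tlb_range();
	 *                                     -> sends IPI to CPU0, waits
	 * IPI never serviced (irqs off)     ptl never released
	 */

Giving up and retrying the fault non-speculatively is the cheap way out of this cycle, which is why the function is allowed to fail.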