From: Peter Zijlstra <pet...@infradead.org>

One of the side effects of speculating on faults (without holding
mmap_sem) is that we can race with free_pgtables() and therefore we
cannot assume the page-tables will stick around.

Remove the reliance on the pte pointer.

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>

[Remove only if !CONFIG_SPF]

Signed-off-by: Laurent Dufour <lduf...@linux.vnet.ibm.com>
---
 mm/memory.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/mm/memory.c b/mm/memory.c
index 6632c9b357c9..b7a9baf3df8a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2287,6 +2287,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
 
+#ifndef CONFIG_SPF
 /*
  * handle_pte_fault chooses page fault handler according to an entry which was
  * read non-atomically. Before making any commitment, on those architectures
@@ -2296,7 +2297,7 @@ EXPORT_SYMBOL_GPL(apply_to_page_range);
  * and do_anonymous_page can safely check later on).
  */
 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
-				pte_t *page_table, pte_t orig_pte)
+					pte_t *page_table, pte_t orig_pte)
 {
 	int same = 1;
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
@@ -2310,6 +2311,7 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
 	pte_unmap(page_table);
 	return same;
 }
+#endif /* CONFIG_SPF */
 
 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
 {
@@ -2871,11 +2873,14 @@ int do_swap_page(struct vm_fault *vmf)
 
 	if (vma_readahead)
 		page = swap_readahead_detect(vmf, &swap_ra);
+
+#ifndef CONFIG_SPF
 	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
 		if (page)
 			put_page(page);
 		goto out;
 	}
+#endif
 
 	entry = pte_to_swp_entry(vmf->orig_pte);
 	if (unlikely(non_swap_entry(entry))) {
-- 
2.7.4
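For reference, below is a sketch of the pte_unmap_same() helper that the
patch compiles out when CONFIG_SPF is set. The prologue and epilogue are
visible in the hunks above; the middle is reconstructed from mm/memory.c
of this era and may differ slightly. The helper re-reads the PTE through
the page_table pointer under the PTE lock, which is only safe while
mmap_sem guarantees free_pgtables() cannot tear the page tables down
underneath the fault handler, hence it cannot be used on the speculative
path:

/*
 * Sketch of the helper guarded by #ifndef CONFIG_SPF above
 * (reconstructed for illustration, not part of this patch).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				 pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	/*
	 * Only needed when a pte_t is wider than a word, i.e. when the
	 * earlier lockless read of the PTE could have been torn.
	 */
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);

		spin_lock(ptl);
		/*
		 * Dereferences page_table: this assumes the page-table
		 * page is still present, which speculative faults
		 * (no mmap_sem held) can no longer guarantee.
		 */
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}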