From: Peter Zijlstra <pet...@infradead.org>

One of the side effects of speculating on faults (without holding
mmap_sem) is that we can race with free_pgtables() and therefore we
cannot assume the page-tables will stick around.
Remove the reliance on the pte pointer.

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 mm/memory.c | 29 -----------------------------
 1 file changed, 29 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index ec4e15494901..30bccfa00630 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2270,30 +2270,6 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
 
-/*
- * handle_pte_fault chooses page fault handler according to an entry which was
- * read non-atomically. Before making any commitment, on those architectures
- * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
- * parts, do_swap_page must check under lock before unmapping the pte and
- * proceeding (but do_wp_page is only called after already making such a check;
- * and do_anonymous_page can safely check later on).
- */
-static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
-				pte_t *page_table, pte_t orig_pte)
-{
-	int same = 1;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-	if (sizeof(pte_t) > sizeof(unsigned long)) {
-		spinlock_t *ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
-		same = pte_same(*page_table, orig_pte);
-		spin_unlock(ptl);
-	}
-#endif
-	pte_unmap(page_table);
-	return same;
-}
-
 static inline void cow_user_page(struct page *dst, struct page *src,
 				 unsigned long va, struct vm_area_struct *vma)
 {
 	debug_dma_assert_idle(src);
@@ -2854,11 +2830,6 @@ int do_swap_page(struct vm_fault *vmf)
 
 	if (vma_readahead)
 		page = swap_readahead_detect(vmf, &swap_ra);
-	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
-		if (page)
-			put_page(page);
-		goto out;
-	}
 
 	entry = pte_to_swp_entry(vmf->orig_pte);
 	if (unlikely(non_swap_entry(entry))) {
-- 
2.7.4
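
For readers who have not met this helper before, here is a minimal
user-space sketch of the check being removed. All names are hypothetical:
a pthread mutex stands in for the page-table spinlock, and a plain
uint64_t stands in for a (possibly multi-word, as on i386 PAE) pte_t.
It is an illustration of the pattern, not kernel code.

	/*
	 * Sketch of the pattern pte_unmap_same() implemented: take a
	 * non-atomic snapshot of the entry, then later re-read it under
	 * the lock and bail out if it no longer matches the snapshot.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <pthread.h>

	static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* "pte lock" */
	static uint64_t pte;                                    /* "page-table entry" */

	/* Analogue of pte_unmap_same(): is the entry still what we read? */
	static bool pte_still_same(const uint64_t *page_table, uint64_t orig_pte)
	{
		bool same;

		pthread_mutex_lock(&ptl);
		same = (*page_table == orig_pte);  /* pte_same() under the lock */
		pthread_mutex_unlock(&ptl);

		return same;
	}

	int main(void)
	{
		uint64_t orig_pte = pte;  /* non-atomic snapshot */

		/* ... fault handling may sleep here; the entry can change ... */

		if (!pte_still_same(&pte, orig_pte)) {
			puts("entry changed, back out");  /* the old 'goto out' path */
			return 0;
		}
		puts("entry unchanged, proceed");
		return 0;
	}

The point of the removal is that with speculative faults even this
locked re-read is unsafe: the page table that *page_table points into
may itself have been freed by free_pgtables(), so dereferencing the
saved pte pointer is no longer valid and the handlers must work from
the vmf->orig_pte snapshot alone.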