If __unmap_hugepage_range() tries to unmap the address range over which
hugepage migration is on the way, we get the wrong page because pte_page()
doesn't work for migration entries. This patch calls pte_to_swp_entry() and
migration_entry_to_page() to get the right page for migration entries.

Signed-off-by: Naoya Horiguchi <n-horigu...@ah.jp.nec.com>
Cc: <sta...@vger.kernel.org>  # [2.6.36+]
---
 mm/hugetlb.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git mmotm-2014-08-25-16-52.orig/mm/hugetlb.c mmotm-2014-08-25-16-52/mm/hugetlb.c
index 1ed9df6def54..0a4511115ee0 100644
--- mmotm-2014-08-25-16-52.orig/mm/hugetlb.c
+++ mmotm-2014-08-25-16-52/mm/hugetlb.c
@@ -2652,6 +2652,13 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                if (huge_pte_none(pte))
                        goto unlock;
 
+               if (unlikely(is_hugetlb_entry_migration(pte))) {
+                       swp_entry_t entry = pte_to_swp_entry(pte);
+
+                       page = migration_entry_to_page(entry);
+                       goto clear;
+               }
+
                /*
                 * HWPoisoned hugepage is already unmapped and dropped reference
                 */
@@ -2677,7 +2684,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                         */
                        set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
                }
-
+clear:
                pte = huge_ptep_get_and_clear(mm, address, ptep);
                tlb_remove_tlb_entry(tlb, ptep, address);
                if (huge_pte_dirty(pte))
-- 
1.9.3

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to