This is a note to let you know that I've just added the patch titled

    mm: numa: call MMU notifiers on THP migration

to the 3.12-stable tree which can be found at:
    
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     mm-numa-call-mmu-notifiers-on-thp-migration.patch
and it can be found in the queue-3.12 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.


From mgor...@suse.de  Tue Jan  7 09:45:19 2014
From: Mel Gorman <mgor...@suse.de>
Date: Tue,  7 Jan 2014 14:00:37 +0000
Subject: mm: numa: call MMU notifiers on THP migration
To: gre...@linuxfoundation.org
Cc: athorl...@sgi.com, r...@redhat.com, chegu_vi...@hp.com, Mel Gorman 
<mgor...@suse.de>, stable@vger.kernel.org
Message-ID: <1389103248-17617-3-git-send-email-mgor...@suse.de>

From: Mel Gorman <mgor...@suse.de>

commit f714f4f20e59ea6eea264a86b9a51fd51b88fc54 upstream.

MMU notifiers must be called on THP page migration or secondary MMUs
will get very confused.

Signed-off-by: Mel Gorman <mgor...@suse.de>
Reviewed-by: Rik van Riel <r...@redhat.com>
Cc: Alex Thorlton <athorl...@sgi.com>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
---
 mm/migrate.c |   22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -36,6 +36,7 @@
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
 #include <linux/balloon_compaction.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 
@@ -1655,12 +1656,13 @@ int migrate_misplaced_transhuge_page(str
                                unsigned long address,
                                struct page *page, int node)
 {
-       unsigned long haddr = address & HPAGE_PMD_MASK;
        pg_data_t *pgdat = NODE_DATA(node);
        int isolated = 0;
        struct page *new_page = NULL;
        struct mem_cgroup *memcg = NULL;
        int page_lru = page_is_file_cache(page);
+       unsigned long mmun_start = address & HPAGE_PMD_MASK;
+       unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
        pmd_t orig_entry;
 
        /*
@@ -1702,10 +1704,12 @@ int migrate_misplaced_transhuge_page(str
        WARN_ON(PageLRU(new_page));
 
        /* Recheck the target PMD */
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
 fail_putback:
                spin_unlock(&mm->page_table_lock);
+               mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
                /* Reverse changes made by migrate_page_copy() */
                if (TestClearPageActive(new_page))
@@ -1746,15 +1750,16 @@ fail_putback:
         * The SetPageUptodate on the new page and page_add_new_anon_rmap
         * guarantee the copy is visible before the pagetable update.
         */
-       flush_cache_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-       page_add_new_anon_rmap(new_page, vma, haddr);
-       pmdp_clear_flush(vma, haddr, pmd);
-       set_pmd_at(mm, haddr, pmd, entry);
+       flush_cache_range(vma, mmun_start, mmun_end);
+       page_add_new_anon_rmap(new_page, vma, mmun_start);
+       pmdp_clear_flush(vma, mmun_start, pmd);
+       set_pmd_at(mm, mmun_start, pmd, entry);
+       flush_tlb_range(vma, mmun_start, mmun_end);
        update_mmu_cache_pmd(vma, address, &entry);
 
        if (page_count(page) != 2) {
-               set_pmd_at(mm, haddr, pmd, orig_entry);
-               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+               set_pmd_at(mm, mmun_start, pmd, orig_entry);
+               flush_tlb_range(vma, mmun_start, mmun_end);
                update_mmu_cache_pmd(vma, address, &entry);
                page_remove_rmap(new_page);
                goto fail_putback;
@@ -1769,6 +1774,7 @@ fail_putback:
         */
        mem_cgroup_end_migration(memcg, page, new_page, true);
        spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
        unlock_page(new_page);
        unlock_page(page);
@@ -1789,7 +1795,7 @@ out_dropref:
        spin_lock(&mm->page_table_lock);
        if (pmd_same(*pmd, entry)) {
                entry = pmd_mknonnuma(entry);
-               set_pmd_at(mm, haddr, pmd, entry);
+               set_pmd_at(mm, mmun_start, pmd, entry);
                update_mmu_cache_pmd(vma, address, &entry);
        }
        spin_unlock(&mm->page_table_lock);


Patches currently in stable-queue which might be from mgor...@suse.de are

queue-3.12/sched-numa-skip-inaccessible-vmas.patch
queue-3.12/mm-compaction-respect-ignore_skip_hint-in-update_pageblock_skip.patch
queue-3.12/mm-fix-tlb-flush-race-between-migration-and-change_protection_range.patch
queue-3.12/mm-numa-ensure-anon_vma-is-locked-to-prevent-parallel-thp-splits.patch
queue-3.12/mm-hugetlb-check-for-pte-null-pointer-in-__page_check_address.patch
queue-3.12/mm-numa-serialise-parallel-get_user_page-against-thp-migration.patch
queue-3.12/mm-munlock-fix-deadlock-in-__munlock_pagevec.patch
queue-3.12/mm-numa-guarantee-that-tlb_flush_pending-updates-are-visible-before-page-table-updates.patch
queue-3.12/mm-numa-avoid-unnecessary-work-on-the-failure-path.patch
queue-3.12/mm-page_alloc-revert-numa-aspect-of-fair-allocation-policy.patch
queue-3.12/mm-mempolicy-correct-putback-method-for-isolate-pages-if-failed.patch
queue-3.12/mm-numa-call-mmu-notifiers-on-thp-migration.patch
queue-3.12/mm-munlock-fix-a-bug-where-thp-tail-page-is-encountered.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to