From: Matthew Wilcox <mawil...@microsoft.com>
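
Convert page migration from the radix tree API to the XArray.  Using an
XA_STATE lets migrate_page_move_mapping() and
migrate_huge_page_move_mapping() look up the old page and store the new
one through the same walk state, instead of carrying a slot pointer
between radix_tree_lookup_slot() and radix_tree_replace_slot().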

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
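Note for reviewers unfamiliar with the xas_* interface: below is a
minimal, illustrative sketch of the pattern this patch switches to.
The replace_entry() helper is hypothetical and its error handling is
far simpler than the real freeze/unfreeze sequence in
migrate_page_move_mapping(); it only shows how XA_STATE carries the
walk state so that xas_load() and xas_store() operate on the same slot
without a second lookup.

#include <linux/xarray.h>

/* Replace @old with @new at @index, failing if @old is no longer there. */
static int replace_entry(struct xarray *xa, unsigned long index,
			 void *old, void *new)
{
	XA_STATE(xas, xa, index);

	xas_lock_irq(&xas);
	if (xas_load(&xas) != old) {
		/* Someone else changed the entry; let the caller retry. */
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}
	/* Reuses the walk cached by xas_load(); no second tree walk. */
	xas_store(&xas, new);
	xas_unlock_irq(&xas);

	return 0;
}
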
 mm/migrate.c | 41 ++++++++++++++++-------------------------
 1 file changed, 16 insertions(+), 25 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 75d19904dd9a..7122fec9b075 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -322,7 +322,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
        page = migration_entry_to_page(entry);
 
        /*
-        * Once radix-tree replacement of page migration started, page_count
+        * Once page cache replacement of page migration started, page_count
         * *must* be zero. And, we don't want to call wait_on_page_locked()
         * against a page without get_page().
         * So, we use get_page_unless_zero(), here. Even failed, page fault
@@ -437,10 +437,10 @@ int migrate_page_move_mapping(struct address_space *mapping,
                struct buffer_head *head, enum migrate_mode mode,
                int extra_count)
 {
+       XA_STATE(xas, &mapping->pages, page_index(page));
        struct zone *oldzone, *newzone;
        int dirty;
        int expected_count = 1 + extra_count;
-       void **pslot;
 
        /*
         * Device public or private pages have an extra refcount as they are
@@ -466,21 +466,16 @@ int migrate_page_move_mapping(struct address_space *mapping,
        oldzone = page_zone(page);
        newzone = page_zone(newpage);
 
-       xa_lock_irq(&mapping->pages);
-
-       pslot = radix_tree_lookup_slot(&mapping->pages,
-                                       page_index(page));
+       xas_lock_irq(&xas);
 
        expected_count += 1 + page_has_private(page);
-       if (page_count(page) != expected_count ||
-               radix_tree_deref_slot_protected(pslot,
-                                       &mapping->pages.xa_lock) != page) {
-               xa_unlock_irq(&mapping->pages);
+       if (page_count(page) != expected_count || xas_load(&xas) != page) {
+               xas_unlock_irq(&xas);
                return -EAGAIN;
        }
 
        if (!page_ref_freeze(page, expected_count)) {
-               xa_unlock_irq(&mapping->pages);
+               xas_unlock_irq(&xas);
                return -EAGAIN;
        }
 
@@ -494,7 +489,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
        if (mode == MIGRATE_ASYNC && head &&
                        !buffer_migrate_lock_buffers(head, mode)) {
                page_ref_unfreeze(page, expected_count);
-               xa_unlock_irq(&mapping->pages);
+               xas_unlock_irq(&xas);
                return -EAGAIN;
        }
 
@@ -522,7 +517,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
                SetPageDirty(newpage);
        }
 
-       radix_tree_replace_slot(&mapping->pages, pslot, newpage);
+       xas_store(&xas, newpage);
 
        /*
         * Drop cache reference from old page by unfreezing
@@ -531,7 +526,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        page_ref_unfreeze(page, expected_count - 1);
 
-       xa_unlock(&mapping->pages);
+       xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */
 
        /*
@@ -571,22 +566,18 @@ EXPORT_SYMBOL(migrate_page_move_mapping);
 int migrate_huge_page_move_mapping(struct address_space *mapping,
                                   struct page *newpage, struct page *page)
 {
+       XA_STATE(xas, &mapping->pages, page_index(page));
        int expected_count;
-       void **pslot;
-
-       xa_lock_irq(&mapping->pages);
-
-       pslot = radix_tree_lookup_slot(&mapping->pages, page_index(page));
 
+       xas_lock_irq(&xas);
        expected_count = 2 + page_has_private(page);
-       if (page_count(page) != expected_count ||
-               radix_tree_deref_slot_protected(pslot, &mapping->pages.xa_lock) != page) {
-               xa_unlock_irq(&mapping->pages);
+       if (page_count(page) != expected_count || xas_load(&xas) != page) {
+               xas_unlock_irq(&xas);
                return -EAGAIN;
        }
 
        if (!page_ref_freeze(page, expected_count)) {
-               xa_unlock_irq(&mapping->pages);
+               xas_unlock_irq(&xas);
                return -EAGAIN;
        }
 
@@ -595,11 +586,11 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 
        get_page(newpage);
 
-       radix_tree_replace_slot(&mapping->pages, pslot, newpage);
+       xas_store(&xas, newpage);
 
        page_ref_unfreeze(page, expected_count - 1);
 
-       xa_unlock_irq(&mapping->pages);
+       xas_unlock_irq(&xas);
 
        return MIGRATEPAGE_SUCCESS;
 }
-- 
2.15.1
