Instead of calling find_get_entry() for every page index, use an XArray
iterator to skip over NULL entries, and avoid calling get_page(),
because we only want the swap entries.

Signed-off-by: Matthew Wilcox (Oracle) <wi...@infradead.org>
Acked-by: Johannes Weiner <han...@cmpxchg.org>
---
 mm/madvise.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index dd1d43cf026d..96189acd6969 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -224,25 +224,28 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                struct address_space *mapping)
 {
-       pgoff_t index;
+       XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
+       pgoff_t end_index = linear_page_index(vma, end) - 1;
        struct page *page;
-       swp_entry_t swap;
 
-       for (; start < end; start += PAGE_SIZE) {
-               index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+       rcu_read_lock();
+       xas_for_each(&xas, page, end_index) {
+               swp_entry_t swap;
 
-               page = find_get_entry(mapping, index);
-               if (!xa_is_value(page)) {
-                       if (page)
-                               put_page(page);
+               if (!xa_is_value(page))
                        continue;
-               }
+               rcu_read_unlock();
+
                swap = radix_to_swp_entry(page);
                page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
                                                        NULL, 0, false);
                if (page)
                        put_page(page);
+
+               rcu_read_lock();
+               xas_reset(&xas);
        }
+       rcu_read_unlock();
 
        lru_add_drain();        /* Push any new pages onto the LRU now */
 }
-- 
2.28.0

Reply via email to