Reduce LRU lock contention when inserting shmem pages by staging pages
to be added to the same LRU and adding them en masse.

Signed-off-by: Anthony Yznaga <anthony.yzn...@oracle.com>
---
 mm/shmem.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index c3fa72061d8a..63299da75166 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -845,6 +845,7 @@ int shmem_insert_pages(struct mm_struct *charge_mm, struct inode *inode,
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        gfp_t gfp = mapping_gfp_mask(mapping);
+       LRU_SPLICE(splice);
        int i, err;
        int nr = 0;
 
@@ -908,7 +909,7 @@ int shmem_insert_pages(struct mm_struct *charge_mm, struct inode *inode,
 
        for (i = 0; i < npages; i++) {
                if (!PageLRU(pages[i]))
-                       lru_cache_add(pages[i]);
+                       lru_splice_add(pages[i], &splice);
 
                flush_dcache_page(pages[i]);
                SetPageUptodate(pages[i]);
@@ -917,6 +918,8 @@ int shmem_insert_pages(struct mm_struct *charge_mm, struct inode *inode,
                unlock_page(pages[i]);
        }
 
+       add_splice_to_lru_list(&splice);
+
        return 0;
 
 out_release:
-- 
1.8.3.1

Reply via email to