From: Davidlohr Bueso <d...@stgolabs.net>

Similarly to the anon memory counterpart, we can share the mapping's lock
ownership as the interval tree is not modified when doing the walk,
only the file page.
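
For clarity, the snippet below is an illustrative sketch (not part of the
diff) of why sharing is safe: the rmap walk only traverses the interval
tree, so concurrent walkers can hold i_mmap_rwsem in read mode, while
paths that actually insert or remove VMAs keep taking it in write mode:

	/* Illustrative sketch only, not part of this patch. */

	/* Readers: the rmap walk only looks up VMAs in the tree, so any
	 * number of walkers may hold the lock at the same time. */
	i_mmap_lock_read(mapping);	/* down_read(&mapping->i_mmap_rwsem) */
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		/* operate on the file page mapped at 'address';
		 * the interval tree itself is never modified here */
	}
	i_mmap_unlock_read(mapping);	/* up_read(&mapping->i_mmap_rwsem) */

	/* Writers: mmap/munmap-style paths that change the tree still
	 * serialize against all walkers. */
	i_mmap_lock_write(mapping);	/* down_write(&mapping->i_mmap_rwsem) */
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	i_mmap_unlock_write(mapping);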

Signed-off-by: Davidlohr Bueso <dbu...@suse.de>
Acked-by: Rik van Riel <r...@redhat.com>
Acked-by: "Kirill A. Shutemov" <kir...@shutemov.name>
Acked-by: Hugh Dickins <hu...@google.com>
Cc: Oleg Nesterov <o...@redhat.com>
Acked-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Srikar Dronamraju <sri...@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mgor...@suse.de>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>

https://jira.sw.ru/browse/PSBM-122663
(cherry picked from commit 3dec0ba0be6a532cac949e02b853021bf6d57dad)
Signed-off-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
---
 include/linux/fs.h | 10 ++++++++++
 mm/rmap.c          |  9 +++++----
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/include/linux/fs.h b/include/linux/fs.h
index f422b0f7b02a..acedffc46fe4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -709,6 +709,16 @@ static inline void i_mmap_unlock_write(struct address_space *mapping)
        up_write(&mapping->i_mmap_rwsem);
 }
 
+static inline void i_mmap_lock_read(struct address_space *mapping)
+{
+       down_read(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_unlock_read(struct address_space *mapping)
+{
+       up_read(&mapping->i_mmap_rwsem);
+}
+
 /*
  * Might pages of this file be mapped into userspace?
  */
diff --git a/mm/rmap.c b/mm/rmap.c
index e72be32c3dae..523957450d20 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1723,7 +1723,8 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
        if (!mapping)
                return ret;
        pgoff = page_to_pgoff(page);
-       down_write_nested(&mapping->i_mmap_rwsem, SINGLE_DEPTH_NESTING);
+
+       i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
 
@@ -1748,7 +1749,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
                if (!mapping_mapped(peer))
                        continue;
 
-               i_mmap_lock_write(peer);
+               i_mmap_lock_read(peer);
 
                vma_interval_tree_foreach(vma, &peer->i_mmap, pgoff, pgoff) {
                        unsigned long address = vma_address(page, vma);
@@ -1764,7 +1765,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 
                        cond_resched();
                }
-               i_mmap_unlock_write(peer);
+               i_mmap_unlock_read(peer);
 
                if (ret != SWAP_AGAIN)
                        goto done;
@@ -1772,7 +1773,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
                        goto done;
        }
 done:
-       i_mmap_unlock_write(mapping);
+       i_mmap_unlock_read(mapping);
        return ret;
 }
 
-- 
2.26.2

