From: John Hubbard <[email protected]>

Introduces put_user_page(), which simply calls put_page().
This provides a way to update all get_user_pages*() callers,
so that they call put_user_page() instead of put_page().
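
For example, a typical caller conversion (an illustrative sketch only;
the variable names here are hypothetical, and actual callers are
converted in later patches) looks like this:

    npages = get_user_pages(start, nr, FOLL_WRITE, pages, NULL);
    if (npages < 0)
            return npages;
    ...
    /* Release with the new call, instead of put_page(pages[i]): */
    for (i = 0; i < npages; i++)
            put_user_page(pages[i]);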

Also introduces put_user_pages(), and a few dirty/locked variations,
as a replacement for release_pages(), and for open-coded loops that
release multiple pages. These may be used for subsequent performance
improvements, via batching of pages to be released.
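
For example, an open-coded release loop such as this one (an
illustrative sketch, not taken from any particular caller):

    for (i = 0; i < npages; i++) {
            set_page_dirty_lock(pages[i]);
            put_page(pages[i]);
    }

can then be replaced with a single call:

    put_user_pages_dirty_lock(pages, npages);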

This prepares for eventually fixing the problem described
in [1], and follows the plan laid out in [2], [3], [4].

[1] https://lwn.net/Articles/753027/ : "The Trouble with get_user_pages()"

[2] https://lkml.kernel.org/r/[email protected]
    Proposed steps for fixing get_user_pages() + DMA problems.

[3] https://lkml.kernel.org/r/[email protected]
    Bounce buffers (otherwise [2] is not really viable).

[4] https://lkml.kernel.org/r/[email protected]
    Follow-up discussions.

CC: Matthew Wilcox <[email protected]>
CC: Michal Hocko <[email protected]>
CC: Christopher Lameter <[email protected]>
CC: Jason Gunthorpe <[email protected]>
CC: Dan Williams <[email protected]>
CC: Jan Kara <[email protected]>
CC: Al Viro <[email protected]>
CC: Jerome Glisse <[email protected]>
CC: Christoph Hellwig <[email protected]>
CC: Ralph Campbell <[email protected]>
Signed-off-by: John Hubbard <[email protected]>
---
 include/linux/mm.h | 48 ++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 46 insertions(+), 2 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0416a7204be3..305b206e6851 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -137,6 +137,8 @@ extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
                                    size_t *, loff_t *);
 extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
                                    size_t *, loff_t *);
+int set_page_dirty(struct page *page);
+int set_page_dirty_lock(struct page *page);
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
@@ -943,6 +945,50 @@ static inline void put_page(struct page *page)
                __put_page(page);
 }
 
+/* Pages that were pinned via get_user_pages*() should be released via
+ * either put_user_page(), or one of the put_user_pages*() routines
+ * below.
+ */
+static inline void put_user_page(struct page *page)
+{
+       put_page(page);
+}
+
+static inline void put_user_pages_dirty(struct page **pages,
+                                       unsigned long npages)
+{
+       unsigned long index;
+
+       for (index = 0; index < npages; index++) {
+               if (!PageDirty(pages[index]))
+                       set_page_dirty(pages[index]);
+
+               put_user_page(pages[index]);
+       }
+}
+
+static inline void put_user_pages_dirty_lock(struct page **pages,
+                                            unsigned long npages)
+{
+       unsigned long index;
+
+       for (index = 0; index < npages; index++) {
+               if (!PageDirty(pages[index]))
+                       set_page_dirty_lock(pages[index]);
+
+               put_user_page(pages[index]);
+       }
+}
+
+static inline void put_user_pages(struct page **pages,
+                                 unsigned long npages)
+{
+       unsigned long index;
+
+       for (index = 0; index < npages; index++)
+               put_user_page(pages[index]);
+}
+
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define SECTION_IN_PAGE_FLAGS
 #endif
@@ -1534,8 +1580,6 @@ int redirty_page_for_writepage(struct writeback_control *wbc,
 void account_page_dirtied(struct page *page, struct address_space *mapping);
 void account_page_cleaned(struct page *page, struct address_space *mapping,
                          struct bdi_writeback *wb);
-int set_page_dirty(struct page *page);
-int set_page_dirty_lock(struct page *page);
 void __cancel_dirty_page(struct page *page);
 static inline void cancel_dirty_page(struct page *page)
 {
-- 
2.19.0
