Make put_devmap_managed_page return whether it took charge of the page,
and remove the separate page_is_devmap_managed helper.
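
For illustration only (not part of the diff below), the new calling
convention lets a caller drop its reference with a single branch; this
sketch of a hypothetical caller, example_put_page(), mirrors what
put_page() ends up doing after the change:

	/* sketch: hypothetical caller of the new interface */
	static inline void example_put_page(struct page *page)
	{
		/* returns true if the devmap code already dropped the ref */
		if (put_devmap_managed_page(page))
			return;
		/* otherwise fall back to the normal folio refcounting path */
		folio_put(page_folio(page));
	}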

Signed-off-by: Christoph Hellwig <h...@lst.de>
Reviewed-by: Logan Gunthorpe <log...@deltatee.com>
Reviewed-by: Jason Gunthorpe <j...@nvidia.com>
Reviewed-by: Chaitanya Kulkarni <k...@nvidia.com>
Reviewed-by: Dan Williams <dan.j.willi...@intel.com>
---
 include/linux/mm.h | 34 ++++++++++------------------------
 mm/memremap.c      | 20 +++++++++-----------
 mm/swap.c          | 10 +---------
 3 files changed, 20 insertions(+), 44 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 91dd0bc786a9ec..26baadcef4556b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1094,33 +1094,24 @@ static inline bool is_zone_movable_page(const struct page *page)
 #ifdef CONFIG_DEV_PAGEMAP_OPS
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
 
-static inline bool page_is_devmap_managed(struct page *page)
+bool __put_devmap_managed_page(struct page *page);
+static inline bool put_devmap_managed_page(struct page *page)
 {
        if (!static_branch_unlikely(&devmap_managed_key))
                return false;
        if (!is_zone_device_page(page))
                return false;
-       switch (page->pgmap->type) {
-       case MEMORY_DEVICE_PRIVATE:
-       case MEMORY_DEVICE_FS_DAX:
-               return true;
-       default:
-               break;
-       }
-       return false;
+       if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
+           page->pgmap->type != MEMORY_DEVICE_FS_DAX)
+               return false;
+       return __put_devmap_managed_page(page);
 }
 
-void put_devmap_managed_page(struct page *page);
-
 #else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool page_is_devmap_managed(struct page *page)
+static inline bool put_devmap_managed_page(struct page *page)
 {
        return false;
 }
-
-static inline void put_devmap_managed_page(struct page *page)
-{
-}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static inline bool is_device_private_page(const struct page *page)
@@ -1220,16 +1211,11 @@ static inline void put_page(struct page *page)
        struct folio *folio = page_folio(page);
 
        /*
-        * For devmap managed pages we need to catch refcount transition from
-        * 2 to 1, when refcount reach one it means the page is free and we
-        * need to inform the device driver through callback. See
-        * include/linux/memremap.h and HMM for details.
+        * For some devmap managed pages we need to catch refcount transition
+        * from 2 to 1:
         */
-       if (page_is_devmap_managed(&folio->page)) {
-               put_devmap_managed_page(&folio->page);
+       if (put_devmap_managed_page(&folio->page))
                return;
-       }
-
        folio_put(folio);
 }
 
diff --git a/mm/memremap.c b/mm/memremap.c
index 55d23e9f5c04ec..f41233a67edb12 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -502,24 +502,22 @@ void free_devmap_managed_page(struct page *page)
        page->pgmap->ops->page_free(page);
 }
 
-void put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page(struct page *page)
 {
-       int count;
-
-       if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
-               return;
-
-       count = page_ref_dec_return(page);
-
        /*
         * devmap page refcounts are 1-based, rather than 0-based: if
         * refcount is 1, then the page is free and the refcount is
         * stable because nobody holds a reference on the page.
         */
-       if (count == 1)
+       switch (page_ref_dec_return(page)) {
+       case 1:
                free_devmap_managed_page(page);
-       else if (!count)
+               break;
+       case 0:
                __put_page(page);
+               break;
+       }
+       return true;
 }
-EXPORT_SYMBOL(put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page);
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
diff --git a/mm/swap.c b/mm/swap.c
index 08058f74cae23e..25b55c56614311 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -930,16 +930,8 @@ void release_pages(struct page **pages, int nr)
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
-                       /*
-                        * ZONE_DEVICE pages that return 'false' from
-                        * page_is_devmap_managed() do not require special
-                        * processing, and instead, expect a call to
-                        * put_page_testzero().
-                        */
-                       if (page_is_devmap_managed(page)) {
-                               put_devmap_managed_page(page);
+                       if (put_devmap_managed_page(page))
                                continue;
-                       }
                        if (put_page_testzero(page))
                                put_dev_pagemap(page->pgmap);
                        continue;
-- 
2.30.2
