If the page is part of a folio, unlock and put the whole folio at once
instead of handling individual pages one after the other. This reduces
the number of operations once device THPs are in use.
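
For illustration only, a minimal userspace sketch of the stride walk:
advance by the number of pages the folio spans rather than one slot at
a time. The array contents and the nr_pages() helper are made up; the
kernel loop below derives the stride from folio_order() and NR_PAGES().

#include <stdio.h>

/* Pages covered by a folio of the given order (hypothetical helper,
 * standing in for the kernel's NR_PAGES() macro). */
static unsigned long nr_pages(unsigned int order)
{
	return 1UL << order;
}

int main(void)
{
	/* Fabricated migrate PFN array: an order-2 folio at slot 0
	 * (covers slots 0-3), an empty slot, an order-1 folio at
	 * slot 5 (covers slots 5-6), then another empty slot. */
	unsigned long pfns[8]    = { 1, 1, 1, 1, 0, 1, 1, 0 };
	unsigned int order_of[8] = { 2, 0, 0, 0, 0, 1, 0, 0 };
	unsigned long i, ops = 0;

	for (i = 0; i < 8;) {
		unsigned int order = 0;

		if (!pfns[i])
			goto next;

		order = order_of[i];
		ops++;	/* one folio unlock + put for the whole folio */
		printf("put folio at slot %lu spanning %lu pages\n",
		       i, nr_pages(order));
next:
		i += nr_pages(order);
	}
	printf("%lu put operations instead of 6 per-page ones\n", ops);
	return 0;
}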

Suggested-by: Matthew Brost <[email protected]>
Signed-off-by: Francois Dugast <[email protected]>
---
 drivers/gpu/drm/drm_pagemap.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 37d7cfbbb3e8..491de9275add 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -149,15 +149,15 @@ static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
 }
 
 /**
- * drm_pagemap_migration_unlock_put_page() - Put a migration page
- * @page: Pointer to the page to put
+ * drm_pagemap_migration_unlock_put_folio() - Put a migration folio
+ * @folio: Pointer to the folio to put
  *
- * This function unlocks and puts a page.
+ * This function unlocks and puts a folio.
  */
-static void drm_pagemap_migration_unlock_put_page(struct page *page)
+static void drm_pagemap_migration_unlock_put_folio(struct folio *folio)
 {
-       unlock_page(page);
-       put_page(page);
+       folio_unlock(folio);
+       folio_put(folio);
 }
 
 /**
@@ -172,15 +172,23 @@ static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
 {
        unsigned long i;
 
-       for (i = 0; i < npages; ++i) {
+       for (i = 0; i < npages;) {
                struct page *page;
+               struct folio *folio;
+               unsigned int order = 0;
 
                if (!migrate_pfn[i])
-                       continue;
+                       goto next;
 
                page = migrate_pfn_to_page(migrate_pfn[i]);
-               drm_pagemap_migration_unlock_put_page(page);
+               folio = page_folio(page);
+               order = folio_order(folio);
+
+               drm_pagemap_migration_unlock_put_folio(folio);
                migrate_pfn[i] = 0;
+
+next:
+               i += NR_PAGES(order);
        }
 }
 
-- 
2.43.0
