Switch to the common method, shared across all MEMORY_DEVICE_* types,
for requesting access to a ZONE_DEVICE page. The
MEMORY_DEVICE_{PRIVATE,COHERENT} specific expectation that newly
requested pages are locked is moved to the callers.

Cc: Matthew Wilcox <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: "Darrick J. Wong" <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Alistair Popple <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Felix Kuehling <[email protected]>
Cc: Alex Deucher <[email protected]>
Cc: "Christian König" <[email protected]>
Cc: "Pan, Xinhui" <[email protected]>
Cc: David Airlie <[email protected]>
Cc: Daniel Vetter <[email protected]>
Cc: Ben Skeggs <[email protected]>
Cc: Karol Herbst <[email protected]>
Cc: Lyude Paul <[email protected]>
Cc: "Jérôme Glisse" <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
---
 arch/powerpc/kvm/book3s_hv_uvmem.c       |    3 ++-
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c |    3 ++-
 drivers/gpu/drm/nouveau/nouveau_dmem.c   |    3 ++-
 include/linux/memremap.h                 |    1 -
 lib/test_hmm.c                           |    3 ++-
 mm/memremap.c                            |   13 +------------
 6 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index e2f11f9c3f2a..884ec112ad43 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -718,7 +718,8 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
 
        dpage = pfn_to_page(uvmem_pfn);
        dpage->zone_device_data = pvt;
-       zone_device_page_init(dpage);
+       pgmap_request_folios(dpage->pgmap, page_folio(dpage), 1);
+       lock_page(dpage);
        return dpage;
 out_clear:
        spin_lock(&kvmppc_uvmem_bitmap_lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 97a684568ae0..8cf97060122b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -223,7 +223,8 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
        page = pfn_to_page(pfn);
        svm_range_bo_ref(prange->svm_bo);
        page->zone_device_data = prange->svm_bo;
-       zone_device_page_init(page);
+       pgmap_request_folios(page->pgmap, page_folio(page), 1);
+       lock_page(page);
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 5fe209107246..1482533c7ca0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -324,7 +324,8 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
                        return NULL;
        }
 
-       zone_device_page_init(page);
+       pgmap_request_folios(page->pgmap, page_folio(page), 1);
+       lock_page(page);
        return page;
 }
 
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 98196b8d3172..3fb3809d71f3 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -187,7 +187,6 @@ static inline bool folio_is_device_coherent(const struct folio *folio)
 }
 
 #ifdef CONFIG_ZONE_DEVICE
-void zone_device_page_init(struct page *page);
 void *memremap_pages(struct dev_pagemap *pgmap, int nid);
 void memunmap_pages(struct dev_pagemap *pgmap);
 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 67e6f83fe0f8..e4f7219ae3bb 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -632,7 +632,8 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
                        goto error;
        }
 
-       zone_device_page_init(dpage);
+       pgmap_request_folios(dpage->pgmap, page_folio(dpage), 1);
+       lock_page(dpage);
        dpage->zone_device_data = rpage;
        return dpage;
 
diff --git a/mm/memremap.c b/mm/memremap.c
index 87a649ecdc54..c46e700f5245 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -518,18 +518,6 @@ void free_zone_device_page(struct page *page)
                put_dev_pagemap(page->pgmap);
 }
 
-void zone_device_page_init(struct page *page)
-{
-       /*
-        * Drivers shouldn't be allocating pages after calling
-        * memunmap_pages().
-        */
-       WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
-       set_page_count(page, 1);
-       lock_page(page);
-}
-EXPORT_SYMBOL_GPL(zone_device_page_init);
-
 static bool folio_span_valid(struct dev_pagemap *pgmap, struct folio *folio,
                             int nr_folios)
 {
@@ -586,6 +574,7 @@ bool pgmap_request_folios(struct dev_pagemap *pgmap, struct folio *folio,
 
        return true;
 }
+EXPORT_SYMBOL_GPL(pgmap_request_folios);
 
 void pgmap_release_folios(struct dev_pagemap *pgmap, struct folio *folio, int nr_folios)
 {


Reply via email to