On 1/7/26 19:18, Jordan Niethe wrote:
> To create a migrate PFN (mpfn) from a given struct page, the page is
> first converted to its pfn, which is then passed to migrate_pfn().
> 
> A future change will remove device private pages from the physical
> address space. This will mean that device private pages no longer have a
> pfn and must be handled separately.
> 
> Prepare for this with a new helper:
> 
>     - migrate_pfn_from_page()
> 
> This helper takes a struct page as a parameter instead of a pfn, which
> will allow the mpfn to be constructed differently for device private
> pages.
> 
> Signed-off-by: Jordan Niethe <[email protected]>
> ---
> v2: New to series
> ---
>  arch/powerpc/kvm/book3s_hv_uvmem.c       |  2 +-
>  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c |  2 +-
>  drivers/gpu/drm/drm_pagemap.c            |  2 +-
>  drivers/gpu/drm/nouveau/nouveau_dmem.c   |  4 ++--
>  include/linux/migrate.h                  |  5 +++++
>  lib/test_hmm.c                           | 11 ++++++-----
>  mm/migrate_device.c                      |  7 +++----
>  7 files changed, 19 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
> index e5000bef90f2..67910900af7b 100644
> --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> @@ -784,7 +784,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
>               }
>       }
>  
> -     *mig.dst = migrate_pfn(page_to_pfn(dpage));
> +     *mig.dst = migrate_pfn_from_page(dpage);
>       migrate_vma_pages(&mig);
>  out_finalize:
>       migrate_vma_finalize(&mig);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index af53e796ea1b..ca552c34ece2 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -646,7 +646,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
>               pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
>                                    dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
>  
> -             migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
> +             migrate->dst[i] = migrate_pfn_from_page(dpage);
>               j++;
>       }
>  
> diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
> index 37d7cfbbb3e8..5ddf395847ef 100644
> --- a/drivers/gpu/drm/drm_pagemap.c
> +++ b/drivers/gpu/drm/drm_pagemap.c
> @@ -490,7 +490,7 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
>                       goto free_pages;
>  
>               page = folio_page(folio, 0);
> -             mpfn[i] = migrate_pfn(page_to_pfn(page));
> +             mpfn[i] = migrate_pfn_from_page(page);
>  
>  next:
>               if (page)
> diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> index 58071652679d..a7edcdca9701 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -249,7 +249,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
>               goto done;
>       }
>  
> -     args.dst[0] = migrate_pfn(page_to_pfn(dpage));
> +     args.dst[0] = migrate_pfn_from_page(dpage);
>       if (order)
>               args.dst[0] |= MIGRATE_PFN_COMPOUND;
>       dfolio = page_folio(dpage);
> @@ -766,7 +766,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
>               ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
>       if (src & MIGRATE_PFN_WRITE)
>               *pfn |= NVIF_VMM_PFNMAP_V0_W;
> -     mpfn = migrate_pfn(page_to_pfn(dpage));
> +     mpfn = migrate_pfn_from_page(dpage);
>       if (folio_order(page_folio(dpage)))
>               mpfn |= MIGRATE_PFN_COMPOUND;
>       return mpfn;
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index 26ca00c325d9..d269ec1400be 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -140,6 +140,11 @@ static inline unsigned long migrate_pfn(unsigned long pfn)
>       return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
>  }
>  
> +static inline unsigned long migrate_pfn_from_page(struct page *page)
> +{
> +     return migrate_pfn(page_to_pfn(page));
> +}
> +
>  enum migrate_vma_direction {
>       MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
>       MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
> diff --git a/lib/test_hmm.c b/lib/test_hmm.c
> index 8af169d3873a..7e5248404d00 100644
> --- a/lib/test_hmm.c
> +++ b/lib/test_hmm.c
> @@ -727,7 +727,8 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
>                               rpage = BACKING_PAGE(dpage);
>                               rpage->zone_device_data = dmirror;
>  
> -                             *dst = migrate_pfn(page_to_pfn(dpage)) | write;
> +                             *dst = migrate_pfn_from_page(dpage) |
> +                                    write;
>                               src_page = pfn_to_page(spfn + i);
>  
>                               if (spage)
> @@ -754,7 +755,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
>               pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n",
>                        page_to_pfn(spage), page_to_pfn(dpage));
>  
> -             *dst = migrate_pfn(page_to_pfn(dpage)) | write;
> +             *dst = migrate_pfn_from_page(dpage) | write;
>  
>               if (is_large) {
>                       int i;
> @@ -989,7 +990,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
>  
>               if (dpage) {
>                       lock_page(dpage);
> -                     *dst |= migrate_pfn(page_to_pfn(dpage));
> +                     *dst |= migrate_pfn_from_page(dpage);
>               }
>  
>               for (i = 0; i < (1 << order); i++) {
> @@ -1000,7 +1001,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
>                       if (!dpage && order) {
>                               dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
>                               lock_page(dpage);
> -                             dst[i] = migrate_pfn(page_to_pfn(dpage));
> +                             dst[i] = migrate_pfn_from_page(dpage);
>                               dst_page = pfn_to_page(page_to_pfn(dpage));
>                               dpage = NULL; /* For the next iteration */
>                       } else {
> @@ -1412,7 +1413,7 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
>  
>               /* TODO Support splitting here */
>               lock_page(dpage);
> -             dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
> +             dst_pfns[i] = migrate_pfn_from_page(dpage);
>               if (src_pfns[i] & MIGRATE_PFN_WRITE)
>                       dst_pfns[i] |= MIGRATE_PFN_WRITE;
>               if (order)
> diff --git a/mm/migrate_device.c b/mm/migrate_device.c
> index 23379663b1e1..1a2067f830da 100644
> --- a/mm/migrate_device.c
> +++ b/mm/migrate_device.c
> @@ -207,9 +207,8 @@ static int migrate_vma_collect_huge_pmd(pmd_t *pmdp, unsigned long start,
>                       .vma = walk->vma,
>               };
>  
> -             unsigned long pfn = page_to_pfn(folio_page(folio, 0));
> -
> -             migrate->src[migrate->npages] = migrate_pfn(pfn) | write
> +             migrate->src[migrate->npages] = migrate_pfn_from_page(folio_page(folio, 0))
> +                                             | write
>                                               | MIGRATE_PFN_MIGRATE
>                                               | MIGRATE_PFN_COMPOUND;
>               migrate->dst[migrate->npages++] = 0;
> @@ -328,7 +327,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
>                               goto again;
>                       }
>  
> -                     mpfn = migrate_pfn(page_to_pfn(page)) |
> +                     mpfn = migrate_pfn_from_page(page) |
>                                       MIGRATE_PFN_MIGRATE;
>                       if (softleaf_is_device_private_write(entry))
>                               mpfn |= MIGRATE_PFN_WRITE;

Makes sense
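
For reference, a rough sketch of how a driver's alloc-and-copy path ends up
using the new helper (example_fill_dst() and example_alloc_dst_page() are
illustrative names only, not part of this series):

#include <linux/migrate.h>	/* migrate_pfn_from_page(), MIGRATE_PFN_* */
#include <linux/pagemap.h>	/* lock_page() */

/* Hypothetical destination-page allocator, stands in for a driver's own. */
struct page *example_alloc_dst_page(struct migrate_vma *args, unsigned long i);

/*
 * Fill args->dst[] for every migratable source entry.  The page -> mpfn
 * conversion now goes through migrate_pfn_from_page() rather than
 * migrate_pfn(page_to_pfn(dpage)).
 */
static void example_fill_dst(struct migrate_vma *args)
{
	unsigned long i;

	for (i = 0; i < args->npages; i++) {
		struct page *dpage;

		if (!(args->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = example_alloc_dst_page(args, i);
		if (!dpage) {
			args->dst[i] = 0;
			continue;
		}

		lock_page(dpage);
		args->dst[i] = migrate_pfn_from_page(dpage);
		if (args->src[i] & MIGRATE_PFN_WRITE)
			args->dst[i] |= MIGRATE_PFN_WRITE;
	}
}

i.e. the only change for callers is swapping the page_to_pfn() +
migrate_pfn() pair for the single helper, which is what the conversions
in this patch do.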

Reviewed-by: Balbir Singh <[email protected]>


