On 20 Jan 2026, at 17:37, Jordan Niethe wrote:

> Hi,
>
> On 14/1/26 06:44, Zi Yan wrote:
>> On 7 Jan 2026, at 4:18, Jordan Niethe wrote:
>>
>>> A future change will remove device private pages from the physical
>>> address space. This will mean that device private pages no longer have
>>> a normal PFN and must be handled separately.
>>>
>>> Prepare for this by modifying page_vma_mapped_walk::pfn to contain flags
>>> as well as a PFN. Introduce a PVMW_PFN_DEVICE_PRIVATE flag to indicate
>>> that page_vma_mapped_walk::pfn contains the PFN of a device private
>>> page.
>>>
>>> Signed-off-by: Jordan Niethe <[email protected]>
>>> Signed-off-by: Alistair Popple <[email protected]>
>>> ---
>>> v1:
>>>    - Update for HMM huge page support
>>> v2:
>>>    - Move adding device_private param to check_pmd() until final patch
>>> ---
>>>   include/linux/rmap.h | 30 +++++++++++++++++++++++++++++-
>>>   mm/page_vma_mapped.c | 13 +++++++------
>>>   mm/rmap.c            |  4 ++--
>>>   mm/vmscan.c          |  2 +-
>>>   4 files changed, 39 insertions(+), 10 deletions(-)
>>>
>>> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
>>> index daa92a58585d..57c63b6a8f65 100644
>>> --- a/include/linux/rmap.h
>>> +++ b/include/linux/rmap.h
>>> @@ -939,9 +939,37 @@ struct page_vma_mapped_walk {
>>>     unsigned int flags;
>>>   };
>>>
>>> +/* pfn is a device private offset */
>>> +#define PVMW_PFN_DEVICE_PRIVATE    (1UL << 0)
>>> +#define PVMW_PFN_SHIFT             1
>>> +
>>> +static inline unsigned long page_vma_walk_pfn(unsigned long pfn)
>>> +{
>>> +   return (pfn << PVMW_PFN_SHIFT);
>>> +}
>>> +
>>> +static inline unsigned long folio_page_vma_walk_pfn(const struct folio *folio)
>>> +{
>>> +   if (folio_is_device_private(folio))
>>> +           return page_vma_walk_pfn(folio_pfn(folio)) |
>>> +                  PVMW_PFN_DEVICE_PRIVATE;
>>> +
>>> +   return page_vma_walk_pfn(folio_pfn(folio));
>>> +}
>>> +
>>> +static inline struct page *page_vma_walk_pfn_to_page(unsigned long pvmw_pfn)
>>> +{
>>> +   return pfn_to_page(pvmw_pfn >> PVMW_PFN_SHIFT);
>>> +}
>>> +
>>> +static inline struct folio *page_vma_walk_pfn_to_folio(unsigned long pvmw_pfn)
>>> +{
>>> +   return page_folio(page_vma_walk_pfn_to_page(pvmw_pfn));
>>> +}
>>> +
>>>   #define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags)	\
>>>     struct page_vma_mapped_walk name = {                            \
>>> -           .pfn = folio_pfn(_folio),                               \
>>> +           .pfn = folio_page_vma_walk_pfn(_folio),                 \
>>>             .nr_pages = folio_nr_pages(_folio),                     \
>>>             .pgoff = folio_pgoff(_folio),                           \
>>>             .vma = _vma,                                            \
>>> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
>>> index b38a1d00c971..96c525785d78 100644
>>> --- a/mm/page_vma_mapped.c
>>> +++ b/mm/page_vma_mapped.c
>>> @@ -129,9 +129,9 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
>>>             pfn = softleaf_to_pfn(entry);
>>>     }
>>>
>>> -   if ((pfn + pte_nr - 1) < pvmw->pfn)
>>> +   if ((pfn + pte_nr - 1) < (pvmw->pfn >> PVMW_PFN_SHIFT))
>>
>> Can you add a helper function for (pvmw->pfn >> PVMW_PFN_SHIFT)? It is
>> impossible to tell why the local pfn does not need >> PVMW_PFN_SHIFT.
>
> Sure, something like page_vma_walk_pfn_to_offset()?
>

Just page_vma_walk_pfn(pvmw), since the code is comparing against a pfn?
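
Roughly something like this (just a sketch on top of this patch; the
pvmw-based signature is my assumption, and the existing encode helper
that takes a raw pfn would then need another name to avoid the clash):

static inline unsigned long
page_vma_walk_pfn(const struct page_vma_mapped_walk *pvmw)
{
	/* Drop the PVMW_PFN_* flag bits to recover the raw pfn. */
	return pvmw->pfn >> PVMW_PFN_SHIFT;
}

With that, the check in check_pte() reads as a plain pfn comparison:

	if ((pfn + pte_nr - 1) < page_vma_walk_pfn(pvmw))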

Best Regards,
Yan, Zi
