Marking PMDs that map "normal" refcounted folios as special is
against our rules documented for vm_normal_page(): normal (refcounted)
folios shall never have the page table mapping marked as special.
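
For context, vm_normal_page_pmd() roughly boils down to the following
(a simplified sketch of the contract only, not the exact mm/memory.c
code): a special PMD is interpreted as "no refcounted page behind this
mapping", so a folio-backed PMD marked special would hide the folio
from such callers:

	/*
	 * Simplified sketch: the real helper has additional pfn and
	 * arch-specific handling.
	 */
	struct page *vm_normal_page_pmd(struct vm_area_struct *vma,
			unsigned long addr, pmd_t pmd)
	{
		if (pmd_special(pmd))
			return NULL;	/* "no normal page here" */
		return pmd_page(pmd);	/* normal, refcounted case */
	}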

Fortunately, there are not that many pmd_special() checks that can be
misled, and most vm_normal_page_pmd()/vm_normal_folio_pmd() users that
would get this wrong right now are rather harmless: e.g., none so far
bases the decision whether to grab a folio reference on that check.

Well, and GUP-fast will simply fall back to GUP-slow. All in all, there
seem to be no big implications so far.

Getting this right will become more important as we use
vm_normal_page_pmd()/vm_normal_folio_pmd() in more places.

Fix it by teaching insert_pfn_pmd() to properly handle folios and
pfns -- moving refcount/mapcount/etc handling in there, renaming it to
insert_pmd(), and distinguishing between both cases using a new simple
"struct folio_or_pfn" structure.

Use folio_mk_pmd() to create a pmd for a folio cleanly.
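
For reference, folio_mk_pmd() is roughly the following helper from
include/linux/mm.h (quoted approximately; see the tree for the exact
definition):

	static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
	{
		return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
	}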

Fixes: 6c88f72691f8 ("mm/huge_memory: add vmf_insert_folio_pmd()")
Reviewed-by: Jason Gunthorpe <j...@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoa...@oracle.com>
Reviewed-by: Dan Williams <dan.j.willi...@intel.com>
Tested-by: Dan Williams <dan.j.willi...@intel.com>
Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 mm/huge_memory.c | 59 ++++++++++++++++++++++++++++++++----------------
 1 file changed, 40 insertions(+), 19 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 49b98082c5401..d1e3e253c714a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1372,9 +1372,17 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
        return __do_huge_pmd_anonymous_page(vmf);
 }
 
-static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
-               pgtable_t pgtable)
+struct folio_or_pfn {
+       union {
+               struct folio *folio;
+               pfn_t pfn;
+       };
+       bool is_folio;
+};
+
+static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
+               pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot,
+               bool write, pgtable_t pgtable)
 {
        struct mm_struct *mm = vma->vm_mm;
        pmd_t entry;
@@ -1382,8 +1390,11 @@ static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        lockdep_assert_held(pmd_lockptr(mm, pmd));
 
        if (!pmd_none(*pmd)) {
+               const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
+                                         pfn_t_to_pfn(fop.pfn);
+
                if (write) {
-                       if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
+                       if (pmd_pfn(*pmd) != pfn) {
                                WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
                                return -EEXIST;
                        }
@@ -1396,11 +1407,20 @@ static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                return -EEXIST;
        }
 
-       entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
-       if (pfn_t_devmap(pfn))
-               entry = pmd_mkdevmap(entry);
-       else
-               entry = pmd_mkspecial(entry);
+       if (fop.is_folio) {
+               entry = folio_mk_pmd(fop.folio, vma->vm_page_prot);
+
+               folio_get(fop.folio);
+               folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma);
+               add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
+       } else {
+               entry = pmd_mkhuge(pfn_t_pmd(fop.pfn, prot));
+
+               if (pfn_t_devmap(fop.pfn))
+                       entry = pmd_mkdevmap(entry);
+               else
+                       entry = pmd_mkspecial(entry);
+       }
        if (write) {
                entry = pmd_mkyoung(pmd_mkdirty(entry));
                entry = maybe_pmd_mkwrite(entry, vma);
@@ -1431,6 +1451,9 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
        unsigned long addr = vmf->address & PMD_MASK;
        struct vm_area_struct *vma = vmf->vma;
        pgprot_t pgprot = vma->vm_page_prot;
+       struct folio_or_pfn fop = {
+               .pfn = pfn,
+       };
        pgtable_t pgtable = NULL;
        spinlock_t *ptl;
        int error;
@@ -1458,8 +1481,8 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
        pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
 
        ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-       error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write,
-                       pgtable);
+       error = insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write,
+                          pgtable);
        spin_unlock(ptl);
        if (error && pgtable)
                pte_free(vma->vm_mm, pgtable);
@@ -1474,6 +1497,10 @@ vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address & PMD_MASK;
        struct mm_struct *mm = vma->vm_mm;
+       struct folio_or_pfn fop = {
+               .folio = folio,
+               .is_folio = true,
+       };
        spinlock_t *ptl;
        pgtable_t pgtable = NULL;
        int error;
@@ -1491,14 +1518,8 @@ vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
        }
 
        ptl = pmd_lock(mm, vmf->pmd);
-       if (pmd_none(*vmf->pmd)) {
-               folio_get(folio);
-               folio_add_file_rmap_pmd(folio, &folio->page, vma);
-               add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR);
-       }
-       error = insert_pfn_pmd(vma, addr, vmf->pmd,
-                       pfn_to_pfn_t(folio_pfn(folio)), vma->vm_page_prot,
-                       write, pgtable);
+       error = insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot,
+                          write, pgtable);
        spin_unlock(ptl);
        if (error && pgtable)
                pte_free(mm, pgtable);
-- 
2.49.0
