David Hildenbrand wrote:
> Based on Linus' master.
> 
> While working on improving vm_normal_page() and friends, I stumbled
> over this issue: refcounted "normal" pages must not be marked
> using pmd_special() / pud_special().
> 
> Fortunately, so far there doesn't seem to be serious damage.
> 
> This is only compile-tested so far. Still looking for an easy way to test
> PMD/PUD mappings with DAX. Any tests I can easily run?
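
Agreed on the problem statement. The special bit is how
vm_normal_page_pmd() decides that no refcounted struct page sits behind
an entry, so marking a folio-backed mapping special hides it from rmap
and friends. Roughly (an abbreviated sketch, not the verbatim
mm/memory.c code):

struct page *vm_normal_page_pmd(struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/* special entries are assumed to have no struct page behind them */
	if (unlikely(pmd_special(pmd)))
		return NULL;
	/* ... VM_PFNMAP/VM_MIXEDMAP and pfn sanity checks elided ... */
	return pfn_to_page(pfn);
}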

The way I test this is not something I would classify as "easy"; it is a bit
of a pain to set up, but it is passing here:

[root@host ndctl]# meson test -C build --suite ndctl:dax
ninja: Entering directory `/root/git/ndctl/build'
[168/168] Linking target cxl/cxl
 1/13 ndctl:dax / daxdev-errors.sh          OK              14.30s
 2/13 ndctl:dax / multi-dax.sh              OK               2.89s
 3/13 ndctl:dax / sub-section.sh            OK               8.40s
 4/13 ndctl:dax / dax-dev                   OK               0.06s
 5/13 ndctl:dax / dax-ext4.sh               OK              20.53s
 6/13 ndctl:dax / dax-xfs.sh                OK              20.34s
 7/13 ndctl:dax / device-dax                OK              11.67s
 8/13 ndctl:dax / revoke-devmem             OK               0.25s
 9/13 ndctl:dax / device-dax-fio.sh         OK              34.02s
10/13 ndctl:dax / daxctl-devices.sh         OK               3.44s
11/13 ndctl:dax / daxctl-create.sh         SKIP             0.32s   exit status 77
12/13 ndctl:dax / dm.sh                     OK               1.33s
13/13 ndctl:dax / mmap.sh                   OK              85.12s

...ignore the SKIP; that seems to be caused by an acpi-einj regression.
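
If you want something quicker than the full suite: mapping a device-dax
instance with a PMD-aligned size and touching it should exercise the
huge fault path this patch touches. A minimal sketch, assuming a devdax
instance with 2M alignment; the /dev/dax0.0 path is whatever daxctl
created on your system:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 2UL << 20;	/* one PMD-sized extent */
	int fd = open("/dev/dax0.0", O_RDWR);	/* assumed devdax node */
	void *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* device-dax only supports shared mappings */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xa5, len);	/* write-fault the whole PMD extent */
	munmap(p, len);
	close(fd);
	return 0;
}

With 2M device alignment the memset should be serviced by a single PMD
fault rather than 512 PTE faults, which is exactly the insert path
being modified here.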

However, how about not duplicating the internals of insert_pfn_p[mu]d(),
with something like the below? Either way, you can add:

Tested-by: Dan Williams <dan.j.willi...@intel.com>
Reviewed-by: Dan Williams <dan.j.willi...@intel.com>

base-commit: a9dfb7db96f7bc1f30feae673aab7fdbfbc94e9c

-- 8< --
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d3e66136e41a..cce4456aa62b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1372,9 +1372,9 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
        return __do_huge_pmd_anonymous_page(vmf);
 }
 
-static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
-               pgtable_t pgtable)
+static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
+                      pmd_t *pmd, pfn_t pfn, struct folio *folio,
+                      pgprot_t prot, bool write, pgtable_t pgtable)
 {
        struct mm_struct *mm = vma->vm_mm;
        pmd_t entry;
@@ -1397,9 +1397,7 @@ static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        }
 
        entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
-       if (pfn_t_devmap(pfn))
-               entry = pmd_mkdevmap(entry);
-       else
+       if (!folio)
                entry = pmd_mkspecial(entry);
        if (write) {
                entry = pmd_mkyoung(pmd_mkdirty(entry));
@@ -1458,8 +1456,8 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
        pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
 
        ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-       error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write,
-                       pgtable);
+       error = insert_pmd(vma, addr, vmf->pmd, pfn, NULL, pgprot, write,
+                          pgtable);
        spin_unlock(ptl);
        if (error && pgtable)
                pte_free(vma->vm_mm, pgtable);
@@ -1496,9 +1494,8 @@ vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
                folio_add_file_rmap_pmd(folio, &folio->page, vma);
                add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR);
        }
-       error = insert_pfn_pmd(vma, addr, vmf->pmd,
-                       pfn_to_pfn_t(folio_pfn(folio)), vma->vm_page_prot,
-                       write, pgtable);
+       error = insert_pmd(vma, addr, vmf->pmd, pfn_to_pfn_t(folio_pfn(folio)),
+                          folio, vma->vm_page_prot, write, pgtable);
        spin_unlock(ptl);
        if (error && pgtable)
                pte_free(mm, pgtable);
@@ -1515,8 +1512,8 @@ static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
        return pud;
 }
 
-static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
-               pud_t *pud, pfn_t pfn, bool write)
+static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
+                      pud_t *pud, pfn_t pfn, struct folio *folio, bool write)
 {
        struct mm_struct *mm = vma->vm_mm;
        pgprot_t prot = vma->vm_page_prot;
@@ -1535,9 +1532,7 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
        }
 
        entry = pud_mkhuge(pfn_t_pud(pfn, prot));
-       if (pfn_t_devmap(pfn))
-               entry = pud_mkdevmap(entry);
-       else
+       if (!folio)
                entry = pud_mkspecial(entry);
        if (write) {
                entry = pud_mkyoung(pud_mkdirty(entry));
@@ -1581,7 +1576,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
        pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
 
        ptl = pud_lock(vma->vm_mm, vmf->pud);
-       insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
+       insert_pud(vma, addr, vmf->pud, pfn, NULL, write);
        spin_unlock(ptl);
 
        return VM_FAULT_NOPAGE;
@@ -1616,7 +1611,7 @@ vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
        /*
         * If there is already an entry present we assume the folio is
         * already mapped, hence no need to take another reference. We
-        * still call insert_pfn_pud() though in case the mapping needs
+        * still call insert_pud() though in case the mapping needs
         * upgrading to writeable.
         */
        if (pud_none(*vmf->pud)) {
@@ -1624,8 +1619,8 @@ vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
                folio_add_file_rmap_pud(folio, &folio->page, vma);
                add_mm_counter(mm, mm_counter_file(folio), HPAGE_PUD_NR);
        }
-       insert_pfn_pud(vma, addr, vmf->pud, pfn_to_pfn_t(folio_pfn(folio)),
-               write);
+       insert_pud(vma, addr, vmf->pud, pfn_to_pfn_t(folio_pfn(folio)), folio,
+                  write);
        spin_unlock(ptl);
 
        return VM_FAULT_NOPAGE;
