From: Yulei Zhang <yuleixzh...@tencent.com>

In follow_pmd_mask(), a dmem huge pmd should be recognized and the error
pointer '-EEXIST' returned, indicating that a proper page table entry exists
in the special pmd but has no corresponding struct page, since dmem pages
are not backed by struct page. The pmd is also updated when foll_flags
contains FOLL_TOUCH.
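
For context, a minimal sketch of how the generic GUP loop in
__get_user_pages() is expected to consume the -EEXIST return, mirroring
the existing handling for follow_pfn_pte(); variable names and the
surrounding control flow are illustrative only and not part of this patch:

  page = follow_page_mask(vma, start, foll_flags, &ctx);
  if (!page) {
          /* fault the page in and retry, as today */
  } else if (PTR_ERR(page) == -EEXIST) {
          /*
           * Proper page table entry exists, but there is no
           * corresponding struct page (e.g. a dmem huge pmd):
           * skip this address without taking a page reference.
           */
          goto next_page;
  } else if (IS_ERR(page)) {
          ret = PTR_ERR(page);
          goto out;
  }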

Signed-off-by: Chen Zhuo <sagazc...@tencent.com>
Signed-off-by: Yulei Zhang <yuleixzh...@tencent.com>
---
 mm/gup.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/mm/gup.c b/mm/gup.c
index e5739a1974d5..726ffc5b0ea9 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -380,6 +380,42 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
        return -EEXIST;
 }
 
+static struct page *
+follow_special_pmd(struct vm_area_struct *vma, unsigned long address,
+                  pmd_t *pmd, unsigned int flags)
+{
+       spinlock_t *ptl;
+
+       if (flags & FOLL_DUMP)
+               /* Avoid special (like zero) pages in core dumps */
+               return ERR_PTR(-EFAULT);
+
+       /* No page to get reference */
+       if (flags & FOLL_GET)
+               return ERR_PTR(-EFAULT);
+
+       if (flags & FOLL_TOUCH) {
+               pmd_t _pmd;
+
+               ptl = pmd_lock(vma->vm_mm, pmd);
+               if (!pmd_special(*pmd)) {
+                       spin_unlock(ptl);
+                       return NULL;
+               }
+               _pmd = pmd_mkyoung(*pmd);
+               if (flags & FOLL_WRITE)
+                       _pmd = pmd_mkdirty(_pmd);
+               if (pmdp_set_access_flags(vma, address & HPAGE_PMD_MASK,
+                                         pmd, _pmd,
+                                         flags & FOLL_WRITE))
+                       update_mmu_cache_pmd(vma, address, pmd);
+               spin_unlock(ptl);
+       }
+
+       /* Proper page table entry exists, but no corresponding struct page */
+       return ERR_PTR(-EEXIST);
+}
+
 /*
  * FOLL_FORCE can write to even unwritable pte's, but only
  * after we've gone through a COW cycle and they are dirty.
@@ -564,6 +600,12 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                        return page;
                return no_page_table(vma, flags);
        }
+       if (pmd_special(*pmd)) {
+               page = follow_special_pmd(vma, address, pmd, flags);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+       }
        if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pmd_val(pmdval)), flags,
-- 
2.28.0
