... and hide it behind a kconfig option. There is really no need for
any !xen code to perform this check.

The naming is a bit off: we want to find the "normal" page when a PTE
was marked "special". So it's really not "finding a special" page.

Improve the documentation, and add a comment in the code where XEN ends
up performing the pte_mkspecial() through a hypercall. More details can
be found in commit 923b2919e2c3 ("xen/gntdev: mark userspace PTEs as
special on x86 PV guests").

Cc: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 drivers/xen/Kconfig              |  1 +
 drivers/xen/gntdev.c             |  5 +++--
 include/linux/mm.h               | 18 +++++++++++++-----
 mm/Kconfig                       |  2 ++
 mm/memory.c                      | 10 ++++++++--
 tools/testing/vma/vma_internal.h | 18 +++++++++++++-----
 6 files changed, 40 insertions(+), 14 deletions(-)

diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 24f485827e039..f9a35ed266ecf 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -138,6 +138,7 @@ config XEN_GNTDEV
        depends on XEN
        default m
        select MMU_NOTIFIER
+       select FIND_NORMAL_PAGE
        help
          Allows userspace processes to use grants.
 
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 61faea1f06630..d1bc0dae2cdf9 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -309,6 +309,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
        BUG_ON(pgnr >= map->count);
        pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 
+       /* Note: this will perform a pte_mkspecial() through the hypercall. */
        gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
                          map->grants[pgnr].ref,
                          map->grants[pgnr].domid);
@@ -516,7 +517,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
        gntdev_put_map(priv, map);
 }
 
-static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
+static struct page *gntdev_vma_find_normal_page(struct vm_area_struct *vma,
                                                 unsigned long addr)
 {
        struct gntdev_grant_map *map = vma->vm_private_data;
@@ -527,7 +528,7 @@ static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
 static const struct vm_operations_struct gntdev_vmops = {
        .open = gntdev_vma_open,
        .close = gntdev_vma_close,
-       .find_special_page = gntdev_vma_find_special_page,
+       .find_normal_page = gntdev_vma_find_normal_page,
 };
 
 /* ------------------------------------------------------------------ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 022e8ef2c78ef..b01475f3dca99 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -646,13 +646,21 @@ struct vm_operations_struct {
        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
                                        unsigned long addr, pgoff_t *ilx);
 #endif
+#ifdef CONFIG_FIND_NORMAL_PAGE
        /*
-        * Called by vm_normal_page() for special PTEs to find the
-        * page for @addr.  This is useful if the default behavior
-        * (using pte_page()) would not find the correct page.
+        * Called by vm_normal_page() for special PTEs in @vma at @addr. This
+        * allows for returning a "normal" page from vm_normal_page() even
+        * though the PTE indicates that the "struct page" either does not exist
+        * or should not be touched: "special".
+        *
+        * Do not add new users: this really only works when a "normal" page
+        * was mapped, but then the PTE got changed to something weird (+
+        * marked special) that would not make pte_pfn() identify the originally
+        * inserted page.
         */
-       struct page *(*find_special_page)(struct vm_area_struct *vma,
-                                         unsigned long addr);
+       struct page *(*find_normal_page)(struct vm_area_struct *vma,
+                                        unsigned long addr);
+#endif /* CONFIG_FIND_NORMAL_PAGE */
 };
 
 #ifdef CONFIG_NUMA_BALANCING
diff --git a/mm/Kconfig b/mm/Kconfig
index c6194d1f9d170..607a3f9672bdb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1390,6 +1390,8 @@ config PT_RECLAIM
 
          Note: now only empty user PTE page table pages will be reclaimed.
 
+config FIND_NORMAL_PAGE
+       def_bool n
 
 source "mm/damon/Kconfig"
 
diff --git a/mm/memory.c b/mm/memory.c
index 6c65f51248250..1eba95fcde096 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -619,6 +619,10 @@ static inline struct page *vm_normal_page_pfn(struct vm_area_struct *vma,
  * If an architecture does not support pte_special(), this function is less
  * trivial and more expensive in some cases.
  *
+ * With CONFIG_FIND_NORMAL_PAGE, we might have pte_special() set on PTEs that
+ * actually map "normal" pages: however, that page cannot be looked up through
+ * pte_pfn(), but instead will be looked up through vm_ops->find_normal_page().
+ *
  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
  * special mapping (even if there are underlying and valid "struct pages").
  * COWed pages of a VM_PFNMAP are always normal.
@@ -639,8 +643,10 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
        unsigned long pfn = pte_pfn(pte);
 
        if (unlikely(pte_special(pte))) {
-               if (vma->vm_ops && vma->vm_ops->find_special_page)
-                       return vma->vm_ops->find_special_page(vma, addr);
+#ifdef CONFIG_FIND_NORMAL_PAGE
+               if (vma->vm_ops && vma->vm_ops->find_normal_page)
+                       return vma->vm_ops->find_normal_page(vma, addr);
+#endif /* CONFIG_FIND_NORMAL_PAGE */
                if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                        return NULL;
                if (is_zero_pfn(pfn))
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 51dd122b8d501..c5bf041036dd7 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -470,13 +470,21 @@ struct vm_operations_struct {
        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
                                        unsigned long addr, pgoff_t *ilx);
 #endif
+#ifdef CONFIG_FIND_NORMAL_PAGE
        /*
-        * Called by vm_normal_page() for special PTEs to find the
-        * page for @addr.  This is useful if the default behavior
-        * (using pte_page()) would not find the correct page.
+        * Called by vm_normal_page() for special PTEs in @vma at @addr. This
+        * allows for returning a "normal" page from vm_normal_page() even
+        * though the PTE indicates that the "struct page" either does not exist
+        * or should not be touched: "special".
+        *
+        * Do not add new users: this really only works when a "normal" page
+        * was mapped, but then the PTE got changed to something weird (+
+        * marked special) that would not make pte_pfn() identify the originally
+        * inserted page.
         */
-       struct page *(*find_special_page)(struct vm_area_struct *vma,
-                                         unsigned long addr);
+       struct page *(*find_normal_page)(struct vm_area_struct *vma,
+                                        unsigned long addr);
+#endif /* CONFIG_FIND_NORMAL_PAGE */
 };
 
 struct vm_unmapped_area_info {
-- 
2.49.0


Reply via email to