This is a preparation patch for the transition of x86 to the generic
GUP_fast() implementation.

On x86 PAE, a page table entry is larger than sizeof(long), so we need to
provide a helper that can read the entry atomically.

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
---
 mm/gup.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index a62a778ce4ec..ed2259dc4606 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1189,6 +1189,17 @@ struct page *get_dump_page(unsigned long addr)
  */
 #ifdef CONFIG_HAVE_GENERIC_RCU_GUP
 
+#ifndef gup_get_pte
+/*
+ * We assume that the pte can be read atomically. If this is not the case for
+ * your architecture, please provide the helper.
+ */
+static inline pte_t gup_get_pte(pte_t *ptep)
+{
+       return READ_ONCE(*ptep);
+}
+#endif
+
 #ifdef __HAVE_ARCH_PTE_SPECIAL
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                         int write, struct page **pages, int *nr)
@@ -1198,14 +1209,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
        ptem = ptep = pte_offset_map(&pmd, addr);
        do {
-               /*
-                * In the line below we are assuming that the pte can be read
-                * atomically. If this is not the case for your architecture,
-                * please wrap this in a helper function!
-                *
-                * for an example see gup_get_pte in arch/x86/mm/gup.c
-                */
-               pte_t pte = READ_ONCE(*ptep);
+               pte_t pte = gup_get_pte(ptep);
                struct page *head, *page;
 
                /*
-- 
2.11.0

Reply via email to