Instrument the creation and clearing of page table mappings by calling
page_table_check_pte_set and page_table_check_pte_clear respectively.
These hooks serve as a sanity check against illegal mappings.
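
The pattern is the same at every call site: report the new entry to the
checker before it is installed, and report the just-removed entry after
it is torn down. A minimal sketch of both hooks, where
example_install_pte()/example_clear_pte() are placeholders for the
per-platform primitives rather than real kernel symbols:

    /* Mapping creation: audit the new PTE before it becomes visible. */
    static inline void example_set_pte(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep, pte_t pte)
    {
            page_table_check_pte_set(mm, addr, ptep, pte);
            example_install_pte(ptep, pte);         /* placeholder */
    }

    /* Mapping teardown: audit the PTE that was just cleared. */
    static inline pte_t example_ptep_get_and_clear(struct mm_struct *mm,
                                                   unsigned long addr,
                                                   pte_t *ptep)
    {
            pte_t old = example_clear_pte(ptep);    /* placeholder */
            page_table_check_pte_clear(mm, addr, old);
            return old;
    }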

Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all powerpc platforms: the
select is unconditional, and both the Book3S and nohash page table
headers are instrumented.

Change pud_pfn to be a runtime bug rather than a build bug: it is now
referenced by page_table_check_pud_{clear,set}, which are never actually
called on these platforms, so the reference alone must not break the
build.
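
With CONFIG_PAGE_TABLE_CHECK enabled, the generic checker references
pud_pfn() from page_table_check_pud_{clear,set} even though those paths
are never exercised here, so an unconditional BUILD_BUG() would trip on
every such build. As the hunk in asm/pgtable.h below shows, the
compile-time guarantee is kept when the checker is disabled and degrades
to a runtime trap when it is enabled:

    static inline int pud_pfn(pud_t pud)
    {
    #ifndef CONFIG_PAGE_TABLE_CHECK
            BUILD_BUG();    /* no legitimate caller should exist */
    #else
            BUG();          /* referenced by the checker, unreachable at runtime */
    #endif
            return 0;
    }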

See also:

riscv support in commit 3fee229a8eb9 ("riscv/mm: enable ARCH_SUPPORTS_PAGE_TABLE_CHECK")
arm64 in commit 42b2547137f5 ("arm64/mm: enable ARCH_SUPPORTS_PAGE_TABLE_CHECK")
x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table check")

Signed-off-by: Rohan McLure <rmcl...@linux.ibm.com>
---
 arch/powerpc/Kconfig                         | 1 +
 arch/powerpc/include/asm/book3s/32/pgtable.h | 7 ++++++-
 arch/powerpc/include/asm/book3s/64/pgtable.h | 9 ++++++++-
 arch/powerpc/include/asm/nohash/32/pgtable.h | 5 ++++-
 arch/powerpc/include/asm/nohash/64/pgtable.h | 2 ++
 arch/powerpc/include/asm/nohash/pgtable.h    | 1 +
 arch/powerpc/include/asm/pgtable.h           | 4 ++++
 7 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4c466acdc70d..6c213ac46a92 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -149,6 +149,7 @@ config PPC
        select ARCH_STACKWALK
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_DEBUG_PAGEALLOC    if PPC_BOOK3S || PPC_8xx || 40x
+       select ARCH_SUPPORTS_PAGE_TABLE_CHECK
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF         if PPC64
        select ARCH_USE_MEMTEST
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 40041ac713d9..3d05c8fe4604 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -53,6 +53,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/page_table_check.h>
+
 static inline bool pte_user(pte_t pte)
 {
        return pte_val(pte) & _PAGE_USER;
@@ -353,7 +355,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
 {
-       return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+       unsigned long old = pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0);
+       page_table_check_pte_clear(mm, addr, __pte(old));
+       return __pte(old);
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -541,6 +545,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
 {
+       page_table_check_pte_set(mm, addr, ptep, pte);
 #if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
        /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
         * helper pte_update() which does an atomic update. We need to do that
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8874f2a3661d..cbb7bd99c897 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -179,6 +179,8 @@
 #define PAGE_AGP               (PAGE_KERNEL_NC)
 
 #ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
+
 /*
  * page table defines
  */
@@ -483,6 +485,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
 {
        unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+       page_table_check_pte_clear(mm, addr, __pte(old));
        return __pte(old);
 }
 
@@ -491,12 +494,15 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr,
                                            pte_t *ptep, int full)
 {
+       pte_t old_pte;
        if (full && radix_enabled()) {
                /*
                 * We know that this is a full mm pte clear and
                 * hence can be sure there is no parallel set_pte.
                 */
-               return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+               old_pte = radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+               page_table_check_pte_clear(mm, addr, old_pte);
+               return old_pte;
        }
        return ptep_get_and_clear(mm, addr, ptep);
 }
@@ -872,6 +878,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
         */
        pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
 
+       page_table_check_pte_set(mm, addr, ptep, pte);
        if (radix_enabled())
                return radix__set_pte_at(mm, addr, ptep, pte, percpu);
        return hash__set_pte_at(mm, addr, ptep, pte, percpu);
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 9091e4904a6b..a3416cfb75d7 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -166,6 +166,7 @@ void unmap_kernel_page(unsigned long va);
 #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
 
 #ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
 
 #define pte_clear(mm, addr, ptep) \
        do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
@@ -305,7 +306,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
 {
-       return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
+       unsigned long old = pte_update(mm, addr, ptep, ~0, 0, 0);
+       page_table_check_pte_clear(mm, addr, __pte(old));
+       return __pte(old);
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 599921cc257e..5f2e10842a0d 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -83,6 +83,7 @@
 #define H_PAGE_4K_PFN 0
 
 #ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
 /* pte_clear moved to later in this file */
 
 static inline pte_t pte_mkwrite(pte_t pte)
@@ -244,6 +245,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
 {
        unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+       page_table_check_pte_clear(mm, addr, __pte(old));
        return __pte(old);
 }
 
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index b499da6c1a99..62b221b7cccf 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -185,6 +185,7 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
 {
+       page_table_check_pte_set(mm, addr, ptep, pte);
        /* Second case is 32-bit with 64-bit PTE.  In this case, we
         * can just store as long as we do the two halves in the right order
         * with a barrier in between.
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 8c1f5feb9360..5e1032d12499 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -166,7 +166,11 @@ static inline int pud_pfn(pud_t pud)
         * check so this should never be used. If it grows another user we
         * want to know about it.
         */
+#ifndef CONFIG_PAGE_TABLE_CHECK
        BUILD_BUG();
+#else
+       BUG();
+#endif
        return 0;
 }
 
-- 
2.34.1
