From: Thomas Gleixner <t...@linutronix.de>

PAGE_NX and PAGE_GLOBAL might not be supported, or might be disabled
on the command line, but Kaiser sets them unconditionally.

Add proper protection against that.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Brian Gerst <brge...@gmail.com>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: Josh Poimboeuf <jpoim...@redhat.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: daniel.gr...@iaik.tugraz.at
Cc: hu...@google.com
Cc: keesc...@google.com
Cc: linux...@kvack.org
Cc: michael.schw...@iaik.tugraz.at
Cc: moritz.l...@iaik.tugraz.at
Cc: richard.fell...@student.tugraz.at
Link: http://lkml.kernel.org/r/20171126232414.313869...@linutronix.de
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/include/asm/pgtable_64.h |  3 ++-
 arch/x86/mm/kaiser.c              | 12 +++++++++++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/pgtable_64.h 
b/arch/x86/include/asm/pgtable_64.h
index 0c6e14f1e274..1c9f1f803ad8 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -222,7 +222,8 @@ static inline pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, 
pgd_t pgd)
                         * wrong CR3 value, userspace will crash
                         * instead of running.
                         */
-                       pgd.pgd |= _PAGE_NX;
+                       if (__supported_pte_mask & _PAGE_NX)
+                               pgd.pgd |= _PAGE_NX;
                }
        } else if (pgd_userspace_access(*pgdp)) {
                /*
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 10c0e9e6bf6b..6e3c5da5f7e8 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -42,6 +42,8 @@
 
 #define KAISER_WALK_ATOMIC  0x1
 
+static pteval_t kaiser_pte_mask __ro_after_init = ~(_PAGE_NX | _PAGE_GLOBAL);
+
 /*
  * At runtime, the only things we map are some things for CPU
  * hotplug, and stacks for new processes.  No two CPUs will ever
@@ -244,11 +246,14 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long 
address,
 int kaiser_add_user_map(const void *__start_addr, unsigned long size,
                        unsigned long flags)
 {
-       pte_t *pte;
        unsigned long start_addr = (unsigned long)__start_addr;
        unsigned long address = start_addr & PAGE_MASK;
        unsigned long end_addr = PAGE_ALIGN(start_addr + size);
        unsigned long target_address;
+       pte_t *pte;
+
+       /* Clear not supported bits */
+       flags &= kaiser_pte_mask;
 
        for (; address < end_addr; address += PAGE_SIZE) {
                target_address = get_pa_from_kernel_map(address);
@@ -308,6 +313,11 @@ static void __init kaiser_init_all_pgds(void)
        pgd_t *pgd;
        int i;
 
+       if (__supported_pte_mask & _PAGE_NX)
+               kaiser_pte_mask |= _PAGE_NX;
+       if (boot_cpu_has(X86_FEATURE_PGE))
+               kaiser_pte_mask |= _PAGE_GLOBAL;
+
        pgd = kernel_to_shadow_pgdp(pgd_offset_k(0UL));
        for (i = PTRS_PER_PGD / 2; i < PTRS_PER_PGD; i++) {
                /*
-- 
2.14.1

Reply via email to