From: Thomas Gleixner <[email protected]>

PAGE_NX and PAGE_GLOBAL might not be supported, or might be disabled
on the command line, but Kaiser sets them unconditionally.

Add proper protection against that.
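
For context, the pattern applied below is: start with a mask that has
both optional bits cleared, OR each bit back in at init time only when
the feature is actually available, and filter every requested flag set
through that mask. A minimal userspace sketch of that gating pattern
(the bit values and the has_nx()/has_pge() probes are stand-ins, not
the kernel's real definitions):

/*
 * Sketch of the gating pattern: unsupported bits start cleared in the
 * mask and are only re-enabled once probed present.  _PAGE_NX,
 * _PAGE_GLOBAL and the probe helpers are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_NX	(1ULL << 63)	/* stand-in bit position */
#define _PAGE_GLOBAL	(1ULL << 8)	/* stand-in bit position */

static uint64_t pte_mask = ~(_PAGE_NX | _PAGE_GLOBAL);

static bool has_nx(void)  { return true;  }	/* e.g. CPUID probe */
static bool has_pge(void) { return false; }	/* e.g. "nopge" given */

static void init_pte_mask(void)
{
	if (has_nx())
		pte_mask |= _PAGE_NX;
	if (has_pge())
		pte_mask |= _PAGE_GLOBAL;
}

int main(void)
{
	uint64_t flags = _PAGE_NX | _PAGE_GLOBAL | 0x63;

	init_pte_mask();
	flags &= pte_mask;	/* unsupported bits silently dropped */
	printf("effective flags: %#llx\n", (unsigned long long)flags);
	return 0;
}

Defaulting the mask to "both bits cleared" keeps any mapping created
before the init hook runs safe on hardware that lacks the features.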

Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
 arch/x86/include/asm/pgtable_64.h |  3 ++-
 arch/x86/mm/kaiser.c              | 12 +++++++++++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 0c6e14f1e274..1c9f1f803ad8 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -222,7 +222,8 @@ static inline pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd)
                         * wrong CR3 value, userspace will crash
                         * instead of running.
                         */
-                       pgd.pgd |= _PAGE_NX;
+                       if (__supported_pte_mask & _PAGE_NX)
+                               pgd.pgd |= _PAGE_NX;
                }
        } else if (pgd_userspace_access(*pgdp)) {
                /*
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 10c0e9e6bf6b..6e3c5da5f7e8 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -42,6 +42,8 @@
 
 #define KAISER_WALK_ATOMIC  0x1
 
+static pteval_t kaiser_pte_mask __ro_after_init = ~(_PAGE_NX | _PAGE_GLOBAL);
+
 /*
  * At runtime, the only things we map are some things for CPU
  * hotplug, and stacks for new processes.  No two CPUs will ever
@@ -244,11 +246,14 @@ static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
 int kaiser_add_user_map(const void *__start_addr, unsigned long size,
                        unsigned long flags)
 {
-       pte_t *pte;
        unsigned long start_addr = (unsigned long)__start_addr;
        unsigned long address = start_addr & PAGE_MASK;
        unsigned long end_addr = PAGE_ALIGN(start_addr + size);
        unsigned long target_address;
+       pte_t *pte;
+
+       /* Clear unsupported bits */
+       flags &= kaiser_pte_mask;
 
        for (; address < end_addr; address += PAGE_SIZE) {
                target_address = get_pa_from_kernel_map(address);
@@ -308,6 +313,11 @@ static void __init kaiser_init_all_pgds(void)
        pgd_t *pgd;
        int i;
 
+       if (__supported_pte_mask & _PAGE_NX)
+               kaiser_pte_mask |= _PAGE_NX;
+       if (boot_cpu_has(X86_FEATURE_PGE))
+               kaiser_pte_mask |= _PAGE_GLOBAL;
+
        pgd = kernel_to_shadow_pgdp(pgd_offset_k(0UL));
        for (i = PTRS_PER_PGD / 2; i < PTRS_PER_PGD; i++) {
                /*
-- 
2.14.1
