Commit-ID:  e3238faf20fb1b51a814497751398ab525a2c884
Gitweb:     https://git.kernel.org/tip/e3238faf20fb1b51a814497751398ab525a2c884
Author:     Joerg Roedel <jroe...@suse.de>
AuthorDate: Wed, 18 Jul 2018 11:40:54 +0200
Committer:  Thomas Gleixner <t...@linutronix.de>
CommitDate: Fri, 20 Jul 2018 01:11:41 +0200

x86/pgtable/32: Allocate 8k page-tables when PTI is enabled

Allocate a kernel and a user page-table root when PTI is enabled. Also
allocate a full page per root for PAE because otherwise the bit to flip in
CR3 to switch between them would be non-constant, which creates a lot of
hassle.  Keep that for a later optimization.

Signed-off-by: Joerg Roedel <jroe...@suse.de>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Tested-by: Pavel Machek <pa...@ucw.cz>
Cc: "H . Peter Anvin" <h...@zytor.com>
Cc: linux...@kvack.org
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: Dave Hansen <dave.han...@intel.com>
Cc: Josh Poimboeuf <jpoim...@redhat.com>
Cc: Juergen Gross <jgr...@suse.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Jiri Kosina <jkos...@suse.cz>
Cc: Boris Ostrovsky <boris.ostrov...@oracle.com>
Cc: Brian Gerst <brge...@gmail.com>
Cc: David Laight <david.lai...@aculab.com>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: Eduardo Valentin <edu...@amazon.com>
Cc: Greg KH <gre...@linuxfoundation.org>
Cc: Will Deacon <will.dea...@arm.com>
Cc: aligu...@amazon.com
Cc: daniel.gr...@iaik.tugraz.at
Cc: hu...@google.com
Cc: keesc...@google.com
Cc: Andrea Arcangeli <aarca...@redhat.com>
Cc: Waiman Long <ll...@redhat.com>
Cc: "David H . Gutteridge" <dhgutteri...@sympatico.ca>
Cc: j...@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-18-git-send-email-j...@8bytes.org

---
 arch/x86/kernel/head_32.S | 20 +++++++++++++++-----
 arch/x86/mm/pgtable.c     |  5 +++--
 2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index abe6df15a8fb..30f9cb2c0b55 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -512,11 +512,18 @@ ENTRY(initial_code)
 ENTRY(setup_once_ref)
        .long setup_once
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define        PGD_ALIGN       (2 * PAGE_SIZE)
+#define PTI_USER_PGD_FILL      1024
+#else
+#define        PGD_ALIGN       (PAGE_SIZE)
+#define PTI_USER_PGD_FILL      0
+#endif
 /*
  * BSS section
  */
 __PAGE_ALIGNED_BSS
-       .align PAGE_SIZE
+       .align PGD_ALIGN
 #ifdef CONFIG_X86_PAE
 .globl initial_pg_pmd
 initial_pg_pmd:
@@ -526,14 +533,17 @@ initial_pg_pmd:
 initial_page_table:
        .fill 1024,4,0
 #endif
+       .align PGD_ALIGN
 initial_pg_fixmap:
        .fill 1024,4,0
-.globl empty_zero_page
-empty_zero_page:
-       .fill 4096,1,0
 .globl swapper_pg_dir
+       .align PGD_ALIGN
 swapper_pg_dir:
        .fill 1024,4,0
+       .fill PTI_USER_PGD_FILL,4,0
+.globl empty_zero_page
+empty_zero_page:
+       .fill 4096,1,0
 EXPORT_SYMBOL(empty_zero_page)
 
 /*
@@ -542,7 +552,7 @@ EXPORT_SYMBOL(empty_zero_page)
 #ifdef CONFIG_X86_PAE
 __PAGE_ALIGNED_DATA
        /* Page-aligned for the benefit of paravirt? */
-       .align PAGE_SIZE
+       .align PGD_ALIGN
 ENTRY(initial_page_table)
        .long   pa(initial_pg_pmd+PGD_IDENT_ATTR),0     /* low identity map */
 # if KPMDS == 3
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 47b5951e592b..db6fb7740bf7 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -343,7 +343,8 @@ static inline pgd_t *_pgd_alloc(void)
         * We allocate one page for pgd.
         */
        if (!SHARED_KERNEL_PMD)
-               return (pgd_t *)__get_free_page(PGALLOC_GFP);
+               return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+                                                PGD_ALLOCATION_ORDER);
 
        /*
         * Now PAE kernel is not running as a Xen domain. We can allocate
@@ -355,7 +356,7 @@ static inline pgd_t *_pgd_alloc(void)
 static inline void _pgd_free(pgd_t *pgd)
 {
        if (!SHARED_KERNEL_PMD)
-               free_page((unsigned long)pgd);
+               free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
        else
                kmem_cache_free(pgd_cache, pgd);
 }

Reply via email to