Wrap the code used by PTI to map a per-cpu page in the user
page-table into a new function, pti_clone_percpu_page(), so that
this code can be reused to map other per-cpu entries.

Signed-off-by: Alexandre Chartre <alexandre.char...@oracle.com>
---
 arch/x86/mm/pti.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)
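
Note (not part of the commit): a follow-up could reuse the new helper
along the lines of the sketch below. The struct, variable and function
names are made up purely for illustration; as with cpu_tss_rw, the
per-cpu data should be page-sized and page-aligned because the helper
maps the whole page containing the entry.

/*
 * Hypothetical example: map another per-cpu entry into the user
 * page-table with the new helper. The variable is page-sized and
 * page-aligned so that nothing else shares the cloned page.
 */
struct pti_dummy {
        unsigned long data[PAGE_SIZE / sizeof(unsigned long)];
};

static DEFINE_PER_CPU_PAGE_ALIGNED(struct pti_dummy, pti_dummy);

static void __init pti_clone_pti_dummy(void)
{
        unsigned int cpu;

        /* Clone for all possible CPUs at boot, as done for cpu_tss_rw. */
        for_each_possible_cpu(cpu)
                pti_clone_percpu_page(&per_cpu(pti_dummy, cpu));
}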

diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index ebc8cd2f1cd8..71ca245d7b38 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -428,6 +428,21 @@ static void __init pti_clone_p4d(unsigned long addr)
        *user_p4d = *kernel_p4d;
 }
 
+/*
+ * Clone a single percpu page into the user page-table.
+ */
+static void __init pti_clone_percpu_page(void *addr)
+{
+       phys_addr_t pa = per_cpu_ptr_to_phys(addr);
+       pte_t *target_pte;
+
+       target_pte = pti_user_pagetable_walk_pte((unsigned long)addr);
+       if (WARN_ON(!target_pte))
+               return;
+
+       *target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
+}
+
 /*
  * Clone the CPU_ENTRY_AREA and associated data into the user space visible
  * page table.
@@ -448,16 +463,8 @@ static void __init pti_clone_user_shared(void)
                 * This is done for all possible CPUs during boot to ensure
                 * that it's propagated to all mms.
                 */
+               pti_clone_percpu_page(&per_cpu(cpu_tss_rw, cpu));
 
-               unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
-               phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
-               pte_t *target_pte;
-
-               target_pte = pti_user_pagetable_walk_pte(va);
-               if (WARN_ON(!target_pte))
-                       return;
-
-               *target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
        }
 }
 
-- 
2.18.4
