From: Quentin Perret <qper...@google.com>

In pKVM mode, we can't trust the host not to mess with the hypervisor
per-cpu offsets, so let's move the array containing them to the nVHE
code.

Signed-off-by: Quentin Perret <qper...@google.com>
---
 arch/arm64/include/asm/kvm_asm.h  | 4 ++--
 arch/arm64/kernel/image-vars.h    | 3 ---
 arch/arm64/kvm/arm.c              | 9 ++++-----
 arch/arm64/kvm/hyp/nvhe/hyp-smp.c | 2 ++
 4 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 931a351da3f2..35b9d590bb74 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -110,7 +110,7 @@ enum __kvm_host_smccc_func {
 #define per_cpu_ptr_nvhe_sym(sym, cpu)                                         \
        ({                                                                      \
                unsigned long base, off;                                        \
-               base = kvm_arm_hyp_percpu_base[cpu];                            \
+               base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];              \
                off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
                      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
                base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
@@ -198,7 +198,7 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_init         CHOOSE_NVHE_SYM(__kvm_hyp_init)
 #define __kvm_hyp_vector       CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
-extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
 DECLARE_KVM_NVHE_SYM(__per_cpu_start);
 DECLARE_KVM_NVHE_SYM(__per_cpu_end);
 
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 4e3b6d618ac1..37a2d833851a 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -102,9 +102,6 @@ KVM_NVHE_ALIAS(gic_nonsecure_priorities);
 KVM_NVHE_ALIAS(__start___kvm_ex_table);
 KVM_NVHE_ALIAS(__stop___kvm_ex_table);
 
-/* Array containing bases of nVHE per-CPU memory regions. */
-KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
-
 /* PMU available static key */
 #ifdef CONFIG_HW_PERF_EVENTS
 KVM_NVHE_ALIAS(kvm_arm_pmu_available);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 5b41551a978b..c7b362db692f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -51,7 +51,6 @@ DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 static bool vgic_present;
@@ -1800,13 +1799,13 @@ static void teardown_hyp_mode(void)
        free_hyp_pgds();
        for_each_possible_cpu(cpu) {
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
-               free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
+               free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
        }
 }
 
 static int do_pkvm_init(u32 hyp_va_bits)
 {
-       void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base);
+       void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
        int ret;
 
        preempt_disable();
@@ -1907,7 +1906,7 @@ static int init_hyp_mode(void)
 
                page_addr = page_address(page);
                memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
-               kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
+               kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
        }
 
        /*
@@ -1968,7 +1967,7 @@ static int init_hyp_mode(void)
        }
 
        for_each_possible_cpu(cpu) {
-               char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
+               char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
                char *percpu_end = percpu_begin + nvhe_percpu_size();
 
                /* Map Hyp percpu pages */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index 9f54833af400..04d194583f1e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -23,6 +23,8 @@ u64 cpu_logical_map(unsigned int cpu)
        return hyp_cpu_logical_map[cpu];
 }
 
+unsigned long __ro_after_init kvm_arm_hyp_percpu_base[NR_CPUS];
+
 unsigned long __hyp_per_cpu_offset(unsigned int cpu)
 {
        unsigned long *cpu_base_array;
-- 
2.36.1.124.g0e6072fb45-goog

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to