In order to re-use some of the stage 2 setup code at EL2, factor parts
of kvm_arm_setup_stage2() out into separate functions.

No functional change intended.
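
For reference, after this change the core of kvm_arm_setup_stage2()
reduces to the snippet below (a minimal sketch of the host path as it
ends up in this patch; the EL2 caller of kvm_get_vtcr() is expected to
be wired up by a later patch and is not shown here):

	/* Read the system-wide sanitised ID registers */
	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	/* Build VTCR_EL2 from the sanitised values and the IPA shift */
	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);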

Acked-by: Will Deacon <w...@kernel.org>
Signed-off-by: Quentin Perret <qper...@google.com>
---
 arch/arm64/include/asm/kvm_pgtable.h | 26 +++++++++++++++++
 arch/arm64/kvm/hyp/pgtable.c         | 32 +++++++++++++++++++++
 arch/arm64/kvm/reset.c               | 42 +++-------------------------
 3 files changed, 62 insertions(+), 38 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 7945ec87eaec..9cdc198ea6b4 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -13,6 +13,16 @@
 
 #define KVM_PGTABLE_MAX_LEVELS         4U
 
+static inline u64 kvm_get_parange(u64 mmfr0)
+{
+       u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
+                               ID_AA64MMFR0_PARANGE_SHIFT);
+       if (parange > ID_AA64MMFR0_PARANGE_MAX)
+               parange = ID_AA64MMFR0_PARANGE_MAX;
+
+       return parange;
+}
+
 typedef u64 kvm_pte_t;
 
 /**
@@ -159,6 +169,22 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
                        enum kvm_pgtable_prot prot);
 
+/**
+ * kvm_get_vtcr() - Helper to construct VTCR_EL2
+ * @mmfr0:     Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
+ * @mmfr1:     Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
+ * @phys_shift:        Value used to set VTCR_EL2.T0SZ.
+ *
+ * The VTCR value is common across all the physical CPUs on the system.
+ * We use the system-wide sanitised values to fill in the different
+ * fields, except for Hardware Management of Access Flags. The HA flag
+ * is set unconditionally on all CPUs, as it is safe to run with or
+ * without the feature and the bit is RES0 on CPUs that don't support it.
+ *
+ * Return: VTCR_EL2 value
+ */
+u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
+
 /**
  * kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
  * @pgt:       Uninitialised page-table structure to initialise.
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 3d79c8094cdd..296675e5600d 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -9,6 +9,7 @@
 
 #include <linux/bitfield.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/stage2_pgtable.h>
 
 #define KVM_PTE_VALID                  BIT(0)
 
@@ -449,6 +450,37 @@ struct stage2_map_data {
        struct kvm_pgtable_mm_ops       *mm_ops;
 };
 
+u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
+{
+       u64 vtcr = VTCR_EL2_FLAGS;
+       u8 lvls;
+
+       vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
+       vtcr |= VTCR_EL2_T0SZ(phys_shift);
+       /*
+        * Use a minimum 2 level page table to prevent splitting
+        * host PMD huge pages at stage2.
+        */
+       lvls = stage2_pgtable_levels(phys_shift);
+       if (lvls < 2)
+               lvls = 2;
+       vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
+
+       /*
+        * Enable the Hardware Access Flag management unconditionally
+        * on all CPUs. The feature is RES0 on CPUs that lack support
+        * for it and must be ignored by those CPUs.
+        */
+       vtcr |= VTCR_EL2_HA;
+
+       /* Set the vmid bits */
+       vtcr |= (get_vmid_bits(mmfr1) == 16) ?
+               VTCR_EL2_VS_16BIT :
+               VTCR_EL2_VS_8BIT;
+
+       return vtcr;
+}
+
 static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot,
                                    struct stage2_map_data *data)
 {
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 47f3f035f3ea..6aae118c960a 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -332,19 +332,10 @@ int kvm_set_ipa_limit(void)
        return 0;
 }
 
-/*
- * Configure the VTCR_EL2 for this VM. The VTCR value is common
- * across all the physical CPUs on the system. We use system wide
- * sanitised values to fill in different fields, except for Hardware
- * Management of Access Flags. HA Flag is set unconditionally on
- * all CPUs, as it is safe to run with or without the feature and
- * the bit is RES0 on CPUs that don't support it.
- */
 int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
 {
-       u64 vtcr = VTCR_EL2_FLAGS, mmfr0;
-       u32 parange, phys_shift;
-       u8 lvls;
+       u64 mmfr0, mmfr1;
+       u32 phys_shift;
 
        if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
                return -EINVAL;
@@ -359,33 +350,8 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
        }
 
        mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-       parange = cpuid_feature_extract_unsigned_field(mmfr0,
-                               ID_AA64MMFR0_PARANGE_SHIFT);
-       if (parange > ID_AA64MMFR0_PARANGE_MAX)
-               parange = ID_AA64MMFR0_PARANGE_MAX;
-       vtcr |= parange << VTCR_EL2_PS_SHIFT;
-
-       vtcr |= VTCR_EL2_T0SZ(phys_shift);
-       /*
-        * Use a minimum 2 level page table to prevent splitting
-        * host PMD huge pages at stage2.
-        */
-       lvls = stage2_pgtable_levels(phys_shift);
-       if (lvls < 2)
-               lvls = 2;
-       vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
-
-       /*
-        * Enable the Hardware Access Flag management, unconditionally
-        * on all CPUs. The features is RES0 on CPUs without the support
-        * and must be ignored by the CPUs.
-        */
-       vtcr |= VTCR_EL2_HA;
+       mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+       kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
 
-       /* Set the vmid bits */
-       vtcr |= (kvm_get_vmid_bits() == 16) ?
-               VTCR_EL2_VS_16BIT :
-               VTCR_EL2_VS_8BIT;
-       kvm->arch.vtcr = vtcr;
        return 0;
 }
-- 
2.30.1.766.gb4fecdf3b7-goog
