Populate bits [56:55] of the leaf entry with the level provided
by the guest's S2 translation.

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm/include/asm/kvm_mmu.h      |  5 +++++
 arch/arm64/include/asm/kvm_nested.h |  6 ++++++
 virt/kvm/arm/mmu.c                  | 20 ++++++++++++++++++++
 3 files changed, 31 insertions(+)

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index be7be6583e54..3774a7289ef2 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -484,6 +484,11 @@ static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
        BUG();
 }
 
+static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
+{
+       BUG();
+}
+
 static inline void kvm_nested_s2_flush(struct kvm *kvm) {}
 static inline void kvm_nested_s2_wp(struct kvm *kvm) {}
 static inline void kvm_nested_s2_clear(struct kvm *kvm) {}
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index debae814fdc5..3e3778d3cec6 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -2,6 +2,7 @@
 #ifndef __ARM64_KVM_NESTED_H
 #define __ARM64_KVM_NESTED_H
 
+#include <linux/bitfield.h>
 #include <linux/kvm_host.h>
 
 static inline bool nested_virt_in_use(const struct kvm_vcpu *vcpu)
@@ -79,4 +80,9 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
 
 #define KVM_NV_GUEST_MAP_SZ    GENMASK_ULL(56, 55)
 
+static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
+{
+       return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
+}
+
 #endif /* __ARM64_KVM_NESTED_H */
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 7da72c2b7f0f..eaa86cad2ac8 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1715,6 +1715,11 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
               (hva & ~(map_size - 1)) + map_size <= uaddr_end;
 }
 
+#define set_desc_bits(which, desc, val)				\
+       do {                                                            \
+               desc = __ ## which(which ## _val(desc) | val);          \
+       } while(0)
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_s2_trans *nested,
                          struct kvm_memory_slot *memslot,
@@ -1736,6 +1741,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        unsigned long vma_pagesize, flags = 0;
        struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
        unsigned long max_map_size = PUD_SIZE;
+       u64 l1_s2_level;
 
        write_fault = kvm_is_write_fault(vcpu);
        exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
@@ -1845,10 +1851,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * Potentially reduce shadow S2 permissions to match the guest's own
         * S2. For exec faults, we'd only reach this point if the guest
         * actually allowed it (see kvm_s2_handle_perm_fault).
+        *
+        * Also encode the level of the nested translation in the SW bits of
         * the PTE/PMD/PUD. This will be retrieved on TLB invalidation from
+        * the guest.
         */
        if (kvm_is_shadow_s2_fault(vcpu)) {
                writable &= kvm_s2_trans_writable(nested);
                readable &= kvm_s2_trans_readable(nested);
+
+               l1_s2_level = kvm_encode_nested_level(nested);
+       } else {
+               l1_s2_level = 0;
        }
 
        spin_lock(&kvm->mmu_lock);
@@ -1902,6 +1916,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                if (needs_exec)
                        new_pud = kvm_s2pud_mkexec(new_pud);
 
+               set_desc_bits(pud, new_pud, l1_s2_level);
+
                ret = stage2_set_pud_huge(mmu, memcache, fault_ipa, &new_pud);
        } else if (vma_pagesize == PMD_SIZE) {
                pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
@@ -1917,6 +1933,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                if (needs_exec)
                        new_pmd = kvm_s2pmd_mkexec(new_pmd);
 
+               set_desc_bits(pmd, new_pmd, l1_s2_level);
+
                ret = stage2_set_pmd_huge(mmu, memcache, fault_ipa, &new_pmd);
        } else {
                pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
@@ -1932,6 +1950,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                if (needs_exec)
                        new_pte = kvm_s2pte_mkexec(new_pte);
 
+               set_desc_bits(pte, new_pte, l1_s2_level);
+
                ret = stage2_set_pte(mmu, memcache, fault_ipa, &new_pte, flags);
        }
 
-- 
2.20.1

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to