Since we often have a precise idea of the level we're dealing with
when invalidating TLBs, we can provide it as a hint to our
invalidation helper.
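
As an illustration (not part of this patch), a level-hinted helper
such as __tlbi_level() can encode the level into the TTL field
introduced by ARMv8.4-TTL. A minimal sketch, assuming a 4K granule
and hypothetical TLBI_TTL_MASK / TLBI_TTL_TG_4K definitions:

  /*
   * Sketch only: TLBI_TTL_MASK, TLBI_TTL_TG_4K and the exact
   * capability name are illustrative assumptions, not in-tree
   * definitions.
   */
  #define __tlbi_level(op, addr, level)				\
  do {								\
  	u64 arg = addr;						\
  								\
  	/* Only encode a hint the CPU can actually consume */	\
  	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) && level) {\
  		u64 ttl = level & 3;				\
  								\
  		ttl |= TLBI_TTL_TG_4K << 2; /* granule bits */	\
  		arg &= ~TLBI_TTL_MASK;				\
  		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);		\
  	}							\
  								\
  	/* level == 0 falls back to an unhinted invalidation */	\
  	__tlbi(op, arg);					\
  } while (0)

On this model, passing S2_NO_LEVEL_HINT (assumed to be 0 here)
degrades gracefully to the old unhinted behaviour, as does running
on a CPU without ARMv8.4-TTL.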

Reviewed-by: James Morse <james.mo...@arm.com>
Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/include/asm/kvm_asm.h |  3 ++-
 arch/arm64/kvm/hyp/tlb.c         |  5 +++--
 arch/arm64/kvm/mmu.c             | 29 +++++++++++++++--------------
 3 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 417b9a47e4a7..557be6db3cc2 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -61,7 +61,8 @@ extern char __kvm_hyp_init_end[];
 extern char __kvm_hyp_vector[];
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+                                    int level);
 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
 extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
 
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 993c74cc054c..29e69b073748 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -130,7 +130,8 @@ static void __hyp_text __tlb_switch_to_host(struct tlb_inv_context *cxt)
                __tlb_switch_to_host_nvhe(cxt);
 }
 
-void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
+void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
+                                        phys_addr_t ipa, int level)
 {
        struct tlb_inv_context cxt;
 
@@ -146,7 +147,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa
         * whole of Stage-1. Weep...
         */
        ipa >>= 12;
-       __tlbi(ipas2e1is, ipa);
+       __tlbi_level(ipas2e1is, ipa, level);
 
        /*
         * We have to ensure completion of the invalidation at Stage-2,
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 4a4437be4bc5..97a24cd51db8 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -58,9 +58,10 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
        kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
 }
 
-static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
+static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+                                  int level)
 {
-       kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa);
+       kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
 }
 
 /*
@@ -102,7 +103,7 @@ static void stage2_dissolve_pmd(struct kvm_s2_mmu *mmu, phys_addr_t addr, pmd_t
                return;
 
        pmd_clear(pmd);
-       kvm_tlb_flush_vmid_ipa(mmu, addr);
+       kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
        put_page(virt_to_page(pmd));
 }
 
@@ -122,7 +123,7 @@ static void stage2_dissolve_pud(struct kvm_s2_mmu *mmu, phys_addr_t addr, pud_t
                return;
 
        stage2_pud_clear(kvm, pudp);
-       kvm_tlb_flush_vmid_ipa(mmu, addr);
+       kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
        put_page(virt_to_page(pudp));
 }
 
@@ -163,7 +164,7 @@ static void clear_stage2_pgd_entry(struct kvm_s2_mmu *mmu, pgd_t *pgd, phys_addr
        struct kvm *kvm = mmu->kvm;
        p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
        stage2_pgd_clear(kvm, pgd);
-       kvm_tlb_flush_vmid_ipa(mmu, addr);
+       kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
        stage2_p4d_free(kvm, p4d_table);
        put_page(virt_to_page(pgd));
 }
@@ -173,7 +174,7 @@ static void clear_stage2_p4d_entry(struct kvm_s2_mmu *mmu, p4d_t *p4d, phys_addr
        struct kvm *kvm = mmu->kvm;
        pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);
        stage2_p4d_clear(kvm, p4d);
-       kvm_tlb_flush_vmid_ipa(mmu, addr);
+       kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
        stage2_pud_free(kvm, pud_table);
        put_page(virt_to_page(p4d));
 }
@@ -185,7 +186,7 @@ static void clear_stage2_pud_entry(struct kvm_s2_mmu *mmu, pud_t *pud, phys_addr
 
        VM_BUG_ON(stage2_pud_huge(kvm, *pud));
        stage2_pud_clear(kvm, pud);
-       kvm_tlb_flush_vmid_ipa(mmu, addr);
+       kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
        stage2_pmd_free(kvm, pmd_table);
        put_page(virt_to_page(pud));
 }
@@ -195,7 +196,7 @@ static void clear_stage2_pmd_entry(struct kvm_s2_mmu *mmu, pmd_t *pmd, phys_addr
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        VM_BUG_ON(pmd_thp_or_huge(*pmd));
        pmd_clear(pmd);
-       kvm_tlb_flush_vmid_ipa(mmu, addr);
+       kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
        free_page((unsigned long)pte_table);
        put_page(virt_to_page(pmd));
 }
@@ -273,7 +274,7 @@ static void unmap_stage2_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
                        pte_t old_pte = *pte;
 
                        kvm_set_pte(pte, __pte(0));
-                       kvm_tlb_flush_vmid_ipa(mmu, addr);
+                       kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
 
                        /* No need to invalidate the cache for device mappings */
                        if (!kvm_is_device_pfn(pte_pfn(old_pte)))
@@ -302,7 +303,7 @@ static void unmap_stage2_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
                                pmd_t old_pmd = *pmd;
 
                                pmd_clear(pmd);
-                               kvm_tlb_flush_vmid_ipa(mmu, addr);
+                               kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 
                                kvm_flush_dcache_pmd(old_pmd);
 
@@ -332,7 +333,7 @@ static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
                                pud_t old_pud = *pud;
 
                                stage2_pud_clear(kvm, pud);
-                               kvm_tlb_flush_vmid_ipa(mmu, addr);
+                               kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
                                kvm_flush_dcache_pud(old_pud);
                                put_page(virt_to_page(pud));
                        } else {
@@ -1260,7 +1261,7 @@ static int stage2_set_pmd_huge(struct kvm_s2_mmu *mmu,
                 */
                WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
                pmd_clear(pmd);
-               kvm_tlb_flush_vmid_ipa(mmu, addr);
+               kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
        } else {
                get_page(virt_to_page(pmd));
        }
@@ -1302,7 +1303,7 @@ static int stage2_set_pud_huge(struct kvm_s2_mmu *mmu,
 
                WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
                stage2_pud_clear(kvm, pudp);
-               kvm_tlb_flush_vmid_ipa(mmu, addr);
+               kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
        } else {
                get_page(virt_to_page(pudp));
        }
@@ -1451,7 +1452,7 @@ static int stage2_set_pte(struct kvm_s2_mmu *mmu,
                        return 0;
 
                kvm_set_pte(pte, __pte(0));
-               kvm_tlb_flush_vmid_ipa(mmu, addr);
+               kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
        } else {
                get_page(virt_to_page(pte));
        }
-- 
2.27.0
