After dirty logging is stopped for a VM configured with huge mappings,
KVM recovers the table mappings back to block mappings. As we are only
replacing the existing page tables with a block entry and the
cacheability has not been changed, the cache maintenance operations
can be skipped.
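
For illustration only (not part of the patch itself), here is a minimal
standalone sketch of the D-cache decision described above; the helper
name need_dcache_clean() and its freestanding parameters are
hypothetical:

    #include <stdbool.h>

    /*
     * When fault_granule < vma_pagesize, existing table mappings are
     * only being collapsed into a block mapping; the memory contents
     * and cacheability are unchanged, so the D-cache clean (and, on
     * an exec fault, the I-cache invalidation) can be skipped.
     */
    static bool need_dcache_clean(unsigned long fault_granule,
                                  unsigned long vma_pagesize,
                                  bool perm_fault, bool device)
    {
            bool adjust_hugepage = fault_granule < vma_pagesize;

            return !perm_fault && !device && !adjust_hugepage;
    }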

Signed-off-by: Yanan Wang <wangyana...@huawei.com>
---
 arch/arm64/kvm/mmu.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 8e8549ea1d70..37b427dcbc4f 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -744,7 +744,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 {
        int ret = 0;
        bool write_fault, writable, force_pte = false;
-       bool exec_fault;
+       bool exec_fault, adjust_hugepage;
        bool device = false;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu->kvm;
@@ -872,12 +872,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                mark_page_dirty(kvm, gfn);
        }
 
-       if (fault_status != FSC_PERM && !device)
+       /*
+        * There is no need to perform cache maintenance operations if we
+        * are only replacing the existing table mappings with a block mapping.
+        */
+       adjust_hugepage = fault_granule < vma_pagesize;
+       if (fault_status != FSC_PERM && !device && !adjust_hugepage)
                clean_dcache_guest_page(pfn, vma_pagesize);
 
        if (exec_fault) {
                prot |= KVM_PGTABLE_PROT_X;
-               invalidate_icache_guest_page(pfn, vma_pagesize);
+               if (!adjust_hugepage)
+                       invalidate_icache_guest_page(pfn, vma_pagesize);
        }
 
        if (device)
-- 
2.19.1
