Calls to mmu_notifier_invalidate_page() are replaced by calls to
mmu_notifier_invalidate_range(), and those calls are thus bracketed by
calls to mmu_notifier_invalidate_range_start()/end().

Remove the now unused invalidate_page callback.
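
For illustration, the replacement pattern at a typical call site looks
roughly like this (a minimal sketch against the 4.13-era mmu_notifier
API; the zap step stands in for whatever PTE teardown the caller does):

	mmu_notifier_invalidate_range_start(mm, address, address + PAGE_SIZE);
	/* ... zap the linux pte under the page table lock ... */
	mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE);
	mmu_notifier_invalidate_range_end(mm, address, address + PAGE_SIZE);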

Signed-off-by: Jérôme Glisse <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Radim Krčmář <[email protected]>
Cc: [email protected]
Cc: Kirill A. Shutemov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
---
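
The serialization the removed callback provided is already supplied by
the surviving invalidate_range_start handler; from memory of the
4.13-era code it looks roughly like the sketch below (the in-tree body
is authoritative):

	static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
							    struct mm_struct *mm,
							    unsigned long start,
							    unsigned long end)
	{
		struct kvm *kvm = mmu_notifier_to_kvm(mn);
		int need_tlb_flush = 0, idx;

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);
		/* the count increase must be visible at unlock time, before
		 * any spte can be (re)established under mmu_lock */
		kvm->mmu_notifier_count++;
		need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
		need_tlb_flush |= kvm->tlbs_dirty;
		/* flush the tlb before the pages can be freed */
		if (need_tlb_flush)
			kvm_flush_remote_tlbs(kvm);
		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);
	}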
 virt/kvm/kvm_main.c | 42 ------------------------------------------
 1 file changed, 42 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 15252d723b54..4d81f6ded88e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -322,47 +322,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
        return container_of(mn, struct kvm, mmu_notifier);
 }
 
-static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
-                                            struct mm_struct *mm,
-                                            unsigned long address)
-{
-       struct kvm *kvm = mmu_notifier_to_kvm(mn);
-       int need_tlb_flush, idx;
-
-       /*
-        * When ->invalidate_page runs, the linux pte has been zapped
-        * already but the page is still allocated until
-        * ->invalidate_page returns. So if we increase the sequence
-        * here the kvm page fault will notice if the spte can't be
-        * established because the page is going to be freed. If
-        * instead the kvm page fault establishes the spte before
-        * ->invalidate_page runs, kvm_unmap_hva will release it
-        * before returning.
-        *
-        * The sequence increase only need to be seen at spin_unlock
-        * time, and not at spin_lock time.
-        *
-        * Increasing the sequence after the spin_unlock would be
-        * unsafe because the kvm page fault could then establish the
-        * pte after kvm_unmap_hva returned, without noticing the page
-        * is going to be freed.
-        */
-       idx = srcu_read_lock(&kvm->srcu);
-       spin_lock(&kvm->mmu_lock);
-
-       kvm->mmu_notifier_seq++;
-       need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
-       /* we've to flush the tlb before the pages can be freed */
-       if (need_tlb_flush)
-               kvm_flush_remote_tlbs(kvm);
-
-       spin_unlock(&kvm->mmu_lock);
-
-       kvm_arch_mmu_notifier_invalidate_page(kvm, address);
-
-       srcu_read_unlock(&kvm->srcu, idx);
-}
-
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address,
@@ -510,7 +469,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 }
 
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
-       .invalidate_page        = kvm_mmu_notifier_invalidate_page,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
-- 
2.13.5
