mmu_notifier_invalidate_page() is now called from under the spinlock.
But we can rely on invalidate_range() being called afterward, so the
invalidate_page callback can be dropped.
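
For reference, a minimal sketch of the calling convention this relies on
(existing mmu_notifier API names; the exact core-mm call sites vary), which
is why flushing only from ->invalidate_range is sufficient for this driver:

	mmu_notifier_invalidate_range_start(mm, start, end);
	/* page table spinlock may be held around the PTE update */
	pte = ptep_clear_flush(vma, address, ptep);
	/* the range callback is guaranteed before _end(), replacing
	 * the old per-page ->invalidate_page notification */
	mmu_notifier_invalidate_range(mm, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);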

Signed-off-by: Jérôme Glisse <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: Suravee Suthikulpanit <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Bernhard Held <[email protected]>
Cc: Adam Borowski <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Radim Krčmář <[email protected]>
Cc: Wanpeng Li <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Takashi Iwai <[email protected]>
Cc: Nadav Amit <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: axie <[email protected]>
Cc: Andrew Morton <[email protected]>
---
 drivers/iommu/amd_iommu_v2.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 6629c472eafd..dccf5b76eff2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -391,13 +391,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
        return 0;
 }
 
-static void mn_invalidate_page(struct mmu_notifier *mn,
-                              struct mm_struct *mm,
-                              unsigned long address)
-{
-       __mn_flush_page(mn, address);
-}
-
 static void mn_invalidate_range(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
@@ -436,7 +429,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
 static const struct mmu_notifier_ops iommu_mn = {
        .release                = mn_release,
        .clear_flush_young      = mn_clear_flush_young,
-       .invalidate_page        = mn_invalidate_page,
        .invalidate_range       = mn_invalidate_range,
 };
 
-- 
2.13.5
