From: Jérôme Glisse <jgli...@redhat.com>

A listener of mm events might not have an easy way to get the struct page
behind an address invalidated with the mmu_notifier_invalidate_page()
function, as the notification happens after the CPU page table has been
cleared/updated. This is the case, for instance, when the listener stores
a dma mapping inside its secondary page table. To avoid a complex reverse
dma mapping lookup, just pass along a pointer to the page being
invalidated.
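
For illustration, a listener keeping dma mappings in its secondary page
table could use the new argument roughly as follows. This is only a
sketch: example_dev, example_lookup_dma() and example_clear_secondary_pte()
are hypothetical helpers, not part of this patch.

static void example_invalidate_page(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    unsigned long address,
                                    struct page *page,
                                    enum mmu_event event)
{
        struct example_dev *dev = container_of(mn, struct example_dev, mn);
        dma_addr_t dma;

        /*
         * The page pointer lets the listener find the dma mapping it
         * created when this page went into its secondary page table,
         * without walking the (already updated) CPU page table or doing
         * a reverse dma mapping lookup.
         */
        dma = example_lookup_dma(dev, page);
        if (dma) {
                example_clear_secondary_pte(dev, address);
                dma_unmap_page(dev->dma_dev, dma, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
        }
}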

Signed-off-by: Jérôme Glisse <jgli...@redhat.com>
---
 drivers/infiniband/core/umem_odp.c | 1 +
 drivers/iommu/amd_iommu_v2.c       | 1 +
 drivers/misc/sgi-gru/grutlbpurge.c | 1 +
 drivers/xen/gntdev.c               | 1 +
 include/linux/mmu_notifier.h       | 6 +++++-
 mm/mmu_notifier.c                  | 3 ++-
 mm/rmap.c                          | 4 ++--
 virt/kvm/kvm_main.c                | 1 +
 8 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8f7f845..d10dd88 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -166,6 +166,7 @@ static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
 static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address,
+                                            struct page *page,
                                             enum mmu_event event)
 {
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 4aa4de6..de3c540 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -385,6 +385,7 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
 static void mn_invalidate_page(struct mmu_notifier *mn,
                               struct mm_struct *mm,
                               unsigned long address,
+                              struct page *page,
                               enum mmu_event event)
 {
        __mn_flush_page(mn, address);
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 44b41b7..c7659b76 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -250,6 +250,7 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn,
 
 static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
                                unsigned long address,
+                               struct page *page,
                                enum mmu_event event)
 {
        struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 0e8aa12..90693ce 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -485,6 +485,7 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
 static void mn_invl_page(struct mmu_notifier *mn,
                         struct mm_struct *mm,
                         unsigned long address,
+                        struct page *page,
                         enum mmu_event event)
 {
        struct mmu_notifier_range range;
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index ada3ed1..283ad26 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -172,6 +172,7 @@ struct mmu_notifier_ops {
        void (*invalidate_page)(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long address,
+                               struct page *page,
                                enum mmu_event event);
 
        /*
@@ -290,6 +291,7 @@ extern void __mmu_notifier_change_pte(struct mm_struct *mm,
                                      enum mmu_event event);
 extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
                                          unsigned long address,
+                                         struct page *page,
                                          enum mmu_event event);
 extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                                   struct mmu_notifier_range *range);
@@ -338,10 +340,11 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 
 static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
                                                unsigned long address,
+                                               struct page *page,
                                                enum mmu_event event)
 {
        if (mm_has_notifiers(mm))
-               __mmu_notifier_invalidate_page(mm, address, event);
+               __mmu_notifier_invalidate_page(mm, address, page, event);
 }
 
 static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
@@ -492,6 +495,7 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 
 static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
                                                unsigned long address,
+                                               struct page *page,
                                                enum mmu_event event)
 {
 }
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 294ebc4..2ff6d43 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -160,6 +160,7 @@ void __mmu_notifier_change_pte(struct mm_struct *mm,
 
 void __mmu_notifier_invalidate_page(struct mm_struct *mm,
                                    unsigned long address,
+                                   struct page *page,
                                    enum mmu_event event)
 {
        struct mmu_notifier *mn;
@@ -168,7 +169,7 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_page)
-                       mn->ops->invalidate_page(mn, mm, address, event);
+                       mn->ops->invalidate_page(mn, mm, address, page, event);
        }
        srcu_read_unlock(&srcu, id);
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 74c51e0..4563edc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -915,7 +915,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
        pte_unmap_unlock(pte, ptl);
 
        if (ret) {
-               mmu_notifier_invalidate_page(mm, address, MMU_WRITE_BACK);
+               mmu_notifier_invalidate_page(mm, address, page, MMU_WRITE_BACK);
                (*cleaned)++;
        }
 out:
@@ -1338,7 +1338,7 @@ discard:
 out_unmap:
        pte_unmap_unlock(pte, ptl);
        if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
-               mmu_notifier_invalidate_page(mm, address, MMU_MIGRATE);
+               mmu_notifier_invalidate_page(mm, address, page, MMU_MIGRATE);
 out:
        return ret;
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6177c56..62978ed 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -261,6 +261,7 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address,
+                                            struct page *page,
                                             enum mmu_event event)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-- 
1.9.3
