From: Jérôme Glisse <[email protected]>

This keeps a list of all virtual address ranges being invalidated (ie inside
a mmu_notifier_invalidate_range_start/end section). Also add a helper to
check if a range is undergoing such invalidation. With this it is easy for a
concurrent thread to ignore invalidations that do not affect the virtual
address range it is working on.

Signed-off-by: Jérôme Glisse <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Christian König <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Leon Romanovsky <[email protected]>
Cc: Artemy Kovalyov <[email protected]>
Cc: Evgeny Baskakov <[email protected]>
Cc: Ralph Campbell <[email protected]>
Cc: Mark Hairgrove <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Mike Marciniszyn <[email protected]>
Cc: Dennis Dalessandro <[email protected]>
Cc: Alex Deucher <[email protected]>
Cc: Sudeep Dutt <[email protected]>
Cc: Ashutosh Dixit <[email protected]>
Cc: Dimitri Sivanich <[email protected]>
---
 include/linux/mmu_notifier.h | 38 ++++++++++++++++++++++++++++++++++++++
 mm/mmu_notifier.c            | 28 ++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+)

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index e59db7a1e86d..4bda68499f43 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -47,16 +47,20 @@ struct mmu_notifier_mm {
        struct hlist_head list;
        /* to serialize the list modifications and hlist_unhashed */
        spinlock_t lock;
+       /* list of all active invalidation range */
+       struct list_head ranges;
 };
 
 /*
  * struct mmu_notifier_range - range being invalidated with range_start/end
+ * @list: use to track list of active invalidation
  * @mm: mm_struct invalidation is against
  * @start: start address of range (inclusive)
  * @end: end address of range (exclusive)
  * @event: type of invalidation (see enum mmu_notifier_event)
  */
 struct mmu_notifier_range {
+       struct list_head list;
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
@@ -268,6 +272,9 @@ extern void __mmu_notifier_invalidate_range_end(
 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
                                  unsigned long start, unsigned long end);
 extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
+extern bool __mmu_notifier_range_valid(struct mm_struct *mm,
+                                      unsigned long start,
+                                      unsigned long end);
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
@@ -275,6 +282,24 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
                __mmu_notifier_release(mm);
 }
 
+static inline bool mmu_notifier_range_valid(struct mm_struct *mm,
+                                           unsigned long start,
+                                           unsigned long end)
+{
+       if (mm_has_notifiers(mm))
+               return __mmu_notifier_range_valid(mm, start, end);
+       return false;
+}
+
+static inline bool mmu_notifier_addr_valid(struct mm_struct *mm,
+                                          unsigned long addr)
+{
+       addr &= PAGE_MASK;
+       if (mm_has_notifiers(mm))
+               return __mmu_notifier_range_valid(mm, addr, addr + PAGE_SIZE);
+       return false;
+}
+
 static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                          unsigned long start,
                                          unsigned long end)
@@ -487,6 +512,19 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
 {
 }
 
+static inline bool mmu_notifier_range_valid(struct mm_struct *mm,
+                                           unsigned long start,
+                                           unsigned long end)
+{
+       return true;
+}
+
+static inline bool mmu_notifier_addr_valid(struct mm_struct *mm,
+                                          unsigned long addr)
+{
+       return true;
+}
+
 static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                          unsigned long start,
                                          unsigned long end)
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 91a614b9636e..d7c46eaa5d42 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -180,6 +180,10 @@ void __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
        struct mmu_notifier *mn;
        int id;
 
+       spin_lock(&mm->mmu_notifier_mm->lock);
+       list_add_rcu(&range->list, &mm->mmu_notifier_mm->ranges);
+       spin_unlock(&mm->mmu_notifier_mm->lock);
+
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range_start)
@@ -218,6 +222,10 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
                        mn->ops->invalidate_range_end(mn, range);
        }
        srcu_read_unlock(&srcu, id);
+
+       spin_lock(&mm->mmu_notifier_mm->lock);
+       list_del_rcu(&range->list);
+       spin_unlock(&mm->mmu_notifier_mm->lock);
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
 
@@ -288,6 +296,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
                goto out_clean;
 
        if (!mm_has_notifiers(mm)) {
+               INIT_LIST_HEAD(&mmu_notifier_mm->ranges);
                INIT_HLIST_HEAD(&mmu_notifier_mm->list);
                spin_lock_init(&mmu_notifier_mm->lock);
 
@@ -424,3 +433,22 @@ void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
        mmdrop(mm);
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);
+
+bool __mmu_notifier_range_valid(struct mm_struct *mm,
+                               unsigned long start,
+                               unsigned long end)
+{
+       struct mmu_notifier_range *range;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(range, &mm->mmu_notifier_mm->ranges, list) {
+               if (end <= range->start || start >= range->end)
+                       continue;
+               rcu_read_unlock();
+               return false;
+       }
+       rcu_read_unlock();
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(__mmu_notifier_range_valid);
-- 
2.14.3

Reply via email to