From: Lance Yang <[email protected]>
Now that we have tlb_remove_table_sync_mm(), convert callers of
tlb_remove_table_sync_one() over to it, enabling targeted IPIs to the
CPUs that may be walking a given mm instead of a system-wide broadcast.

Three callers are updated:

1) collapse_huge_page() - after flushing the old PMD, only IPI the CPUs
   walking this mm instead of all CPUs.
2) tlb_flush_unshared_tables() - when unsharing hugetlb page tables,
   use tlb->mm for targeted IPIs.
3) __tlb_remove_table_one() - now takes an mmu_gather parameter so that
   it can use tlb->mm when batch allocation fails.
Note that pmdp_get_lockless_sync() (PAE only) also calls
tlb_remove_table_sync_one(), under the PTL, to ensure that any ongoing
PMD split-read between pmdp_get_lockless_{start,end}() has completed;
that critical section is very short. I'm inclined to leave it as is,
since PAE systems typically don't have many cores.
Suggested-by: David Hildenbrand (Red Hat) <[email protected]>
Signed-off-by: Lance Yang <[email protected]>
---
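For reviewers reading this patch on its own: tlb_remove_table_sync_mm() is
introduced in the preceding patch of this series. As a rough sketch of the
shape this patch assumes (not the actual implementation), a targeted variant
of the existing empty-IPI trick would look like:

	/* Sketch only; the real helper lands earlier in this series. */
	static void tlb_remove_table_smp_sync(void *arg)
	{
		/* IPI delivery itself is the synchronization; nothing to do. */
	}

	void tlb_remove_table_sync_mm(struct mm_struct *mm)
	{
		/*
		 * GUP-fast runs with IRQs disabled, so synchronously IPIing
		 * the CPUs in mm_cpumask() guarantees that any concurrent
		 * GUP-fast walk of this mm has completed.
		 */
		on_each_cpu_mask(mm_cpumask(mm), tlb_remove_table_smp_sync,
				 NULL, true);
	}

The difference from tlb_remove_table_sync_one(), which does
smp_call_function(tlb_remove_table_smp_sync, NULL, 1), is that only CPUs
that may actually be walking the mm get interrupted.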
 include/asm-generic/tlb.h | 11 ++++++-----
 mm/khugepaged.c           |  2 +-
 mm/mmu_gather.c           | 12 ++++++------
3 files changed, 13 insertions(+), 12 deletions(-)
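
For context on the pmdp_get_lockless_sync() note above (a rough sketch, not
part of this patch): on those PAE configs, the GUP-fast reader brackets its
non-atomic 64-bit PMD read with IRQs disabled, along the lines of:

	unsigned long irqflags = pmdp_get_lockless_start();
	pmd_t pmdval = pmdp_get(pmdp);
	pmdp_get_lockless_end(irqflags);

Because the reader keeps IRQs off across the split-read, the writer's
synchronous IPI wait in pmdp_get_lockless_sync() cannot finish until any
in-flight read has completed, so keeping the broadcast there remains
correct and is cheap on small PAE machines.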
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b6b06e6b879f..40eb74b28f9d 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -831,17 +831,18 @@ static inline void tlb_flush_unshared_tables(struct mmu_gather *tlb)
 	/*
 	 * Similarly, we must make sure that concurrent GUP-fast will not
 	 * walk previously-shared page tables that are getting modified+reused
-	 * elsewhere. So broadcast an IPI to wait for any concurrent GUP-fast.
+	 * elsewhere. So send an IPI to wait for any concurrent GUP-fast.
 	 *
-	 * We only perform this when we are the last sharer of a page table,
-	 * as the IPI will reach all CPUs: any GUP-fast.
+	 * We only perform this when we are the last sharer of a page table.
+	 * Send a targeted IPI to the CPUs actively walking this mm rather
+	 * than broadcasting to all CPUs.
 	 *
-	 * Note that on configs where tlb_remove_table_sync_one() is a NOP,
+	 * Note that on configs where tlb_remove_table_sync_mm() is a NOP,
 	 * the expectation is that the tlb_flush_mmu_tlbonly() would have issued
 	 * required IPIs already for us.
 	 */
 	if (tlb->fully_unshared_tables) {
-		tlb_remove_table_sync_one();
+		tlb_remove_table_sync_mm(tlb->mm);
 		tlb->fully_unshared_tables = false;
 	}
 }
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index fa1e57fd2c46..7781d6628649 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1173,7 +1173,7 @@ static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long a
 	_pmd = pmdp_collapse_flush(vma, address, pmd);
 	spin_unlock(pmd_ptl);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_mm(mm);

 	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
 	if (pte) {
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 35c89e4b6230..76573ec454e5 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -378,7 +378,7 @@ static inline void __tlb_remove_table_one_rcu(struct rcu_head *head)
 	__tlb_remove_table(ptdesc);
 }

-static inline void __tlb_remove_table_one(void *table)
+static inline void __tlb_remove_table_one(struct mmu_gather *tlb, void *table)
 {
 	struct ptdesc *ptdesc;

@@ -386,16 +386,16 @@ static inline void __tlb_remove_table_one(void *table)
 	call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu);
 }
 #else
-static inline void __tlb_remove_table_one(void *table)
+static inline void __tlb_remove_table_one(struct mmu_gather *tlb, void *table)
 {
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_mm(tlb->mm);
 	__tlb_remove_table(table);
 }
 #endif /* CONFIG_PT_RECLAIM */

-static void tlb_remove_table_one(void *table)
+static void tlb_remove_table_one(struct mmu_gather *tlb, void *table)
 {
-	__tlb_remove_table_one(table);
+	__tlb_remove_table_one(tlb, table);
 }

 static void tlb_table_flush(struct mmu_gather *tlb)
@@ -417,7 +417,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT);
 		if (*batch == NULL) {
 			tlb_table_invalidate(tlb);
-			tlb_remove_table_one(table);
+			tlb_remove_table_one(tlb, table);
 			return;
 		}
 		(*batch)->nr = 0;
--
2.49.0