Commit-ID:  c6185b1f21a47af94617fde3af7e803817b522a9
Gitweb:     https://git.kernel.org/tip/c6185b1f21a47af94617fde3af7e803817b522a9
Author:     Peter Zijlstra <[email protected]>
AuthorDate: Wed, 19 Sep 2018 10:50:17 +0200
Committer:  Thomas Gleixner <[email protected]>
CommitDate: Thu, 27 Sep 2018 20:39:40 +0200

x86/mm/cpa: Use flush_tlb_all()

Instead of open-coding it.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Dave Hansen <[email protected]>
Cc: Bin Yang <[email protected]>
Cc: Mark Gross <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]

---
 arch/x86/mm/pageattr.c | 12 +-----------
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4e55ded01be5..a22f6b71a308 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -285,16 +285,6 @@ static void cpa_flush_all(unsigned long cache)
        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
-static void __cpa_flush_range(void *arg)
-{
-       /*
-        * We could optimize that further and do individual per page
-        * tlb invalidates for a low number of pages. Caveat: we must
-        * flush the high aliases on 64bit as well.
-        */
-       __flush_tlb_all();
-}
-
 static void cpa_flush_range(unsigned long start, int numpages, int cache)
 {
        unsigned int i, level;
@@ -303,7 +293,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
        BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
        WARN_ON(PAGE_ALIGN(start) != start);
 
-       on_each_cpu(__cpa_flush_range, NULL, 1);
+       flush_tlb_all();
 
        if (!cache)
                return;

Reply via email to