Rather than guarding every cpa_flush_range() call site with a CLFLUSH
feature test, move the test inside cpa_flush_range() itself, which falls
back to cpa_flush_all() when CLFLUSH is unavailable.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
 arch/x86/mm/pageattr.c |   15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -293,6 +293,11 @@ static void cpa_flush_range(unsigned lon
        BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
        WARN_ON(PAGE_ALIGN(start) != start);
 
+       if (!static_cpu_has(X86_FEATURE_CLFLUSH)) {
+               cpa_flush_all(cache);
+               return;
+       }
+
        flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
 
        if (!cache)
@@ -2075,10 +2080,7 @@ static int __set_memory_enc_dec(unsigned
        /*
         * Before changing the encryption attribute, we need to flush caches.
         */
-       if (static_cpu_has(X86_FEATURE_CLFLUSH))
-               cpa_flush_range(start, numpages, 1);
-       else
-               cpa_flush_all(1);
+       cpa_flush_range(start, numpages, 1);
 
        ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2089,10 +2091,7 @@ static int __set_memory_enc_dec(unsigned
         * in case TLB flushing gets optimized in the cpa_flush_range()
         * path use the same logic as above.
         */
-       if (static_cpu_has(X86_FEATURE_CLFLUSH))
-               cpa_flush_range(start, numpages, 0);
-       else
-               cpa_flush_all(0);
+       cpa_flush_range(start, numpages, 0);
 
        return ret;
 }