Resulting code (8xx with 16 bytes per cacheline and 16k pages)

0000016c <__flush_dcache_icache_phys>:
 16c:   54 63 00 22     rlwinm  r3,r3,0,0,17
 170:   7d 20 00 a6     mfmsr   r9
 174:   39 40 04 00     li      r10,1024
 178:   55 28 07 34     rlwinm  r8,r9,0,28,26
 17c:   7c 67 1b 78     mr      r7,r3
 180:   7d 49 03 a6     mtctr   r10
 184:   7d 00 01 24     mtmsr   r8
 188:   4c 00 01 2c     isync
 18c:   7c 00 18 6c     dcbst   0,r3
 190:   38 63 00 10     addi    r3,r3,16
 194:   42 00 ff f8     bdnz    18c <__flush_dcache_icache_phys+0x20>
 198:   7c 00 04 ac     hwsync
 19c:   7d 49 03 a6     mtctr   r10
 1a0:   7c 00 3f ac     icbi    0,r7
 1a4:   38 e7 00 10     addi    r7,r7,16
 1a8:   42 00 ff f8     bdnz    1a0 <__flush_dcache_icache_phys+0x34>
 1ac:   7c 00 04 ac     hwsync
 1b0:   7d 20 01 24     mtmsr   r9
 1b4:   4c 00 01 2c     isync
 1b8:   4e 80 00 20     blr

Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
---
 This patch is on top of Alastair's series "powerpc: convert cache asm to C"
 Patch 3 of that series should touch __flush_dcache_icache_phys and this
 patch could come just after patch 3.

 arch/powerpc/include/asm/cacheflush.h |  8 +++++
 arch/powerpc/mm/mem.c                 | 55 ++++++++++++++++++++++++++++-------
 2 files changed, 53 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 1826bf2cc137..bf4f2dc4eb76 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -47,6 +47,14 @@ void flush_icache_user_range(struct vm_area_struct *vma,
                                    struct page *page, unsigned long addr,
                                    int len);
 void flush_dcache_icache_page(struct page *page);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
+void __flush_dcache_icache_phys(unsigned long physaddr);
+#else
+static inline void __flush_dcache_icache_phys(unsigned long physaddr)
+{
+       BUG();
+}
+#endif
 
 /**
 * flush_dcache_range(): Write any modified data cache blocks out to memory and invalidate them.
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 43be99de7c9a..43009f9227c4 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -402,6 +402,50 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
+void __flush_dcache_icache_phys(unsigned long physaddr)
+{
+       unsigned long bytes = l1_dcache_bytes();
+       unsigned long nb = PAGE_SIZE / bytes;
+       unsigned long addr = physaddr & PAGE_MASK;
+       unsigned long msr, msr0;
+       unsigned long loop1 = addr, loop2 = addr;
+
+       if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+               /* For a snooping icache, we still need a dummy icbi to purge all the
+                * prefetched instructions from the ifetch buffers. We also need a sync
+                * before the icbi to order the actual stores to memory that might
+                * have modified instructions with the icbi.
+                */
+               mb(); /* sync */
+               icbi((void *)addr);
+               mb(); /* sync */
+               isync();
+               return;
+       }
+       msr0 = mfmsr();
+       msr = msr0 & ~MSR_DR;
+       asm volatile(
+           "   mtctr %2;"
+           "   mtmsr %3;"
+           "   isync;"
+           "0: dcbst   0, %0;"
+           "   addi    %0, %0, %4;"
+           "   bdnz    0b;"
+           "   sync;"
+           "   mtctr %2;"
+           "1: icbi    0, %1;"
+           "   addi    %1, %1, %4;"
+           "   bdnz    1b;"
+           "   sync;"
+           "   mtmsr %5;"
+           "   isync;"
+           : "+r" (loop1), "+r" (loop2)
+           : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
+           : "ctr", "memory");
+}
+#endif
+
 void flush_dcache_icache_page(struct page *page)
 {
 #ifdef CONFIG_HUGETLB_PAGE
@@ -419,16 +463,7 @@ void flush_dcache_icache_page(struct page *page)
                __flush_dcache_icache(start);
                kunmap_atomic(start);
        } else {
-               unsigned long msr = mfmsr();
-
-               /* Clear the DR bit so that we operate on physical
-                * rather than virtual addresses
-                */
-               mtmsr(msr & ~(MSR_DR));
-
-               __flush_dcache_icache((void *)physaddr);
-
-               mtmsr(msr);
+               __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
        }
 #endif
 }
-- 
2.13.3

Reply via email to