Reorder Linux PTE bits to (almost) match Hash PTE bits.

RW Kernel : PP = 00
RO Kernel : PP = 00
RW User   : PP = 01
RO User   : PP = 11
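
For reference, PP sits in the two least-significant bits of the low
word of the hash PTE, so Linux PTE bits placed at 0x001/0x002 line up
with it directly. A minimal C sketch, with a hypothetical helper name:

	#define HPTE_PP_MASK	0x003	/* hypothetical: mask of the PP field */

	static inline unsigned int hpte_pp(unsigned int hpte_lo)
	{
		return hpte_lo & HPTE_PP_MASK;	/* 0, 1 or 3, per the table above */
	}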

So naturally, we should have
_PAGE_USER = 0x001
_PAGE_RW   = 0x002

Today, 0x001 and 0x002 are _PAGE_PRESENT and _PAGE_HASHPTE, which
are both software-only bits.

Swap _PAGE_USER and _PAGE_PRESENT.
Swap _PAGE_RW and _PAGE_HASHPTE.

This allows us to remove a few insns.
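
As an illustration (not part of the patch), the new DataLoadTLBMiss
conversion below can be modelled in C; with _PAGE_RW already sitting in
the PP msb position, only _PAGE_USER still needs an insert:

	/* Hypothetical userspace model of the PP derivation; it mirrors the
	 * rlwinm/rlwimi/ori/andc sequence and returns only the PP bits.
	 */
	#define _PAGE_USER	0x001
	#define _PAGE_RW	0x002

	static unsigned int pp_from_pte(unsigned int pte)
	{
		unsigned int rw = pte & _PAGE_RW;		/* rlwinm r1,r0,0,30,30 */
		unsigned int r0 = (pte & ~0x002u)
				  | ((pte & _PAGE_USER) << 1);	/* rlwimi r0,r0,1,30,30 */
		return (r0 & ~(rw | 0xe04)) & 0x3;		/* ori + andc */
	}

	/* pp_from_pte(_PAGE_USER | _PAGE_RW) == 1	(RW User)
	 * pp_from_pte(_PAGE_USER)           == 3	(RO User)
	 * pp_from_pte(0)                    == 0	(Kernel)
	 */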

Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
---
v2: rebased on today's powerpc/merge
---
 arch/powerpc/include/asm/book3s/32/hash.h |  8 ++++----
 arch/powerpc/kernel/head_32.S             |  9 +++------
 arch/powerpc/mm/book3s32/hash_low.S       | 14 ++++++--------
 3 files changed, 13 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
index 2a0a467d2985..34a7215ae81e 100644
--- a/arch/powerpc/include/asm/book3s/32/hash.h
+++ b/arch/powerpc/include/asm/book3s/32/hash.h
@@ -17,9 +17,9 @@
  * updating the accessed and modified bits in the page table tree.
  */
 
-#define _PAGE_PRESENT  0x001   /* software: pte contains a translation */
-#define _PAGE_HASHPTE  0x002   /* hash_page has made an HPTE for this pte */
-#define _PAGE_USER     0x004   /* usermode access allowed */
+#define _PAGE_USER     0x001   /* usermode access allowed */
+#define _PAGE_RW       0x002   /* software: user write access allowed */
+#define _PAGE_PRESENT  0x004   /* software: pte contains a translation */
 #define _PAGE_GUARDED  0x008   /* G: prohibit speculative access */
 #define _PAGE_COHERENT 0x010   /* M: enforce memory coherence (SMP systems) */
 #define _PAGE_NO_CACHE 0x020   /* I: cache inhibit */
@@ -27,7 +27,7 @@
 #define _PAGE_DIRTY    0x080   /* C: page changed */
 #define _PAGE_ACCESSED 0x100   /* R: page referenced */
 #define _PAGE_EXEC     0x200   /* software: exec allowed */
-#define _PAGE_RW       0x400   /* software: user write access allowed */
+#define _PAGE_HASHPTE  0x400   /* hash_page has made an HPTE for this pte */
 #define _PAGE_SPECIAL  0x800   /* software: Special page */
 
 #ifdef CONFIG_PTE_64BIT
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 0493fcac6409..1587a754f061 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -310,7 +310,7 @@ BEGIN_MMU_FTR_SECTION
        andis.  r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
 #endif
        bne     handle_page_fault_tramp_2       /* if not, try to put a PTE */
-       rlwinm  r3, r5, 32 - 15, 21, 21         /* DSISR_STORE -> _PAGE_RW */
+       rlwinm  r3, r5, 32 - 24, 30, 30         /* DSISR_STORE -> _PAGE_RW */
        bl      hash_page
        b       handle_page_fault_tramp_1
 FTR_SECTION_ELSE
@@ -437,7 +437,6 @@ InstructionTLBMiss:
        andc.   r1,r1,r0                /* check access & ~permission */
        bne-    InstructionAddressInvalid /* return if access not permitted */
        /* Convert linux-style PTE to low word of PPC-style PTE */
-       rlwimi  r0,r0,32-2,31,31        /* _PAGE_USER -> PP lsb */
        ori     r1, r1, 0xe06           /* clear out reserved bits */
        andc    r1, r0, r1              /* PP = user? 1 : 0 */
 BEGIN_FTR_SECTION
@@ -505,9 +504,8 @@ DataLoadTLBMiss:
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        /* Convert linux-style PTE to low word of PPC-style PTE */
-       rlwinm  r1,r0,32-9,30,30        /* _PAGE_RW -> PP msb */
-       rlwimi  r0,r0,32-1,30,30        /* _PAGE_USER -> PP msb */
-       rlwimi  r0,r0,32-1,31,31        /* _PAGE_USER -> PP lsb */
+       rlwinm  r1,r0,0,30,30           /* _PAGE_RW -> PP msb */
+       rlwimi  r0,r0,1,30,30           /* _PAGE_USER -> PP msb */
        ori     r1,r1,0xe04             /* clear out reserved bits */
        andc    r1,r0,r1                /* PP = user? rw? 1: 3: 0 */
 BEGIN_FTR_SECTION
@@ -585,7 +583,6 @@ DataStoreTLBMiss:
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        /* Convert linux-style PTE to low word of PPC-style PTE */
-       rlwimi  r0,r0,32-2,31,31        /* _PAGE_USER -> PP lsb */
        li      r1,0xe06                /* clear out reserved bits & PP msb */
        andc    r1,r0,r1                /* PP = user? 1: 0 */
 BEGIN_FTR_SECTION
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index c11b0a005196..18fdabde5007 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -41,7 +41,7 @@ mmu_hash_lock:
 /*
  * Load a PTE into the hash table, if possible.
  * The address is in r4, and r3 contains an access flag:
- * _PAGE_RW (0x400) if a write.
+ * _PAGE_RW (0x002) if a write.
  * r9 contains the SRR1 value, from which we use the MSR_PR bit.
  * SPRG_THREAD contains the physical address of the current task's thread.
  *
@@ -78,7 +78,7 @@ _GLOBAL(hash_page)
        blt+    112f                    /* assume user more likely */
        lis     r5, (swapper_pg_dir - ADDR_OFFSET)@ha   /* if kernel address, use */
        addi    r5 ,r5 ,(swapper_pg_dir - ADDR_OFFSET)@l        /* kernel page table */
-       rlwimi  r3,r9,32-12,29,29       /* MSR_PR -> _PAGE_USER */
+       rlwimi  r3,r9,32-14,31,31       /* MSR_PR -> _PAGE_USER */
 112:
 #ifndef CONFIG_PTE_64BIT
        rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
@@ -106,7 +106,7 @@ _GLOBAL(hash_page)
 #else
        rlwimi  r8,r4,23,20,28          /* compute pte address */
 #endif
-       rlwinm  r0,r3,32-3,24,24        /* _PAGE_RW access -> _PAGE_DIRTY */
+       rlwinm  r0,r3,6,24,24           /* _PAGE_RW access -> _PAGE_DIRTY */
        ori     r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
        /*
@@ -316,11 +316,9 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 
 _GLOBAL(create_hpte)
        /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
-       rlwinm  r8,r5,32-9,30,30        /* _PAGE_RW -> PP msb */
        rlwinm  r0,r5,32-6,30,30        /* _PAGE_DIRTY -> PP msb */
-       and     r8,r8,r0                /* writable if _RW & _DIRTY */
-       rlwimi  r5,r5,32-1,30,30        /* _PAGE_USER -> PP msb */
-       rlwimi  r5,r5,32-2,31,31        /* _PAGE_USER -> PP lsb */
+       and     r8,r5,r0                /* writable if _RW & _DIRTY */
+       rlwimi  r5,r5,1,30,30           /* _PAGE_USER -> PP msb */
        ori     r8,r8,0xe04             /* clear out reserved bits */
        andc    r8,r5,r8                /* PP = user? (rw&dirty? 1: 3): 0 */
 BEGIN_FTR_SECTION
@@ -572,7 +570,7 @@ _GLOBAL(flush_hash_pages)
 33:    lwarx   r8,0,r5                 /* fetch the pte flags word */
        andi.   r0,r8,_PAGE_HASHPTE
        beq     8f                      /* done if HASHPTE is already clear */
-       rlwinm  r8,r8,0,31,29           /* clear HASHPTE bit */
+       rlwinm  r8,r8,0,~_PAGE_HASHPTE  /* clear HASHPTE bit */
        stwcx.  r8,0,r5                 /* update the pte */
        bne-    33b
 
-- 
2.25.0
