Move the r13 load after the call to FIND_PTE, and use r13 instead of
r10 for storing the fault address. This will allow using r10 freely
in FIND_PTE in a following patch to handle hugepage size.

Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
v5: New
---
 arch/powerpc/kernel/head_85xx.S | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S
index 282e49c51deb..226f88e77d6d 100644
--- a/arch/powerpc/kernel/head_85xx.S
+++ b/arch/powerpc/kernel/head_85xx.S
@@ -294,9 +294,10 @@ set_ivor:
 /* Macros to hide the PTE size differences
  *
  * FIND_PTE -- walks the page tables given EA & pgdir pointer
- *   r10 -- EA of fault
+ *   r10 -- free
  *   r11 -- PGDIR pointer
  *   r12 -- free
+ *   r13 -- EA of fault
  *   label 2: is the bailout case
  *
  * if we find the pte (fall through):
@@ -307,7 +308,7 @@ set_ivor:
 #ifdef CONFIG_PTE_64BIT
 #ifdef CONFIG_HUGETLB_PAGE
 #define FIND_PTE       \
-       rlwinm  r12, r10, 14, 18, 28;   /* Compute pgdir/pmd offset */  \
+       rlwinm  r12, r13, 14, 18, 28;   /* Compute pgdir/pmd offset */  \
        add     r12, r11, r12;                                          \
        lwz     r11, 4(r12);            /* Get pgd/pmd entry */         \
        rlwinm. r12, r11, 0, 0, 20;     /* Extract pt base address */   \
@@ -317,26 +318,26 @@ set_ivor:
        andi.   r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */ \
        xor     r12, r10, r11;          /* drop size bits from pointer */ \
        b       1001f;                                                  \
-1000:  rlwimi  r12, r10, 23, 20, 28;   /* Compute pte address */       \
+1000:  rlwimi  r12, r13, 23, 20, 28;   /* Compute pte address */       \
        li      r10, 0;                 /* clear r10 */                 \
 1001:  lwz     r11, 4(r12);            /* Get pte entry */
 #else
 #define FIND_PTE       \
-       rlwinm  r12, r10, 14, 18, 28;   /* Compute pgdir/pmd offset */  \
+       rlwinm  r12, r13, 14, 18, 28;   /* Compute pgdir/pmd offset */  \
        add     r12, r11, r12;                                          \
        lwz     r11, 4(r12);            /* Get pgd/pmd entry */         \
        rlwinm. r12, r11, 0, 0, 20;     /* Extract pt base address */   \
        beq     2f;                     /* Bail if no table */          \
-       rlwimi  r12, r10, 23, 20, 28;   /* Compute pte address */       \
+       rlwimi  r12, r13, 23, 20, 28;   /* Compute pte address */       \
        lwz     r11, 4(r12);            /* Get pte entry */
 #endif /* HUGEPAGE */
 #else /* !PTE_64BIT */
 #define FIND_PTE       \
-       rlwimi  r11, r10, 12, 20, 29;   /* Create L1 (pgdir/pmd) address */     \
+       rlwimi  r11, r13, 12, 20, 29;   /* Create L1 (pgdir/pmd) address */     \
        lwz     r11, 0(r11);            /* Get L1 entry */                      \
        rlwinm. r12, r11, 0, 0, 19;     /* Extract L2 (pte) base address */     \
        beq     2f;                     /* Bail if no table */                  \
-       rlwimi  r12, r10, 22, 20, 29;   /* Compute PTE address */               \
+       rlwimi  r12, r13, 22, 20, 29;   /* Compute PTE address */               \
        lwz     r11, 0(r12);            /* Get Linux PTE */
 #endif
 
@@ -443,13 +444,13 @@ START_BTB_FLUSH_SECTION
        BTB_FLUSH(r10)
 1:
 END_BTB_FLUSH_SECTION
-       mfspr   r10, SPRN_DEAR          /* Get faulting address */
+       mfspr   r13, SPRN_DEAR          /* Get faulting address */
 
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11, PAGE_OFFSET@h
-       cmplw   5, r10, r11
+       cmplw   5, r13, r11
        blt     5, 3f
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l
@@ -472,14 +473,14 @@ END_BTB_FLUSH_SECTION
 #endif
 
 4:
+       FIND_PTE
+
 #ifdef CONFIG_PTE_64BIT
        li      r13,_PAGE_PRESENT|_PAGE_BAP_SR
        oris    r13,r13,_PAGE_ACCESSED@h
 #else
        li      r13,_PAGE_PRESENT|_PAGE_READ|_PAGE_ACCESSED
 #endif
-
-       FIND_PTE
        andc.   r13,r13,r11             /* Check permission */
 
 #ifdef CONFIG_PTE_64BIT
@@ -536,13 +537,13 @@ START_BTB_FLUSH_SECTION
 1:
 END_BTB_FLUSH_SECTION
 
-       mfspr   r10, SPRN_SRR0          /* Get faulting address */
+       mfspr   r13, SPRN_SRR0          /* Get faulting address */
 
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11, PAGE_OFFSET@h
-       cmplw   5, r10, r11
+       cmplw   5, r13, r11
        blt     5, 3f
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l
@@ -551,6 +552,7 @@ END_BTB_FLUSH_SECTION
        rlwinm  r12,r12,0,16,1
        mtspr   SPRN_MAS1,r12
 
+       FIND_PTE
        /* Make up the required permissions for kernel code */
 #ifdef CONFIG_PTE_64BIT
        li      r13,_PAGE_PRESENT | _PAGE_BAP_SX
@@ -571,6 +573,7 @@ END_BTB_FLUSH_SECTION
        beq     2f                      /* KUAP fault */
 #endif
 
+       FIND_PTE
        /* Make up the required permissions for user code */
 #ifdef CONFIG_PTE_64BIT
        li      r13,_PAGE_PRESENT | _PAGE_BAP_UX
@@ -580,7 +583,6 @@ END_BTB_FLUSH_SECTION
 #endif
 
 4:
-       FIND_PTE
        andc.   r13,r13,r11             /* Check permission */
 
 #ifdef CONFIG_PTE_64BIT
-- 
2.44.0

Reply via email to