Guys,

Here's a cut at a patch that cleans up the TASK_SIZE issue.  We now have
TASK_SIZE at 0xc0000000 for everything except PReP, and we use PAGE_OFFSET in
the SW TLB handlers.  I'm assuming 8xx will get a cleanup patch out of the
PAGE_OFFSET testing (Dan :).
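
For reference, the check the handlers end up doing is roughly this (just a C
sketch of the intent, assuming the new 0xc0000000 default; is_kernel_fault()
is an illustrative name, not something the patch adds):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_OFFSET     0xc0000000UL    /* start of the kernel linear mapping */

/* A miss at or above PAGE_OFFSET gets resolved through swapper_pg_dir;
 * anything below it goes through the faulting task's page tables. */
static bool is_kernel_fault(uint32_t ea)
{
        return ea >= PAGE_OFFSET;
}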

Also, note the change in head_32.S for 603.  I made the test match what we
do on 4xx/fsl-booke.  (Haven't tested this on a system yet.)
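
In other words, after the operand swap and branch-sense flip below, the 603
path picks its page directory the same way the BookE handlers do.  A minimal
sketch, reusing PAGE_OFFSET and uint32_t from the snippet above (pgdir_for()
and the pgd typedef are illustrative only):

typedef unsigned long pgd_t;

static pgd_t *pgdir_for(uint32_t ea, pgd_t *task_pgd, pgd_t *kernel_pgd)
{
        /* Low addresses are user space and get _PAGE_USER enforced;
         * high addresses are looked up via swapper_pg_dir. */
        return ea >= PAGE_OFFSET ? kernel_pgd : task_pgd;
}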

If we all like this, I'll probably break it up into two patches so that any
possible issues in the future are decoupled.

- k

---

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3180457..70e1f89 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -599,7 +599,8 @@ config TASK_SIZE_BOOL

 config TASK_SIZE
        hex "Size of user task space" if TASK_SIZE_BOOL
-       default "0x80000000"
+       default "0x80000000" if PPC_PREP
+       default "0xc0000000"

 config CONSISTENT_START_BOOL
        bool "Set custom consistent memory pool address"
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index d83f04e..a5b13ae 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -469,12 +469,12 @@ InstructionTLBMiss:
        mfctr   r0
        /* Get PTE (linux-style) and check access */
        mfspr   r3,SPRN_IMISS
-       lis     r1,KERNELBASE@h         /* check if kernel address */
-       cmplw   0,r3,r1
+       lis     r1,PAGE_OFFSET@h        /* check if kernel address */
+       cmplw   0,r1,r3
        mfspr   r2,SPRN_SPRG3
        li      r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
        lwz     r2,PGDIR(r2)
-       blt+    112f
+       bge-    112f
        mfspr   r2,SPRN_SRR1            /* and MSR_PR bit from SRR1 */
        rlwimi  r1,r2,32-12,29,29       /* shift MSR_PR to _PAGE_USER posn */
        lis     r2,swapper_pg_dir@ha    /* if kernel address, use */
@@ -543,12 +543,12 @@ DataLoadTLBMiss:
        mfctr   r0
        /* Get PTE (linux-style) and check access */
        mfspr   r3,SPRN_DMISS
-       lis     r1,KERNELBASE@h         /* check if kernel address */
-       cmplw   0,r3,r1
+       lis     r1,PAGE_OFFSET@h        /* check if kernel address */
+       cmplw   0,r1,r3
        mfspr   r2,SPRN_SPRG3
        li      r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
        lwz     r2,PGDIR(r2)
-       blt+    112f
+       bge-    112f
        mfspr   r2,SPRN_SRR1            /* and MSR_PR bit from SRR1 */
        rlwimi  r1,r2,32-12,29,29       /* shift MSR_PR to _PAGE_USER posn */
        lis     r2,swapper_pg_dir@ha    /* if kernel address, use */
@@ -615,12 +615,12 @@ DataStoreTLBMiss:
        mfctr   r0
        /* Get PTE (linux-style) and check access */
        mfspr   r3,SPRN_DMISS
-       lis     r1,KERNELBASE@h         /* check if kernel address */
-       cmplw   0,r3,r1
+       lis     r1,PAGE_OFFSET@h        /* check if kernel address */
+       cmplw   0,r1,r3
        mfspr   r2,SPRN_SPRG3
        li      r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
        lwz     r2,PGDIR(r2)
-       blt+    112f
+       bge-    112f
        mfspr   r2,SPRN_SRR1            /* and MSR_PR bit from SRR1 */
        rlwimi  r1,r2,32-12,29,29       /* shift MSR_PR to _PAGE_USER posn */
        lis     r2,swapper_pg_dir@ha    /* if kernel address, use */
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index e312824..cfefc2d 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -289,7 +289,7 @@ label:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
-       lis     r11, TASK_SIZE@h
+       lis     r11, PAGE_OFFSET@h
        cmplw   r10, r11
        blt+    3f
        lis     r11, swapper_pg_dir@h
@@ -481,7 +481,7 @@ label:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
-       lis     r11, TASK_SIZE@h
+       lis     r11, PAGE_OFFSET@h
        cmplw   r10, r11
        blt+    3f
        lis     r11, swapper_pg_dir@h
@@ -581,7 +581,7 @@ label:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
-       lis     r11, TASK_SIZE@h
+       lis     r11, PAGE_OFFSET@h
        cmplw   r10, r11
        blt+    3f
        lis     r11, swapper_pg_dir@h
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 864d63f..409db61 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -319,7 +319,7 @@ interrupt_base:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
-       lis     r11, TASK_SIZE@h
+       lis     r11, PAGE_OFFSET@h
        cmplw   r10, r11
        blt+    3f
        lis     r11, swapper_pg_dir@h
@@ -458,7 +458,7 @@ interrupt_base:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
-       lis     r11, TASK_SIZE@h
+       lis     r11, PAGE_OFFSET@h
        cmplw   r10, r11
        blt+    3f
        lis     r11, swapper_pg_dir@h
@@ -528,7 +528,7 @@ interrupt_base:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
-       lis     r11, TASK_SIZE@h
+       lis     r11, PAGE_OFFSET@h
        cmplw   r10, r11
        blt+    3f
        lis     r11, swapper_pg_dir@h
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index ee33ddd..4b98227 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -461,8 +461,7 @@ interrupt_base:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
-       lis     r11, TASK_SIZE@h
-       ori     r11, r11, TASK_SIZE@l
+       lis     r11, PAGE_OFFSET@h
        cmplw   0, r10, r11
        bge     2f

@@ -584,8 +583,7 @@ interrupt_base:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
-       lis     r11, TASK_SIZE@h
-       ori     r11, r11, TASK_SIZE@l
+       lis     r11, PAGE_OFFSET@h
        cmplw   5, r10, r11
        blt     5, 3f
        lis     r11, swapper_pg_dir@h
@@ -645,8 +643,7 @@ interrupt_base:
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
-       lis     r11, TASK_SIZE@h
-       ori     r11, r11, TASK_SIZE@l
+       lis     r11, PAGE_OFFSET@h
        cmplw   5, r10, r11
        blt     5, 3f
        lis     r11, swapper_pg_dir@h
@@ -744,7 +741,7 @@ data_access:
  *     r10 - EA of fault
  *     r11 - TLB (info from Linux PTE)
  *     r12, r13 - available to use
- *     CR5 - results of addr < TASK_SIZE
+ *     CR5 - results of addr >= PAGE_OFFSET
  *     MAS0, MAS1 - loaded with proper value when we get here
  *     MAS2, MAS3 - will need additional info from Linux PTE
  *     Upon exit, we reload everything and RFI.