From: Sangeetha Rao <sangeetha....@intel.com> Signed-off-by: Sangeetha Rao <sangeetha....@intel.com> Signed-off-by: Bruce Ashfield <bruce.ashfi...@windriver.com> --- arch/powerpc/kernel/head_44x.S | 41 ++++++++++++++++++++++++++++++----- arch/powerpc/kernel/traps.c | 47 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 52c109f..0e4de2a 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -627,8 +627,8 @@ finish_tlb_load_44x: andc. r13,r13,r12 /* Check permission */ - /* Jump to common tlb load */ - beq finish_tlb_load_47x + /* Jump to tlb data load */ + beq finish_tlb_load_data_47x 2: /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. @@ -647,6 +647,7 @@ finish_tlb_load_44x: * information from different registers and bailout * to a different point. */ + .globl InstructionTLBError47x START_EXCEPTION(InstructionTLBError47x) mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */ mtspr SPRN_SPRG_WSCRATCH1,r11 @@ -710,8 +711,8 @@ finish_tlb_load_44x: andc. r13,r13,r12 /* Check permission */ - /* Jump to common TLB load point */ - beq finish_tlb_load_47x + /* Jump to TLB instruction load point */ + beq finish_tlb_load_instruction_47x 2: /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. @@ -725,7 +726,8 @@ finish_tlb_load_44x: b InstructionStorage /* - * Both the instruction and data TLB miss get to this + * The instruction TLB miss gets to finish_tlb_load_instruction_47x + * and the data TLB miss gets to finish_tlb_load_data_47x * point to load the TLB. * r10 - free to use * r11 - PTE high word value @@ -734,7 +736,34 @@ finish_tlb_load_44x: * MMUCR - loaded with proper value when we get here * Upon exit, we reload everything and RFI. */ -finish_tlb_load_47x: +finish_tlb_load_data_47x: + /* Combine RPN & ERPN and write WS 1 */ + rlwimi r11,r12,0,0,31-PAGE_SHIFT + tlbwe r11,r13,1 + + /* And make up word 2 */ + li r10,0xf85 /* Mask to apply from PTE */ + rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */ + and r11,r12,r10 /* Mask PTE bits to keep */ + andi. r10,r12,_PAGE_USER /* User page ? 
*/ + beq 1f /* nope, leave U bits empty */ + rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */ +1: tlbwe r11,r13,2 + + mfspr r10,SPRN_DEAR + dcbt 0,r10 + + /* Done...restore registers and get out of here. + */ + mfspr r11, SPRN_SPRG_RSCRATCH4 + mtcr r11 + mfspr r13, SPRN_SPRG_RSCRATCH3 + mfspr r12, SPRN_SPRG_RSCRATCH2 + mfspr r11, SPRN_SPRG_RSCRATCH1 + mfspr r10, SPRN_SPRG_RSCRATCH0 + rfi + .globl finish_tlb_load_instruction_47x +finish_tlb_load_instruction_47x: /* Combine RPN & ERPN an write WS 1 */ rlwimi r11,r12,0,0,31-PAGE_SHIFT tlbwe r11,r13,1 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index b36da44..de5d18f 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -36,6 +36,10 @@ #include <linux/debugfs.h> #include <linux/ratelimit.h> +#include <asm/mmu.h> +#include <asm/page.h> +#include <asm/dcr-native.h> + #include <asm/emulated_ops.h> #include <asm/pgtable.h> #include <asm/uaccess.h> @@ -656,12 +660,55 @@ int machine_check_generic(struct pt_regs *regs) } #endif /* everything else */ +extern unsigned int finish_tlb_load_instruction_47x; +extern unsigned int InstructionTLBError47x; + void machine_check_exception(struct pt_regs *regs) { int recover = 0; + unsigned int cpu; + unsigned int p2a_status; + unsigned int p2a_address; + unsigned int l2plbstats1; + unsigned int mcsr; + unsigned int mcsr0; __get_cpu_var(irq_stat).mce_exceptions++; + mcsr = mfspr(SPRN_MCSR); + mcsr0 = mfspr(SPRN_MCSRR0); + p2a_status = mfdcr(0x100C) & 0x00000010; + p2a_address = mfdcr(0x1045); + p2a_address = (p2a_address << 4) & 0x000FFFF0; + cpu = smp_processor_id(); + if (cpu < 4) { + mtdcr((cpu + 3) * 256, 0x304); + l2plbstats1 = mfdcr((cpu + 3) * 256 + 4); + } else { + mtdcr((cpu + 9)*256, 0x304); + l2plbstats1 = mfdcr((cpu + 9) * 256 + 4); + } + + if (((mcsr & 0x80200000) == 0x80200000) && + (InstructionTLBError47x < mcsr0) && + (mcsr0 < finish_tlb_load_instruction_47x) && + (p2a_status == 0x00000010) && + (l2plbstats1 == 
0x000C0000)) { + if (cpu < 4) { + mtdcr((cpu + 3) * 256 + 4, + 0x000C0000); + } else { + mtdcr((cpu + 9) * 256 + 4, + 0x000C0000); + } + printk(KERN_INFO "machine_check_exception: Core %d: MCSR=0x%x l2plbstats1=0x%x", + cpu, mcsr, l2plbstats1); + mtdcr(0x100C, 0xFFFFFFFF); + mtspr(SPRN_MCSR, 0); + recover = 1; + return; + } + /* See if any machine dependent calls. In theory, we would want * to call the CPU first, and call the ppc_md. one if the CPU * one returns a positive number. However there is existing code -- 1.7.9.5 -- _______________________________________________ linux-yocto mailing list linux-yocto@yoctoproject.org https://lists.yoctoproject.org/listinfo/linux-yocto