From: Anton Blanchard <[email protected]>

                   -------------------
    This is a commit scheduled for the next v2.6.34 longterm release.
    If you see a problem with using this for longterm, please comment.
                   -------------------

commit 0837e3242c73566fc1c0196b4ec61779c25ffc93 upstream.

Events on POWER7 can roll back if a speculative event doesn't
eventually complete. Unfortunately in some rare cases they will
raise a performance monitor exception. We need to catch this to
ensure we reset the PMC. In all cases the PMC will be 256 or less
cycles from overflow.

Signed-off-by: Anton Blanchard <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
LKML-Reference: <20110309143842.6c22845e@kryten>
Signed-off-by: Ingo Molnar <[email protected]>
Signed-off-by: Paul Gortmaker <[email protected]>
---
 arch/powerpc/include/asm/reg.h   |    1 +
 arch/powerpc/kernel/perf_event.c |   24 +++++++++++++++++++++++-
 2 files changed, 24 insertions(+), 1 deletions(-)
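
For reference (not part of the patch): a minimal standalone sketch of the
overflow test being added, mirroring pmc_overflow() in the diff below. The
is_power7 flag here is an assumption of the sketch, standing in for the
kernel's __is_processor(PV_POWER7) check.

#include <stdbool.h>
#include <stdio.h>

static bool pmc_overflow(unsigned long val, bool is_power7)
{
	/* Bit 31 set: the 32-bit PMC has already overflowed. */
	if ((int)val < 0)
		return true;

	/*
	 * POWER7 rollback case: a speculative event that never
	 * completes can raise a performance monitor exception while
	 * the PMC is still up to 256 cycles short of overflow.
	 */
	if (is_power7 && ((0x80000000 - val) <= 256))
		return true;

	return false;
}

int main(void)
{
	/* 128 cycles from overflow: caught only on POWER7. */
	printf("%d\n", pmc_overflow(0x7fffff80UL, true));	/* 1 */
	printf("%d\n", pmc_overflow(0x7fffff80UL, false));	/* 0 */
	/* Past the 0x80000000 boundary: caught on any CPU. */
	printf("%d\n", pmc_overflow(0x80000001UL, false));	/* 1 */
	return 0;
}

Note that in the patched kernel this test only runs in the second pass of
perf_event_interrupt(), after no overflowing event was found, so a user-set
period of less than 256 is not reset by mistake.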

diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5572e86..f2ac5a6 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -864,6 +864,7 @@
 #define PV_970         0x0039
 #define PV_POWER5      0x003A
 #define PV_POWER5p     0x003B
+#define PV_POWER7      0x003F
 #define PV_970FX       0x003C
 #define PV_630         0x0040
 #define PV_630p        0x0041
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 08460a2..7a3b2d2 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1220,6 +1220,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
        return ip;
 }
 
+static bool pmc_overflow(unsigned long val)
+{
+       if ((int)val < 0)
+               return true;
+
+       /*
+        * Events on POWER7 can roll back if a speculative event doesn't
+        * eventually complete. Unfortunately in some rare cases they will
+        * raise a performance monitor exception. We need to catch this to
+        * ensure we reset the PMC. In all cases the PMC will be 256 or less
+        * cycles from overflow.
+        *
+        * We only do this if the first pass fails to find any overflowing
+        * PMCs because a user might set a period of less than 256 and we
+        * don't want to mistakenly reset them.
+        */
+       if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+               return true;
+
+       return false;
+}
+
 /*
  * Performance monitor interrupt stuff
  */
@@ -1267,7 +1289,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
                        if (is_limited_pmc(i + 1))
                                continue;
                        val = read_pmc(i + 1);
-                       if ((int)val < 0)
+                       if (pmc_overflow(val))
                                write_pmc(i + 1, 0);
                }
        }
-- 
1.7.4.4
