From: Yan, Zheng <[email protected]>

Flush the PEBS buffer during a context switch if the PEBS interrupt
threshold is larger than one. With a multi-record threshold, PEBS
records accumulate in the DS buffer and are consumed only at PMI time,
so records left over from the previous task would be attributed to the
newly scheduled one. Draining the buffer on sched-out lets perf supply
the correct TID for sample outputs.

Signed-off-by: Yan, Zheng <[email protected]>
Signed-off-by: Kan Liang <[email protected]>
---
 arch/x86/kernel/cpu/perf_event.h           |  2 ++
 arch/x86/kernel/cpu/perf_event_intel.c     | 11 +++++++-
 arch/x86/kernel/cpu/perf_event_intel_ds.c  | 45 +++++++++++++++++++++++++-----
 arch/x86/kernel/cpu/perf_event_intel_lbr.c |  3 --
 4 files changed, 50 insertions(+), 11 deletions(-)
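
A note for reviewers (not part of the commit): the scheme is that the
context-switch callback is armed, via perf_sched_cb_inc(), only while
the PEBS events on the CPU can run with the large auto-reload
threshold, and the DS buffer is drained whenever such a task schedules
out, so buffered records still carry the TID of the task that produced
them. The userspace sketch below models that arm/drain flow; it is
illustrative only, and names like sched_cb_users, pebs_enable() and
drain_pebs_buffer() are made up, standing in for
perf_sched_cb_inc()/perf_sched_cb_dec(), intel_pmu_pebs_enable() and
x86_pmu.drain_pebs().

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model: how many events on this CPU currently want the
 * context-switch callback.  Stands in for the per-pmu counter
 * behind perf_sched_cb_inc()/perf_sched_cb_dec().
 */
static int sched_cb_users;

/* Stands in for x86_pmu.drain_pebs(&regs). */
static void drain_pebs_buffer(void)
{
	printf("drain buffered PEBS records for the outgoing task\n");
}

/*
 * Mirrors the shape of intel_pmu_pebs_enable(): an auto-reload event
 * raises the threshold and arms the callback; a threshold-of-one
 * event arriving later rolls the CPU back to per-record interrupts.
 */
static void pebs_enable(bool auto_reload, bool first_pebs)
{
	if (auto_reload) {
		if (first_pebs)
			sched_cb_users++;	/* perf_sched_cb_inc() */
	} else {
		if (!first_pebs && sched_cb_users)
			sched_cb_users--;	/* perf_sched_cb_dec() */
	}
}

/* Mirrors intel_pmu_pebs_sched_task(): flush only on sched-out. */
static void sched_task(bool sched_in)
{
	if (!sched_in)
		drain_pebs_buffer();
}

int main(void)
{
	pebs_enable(true, true);	/* first event, large threshold */
	if (sched_cb_users)		/* core calls back only when armed */
		sched_task(false);	/* task schedules out: drain */
	return 0;
}

In the real patch the perf core invokes ->sched_task() only while the
perf_sched_cb_inc() count is non-zero, and intel_pmu_pebs_disable()
drops the reference again once the last PEBS event on the CPU goes
away, so the callback costs nothing in the common case.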

diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 5b677a9..446f21b 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -871,6 +871,8 @@ void intel_pmu_pebs_enable_all(void);
 
 void intel_pmu_pebs_disable_all(void);
 
+void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
+
 void intel_ds_init(void);
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6c8579a..d647d7e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2729,6 +2729,15 @@ static void intel_pmu_cpu_dying(int cpu)
        fini_debug_store_on_cpu(cpu);
 }
 
+static void intel_pmu_sched_task(struct perf_event_context *ctx,
+                                bool sched_in)
+{
+       if (x86_pmu.pebs_active)
+               intel_pmu_pebs_sched_task(ctx, sched_in);
+       if (x86_pmu.lbr_nr)
+               intel_pmu_lbr_sched_task(ctx, sched_in);
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -2780,7 +2789,7 @@ static __initconst const struct x86_pmu intel_pmu = {
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
        .guest_get_msrs         = intel_guest_get_msrs,
-       .sched_task             = intel_pmu_lbr_sched_task,
+       .sched_task             = intel_pmu_sched_task,
 };
 
 static __init void intel_clovertown_quirk(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index fafbf97..3d8950a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -550,6 +550,19 @@ int intel_pmu_drain_bts_buffer(void)
        return 1;
 }
 
+static inline void intel_pmu_drain_pebs_buffer(void)
+{
+       struct pt_regs regs;
+
+       x86_pmu.drain_pebs(&regs);
+}
+
+void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+       if (!sched_in)
+               intel_pmu_drain_pebs_buffer();
+}
+
 /*
  * PEBS
  */
@@ -704,18 +717,28 @@ void intel_pmu_pebs_enable(struct perf_event *event)
         * When the event is constrained enough we can use a larger
         * threshold and run the event with less frequent PMI.
         */
-       if (0 && /* disable this temporarily */
-           (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD)) {
+       if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
                threshold = ds->pebs_absolute_maximum -
                        x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
-       } else {
-               threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
-       }
 
-       /* Use auto-reload if possible to save a MSR write in the PMI */
-       if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
+               /* Use auto-reload if possible to save a MSR write in the PMI */
                ds->pebs_event_reset[hwc->idx] =
                        (u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
+
+               if (first_pebs)
+                       perf_sched_cb_inc(event->ctx->pmu);
+       } else {
+               threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
+
+               ds->pebs_event_reset[hwc->idx] = 0;
+
+               /*
+                * If not all events can use larger buffer,
+                * roll back to threshold = 1
+                */
+               if (!first_pebs &&
+                   (ds->pebs_interrupt_threshold > threshold))
+                       perf_sched_cb_dec(event->ctx->pmu);
        }
 
        if (first_pebs || ds->pebs_interrupt_threshold > threshold)
@@ -726,6 +749,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
+       struct debug_store *ds = cpuc->ds;
 
        cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
 
@@ -734,6 +758,13 @@ void intel_pmu_pebs_disable(struct perf_event *event)
        else if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_ST)
                cpuc->pebs_enabled &= ~(1ULL << 63);
 
+       if (ds->pebs_interrupt_threshold >
+           ds->pebs_buffer_base + x86_pmu.pebs_record_size) {
+               intel_pmu_drain_pebs_buffer();
+               if (!pebs_is_enabled(cpuc))
+                       perf_sched_cb_dec(event->ctx->pmu);
+       }
+
        if (cpuc->enabled)
                wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 94e5b50..c8a72cc 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -262,9 +262,6 @@ void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx;
 
-       if (!x86_pmu.lbr_nr)
-               return;
-
        /*
         * If LBR callstack feature is enabled and the stack was saved when
         * the task was scheduled out, restore the stack. Otherwise flush
-- 
1.7.11.7
