With the exception of handling 'empty' buffers, I ended up with the
below. Please try again.


There are two small errors. After fixing them, the patch works well.

---
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1156,16 +1156,13 @@ int x86_perf_event_set_period(struct per
        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

-       if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
-           local64_read(&hwc->prev_count) != (u64)-left) {
-               /*
-                * The hw event starts counting from this event offset,
-                * mark it to be able to extract future deltas:
-                */
-               local64_set(&hwc->prev_count, (u64)-left);
+       /*
+        * The hw event starts counting from this event offset,
+        * mark it to be able to extract future deltas:
+        */
+       local64_set(&hwc->prev_count, (u64)-left);

-               wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
-       }
+       wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

        /*
         * Due to erratum on certain cpu we need
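For reference, here is how the negative programming above plays out
numerically. This is a stand-alone user-space sketch, not kernel code;
the 48-bit counter width and the example period are assumptions on my
part:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* assumption: 48-bit counter, as on many Intel PMUs */
        const int cntval_bits = 48;
        const uint64_t cntval_mask = (1ULL << cntval_bits) - 1;
        int64_t left = 100003;          /* example period */

        /* the counter is programmed with -left, masked to its width */
        uint64_t programmed = (uint64_t)(-left) & cntval_mask;

        printf("programmed         = %#llx\n",
               (unsigned long long)programmed);
        /* counting up from 'programmed', it wraps after 'left' events */
        printf("events to overflow = %llu\n",
               (unsigned long long)(cntval_mask + 1 - programmed));
        return 0;
}

The counter counts up from that value and overflows on the wrap, which
is why prev_count is set to the same (u64)-left.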
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1306,17 +1306,89 @@ get_next_pebs_record_by_bit(void *base,
        return NULL;
 }

+/*
+ * Special variant of intel_pmu_save_and_restart() for auto-reload.
+ */
+static int
+intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       int shift = 64 - x86_pmu.cntval_bits;
+       u64 period = hwc->sample_period;
+       u64 prev_raw_count, new_raw_count;
+       u64 delta;
+
+       WARN_ON(!period);
+
+       /*
+        * drain_pebs() only happens when the PMU is disabled.
+        */
+       WARN_ON(this_cpu_read(cpu_hw_events.enabled));
+
+       prev_raw_count = local64_read(&hwc->prev_count);
+       rdpmcl(hwc->event_base_rdpmc, new_raw_count);
+       local64_set(&hwx->prev_count, new_raw_count);

There is a typo here: it should be &hwc->prev_count.

+
+       /*
+        * Careful, not all hw sign-extends above the physical width
+        * of the counter.
+        */
+       delta = (new_raw_count << shift) - (prev_raw_count << shift);
+       delta >>= shift;

new_raw_count can be smaller than prev_raw_count. In that case the
subtraction sets the sign bit, and since delta is a u64 the logical
right shift yields a huge bogus value instead of a negative one.

I think we can add one period here to keep the intermediate result
non-negative:
+       delta = (period << shift) + (new_raw_count << shift) -
+               (prev_raw_count << shift);
+       delta >>= shift;
......
+       local64_add(delta + period * (count - 1), &event->count);
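To make the failure mode concrete, a throwaway user-space sketch; the
48-bit counter width and the values are made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const int shift = 64 - 48;      /* assumed 48-bit counter */
        uint64_t prev_raw = 0xffff00;
        uint64_t new_raw  = 0xfffe00;   /* new < prev: wrapped in between */
        uint64_t period   = 0x1000;

        /* original code: sign bit set, logical shift -> huge value */
        uint64_t bad = ((new_raw << shift) - (prev_raw << shift)) >> shift;

        /* with one period added, the intermediate stays non-negative */
        uint64_t good = ((period << shift) + (new_raw << shift) -
                         (prev_raw << shift)) >> shift;

        printf("bad  = %#llx\n", (unsigned long long)bad);  /* 0xffffffffff00 */
        printf("good = %#llx\n", (unsigned long long)good); /* 0xf00 */
        return 0;
}

The period folded into delta is then compensated by accumulating
period * (count - 1) instead of period * count.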


Thanks,
Kan
+
+       /*
+        * Since the counter increments a negative counter value and
+        * overflows on the sign switch, giving the interval:
+        *
+        *   [-period, 0]
+        *
+        * the difference between two consecutive reads is:
+        *
+        *   A) value2 - value1;
+        *      when no overflows have happened in between,
+        *
+        *   B) (0 - value1) + (value2 - (-period));
+        *      when one overflow happened in between,
+        *
+        *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
+        *      when @n overflows happened in between.
+        *
+        * Here A) is the obvious difference, B) is the extension to the
+        * discrete interval, where the first term is to the top of the
+        * interval and the second term is from the bottom of the next
+        * interval and C) the extension to multiple intervals, where the
+        * middle term is the whole intervals covered.
+        *
+        * An equivalent of C, by reduction, is:
+        *
+        *   value2 - value1 + n * period
+        */
+       local64_add(delta + period * count, &event->count);
+
+       perf_event_update_userpage(event);
+
+       return 0;
+}
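To sanity-check the reduction from case C) above, a throwaway snippet
with made-up numbers (period = 100, value1 = -30, value2 = -80, and
n = 3 overflows in between):

#include <assert.h>

int main(void)
{
        long period = 100, value1 = -30, value2 = -80, n = 3;

        long c = (0 - value1) + (n - 1) * period + (value2 - (-period));
        long reduced = value2 - value1 + n * period;

        assert(c == 250);       /* 30 + 200 + 20 */
        assert(c == reduced);   /* -80 + 30 + 300 */
        return 0;
}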
+
  static void __intel_pmu_pebs_event(struct perf_event *event,
                                   struct pt_regs *iregs,
                                   void *base, void *top,
                                   int bit, int count)
  {
+       struct hw_perf_event *hwc = &event->hw;
        struct perf_sample_data data;
        struct pt_regs regs;
        void *at = get_next_pebs_record_by_bit(base, top, bit);
-       if (!intel_pmu_save_and_restart(event) &&
-           !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
+       if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
+               /*
+                * Now, auto-reload is only enabled in fixed period mode.
+                * The reload value is always hwc->sample_period.
+                * This may need to change if auto-reload is enabled in
+                * freq mode later.
+                */
+               intel_pmu_save_and_restart_reload(event, count);
+       } else if (!intel_pmu_save_and_restart(event))
                return;

        while (count > 1) {
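Usage-wise (my reading, not something the patch states): a fixed period
is what, e.g., 'perf record -e cycles:pp -c 100003' programs, and that
is the case where auto-reload can kick in; with 'perf record -F 1000'
the kernel keeps re-estimating the period in freq mode, which the
comment above says auto-reload does not cover yet.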
