On 02/03/2014 08:55 PM, Stephane Eranian wrote: > This patch is needed because that PMU uses 32-bit free > running counters with no interrupt capabilities. > > On SNB/IVB/HSW, we used 20GB/s theoretical peak to calculate > the hrtimer timeout necessary to avoid missing an overflow. > That delay is set to 5s to be on the cautious side. > > The SNB IMC uses free running counters, which are handled > via pseudo fixed counters. The SNB IMC PMU implementation > supports an arbitrary number of events, because the counters > are read-only. Therefore it is not possible to track active > counters. Instead we put active events on a linked list which > is then used by the hrtimer handler to update the SW counts. > > Signed-off-by: Stephane Eranian <eran...@google.com> > --- > arch/x86/kernel/cpu/perf_event_intel_uncore.c | 12 ++++++++++++ > arch/x86/kernel/cpu/perf_event_intel_uncore.h | 2 ++ > 2 files changed, 14 insertions(+) > > diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c > b/arch/x86/kernel/cpu/perf_event_intel_uncore.c > index 8b1f81f..f76937e 100644 > --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c > +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c > @@ -1730,6 +1730,7 @@ static void snb_uncore_imc_init_box(struct > intel_uncore_box *box) > addr &= ~(PAGE_SIZE - 1); > > box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE); > + box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL; > } > > static void snb_uncore_imc_enable_box(struct intel_uncore_box *box) > @@ -3166,6 +3167,7 @@ static void uncore_perf_event_update(struct > intel_uncore_box *box, struct perf_e > static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) > { > struct intel_uncore_box *box; > + struct perf_event *event; > unsigned long flags; > int bit; > > @@ -3178,6 +3180,14 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct > hrtimer *hrtimer) > */ > local_irq_save(flags); > > + /* > + * handle boxes with an active event list as opposed to active > + * 
counters > + */ > + list_for_each_entry(event, &box->active_list, active_entry) { > + uncore_perf_event_update(box, event); > + } > + > for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) > uncore_perf_event_update(box, box->events[bit]); > > @@ -3227,6 +3237,8 @@ static struct intel_uncore_box *uncore_alloc_box(struct > intel_uncore_type *type, > /* set default hrtimer timeout */ > box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; > > + INIT_LIST_HEAD(&box->active_list); > + > return box; > } > > diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h > b/arch/x86/kernel/cpu/perf_event_intel_uncore.h > index 0770da2..634de93 100644 > --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h > +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h > @@ -6,6 +6,7 @@ > > #define UNCORE_PMU_NAME_LEN 32 > #define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) > +#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC) > > #define UNCORE_FIXED_EVENT 0xff > #define UNCORE_PMC_IDX_MAX_GENERIC 8 > @@ -492,6 +493,7 @@ struct intel_uncore_box { > u64 hrtimer_duration; /* hrtimer timeout for this box */ > struct hrtimer hrtimer; > struct list_head list; > + struct list_head active_list;
I think patch 8 and patch 9 are out of order. Regards, Yan, Zheng > void *io_addr; > struct intel_uncore_extra_reg shared_regs[0]; > }; > -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/