On Mon, Aug 26, 2019 at 07:47:34AM -0700, [email protected] wrote:

> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
> index 1b2c37ed49db..f4d6335a18e2 100644
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -2131,7 +2131,7 @@ static inline void intel_pmu_ack_status(u64 ack)
>  
>  static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
>  {
> -     int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
> +     int idx = get_reg_idx(hwc->idx) - INTEL_PMC_IDX_FIXED;
>       u64 ctrl_val, mask;
>  
>       mask = 0xfULL << (idx * 4);
> @@ -2150,6 +2150,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
>  {
>       struct hw_perf_event *hwc = &event->hw;
>       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +     int reg_idx = get_reg_idx(hwc->idx);
>  
>       if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
>               intel_pmu_disable_bts();

It is unfortunate we need that translation in both places; also note the
inconsistent naming (idx in intel_pmu_disable_fixed() vs. reg_idx in
intel_pmu_disable_event()).
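
One option is to do the translation exactly once and pass it down; a
rough, entirely untested sketch (the two-argument
intel_pmu_disable_fixed() signature is my invention, not something from
your series):

	static void intel_pmu_disable_fixed(struct hw_perf_event *hwc, int reg_idx)
	{
		int idx = reg_idx - INTEL_PMC_IDX_FIXED;
		u64 ctrl_val, mask;

		/* Clear this counter's 4-bit field in FIXED_CTR_CTRL. */
		mask = 0xfULL << (idx * 4);
		rdmsrl(hwc->config_base, ctrl_val);
		ctrl_val &= ~mask;
		wrmsrl(hwc->config_base, ctrl_val);
	}

with intel_pmu_disable_event() calling get_reg_idx() once and handing
the result down, so both functions use the same name for the same
thing.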

> @@ -2157,9 +2158,16 @@ static void intel_pmu_disable_event(struct perf_event *event)
>               return;
>       }
>  
> -     cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
> -     cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
> -     cpuc->intel_cp_status &= ~(1ull << hwc->idx);
> +     /*
> +      * When any other topdown events are still enabled,
> +      * cancel the disabling.
> +      */
> +     if (has_other_topdown_event(cpuc->active_mask, hwc->idx))
> +             return;

And this adds a third instance of that check :/ Also, this check really
wants to live in intel_pmu_disable_fixed().
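
That is, building on the sketch above, something like (again untested;
has_other_topdown_event() is the helper from this series):

	static void intel_pmu_disable_fixed(struct hw_perf_event *hwc, int reg_idx)
	{
		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

		/*
		 * The fixed counter backing the topdown metrics is shared;
		 * only clear its control field once the last topdown event
		 * goes away.
		 */
		if (has_other_topdown_event(cpuc->active_mask, hwc->idx))
			return;

		/* ... clear the FIXED_CTR_CTRL field as above ... */
	}

Then intel_pmu_disable_event() doesn't need to know about topdown at
all (what to do about the intel_ctrl_*_mask bits on the early return
still needs a think).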

> +
> +     cpuc->intel_ctrl_guest_mask &= ~(1ull << reg_idx);
> +     cpuc->intel_ctrl_host_mask &= ~(1ull << reg_idx);
> +     cpuc->intel_cp_status &= ~(1ull << reg_idx);
>  
>       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
>               intel_pmu_disable_fixed(hwc);

The same goes for the enable path.
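
E.g. (same disclaimer, untested; whether the early return is the right
semantics on enable is an assumption on my part):

	static void intel_pmu_enable_fixed(struct perf_event *event)
	{
		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
		struct hw_perf_event *hwc = &event->hw;

		/*
		 * If another topdown event already programmed the shared
		 * fixed counter, there is nothing left to do here.
		 */
		if (has_other_topdown_event(cpuc->active_mask, hwc->idx))
			return;

		/* ... existing fixed counter programming ... */
	}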

Let me clean up this mess for you.
