On Fri, Aug 21, 2020 at 12:57:54PM -0700, kan.li...@linux.intel.com wrote:
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index 0f3d01562ded..fa08d810dcd2 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -1440,7 +1440,10 @@ static void x86_pmu_start(struct perf_event *event, int flags)
>  
>       cpuc->events[idx] = event;
>       __set_bit(idx, cpuc->active_mask);
> -     __set_bit(idx, cpuc->running);
> +     /* The cpuc->running is only used by the P4 PMU */
> +     if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON) &&
> +         (boot_cpu_data.x86 == 0xf))
> +             __set_bit(idx, cpuc->running);
>       x86_pmu.enable(event);
>       perf_event_update_userpage(event);
>  }

Yuck! Use a static_branch() or something. This is a gnarly nest of code
that runs 99.9% of the time only to conclude a negative. IOW, a complete
waste of cycles for everybody not running a P4 space heater.
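
Untested, and the key name is invented here, but something like the below
would keep the common path to a single patched-out NOP:

        #include <linux/jump_label.h>

        /* hypothetical key; flipped exactly once from p4_pmu_init() */
        DEFINE_STATIC_KEY_FALSE(perf_is_p4);

        /* in p4_pmu_init(): */
        static_branch_enable(&perf_is_p4);

        /* in x86_pmu_start(): this is a NOP until the key is enabled */
        if (static_branch_unlikely(&perf_is_p4))
                __set_bit(idx, cpuc->running);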

> @@ -1544,6 +1547,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
>       if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
>               goto do_del;
>  
> +     if (READ_ONCE(x86_pmu.attr_rdpmc) && x86_pmu.sched_task &&
> +         test_bit(event->hw.idx, cpuc->active_mask))
> +             __set_bit(event->hw.idx, cpuc->dirty);

And that too seems like an overly complicated set of tests and branches;
the whole condition should be effectively true in the 99% common case.
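
My first guess at a simplification (a sketch, not tested): keep only the
test that guards against an event that was never scheduled in, since a
stale dirty bit is harmless when nothing ever looks at the bitmap outside
the rdpmc paths:

        /* hw.idx is invalid if the event never got a counter */
        if (test_bit(event->hw.idx, cpuc->active_mask))
                __set_bit(event->hw.idx, cpuc->dirty);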

> @@ -2219,11 +2225,45 @@ static int x86_pmu_event_init(struct perf_event *event)
>       return err;
>  }
>  
> +void x86_pmu_clear_dirty_counters(void)
> +{
> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +     int i;
> +
> +     if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
> +             return;
> +
> +     /* Don't need to clear the assigned counter. */
> +     for (i = 0; i < cpuc->n_events; i++)
> +             __clear_bit(cpuc->assign[i], cpuc->dirty);
> +
> +     for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
> +             /* Metrics events don't have corresponding HW counters. */
> +             if (is_metric_idx(i))
> +                     continue;
> +             else if (i >= INTEL_PMC_IDX_FIXED)
> +             wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
> +             else
> +                     wrmsrl(x86_pmu_event_addr(i), 0);
> +     }
> +
> +     bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
> +}

The bitmap_{empty,zero}() calls do indeed compile into single
instructions, neat!
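
For reference, that works because X86_PMC_IDX_MAX is 64, so the bitmap is
a single unsigned long and the small_const_nbits() fast path applies.
Roughly, paraphrasing include/linux/bitmap.h:

        #define small_const_nbits(nbits) \
                (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)

        static inline bool bitmap_empty(const unsigned long *src, unsigned int nbits)
        {
                if (small_const_nbits(nbits))   /* 64 bits: a single word test */
                        return !(*src & BITMAP_LAST_WORD_MASK(nbits));
                return find_first_bit(src, nbits) == nbits;
        }

bitmap_zero() is a memset() over BITS_TO_LONGS(nbits) longs, which for a
single long the compiler folds into one 8-byte store.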

>  static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
>  {
>       if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
>               return;
>  
> +     /*
> +      * Enable sched_task() for the RDPMC task,
> +      * and clear the existing dirty counters.
> +      */
> +     if (x86_pmu.sched_task && event->hw.target && !is_sampling_event(event)) {
> +             perf_sched_cb_inc(event->ctx->pmu);
> +             x86_pmu_clear_dirty_counters();
> +     }

I'm failing to see the correctness of the !is_sampling_event() part
there.

>       /*
>        * This function relies on not being called concurrently in two
>        * tasks in the same mm.  Otherwise one task could observe
> @@ -2246,6 +2286,9 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m
>       if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
>               return;
>  
> +     if (x86_pmu.sched_task && event->hw.target && !is_sampling_event(event))
> +             perf_sched_cb_dec(event->ctx->pmu);
> +

Idem.

>       if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
>               on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
>  }
> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
> index c72e4904e056..e67713bfa33a 100644
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -4166,11 +4166,39 @@ static void intel_pmu_cpu_dead(int cpu)
>       intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
>  }
>  
> +static void intel_pmu_rdpmc_sched_task(struct perf_event_context *ctx)
> +{
> +     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +     struct perf_event *event;
> +
> +     if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
> +             return;
> +
> +     /*
> +      * If the new task has the RDPMC enabled, clear the dirty counters to
> +      * prevent the potential leak. If the new task doesn't have the RDPMC
> +      * enabled, do nothing.
> +      */
> +     list_for_each_entry(event, &ctx->event_list, event_entry) {
> +             if (event->hw.target &&
> +                 (event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED) &&
> +                 !is_sampling_event(event) &&
> +                 atomic_read(&event->mmap_count))
> +                     break;
> +     }
> +     if (&event->event_entry == &ctx->event_list)
> +             return;

That's horrific, what's wrong with something like:

        if (!atomic_read(&current->mm->context.perf_rdpmc_allowed))
                return;

x86_pmu_event_mapped() already counts rdpmc users per mm, so a single
atomic_read() on the incoming task tells you everything that list walk
does.

> +
> +     x86_pmu_clear_dirty_counters();
> +}

How is this Intel-specific code? IIRC AMD has RDPMC too.
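
If the list walk goes away as suggested above, nothing that remains is
Intel-only, and a common version could live in arch/x86/events/core.c.
Untested sketch:

        /* hypothetical common helper in arch/x86/events/core.c */
        static void x86_pmu_rdpmc_sched_task(struct perf_event_context *ctx)
        {
                /* kernel threads have no mm, hence nothing mapped to leak */
                if (!current->mm ||
                    !atomic_read(&current->mm->context.perf_rdpmc_allowed))
                        return;

                x86_pmu_clear_dirty_counters();
        }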
