On 4/16/2021 12:45 PM, Peter Zijlstra wrote:
> On Fri, Apr 16, 2021 at 03:01:48PM -0000, tip-bot2 for Kan Liang wrote:
>> @@ -2331,6 +2367,9 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m
>>         if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
>>                 return;
>>
>> +       if (x86_pmu.sched_task && event->hw.target)
>> +               perf_sched_cb_dec(event->ctx->pmu);
>> +
>>         if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
>>                 on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
>>  }
>
> 'perf test' on a kernel with CONFIG_DEBUG_PREEMPT=y gives:
>
> [  244.439538] BUG: using smp_processor_id() in preemptible [00000000] code: perf/1771

If it's a preemptible context, I think we should disable interrupts and preemption to protect the sched_cb_list.

It seems we don't need perf_ctx_lock() here. I don't think this area is touched in NMI context, so disabling interrupts should be good enough to protect the cpuctx.
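
For reference, the per-CPU accesses that trip the DEBUG_PREEMPT check look roughly like this (a paraphrase of perf_sched_cb_inc() in kernel/events/core.c around that time, not the exact code):

void perf_sched_cb_inc(struct pmu *pmu)
{
        /*
         * this_cpu_ptr() resolves through smp_processor_id();
         * CONFIG_DEBUG_PREEMPT complains when the caller is still
         * preemptible and could migrate to another CPU.
         */
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

        if (!cpuctx->sched_cb_usage++)
                list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
}

With interrupts disabled we cannot be preempted or migrated, so both the cpuctx lookup and the sched_cb_list update stay on one CPU.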

How about the patch below?

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index e34eb72..45630beed 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2333,6 +2333,8 @@ static void x86_pmu_clear_dirty_counters(void)

 static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 {
+       unsigned long flags;
+
        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return;

@@ -2341,8 +2343,10 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
         * and clear the existing dirty counters.
         */
        if (x86_pmu.sched_task && event->hw.target) {
+               local_irq_save(flags);
                perf_sched_cb_inc(event->ctx->pmu);
                x86_pmu_clear_dirty_counters();
+               local_irq_restore(flags);
        }

        /*
@@ -2363,12 +2367,16 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)

 static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
 {
+       unsigned long flags;
+
        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return;

-       if (x86_pmu.sched_task && event->hw.target)
+       if (x86_pmu.sched_task && event->hw.target) {
+               local_irq_save(flags);
                perf_sched_cb_dec(event->ctx->pmu);
+               local_irq_restore(flags);
+       }

        if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
                on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
 }

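(For the unmapped side, a plain preempt_disable()/preempt_enable() pair around perf_sched_cb_dec() would presumably also silence the warning, e.g. something like:

        if (x86_pmu.sched_task && event->hw.target) {
                preempt_disable();
                perf_sched_cb_dec(event->ctx->pmu);
                preempt_enable();
        }

but local_irq_save() is strictly stronger -- it rules out preemption as well -- and matches the "interrupts and preemption" reasoning above, so the patch uses that.)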
Thanks,
Kan
