From: Peter Zijlstra <pet...@infradead.org>

Avoid allocating the AMD NB event constraints data structure when it is
not needed. This gets rid of the x86_max_cores usage and avoids the
allocation on hardware that supports AMD Core Perfctrs (which has
separate MSRs for NB events).
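
For context, the resulting gate in the CPU hotplug callbacks looks
roughly like this, condensed from the hunks below (the allocation
failure path is paraphrased, not part of this diff):

	static int amd_pmu_cpu_prepare(int cpu)
	{
		struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

		WARN_ON_ONCE(cpuc->amd_nb);

		/* Skip the NB allocation entirely when NB constraints don't apply */
		if (!x86_pmu.amd_nb_constraints)
			return NOTIFY_OK;

		cpuc->amd_nb = amd_alloc_nb(cpu);
		if (!cpuc->amd_nb)
			return NOTIFY_BAD;

		return NOTIFY_OK;
	}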

Cc: Rui Huang <ray.hu...@amd.com>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Borislav Petkov <b...@alien8.de>
Cc: aherrm...@suse.com
Cc: jencce.ker...@gmail.com
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Link: http://lkml.kernel.org/r/20160320124629.gy6...@twins.programming.kicks-ass.net
Signed-off-by: Borislav Petkov <b...@suse.de>
---
 arch/x86/events/amd/core.c   | 21 ++++++++++++++++++---
 arch/x86/events/perf_event.h |  5 +++++
 2 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 049ada8d4e9c..86a9bec18dab 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -369,7 +369,7 @@ static int amd_pmu_cpu_prepare(int cpu)
 
        WARN_ON_ONCE(cpuc->amd_nb);
 
-       if (boot_cpu_data.x86_max_cores < 2)
+       if (!x86_pmu.amd_nb_constraints)
                return NOTIFY_OK;
 
        cpuc->amd_nb = amd_alloc_nb(cpu);
@@ -388,7 +388,7 @@ static void amd_pmu_cpu_starting(int cpu)
 
        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
-       if (boot_cpu_data.x86_max_cores < 2)
+       if (!x86_pmu.amd_nb_constraints)
                return;
 
        nb_id = amd_get_nb_id(cpu);
@@ -414,7 +414,7 @@ static void amd_pmu_cpu_dead(int cpu)
 {
        struct cpu_hw_events *cpuhw;
 
-       if (boot_cpu_data.x86_max_cores < 2)
+       if (!x86_pmu.amd_nb_constraints)
                return;
 
        cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -648,6 +648,8 @@ static __initconst const struct x86_pmu amd_pmu = {
        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
+
+       .amd_nb_constraints     = 1,
 };
 
 static int __init amd_core_pmu_init(void)
@@ -674,6 +676,11 @@ static int __init amd_core_pmu_init(void)
        x86_pmu.eventsel        = MSR_F15H_PERF_CTL;
        x86_pmu.perfctr         = MSR_F15H_PERF_CTR;
        x86_pmu.num_counters    = AMD64_NUM_COUNTERS_CORE;
+       /*
+        * AMD Core perfctr has separate MSRs for the NB events, see
+        * the amd/uncore.c driver.
+        */
+       x86_pmu.amd_nb_constraints = 0;
 
        pr_cont("core perfctr, ");
        return 0;
@@ -693,6 +700,14 @@ __init int amd_pmu_init(void)
        if (ret)
                return ret;
 
+       if (num_possible_cpus() == 1) {
+               /*
+                * No point in allocating data structures to serialize
+                * against other CPUs, when there is only the one CPU.
+                */
+               x86_pmu.amd_nb_constraints = 0;
+       }
+
        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ba6ef18528c9..716d0482f5db 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -608,6 +608,11 @@ struct x86_pmu {
        atomic_t        lbr_exclusive[x86_lbr_exclusive_max];
 
        /*
+        * AMD bits
+        */
+       unsigned int    amd_nb_constraints : 1;
+
+       /*
         * Extra registers for events
         */
        struct extra_reg *extra_regs;
-- 
2.7.3
