The following commit has been merged into the perf/core branch of tip:

Commit-ID:     bc14fe1beeec1d80ee39f03019c10e130c8d376b
Gitweb:        https://git.kernel.org/tip/bc14fe1beeec1d80ee39f03019c10e130c8d376b
Author:        Kan Liang <kan.li...@linux.intel.com>
AuthorDate:    Mon, 12 Apr 2021 07:30:52 -07:00
Committer:     Peter Zijlstra <pet...@infradead.org>
CommitterDate: Mon, 19 Apr 2021 20:03:26 +02:00

perf/x86/intel: Factor out intel_pmu_check_event_constraints

Each hybrid PMU has to check and update its own event constraints before
registration.

The intel_pmu_check_event_constraints() function will be reused later to
check the event constraints of each hybrid PMU.

Signed-off-by: Kan Liang <kan.li...@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Reviewed-by: Andi Kleen <a...@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-13-git-send-email-kan.li...@linux.intel.com
---
 arch/x86/events/intel/core.c | 82 ++++++++++++++++++++---------------
 1 file changed, 47 insertions(+), 35 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index d7e2021..5c5f330 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5084,6 +5084,49 @@ static void intel_pmu_check_num_counters(int *num_counters,
        *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
 }
 
+static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
+                                             int num_counters,
+                                             int num_counters_fixed,
+                                             u64 intel_ctrl)
+{
+       struct event_constraint *c;
+
+       if (!event_constraints)
+               return;
+
+       /*
+        * event on fixed counter2 (REF_CYCLES) only works on this
+        * counter, so do not extend mask to generic counters
+        */
+       for_each_event_constraint(c, event_constraints) {
+               /*
+                * Don't extend the topdown slots and metrics
+                * events to the generic counters.
+                */
+               if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
+                       /*
+                        * Disable topdown slots and metrics events,
+                        * if slots event is not in CPUID.
+                        */
+                       if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
+                               c->idxmsk64 = 0;
+                       c->weight = hweight64(c->idxmsk64);
+                       continue;
+               }
+
+               if (c->cmask == FIXED_EVENT_FLAGS) {
+                       /* Disabled fixed counters which are not in CPUID */
+                       c->idxmsk64 &= intel_ctrl;
+
+                       if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
+                               c->idxmsk64 |= (1ULL << num_counters) - 1;
+               }
+               c->idxmsk64 &=
+                       ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
+               c->weight = hweight64(c->idxmsk64);
+       }
+}
+
 __init int intel_pmu_init(void)
 {
        struct attribute **extra_skl_attr = &empty_attrs;
@@ -5094,7 +5137,6 @@ __init int intel_pmu_init(void)
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
-       struct event_constraint *c;
        unsigned int fixed_mask;
        struct extra_reg *er;
        bool pmem = false;
@@ -5732,40 +5774,10 @@ __init int intel_pmu_init(void)
        if (x86_pmu.intel_cap.anythread_deprecated)
                x86_pmu.format_attrs = intel_arch_formats_attr;
 
-       if (x86_pmu.event_constraints) {
-               /*
-                * event on fixed counter2 (REF_CYCLES) only works on this
-                * counter, so do not extend mask to generic counters
-                */
-               for_each_event_constraint(c, x86_pmu.event_constraints) {
-                       /*
-                        * Don't extend the topdown slots and metrics
-                        * events to the generic counters.
-                        */
-                       if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
-                               /*
-                                * Disable topdown slots and metrics events,
-                                * if slots event is not in CPUID.
-                                */
-                               if (!(INTEL_PMC_MSK_FIXED_SLOTS & x86_pmu.intel_ctrl))
-                                       c->idxmsk64 = 0;
-                               c->weight = hweight64(c->idxmsk64);
-                               continue;
-                       }
-
-                       if (c->cmask == FIXED_EVENT_FLAGS) {
-                               /* Disabled fixed counters which are not in CPUID */
-                               c->idxmsk64 &= x86_pmu.intel_ctrl;
-
-                               if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
-                                       c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-                       }
-                       c->idxmsk64 &=
-                               ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
-                       c->weight = hweight64(c->idxmsk64);
-               }
-       }
-
+       intel_pmu_check_event_constraints(x86_pmu.event_constraints,
+                                         x86_pmu.num_counters,
+                                         x86_pmu.num_counters_fixed,
+                                         x86_pmu.intel_ctrl);
        /*
         * Access LBR MSR may cause #GP under certain circumstances.
         * E.g. KVM doesn't support LBR MSR

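For anyone following the index-mask arithmetic in intel_pmu_check_event_constraints(),
below is a small standalone sketch (plain user-space C, not kernel code) of the same
math. The counter counts are made-up example values, the constraint starts on fixed
counter 0 purely for illustration, and INTEL_PMC_IDX_FIXED is redefined locally with
its kernel value of 32:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Fixed counters start at bit 32 of the constraint index mask. */
#define INTEL_PMC_IDX_FIXED 32

int main(void)
{
        int num_counters       = 8;    /* example: 8 generic counters from CPUID */
        int num_counters_fixed = 4;    /* example: 4 fixed counters from CPUID */

        /* A constraint that initially allows only fixed counter 0 (illustrative). */
        uint64_t idxmsk64 = 1ULL << (INTEL_PMC_IDX_FIXED + 0);

        /* Extend the constraint to every generic counter (bits 0..num_counters-1). */
        idxmsk64 |= (1ULL << num_counters) - 1;

        /* Drop any fixed-counter bits beyond what this CPU actually advertises. */
        idxmsk64 &= ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));

        /* Prints 0x00000001000000ff: fixed counter 0 plus generic counters 0-7. */
        printf("constraint mask: 0x%016" PRIx64 "\n", idxmsk64);
        return 0;
}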
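The changelog also notes that the helper will be reused to check the event
constraints of each hybrid PMU. A minimal sketch of what such a caller could look
like is below; struct x86_hybrid_pmu and the x86_pmu.hybrid_pmu[] /
x86_pmu.num_hybrid_pmus fields are assumed names for illustration and are not
introduced by this patch:

/*
 * Hypothetical caller, for illustration only: the hybrid data structures
 * referenced here are assumptions, not part of this commit.
 */
static void intel_pmu_check_hybrid_pmus(void)
{
        struct x86_hybrid_pmu *pmu;
        int i;

        for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
                pmu = &x86_pmu.hybrid_pmu[i];

                /* Each hybrid PMU validates its own constraint table. */
                intel_pmu_check_event_constraints(pmu->event_constraints,
                                                  pmu->num_counters,
                                                  pmu->num_counters_fixed,
                                                  pmu->intel_ctrl);
        }
}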