Now that cpuc->event_constraint[] is retained, we can avoid calling
get_event_constraints() over and over again.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/events/core.c       |   25 +++++++++++++++++++++----
 arch/x86/events/intel/core.c |    3 ++-
 2 files changed, 23 insertions(+), 5 deletions(-)

--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -844,6 +844,12 @@ int perf_assign_events(struct event_cons
 }
 EXPORT_SYMBOL_GPL(perf_assign_events);
 
+static inline bool is_ht_workaround_active(struct cpu_hw_events *cpuc)
+{
+       return is_ht_workaround_enabled() && !cpuc->is_fake &&
+              READ_ONCE(cpuc->excl_cntrs->exclusive_present);
+}
+
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 {
        struct event_constraint *c;
@@ -858,8 +864,20 @@ int x86_schedule_events(struct cpu_hw_ev
                x86_pmu.start_scheduling(cpuc);
 
        for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
-               c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
-               cpuc->event_constraint[i] = c;
+               c = cpuc->event_constraint[i];
+
+               /*
+                * Request constraints for new events; or for those events that
+                * have a dynamic constraint due to the HT workaround -- for
+                * those the constraint can change due to scheduling activity
+                * on the other sibling.
+                */
+               if (!c || ((c->flags & PERF_X86_EVENT_DYNAMIC) &&
+                          is_ht_workaround_active(cpuc))) {
+
+                       c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
+                       cpuc->event_constraint[i] = c;
+               }
 
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
@@ -903,8 +921,7 @@ int x86_schedule_events(struct cpu_hw_ev
                 * N/2 counters can be used. This helps with events with
                 * specific counter constraints.
                 */
-               if (is_ht_workaround_enabled() && !cpuc->is_fake &&
-                   READ_ONCE(cpuc->excl_cntrs->exclusive_present))
+               if (is_ht_workaround_active(cpuc))
                        gpmax /= 2;
 
                unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2945,7 +2945,8 @@ intel_get_event_constraints(struct cpu_h
         * - dynamic constraint: handled by intel_get_excl_constraints()
         */
        c2 = __intel_get_event_constraints(cpuc, idx, event);
-       if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
+       if (c1) {
+               WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
                bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
                c1->weight = c2->weight;
                c2 = c1;


Reply via email to