The new version of Intel's CQM uses an RMID hierarchy to avoid conflicts
between CPU, cgroup and task events, making it unnecessary to check for
and resolve conflicts between events of different types (i.e. cgroup vs.
task).
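
For reference, the prohibition matrix that the removed __conflict_event()
enforced reduces to the sketch below. This is a minimal userspace
illustration, not kernel code: the enum and helper names are hypothetical,
and the in-kernel cgroup_is_descendant() subtree walk is folded into a
single boolean parameter.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical scopes mirroring the matrix documented in the
	 * removed __conflict_event(). */
	enum cqm_scope { SCOPE_SYSTEM_WIDE, SCOPE_CGROUP, SCOPE_TASK };

	/*
	 * True when two events of the given scopes could not share the
	 * flat RMID space. Cgroup/cgroup and cgroup/task pairs conflict
	 * only when one monitored cgroup subtree contains the other;
	 * that hierarchy check is reduced here to @subtrees_overlap.
	 */
	static bool scopes_conflict(enum cqm_scope a, enum cqm_scope b,
				    bool subtrees_overlap)
	{
		/* A system-wide event conflicts with everything else. */
		if (a == SCOPE_SYSTEM_WIDE || b == SCOPE_SYSTEM_WIDE)
			return true;

		/* Two plain task events never conflict. */
		if (a == SCOPE_TASK && b == SCOPE_TASK)
			return false;

		/* Remaining pairs conflict only on cgroup overlap. */
		return subtrees_overlap;
	}

	int main(void)
	{
		/* cgroup vs. task outside that cgroup: no conflict (0). */
		printf("%d\n", scopes_conflict(SCOPE_CGROUP, SCOPE_TASK, false));
		/* cgroup vs. task inside that cgroup: conflict (1). */
		printf("%d\n", scopes_conflict(SCOPE_CGROUP, SCOPE_TASK, true));
		return 0;
	}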

Reviewed-by: Stephane Eranian <eran...@google.com>
Signed-off-by: David Carrillo-Cisneros <davi...@google.com>
---
 arch/x86/events/intel/cqm.c | 148 --------------------------------------------
 1 file changed, 148 deletions(-)

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 1b064c4..a3fde49 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -304,92 +304,6 @@ static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 }
 #endif
 
-/*
- * Determine if @a's tasks intersect with @b's tasks
- *
- * There are combinations of events that we explicitly prohibit,
- *
- *                PROHIBITS
- *     system-wide    ->       cgroup and task
- *     cgroup        ->        system-wide
- *                           ->        task in cgroup
- *     task          ->        system-wide
- *                           ->        task in cgroup
- *
- * Call this function before allocating an RMID.
- */
-static bool __conflict_event(struct perf_event *a, struct perf_event *b)
-{
-#ifdef CONFIG_CGROUP_PERF
-       /*
-        * We can have any number of cgroups but only one system-wide
-        * event at a time.
-        */
-       if (a->cgrp && b->cgrp) {
-               struct perf_cgroup *ac = a->cgrp;
-               struct perf_cgroup *bc = b->cgrp;
-
-               /*
-                * This condition should have been caught in
-                * __match_event() and we should be sharing an RMID.
-                */
-               WARN_ON_ONCE(ac == bc);
-
-               if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
-                   cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
-                       return true;
-
-               return false;
-       }
-
-       if (a->cgrp || b->cgrp) {
-               struct perf_cgroup *ac, *bc;
-
-               /*
-                * cgroup and system-wide events are mutually exclusive
-                */
-               if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) ||
-                   (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK)))
-                       return true;
-
-               /*
-                * Ensure neither event is part of the other's cgroup
-                */
-               ac = event_to_cgroup(a);
-               bc = event_to_cgroup(b);
-               if (ac == bc)
-                       return true;
-
-               /*
-                * Must have cgroup and non-intersecting task events.
-                */
-               if (!ac || !bc)
-                       return false;
-
-               /*
-                * We have cgroup and task events, and the task belongs
-                * to a cgroup. Check for overlap.
-                */
-               if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
-                   cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
-                       return true;
-
-               return false;
-       }
-#endif
-       /*
-        * If one of them is not a task, same story as above with cgroups.
-        */
-       if (!(a->attach_state & PERF_ATTACH_TASK) ||
-           !(b->attach_state & PERF_ATTACH_TASK))
-               return true;
-
-       /*
-        * Must be non-overlapping.
-        */
-       return false;
-}
-
 struct rmid_read {
        u32 rmid;
        atomic64_t value;
@@ -465,10 +379,6 @@ static void intel_cqm_stable(void *arg)
        }
 }
 
-/*
- * If we have group events waiting for an RMID that don't conflict with
- * events already running, assign @rmid.
- */
 static bool intel_cqm_sched_in_event(u32 rmid)
 {
        struct perf_event *leader, *event;
@@ -484,9 +394,6 @@ static bool intel_cqm_sched_in_event(u32 rmid)
                if (__rmid_valid(event->hw.cqm_rmid))
                        continue;
 
-               if (__conflict_event(event, leader))
-                       continue;
-
                intel_cqm_xchg_rmid(event, rmid);
                return true;
        }
@@ -592,10 +499,6 @@ static bool intel_cqm_rmid_stabilize(unsigned int *available)
                        continue;
                }
 
-               /*
-                * If we have groups waiting for RMIDs, hand
-                * them one now provided they don't conflict.
-                */
                if (intel_cqm_sched_in_event(entry->rmid))
                        continue;
 
@@ -638,46 +541,8 @@ static void __intel_cqm_pick_and_rotate(struct perf_event *next)
 }
 
 /*
- * Deallocate the RMIDs from any events that conflict with @event, and
- * place them on the back of the group list.
- */
-static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
-{
-       struct perf_event *group, *g;
-       u32 rmid;
-
-       lockdep_assert_held(&cache_mutex);
-
-       list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
-               if (group == event)
-                       continue;
-
-               rmid = group->hw.cqm_rmid;
-
-               /*
-                * Skip events that don't have a valid RMID.
-                */
-               if (!__rmid_valid(rmid))
-                       continue;
-
-               /*
-                * No conflict? No problem! Leave the event alone.
-                */
-               if (!__conflict_event(group, event))
-                       continue;
-
-               intel_cqm_xchg_rmid(group, INVALID_RMID);
-               __put_rmid(rmid);
-       }
-}
-
-/*
  * Attempt to rotate the groups and assign new RMIDs.
  *
- * We rotate for two reasons,
- *   1. To handle the scheduling of conflicting events
- *   2. To recycle RMIDs
- *
  * Rotating RMIDs is complicated because the hardware doesn't give us
  * any clues.
  *
@@ -732,10 +597,6 @@ again:
                goto stabilize;
 
        /*
-        * We have more event groups without RMIDs than available RMIDs,
-        * or we have event groups that conflict with the ones currently
-        * scheduled.
-        *
         * We force deallocate the rmid of the group at the head of
         * cache_groups. The first event group without an RMID then gets
         * assigned intel_cqm_rotation_rmid. This ensures we always make
@@ -754,8 +615,6 @@ again:
                intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
                intel_cqm_rotation_rmid = __get_rmid();
 
-               intel_cqm_sched_out_conflicting_events(start);
-
                if (__intel_cqm_threshold)
                        __intel_cqm_threshold--;
        }
@@ -858,13 +717,6 @@ static void intel_cqm_setup_event(struct perf_event *event,
                        *group = iter;
                        return;
                }
-
-               /*
-                * We only care about conflicts for events that are
-                * actually scheduled in (and hence have a valid RMID).
-                */
-               if (__conflict_event(iter, event) && __rmid_valid(rmid))
-                       conflict = true;
        }
 
        if (conflict)
-- 
2.8.0.rc3.226.g39d4020
