CMT events use monrs to represent their monitoring targets. Events
that share a monitoring target (same thread or cgroup) share a monr.

Introduce monrs and add support for monr creation/destruction.

An event's associated monr is referenced by event->hw.cmt_monr
(introduced in the previous patch).

monr->mon_events references the first event that uses the monr; events
that share the monr are appended to the first event's cmt_list list
head.
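
For illustration, every event that shares a monr can be visited as in
the sketch below (cmt_visit_event() is a made-up callback used only for
this example, not part of this patch):

	struct perf_event *tmp = monr->mon_events;

	if (tmp) {
		/* The head event first... */
		cmt_visit_event(tmp);
		/* ...then the events chained on the head's cmt_list. */
		list_for_each_entry(tmp, &monr->mon_events->hw.cmt_list,
				    hw.cmt_list)
			cmt_visit_event(tmp);
	}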

Hold all pkgd->mutex locks to modify monr->mon_events and an event's
monr data.
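
That is, writers follow the pattern used by intel_cmt_event_destroy()
below:

	mutex_lock(&cmt_mutex);
	monr_hrchy_acquire_mutexes();
	/* Safe to modify monr->mon_events and event->hw.cmt_monr here. */
	monr_hrchy_release_mutexes();
	mutex_unlock(&cmt_mutex);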

Support for CPU and cgroup events is added in later patches in this
series.

More details in the code's comments.

Signed-off-by: David Carrillo-Cisneros <[email protected]>
---
 arch/x86/events/intel/cmt.c | 228 ++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/events/intel/cmt.h |  20 ++++
 2 files changed, 248 insertions(+)

diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index 0a24896..23606a7 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -19,6 +19,8 @@ static struct lock_class_key  lock_keys[CMT_MAX_NR_PKGS];
 #endif
 
 static DEFINE_MUTEX(cmt_mutex);
+/* List of monrs that are associated with an event. */
+static LIST_HEAD(cmt_event_monrs);
 
 static unsigned int cmt_l3_scale;      /* cmt hw units to bytes. */
 
@@ -49,8 +51,210 @@ static struct pkg_data *cmt_pkgs_data_next_rcu(struct pkg_data *pkgd)
        return pkgd;
 }
 
+/*
+ * Functions to lock/unlock/assert all per-package mutexes/locks at once.
+ */
+
+static void monr_hrchy_acquire_mutexes(void)
+{
+       struct pkg_data *pkgd = NULL;
+
+       /* RCU protected by cmt_mutex. */
+       while ((pkgd = cmt_pkgs_data_next_rcu(pkgd)))
+               mutex_lock(&pkgd->mutex);
+}
+
+static void monr_hrchy_release_mutexes(void)
+{
+       struct pkg_data *pkgd = NULL;
+
+       /* RCU protected by cmt_mutex. */
+       while ((pkgd = cmt_pkgs_data_next_rcu(pkgd)))
+               mutex_unlock(&pkgd->mutex);
+}
+
+static void monr_hrchy_assert_held_mutexes(void)
+{
+       struct pkg_data *pkgd = NULL;
+
+       /* RCU protected by cmt_mutex. */
+       while ((pkgd = cmt_pkgs_data_next_rcu(pkgd)))
+               lockdep_assert_held(&pkgd->mutex);
+}
+
+static void monr_dealloc(struct monr *monr)
+{
+       kfree(monr);
+}
+
+static struct monr *monr_alloc(void)
+{
+       struct monr *monr;
+
+       lockdep_assert_held(&cmt_mutex);
+
+       monr = kzalloc(sizeof(*monr), GFP_KERNEL);
+       if (!monr)
+               return ERR_PTR(-ENOMEM);
+
+       return monr;
+}
+
+static inline struct monr *monr_from_event(struct perf_event *event)
+{
+       return (struct monr *) READ_ONCE(event->hw.cmt_monr);
+}
+
+static struct monr *monr_remove_event(struct perf_event *event)
+{
+       struct monr *monr = monr_from_event(event);
+
+       lockdep_assert_held(&cmt_mutex);
+       monr_hrchy_assert_held_mutexes();
+
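+       /*
+        * If @event is the only event using the monr (the head's
+        * cmt_list is empty), the monr becomes unused: clear mon_events
+        * and unlink the monr from cmt_event_monrs. Otherwise unlink
+        * only @event, promoting the next event on cmt_list to head if
+        * @event was the head.
+        */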
+       if (list_empty(&monr->mon_events->hw.cmt_list)) {
+               monr->mon_events = NULL;
+               /* remove from cmt_event_monrs */
+               list_del_init(&monr->entry);
+       } else {
+               if (monr->mon_events == event)
+                       monr->mon_events = list_next_entry(event, hw.cmt_list);
+               list_del_init(&event->hw.cmt_list);
+       }
+
+       WRITE_ONCE(event->hw.cmt_monr, NULL);
+
+       return monr;
+}
+
+static int monr_append_event(struct monr *monr, struct perf_event *event)
+{
+       lockdep_assert_held(&cmt_mutex);
+       monr_hrchy_assert_held_mutexes();
+
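+       /*
+        * A monr with events keeps them chained on the head event's
+        * cmt_list; an empty monr takes @event as its head and becomes
+        * visible through cmt_event_monrs.
+        */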
+       if (monr->mon_events) {
+               list_add_tail(&event->hw.cmt_list,
+                             &monr->mon_events->hw.cmt_list);
+       } else {
+               monr->mon_events = event;
+               list_add_tail(&monr->entry, &cmt_event_monrs);
+       }
+
+       WRITE_ONCE(event->hw.cmt_monr, monr);
+
+       return 0;
+}
+
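+/*
+ * Cgroup and cpu events are not supported yet; the stubs below are
+ * filled in by later patches in this series.
+ */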
+static bool is_cgroup_event(struct perf_event *event)
+{
+       return false;
+}
+
+static int monr_hrchy_attach_cgroup_event(struct perf_event *event)
+{
+       return -EPERM;
+}
+
+static int monr_hrchy_attach_cpu_event(struct perf_event *event)
+{
+       return -EPERM;
+}
+
+static int monr_hrchy_attach_task_event(struct perf_event *event)
+{
+       struct monr *monr;
+       int err;
+
+       monr = monr_alloc();
+       if (IS_ERR(monr))
+               return PTR_ERR(monr);
+
+       err = monr_append_event(monr, event);
+       if (err)
+               monr_dealloc(monr);
+       return err;
+}
+
+/* Insert or create monr in appropriate position in hierarchy. */
+static int monr_hrchy_attach_event(struct perf_event *event)
+{
+       int err = 0;
+
+       lockdep_assert_held(&cmt_mutex);
+       monr_hrchy_acquire_mutexes();
+
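+       /* Dispatch on the event's type: plain cpu, cgroup, or task. */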
+       if (!is_cgroup_event(event) &&
+           !(event->attach_state & PERF_ATTACH_TASK)) {
+               err = monr_hrchy_attach_cpu_event(event);
+               goto exit;
+       }
+       if (is_cgroup_event(event)) {
+               err = monr_hrchy_attach_cgroup_event(event);
+               goto exit;
+       }
+       err = monr_hrchy_attach_task_event(event);
+exit:
+       monr_hrchy_release_mutexes();
+
+       return err;
+}
+
+/**
+ * __match_event() - Determine if @a and @b should share an rmid.
+ */
+static bool __match_event(struct perf_event *a, struct perf_event *b)
+{
+       /* Cgroup/non-task per-cpu and task events don't mix */
+       if ((a->attach_state & PERF_ATTACH_TASK) !=
+           (b->attach_state & PERF_ATTACH_TASK))
+               return false;
+
+#ifdef CONFIG_CGROUP_PERF
+       if (a->cgrp != b->cgrp)
+               return false;
+#endif
+
+       /* If not a task event, it's a cgroup or a non-task cpu event. */
+       if (!(b->attach_state & PERF_ATTACH_TASK))
+               return true;
+
+       /* Events that target same task are placed into the same group. */
+       if (a->hw.target == b->hw.target)
+               return true;
+
+       /* Is @b an event inherited from @a? */
+       if (b->parent == a)
+               return true;
+
+       return false;
+}
+
 static struct pmu intel_cmt_pmu;
 
+/* Try to find a monr with the same target, otherwise create a new one. */
+static int mon_group_setup_event(struct perf_event *event)
+{
+       struct monr *monr;
+       int err;
+
+       lockdep_assert_held(&cmt_mutex);
+
+       list_for_each_entry(monr, &cmt_event_monrs, entry) {
+               if (!__match_event(monr->mon_events, event))
+                       continue;
+               monr_hrchy_acquire_mutexes();
+               err = monr_append_event(monr, event);
+               monr_hrchy_release_mutexes();
+               return err;
+       }
+       /*
+        * Since no match was found, create a new monr and set this
+        * event as the head of a mon_group. All events in this group
+        * will share the monr.
+        */
+       return monr_hrchy_attach_event(event);
+}
+
 static void intel_cmt_event_read(struct perf_event *event)
 {
 }
@@ -68,6 +272,20 @@ static int intel_cmt_event_add(struct perf_event *event, int mode)
        return 0;
 }
 
+static void intel_cmt_event_destroy(struct perf_event *event)
+{
+       struct monr *monr;
+
+       mutex_lock(&cmt_mutex);
+       monr_hrchy_acquire_mutexes();
+
+       /* monr is detached from the event. */
+       monr = monr_remove_event(event);
+
+       monr_hrchy_release_mutexes();
+       mutex_unlock(&cmt_mutex);
+}
+
 static int intel_cmt_event_init(struct perf_event *event)
 {
        int err = 0;
@@ -88,6 +306,16 @@ static int intel_cmt_event_init(struct perf_event *event)
            event->attr.sample_period) /* no sampling */
                return -EINVAL;
 
+       event->destroy = intel_cmt_event_destroy;
+
+       INIT_LIST_HEAD(&event->hw.cmt_list);
+
+       mutex_lock(&cmt_mutex);
+
+       err = mon_group_setup_event(event);
+
+       mutex_unlock(&cmt_mutex);
+
        return err;
 }
 
diff --git a/arch/x86/events/intel/cmt.h b/arch/x86/events/intel/cmt.h
index 55416db..0ce5d4d 100644
--- a/arch/x86/events/intel/cmt.h
+++ b/arch/x86/events/intel/cmt.h
@@ -3,6 +3,12 @@
  * (formerly Intel Cache QoS Monitoring, CQM)
  *
  *
+ * A "Monitored Resource" (monr) is the entity monitored by CMT and MBM.
+ * In order to monitor a cgroup and/or thread, it must be associated
+ * with a monr. A monr is active on a CPU when a thread associated with
+ * it (either directly or through a cgroup) is scheduled on that CPU.
+ *
+ *
  * Locking
  *
  * One global cmt_mutex. One mutex and spin_lock per package.
@@ -35,3 +41,17 @@ struct pkg_data {
        u32                     max_rmid;
        u16                     pkgid;
 };
+
+/**
+ * struct monr - MONitored Resource.
+ * @mon_events:         The head of the group of events that use this
+ *                      monr, if any.
+ * @entry:             List entry into cmt_event_monrs.
+ *
+ * A monr is assigned to every CMT event and/or monitored cgroup when
+ * monitoring is activated, and that instance's address does not change
+ * during the lifetime of the event or cgroup.
+ */
+struct monr {
+       struct perf_event               *mon_events;
+       struct list_head                entry;
+};
-- 
2.8.0.rc3.226.g39d4020
