This patch adds hot cpu support for Intel Cache allocation. Support
includes updating the cache bitmask MSRs IA32_L3_QOS_n when a new CPU
package comes online. The IA32_L3_QOS_n MSRs are one per Class of
service on each CPU package. The new package's MSRs are synchronized
with the values of existing MSRs. Also the software cache for the
IA32_PQR_ASSOC MSR is reset during hot cpu notifications.

Signed-off-by: Vikas Shivappa <vikas.shiva...@linux.intel.com>
---
 arch/x86/kernel/cpu/intel_rdt.c | 95 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 90 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 06cba8da..f151200 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
+#include <linux/cpu.h>
 #include <asm/intel_rdt.h>
 
 /*
@@ -40,6 +41,11 @@ struct static_key __read_mostly rdt_enable_key = 
STATIC_KEY_INIT_FALSE;
  * Mask of CPUs for writing CBM values. We only need one CPU per-socket.
  */
 static cpumask_t rdt_cpumask;
+/*
+ * Temporary cpumask used during hot cpu notification handling. The usage
+ * is serialized by hot cpu locks.
+ */
+static cpumask_t tmp_cpumask;
 
 #define rdt_for_each_child(pos_css, parent_ir)         \
        css_for_each_child((pos_css), &(parent_ir)->css)
@@ -311,13 +317,86 @@ out:
        return err;
 }
 
-static inline void rdt_cpumask_update(int cpu)
+static inline bool rdt_cpumask_update(int cpu)
 {
-       static cpumask_t tmp;
-
-       cpumask_and(&tmp, &rdt_cpumask, topology_core_cpumask(cpu));
-       if (cpumask_empty(&tmp))
+       cpumask_and(&tmp_cpumask, &rdt_cpumask, topology_core_cpumask(cpu));
+       if (cpumask_empty(&tmp_cpumask)) {
                cpumask_set_cpu(cpu, &rdt_cpumask);
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * cbm_update_msrs() - Updates all the existing IA32_L3_MASK_n MSRs
+ * which are one per CLOSid except IA32_L3_MASK_0 on the current package.
+ */
+static void cbm_update_msrs(void *info)
+{
+       int maxid = boot_cpu_data.x86_cache_max_closid;
+       unsigned int i;
+
+       /*
+        * At CPU reset, all bits of IA32_L3_MASK_n are set.
+        * The index starts from one as there is no need
+        * to update IA32_L3_MASK_0 as it belongs to root cgroup
+        * whose cache mask is all 1s always.
+        */
+       for (i = 1; i < maxid; i++) {
+               if (cctable[i].clos_refcnt)
+                       cbm_cpu_update((void *)i);
+       }
+}
+
+static inline void intel_rdt_cpu_start(int cpu)
+{
+       struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
+
+       state->closid = 0;
+       mutex_lock(&rdt_group_mutex);
+       if (rdt_cpumask_update(cpu))
+               smp_call_function_single(cpu, cbm_update_msrs, NULL, 1);
+       mutex_unlock(&rdt_group_mutex);
+}
+
+static void intel_rdt_cpu_exit(unsigned int cpu)
+{
+       int i;
+
+       mutex_lock(&rdt_group_mutex);
+       if (!cpumask_test_and_clear_cpu(cpu, &rdt_cpumask)) {
+               mutex_unlock(&rdt_group_mutex);
+               return;
+       }
+
+       cpumask_and(&tmp_cpumask, topology_core_cpumask(cpu), cpu_online_mask);
+       cpumask_clear_cpu(cpu, &tmp_cpumask);
+       i = cpumask_any(&tmp_cpumask);
+
+       if (i < nr_cpu_ids)
+               cpumask_set_cpu(i, &rdt_cpumask);
+       mutex_unlock(&rdt_group_mutex);
+}
+
+static int intel_rdt_cpu_notifier(struct notifier_block *nb,
+                                 unsigned long action, void *hcpu)
+{
+       unsigned int cpu  = (unsigned long)hcpu;
+
+       switch (action) {
+       case CPU_DOWN_FAILED:
+       case CPU_ONLINE:
+               intel_rdt_cpu_start(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+               intel_rdt_cpu_exit(cpu);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
 }
 
 static int __init intel_rdt_late_init(void)
@@ -355,9 +434,15 @@ static int __init intel_rdt_late_init(void)
        cct->l3_cbm = (1ULL << max_cbm_len) - 1;
        cct->clos_refcnt = 1;
 
+       cpu_notifier_register_begin();
+
        for_each_online_cpu(i)
                rdt_cpumask_update(i);
 
+       __hotcpu_notifier(intel_rdt_cpu_notifier, 0);
+
+       cpu_notifier_register_done();
+
        static_key_slow_inc(&rdt_enable_key);
        pr_info("Intel cache allocation enabled\n");
 out_err:
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to