Introduce the use of rdt_enable_key in the sched_in code in preparation
for adding RDT monitoring support to sched_in.

Signed-off-by: Vikas Shivappa <vikas.shiva...@linux.intel.com>
---
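[Note, not part of the patch: a minimal sketch of how rdt_enable_key is
expected to be driven with the kernel's static-branch API so that
intel_rdt_sched_in() starts calling __intel_rdt_sched_in(). The call
sites (e.g. the resctrl mount/unmount path) are an assumption about
follow-up patches, and the helper names below are hypothetical.]

/*
 * Illustrative only -- not part of this patch.  Sketch of flipping
 * rdt_enable_key; exact call sites are an assumption here.
 */
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);

/* Hypothetical enable path, e.g. when resctrl is mounted on
 * allocation- and/or monitoring-capable hardware. */
static void rdt_sched_in_enable(void)
{
	static_branch_enable(&rdt_enable_key);
}

/* Hypothetical disable path, e.g. on unmount. */
static void rdt_sched_in_disable(void)
{
	static_branch_disable(&rdt_enable_key);
}
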
 arch/x86/include/asm/intel_rdt_sched.h | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/intel_rdt_sched.h b/arch/x86/include/asm/intel_rdt_sched.h
index 2c704d2..8c5be01 100644
--- a/arch/x86/include/asm/intel_rdt_sched.h
+++ b/arch/x86/include/asm/intel_rdt_sched.h
@@ -27,10 +27,12 @@ struct intel_pqr_state {
 
 DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
 DECLARE_PER_CPU_READ_MOSTLY(struct intel_pqr_state, rdt_cpu_default);
+
+DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 
 /*
- * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
+ * __intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
  *
  * Following considerations are made so that this has minimal impact
  * on scheduler hot path:
@@ -42,7 +44,7 @@ struct intel_pqr_state {
  *
  * Must be called with preemption disabled.
  */
-static inline void intel_rdt_sched_in(void)
+static inline void __intel_rdt_sched_in(void)
 {
        if (static_branch_likely(&rdt_alloc_enable_key)) {
                struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
@@ -63,6 +65,12 @@ static inline void intel_rdt_sched_in(void)
        }
 }
 
+static inline void intel_rdt_sched_in(void)
+{
+       if (static_branch_likely(&rdt_enable_key))
+               __intel_rdt_sched_in();
+}
+
 #else
 
 static inline void intel_rdt_sched_in(void) {}
-- 
1.9.1
