During a context switch, the SSBD bit in the SPEC_CTRL MSR is set
according to the TIF_SSBD flag of the currently running task.
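
For reference, the SSBD bit is derived from the thread flag roughly as
follows (a simplified sketch of ssbd_tif_to_spec_ctrl() in
asm/spec-ctrl.h, shown only for illustration and not changed by this
patch):

    /* Shift the TIF_SSBD flag bit down to the SPEC_CTRL SSBD bit position. */
    static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
    {
            return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
    }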

Later patches will add updates of additional bits in the SPEC_CTRL MSR.
The SPEC_CTRL MSR update functions should therefore drop the speculative
store bypass specific names, as they will no longer be limited to the
SSBD update.

This patch renames the speculative store bypass update functions to more
generic names.

Signed-off-by: Tim Chen <tim.c.c...@linux.intel.com>
---
 arch/x86/include/asm/spec-ctrl.h |  6 +++---
 arch/x86/kernel/cpu/bugs.c       |  4 ++--
 arch/x86/kernel/process.c        | 12 ++++++------
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index ae7c2c5..8e2f841 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -70,11 +70,11 @@ extern void speculative_store_bypass_ht_init(void);
 static inline void speculative_store_bypass_ht_init(void) { }
 #endif
 
-extern void speculative_store_bypass_update(unsigned long tif);
+extern void speculation_ctrl_update(unsigned long tif);
 
-static inline void speculative_store_bypass_update_current(void)
+static inline void speculation_ctrl_update_current(void)
 {
-       speculative_store_bypass_update(current_thread_info()->flags);
+       speculation_ctrl_update(current_thread_info()->flags);
 }
 
 #endif
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 32d962e..190d6eb 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -202,7 +202,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);
 
-               speculative_store_bypass_update(tif);
+               speculation_ctrl_update(tif);
        }
 }
 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
@@ -644,7 +644,7 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
         * mitigation until it is next scheduled.
         */
        if (task == current && update)
-               speculative_store_bypass_update_current();
+               speculation_ctrl_update_current();
 
        return 0;
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c93fcfd..8aa4960 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -395,27 +395,27 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
        wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
 }
 
-static __always_inline void intel_set_ssb_state(unsigned long tifn)
+static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
 {
        u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
 
        wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+static __always_inline void __speculation_ctrl_update(unsigned long tifn)
 {
        if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
                amd_set_ssb_virt_state(tifn);
        else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                amd_set_core_ssb_state(tifn);
        else
-               intel_set_ssb_state(tifn);
+               spec_ctrl_update_msr(tifn);
 }
 
-void speculative_store_bypass_update(unsigned long tif)
+void speculation_ctrl_update(unsigned long tif)
 {
        preempt_disable();
-       __speculative_store_bypass_update(tif);
+       __speculation_ctrl_update(tif);
        preempt_enable();
 }
 
@@ -452,7 +452,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 
        if ((tifp ^ tifn) & _TIF_SSBD)
-               __speculative_store_bypass_update(tifn);
+               __speculation_ctrl_update(tifn);
 }
 
 /*
-- 
2.9.4
