Instead of open-coding the calls to load_seg_legacy(), add a new helper,
x86_fsgsbase_load(), to load the FS/GS segments.  When FSGSBASE support
is enabled later, only this helper will need to be updated.

Signed-off-by: Chang S. Bae <[email protected]>
Reviewed-by: Andi Kleen <[email protected]>
Reviewed-by: Andy Lutomirski <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Dave Hansen <[email protected]>
---
 arch/x86/kernel/process_64.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 710f639..31b4755 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -285,6 +285,15 @@ static __always_inline void load_seg_legacy(unsigned short 
prev_index,
        }
 }
 
+static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
+                                             struct thread_struct *next)
+{
+       load_seg_legacy(prev->fsindex, prev->fsbase,
+                       next->fsindex, next->fsbase, FS);
+       load_seg_legacy(prev->gsindex, prev->gsbase,
+                       next->gsindex, next->gsbase, GS);
+}
+
 static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
                                            unsigned short selector)
 {
@@ -595,10 +604,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct 
*next_p)
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);
 
-       load_seg_legacy(prev->fsindex, prev->fsbase,
-                       next->fsindex, next->fsbase, FS);
-       load_seg_legacy(prev->gsindex, prev->gsbase,
-                       next->gsindex, next->gsbase, GS);
+       x86_fsgsbase_load(prev, next);
 
        switch_fpu_finish(next_fpu, cpu);
 
-- 
2.7.4

Reply via email to