Instead of open-coding the FS/GS segment loads, introduce a load_fsgs() helper. This cleans up __switch_to() and makes the load path symmetric with the FS/GS segment save path. When FSGSBASE support is enabled, an X86_FEATURE_FSGSBASE check will be incorporated into this helper.
Signed-off-by: Chang S. Bae <chang.seok....@intel.com> Reviewed-by: Andi Kleen <a...@linux.intel.com> Cc: Andy Lutomirski <l...@kernel.org> Cc: H. Peter Anvin <h...@zytor.com> Cc: Dave Hansen <dave.han...@linux.intel.com> Cc: Thomas Gleixner <t...@linutronix.de> Cc: Ingo Molnar <mi...@kernel.org> Reviewed-by: Andy Lutomirski <l...@kernel.org> --- arch/x86/kernel/process_64.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index e498671..cebf240 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -279,6 +279,15 @@ static __always_inline void load_seg_legacy(unsigned short prev_index, } } +static __always_inline void load_fsgs(struct thread_struct *prev, + struct thread_struct *next) +{ + load_seg_legacy(prev->fsindex, prev->fsbase, + next->fsindex, next->fsbase, FS); + load_seg_legacy(prev->gsindex, prev->gsbase, + next->gsindex, next->gsbase, GS); +} + static unsigned long task_seg_base(struct task_struct *task, unsigned short selector) { @@ -588,10 +597,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) if (unlikely(next->ds | prev->ds)) loadsegment(ds, next->ds); - load_seg_legacy(prev->fsindex, prev->fsbase, - next->fsindex, next->fsbase, FS); - load_seg_legacy(prev->gsindex, prev->gsbase, - next->gsindex, next->gsbase, GS); + load_fsgs(prev, next); switch_fpu_finish(next_fpu, cpu); -- 2.7.4