On Fri, Jul 17, 2020 at 12:20:43AM -0700, ira.we...@intel.com wrote:
> diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
> index f362ce0d5ac0..d69250a7c1bf 100644
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -42,6 +42,7 @@
>  #include <asm/spec-ctrl.h>
>  #include <asm/io_bitmap.h>
>  #include <asm/proto.h>
> +#include <asm/pkeys_internal.h>
>
>  #include "process.h"
>
> @@ -184,6 +185,36 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
>  	return ret;
>  }
>
> +/*
> + * NOTE: We wrap pks_init_task() and pks_sched_in() with
> + * CONFIG_ARCH_HAS_SUPERVISOR_PKEYS because using IS_ENABLED() fails
> + * due to the lack of task_struct->saved_pkrs in this configuration.
> + * Furthermore, we place them here because of the complexity introduced by
> + * header conflicts introduced to get the task_struct definition in the pkeys
> + * headers.
> + */
I don't see anything much useful in that comment.

> +#ifdef CONFIG_ARCH_HAS_SUPERVISOR_PKEYS
> +DECLARE_PER_CPU(u32, pkrs_cache);
> +static inline void pks_init_task(struct task_struct *tsk)
> +{
> +	/* New tasks get the most restrictive PKRS value */
> +	tsk->thread.saved_pkrs = INIT_PKRS_VALUE;
> +}
> +static inline void pks_sched_in(void)
> +{
> +	u64 current_pkrs = current->thread.saved_pkrs;
> +
> +	/* Only update the MSR when current's pkrs is different from the MSR. */
> +	if (this_cpu_read(pkrs_cache) == current_pkrs)
> +		return;
> +
> +	write_pkrs(current_pkrs);

Should we write that like:

	/*
	 * PKRS is only temporarily changed during specific code paths.
	 * Only a preemption during these windows away from the default
	 * value would require updating the MSR.
	 */
	if (unlikely(this_cpu_read(pkrs_cache) != current_pkrs))
		write_pkrs(current_pkrs);

?

> +}
> +#else
> +static inline void pks_init_task(struct task_struct *tsk) { }
> +static inline void pks_sched_in(void) { }
> +#endif
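
As an aside on the suggested rewrite: the idea is that PKRS rarely differs
from the cached value at sched-in, so the expensive MSR write should be the
cold path and the branch hint should say so. Below is a minimal user-space
sketch of that pattern, not the kernel code; pkrs_shadow and
expensive_register_write() are made-up stand-ins for the per-CPU pkrs_cache
and write_pkrs(), and __builtin_expect() plays the role of unlikely():

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t pkrs_shadow;	/* last value "written to the MSR" */

	/* Stand-in for write_pkrs()/WRMSR: the part we want to skip. */
	static void expensive_register_write(uint32_t val)
	{
		pkrs_shadow = val;
		printf("MSR write: 0x%x\n", val);
	}

	static void sched_in_update(uint32_t saved_pkrs)
	{
		/*
		 * PKRS only deviates from the default during narrow code
		 * windows, so a mismatch here is the rare case; hint that
		 * to the compiler, as unlikely() would.
		 */
		if (__builtin_expect(pkrs_shadow != saved_pkrs, 0))
			expensive_register_write(saved_pkrs);
	}

	int main(void)
	{
		sched_in_update(0);		/* matches shadow: no write */
		sched_in_update(0x55555554);	/* differs: one write */
		sched_in_update(0x55555554);	/* matches again: no write */
		return 0;
	}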
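
And on the NOTE quoted above, for anyone wondering why IS_ENABLED() cannot
replace the #ifdef there: a dead "if (0)" branch is still type-checked, so a
reference to a struct member that only exists under the config option fails
to build; the preprocessor form removes the reference before the compiler
sees it. A standalone sketch with invented names (CONFIG_HAS_FEATURE standing
in for CONFIG_ARCH_HAS_SUPERVISOR_PKEYS):

	#include <stdio.h>

	#define CONFIG_HAS_FEATURE 0	/* pretend the option is disabled */

	struct thread_sketch {
		int always_present;
	#if CONFIG_HAS_FEATURE
		unsigned int saved_pkrs;	/* only exists when enabled */
	#endif
	};

	static void init_task_sketch(struct thread_sketch *t)
	{
	#if CONFIG_HAS_FEATURE
		t->saved_pkrs = 0x55555554;	/* removed by the preprocessor when off */
	#endif
		/*
		 * The IS_ENABLED()-style version:
		 *
		 *	if (CONFIG_HAS_FEATURE)
		 *		t->saved_pkrs = 0x55555554;
		 *
		 * does not compile when the member is absent, even though
		 * the branch is dead; that is the situation the NOTE in the
		 * patch is describing.
		 */
		t->always_present = 1;
	}

	int main(void)
	{
		struct thread_sketch t;

		init_task_sketch(&t);
		printf("always_present = %d\n", t.always_present);
		return 0;
	}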