Re: [PATCH v6 5/6] Optionally flush L1D on context switch

2020-05-14 Thread Singh, Balbir
On Wed, 2020-05-13 at 17:27 +0200, Thomas Gleixner wrote:
> 
> Balbir Singh  writes:
> 
> > Implement a mechanism to selectively flush the L1D cache. The goal is
> > to allow tasks that are paranoid due to the recent snoop-assisted data
> > sampling vulnerabilities to flush their L1D on being switched out.
> > This protects their data from being snooped or leaked via side
> > channels after the task has context switched out.
> > 
> > There are two scenarios we might want to protect against: a task
> > leaving the CPU with data still in L1D (which is the main concern of
> > this patch), and a malicious (not so well trusted) task coming in, for
> > which we want to clean up the cache before it starts. Only the former
> > case is addressed.
> > 
> > A new thread_info flag TIF_SPEC_FLUSH_L1D is added to track tasks
> > which opt into L1D flushing. cpu_tlbstate.last_user_mm_spec is used to
> > convert the TIF flags into mm state (per cpu via last_user_mm_spec) in
> > cond_mitigation(), which is then used to decide when to call
> > flush_l1d().
> > 
> > Add prctl()s to opt in to L1D cache flushing on context switch out;
> > the existing mechanism of tracking prev_mm via cpu_tlbstate is reused
> > to track the state of the tasks and to flush the L1D cache. The prctl
> > interface is generic and can be ported over to other architectures.
> > 
> > Suggested-by: Thomas Gleixner 
> > Signed-off-by: Balbir Singh 
> > Reviewed-by: Kees Cook 
> > ---
> >  arch/x86/include/asm/thread_info.h |  7 -
> >  arch/x86/mm/tlb.c  | 44 --
> >  include/uapi/linux/prctl.h |  4 +++
> >  kernel/sys.c   | 20 ++
> >  4 files changed, 72 insertions(+), 3 deletions(-)
> > 
> > diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
> > index 8de8ceccb8bc..67de693d9ba1 100644
> > --- a/arch/x86/include/asm/thread_info.h
> > +++ b/arch/x86/include/asm/thread_info.h
> > @@ -84,7 +84,7 @@ struct thread_info {
> >  #define TIF_SYSCALL_AUDIT       7   /* syscall auditing active */
> >  #define TIF_SECCOMP             8   /* secure computing */
> >  #define TIF_SPEC_IB             9   /* Indirect branch speculation mitigation */
> > -#define TIF_SPEC_FORCE_UPDATE   10  /* Force speculation MSR update in context switch */
> > +#define TIF_SPEC_FLUSH_L1D      10  /* Flush L1D on mm switches (processes) */
> >  #define TIF_USER_RETURN_NOTIFY  11  /* notify kernel of userspace return */
> >  #define TIF_UPROBE              12  /* breakpointed or singlestepping */
> >  #define TIF_PATCH_PENDING       13  /* pending live patching update */
> > @@ -96,6 +96,7 @@ struct thread_info {
> >  #define TIF_MEMDIE              20  /* is terminating due to OOM killer */
> >  #define TIF_POLLING_NRFLAG      21  /* idle is polling for TIF_NEED_RESCHED */
> >  #define TIF_IO_BITMAP           22  /* uses I/O bitmap */
> > +#define TIF_SPEC_FORCE_UPDATE   23  /* Force speculation MSR update in context switch */
> >  #define TIF_FORCED_TF           24  /* true if TF in eflags artificially */
> >  #define TIF_BLOCKSTEP           25  /* set when we want DEBUGCTLMSR_BTF */
> >  #define TIF_LAZY_MMU_UPDATES    27  /* task is updating the mmu lazily */
> > @@ -132,6 +133,7 @@ struct thread_info {
> >  #define _TIF_ADDR32             (1 << TIF_ADDR32)
> >  #define _TIF_X32                (1 << TIF_X32)
> >  #define _TIF_FSCHECK            (1 << TIF_FSCHECK)
> > +#define _TIF_SPEC_FLUSH_L1D     (1 << TIF_SPEC_FLUSH_L1D)
> 
> Bah. These defines are ordered in the same way as the TIF defines
> 
> >  /*
> > - * Bits to mangle the TIF_SPEC_IB state into the mm pointer which is
> > + * Bits to mangle the TIF_SPEC_* state into the mm pointer which is
> >   * stored in cpu_tlb_state.last_user_mm_spec.
> >   */
> >  #define LAST_USER_MM_IBPB       0x1UL
> > -#define LAST_USER_MM_SPEC_MASK  (LAST_USER_MM_IBPB)
> > +#define LAST_USER_MM_L1D_FLUSH  0x2UL
> > +#define LAST_USER_MM_SPEC_MASK  (LAST_USER_MM_IBPB | LAST_USER_MM_L1D_FLUSH)
> 
> You lost
> 
> +   BUILD_BUG_ON(TIF_SPEC_FLUSH_L1D != TIF_SPEC_IB + 1);
> 
> from the patch I gave you.


Oops... I'll fix up both and redo patch 5/6 by splitting it into
interface vs. flush bits.

Thanks,
Balbir Singh.


Re: [PATCH v6 5/6] Optionally flush L1D on context switch

2020-05-14 Thread Thomas Gleixner
Balbir,

"Singh, Balbir"  writes:
> On Wed, 2020-05-13 at 18:16 +0200, Thomas Gleixner wrote:
>> Balbir Singh  writes:
>> But looking at this deeper (yes I should have noticed earlier):
>> 
>> Why do we need yet another PRCTL?
>> 
>> We already have PR_SET_SPECULATION_CTRL/PR_GET_SPECULATION_CTRL. That
>> L1D flush thingy fits into this category, right?
>
> It does, I thought about it for a while when I was changing the code and
> left it aside because, looking at the definition
>
> 1    PR_SPEC_ENABLE     The speculation feature is enabled, mitigation is disabled.
> 2    PR_SPEC_DISABLE    The speculation feature is disabled, mitigation is enabled.
>
> With the L1D flush, there is no overriding of the feature as such (as in
> enabling it when the mitigation is disabled and vice versa). I am happy
> to reconsider my initial thought, though.

L1D is always enabled as L1D will be a source of trouble forever :)

Thanks,

tglx
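
For reference, a minimal userspace sketch of what folding the opt-in into the
existing speculation-control prctl could look like. PR_SPEC_L1D_FLUSH_OUT is
hypothetical here (it follows the naming Thomas suggests later in the thread
and is not part of this v6 series), and whether opting in should map to
PR_SPEC_ENABLE or PR_SPEC_DISABLE is exactly the semantic wrinkle raised
above, so the constants are illustrative only:

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    /* Hypothetical: not defined by this series or by current kernels. */
    #ifndef PR_SPEC_L1D_FLUSH_OUT
    #define PR_SPEC_L1D_FLUSH_OUT 2
    #endif

    int main(void)
    {
            /*
             * Opt the calling task in to an L1D flush on context switch out.
             * PR_SPEC_ENABLE is used purely for illustration; see the
             * ENABLE/DISABLE semantics discussion above.
             */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH_OUT,
                      PR_SPEC_ENABLE, 0, 0))
                    perror("PR_SET_SPECULATION_CTRL");

            /* Query the per-task state; the return value encodes PR_SPEC_* bits. */
            printf("l1d flush ctrl: %d\n",
                   prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH_OUT, 0, 0, 0));
            return 0;
    }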


Re: [PATCH v6 5/6] Optionally flush L1D on context switch

2020-05-14 Thread Singh, Balbir
On Wed, 2020-05-13 at 17:04 +0200, Thomas Gleixner wrote:
> 
> 
> Balbir Singh  writes:
> > 
> > +	if (prev_mm & LAST_USER_MM_L1D_FLUSH)
> > +		arch_l1d_flush(0); /* Just flush, don't populate the TLB */
> 
> Bah. I fundamentally hate tail comments. They are just disturbing the
> reading flow. Aside of that, this states the WHAT but not the WHY. And
> if you add that explanation then you need more than 20 characters and
> end up with
> 
> if (prev_mm & LAST_USER_MM_L1D_FLUSH) {
> /*
>  * Proper comment explaining why this is flushing
>  * without prepopulating the TLB.
>  */
> arch_l1d_flush(0);
> }
> 

I added the comment because of the use of 0: 0 is usually read as true or
false, and I wanted to make it clear that we don't populate the TLB. The
reason we don't do it is that I don't think we need to. I am happy to
revisit the placement of the comment.

> anyway. And even for a short comment which fits after the function
> call
> it's way better to have:
> 
> if (prev_mm & LAST_USER_MM_L1D_FLUSH) {
> /* Short explanation */
> arch_l1d_flush(0);
> }
> 
> Hmm?
> 
> > + /*
> > +  * Leave last_user_mm_spec at LAST_USER_MM_IBPB, we don't
> > +  * want to set LAST_USER_MM_L1D_FLUSH and force a flush before
> > +  * we've allocated the flush pages.
> 
> Ah, here is the comment. I still like the explicit define for the
> (re)init.
> 

I saw your tree, and it sounds like you fixed it up there in patch 3.

Balbir Singh.
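
For reference, the brace-plus-comment form Thomas asks for might end up
looking roughly like this; the wording of the "why" is an assumption based on
Balbir's explanation above (the outgoing task does not need its TLB
repopulated), not the final patch:

    if (prev_mm & LAST_USER_MM_L1D_FLUSH) {
            /*
             * Just empty the L1D. There is no need to prepopulate the TLB
             * here; the task is on its way out and the next mm's TLB state
             * is handled by the normal switch path.
             */
            arch_l1d_flush(0);
    }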



Re: [PATCH v6 5/6] Optionally flush L1D on context switch

2020-05-14 Thread Singh, Balbir
On Wed, 2020-05-13 at 18:16 +0200, Thomas Gleixner wrote:
> Balbir Singh  writes:
> 
> This part:
> 
> > --- a/include/uapi/linux/prctl.h
> > +++ b/include/uapi/linux/prctl.h
> > @@ -238,4 +238,8 @@ struct prctl_mm_map {
> >  #define PR_SET_IO_FLUSHER       57
> >  #define PR_GET_IO_FLUSHER       58
> > 
> > +/* Flush L1D on context switch (mm) */
> > +#define PR_SET_L1D_FLUSH 59
> > +#define PR_GET_L1D_FLUSH 60
> 
> ...
> 
> > @@ -2514,6 +2524,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
> > 
> >                 error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
> >                 break;
> > +       case PR_SET_L1D_FLUSH:
> > +               if (arg3 || arg4 || arg5)
> > +                       return -EINVAL;
> > +               error = arch_prctl_l1d_flush_set(me, arg2);
> > +               break;
> > +       case PR_GET_L1D_FLUSH:
> > +               if (arg2 || arg3 || arg4 || arg5)
> > +                       return -EINVAL;
> > +               error = arch_prctl_l1d_flush_get(me);
> > +               break;
> >         default:
> >                 error = -EINVAL;
> >                 break;
> 
> wants to be split into a separate patch, really. Then we get proper
> subject lines with proper subsystem prefixes. This part also lacks a
> description in Documentation/userspace-api/ and function prototypes
> for the arch_prctl* functions.
> 
> But looking at this deeper (yes I should have noticed earlier):
> 
> Why do we need yet another PRCTL?
> 
> We already have PR_SET_SPECULATION_CTRL/PR_GET_SPECULATION_CTRL. That
> L1D flush thingy fits into this category, right?

It does, I thought about it for a while when I was changing the code and
left it aside because, looking at the definition

1    PR_SPEC_ENABLE     The speculation feature is enabled, mitigation is disabled.
2    PR_SPEC_DISABLE    The speculation feature is disabled, mitigation is enabled.

With the L1D flush, there is no overriding of the feature as such (as in
enabling it when the mitigation is disabled and vice versa). I am happy to
reconsider my initial thought, though.


> 
> This makes even more sense if you think about the second use case for
> L1D flush, i.e. the flush when an untrusted task comes in. If we ever
> want to support that case then this will be imposed by seccomp and
> then
> we'd need yet another interface there.
> 

Yep, I see what you mean.


> And for this reason we should also name that current opt-in thingy:
> L1D_FLUSH_OUT in the prctl and also for the TIF bits.
> 
> Hmm? Kees?
> 
> I've applied the first 4 patches to:
> 
>   git://git.kernel.org/pub/scm/linux/kernel/git/tglx/devel.git x86/mm
> 
> so the polishing I did gets preserved and you don't have to resend the
> whole pile.
> 

Thanks, I think your change to patch 6 makes sense as well. Let me
respin this based on what you think of the argument above.

Balbir



Re: [PATCH v6 5/6] Optionally flush L1D on context switch

2020-05-13 Thread Thomas Gleixner
Balbir Singh  writes:

This part:

> --- a/include/uapi/linux/prctl.h
> +++ b/include/uapi/linux/prctl.h
> @@ -238,4 +238,8 @@ struct prctl_mm_map {
>  #define PR_SET_IO_FLUSHER       57
>  #define PR_GET_IO_FLUSHER       58
>  
> +/* Flush L1D on context switch (mm) */
> +#define PR_SET_L1D_FLUSH 59
> +#define PR_GET_L1D_FLUSH 60

...

> @@ -2514,6 +2524,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
>  
>   error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
>   break;
> + case PR_SET_L1D_FLUSH:
> + if (arg3 || arg4 || arg5)
> + return -EINVAL;
> + error = arch_prctl_l1d_flush_set(me, arg2);
> + break;
> + case PR_GET_L1D_FLUSH:
> + if (arg2 || arg3 || arg4 || arg5)
> + return -EINVAL;
> + error = arch_prctl_l1d_flush_get(me);
> + break;
>   default:
>   error = -EINVAL;
>   break;

wants to be split into a separate patch, really. Then we get proper
subject lines with proper subsystem prefixes. This part also lacks a
description in Documentation/userspace-api/ and function prototypes for
the arch_prctl* functions.

But looking at this deeper (yes I should have noticed earlier):

Why do we need yet another PRCTL?

We already have PR_SET_SPECULATION_CTRL/PR_GET_SPECULATION_CTRL. That
L1D flush thingy fits into this category, right?

This makes even more sense if you think about the second use case for
L1D flush, i.e. the flush when an untrusted task comes in. If we ever
want to support that case then this will be imposed by seccomp and then
we'd need yet another interface there.

And for this reason we should also name that current opt-in thingy:
L1D_FLUSH_OUT in the prctl and also for the TIF bits.

Hmm? Kees?

I've applied the first 4 patches to:

  git://git.kernel.org/pub/scm/linux/kernel/git/tglx/devel.git x86/mm

so the polishing I did gets preserved and you don't have to resend the
whole pile.

Thanks,

tglx
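
For reference, a minimal userspace sketch of the v6 interface as quoted above;
the prctl numbers come from this patch's include/uapi/linux/prctl.h hunk and
may still change per the discussion, so treat this as illustrative:

    #include <stdio.h>
    #include <sys/prctl.h>

    /* From the v6 include/uapi/linux/prctl.h hunk quoted above. */
    #ifndef PR_SET_L1D_FLUSH
    #define PR_SET_L1D_FLUSH 59
    #define PR_GET_L1D_FLUSH 60
    #endif

    int main(void)
    {
            /* Opt the calling task in to an L1D flush when it is switched out. */
            if (prctl(PR_SET_L1D_FLUSH, 1, 0, 0, 0))
                    perror("PR_SET_L1D_FLUSH");

            /* Read back the per-task state; non-zero means the flush is armed. */
            printf("L1D flush opt-in: %d\n", prctl(PR_GET_L1D_FLUSH, 0, 0, 0, 0));
            return 0;
    }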


Re: [PATCH v6 5/6] Optionally flush L1D on context switch

2020-05-13 Thread Thomas Gleixner
Balbir Singh  writes:

> Implement a mechanism to selectively flush the L1D cache. The goal is to
> allow tasks that are paranoid due to the recent snoop-assisted data sampling
> vulnerabilities to flush their L1D on being switched out. This protects
> their data from being snooped or leaked via side channels after the task
> has context switched out.
>
> There are two scenarios we might want to protect against: a task leaving
> the CPU with data still in L1D (which is the main concern of this patch),
> and a malicious (not so well trusted) task coming in, for which we want
> to clean up the cache before it starts. Only the former case is addressed.
>
> A new thread_info flag TIF_SPEC_FLUSH_L1D is added to track tasks which
> opt into L1D flushing. cpu_tlbstate.last_user_mm_spec is used to convert
> the TIF flags into mm state (per cpu via last_user_mm_spec) in
> cond_mitigation(), which is then used to decide when to call flush_l1d().
>
> Add prctl()s to opt in to L1D cache flushing on context switch out; the
> existing mechanism of tracking prev_mm via cpu_tlbstate is reused to
> track the state of the tasks and to flush the L1D cache. The prctl
> interface is generic and can be ported over to other architectures.
>
> Suggested-by: Thomas Gleixner 
> Signed-off-by: Balbir Singh 
> Reviewed-by: Kees Cook 
> ---
>  arch/x86/include/asm/thread_info.h |  7 -
>  arch/x86/mm/tlb.c  | 44 --
>  include/uapi/linux/prctl.h |  4 +++
>  kernel/sys.c   | 20 ++
>  4 files changed, 72 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
> index 8de8ceccb8bc..67de693d9ba1 100644
> --- a/arch/x86/include/asm/thread_info.h
> +++ b/arch/x86/include/asm/thread_info.h
> @@ -84,7 +84,7 @@ struct thread_info {
>  #define TIF_SYSCALL_AUDIT       7   /* syscall auditing active */
>  #define TIF_SECCOMP             8   /* secure computing */
>  #define TIF_SPEC_IB             9   /* Indirect branch speculation mitigation */
> -#define TIF_SPEC_FORCE_UPDATE   10  /* Force speculation MSR update in context switch */
> +#define TIF_SPEC_FLUSH_L1D      10  /* Flush L1D on mm switches (processes) */
>  #define TIF_USER_RETURN_NOTIFY  11  /* notify kernel of userspace return */
>  #define TIF_UPROBE              12  /* breakpointed or singlestepping */
>  #define TIF_PATCH_PENDING       13  /* pending live patching update */
> @@ -96,6 +96,7 @@ struct thread_info {
>  #define TIF_MEMDIE              20  /* is terminating due to OOM killer */
>  #define TIF_POLLING_NRFLAG      21  /* idle is polling for TIF_NEED_RESCHED */
>  #define TIF_IO_BITMAP           22  /* uses I/O bitmap */
> +#define TIF_SPEC_FORCE_UPDATE   23  /* Force speculation MSR update in context switch */
>  #define TIF_FORCED_TF           24  /* true if TF in eflags artificially */
>  #define TIF_BLOCKSTEP           25  /* set when we want DEBUGCTLMSR_BTF */
>  #define TIF_LAZY_MMU_UPDATES    27  /* task is updating the mmu lazily */
> @@ -132,6 +133,7 @@ struct thread_info {
>  #define _TIF_ADDR32             (1 << TIF_ADDR32)
>  #define _TIF_X32                (1 << TIF_X32)
>  #define _TIF_FSCHECK            (1 << TIF_FSCHECK)
> +#define _TIF_SPEC_FLUSH_L1D     (1 << TIF_SPEC_FLUSH_L1D)

Bah. These defines are ordered in the same way as the TIF defines

>  /*
> - * Bits to mangle the TIF_SPEC_IB state into the mm pointer which is
> + * Bits to mangle the TIF_SPEC_* state into the mm pointer which is
>   * stored in cpu_tlb_state.last_user_mm_spec.
>   */
>  #define LAST_USER_MM_IBPB       0x1UL
> -#define LAST_USER_MM_SPEC_MASK  (LAST_USER_MM_IBPB)
> +#define LAST_USER_MM_L1D_FLUSH  0x2UL
> +#define LAST_USER_MM_SPEC_MASK  (LAST_USER_MM_IBPB | LAST_USER_MM_L1D_FLUSH)

You lost

+   BUILD_BUG_ON(TIF_SPEC_FLUSH_L1D != TIF_SPEC_IB + 1);

from the patch I gave you.
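
For context, the BUILD_BUG_ON matters because of how the TIF bits are folded
into last_user_mm_spec. A rough kernel-side sketch of the mechanism, with
names following the series (this is illustrative, not an exact hunk from the
patch):

    /*
     * Sketch only: shifting the thread flags right by TIF_SPEC_IB puts
     * TIF_SPEC_IB at bit 0 (LAST_USER_MM_IBPB) and TIF_SPEC_FLUSH_L1D at
     * bit 1 (LAST_USER_MM_L1D_FLUSH), which only holds if the two TIF
     * bits are adjacent -- hence the BUILD_BUG_ON.
     */
    static inline unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
    {
            unsigned long next_tif = task_thread_info(next)->flags;
            unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;

            BUILD_BUG_ON(TIF_SPEC_FLUSH_L1D != TIF_SPEC_IB + 1);

            return (unsigned long)next->mm | spec_bits;
    }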


Re: [PATCH v6 5/6] Optionally flush L1D on context switch

2020-05-13 Thread Thomas Gleixner
Balbir Singh  writes:
>  
> +	if (prev_mm & LAST_USER_MM_L1D_FLUSH)
> +		arch_l1d_flush(0); /* Just flush, don't populate the TLB */

Bah. I fundamentally hate tail comments. They are just disturbing the
reading flow. Aside of that, this states the WHAT but not the WHY. And
if you add that explanation then you need more than 20 characters and
end up with

if (prev_mm & LAST_USER_MM_L1D_FLUSH) {
/*
 * Proper comment explaining why this is flushing
 * without prepopulating the TLB.
 */
arch_l1d_flush(0);
}

anyway. And even for a short comment which fits after the function call
it's way better to have:

if (prev_mm & LAST_USER_MM_L1D_FLUSH) {
/* Short explanation */
arch_l1d_flush(0);
}

Hmm?

> + /*
> +  * Leave last_user_mm_spec at LAST_USER_MM_IBPB, we don't
> +  * want to set LAST_USER_MM_L1D_FLUSH and force a flush before
> +  * we've allocated the flush pages.

Ah, here is the comment. I still like the explicit define for the
(re)init.
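
The explicit (re)init define being referred to would be something along these
lines; the LAST_USER_MM_INIT name and its placement are assumptions here, not
a quote from the applied tree:

    /* Assumed name: spell out what the (re)init value of last_user_mm_spec means. */
    #define LAST_USER_MM_INIT LAST_USER_MM_IBPB

    void initialize_tlbstate_and_flush(void)
    {
            /* ... */
            this_cpu_write(cpu_tlbstate.last_user_mm_spec, LAST_USER_MM_INIT);
            /* ... */
    }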



[PATCH v6 5/6] Optionally flush L1D on context switch

2020-05-09 Thread Balbir Singh
Implement a mechanism to selectively flush the L1D cache. The goal is to
allow tasks that are paranoid due to the recent snoop-assisted data sampling
vulnerabilities to flush their L1D on being switched out. This protects
their data from being snooped or leaked via side channels after the task
has context switched out.

There are two scenarios we might want to protect against: a task leaving
the CPU with data still in L1D (which is the main concern of this patch),
and a malicious (not so well trusted) task coming in, for which we want
to clean up the cache before it starts. Only the former case is addressed.

A new thread_info flag TIF_SPEC_FLUSH_L1D is added to track tasks which
opt into L1D flushing. cpu_tlbstate.last_user_mm_spec is used to convert
the TIF flags into mm state (per cpu via last_user_mm_spec) in
cond_mitigation(), which is then used to decide when to call flush_l1d().

Add prctl()s to opt in to L1D cache flushing on context switch out; the
existing mechanism of tracking prev_mm via cpu_tlbstate is reused to
track the state of the tasks and to flush the L1D cache. The prctl
interface is generic and can be ported over to other architectures.

Suggested-by: Thomas Gleixner 
Signed-off-by: Balbir Singh 
Reviewed-by: Kees Cook 
---
 arch/x86/include/asm/thread_info.h |  7 -
 arch/x86/mm/tlb.c  | 44 --
 include/uapi/linux/prctl.h |  4 +++
 kernel/sys.c   | 20 ++
 4 files changed, 72 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 8de8ceccb8bc..67de693d9ba1 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -84,7 +84,7 @@ struct thread_info {
 #define TIF_SYSCALL_AUDIT       7   /* syscall auditing active */
 #define TIF_SECCOMP             8   /* secure computing */
 #define TIF_SPEC_IB             9   /* Indirect branch speculation mitigation */
-#define TIF_SPEC_FORCE_UPDATE   10  /* Force speculation MSR update in context switch */
+#define TIF_SPEC_FLUSH_L1D      10  /* Flush L1D on mm switches (processes) */
 #define TIF_USER_RETURN_NOTIFY  11  /* notify kernel of userspace return */
 #define TIF_UPROBE              12  /* breakpointed or singlestepping */
 #define TIF_PATCH_PENDING       13  /* pending live patching update */
@@ -96,6 +96,7 @@ struct thread_info {
 #define TIF_MEMDIE              20  /* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG      21  /* idle is polling for TIF_NEED_RESCHED */
 #define TIF_IO_BITMAP           22  /* uses I/O bitmap */
+#define TIF_SPEC_FORCE_UPDATE   23  /* Force speculation MSR update in context switch */
 #define TIF_FORCED_TF           24  /* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP           25  /* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES    27  /* task is updating the mmu lazily */
@@ -132,6 +133,7 @@ struct thread_info {
 #define _TIF_ADDR32             (1 << TIF_ADDR32)
 #define _TIF_X32                (1 << TIF_X32)
 #define _TIF_FSCHECK            (1 << TIF_FSCHECK)
+#define _TIF_SPEC_FLUSH_L1D     (1 << TIF_SPEC_FLUSH_L1D)
 
 /* Work to do before invoking the actual syscall. */
 #define _TIF_WORK_SYSCALL_ENTRY \
@@ -235,6 +237,9 @@ static inline int arch_within_stack_frames(const void * const stack,
                                    current_thread_info()->status & TS_COMPAT)
 #endif
 
+extern int arch_prctl_l1d_flush_set(struct task_struct *tsk, unsigned long enable);
+extern int arch_prctl_l1d_flush_get(struct task_struct *tsk);
+
 extern void arch_task_cache_init(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 extern void arch_release_task_struct(struct task_struct *tsk);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 10056b8d8f01..7ea9bc9e089f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -13,6 +13,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -43,11 +44,12 @@
  */
 
 /*
- * Bits to mangle the TIF_SPEC_IB state into the mm pointer which is
+ * Bits to mangle the TIF_SPEC_* state into the mm pointer which is
  * stored in cpu_tlb_state.last_user_mm_spec.
  */
 #define LAST_USER_MM_IBPB  0x1UL
-#define LAST_USER_MM_SPEC_MASK (LAST_USER_MM_IBPB)
+#define LAST_USER_MM_L1D_FLUSH 0x2UL
+#define LAST_USER_MM_SPEC_MASK (LAST_USER_MM_IBPB | LAST_USER_MM_L1D_FLUSH)
 
 /*
  * The x86 feature is called PCID (Process Context IDentifier). It is similar
@@ -308,6 +310,35 @@ void leave_mm(int cpu)
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
+static int enable_l1d_flush_for_task(struct task_struct *tsk)
+{
+       int ret = l1d_flush_init_once();
+
+       if (ret < 0)
+               return ret;
+
+       set_ti_thread_flag(&tsk->thread_info, TIF_SPEC_FLUSH_L1D);
+       return ret;
+}
+
+static int disable_l1d_flush_for_task(struct t