Sorry, forgot to CC +Tom Lendacky. Please take a look, Tom, thanks.

On Wed, Nov 11, 2020 at 4:10 PM Joel Fernandes (Google)
<[email protected]> wrote:
>
> Some hardware, such as certain AMD variants, doesn't have cross-HT
> MDS/L1TF issues. Detect this and don't enable core scheduling, as it
> can needlessly slow the device down.
>
> Signed-off-by: Joel Fernandes (Google) <[email protected]>
> ---
>  arch/x86/kernel/cpu/bugs.c | 8 ++++++++
>  kernel/sched/core.c        | 7 +++++++
>  kernel/sched/sched.h       | 5 +++++
>  3 files changed, 20 insertions(+)
>
> diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
> index dece79e4d1e9..0e6e61e49b23 100644
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -152,6 +152,14 @@ void __init check_bugs(void)
>  #endif
>  }
>
> +/*
> + * Do not need core scheduling if CPU does not have MDS/L1TF vulnerability.
> + */
> +int arch_allow_core_sched(void)
> +{
> +       return boot_cpu_has_bug(X86_BUG_MDS) || boot_cpu_has_bug(X86_BUG_L1TF);
> +}
> +
>  void
>  x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
>  {
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 64c559192634..c6158b4959fe 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -319,6 +319,13 @@ static void __sched_core_enable(void)
>         for_each_online_cpu(cpu)
>                 BUG_ON(!sched_core_empty(cpu_rq(cpu)));
>
> +       /*
> +        * Some architectures may not want core scheduling (e.g., AMD does
> +        * not have the cross-HT MDS/L1TF issues, so it wants SMT fully on).
> +        */
> +       if (!arch_allow_core_sched())
> +               return;
> +
>         static_branch_enable(&__sched_core_enabled);
>         stop_machine(__sched_core_stopper, (void *)true, NULL);
>
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 3cf08c77b678..a1b39764a6ed 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1203,6 +1203,11 @@ int cpu_core_tag_color_write_u64(struct cgroup_subsys_state *css,
>
>  bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);
>
> +int __weak arch_allow_core_sched(void)
> +{
> +       return true;
> +}
> +
>  #else /* !CONFIG_SCHED_CORE */
>
>  static inline bool sched_core_enqueued(struct task_struct *task) { return false; }
> --
> 2.29.2.222.g5d2a92d10f8-goog
>
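
For reviewers less familiar with the pattern used here: the generic
scheduler code supplies a __weak default that allows core scheduling,
and an architecture opts out by providing a strong definition that the
linker prefers over the weak one. A minimal userspace sketch of that
mechanism (illustrative only; the names mirror the patch, and a
GCC/Clang ELF toolchain is assumed):

  #include <stdio.h>

  /* Generic default: allow core scheduling unless an arch overrides. */
  __attribute__((weak)) int arch_allow_core_sched(void)
  {
          return 1;
  }

  /*
   * An architecture wanting different behavior supplies a strong
   * definition in its own file, e.g.:
   *
   *     int arch_allow_core_sched(void)
   *     {
   *             return cpu_has_mds_bug() || cpu_has_l1tf_bug();
   *     }
   *
   * (cpu_has_* are hypothetical stand-ins for the real
   * boot_cpu_has_bug() checks in the patch.)
   */

  int main(void)
  {
          printf("core sched allowed: %d\n", arch_allow_core_sched());
          return 0;
  }

With only the weak definition linked in, the default is used; linking in
a strong definition silently replaces it, which is exactly how the x86
version in bugs.c overrides the fallback in sched.h.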
