On Tue, May 22, 2018 at 11:05:39AM +0200, Dominik Brodowski wrote:
> Only CPUs which speculate can speculate. Therefore, it seems prudent
> to test for cpu_no_speculation first and only then determine whether
> a specific speculating CPU is susceptible to store bypass speculation.
> This is underlined by the fact that all CPUs currently listed in
> cpu_no_speculation were present in cpu_no_spec_store_bypass as well.
> 
> Signed-off-by: Dominik Brodowski <[email protected]>
> ---
>  arch/x86/kernel/cpu/common.c | 18 +++++-------------
>  1 file changed, 5 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
> index 78decc3e3067..2fcc1fbf11b0 100644
> --- a/arch/x86/kernel/cpu/common.c
> +++ b/arch/x86/kernel/cpu/common.c
> @@ -942,12 +942,8 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
>       {}
>  };
>  
> +/* Only list CPUs which speculate but are not susceptible to SSB */
>  static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
> -     { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_PINEVIEW        },
> -     { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_LINCROFT        },
> -     { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_PENWELL         },
> -     { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_CLOVERVIEW      },
> -     { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_CEDARVIEW       },
>       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT1     },
>       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
>       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT2     },
> @@ -955,14 +951,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
>       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_CORE_YONAH           },
>       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
>       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
> -     { X86_VENDOR_CENTAUR,   5,                                      },
> -     { X86_VENDOR_INTEL,     5,                                      },
> -     { X86_VENDOR_NSC,       5,                                      },
>       { X86_VENDOR_AMD,       0x12,                                   },
>       { X86_VENDOR_AMD,       0x11,                                   },
>       { X86_VENDOR_AMD,       0x10,                                   },
>       { X86_VENDOR_AMD,       0xf,                                    },
> -     { X86_VENDOR_ANY,       4,                                      },
>       {}
>  };
>  
> @@ -973,16 +965,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
>       if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
>               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

Would it make sense to also move the 'rdmsrl' and the conditional above it
down to the logic below, next to where ia32_cap is actually used?
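I.e., something along these lines? This is an untested sketch, reconstructed
only from the hunk above, so the code below the last quoted context line
(the remaining Meltdown handling) is assumed rather than quoted:

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;

	if (x86_match_cpu(cpu_no_speculation))
		return;

	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	/* Only read ARCH_CAPABILITIES once we know the CPU speculates. */
	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
	    !(ia32_cap & ARCH_CAP_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	if (x86_match_cpu(cpu_no_meltdown))
		return;

	/* ... remaining Meltdown handling unchanged ... */
}

That would avoid the MSR read entirely on CPUs which do not speculate, and
keep it next to the checks which actually consume ia32_cap.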
>  
> -     if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
> -        !(ia32_cap & ARCH_CAP_SSB_NO))
> -             setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
> -
>       if (x86_match_cpu(cpu_no_speculation))
>               return;
>  
>       setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
>       setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
>  
> +     if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
> +        !(ia32_cap & ARCH_CAP_SSB_NO))
> +             setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
> +
>       if (x86_match_cpu(cpu_no_meltdown))
>               return;
>  
> -- 
> 2.17.0
> 
