Re: [PATCH 1/6] arm64: kpti: move check for non-vulnerable CPUs to a function

2018-12-13 Thread Jeremy Linton

Hi Julien,

Thanks for looking at this,

On 12/13/2018 03:13 AM, Julien Thierry wrote:

Hi,

On 06/12/2018 23:44, Jeremy Linton wrote:

From: Mian Yousaf Kaukab 

Add is_cpu_meltdown_safe() which is a whitelist of known safe cores.

Signed-off-by: Mian Yousaf Kaukab 
[Moved location of function]
Signed-off-by: Jeremy Linton 
---
  arch/arm64/kernel/cpufeature.c | 16 
  1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index aec5ecb85737..242898395f68 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -908,8 +908,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, 
int scope)
  #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
  
-static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,

-   int scope)
+static bool is_cpu_meltdown_safe(void)
  {
/* List of CPUs that are not vulnerable and don't need KPTI */
static const struct midr_range kpti_safe_list[] = {
@@ -917,6 +916,16 @@ static bool unmap_kernel_at_el0(const struct 
arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
{ /* sentinel */ }
};
+   /* Don't force KPTI for CPUs that are not vulnerable */


This is really a nit, but that comment would make more sense where
is_cpu_meltdown_safe() is called since unmap_kernel_at_el0 is the one
deciding whether to apply KPTI, is_cpu_meltdown_safe() just states
whether the core is safe or not.


That is a good point, thanks.




Otherwise:

Reviewed-by: Julien Thierry 

Cheers,

Julien


+   if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+   return true;
+
+   return false;
+}
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+   int scope)
+{
char const *str = "command line option";
  
  	/*

@@ -940,8 +949,7 @@ static bool unmap_kernel_at_el0(const struct 
arm64_cpu_capabilities *entry,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;
  
-	/* Don't force KPTI for CPUs that are not vulnerable */

-   if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+   if (is_cpu_meltdown_safe())
return false;
  
  	/* Defer to CPU feature registers */








Re: [PATCH 1/6] arm64: kpti: move check for non-vulnerable CPUs to a function

2018-12-13 Thread Julien Thierry
Hi,

On 06/12/2018 23:44, Jeremy Linton wrote:
> From: Mian Yousaf Kaukab 
> 
> Add is_cpu_meltdown_safe() which is a whitelist of known safe cores.
> 
> Signed-off-by: Mian Yousaf Kaukab 
> [Moved location of function]
> Signed-off-by: Jeremy Linton 
> ---
>  arch/arm64/kernel/cpufeature.c | 16 
>  1 file changed, 12 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index aec5ecb85737..242898395f68 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -908,8 +908,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities 
> *entry, int scope)
>  #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>  static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
>  
> -static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
> - int scope)
> +static bool is_cpu_meltdown_safe(void)
>  {
>   /* List of CPUs that are not vulnerable and don't need KPTI */
>   static const struct midr_range kpti_safe_list[] = {
> @@ -917,6 +916,16 @@ static bool unmap_kernel_at_el0(const struct 
> arm64_cpu_capabilities *entry,
>   MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
>   { /* sentinel */ }
>   };
> + /* Don't force KPTI for CPUs that are not vulnerable */

This is really a nit, but that comment would make more sense where
is_cpu_meltdown_safe() is called since unmap_kernel_at_el0 is the one
deciding whether to apply KPTI, is_cpu_meltdown_safe() just states
whether the core is safe or not.

Otherwise:

Reviewed-by: Julien Thierry 

Cheers,

Julien

> + if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
> + return true;
> +
> + return false;
> +}
> +
> +static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
> + int scope)
> +{
>   char const *str = "command line option";
>  
>   /*
> @@ -940,8 +949,7 @@ static bool unmap_kernel_at_el0(const struct 
> arm64_cpu_capabilities *entry,
>   if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
>   return true;
>  
> - /* Don't force KPTI for CPUs that are not vulnerable */
> - if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
> + if (is_cpu_meltdown_safe())
>   return false;
>  
>   /* Defer to CPU feature registers */
> 

-- 
Julien Thierry


[PATCH 1/6] arm64: kpti: move check for non-vulnerable CPUs to a function

2018-12-06 Thread Jeremy Linton
From: Mian Yousaf Kaukab 

Add is_cpu_meltdown_safe() which is a whitelist of known safe cores.

Signed-off-by: Mian Yousaf Kaukab 
[Moved location of function]
Signed-off-by: Jeremy Linton 
---
 arch/arm64/kernel/cpufeature.c | 16 
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index aec5ecb85737..242898395f68 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -908,8 +908,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, 
int scope)
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
-static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
-   int scope)
+static bool is_cpu_meltdown_safe(void)
 {
/* List of CPUs that are not vulnerable and don't need KPTI */
static const struct midr_range kpti_safe_list[] = {
@@ -917,6 +916,16 @@ static bool unmap_kernel_at_el0(const struct 
arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
{ /* sentinel */ }
};
+   /* Don't force KPTI for CPUs that are not vulnerable */
+   if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+   return true;
+
+   return false;
+}
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+   int scope)
+{
char const *str = "command line option";
 
/*
@@ -940,8 +949,7 @@ static bool unmap_kernel_at_el0(const struct 
arm64_cpu_capabilities *entry,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;
 
-   /* Don't force KPTI for CPUs that are not vulnerable */
-   if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+   if (is_cpu_meltdown_safe())
return false;
 
/* Defer to CPU feature registers */
-- 
2.17.2



[PATCH 1/6] arm64: kpti: move check for non-vulnerable CPUs to a function

2018-12-06 Thread Jeremy Linton
From: Mian Yousaf Kaukab 

Add is_cpu_meltdown_safe() which is a whitelist of known safe cores.

Signed-off-by: Mian Yousaf Kaukab 
[Moved location of function]
Signed-off-by: Jeremy Linton 
---
 arch/arm64/kernel/cpufeature.c | 16 
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index aec5ecb85737..242898395f68 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -908,8 +908,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, 
int scope)
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
-static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
-   int scope)
+static bool is_cpu_meltdown_safe(void)
 {
/* List of CPUs that are not vulnerable and don't need KPTI */
static const struct midr_range kpti_safe_list[] = {
@@ -917,6 +916,16 @@ static bool unmap_kernel_at_el0(const struct 
arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
{ /* sentinel */ }
};
+   /* Don't force KPTI for CPUs that are not vulnerable */
+   if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+   return true;
+
+   return false;
+}
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+   int scope)
+{
char const *str = "command line option";
 
/*
@@ -940,8 +949,7 @@ static bool unmap_kernel_at_el0(const struct 
arm64_cpu_capabilities *entry,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;
 
-   /* Don't force KPTI for CPUs that are not vulnerable */
-   if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+   if (is_cpu_meltdown_safe())
return false;
 
/* Defer to CPU feature registers */
-- 
2.17.2



[PATCH 1/6] arm64: kpti: move check for non-vulnerable CPUs to a function

2018-08-07 Thread Mian Yousaf Kaukab
Prepare to call it in generic cpu vulnerabilities support.

Signed-off-by: Mian Yousaf Kaukab 
---
 arch/arm64/include/asm/cpufeature.h | 16 
 arch/arm64/kernel/cpufeature.c  |  9 +
 2 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/cpufeature.h 
b/arch/arm64/include/asm/cpufeature.h
index 1717ba1db35d..0b0b5b3e36ba 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -530,6 +530,22 @@ void arm64_set_ssbd_mitigation(bool state);
 static inline void arm64_set_ssbd_mitigation(bool state) {}
 #endif
 
+static inline bool is_cpu_meltdown_safe(void)
+{
+   /* List of CPUs that are not vulnerable and don't need KPTI */
+   static const struct midr_range kpti_safe_list[] = {
+   MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+   MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+   { /* sentinel */ }
+   };
+
+   /* Don't force KPTI for CPUs that are not vulnerable */
+   if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+   return true;
+
+   return false;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e238b7932096..6a94f8bce35a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -865,12 +865,6 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, 
<0: forced off */
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
int scope)
 {
-   /* List of CPUs that are not vulnerable and don't need KPTI */
-   static const struct midr_range kpti_safe_list[] = {
-   MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-   MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
-   { /* sentinel */ }
-   };
char const *str = "command line option";
 
/*
@@ -894,8 +888,7 @@ static bool unmap_kernel_at_el0(const struct 
arm64_cpu_capabilities *entry,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;
 
-   /* Don't force KPTI for CPUs that are not vulnerable */
-   if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+   if (is_cpu_meltdown_safe())
return false;
 
/* Defer to CPU feature registers */
-- 
2.11.0



[PATCH 1/6] arm64: kpti: move check for non-vulnerable CPUs to a function

2018-08-07 Thread Mian Yousaf Kaukab
Prepare to call it in generic cpu vulnerabilities support.

Signed-off-by: Mian Yousaf Kaukab 
---
 arch/arm64/include/asm/cpufeature.h | 16 
 arch/arm64/kernel/cpufeature.c  |  9 +
 2 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/cpufeature.h 
b/arch/arm64/include/asm/cpufeature.h
index 1717ba1db35d..0b0b5b3e36ba 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -530,6 +530,22 @@ void arm64_set_ssbd_mitigation(bool state);
 static inline void arm64_set_ssbd_mitigation(bool state) {}
 #endif
 
+static inline bool is_cpu_meltdown_safe(void)
+{
+   /* List of CPUs that are not vulnerable and don't need KPTI */
+   static const struct midr_range kpti_safe_list[] = {
+   MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+   MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+   { /* sentinel */ }
+   };
+
+   /* Don't force KPTI for CPUs that are not vulnerable */
+   if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+   return true;
+
+   return false;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e238b7932096..6a94f8bce35a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -865,12 +865,6 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, 
<0: forced off */
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
int scope)
 {
-   /* List of CPUs that are not vulnerable and don't need KPTI */
-   static const struct midr_range kpti_safe_list[] = {
-   MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-   MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
-   { /* sentinel */ }
-   };
char const *str = "command line option";
 
/*
@@ -894,8 +888,7 @@ static bool unmap_kernel_at_el0(const struct 
arm64_cpu_capabilities *entry,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;
 
-   /* Don't force KPTI for CPUs that are not vulnerable */
-   if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+   if (is_cpu_meltdown_safe())
return false;
 
/* Defer to CPU feature registers */
-- 
2.11.0