Factor the Meltdown-safe CPU check out of unmap_kernel_at_el0() into a new
is_cpu_meltdown_safe() helper, in preparation for calling it from the generic
CPU vulnerabilities support.

Signed-off-by: Mian Yousaf Kaukab <ykau...@suse.de>
---
 arch/arm64/include/asm/cpufeature.h | 16 ++++++++++++++++
 arch/arm64/kernel/cpufeature.c      |  9 +--------
 2 files changed, 17 insertions(+), 8 deletions(-)
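
For context, a minimal sketch of how the new helper could be used from the
generic CPU vulnerabilities sysfs support (hypothetical caller; the actual
follow-up patch may differ):

#include <linux/cpu.h>
#include <linux/device.h>
#include <asm/cpufeature.h>

/* Hypothetical arm64 override of the generic Meltdown sysfs attribute. */
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	/* CPUs on the kpti_safe_list are known not to be affected. */
	if (is_cpu_meltdown_safe())
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}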

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 1717ba1db35d..0b0b5b3e36ba 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -530,6 +530,22 @@ void arm64_set_ssbd_mitigation(bool state);
 static inline void arm64_set_ssbd_mitigation(bool state) {}
 #endif
 
+static inline bool is_cpu_meltdown_safe(void)
+{
+       /* List of CPUs that are not vulnerable and don't need KPTI */
+       static const struct midr_range kpti_safe_list[] = {
+               MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+               MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+               { /* sentinel */ }
+       };
+
+       /* Don't force KPTI for CPUs that are not vulnerable */
+       if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+               return true;
+
+       return false;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e238b7932096..6a94f8bce35a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -865,12 +865,6 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
                                int scope)
 {
-       /* List of CPUs that are not vulnerable and don't need KPTI */
-       static const struct midr_range kpti_safe_list[] = {
-               MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-               MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
-               { /* sentinel */ }
-       };
        char const *str = "command line option";
 
        /*
@@ -894,8 +888,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
                return true;
 
-       /* Don't force KPTI for CPUs that are not vulnerable */
-       if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+       if (is_cpu_meltdown_safe())
                return false;
 
        /* Defer to CPU feature registers */
-- 
2.11.0
