Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package xen for openSUSE:Factory checked in 
at 2022-03-16 21:30:16
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/xen (Old)
 and      /work/SRC/openSUSE:Factory/.xen.new.25692 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "xen"

Wed Mar 16 21:30:16 2022 rev:314 rq:961753 version:4.16.0_06

Changes:
--------
--- /work/SRC/openSUSE:Factory/xen/xen.changes  2022-03-05 14:44:42.331720428 +0100
+++ /work/SRC/openSUSE:Factory/.xen.new.25692/xen.changes       2022-03-16 21:30:17.503390239 +0100
@@ -1,0 +2,12 @@
+Mon Mar 14 10:14:00 CET 2022 - jbeul...@suse.com
+
+- bsc#1196915 - VUL-0: CVE-2022-0001, CVE-2022-0002, CVE-2021-26401:
+  xen: BHB speculation issues (XSA-398)
+  62278667-Arm-introduce-new-processors.patch
+  62278668-Arm-move-errata-CSV2-check-earlier.patch
+  62278669-Arm-add-ECBHB-and-CLEARBHB-ID-fields.patch
+  6227866a-Arm-Spectre-BHB-handling.patch
+  6227866b-Arm-allow-SMCCC_ARCH_WORKAROUND_3-use.patch
+  6227866c-x86-AMD-cease-using-thunk-lfence.patch
+
+-------------------------------------------------------------------

New:
----
  62278667-Arm-introduce-new-processors.patch
  62278668-Arm-move-errata-CSV2-check-earlier.patch
  62278669-Arm-add-ECBHB-and-CLEARBHB-ID-fields.patch
  6227866a-Arm-Spectre-BHB-handling.patch
  6227866b-Arm-allow-SMCCC_ARCH_WORKAROUND_3-use.patch
  6227866c-x86-AMD-cease-using-thunk-lfence.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ xen.spec ++++++
--- /var/tmp/diff_new_pack.ZyhYrp/_old  2022-03-16 21:30:19.539391781 +0100
+++ /var/tmp/diff_new_pack.ZyhYrp/_new  2022-03-16 21:30:19.543391785 +0100
@@ -189,6 +189,12 @@
 Patch32:        6202afa5-x86-TSX-cope-with-deprecation-on-WHL-R-CFL-R.patch
 Patch33:        6202afa7-x86-CPUID-leaf-7-2-EDX-infra.patch
 Patch34:        6202afa8-x86-Intel-PSFD-for-guests.patch
+Patch35:        62278667-Arm-introduce-new-processors.patch
+Patch36:        62278668-Arm-move-errata-CSV2-check-earlier.patch
+Patch37:        62278669-Arm-add-ECBHB-and-CLEARBHB-ID-fields.patch
+Patch38:        6227866a-Arm-Spectre-BHB-handling.patch
+Patch39:        6227866b-Arm-allow-SMCCC_ARCH_WORKAROUND_3-use.patch
+Patch40:        6227866c-x86-AMD-cease-using-thunk-lfence.patch
 # EMBARGOED security fixes
 # libxc
 Patch301:       libxc-bitmap-long.patch

++++++ 62278667-Arm-introduce-new-processors.patch ++++++
# Commit 35d1b85a6b43483f6bd007d48757434e54743e98
# Date 2022-03-08 16:38:02 +0000
# Author Bertrand Marquis <bertrand.marq...@arm.com>
# Committer Andrew Cooper <andrew.coop...@citrix.com>
xen/arm: Introduce new Arm processors

Add some new processor identifiers to processor.h and sync the Xen
definitions with the state of Linux 5.17 (as declared in
arch/arm64/include/asm/cputype.h).
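
As a rough illustration only (not part of the patch), the MIDR match values
built from these part numbers follow the architectural MIDR_EL1 layout, with
the implementer in bits [31:24] and the part number in bits [15:4]; the helper
below is a stand-in for Xen's MIDR_CPU_MODEL() macro, assuming that layout:

    #include <stdint.h>
    #include <stdio.h>

    #define CPU_IMP_ARM         0x41   /* 'A', the Arm Ltd. implementer code */
    #define CPU_PART_CORTEX_A77 0xD0D  /* one of the part numbers added above */

    /* Compose a MIDR match value: implementer [31:24], part number [15:4]. */
    static inline uint32_t midr_cpu_model(uint32_t implementer, uint32_t partnum)
    {
        return (implementer << 24) | (partnum << 4);
    }

    int main(void)
    {
        printf("Cortex-A77 MIDR model: 0x%08x\n",
               midr_cpu_model(CPU_IMP_ARM, CPU_PART_CORTEX_A77));
        return 0;
    }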

This is part of XSA-398 / CVE-2022-23960.

Signed-off-by: Bertrand Marquis <bertrand.marq...@arm.com>
Acked-by: Julien Grall <jul...@xen.org>

--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -65,6 +65,7 @@
 #define ARM_CPU_PART_CORTEX_A17     0xC0E
 #define ARM_CPU_PART_CORTEX_A15     0xC0F
 #define ARM_CPU_PART_CORTEX_A53     0xD03
+#define ARM_CPU_PART_CORTEX_A35     0xD04
 #define ARM_CPU_PART_CORTEX_A55     0xD05
 #define ARM_CPU_PART_CORTEX_A57     0xD07
 #define ARM_CPU_PART_CORTEX_A72     0xD08
@@ -72,11 +73,20 @@
 #define ARM_CPU_PART_CORTEX_A75     0xD0A
 #define ARM_CPU_PART_CORTEX_A76     0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1    0xD0C
+#define ARM_CPU_PART_CORTEX_A77     0xD0D
+#define ARM_CPU_PART_NEOVERSE_V1    0xD40
+#define ARM_CPU_PART_CORTEX_A78     0xD41
+#define ARM_CPU_PART_CORTEX_X1      0xD44
+#define ARM_CPU_PART_CORTEX_A710    0xD47
+#define ARM_CPU_PART_CORTEX_X2      0xD48
+#define ARM_CPU_PART_NEOVERSE_N2    0xD49
+#define ARM_CPU_PART_CORTEX_A78C    0xD4B
 
 #define MIDR_CORTEX_A12 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A12)
 #define MIDR_CORTEX_A17 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A17)
 #define MIDR_CORTEX_A15 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A15)
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
 #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -84,6 +94,14 @@
 #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
 #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
+#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
+#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_X1  MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
+#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_CORTEX_X2  MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
+#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
 
 /* MPIDR Multiprocessor Affinity Register */
 #define _MPIDR_UP           (30)

++++++ 62278668-Arm-move-errata-CSV2-check-earlier.patch ++++++
# Commit 599616d70eb886b9ad0ef9d6b51693ce790504ba
# Date 2022-03-08 16:38:02 +0000
# Author Bertrand Marquis <bertrand.marq...@arm.com>
# Committer Andrew Cooper <andrew.coop...@citrix.com>
xen/arm: move errata CSV2 check earlier

The CSV2 availability check is currently done after printing the message
that workaround 1 will be used. Move the check earlier to avoid telling the
user that workaround 1 is in use when it is actually skipped because it is
not needed. This also allows the install_bp_hardening_vec function to be
reused for other use cases.

Code that previously returned "true" now returns "0", matching
enable_smccc_arch_workaround_1's int return type and the surrounding code,
which does "return 0" when the workaround is not needed.

This is part of XSA-398 / CVE-2022-23960.

Signed-off-by: Bertrand Marquis <bertrand.marq...@arm.com>
Reviewed-by: Julien Grall <jul...@xen.org>

--- a/xen/arch/arm/cpuerrata.c
+++ b/xen/arch/arm/cpuerrata.c
@@ -103,13 +103,6 @@ install_bp_hardening_vec(const struct ar
     printk(XENLOG_INFO "CPU%u will %s on exception entry\n",
            smp_processor_id(), desc);
 
-    /*
-     * No need to install hardened vector when the processor has
-     * ID_AA64PRF0_EL1.CSV2 set.
-     */
-    if ( cpu_data[smp_processor_id()].pfr64.csv2 )
-        return true;
-
     spin_lock(&bp_lock);
 
     /*
@@ -167,6 +160,13 @@ static int enable_smccc_arch_workaround_
     if ( !entry->matches(entry) )
         return 0;
 
+    /*
+     * No need to install hardened vector when the processor has
+     * ID_AA64PRF0_EL1.CSV2 set.
+     */
+    if ( cpu_data[smp_processor_id()].pfr64.csv2 )
+        return 0;
+
     if ( smccc_ver < SMCCC_VERSION(1, 1) )
         goto warn;
 

++++++ 62278669-Arm-add-ECBHB-and-CLEARBHB-ID-fields.patch ++++++
# Commit 4b68d12d98b8790d8002fcc2c25a9d713374a4d7
# Date 2022-03-08 16:38:02 +0000
# Author Bertrand Marquis <bertrand.marq...@arm.com>
# Committer Andrew Cooper <andrew.coop...@citrix.com>
xen/arm: Add ECBHB and CLEARBHB ID fields

Introduce the ID coprocessor register ID_AA64ISAR2_EL1.
Add definitions in cpufeature and sysregs for the ECBHB field in mmfr1 and
the CLEARBHB field in isar2.
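
For illustration (not part of the patch), each of these ID register fields is
a 4-bit value at the given shift; a minimal sketch of extracting them, using
the shifts defined below but a made-up accessor rather than Xen's, could look
like this:

    #include <stdint.h>
    #include <stdio.h>

    #define ID_AA64MMFR1_ECBHB_SHIFT     60  /* as defined in the patch */
    #define ID_AA64ISAR2_CLEARBHB_SHIFT  28  /* as defined in the patch */

    /* Each ID register field is 4 bits wide. */
    static unsigned int id_field(uint64_t reg, unsigned int shift)
    {
        return (unsigned int)((reg >> shift) & 0xf);
    }

    int main(void)
    {
        uint64_t mmfr1 = (uint64_t)1 << ID_AA64MMFR1_ECBHB_SHIFT; /* sample value */
        uint64_t isar2 = 0;                                       /* sample value */

        printf("ECBHB = %u, CLEARBHB = %u\n",
               id_field(mmfr1, ID_AA64MMFR1_ECBHB_SHIFT),
               id_field(isar2, ID_AA64ISAR2_CLEARBHB_SHIFT));
        return 0;
    }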

This is part of XSA-398 / CVE-2022-23960.

Signed-off-by: Bertrand Marquis <bertrand.marq...@arm.com>
Acked-by: Julien Grall <jul...@xen.org>

--- a/xen/arch/arm/cpufeature.c
+++ b/xen/arch/arm/cpufeature.c
@@ -122,6 +122,7 @@ void identify_cpu(struct cpuinfo_arm *c)
 
     c->isa64.bits[0] = READ_SYSREG(ID_AA64ISAR0_EL1);
     c->isa64.bits[1] = READ_SYSREG(ID_AA64ISAR1_EL1);
+    c->isa64.bits[2] = READ_SYSREG(ID_AA64ISAR2_EL1);
 
     c->zfr64.bits[0] = READ_SYSREG(ID_AA64ZFR0_EL1);
 
--- a/xen/include/asm-arm/arm64/sysregs.h
+++ b/xen/include/asm-arm/arm64/sysregs.h
@@ -84,6 +84,9 @@
 #ifndef ID_DFR1_EL1
 #define ID_DFR1_EL1                 S3_0_C0_C3_5
 #endif
+#ifndef ID_AA64ISAR2_EL1
+#define ID_AA64ISAR2_EL1            S3_0_C0_C6_2
+#endif
 
 /* ID registers (imported from arm64/include/asm/sysreg.h in Linux) */
 
@@ -139,6 +142,9 @@
 #define ID_AA64ISAR1_GPI_NI                     0x0
 #define ID_AA64ISAR1_GPI_IMP_DEF                0x1
 
+/* id_aa64isar2 */
+#define ID_AA64ISAR2_CLEARBHB_SHIFT 28
+
 /* id_aa64pfr0 */
 #define ID_AA64PFR0_CSV3_SHIFT       60
 #define ID_AA64PFR0_CSV2_SHIFT       56
@@ -232,6 +238,7 @@
 #define ID_AA64MMFR0_PARANGE_52        0x6
 
 /* id_aa64mmfr1 */
+#define ID_AA64MMFR1_ECBHB_SHIFT     60
 #define ID_AA64MMFR1_ETS_SHIFT       36
 #define ID_AA64MMFR1_TWED_SHIFT      32
 #define ID_AA64MMFR1_XNX_SHIFT       28
--- a/xen/include/asm-arm/cpufeature.h
+++ b/xen/include/asm-arm/cpufeature.h
@@ -243,14 +243,15 @@ struct cpuinfo_arm {
             unsigned long lo:4;
             unsigned long pan:4;
             unsigned long __res1:8;
-            unsigned long __res2:32;
+            unsigned long __res2:28;
+            unsigned long ecbhb:4;
 
             unsigned long __res3:64;
         };
     } mm64;
 
     union {
-        register_t bits[2];
+        register_t bits[3];
         struct {
             /* ISAR0 */
             unsigned long __res0:4;
@@ -286,6 +287,12 @@ struct cpuinfo_arm {
             unsigned long dgh:4;
             unsigned long i8mm:4;
             unsigned long __res2:8;
+
+            /* ISAR2 */
+            unsigned long __res3:28;
+            unsigned long clearbhb:4;
+
+            unsigned long __res4:32;
         };
     } isa64;
 

++++++ 6227866a-Arm-Spectre-BHB-handling.patch ++++++
# Commit 62c91eb66a2904eefb1d1d9642e3697a1e3c3a3c
# Date 2022-03-08 16:38:02 +0000
# Author Rahul Singh <rahul.si...@arm.com>
# Committer Andrew Cooper <andrew.coop...@citrix.com>
xen/arm: Add Spectre BHB handling

This commit adds Spectre BHB handling to Xen on Arm. It introduces new
alternative code to be executed during exception entry:
- an SMCCC workaround 3 call
- a loop workaround (with 8, 24 or 32 iterations)
- use of the new clearbhb instruction

cpuerrata is modified by this patch to apply the required workaround on
CPUs affected by Spectre BHB when CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR is
enabled.

To do this, the mechanism previously used to apply SMCCC workaround 1 is
reused, and new alternative code to be copied into the exception handler is
introduced.

To define the type of workaround required by a processor, 4 new CPU
capabilities are introduced (one for each loop count and one for SMCCC
workaround 3).

When a processor is affected, enable_spectre_bhb_workaround is called, and
unless the processor has CSV2 set to 3 or the ECBHB feature (either of which
would mean the hardware already does what is required), the proper code is
enabled at exception entry.

If workaround 3 is not supported by the firmware, we enable workaround 1
when possible, as it also mitigates Spectre BHB on systems without CSV2.
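
As a sketch of the selection order described above (plain C, illustrative
only; the inputs stand in for Xen's cpu_data fields, cpus_have_cap() checks
and firmware probes, not the patch's actual interfaces):

    #include <stdbool.h>

    enum bhb_action {
        BHB_NONE,       /* hardware already safe (CSV2 == 3 or ECBHB) */
        BHB_CLEARBHB,   /* dedicated clearbhb instruction */
        BHB_LOOP_8, BHB_LOOP_24, BHB_LOOP_32,
        BHB_SMCCC_WA3,  /* firmware ARCH_WORKAROUND_3 */
        BHB_SMCCC_WA1,  /* fall back to ARCH_WORKAROUND_1 */
        BHB_WARN,       /* nothing available: ask for a firmware update */
    };

    static enum bhb_action pick_bhb_mitigation(unsigned int csv2, bool ecbhb,
                                               bool clearbhb, bool cap_loop8,
                                               bool cap_loop24, bool cap_loop32,
                                               bool cap_smccc3, bool fw_wa3,
                                               bool fw_wa1)
    {
        if ( csv2 == 3 || ecbhb )
            return BHB_NONE;
        if ( clearbhb )
            return BHB_CLEARBHB;
        if ( cap_loop8 )
            return BHB_LOOP_8;
        if ( cap_loop24 )
            return BHB_LOOP_24;
        if ( cap_loop32 )
            return BHB_LOOP_32;
        if ( cap_smccc3 && fw_wa3 )
            return BHB_SMCCC_WA3;
        if ( cap_smccc3 && fw_wa1 )
            return BHB_SMCCC_WA1;
        return BHB_WARN;
    }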

This is part of XSA-398 / CVE-2022-23960.

Signed-off-by: Bertrand Marquis <bertrand.marq...@arm.com>
Signed-off-by: Rahul Singh <rahul.si...@arm.com>
Acked-by: Julien Grall <jul...@xen.org>

--- a/xen/arch/arm/arm64/bpi.S
+++ b/xen/arch/arm/arm64/bpi.S
@@ -58,16 +58,42 @@ ENTRY(__bp_harden_hyp_vecs_start)
     .endr
 ENTRY(__bp_harden_hyp_vecs_end)
 
-ENTRY(__smccc_workaround_1_smc_start)
+.macro mitigate_spectre_bhb_loop count
+ENTRY(__mitigate_spectre_bhb_loop_start_\count)
+    stp     x0, x1, [sp, #-16]!
+    mov     x0, \count
+.Lspectre_bhb_loop\@:
+    b   . + 4
+    subs    x0, x0, #1
+    b.ne    .Lspectre_bhb_loop\@
+    sb
+    ldp     x0, x1, [sp], #16
+ENTRY(__mitigate_spectre_bhb_loop_end_\count)
+.endm
+
+.macro smccc_workaround num smcc_id
+ENTRY(__smccc_workaround_smc_start_\num)
     sub     sp, sp, #(8 * 4)
     stp     x0, x1, [sp, #(8 * 2)]
     stp     x2, x3, [sp, #(8 * 0)]
-    mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
+    mov     w0, \smcc_id
     smc     #0
     ldp     x2, x3, [sp, #(8 * 0)]
     ldp     x0, x1, [sp, #(8 * 2)]
     add     sp, sp, #(8 * 4)
-ENTRY(__smccc_workaround_1_smc_end)
+ENTRY(__smccc_workaround_smc_end_\num)
+.endm
+
+ENTRY(__mitigate_spectre_bhb_clear_insn_start)
+    clearbhb
+    isb
+ENTRY(__mitigate_spectre_bhb_clear_insn_end)
+
+mitigate_spectre_bhb_loop 8
+mitigate_spectre_bhb_loop 24
+mitigate_spectre_bhb_loop 32
+smccc_workaround 1, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
+smccc_workaround 3, #ARM_SMCCC_ARCH_WORKAROUND_3_FID
 
 /*
  * Local variables:
--- a/xen/arch/arm/cpuerrata.c
+++ b/xen/arch/arm/cpuerrata.c
@@ -145,7 +145,16 @@ install_bp_hardening_vec(const struct ar
     return ret;
 }
 
-extern char __smccc_workaround_1_smc_start[], __smccc_workaround_1_smc_end[];
+extern char __smccc_workaround_smc_start_1[], __smccc_workaround_smc_end_1[];
+extern char __smccc_workaround_smc_start_3[], __smccc_workaround_smc_end_3[];
+extern char __mitigate_spectre_bhb_clear_insn_start[],
+            __mitigate_spectre_bhb_clear_insn_end[];
+extern char __mitigate_spectre_bhb_loop_start_8[],
+            __mitigate_spectre_bhb_loop_end_8[];
+extern char __mitigate_spectre_bhb_loop_start_24[],
+            __mitigate_spectre_bhb_loop_end_24[];
+extern char __mitigate_spectre_bhb_loop_start_32[],
+            __mitigate_spectre_bhb_loop_end_32[];
 
 static int enable_smccc_arch_workaround_1(void *data)
 {
@@ -176,8 +185,8 @@ static int enable_smccc_arch_workaround_
     if ( (int)res.a0 < 0 )
         goto warn;
 
-    return !install_bp_hardening_vec(entry,__smccc_workaround_1_smc_start,
-                                     __smccc_workaround_1_smc_end,
+    return !install_bp_hardening_vec(entry,__smccc_workaround_smc_start_1,
+                                     __smccc_workaround_smc_end_1,
                                      "call ARM_SMCCC_ARCH_WORKAROUND_1");
 
 warn:
@@ -187,6 +196,93 @@ warn:
     return 0;
 }
 
+/*
+ * Spectre BHB Mitigation
+ *
+ * CPU is either:
+ * - Having CVS2.3 so it is not affected.
+ * - Having ECBHB and is clearing the branch history buffer when an exception
+ *   to a different exception level is happening so no mitigation is needed.
+ * - Mitigating using a loop on exception entry (number of loop depending on
+ *   the CPU).
+ * - Mitigating using the firmware.
+ */
+static int enable_spectre_bhb_workaround(void *data)
+{
+    const struct arm_cpu_capabilities *entry = data;
+
+    /*
+     * Enable callbacks are called on every CPU based on the capabilities, so
+     * double-check whether the CPU matches the entry.
+     */
+    if ( !entry->matches(entry) )
+        return 0;
+
+    if ( cpu_data[smp_processor_id()].pfr64.csv2 == 3 )
+        return 0;
+
+    if ( cpu_data[smp_processor_id()].mm64.ecbhb )
+        return 0;
+
+    if ( cpu_data[smp_processor_id()].isa64.clearbhb )
+        return !install_bp_hardening_vec(entry,
+                                    __mitigate_spectre_bhb_clear_insn_start,
+                                    __mitigate_spectre_bhb_clear_insn_end,
+                                     "use clearBHB instruction");
+
+    /* Apply solution depending on hwcaps set on arm_errata */
+    if ( cpus_have_cap(ARM_WORKAROUND_BHB_LOOP_8) )
+        return !install_bp_hardening_vec(entry,
+                                         __mitigate_spectre_bhb_loop_start_8,
+                                         __mitigate_spectre_bhb_loop_end_8,
+                                         "use 8 loops workaround");
+
+    if ( cpus_have_cap(ARM_WORKAROUND_BHB_LOOP_24) )
+        return !install_bp_hardening_vec(entry,
+                                         __mitigate_spectre_bhb_loop_start_24,
+                                         __mitigate_spectre_bhb_loop_end_24,
+                                         "use 24 loops workaround");
+
+    if ( cpus_have_cap(ARM_WORKAROUND_BHB_LOOP_32) )
+        return !install_bp_hardening_vec(entry,
+                                         __mitigate_spectre_bhb_loop_start_32,
+                                         __mitigate_spectre_bhb_loop_end_32,
+                                         "use 32 loops workaround");
+
+    if ( cpus_have_cap(ARM_WORKAROUND_BHB_SMCC_3) )
+    {
+        struct arm_smccc_res res;
+
+        if ( smccc_ver < SMCCC_VERSION(1, 1) )
+            goto warn;
+
+        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID,
+                          ARM_SMCCC_ARCH_WORKAROUND_3_FID, &res);
+        /* The return value is in the lower 32-bits. */
+        if ( (int)res.a0 < 0 )
+        {
+            /*
+             * On processor affected with CSV2=0, workaround 1 will mitigate
+             * both Spectre v2 and BHB so use it when available
+             */
+            if ( enable_smccc_arch_workaround_1(data) )
+                return 1;
+
+            goto warn;
+        }
+
+        return !install_bp_hardening_vec(entry,__smccc_workaround_smc_start_3,
+                                         __smccc_workaround_smc_end_3,
+                                         "call ARM_SMCCC_ARCH_WORKAROUND_3");
+    }
+
+warn:
+    printk_once("**** No support for any spectre BHB workaround.  ****\n"
+                "**** Please update your firmware.                ****\n");
+
+    return 0;
+}
+
 #endif /* CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR */
 
 /* Hardening Branch predictor code for Arm32 */
@@ -446,19 +542,77 @@ static const struct arm_cpu_capabilities
     },
     {
         .capability = ARM_HARDEN_BRANCH_PREDICTOR,
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+        MIDR_RANGE(MIDR_CORTEX_A72, 0, 1 << MIDR_VARIANT_SHIFT),
         .enable = enable_smccc_arch_workaround_1,
     },
     {
-        .capability = ARM_HARDEN_BRANCH_PREDICTOR,
+        .capability = ARM_WORKAROUND_BHB_SMCC_3,
         MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-        .enable = enable_smccc_arch_workaround_1,
+        .enable = enable_spectre_bhb_workaround,
     },
     {
-        .capability = ARM_HARDEN_BRANCH_PREDICTOR,
+        .capability = ARM_WORKAROUND_BHB_SMCC_3,
         MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-        .enable = enable_smccc_arch_workaround_1,
+        .enable = enable_spectre_bhb_workaround,
+    },
+    /* spectre BHB */
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_8,
+        MIDR_RANGE(MIDR_CORTEX_A72, 1 << MIDR_VARIANT_SHIFT,
+                   (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)),
+        .enable = enable_spectre_bhb_workaround,
+    },
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_24,
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+        .enable = enable_spectre_bhb_workaround,
+    },
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_24,
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+        .enable = enable_spectre_bhb_workaround,
+    },
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_32,
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+        .enable = enable_spectre_bhb_workaround,
+    },
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_32,
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+        .enable = enable_spectre_bhb_workaround,
     },
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_32,
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+        .enable = enable_spectre_bhb_workaround,
+    },
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_32,
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+        .enable = enable_spectre_bhb_workaround,
+    },
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_32,
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+        .enable = enable_spectre_bhb_workaround,
+    },
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_24,
+        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+        .enable = enable_spectre_bhb_workaround,
+    },
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_32,
+        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+        .enable = enable_spectre_bhb_workaround,
+    },
+    {
+        .capability = ARM_WORKAROUND_BHB_LOOP_32,
+        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+        .enable = enable_spectre_bhb_workaround,
+    },
+
 #endif
 #ifdef CONFIG_ARM32_HARDEN_BRANCH_PREDICTOR
     {
--- a/xen/include/asm-arm/arm64/macros.h
+++ b/xen/include/asm-arm/arm64/macros.h
@@ -27,6 +27,11 @@
         sb
     .endm
 
+    /* clearbhb instruction clearing the branch history */
+    .macro clearbhb
+        hint    #22
+    .endm
+
 /*
  * Register aliases.
  */
--- a/xen/include/asm-arm/cpufeature.h
+++ b/xen/include/asm-arm/cpufeature.h
@@ -63,8 +63,12 @@
 #define ARM64_WORKAROUND_AT_SPECULATE 9
 #define ARM_WORKAROUND_858921 10
 #define ARM64_WORKAROUND_REPEAT_TLBI 11
+#define ARM_WORKAROUND_BHB_LOOP_8 12
+#define ARM_WORKAROUND_BHB_LOOP_24 13
+#define ARM_WORKAROUND_BHB_LOOP_32 14
+#define ARM_WORKAROUND_BHB_SMCC_3 15
 
-#define ARM_NCAPS           12
+#define ARM_NCAPS           16
 
 #ifndef __ASSEMBLY__
 
--- a/xen/include/asm-arm/smccc.h
+++ b/xen/include/asm-arm/smccc.h
@@ -334,6 +334,12 @@ void __arm_smccc_1_0_smc(register_t a0,
                        ARM_SMCCC_OWNER_ARCH,        \
                        0x7FFF)
 
+#define ARM_SMCCC_ARCH_WORKAROUND_3_FID             \
+    ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,         \
+                       ARM_SMCCC_CONV_32,           \
+                       ARM_SMCCC_OWNER_ARCH,        \
+                       0x3FFF)
+
 /* SMCCC error codes */
 #define ARM_SMCCC_NOT_REQUIRED          (-2)
 #define ARM_SMCCC_ERR_UNKNOWN_FUNCTION  (-1)

++++++ 6227866b-Arm-allow-SMCCC_ARCH_WORKAROUND_3-use.patch ++++++
# Commit c0a56ea0fd92ecb471936b7355ddbecbaea3707c
# Date 2022-03-08 16:38:02 +0000
# Author Bertrand Marquis <bertrand.marq...@arm.com>
# Committer Andrew Cooper <andrew.coop...@citrix.com>
xen/arm: Allow to discover and use SMCCC_ARCH_WORKAROUND_3

Allow guests to discover whether or not SMCCC_ARCH_WORKAROUND_3 is
supported, and create a fastpath in the code to handle guest requests to
perform the workaround.

SMCCC_ARCH_WORKAROUND_3 will be called by the guest to flush the branch
history, so we want the handling to be as fast as possible.

As the mitigation is applied on every guest exit, we can check for the
call before saving all context and return very early.

This is part of XSA-398 / CVE-2022-23960.

Signed-off-by: Bertrand Marquis <bertrand.marq...@arm.com>
Reviewed-by: Julien Grall <jul...@xen.org>

--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -336,16 +336,26 @@ guest_sync:
         cbnz    x1, guest_sync_slowpath         /* should be 0 for HVC #0 */
 
         /*
-         * Fastest path possible for ARM_SMCCC_ARCH_WORKAROUND_1.
-         * The workaround has already been applied on the exception
+         * Fastest path possible for ARM_SMCCC_ARCH_WORKAROUND_1 and
+         * ARM_SMCCC_ARCH_WORKAROUND_3.
+         * The workaround needed has already been applied on the exception
          * entry from the guest, so let's quickly get back to the guest.
          *
          * Note that eor is used because the function identifier cannot
          * be encoded as an immediate for cmp.
          */
         eor     w0, w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
-        cbnz    w0, check_wa2
+        cbz     w0, fastpath_out_workaround
 
+        /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
+        eor     w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_1_FID ^ ARM_SMCCC_ARCH_WORKAROUND_2_FID)
+        cbz     w0, wa2_ssbd
+
+        /* Fastpath out for ARM_SMCCC_ARCH_WORKAROUND_3 */
+        eor     w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_2_FID ^ ARM_SMCCC_ARCH_WORKAROUND_3_FID)
+        cbnz    w0, guest_sync_slowpath
+
+fastpath_out_workaround:
         /*
          * Clobber both x0 and x1 to prevent leakage. Note that thanks
          * the eor, x0 = 0.
@@ -354,10 +364,7 @@ guest_sync:
         eret
         sb
 
-check_wa2:
-        /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
-        eor     w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_1_FID ^ ARM_SMCCC_ARCH_WORKAROUND_2_FID)
-        cbnz    w0, guest_sync_slowpath
+wa2_ssbd:
 #ifdef CONFIG_ARM_SSBD
 alternative_cb arm_enable_wa2_handling
         b       wa2_end
--- a/xen/arch/arm/vsmc.c
+++ b/xen/arch/arm/vsmc.c
@@ -124,6 +124,10 @@ static bool handle_arch(struct cpu_user_
                 break;
             }
             break;
+        case ARM_SMCCC_ARCH_WORKAROUND_3_FID:
+            if ( cpus_have_cap(ARM_WORKAROUND_BHB_SMCC_3) )
+                ret = 0;
+            break;
         }
 
         set_user_reg(regs, 0, ret);
@@ -132,6 +136,7 @@ static bool handle_arch(struct cpu_user_
     }
 
     case ARM_SMCCC_ARCH_WORKAROUND_1_FID:
+    case ARM_SMCCC_ARCH_WORKAROUND_3_FID:
         /* No return value */
         return true;
 

++++++ 6227866c-x86-AMD-cease-using-thunk-lfence.patch ++++++
# Commit 8d03080d2a339840d3a59e0932a94f804e45110d
# Date 2022-03-08 16:38:02 +0000
# Author Andrew Cooper <andrew.coop...@citrix.com>
# Committer Andrew Cooper <andrew.coop...@citrix.com>
x86/spec-ctrl: Cease using thunk=lfence on AMD

AMD have updated their Spectre v2 guidance, and lfence/jmp is no longer
considered safe.  AMD are recommending using retpoline everywhere.

Retpoline is incompatible with CET.  All CET-capable hardware has efficient
IBRS (specifically, not something retrofitted in microcode), so use IBRS (and
STIBP for consistency's sake).

This is a logical change on AMD, but not on Intel as the default calculations
would end up with these settings anyway.  Leave behind a message if IBRS is
found to be missing.

Also update the default heuristics to never select THUNK_LFENCE.  This causes
AMD CPUs to change their default to retpoline.
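
A rough sketch of the resulting default heuristic (illustrative C, with
placeholder names rather than Xen's exact symbols):

    #include <stdbool.h>

    enum thunk { THUNK_DEFAULT, THUNK_RETPOLINE, THUNK_LFENCE, THUNK_JMP };

    static void pick_bti_defaults(bool has_shstk, bool retpoline_is_safe,
                                  bool has_spec_ctrl, enum thunk *thunk,
                                  bool *use_ibrs)
    {
        *thunk = THUNK_DEFAULT;
        *use_ibrs = false;

        if ( has_shstk )                 /* CET shadow stacks: no retpoline */
        {
            *thunk = THUNK_JMP;
            *use_ibrs = has_spec_ctrl;   /* all CET hardware has efficient IBRS */
            return;
        }

        /* THUNK_LFENCE is no longer selected by default on any vendor. */
        if ( retpoline_is_safe )
            *thunk = THUNK_RETPOLINE;    /* now the default on AMD as well */
        else if ( has_spec_ctrl )
            *use_ibrs = true;
    }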

Also update the printed message to include the AMD MSR_SPEC_CTRL settings,
and STIBP, now that we set it for consistency's sake.

This is part of XSA-398 / CVE-2021-26401.

Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>

--- a/docs/misc/xen-command-line.pandoc
+++ b/docs/misc/xen-command-line.pandoc
@@ -2269,9 +2269,9 @@ to use.
 
 If Xen was compiled with INDIRECT_THUNK support, `bti-thunk=` can be used to
 select which of the thunks gets patched into the `__x86_indirect_thunk_%reg`
-locations.  The default thunk is `retpoline` (generally preferred for Intel
-hardware), with the alternatives being `jmp` (a `jmp *%reg` gadget, minimal
-overhead), and `lfence` (an `lfence; jmp *%reg` gadget, preferred for AMD).
+locations.  The default thunk is `retpoline` (generally preferred), with the
+alternatives being `jmp` (a `jmp *%reg` gadget, minimal overhead), and
+`lfence` (an `lfence; jmp *%reg` gadget).
 
 On hardware supporting IBRS (Indirect Branch Restricted Speculation), the
 `ibrs=` option can be used to force or prevent Xen using the feature itself.
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -367,14 +367,19 @@ static void __init print_details(enum in
                "\n");
 
     /* Settings for Xen's protection, irrespective of guests. */
-    printk("  Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s%s, Other:%s%s%s%s%s\n",
+    printk("  Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s%s%s, Other:%s%s%s%s%s\n",
            thunk == THUNK_NONE      ? "N/A" :
            thunk == THUNK_RETPOLINE ? "RETPOLINE" :
            thunk == THUNK_LFENCE    ? "LFENCE" :
            thunk == THUNK_JMP       ? "JMP" : "?",
-           !boot_cpu_has(X86_FEATURE_IBRSB)          ? "No" :
+           (!boot_cpu_has(X86_FEATURE_IBRSB) &&
+            !boot_cpu_has(X86_FEATURE_IBRS))         ? "No" :
            (default_xen_spec_ctrl & SPEC_CTRL_IBRS)  ? "IBRS+" :  "IBRS-",
-           !boot_cpu_has(X86_FEATURE_SSBD)           ? "" :
+           (!boot_cpu_has(X86_FEATURE_STIBP) &&
+            !boot_cpu_has(X86_FEATURE_AMD_STIBP))    ? "" :
+           (default_xen_spec_ctrl & SPEC_CTRL_STIBP) ? " STIBP+" : " STIBP-",
+           (!boot_cpu_has(X86_FEATURE_SSBD) &&
+            !boot_cpu_has(X86_FEATURE_AMD_SSBD))     ? "" :
            (default_xen_spec_ctrl & SPEC_CTRL_SSBD)  ? " SSBD+" : " SSBD-",
            !(caps & ARCH_CAPS_TSX_CTRL)              ? "" :
            (opt_tsx & 1)                             ? " TSX+" : " TSX-",
@@ -945,10 +950,23 @@ void __init init_speculation_mitigations
     /*
      * First, disable the use of retpolines if Xen is using shadow stacks, as
      * they are incompatible.
+     *
+     * In the absence of retpolines, IBRS needs to be used for speculative
+     * safety.  All CET-capable hardware has efficient IBRS.
      */
-    if ( cpu_has_xen_shstk &&
-         (opt_thunk == THUNK_DEFAULT || opt_thunk == THUNK_RETPOLINE) )
-        thunk = THUNK_JMP;
+    if ( cpu_has_xen_shstk )
+    {
+        if ( !has_spec_ctrl )
+            printk(XENLOG_WARNING "?!? CET active, but no MSR_SPEC_CTRL?\n");
+        else if ( opt_ibrs == -1 )
+        {
+            opt_ibrs = ibrs = true;
+            default_xen_spec_ctrl |= SPEC_CTRL_IBRS | SPEC_CTRL_STIBP;
+        }
+
+        if ( opt_thunk == THUNK_DEFAULT || opt_thunk == THUNK_RETPOLINE )
+            thunk = THUNK_JMP;
+    }
 
     /*
      * Has the user specified any custom BTI mitigations?  If so, follow their
@@ -968,16 +986,10 @@ void __init init_speculation_mitigations
         if ( IS_ENABLED(CONFIG_INDIRECT_THUNK) )
         {
             /*
-             * AMD's recommended mitigation is to set lfence as being dispatch
-             * serialising, and to use IND_THUNK_LFENCE.
-             */
-            if ( cpu_has_lfence_dispatch )
-                thunk = THUNK_LFENCE;
-            /*
-             * On Intel hardware, we'd like to use retpoline in preference to
+             * On all hardware, we'd like to use retpoline in preference to
              * IBRS, but only if it is safe on this hardware.
              */
-            else if ( retpoline_safe(caps) )
+            if ( retpoline_safe(caps) )
                 thunk = THUNK_RETPOLINE;
             else if ( has_spec_ctrl )
                 ibrs = true;
