From: Ralf Ramsauer <[email protected]>

In case of an EL1 abort, call the mitigation, and try to return to the
guest as fast as possible if it explicitly called the mitigation.
Otherwise, handle the trap as usual.

The whole hot path of the workaround fits into the interrupt vector
slot, we just have to outsource the regular exit path to el1_trap.

Signed-off-by: Ralf Ramsauer <[email protected]>
---
 hypervisor/arch/arm64/entry.S               | 39 ++++++++++++++++++++-
 hypervisor/arch/arm64/include/asm/sysregs.h |  3 +-
 2 files changed, 40 insertions(+), 2 deletions(-)

diff --git a/hypervisor/arch/arm64/entry.S b/hypervisor/arch/arm64/entry.S
index 360cf801..ced5d809 100644
--- a/hypervisor/arch/arm64/entry.S
+++ b/hypervisor/arch/arm64/entry.S
@@ -316,6 +316,9 @@ bootstrap_vectors:
        b       __vmreturn
 .endm
 
+el1_trap:
+       handle_vmexit_late arch_handle_trap
+
 .macro handle_vmexit handler
        .align  7
        handle_vmexit_early
@@ -333,6 +336,40 @@ bootstrap_vectors:
        handle_vmexit_late \handler
 .endm
 
+.macro handle_abort_fastpath
+       .align  7
+       handle_vmexit_early
+
+       /* Save old x0, which might contain guest SMC's function ID */
+       mov     x4, x0
+
+       mov     w0, #SMCCC_ARCH_WORKAROUND_1
+       smc     #0
+
+       mrs     x0, esr_el2
+       lsr     x0, x0, #ESR_EC_SHIFT
+       cmp     x0, #ESR_EC_SMC64
+       b.ne    el1_trap /* normal trap if !SMC64 */
+
+       /* w4 holds the guest's function_id */
+       eor     w0, w4, #SMCCC_ARCH_WORKAROUND_1
+       cbnz    w0, el1_trap /* normal trap if !SMCCC_ARCH_WORKAROUND_1 */
+
+       /* Here we land if the guest called SMCCC_ARCH_WORKAROUND_1 */
+
+       /*
+        * Skip guest's instruction, it must have been 'smc #0' and must have
+        * had 4 bytes */
+       mrs     x0, elr_el2
+       add     x0, x0, #4
+       msr     elr_el2, x0
+
+       /* beam me up, we only need to restore x4 and sp */
+       ldr     x4, [sp, #(2 * 16 + 1 * 8)]
+       add     sp, sp, #(16 * 16)
+       eret
+.endm
+
 /*
  * These are the default vectors. They are used on early startup and if no
  * Spectre v2 mitigation is available.
@@ -373,7 +410,7 @@ hyp_vectors_hardened:
        ventry  .
        ventry  .
 
-       handle_vmexit_hardened arch_handle_trap
+       handle_abort_fastpath
        handle_vmexit_hardened irqchip_handle_irq
        ventry  .
        ventry  .
diff --git a/hypervisor/arch/arm64/include/asm/sysregs.h 
b/hypervisor/arch/arm64/include/asm/sysregs.h
index 12af6a95..f748396a 100644
--- a/hypervisor/arch/arm64/include/asm/sysregs.h
+++ b/hypervisor/arch/arm64/include/asm/sysregs.h
@@ -107,7 +107,8 @@
 #define HCR_VM_BIT     (1u << 0)
 
 /* exception class */
-#define ESR_EC(esr)            GET_FIELD((esr), 31, 26)
+#define ESR_EC_SHIFT           (26)
+#define ESR_EC(esr)            GET_FIELD((esr), 31, ESR_EC_SHIFT)
 /* instruction length */
 #define ESR_IL(esr)            GET_FIELD((esr), 25, 25)
 /* Instruction specific syndrome */
-- 
2.20.1

-- 
You received this message because you are subscribed to the Google Groups 
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to