We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible.
So let's intercept it as early as we can by testing for the
function call number as soon as we've identified an HVC call
coming from the guest.
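
For reference, a minimal sketch of the guest-side invocation this fast
path serves, assuming the SMCCC 1.1 HVC helper from <linux/arm-smccc.h>
(the function name below is illustrative, not part of this patch):

	#include <linux/arm-smccc.h>

	/*
	 * Illustrative only: a guest invokes the branch-predictor
	 * hardening workaround.  ARM_SMCCC_ARCH_WORKAROUND_1 is a fast
	 * call with no arguments beyond the function ID, and HVC is the
	 * conduit that the fast path below intercepts.
	 */
	static void invoke_bp_hardening(void)
	{
		struct arm_smccc_res res;

		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	}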

Signed-off-by: Russell King <rmk+ker...@armlinux.org.uk>
---
 arch/arm/kvm/hyp/hyp-entry.S | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 918a05dd2d63..aa3f9a9837ac 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -16,6 +16,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
@@ -202,7 +203,7 @@ ENDPROC(__hyp_do_panic)
        lsr     r2, r2, #16
        and     r2, r2, #0xff
        cmp     r2, #0
-       bne     guest_trap              @ Guest called HVC
+       bne     guest_hvc_trap          @ Guest called HVC
 
        /*
         * Getting here means host called HVC, we shift parameters and branch
@@ -253,6 +254,20 @@ THUMB(     orr     lr, #1)
        pop     {r2, lr}
        eret
 
+guest_hvc_trap:
+       movw    r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+       movt    r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+       ldr     r0, [sp]                @ Guest's r0
+       teq     r0, r2
+       bne     guest_trap
+       add     sp, sp, #12             @ Pop the r0-r2 saved on entry
+       @ Returns:
+       @ r0 = 0
+       @ r1 = HSR value (perfectly predictable)
+       @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
+       mov     r0, #0
+       eret
+
 guest_trap:
        load_vcpu r0                    @ Load VCPU pointer to r0
 
-- 
2.7.4
