SMC/HVC calls may modify registers r0 to r3. To make sure the compiler
doesn't assume input registers remain constant across the call, convert
these operands to read-write ("+r") output constraints where they were
previously passed as inputs only.

Signed-off-by: Oliver Schwartz <oliver.schwa...@gmx.de>
---
 hypervisor/arch/arm/include/asm/smc.h      | 12 ++++++------
 include/arch/arm/asm/jailhouse_hypercall.h | 20 +++++++++++---------
 2 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/hypervisor/arch/arm/include/asm/smc.h 
b/hypervisor/arch/arm/include/asm/smc.h
index 1e3cdd5..8aba196 100644
--- a/hypervisor/arch/arm/include/asm/smc.h
+++ b/hypervisor/arch/arm/include/asm/smc.h
@@ -18,8 +18,8 @@ static inline long smc(unsigned long id)
        register unsigned long __id asm("r0") = id;
 
        asm volatile ("smc #0\n\t"
-               : "=r" (__id)
-               : "r"(__id)
+               : "+r" (__id)
+               :
                : "memory", "r1", "r2", "r3");
 
        return __id;
@@ -31,8 +31,8 @@ static inline int smc_arg1(unsigned long id, unsigned long 
par1)
        register unsigned long __par1 asm("r1") = par1;
 
        asm volatile ("smc #0\n\t"
-               : "=r" (__id)
-               : "r"(__id), "r"(__par1)
+               : "+r" (__id), "+r"(__par1)
+               :
                : "memory", "r2", "r3");
 
        return __id;
@@ -46,8 +46,8 @@ static inline long smc_arg2(unsigned long id, unsigned long 
par1,
        register unsigned long __par2 asm("r2") = par2;
 
        asm volatile ("smc #0\n\t"
-               : "=r" (__id)
-               : "r"(__id), "r"(__par1), "r"(__par2)
+               : "+r" (__id), "+r"(__par1), "+r"(__par2)
+               :
                : "memory", "r3");
 
        return __id;
diff --git a/include/arch/arm/asm/jailhouse_hypercall.h 
b/include/arch/arm/asm/jailhouse_hypercall.h
index aabd66b..894dd5d 100644
--- a/include/arch/arm/asm/jailhouse_hypercall.h
+++ b/include/arch/arm/asm/jailhouse_hypercall.h
@@ -43,6 +43,7 @@
 #define JAILHOUSE_CALL_NUM_RESULT      "r0"
 #define JAILHOUSE_CALL_ARG1            "r1"
 #define JAILHOUSE_CALL_ARG2            "r2"
+#define JAILHOUSE_CALL_CLOBBERED       "r3"
 
 /* CPU statistics, arm-specific part */
 #define JAILHOUSE_CPU_STAT_VMEXITS_CP15                
JAILHOUSE_GENERIC_CPU_STATS + 5
@@ -56,9 +57,10 @@ static inline __u32 jailhouse_call(__u32 num)
 
        asm volatile(
                JAILHOUSE_CALL_INS
-               : "=r" (num_result)
-               : "r" (num_result)
-               : "memory");
+               : "+r" (num_result)
+               :
+               : "memory", JAILHOUSE_CALL_ARG1, JAILHOUSE_CALL_ARG2, 
+                 JAILHOUSE_CALL_CLOBBERED);
        return num_result;
 }
 
@@ -69,9 +71,9 @@ static inline __u32 jailhouse_call_arg1(__u32 num, __u32 arg1)
 
        asm volatile(
                JAILHOUSE_CALL_INS
-               : "=r" (num_result)
-               : "r" (num_result), "r" (__arg1)
-               : "memory");
+               : "+r" (num_result), "+r" (__arg1)
+               :
+               : "memory", JAILHOUSE_CALL_ARG2, JAILHOUSE_CALL_CLOBBERED);
        return num_result;
 }
 
@@ -83,9 +85,9 @@ static inline __u32 jailhouse_call_arg2(__u32 num, __u32 
arg1, __u32 arg2)
 
        asm volatile(
                JAILHOUSE_CALL_INS
-               : "=r" (num_result)
-               : "r" (num_result), "r" (__arg1), "r" (__arg2)
-               : "memory");
+               : "+r" (num_result), "+r" (__arg1), "+r" (__arg2)
+               :
+               : "memory", JAILHOUSE_CALL_CLOBBERED);
        return num_result;
 }
 
-- 
2.7.4


-- 
You received this message because you are subscribed to the Google Groups 
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to jailhouse-dev+unsubscr...@googlegroups.com.
To view this discussion on the web visit 
https://groups.google.com/d/msgid/jailhouse-dev/BC946534-03C8-4AB3-9473-C6B880E37A88%40gmx.de.

Reply via email to