> On 17 Sep 2020, at 08:26, Jan Kiszka <jan.kis...@siemens.com> wrote:
> 
> Subject tag should be "arm64". And the patch should go over next first. I can 
> fix up both.
> 
> On 16.09.20 15:07, Oliver Schwartz wrote:
>> SMC/HVC calls may modify registers x0 to x3. To make sure the compiler
>> doesn't assume input registers to be constant, also mark these registers
>> as output when used as input.
>> Signed-off-by: Oliver Schwartz <oliver.schwa...@gmx.de>
>> ---
>>  hypervisor/arch/arm64/include/asm/smc.h      | 13 ++++++-------
>>  include/arch/arm64/asm/jailhouse_hypercall.h | 20 +++++++++++---------
>>  2 files changed, 17 insertions(+), 16 deletions(-)
>> diff --git a/hypervisor/arch/arm64/include/asm/smc.h b/hypervisor/arch/arm64/include/asm/smc.h
>> index 1a5d5c8..c80fe15 100644
>> --- a/hypervisor/arch/arm64/include/asm/smc.h
>> +++ b/hypervisor/arch/arm64/include/asm/smc.h
>> @@ -28,8 +28,8 @@ static inline long smc_arg1(unsigned long id, unsigned long par1)
>>      register unsigned long __par1 asm("r1") = par1;
>>      asm volatile ("smc #0\n\t"
>> -            : "=r" (__id)
>> -            : "r"(__id), "r"(__par1)
>> +            : "+r" (__id), "+r"(__par1)
>> +            :
>>              : "memory", "x2", "x3");
>>  
> 
> For SMCCC, I'm considering aligning fully with Linux, i.e. converting the 
> remaining register clobbers into early-clobber outputs, but I also have no 
> strong argument for it.

I find the clobber list a lot easier to understand, whereas the =& syntax 
isn't, even after reading up on it. Using =& also requires introducing 
additional dummy variables for the clobbered registers. On the other hand, 
I see the point in staying in line with the kernel.
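
For what it's worth, a rough sketch of the two styles side by side (the 
helper names and exact layout are mine, just for illustration, not taken 
from the Jailhouse or Linux sources):

/* Sketch only: hypothetical helpers showing the two constraint styles. */
static inline long smc_arg1_clobber_list(unsigned long id, unsigned long par1)
{
        register unsigned long x0 asm("x0") = id;
        register unsigned long x1 asm("x1") = par1;

        /* x0/x1 are read-write ("+r"); x2/x3 are plain clobbers. */
        asm volatile("smc #0"
                     : "+r" (x0), "+r" (x1)
                     :
                     : "memory", "x2", "x3");
        return x0;
}

static inline long smc_arg1_early_clobber(unsigned long id, unsigned long par1)
{
        register unsigned long x0 asm("x0") = id;
        register unsigned long x1 asm("x1") = par1;
        register unsigned long x2 asm("x2");    /* dummy outputs, only there */
        register unsigned long x3 asm("x3");    /* to carry the "=&r" marks  */

        /* "=&r" (early clobber) tells the compiler that x2/x3 get
         * overwritten by the asm, which the clobber list in the first
         * variant expresses as well. */
        asm volatile("smc #0"
                     : "+r" (x0), "+r" (x1), "=&r" (x2), "=&r" (x3)
                     :
                     : "memory");
        return x0;
}

In both variants the "+r" constraints on x0/x1 are what this patch adds: they 
tell the compiler that those registers no longer hold their original values 
after the call.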

Oliver

> 
> Ralf, thoughts?
> 
> Jan
> 
>>      return __id;
>> @@ -43,8 +43,8 @@ static inline long smc_arg2(unsigned long id, unsigned long par1,
>>      register unsigned long __par2 asm("r2") = par2;
>>      asm volatile ("smc #0\n\t"
>> -            : "=r" (__id)
>> -            : "r"(__id), "r"(__par1), "r"(__par2)
>> +            : "+r" (__id), "+r"(__par1), "+r"(__par2)
>> +            :
>>              : "memory", "x3");
>>      return __id;
>> @@ -62,9 +62,8 @@ static inline long smc_arg5(unsigned long id, unsigned long par1,
>>      register unsigned long __par5 asm("r5") = par5;
>>      asm volatile ("smc #0\n\t"
>> -            : "=r" (__id)
>> -            : "r"(__id), "r"(__par1), "r"(__par2), "r"(__par3),
>> -              "r"(__par4), "r"(__par5)
>> +            : "+r" (__id), "+r"(__par1), "+r"(__par2), "+r"(__par3)
>> +            : "r"(__par4), "r"(__par5)
>>              : "memory");
>>      return __id;
>> diff --git a/include/arch/arm64/asm/jailhouse_hypercall.h b/include/arch/arm64/asm/jailhouse_hypercall.h
>> index 108d052..a9d13ee 100644
>> --- a/include/arch/arm64/asm/jailhouse_hypercall.h
>> +++ b/include/arch/arm64/asm/jailhouse_hypercall.h
>> @@ -42,6 +42,7 @@
>>  #define JAILHOUSE_CALL_NUM_RESULT   "x0"
>>  #define JAILHOUSE_CALL_ARG1         "x1"
>>  #define JAILHOUSE_CALL_ARG2         "x2"
>> +#define JAILHOUSE_CALL_CLOBBERED    "x3"
>>    /* CPU statistics, arm64-specific part */
>>  #define JAILHOUSE_NUM_CPU_STATS                     JAILHOUSE_GENERIC_CPU_STATS + 5
>> @@ -54,9 +55,10 @@ static inline __u64 jailhouse_call(__u64 num)
>>      asm volatile(
>>              JAILHOUSE_CALL_INS
>> -            : "=r" (num_result)
>> -            : "r" (num_result)
>> -            : "memory");
>> +            : "+r" (num_result)
>> +            :
>> +            : "memory", JAILHOUSE_CALL_ARG1, JAILHOUSE_CALL_ARG2,
>> +              JAILHOUSE_CALL_CLOBBERED);
>>      return num_result;
>>  }
>>  @@ -67,9 +69,9 @@ static inline __u64 jailhouse_call_arg1(__u64 num, __u64 arg1)
>>      asm volatile(
>>              JAILHOUSE_CALL_INS
>> -            : "=r" (num_result)
>> -            : "r" (num_result), "r" (__arg1)
>> -            : "memory");
>> +            : "+r" (num_result), "+r" (__arg1)
>> +            :
>> +            : "memory", JAILHOUSE_CALL_ARG2, JAILHOUSE_CALL_CLOBBERED);
>>      return num_result;
>>  }
>>  @@ -81,9 +83,9 @@ static inline __u64 jailhouse_call_arg2(__u64 num, __u64 arg1, __u64 arg2)
>>      asm volatile(
>>              JAILHOUSE_CALL_INS
>> -            : "=r" (num_result)
>> -            : "r" (num_result), "r" (__arg1), "r" (__arg2)
>> -            : "memory");
>> +            : "+r" (num_result), "+r" (__arg1), "+r" (__arg2)
>> +            :
>> +            : "memory", JAILHOUSE_CALL_CLOBBERED);
>>      return num_result;
>>  }
>>  
