Re: [Xen-devel] [PATCH v2 17/19] xen/arm: Isolate the SError between the context switch of 2 vCPUs

2017-03-30 Thread Wei Chen
On 2017/3/31 6:00, Julien Grall wrote:
>
>
> On 30/03/2017 22:49, Stefano Stabellini wrote:
>> On Thu, 30 Mar 2017, Wei Chen wrote:
>>> +/*
>>> + * If the SErrors option is "FORWARD", we have to prevent forwarding
>>> + * serror to wrong vCPU. So before context switch, we have to use the
>>> + * synchronize_serror to guarantee that the pending serror would be
>>> + * caught by current vCPU.
>>> + *
>>> + * The SKIP_CTXT_SWITCH_SERROR_SYNC will be set to cpu_hwcaps when the
>>> + * SErrors option is NOT "FORWARD".
>>> + */
>>> +asm volatile(ALTERNATIVE("bl synchronize_serror",
>>> + "nop",
>>> + SKIP_CTXT_SWITCH_SERROR_SYNC));
>>
>>
>> This is good, but here you need to add:
>>
>>   barrier();
>>
>> because the compiler is free to reorder even asm volatile instructions
>> (it could move the asm volatile after __context_switch theoretically).
>
> ... or it could be moved beforehand because there is no barrier... What
> you want to use is asm volatile(ALTERNATIVE(...) : : : "memory");
>

I will cover this consideration in the changes to #16 in the next version.

> Cheers,
>


-- 
Regards,
Wei Chen

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH v2 17/19] xen/arm: Isolate the SError between the context switch of 2 vCPUs

2017-03-30 Thread Julien Grall



On 30/03/2017 22:49, Stefano Stabellini wrote:

On Thu, 30 Mar 2017, Wei Chen wrote:

+/*
+ * If the SErrors option is "FORWARD", we have to prevent forwarding
+ * serror to wrong vCPU. So before context switch, we have to use the
+ * synchronize_serror to guarantee that the pending serror would be
+ * caught by current vCPU.
+ *
+ * The SKIP_CTXT_SWITCH_SERROR_SYNC will be set to cpu_hwcaps when the
+ * SErrors option is NOT "FORWARD".
+ */
+asm volatile(ALTERNATIVE("bl synchronize_serror",
+ "nop",
+ SKIP_CTXT_SWITCH_SERROR_SYNC));



This is good, but here you need to add:

  barrier();

because the compiler is free to reorder even asm volatile instructions
(it could move the asm volatile after __context_switch theoretically).


... or it could be moved beforehand because there is no barrier... What 
you want to use is asm volatile(ALTERNATIVE(...) : : : "memory");


Cheers,

--
Julien Grall

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH v2 17/19] xen/arm: Isolate the SError between the context switch of 2 vCPUs

2017-03-30 Thread Stefano Stabellini
On Thu, 30 Mar 2017, Wei Chen wrote:
> If there is a pending SError while we are doing context switch, if the
> SError handle option is "FORWARD", we have to guarantee this SError to
> be caught by current vCPU, otherwise it will be caught by next vCPU and
> be forwarded to this wrong vCPU.
> 
> So we have to synchronize SError before switch to next vCPU. But this is
> only required by "FORWARD" option. In this case we added a new flag
> SKIP_CTXT_SWITCH_SERROR_SYNC in cpu_hwcaps to skip synchronizing SError
> in context switch for other options. In the meantime, we don't need to
> export serror_op accessing to other source files.
> 
> Signed-off-by: Wei Chen 
> ---
> v1->v2:
> 1. Added a new flag in cpu_hwcaps to avoid using serror_op to skip
>synchronizing SError in context switch.
> 2. Update commit message to explain why we added this cpu_hwcaps.
> ---
>  xen/arch/arm/domain.c| 14 ++
>  xen/arch/arm/traps.c |  3 +++
>  xen/include/asm-arm/cpufeature.h |  3 ++-
>  3 files changed, 19 insertions(+), 1 deletion(-)
> 
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 18b34e7..e866ed3 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -29,6 +29,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  
>  #include 
>  #include 
> @@ -326,6 +327,19 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
>  
>  local_irq_disable();
>  
> +/*
> + * If the SErrors option is "FORWARD", we have to prevent forwarding
> + * serror to wrong vCPU. So before context switch, we have to use the
> + * synchronize_serror to guarantee that the pending serror would be
> + * caught by current vCPU.
> + *
> + * The SKIP_CTXT_SWITCH_SERROR_SYNC will be set to cpu_hwcaps when the
> + * SErrors option is NOT "FORWARD".
> + */
> +asm volatile(ALTERNATIVE("bl synchronize_serror",
> + "nop",
> + SKIP_CTXT_SWITCH_SERROR_SYNC));


This is good, but here you need to add:

  barrier();

because the compiler is free to reorder even asm volatile instructions
(it could move the asm volatile after __context_switch theoretically).


>  set_current(next);
>  
>  prev = __context_switch(prev, next);
> diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> index 55d85ed..9b4546e 100644
> --- a/xen/arch/arm/traps.c
> +++ b/xen/arch/arm/traps.c
> @@ -125,6 +125,9 @@ static int __init update_serrors_cpu_caps(void)
>  if ( serrors_op != SERRORS_DIVERSE )
>  cpus_set_cap(SKIP_CHECK_PENDING_VSERROR);
>  
> +if ( serrors_op != SERRORS_FORWARD )
> +cpus_set_cap(SKIP_CTXT_SWITCH_SERROR_SYNC);
> +
>  return 0;
>  }
>  __initcall(update_serrors_cpu_caps);
> diff --git a/xen/include/asm-arm/cpufeature.h 
> b/xen/include/asm-arm/cpufeature.h
> index ec3f9e5..99ecb2b 100644
> --- a/xen/include/asm-arm/cpufeature.h
> +++ b/xen/include/asm-arm/cpufeature.h
> @@ -41,8 +41,9 @@
>  #define ARM64_WORKAROUND_834220 3
>  #define LIVEPATCH_FEATURE   4
>  #define SKIP_CHECK_PENDING_VSERROR 5
> +#define SKIP_CTXT_SWITCH_SERROR_SYNC 6
>  
> -#define ARM_NCAPS   6
> +#define ARM_NCAPS   7
>  
>  #ifndef __ASSEMBLY__
>  
> -- 
> 2.7.4
> 
> 
> ___
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> https://lists.xen.org/xen-devel
> 

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v2 17/19] xen/arm: Isolate the SError between the context switch of 2 vCPUs

2017-03-30 Thread Wei Chen
If there is a pending SError while we are doing context switch, if the
SError handle option is "FORWARD", we have to guarantee this SError to
be caught by current vCPU, otherwise it will be caught by next vCPU and
be forwarded to this wrong vCPU.

So we have to synchronize SError before switch to next vCPU. But this is
only required by "FORWARD" option. In this case we added a new flag
SKIP_CTXT_SWITCH_SERROR_SYNC in cpu_hwcaps to skip synchronizing SError
in context switch for other options. In the meantime, we don't need to
export serror_op accessing to other source files.

Signed-off-by: Wei Chen 
---
v1->v2:
1. Added a new flag in cpu_hwcaps to avoid using serror_op to skip
   synchronizing SError in context switch.
2. Update commit message to explain why we added this cpu_hwcaps.
---
 xen/arch/arm/domain.c| 14 ++
 xen/arch/arm/traps.c |  3 +++
 xen/include/asm-arm/cpufeature.h |  3 ++-
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 18b34e7..e866ed3 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -29,6 +29,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -326,6 +327,19 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 
 local_irq_disable();
 
+/*
+ * If the SErrors option is "FORWARD", we have to prevent forwarding
+ * serror to wrong vCPU. So before context switch, we have to use the
+ * synchronize_serror to guarantee that the pending serror would be
+ * caught by current vCPU.
+ *
+ * The SKIP_CTXT_SWITCH_SERROR_SYNC will be set to cpu_hwcaps when the
+ * SErrors option is NOT "FORWARD".
+ */
+asm volatile(ALTERNATIVE("bl synchronize_serror",
+ "nop",
+ SKIP_CTXT_SWITCH_SERROR_SYNC));
+
 set_current(next);
 
 prev = __context_switch(prev, next);
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 55d85ed..9b4546e 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -125,6 +125,9 @@ static int __init update_serrors_cpu_caps(void)
 if ( serrors_op != SERRORS_DIVERSE )
 cpus_set_cap(SKIP_CHECK_PENDING_VSERROR);
 
+if ( serrors_op != SERRORS_FORWARD )
+cpus_set_cap(SKIP_CTXT_SWITCH_SERROR_SYNC);
+
 return 0;
 }
 __initcall(update_serrors_cpu_caps);
diff --git a/xen/include/asm-arm/cpufeature.h b/xen/include/asm-arm/cpufeature.h
index ec3f9e5..99ecb2b 100644
--- a/xen/include/asm-arm/cpufeature.h
+++ b/xen/include/asm-arm/cpufeature.h
@@ -41,8 +41,9 @@
 #define ARM64_WORKAROUND_834220 3
 #define LIVEPATCH_FEATURE   4
 #define SKIP_CHECK_PENDING_VSERROR 5
+#define SKIP_CTXT_SWITCH_SERROR_SYNC 6
 
-#define ARM_NCAPS   6
+#define ARM_NCAPS   7
 
 #ifndef __ASSEMBLY__
 
-- 
2.7.4


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel