Re: [PATCH v2 1/1] xen/arm64: entry: Add missing code symbol annotations

2024-05-02 Thread Edgar E. Iglesias
On Fri, Apr 26, 2024 at 1:14 AM Stefano Stabellini
 wrote:
>
> On Tue, 16 Apr 2024, Edgar E. Iglesias wrote:
> > From: "Edgar E. Iglesias" 
> >
> > Use the generic xen/linkage.h macros when and add missing
> ^ when what?
>
> > code symbol annotations.
> >
> > Signed-off-by: Edgar E. Iglesias 
>
> I am looking at the implementation of FUNC and as far as I can tell
> there is no change compared to ENTRY. So from that point of view we are
> good. I wonder if we should keep using "ENTRY" because it is nice to
> mark explicitly the entry points as such but at the same time I am also
> OK with this. I'll let the other ARM maintainers decide.
>
> On the other hand, FUNC_LOCAL does introduce a change: it adds a .align
> everywhere. Should not be harmful?
>
> With the commit message fixed:

Thanks Stefano, will fix the commit message in v3:

 "Use the generic xen/linkage.h macros to annotate code symbols
 and add missing annotations."

Cheers,
Edgar



>
> Reviewed-by: Stefano Stabellini 
>
>
> > ---
> >  xen/arch/arm/arm64/entry.S | 72 +-
> >  1 file changed, 48 insertions(+), 24 deletions(-)
> >
> > diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
> > index f963c923bb..af9a592cae 100644
> > --- a/xen/arch/arm/arm64/entry.S
> > +++ b/xen/arch/arm/arm64/entry.S
> > @@ -289,21 +289,25 @@
> >  b   do_bad_mode
> >  .endm
> >
> > -hyp_sync_invalid:
> > +FUNC_LOCAL(hyp_sync_invalid)
> >  entry   hyp=1
> >  invalid BAD_SYNC
> > +END(hyp_sync_invalid)
> >
> > -hyp_irq_invalid:
> > +FUNC_LOCAL(hyp_irq_invalid)
> >  entry   hyp=1
> >  invalid BAD_IRQ
> > +END(hyp_irq_invalid)
> >
> > -hyp_fiq_invalid:
> > +FUNC_LOCAL(hyp_fiq_invalid)
> >  entry   hyp=1
> >  invalid BAD_FIQ
> > +END(hyp_fiq_invalid)
> >
> > -hyp_error_invalid:
> > +FUNC_LOCAL(hyp_error_invalid)
> >  entry   hyp=1
> >  invalid BAD_ERROR
> > +END(hyp_error_invalid)
> >
> >  /*
> >   * SError received while running in the hypervisor mode.
> > @@ -313,11 +317,12 @@ hyp_error_invalid:
> >   * simplicity, as SError should be rare and potentially fatal,
> >   * all interrupts are kept masked.
> >   */
> > -hyp_error:
> > +FUNC_LOCAL(hyp_error)
> >  entry   hyp=1
> >  mov x0, sp
> >  bl  do_trap_hyp_serror
> >  exithyp=1
> > +END(hyp_error)
> >
> >  /*
> >   * Synchronous exception received while running in the hypervisor mode.
> > @@ -327,7 +332,7 @@ hyp_error:
> >   * some of them. So we want to inherit the state from the interrupted
> >   * context.
> >   */
> > -hyp_sync:
> > +FUNC_LOCAL(hyp_sync)
> >  entry   hyp=1
> >
> >  /* Inherit interrupts */
> > @@ -338,6 +343,7 @@ hyp_sync:
> >  mov x0, sp
> >  bl  do_trap_hyp_sync
> >  exithyp=1
> > +END(hyp_sync)
> >
> >  /*
> >   * IRQ received while running in the hypervisor mode.
> > @@ -352,7 +358,7 @@ hyp_sync:
> >   * would require some rework in some paths (e.g. panic, livepatch) to
> >   * ensure the ordering is enforced everywhere.
> >   */
> > -hyp_irq:
> > +FUNC_LOCAL(hyp_irq)
> >  entry   hyp=1
> >
> >  /* Inherit D, A, F interrupts and keep I masked */
> > @@ -365,8 +371,9 @@ hyp_irq:
> >  mov x0, sp
> >  bl  do_trap_irq
> >  exithyp=1
> > +END(hyp_irq)
> >
> > -guest_sync:
> > +FUNC_LOCAL(guest_sync)
> >  /*
> >   * Save x0, x1 in advance
> >   */
> > @@ -413,8 +420,9 @@ fastpath_out_workaround:
> >  mov x1, xzr
> >  eret
> >  sb
> > +END(guest_sync)
> >
> > -wa2_ssbd:
> > +FUNC_LOCAL(wa2_ssbd)
> >  #ifdef CONFIG_ARM_SSBD
> >  alternative_cb arm_enable_wa2_handling
> >  b   wa2_end
> > @@ -450,42 +458,55 @@ wa2_end:
> >  mov x0, xzr
> >  eret
> >  sb
> > -guest_sync_slowpath:
> > +END(wa2_ssbd)
> > +
> > +FUNC_LOCAL(guest_sync_slowpath)
> >  /*
> >   * x0/x1 may have been scratch by the fast path above, so avoid
> >   * to save them.
> >   */
> >  guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_sync, 
> > save_x0_x1=0
> > +END(guest_sync_slowpath)
> >
> > -guest_irq:
> > +FUNC_LOCAL(guest_irq)
> >  guest_vector compat=0, iflags=IFLAGS__A__, trap=irq
> > +END(guest_irq)
> >
> > -guest_fiq_invalid:
> > +FUNC_LOCAL(guest_fiq_invalid)
> >  entry   hyp=0, compat=0
> >  invalid BAD_FIQ
> > +END(guest_fiq_invalid)
> >
> > -guest_error:
> > +FUNC_LOCAL(guest_error)
> >  guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_serror
> > +END(guest_error)
> >
> > -guest_sync_compat:
> > +FUNC_LOCAL(guest_sync_compat)
> >  guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_sync
> > +END(guest_sync_compat)
> >
> > -guest_irq_compat:
> > +FUNC_LOCAL(guest_irq_compat)
> >  guest_vector compat=1, 

Re: [PATCH v2 1/1] xen/arm64: entry: Add missing code symbol annotations

2024-04-26 Thread Stefano Stabellini
On Fri, 26 Apr 2024, Jan Beulich wrote:
> On 26.04.2024 01:13, Stefano Stabellini wrote:
> > On Tue, 16 Apr 2024, Edgar E. Iglesias wrote:
> >> From: "Edgar E. Iglesias" 
> >>
> >> Use the generic xen/linkage.h macros when and add missing
> > ^ when what?
> > 
> >> code symbol annotations.
> >>
> >> Signed-off-by: Edgar E. Iglesias 
> > 
> > I am looking at the implementation of FUNC and as far as I can tell
> > there is no change compared to ENTRY. So from that point of view we are
> > good. I wonder if we should keep using "ENTRY" because it is nice to
> > mark explicitly the entry points as such but at the same time I am also
> > OK with this. I'll let the other ARM maintainers decide.
> 
> Just to mention it: ENTRY should go away (and hence why PPC and RISC-V had
> it dropped already, while x86 has patches pending to reduce its scope
> enough), not the least to finally allow the oddity near the top of xen.lds.S
> to go away.

I didn't realize that. OK, understood.



Re: [PATCH v2 1/1] xen/arm64: entry: Add missing code symbol annotations

2024-04-26 Thread Jan Beulich
On 26.04.2024 01:13, Stefano Stabellini wrote:
> On Tue, 16 Apr 2024, Edgar E. Iglesias wrote:
>> From: "Edgar E. Iglesias" 
>>
>> Use the generic xen/linkage.h macros when and add missing
> ^ when what?
> 
>> code symbol annotations.
>>
>> Signed-off-by: Edgar E. Iglesias 
> 
> I am looking at the implementation of FUNC and as far as I can tell
> there is no change compared to ENTRY. So from that point of view we are
> good. I wonder if we should keep using "ENTRY" because it is nice to
> mark explicitly the entry points as such but at the same time I am also
> OK with this. I'll let the other ARM maintainers decide.

Just to mention it: ENTRY should go away (and hence why PPC and RISC-V had
it dropped already, while x86 has patches pending to reduce its scope
enough), not the least to finally allow the oddity near the top of xen.lds.S
to go away.

Jan



Re: [PATCH v2 1/1] xen/arm64: entry: Add missing code symbol annotations

2024-04-25 Thread Stefano Stabellini
On Tue, 16 Apr 2024, Edgar E. Iglesias wrote:
> From: "Edgar E. Iglesias" 
> 
> Use the generic xen/linkage.h macros when and add missing
^ when what?

> code symbol annotations.
> 
> Signed-off-by: Edgar E. Iglesias 

I am looking at the implementation of FUNC and as far as I can tell
there is no change compared to ENTRY. So from that point of view we are
good. I wonder if we should keep using "ENTRY" because it is nice to
mark explicitly the entry points as such but at the same time I am also
OK with this. I'll let the other ARM maintainers decide.

On the other hand, FUNC_LOCAL does introduce a change: it adds a .align
everywhere. Should not be harmful?

With the commit message fixed:

Reviewed-by: Stefano Stabellini 


> ---
>  xen/arch/arm/arm64/entry.S | 72 +-
>  1 file changed, 48 insertions(+), 24 deletions(-)
> 
> diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
> index f963c923bb..af9a592cae 100644
> --- a/xen/arch/arm/arm64/entry.S
> +++ b/xen/arch/arm/arm64/entry.S
> @@ -289,21 +289,25 @@
>  b   do_bad_mode
>  .endm
>  
> -hyp_sync_invalid:
> +FUNC_LOCAL(hyp_sync_invalid)
>  entry   hyp=1
>  invalid BAD_SYNC
> +END(hyp_sync_invalid)
>  
> -hyp_irq_invalid:
> +FUNC_LOCAL(hyp_irq_invalid)
>  entry   hyp=1
>  invalid BAD_IRQ
> +END(hyp_irq_invalid)
>  
> -hyp_fiq_invalid:
> +FUNC_LOCAL(hyp_fiq_invalid)
>  entry   hyp=1
>  invalid BAD_FIQ
> +END(hyp_fiq_invalid)
>  
> -hyp_error_invalid:
> +FUNC_LOCAL(hyp_error_invalid)
>  entry   hyp=1
>  invalid BAD_ERROR
> +END(hyp_error_invalid)
>  
>  /*
>   * SError received while running in the hypervisor mode.
> @@ -313,11 +317,12 @@ hyp_error_invalid:
>   * simplicity, as SError should be rare and potentially fatal,
>   * all interrupts are kept masked.
>   */
> -hyp_error:
> +FUNC_LOCAL(hyp_error)
>  entry   hyp=1
>  mov x0, sp
>  bl  do_trap_hyp_serror
>  exithyp=1
> +END(hyp_error)
>  
>  /*
>   * Synchronous exception received while running in the hypervisor mode.
> @@ -327,7 +332,7 @@ hyp_error:
>   * some of them. So we want to inherit the state from the interrupted
>   * context.
>   */
> -hyp_sync:
> +FUNC_LOCAL(hyp_sync)
>  entry   hyp=1
>  
>  /* Inherit interrupts */
> @@ -338,6 +343,7 @@ hyp_sync:
>  mov x0, sp
>  bl  do_trap_hyp_sync
>  exithyp=1
> +END(hyp_sync)
>  
>  /*
>   * IRQ received while running in the hypervisor mode.
> @@ -352,7 +358,7 @@ hyp_sync:
>   * would require some rework in some paths (e.g. panic, livepatch) to
>   * ensure the ordering is enforced everywhere.
>   */
> -hyp_irq:
> +FUNC_LOCAL(hyp_irq)
>  entry   hyp=1
>  
>  /* Inherit D, A, F interrupts and keep I masked */
> @@ -365,8 +371,9 @@ hyp_irq:
>  mov x0, sp
>  bl  do_trap_irq
>  exithyp=1
> +END(hyp_irq)
>  
> -guest_sync:
> +FUNC_LOCAL(guest_sync)
>  /*
>   * Save x0, x1 in advance
>   */
> @@ -413,8 +420,9 @@ fastpath_out_workaround:
>  mov x1, xzr
>  eret
>  sb
> +END(guest_sync)
>  
> -wa2_ssbd:
> +FUNC_LOCAL(wa2_ssbd)
>  #ifdef CONFIG_ARM_SSBD
>  alternative_cb arm_enable_wa2_handling
>  b   wa2_end
> @@ -450,42 +458,55 @@ wa2_end:
>  mov x0, xzr
>  eret
>  sb
> -guest_sync_slowpath:
> +END(wa2_ssbd)
> +
> +FUNC_LOCAL(guest_sync_slowpath)
>  /*
>   * x0/x1 may have been scratch by the fast path above, so avoid
>   * to save them.
>   */
>  guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_sync, 
> save_x0_x1=0
> +END(guest_sync_slowpath)
>  
> -guest_irq:
> +FUNC_LOCAL(guest_irq)
>  guest_vector compat=0, iflags=IFLAGS__A__, trap=irq
> +END(guest_irq)
>  
> -guest_fiq_invalid:
> +FUNC_LOCAL(guest_fiq_invalid)
>  entry   hyp=0, compat=0
>  invalid BAD_FIQ
> +END(guest_fiq_invalid)
>  
> -guest_error:
> +FUNC_LOCAL(guest_error)
>  guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_serror
> +END(guest_error)
>  
> -guest_sync_compat:
> +FUNC_LOCAL(guest_sync_compat)
>  guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_sync
> +END(guest_sync_compat)
>  
> -guest_irq_compat:
> +FUNC_LOCAL(guest_irq_compat)
>  guest_vector compat=1, iflags=IFLAGS__A__, trap=irq
> +END(guest_irq_compat)
>  
> -guest_fiq_invalid_compat:
> +FUNC_LOCAL(guest_fiq_invalid_compat)
>  entry   hyp=0, compat=1
>  invalid BAD_FIQ
> +END(guest_fiq_invalid_compat)
>  
> -guest_error_compat:
> +FUNC_LOCAL(guest_error_compat)
>  guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_serror
> +END(guest_error_compat)
>  
> -ENTRY(return_to_new_vcpu32)
> +FUNC(return_to_new_vcpu32)
>  exithyp=0, compat=1
> -ENTRY(return_to_new_vcpu64)
> 

[PATCH v2 1/1] xen/arm64: entry: Add missing code symbol annotations

2024-04-16 Thread Edgar E. Iglesias
From: "Edgar E. Iglesias" 

Use the generic xen/linkage.h macros when and add missing
code symbol annotations.

Signed-off-by: Edgar E. Iglesias 
---
 xen/arch/arm/arm64/entry.S | 72 +-
 1 file changed, 48 insertions(+), 24 deletions(-)

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index f963c923bb..af9a592cae 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -289,21 +289,25 @@
 b   do_bad_mode
 .endm
 
-hyp_sync_invalid:
+FUNC_LOCAL(hyp_sync_invalid)
 entry   hyp=1
 invalid BAD_SYNC
+END(hyp_sync_invalid)
 
-hyp_irq_invalid:
+FUNC_LOCAL(hyp_irq_invalid)
 entry   hyp=1
 invalid BAD_IRQ
+END(hyp_irq_invalid)
 
-hyp_fiq_invalid:
+FUNC_LOCAL(hyp_fiq_invalid)
 entry   hyp=1
 invalid BAD_FIQ
+END(hyp_fiq_invalid)
 
-hyp_error_invalid:
+FUNC_LOCAL(hyp_error_invalid)
 entry   hyp=1
 invalid BAD_ERROR
+END(hyp_error_invalid)
 
 /*
  * SError received while running in the hypervisor mode.
@@ -313,11 +317,12 @@ hyp_error_invalid:
  * simplicity, as SError should be rare and potentially fatal,
  * all interrupts are kept masked.
  */
-hyp_error:
+FUNC_LOCAL(hyp_error)
 entry   hyp=1
 mov x0, sp
 bl  do_trap_hyp_serror
 exithyp=1
+END(hyp_error)
 
 /*
  * Synchronous exception received while running in the hypervisor mode.
@@ -327,7 +332,7 @@ hyp_error:
  * some of them. So we want to inherit the state from the interrupted
  * context.
  */
-hyp_sync:
+FUNC_LOCAL(hyp_sync)
 entry   hyp=1
 
 /* Inherit interrupts */
@@ -338,6 +343,7 @@ hyp_sync:
 mov x0, sp
 bl  do_trap_hyp_sync
 exithyp=1
+END(hyp_sync)
 
 /*
  * IRQ received while running in the hypervisor mode.
@@ -352,7 +358,7 @@ hyp_sync:
  * would require some rework in some paths (e.g. panic, livepatch) to
  * ensure the ordering is enforced everywhere.
  */
-hyp_irq:
+FUNC_LOCAL(hyp_irq)
 entry   hyp=1
 
 /* Inherit D, A, F interrupts and keep I masked */
@@ -365,8 +371,9 @@ hyp_irq:
 mov x0, sp
 bl  do_trap_irq
 exithyp=1
+END(hyp_irq)
 
-guest_sync:
+FUNC_LOCAL(guest_sync)
 /*
  * Save x0, x1 in advance
  */
@@ -413,8 +420,9 @@ fastpath_out_workaround:
 mov x1, xzr
 eret
 sb
+END(guest_sync)
 
-wa2_ssbd:
+FUNC_LOCAL(wa2_ssbd)
 #ifdef CONFIG_ARM_SSBD
 alternative_cb arm_enable_wa2_handling
 b   wa2_end
@@ -450,42 +458,55 @@ wa2_end:
 mov x0, xzr
 eret
 sb
-guest_sync_slowpath:
+END(wa2_ssbd)
+
+FUNC_LOCAL(guest_sync_slowpath)
 /*
  * x0/x1 may have been scratch by the fast path above, so avoid
  * to save them.
  */
 guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_sync, 
save_x0_x1=0
+END(guest_sync_slowpath)
 
-guest_irq:
+FUNC_LOCAL(guest_irq)
 guest_vector compat=0, iflags=IFLAGS__A__, trap=irq
+END(guest_irq)
 
-guest_fiq_invalid:
+FUNC_LOCAL(guest_fiq_invalid)
 entry   hyp=0, compat=0
 invalid BAD_FIQ
+END(guest_fiq_invalid)
 
-guest_error:
+FUNC_LOCAL(guest_error)
 guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_serror
+END(guest_error)
 
-guest_sync_compat:
+FUNC_LOCAL(guest_sync_compat)
 guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_sync
+END(guest_sync_compat)
 
-guest_irq_compat:
+FUNC_LOCAL(guest_irq_compat)
 guest_vector compat=1, iflags=IFLAGS__A__, trap=irq
+END(guest_irq_compat)
 
-guest_fiq_invalid_compat:
+FUNC_LOCAL(guest_fiq_invalid_compat)
 entry   hyp=0, compat=1
 invalid BAD_FIQ
+END(guest_fiq_invalid_compat)
 
-guest_error_compat:
+FUNC_LOCAL(guest_error_compat)
 guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_serror
+END(guest_error_compat)
 
-ENTRY(return_to_new_vcpu32)
+FUNC(return_to_new_vcpu32)
 exithyp=0, compat=1
-ENTRY(return_to_new_vcpu64)
+END(return_to_new_vcpu32)
+
+FUNC(return_to_new_vcpu64)
 exithyp=0, compat=0
+END(return_to_new_vcpu64)
 
-return_from_trap:
+FUNC_LOCAL(return_from_trap)
 msr daifset, #IFLAGS___I_ /* Mask interrupts */
 
 ldr x21, [sp, #UREGS_PC]/* load ELR */
@@ -524,6 +545,7 @@ return_from_trap:
 
 eret
 sb
+END(return_from_trap)
 
 /*
  * Consume pending SError generated by the guest if any.
@@ -536,7 +558,7 @@ return_from_trap:
  * it. So the function will unmask SError exception for a small window and
  * then mask it again.
  */
-check_pending_guest_serror:
+FUNC_LOCAL(check_pending_guest_serror)
 /*
  * Save elr_el2 to check whether the pending SError exception takes
  * place while we are doing this sync exception.
@@ -586,7 +608,7 @@ abort_guest_exit_end:
 csetx19, ne
 
 ret
-ENDPROC(check_pending_guest_serror)