Re: [PATCH v3] powerpc/booke: Avoid link stack corruption in several places

2021-08-27 Thread Michael Ellerman
On Tue, 24 Aug 2021 07:56:26 +0000 (UTC), Christophe Leroy wrote:
> Use bcl 20,31,+4 instead of bl in order to preserve link stack.
> 
> See commit c974809a26a1 ("powerpc/vdso: Avoid link stack corruption
> in __get_datapage()") for details.
> 
> 
> 
> [...]

Applied to powerpc/next.

[1/1] powerpc/booke: Avoid link stack corruption in several places
  https://git.kernel.org/powerpc/c/f5007dbf4da729baa850b33a64dc3cc53757bdf8

cheers


Re: [PATCH v3] powerpc/booke: Avoid link stack corruption in several places

2021-08-24 Thread Segher Boessenkool
Hi!

On Tue, Aug 24, 2021 at 07:56:26AM +0000, Christophe Leroy wrote:
> Use bcl 20,31,+4 instead of bl in order to preserve link stack.

You use $+4 actually, which is clearer than .+4 or just +4 (and I am
surprised that the latter even works btw, I never knew :-) -- either
way it looks like a typo).
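(For anyone reading along: BO=20 is "branch always", so the BI=31 CR
bit is ignored, and the target is simply the next instruction; the only
lasting effect is that LR ends up holding the address of the
instruction following the bcl.)  A rough gloss:

        bcl     20,31,$+4       /* branch always (and link) to the next insn */
        mflr    r6              /* r6 = address of this mflr */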

> - bl  invstr  /* Find our address */
> + bcl 20,31,$+4   /* Find our address */
>  invstr:  mflr    r6  /* Make it accessible */

You can remove the label now.  This isn't true in all cases, but here
you can (all times it is called "invstr").
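That is, roughly:

        bcl     20,31,$+4       /* Find our address */
        mflr    r6              /* Make it accessible */

with the invstr: line gone, since nothing else refers to it.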

> @@ -85,7 +85,7 @@ skpinv: addi    r6,r6,1 /* Increment */
>   addi    r6,r6,10
>   slw     r6,r8,r6    /* convert to mask */
>  
> - bl  1f  /* Find our address */
> + bcl 20,31,$+4   /* Find our address */
>  1:   mflr    r7

Here, too.

> @@ -1045,7 +1045,7 @@ head_start_47x:
>   sync
>  
>   /* Find the entry we are running from */
> - bl  1f
> + bcl 20,31,$+4
>  1:   mflr    r23
>   tlbsx   r23,0,r23
>   tlbre   r24,r23,0

And here.

> @@ -1132,7 +1132,7 @@ _GLOBAL(switch_to_as1)
>   bne 1b
>  
>   /* Get the tlb entry used by the current running code */
> - bl  0f
> + bcl 20,31,$+4
>  0:   mflr    r4
>   tlbsx   0,r4

> @@ -1166,7 +1166,7 @@ _GLOBAL(switch_to_as1)
>  _GLOBAL(restore_to_as0)
>   mflr    r0
>  
> - bl  0f
> + bcl 20,31,$+4
>  0:   mflr    r9
>   addi    r9,r9,1f - 0b

And these.

> --- a/arch/powerpc/mm/nohash/tlb_low.S
> +++ b/arch/powerpc/mm/nohash/tlb_low.S
> @@ -199,7 +199,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
>   * Touch enough instruction cache lines to ensure cache hits
>   */
>  1:   mflr    r9
> - bl  2f
> + bcl 20,31,$+4
>  2:   mflr    r6
>   li  r7,32
>   PPC_ICBT(0,R6,R7)   /* touch next cache line */
> @@ -414,7 +414,7 @@ _GLOBAL(loadcam_multi)
>* Set up temporary TLB entry that is the same as what we're
>* running from, but in AS=1.
>*/
> - bl  1f
> + bcl 20,31,$+4
>  1:   mflr    r6
>   tlbsx   0,r8
>   mfspr   r6,SPRN_MAS1

And these too.

There does not seem to be a warning for unused local labels; it would be
useful in this case :-)


Segher


[PATCH v3] powerpc/booke: Avoid link stack corruption in several places

2021-08-24 Thread Christophe Leroy
Use bcl 20,31,+4 instead of bl in order to preserve link stack.

See commit c974809a26a1 ("powerpc/vdso: Avoid link stack corruption
in __get_datapage()") for details.
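A rough sketch of the difference (illustrative only, not part of the
diff below): the old sequence

        bl      1f              /* predictor pushes a return address... */
1:      mflr    r6              /* ...but no blr ever pops it, leaving the
                                   link stack unbalanced */

whereas the bcl 20,31,$+4 form is special-cased as a "get current
address" idiom and leaves the link stack alone, as described in the
commit referenced above.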

Signed-off-by: Christophe Leroy 
---
v3: Use $+4 as destination instead of label
v2: Added missing ; in LOAD_REG_ADDR_PIC()
---
 arch/powerpc/include/asm/ppc_asm.h            | 2 +-
 arch/powerpc/kernel/exceptions-64e.S          | 6 +++---
 arch/powerpc/kernel/fsl_booke_entry_mapping.S | 8 ++++----
 arch/powerpc/kernel/head_44x.S                | 6 +++---
 arch/powerpc/kernel/head_fsl_booke.S          | 6 +++---
 arch/powerpc/mm/nohash/tlb_low.S              | 4 ++--
 6 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 349fc0ec0dbb..7be24048b8d1 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -260,7 +260,7 @@ GLUE(.,name):
 
 /* Be careful, this will clobber the lr register. */
 #define LOAD_REG_ADDR_PIC(reg, name)   \
-   bl  0f; \
+   bcl 20,31,$+4;  \
 0: mflr    reg;        \
addis   reg,reg,(name - 0b)@ha; \
    addi    reg,reg,(name - 0b)@l;
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 1401787b0b93..7e0943d9f9b0 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1127,7 +1127,7 @@ found_iprot:
  * r3 = MAS0_TLBSEL (for the iprot array)
  * r4 = SPRN_TLBnCFG
  */
-   bl  invstr  /* Find our address */
+   bcl 20,31,$+4   /* Find our address */
 invstr:    mflr    r6  /* Make it accessible */
mfmsr   r7
rlwinm  r5,r7,27,31,31  /* extract MSR[IS] */
@@ -1196,7 +1196,7 @@ skpinv:   addi    r6,r6,1 /* Increment */
mfmsr   r6
    xori    r6,r6,MSR_IS
mtspr   SPRN_SRR1,r6
-   bl  1f  /* Find our address */
+   bcl 20,31,$+4   /* Find our address */
 1: mflr    r6
    addi    r6,r6,(2f - 1b)
mtspr   SPRN_SRR0,r6
@@ -1256,7 +1256,7 @@ skpinv:   addi    r6,r6,1 /* Increment */
  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
  */
/* Now we branch the new virtual address mapped by this entry */
-   bl  1f  /* Find our address */
+   bcl 20,31,$+4   /* Find our address */
 1: mflr    r6
    addi    r6,r6,(2f - 1b)
tovirt(r6,r6)
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index 8bccce6544b5..dedc17fac8f8 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 
 /* 1. Find the index of the entry we're executing in */
-   bl  invstr  /* Find our address */
+   bcl 20,31,$+4   /* Find our address */
 invstr:    mflr    r6  /* Make it accessible */
mfmsr   r7
rlwinm  r4,r7,27,31,31  /* extract MSR[IS] */
@@ -85,7 +85,7 @@ skpinv:   addi    r6,r6,1 /* Increment */
    addi    r6,r6,10
    slw     r6,r8,r6    /* convert to mask */
 
-   bl  1f  /* Find our address */
+   bcl 20,31,$+4   /* Find our address */
 1: mflr    r7
 
mfspr   r8,SPRN_MAS3
@@ -117,7 +117,7 @@ skpinv: addi    r6,r6,1 /* Increment */
 
    xori    r6,r4,1
    slwi    r6,r6,5 /* setup new context with other address space */
-   bl  1f  /* Find our address */
+   bcl 20,31,$+4   /* Find our address */
 1: mflr    r9
rlwimi  r7,r9,0,20,31
    addi    r7,r7,(2f - 1b)
@@ -207,7 +207,7 @@ next_tlb_setup:
 
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
-   bl  1f  /* Find our address */
+   bcl 20,31,$+4   /* Find our address */
 1: mflr    r9
rlwimi  r6,r9,0,20,31
    addi    r6,r6,(2f - 1b)
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index ddc978a2d381..02d2928d1e01 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -70,7 +70,7 @@ _ENTRY(_start);
  * address.
  * r21 will be loaded with the physical runtime address of _stext
  */
-   bl  0f  /* Get our runtime address */
+   bcl 20,31,$+4   /* Get our runtime address */
 0: mflr    r21 /* Make it accessible */
addis   r21,r21,(_stext - 0b)@ha
    addi    r21,r21,(_stext