https://github.com/dpaoliello created https://github.com/llvm/llvm-project/pull/92580

Backports #90115 and #92326.

Release notes:
Fixes issues where LLVM either generated an incorrect thunk for a function
with aligned parameters or failed to correctly pass through the return value
when `StructRet` was used.

From 5e0477fafd6aa8ea8451a7ea4968f407ca893aef Mon Sep 17 00:00:00 2001
From: Eli Friedman <efrie...@quicinc.com>
Date: Fri, 26 Apr 2024 11:06:11 -0700
Subject: [PATCH 1/2] [Arm64EC] Improve alignment mangling in arm64ec thunks. (#90115)

In some cases, MSVC's mangling for arm64ec thunks includes the alignment
of a struct. I added some code to try to match it, but it never really
worked right. The issues:

- Alignment is only mangled if it's 16 or more (I guess the default is
supposed to be 8).
- Alignment isn't mangled on return values (since the memory is
allocated by the caller).

The current patch leaves hooks to make alignment mangling work, but it
never actually mangles alignment: clang doesn't currently encode a
relevant alignment into the IR. Once we get clang to emit the real
size/alignment of structs, we can start emitting it.
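
As an illustration, here is a minimal sketch of the mangling rule after
this patch (the helper name is hypothetical; the real logic is inlined in
AArch64Arm64ECCallLowering::canonicalizeThunkType):

  #include "llvm/Support/Alignment.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  // Hypothetical helper mirroring the condition changed below: emit the
  // "a<N>" alignment suffix only for alignments of 16 or more, and never
  // for a return value, since its memory is allocated by the caller.
  static void mangleAlignment(raw_ostream &Out, Align Alignment, bool Ret) {
    if (Alignment.value() >= 16 && !Ret)
      Out << "a" << Alignment.value();
  }

Under that rule a 32-byte-aligned by-value argument could still mangle as
"a32" (once clang actually encodes alignments), while the 32-byte-aligned
sret return in the tests below drops its suffix, turning
$ientry_thunk$cdecl$m16a32$v into $ientry_thunk$cdecl$m16$v.
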
---
 llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp |  7 ++++---
 llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll      |  6 +++---
 llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll       | 10 +++++-----
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
index 55c5bbc66a3f4..d4dd28aecac48 100644
--- a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
@@ -181,13 +181,14 @@ void AArch64Arm64ECCallLowering::getThunkArgTypes(
   }
 
   for (unsigned E = FT->getNumParams(); I != E; ++I) {
-    Align ParamAlign = AttrList.getParamAlignment(I).valueOrOne();
 #if 0
     // FIXME: Need more information about argument size; see
     // https://reviews.llvm.org/D132926
     uint64_t ArgSizeBytes = AttrList.getParamArm64ECArgSizeBytes(I);
+    Align ParamAlign = AttrList.getParamAlignment(I).valueOrOne();
 #else
     uint64_t ArgSizeBytes = 0;
+    Align ParamAlign = Align();
 #endif
     Type *Arm64Ty, *X64Ty;
     canonicalizeThunkType(FT->getParamType(I), ParamAlign,
@@ -297,7 +298,7 @@ void AArch64Arm64ECCallLowering::canonicalizeThunkType(
     uint64_t TotalSizeBytes = ElementCnt * ElementSizePerBytes;
     if (ElementTy->isFloatTy() || ElementTy->isDoubleTy()) {
       Out << (ElementTy->isFloatTy() ? "F" : "D") << TotalSizeBytes;
-      if (Alignment.value() >= 8 && !T->isPointerTy())
+      if (Alignment.value() >= 16 && !Ret)
         Out << "a" << Alignment.value();
       Arm64Ty = T;
       if (TotalSizeBytes <= 8) {
@@ -328,7 +329,7 @@ void AArch64Arm64ECCallLowering::canonicalizeThunkType(
   Out << "m";
   if (TypeSize != 4)
     Out << TypeSize;
-  if (Alignment.value() >= 8 && !T->isPointerTy())
+  if (Alignment.value() >= 16 && !Ret)
     Out << "a" << Alignment.value();
   // FIXME: Try to canonicalize Arm64Ty more thoroughly?
   Arm64Ty = T;
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
index bb9ba05f7a272..c00c9bfe127e8 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
@@ -223,8 +223,8 @@ define i8 @matches_has_sret() nounwind {
 
 %TSRet = type { i64, i64 }
 define void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind {
-; CHECK-LABEL:    .def    $ientry_thunk$cdecl$m16a32$v;
-; CHECK:          .section        .wowthk$aa,"xr",discard,$ientry_thunk$cdecl$m16a32$v
+; CHECK-LABEL:    .def    $ientry_thunk$cdecl$m16$v;
+; CHECK:          .section        .wowthk$aa,"xr",discard,$ientry_thunk$cdecl$m16$v
 ; CHECK:          // %bb.0:
 ; CHECK-NEXT:     stp     q6, q7, [sp, #-176]!            // 32-byte Folded Spill
 ; CHECK-NEXT:     .seh_save_any_reg_px    q6, 176
@@ -457,7 +457,7 @@ define %T2 @simple_struct(%T1 %0, %T2 %1, %T3, %T4) nounwind {
 ; CHECK-NEXT:     .symidx $ientry_thunk$cdecl$i8$v
 ; CHECK-NEXT:     .word   1
 ; CHECK-NEXT:     .symidx "#has_aligned_sret"
-; CHECK-NEXT:     .symidx $ientry_thunk$cdecl$m16a32$v
+; CHECK-NEXT:     .symidx $ientry_thunk$cdecl$m16$v
 ; CHECK-NEXT:     .word   1
 ; CHECK-NEXT:     .symidx "#small_array"
 ; CHECK-NEXT:     .symidx $ientry_thunk$cdecl$m2$m2F8
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll b/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll
index 3b911e78aff2a..7a40fcd85ac58 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll
@@ -236,8 +236,8 @@ declare void @has_sret(ptr sret([100 x i8])) nounwind;
 
 %TSRet = type { i64, i64 }
 declare void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind;
-; CHECK-LABEL:    .def    $iexit_thunk$cdecl$m16a32$v;
-; CHECK:          .section        .wowthk$aa,"xr",discard,$iexit_thunk$cdecl$m16a32$v
+; CHECK-LABEL:    .def    $iexit_thunk$cdecl$m16$v;
+; CHECK:          .section        .wowthk$aa,"xr",discard,$iexit_thunk$cdecl$m16$v
 ; CHECK:          // %bb.0:
 ; CHECK-NEXT:     sub     sp, sp, #48
 ; CHECK-NEXT:     .seh_stackalloc 48
@@ -271,8 +271,8 @@ declare void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind;
 ; CHECK:          adrp    x11, has_aligned_sret
 ; CHECK:          add     x11, x11, :lo12:has_aligned_sret
 ; CHECK:          ldr     x9, [x9, :lo12:__os_arm64x_check_icall]
-; CHECK:          adrp    x10, ($iexit_thunk$cdecl$m16a32$v)
-; CHECK:          add     x10, x10, :lo12:($iexit_thunk$cdecl$m16a32$v)
+; CHECK:          adrp    x10, ($iexit_thunk$cdecl$m16$v)
+; CHECK:          add     x10, x10, :lo12:($iexit_thunk$cdecl$m16$v)
 ; CHECK:          blr     x9
 ; CHECK:          .seh_startepilogue
 ; CHECK:          ldr     x30, [sp], #16                  // 8-byte Folded Reload
@@ -492,7 +492,7 @@ declare %T2 @simple_struct(%T1, %T2, %T3, %T4) nounwind;
 ; CHECK-NEXT:     .symidx has_sret
 ; CHECK-NEXT:     .word   0
 ; CHECK-NEXT:     .symidx has_aligned_sret
-; CHECK-NEXT:     .symidx $iexit_thunk$cdecl$m16a32$v
+; CHECK-NEXT:     .symidx $iexit_thunk$cdecl$m16$v
 ; CHECK-NEXT:     .word   4
 ; CHECK-NEXT:     .symidx "#has_aligned_sret$exit_thunk"
 ; CHECK-NEXT:     .symidx has_aligned_sret

From f2fce8192ad4d744e4680cb18c7e98c58e90c27f Mon Sep 17 00:00:00 2001
From: Eli Friedman <efrie...@quicinc.com>
Date: Thu, 16 May 2024 09:15:17 -0700
Subject: [PATCH 2/2] [Arm64EC] Correctly handle sret in entry thunks. (#92326)

I accidentally left out the code to transfer sret attributes to entry
thunks, so values weren't being passed in the right registers, and the
sret pointer wasn't returned in the correct register.

Fixes #90229
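
As a sketch of the fix (written here as a hypothetical free function; the
actual change below is inlined in buildEntryThunk):

  #include "llvm/IR/Attributes.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  // Mirrors the hunk below: if the wrapped function returns through an
  // sret pointer (skipping the sret+inreg combination, which this path
  // leaves alone), mark the thunk's corresponding parameter and the inner
  // call the same way, so the pointer is passed and returned in x8 on the
  // AArch64 side.
  static void transferSRet(Function *F, Function *Thunk, CallInst *Call) {
    auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
    auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
    if (SRetAttr.isValid() && !InRegAttr.isValid()) {
      // The thunk's parameter 0 is the target function pointer, so its
      // first real argument is parameter 1; on the call it is index 0.
      Thunk->addParamAttr(1, SRetAttr);
      Call->addParamAttr(0, SRetAttr);
    }
  }
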
---
 .../AArch64/AArch64Arm64ECCallLowering.cpp    |  9 ++++-
 .../CodeGen/AArch64/arm64ec-entry-thunks.ll   | 36 +++++++++++--------
 2 files changed, 30 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
index d4dd28aecac48..862aefe46193d 100644
--- a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
@@ -514,7 +514,14 @@ Function *AArch64Arm64ECCallLowering::buildEntryThunk(Function *F) {
   // Call the function passed to the thunk.
   Value *Callee = Thunk->getArg(0);
   Callee = IRB.CreateBitCast(Callee, PtrTy);
-  Value *Call = IRB.CreateCall(Arm64Ty, Callee, Args);
+  CallInst *Call = IRB.CreateCall(Arm64Ty, Callee, Args);
+
+  auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
+  auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
+  if (SRetAttr.isValid() && !InRegAttr.isValid()) {
+    Thunk->addParamAttr(1, SRetAttr);
+    Call->addParamAttr(0, SRetAttr);
+  }
 
   Value *RetVal = Call;
   if (TransformDirectToSRet) {
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
index c00c9bfe127e8..e9556b9d5cbee 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
@@ -222,12 +222,12 @@ define i8 @matches_has_sret() nounwind {
 }
 
 %TSRet = type { i64, i64 }
-define void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind {
-; CHECK-LABEL:    .def    $ientry_thunk$cdecl$m16$v;
-; CHECK:          .section        .wowthk$aa,"xr",discard,$ientry_thunk$cdecl$m16$v
+define void @has_aligned_sret(ptr align 32 sret(%TSRet), i32) nounwind {
+; CHECK-LABEL:    .def    $ientry_thunk$cdecl$m16$i8;
+; CHECK:          .section        .wowthk$aa,"xr",discard,$ientry_thunk$cdecl$m16$i8
 ; CHECK:          // %bb.0:
-; CHECK-NEXT:     stp     q6, q7, [sp, #-176]!            // 32-byte Folded Spill
-; CHECK-NEXT:     .seh_save_any_reg_px    q6, 176
+; CHECK-NEXT:     stp     q6, q7, [sp, #-192]!            // 32-byte Folded Spill
+; CHECK-NEXT:     .seh_save_any_reg_px    q6, 192
 ; CHECK-NEXT:     stp     q8, q9, [sp, #32]               // 32-byte Folded Spill
 ; CHECK-NEXT:     .seh_save_any_reg_p     q8, 32
 ; CHECK-NEXT:     stp     q10, q11, [sp, #64]             // 32-byte Folded Spill
@@ -236,17 +236,25 @@ define void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind {
 ; CHECK-NEXT:     .seh_save_any_reg_p     q12, 96
 ; CHECK-NEXT:     stp     q14, q15, [sp, #128]            // 32-byte Folded Spill
 ; CHECK-NEXT:     .seh_save_any_reg_p     q14, 128
-; CHECK-NEXT:     stp     x29, x30, [sp, #160]            // 16-byte Folded Spill
-; CHECK-NEXT:     .seh_save_fplr  160
-; CHECK-NEXT:     add     x29, sp, #160
-; CHECK-NEXT:     .seh_add_fp     160
+; CHECK-NEXT:     str     x19, [sp, #160]                 // 8-byte Folded Spill
+; CHECK-NEXT:     .seh_save_reg   x19, 160
+; CHECK-NEXT:     stp     x29, x30, [sp, #168]            // 16-byte Folded Spill
+; CHECK-NEXT:     .seh_save_fplr  168
+; CHECK-NEXT:     add     x29, sp, #168
+; CHECK-NEXT:     .seh_add_fp     168
 ; CHECK-NEXT:     .seh_endprologue
+; CHECK-NEXT:     mov     x19, x0
+; CHECK-NEXT:     mov     x8, x0
+; CHECK-NEXT:     mov     x0, x1
 ; CHECK-NEXT:     blr     x9
 ; CHECK-NEXT:     adrp    x8, __os_arm64x_dispatch_ret
 ; CHECK-NEXT:     ldr     x0, [x8, :lo12:__os_arm64x_dispatch_ret]
+; CHECK-NEXT:     mov     x8, x19
 ; CHECK-NEXT:     .seh_startepilogue
-; CHECK-NEXT:     ldp     x29, x30, [sp, #160]            // 16-byte Folded Reload
-; CHECK-NEXT:     .seh_save_fplr  160
+; CHECK-NEXT:     ldp     x29, x30, [sp, #168]            // 16-byte Folded Reload
+; CHECK-NEXT:     .seh_save_fplr  168
+; CHECK-NEXT:     ldr     x19, [sp, #160]                 // 8-byte Folded Reload
+; CHECK-NEXT:     .seh_save_reg   x19, 160
 ; CHECK-NEXT:     ldp     q14, q15, [sp, #128]            // 32-byte Folded Reload
 ; CHECK-NEXT:     .seh_save_any_reg_p     q14, 128
 ; CHECK-NEXT:     ldp     q12, q13, [sp, #96]             // 32-byte Folded Reload
@@ -255,8 +263,8 @@ define void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind {
 ; CHECK-NEXT:     .seh_save_any_reg_p     q10, 64
 ; CHECK-NEXT:     ldp     q8, q9, [sp, #32]               // 32-byte Folded Reload
 ; CHECK-NEXT:     .seh_save_any_reg_p     q8, 32
-; CHECK-NEXT:     ldp     q6, q7, [sp], #176              // 32-byte Folded Reload
-; CHECK-NEXT:     .seh_save_any_reg_px    q6, 176
+; CHECK-NEXT:     ldp     q6, q7, [sp], #192              // 32-byte Folded Reload
+; CHECK-NEXT:     .seh_save_any_reg_px    q6, 192
 ; CHECK-NEXT:     .seh_endepilogue
 ; CHECK-NEXT:     br      x0
 ; CHECK-NEXT:     .seh_endfunclet
@@ -457,7 +465,7 @@ define %T2 @simple_struct(%T1 %0, %T2 %1, %T3, %T4) nounwind {
 ; CHECK-NEXT:     .symidx $ientry_thunk$cdecl$i8$v
 ; CHECK-NEXT:     .word   1
 ; CHECK-NEXT:     .symidx "#has_aligned_sret"
-; CHECK-NEXT:     .symidx $ientry_thunk$cdecl$m16$v
+; CHECK-NEXT:     .symidx $ientry_thunk$cdecl$m16$i8
 ; CHECK-NEXT:     .word   1
 ; CHECK-NEXT:     .symidx "#small_array"
 ; CHECK-NEXT:     .symidx $ientry_thunk$cdecl$m2$m2F8
