Author: Joseph Huber
Date: 2026-01-19T12:19:15-06:00
New Revision: efb57947bab1b9165fa44c447e776827fcd1d238

URL: https://github.com/llvm/llvm-project/commit/efb57947bab1b9165fa44c447e776827fcd1d238
DIFF: https://github.com/llvm/llvm-project/commit/efb57947bab1b9165fa44c447e776827fcd1d238.diff

LOG: [SPIR-V] Enable variadic function lowering for the SPIR-V target (#175076)

Summary:
We support variadic functions on AMDGPU / NVPTX via an LLVM-IR pass.
This patch applies the same handling to support them on the SPIR-V
target. I am unsure what the ABI should look like here; I have mostly
copied the one we use for NVPTX, which is essentially a struct layout
with natural alignment. This wastes some space, which is why AMDGPU
does not pad its arguments. (Two short C sketches of the resulting
call-site layout and of the va_arg walk follow the diff below.)

Additionally, this required allowing the SPIR_FUNC calling convention
in the pass. I am assuming it is compatible with the C calling
convention in IR, but I will need someone to confirm that.

Added: 
    clang/test/CodeGenSPIRV/Builtins/variadic.c
    llvm/test/CodeGen/SPIRV/function/variadics-lowering.ll

Modified: 
    clang/lib/CodeGen/Targets/SPIR.cpp
    clang/test/CodeGen/varargs-with-nonzero-default-address-space.c
    llvm/lib/Target/SPIRV/CMakeLists.txt
    llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
    llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
    llvm/lib/Transforms/IPO/ExpandVariadics.cpp
    llvm/test/CodeGen/SPIRV/function/vararg.ll
    llvm/test/CodeGen/SPIRV/llc-pipeline.ll

Removed: 
    

################################################################################
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index 6c6c4794bba49..ba90ab3e67053 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -36,6 +36,8 @@ class SPIRVABIInfo : public CommonSPIRABIInfo {
 public:
   SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
   void computeInfo(CGFunctionInfo &FI) const override;
+  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+                   AggValueSlot Slot) const override;
 
 private:
   ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
@@ -207,6 +209,11 @@ void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
   // arguments handling.
   llvm::CallingConv::ID CC = FI.getCallingConvention();
 
+  for (auto &&[ArgumentsCount, I] : llvm::enumerate(FI.arguments()))
+    I.info = ArgumentsCount < FI.getNumRequiredArgs()
+                 ? classifyArgumentType(I.type)
+                 : ABIArgInfo::getDirect();
+
   if (!getCXXABI().classifyReturnType(FI))
     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
 
@@ -219,6 +226,14 @@ void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
   }
 }
 
+RValue SPIRVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                               QualType Ty, AggValueSlot Slot) const {
+  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
+                          getContext().getTypeInfoInChars(Ty),
+                          CharUnits::fromQuantity(1),
+                          /*AllowHigherAlign=*/true, Slot);
+}
+
 unsigned AMDGCNSPIRVABIInfo::numRegsForType(QualType Ty) const {
   // This duplicates the AMDGPUABI computation.
   unsigned NumRegs = 0;

diff --git a/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c b/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c
index b087da34c3dfb..16840ec7d0bfb 100644
--- a/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c
+++ b/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c
@@ -1,4 +1,4 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
 // RUN: %clang_cc1 -triple spirv64-unknown-unknown -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
 
 struct x {
@@ -8,32 +8,37 @@ struct x {
 
 // CHECK-LABEL: define spir_func void @testva(
 // CHECK-SAME: i32 noundef [[N:%.*]], ...) #[[ATTR0:[0-9]+]] {
-// CHECK-NEXT: entry:
+// CHECK-NEXT: [[ENTRY:.*:]]
 // CHECK-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
 // CHECK-NEXT: [[AP:%.*]] = alloca ptr addrspace(4), align 8
 // CHECK-NEXT: [[T:%.*]] = alloca [[STRUCT_X:%.*]], align 8
 // CHECK-NEXT: [[AP2:%.*]] = alloca ptr addrspace(4), align 8
 // CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
-// CHECK-NEXT: [[VARET:%.*]] = alloca i32, align 4
 // CHECK-NEXT: [[N_ADDR_ASCAST:%.*]] = addrspacecast ptr [[N_ADDR]] to ptr addrspace(4)
 // CHECK-NEXT: [[AP_ASCAST:%.*]] = addrspacecast ptr [[AP]] to ptr addrspace(4)
 // CHECK-NEXT: [[T_ASCAST:%.*]] = addrspacecast ptr [[T]] to ptr addrspace(4)
 // CHECK-NEXT: [[AP2_ASCAST:%.*]] = addrspacecast ptr [[AP2]] to ptr addrspace(4)
 // CHECK-NEXT: [[V_ASCAST:%.*]] = addrspacecast ptr [[V]] to ptr addrspace(4)
-// CHECK-NEXT: [[VARET_ASCAST:%.*]] = addrspacecast ptr [[VARET]] to ptr addrspace(4)
 // CHECK-NEXT: store i32 [[N]], ptr addrspace(4) [[N_ADDR_ASCAST]], align 4
 // CHECK-NEXT: call void @llvm.va_start.p4(ptr addrspace(4) [[AP_ASCAST]])
-// CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr addrspace(4) [[AP_ASCAST]], ptr
-// CHECK-NEXT: call void @llvm.memcpy.p4.p0.i64(ptr addrspace(4) align 8 [[T_ASCAST]], ptr align 8 [[TMP0]], i64 16, i1 false)
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[AP_ASCAST]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[ARGP_CUR]], i32 7
+// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr addrspace(4) @llvm.ptrmask.p4.i64(ptr addrspace(4) [[TMP0]], i64 -8)
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[ARGP_CUR_ALIGNED]], i64 16
+// CHECK-NEXT: store ptr addrspace(4) [[ARGP_NEXT]], ptr addrspace(4) [[AP_ASCAST]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p4.p4.i64(ptr addrspace(4) align 8 [[T_ASCAST]], ptr addrspace(4) align 8 [[ARGP_CUR_ALIGNED]], i64 16, i1 false)
 // CHECK-NEXT: call void @llvm.va_copy.p4(ptr addrspace(4) [[AP2_ASCAST]], ptr addrspace(4) [[AP_ASCAST]])
-// CHECK-NEXT: [[TMP1:%.*]] = va_arg ptr addrspace(4) [[AP2_ASCAST]], i32
-// CHECK-NEXT: store i32 [[TMP1]], ptr addrspace(4) [[VARET_ASCAST]], align 4
-// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[VARET_ASCAST]], align 4
+// CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[AP2_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[ARGP_CUR1]], i32 3
+// CHECK-NEXT: [[ARGP_CUR1_ALIGNED:%.*]] = call ptr addrspace(4) @llvm.ptrmask.p4.i64(ptr addrspace(4) [[TMP1]], i64 -4)
+// CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[ARGP_CUR1_ALIGNED]], i64 4
+// CHECK-NEXT: store ptr addrspace(4) [[ARGP_NEXT2]], ptr addrspace(4) [[AP2_ASCAST]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ARGP_CUR1_ALIGNED]], align 4
 // CHECK-NEXT: store i32 [[TMP2]], ptr addrspace(4) [[V_ASCAST]], align 4
 // CHECK-NEXT: call void @llvm.va_end.p4(ptr addrspace(4) [[AP2_ASCAST]])
 // CHECK-NEXT: call void @llvm.va_end.p4(ptr addrspace(4) [[AP_ASCAST]])
 // CHECK-NEXT: ret void
-
+//
 void testva(int n, ...) {
   __builtin_va_list ap;
   __builtin_va_start(ap, n);

diff --git a/clang/test/CodeGenSPIRV/Builtins/variadic.c b/clang/test/CodeGenSPIRV/Builtins/variadic.c
new file mode 100644
index 0000000000000..adf7b117812eb
--- /dev/null
+++ b/clang/test/CodeGenSPIRV/Builtins/variadic.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
+// RUN: %clang_cc1 -triple spirv64 -emit-llvm -o - %s | FileCheck %s
+
+extern void varargs_simple(int, ...);
+
+// CHECK-LABEL: define spir_func void @foo(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[C:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[S:%.*]] = alloca i16, align 2
+// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[L:%.*]] = alloca i64, align 8
+// CHECK-NEXT: [[F:%.*]] = alloca float, align 4
+// CHECK-NEXT: [[D:%.*]] = alloca double, align 8
+// CHECK-NEXT: [[A:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
+// CHECK-NEXT: [[V:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT: [[T:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
+// CHECK-NEXT: store i8 1, ptr [[C]], align 1
+// CHECK-NEXT: store i16 1, ptr [[S]], align 2
+// CHECK-NEXT: store i32 1, ptr [[I]], align 4
+// CHECK-NEXT: store i64 1, ptr [[L]], align 8
+// CHECK-NEXT: store float 1.000000e+00, ptr [[F]], align 4
+// CHECK-NEXT: store double 1.000000e+00, ptr [[D]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[C]], align 1
+// CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[S]], align 2
+// CHECK-NEXT: [[CONV1:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[L]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[F]], align 4
+// CHECK-NEXT: [[CONV2:%.*]] = fpext float [[TMP4]] to double
+// CHECK-NEXT: [[TMP5:%.*]] = load double, ptr [[D]], align 8
+// CHECK-NEXT: call spir_func void (i32, ...) @varargs_simple(i32 noundef 0, i32 noundef [[CONV]], i32 noundef [[CONV1]], i32 noundef [[TMP2]], i64 noundef [[TMP3]], double noundef [[CONV2]], double noundef [[TMP5]])
+// CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr align 4 [[A]], ptr addrspace(1) align 4 @__const.foo.a, i64 12, i1 false)
+// CHECK-NEXT: call spir_func void (i32, ...) @varargs_simple(i32 noundef 0, ptr noundef byval([[STRUCT_ANON]]) align 4 [[A]])
+// CHECK-NEXT: store <4 x i32> splat (i32 1), ptr [[V]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr [[V]], align 16
+// CHECK-NEXT: call spir_func void (i32, ...) @varargs_simple(i32 noundef 0, <4 x i32> noundef [[TMP6]])
+// CHECK-NEXT: call spir_func void (i32, ...) @varargs_simple(i32 noundef 0, ptr noundef byval([[STRUCT_ANON_0]]) align 1 [[T]], ptr noundef byval([[STRUCT_ANON_0]]) align 1 [[T]], i32 noundef 0, ptr noundef byval([[STRUCT_ANON_0]]) align 1 [[T]])
+// CHECK-NEXT: ret void
+//
+void foo() {
+  char c = '\x1';
+  short s = 1;
+  int i = 1;
+  long l = 1;
+  float f = 1.f;
+  double d = 1.;
+  varargs_simple(0, c, s, i, l, f, d);
+
+  struct {int x; char c; int y;} a = {1, '\x1', 1};
+  varargs_simple(0, a);
+
+  typedef int __attribute__((ext_vector_type(4))) int4;
+  int4 v = {1, 1, 1, 1};
+  varargs_simple(0, v);
+
+  struct {char c, d;} t;
+  varargs_simple(0, t, t, 0, t);
+}
+
+typedef struct {long x; long y;} S;
+extern void varargs_complex(S, S, ...);
+
+// CHECK-LABEL: define spir_func void @bar(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr align 8 [[S]], ptr addrspace(1) align 8 @__const.bar.s, i64 16, i1 false)
+// CHECK-NEXT: call spir_func void (ptr, ptr, ...) @varargs_complex(ptr noundef byval([[STRUCT_S]]) align 8 [[S]], ptr noundef byval([[STRUCT_S]]) align 8 [[S]], i32 noundef 1, i64 noundef 1, double noundef 1.000000e+00)
+// CHECK-NEXT: ret void
+//
+void bar() {
+  S s = {1l, 1l};
+  varargs_complex(s, s, 1, 1l, 1.0);
+}

diff --git a/llvm/lib/Target/SPIRV/CMakeLists.txt b/llvm/lib/Target/SPIRV/CMakeLists.txt
index 20d3cfc7203d6..ec44e0a9f4876 100644
--- a/llvm/lib/Target/SPIRV/CMakeLists.txt
+++ b/llvm/lib/Target/SPIRV/CMakeLists.txt
@@ -63,6 +63,7 @@ add_llvm_target(SPIRVCodeGen
   Demangle
   GlobalISel
   FrontendHLSL
+  IPO
   MC
   SPIRVAnalysis
   SPIRVDesc

diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
index 9a2b0771e4dc0..3c5d3ce7a7ed6 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -1020,10 +1020,12 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeFunction(
     const FunctionType *Ty, SPIRVType *RetType,
     const SmallVectorImpl<SPIRVType *> &ArgTypes,
     MachineIRBuilder &MIRBuilder) {
-  if (Ty->isVarArg()) {
+  const SPIRVSubtarget *ST =
+      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
+  if (Ty->isVarArg() && ST->isShader()) {
     Function &Fn = MIRBuilder.getMF().getFunction();
     Ty->getContext().diagnose(DiagnosticInfoUnsupported(
-        Fn, "SPIR-V does not support variadic functions",
+        Fn, "SPIR-V shaders do not support variadic functions",
         MIRBuilder.getDebugLoc()));
   }
   return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {

diff --git a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
index 10038753f4a75..2d70972d6fbdb 100644
--- a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
@@ -33,6 +33,7 @@
 #include "llvm/Passes/PassBuilder.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/IPO/ExpandVariadics.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils.h"
 #include <optional>
@@ -178,6 +179,11 @@ void SPIRVPassConfig::addIRPasses() {
   addPass(createSPIRVRegularizerPass());
   addPass(createSPIRVPrepareFunctionsPass(TM));
   addPass(createSPIRVPrepareGlobalsPass());
+
+  // Variadic function calls aren't supported in shader code.
+  if (!TM.getSubtargetImpl()->isShader()) {
+    addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));
+  }
 }
 
 void SPIRVPassConfig::addISelPrepare() {

diff --git a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
index 4863d6ba789a8..36c94e9395c80 100644
--- a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
+++ b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
@@ -124,6 +124,9 @@ class VariadicABIInfo {
   };
   virtual VAArgSlotInfo slotInfo(const DataLayout &DL, Type *Parameter) = 0;
 
+  // Per-target overrides of special symbols.
+  virtual bool ignoreFunction(Function *F) { return false; }
+
   // Targets implemented so far all have the same trivial lowering for these
   bool vaEndIsNop() { return true; }
   bool vaCopyIsMemcpy() { return true; }
@@ -153,6 +156,11 @@ class ExpandVariadics : public ModulePass {
 
   bool rewriteABI() { return Mode == ExpandVariadicsMode::Lowering; }
 
+  template <typename T> bool isValidCallingConv(T *F) {
+    return F->getCallingConv() == CallingConv::C ||
+           F->getCallingConv() == CallingConv::SPIR_FUNC;
+  }
+
   bool runOnModule(Module &M) override;
 
   bool runOnFunction(Module &M, IRBuilder<> &Builder, Function *F);
@@ -230,7 +238,10 @@ class ExpandVariadics : public ModulePass {
         F->hasFnAttribute(Attribute::Naked))
       return false;
 
-    if (F->getCallingConv() != CallingConv::C)
+    if (ABI->ignoreFunction(F))
+      return false;
+
+    if (!isValidCallingConv(F))
       return false;
 
     if (rewriteABI())
@@ -249,7 +260,7 @@ class ExpandVariadics : public ModulePass {
       return false;
     }
 
-    if (CI->getCallingConv() != CallingConv::C)
+    if (!isValidCallingConv(CI))
      return false;
 
     return true;
@@ -609,6 +620,9 @@ bool ExpandVariadics::expandCall(Module &M, IRBuilder<> &Builder, CallBase *CB,
   bool Changed = false;
   const DataLayout &DL = M.getDataLayout();
 
+  if (ABI->ignoreFunction(CB->getCalledFunction()))
+    return Changed;
+
   if (!expansionApplicableToFunctionCall(CB)) {
     if (rewriteABI())
       report_fatal_error("Cannot lower callbase instruction");
@@ -940,6 +954,39 @@ struct NVPTX final : public VariadicABIInfo {
   }
 };
 
+struct SPIRV final : public VariadicABIInfo {
+
+  bool enableForTarget() override { return true; }
+
+  bool vaListPassedInSSARegister() override { return true; }
+
+  Type *vaListType(LLVMContext &Ctx) override {
+    return PointerType::getUnqual(Ctx);
+  }
+
+  Type *vaListParameterType(Module &M) override {
+    return PointerType::getUnqual(M.getContext());
+  }
+
+  Value *initializeVaList(Module &M, LLVMContext &Ctx, IRBuilder<> &Builder,
+                          AllocaInst *, Value *Buffer) override {
+    return Builder.CreateAddrSpaceCast(Buffer, vaListParameterType(M));
+  }
+
+  VAArgSlotInfo slotInfo(const DataLayout &DL, Type *Parameter) override {
+    // Expects natural alignment in all cases. The variadic call ABI will
+    // handle promoting types to their appropriate size and alignment.
+    Align A = DL.getABITypeAlign(Parameter);
+    return {A, false};
+  }
+
+  // The SPIR-V backend has special handling for SPIR-V mangled printf
+  // functions.
+  bool ignoreFunction(Function *F) override {
+    return F->getName().starts_with('_') && F->getName().contains("printf");
+  }
+};
+
 struct Wasm final : public VariadicABIInfo {
 
   bool enableForTarget() override {
@@ -995,6 +1042,11 @@ std::unique_ptr<VariadicABIInfo> VariadicABIInfo::create(const Triple &T) {
     return std::make_unique<NVPTX>();
   }
 
+  case Triple::spirv:
+  case Triple::spirv64: {
+    return std::make_unique<SPIRV>();
+  }
+
   default:
     return {};
   }

diff --git a/llvm/test/CodeGen/SPIRV/function/vararg.ll b/llvm/test/CodeGen/SPIRV/function/vararg.ll
index 7f734834ccf51..affefd9638742 100644
--- a/llvm/test/CodeGen/SPIRV/function/vararg.ll
+++ b/llvm/test/CodeGen/SPIRV/function/vararg.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers < %s 2>&1 | FileCheck %s
+; RUN: not llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-vulkan --spirv-ext=+SPV_INTEL_function_pointers < %s 2>&1 | FileCheck %s
 
 define void @bar() {
 entry:
@@ -6,5 +6,5 @@ entry:
   ret void
 }
 
-; CHECK:error: {{.*}} in function bar void (): SPIR-V does not support variadic functions
+; CHECK:error: {{.*}} in function bar void (): SPIR-V shaders do not support variadic functions
 declare spir_func void @_Z3fooiz(i32, ...)

diff --git a/llvm/test/CodeGen/SPIRV/function/variadics-lowering.ll b/llvm/test/CodeGen/SPIRV/function/variadics-lowering.ll
new file mode 100644
index 0000000000000..36c81d94b9e00
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/function/variadics-lowering.ll
@@ -0,0 +1,218 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple=spirv64-- --passes=expand-variadics --expand-variadics-override=lowering < %s | FileCheck %s
+
+%struct.agg = type { i32, double }
+
+define spir_func void @variadic_sink(i32 noundef %tag, ...) {
+; CHECK-LABEL: define spir_func void @variadic_sink(
+; CHECK-SAME: i32 noundef [[TAG:%.*]], ptr [[VARARGS:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store ptr [[VARARGS]], ptr [[AP]], align 8
+; CHECK-NEXT: switch i32 [[TAG]], label %[[SW_DEFAULT:.*]] [
+; CHECK-NEXT: i32 0, label %[[SW_BB:.*]]
+; CHECK-NEXT: i32 1, label %[[SW_BB1:.*]]
+; CHECK-NEXT: i32 2, label %[[SW_BB4:.*]]
+; CHECK-NEXT: i32 3, label %[[SW_BB7:.*]]
+; CHECK-NEXT: i32 4, label %[[SW_BB10:.*]]
+; CHECK-NEXT: ]
+; CHECK: [[SW_BB]]:
+; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[AP]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 3
+; CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -4)
+; CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 4
+; CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[AP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGP_CUR_ALIGNED]], align 4
+; CHECK-NEXT: br label %[[SW_EPILOG:.*]]
+; CHECK: [[SW_BB1]]:
+; CHECK-NEXT: [[ARGP_CUR2:%.*]] = load ptr, ptr [[AP]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR2]], i32 7
+; CHECK-NEXT: [[ARGP_CUR2_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP2]], i64 -8)
+; CHECK-NEXT: [[ARGP_NEXT3:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR2_ALIGNED]], i64 8
+; CHECK-NEXT: store ptr [[ARGP_NEXT3]], ptr [[AP]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[ARGP_CUR2_ALIGNED]], align 8
+; CHECK-NEXT: br label %[[SW_EPILOG]]
+; CHECK: [[SW_BB4]]:
+; CHECK-NEXT: [[ARGP_CUR5:%.*]] = load ptr, ptr [[AP]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR5]], i32 7
+; CHECK-NEXT: [[ARGP_CUR5_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP4]], i64 -8)
+; CHECK-NEXT: [[ARGP_NEXT6:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR5_ALIGNED]], i64 8
+; CHECK-NEXT: store ptr [[ARGP_NEXT6]], ptr [[AP]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load double, ptr [[ARGP_CUR5_ALIGNED]], align 8
+; CHECK-NEXT: br label %[[SW_EPILOG]]
+; CHECK: [[SW_BB7]]:
+; CHECK-NEXT: [[ARGP_CUR8:%.*]] = load ptr, ptr [[AP]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR8]], i32 7
+; CHECK-NEXT: [[ARGP_CUR8_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP6]], i64 -8)
+; CHECK-NEXT: [[ARGP_NEXT9:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR8_ALIGNED]], i64 16
+; CHECK-NEXT: store ptr [[ARGP_NEXT9]], ptr [[AP]], align 8
+; CHECK-NEXT: br label %[[SW_EPILOG]]
+; CHECK: [[SW_BB10]]:
+; CHECK-NEXT: [[ARGP_CUR11:%.*]] = load ptr, ptr [[AP]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR11]], i32 15
+; CHECK-NEXT: [[ARGP_CUR11_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP7]], i64 -16)
+; CHECK-NEXT: [[ARGP_NEXT12:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR11_ALIGNED]], i64 16
+; CHECK-NEXT: store ptr [[ARGP_NEXT12]], ptr [[AP]], align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x float>, ptr [[ARGP_CUR11_ALIGNED]], align 16
+; CHECK-NEXT: br label %[[SW_EPILOG]]
+; CHECK: [[SW_DEFAULT]]:
+; CHECK-NEXT: br label %[[SW_EPILOG]]
+; CHECK: [[SW_EPILOG]]:
+; CHECK-NEXT: ret void
+;
+entry:
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start.p0(ptr %ap)
+  switch i32 %tag, label %sw.default [
+    i32 0, label %sw.bb
+    i32 1, label %sw.bb1
+    i32 2, label %sw.bb4
+    i32 3, label %sw.bb7
+    i32 4, label %sw.bb10
+  ]
+
+sw.bb:                                            ; preds = %entry
+  %argp.cur = load ptr, ptr %ap, align 8
+  %0 = getelementptr inbounds i8, ptr %argp.cur, i32 3
+  %argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -4)
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 4
+  store ptr %argp.next, ptr %ap, align 8
+  %1 = load i32, ptr %argp.cur.aligned, align 4
+  br label %sw.epilog
+
+sw.bb1:                                           ; preds = %entry
+  %argp.cur2 = load ptr, ptr %ap, align 8
+  %2 = getelementptr inbounds i8, ptr %argp.cur2, i32 7
+  %argp.cur2.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %2, i64 -8)
+  %argp.next3 = getelementptr inbounds i8, ptr %argp.cur2.aligned, i64 8
+  store ptr %argp.next3, ptr %ap, align 8
+  %3 = load i64, ptr %argp.cur2.aligned, align 8
+  br label %sw.epilog
+
+sw.bb4:                                           ; preds = %entry
+  %argp.cur5 = load ptr, ptr %ap, align 8
+  %4 = getelementptr inbounds i8, ptr %argp.cur5, i32 7
+  %argp.cur5.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %4, i64 -8)
+  %argp.next6 = getelementptr inbounds i8, ptr %argp.cur5.aligned, i64 8
+  store ptr %argp.next6, ptr %ap, align 8
+  %5 = load double, ptr %argp.cur5.aligned, align 8
+  br label %sw.epilog
+
+sw.bb7:                                           ; preds = %entry
+  %argp.cur8 = load ptr, ptr %ap, align 8
+  %6 = getelementptr inbounds i8, ptr %argp.cur8, i32 7
+  %argp.cur8.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %6, i64 -8)
+  %argp.next9 = getelementptr inbounds i8, ptr %argp.cur8.aligned, i64 16
+  store ptr %argp.next9, ptr %ap, align 8
+  br label %sw.epilog
+
+sw.bb10:                                          ; preds = %entry
+  %argp.cur11 = load ptr, ptr %ap, align 8
+  %7 = getelementptr inbounds i8, ptr %argp.cur11, i32 15
+  %argp.cur11.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %7, i64 -16)
+  %argp.next12 = getelementptr inbounds i8, ptr %argp.cur11.aligned, i64 16
+  store ptr %argp.next12, ptr %ap, align 8
+  %8 = load <4 x float>, ptr %argp.cur11.aligned, align 16
+  br label %sw.epilog
+
+sw.default:                                       ; preds = %entry
+  br label %sw.epilog
+
+sw.epilog:                                        ; preds = %sw.default, %sw.bb10, %sw.bb7, %sw.bb4, %sw.bb1, %sw.bb
+  call void @llvm.va_end.p0(ptr %ap)
+  ret void
+}
+
+define spir_func void @call_i32() {
+; CHECK-LABEL: define spir_func void @call_i32() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[CALL_I32_VARARG:%.*]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[CALL_I32_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i32 1, ptr [[TMP0]], align 4
+; CHECK-NEXT: call spir_func void @variadic_sink(i32 noundef 0, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: ret void
+;
+entry:
+  call spir_func void (i32, ...) @variadic_sink(i32 noundef 0, i32 noundef 1)
+  ret void
+}
+
+define spir_func void @call_i64() {
+; CHECK-LABEL: define spir_func void @call_i64() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[CALL_I64_VARARG:%.*]], align 8
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[CALL_I64_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store i64 1, ptr [[TMP0]], align 8
+; CHECK-NEXT: call spir_func void @variadic_sink(i32 noundef 1, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: ret void
+;
+entry:
+  call spir_func void (i32, ...) @variadic_sink(i32 noundef 1, i64 noundef 1)
+  ret void
+}
+
+define spir_func void @call_f64() {
+; CHECK-LABEL: define spir_func void @call_f64() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[CALL_F64_VARARG:%.*]], align 8
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[CALL_F64_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store double 1.000000e+00, ptr [[TMP0]], align 8
+; CHECK-NEXT: call spir_func void @variadic_sink(i32 noundef 2, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: ret void
+;
+entry:
+  call spir_func void (i32, ...) @variadic_sink(i32 noundef 2, double noundef 1.000000e+00)
+  ret void
+}
+
+define spir_func void @call_struct() {
+; CHECK-LABEL: define spir_func void @call_struct() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[DOTCOMPOUNDLITERAL:%.*]] = alloca [[STRUCT_AGG:%.*]], align 8
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[CALL_STRUCT_VARARG:%.*]], align 8
+; CHECK-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_AGG]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 0
+; CHECK-NEXT: store i32 1, ptr [[A]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[DOTCOMPOUNDLITERAL]], i64 4
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 4, i1 false)
+; CHECK-NEXT: [[B:%.*]] = getelementptr inbounds nuw [[STRUCT_AGG]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 1
+; CHECK-NEXT: store double 2.000000e+00, ptr [[B]], align 8
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[CALL_STRUCT_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[TMP1]], ptr [[DOTCOMPOUNDLITERAL]], i64 16, i1 false)
+; CHECK-NEXT: call spir_func void @variadic_sink(i32 noundef 3, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: ret void
+;
+entry:
+  %.compoundliteral = alloca %struct.agg, align 8
+  %a = getelementptr inbounds nuw %struct.agg, ptr %.compoundliteral, i32 0, i32 0
+  store i32 1, ptr %a, align 8
+  %0 = getelementptr i8, ptr %.compoundliteral, i64 4
+  call void @llvm.memset.p0.i64(ptr align 4 %0, i8 0, i64 4, i1 false)
+  %b = getelementptr inbounds nuw %struct.agg, ptr %.compoundliteral, i32 0, i32 1
+  store double 2.000000e+00, ptr %b, align 8
+  call spir_func void (i32, ...) @variadic_sink(i32 noundef 3, ptr noundef byval(%struct.agg) align 8 %.compoundliteral)
+  ret void
+}
+
+define spir_func void @call_vector() {
+; CHECK-LABEL: define spir_func void @call_vector() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[CALL_VECTOR_VARARG:%.*]], align 16
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[CALL_VECTOR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
+; CHECK-NEXT: store <4 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00>, ptr [[TMP0]], align 16
+; CHECK-NEXT: call spir_func void @variadic_sink(i32 noundef 4, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: ret void
+;
+entry:
+  call spir_func void (i32, ...) @variadic_sink(i32 noundef 4, <4 x float> noundef <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00>)
+  ret void
+}

diff --git a/llvm/test/CodeGen/SPIRV/llc-pipeline.ll b/llvm/test/CodeGen/SPIRV/llc-pipeline.ll
index cbd06ae1eec4e..2f8b9decaaba3 100644
--- a/llvm/test/CodeGen/SPIRV/llc-pipeline.ll
+++ b/llvm/test/CodeGen/SPIRV/llc-pipeline.ll
@@ -33,6 +33,7 @@
 ; SPIRV-O0-NEXT:   SPIR-V Regularizer
 ; SPIRV-O0-NEXT:   SPIRV prepare functions
 ; SPIRV-O0-NEXT:   SPIRV prepare global variables
+; SPIRV-O0-NEXT:   Expand variadic functions
 ; SPIRV-O0-NEXT:   FunctionPass Manager
 ; SPIRV-O0-NEXT:     Lower invoke and unwind, for unwindless code generators
 ; SPIRV-O0-NEXT:     Remove unreachable blocks from the CFG
@@ -136,6 +137,7 @@
 ; SPIRV-Opt-NEXT:   SPIR-V Regularizer
 ; SPIRV-Opt-NEXT:   SPIRV prepare functions
 ; SPIRV-Opt-NEXT:   SPIRV prepare global variables
+; SPIRV-Opt-NEXT:   Expand variadic functions
 ; SPIRV-Opt-NEXT:   FunctionPass Manager
 ; SPIRV-Opt-NEXT:     Dominator Tree Construction
 ; SPIRV-Opt-NEXT:     Natural Loop Information
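As a rough illustration of the lowering described in the summary, the sketch
below shows in plain C what the pass does at a call site. The names
(sink_frame, sink_lowered) are invented for illustration; the pass derives a
per-call-site frame struct from the promoted argument types, laid out at
natural alignment as the SPIRV slotInfo requests.

#include <stdio.h>

/* Illustrative stand-in for what was "void sink(int tag, ...)": after
 * lowering, the variadic tail is replaced by a single pointer to a frame
 * the caller fills with the promoted arguments. */
struct sink_frame {
  int i;    /* 4-byte slot for an int argument                  */
  double d; /* 8-byte slot; padding before it keeps it aligned  */
};

static void sink_lowered(int tag, void *varargs) {
  const struct sink_frame *args = varargs;
  printf("tag=%d i=%d d=%f\n", tag, args->i, args->d);
}

int main(void) {
  /* The call "sink(0, 1, 2.0)" becomes: materialize the frame, then pass
   * its address in place of the variadic arguments. */
  struct sink_frame frame = {1, 2.0};
  sink_lowered(0, &frame);
  return 0;
}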

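The getelementptr/ptrmask sequences in the new CHECK lines are the usual
void-pointer va_arg walk that emitVoidPtrVAArg emits. A rough C equivalent
for reading an int slot, assuming nothing about the real ABI beyond natural
alignment (the helper name next_int is hypothetical):

#include <stdint.h>
#include <stdio.h>

static int next_int(char **ap) {
  uintptr_t cur = (uintptr_t)*ap;
  uintptr_t mask = _Alignof(int) - 1;
  cur = (cur + mask) & ~mask;        /* round up: gep +3 / ptrmask -4 */
  int value = *(const int *)cur;     /* load from the aligned slot    */
  *ap = (char *)cur + sizeof(int);   /* store the bumped cursor       */
  return value;
}

int main(void) {
  int frame[2] = {42, 7};            /* stand-in caller-built frame   */
  char *cursor = (char *)frame;
  int a = next_int(&cursor);
  int b = next_int(&cursor);
  printf("%d %d\n", a, b);
  return 0;
}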