llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu

Author: Nicolai Hähnle (nhaehnle)

<details>
<summary>Changes</summary>

**Stack**:
- [5/5] #170512
- [4/5] #170511
- [3/5] #170510
- [2/5] #170509 ⬅
- [1/5] #170508

⚠️ *Part of a stack created by [spr](https://github.com/nhaehnle/spr). Merging this PR using the GitHub UI may have unexpected results.*

---

Full diff: https://github.com/llvm/llvm-project/pull/170509.diff

1 Files Affected:

- (modified) llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp (+28-17)


``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index bb95265a794a0..77db14513254f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -122,6 +122,7 @@ class AMDGPUPromoteAllocaImpl {
   /// Check whether we have enough local memory for promotion.
   bool hasSufficientLocalMem(const Function &F);
 
+  FixedVectorType *getVectorTypeForAlloca(Type *AllocaTy) const;
   bool tryPromoteAllocaToVector(AllocaInst &I);
   bool tryPromoteAllocaToLDS(AllocaInst &I, bool SufficientLDS);
 
@@ -791,16 +792,13 @@ static BasicBlock::iterator skipToNonAllocaInsertPt(BasicBlock &BB,
   return I;
 }
 
-// FIXME: Should try to pick the most likely to be profitable allocas first.
-bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
-  LLVM_DEBUG(dbgs() << "Trying to promote to vector: " << Alloca << '\n');
-
+FixedVectorType *
+AMDGPUPromoteAllocaImpl::getVectorTypeForAlloca(Type *AllocaTy) const {
   if (DisablePromoteAllocaToVector) {
-    LLVM_DEBUG(dbgs() << "  Promote alloca to vector is disabled\n");
-    return false;
+    LLVM_DEBUG(dbgs() << "  Promote alloca to vectors is disabled\n");
+    return nullptr;
   }
 
-  Type *AllocaTy = Alloca.getAllocatedType();
   auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
   if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
     uint64_t NumElems = 1;
@@ -832,10 +830,9 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
       }
     }
   }
-
   if (!VectorTy) {
     LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
-    return false;
+    return nullptr;
   }
 
   const unsigned MaxElements =
@@ -845,9 +842,29 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
       VectorTy->getNumElements() < 2) {
     LLVM_DEBUG(dbgs() << "  " << *VectorTy
                       << " has an unsupported number of elements\n");
-    return false;
+    return nullptr;
   }
 
+  Type *VecEltTy = VectorTy->getElementType();
+  unsigned ElementSizeInBits = DL->getTypeSizeInBits(VecEltTy);
+  if (ElementSizeInBits != DL->getTypeAllocSizeInBits(VecEltTy)) {
+    LLVM_DEBUG(dbgs() << "  Cannot convert to vector if the allocation size "
+                         "does not match the type's size\n");
+    return nullptr;
+  }
+
+  return VectorTy;
+}
+
+// FIXME: Should try to pick the most likely to be profitable allocas first.
+bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
+  LLVM_DEBUG(dbgs() << "Trying to promote to vectors: " << Alloca << '\n');
+
+  Type *AllocaTy = Alloca.getAllocatedType();
+  FixedVectorType *VectorTy = getVectorTypeForAlloca(AllocaTy);
+  if (!VectorTy)
+    return false;
+
   std::map<GetElementPtrInst *, WeakTrackingVH> GEPVectorIdx;
   SmallVector<Instruction *> WorkList;
   SmallVector<Instruction *> UsersToRemove;
@@ -869,13 +886,7 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
   LLVM_DEBUG(dbgs() << "  Attempting promotion to: " << *VectorTy << "\n");
 
   Type *VecEltTy = VectorTy->getElementType();
-  unsigned ElementSizeInBits = DL->getTypeSizeInBits(VecEltTy);
-  if (ElementSizeInBits != DL->getTypeAllocSizeInBits(VecEltTy)) {
-    LLVM_DEBUG(dbgs() << "  Cannot convert to vector if the allocation size "
-                         "does not match the type's size\n");
-    return false;
-  }
-  unsigned ElementSize = ElementSizeInBits / 8;
+  unsigned ElementSize = DL->getTypeSizeInBits(VecEltTy) / 8;
   assert(ElementSize > 0);
   for (auto *U : Uses) {
     Instruction *Inst = cast<Instruction>(U->getUser());
``````````

</details>

https://github.com/llvm/llvm-project/pull/170509
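As context for the check this patch moves from `tryPromoteAllocaToVector` into `getVectorTypeForAlloca`: promotion gives up when an element type's bit size differs from its allocation size, presumably because byte offsets derived from the allocation stride would no longer line up with vector lanes. Below is a minimal, self-contained sketch (not part of the patch; the data layout string and the driver are made up for illustration) showing how `DataLayout::getTypeSizeInBits` and `DataLayout::getTypeAllocSizeInBits` can disagree for padded element types such as `x86_fp80` or `i17`.

```cpp
// Sketch only: illustrates the condition guarded by the check in
// getVectorTypeForAlloca. The data layout string below is a hypothetical
// example, not taken from any AMDGPU target.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  DataLayout DL("e-p:64:64-i64:64-f80:128-n8:16:32:64");

  Type *EltTys[] = {Type::getInt32Ty(Ctx), Type::getX86_FP80Ty(Ctx),
                    Type::getIntNTy(Ctx, 17)};
  for (Type *EltTy : EltTys) {
    // Stored size of the type in bits vs. the stride used when laying it
    // out in memory (rounded up to the ABI alignment).
    uint64_t SizeInBits = DL.getTypeSizeInBits(EltTy).getFixedValue();
    uint64_t AllocSizeInBits = DL.getTypeAllocSizeInBits(EltTy).getFixedValue();
    outs() << *EltTy << ": size = " << SizeInBits
           << " bits, alloc size = " << AllocSizeInBits << " bits"
           << (SizeInBits == AllocSizeInBits ? ""
                                             : "  <-- rejected by the check")
           << '\n';
  }
  return 0;
}
```

With the check done inside the helper, the caller only needs the element's size in bits, which is why the last hunk can compute `ElementSize` directly from `getTypeSizeInBits`.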
