ctetreau updated this revision to Diff 279370.
ctetreau added a comment.

rebase

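For reference, every hunk below applies the same mechanical pattern: call sites that queried an element count through the base llvm::VectorType now cast to llvm::FixedVectorType first, since only fixed-width vectors have a compile-time element count. A minimal sketch of the pattern (the helper name is hypothetical, for illustration only):

  #include "llvm/IR/DerivedTypes.h"  // llvm::FixedVectorType
  #include "llvm/Support/Casting.h"  // llvm::cast

  // Before: cast<llvm::VectorType>(Ty)->getNumElements()
  // After:  narrow the cast so scalable vectors are rejected up front.
  static unsigned exampleNumElements(llvm::Type *Ty) {
    return llvm::cast<llvm::FixedVectorType>(Ty)->getNumElements();
  }
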

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82582/new/

https://reviews.llvm.org/D82582

Files:
  clang/lib/CodeGen/CGAtomic.cpp
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/lib/CodeGen/CGExpr.cpp
  clang/lib/CodeGen/CGExprScalar.cpp
  clang/lib/CodeGen/SwiftCallingConv.cpp

Index: clang/lib/CodeGen/SwiftCallingConv.cpp
===================================================================
--- clang/lib/CodeGen/SwiftCallingConv.cpp
+++ clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -320,9 +320,12 @@
   // If we have a vector type, split it.
   if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
     auto eltTy = vecTy->getElementType();
-    CharUnits eltSize = (end - begin) / vecTy->getNumElements();
+    CharUnits eltSize =
+        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
     assert(eltSize == getTypeStoreSize(CGM, eltTy));
-    for (unsigned i = 0, e = vecTy->getNumElements(); i != e; ++i) {
+    for (unsigned i = 0,
+                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
+         i != e; ++i) {
       addEntry(eltTy, begin, begin + eltSize);
       begin += eltSize;
     }
@@ -674,8 +677,9 @@
 
 bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                   llvm::VectorType *vectorTy) {
-  return isLegalVectorType(CGM, vectorSize, vectorTy->getElementType(),
-                           vectorTy->getNumElements());
+  return isLegalVectorType(
+      CGM, vectorSize, vectorTy->getElementType(),
+      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
 }
 
 bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
@@ -688,7 +692,7 @@
 std::pair<llvm::Type*, unsigned>
 swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                 llvm::VectorType *vectorTy) {
-  auto numElts = vectorTy->getNumElements();
+  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
   auto eltTy = vectorTy->getElementType();
 
   // Try to split the vector type in half.
@@ -710,7 +714,7 @@
   }
 
   // Try to split the vector into legal subvectors.
-  auto numElts = origVectorTy->getNumElements();
+  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
   auto eltTy = origVectorTy->getElementType();
   assert(numElts != 1);
 
Index: clang/lib/CodeGen/CGExprScalar.cpp
===================================================================
--- clang/lib/CodeGen/CGExprScalar.cpp
+++ clang/lib/CodeGen/CGExprScalar.cpp
@@ -1323,7 +1323,7 @@
            "Splatted expr doesn't match with vector element type?");
 
     // Splat the element across to all elements
-    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
     return Builder.CreateVectorSplat(NumElements, Src, "splat");
   }
 
@@ -1630,7 +1630,7 @@
     Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
     Value *Mask;
 
-    llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
     unsigned LHSElts = LTy->getNumElements();
 
     Mask = RHS;
@@ -1648,10 +1648,12 @@
     //   n = extract mask i
     //   x = extract val n
     //   newv = insert newv, x, i
-    auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
-                                           MTy->getNumElements());
+    auto *RTy = llvm::FixedVectorType::get(
+        LTy->getElementType(),
+        cast<llvm::FixedVectorType>(MTy)->getNumElements());
     Value* NewV = llvm::UndefValue::get(RTy);
-    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
+    for (unsigned i = 0, e = cast<llvm::FixedVectorType>(MTy)->getNumElements();
+         i != e; ++i) {
       Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
       Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
 
@@ -1840,7 +1842,7 @@
     return Visit(E->getInit(0));
   }
 
-  unsigned ResElts = VType->getNumElements();
+  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
 
   // Loop over initializers collecting the Value for each, and remembering
   // whether the source was swizzle (ExtVectorElementExpr).  This will allow
@@ -1864,7 +1866,8 @@
       if (isa<ExtVectorElementExpr>(IE)) {
         llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
 
-        if (EI->getVectorOperandType()->getNumElements() == ResElts) {
+        if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
+                ->getNumElements() == ResElts) {
           llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
           Value *LHS = nullptr, *RHS = nullptr;
           if (CurIdx == 0) {
@@ -1902,7 +1905,7 @@
       continue;
     }
 
-    unsigned InitElts = VVT->getNumElements();
+    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
 
     // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
     // input is the same width as the vector being constructed, generate an
@@ -1911,7 +1914,7 @@
     if (isa<ExtVectorElementExpr>(IE)) {
       llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
       Value *SVOp = SVI->getOperand(0);
-      llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
 
       if (OpTy->getNumElements() == ResElts) {
         for (unsigned j = 0; j != CurIdx; ++j) {
@@ -2247,7 +2250,7 @@
     llvm::Type *DstTy = ConvertType(DestTy);
     Value *Elt = Visit(const_cast<Expr*>(E));
     // Splat the element across to all elements
-    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
     return Builder.CreateVectorSplat(NumElements, Elt, "splat");
   }
 
@@ -4441,7 +4444,7 @@
     llvm::Value *RHS = Visit(rhsExpr);
 
     llvm::Type *condType = ConvertType(condExpr->getType());
-    llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+    auto *vecTy = cast<llvm::FixedVectorType>(condType);
 
     unsigned numElem = vecTy->getNumElements();
     llvm::Type *elemType = vecTy->getElementType();
@@ -4644,10 +4647,14 @@
   llvm::Type *DstTy = ConvertType(E->getType());
 
   llvm::Type *SrcTy = Src->getType();
-  unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
-    cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
-  unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
-    cast<llvm::VectorType>(DstTy)->getNumElements() : 0;
+  unsigned NumElementsSrc =
+      isa<llvm::VectorType>(SrcTy)
+          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
+          : 0;
+  unsigned NumElementsDst =
+      isa<llvm::VectorType>(DstTy)
+          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
+          : 0;
 
   // Going from vec3 to non-vec3 is a special case and requires a shuffle
   // vector to get a vec4, then a bitcast if the target type is different.
Index: clang/lib/CodeGen/CGExpr.cpp
===================================================================
--- clang/lib/CodeGen/CGExpr.cpp
+++ clang/lib/CodeGen/CGExpr.cpp
@@ -1680,7 +1680,7 @@
     if (Ty->isVectorType()) {
       const llvm::Type *EltTy = Addr.getElementType();
 
-      const auto *VTy = cast<llvm::VectorType>(EltTy);
+      const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
 
       // Handle vectors of size 3 like size 4 for better performance.
       if (VTy->getNumElements() == 3) {
@@ -1765,8 +1765,9 @@
   auto *VectorTy = dyn_cast<llvm::VectorType>(
       cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
   if (VectorTy && !IsVector) {
-    auto *ArrayTy = llvm::ArrayType::get(VectorTy->getElementType(),
-                                         VectorTy->getNumElements());
+    auto *ArrayTy = llvm::ArrayType::get(
+        VectorTy->getElementType(),
+        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
 
     return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
   }
@@ -1797,7 +1798,7 @@
       llvm::Type *SrcTy = Value->getType();
       auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
       // Handle vec3 special.
-      if (VecTy && VecTy->getNumElements() == 3) {
+      if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
         // Our source is a vec3, do a shuffle vector to make it a vec4.
         Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
                                             ArrayRef<int>{0, 1, 2, -1},
@@ -2212,7 +2213,7 @@
   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
     unsigned NumSrcElts = VTy->getNumElements();
     unsigned NumDstElts =
-        cast<llvm::VectorType>(Vec->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
     if (NumDstElts == NumSrcElts) {
       // Use shuffle vector is the src and destination are the same number of
       // elements and restore the vector mask since it is on the side it will be
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -5690,7 +5690,8 @@
       Ty = HalfTy;
       break;
     }
-    auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
+    auto *VecFlt = llvm::FixedVectorType::get(
+        Ty, cast<FixedVectorType>(VTy)->getNumElements());
     llvm::Type *Tys[] = { VTy, VecFlt };
     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
     return EmitNeonCall(F, Ops, NameHint);
@@ -5838,7 +5839,8 @@
   case NEON::BI__builtin_neon_vextq_v: {
     int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
     SmallVector<int, 16> Indices;
-    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+    for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+         i != e; ++i)
       Indices.push_back(i+CV);
 
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -5950,8 +5952,8 @@
     unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
     llvm::Type *EltTy =
       llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
-    auto *NarrowTy =
-        llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
+    auto *NarrowTy = llvm::FixedVectorType::get(
+        EltTy, cast<FixedVectorType>(VTy)->getNumElements() * 2);
     llvm::Type *Tys[2] = { Ty, NarrowTy };
     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
   }
@@ -5960,8 +5962,8 @@
     // The source operand type has twice as many elements of half the size.
     unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
     llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
-    auto *NarrowTy =
-        llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
+    auto *NarrowTy = llvm::FixedVectorType::get(
+        EltTy, cast<FixedVectorType>(VTy)->getNumElements() * 2);
     llvm::Type *Tys[2] = { Ty, NarrowTy };
     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
   }
@@ -5980,8 +5982,9 @@
     auto *RTy = cast<llvm::VectorType>(Ty);
     if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
         BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
-      RTy = llvm::FixedVectorType::get(RTy->getElementType(),
-                                       RTy->getNumElements() * 2);
+      RTy = llvm::FixedVectorType::get(
+          RTy->getElementType(),
+          cast<FixedVectorType>(RTy)->getNumElements() * 2);
     llvm::Type *Tys[2] = {
         RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                              /*isQuad*/ false))};
@@ -6110,7 +6113,8 @@
 
     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+      for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+           i != e; i += 2) {
         Indices.push_back(i+vi);
         Indices.push_back(i+e+vi);
       }
@@ -6138,7 +6142,8 @@
 
     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+      for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+           i != e; ++i)
         Indices.push_back(2*i+vi);
 
       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
@@ -6156,7 +6161,8 @@
 
     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+      for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+           i != e; i += 2) {
         Indices.push_back((i + vi*e) >> 1);
         Indices.push_back(((i + vi*e) >> 1)+e);
       }
@@ -6298,7 +6304,7 @@
 
   // Build a vector containing sequential number like (0, 1, 2, ..., 15)
   SmallVector<int, 16> Indices;
-  llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
+  auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
   for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
     Indices.push_back(2*i);
     Indices.push_back(2*i+1);
@@ -7355,7 +7361,7 @@
   // or odds, as desired).
   SmallVector<int, 16> Indices;
   unsigned InputElements =
-      cast<llvm::VectorType>(V->getType())->getNumElements();
+      cast<llvm::FixedVectorType>(V->getType())->getNumElements();
   for (unsigned i = 0; i < InputElements; i += 2)
     Indices.push_back(i + Odd);
   return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
@@ -7368,7 +7374,7 @@
   assert(V0->getType() == V1->getType() && "Can't zip different vector types");
   SmallVector<int, 16> Indices;
   unsigned InputElements =
-      cast<llvm::VectorType>(V0->getType())->getNumElements();
+      cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
   for (unsigned i = 0; i < InputElements; i++) {
     Indices.push_back(i);
     Indices.push_back(i + InputElements);
@@ -9855,8 +9861,9 @@
 
     // Now adjust things to handle the lane access.
     auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
-                         ? llvm::FixedVectorType::get(VTy->getElementType(),
-                                                      VTy->getNumElements() / 2)
+                         ? llvm::FixedVectorType::get(
+                               VTy->getElementType(),
+                               cast<FixedVectorType>(VTy)->getNumElements() / 2)
                          : VTy;
     llvm::Constant *cst = cast<Constant>(Ops[3]);
     Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
@@ -9887,8 +9894,9 @@
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
 
-    auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
-                                           VTy->getNumElements() * 2);
+    auto *STy = llvm::FixedVectorType::get(
+        VTy->getElementType(),
+        cast<FixedVectorType>(VTy)->getNumElements() * 2);
     Ops[2] = Builder.CreateBitCast(Ops[2], STy);
     Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
                                                cast<ConstantInt>(Ops[3]));
@@ -9956,7 +9964,7 @@
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
   case NEON::BI__builtin_neon_vpadal_v:
   case NEON::BI__builtin_neon_vpadalq_v: {
-    unsigned ArgElts = VTy->getNumElements();
+    unsigned ArgElts = cast<FixedVectorType>(VTy)->getNumElements();
     llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
     unsigned BitWidth = EltTy->getBitWidth();
     auto *ArgTy = llvm::FixedVectorType::get(
@@ -10861,7 +10869,8 @@
 
     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+      for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+           i != e; i += 2) {
         Indices.push_back(i+vi);
         Indices.push_back(i+e+vi);
       }
@@ -10880,7 +10889,8 @@
 
     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+      for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+           i != e; ++i)
         Indices.push_back(2*i+vi);
 
       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
@@ -10898,7 +10908,8 @@
 
     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+      for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+           i != e; i += 2) {
         Indices.push_back((i + vi*e) >> 1);
         Indices.push_back(((i + vi*e) >> 1)+e);
       }
@@ -11109,7 +11120,8 @@
                                llvm::PointerType::getUnqual(Ops[1]->getType()));
 
   Value *MaskVec = getMaskVecValue(
-      CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
+      CGF, Ops[2],
+      cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
 
   return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
 }
@@ -11121,7 +11133,8 @@
                                llvm::PointerType::getUnqual(Ops[1]->getType()));
 
   Value *MaskVec = getMaskVecValue(
-      CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
+      CGF, Ops[2],
+      cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
 
   return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
 }
@@ -11135,7 +11148,8 @@
   Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                          llvm::PointerType::getUnqual(PtrTy));
 
-  Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
+  Value *MaskVec = getMaskVecValue(
+      CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
 
   llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
                                            ResultTy);
@@ -11145,7 +11159,7 @@
 static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
                                     ArrayRef<Value *> Ops,
                                     bool IsCompress) {
-  auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+  auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
 
   Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
 
@@ -11157,7 +11171,7 @@
 
 static Value *EmitX86CompressStore(CodeGenFunction &CGF,
                                    ArrayRef<Value *> Ops) {
-  auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+  auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
   llvm::Type *PtrTy = ResultTy->getElementType();
 
   // Cast the pointer to element type.
@@ -11193,7 +11207,7 @@
   // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
   // we only care about the lowest log2 bits anyway.
   if (Amt->getType() != Ty) {
-    unsigned NumElts = cast<llvm::VectorType>(Ty)->getNumElements();
+    unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
     Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
     Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
   }
@@ -11252,7 +11266,7 @@
       return Op0;
 
   Mask = getMaskVecValue(
-      CGF, Mask, cast<llvm::VectorType>(Op0->getType())->getNumElements());
+      CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
 
   return CGF.Builder.CreateSelect(Mask, Op0, Op1);
 }
@@ -11299,7 +11313,7 @@
   assert((Ops.size() == 2 || Ops.size() == 4) &&
          "Unexpected number of arguments");
   unsigned NumElts =
-      cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+      cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
   Value *Cmp;
 
   if (CC == 3) {
@@ -11576,7 +11590,8 @@
 
 static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
                               llvm::Type *DstTy) {
-  unsigned NumberOfElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+  unsigned NumberOfElements =
+      cast<llvm::FixedVectorType>(DstTy)->getNumElements();
   Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
   return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
 }
@@ -11612,11 +11627,12 @@
     return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
   }
 
-  unsigned NumDstElts = cast<llvm::VectorType>(DstTy)->getNumElements();
+  unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
   Value *Src = Ops[0];
 
   // Extract the subvector.
-  if (NumDstElts != cast<llvm::VectorType>(Src->getType())->getNumElements()) {
+  if (NumDstElts !=
+      cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
     assert(NumDstElts == 4 && "Unexpected vector size");
     Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
                                           ArrayRef<int>{0, 1, 2, 3});
@@ -11916,7 +11932,7 @@
   case X86::BI__builtin_ia32_vec_ext_v8si:
   case X86::BI__builtin_ia32_vec_ext_v4di: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
     Index &= NumElts - 1;
     // These builtins exist so we can ensure the index is an ICE and in range.
@@ -11932,7 +11948,7 @@
   case X86::BI__builtin_ia32_vec_set_v8si:
   case X86::BI__builtin_ia32_vec_set_v4di: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
     Index &= NumElts - 1;
     // These builtins exist so we can ensure the index is an ICE and in range.
@@ -12358,9 +12374,9 @@
       break;
     }
 
-    unsigned MinElts =
-        std::min(cast<llvm::VectorType>(Ops[0]->getType())->getNumElements(),
-                 cast<llvm::VectorType>(Ops[2]->getType())->getNumElements());
+    unsigned MinElts = std::min(
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
+        cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
     Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
     Function *Intr = CGM.getIntrinsic(IID);
     return Builder.CreateCall(Intr, Ops);
@@ -12467,9 +12483,9 @@
       break;
     }
 
-    unsigned MinElts =
-        std::min(cast<llvm::VectorType>(Ops[2]->getType())->getNumElements(),
-                 cast<llvm::VectorType>(Ops[3]->getType())->getNumElements());
+    unsigned MinElts = std::min(
+        cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
+        cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
     Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
     Function *Intr = CGM.getIntrinsic(IID);
     return Builder.CreateCall(Intr, Ops);
@@ -12491,10 +12507,10 @@
   case X86::BI__builtin_ia32_extracti64x2_256_mask:
   case X86::BI__builtin_ia32_extractf64x2_512_mask:
   case X86::BI__builtin_ia32_extracti64x2_512_mask: {
-    auto *DstTy = cast<llvm::VectorType>(ConvertType(E->getType()));
+    auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
     unsigned NumElts = DstTy->getNumElements();
     unsigned SrcNumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned SubVectors = SrcNumElts / NumElts;
     unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
@@ -12532,9 +12548,9 @@
   case X86::BI__builtin_ia32_insertf64x2_512:
   case X86::BI__builtin_ia32_inserti64x2_512: {
     unsigned DstNumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned SrcNumElts =
-        cast<llvm::VectorType>(Ops[1]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
     unsigned SubVectors = DstNumElts / SrcNumElts;
     unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
@@ -12599,7 +12615,7 @@
   case X86::BI__builtin_ia32_pblendd128:
   case X86::BI__builtin_ia32_pblendd256: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
 
     int Indices[16];
@@ -12616,7 +12632,7 @@
   case X86::BI__builtin_ia32_pshuflw256:
   case X86::BI__builtin_ia32_pshuflw512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
 
     // Splat the 8-bits of immediate 4 times to help the loop wrap around.
@@ -12640,7 +12656,7 @@
   case X86::BI__builtin_ia32_pshufhw256:
   case X86::BI__builtin_ia32_pshufhw512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
 
     // Splat the 8-bits of immediate 4 times to help the loop wrap around.
@@ -12670,7 +12686,7 @@
   case X86::BI__builtin_ia32_vpermilpd512:
   case X86::BI__builtin_ia32_vpermilps512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
     unsigned NumLaneElts = NumElts / NumLanes;
@@ -12697,7 +12713,7 @@
   case X86::BI__builtin_ia32_shufps256:
   case X86::BI__builtin_ia32_shufps512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
     unsigned NumLaneElts = NumElts / NumLanes;
@@ -12725,7 +12741,7 @@
   case X86::BI__builtin_ia32_permdi512:
   case X86::BI__builtin_ia32_permdf512: {
     unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
 
     // These intrinsics operate on 256-bit lanes of four 64-bit elements.
@@ -12744,7 +12760,7 @@
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
 
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     assert(NumElts % 16 == 0);
 
     // If palignr is shifting the pair of vectors more than the size of two
@@ -12782,7 +12798,7 @@
   case X86::BI__builtin_ia32_alignq256:
   case X86::BI__builtin_ia32_alignq512: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
 
     // Mask the shift amount to width of two vectors.
@@ -12805,7 +12821,7 @@
   case X86::BI__builtin_ia32_shuf_i32x4:
   case X86::BI__builtin_ia32_shuf_i64x2: {
     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
     unsigned NumLaneElts = NumElts / NumLanes;
@@ -12832,7 +12848,7 @@
   case X86::BI__builtin_ia32_permti256: {
     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
 
     // This takes a very simple approach since there are two lanes and a
     // shuffle can have 2 inputs. So we reserve the first input for the first
@@ -12870,7 +12886,7 @@
   case X86::BI__builtin_ia32_pslldqi256_byteshift:
   case X86::BI__builtin_ia32_pslldqi512_byteshift: {
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
-    auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
     // Builtin type is vXi64 so multiply by 8 to get bytes.
     unsigned NumElts = ResultType->getNumElements() * 8;
 
@@ -12900,7 +12916,7 @@
   case X86::BI__builtin_ia32_psrldqi256_byteshift:
   case X86::BI__builtin_ia32_psrldqi512_byteshift: {
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
-    auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
     // Builtin type is vXi64 so multiply by 8 to get bytes.
     unsigned NumElts = ResultType->getNumElements() * 8;
 
@@ -13547,7 +13563,7 @@
   case X86::BI__builtin_ia32_fpclasspd256_mask:
   case X86::BI__builtin_ia32_fpclasspd512_mask: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Value *MaskIn = Ops[2];
     Ops.erase(&Ops[2]);
 
@@ -13585,7 +13601,7 @@
   case X86::BI__builtin_ia32_vp2intersect_d_256:
   case X86::BI__builtin_ia32_vp2intersect_d_128: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Intrinsic::ID ID;
 
     switch (BuiltinID) {
@@ -13644,7 +13660,7 @@
   case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
   case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Value *MaskIn = Ops[2];
     Ops.erase(&Ops[2]);
 
@@ -13788,7 +13804,7 @@
               ->getElementType()
               ->isIntegerTy(1)) {
         unsigned NumElts =
-            cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+            cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
         Value *MaskIn = Ops[3];
         Ops.erase(&Ops[3]);
 
@@ -13810,7 +13826,7 @@
     case X86::BI__builtin_ia32_cmppd256_mask: {
       // FIXME: Support SAE.
       unsigned NumElts =
-          cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+          cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
       Value *Cmp;
       if (IsSignaling)
         Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
@@ -13869,7 +13885,7 @@
   case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
     Ops[2] = getMaskVecValue(
         *this, Ops[2],
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements());
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
     Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
     return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
   }
Index: clang/lib/CodeGen/CGAtomic.cpp
===================================================================
--- clang/lib/CodeGen/CGAtomic.cpp
+++ clang/lib/CodeGen/CGAtomic.cpp
@@ -119,7 +119,7 @@
         ValueTy = lvalue.getType();
         ValueSizeInBits = C.getTypeSize(ValueTy);
         AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
-            lvalue.getType(), cast<llvm::VectorType>(
+            lvalue.getType(), cast<llvm::FixedVectorType>(
                                   lvalue.getExtVectorAddress().getElementType())
                                   ->getNumElements());
         AtomicSizeInBits = C.getTypeSize(AtomicTy);