Author: Nicola Zaghen
Date: 2019-12-12T10:29:54Z
New Revision: f798eb21eca97dc44ed40da52ece22780fb74230

URL: https://github.com/llvm/llvm-project/commit/f798eb21eca97dc44ed40da52ece22780fb74230
DIFF: https://github.com/llvm/llvm-project/commit/f798eb21eca97dc44ed40da52ece22780fb74230.diff

LOG: Temporarily Revert "[DataLayout] Fix occurrences that size and range of pointers are assumed to be the same."

This reverts commit 5f6208778ff92567c57d7c1e2e740c284d7e69a5.

This caused failures in Transforms/PhaseOrdering/scev-custom-dl.ll
const: Assertion `getBitWidth() == CR.getBitWidth() && "ConstantRange types don't agree!"' failed.
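
For context: the tests involved use a custom datalayout whose pointer width
differs from its index width, e.g. (quoted from the deleted
llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll below):

    target datalayout = "e-m:o-p:40:64:64:32-i64:64-f80:128-n8:16:32:64-S128"

Here "p:40:64:64:32" declares 40-bit pointers with 64-bit ABI and preferred
alignment and a 32-bit index width. ConstantRange operations assert that both
operands have the same bit width, so mixing a range computed from the pointer
width with one computed from the index width is the kind of mismatch the
assertion above reports.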

Added: 
    

Modified: 
    clang/lib/CodeGen/CGExprScalar.cpp
    llvm/include/llvm/Analysis/PtrUseVisitor.h
    llvm/include/llvm/Analysis/Utils/Local.h
    llvm/lib/Analysis/ConstantFolding.cpp
    llvm/lib/Analysis/InlineCost.cpp
    llvm/lib/Analysis/InstructionSimplify.cpp
    llvm/lib/Analysis/Loads.cpp
    llvm/lib/Analysis/MemoryBuiltins.cpp
    llvm/lib/Analysis/ScalarEvolution.cpp
    llvm/lib/Analysis/ScalarEvolutionExpander.cpp
    llvm/lib/Analysis/ValueTracking.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/lib/IR/DataLayout.cpp
    llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
    llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
    llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
    llvm/lib/Transforms/Utils/Local.cpp
    llvm/test/Transforms/InstCombine/gep-custom-dl.ll
    llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
    llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll
    llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll

Removed: 
    llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll
    llvm/test/Transforms/InstCombine/stdio-custom-dl.ll


################################################################################
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index b8846162508e..b1dcbc51f58e 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -3264,10 +3264,10 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                                        expr->getRHS()))
     return CGF.Builder.CreateIntToPtr(index, pointer->getType());
 
-  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
+  if (width != DL.getTypeSizeInBits(PtrTy)) {
     // Zero-extend or sign-extend the pointer value according to
     // whether the index is signed or not.
-    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
+    index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
                                       "idx.ext");
   }
 

diff --git a/llvm/include/llvm/Analysis/PtrUseVisitor.h b/llvm/include/llvm/Analysis/PtrUseVisitor.h
index 05bca2226742..fbf04c841d30 100644
--- a/llvm/include/llvm/Analysis/PtrUseVisitor.h
+++ b/llvm/include/llvm/Analysis/PtrUseVisitor.h
@@ -222,9 +222,9 @@ class PtrUseVisitor : protected InstVisitor<DerivedT>,
     // offsets on this pointer.
     // FIXME: Support a vector of pointers.
     assert(I.getType()->isPointerTy());
-    IntegerType *IntIdxTy = cast<IntegerType>(DL.getIndexType(I.getType()));
+    IntegerType *IntPtrTy = cast<IntegerType>(DL.getIntPtrType(I.getType()));
     IsOffsetKnown = true;
-    Offset = APInt(IntIdxTy->getBitWidth(), 0);
+    Offset = APInt(IntPtrTy->getBitWidth(), 0);
     PI.reset();
 
     // Enqueue the uses of this pointer.

diff --git a/llvm/include/llvm/Analysis/Utils/Local.h b/llvm/include/llvm/Analysis/Utils/Local.h
index ca505960cbeb..98b931f93451 100644
--- a/llvm/include/llvm/Analysis/Utils/Local.h
+++ b/llvm/include/llvm/Analysis/Utils/Local.h
@@ -29,15 +29,15 @@ template <typename IRBuilderTy>
 Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
                      bool NoAssumptions = false) {
   GEPOperator *GEPOp = cast<GEPOperator>(GEP);
-  Type *IntIdxTy = DL.getIndexType(GEP->getType());
-  Value *Result = Constant::getNullValue(IntIdxTy);
+  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
+  Value *Result = Constant::getNullValue(IntPtrTy);
 
   // If the GEP is inbounds, we know that none of the addressing operations will
   // overflow in a signed sense.
   bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
 
   // Build a mask for high order bits.
-  unsigned IntPtrWidth = IntIdxTy->getScalarType()->getIntegerBitWidth();
+  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
   uint64_t PtrSizeMask =
       std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);
 
@@ -56,17 +56,17 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
         Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
 
         if (Size)
-          Result = Builder->CreateAdd(Result, ConstantInt::get(IntIdxTy, Size),
+          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
                                       GEP->getName()+".offs");
         continue;
       }
 
       // Splat the constant if needed.
-      if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy())
-        OpC = ConstantVector::getSplat(IntIdxTy->getVectorNumElements(), OpC);
+      if (IntPtrTy->isVectorTy() && !OpC->getType()->isVectorTy())
+        OpC = ConstantVector::getSplat(IntPtrTy->getVectorNumElements(), OpC);
 
-      Constant *Scale = ConstantInt::get(IntIdxTy, Size);
-      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/);
+      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
+      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
       Scale =
           ConstantExpr::getMul(OC, Scale, false /*NUW*/, isInBounds /*NSW*/);
       // Emit an add instruction.
@@ -75,15 +75,15 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
     }
 
     // Splat the index if needed.
-    if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
-      Op = Builder->CreateVectorSplat(IntIdxTy->getVectorNumElements(), Op);
+    if (IntPtrTy->isVectorTy() && !Op->getType()->isVectorTy())
+      Op = Builder->CreateVectorSplat(IntPtrTy->getVectorNumElements(), Op);
 
     // Convert to correct type.
-    if (Op->getType() != IntIdxTy)
-      Op = Builder->CreateIntCast(Op, IntIdxTy, true, Op->getName()+".c");
+    if (Op->getType() != IntPtrTy)
+      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
     if (Size != 1) {
       // We'll let instcombine(mul) convert this to a shl if possible.
-      Op = Builder->CreateMul(Op, ConstantInt::get(IntIdxTy, Size),
+      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
                               GEP->getName() + ".idx", false /*NUW*/,
                               isInBounds /*NSW*/);
     }

diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index b32924e6497a..23e4b9b86fd8 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -766,8 +766,8 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
 Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                          Type *ResultTy, Optional<unsigned> InRangeIndex,
                          const DataLayout &DL, const TargetLibraryInfo *TLI) {
-  Type *IntIdxTy = DL.getIndexType(ResultTy);
-  Type *IntIdxScalarTy = IntIdxTy->getScalarType();
+  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
+  Type *IntPtrScalarTy = IntPtrTy->getScalarType();
 
   bool Any = false;
   SmallVector<Constant*, 32> NewIdxs;
@@ -775,11 +775,11 @@ Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
     if ((i == 1 ||
          !isa<StructType>(GetElementPtrInst::getIndexedType(
              SrcElemTy, Ops.slice(1, i - 1)))) &&
-        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
+        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
       Any = true;
       Type *NewType = Ops[i]->getType()->isVectorTy()
-                          ? IntIdxTy
-                          : IntIdxScalarTy;
+                          ? IntPtrTy
+                          : IntPtrTy->getScalarType();
       NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                       true,
                                                                       NewType,
@@ -839,7 +839,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
   if (!Ptr->getType()->isPointerTy())
     return nullptr;
 
-  Type *IntIdxTy = DL.getIndexType(Ptr->getType());
+  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
 
   // If this is a constant expr gep that is effectively computing an
   // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
@@ -850,7 +850,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
         // "inttoptr (sub (ptrtoint Ptr), V)"
         if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
           auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
-          assert((!CE || CE->getType() == IntIdxTy) &&
+          assert((!CE || CE->getType() == IntPtrTy) &&
                  "CastGEPIndices didn't canonicalize index types!");
           if (CE && CE->getOpcode() == Instruction::Sub &&
               CE->getOperand(0)->isNullValue()) {
@@ -865,7 +865,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
         return nullptr;
       }
 
-  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
+  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
   APInt Offset =
       APInt(BitWidth,
             DL.getIndexedOffsetInType(
@@ -945,7 +945,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
         // The element size is 0. This may be [0 x Ty]*, so just use a zero
         // index for this level and proceed to the next level to see if it can
         // accommodate the offset.
-        NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
+        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
       } else {
         // The element size is non-zero divide the offset by the element
         // size (rounding down), to compute the index at this level.
@@ -954,7 +954,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
         if (Overflow)
           break;
         Offset -= NewIdx * ElemSize;
-        NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
+        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
       }
     } else {
       auto *STy = cast<StructType>(Ty);

diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 24af43af4686..55ce940bc3a5 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -1678,8 +1678,8 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
   } while (Visited.insert(V).second);
 
-  Type *IdxPtrTy = DL.getIndexType(V->getType());
-  return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
+  Type *IntPtrTy = DL.getIntPtrType(V->getContext(), AS);
+  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
 }
 
 /// Find dead blocks due to deleted CFG edges during inlining.

diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 75141f1ad95a..c27e2567d5f6 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -662,16 +662,16 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                bool AllowNonInbounds = false) {
   assert(V->getType()->isPtrOrPtrVectorTy());
 
-  Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
-  APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());
+  Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
+  APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
 
   V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
   // As that strip may trace through `addrspacecast`, need to sext or trunc
   // the offset calculated.
-  IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
-  Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth());
+  IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
+  Offset = Offset.sextOrTrunc(IntPtrTy->getIntegerBitWidth());
 
-  Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset);
+  Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
   if (V->getType()->isVectorTy())
     return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
                                     OffsetIntPtr);
@@ -4032,7 +4032,7 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
       // The following transforms are only safe if the ptrtoint cast
       // doesn't truncate the pointers.
       if (Ops[1]->getType()->getScalarSizeInBits() ==
-          Q.DL.getPointerSizeInBits(AS)) {
+          Q.DL.getIndexSizeInBits(AS)) {
         auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
           if (match(P, m_Zero()))
             return Constant::getNullValue(GEPTy);

diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index a7d07c0b6183..3f03d53778f4 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -150,7 +150,7 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
 
   // Require ABI alignment for loads without alignment specification
   const Align Alignment = DL.getValueOrABITypeAlignment(MA, Ty);
-  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
+  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
                    DL.getTypeStoreSize(Ty));
   return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                             DT);

diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 427e6fd3ace2..172c86eb4646 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -544,7 +544,6 @@ Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
           Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
       Value *UseZero =
           Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
-      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
       return Builder.CreateSelect(UseZero, ConstantInt::get(ResultType, 0),
                                   ResultSize);
     }
@@ -577,7 +576,7 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
 }
 
 SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
-  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
+  IntTyBits = DL.getPointerTypeSizeInBits(V->getType());
   Zero = APInt::getNullValue(IntTyBits);
 
   V = V->stripPointerCasts();
@@ -747,7 +746,7 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
 
 SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
   SizeOffsetType PtrData = compute(GEP.getPointerOperand());
-  APInt Offset(DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()), 0);
+  APInt Offset(IntTyBits, 0);
   if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset))
     return unknown();
 
@@ -835,7 +834,7 @@ ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
 
 SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
   // XXX - Are vectors of pointers possible here?
-  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
+  IntTy = cast<IntegerType>(DL.getIntPtrType(V->getType()));
   Zero = ConstantInt::get(IntTy, 0);
 
   SizeOffsetEvalType Result = compute_(V);
@@ -939,12 +938,12 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) {
   }
 
   Value *FirstArg = CS.getArgument(FnData->FstParam);
-  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
+  FirstArg = Builder.CreateZExt(FirstArg, IntTy);
   if (FnData->SndParam < 0)
     return std::make_pair(FirstArg, Zero);
 
   Value *SecondArg = CS.getArgument(FnData->SndParam);
-  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
+  SecondArg = Builder.CreateZExt(SecondArg, IntTy);
   Value *Size = Builder.CreateMul(FirstArg, SecondArg);
   return std::make_pair(Size, Zero);
 

diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index ca4fb9371b84..d635afb0a299 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3495,7 +3495,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
   const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
   // getSCEV(Base)->getType() has the same address space as Base->getType()
   // because SCEV::getType() preserves the address space.
-  Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
+  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
   // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
   // instruction to its SCEV, because the Instruction may be guarded by control
   // flow and the no-overflow bits may not be valid for the expression in any
@@ -3504,7 +3504,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
   SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                              : SCEV::FlagAnyWrap;
 
-  const SCEV *TotalOffset = getZero(IntIdxTy);
+  const SCEV *TotalOffset = getZero(IntPtrTy);
   // The array size is unimportant. The first thing we do on CurTy is getting
   // its element type.
   Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
@@ -3514,7 +3514,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
       // For a struct, add the member offset.
       ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
       unsigned FieldNo = Index->getZExtValue();
-      const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
+      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
 
       // Add the field offset to the running total offset.
       TotalOffset = getAddExpr(TotalOffset, FieldOffset);
@@ -3525,9 +3525,9 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
       // Update CurTy to its element type.
       CurTy = cast<SequentialType>(CurTy)->getElementType();
       // For an array, add the element offset, explicitly scaled.
-      const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
+      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
       // Getelementptr indices are signed.
-      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
+      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);
 
       // Multiply the index by the element size to compute the element offset.
       const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
@@ -3786,7 +3786,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
 
 /// Return a type with the same bitwidth as the given type and which represents
 /// how SCEV will treat the given type, for which isSCEVable must return
-/// true. For pointer types, this is the pointer index sized integer type.
+/// true. For pointer types, this is the pointer-sized integer type.
 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
   assert(isSCEVable(Ty) && "Type is not SCEVable!");
 
@@ -3795,7 +3795,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
 
   // The only other support type is pointer.
   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
-  return getDataLayout().getIndexType(Ty);
+  return getDataLayout().getIntPtrType(Ty);
 }
 
 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
@@ -5726,15 +5726,6 @@ ScalarEvolution::getRangeRef(const SCEV *S,
       assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
              "generalize as needed!");
      unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
-      // If the pointer size is larger than the index size type, this can cause
-      // NS to be larger than BitWidth. So compensate for this.
-      if (U->getType()->isPointerTy()) {
-        unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
-        int ptrIdxDiff = ptrSize - BitWidth;
-        if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
-          NS -= ptrIdxDiff;
-      }
-
       if (NS > 1)
         ConservativeResult = ConservativeResult.intersectWith(
             ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),

diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index dc5d02aa3a3c..bceec921188e 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -414,7 +414,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   // without the other.
   SplitAddRecs(Ops, Ty, SE);
 
-  Type *IntIdxTy = DL.getIndexType(PTy);
+  Type *IntPtrTy = DL.getIntPtrType(PTy);
 
   // Descend down the pointer's type and attempt to convert the other
   // operands into GEP indices, at each level. The first index in a GEP
@@ -426,7 +426,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
     // array indexing.
     SmallVector<const SCEV *, 8> ScaledOps;
     if (ElTy->isSized()) {
-      const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
+      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
       if (!ElSize->isZero()) {
         SmallVector<const SCEV *, 8> NewOps;
         for (const SCEV *Op : Ops) {

diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index ab021aece2f4..59e9d2eb928c 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -90,7 +90,7 @@ static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
   if (unsigned BitWidth = Ty->getScalarSizeInBits())
     return BitWidth;
 
-  return DL.getPointerTypeSizeInBits(Ty);
+  return DL.getIndexTypeSizeInBits(Ty);
 }
 
 namespace {
@@ -1137,7 +1137,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
     // which fall through here.
     Type *ScalarTy = SrcTy->getScalarType();
     SrcBitWidth = ScalarTy->isPointerTy() ?
-      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
+      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
       Q.DL.getTypeSizeInBits(ScalarTy);
 
     assert(SrcBitWidth && "SrcBitWidth can't be zero");
@@ -1664,7 +1664,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
 
   Type *ScalarTy = V->getType()->getScalarType();
   unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
-    Q.DL.getPointerTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
+    Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
   assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
   (void)BitWidth;
   (void)ExpectedWidth;
@@ -2409,7 +2409,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
 
   Type *ScalarTy = V->getType()->getScalarType();
   unsigned TyBits = ScalarTy->isPointerTy() ?
-    Q.DL.getPointerTypeSizeInBits(ScalarTy) :
+    Q.DL.getIndexTypeSizeInBits(ScalarTy) :
     Q.DL.getTypeSizeInBits(ScalarTy);
 
   unsigned Tmp, Tmp2;

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index cb0450cbfa88..9ca51e72ec72 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -9322,8 +9322,8 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
   const GlobalValue *GV = nullptr;
   int64_t GVOffset = 0;
   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
-    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
-    KnownBits Known(PtrWidth);
+    unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType());
+    KnownBits Known(IdxWidth);
     llvm::computeKnownBits(GV, Known, getDataLayout());
     unsigned AlignBits = Known.countMinTrailingZeros();
     unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;

diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index 94e0740663cc..5fe7a2e94b6a 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -768,13 +768,13 @@ unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const {
 
 IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
                                        unsigned AddressSpace) const {
-  return IntegerType::get(C, getPointerSizeInBits(AddressSpace));
+  return IntegerType::get(C, getIndexSizeInBits(AddressSpace));
 }
 
 Type *DataLayout::getIntPtrType(Type *Ty) const {
   assert(Ty->isPtrOrPtrVectorTy() &&
          "Expected a pointer or pointer vector type.");
-  unsigned NumBits = getPointerTypeSizeInBits(Ty);
+  unsigned NumBits = getIndexTypeSizeInBits(Ty);
   IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
   if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
     return VectorType::get(IntTy, VecTy->getNumElements());

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index e42cd7555a0d..5112fb1a6c39 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1832,7 +1832,7 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
   Type *Ty = CI.getType();
   unsigned AS = CI.getPointerAddressSpace();
 
-  if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
+  if (Ty->getScalarSizeInBits() == DL.getIndexSizeInBits(AS))
     return commonPointerCastTransforms(CI);
 
   Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 4abac988c2e4..fd5a4682aa2e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4930,7 +4930,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
   // Get scalar or pointer size.
   unsigned BitWidth = Ty->isIntOrIntVectorTy()
                           ? Ty->getScalarSizeInBits()
-                          : DL.getPointerTypeSizeInBits(Ty->getScalarType());
+                          : DL.getIndexTypeSizeInBits(Ty->getScalarType());
 
   if (!BitWidth)
     return nullptr;

diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index b1b87f810007..92783504ace8 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -901,12 +901,12 @@ bool LoopIdiomRecognize::processLoopStridedStore(
   SCEVExpander Expander(*SE, *DL, "loop-idiom");
 
   Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
-  Type *IntIdxTy = DL->getIndexType(DestPtr->getType());
+  Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
 
   const SCEV *Start = Ev->getStart();
   // Handle negative strided loops.
   if (NegStride)
-    Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE);
+    Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
 
   // TODO: ideally we should still be able to generate memset if SCEV expander
   // is taught to generate the dependencies at the latest point.
@@ -934,7 +934,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
   // Okay, everything looks good, insert the memset.
 
   const SCEV *NumBytesS =
-      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);
+      getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);
 
   // TODO: ideally we should still be able to generate memset if SCEV expander
   // is taught to generate the dependencies at the latest point.
@@ -942,7 +942,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
     return false;
 
   Value *NumBytes =
-      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
+      Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
 
   CallInst *NewCall;
   if (SplatValue) {
@@ -955,7 +955,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
     Module *M = TheStore->getModule();
     StringRef FuncName = "memset_pattern16";
     FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
-                                                Int8PtrTy, Int8PtrTy, IntIdxTy);
+                                                Int8PtrTy, Int8PtrTy, IntPtr);
     inferLibFuncAttributes(M, FuncName, *TLI);
 
    // Otherwise we should form a memset_pattern16.  PatternValue is known to be
@@ -1022,11 +1022,11 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
 
   const SCEV *StrStart = StoreEv->getStart();
   unsigned StrAS = SI->getPointerAddressSpace();
-  Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS));
+  Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
 
   // Handle negative strided loops.
   if (NegStride)
-    StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE);
+    StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);
 
   // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
   // this into a memcpy in the loop preheader now if we want.  However, this
@@ -1052,7 +1052,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
 
   // Handle negative strided loops.
   if (NegStride)
-    LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE);
+    LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);
 
   // For a memcpy, we have to make sure that the input array is not being
   // mutated by the loop.
@@ -1074,10 +1074,10 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
   // Okay, everything is safe, we can transform this!
 
   const SCEV *NumBytesS =
-      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);
+      getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);
 
   Value *NumBytes =
-      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
+      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
 
   CallInst *NewCall = nullptr;
   // Check whether to generate an unordered atomic memcpy:

diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index d5690a04f531..41f1dd951a84 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -2579,7 +2579,7 @@ void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
   if (!NewTy->isPointerTy())
     return;
 
-  unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
+  unsigned BitWidth = DL.getIndexTypeSizeInBits(NewTy);
   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
     MDNode *NN = MDNode::get(OldLI.getContext(), None);
     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);

diff --git a/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll b/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll
deleted file mode 100644
index eec0f7684252..000000000000
--- a/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: opt -instcombine -S < %s | FileCheck %s
-target datalayout = "e-m:o-p:40:64:64:32-i64:64-f80:128-n8:16:32:64-S128"
-
-; check that memory builtins can be handled.
-define i64 @objsize1_custom_idx(i64 %sz) {
-entry:
-  %ptr = call i8* @malloc(i64 %sz)
-  %ptr2 = getelementptr inbounds i8, i8* %ptr, i32 2
-  %calc_size = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr2, i1 false, i1 true, i1 true)
-  ret i64 %calc_size
-}
-
-%struct.V = type { [10 x i8], i32, [10 x i8] }
-
-define i32 @objsize2_custom_idx() #0 {
-entry:
-  %var = alloca %struct.V, align 4
-  %0 = bitcast %struct.V* %var to i8*
-  call void @llvm.lifetime.start.p0i8(i64 28, i8* %0) #3
-  %buf1 = getelementptr inbounds %struct.V, %struct.V* %var, i32 0, i32 0
-  %arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buf1, i64 0, i64 1
-  %1 = call i64 @llvm.objectsize.i64.p0i8(i8* %arrayidx, i1 false, i1 false, i1 false)
-  %conv = trunc i64 %1 to i32
-  call void @llvm.lifetime.end.p0i8(i64 28, i8* %0) #3
-  ret i32 %conv
-; CHECK: ret i32 27
-}
-
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
-declare i8* @malloc(i64)
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1, i1)

diff --git a/llvm/test/Transforms/InstCombine/gep-custom-dl.ll b/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
index fdadc80c60eb..bcaadf47d11d 100644
--- a/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
@@ -164,22 +164,3 @@ define i32 @test10() {
   %B = ptrtoint double* %A to i32
   ret i32 %B
 }
-
-@X_as1 = addrspace(1) global [1000 x i8] zeroinitializer, align 16
-
-define i16 @constant_fold_custom_dl() {
-; CHECK-LABEL: @constant_fold_custom_dl(
-  ; CHECK: ret i16 ptrtoint
-
-entry:
-  %A = bitcast i8 addrspace(1)* getelementptr inbounds ([1000 x i8], [1000 x i8] addrspace(1)* @X_as1, i64 1, i64 0) to i8 addrspace(1)*
-  %B = bitcast i8 addrspace(1)* getelementptr inbounds ([1000 x i8], [1000 x i8] addrspace(1)* @X_as1, i64 0, i64 0) to i8 addrspace(1)*
-
-  %B2 = ptrtoint i8 addrspace(1)* %B to i16
-  %C = sub i16 0, %B2
-  %D = getelementptr i8, i8 addrspace(1)* %A, i16 %C
-  %E = ptrtoint i8 addrspace(1)* %D to i16
-
-  ret i16 %E
-}
-

diff --git a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
index 6b7cb1cdd4bb..fcfe07e9be2d 100644
--- a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
@@ -8,8 +8,8 @@ declare i32 @test58_d(i64 )
 define i1 @test59(i8* %foo) {
 ; CHECK-LABEL: @test59(
; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i8, i8* [[FOO:%.*]], i32 8
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8* [[GEP1]] to i40
-; CHECK-NEXT:    [[USE:%.*]] = zext i40 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8* [[GEP1]] to i32
+; CHECK-NEXT:    [[USE:%.*]] = zext i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[CALL:%.*]] = call i32 @test58_d(i64 [[USE]])
 ; CHECK-NEXT:    ret i1 true
 ;

diff --git a/llvm/test/Transforms/InstCombine/stdio-custom-dl.ll b/llvm/test/Transforms/InstCombine/stdio-custom-dl.ll
deleted file mode 100644
index c4a666db237c..000000000000
--- a/llvm/test/Transforms/InstCombine/stdio-custom-dl.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -instcombine -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
-
-target datalayout = "e-m:o-p:40:64:64:32-i64:64-f80:128-n8:16:32:64-S128"
-%struct._IO_FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct._IO_FILE*, i32, i32, i64, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
-%struct._IO_marker = type { %struct._IO_marker*, %struct._IO_FILE*, i32 }
-@.str = private unnamed_addr constant [5 x i8] c"file\00", align 1
-@.str.1 = private unnamed_addr constant [2 x i8] c"w\00", align 1
-@.str.2 = private unnamed_addr constant [4 x i8] c"str\00", align 1
-
-; Check fwrite is generated with arguments of ptr size, not index size
-define internal void @fputs_test_custom_dl() {
-; CHECK-LABEL: @fputs_test_custom_dl(
-; CHECK-NEXT:    [[TMP1:%.*]] = call %struct._IO_FILE* @fopen(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i32 0, i32 0))
-;
-  %call = call %struct._IO_FILE* @fopen(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i64 0, i64 0), i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i64 0, i64 0))
-  %call1 = call i32 @fputs(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.2, i64 0, i64 0), %struct._IO_FILE* %call)
-  ret void
-}
-
-declare %struct._IO_FILE* @fopen(i8*, i8*)
-declare i32 @fputs(i8* nocapture readonly, %struct._IO_FILE* nocapture)

diff --git a/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll b/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll
index f67b121383cb..ae822dd2e818 100644
--- a/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll
+++ b/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll
@@ -65,73 +65,3 @@ for.inc:                                          ; preds = %for.body
 for.end:                                          ; preds = %for.cond
   ret void
 }
-
-@array = weak global [101 x i32] zeroinitializer, align 32             ; <[100 x i32]*> [#uses=1]
-
-; CHECK: Loop %bb: backedge-taken count is 100
-
-define void @test_range_ref1a(i32 %x) {
-entry:
-       br label %bb
-
-bb:            ; preds = %bb, %entry
-       %i.01.0 = phi i32 [ 100, %entry ], [ %tmp4, %bb ]               ; <i32> [#uses=2]
-       %tmp1 = getelementptr [101 x i32], [101 x i32]* @array, i32 0, i32 %i.01.0              ; <i32*> [#uses=1]
-       store i32 %x, i32* %tmp1
-       %tmp4 = add i32 %i.01.0, -1             ; <i32> [#uses=2]
-       %tmp7 = icmp sgt i32 %tmp4, -1          ; <i1> [#uses=1]
-       br i1 %tmp7, label %bb, label %return
-
-return:                ; preds = %bb
-       ret void
-}
-
-define i32 @test_loop_idiom_recogize(i32 %x, i32 %y, i32* %lam, i32* %alp) nounwind {
-bb1.thread:
-       br label %bb1
-
-bb1:           ; preds = %bb1, %bb1.thread
-       %indvar = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ]            ; <i32> [#uses=4]
-       %i.0.reg2mem.0 = sub i32 255, %indvar           ; <i32> [#uses=2]
-       %0 = getelementptr i32, i32* %alp, i32 %i.0.reg2mem.0           ; <i32*> [#uses=1]
-       %1 = load i32, i32* %0, align 4         ; <i32> [#uses=1]
-       %2 = getelementptr i32, i32* %lam, i32 %i.0.reg2mem.0           ; <i32*> [#uses=1]
-       store i32 %1, i32* %2, align 4
-       %3 = sub i32 254, %indvar               ; <i32> [#uses=1]
-       %4 = icmp slt i32 %3, 0         ; <i1> [#uses=1]
-       %indvar.next = add i32 %indvar, 1               ; <i32> [#uses=1]
-       br i1 %4, label %bb2, label %bb1
-
-bb2:           ; preds = %bb1
-       %tmp10 = mul i32 %indvar, %x            ; <i32> [#uses=1]
-       %z.0.reg2mem.0 = add i32 %tmp10, %y             ; <i32> [#uses=1]
-       %5 = add i32 %z.0.reg2mem.0, %x         ; <i32> [#uses=1]
-       ret i32 %5
-}
-
-declare void @use(i1)
-
-declare void @llvm.experimental.guard(i1, ...)
-
-; This tests getRangeRef acts as intended with different idx size.
-; CHECK: max backedge-taken count is 318
-define void @test_range_ref1(i8 %t) {
- entry:
-  %t.ptr = inttoptr i8 %t to i8*
-  %p.42 = inttoptr i8 42 to i8*
-  %cmp1 = icmp slt i8* %t.ptr, %p.42
-  call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
-  br label %loop
-
- loop:
-  %idx = phi i8* [ %t.ptr, %entry ], [ %snext, %loop ]
-  %snext = getelementptr inbounds i8, i8* %idx, i64 1
-  %c = icmp slt i8* %idx, %p.42
-  call void @use(i1 %c)
-  %be = icmp slt i8* %snext, %p.42
-  br i1 %be, label %loop, label %exit
-
- exit:
-  ret void
-}
-

diff --git a/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll b/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll
index dc07eb20568d..083cfe1ee299 100644
--- a/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll
+++ b/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll
@@ -33,10 +33,10 @@ F:              ; preds = %0
 
 define void @test1_ptr(i32* %V) {
 ; CHECK-LABEL: @test1_ptr(
-; CHECK-NEXT:    [[MAGICPTR:%.*]] = ptrtoint i32* [[V:%.*]] to i40
-; CHECK-NEXT:    switch i40 [[MAGICPTR]], label [[F:%.*]] [
-; CHECK-NEXT:    i40 17, label [[T:%.*]]
-; CHECK-NEXT:    i40 4, label [[T]]
+; CHECK-NEXT:    [[MAGICPTR:%.*]] = ptrtoint i32* [[V:%.*]] to i32
+; CHECK-NEXT:    switch i32 [[MAGICPTR]], label [[F:%.*]] [
+; CHECK-NEXT:    i32 17, label [[T:%.*]]
+; CHECK-NEXT:    i32 4, label [[T]]
 ; CHECK-NEXT:    ]
 ; CHECK:       T:
 ; CHECK-NEXT:    call void @foo1()
@@ -59,10 +59,10 @@ F:              ; preds = %0
 
 define void @test1_ptr_as1(i32 addrspace(1)* %V) {
 ; CHECK-LABEL: @test1_ptr_as1(
-; CHECK-NEXT:    [[MAGICPTR:%.*]] = ptrtoint i32 addrspace(1)* [[V:%.*]] to i40
-; CHECK-NEXT:    switch i40 [[MAGICPTR]], label [[F:%.*]] [
-; CHECK-NEXT:    i40 17, label [[T:%.*]]
-; CHECK-NEXT:    i40 4, label [[T]]
+; CHECK-NEXT:    [[MAGICPTR:%.*]] = ptrtoint i32 addrspace(1)* [[V:%.*]] to i32
+; CHECK-NEXT:    switch i32 [[MAGICPTR]], label [[F:%.*]] [
+; CHECK-NEXT:    i32 17, label [[T:%.*]]
+; CHECK-NEXT:    i32 4, label [[T]]
 ; CHECK-NEXT:    ]
 ; CHECK:       T:
 ; CHECK-NEXT:    call void @foo1()


        