Author: Max Kazantsev
Date: 2020-11-26T17:28:30+07:00
New Revision: 91d6b6b5fb94656dc12e1d760a3213a3cd72c8c5

URL: https://github.com/llvm/llvm-project/commit/91d6b6b5fb94656dc12e1d760a3213a3cd72c8c5
DIFF: https://github.com/llvm/llvm-project/commit/91d6b6b5fb94656dc12e1d760a3213a3cd72c8c5.diff

LOG: Revert "[SCEV] Use isBasicBlockEntryGuardedByCond in isLoopBackedgeGuardedByCond"

This reverts commit 3d4c0460ec6040fc071e56dc113afd181294591e.

Compile time impact is still high. Need to understand why.

Differential Revision: https://reviews.llvm.org/D92153

Added: 
    

Modified: 
    llvm/lib/Analysis/ScalarEvolution.cpp
    llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index b7bd54aafca70..53fd668be05cd 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -9911,7 +9911,42 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
   if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
     return true;
 
-  return isBasicBlockEntryGuardedByCond(Latch, Pred, LHS, RHS);
+  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
+       DTN != HeaderDTN; DTN = DTN->getIDom()) {
+    assert(DTN && "should reach the loop header before reaching the root!");
+
+    BasicBlock *BB = DTN->getBlock();
+    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
+      return true;
+
+    BasicBlock *PBB = BB->getSinglePredecessor();
+    if (!PBB)
+      continue;
+
+    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
+    if (!ContinuePredicate || !ContinuePredicate->isConditional())
+      continue;
+
+    Value *Condition = ContinuePredicate->getCondition();
+
+    // If we have an edge `E` within the loop body that dominates the only
+    // latch, the condition guarding `E` also guards the backedge.  This
+    // reasoning works only for loops with a single latch.
+
+    BasicBlockEdge DominatingEdge(PBB, BB);
+    if (DominatingEdge.isSingleEdge()) {
+      // We're constructively (and conservatively) enumerating edges within the
+      // loop body that dominate the latch.  The dominator tree better agree
+      // with us on this:
+      assert(DT.dominates(DominatingEdge, Latch) && "should be!");
+
+      if (isImpliedCond(Pred, LHS, RHS, Condition,
+                        BB != ContinuePredicate->getSuccessor(0)))
+        return true;
+    }
+  }
+
+  return false;
 }
 
 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,

diff --git a/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll b/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
index c39828923d5f9..8a07a49303d20 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
@@ -69,7 +69,7 @@ define void @_Z15IntegerToStringjjR7Vector2(i32 %i, i32 %radix, %struct.Vector2*
 ; CHECK-NEXT:    [[UGLYGEP2:%.*]] = bitcast i8* [[UGLYGEP]] to i16*
 ; CHECK-NEXT:    [[TMP29:%.*]] = load i16, i16* [[LSR_IV810]], align 2
 ; CHECK-NEXT:    store i16 [[TMP29]], i16* [[UGLYGEP2]], align 2
-; CHECK-NEXT:    [[LSR_IV_NEXT]] = add nuw nsw i64 [[LSR_IV]], 2
+; CHECK-NEXT:    [[LSR_IV_NEXT]] = add i64 [[LSR_IV]], 2
 ; CHECK-NEXT:    [[LSR_IV_NEXT3:%.*]] = inttoptr i64 [[LSR_IV_NEXT]] to i16*
; CHECK-NEXT:    [[SCEVGEP9:%.*]] = getelementptr [33 x i16], [33 x i16]* [[LSR_IV8]], i64 0, i64 1
 ; CHECK-NEXT:    [[TMP3]] = bitcast i16* [[SCEVGEP9]] to [33 x i16]*


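For readers skimming the patch, the following is a minimal, self-contained sketch of the dominator-tree walk that this revert restores in isLoopBackedgeGuardedByCond: starting from the loop's single latch, it climbs immediate dominators toward the header and, whenever a block is entered through the only edge out of a conditional branch, tests whether that branch condition (with the right polarity) proves the queried predicate on the backedge. The ImpliesCond callback below is a placeholder for the role ScalarEvolution::isImpliedCond plays in the real code; it and the free-function signature are assumptions made purely for illustration, not the in-tree interface.

    // Sketch only: restates the restored dominator-walk pattern outside of
    // ScalarEvolution. ImpliesCond is a hypothetical stand-in for
    // ScalarEvolution::isImpliedCond.
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/Instructions.h"
    #include <functional>

    using namespace llvm;

    static bool backedgeGuardedByDominatingCond(
        const Loop *L, const DominatorTree &DT,
        // Does Cond (inverted if Inverse is true) imply the condition we are
        // trying to prove about the backedge?
        std::function<bool(Value *Cond, bool Inverse)> ImpliesCond) {
      BasicBlock *Latch = L->getLoopLatch();
      if (!Latch) // The reasoning only works for loops with a single latch.
        return false;

      // Walk immediate dominators from the latch up to (but not including)
      // the loop header.
      for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
           DTN != HeaderDTN; DTN = DTN->getIDom()) {
        BasicBlock *BB = DTN->getBlock();

        // Only consider blocks entered through a single predecessor that
        // ends in a conditional branch.
        BasicBlock *PBB = BB->getSinglePredecessor();
        if (!PBB)
          continue;
        auto *BI = dyn_cast<BranchInst>(PBB->getTerminator());
        if (!BI || !BI->isConditional())
          continue;

        // If PBB->BB is a single edge, that edge dominates the latch, so the
        // branch condition (with the polarity leading into BB) holds on the
        // backedge and can be used to imply the queried predicate.
        if (BasicBlockEdge(PBB, BB).isSingleEdge() &&
            ImpliesCond(BI->getCondition(), BB != BI->getSuccessor(0)))
          return true;
      }
      return false;
    }

Compared with the reverted call to isBasicBlockEntryGuardedByCond, this walk only inspects blocks on the idom chain between the latch and the header, which is the narrower (and cheaper) reasoning the revert restores while the compile-time regression is investigated; the test change above shows one consequence, as the nuw/nsw flags on the LSR-produced add are no longer inferred.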