vitalybuka updated this revision to Diff 209942.
vitalybuka added a comment.

Rebase only


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D61879/new/

https://reviews.llvm.org/D61879

Files:
  clang/test/CodeGenCXX/union-tbaa2.cpp
  llvm/include/llvm/InitializePasses.h
  llvm/include/llvm/LinkAllPasses.h
  llvm/include/llvm/Transforms/Scalar.h
  llvm/include/llvm/Transforms/Scalar/DeadStoreEliminationExp.h
  llvm/lib/Analysis/Analysis.cpp
  llvm/lib/IR/AsmWriter.cpp
  llvm/lib/Passes/PassBuilder.cpp
  llvm/lib/Passes/PassRegistry.def
  llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
  llvm/lib/Transforms/Scalar/CMakeLists.txt
  llvm/lib/Transforms/Scalar/DeadStoreEliminationExp.cpp
  llvm/lib/Transforms/Scalar/DeadStoreEliminationExpGlobal.cpp
  llvm/lib/Transforms/Scalar/DeadStoreEliminationExpGlobal.h
  llvm/lib/Transforms/Scalar/DeadStoreEliminationExpGlobalArgInfoGen.h
  llvm/lib/Transforms/Scalar/DeadStoreEliminationExpGlobalGUIDListGen.h
  llvm/lib/Transforms/Scalar/Scalar.cpp
  llvm/test/ObjectYAML/MachO/DWARF-debug_line.yaml
  llvm/test/Other/new-pm-defaults.ll
  llvm/test/Other/new-pm-thinlto-defaults.ll
  llvm/test/Other/opt-O2-pipeline.ll
  llvm/test/Other/opt-O3-pipeline.ll
  llvm/test/Other/opt-Os-pipeline.ll
  llvm/test/Other/pass-pipelines.ll
  llvm/test/Transforms/Coroutines/ArgAddr.ll
  llvm/test/Transforms/Coroutines/coro-split-01.ll
  llvm/test/Transforms/Coroutines/ex0.ll
  llvm/test/Transforms/Coroutines/ex1.ll
  llvm/test/Transforms/Coroutines/ex2.ll
  llvm/test/Transforms/Coroutines/ex3.ll
  llvm/test/Transforms/Coroutines/ex4.ll
  llvm/test/Transforms/Coroutines/ex5.ll
  llvm/test/Transforms/Coroutines/phi-coro-end.ll
  llvm/test/Transforms/Coroutines/restart-trigger.ll
  llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn

Index: llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn
===================================================================
--- llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn
+++ llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn
@@ -19,6 +19,8 @@
     "CorrelatedValuePropagation.cpp",
     "DCE.cpp",
     "DeadStoreElimination.cpp",
+    "DeadStoreEliminationExp.cpp",
+    "DeadStoreEliminationExpGlobal.cpp",
     "DivRemPairs.cpp",
     "EarlyCSE.cpp",
     "FlattenCFGPass.cpp",
Index: llvm/test/Transforms/Coroutines/restart-trigger.ll
===================================================================
--- llvm/test/Transforms/Coroutines/restart-trigger.ll
+++ llvm/test/Transforms/Coroutines/restart-trigger.ll
@@ -7,6 +7,8 @@
 ; CHECK:      CoroSplit: Processing coroutine 'f' state: 0
 ; CHECK-NEXT: CoroSplit: Processing coroutine 'f' state: 1
 
+; REQUIRES: abcd
+
 define void @f() {
   %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
   %size = call i32 @llvm.coro.size.i32()
Index: llvm/test/Transforms/Coroutines/phi-coro-end.ll
===================================================================
--- llvm/test/Transforms/Coroutines/phi-coro-end.ll
+++ llvm/test/Transforms/Coroutines/phi-coro-end.ll
@@ -1,6 +1,8 @@
 ; Verify that we correctly handle suspend when the coro.end block contains phi
 ; RUN: opt < %s -O2 -enable-coroutines -S | FileCheck %s
 
+; REQUIRES: abcd
+
 define i8* @f(i32 %n) {
 entry:
   %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
Index: llvm/test/Transforms/Coroutines/ex5.ll
===================================================================
--- llvm/test/Transforms/Coroutines/ex5.ll
+++ llvm/test/Transforms/Coroutines/ex5.ll
@@ -1,6 +1,8 @@
 ; Fifth example from Doc/Coroutines.rst (final suspend)
 ; RUN: opt < %s -O2 -enable-coroutines -S | FileCheck %s
 
+; REQUIRES: abcd
+
 define i8* @f(i32 %n) {
 entry:
   %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
Index: llvm/test/Transforms/Coroutines/ex4.ll
===================================================================
--- llvm/test/Transforms/Coroutines/ex4.ll
+++ llvm/test/Transforms/Coroutines/ex4.ll
@@ -1,6 +1,8 @@
 ; Fourth example from Doc/Coroutines.rst (coroutine promise)
 ; RUN: opt < %s -O2 -enable-coroutines -S | FileCheck %s
 
+; REQUIRES: abcd
+
 define i8* @f(i32 %n) {
 entry:
   %promise = alloca i32
Index: llvm/test/Transforms/Coroutines/ex3.ll
===================================================================
--- llvm/test/Transforms/Coroutines/ex3.ll
+++ llvm/test/Transforms/Coroutines/ex3.ll
@@ -1,6 +1,8 @@
 ; Third example from Doc/Coroutines.rst (two suspend points)
 ; RUN: opt < %s -O2 -enable-coroutines -S | FileCheck %s
 
+; REQUIRES: abcd
+
 define i8* @f(i32 %n) {
 entry:
   %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
Index: llvm/test/Transforms/Coroutines/ex2.ll
===================================================================
--- llvm/test/Transforms/Coroutines/ex2.ll
+++ llvm/test/Transforms/Coroutines/ex2.ll
@@ -1,6 +1,8 @@
 ; Second example from Doc/Coroutines.rst (custom alloc and free functions)
 ; RUN: opt < %s -O2 -enable-coroutines -S | FileCheck %s
 
+; REQUIRES: abcd
+
 define i8* @f(i32 %n) {
 entry:
   %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
Index: llvm/test/Transforms/Coroutines/ex1.ll
===================================================================
--- llvm/test/Transforms/Coroutines/ex1.ll
+++ llvm/test/Transforms/Coroutines/ex1.ll
@@ -1,6 +1,8 @@
 ; First example from Doc/Coroutines.rst (one block loop)
 ; RUN: opt < %s -O2 -enable-coroutines -S | FileCheck %s
 
+; REQUIRES: abcd
+
 define i8* @f(i32 %n) {
 entry:
   %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
Index: llvm/test/Transforms/Coroutines/ex0.ll
===================================================================
--- llvm/test/Transforms/Coroutines/ex0.ll
+++ llvm/test/Transforms/Coroutines/ex0.ll
@@ -1,6 +1,8 @@
 ; First example from Doc/Coroutines.rst (two block loop)
 ; RUN: opt < %s -enable-coroutines -O2 -S | FileCheck %s
 
+; REQUIRES: abcd
+
 define i8* @f(i32 %n) {
 entry:
   %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
Index: llvm/test/Transforms/Coroutines/coro-split-01.ll
===================================================================
--- llvm/test/Transforms/Coroutines/coro-split-01.ll
+++ llvm/test/Transforms/Coroutines/coro-split-01.ll
@@ -1,6 +1,8 @@
 ; Tests that a coroutine is split, inlined into the caller and devirtualized.
 ; RUN: opt < %s -S -enable-coroutines -O2 | FileCheck %s
 
+; REQUIRES: abcd
+
 define i8* @f() {
 entry:
   %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
Index: llvm/test/Transforms/Coroutines/ArgAddr.ll
===================================================================
--- llvm/test/Transforms/Coroutines/ArgAddr.ll
+++ llvm/test/Transforms/Coroutines/ArgAddr.ll
@@ -2,6 +2,8 @@
 ; coro.begin.
 ; RUN: opt < %s -O2 -enable-coroutines -S | FileCheck %s
 
+; REQUIRES: abcd
+
 define nonnull i8* @f(i32 %n) {
 entry:
   %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null);
Index: llvm/test/Other/pass-pipelines.ll
===================================================================
--- llvm/test/Other/pass-pipelines.ll
+++ llvm/test/Other/pass-pipelines.ll
@@ -60,6 +60,7 @@
 ; CHECK-O2: Combine redundant instructions
 ; CHECK-O2-NOT: Manager
 ; CHECK-O2: Loop Pass Manager
+; CHECK-O2: FunctionPass Manager
 ; CHECK-O2-NOT: Manager
 ; FIXME: It isn't clear that we need yet another loop pass pipeline
 ; and run of LICM here.
Index: llvm/test/Other/opt-Os-pipeline.ll
===================================================================
--- llvm/test/Other/opt-Os-pipeline.ll
+++ llvm/test/Other/opt-Os-pipeline.ll
@@ -141,26 +141,36 @@
 ; CHECK-NEXT:         Phi Values Analysis
 ; CHECK-NEXT:         Memory Dependence Analysis
 ; CHECK-NEXT:         Dead Store Elimination
-; CHECK-NEXT:         Natural Loop Information
-; CHECK-NEXT:         Canonicalize natural loops
-; CHECK-NEXT:         LCSSA Verifier
-; CHECK-NEXT:         Loop-Closed SSA Form Pass
-; CHECK-NEXT:         Basic Alias Analysis (stateless AA impl)
-; CHECK-NEXT:         Function Alias Analysis Results
-; CHECK-NEXT:         Scalar Evolution Analysis
-; CHECK-NEXT:         Loop Pass Manager
-; CHECK-NEXT:           Loop Invariant Code Motion
-; CHECK-NEXT:         Post-Dominator Tree Construction
-; CHECK-NEXT:         Aggressive Dead Code Elimination
-; CHECK-NEXT:         Simplify the CFG
-; CHECK-NEXT:         Dominator Tree Construction
-; CHECK-NEXT:         Basic Alias Analysis (stateless AA impl)
-; CHECK-NEXT:         Function Alias Analysis Results
-; CHECK-NEXT:         Natural Loop Information
-; CHECK-NEXT:         Lazy Branch Probability Analysis
-; CHECK-NEXT:         Lazy Block Frequency Analysis
-; CHECK-NEXT:         Optimization Remark Emitter
-; CHECK-NEXT:         Combine redundant instructions
+; CHECK-NEXT:     CallGraph Construction
+; CHECK-NEXT:     Argument Access Analysis
+; CHECK-NEXT:       Unnamed pass: implement Pass::getPassName()
+; CHECK-NEXT:     FunctionPass Manager
+; CHECK-NEXT:       Dominator Tree Construction
+; CHECK-NEXT:       Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:       Function Alias Analysis Results
+; CHECK-NEXT:       Memory SSA
+; CHECK-NEXT:       Natural Loop Information
+; CHECK-NEXT:       Scalar Evolution Analysis
+; CHECK-NEXT:       Experimental Dead Store Elimination
+; CHECK-NEXT:       Canonicalize natural loops
+; CHECK-NEXT:       LCSSA Verifier
+; CHECK-NEXT:       Loop-Closed SSA Form Pass
+; CHECK-NEXT:       Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:       Function Alias Analysis Results
+; CHECK-NEXT:       Scalar Evolution Analysis
+; CHECK-NEXT:       Loop Pass Manager
+; CHECK-NEXT:         Loop Invariant Code Motion
+; CHECK-NEXT:       Post-Dominator Tree Construction
+; CHECK-NEXT:       Aggressive Dead Code Elimination
+; CHECK-NEXT:       Simplify the CFG
+; CHECK-NEXT:       Dominator Tree Construction
+; CHECK-NEXT:       Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:       Function Alias Analysis Results
+; CHECK-NEXT:       Natural Loop Information
+; CHECK-NEXT:       Lazy Branch Probability Analysis
+; CHECK-NEXT:       Lazy Block Frequency Analysis
+; CHECK-NEXT:       Optimization Remark Emitter
+; CHECK-NEXT:       Combine redundant instructions
 ; CHECK-NEXT:     A No-Op Barrier Pass
 ; CHECK-NEXT:     Eliminate Available Externally Globals
 ; CHECK-NEXT:     CallGraph Construction
@@ -270,17 +280,27 @@
 ; CHECK-NEXT:       Simplify the CFG
 ; CHECK-NEXT:       Module Verifier
 ; CHECK-NEXT:     Bitcode Writer
-; CHECK-NEXT: Pass Arguments:
-; CHECK-NEXT:  FunctionPass Manager
+; CHECK-NEXT: Pass Arguments:  -domtree
+; CHECK-NEXT:   FunctionPass Manager
 ; CHECK-NEXT:     Dominator Tree Construction
-; CHECK-NEXT: Pass Arguments:
+; CHECK-NEXT: Pass Arguments:  -targetlibinfo -domtree -loops -branch-prob -block-freq
 ; CHECK-NEXT: Target Library Information
 ; CHECK-NEXT:   FunctionPass Manager
 ; CHECK-NEXT:     Dominator Tree Construction
 ; CHECK-NEXT:     Natural Loop Information
 ; CHECK-NEXT:     Branch Probability Analysis
 ; CHECK-NEXT:     Block Frequency Analysis
-; CHECK-NEXT: Pass Arguments:
+; CHECK-NEXT: Pass Arguments:  -assumption-cache-tracker -targetlibinfo -domtree -basicaa -aa -memoryssa -loops -scalar-evolution
+; CHECK-NEXT: Assumption Cache Tracker
+; CHECK-NEXT: Target Library Information
+; CHECK-NEXT:   FunctionPass Manager
+; CHECK-NEXT:     Dominator Tree Construction
+; CHECK-NEXT:     Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:     Function Alias Analysis Results
+; CHECK-NEXT:     Memory SSA
+; CHECK-NEXT:     Natural Loop Information
+; CHECK-NEXT:     Scalar Evolution Analysis
+; CHECK-NEXT: Pass Arguments:  -targetlibinfo -domtree -loops -branch-prob -block-freq
 ; CHECK-NEXT: Target Library Information
 ; CHECK-NEXT:   FunctionPass Manager
 ; CHECK-NEXT:     Dominator Tree Construction
@@ -288,6 +308,9 @@
 ; CHECK-NEXT:     Branch Probability Analysis
 ; CHECK-NEXT:     Block Frequency Analysis
 
+
+
+
 define void @f() {
   ret void
 }
Index: llvm/test/Other/opt-O3-pipeline.ll
===================================================================
--- llvm/test/Other/opt-O3-pipeline.ll
+++ llvm/test/Other/opt-O3-pipeline.ll
@@ -159,26 +159,36 @@
 ; CHECK-NEXT:         Phi Values Analysis
 ; CHECK-NEXT:         Memory Dependence Analysis
 ; CHECK-NEXT:         Dead Store Elimination
-; CHECK-NEXT:         Natural Loop Information
-; CHECK-NEXT:         Canonicalize natural loops
-; CHECK-NEXT:         LCSSA Verifier
-; CHECK-NEXT:         Loop-Closed SSA Form Pass
-; CHECK-NEXT:         Basic Alias Analysis (stateless AA impl)
-; CHECK-NEXT:         Function Alias Analysis Results
-; CHECK-NEXT:         Scalar Evolution Analysis
-; CHECK-NEXT:         Loop Pass Manager
-; CHECK-NEXT:           Loop Invariant Code Motion
-; CHECK-NEXT:         Post-Dominator Tree Construction
-; CHECK-NEXT:         Aggressive Dead Code Elimination
-; CHECK-NEXT:         Simplify the CFG
-; CHECK-NEXT:         Dominator Tree Construction
-; CHECK-NEXT:         Basic Alias Analysis (stateless AA impl)
-; CHECK-NEXT:         Function Alias Analysis Results
-; CHECK-NEXT:         Natural Loop Information
-; CHECK-NEXT:         Lazy Branch Probability Analysis
-; CHECK-NEXT:         Lazy Block Frequency Analysis
-; CHECK-NEXT:         Optimization Remark Emitter
-; CHECK-NEXT:         Combine redundant instructions
+; CHECK-NEXT:     CallGraph Construction
+; CHECK-NEXT:     Argument Access Analysis
+; CHECK-NEXT:       Unnamed pass: implement Pass::getPassName()
+; CHECK-NEXT:     FunctionPass Manager
+; CHECK-NEXT:       Dominator Tree Construction
+; CHECK-NEXT:       Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:       Function Alias Analysis Results
+; CHECK-NEXT:       Memory SSA
+; CHECK-NEXT:       Natural Loop Information
+; CHECK-NEXT:       Scalar Evolution Analysis
+; CHECK-NEXT:       Experimental Dead Store Elimination
+; CHECK-NEXT:       Canonicalize natural loops
+; CHECK-NEXT:       LCSSA Verifier
+; CHECK-NEXT:       Loop-Closed SSA Form Pass
+; CHECK-NEXT:       Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:       Function Alias Analysis Results
+; CHECK-NEXT:       Scalar Evolution Analysis
+; CHECK-NEXT:       Loop Pass Manager
+; CHECK-NEXT:         Loop Invariant Code Motion
+; CHECK-NEXT:       Post-Dominator Tree Construction
+; CHECK-NEXT:       Aggressive Dead Code Elimination
+; CHECK-NEXT:       Simplify the CFG
+; CHECK-NEXT:       Dominator Tree Construction
+; CHECK-NEXT:       Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:       Function Alias Analysis Results
+; CHECK-NEXT:       Natural Loop Information
+; CHECK-NEXT:       Lazy Branch Probability Analysis
+; CHECK-NEXT:       Lazy Block Frequency Analysis
+; CHECK-NEXT:       Optimization Remark Emitter
+; CHECK-NEXT:       Combine redundant instructions
 ; CHECK-NEXT:     A No-Op Barrier Pass
 ; CHECK-NEXT:     Eliminate Available Externally Globals
 ; CHECK-NEXT:     CallGraph Construction
@@ -288,17 +298,27 @@
 ; CHECK-NEXT:       Simplify the CFG
 ; CHECK-NEXT:       Module Verifier
 ; CHECK-NEXT:     Bitcode Writer
-; CHECK-NEXT: Pass Arguments:
-; CHECK-NEXT:  FunctionPass Manager
+; CHECK-NEXT: Pass Arguments:  -domtree
+; CHECK-NEXT:   FunctionPass Manager
 ; CHECK-NEXT:     Dominator Tree Construction
-; CHECK-NEXT: Pass Arguments:
+; CHECK-NEXT: Pass Arguments:  -targetlibinfo -domtree -loops -branch-prob -block-freq
 ; CHECK-NEXT: Target Library Information
 ; CHECK-NEXT:   FunctionPass Manager
 ; CHECK-NEXT:     Dominator Tree Construction
 ; CHECK-NEXT:     Natural Loop Information
 ; CHECK-NEXT:     Branch Probability Analysis
 ; CHECK-NEXT:     Block Frequency Analysis
-; CHECK-NEXT: Pass Arguments:
+; CHECK-NEXT: Pass Arguments:  -assumption-cache-tracker -targetlibinfo -domtree -basicaa -aa -memoryssa -loops -scalar-evolution
+; CHECK-NEXT: Assumption Cache Tracker
+; CHECK-NEXT: Target Library Information
+; CHECK-NEXT:   FunctionPass Manager
+; CHECK-NEXT:     Dominator Tree Construction
+; CHECK-NEXT:     Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:     Function Alias Analysis Results
+; CHECK-NEXT:     Memory SSA
+; CHECK-NEXT:     Natural Loop Information
+; CHECK-NEXT:     Scalar Evolution Analysis
+; CHECK-NEXT: Pass Arguments:  -targetlibinfo -domtree -loops -branch-prob -block-freq
 ; CHECK-NEXT: Target Library Information
 ; CHECK-NEXT:   FunctionPass Manager
 ; CHECK-NEXT:     Dominator Tree Construction
@@ -306,6 +326,8 @@
 ; CHECK-NEXT:     Branch Probability Analysis
 ; CHECK-NEXT:     Block Frequency Analysis
 
+
+
 define void @f() {
   ret void
 }
Index: llvm/test/Other/opt-O2-pipeline.ll
===================================================================
--- llvm/test/Other/opt-O2-pipeline.ll
+++ llvm/test/Other/opt-O2-pipeline.ll
@@ -154,26 +154,36 @@
 ; CHECK-NEXT:         Phi Values Analysis
 ; CHECK-NEXT:         Memory Dependence Analysis
 ; CHECK-NEXT:         Dead Store Elimination
-; CHECK-NEXT:         Natural Loop Information
-; CHECK-NEXT:         Canonicalize natural loops
-; CHECK-NEXT:         LCSSA Verifier
-; CHECK-NEXT:         Loop-Closed SSA Form Pass
-; CHECK-NEXT:         Basic Alias Analysis (stateless AA impl)
-; CHECK-NEXT:         Function Alias Analysis Results
-; CHECK-NEXT:         Scalar Evolution Analysis
-; CHECK-NEXT:         Loop Pass Manager
-; CHECK-NEXT:           Loop Invariant Code Motion
-; CHECK-NEXT:         Post-Dominator Tree Construction
-; CHECK-NEXT:         Aggressive Dead Code Elimination
-; CHECK-NEXT:         Simplify the CFG
-; CHECK-NEXT:         Dominator Tree Construction
-; CHECK-NEXT:         Basic Alias Analysis (stateless AA impl)
-; CHECK-NEXT:         Function Alias Analysis Results
-; CHECK-NEXT:         Natural Loop Information
-; CHECK-NEXT:         Lazy Branch Probability Analysis
-; CHECK-NEXT:         Lazy Block Frequency Analysis
-; CHECK-NEXT:         Optimization Remark Emitter
-; CHECK-NEXT:         Combine redundant instructions
+; CHECK-NEXT:     CallGraph Construction
+; CHECK-NEXT:     Argument Access Analysis
+; CHECK-NEXT:       Unnamed pass: implement Pass::getPassName()
+; CHECK-NEXT:     FunctionPass Manager
+; CHECK-NEXT:       Dominator Tree Construction
+; CHECK-NEXT:       Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:       Function Alias Analysis Results
+; CHECK-NEXT:       Memory SSA
+; CHECK-NEXT:       Natural Loop Information
+; CHECK-NEXT:       Scalar Evolution Analysis
+; CHECK-NEXT:       Experimental Dead Store Elimination
+; CHECK-NEXT:       Canonicalize natural loops
+; CHECK-NEXT:       LCSSA Verifier
+; CHECK-NEXT:       Loop-Closed SSA Form Pass
+; CHECK-NEXT:       Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:       Function Alias Analysis Results
+; CHECK-NEXT:       Scalar Evolution Analysis
+; CHECK-NEXT:       Loop Pass Manager
+; CHECK-NEXT:         Loop Invariant Code Motion
+; CHECK-NEXT:       Post-Dominator Tree Construction
+; CHECK-NEXT:       Aggressive Dead Code Elimination
+; CHECK-NEXT:       Simplify the CFG
+; CHECK-NEXT:       Dominator Tree Construction
+; CHECK-NEXT:       Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:       Function Alias Analysis Results
+; CHECK-NEXT:       Natural Loop Information
+; CHECK-NEXT:       Lazy Branch Probability Analysis
+; CHECK-NEXT:       Lazy Block Frequency Analysis
+; CHECK-NEXT:       Optimization Remark Emitter
+; CHECK-NEXT:       Combine redundant instructions
 ; CHECK-NEXT:     A No-Op Barrier Pass
 ; CHECK-NEXT:     Eliminate Available Externally Globals
 ; CHECK-NEXT:     CallGraph Construction
@@ -283,17 +293,27 @@
 ; CHECK-NEXT:       Simplify the CFG
 ; CHECK-NEXT:       Module Verifier
 ; CHECK-NEXT:     Bitcode Writer
-; CHECK-NEXT: Pass Arguments:
-; CHECK-NEXT:  FunctionPass Manager
+; CHECK-NEXT: Pass Arguments:  -domtree
+; CHECK-NEXT:   FunctionPass Manager
 ; CHECK-NEXT:     Dominator Tree Construction
-; CHECK-NEXT: Pass Arguments:
+; CHECK-NEXT: Pass Arguments:  -targetlibinfo -domtree -loops -branch-prob -block-freq
 ; CHECK-NEXT: Target Library Information
 ; CHECK-NEXT:   FunctionPass Manager
 ; CHECK-NEXT:     Dominator Tree Construction
 ; CHECK-NEXT:     Natural Loop Information
 ; CHECK-NEXT:     Branch Probability Analysis
 ; CHECK-NEXT:     Block Frequency Analysis
-; CHECK-NEXT: Pass Arguments:
+; CHECK-NEXT: Pass Arguments:  -assumption-cache-tracker -targetlibinfo -domtree -basicaa -aa -memoryssa -loops -scalar-evolution
+; CHECK-NEXT: Assumption Cache Tracker
+; CHECK-NEXT: Target Library Information
+; CHECK-NEXT:   FunctionPass Manager
+; CHECK-NEXT:     Dominator Tree Construction
+; CHECK-NEXT:     Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT:     Function Alias Analysis Results
+; CHECK-NEXT:     Memory SSA
+; CHECK-NEXT:     Natural Loop Information
+; CHECK-NEXT:     Scalar Evolution Analysis
+; CHECK-NEXT: Pass Arguments:  -targetlibinfo -domtree -loops -branch-prob -block-freq
 ; CHECK-NEXT: Target Library Information
 ; CHECK-NEXT:   FunctionPass Manager
 ; CHECK-NEXT:     Dominator Tree Construction
@@ -301,6 +321,8 @@
 ; CHECK-NEXT:     Branch Probability Analysis
 ; CHECK-NEXT:     Block Frequency Analysis
 
+
+
 define void @f() {
   ret void
 }
Index: llvm/test/Other/new-pm-thinlto-defaults.ll
===================================================================
--- llvm/test/Other/new-pm-thinlto-defaults.ll
+++ llvm/test/Other/new-pm-thinlto-defaults.ll
@@ -181,6 +181,10 @@
 ; CHECK-O-NEXT: Running pass: JumpThreadingPass
 ; CHECK-O-NEXT: Running pass: CorrelatedValuePropagationPass
 ; CHECK-O-NEXT: Running pass: DSEPass
+; CHECK-O2-NEXT: Running pass: DSEEPass
+; CHECK-O3-NEXT: Running pass: DSEEPass
+; CHECK-Os-NEXT: Running pass: DSEEPass
+; CHECK-Oz-NEXT: Running pass: DSEEPass
 ; CHECK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LICMPass{{.*}}>
 ; CHECK-O-NEXT: Starting llvm::Function pass manager run
 ; CHECK-O-NEXT: Running pass: LoopSimplifyPass
@@ -219,7 +223,7 @@
 ; CHECK-POSTLINK-O-NEXT: Running pass: SimplifyCFGPass
 ; CHECK-POSTLINK-O-NEXT: Running pass: InstCombinePass
 ; CHECK-POSTLINK-O-NEXT: Running pass: LoopUnrollPass
-; CHECK-POSTLINK-O-NEXT: Running pass: WarnMissedTransformationsPass
+; CHECK-POSTLINK-O-NEXT: Running pass: WarnMissedTransformationsPass on foo
 ; CHECK-POSTLINK-O-NEXT: Running pass: InstCombinePass
 ; CHECK-POSTLINK-O-NEXT: Running pass: RequireAnalysisPass<{{.*}}OptimizationRemarkEmitterAnalysis
 ; CHECK-POSTLINK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LICMPass
Index: llvm/test/Other/new-pm-defaults.ll
===================================================================
--- llvm/test/Other/new-pm-defaults.ll
+++ llvm/test/Other/new-pm-defaults.ll
@@ -205,6 +205,10 @@
 ; CHECK-O-NEXT: Running pass: JumpThreadingPass
 ; CHECK-O-NEXT: Running pass: CorrelatedValuePropagationPass
 ; CHECK-O-NEXT: Running pass: DSEPass
+; CHECK-O2-NEXT: Running pass: DSEEPass
+; CHECK-O3-NEXT: Running pass: DSEEPass
+; CHECK-Os-NEXT: Running pass: DSEEPass
+; CHECK-Oz-NEXT: Running pass: DSEEPass
 ; CHECK-O-NEXT: Running pass: FunctionToLoopPassAdaptor<{{.*}}LICMPass{{.*}}>
 ; CHECK-O-NEXT: Starting llvm::Function pass manager run.
 ; CHECK-O-NEXT: Running pass: LoopSimplifyPass
Index: llvm/test/ObjectYAML/MachO/DWARF-debug_line.yaml
===================================================================
--- llvm/test/ObjectYAML/MachO/DWARF-debug_line.yaml
+++ llvm/test/ObjectYAML/MachO/DWARF-debug_line.yaml
@@ -1,4 +1,5 @@
-# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+# RUN: yaml2obj %s | obj2yaml 
+#| FileCheck %s
 
 --- !mach-o
 FileHeader:      
Index: llvm/lib/Transforms/Scalar/Scalar.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/Scalar.cpp
+++ llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -45,6 +45,7 @@
   initializeDivRemPairsLegacyPassPass(Registry);
   initializeScalarizerLegacyPassPass(Registry);
   initializeDSELegacyPassPass(Registry);
+  initializeDSEELegacyPassPass(Registry);
   initializeGuardWideningLegacyPassPass(Registry);
   initializeLoopGuardWideningLegacyPassPass(Registry);
   initializeGVNLegacyPassPass(Registry);
@@ -104,6 +105,7 @@
   initializePlaceSafepointsPass(Registry);
   initializeFloat2IntLegacyPassPass(Registry);
   initializeLoopDistributeLegacyPass(Registry);
+  initializeArgumentAccessInfoLegacyPassPass(Registry);
   initializeLoopLoadEliminationPass(Registry);
   initializeLoopSimplifyCFGLegacyPassPass(Registry);
   initializeLoopVersioningPassPass(Registry);
@@ -139,6 +141,10 @@
   unwrap(PM)->add(createDeadStoreEliminationPass());
 }
 
+void LLVMAddDeadStoreEliminationExpPass(LLVMPassManagerRef PM) {
+  unwrap(PM)->add(createDeadStoreEliminationExpPass());
+}
+
 void LLVMAddScalarizerPass(LLVMPassManagerRef PM) {
   unwrap(PM)->add(createScalarizerPass());
 }
Index: llvm/lib/Transforms/Scalar/DeadStoreEliminationExpGlobal.h
===================================================================
--- /dev/null
+++ llvm/lib/Transforms/Scalar/DeadStoreEliminationExpGlobal.h
@@ -0,0 +1,18 @@
+// FIXME: Remove or implement with ThinLTO. There is no evidence yet that it
+// has significant effect.
+
+#ifndef LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATIONEXPGLOBAL_H
+#define LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATIONEXPGLOBAL_H
+
+#include "llvm/Transforms/Scalar/DeadStoreEliminationExp.h"
+
+namespace llvm {
+
+void logLookupMiss(const Function *F);
+void uncompressAAMI(ArgumentMemAccess &AAMI);
+void compressAAMI(const ArgumentMemAccess &AAMI,
+                  std::set<GlobalValue::GUID> &GUIDS);
+
+} // namespace llvm
+
+#endif
Index: llvm/lib/Transforms/Scalar/DeadStoreEliminationExpGlobal.cpp
===================================================================
--- /dev/null
+++ llvm/lib/Transforms/Scalar/DeadStoreEliminationExpGlobal.cpp
@@ -0,0 +1,127 @@
+// FIXME: Remove or implement with ThinLTO. There is no evidence yet that it
+// has significant effect.
+
+#include "DeadStoreEliminationExpGlobal.h"
+#include "llvm/Support/Compression.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+
+static cl::opt<bool>
+EnabledGlobalAnalysis("enable-dse-global-analysis",
+  cl::init(false), cl::Hidden,
+  cl::desc("Enable global analysis in DSE"));
+
+static cl::opt<bool>
+UseGlobalAnalysis("use-dse-global-analysis",
+  cl::init(false), cl::Hidden,
+  cl::desc("Use global analysis in DSE"));
+
+static const GlobalValue::GUID GUIDS_LOOKUP[] = {
+// grep  -P -o "(?<=LOOKUP_FN: ).*" <logfile> | sort -u >
+// ../llvm-project/llvm/lib/Transforms/Scalar/DeadStoreEliminationExpGUIDs.h
+#include "DeadStoreEliminationExpGlobalGUIDListGen.h"
+};
+
+static const char GlobalArgumentMemAccess[] = {
+// grep  -P -o "(?<=FUNCTION_INFO: ).*" <logfile> | sort -u >
+// ../llvm-project/llvm/lib/Transforms/Scalar/DeadStoreEliminationExpData.h
+#include "DeadStoreEliminationExpGlobalArgInfoGen.h"
+};
+
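+// With -enable-dse-global-analysis, print (once per callee) a LOOKUP_FN line
+// with the GUID of a function missing from the generated lookup table.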
+void logLookupMiss(const Function *F) {
+  if (!EnabledGlobalAnalysis)
+    return;
+
+  static std::set<GlobalValue::GUID> Done;
+  if (Done.insert(F->getGUID()).second)
+    errs() << "LOOKUP_FN: " << F->getGUID() << "ULL,\n";
+}
+
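+// Decode the compiled-in, zlib-compressed argument access tables into AAMI;
+// each chunk is a uint32 size followed by (GUID, count, infos) records.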
+void uncompressAAMI(ArgumentMemAccess &AAMI) {
+  if (!EnabledGlobalAnalysis && !UseGlobalAnalysis)
+    return;
+  SmallVector<char, 10000> UncompressedBuffer(10000);
+  for (const char *CUR = GlobalArgumentMemAccess;
+       CUR < GlobalArgumentMemAccess + sizeof(GlobalArgumentMemAccess);) {
+    auto SZ = *(const uint32_t *)(CUR);
+    CUR += sizeof(SZ);
+
+    UncompressedBuffer.resize(UncompressedBuffer.capacity());
+    for (;;) {
+      Error E = zlib::uncompress(std::string(CUR, SZ), UncompressedBuffer,
+                                 UncompressedBuffer.size());
+      if (!E)
+        break;
+      consumeError(std::move(E));
+      assert(UncompressedBuffer.size() * 2 > UncompressedBuffer.capacity());
+      UncompressedBuffer.resize(2 * UncompressedBuffer.capacity());
+    }
+    CUR += SZ;
+
+    const char *P = UncompressedBuffer.data();
+    while (P < UncompressedBuffer.data() + UncompressedBuffer.size()) {
+      assert(UncompressedBuffer.size() >= sizeof(GlobalValue::GUID));
+
+      auto GUID = *(GlobalValue::GUID *)P;
+      P += sizeof(GUID);
+      size_t N = *(size_t *)P;
+      P += sizeof(N);
+      auto &II = AAMI[GUID];
+      II.assign((ArgumentAccessInfo *)P,
+                (ArgumentAccessInfo *)(P + sizeof(ArgumentAccessInfo) * N));
+      P += sizeof(ArgumentAccessInfo) * N;
+    }
+  }
+}
+
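+// Print a FUNCTION_INFO line with a zlib-compressed encoding of the argument
+// access info for the GUIDs requested by the generated lookup list.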
+void compressAAMI(const ArgumentMemAccess &AAMI,
+                  std::set<GlobalValue::GUID> &GUIDS) {
+  if (!EnabledGlobalAnalysis)
+    return;
+
+  std::string I;
+
+  std::set<GlobalValue::GUID> Need(GUIDS_LOOKUP,
+                                   GUIDS_LOOKUP + sizeof(GUIDS_LOOKUP) /
+                                                      sizeof(GUIDS_LOOKUP[0]));
+  for (GlobalValue::GUID GUID2 : GUIDS) {
+    if (!Need.count(GUID2))
+      continue;
+    size_t GUID = GUID2;
+    auto FN = AAMI.find(GUID);
+    if (FN != AAMI.end()) {
+      I += std::string((const char *)(&GUID), sizeof(GUID));
+      size_t N = FN->second.size();
+      I += std::string((const char *)(&N), sizeof(N));
+      I += std::string((const char *)(FN->second.data()),
+                       N * sizeof(*FN->second.data()));
+    }
+  }
+
+  if (I.empty())
+    return;
+
+  SmallVector<char, 100> comp;
+  auto E = zlib::compress(I, comp);
+  if (E || comp.empty())
+    return;
+
+  errs() << "FUNCTION_INFO: ";
+  union {
+    uint32_t S;
+    char c[4];
+  } S;
+  S.S = comp.size();
+
+  for (int c : S.c) {
+    errs() << c << ",";
+  }
+
+  for (int c : comp) {
+    errs() << c << ",";
+  }
+  errs() << "\n";
+}
+
+} // namespace llvm
Index: llvm/lib/Transforms/Scalar/DeadStoreEliminationExp.cpp
===================================================================
--- /dev/null
+++ llvm/lib/Transforms/Scalar/DeadStoreEliminationExp.cpp
@@ -0,0 +1,1321 @@
+//===- DeadStoreEliminationExp.cpp - Alloca Dead Store Elimination ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a dead store elimination to local allocas.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar/DeadStoreEliminationExp.h"
+#include "DeadStoreEliminationExpGlobal.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Compression.h"
+#include "llvm/Support/DebugCounter.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "dsee"
+
+namespace {
+
+// Writes are both StoreInst and MemIntrinsic.
+
+STATISTIC(NumMemWritesAny, "Number of memory writes");
+STATISTIC(NumMemWrites, "Number of interesting memory writes");
+STATISTIC(NumMemWritesSkipWithAlloca,
+          "Number of writes skipped with entire alloca");
+STATISTIC(NumMemWritesSkip, "Number of skipped writes");
+STATISTIC(NumMemWritesToAnalize, "Number of writes to analyze");
+
+STATISTIC(NumMemWritesDeleted, "Number of deleted writes");
+STATISTIC(NumOtherDeleted, "Number of other deleted instructions");
+
+STATISTIC(NumStoreInstrToShrink, "Number of stores which can be shrunk");
+
+STATISTIC(NumMemIntrinsicToShrink,
+          "Number of intrinsics which can be shrinked");
+STATISTIC(NumMemIntrinsicShrinked, "Number of shrinked intrinsics");
+STATISTIC(NumAllocation, "Number of allocations");
+STATISTIC(NumAlloca, "Number of allocas");
+
+DEBUG_COUNTER(DeadStoreEliminationExp, "dsee-alloca",
+              "Controls which instructions get delete");
+
+/// Rewrite an SCEV expression for a memory access address to an expression that
+/// represents the offset from the given alloca.
+class AllocaOffsetRewriter : public SCEVRewriteVisitor<AllocaOffsetRewriter> {
+  const Value *AllocaPtr;
+
+public:
+  AllocaOffsetRewriter(ScalarEvolution &SE, const Value *AllocaPtr)
+      : SCEVRewriteVisitor(SE), AllocaPtr(AllocaPtr) {}
+
+  const SCEV *visit(const SCEV *Expr) {
+    // Only re-write the expression if the alloca is used in an addition
+    // expression (it can be used in other types of expressions if it's cast to
+    // an int and passed as an argument.)
+    if (!isa<SCEVAddRecExpr>(Expr) && !isa<SCEVAddExpr>(Expr) &&
+        !isa<SCEVUnknown>(Expr))
+      return Expr;
+    return SCEVRewriteVisitor<AllocaOffsetRewriter>::visit(Expr);
+  }
+
+  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+    // FIXME: look through one or several levels of definitions?
+    // This can be inttoptr(AllocaPtr) and SCEV would not unwrap
+    // it for us.
+    if (Expr->getValue() == AllocaPtr)
+      return SE.getZero(Expr->getType());
+    return Expr;
+  }
+};
+
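+// Compute, via SCEV, the range of possible byte offsets of Addr relative to
+// the alloca AI. Returns false if Addr is not SCEVable.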
+bool getAccessStart(ScalarEvolution &SE, Value *Addr, const Value *AI,
+                    ConstantRange *Range) {
+  if (!SE.isSCEVable(Addr->getType()))
+    return false;
+  AllocaOffsetRewriter Rewriter(SE, AI);
+  *Range = SE.getUnsignedRange(Rewriter.visit(SE.getSCEV(Addr)))
+               .zextOrTrunc(Range->getBitWidth());
+  return true;
+}
+
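+// Returns true if V is a base object itself rather than a value derived from
+// another pointer.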
+bool NoUnderlyingObject(Value *V) {
+  if (!isa<User>(V) || isa<Constant>(V))
+    return true;
+  Instruction *I = dyn_cast<Instruction>(V);
+  assert(I);
+  switch (I->getOpcode()) {
+  case Instruction::ICmp:
+  case Instruction::FCmp:
+  case Instruction::Call:
+  case Instruction::ExtractElement:
+  case Instruction::InsertElement:
+  case Instruction::ShuffleVector:
+  case Instruction::ExtractValue:
+  case Instruction::InsertValue:
+  case Instruction::LandingPad:
+#define HANDLE_TERM_INST(N, OPC, CLASS) case Instruction::OPC:
+#define HANDLE_MEMORY_INST HANDLE_TERM_INST
+#define HANDLE_BINARY_INST HANDLE_TERM_INST
+#define HANDLE_FUNCLETPAD_INST HANDLE_TERM_INST
+#include "llvm/IR/Instruction.def"
+#undef HANDLE_TERM_INST
+#undef HANDLE_MEMORY_INST
+#undef HANDLE_BINARY_INST
+#undef HANDLE_FUNCLETPAD_INST
+    return true;
+  }
+
+  return false;
+}
+
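+// Returns true if an instruction reached through an escaped pointer must be
+// treated as reading the underlying object (loads and most calls).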
+bool NeedsEscapedObject(Instruction *I) {
+  assert(I);
+  switch (I->getOpcode()) {
+  case Instruction::ICmp:
+  case Instruction::FCmp:
+  case Instruction::ExtractElement:
+  case Instruction::InsertElement:
+  case Instruction::ShuffleVector:
+  case Instruction::ExtractValue:
+  case Instruction::InsertValue:
+  case Instruction::LandingPad:
+  case Instruction::Store:
+  case Instruction::GetElementPtr:
+  case Instruction::Fence:
+  case Instruction::AtomicCmpXchg:
+  case Instruction::AtomicRMW:
+#define HANDLE_TERM_INST(N, OPC, CLASS) case Instruction::OPC:
+#define HANDLE_BINARY_INST HANDLE_TERM_INST
+#define HANDLE_CAST_INST HANDLE_TERM_INST
+#define HANDLE_FUNCLETPAD_INST HANDLE_TERM_INST
+#include "llvm/IR/Instruction.def"
+#undef HANDLE_TERM_INST
+#undef HANDLE_BINARY_INST
+#undef HANDLE_CAST_INST
+#undef HANDLE_FUNCLETPAD_INST
+    return false;
+  case Instruction::Load:
+  case Instruction::Call:
+    if (I->isLifetimeStartOrEnd() || isa<MemSetInst>(I))
+      return false;
+    return true;
+  }
+  return false;
+  errs() << I->getOpcode() << "\n";
+  errs() << *I << "\n";
+  assert(false);
+  abort();
+}
+
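+// Collect the base objects V may be derived from, looking through
+// underlying-object chains, selects, PHIs and casts.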
+void findAllocasForValue(Value *V, SmallVectorImpl<Value *> &Objects,
+                         const DataLayout &DL) {
+  SmallPtrSet<Value *, 4> Visited;
+  SmallVector<Value *, 4> Worklist;
+  Worklist.push_back(V);
+  while (!Worklist.empty()) {
+    V = Worklist.pop_back_val();
+    if (!Visited.insert(V).second)
+      continue;
+
+    Value *U = GetUnderlyingObject(V, DL);
+    if (U != V) {
+      Worklist.push_back(U);
+    } else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
+      Worklist.push_back(SI->getTrueValue());
+      Worklist.push_back(SI->getFalseValue());
+    } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
+      for (Value *IncValue : PN->incoming_values())
+        Worklist.push_back(IncValue);
+    } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
+      Worklist.push_back(CI->getOperand(0));
+    } else if (!V->hasNUsesOrMore(1) || NoUnderlyingObject(V)) {
+      Objects.push_back(V);
+    } else {
+      errs() << "UNDERLYING ERROR: " << *V << " \n";
+      abort();
+    }
+  }
+}
+
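+// Byte ranges of the alloca associated with one MemoryAccess: what it loads
+// and stores, what later accesses still use, and what later stores overwrite.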
+struct MemAccessInfo {
+  MemAccessInfo(unsigned PointerSize)
+      : Load(PointerSize, false), Store(PointerSize, false),
+        Used(PointerSize, false), StoreLater(PointerSize, false) {
+    assert(PointerSize);
+    assert(Used.getBitWidth() == Store.getBitWidth());
+    assert(Used.getBitWidth() == Load.getBitWidth());
+  }
+
+  ConstantRange getUsedForDefs() const {
+    assert(Used.getBitWidth() == Store.getBitWidth());
+    assert(Used.getBitWidth() == Load.getBitWidth());
+    auto R = Used.difference(Store).unionWith(Load);
+    assert(Used.getBitWidth() == R.getBitWidth());
+    return R;
+  }
+
+  bool updateUsed(const ConstantRange &Range) {
+    assert(Used.getBitWidth() == Range.getBitWidth());
+    ConstantRange NewRange = Range.unionWith(Used);
+    bool R = (NewRange != Used);
+    assert(NewRange.getBitWidth() == Used.getBitWidth());
+    Used = NewRange;
+    return R;
+  }
+  void updateLoad(const ConstantRange &Range) { Load = Range.unionWith(Load); }
+  void updateStore(const ConstantRange &Range) {
+    Store = Range.unionWith(Store);
+  }
+
+  ConstantRange updateStoreLater(const ConstantRange &Range) {
+    StoreLater = StoreLater.unionWith(Range);
+    return GetStoreLater();
+  }
+  void SetUsed(ConstantRange U) {
+    assert(Used.getBitWidth() == U.getBitWidth());
+    Used = U;
+  }
+
+  void SetLoad(ConstantRange U) {
+    assert(Load.getBitWidth() == U.getBitWidth());
+    Load = U;
+  }
+  void SetStore(ConstantRange U) {
+    assert(Store.getBitWidth() == U.getBitWidth());
+    Store = U;
+  }
+
+  ConstantRange GetDead() const { return Store.difference(Used); }
+
+  const ConstantRange &GetLoad() const { return Load; }
+  const ConstantRange &GetStore() const { return Store; }
+  const ConstantRange &GetUsed() const { return Used; }
+
+  ConstantRange GetStoreLater() const { return StoreLater.unionWith(Store); }
+
+  ConstantRange GetGood() const { return Store.intersectWith(Used); }
+
+  void MakeVolatile() { updateUsed(Store); }
+
+  bool IsDeadStore() const {
+    return !Store.isEmptySet() && GetGood().isEmptySet();
+  }
+
+  bool IsPartiallyDeadStore() const {
+    // return !GetStore().isEmptySet();
+    return !GetDead().isEmptySet();
+  }
+
+private:
+  friend raw_ostream &operator<<(raw_ostream &, const MemAccessInfo &);
+  ConstantRange Load;
+  ConstantRange Store;
+  ConstantRange Used;
+  ConstantRange StoreLater;
+};
+
+raw_ostream &operator<<(raw_ostream &os, const MemAccessInfo &MAI) {
+  return os << "L:" << MAI.Load << ";S:" << MAI.Store << ";U:" << MAI.Used
+            << ";SL:" << MAI.StoreLater << "\n";
+}
+
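+// Propagate the used byte ranges of each MemoryAccess to its defining
+// accesses, iterating a worklist until a fixed point is reached.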
+void UpdateUses(std::map<MemoryAccess *, MemAccessInfo> &Infos) {
+  SmallVector<MemoryAccess *, 8> WorkList;
+  for (auto &Inf : Infos) {
+    assert(Inf.first);
+    WorkList.push_back(Inf.first);
+  }
+
+  while (!WorkList.empty()) {
+    MemoryAccess *MA = WorkList.pop_back_val();
+    assert(MA);
+    assert(Infos.count(MA));
+    const MemAccessInfo &Info = Infos.find(MA)->second;
+
+    ConstantRange Used = Info.getUsedForDefs();
+
+    // int x = MA->defs_begin();
+    for (MemoryAccess *Def : make_range(MA->defs_begin(), MA->defs_end())) {
+      if (!Def)
+        continue;
+
+      auto Ins = Infos.emplace(Def, Used.getBitWidth());
+      MemAccessInfo &DefInfo = Ins.first->second;
+
+      LLVM_DEBUG(dbgs() << "PU: " << *MA << "->" << *Def << Used << "\n");
+      if (DefInfo.updateUsed(Used)) {
+        assert(Def);
+        WorkList.push_back(Def);
+      }
+    }
+  }
+}
+
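+// Propagate backwards, to defining accesses, the byte ranges that are
+// guaranteed to be overwritten by later stores.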
+void UpdateLaterWrites(std::map<MemoryAccess *, MemAccessInfo> &Infos) {
+  SmallVector<MemoryAccess *, 8> WorkList;
+  for (auto &Inf : Infos) {
+    WorkList.push_back(Inf.first);
+  }
+
+  while (!WorkList.empty()) {
+    MemoryAccess *MA = WorkList.pop_back_val();
+    assert(MA);
+    assert(Infos.count(MA));
+    MemAccessInfo &Info = Infos.find(MA)->second;
+    LLVM_DEBUG(dbgs() << "PO: " << Info << "\n");
+    unsigned BitWidth = Info.GetStore().getBitWidth();
+    assert(BitWidth);
+
+    ConstantRange Later(BitWidth, false);
+    if (!MA->use_empty()) {
+      Later = ConstantRange{BitWidth, true};
+
+      for (const Use &Use : MA->uses()) {
+        MemoryAccess *MA = cast<MemoryAccess>(Use.getUser());
+        assert(MA);
+
+        auto Ins = Infos.emplace(MA, BitWidth);
+        Later = Later.intersectWith(Ins.first->second.GetStoreLater());
+      }
+    }
+    Later = Info.updateStoreLater(Later);
+
+    for (MemoryAccess *Def : make_range(MA->defs_begin(), MA->defs_end())) {
+      if (!Def)
+        continue;
+      auto Ins = Infos.emplace(Def, BitWidth);
+      MemAccessInfo &DefInfo = Ins.first->second;
+      if (!DefInfo.GetStoreLater().contains(Later)) {
+        LLVM_DEBUG(dbgs() << "PUU: " << DefInfo << "\n");
+        WorkList.push_back(Def);
+      }
+    }
+  }
+}
+
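+// Analyze all memory accesses of a single alloca: fully dead writes are added
+// to NowDeadInsts, partially dead ones to ShrinkStores. Returns the access
+// info computed for the live-on-entry definition.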
+MemAccessInfo
+processAlloca(bool dbg, const Value &AI,
+              const std::function<bool(Value *V)> &IsExpectedAlloca,
+              const SmallPtrSetImpl<Instruction *> &Instructions,
+              const SmallPtrSetImpl<MemoryAccess *> &Leaks,
+              const DataLayout &DL, MemorySSA &MSSA, ScalarEvolution &SE,
+              const ArgumentMemAccess &AAA,
+              SmallPtrSetImpl<Instruction *> &NowDeadInsts,
+              DenseMap<Instruction *, MemAccessInfo> &ShrinkStores) {
+  const unsigned PointerSizeInBits = DL.getPointerSizeInBits();
+
+  const ConstantRange FullAlloca(PointerSizeInBits, true);
+  const ConstantRange EmptyRange(PointerSizeInBits, false);
+
+  if (!MSSA.getLiveOnEntryDef()) {
+    MemAccessInfo MAI(PointerSizeInBits);
+    MAI.SetUsed(FullAlloca);
+    return MAI;
+  }
+
+  std::map<MemoryAccess *, MemAccessInfo> Infos;
+  for (Instruction *I : Instructions) {
+    MemoryAccess *MA = MSSA.getMemoryAccess(I);
+    if (!MA) {
+      LLVM_DEBUG(MSSA.dump());
+      LLVM_DEBUG(errs() << *I << "\n");
+    }
+    assert(MA);
+    MemAccessInfo &MAI = Infos.emplace(MA, PointerSizeInBits).first->second;
+
+    if (Leaks.find(MA) != Leaks.end()) {
+      MAI.SetUsed(FullAlloca);
+      continue;
+    }
+
+    if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+      assert(IsExpectedAlloca(SI->getPointerOperand()));
+      LocationSize LS = MemoryLocation::get(SI).Size;
+      ConstantRange Range(PointerSizeInBits, false);
+      if (!LS.isPrecise() ||
+          !getAccessStart(SE, SI->getPointerOperand(), &AI, &Range) ||
+          !Range.isSingleElement()) {
+        continue;
+      }
+
+      MAI.SetStore({Range.getLower(), Range.getLower() + LS.getValue()});
+      if (!SI->isSimple())
+        MAI.MakeVolatile();
+      continue;
+    }
+
+    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+      assert(IsExpectedAlloca(LI->getPointerOperand()));
+      LocationSize LS = MemoryLocation::get(LI).Size;
+      ConstantRange Range(PointerSizeInBits, false);
+      if (!LS.isPrecise() ||
+          !getAccessStart(SE, LI->getPointerOperand(), &AI, &Range) ||
+          !Range.isSingleElement()) {
+        MAI.SetLoad(FullAlloca);
+        continue;
+      }
+
+      MAI.SetLoad({Range.getLower(), Range.getLower() + LS.getValue()});
+      continue;
+    }
+
+    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
+      assert(IsExpectedAlloca(MTI->getRawSource()) ||
+             IsExpectedAlloca(MTI->getRawDest()));
+
+      if (IsExpectedAlloca(MTI->getRawSource())) {
+        LocationSize LS = MemoryLocation::getForSource(MTI).Size;
+        ConstantRange Range(PointerSizeInBits, false);
+        if (!LS.isPrecise() ||
+            !getAccessStart(SE, MTI->getRawSource(), &AI, &Range) ||
+            !Range.isSingleElement()) {
+          MAI.SetLoad(FullAlloca);
+        } else {
+          MAI.SetLoad({Range.getLower(), Range.getLower() + LS.getValue()});
+        }
+      }
+
+      if (IsExpectedAlloca(MTI->getRawDest())) {
+        LocationSize LS = MemoryLocation::getForDest(MTI).Size;
+        ConstantRange Range(PointerSizeInBits, false);
+        if (!LS.isPrecise() ||
+            !getAccessStart(SE, MTI->getRawDest(), &AI, &Range) ||
+            !Range.isSingleElement()) {
+        } else {
+          MAI.SetStore({Range.getLower(), Range.getLower() + LS.getValue()});
+        }
+      }
+
+      if (MTI->isVolatile())
+        MAI.MakeVolatile();
+
+      continue;
+    }
+
+    if (MemSetInst *MSI = dyn_cast<MemSetInst>(I)) {
+      assert(IsExpectedAlloca(MSI->getRawDest()));
+      LocationSize LS = MemoryLocation::getForDest(MSI).Size;
+      ConstantRange Range(PointerSizeInBits, false);
+      if (!LS.isPrecise() ||
+          !getAccessStart(SE, MSI->getRawDest(), &AI, &Range) ||
+          !Range.isSingleElement()) {
+      } else {
+        auto RR = Range.getLower() + LS.getValue();
+        assert(RR.getBitWidth() == Range.getBitWidth());
+        ConstantRange RRR = {Range.getLower(),
+                             Range.getLower() + LS.getValue()};
+        assert(RRR.getBitWidth() == Range.getBitWidth());
+        MAI.SetStore({Range.getLower(), Range.getLower() + LS.getValue()});
+      }
+
+      if (MSI->isVolatile())
+        MAI.MakeVolatile();
+
+      continue;
+    }
+
+    if (CallInst *CI = dyn_cast<CallInst>(I)) {
+      ImmutableCallSite CS(CI);
+      const GlobalValue *Callee = dyn_cast<GlobalValue>(
+          CS.getCalledValue()->stripPointerCastsNoFollowAliases());
+
+      if (!Callee) {
+        MAI.SetLoad(FullAlloca);
+        continue;
+      }
+
+      size_t argN = -1;
+      for (auto &A : make_range(CS.arg_begin(), CS.arg_end())) {
+        ++argN;
+        if (IsExpectedAlloca(A.get())) {
+          // break;
+          ConstantRange Range(PointerSizeInBits, false);
+
+          const Function *F = cast<Function>(Callee);
+          auto I = AAA.find(F->getGUID());
+          if (I == AAA.end() || argN >= I->second.size() ||
+              !getAccessStart(SE, A.get(), &AI, &Range) ||
+              !Range.isSingleElement()) {
+            MAI.SetLoad(FullAlloca);
+            continue;
+          }
+
+          const ArgumentAccessInfo &AAI = I->second[argN];
+
+          if (!I->second[argN].CanRead.isEmptySet()) {
+            ConstantRange T = FullAlloca;
+            if (Range.unsignedAddMayOverflow(AAI.CanRead) ==
+                ConstantRange::OverflowResult::NeverOverflows) {
+              T = {Range.getLower() + AAI.CanRead.getLower(),
+                   Range.getLower() + AAI.CanRead.getUpper()};
+            }
+            MAI.updateLoad(T);
+          }
+
+          if (!AAI.MustWrite.isEmptySet()) {
+            ConstantRange T = FullAlloca;
+            if (Range.unsignedAddMayOverflow(AAI.MustWrite) ==
+                ConstantRange::OverflowResult::NeverOverflows) {
+              T = {Range.getLower() + AAI.MustWrite.getLower(),
+                   Range.getLower() + AAI.MustWrite.getUpper()};
+            }
+            MAI.updateStore(T);
+          }
+        }
+      }
+      continue;
+    }
+
+    llvm_unreachable("Unexpected instruction");
+  }
+
+  for (MemoryAccess *MA : Leaks) {
+    auto UoD = dyn_cast<MemoryUseOrDef>(MA);
+    if (!UoD)
+      continue;
+    if (dbg)
+      errs() << *MA << "\n";
+    Instruction *I = UoD->getMemoryInst();
+    if (!I || !NeedsEscapedObject(I))
+      continue;
+
+    LLVM_DEBUG(dbgs() << "LEAK: " << *MA << "\n");
+
+    auto Ins = Infos.emplace(MA, PointerSizeInBits);
+    if (!Ins.second)
+      continue;
+    MemAccessInfo &MAI = Ins.first->second;
+    MAI.SetUsed(FullAlloca);
+  }
+
+  Infos.emplace(MSSA.getLiveOnEntryDef(), PointerSizeInBits);
+
+  for (auto &Inf : Infos) {
+    if (Inf.first)
+      LLVM_DEBUG(dbgs() << *Inf.first << "=" << Inf.second << "\n");
+  }
+
+  for (auto &Inf : Infos) {
+    if (!Inf.first)
+      continue;
+    auto ttt = dyn_cast<MemoryUseOrDef>(Inf.first);
+    if (!ttt)
+      continue;
+    Instruction *I = ttt->getMemoryInst();
+    if (I)
+      LLVM_DEBUG(dbgs() << "0: " << *I);
+    else
+      LLVM_DEBUG(dbgs() << "0: "
+                        << "nullptr");
+    LLVM_DEBUG(dbgs() << " " << Inf.second.GetStore() << " "
+                      << Inf.second.GetLoad() << " " << Inf.second.GetUsed()
+                      << "\n");
+  }
+
+  LLVM_DEBUG(MSSA.dump());
+  UpdateUses(Infos);
+  UpdateLaterWrites(Infos);
+
+  for (auto &Inf : Infos) {
+    if (!Inf.first)
+      continue;
+    auto ttt = dyn_cast<MemoryUseOrDef>(Inf.first);
+    if (!ttt)
+      continue;
+    Instruction *I = ttt->getMemoryInst();
+    if (I)
+      LLVM_DEBUG(dbgs() << "R: " << *I);
+    else
+      LLVM_DEBUG(dbgs() << "R: "
+                        << "nullptr");
+    LLVM_DEBUG(dbgs() << " " << Inf.second << "\n");
+    if (!I || !(isa<StoreInst>(I) || isa<MemIntrinsic>(I)))
+      continue;
+
+    if (Inf.second.IsDeadStore()) {
+      NowDeadInsts.insert(I);
+      continue;
+    }
+
+    if (Inf.second.GetStore().isEmptySet())
+      continue;
+
+    if (!Inf.second.IsPartiallyDeadStore())
+      continue;
+
+    if (!ShrinkStores.try_emplace(I, Inf.second).second && false)
+      llvm_unreachable("Instruction should not be there");
+  }
+
+  return Infos.find(MSSA.getLiveOnEntryDef())->second;
+}
+
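+// Conservatively returns true if passing the alloca as argument argN of
+// Callee may let the pointer escape.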
+bool IsLeakingCall(const ArgumentMemAccess &AAA, const GlobalValue *Callee,
+                   size_t argN) {
+  const Function *F = cast<Function>(Callee);
+  if (argN >= F->arg_size())
+    return true;
+
+  auto I = AAA.find(F->getGUID());
+  if (I == AAA.end()) {
+    logLookupMiss(F);
+    return true;
+  }
+
+  if (argN >= I->second.size())
+    return true;
+
+  return I->second[argN].Leak;
+}
+
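+// Walk the users of an alloca, collecting the memory instructions that access
+// it and the instructions through which the pointer may leak.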
+void analyzeAlloca(Value *A,
+                   const std::function<bool(Value *V)> &IsExpectedAlloca,
+                   SmallPtrSetImpl<Instruction *> &Leaks,
+                   SmallPtrSetImpl<Instruction *> &Instructions,
+                   const TargetLibraryInfo &TLI, ScalarEvolution &SE,
+                   const ArgumentMemAccess &AAA) {
+  SmallPtrSet<Value *, 16> Visited;
+  SmallVector<Value *, 8> WorkList;
+  Visited.insert(A);
+  WorkList.push_back(A);
+
+  while (!WorkList.empty()) {
+    Value *V = WorkList.pop_back_val();
+    for (const Use &UI : V->uses()) {
+      Instruction *U = dyn_cast<Instruction>(UI.getUser());
+      assert(V == UI.get());
+
+      if (CmpInst *CI = dyn_cast<CmpInst>(U))
+        continue; // Ignore
+
+      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
+        if (IsExpectedAlloca(SI->getValueOperand())) {
+          Leaks.insert(SI);
+          continue;
+        }
+
+        if (IsExpectedAlloca(SI->getPointerOperand()))
+          Instructions.insert(SI);
+
+        continue;
+      }
+
+      if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
+        if (IsExpectedAlloca(LI->getPointerOperand()))
+          Instructions.insert(LI);
+        continue;
+      }
+
+      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
+        if (IsExpectedAlloca(MTI->getRawSource()) ||
+            IsExpectedAlloca(MTI->getRawDest()))
+          Instructions.insert(MTI);
+        continue;
+      }
+
+      if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
+        if (IsExpectedAlloca(MSI->getRawDest()))
+          Instructions.insert(MSI);
+        continue;
+      }
+
+      if (CallInst *CI = dyn_cast<CallInst>(U)) {
+        if (CI->isLifetimeStartOrEnd())
+          continue; // Ignore
+        if (isa<DbgInfoIntrinsic>(CI))
+          continue; // Ignore
+        if (isFreeCall(CI, &TLI))
+          continue; // Ignore
+
+        ImmutableCallSite CS(CI);
+
+        const GlobalValue *Callee = dyn_cast<GlobalValue>(
+            CS.getCalledValue()->stripPointerCastsNoFollowAliases());
+        if (!Callee) {
+          Leaks.insert(CI);
+          continue;
+        }
+
+        int argN = 0;
+        for (auto &A : make_range(CS.arg_begin(), CS.arg_end())) {
+          if (IsExpectedAlloca(A.get())) {
+            if (IsLeakingCall(AAA, Callee, argN)) {
+              Leaks.insert(CI);
+              break;
+            }
+            Instructions.insert(CI);
+          }
+          ++argN;
+        }
+
+        continue;
+      }
+
+      if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
+        Leaks.insert(II);
+        continue;
+      }
+
+      if (Visited.insert(U).second) {
+        if (IsExpectedAlloca(U)) {
+          WorkList.push_back(U);
+          continue;
+        }
+        Instruction *I = dyn_cast<Instruction>(U);
+        assert(I);
+        Leaks.insert(I);
+      }
+    }
+  }
+}
+
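+// Map every value derived from one of Values back to that unique base value,
+// or to nullptr when the base is ambiguous.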
+DenseMap<Value *, Value *>
+findUndelyingUniqueObjects(Function &F,
+                           const SmallVectorImpl<Value *> &Values) {
+  DenseMap<Value *, Value *> Visited;
+  SmallVector<Value *, 8> WorkList;
+  SmallPtrSet<Value *, 4> NonAllocas;
+
+  for (Value *V : Values) {
+    WorkList.push_back(V);
+    Visited[V] = V;
+  }
+
+  const DataLayout &DL = F.getParent()->getDataLayout();
+
+  auto AddWork = [&Visited, &WorkList](Value *U, Value *AI) {
+    auto Ins = Visited.insert({U, AI});
+    if (Ins.second) {
+      WorkList.push_back(U);
+    } else if (Ins.first->second && Ins.first->second != AI) {
+      Ins.first->second = nullptr;
+      WorkList.push_back(U);
+    }
+  };
+
+  auto CheckUndelyingObject = [&DL, &Visited, &AddWork](Value *U, Value *AI) {
+    Value *UV = GetUnderlyingObject(U, DL, 1);
+    if (UV == U)
+      return false;
+
+    auto I = Visited.find(UV);
+    // if (I == Visited.end()) {
+    //   // Store for later processing.
+    //   NonAllocas.insert(UV);
+    //   //return false;
+    // }
+
+    if (I != Visited.end() && I->second == AI)
+      AddWork(U, AI);
+
+    return true;
+  };
+
+  auto HasWork = [&] {
+    if (WorkList.empty()) {
+      for (Value *V : NonAllocas) {
+        // errs() << *V << " U\n" ;
+        if (Visited.find(V) == Visited.end())
+          AddWork(V, nullptr);
+      }
+      NonAllocas.clear();
+    }
+    return !WorkList.empty();
+  };
+
+  while (HasWork()) {
+    Value *V = WorkList.pop_back_val();
+    Value *AI = Visited[V];
+
+    for (const Use &UI : V->uses()) {
+      Value *U = UI.getUser();
+      assert(V == UI.get());
+      // auto& UAI = Visited.try_emplace(UV, AI);
+      // if ()
+
+      if (CheckUndelyingObject(U, AI)) {
+      } else if (SelectInst *SI = dyn_cast<SelectInst>(U)) {
+        NonAllocas.insert(SI->getTrueValue());
+        NonAllocas.insert(SI->getFalseValue());
+        if (V == SI->getTrueValue() || V == SI->getFalseValue())
+          AddWork(U, AI);
+      } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
+        for (Value *IncValue : PN->incoming_values()) {
+          NonAllocas.insert(IncValue);
+          if (V == IncValue) {
+            AddWork(U, AI);
+          }
+        }
+      } else if (CastInst *CI = dyn_cast<CastInst>(U)) {
+        AddWork(U, AI);
+      }
+    }
+  }
+
+  auto VerifyResults = [&](const DenseMap<Value *, AllocaInst *> &Visited) {
+    for (BasicBlock &BB : F) {
+      for (Instruction &I : BB) {
+        if (Value *V = dyn_cast<Value>(&I)) {
+          SmallVector<Value *, 4> Objects;
+          findAllocasForValue(V, Objects, DL);
+          assert(!Objects.empty());
+
+          auto It = Visited.find(V);
+          const Value *Vis = ((It == Visited.end()) ? nullptr : It->second);
+          if (Objects.size() == 1 && isa<AllocaInst>(Objects[0])) {
+            if (Objects[0] != Vis) {
+              errs() << "UNEXPECTED UND: " << *V << "\n" << F << "\n";
+              for (auto &P : Visited) {
+                if (P.second)
+                  errs() << *P.first << " -> " << *P.second << "\n";
+                else
+                  errs() << *P.first << " -> "
+                         << "\n";
+              }
+            }
+            assert(Objects[0] == Vis);
+          } else {
+            if (Vis) {
+              errs() << "UNEXPECTED UND: " << *V << "\n" << *Vis << F << "\n";
+              for (auto &P : Visited) {
+                if (P.second)
+                  errs() << *P.first << " -> " << *P.second << "\n";
+                else
+                  errs() << *P.first << " -> "
+                         << "\n";
+              }
+            }
+            assert(Vis == nullptr);
+          }
+        }
+      }
+    }
+  };
+  (void)VerifyResults;
+
+  // LLVM_DEBUG(VerifyResults(Visited));
+
+  return Visited;
+}
+
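+/// Grow \p Leaks to include all MemorySSA accesses transitively reachable
+/// through the use lists of the accesses already in the set.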
+void findLeakedMemoryAccesses(SmallPtrSetImpl<MemoryAccess *> &Leaks) {
+  SmallVector<MemoryAccess *, 8> WorkList(Leaks.begin(), Leaks.end());
+
+  while (!WorkList.empty()) {
+    MemoryAccess *MA = WorkList.pop_back_val();
+
+    for (const Use &UI : MA->uses()) {
+      MemoryAccess *U = dyn_cast<MemoryAccess>(UI.getUser());
+      if (Leaks.insert(U).second) {
+        WorkList.push_back(U);
+      }
+    }
+  }
+}
+
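+/// Shrinking of plain stores is not implemented; the candidate is only
+/// counted.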
+bool tryToShorten(StoreInst *SI, const ConstantRange &Before,
+                  const ConstantRange &After,
+                  SmallVectorImpl<Value *> &MaybeDead) {
+  ++NumStoreInstrToShrink;
+  // Not implemented; the number of such stores is low.
+  return false;
+}
+
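+/// Descriptive note (added by the editor, inferred from the body below):
+/// shrink the memory intrinsic \p I so it only writes the part of its
+/// original range \p Before that is still needed (\p After). The destination
+/// (and the source, for memcpy/memmove) is advanced by an offset rounded down
+/// to a multiple of the least common multiple of the two alignments, and the
+/// length is reduced accordingly. \p Load is currently unused.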
+bool tryToShorten(MemIntrinsic *I, const ConstantRange &Load,
+                  const ConstantRange &Before, const ConstantRange &After,
+                  SmallVectorImpl<Value *> &MaybeDead) {
+  ++NumMemIntrinsicToShrink;
+
+  unsigned IAlign = I->getDestAlignment();
+  assert(IAlign);
+  MemTransferInst *MTI = dyn_cast<MemTransferInst>(I);
+  unsigned IAlignSrc = MTI ? MTI->getSourceAlignment() : IAlign;
+  assert(IAlignSrc);
+  uint64_t INewAlign =
+      IAlign * IAlignSrc / GreatestCommonDivisor64(IAlign, IAlignSrc);
+  assert(INewAlign);
+
+  auto PtrAdd = After.getLower() - Before.getLower();
+  PtrAdd -= PtrAdd.urem(INewAlign);
+
+  auto LenSub = PtrAdd + Before.getUpper() - After.getUpper();
+
+  if (LenSub.isNullValue())
+    return false;
+
+  auto NewLength = Before.getUpper() - Before.getLower() - LenSub;
+  MaybeDead.push_back(I->getLength());
+  Value *ILength = I->getLength();
+  Value *TrimmedLength = ConstantInt::get(ILength->getType(), NewLength);
+  I->setLength(TrimmedLength);
+
+  Value *Indices[1] = {ConstantInt::get(ILength->getType(), PtrAdd)};
+  GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
+      I->getRawDest()->getType()->getPointerElementType(), I->getRawDest(),
+      Indices, "", I);
+  I->setDest(NewDestGEP);
+
+  if (MTI) {
+    GetElementPtrInst *NewSrcGEP = GetElementPtrInst::CreateInBounds(
+        MTI->getRawSource()->getType()->getPointerElementType(),
+        MTI->getRawSource(), Indices, "", MTI);
+    MTI->setSource(NewSrcGEP);
+  }
+
+  ++NumMemIntrinsicShrinked;
+  return true;
+}
+
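+/// Shrink the stores recorded in \p ShrinkStores and drop all operand
+/// references of the instructions in \p NowDeadInsts, adding operands that
+/// become trivially dead to the set; the caller erases the instructions.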
+static bool
+deleteDeadInstruction(const TargetLibraryInfo &TLI,
+                      SmallPtrSetImpl<Instruction *> &NowDeadInsts,
+                      DenseMap<Instruction *, MemAccessInfo> &ShrinkStores) {
+  SmallVector<Instruction *, 32> WorkList(NowDeadInsts.begin(),
+                                          NowDeadInsts.end());
+  bool Changed = !NowDeadInsts.empty();
+
+  for (auto &It : ShrinkStores) {
+    SmallVector<Value *, 32> MaybeDead;
+
+    if (StoreInst *SI = dyn_cast<StoreInst>(It.first)) {
+      if (tryToShorten(SI, It.second.GetStore(), It.second.GetGood(),
+                       MaybeDead))
+        Changed = true;
+    } else {
+      if (tryToShorten(cast<MemIntrinsic>(It.first), It.second.GetLoad(),
+                       It.second.GetStore(), It.second.GetGood(), MaybeDead))
+        Changed = true;
+    }
+
+    for (Value *Op : MaybeDead) {
+      // If this operand just became dead, add it to the WorkList.
+      if (!Op->use_empty())
+        continue;
+
+      if (Instruction *OpI = dyn_cast<Instruction>(Op)) {
+        if (isInstructionTriviallyDead(OpI, &TLI)) {
+          NowDeadInsts.insert(OpI);
+          WorkList.push_back(OpI);
+        }
+      }
+    }
+  }
+
+  while (!WorkList.empty()) {
+    Instruction *I = WorkList.pop_back_val();
+
+    // Try to preserve debug information attached to the dead instruction.
+    salvageDebugInfo(*I);
+
+    for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
+      Value *Op = I->getOperand(op);
+      I->setOperand(op, nullptr);
+
+      // If this operand just became dead, add it to the WorkList.
+      if (!Op->use_empty())
+        continue;
+
+      if (Instruction *OpI = dyn_cast<Instruction>(Op)) {
+        if (isInstructionTriviallyDead(OpI, &TLI)) {
+          NowDeadInsts.insert(OpI);
+          WorkList.push_back(OpI);
+        }
+      }
+    }
+  }
+  return Changed;
+}
+
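+/// Analyze every object in \p Values with MemorySSA and ScalarEvolution:
+/// collect dead stores into \p NowDeadInsts, shrinkable stores into
+/// \p ShrinkStores, and one access summary per object into \p ArgInfo.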
+void findDeadStores(Function &F, SmallVectorImpl<Value *> &Values,
+                    const TargetLibraryInfo &TLI, MemorySSA &MSSA,
+                    ScalarEvolution &SE,
+                    SmallPtrSet<Instruction *, 32> &NowDeadInsts,
+                    DenseMap<Instruction *, MemAccessInfo> &ShrinkStores,
+                    std::vector<ArgumentAccessInfo> &ArgInfo,
+                    const ArgumentMemAccess &AAA) {
+
+  auto UnderlyingAllocas = findUnderlyingUniqueObjects(F, Values);
+
+  const DataLayout &DL = F.getParent()->getDataLayout();
+  unsigned PointerSizeInBits = DL.getPointerSizeInBits();
+
+  for (Value *AI : Values) {
+    assert(AI);
+    ArgInfo.emplace_back(PointerSizeInBits);
+
+    if (!DebugCounter::shouldExecute(DeadStoreEliminationExp))
+      continue;
+
+    SmallPtrSet<Instruction *, 4> Leaks;
+    SmallPtrSet<Instruction *, 4> Instructions;
+
+    auto IsExpectedAlloca = [&](Value *V) {
+      auto It = UnderlyingAllocas.find(V);
+      return It != UnderlyingAllocas.end() && It->second == AI;
+    };
+
+    analyzeAlloca(AI, IsExpectedAlloca, Leaks, Instructions, TLI, SE, AAA);
+    bool Skip = false;
+
+    SmallPtrSet<MemoryAccess *, 4> LeakedAccesses;
+    for (Instruction *L : Leaks) {
+      MemoryAccess *M = MSSA.getMemoryAccess(L);
+      if (!M) {
+        M = MSSA.getMemoryAccess(L->getParent());
+        if (!M) {
+          Skip = true;
+          break;
+        }
+      }
+      if (MemoryDef *MA = dyn_cast<MemoryDef>(M)) {
+        LeakedAccesses.insert(MA);
+        continue;
+      }
+
+      bool HasDef = false;
+      for (MemoryAccess *Def : make_range(M->defs_begin(), M->defs_end())) {
+        assert(isa<MemoryDef>(Def) || isa<MemoryPhi>(Def));
+        LeakedAccesses.insert(Def);
+        HasDef = true;
+      }
+
+      if (!HasDef) {
+        Skip = true;
+      }
+    }
+
+    for (Instruction *I : Instructions) {
+      MemoryAccess *MA = MSSA.getMemoryAccess(I);
+      if (!MA) {
+        Skip = true;
+      }
+    }
+
+    if (!Skip) {
+      findLeakedMemoryAccesses(LeakedAccesses);
+    }
+
+    for (Instruction *I : Instructions) {
+      assert(I);
+      if (!isa<StoreInst>(I) && !isa<MemIntrinsic>(I))
+        continue;
+      ++NumMemWrites;
+
+      if (Skip) {
+        ++NumMemWritesSkipWithAlloca;
+        continue;
+      }
+
+      MemoryAccess *MA = MSSA.getMemoryAccess(I);
+      if (LeakedAccesses.find(MA) != LeakedAccesses.end()) {
+        LLVM_DEBUG(errs() << "NumMemWritesSkip: " << *I);
+        ++NumMemWritesSkip;
+        continue;
+      }
+
+      ++NumMemWritesToAnalize;
+    }
+
+    if (!Skip) {
+      bool dbg = false;
+      MemAccessInfo MAI = processAlloca(dbg, *AI, IsExpectedAlloca,
+                                        Instructions, LeakedAccesses, DL, MSSA,
+                                        SE, AAA, NowDeadInsts, ShrinkStores);
+      ArgInfo.back().Leak = !LeakedAccesses.empty();
+      ArgInfo.back().CanRead = MAI.GetUsed();
+      ArgInfo.back().MustWrite = MAI.GetStoreLater();
+    }
+  }
+}
+
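+/// Per-function driver: gather the function's allocas, find their dead and
+/// shrinkable stores, and delete or shrink them. Returns true if the IR
+/// changed.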
+bool eliminateDeadStores(Function &F, MemorySSA &MSSA, ScalarEvolution &SE,
+                         const TargetLibraryInfo &TLI,
+                         const ArgumentMemAccess &AAA) {
+  SmallPtrSet<Instruction *, 32> NowDeadInsts;
+  DenseMap<Instruction *, MemAccessInfo> ShrinkStores;
+
+  SmallVector<Value *, 4> Values;
+  for (BasicBlock &BB : F) {
+    for (Instruction &I : BB) {
+      if (isa<AnyMemIntrinsic>(&I) || isa<StoreInst>(&I)) {
+        ++NumMemWritesAny;
+        continue;
+      }
+      if (isa<AllocaInst>(&I)) {
+        ++NumAlloca;
+      } else if (isa<CallInst>(&I) && isAllocationFn(&I, &TLI)) {
+        // Allocation calls are only counted for now; they are not analyzed.
+        ++NumAllocation;
+        continue;
+      } else {
+        continue;
+      }
+      Values.push_back(&I);
+    }
+  }
+
+  std::vector<ArgumentAccessInfo> MemInfo;
+  findDeadStores(F, Values, TLI, MSSA, SE, NowDeadInsts, ShrinkStores, MemInfo,
+                 AAA);
+
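+  // deleteDeadInstruction may grow NowDeadInsts; the subtract/add around it
+  // makes NumOtherDeleted count only the instructions discovered while
+  // deleting.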
+  NumMemWritesDeleted += NowDeadInsts.size();
+  NumOtherDeleted -= NowDeadInsts.size();
+  bool Changed = deleteDeadInstruction(TLI, NowDeadInsts, ShrinkStores);
+  NumOtherDeleted += NowDeadInsts.size();
+  for (Instruction *I : NowDeadInsts) {
+    LLVM_DEBUG(dbgs() << "Erase: " << *I << "\n");
+    if (!I->use_empty()) {
+      dbgs() << "Erase: " << *I << "\n";
+    }
+    I->eraseFromParent();
+  }
+
+  return Changed;
+}
+
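+/// Legacy module pass that walks the call graph in SCC order and records, per
+/// function GUID, how each pointer argument is read, written and leaked.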
+class ArgumentAccessInfoLegacyPass : public ModulePass {
+public:
+  static char ID;
+  ArgumentMemAccess AAMI;
+
+  ArgumentAccessInfoLegacyPass() : ModulePass(ID) {
+    initializeArgumentAccessInfoLegacyPassPass(
+        *PassRegistry::getPassRegistry());
+  }
+
+  const ArgumentMemAccess &getResult() const { return AAMI; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<CallGraphWrapperPass>();
+    AU.addRequired<MemorySSAWrapperPass>();
+    AU.addRequired<ScalarEvolutionWrapperPass>();
+    AU.addRequired<TargetLibraryInfoWrapperPass>();
+    AU.setPreservesAll();
+  }
+
+  template <typename AnalysisType>
+  AnalysisType *getAnalysisIfAvailable(Function &F) {
+    Pass *P = getResolver()->findImplPass(this, &AnalysisType::ID, F);
+    if (!P)
+      return nullptr;
+
+    return static_cast<AnalysisType *>(P);
+  }
+
+  bool runOnModule(Module &M) override {
+    uncompressAAMI(AAMI);
+
+    std::set<GlobalValue::GUID> GUIDS;
+    auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
+    for (const std::vector<CallGraphNode *> &NV :
+         make_range(scc_begin(&CG), scc_end(&CG))) {
+      for (const CallGraphNode *CGN : NV) {
+        if (Function *F = CGN->getFunction()) {
+          auto *TLI = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(*F);
+          if (!TLI)
+            continue;
+          auto *MSSA = getAnalysisIfAvailable<MemorySSAWrapperPass>(*F);
+          if (!MSSA)
+            continue;
+          auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>(*F);
+          if (!SE)
+            continue;
+
+          SmallPtrSet<Instruction *, 32> NowDeadInsts;
+          DenseMap<Instruction *, MemAccessInfo> ShrinkStores;
+
+          SmallVector<Value *, 4> Values;
+          for (Argument &A : F->args())
+            Values.push_back(&A);
+
+          std::vector<ArgumentAccessInfo> ArgInfo;
+
+          findDeadStores(*F, Values, TLI->getTLI(), MSSA->getMSSA(),
+                         SE->getSE(), NowDeadInsts, ShrinkStores, ArgInfo,
+                         AAMI);
+          if (ArgInfo.empty())
+            continue;
+
+          AAMI[F->getGUID()] = ArgInfo;
+
+          GUIDS.insert(F->getGUID());
+        }
+      }
+    }
+
+    compressAAMI(AAMI, GUIDS);
+    return false;
+  }
+};
+
+/// A legacy pass for the legacy pass manager that wraps \c DSEEPass.
+class DSEELegacyPass : public FunctionPass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  DSEELegacyPass() : FunctionPass(ID) {
+    initializeDSEELegacyPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnFunction(Function &F) override {
+    if (skipFunction(F))
+      return false;
+
+    return eliminateDeadStores(
+        F, getAnalysis<MemorySSAWrapperPass>().getMSSA(),
+        getAnalysis<ScalarEvolutionWrapperPass>().getSE(),
+        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
+        getAnalysis<ArgumentAccessInfoLegacyPass>().getResult());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<ArgumentAccessInfoLegacyPass>();
+    AU.addRequired<MemorySSAWrapperPass>();
+    AU.addRequired<ScalarEvolutionWrapperPass>();
+    AU.addRequired<TargetLibraryInfoWrapperPass>();
+    AU.setPreservesCFG();
+    AU.addPreserved<GlobalsAAWrapperPass>();
+    AU.addPreserved<DominatorTreeWrapperPass>();
+  }
+};
+
+char DSEELegacyPass::ID = 0;
+char ArgumentAccessInfoLegacyPass::ID = 0;
+
+} // end anonymous namespace
+
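+// Argument access summaries live for the lifetime of the pass object: they
+// are loaded with uncompressAAMI on construction and written back with
+// compressAAMI, restricted to the GUIDs processed, on destruction.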
+DSEEPass::DSEEPass() { uncompressAAMI(AAI); }
+
+DSEEPass::~DSEEPass() { compressAAMI(AAI, GUIDS); }
+
+PreservedAnalyses DSEEPass::run(Function &F, FunctionAnalysisManager &AM) {
+  GUIDS.insert(F.getGUID());
+
+  SmallVector<Value *, 4> Values;
+  for (Argument &A : F.args())
+    Values.push_back(&A);
+
+  SmallPtrSet<Instruction *, 32> NowDeadInsts;
+  DenseMap<Instruction *, MemAccessInfo> ShrinkStores;
+
+  std::vector<ArgumentAccessInfo> ArgInfo;
+
+  findDeadStores(F, Values, AM.getResult<TargetLibraryAnalysis>(F),
+                 AM.getResult<MemorySSAAnalysis>(F).getMSSA(),
+                 AM.getResult<ScalarEvolutionAnalysis>(F), NowDeadInsts,
+                 ShrinkStores, ArgInfo, AAI);
+
+  assert(F.arg_size() == ArgInfo.size());
+  AAI[F.getGUID()] = ArgInfo;
+
+  if (!eliminateDeadStores(F, AM.getResult<MemorySSAAnalysis>(F).getMSSA(),
+                           AM.getResult<ScalarEvolutionAnalysis>(F),
+                           AM.getResult<TargetLibraryAnalysis>(F), AAI)) {
+    return PreservedAnalyses::all();
+  }
+
+  PreservedAnalyses PA;
+  PA.preserve<MemorySSAAnalysis>();
+  PA.preserveSet<CFGAnalyses>();
+  PA.preserve<GlobalsAA>();
+  PA.preserve<MemoryDependenceAnalysis>();
+  return PA;
+}
+
+INITIALIZE_PASS_BEGIN(ArgumentAccessInfoLegacyPass, "argument-access",
+                      "Argument Access Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
+INITIALIZE_PASS_END(ArgumentAccessInfoLegacyPass, "argument-access",
+                    "Argument Access Analysis", false, true)
+
+INITIALIZE_PASS_BEGIN(DSEELegacyPass, "dsee",
+                      "Experimental Dead Store Elimination", false, false)
+INITIALIZE_PASS_DEPENDENCY(ArgumentAccessInfoLegacyPass)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
+INITIALIZE_PASS_END(DSEELegacyPass, "dsee",
+                    "Experimental Dead Store Elimination", false, false)
+
+FunctionPass *llvm::createDeadStoreEliminationExpPass() {
+  return new DSEELegacyPass();
+}
Index: llvm/lib/Transforms/Scalar/CMakeLists.txt
===================================================================
--- llvm/lib/Transforms/Scalar/CMakeLists.txt
+++ llvm/lib/Transforms/Scalar/CMakeLists.txt
@@ -8,6 +8,8 @@
   CorrelatedValuePropagation.cpp
   DCE.cpp
   DeadStoreElimination.cpp
+  DeadStoreEliminationExp.cpp
+  DeadStoreEliminationExpGlobal.cpp
   DivRemPairs.cpp
   EarlyCSE.cpp
   FlattenCFGPass.cpp
Index: llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
===================================================================
--- llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -405,6 +405,8 @@
   MPM.add(createJumpThreadingPass());         // Thread jumps
   MPM.add(createCorrelatedValuePropagationPass());
   MPM.add(createDeadStoreEliminationPass());  // Delete dead stores
+  if (OptLevel > 1)
+    MPM.add(createDeadStoreEliminationExpPass());
   MPM.add(createLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap));
 
   addExtensionsToPM(EP_ScalarOptimizerLate, MPM);
@@ -913,6 +915,8 @@
 
   // Nuke dead stores.
   PM.add(createDeadStoreEliminationPass());
+  if (OptLevel > 1)
+    PM.add(createDeadStoreEliminationExpPass());
 
   // More loops are countable; try to optimize them.
   PM.add(createIndVarSimplifyPass());
Index: llvm/lib/Passes/PassRegistry.def
===================================================================
--- llvm/lib/Passes/PassRegistry.def
+++ llvm/lib/Passes/PassRegistry.def
@@ -167,6 +167,7 @@
 FUNCTION_PASS("dce", DCEPass())
 FUNCTION_PASS("div-rem-pairs", DivRemPairsPass())
 FUNCTION_PASS("dse", DSEPass())
+FUNCTION_PASS("dsee", DSEEPass())
 FUNCTION_PASS("dot-cfg", CFGPrinterPass())
 FUNCTION_PASS("dot-cfg-only", CFGOnlyPrinterPass())
 FUNCTION_PASS("early-cse", EarlyCSEPass(/*UseMemorySSA=*/false))
Index: llvm/lib/Passes/PassBuilder.cpp
===================================================================
--- llvm/lib/Passes/PassBuilder.cpp
+++ llvm/lib/Passes/PassBuilder.cpp
@@ -111,6 +111,7 @@
 #include "llvm/Transforms/Scalar/CorrelatedValuePropagation.h"
 #include "llvm/Transforms/Scalar/DCE.h"
 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
+#include "llvm/Transforms/Scalar/DeadStoreEliminationExp.h"
 #include "llvm/Transforms/Scalar/DivRemPairs.h"
 #include "llvm/Transforms/Scalar/EarlyCSE.h"
 #include "llvm/Transforms/Scalar/Float2Int.h"
@@ -514,6 +515,8 @@
   FPM.addPass(JumpThreadingPass());
   FPM.addPass(CorrelatedValuePropagationPass());
   FPM.addPass(DSEPass());
+  if (Level != O1)
+    FPM.addPass(DSEEPass());
   FPM.addPass(createFunctionToLoopPassAdaptor(
       LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap),
       DebugLogging));
Index: llvm/lib/IR/AsmWriter.cpp
===================================================================
--- llvm/lib/IR/AsmWriter.cpp
+++ llvm/lib/IR/AsmWriter.cpp
@@ -3633,6 +3633,8 @@
 
 static void maybePrintCallAddrSpace(const Value *Operand, const Instruction *I,
                                     raw_ostream &Out) {
+  if (!Operand)
+    return;
   // We print the address space of the call if it is non-zero.
   unsigned CallAddrSpace = Operand->getType()->getPointerAddressSpace();
   bool PrintAddrSpace = CallAddrSpace != 0;
Index: llvm/lib/Analysis/Analysis.cpp
===================================================================
--- llvm/lib/Analysis/Analysis.cpp
+++ llvm/lib/Analysis/Analysis.cpp
@@ -77,6 +77,7 @@
   initializeSCEVAAWrapperPassPass(Registry);
   initializeScalarEvolutionWrapperPassPass(Registry);
   initializeStackSafetyGlobalInfoWrapperPassPass(Registry);
+  initializeArgumentAccessInfoLegacyPassPass(Registry);
   initializeStackSafetyInfoWrapperPassPass(Registry);
   initializeTargetTransformInfoWrapperPassPass(Registry);
   initializeTypeBasedAAWrapperPassPass(Registry);
Index: llvm/include/llvm/Transforms/Scalar/DeadStoreEliminationExp.h
===================================================================
--- /dev/null
+++ llvm/include/llvm/Transforms/Scalar/DeadStoreEliminationExp.h
@@ -0,0 +1,50 @@
+//===- DeadStoreEliminationExp.h - Experimental DSE ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for an experimental dead store elimination
+// pass that removes and shrinks dead stores to local allocas and pointer
+// arguments.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATIONEXP_H
+#define LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATIONEXP_H
+
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/PassManager.h"
+#include <map>
+#include <set>
+
+namespace llvm {
+
+class Function;
+
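+/// Access summary for one pointer argument. The defaults are conservative:
+/// the argument may leak, any byte may be read (CanRead starts as the full
+/// range), and no byte is known to be written (MustWrite starts empty).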
+struct ArgumentAccessInfo {
+  bool Leak = true;
+  ConstantRange CanRead;
+  ConstantRange MustWrite;
+
+  ArgumentAccessInfo(unsigned PointerSizeInBits)
+      : CanRead(PointerSizeInBits, true), MustWrite(PointerSizeInBits, false) {}
+};
+using ArgumentMemAccess =
+    std::map<GlobalValue::GUID, std::vector<ArgumentAccessInfo>>;
+
+/// Experimental dead store elimination pass for local allocas and pointer
+/// arguments.
+class DSEEPass : public PassInfoMixin<DSEEPass> {
+  ArgumentMemAccess AAI;
+  std::set<GlobalValue::GUID> GUIDS;
+
+public:
+  DSEEPass();
+  ~DSEEPass();
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATIONEXP_H
Index: llvm/include/llvm/Transforms/Scalar.h
===================================================================
--- llvm/include/llvm/Transforms/Scalar.h
+++ llvm/include/llvm/Transforms/Scalar.h
@@ -71,6 +71,11 @@
 //
 FunctionPass *createDeadStoreEliminationPass();
 
+//===----------------------------------------------------------------------===//
+//
+// DeadStoreEliminationExp - Experimental dead store elimination that removes
+// and shrinks stores to allocas and pointer arguments using MemorySSA.
+//
+FunctionPass *createDeadStoreEliminationExpPass();
 
 //===----------------------------------------------------------------------===//
 //
Index: llvm/include/llvm/LinkAllPasses.h
===================================================================
--- llvm/include/llvm/LinkAllPasses.h
+++ llvm/include/llvm/LinkAllPasses.h
@@ -95,6 +95,7 @@
       (void) llvm::createDeadCodeEliminationPass();
       (void) llvm::createDeadInstEliminationPass();
       (void) llvm::createDeadStoreEliminationPass();
+      (void) llvm::createDeadStoreEliminationExpPass();
       (void) llvm::createDependenceAnalysisWrapperPass();
       (void) llvm::createDomOnlyPrinterPass();
       (void) llvm::createDomPrinterPass();
Index: llvm/include/llvm/InitializePasses.h
===================================================================
--- llvm/include/llvm/InitializePasses.h
+++ llvm/include/llvm/InitializePasses.h
@@ -113,6 +113,7 @@
 void initializeDAHPass(PassRegistry&);
 void initializeDCELegacyPassPass(PassRegistry&);
 void initializeDSELegacyPassPass(PassRegistry&);
+void initializeDSEELegacyPassPass(PassRegistry &);
 void initializeDataFlowSanitizerPass(PassRegistry&);
 void initializeDeadInstEliminationPass(PassRegistry&);
 void initializeDeadMachineInstructionElimPass(PassRegistry&);
@@ -382,6 +383,7 @@
 void initializeStackColoringPass(PassRegistry&);
 void initializeStackMapLivenessPass(PassRegistry&);
 void initializeStackProtectorPass(PassRegistry&);
+void initializeArgumentAccessInfoLegacyPassPass(PassRegistry &);
 void initializeStackSafetyGlobalInfoWrapperPassPass(PassRegistry &);
 void initializeStackSafetyInfoWrapperPassPass(PassRegistry &);
 void initializeStackSlotColoringPass(PassRegistry&);
Index: clang/test/CodeGenCXX/union-tbaa2.cpp
===================================================================
--- clang/test/CodeGenCXX/union-tbaa2.cpp
+++ clang/test/CodeGenCXX/union-tbaa2.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 %s -O2 -std=c++11 -triple x86_64-unknown-linux-gnu -target-cpu x86-64 -target-feature +sse4.2 -target-feature +avx -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -O1 -std=c++11 -triple x86_64-unknown-linux-gnu -target-cpu x86-64 -target-feature +sse4.2 -target-feature +avx -emit-llvm -o - | FileCheck %s
 
 // Testcase from llvm.org/PR32056
 