Author: Gabriel Baraldi
Date: 2026-01-26T09:40:50Z
New Revision: b30dd8f1ad837af62991f00ca270b76538cf90c4

URL: 
https://github.com/llvm/llvm-project/commit/b30dd8f1ad837af62991f00ca270b76538cf90c4
DIFF: 
https://github.com/llvm/llvm-project/commit/b30dd8f1ad837af62991f00ca270b76538cf90c4.diff

LOG: [SLPVectorizer] Check std::optional coming out of getPointersDiff (#175784)

Fixes https://github.com/llvm/llvm-project/issues/175768
There are other unchecked uses of std::optional in this pass, but I
couldn't figure out a test that triggers them.

(cherry picked from commit 72a20b8e29876106aa918d03ef95f27a6bf0fa0d)
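
For context, the crash mode is the generic std::optional pitfall: dereferencing
a std::optional that holds std::nullopt is undefined behavior. A minimal
standalone sketch (not LLVM code; pointersDiff here is a hypothetical stand-in
for getPointersDiff, returning nullopt when two pointers cannot be compared):

#include <cassert>
#include <cstdint>
#include <iostream>
#include <optional>

// Hypothetical stand-in for getPointersDiff: nullopt means "cannot compute
// the distance between these two pointers".
static std::optional<int64_t> pointersDiff(const int *A, const int *B) {
  if (!A || !B)
    return std::nullopt;
  return B - A;
}

int main() {
  int Buf[4] = {0, 1, 2, 3};
  // Buggy pattern: *pointersDiff(...) with no check is UB when it is nullopt.
  // Fixed pattern: bind the optional, then check (or assert) before use.
  std::optional<int64_t> Diff = pointersDiff(&Buf[0], &Buf[3]);
  assert(Diff && "expected comparable pointers");
  std::cout << "diff = " << *Diff << "\n"; // prints 3
  return 0;
}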

Added: 
    llvm/test/Transforms/SLPVectorizer/X86/crash_getpointersdiff-nullopt.ll

Modified: 
    llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 7808e922dd90a..a717043508132 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -7082,12 +7082,17 @@ bool BoUpSLP::analyzeConstantStrideCandidate(
     Value *Ptr0, Value *PtrN, StridedPtrInfo &SPtrInfo) const {
   const size_t Sz = PointerOps.size();
   SmallVector<int64_t> SortedOffsetsFromBase(Sz);
-  // Go through `PointerOps` in sorted order and record offsets from `Ptr0`.
+  // Go through `PointerOps` in sorted order and record offsets from
+  // PointerOps[0]. We use PointerOps[0] rather than Ptr0 because
+  // sortPtrAccesses only validates getPointersDiff for pairs relative to
+  // PointerOps[0]. This is safe since only offset differences are used below.
   for (unsigned I : seq<unsigned>(Sz)) {
     Value *Ptr =
         SortedIndices.empty() ? PointerOps[I] : PointerOps[SortedIndices[I]];
-    SortedOffsetsFromBase[I] =
-        *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, *DL, *SE);
+    std::optional<int64_t> Offset =
+        getPointersDiff(ScalarTy, PointerOps[0], ScalarTy, Ptr, *DL, *SE);
+    assert(Offset && "sortPtrAccesses should have validated this pointer");
+    SortedOffsetsFromBase[I] = *Offset;
   }
 
   // The code below checks that `SortedOffsetsFromBase` looks as follows:
@@ -7436,10 +7441,18 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
       Ptr0 = PointerOps[Order.front()];
       PtrN = PointerOps[Order.back()];
     }
-    std::optional<int64_t> Diff =
-        getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
+    // sortPtrAccesses validates getPointersDiff for all pointers relative to
+    // PointerOps[0], so compute the span using PointerOps[0] as intermediate:
+    //   Diff = offset(PtrN) - offset(Ptr0) relative to PointerOps[0]
+    std::optional<int64_t> Diff0 =
+        getPointersDiff(ScalarTy, PointerOps[0], ScalarTy, Ptr0, *DL, *SE);
+    std::optional<int64_t> DiffN =
+        getPointersDiff(ScalarTy, PointerOps[0], ScalarTy, PtrN, *DL, *SE);
+    assert(Diff0 && DiffN &&
+           "sortPtrAccesses should have validated these pointers");
+    int64_t Diff = *DiffN - *Diff0;
     // Check that the sorted loads are consecutive.
-    if (static_cast<uint64_t>(*Diff) == Sz - 1)
+    if (static_cast<uint64_t>(Diff) == Sz - 1)
       return LoadsState::Vectorize;
     if (isMaskedLoadCompress(VL, PointerOps, Order, *TTI, *DL, *SE, *AC, *DT,
                              *TLI, [&](Value *V) {
@@ -7451,7 +7464,7 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
         cast<LoadInst>(Order.empty() ? VL.front() : VL[Order.front()])
             ->getAlign();
     if (analyzeConstantStrideCandidate(PointerOps, ScalarTy, Alignment, Order,
-                                       *Diff, Ptr0, PtrN, SPtrInfo))
+                                       Diff, Ptr0, PtrN, SPtrInfo))
       return LoadsState::StridedVectorize;
   }
   if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) ||
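
The new span computation is plain offset arithmetic through the validated
common base: both endpoints are measured from PointerOps[0], so the base
cancels out. A tiny standalone sketch with illustrative values only:

#include <cassert>
#include <cstdint>

int main() {
  // Offsets of Ptr0 and PtrN measured from PointerOps[0], the only base
  // sortPtrAccesses is known to have validated (illustrative values).
  int64_t Diff0 = 2; // offset(Ptr0) relative to PointerOps[0]
  int64_t DiffN = 7; // offset(PtrN) relative to PointerOps[0]
  // (PtrN - Base) - (Ptr0 - Base) == PtrN - Ptr0, so the base cancels:
  int64_t Diff = DiffN - Diff0;
  assert(Diff == 5 && "span between Ptr0 and PtrN in elements");
  return 0;
}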

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_getpointersdiff-nullopt.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_getpointersdiff-nullopt.ll
new file mode 100644
index 0000000000000..cbbac32febdb3
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_getpointersdiff-nullopt.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+
+; Test that SLP vectorizer doesn't crash when getPointersDiff returns
+; std::nullopt for pointers that can't be compared.
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @test(i64 %arg0, i64 %arg1) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[INIT:%.*]] = add i64 [[ARG0:%.*]], 1
+; CHECK-NEXT:    br label [[LOOP:%.*]]
+; CHECK:       loop:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[INIT]], [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[REDUCE:%.*]] ]
+; CHECK-NEXT:    [[COUNTER:%.*]] = phi i64 [ 1, [[ENTRY]] ], [ [[COUNTER_NEXT:%.*]], [[REDUCE]] ]
+; CHECK-NEXT:    [[OFF0:%.*]] = add i64 [[IV]], -4
+; CHECK-NEXT:    [[PTR:%.*]] = call ptr null(ptr null, ptr null)
+; CHECK-NEXT:    [[IDX0:%.*]] = add i64 [[ARG1:%.*]], [[OFF0]]
+; CHECK-NEXT:    [[IDX0_SCALED:%.*]] = shl i64 [[IDX0]], 3
+; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[IDX0_SCALED]]
+; CHECK-NEXT:    [[GEP0_OFF:%.*]] = getelementptr i8, ptr [[GEP0]], i64 -8
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x double>, ptr [[GEP0_OFF]], align 8
+; CHECK-NEXT:    [[IDX4:%.*]] = add i64 [[ARG1]], [[IV]]
+; CHECK-NEXT:    [[IDX4_SCALED:%.*]] = shl i64 [[IDX4]], 3
+; CHECK-NEXT:    [[GEP4:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[IDX4_SCALED]]
+; CHECK-NEXT:    [[GEP4_OFF:%.*]] = getelementptr i8, ptr [[GEP4]], i64 -8
+; CHECK-NEXT:    [[LOAD4:%.*]] = load double, ptr [[GEP4_OFF]], align 8
+; CHECK-NEXT:    [[LOAD5:%.*]] = load double, ptr [[GEP4]], align 8
+; CHECK-NEXT:    br label [[REDUCE]]
+; CHECK:       dead:
+; CHECK-NEXT:    br label [[REDUCE]]
+; CHECK:       reduce:
+; CHECK-NEXT:    [[PHI4:%.*]] = phi double [ [[LOAD4]], [[LOOP]] ], [ 0.000000e+00, [[DEAD:%.*]] ]
+; CHECK-NEXT:    [[PHI5:%.*]] = phi double [ [[LOAD5]], [[LOOP]] ], [ 0.000000e+00, [[DEAD]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = phi <4 x double> [ [[TMP0]], [[LOOP]] ], [ poison, [[DEAD]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> [[TMP1]])
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @llvm.minimum.f64(double [[TMP2]], double [[PHI4]])
+; CHECK-NEXT:    [[TMP4:%.*]] = call double @llvm.minimum.f64(double [[PHI5]], double 0.000000e+00)
+; CHECK-NEXT:    [[TMP5:%.*]] = call double @llvm.minimum.f64(double [[TMP3]], double [[TMP4]])
+; CHECK-NEXT:    [[MIN6:%.*]] = call double @llvm.minimum.f64(double [[TMP5]], double 0.000000e+00)
+; CHECK-NEXT:    [[COUNTER_NEXT]] = add i64 [[COUNTER]], 1
+; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[COUNTER]], [[INIT]]
+; CHECK-NEXT:    br label [[LOOP]]
+;
+entry:
+  %init = add i64 %arg0, 1
+  br label %loop
+
+loop:
+  %iv = phi i64 [ %init, %entry ], [ %iv.next, %reduce ]
+  %counter = phi i64 [ 1, %entry ], [ %counter.next, %reduce ]
+  %off0 = add i64 %iv, -4
+  %off1 = add i64 %iv, -3
+  %off2 = add i64 %iv, -2
+  %off3 = add i64 %iv, -1
+  %ptr = call ptr null(ptr null, ptr null)
+  %idx0 = add i64 %arg1, %off0
+  %idx0.scaled = shl i64 %idx0, 3
+  %gep0 = getelementptr i8, ptr %ptr, i64 %idx0.scaled
+  %gep0.off = getelementptr i8, ptr %gep0, i64 -8
+  %load0 = load double, ptr %gep0.off, align 8
+  %idx1 = add i64 %arg1, %off1
+  %idx1.scaled = shl i64 %idx1, 3
+  %gep1 = getelementptr i8, ptr %ptr, i64 %idx1.scaled
+  %gep1.off = getelementptr i8, ptr %gep1, i64 -8
+  %load1 = load double, ptr %gep1.off, align 8
+  %idx2 = add i64 %arg1, %off2
+  %idx2.scaled = shl i64 %idx2, 3
+  %gep2 = getelementptr i8, ptr %ptr, i64 %idx2.scaled
+  %gep2.off = getelementptr i8, ptr %gep2, i64 -8
+  %load2 = load double, ptr %gep2.off, align 8
+  %idx3 = add i64 %arg1, %off3
+  %idx3.scaled = shl i64 %idx3, 3
+  %gep3 = getelementptr i8, ptr %ptr, i64 %idx3.scaled
+  %gep3.off = getelementptr i8, ptr %gep3, i64 -8
+  %load3 = load double, ptr %gep3.off, align 8
+  %idx4 = add i64 %arg1, %iv
+  %idx4.scaled = shl i64 %idx4, 3
+  %gep4 = getelementptr i8, ptr %ptr, i64 %idx4.scaled
+  %gep4.off = getelementptr i8, ptr %gep4, i64 -8
+  %load4 = load double, ptr %gep4.off, align 8
+  %load5 = load double, ptr %gep4, align 8
+  br label %reduce
+
+dead:
+  br label %reduce
+
+reduce:
+  %phi0 = phi double [ %load0, %loop ], [ 0.000000e+00, %dead ]
+  %phi1 = phi double [ %load1, %loop ], [ 0.000000e+00, %dead ]
+  %phi2 = phi double [ %load2, %loop ], [ 0.000000e+00, %dead ]
+  %phi3 = phi double [ %load3, %loop ], [ 0.000000e+00, %dead ]
+  %phi4 = phi double [ %load4, %loop ], [ 0.000000e+00, %dead ]
+  %phi5 = phi double [ %load5, %loop ], [ 0.000000e+00, %dead ]
+  %min0 = call double @llvm.minimum.f64(double 0.000000e+00, double %phi0)
+  %min1 = call double @llvm.minimum.f64(double %min0, double %phi1)
+  %min2 = call double @llvm.minimum.f64(double %min1, double %phi2)
+  %min3 = call double @llvm.minimum.f64(double %min2, double %phi3)
+  %min4 = call double @llvm.minimum.f64(double %min3, double %phi4)
+  %min5 = call double @llvm.minimum.f64(double %min4, double %phi5)
+  %min6 = call double @llvm.minimum.f64(double %min5, double 0.000000e+00)
+  %counter.next = add i64 %counter, 1
+  %iv.next = add i64 %counter, %init
+  br label %loop
+}
+
+declare double @llvm.minimum.f64(double, double)


        