https://github.com/SamTebbs33 updated 
https://github.com/llvm/llvm-project/pull/153187

>From 353e4ba1eae7b7118c9c8ae445e453d57ef9ab00 Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <[email protected]>
Date: Tue, 12 Aug 2025 14:25:36 +0100
Subject: [PATCH 1/2] [AArch64] Split large loop dependence masks

This PR adds splitting in the AArch64 backend for the LOOP_DEPENDENCE_MASK nodes
so that even large vector types can be turned into whilewr/rw.
---
 .../SelectionDAG/LegalizeVectorTypes.cpp      |   4 +
 .../Target/AArch64/AArch64ISelLowering.cpp    | 135 ++-
 llvm/test/CodeGen/AArch64/alias_mask.ll       | 839 ++++++++++--------
 .../CodeGen/AArch64/alias_mask_scalable.ll    | 825 ++++++++---------
 4 files changed, 985 insertions(+), 818 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index bcfb32b6d09f7..0dbdd19fe314e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -4012,6 +4012,10 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
     report_fatal_error("Don't know how to extract fixed-width predicate "
                        "subvector from a scalable predicate vector");
 
+  // Don't create a stack temporary if the result is unused.
+  if (!N->hasAnyUseOfValue(0))
+    return DAG.getPOISON(SubVT);
+
   // Spill the vector to the stack. We should use the alignment for
   // the smallest part.
   SDValue Vec = N->getOperand(0);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 0abf6560ad1e1..4c388ed5d9d29 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1933,9 +1933,12 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   if (Subtarget->hasSVE2() ||
       (Subtarget->hasSME() && Subtarget->isStreaming())) {
     // FIXME: Support wider fixed-length types when msve-vector-bits is used.
-    for (auto VT : {MVT::v2i32, MVT::v4i16, MVT::v8i8, MVT::v16i8}) {
-      setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Custom);
-      setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Custom);
+    for (auto Elts : {2, 4, 8, 16}) {
+      for (auto EltVT : {MVT::i8, MVT::i16, MVT::i32}) {
+        MVT VT = MVT::getVectorVT(EltVT, Elts);
+        setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Custom);
+        setOperationAction(ISD::LOOP_DEPENDENCE_WAR_MASK, VT, Custom);
+      }
     }
     for (auto VT : {MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1, MVT::nxv16i1}) {
       setOperationAction(ISD::LOOP_DEPENDENCE_RAW_MASK, VT, Custom);
@@ -5359,33 +5362,25 @@ SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
 
 static MVT getSVEContainerType(EVT ContentTy);
 
+static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
+                               int Pattern);
+
+static SDValue getSVEPredicateBitCast(EVT VT, SDValue Op, SelectionDAG &DAG);
+
 SDValue
 AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
                                                  SelectionDAG &DAG) const {
   SDLoc DL(Op);
-  uint64_t EltSize = Op.getConstantOperandVal(2);
+  assert((Subtarget->hasSVE2() ||
+          (Subtarget->hasSME() && Subtarget->isStreaming())) &&
+         "Lowering loop_dependence_raw_mask or loop_dependence_war_mask "
+         "requires SVE or SME");
+
+  LLVMContext &Ctx = *DAG.getContext();
   EVT VT = Op.getValueType();
-  switch (EltSize) {
-  case 1:
-    if (VT != MVT::v16i8 && VT != MVT::nxv16i1)
-      return SDValue();
-    break;
-  case 2:
-    if (VT != MVT::v8i8 && VT != MVT::nxv8i1)
-      return SDValue();
-    break;
-  case 4:
-    if (VT != MVT::v4i16 && VT != MVT::nxv4i1)
-      return SDValue();
-    break;
-  case 8:
-    if (VT != MVT::v2i32 && VT != MVT::nxv2i1)
-      return SDValue();
-    break;
-  default:
-    // Other element sizes are incompatible with whilewr/rw, so expand instead
-    return SDValue();
-  }
+  unsigned MaskOpcode = Op.getOpcode();
+  unsigned NumElements = VT.getVectorMinNumElements();
+  uint64_t EltSizeInBytes = Op.getConstantOperandVal(2);
 
   // TODO: Support split masks
   unsigned LaneOffset = Op.getConstantOperandVal(3);
@@ -5394,25 +5389,83 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
 
   SDValue PtrA = Op.getOperand(0);
   SDValue PtrB = Op.getOperand(1);
+  SDValue EltSizeInBytesValue = Op.getOperand(2);
 
-  if (VT.isScalableVT())
-    return DAG.getNode(Op.getOpcode(), DL, VT, PtrA, PtrB, Op.getOperand(2),
-                       Op.getOperand(3));
+  // Other element sizes are incompatible with whilewr/rw, so expand instead
+  if (!is_contained({1u, 2u, 4u, 8u}, EltSizeInBytes))
+    return SDValue();
 
-  // We can use the SVE whilewr/whilerw instruction to lower this
-  // intrinsic by creating the appropriate sequence of scalable vector
-  // operations and then extracting a fixed-width subvector from the scalable
-  // vector. Scalable vector variants are already legal.
-  EVT ContainerVT =
-      EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
-                       VT.getVectorNumElements(), true);
-  EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+  if (EltSizeInBytes * NumElements < 16) {
+    // The element size and vector length combination must at least form a
+    // 128-bit vector. Shorter vector lengths can be widened then extracted
+    EVT WideVT = VT.getDoubleNumVectorElementsVT(Ctx);
+    // Re-create the node, but widened.
+    SDValue Widened = DAG.getNode(MaskOpcode, DL, WideVT, Op->ops());
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Widened,
+                       DAG.getVectorIdxConstant(0, DL));
+  }
 
-  SDValue Mask = DAG.getNode(Op.getOpcode(), DL, WhileVT, PtrA, PtrB,
-                             Op.getOperand(2), Op.getOperand(3));
-  SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
-  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, MaskAsInt,
-                     DAG.getVectorIdxConstant(0, DL));
+  if (!VT.isScalableVT()) {
+    // We can use the SVE whilewr/whilerw instruction to lower this
+    // intrinsic by creating the appropriate sequence of scalable vector
+    // operations and then extracting a fixed-width subvector from the
+    // scalable vector. Scalable vector variants are already legal.
+    EVT ContainerVT = MVT::getScalableVectorVT(
+        VT.getVectorElementType().getSimpleVT(), NumElements);
+    EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+
+    SDValue Mask =
+        DAG.getNode(MaskOpcode, DL, WhileVT, PtrA, PtrB, EltSizeInBytesValue, Op.getOperand(3));
+    SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
+    return convertFromScalableVector(DAG, VT, MaskAsInt);
+  }
+
+  EVT EltVT = MVT::getIntegerVT(EltSizeInBytes * 8);
+  unsigned PredElements = getPackedSVEVectorVT(EltVT).getVectorMinNumElements();
+  bool NeedsSplit = NumElements > PredElements;
+  if (!NeedsSplit)
+    return Op;
+
+  EVT PartVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
+  TypeSize PartOffset =
+      TypeSize::get(PartVT.getVectorMinNumElements(), PartVT.isScalableVT()) *
+      EltSizeInBytes;
+
+  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
+  SDValue Low =
+      DAG.getNode(MaskOpcode, DL, PartVT, PtrA, PtrB, EltSizeInBytesValue, Zero);
+  SDValue High = DAG.getNode(MaskOpcode, DL, PartVT,
+                             DAG.getMemBasePlusOffset(PtrA, PartOffset, DL),
+                             PtrB, EltSizeInBytesValue, Zero);
+
+  /// Split the loop dependence mask.
+  /// This is done by creating a high and low mask, each of half the vector
+  /// length. A BRKPB of the high mask and a predicate of all zeroes is needed
+  /// to guarantee that the high mask is safe. A case where simply producing a
+  /// high mask without the select is unsafe, is when the difference between the
+  /// two pointers is less than half the vector length, e.g. ptrA = 0 and ptrB = 3
+  /// when the vector length is 32.
+  ///     The full 32xi1 mask should be three active lanes and the rest
+  ///     inactive, however when half the vector length is added to ptrA to
+  ///     produce the high mask, the difference between ptrA and ptrB is now
+  ///     -13, which will result in a mask with all lanes active. The BRKPB will
+  ///     guard against this case by producing a mask of all inactive lanes when
+  ///     the final element of the low mask is inactive, and a correct high mask
+  ///     in other cases.
+  High = DAG.getNOT(DL, High, PartVT);
+  High = DAG.getNode(
+      ISD::INTRINSIC_WO_CHAIN, DL, MVT::nxv16i1,
+      {DAG.getConstant(Intrinsic::aarch64_sve_brkpb_z, DL, MVT::i64),
+       getPTrue(DAG, DL, MVT::nxv16i1, AArch64SVEPredPattern::all),
+       getSVEPredicateBitCast(MVT::nxv16i1, Low, DAG),
+       getSVEPredicateBitCast(MVT::nxv16i1, High, DAG)});
+  High = getSVEPredicateBitCast(PartVT, High, DAG);
+  SDValue Inserted =
+      DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getPOISON(VT), Low,
+                  DAG.getVectorIdxConstant(0, DL));
+  return DAG.getNode(
+      ISD::INSERT_SUBVECTOR, DL, VT, Inserted, High,
+      DAG.getVectorIdxConstant(PartVT.getVectorMinNumElements(), DL));
 }
 
 SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
diff --git a/llvm/test/CodeGen/AArch64/alias_mask.ll b/llvm/test/CodeGen/AArch64/alias_mask.ll
index 1ec6eeded90cd..985ba136e8d3f 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask.ll
@@ -97,8 +97,8 @@ entry:
   ret <2 x i1> %0
 }
 
-define <32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_8_split:
+define <32 x i1> @whilewr_8_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_expand_high:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    sub x9, x1, x0
@@ -161,8 +161,8 @@ entry:
   ret <32 x i1> %0
 }
 
-define <64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_8_split2:
+define <64 x i1> @whilewr_8_expand_high2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_expand_high2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    sub x11, x1, x0
@@ -281,495 +281,494 @@ entry:
   ret <64 x i1> %0
 }
 
-define <16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_16_expand:
+define <16 x i1> @whilewr_16_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
-; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
-; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
-; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
-; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    whilewr p1.h, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    whilewr p3.h, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
+; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
  %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 2)
   ret <16 x i1> %0
 }
 
-define <32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_16_expand2:
+define <32 x i1> @whilewr_16_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_expand_high:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x10, x9, #32
+; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    sub x9, x9, #32
+; CHECK-NEXT:    ptrue p3.b
 ; CHECK-NEXT:    add x9, x9, x9, lsr #63
-; CHECK-NEXT:    add x10, x10, x10, lsr #63
-; CHECK-NEXT:    asr x9, x9, #1
-; CHECK-NEXT:    asr x10, x10, #1
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
 ; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    dup v7.2d, x9
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
+; CHECK-NEXT:    asr x9, x9, #1
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    cmp x10, #1
 ; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
+; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
+; CHECK-NEXT:    dup v6.2d, x9
 ; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
 ; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v7.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v5.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v0.2d, v16.2d, v0.2d
-; CHECK-NEXT:    uzp1 v16.4s, v21.4s, v20.4s
+; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
+; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
 ; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    uzp1 v20.4s, v23.4s, v22.4s
-; CHECK-NEXT:    uzp1 v17.4s, v17.4s, v24.4s
 ; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    whilewr p0.h, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    cmhi v2.2d, v6.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v6.2d, v3.2d
+; CHECK-NEXT:    cmhi v0.2d, v6.2d, v0.2d
+; CHECK-NEXT:    cmhi v1.2d, v6.2d, v1.2d
+; CHECK-NEXT:    cmhi v4.2d, v6.2d, v4.2d
+; CHECK-NEXT:    cmhi v5.2d, v6.2d, v5.2d
+; CHECK-NEXT:    cmhi v7.2d, v6.2d, v7.2d
+; CHECK-NEXT:    cmhi v6.2d, v6.2d, v16.2d
+; CHECK-NEXT:    whilewr p2.h, x0, x1
 ; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v19.4s, v7.4s
-; CHECK-NEXT:    uzp1 v4.4s, v5.4s, v4.4s
-; CHECK-NEXT:    uzp1 v5.4s, v18.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    uzp1 v1.8h, v17.8h, v20.8h
-; CHECK-NEXT:    uzp1 v3.8h, v16.8h, v3.8h
-; CHECK-NEXT:    uzp1 v4.8h, v5.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.8h, v2.8h, v0.8h
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    uzp1 v3.4s, v5.4s, v4.4s
+; CHECK-NEXT:    not p2.b, p1/z, p2.b
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v7.4s
+; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v6.4s
+; CHECK-NEXT:    and p2.b, p2/z, p2.b, p1.b
+; CHECK-NEXT:    and p1.b, p0/z, p0.b, p1.b
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v3.8h
+; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
+; CHECK-NEXT:    brkpb p1.b, p3/z, p1.b, p2.b
+; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.16b, w9
 ; CHECK-NEXT:    adrp x9, .LCPI11_0
-; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    uzp1 v0.16b, v4.16b, v0.16b
-; CHECK-NEXT:    orr v1.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI11_0]
-; CHECK-NEXT:    orr v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    str h1, [x8]
 ; CHECK-NEXT:    ret
 entry:
  %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 2)
   ret <32 x i1> %0
 }
 
-define <8 x i1> @whilewr_32_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand:
+define <8 x i1> @whilewr_32_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    dup v1.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
-; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
-; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
-; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
-; CHECK-NEXT:    cmhi v1.2d, v1.2d, v3.2d
-; CHECK-NEXT:    uzp1 v2.4s, v2.4s, v4.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    dup v1.8b, w8
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    whilewr p1.s, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    whilewr p3.s, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
+; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    xtn v0.8b, v0.8h
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 4)
   ret <8 x i1> %0
 }
 
-define <16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand2:
+define <16 x i1> @whilewr_32_split2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
-; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
-; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
-; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
-; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    addvl x8, x0, #3
+; CHECK-NEXT:    whilewr p2.s, x0, x1
+; CHECK-NEXT:    whilewr p1.s, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    whilewr p6.s, x0, x1
+; CHECK-NEXT:    whilewr p5.s, x8, x1
+; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    and p7.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    brkpb p1.b, p3/z, p4.b, p5.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p7.b, p0.b
+; CHECK-NEXT:    ptrue p4.h
+; CHECK-NEXT:    uzp1 p0.h, p6.h, p0.h
+; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
+; CHECK-NEXT:    not p0.b, p4/z, p0.b
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p4.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p4.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
+; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
  %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 4)
   ret <16 x i1> %0
 }
 
-define <32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand3:
+define <32 x i1> @whilewr_32_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_expand_high:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x10, x1, x0
+; CHECK-NEXT:    sub x9, x1, x0
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x9, x10, #61
-; CHECK-NEXT:    subs x11, x10, #64
-; CHECK-NEXT:    add x12, x10, #3
-; CHECK-NEXT:    csel x9, x9, x11, mi
-; CHECK-NEXT:    asr x11, x9, #2
-; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    sub x10, x9, #61
+; CHECK-NEXT:    subs x9, x9, #64
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    mov x10, x0
+; CHECK-NEXT:    asr x9, x9, #2
+; CHECK-NEXT:    incb x10
 ; CHECK-NEXT:    mov z2.d, z0.d
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    cmp x11, #1
 ; CHECK-NEXT:    mov z4.d, z0.d
+; CHECK-NEXT:    dup v1.2d, x9
 ; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    cmp x10, #0
 ; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    csel x10, x12, x10, mi
-; CHECK-NEXT:    dup v7.2d, x11
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    asr x10, x10, #2
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    cmp x9, #1
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    whilewr p3.s, x10, x1
+; CHECK-NEXT:    addvl x10, x0, #3
+; CHECK-NEXT:    whilewr p2.s, x0, x1
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    add z2.d, z2.d, #12 // =0xc
+; CHECK-NEXT:    add z3.d, z3.d, #10 // =0xa
+; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
+; CHECK-NEXT:    cmhi v16.2d, v1.2d, v0.2d
+; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
+; CHECK-NEXT:    whilewr p4.s, x10, x1
 ; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cmhi v5.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v16.2d, v16.2d, v0.2d
-; CHECK-NEXT:    cmhi v6.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v0.2d, v7.2d, v0.2d
-; CHECK-NEXT:    uzp1 v7.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v23.4s, v4.4s
-; CHECK-NEXT:    uzp1 v4.4s, v18.4s, v24.4s
-; CHECK-NEXT:    uzp1 v5.4s, v5.4s, v22.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v6.4s, v17.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v19.4s, v0.4s
-; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v2.8h, v6.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v7.8h, v0.8h
-; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v1.16b
-; CHECK-NEXT:    uzp1 v0.16b, v2.16b, v0.16b
-; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
+; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
+; CHECK-NEXT:    not p3.b, p1/z, p3.b
+; CHECK-NEXT:    whilewr p6.s, x0, x1
+; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
+; CHECK-NEXT:    cmhi v3.2d, v1.2d, v3.2d
+; CHECK-NEXT:    not p4.b, p1/z, p4.b
+; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
+; CHECK-NEXT:    cmhi v5.2d, v1.2d, v5.2d
+; CHECK-NEXT:    cmhi v6.2d, v1.2d, v6.2d
+; CHECK-NEXT:    cmhi v1.2d, v1.2d, v7.2d
+; CHECK-NEXT:    and p5.b, p2/z, p2.b, p1.b
+; CHECK-NEXT:    and p3.b, p3/z, p3.b, p1.b
+; CHECK-NEXT:    uzp1 v3.4s, v4.4s, v3.4s
+; CHECK-NEXT:    uzp1 v0.4s, v2.4s, v0.4s
+; CHECK-NEXT:    and p7.b, p6/z, p6.b, p1.b
+; CHECK-NEXT:    uzp1 v2.4s, v6.4s, v5.4s
+; CHECK-NEXT:    uzp1 v1.4s, v16.4s, v1.4s
+; CHECK-NEXT:    and p1.b, p4/z, p4.b, p1.b
+; CHECK-NEXT:    ptrue p4.h
+; CHECK-NEXT:    brkpb p1.b, p0/z, p7.b, p1.b
+; CHECK-NEXT:    uzp1 v0.8h, v3.8h, v0.8h
+; CHECK-NEXT:    brkpb p3.b, p0/z, p5.b, p3.b
+; CHECK-NEXT:    uzp1 v1.8h, v1.8h, v2.8h
+; CHECK-NEXT:    uzp1 p1.h, p6.h, p1.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p3.h
+; CHECK-NEXT:    not p1.b, p4/z, p1.b
+; CHECK-NEXT:    and p3.b, p2/z, p2.b, p4.b
+; CHECK-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    dup v1.16b, w9
+; CHECK-NEXT:    and p1.b, p1/z, p1.b, p4.b
 ; CHECK-NEXT:    adrp x9, .LCPI14_0
-; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI14_0]
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    brkpb p0.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uzp1 p0.b, p2.b, p0.b
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    str h1, [x8]
 ; CHECK-NEXT:    ret
 entry:
  %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 4)
   ret <32 x i1> %0
 }
 
-define <4 x i1> @whilewr_64_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand:
+define <4 x i1> @whilewr_64_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    dup v2.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    add z1.d, z1.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v0.2d, v2.2d, v0.2d
-; CHECK-NEXT:    cmhi v1.2d, v2.2d, v1.2d
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    dup v1.4h, w8
+; CHECK-NEXT:    whilewr p1.d, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    whilewr p3.d, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
+; CHECK-NEXT:    uzp1 p0.s, p1.s, p0.s
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    xtn v0.4h, v0.4s
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 8)
   ret <4 x i1> %0
 }
 
-define <8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand2:
+define <8 x i1> @whilewr_64_split2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    dup v1.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z2.d, z2.d, #4 // =0x4
-; CHECK-NEXT:    add z3.d, z3.d, #2 // =0x2
-; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
-; CHECK-NEXT:    cmhi v4.2d, v1.2d, v4.2d
-; CHECK-NEXT:    cmhi v2.2d, v1.2d, v2.2d
-; CHECK-NEXT:    cmhi v1.2d, v1.2d, v3.2d
-; CHECK-NEXT:    uzp1 v2.4s, v2.4s, v4.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    dup v1.8b, w8
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    addvl x8, x0, #3
+; CHECK-NEXT:    whilewr p2.d, x0, x1
+; CHECK-NEXT:    whilewr p1.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    whilewr p6.d, x0, x1
+; CHECK-NEXT:    whilewr p5.d, x8, x1
+; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    and p7.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    brkpb p1.b, p3/z, p4.b, p5.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p7.b, p0.b
+; CHECK-NEXT:    ptrue p4.s
+; CHECK-NEXT:    uzp1 p0.s, p6.s, p0.s
+; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
+; CHECK-NEXT:    not p0.b, p4/z, p0.b
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p4.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p4.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
+; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    xtn v0.8b, v0.8h
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 8)
   ret <8 x i1> %0
 }
 
-define <16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand3:
+define <16 x i1> @whilewr_64_split3(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split3:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    mov z7.d, z0.d
-; CHECK-NEXT:    mov z16.d, z0.d
-; CHECK-NEXT:    dup v3.2d, x8
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z4.d, z4.d, #8 // =0x8
-; CHECK-NEXT:    add z5.d, z5.d, #6 // =0x6
-; CHECK-NEXT:    add z6.d, z6.d, #4 // =0x4
-; CHECK-NEXT:    add z7.d, z7.d, #2 // =0x2
-; CHECK-NEXT:    add z16.d, z16.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v0.2d, v3.2d, v0.2d
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmhi v1.2d, v3.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v3.2d, v2.2d
-; CHECK-NEXT:    cmhi v4.2d, v3.2d, v4.2d
-; CHECK-NEXT:    cmhi v5.2d, v3.2d, v5.2d
-; CHECK-NEXT:    cmhi v6.2d, v3.2d, v6.2d
-; CHECK-NEXT:    cmhi v16.2d, v3.2d, v16.2d
-; CHECK-NEXT:    cmhi v3.2d, v3.2d, v7.2d
-; CHECK-NEXT:    uzp1 v2.4s, v4.4s, v2.4s
-; CHECK-NEXT:    uzp1 v4.4s, v6.4s, v5.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v4.8h
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    dup v1.16b, w8
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    addvl x9, x0, #3
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    whilewr p2.d, x0, x1
+; CHECK-NEXT:    whilewr p3.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #6
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    whilewr p4.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x8, all, mul #2
+; CHECK-NEXT:    and p5.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    whilewr p6.d, x8, x1
+; CHECK-NEXT:    addvl x8, x0, #5
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p0.b
+; CHECK-NEXT:    and p3.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    brkpb p4.b, p1/z, p5.b, p4.b
+; CHECK-NEXT:    and p5.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p4.s
+; CHECK-NEXT:    brkpb p4.b, p1/z, p5.b, p3.b
+; CHECK-NEXT:    whilewr p5.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x8, all, mul #4
+; CHECK-NEXT:    whilewr p9.d, x9, x1
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p10.b, p9/z, p9.b, p0.b
+; CHECK-NEXT:    whilewr p7.d, x8, x1
+; CHECK-NEXT:    addvl x8, x0, #7
+; CHECK-NEXT:    whilewr p8.d, x8, x1
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    not p8.b, p0/z, p8.b
+; CHECK-NEXT:    ptrue p3.s
+; CHECK-NEXT:    and p8.b, p8/z, p8.b, p0.b
+; CHECK-NEXT:    and p0.b, p7/z, p7.b, p0.b
+; CHECK-NEXT:    brkpb p8.b, p1/z, p10.b, p8.b
+; CHECK-NEXT:    uzp1 p4.s, p6.s, p4.s
+; CHECK-NEXT:    brkpb p0.b, p1/z, p0.b, p5.b
+; CHECK-NEXT:    uzp1 p5.s, p9.s, p8.s
+; CHECK-NEXT:    not p4.b, p3/z, p4.b
+; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    not p5.b, p3/z, p5.b
+; CHECK-NEXT:    and p6.b, p2/z, p2.b, p3.b
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p3.b
+; CHECK-NEXT:    and p7.b, p0/z, p0.b, p3.b
+; CHECK-NEXT:    and p3.b, p5/z, p5.b, p3.b
+; CHECK-NEXT:    brkpb p4.b, p1/z, p6.b, p4.b
+; CHECK-NEXT:    brkpb p3.b, p1/z, p7.b, p3.b
+; CHECK-NEXT:    ptrue p5.h
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p4.h
+; CHECK-NEXT:    not p0.b, p5/z, p0.b
+; CHECK-NEXT:    and p3.b, p2/z, p2.b, p5.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p5.b
+; CHECK-NEXT:    brkpb p0.b, p1/z, p3.b, p0.b
+; CHECK-NEXT:    uzp1 p0.b, p2.b, p0.b
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <16 x i1> @llvm.loop.dependence.war.mask.v16i1(ptr %a, ptr %b, i64 
8)
   ret <16 x i1> %0
 }
 
-define <32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand4:
+define <32 x i1> @whilewr_64_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_expand_high:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub x10, x1, x0
+; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    mov x11, x0
+; CHECK-NEXT:    addvl x13, x0, #3
+; CHECK-NEXT:    sub x10, x9, #121
+; CHECK-NEXT:    subs x9, x9, #128
+; CHECK-NEXT:    incb x11, all, mul #2
+; CHECK-NEXT:    csel x9, x10, x9, mi
+; CHECK-NEXT:    ptrue p6.d
+; CHECK-NEXT:    mov x12, x0
+; CHECK-NEXT:    asr x10, x9, #3
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    incb x12, all, mul #4
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x9, x10, #121
-; CHECK-NEXT:    subs x11, x10, #128
-; CHECK-NEXT:    add x12, x10, #7
-; CHECK-NEXT:    csel x9, x9, x11, mi
-; CHECK-NEXT:    asr x11, x9, #3
+; CHECK-NEXT:    cmp x10, #1
+; CHECK-NEXT:    dup v4.2d, x10
+; CHECK-NEXT:    cset w9, lt
+; CHECK-NEXT:    whilewr p4.d, x11, x1
+; CHECK-NEXT:    mov x11, x0
+; CHECK-NEXT:    incb x11
+; CHECK-NEXT:    whilewr p3.d, x13, x1
+; CHECK-NEXT:    addvl x13, x0, #5
+; CHECK-NEXT:    whilewr p2.d, x0, x1
 ; CHECK-NEXT:    mov z1.d, z0.d
 ; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    not p3.b, p6/z, p3.b
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    cmp x11, #1
-; CHECK-NEXT:    mov z4.d, z0.d
 ; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    cmp x10, #0
+; CHECK-NEXT:    whilewr p8.d, x11, x1
+; CHECK-NEXT:    addvl x11, x0, #6
 ; CHECK-NEXT:    mov z6.d, z0.d
-; CHECK-NEXT:    csel x10, x12, x10, mi
-; CHECK-NEXT:    dup v7.2d, x11
+; CHECK-NEXT:    and p10.b, p4/z, p4.b, p6.b
+; CHECK-NEXT:    mov z7.d, z0.d
+; CHECK-NEXT:    mov z16.d, z0.d
+; CHECK-NEXT:    and p3.b, p3/z, p3.b, p6.b
+; CHECK-NEXT:    add z3.d, z3.d, #14 // =0xe
 ; CHECK-NEXT:    add z1.d, z1.d, #12 // =0xc
-; CHECK-NEXT:    asr x10, x10, #3
+; CHECK-NEXT:    not p8.b, p6/z, p8.b
 ; CHECK-NEXT:    add z2.d, z2.d, #10 // =0xa
-; CHECK-NEXT:    add z3.d, z3.d, #8 // =0x8
-; CHECK-NEXT:    add z4.d, z4.d, #6 // =0x6
-; CHECK-NEXT:    add z5.d, z5.d, #4 // =0x4
-; CHECK-NEXT:    add z6.d, z6.d, #2 // =0x2
-; CHECK-NEXT:    dup v16.2d, x10
-; CHECK-NEXT:    cmhi v17.2d, v7.2d, v0.2d
-; CHECK-NEXT:    cmhi v19.2d, v7.2d, v1.2d
-; CHECK-NEXT:    cmhi v20.2d, v7.2d, v2.2d
-; CHECK-NEXT:    cmhi v21.2d, v7.2d, v3.2d
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    cmhi v22.2d, v7.2d, v4.2d
-; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmhi v18.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add z0.d, z0.d, #14 // =0xe
-; CHECK-NEXT:    cmhi v1.2d, v16.2d, v1.2d
-; CHECK-NEXT:    cmhi v2.2d, v16.2d, v2.2d
-; CHECK-NEXT:    cmhi v3.2d, v16.2d, v3.2d
-; CHECK-NEXT:    cmhi v4.2d, v16.2d, v4.2d
-; CHECK-NEXT:    cmhi v23.2d, v16.2d, v5.2d
-; CHECK-NEXT:    cmhi v24.2d, v16.2d, v6.2d
-; CHECK-NEXT:    cmhi v5.2d, v7.2d, v5.2d
-; CHECK-NEXT:    cmhi v16.2d, v16.2d, v0.2d
-; CHECK-NEXT:    cmhi v6.2d, v7.2d, v6.2d
-; CHECK-NEXT:    cmhi v0.2d, v7.2d, v0.2d
-; CHECK-NEXT:    uzp1 v7.4s, v21.4s, v20.4s
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT:    uzp1 v3.4s, v23.4s, v4.4s
-; CHECK-NEXT:    uzp1 v4.4s, v18.4s, v24.4s
-; CHECK-NEXT:    uzp1 v5.4s, v5.4s, v22.4s
-; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v16.4s
-; CHECK-NEXT:    uzp1 v6.4s, v17.4s, v6.4s
-; CHECK-NEXT:    uzp1 v0.4s, v19.4s, v0.4s
-; CHECK-NEXT:    uzp1 v3.8h, v4.8h, v3.8h
+; CHECK-NEXT:    add z5.d, z5.d, #8 // =0x8
+; CHECK-NEXT:    and p9.b, p2/z, p2.b, p6.b
+; CHECK-NEXT:    add z6.d, z6.d, #6 // =0x6
+; CHECK-NEXT:    add z7.d, z7.d, #4 // =0x4
+; CHECK-NEXT:    brkpb p10.b, p0/z, p10.b, p3.b
+; CHECK-NEXT:    add z16.d, z16.d, #2 // =0x2
+; CHECK-NEXT:    cmhi v0.2d, v4.2d, v0.2d
+; CHECK-NEXT:    and p8.b, p8/z, p8.b, p6.b
+; CHECK-NEXT:    cmhi v3.2d, v4.2d, v3.2d
+; CHECK-NEXT:    cmhi v1.2d, v4.2d, v1.2d
+; CHECK-NEXT:    whilewr p5.d, x13, x1
+; CHECK-NEXT:    cmhi v2.2d, v4.2d, v2.2d
+; CHECK-NEXT:    cmhi v5.2d, v4.2d, v5.2d
+; CHECK-NEXT:    ptrue p3.s
+; CHECK-NEXT:    cmhi v6.2d, v4.2d, v6.2d
+; CHECK-NEXT:    cmhi v7.2d, v4.2d, v7.2d
+; CHECK-NEXT:    brkpb p8.b, p0/z, p9.b, p8.b
+; CHECK-NEXT:    cmhi v4.2d, v4.2d, v16.2d
+; CHECK-NEXT:    uzp1 v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    uzp1 p4.s, p4.s, p10.s
+; CHECK-NEXT:    uzp1 v2.4s, v5.4s, v2.4s
+; CHECK-NEXT:    whilewr p1.d, x12, x1
+; CHECK-NEXT:    addvl x12, x0, #7
+; CHECK-NEXT:    uzp1 v3.4s, v7.4s, v6.4s
+; CHECK-NEXT:    whilewr p7.d, x12, x1
+; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v4.4s
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p8.s
+; CHECK-NEXT:    not p8.b, p3/z, p4.b
 ; CHECK-NEXT:    uzp1 v1.8h, v2.8h, v1.8h
-; CHECK-NEXT:    uzp1 v2.8h, v6.8h, v5.8h
-; CHECK-NEXT:    uzp1 v0.8h, v7.8h, v0.8h
-; CHECK-NEXT:    uzp1 v1.16b, v3.16b, v1.16b
-; CHECK-NEXT:    uzp1 v0.16b, v2.16b, v0.16b
-; CHECK-NEXT:    dup v3.16b, w10
-; CHECK-NEXT:    dup v2.16b, w9
+; CHECK-NEXT:    not p9.b, p6/z, p5.b
+; CHECK-NEXT:    and p5.b, p8/z, p8.b, p3.b
+; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v3.8h
+; CHECK-NEXT:    and p8.b, p9/z, p9.b, p6.b
+; CHECK-NEXT:    whilewr p9.d, x11, x1
+; CHECK-NEXT:    not p7.b, p6/z, p7.b
+; CHECK-NEXT:    and p10.b, p9/z, p9.b, p6.b
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.16b, w9
+; CHECK-NEXT:    and p7.b, p7/z, p7.b, p6.b
 ; CHECK-NEXT:    adrp x9, .LCPI18_0
-; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    and p6.b, p1/z, p1.b, p6.b
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI18_0]
-; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    brkpb p7.b, p0/z, p10.b, p7.b
+; CHECK-NEXT:    brkpb p6.b, p0/z, p6.b, p8.b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uzp1 p7.s, p9.s, p7.s
+; CHECK-NEXT:    uzp1 p1.s, p1.s, p6.s
+; CHECK-NEXT:    not p6.b, p3/z, p7.b
 ; CHECK-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p3.b
+; CHECK-NEXT:    and p7.b, p1/z, p1.b, p3.b
+; CHECK-NEXT:    and p3.b, p6/z, p6.b, p3.b
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    brkpb p4.b, p0/z, p4.b, p5.b
+; CHECK-NEXT:    brkpb p3.b, p0/z, p7.b, p3.b
+; CHECK-NEXT:    ptrue p5.h
 ; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    zip1 v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    zip1 v0.16b, v0.16b, v3.16b
-; CHECK-NEXT:    addv h1, v1.8h
+; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p4.h
+; CHECK-NEXT:    not p1.b, p5/z, p1.b
+; CHECK-NEXT:    and p3.b, p2/z, p2.b, p5.b
+; CHECK-NEXT:    and p1.b, p1/z, p1.b, p5.b
+; CHECK-NEXT:    brkpb p0.b, p0/z, p3.b, p1.b
+; CHECK-NEXT:    uzp1 p0.b, p2.b, p0.b
+; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    shl v1.16b, v1.16b, #7
+; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v3.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.16b, v1.16b, v3.16b
 ; CHECK-NEXT:    addv h0, v0.8h
-; CHECK-NEXT:    str h1, [x8]
+; CHECK-NEXT:    addv h1, v1.8h
 ; CHECK-NEXT:    str h0, [x8, #2]
+; CHECK-NEXT:    str h1, [x8]
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <32 x i1> @llvm.loop.dependence.war.mask.v32i1(ptr %a, ptr %b, i64 
8)
@@ -994,3 +993,79 @@ entry:
   %0 = call <1 x i1> @llvm.loop.dependence.raw.mask.v1i1(ptr %a, ptr %b, i64 8)
   ret <1 x i1> %0
 }
+
+define <2 x i1> @whilewr_8_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 1)
+  ret <2 x i1> %0
+}
+
+define <4 x i1> @whilewr_8_widen_extract2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 1)
+  ret <4 x i1> %0
+}
+
+define <8 x i1> @whilewr_8_widen_extract3(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <8 x i1> @llvm.loop.dependence.war.mask.v8i1(ptr %a, ptr %b, i64 1)
+  ret <8 x i1> %0
+}
+
+define <2 x i1> @whilewr_16_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.h, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 2)
+  ret <2 x i1> %0
+}
+
+define <4 x i1> @whilewr_16_widen_extract2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_widen_extract2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.h, x0, x1
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <4 x i1> @llvm.loop.dependence.war.mask.v4i1(ptr %a, ptr %b, i64 2)
+  ret <4 x i1> %0
+}
+
+define <2 x i1> @whilewr_32_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.s, x0, x1
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <2 x i1> @llvm.loop.dependence.war.mask.v2i1(ptr %a, ptr %b, i64 4)
+  ret <2 x i1> %0
+}
diff --git a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll 
b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
index 92da69e2e6f41..1b3dfe6f86418 100644
--- a/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
+++ b/llvm/test/CodeGen/AArch64/alias_mask_scalable.ll
@@ -81,8 +81,8 @@ entry:
   ret <vscale x 2 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_8_split(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_8_split:
+define <vscale x 32 x i1> @whilewr_8_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_expand_high:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
@@ -143,8 +143,8 @@ entry:
   ret <vscale x 32 x i1> %0
 }
 
-define <vscale x 64 x i1> @whilewr_8_split2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_8_split2:
+define <vscale x 64 x i1> @whilewr_8_expand_high2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_expand_high2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
@@ -258,151 +258,87 @@ entry:
   ret <vscale x 64 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilewr_16_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_16_expand:
+define <vscale x 16 x i1> @whilewr_16_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 
0x1e, 0x22 // sp + 16 + 8 * VG
-; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
-; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z0.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
+; CHECK-NEXT:    whilewr p1.h, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    whilewr p3.h, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
 ; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
-; CHECK-NEXT:    addvl sp, sp, #1
-; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, 
ptr %b, i64 2)
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_16_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_16_expand2:
+define <vscale x 32 x i1> @whilewr_16_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_expand_high:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 
0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    sub x8, x1, x0
-; CHECK-NEXT:    incb x0, all, mul #2
-; CHECK-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    asr x8, x8, #1
-; CHECK-NEXT:    sub x9, x1, x0
+; CHECK-NEXT:    incb x8, all, mul #2
 ; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
+; CHECK-NEXT:    sub x8, x1, x8
 ; CHECK-NEXT:    mov z3.d, z0.d
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    add x9, x9, x9, lsr #63
+; CHECK-NEXT:    add x8, x8, x8, lsr #63
+; CHECK-NEXT:    mov z5.d, z0.d
 ; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z3.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    asr x9, x9, #1
+; CHECK-NEXT:    asr x8, x8, #1
+; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    incd z5.d, all, mul #4
+; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    mov z6.d, z1.d
-; CHECK-NEXT:    mov z7.d, z2.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z2.d
+; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z3.d
 ; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z6.d, all, mul #4
-; CHECK-NEXT:    incd z7.d, all, mul #4
+; CHECK-NEXT:    incd z1.d, all, mul #4
+; CHECK-NEXT:    incd z3.d, all, mul #4
+; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z5.d
+; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    incd z4.d, all, mul #4
+; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z1.d
 ; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z24.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    incd z24.d, all, mul #4
 ; CHECK-NEXT:    uzp1 p2.s, p3.s, p4.s
-; CHECK-NEXT:    uzp1 p3.s, p5.s, p6.s
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    mov z5.d, x9
+; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z3.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z4.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    uzp1 p7.s, p7.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p7.h
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    uzp1 p5.s, p9.s, p6.s
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p6.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
-; CHECK-NEXT:    cmp x9, #1
-; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p7.s
+; CHECK-NEXT:    uzp1 p4.s, p5.s, p6.s
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.h, p5.h, p4.h
+; CHECK-NEXT:    whilewr p2.h, x0, x1
+; CHECK-NEXT:    incb x0
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
-; CHECK-NEXT:    uzp1 p1.b, p1.b, p2.b
-; CHECK-NEXT:    uzp1 p2.b, p0.b, p4.b
+; CHECK-NEXT:    ptrue p5.h
+; CHECK-NEXT:    uzp1 p0.s, p3.s, p0.s
+; CHECK-NEXT:    whilewr p3.h, x0, x1
+; CHECK-NEXT:    uzp1 p0.h, p4.h, p0.h
+; CHECK-NEXT:    not p3.b, p5/z, p3.b
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p5.b
+; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
+; CHECK-NEXT:    and p1.b, p3/z, p3.b, p5.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    whilelo p5.b, xzr, x8
+; CHECK-NEXT:    brkpb p3.b, p3/z, p4.b, p1.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p3.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p1, p1.b, p6.b
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    sel p1.b, p2, p2.b, p3.b
+; CHECK-NEXT:    sel p1.b, p0, p0.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.b, p2.b, p3.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -411,42 +347,27 @@ entry:
   ret <vscale x 32 x i1> %0
 }
 
-define <vscale x 8 x i1> @whilewr_32_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand:
+define <vscale x 8 x i1> @whilewr_32_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z2.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.s, p3.s, p0.s
+; CHECK-NEXT:    whilewr p1.s, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    whilewr p3.s, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
 ; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
-; CHECK-NEXT:    whilelo p1.h, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, 
ptr %b, i64 4)
   ret <vscale x 8 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilewr_32_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand2:
+define <vscale x 16 x i1> @whilewr_32_split2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_split2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
@@ -456,49 +377,36 @@ define <vscale x 16 x i1> @whilewr_32_expand2(ptr %a, ptr 
%b) {
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 
0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z0.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
+; CHECK-NEXT:    addvl x8, x0, #3
+; CHECK-NEXT:    whilewr p2.s, x0, x1
+; CHECK-NEXT:    whilewr p1.s, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    whilewr p6.s, x0, x1
+; CHECK-NEXT:    whilewr p5.s, x8, x1
+; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    and p7.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    brkpb p1.b, p3/z, p4.b, p5.b
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    brkpb p0.b, p3/z, p7.b, p0.b
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
+; CHECK-NEXT:    ptrue p4.h
+; CHECK-NEXT:    uzp1 p0.h, p6.h, p0.h
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
+; CHECK-NEXT:    not p0.b, p4/z, p0.b
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p4.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p4.b
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
 ; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -507,13 +415,11 @@ entry:
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_32_expand3:
+define <vscale x 32 x i1> @whilewr_32_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_expand_high:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
-; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
-; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
@@ -522,79 +428,76 @@ define <vscale x 32 x i1> @whilewr_32_expand3(ptr %a, ptr 
%b) {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 
0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #3
-; CHECK-NEXT:    incb x0, all, mul #4
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #2
+; CHECK-NEXT:    incb x8, all, mul #4
+; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    ptrue p2.b
 ; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    incd z6.d, all, mul #4
-; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p7.s
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p3.h, p4.h, p3.h
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p6.s, p6.s, p9.s
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    subs x8, x1, x0
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p6.h
+; CHECK-NEXT:    subs x8, x1, x8
+; CHECK-NEXT:    mov z3.d, z0.d
 ; CHECK-NEXT:    add x9, x8, #3
+; CHECK-NEXT:    mov z5.d, z0.d
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    uzp1 p2.b, p3.b, p2.b
+; CHECK-NEXT:    mov x9, x0
+; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    asr x8, x8, #2
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
+; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    incd z5.d, all, mul #4
+; CHECK-NEXT:    incb x9
+; CHECK-NEXT:    mov z2.d, x8
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    incd z1.d, all, mul #4
+; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z0.d
+; CHECK-NEXT:    incd z4.d, all, mul #2
+; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z3.d
+; CHECK-NEXT:    incd z3.d, all, mul #4
+; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z5.d
+; CHECK-NEXT:    cmphi p8.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z4.d
+; CHECK-NEXT:    incd z4.d, all, mul #4
+; CHECK-NEXT:    uzp1 p3.s, p4.s, p3.s
+; CHECK-NEXT:    uzp1 p4.s, p5.s, p6.s
+; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z3.d
+; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z4.d
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p5.s
+; CHECK-NEXT:    uzp1 p6.s, p7.s, p8.s
+; CHECK-NEXT:    uzp1 p3.h, p3.h, p4.h
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p8.s
+; CHECK-NEXT:    whilewr p4.s, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #3
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.s, p10.s, p4.s
-; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p6.s
+; CHECK-NEXT:    whilewr p7.s, x0, x1
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    whilewr p8.s, x9, x1
+; CHECK-NEXT:    uzp1 p0.s, p5.s, p0.s
+; CHECK-NEXT:    not p4.b, p1/z, p4.b
+; CHECK-NEXT:    not p8.b, p1/z, p8.b
+; CHECK-NEXT:    uzp1 p0.h, p6.h, p0.h
+; CHECK-NEXT:    whilewr p6.s, x0, x1
+; CHECK-NEXT:    and p5.b, p7/z, p7.b, p1.b
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p1.b
+; CHECK-NEXT:    and p8.b, p8/z, p8.b, p1.b
+; CHECK-NEXT:    and p1.b, p6/z, p6.b, p1.b
+; CHECK-NEXT:    uzp1 p0.b, p3.b, p0.b
+; CHECK-NEXT:    brkpb p1.b, p2/z, p1.b, p8.b
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p5.h, p7.h, p5.h
-; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
+; CHECK-NEXT:    brkpb p3.b, p2/z, p5.b, p4.b
+; CHECK-NEXT:    ptrue p4.h
+; CHECK-NEXT:    uzp1 p1.h, p6.h, p1.h
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p3.h, p7.h, p3.h
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    not p1.b, p4/z, p1.b
+; CHECK-NEXT:    and p5.b, p3/z, p3.b, p4.b
+; CHECK-NEXT:    and p1.b, p1/z, p1.b, p4.b
 ; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.b, p0.b, p5.b
+; CHECK-NEXT:    brkpb p2.b, p2/z, p5.b, p1.b
 ; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
+; CHECK-NEXT:    sel p1.b, p0, p0.b, p4.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.b, p3.b, p2.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -603,120 +506,152 @@ entry:
   ret <vscale x 32 x i1> %0
 }
 
-define <vscale x 4 x i1> @whilewr_64_expand(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand:
+define <vscale x 4 x i1> @whilewr_64_split(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    whilewr p1.d, x0, x1
+; CHECK-NEXT:    incb x0
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    whilewr p3.d, x0, x1
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    and p0.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
 ; CHECK-NEXT:    uzp1 p0.s, p1.s, p0.s
-; CHECK-NEXT:    whilelo p1.s, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.nxv4i1(ptr %a, 
ptr %b, i64 8)
   ret <vscale x 4 x i1> %0
 }
 
-define <vscale x 8 x i1> @whilewr_64_expand2(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand2:
+define <vscale x 8 x i1> @whilewr_64_split2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 
0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    addvl x8, x0, #3
+; CHECK-NEXT:    whilewr p2.d, x0, x1
+; CHECK-NEXT:    whilewr p1.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x0, all, mul #2
+; CHECK-NEXT:    incb x8
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z0.d
-; CHECK-NEXT:    mov z4.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z1.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z2.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
-; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z4.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p0.s, p3.s, p0.s
+; CHECK-NEXT:    ptrue p3.b
+; CHECK-NEXT:    whilewr p6.d, x0, x1
+; CHECK-NEXT:    whilewr p5.d, x8, x1
+; CHECK-NEXT:    not p1.b, p0/z, p1.b
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p4.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    and p7.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    and p0.b, p1/z, p1.b, p0.b
+; CHECK-NEXT:    brkpb p1.b, p3/z, p4.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p0.b, p3/z, p7.b, p0.b
+; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    ptrue p4.s
+; CHECK-NEXT:    uzp1 p0.s, p6.s, p0.s
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
+; CHECK-NEXT:    not p0.b, p4/z, p0.b
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p4.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p4.b
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p0.b, p3/z, p2.b, p0.b
 ; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
-; CHECK-NEXT:    whilelo p1.h, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
   %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, 
ptr %b, i64 8)
   ret <vscale x 8 x i1> %0
 }
 
-define <vscale x 16 x i1> @whilewr_64_expand3(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand3:
+define <vscale x 16 x i1> @whilewr_64_split3(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_split3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
+; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 
0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, z0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    incd z1.d
-; CHECK-NEXT:    incd z4.d, all, mul #2
-; CHECK-NEXT:    incd z5.d, all, mul #4
-; CHECK-NEXT:    cmphi p2.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    cmphi p1.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    incd z1.d, all, mul #4
-; CHECK-NEXT:    cmphi p3.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p4.d, p0/z, z2.d, z5.d
-; CHECK-NEXT:    incd z3.d, all, mul #2
-; CHECK-NEXT:    cmphi p5.d, p0/z, z2.d, z1.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z2.d, z4.d
-; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
-; CHECK-NEXT:    mov z0.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z2.d, z3.d
-; CHECK-NEXT:    uzp1 p2.s, p4.s, p5.s
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
-; CHECK-NEXT:    incd z0.d, all, mul #4
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p6.s
-; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    cmphi p0.d, p0/z, z2.d, z0.d
-; CHECK-NEXT:    uzp1 p1.h, p1.h, p3.h
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    addvl x9, x0, #3
+; CHECK-NEXT:    incb x8
+; CHECK-NEXT:    whilewr p2.d, x0, x1
+; CHECK-NEXT:    whilewr p3.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #6
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    whilewr p4.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x8, all, mul #2
+; CHECK-NEXT:    and p5.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    not p4.b, p0/z, p4.b
+; CHECK-NEXT:    not p3.b, p0/z, p3.b
+; CHECK-NEXT:    whilewr p6.d, x8, x1
+; CHECK-NEXT:    addvl x8, x0, #5
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p0.b
+; CHECK-NEXT:    and p3.b, p3/z, p3.b, p0.b
+; CHECK-NEXT:    brkpb p4.b, p1/z, p5.b, p4.b
+; CHECK-NEXT:    and p5.b, p6/z, p6.b, p0.b
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p4.s
+; CHECK-NEXT:    brkpb p4.b, p1/z, p5.b, p3.b
+; CHECK-NEXT:    whilewr p5.d, x8, x1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    incb x8, all, mul #4
+; CHECK-NEXT:    whilewr p9.d, x9, x1
+; CHECK-NEXT:    not p5.b, p0/z, p5.b
+; CHECK-NEXT:    and p10.b, p9/z, p9.b, p0.b
+; CHECK-NEXT:    whilewr p7.d, x8, x1
+; CHECK-NEXT:    addvl x8, x0, #7
+; CHECK-NEXT:    whilewr p8.d, x8, x1
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p0.b
+; CHECK-NEXT:    not p8.b, p0/z, p8.b
+; CHECK-NEXT:    ptrue p3.s
+; CHECK-NEXT:    and p8.b, p8/z, p8.b, p0.b
+; CHECK-NEXT:    and p0.b, p7/z, p7.b, p0.b
+; CHECK-NEXT:    brkpb p8.b, p1/z, p10.b, p8.b
+; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p4.s, p6.s, p4.s
+; CHECK-NEXT:    brkpb p0.b, p1/z, p0.b, p5.b
+; CHECK-NEXT:    uzp1 p5.s, p9.s, p8.s
+; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
+; CHECK-NEXT:    not p4.b, p3/z, p4.b
+; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
 ; CHECK-NEXT:    uzp1 p0.s, p7.s, p0.s
+; CHECK-NEXT:    not p5.b, p3/z, p5.b
+; CHECK-NEXT:    and p6.b, p2/z, p2.b, p3.b
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p3.b
+; CHECK-NEXT:    and p7.b, p0/z, p0.b, p3.b
+; CHECK-NEXT:    and p3.b, p5/z, p5.b, p3.b
+; CHECK-NEXT:    brkpb p4.b, p1/z, p6.b, p4.b
+; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p3.b, p1/z, p7.b, p3.b
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.h, p2.h, p0.h
-; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
+; CHECK-NEXT:    ptrue p5.h
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p4.h
+; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    not p0.b, p5/z, p0.b
+; CHECK-NEXT:    and p3.b, p2/z, p2.b, p5.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p0.b, p1/z, p3.b, p0.b
+; CHECK-NEXT:    uzp1 p0.b, p2.b, p0.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -725,11 +660,12 @@ entry:
   ret <vscale x 16 x i1> %0
 }
 
-define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, ptr %b) {
-; CHECK-LABEL: whilewr_64_expand4:
+define <vscale x 32 x i1> @whilewr_64_expand_high(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_64_expand_high:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p11, [sp] // 2-byte Spill
 ; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Spill
@@ -739,80 +675,109 @@ define <vscale x 32 x i1> @whilewr_64_expand4(ptr %a, 
ptr %b) {
 ; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Spill
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 
0x1e, 0x22 // sp + 16 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    index z0.d, #0, #1
-; CHECK-NEXT:    subs x8, x1, x0
+; CHECK-NEXT:    index z1.d, #0, #1
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    subs x8, x1, x8
+; CHECK-NEXT:    ptrue p1.b
 ; CHECK-NEXT:    add x9, x8, #7
 ; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    addvl x9, x0, #8
+; CHECK-NEXT:    mov x9, x0
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    mov z2.d, z1.d
 ; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z1.d, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    incd z1.d
+; CHECK-NEXT:    mov z5.d, z1.d
+; CHECK-NEXT:    incb x9, all, mul #2
+; CHECK-NEXT:    mov z4.d, x8
+; CHECK-NEXT:    incd z0.d
 ; CHECK-NEXT:    incd z2.d, all, mul #2
-; CHECK-NEXT:    incd z4.d, all, mul #4
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    mov z3.d, z1.d
-; CHECK-NEXT:    mov z6.d, z2.d
-; CHECK-NEXT:    mov z7.d, z1.d
-; CHECK-NEXT:    cmphi p2.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p3.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z1.d
+; CHECK-NEXT:    incd z5.d, all, mul #4
+; CHECK-NEXT:    cmphi p5.d, p0/z, z4.d, z1.d
+; CHECK-NEXT:    mov z3.d, z0.d
+; CHECK-NEXT:    cmphi p2.d, p0/z, z4.d, z2.d
+; CHECK-NEXT:    incd z2.d, all, mul #4
+; CHECK-NEXT:    cmphi p3.d, p0/z, z4.d, z0.d
+; CHECK-NEXT:    incd z0.d, all, mul #4
+; CHECK-NEXT:    cmphi p10.d, p0/z, z4.d, z5.d
 ; CHECK-NEXT:    incd z3.d, all, mul #2
+; CHECK-NEXT:    cmphi p6.d, p0/z, z4.d, z2.d
+; CHECK-NEXT:    cmphi p11.d, p0/z, z4.d, z0.d
+; CHECK-NEXT:    uzp1 p3.s, p5.s, p3.s
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    cmphi p9.d, p0/z, z4.d, z3.d
+; CHECK-NEXT:    uzp1 p5.s, p10.s, p11.s
 ; CHECK-NEXT:    incd z6.d, all, mul #4
-; CHECK-NEXT:    incd z7.d, all, mul #4
-; CHECK-NEXT:    uzp1 p4.s, p5.s, p4.s
-; CHECK-NEXT:    mov z24.d, z3.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    incd z24.d, all, mul #4
-; CHECK-NEXT:    uzp1 p2.s, p2.s, p7.s
-; CHECK-NEXT:    uzp1 p3.s, p3.s, p8.s
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z24.d
+; CHECK-NEXT:    uzp1 p9.s, p2.s, p9.s
+; CHECK-NEXT:    cmphi p7.d, p0/z, z4.d, z6.d
+; CHECK-NEXT:    uzp1 p3.h, p3.h, p9.h
 ; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p3.h, p4.h, p3.h
 ; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    whilewr p4.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #3
+; CHECK-NEXT:    whilewr p8.d, x9, x1
+; CHECK-NEXT:    mov x9, x0
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-NEXT:    uzp1 p6.s, p6.s, p9.s
-; CHECK-NEXT:    whilelo p1.b, xzr, x8
-; CHECK-NEXT:    subs x8, x1, x9
-; CHECK-NEXT:    uzp1 p2.h, p2.h, p6.h
-; CHECK-NEXT:    add x9, x8, #7
-; CHECK-NEXT:    csel x8, x9, x8, mi
-; CHECK-NEXT:    uzp1 p2.b, p3.b, p2.b
-; CHECK-NEXT:    asr x8, x8, #3
-; CHECK-NEXT:    mov z5.d, x8
-; CHECK-NEXT:    cmphi p5.d, p0/z, z5.d, z24.d
-; CHECK-NEXT:    cmphi p7.d, p0/z, z5.d, z6.d
-; CHECK-NEXT:    cmphi p8.d, p0/z, z5.d, z7.d
-; CHECK-NEXT:    cmphi p9.d, p0/z, z5.d, z4.d
-; CHECK-NEXT:    cmphi p4.d, p0/z, z5.d, z3.d
-; CHECK-NEXT:    cmphi p10.d, p0/z, z5.d, z2.d
-; CHECK-NEXT:    cmphi p6.d, p0/z, z5.d, z1.d
-; CHECK-NEXT:    cmphi p0.d, p0/z, z5.d, z0.d
-; CHECK-NEXT:    cmp x8, #1
-; CHECK-NEXT:    uzp1 p5.s, p7.s, p5.s
-; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    uzp1 p7.s, p9.s, p8.s
-; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    incb x9, all, mul #4
+; CHECK-NEXT:    uzp1 p6.s, p6.s, p7.s
+; CHECK-NEXT:    not p8.b, p0/z, p8.b
+; CHECK-NEXT:    uzp1 p5.h, p5.h, p6.h
+; CHECK-NEXT:    whilewr p2.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #5
+; CHECK-NEXT:    whilewr p9.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #6
+; CHECK-NEXT:    whilewr p11.d, x9, x1
+; CHECK-NEXT:    addvl x9, x0, #7
+; CHECK-NEXT:    whilewr p7.d, x9, x1
+; CHECK-NEXT:    not p9.b, p0/z, p9.b
+; CHECK-NEXT:    whilewr p10.d, x0, x1
+; CHECK-NEXT:    incb x0
+; CHECK-NEXT:    and p6.b, p4/z, p4.b, p0.b
+; CHECK-NEXT:    uzp1 p3.b, p3.b, p5.b
+; CHECK-NEXT:    and p5.b, p2/z, p2.b, p0.b
+; CHECK-NEXT:    and p8.b, p8/z, p8.b, p0.b
+; CHECK-NEXT:    and p9.b, p9/z, p9.b, p0.b
+; CHECK-NEXT:    not p7.b, p0/z, p7.b
+; CHECK-NEXT:    brkpb p6.b, p1/z, p6.b, p8.b
+; CHECK-NEXT:    and p8.b, p11/z, p11.b, p0.b
+; CHECK-NEXT:    brkpb p5.b, p1/z, p5.b, p9.b
+; CHECK-NEXT:    whilewr p9.d, x0, x1
+; CHECK-NEXT:    and p7.b, p7/z, p7.b, p0.b
+; CHECK-NEXT:    not p9.b, p0/z, p9.b
+; CHECK-NEXT:    brkpb p7.b, p1/z, p8.b, p7.b
+; CHECK-NEXT:    ptrue p8.s
+; CHECK-NEXT:    and p9.b, p9/z, p9.b, p0.b
+; CHECK-NEXT:    and p0.b, p10/z, p10.b, p0.b
+; CHECK-NEXT:    uzp1 p7.s, p11.s, p7.s
+; CHECK-NEXT:    ldr p11, [sp] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p4.s, p4.s, p6.s
+; CHECK-NEXT:    brkpb p0.b, p1/z, p0.b, p9.b
 ; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p4.s, p10.s, p4.s
+; CHECK-NEXT:    uzp1 p2.s, p2.s, p5.s
+; CHECK-NEXT:    not p5.b, p8/z, p7.b
+; CHECK-NEXT:    not p4.b, p8/z, p4.b
+; CHECK-NEXT:    uzp1 p0.s, p10.s, p0.s
 ; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p0.s, p0.s, p6.s
+; CHECK-NEXT:    and p7.b, p2/z, p2.b, p8.b
+; CHECK-NEXT:    and p5.b, p5/z, p5.b, p8.b
+; CHECK-NEXT:    and p4.b, p4/z, p4.b, p8.b
+; CHECK-NEXT:    and p6.b, p0/z, p0.b, p8.b
 ; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Reload
-; CHECK-NEXT:    uzp1 p5.h, p7.h, p5.h
+; CHECK-NEXT:    brkpb p5.b, p1/z, p7.b, p5.b
 ; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Reload
+; CHECK-NEXT:    brkpb p4.b, p1/z, p6.b, p4.b
+; CHECK-NEXT:    ptrue p6.h
+; CHECK-NEXT:    uzp1 p2.h, p2.h, p5.h
 ; CHECK-NEXT:    uzp1 p0.h, p0.h, p4.h
+; CHECK-NEXT:    not p2.b, p6/z, p2.b
+; CHECK-NEXT:    and p4.b, p0/z, p0.b, p6.b
+; CHECK-NEXT:    and p2.b, p2/z, p2.b, p6.b
 ; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Reload
-; CHECK-NEXT:    whilelo p4.b, xzr, x8
-; CHECK-NEXT:    uzp1 p3.b, p0.b, p5.b
-; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
-; CHECK-NEXT:    sel p0.b, p2, p2.b, p1.b
-; CHECK-NEXT:    sel p1.b, p3, p3.b, p4.b
+; CHECK-NEXT:    whilelo p5.b, xzr, x8
+; CHECK-NEXT:    brkpb p2.b, p1/z, p4.b, p2.b
 ; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Reload
+; CHECK-NEXT:    sel p1.b, p3, p3.b, p5.b
+; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Reload
+; CHECK-NEXT:    uzp1 p0.b, p0.b, p2.b
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -913,3 +878,73 @@ entry:
   %0 = call <vscale x 16 x i1> @llvm.loop.dependence.war.mask.nxv16i1(ptr %a, 
ptr %b, i64 3)
   ret <vscale x 16 x i1> %0
 }
+
+define <vscale x 2 x i1> @whilewr_8_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.nxv2i1(ptr %a, 
ptr %b, i64 1)
+  ret <vscale x 2 x i1> %0
+}
+
+define <vscale x 4 x i1> @whilewr_8_widen_extract2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.nxv4i1(ptr %a, 
ptr %b, i64 1)
+  ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 8 x i1> @whilewr_8_widen_extract3(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_8_widen_extract3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.b, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 8 x i1> @llvm.loop.dependence.war.mask.nxv8i1(ptr %a, 
ptr %b, i64 1)
+  ret <vscale x 8 x i1> %0
+}
+
+define <vscale x 2 x i1> @whilewr_16_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.h, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.nxv2i1(ptr %a, 
ptr %b, i64 2)
+  ret <vscale x 2 x i1> %0
+}
+
+define <vscale x 4 x i1> @whilewr_16_widen_extract2(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_16_widen_extract2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.h, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 4 x i1> @llvm.loop.dependence.war.mask.nxv4i1(ptr %a, 
ptr %b, i64 2)
+  ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 2 x i1> @whilewr_32_widen_extract(ptr %a, ptr %b) {
+; CHECK-LABEL: whilewr_32_widen_extract:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    whilewr p0.s, x0, x1
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ret
+entry:
+  %0 = call <vscale x 2 x i1> @llvm.loop.dependence.war.mask.nxv2i1(ptr %a, 
ptr %b, i64 4)
+  ret <vscale x 2 x i1> %0
+}

>From d35fdad58ec5f256ff3ff58b16625651124b727f Mon Sep 17 00:00:00 2001
From: Samuel Tebbs <[email protected]>
Date: Wed, 26 Nov 2025 10:09:55 +0000
Subject: [PATCH 2/2] Format

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp 
b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4c388ed5d9d29..38ef6192e23d0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5414,8 +5414,8 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue 
Op,
         VT.getVectorElementType().getSimpleVT(), NumElements);
     EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
 
-    SDValue Mask =
-        DAG.getNode(MaskOpcode, DL, WhileVT, PtrA, PtrB, EltSizeInBytesValue, 
Op.getOperand(3));
+    SDValue Mask = DAG.getNode(MaskOpcode, DL, WhileVT, PtrA, PtrB,
+                               EltSizeInBytesValue, Op.getOperand(3));
     SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, ContainerVT, Mask);
     return convertFromScalableVector(DAG, VT, MaskAsInt);
   }
@@ -5432,8 +5432,8 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue 
Op,
       EltSizeInBytes;
 
   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
-  SDValue Low =
-      DAG.getNode(MaskOpcode, DL, PartVT, PtrA, PtrB, EltSizeInBytesValue, 
Zero);
+  SDValue Low = DAG.getNode(MaskOpcode, DL, PartVT, PtrA, PtrB,
+                            EltSizeInBytesValue, Zero);
   SDValue High = DAG.getNode(MaskOpcode, DL, PartVT,
                              DAG.getMemBasePlusOffset(PtrA, PartOffset, DL),
                              PtrB, EltSizeInBytesValue, Zero);

_______________________________________________
llvm-branch-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits

Reply via email to