kmclaughlin created this revision.
kmclaughlin added reviewers: efriedma, sdesmalen, andwar.
Herald added subscribers: psnobl, rkruppe, hiraditya, kristof.beyls, tschuett.
Herald added a reviewer: rengolin.
Herald added a project: LLVM.

Several SVE intrinsics with immediate arguments (including those
added by D70253 <https://reviews.llvm.org/D70253> & D70437
<https://reviews.llvm.org/D70437>) do not use the ImmArg property.
This patch adds ImmArg<Op> where required and updates the
patterns that match these immediates accordingly.
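
As an illustration (not part of this patch): with ImmArg on the
shift-amount operand of an intrinsic such as @llvm.aarch64.sve.asrd,
the trailing i32 argument below must be an integer constant at the
call site; a variable shift amount would be rejected by the IR
verifier.

  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(
                                     <vscale x 8 x i1> %pg,
                                     <vscale x 8 x i16> %a,
                                     i32 16)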


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D72612

Files:
  llvm/include/llvm/IR/IntrinsicsAArch64.td
  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
  llvm/lib/Target/AArch64/AArch64InstrFormats.td
  llvm/lib/Target/AArch64/AArch64InstrInfo.td
  llvm/lib/Target/AArch64/SVEInstrFormats.td

Index: llvm/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -354,6 +354,12 @@
 : Pat<(vtd (op vt1:$Op1, vt2:$Op2, (vt3 ImmTy:$Op3))),
       (inst $Op1, $Op2, ImmTy:$Op3)>;
 
+class SVE_3_Op_Cpx_Imm_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
+                           ValueType vt2, ComplexPattern cpx, Operand ImmTy,
+                           Instruction inst>
+: Pat<(vtd (op vt1:$Op1, vt2:$Op2, (cpx ImmTy:$Op3))),
+      (inst $Op1, $Op2, ImmTy:$Op3)>;
+
 class SVE_4_Op_Imm_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
                        ValueType vt2, ValueType vt3, ValueType vt4,
                        Operand ImmTy, Instruction inst>
@@ -4377,10 +4383,10 @@
     let Inst{9-8} = imm{4-3};
   }
 
-  def : SVE_3_Op_Imm_Pat<nxv16i8, op, nxv16i1, nxv16i8, i32, vecshiftR8,  !cast<Instruction>(NAME # _B)>;
-  def : SVE_3_Op_Imm_Pat<nxv8i16, op, nxv8i1,  nxv8i16, i32, vecshiftR16, !cast<Instruction>(NAME # _H)>;
-  def : SVE_3_Op_Imm_Pat<nxv4i32, op, nxv4i1,  nxv4i32, i32, vecshiftR32, !cast<Instruction>(NAME # _S)>;
-  def : SVE_3_Op_Imm_Pat<nxv2i64, op, nxv2i1,  nxv2i64, i32, vecshiftR64, !cast<Instruction>(NAME # _D)>;
+  def : SVE_3_Op_Cpx_Imm_Pat<nxv16i8, op, nxv16i1, nxv16i8, shiftimm8,  vecshiftR8,  !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Cpx_Imm_Pat<nxv8i16, op, nxv8i1,  nxv8i16, shiftimm16, vecshiftR16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Cpx_Imm_Pat<nxv4i32, op, nxv4i1,  nxv4i32, shiftimm32, vecshiftR32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Cpx_Imm_Pat<nxv2i64, op, nxv2i1,  nxv2i64, shiftimm64, vecshiftR64, !cast<Instruction>(NAME # _D)>;
 }
 
 class sve_int_bin_pred_shift<bits<2> sz8_64, bit wide, bits<3> opc,
Index: llvm/lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5023,6 +5023,7 @@
 defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
 
 
+let AddedComplexity = 1 in {
 // Floating point vector extractions are codegen'd as either a sequence of
 // subregister extractions, or a MOV (aka CPY here, alias for DUP) if
 // the lane number is anything other than zero.
@@ -5032,6 +5033,7 @@
           (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
 def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
           (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
+}
 
 def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
           (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
Index: llvm/lib/Target/AArch64/AArch64InstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -683,6 +683,10 @@
   let ParserMatchClass = Imm0_63Operand;
 }
 
+def shiftimm8  : ComplexPattern<i32, 1, "SelectSVEShiftImm64<1, 8>",  []>;
+def shiftimm16 : ComplexPattern<i32, 1, "SelectSVEShiftImm64<1, 16>", []>;
+def shiftimm32 : ComplexPattern<i32, 1, "SelectSVEShiftImm64<1, 32>", []>;
+def shiftimm64 : ComplexPattern<i32, 1, "SelectSVEShiftImm64<1, 64>", []>;
 
 // Crazy immediate formats used by 32-bit and 64-bit logical immediate
 // instructions for splatting repeating bit patterns across the immediate.
@@ -832,7 +836,7 @@
 }
 
 // imm32_0_7 predicate - True if the 32-bit immediate is in the range [0,7]
-def imm32_0_7 : Operand<i32>, ImmLeaf<i32, [{
+def imm32_0_7 : Operand<i32>, TImmLeaf<i32, [{
   return ((uint32_t)Imm) < 8;
 }]> {
   let ParserMatchClass = Imm0_7Operand;
@@ -1091,8 +1095,8 @@
   let RenderMethod = "addVectorIndexOperands";
 }
 
-class AsmVectorIndexOpnd<ValueType ty, AsmOperandClass mc, code pred>
-    : Operand<ty>, ImmLeaf<ty, pred> {
+class AsmVectorIndexOpnd<ValueType ty, AsmOperandClass mc, int Val>
+    : Operand<ty>, ComplexPattern<ty, 1, "SelectVectorIndex<" # Val # ">", []> {
   let ParserMatchClass = mc;
   let PrintMethod = "printVectorIndex";
 }
@@ -1103,17 +1107,17 @@
 def VectorIndexSOperand : AsmVectorIndex<0, 3>;
 def VectorIndexDOperand : AsmVectorIndex<0, 1>;
 
-def VectorIndex1 : AsmVectorIndexOpnd<i64, VectorIndex1Operand, [{ return ((uint64_t)Imm) == 1; }]>;
-def VectorIndexB : AsmVectorIndexOpnd<i64, VectorIndexBOperand, [{ return ((uint64_t)Imm) < 16; }]>;
-def VectorIndexH : AsmVectorIndexOpnd<i64, VectorIndexHOperand, [{ return ((uint64_t)Imm) < 8; }]>;
-def VectorIndexS : AsmVectorIndexOpnd<i64, VectorIndexSOperand, [{ return ((uint64_t)Imm) < 4; }]>;
-def VectorIndexD : AsmVectorIndexOpnd<i64, VectorIndexDOperand, [{ return ((uint64_t)Imm) < 2; }]>;
+def VectorIndex1 : AsmVectorIndexOpnd<i64, VectorIndex1Operand, 1>;
+def VectorIndexB : AsmVectorIndexOpnd<i64, VectorIndexBOperand, 16>;
+def VectorIndexH : AsmVectorIndexOpnd<i64, VectorIndexHOperand, 8>;
+def VectorIndexS : AsmVectorIndexOpnd<i64, VectorIndexSOperand, 4>;
+def VectorIndexD : AsmVectorIndexOpnd<i64, VectorIndexDOperand, 2>;
 
-def VectorIndex132b : AsmVectorIndexOpnd<i32, VectorIndex1Operand, [{ return ((uint64_t)Imm) == 1; }]>;
-def VectorIndexB32b : AsmVectorIndexOpnd<i32, VectorIndexBOperand, [{ return ((uint64_t)Imm) < 16; }]>;
-def VectorIndexH32b : AsmVectorIndexOpnd<i32, VectorIndexHOperand, [{ return ((uint64_t)Imm) < 8; }]>;
-def VectorIndexS32b : AsmVectorIndexOpnd<i32, VectorIndexSOperand, [{ return ((uint64_t)Imm) < 4; }]>;
-def VectorIndexD32b : AsmVectorIndexOpnd<i32, VectorIndexDOperand, [{ return ((uint64_t)Imm) < 2; }]>;
+def VectorIndex132b : AsmVectorIndexOpnd<i32, VectorIndex1Operand, 1>;
+def VectorIndexB32b : AsmVectorIndexOpnd<i32, VectorIndexBOperand, 16>;
+def VectorIndexH32b : AsmVectorIndexOpnd<i32, VectorIndexHOperand, 8>;
+def VectorIndexS32b : AsmVectorIndexOpnd<i32, VectorIndexSOperand, 4>;
+def VectorIndexD32b : AsmVectorIndexOpnd<i32, VectorIndexDOperand, 2>;
 
 def SVEVectorIndexExtDupBOperand : AsmVectorIndex<0, 63, "SVE">;
 def SVEVectorIndexExtDupHOperand : AsmVectorIndex<0, 31, "SVE">;
@@ -1122,15 +1126,15 @@
 def SVEVectorIndexExtDupQOperand : AsmVectorIndex<0, 3, "SVE">;
 
 def sve_elm_idx_extdup_b
-  : AsmVectorIndexOpnd<i64, SVEVectorIndexExtDupBOperand, [{ return ((uint64_t)Imm) < 64; }]>;
+  : AsmVectorIndexOpnd<i64, SVEVectorIndexExtDupBOperand, 64>;
 def sve_elm_idx_extdup_h
-  : AsmVectorIndexOpnd<i64, SVEVectorIndexExtDupHOperand, [{ return ((uint64_t)Imm) < 32; }]>;
+  : AsmVectorIndexOpnd<i64, SVEVectorIndexExtDupHOperand, 32>;
 def sve_elm_idx_extdup_s
-  : AsmVectorIndexOpnd<i64, SVEVectorIndexExtDupSOperand, [{ return ((uint64_t)Imm) < 16; }]>;
+  : AsmVectorIndexOpnd<i64, SVEVectorIndexExtDupSOperand, 16>;
 def sve_elm_idx_extdup_d
-  : AsmVectorIndexOpnd<i64, SVEVectorIndexExtDupDOperand, [{ return ((uint64_t)Imm) < 8; }]>;
+  : AsmVectorIndexOpnd<i64, SVEVectorIndexExtDupDOperand, 8>;
 def sve_elm_idx_extdup_q
-  : AsmVectorIndexOpnd<i64, SVEVectorIndexExtDupQOperand, [{ return ((uint64_t)Imm) < 4; }]>;
+  : AsmVectorIndexOpnd<i64, SVEVectorIndexExtDupQOperand, 4>;
 
 // 8-bit immediate for AdvSIMD where 64-bit values of the form:
 // aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
Index: llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -169,6 +169,16 @@
     return SelectSVELogicalImm(N, VT, Imm);
   }
 
+  template<unsigned Val>
+  bool SelectVectorIndex(SDValue N, SDValue &Imm) {
+    return SelectVectorIndex(N, Val, Imm);
+  }
+
+  template<unsigned Low, unsigned High>
+  bool SelectSVEShiftImm64(SDValue N, SDValue &Imm) {
+    return SelectSVEShiftImm64(N, Low, High, Imm);
+  }
+
   // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
   template<signed Min, signed Max, signed Scale, bool Shift>
   bool SelectCntImm(SDValue N, SDValue &Imm) {
@@ -271,6 +281,11 @@
   bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
 
   bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm);
+
+  bool SelectVectorIndex(SDValue N, uint64_t Val, SDValue &Imm);
+
+  bool SelectSVEShiftImm64(SDValue N, uint64_t Low, uint64_t High,
+                           SDValue &Imm);
 };
 } // end anonymous namespace
 
@@ -2946,6 +2961,36 @@
   return false;
 }
 
+bool AArch64DAGToDAGISel::SelectVectorIndex(SDValue N, uint64_t Val,
+                                            SDValue &Imm) {
+  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
+    uint64_t ImmVal = CN->getZExtValue();
+    SDLoc DL(N);
+
+    if ((Val == 1 && ImmVal == 1) || (Val != 1 && ImmVal < Val)) {
+      Imm = CurDAG->getTargetConstant(ImmVal, DL, N.getValueType());
+      return true;
+    }
+  }
+
+  return false;
+}
+
+bool AArch64DAGToDAGISel::SelectSVEShiftImm64(SDValue N, uint64_t Low,
+                                              uint64_t High, SDValue &Imm) {
+  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
+    uint64_t ImmVal = CN->getZExtValue();
+    SDLoc DL(N);
+
+    if (ImmVal >= Low && ImmVal <= High) {
+      Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
+      return true;
+    }
+  }
+
+  return false;
+}
+
 bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
   // tagp(FrameIndex, IRGstack, tag_offset):
   // since the offset between FrameIndex and IRGstack is a compile-time
Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -794,7 +794,7 @@
                 [LLVMMatchType<0>,
                  LLVMMatchType<0>,
                  llvm_i32_ty],
-                [IntrNoMem]>;
+                [IntrNoMem, ImmArg<2>]>;
 
   class AdvSIMD_3VectorArgIndexed_Intrinsic
     : Intrinsic<[llvm_anyvector_ty],
@@ -802,7 +802,7 @@
                  LLVMMatchType<0>,
                  LLVMMatchType<0>,
                  llvm_i32_ty],
-                [IntrNoMem]>;
+                [IntrNoMem, ImmArg<3>]>;
 
   class AdvSIMD_Pred1VectorArg_Intrinsic
     : Intrinsic<[llvm_anyvector_ty],
@@ -894,7 +894,7 @@
                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  LLVMMatchType<0>,
                  llvm_i32_ty],
-                [IntrNoMem]>;
+                [IntrNoMem, ImmArg<2>]>;
 
   class AdvSIMD_SVE_ShiftWide_Intrinsic
     : Intrinsic<[llvm_anyvector_ty],
@@ -932,7 +932,7 @@
                  LLVMMatchType<0>,
                  llvm_i32_ty,
                  llvm_i32_ty],
-                [IntrNoMem]>;
+                [IntrNoMem, ImmArg<3>]>;
 
   class AdvSIMD_SVE_EXPA_Intrinsic
     : Intrinsic<[llvm_anyvector_ty],
@@ -1012,7 +1012,7 @@
                  LLVMSubdivide4VectorType<0>,
                  LLVMSubdivide4VectorType<0>,
                  llvm_i32_ty],
-                [IntrNoMem]>;
+                [IntrNoMem, ImmArg<3>]>;
 
   class AdvSIMD_SVE_TBL_Intrinsic
     : Intrinsic<[llvm_anyvector_ty],
@@ -1033,7 +1033,7 @@
                  LLVMSubdivide2VectorType<0>,
                  LLVMSubdivide2VectorType<0>,
                  llvm_i32_ty],
-                [IntrNoMem]>;
+                [IntrNoMem, ImmArg<3>]>;
 
   class SVE2_1VectorArg_Narrowing_Intrinsic
     : Intrinsic<[LLVMSubdivide2VectorType<0>],