kmclaughlin created this revision. kmclaughlin added reviewers: sdesmalen, SjoerdMeijer, greened. Herald added subscribers: psnobl, rkruppe, kristof.beyls, tschuett. Herald added a reviewer: rengolin. kmclaughlin added a parent revision: D67549: [IntrinsicEmitter] Add overloaded types for SVE intrinsics (Subdivide2 & Subdivide4).
Implements the following intrinsics: - int_aarch64_sve_sunpkhi - int_aarch64_sve_sunpklo - int_aarch64_sve_uunpkhi - int_aarch64_sve_uunpklo This patch also adds AArch64ISD nodes for UNPK instead of implementing the intrinsics directly, as they are required for a future patch which implements the sign/zero extension of legal vectors. This patch includes tests for the Subdivide2Argument type added by D67549 <https://reviews.llvm.org/D67549> https://reviews.llvm.org/D67550 Files: include/llvm/IR/IntrinsicsAArch64.td lib/Target/AArch64/AArch64ISelLowering.cpp lib/Target/AArch64/AArch64ISelLowering.h lib/Target/AArch64/AArch64InstrInfo.td lib/Target/AArch64/AArch64SVEInstrInfo.td lib/Target/AArch64/SVEInstrFormats.td test/CodeGen/AArch64/sve-intrinsics-perm-select.ll
Index: test/CodeGen/AArch64/sve-intrinsics-perm-select.ll =================================================================== --- /dev/null +++ test/CodeGen/AArch64/sve-intrinsics-perm-select.ll @@ -0,0 +1,129 @@ +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s + +; +; SUNPKHI +; + +define <vscale x 8 x i16> @sunpkhi_i16(<vscale x 16 x i8> %a) { +; CHECK-LABEL: sunpkhi_i16 +; CHECK: sunpkhi z0.h, z0.b +; CHECK-NEXT: ret + %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sunpkhi.nxv8i16(<vscale x 16 x i8> %a) + ret <vscale x 8 x i16> %res +} + +define <vscale x 4 x i32> @sunpkhi_i32(<vscale x 8 x i16> %a) { +; CHECK-LABEL: sunpkhi_i32 +; CHECK: sunpkhi z0.s, z0.h +; CHECK-NEXT: ret + %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sunpkhi.nxv4i32(<vscale x 8 x i16> %a) + ret <vscale x 4 x i32> %res +} + +define <vscale x 2 x i64> @sunpkhi_i64(<vscale x 4 x i32> %a) { +; CHECK-LABEL: sunpkhi_i64 +; CHECK: sunpkhi z0.d, z0.s +; CHECK-NEXT: ret + %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sunpkhi.nxv2i64(<vscale x 4 x i32> %a) + ret <vscale x 2 x i64> %res +} + +; +; SUNPKLO +; + +define <vscale x 8 x i16> @sunpklo_i16(<vscale x 16 x i8> %a) { +; CHECK-LABEL: sunpklo_i16 +; CHECK: sunpklo z0.h, z0.b +; CHECK-NEXT: ret + %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sunpklo.nxv8i16(<vscale x 16 x i8> %a) + ret <vscale x 8 x i16> %res +} + +define <vscale x 4 x i32> @sunpklo_i32(<vscale x 8 x i16> %a) { +; CHECK-LABEL: sunpklo_i32 +; CHECK: sunpklo z0.s, z0.h +; CHECK-NEXT: ret + %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sunpklo.nxv4i32(<vscale x 8 x i16> %a) + ret <vscale x 4 x i32> %res +} + +define <vscale x 2 x i64> @sunpklo_i64(<vscale x 4 x i32> %a) { +; CHECK-LABEL: sunpklo_i64 +; CHECK: sunpklo z0.d, z0.s +; CHECK-NEXT: ret + %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sunpklo.nxv2i64(<vscale x 4 x i32> %a) + ret <vscale x 2 x i64> %res +} + +; +; UUNPKHI +; + +define <vscale x 8 x i16> @uunpkhi_i16(<vscale x 16 x i8> 
%a) { +; CHECK-LABEL: uunpkhi_i16 +; CHECK: uunpkhi z0.h, z0.b +; CHECK-NEXT: ret + %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uunpkhi.nxv8i16(<vscale x 16 x i8> %a) + ret <vscale x 8 x i16> %res +} + +define <vscale x 4 x i32> @uunpkhi_i32(<vscale x 8 x i16> %a) { +; CHECK-LABEL: uunpkhi_i32 +; CHECK: uunpkhi z0.s, z0.h +; CHECK-NEXT: ret + %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpkhi.nxv4i32(<vscale x 8 x i16> %a) + ret <vscale x 4 x i32> %res +} + +define <vscale x 2 x i64> @uunpkhi_i64(<vscale x 4 x i32> %a) { +; CHECK-LABEL: uunpkhi_i64 +; CHECK: uunpkhi z0.d, z0.s +; CHECK-NEXT: ret + %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpkhi.nxv2i64(<vscale x 4 x i32> %a) + ret <vscale x 2 x i64> %res +} + +; +; UUNPKLO +; + +define <vscale x 8 x i16> @uunpklo_i16(<vscale x 16 x i8> %a) { +; CHECK-LABEL: uunpklo_i16 +; CHECK: uunpklo z0.h, z0.b +; CHECK-NEXT: ret + %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8> %a) + ret <vscale x 8 x i16> %res +} + +define <vscale x 4 x i32> @uunpklo_i32(<vscale x 8 x i16> %a) { +; CHECK-LABEL: uunpklo_i32 +; CHECK: uunpklo z0.s, z0.h +; CHECK-NEXT: ret + %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16> %a) + ret <vscale x 4 x i32> %res +} + +define <vscale x 2 x i64> @uunpklo_i64(<vscale x 4 x i32> %a) { +; CHECK-LABEL: uunpklo_i64 +; CHECK: uunpklo z0.d, z0.s +; CHECK-NEXT: ret + %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %a) + ret <vscale x 2 x i64> %res +} + +declare <vscale x 8 x i16> @llvm.aarch64.sve.sunpkhi.nxv8i16(<vscale x 16 x i8>) +declare <vscale x 4 x i32> @llvm.aarch64.sve.sunpkhi.nxv4i32(<vscale x 8 x i16>) +declare <vscale x 2 x i64> @llvm.aarch64.sve.sunpkhi.nxv2i64(<vscale x 4 x i32>) + +declare <vscale x 8 x i16> @llvm.aarch64.sve.sunpklo.nxv8i16(<vscale x 16 x i8>) +declare <vscale x 4 x i32> @llvm.aarch64.sve.sunpklo.nxv4i32(<vscale x 8 x i16>) +declare <vscale x 2 x i64> 
@llvm.aarch64.sve.sunpklo.nxv2i64(<vscale x 4 x i32>) + +declare <vscale x 8 x i16> @llvm.aarch64.sve.uunpkhi.nxv8i16(<vscale x 16 x i8>) +declare <vscale x 4 x i32> @llvm.aarch64.sve.uunpkhi.nxv4i32(<vscale x 8 x i16>) +declare <vscale x 2 x i64> @llvm.aarch64.sve.uunpkhi.nxv2i64(<vscale x 4 x i32>) + +declare <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8>) +declare <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16>) +declare <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32>) Index: lib/Target/AArch64/SVEInstrFormats.td =================================================================== --- lib/Target/AArch64/SVEInstrFormats.td +++ lib/Target/AArch64/SVEInstrFormats.td @@ -283,6 +283,11 @@ // SVE pattern match helpers. //===----------------------------------------------------------------------===// +class SVE_1_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1, + Instruction inst> +: Pat<(vtd (op vt1:$Op1)), + (inst $Op1)>; + class SVE_3_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1, ValueType vt2, ValueType vt3, Instruction inst> : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3)), @@ -828,7 +833,7 @@ } class sve_int_perm_unpk<bits<2> sz16_64, bits<2> opc, string asm, - ZPRRegOp zprty1, ZPRRegOp zprty2> + ZPRRegOp zprty1, ZPRRegOp zprty2, SDPatternOperator op> : I<(outs zprty1:$Zd), (ins zprty2:$Zn), asm, "\t$Zd, $Zn", "", []>, Sched<[]> { @@ -843,10 +848,14 @@ let Inst{4-0} = Zd; } -multiclass sve_int_perm_unpk<bits<2> opc, string asm> { - def _H : sve_int_perm_unpk<0b01, opc, asm, ZPR16, ZPR8>; - def _S : sve_int_perm_unpk<0b10, opc, asm, ZPR32, ZPR16>; - def _D : sve_int_perm_unpk<0b11, opc, asm, ZPR64, ZPR32>; +multiclass sve_int_perm_unpk<bits<2> opc, string asm, SDPatternOperator op> { + def _H : sve_int_perm_unpk<0b01, opc, asm, ZPR16, ZPR8, op>; + def _S : sve_int_perm_unpk<0b10, opc, asm, ZPR32, ZPR16, op>; + def _D : sve_int_perm_unpk<0b11, opc, asm, ZPR64, ZPR32, op>; + + 
def : SVE_1_Op_Pat<nxv8i16, op, nxv16i8, !cast<Instruction>(NAME # _H)>; + def : SVE_1_Op_Pat<nxv4i32, op, nxv8i16, !cast<Instruction>(NAME # _S)>; + def : SVE_1_Op_Pat<nxv2i64, op, nxv4i32, !cast<Instruction>(NAME # _D)>; } class sve_int_perm_insrs<bits<2> sz8_64, string asm, ZPRRegOp zprty, Index: lib/Target/AArch64/AArch64SVEInstrInfo.td =================================================================== --- lib/Target/AArch64/AArch64SVEInstrInfo.td +++ lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -211,10 +211,10 @@ defm REV_PP : sve_int_perm_reverse_p<"rev">; defm REV_ZZ : sve_int_perm_reverse_z<"rev">; - defm SUNPKLO_ZZ : sve_int_perm_unpk<0b00, "sunpklo">; - defm SUNPKHI_ZZ : sve_int_perm_unpk<0b01, "sunpkhi">; - defm UUNPKLO_ZZ : sve_int_perm_unpk<0b10, "uunpklo">; - defm UUNPKHI_ZZ : sve_int_perm_unpk<0b11, "uunpkhi">; + defm SUNPKLO_ZZ : sve_int_perm_unpk<0b00, "sunpklo", AArch64sunpklo>; + defm SUNPKHI_ZZ : sve_int_perm_unpk<0b01, "sunpkhi", AArch64sunpkhi>; + defm UUNPKLO_ZZ : sve_int_perm_unpk<0b10, "uunpklo", AArch64uunpklo>; + defm UUNPKHI_ZZ : sve_int_perm_unpk<0b11, "uunpkhi", AArch64uunpkhi>; def PUNPKLO_PP : sve_int_perm_punpk<0b0, "punpklo">; def PUNPKHI_PP : sve_int_perm_punpk<0b1, "punpkhi">; Index: lib/Target/AArch64/AArch64InstrInfo.td =================================================================== --- lib/Target/AArch64/AArch64InstrInfo.td +++ lib/Target/AArch64/AArch64InstrInfo.td @@ -421,6 +421,14 @@ def AArch64st2g : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; +def SDT_AArch64unpk : SDTypeProfile<1, 1, [ + SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0> +]>; +def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>; +def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>; +def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>; 
+def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>; + //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// Index: lib/Target/AArch64/AArch64ISelLowering.h =================================================================== --- lib/Target/AArch64/AArch64ISelLowering.h +++ lib/Target/AArch64/AArch64ISelLowering.h @@ -191,6 +191,11 @@ FRECPE, FRECPS, FRSQRTE, FRSQRTS, + SUNPKHI, + SUNPKLO, + UUNPKHI, + UUNPKLO, + // NEON Load/Store with post-increment base updates LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE, LD3post, Index: lib/Target/AArch64/AArch64ISelLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64ISelLowering.cpp +++ lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1301,6 +1301,10 @@ case AArch64ISD::STZG: return "AArch64ISD::STZG"; case AArch64ISD::ST2G: return "AArch64ISD::ST2G"; case AArch64ISD::STZ2G: return "AArch64ISD::STZ2G"; + case AArch64ISD::SUNPKHI: return "AArch64ISD::SUNPKHI"; + case AArch64ISD::SUNPKLO: return "AArch64ISD::SUNPKLO"; + case AArch64ISD::UUNPKHI: return "AArch64ISD::UUNPKHI"; + case AArch64ISD::UUNPKLO: return "AArch64ISD::UUNPKLO"; } return nullptr; } @@ -2839,6 +2843,19 @@ return DAG.getNode(ISD::UMIN, dl, Op.getValueType(), Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::aarch64_sve_sunpkhi: + return DAG.getNode(AArch64ISD::SUNPKHI, dl, Op.getValueType(), + Op.getOperand(1)); + case Intrinsic::aarch64_sve_sunpklo: + return DAG.getNode(AArch64ISD::SUNPKLO, dl, Op.getValueType(), + Op.getOperand(1)); + case Intrinsic::aarch64_sve_uunpkhi: + return DAG.getNode(AArch64ISD::UUNPKHI, dl, Op.getValueType(), + Op.getOperand(1)); + case Intrinsic::aarch64_sve_uunpklo: + return DAG.getNode(AArch64ISD::UUNPKLO, dl, Op.getValueType(), + Op.getOperand(1)); + case Intrinsic::localaddress: { const auto &MF = DAG.getMachineFunction(); const auto *RegInfo = 
Subtarget->getRegisterInfo(); Index: include/llvm/IR/IntrinsicsAArch64.td =================================================================== --- include/llvm/IR/IntrinsicsAArch64.td +++ include/llvm/IR/IntrinsicsAArch64.td @@ -768,6 +768,11 @@ LLVMMatchType<0>], [IntrNoMem]>; + class AdvSIMD_SVE_Unpack_Intrinsic + : Intrinsic<[llvm_anyvector_ty], + [LLVMSubdivide2VectorType<0>], + [IntrNoMem]>; + // This class of intrinsics are not intended to be useful within LLVM IR but // are instead here to support some of the more rigid parts of the ACLE. class Builtin_SVCVT<string name, LLVMType OUT, LLVMType IN> @@ -788,6 +793,16 @@ def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic; // +// Permutations and selection +// + +def int_aarch64_sve_sunpkhi : AdvSIMD_SVE_Unpack_Intrinsic; +def int_aarch64_sve_sunpklo : AdvSIMD_SVE_Unpack_Intrinsic; + +def int_aarch64_sve_uunpkhi : AdvSIMD_SVE_Unpack_Intrinsic; +def int_aarch64_sve_uunpklo : AdvSIMD_SVE_Unpack_Intrinsic; + +// // Floating-point comparisons //
_______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits