Author: vangthao95
Date: 2026-01-22T09:18:11-08:00
New Revision: 0f117154d79c829a5f10751e3e7c661ff1c72f45
URL: https://github.com/llvm/llvm-project/commit/0f117154d79c829a5f10751e3e7c661ff1c72f45 DIFF: https://github.com/llvm/llvm-project/commit/0f117154d79c829a5f10751e3e7c661ff1c72f45.diff LOG: Revert "[AMDGPU][GlobalISel] Add RegBankLegalize support for G_MUL (#177314)" This reverts commit 39642a4b22a2494a25ee31b06ad5f8e55d11d6d0. Added: Modified: llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll Removed: ################################################################################ diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp index c5e3b3d6056a2..e7705649dd1ac 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp @@ -681,27 +681,6 @@ bool RegBankLegalizeHelper::lowerSplitTo32(MachineInstr &MI) { return true; } -bool RegBankLegalizeHelper::lowerSplitTo32Mul(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(Dst); - assert(DstTy == S64); - auto Op1 = B.buildUnmerge({VgprRB_S32}, MI.getOperand(1).getReg()); - auto Op2 = B.buildUnmerge({VgprRB_S32}, MI.getOperand(2).getReg()); - - // TODO: G_AMDGPU_MAD_* optimizations for G_MUL divergent S64 operation to - // match GlobalISel with old regbankselect. - auto Lo = B.buildMul(VgprRB_S32, Op1.getReg(0), Op2.getReg(0)); - auto Carry = B.buildUMulH(VgprRB_S32, Op1.getReg(0), Op2.getReg(0)); - auto MulLo0Hi1 = B.buildMul(VgprRB_S32, Op1.getReg(0), Op2.getReg(1)); - auto MulHi0Lo1 = B.buildMul(VgprRB_S32, Op1.getReg(1), Op2.getReg(0)); - auto Sum = B.buildAdd(VgprRB_S32, MulLo0Hi1, MulHi0Lo1); - auto Hi = B.buildAdd(VgprRB_S32, Sum, Carry); - - B.buildMergeLikeInstr(Dst, {Lo, Hi}); - MI.eraseFromParent(); - return true; -} - bool RegBankLegalizeHelper::lowerSplitTo16(MachineInstr &MI) { Register Dst = MI.getOperand(0).getReg(); assert(MRI.getType(Dst) == V2S16); @@ -949,8 +928,6 @@ bool RegBankLegalizeHelper::lower(MachineInstr &MI, } case SplitTo32: return lowerSplitTo32(MI); - case SplitTo32Mul: - return lowerSplitTo32Mul(MI); case SplitTo32Select: return lowerSplitTo32Select(MI); case SplitTo32SExtInReg: diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h index 86669ae6ff6c7..f92ed3de6cf27 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h @@ -127,7 +127,6 @@ class RegBankLegalizeHelper { bool lowerS_BFE(MachineInstr &MI); bool lowerUniMAD64(MachineInstr &MI); bool lowerSplitTo32(MachineInstr &MI); - bool lowerSplitTo32Mul(MachineInstr &MI); bool lowerSplitTo16(MachineInstr &MI); bool lowerSplitTo32Select(MachineInstr &MI); bool lowerSplitTo32SExtInReg(MachineInstr &MI); diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp index d05c05599d395..7441846dc3e34 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp @@ -495,17 +495,9 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST, .Uni(V2S16, 
{{UniInVgprV2S16}, {VgprV2S16, VgprV2S16}}) .Div(V2S16, {{VgprV2S16}, {VgprV2S16, VgprV2S16}}); - bool HasVecMulU64 = ST->hasVectorMulU64(); addRulesForGOpcs({G_MUL}, Standard) - .Div(S16, {{Vgpr16}, {Vgpr16, Vgpr16}}) .Uni(S32, {{Sgpr32}, {Sgpr32, Sgpr32}}) - .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}}) - .Uni(S64, {{SgprB64}, {SgprB64, SgprB64}}) - .Uni(V2S16, {{UniInVgprV2S16}, {VgprV2S16, VgprV2S16}}) - .Div(V2S16, {{VgprV2S16}, {VgprV2S16, VgprV2S16}}) - .Uni(S16, {{Sgpr32Trunc}, {Sgpr32AExt, Sgpr32AExt}}) - .Div(S64, {{VgprB64}, {VgprB64, VgprB64}}, HasVecMulU64) - .Div(S64, {{VgprB64}, {VgprB64, VgprB64}, SplitTo32Mul}, !HasVecMulU64); + .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}}); bool hasMulHi = ST->hasScalarMulHiInsts(); addRulesForGOpcs({G_UMULH, G_SMULH}, Standard) diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h index 3b4be7ff4cb83..0d147f30a9d6f 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h @@ -229,7 +229,6 @@ enum LoweringMethodID { UniMul64, DivSMulToMAD, SplitTo32, - SplitTo32Mul, ScalarizeToS16, SplitTo32Select, SplitTo32SExtInReg, diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll index 6593a8288e0c9..1462b5965c0ab 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll @@ -1,12 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s -; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GCN,GFX8 %s -; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s -; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s -; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16, -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-TRUE16 %s -; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16, -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-FAKE16 %s -; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12 %s -; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GCN,GFX8 %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16, -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-TRUE16 %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16, -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-FAKE16 %s +; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12 %s +; RUN: llc 
-global-isel -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s define amdgpu_ps i16 @s_mul_i16(i16 inreg %num, i16 inreg %den) { ; GCN-LABEL: s_mul_i16: @@ -358,132 +358,6 @@ define i32 @v_mul_i32(i32 %num, i32 %den) { ret i32 %result } -define amdgpu_ps <2 x i16> @s_mul_v2i16(<2 x i16> inreg %num, <2 x i16> inreg %den) { -; GFX7-LABEL: s_mul_v2i16: -; GFX7: ; %bb.0: -; GFX7-NEXT: s_mul_i32 s0, s0, s2 -; GFX7-NEXT: s_mul_i32 s1, s1, s3 -; GFX7-NEXT: s_add_i32 s0, s0, s0 -; GFX7-NEXT: s_add_i32 s1, s1, s1 -; GFX7-NEXT: ; return to shader part epilog -; -; GFX8-LABEL: s_mul_v2i16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_lshr_b32 s2, s0, 16 -; GFX8-NEXT: s_lshr_b32 s3, s1, 16 -; GFX8-NEXT: s_mul_i32 s0, s0, s1 -; GFX8-NEXT: s_mul_i32 s1, s2, s3 -; GFX8-NEXT: s_add_i32 s1, s1, s1 -; GFX8-NEXT: s_add_i32 s0, s0, s0 -; GFX8-NEXT: s_and_b32 s1, 0xffff, s1 -; GFX8-NEXT: s_and_b32 s0, 0xffff, s0 -; GFX8-NEXT: s_lshl_b32 s1, s1, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: ; return to shader part epilog -; -; GFX9-LABEL: s_mul_v2i16: -; GFX9: ; %bb.0: -; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: v_pk_mul_lo_u16 v0, s0, v0 -; GFX9-NEXT: v_readfirstlane_b32 s0, v0 -; GFX9-NEXT: s_lshr_b32 s1, s0, 16 -; GFX9-NEXT: s_add_i32 s0, s0, s0 -; GFX9-NEXT: s_add_i32 s1, s1, s1 -; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s1 -; GFX9-NEXT: ; return to shader part epilog -; -; GFX10PLUS-LABEL: s_mul_v2i16: -; GFX10PLUS: ; %bb.0: -; GFX10PLUS-NEXT: v_pk_mul_lo_u16 v0, s0, s1 -; GFX10PLUS-NEXT: v_readfirstlane_b32 s0, v0 -; GFX10PLUS-NEXT: s_lshr_b32 s1, s0, 16 -; GFX10PLUS-NEXT: s_add_i32 s0, s0, s0 -; GFX10PLUS-NEXT: s_add_i32 s1, s1, s1 -; GFX10PLUS-NEXT: s_pack_ll_b32_b16 s0, s0, s1 -; GFX10PLUS-NEXT: ; return to shader part epilog -; -; GFX12-LABEL: s_mul_v2i16: -; GFX12: ; %bb.0: -; GFX12-NEXT: v_pk_mul_lo_u16 v0, s0, s1 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_readfirstlane_b32 s0, v0 -; GFX12-NEXT: s_lshr_b32 s1, s0, 16 -; GFX12-NEXT: s_add_co_i32 s0, s0, s0 -; GFX12-NEXT: s_wait_alu depctr_sa_sdst(0) -; GFX12-NEXT: s_add_co_i32 s1, s1, s1 -; GFX12-NEXT: s_wait_alu depctr_sa_sdst(0) -; GFX12-NEXT: s_pack_ll_b32_b16 s0, s0, s1 -; GFX12-NEXT: s_wait_alu depctr_sa_sdst(0) -; GFX12-NEXT: ; return to shader part epilog -; -; GFX1250-LABEL: s_mul_v2i16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 -; GFX1250-NEXT: v_pk_mul_lo_u16 v0, s0, s1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1250-NEXT: s_lshr_b32 s1, s0, 16 -; GFX1250-NEXT: s_add_co_i32 s0, s0, s0 -; GFX1250-NEXT: s_add_co_i32 s1, s1, s1 -; GFX1250-NEXT: s_pack_ll_b32_b16 s0, s0, s1 -; GFX1250-NEXT: ; return to shader part epilog - %mul = mul <2 x i16> %num, %den - %result = add <2 x i16> %mul, %mul - ret <2 x i16> %result -} - -define <2 x i16> @v_mul_v2i16(<2 x i16> %num, <2 x i16> %den) { -; GFX7-LABEL: v_mul_v2i16: -; GFX7: ; %bb.0: -; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX7-NEXT: v_mul_u32_u24_e32 v0, v0, v2 -; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v3 -; GFX7-NEXT: v_mul_u32_u24_e32 v1, v1, v2 -; GFX7-NEXT: s_setpc_b64 s[30:31] -; -; GFX8-LABEL: v_mul_v2i16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mul_lo_u16_e32 v2, v0, v1 -; GFX8-NEXT: 
v_mul_lo_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX8-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-LABEL: v_mul_v2i16: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1 -; GFX9-NEXT: s_setpc_b64 s[30:31] -; -; GFX10PLUS-LABEL: v_mul_v2i16: -; GFX10PLUS: ; %bb.0: -; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10PLUS-NEXT: v_pk_mul_lo_u16 v0, v0, v1 -; GFX10PLUS-NEXT: s_setpc_b64 s[30:31] -; -; GFX12-LABEL: v_mul_v2i16: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v1 -; GFX12-NEXT: s_setpc_b64 s[30:31] -; -; GFX1250-LABEL: v_mul_v2i16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_pk_mul_lo_u16 v0, v0, v1 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] - %result = mul <2 x i16> %num, %den - ret <2 x i16> %result -} - define amdgpu_ps <2 x i32> @s_mul_v2i32(<2 x i32> inreg %num, <2 x i32> inreg %den) { ; GCN-LABEL: s_mul_v2i32: ; GCN: ; %bb.0: @@ -714,11 +588,10 @@ define i64 @v_mul_i64(i64 %num, i64 %den) { ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mul_hi_u32 v4, v0, v2 -; GFX12-NEXT: v_mul_lo_u32 v3, v0, v3 -; GFX12-NEXT: v_mul_lo_u32 v1, v1, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v0, v3, v[4:5] ; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-NEXT: v_add3_u32 v1, v3, v1, v4 +; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v2, v[3:4] ; GFX12-NEXT: s_setpc_b64 s[30:31] ; ; GFX1250-LABEL: v_mul_i64: @@ -1462,18 +1335,14 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX7-NEXT: s_cselect_b32 s33, 1, 0 ; GFX7-NEXT: v_readfirstlane_b32 s35, v0 ; GFX7-NEXT: s_add_u32 s19, s34, s19 +; GFX7-NEXT: v_mov_b32_e32 v0, s14 ; GFX7-NEXT: s_addc_u32 s28, s35, s28 +; GFX7-NEXT: v_mul_hi_u32 v0, s16, v0 ; GFX7-NEXT: s_cselect_b32 s34, 1, 0 -; GFX7-NEXT: s_cmp_lg_u32 s25, 0 -; GFX7-NEXT: v_mov_b32_e32 v0, s14 -; GFX7-NEXT: s_cselect_b32 s25, 1, 0 ; GFX7-NEXT: s_cmp_lg_u32 s26, 0 -; GFX7-NEXT: v_mul_hi_u32 v0, s16, v0 ; GFX7-NEXT: s_addc_u32 s19, s25, s19 -; GFX7-NEXT: s_cselect_b32 s25, 1, 0 -; GFX7-NEXT: s_cmp_lg_u32 s20, 0 ; GFX7-NEXT: v_mov_b32_e32 v2, s13 -; GFX7-NEXT: s_cselect_b32 s20, 1, 0 +; GFX7-NEXT: s_cselect_b32 s25, 1, 0 ; GFX7-NEXT: s_cmp_lg_u32 s21, 0 ; GFX7-NEXT: v_mul_hi_u32 v6, s1, v2 ; GFX7-NEXT: s_addc_u32 s20, s20, 0 @@ -1549,8 +1418,6 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX7-NEXT: s_add_u32 s27, s39, s27 ; GFX7-NEXT: s_addc_u32 s25, s40, s25 ; GFX7-NEXT: s_cselect_b32 s39, 1, 0 -; GFX7-NEXT: s_cmp_lg_u32 s30, 0 -; GFX7-NEXT: s_cselect_b32 s30, 1, 0 ; GFX7-NEXT: s_cmp_lg_u32 s31, 0 ; GFX7-NEXT: s_addc_u32 s30, s30, 0 ; GFX7-NEXT: s_cmp_lg_u32 s33, 0 @@ -1560,8 +1427,6 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX7-NEXT: s_cmp_lg_u32 s21, 0 ; GFX7-NEXT: s_addc_u32 s21, s30, s27 ; GFX7-NEXT: s_cselect_b32 s27, 1, 0 -; GFX7-NEXT: s_cmp_lg_u32 s22, 0 -; GFX7-NEXT: s_cselect_b32 s22, 1, 0 ; GFX7-NEXT: s_cmp_lg_u32 s23, 0 ; GFX7-NEXT: s_addc_u32 s22, s22, 0 ; GFX7-NEXT: s_cmp_lg_u32 s24, 0 @@ 
-1691,18 +1556,14 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX8-NEXT: s_cselect_b32 s33, 1, 0 ; GFX8-NEXT: v_readfirstlane_b32 s35, v0 ; GFX8-NEXT: s_add_u32 s19, s34, s19 +; GFX8-NEXT: v_mov_b32_e32 v0, s14 ; GFX8-NEXT: s_addc_u32 s28, s35, s28 +; GFX8-NEXT: v_mul_hi_u32 v0, s16, v0 ; GFX8-NEXT: s_cselect_b32 s34, 1, 0 -; GFX8-NEXT: s_cmp_lg_u32 s25, 0 -; GFX8-NEXT: v_mov_b32_e32 v0, s14 -; GFX8-NEXT: s_cselect_b32 s25, 1, 0 ; GFX8-NEXT: s_cmp_lg_u32 s26, 0 -; GFX8-NEXT: v_mul_hi_u32 v0, s16, v0 ; GFX8-NEXT: s_addc_u32 s19, s25, s19 -; GFX8-NEXT: s_cselect_b32 s25, 1, 0 -; GFX8-NEXT: s_cmp_lg_u32 s20, 0 ; GFX8-NEXT: v_mov_b32_e32 v2, s13 -; GFX8-NEXT: s_cselect_b32 s20, 1, 0 +; GFX8-NEXT: s_cselect_b32 s25, 1, 0 ; GFX8-NEXT: s_cmp_lg_u32 s21, 0 ; GFX8-NEXT: v_mul_hi_u32 v6, s1, v2 ; GFX8-NEXT: s_addc_u32 s20, s20, 0 @@ -1778,8 +1639,6 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX8-NEXT: s_add_u32 s27, s39, s27 ; GFX8-NEXT: s_addc_u32 s25, s40, s25 ; GFX8-NEXT: s_cselect_b32 s39, 1, 0 -; GFX8-NEXT: s_cmp_lg_u32 s30, 0 -; GFX8-NEXT: s_cselect_b32 s30, 1, 0 ; GFX8-NEXT: s_cmp_lg_u32 s31, 0 ; GFX8-NEXT: s_addc_u32 s30, s30, 0 ; GFX8-NEXT: s_cmp_lg_u32 s33, 0 @@ -1789,8 +1648,6 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX8-NEXT: s_cmp_lg_u32 s21, 0 ; GFX8-NEXT: s_addc_u32 s21, s30, s27 ; GFX8-NEXT: s_cselect_b32 s27, 1, 0 -; GFX8-NEXT: s_cmp_lg_u32 s22, 0 -; GFX8-NEXT: s_cselect_b32 s22, 1, 0 ; GFX8-NEXT: s_cmp_lg_u32 s23, 0 ; GFX8-NEXT: s_addc_u32 s22, s22, 0 ; GFX8-NEXT: s_cmp_lg_u32 s24, 0 @@ -1898,13 +1755,9 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX9-NEXT: s_add_u32 s19, s34, s19 ; GFX9-NEXT: s_addc_u32 s24, s35, s24 ; GFX9-NEXT: s_cselect_b32 s34, 1, 0 -; GFX9-NEXT: s_cmp_lg_u32 s22, 0 -; GFX9-NEXT: s_cselect_b32 s22, 1, 0 ; GFX9-NEXT: s_cmp_lg_u32 s23, 0 ; GFX9-NEXT: s_addc_u32 s19, s22, s19 ; GFX9-NEXT: s_cselect_b32 s22, 1, 0 -; GFX9-NEXT: s_cmp_lg_u32 s20, 0 -; GFX9-NEXT: s_cselect_b32 s20, 1, 0 ; GFX9-NEXT: s_cmp_lg_u32 s21, 0 ; GFX9-NEXT: s_addc_u32 s20, s20, 0 ; GFX9-NEXT: s_cmp_lg_u32 s22, 0 @@ -1966,8 +1819,6 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX9-NEXT: s_add_u32 s24, s39, s24 ; GFX9-NEXT: s_addc_u32 s22, s40, s22 ; GFX9-NEXT: s_cselect_b32 s39, 1, 0 -; GFX9-NEXT: s_cmp_lg_u32 s30, 0 -; GFX9-NEXT: s_cselect_b32 s30, 1, 0 ; GFX9-NEXT: s_cmp_lg_u32 s31, 0 ; GFX9-NEXT: s_addc_u32 s30, s30, 0 ; GFX9-NEXT: s_cmp_lg_u32 s33, 0 @@ -1977,8 +1828,6 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX9-NEXT: s_cmp_lg_u32 s21, 0 ; GFX9-NEXT: s_addc_u32 s21, s30, s24 ; GFX9-NEXT: s_cselect_b32 s24, 1, 0 -; GFX9-NEXT: s_cmp_lg_u32 s26, 0 -; GFX9-NEXT: s_cselect_b32 s26, 1, 0 ; GFX9-NEXT: s_cmp_lg_u32 s27, 0 ; GFX9-NEXT: s_addc_u32 s26, s26, 0 ; GFX9-NEXT: s_cmp_lg_u32 s28, 0 @@ -2085,18 +1934,12 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX10PLUS-NEXT: s_add_u32 s18, s33, s18 ; GFX10PLUS-NEXT: s_addc_u32 s23, s34, s23 ; GFX10PLUS-NEXT: s_cselect_b32 s33, 1, 0 -; GFX10PLUS-NEXT: s_cmp_lg_u32 s21, 0 -; GFX10PLUS-NEXT: s_mul_hi_u32 s34, s1, s13 -; GFX10PLUS-NEXT: s_cselect_b32 s21, 1, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s22, 0 ; GFX10PLUS-NEXT: s_mul_hi_u32 s22, s0, s14 ; GFX10PLUS-NEXT: s_addc_u32 s18, s21, s18 ; GFX10PLUS-NEXT: s_cselect_b32 s21, 1, 0 -; GFX10PLUS-NEXT: s_cmp_lg_u32 s19, 0 -; GFX10PLUS-NEXT: s_mul_hi_u32 s35, 
s1, s12 -; GFX10PLUS-NEXT: s_cselect_b32 s19, 1, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s20, 0 -; GFX10PLUS-NEXT: s_mul_hi_u32 s36, s2, s11 +; GFX10PLUS-NEXT: s_mul_hi_u32 s34, s1, s13 ; GFX10PLUS-NEXT: s_addc_u32 s19, s19, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s21, 0 ; GFX10PLUS-NEXT: s_mul_i32 s21, s0, s14 @@ -2130,10 +1973,12 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX10PLUS-NEXT: s_add_u32 s23, s23, s24 ; GFX10PLUS-NEXT: s_addc_u32 s21, s34, s21 ; GFX10PLUS-NEXT: s_mul_i32 s34, s1, s12 +; GFX10PLUS-NEXT: s_mul_hi_u32 s35, s1, s12 ; GFX10PLUS-NEXT: s_cselect_b32 s24, 1, 0 ; GFX10PLUS-NEXT: s_add_u32 s23, s34, s23 ; GFX10PLUS-NEXT: s_addc_u32 s21, s35, s21 ; GFX10PLUS-NEXT: s_mul_i32 s35, s2, s11 +; GFX10PLUS-NEXT: s_mul_hi_u32 s36, s2, s11 ; GFX10PLUS-NEXT: s_cselect_b32 s34, 1, 0 ; GFX10PLUS-NEXT: s_add_u32 s23, s35, s23 ; GFX10PLUS-NEXT: s_addc_u32 s21, s36, s21 @@ -2153,38 +1998,34 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX10PLUS-NEXT: s_add_u32 s23, s38, s23 ; GFX10PLUS-NEXT: s_addc_u32 s21, s39, s21 ; GFX10PLUS-NEXT: s_cselect_b32 s38, 1, 0 -; GFX10PLUS-NEXT: s_cmp_lg_u32 s29, 0 -; GFX10PLUS-NEXT: s_mul_i32 s1, s1, s14 -; GFX10PLUS-NEXT: s_cselect_b32 s29, 1, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s30, 0 -; GFX10PLUS-NEXT: s_mul_i32 s2, s2, s13 +; GFX10PLUS-NEXT: s_mul_i32 s1, s1, s14 ; GFX10PLUS-NEXT: s_addc_u32 s29, s29, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s31, 0 -; GFX10PLUS-NEXT: s_mul_i32 s3, s3, s12 +; GFX10PLUS-NEXT: s_mul_i32 s2, s2, s13 ; GFX10PLUS-NEXT: s_addc_u32 s29, s29, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s33, 0 -; GFX10PLUS-NEXT: s_mul_i32 s4, s4, s11 +; GFX10PLUS-NEXT: s_mul_i32 s3, s3, s12 ; GFX10PLUS-NEXT: s_addc_u32 s29, s29, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s20, 0 -; GFX10PLUS-NEXT: s_mul_i32 s5, s5, s10 +; GFX10PLUS-NEXT: s_mul_i32 s4, s4, s11 ; GFX10PLUS-NEXT: s_addc_u32 s20, s29, s23 ; GFX10PLUS-NEXT: s_cselect_b32 s23, 1, 0 -; GFX10PLUS-NEXT: s_cmp_lg_u32 s25, 0 -; GFX10PLUS-NEXT: s_mul_i32 s6, s6, s9 -; GFX10PLUS-NEXT: s_cselect_b32 s25, 1, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s26, 0 ; GFX10PLUS-NEXT: s_mul_i32 s26, s0, s15 ; GFX10PLUS-NEXT: s_addc_u32 s25, s25, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s27, 0 -; GFX10PLUS-NEXT: s_mul_i32 s7, s7, s8 +; GFX10PLUS-NEXT: s_mul_i32 s5, s5, s10 ; GFX10PLUS-NEXT: s_addc_u32 s25, s25, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s28, 0 -; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s8 +; GFX10PLUS-NEXT: s_mul_i32 s6, s6, s9 ; GFX10PLUS-NEXT: s_addc_u32 s25, s25, 0 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s23, 0 +; GFX10PLUS-NEXT: s_mul_i32 s7, s7, s8 ; GFX10PLUS-NEXT: s_addc_u32 s15, s25, s21 ; GFX10PLUS-NEXT: s_addc_u32 s21, s22, s26 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s38, 0 +; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s8 ; GFX10PLUS-NEXT: s_addc_u32 s1, s21, s1 ; GFX10PLUS-NEXT: s_cmp_lg_u32 s37, 0 ; GFX10PLUS-NEXT: s_addc_u32 s1, s1, s2 @@ -2272,18 +2113,12 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX12-NEXT: s_add_co_u32 s18, s33, s18 ; GFX12-NEXT: s_add_co_ci_u32 s23, s34, s23 ; GFX12-NEXT: s_cselect_b32 s33, 1, 0 -; GFX12-NEXT: s_cmp_lg_u32 s21, 0 -; GFX12-NEXT: s_mul_hi_u32 s34, s1, s13 -; GFX12-NEXT: s_cselect_b32 s21, 1, 0 ; GFX12-NEXT: s_cmp_lg_u32 s22, 0 ; GFX12-NEXT: s_mul_hi_u32 s22, s0, s14 ; GFX12-NEXT: s_add_co_ci_u32 s18, s21, s18 ; GFX12-NEXT: s_cselect_b32 s21, 1, 0 -; GFX12-NEXT: s_cmp_lg_u32 s19, 0 -; GFX12-NEXT: s_mul_hi_u32 s35, s1, s12 -; GFX12-NEXT: s_cselect_b32 s19, 1, 0 ; GFX12-NEXT: s_cmp_lg_u32 s20, 0 -; GFX12-NEXT: s_mul_hi_u32 s36, s2, s11 +; 
GFX12-NEXT: s_mul_hi_u32 s34, s1, s13 ; GFX12-NEXT: s_add_co_ci_u32 s19, s19, 0 ; GFX12-NEXT: s_cmp_lg_u32 s21, 0 ; GFX12-NEXT: s_mul_i32 s21, s0, s14 @@ -2317,10 +2152,12 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX12-NEXT: s_add_co_u32 s23, s23, s24 ; GFX12-NEXT: s_add_co_ci_u32 s21, s34, s21 ; GFX12-NEXT: s_mul_i32 s34, s1, s12 +; GFX12-NEXT: s_mul_hi_u32 s35, s1, s12 ; GFX12-NEXT: s_cselect_b32 s24, 1, 0 ; GFX12-NEXT: s_add_co_u32 s23, s34, s23 ; GFX12-NEXT: s_add_co_ci_u32 s21, s35, s21 ; GFX12-NEXT: s_mul_i32 s35, s2, s11 +; GFX12-NEXT: s_mul_hi_u32 s36, s2, s11 ; GFX12-NEXT: s_cselect_b32 s34, 1, 0 ; GFX12-NEXT: s_add_co_u32 s23, s35, s23 ; GFX12-NEXT: s_add_co_ci_u32 s21, s36, s21 @@ -2340,38 +2177,34 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX12-NEXT: s_add_co_u32 s23, s38, s23 ; GFX12-NEXT: s_add_co_ci_u32 s21, s39, s21 ; GFX12-NEXT: s_cselect_b32 s38, 1, 0 -; GFX12-NEXT: s_cmp_lg_u32 s29, 0 -; GFX12-NEXT: s_mul_i32 s1, s1, s14 -; GFX12-NEXT: s_cselect_b32 s29, 1, 0 ; GFX12-NEXT: s_cmp_lg_u32 s30, 0 -; GFX12-NEXT: s_mul_i32 s2, s2, s13 +; GFX12-NEXT: s_mul_i32 s1, s1, s14 ; GFX12-NEXT: s_add_co_ci_u32 s29, s29, 0 ; GFX12-NEXT: s_cmp_lg_u32 s31, 0 -; GFX12-NEXT: s_mul_i32 s3, s3, s12 +; GFX12-NEXT: s_mul_i32 s2, s2, s13 ; GFX12-NEXT: s_add_co_ci_u32 s29, s29, 0 ; GFX12-NEXT: s_cmp_lg_u32 s33, 0 -; GFX12-NEXT: s_mul_i32 s4, s4, s11 +; GFX12-NEXT: s_mul_i32 s3, s3, s12 ; GFX12-NEXT: s_add_co_ci_u32 s29, s29, 0 ; GFX12-NEXT: s_cmp_lg_u32 s20, 0 -; GFX12-NEXT: s_mul_i32 s5, s5, s10 +; GFX12-NEXT: s_mul_i32 s4, s4, s11 ; GFX12-NEXT: s_add_co_ci_u32 s20, s29, s23 ; GFX12-NEXT: s_cselect_b32 s23, 1, 0 -; GFX12-NEXT: s_cmp_lg_u32 s25, 0 -; GFX12-NEXT: s_mul_i32 s6, s6, s9 -; GFX12-NEXT: s_cselect_b32 s25, 1, 0 ; GFX12-NEXT: s_cmp_lg_u32 s26, 0 ; GFX12-NEXT: s_mul_i32 s26, s0, s15 ; GFX12-NEXT: s_add_co_ci_u32 s25, s25, 0 ; GFX12-NEXT: s_cmp_lg_u32 s27, 0 -; GFX12-NEXT: s_mul_i32 s7, s7, s8 +; GFX12-NEXT: s_mul_i32 s5, s5, s10 ; GFX12-NEXT: s_add_co_ci_u32 s25, s25, 0 ; GFX12-NEXT: s_cmp_lg_u32 s28, 0 -; GFX12-NEXT: s_mul_i32 s0, s0, s8 +; GFX12-NEXT: s_mul_i32 s6, s6, s9 ; GFX12-NEXT: s_add_co_ci_u32 s25, s25, 0 ; GFX12-NEXT: s_cmp_lg_u32 s23, 0 +; GFX12-NEXT: s_mul_i32 s7, s7, s8 ; GFX12-NEXT: s_add_co_ci_u32 s15, s25, s21 ; GFX12-NEXT: s_add_co_ci_u32 s21, s22, s26 ; GFX12-NEXT: s_cmp_lg_u32 s38, 0 +; GFX12-NEXT: s_mul_i32 s0, s0, s8 ; GFX12-NEXT: s_add_co_ci_u32 s1, s21, s1 ; GFX12-NEXT: s_cmp_lg_u32 s37, 0 ; GFX12-NEXT: s_add_co_ci_u32 s1, s1, s2 @@ -2460,18 +2293,12 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX1250-NEXT: s_add_co_u32 s18, s33, s18 ; GFX1250-NEXT: s_add_co_ci_u32 s23, s34, s23 ; GFX1250-NEXT: s_cselect_b32 s33, 1, 0 -; GFX1250-NEXT: s_cmp_lg_u32 s21, 0 -; GFX1250-NEXT: s_mul_hi_u32 s34, s1, s13 -; GFX1250-NEXT: s_cselect_b32 s21, 1, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s22, 0 ; GFX1250-NEXT: s_mul_hi_u32 s22, s0, s14 ; GFX1250-NEXT: s_add_co_ci_u32 s18, s21, s18 ; GFX1250-NEXT: s_cselect_b32 s21, 1, 0 -; GFX1250-NEXT: s_cmp_lg_u32 s19, 0 -; GFX1250-NEXT: s_mul_hi_u32 s35, s1, s12 -; GFX1250-NEXT: s_cselect_b32 s19, 1, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s20, 0 -; GFX1250-NEXT: s_mul_hi_u32 s36, s2, s11 +; GFX1250-NEXT: s_mul_hi_u32 s34, s1, s13 ; GFX1250-NEXT: s_add_co_ci_u32 s19, s19, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s21, 0 ; GFX1250-NEXT: s_mul_i32 s21, s0, s14 @@ -2505,10 +2332,12 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; 
GFX1250-NEXT: s_add_co_u32 s23, s23, s24 ; GFX1250-NEXT: s_add_co_ci_u32 s21, s34, s21 ; GFX1250-NEXT: s_mul_i32 s34, s1, s12 +; GFX1250-NEXT: s_mul_hi_u32 s35, s1, s12 ; GFX1250-NEXT: s_cselect_b32 s24, 1, 0 ; GFX1250-NEXT: s_add_co_u32 s23, s34, s23 ; GFX1250-NEXT: s_add_co_ci_u32 s21, s35, s21 ; GFX1250-NEXT: s_mul_i32 s35, s2, s11 +; GFX1250-NEXT: s_mul_hi_u32 s36, s2, s11 ; GFX1250-NEXT: s_cselect_b32 s34, 1, 0 ; GFX1250-NEXT: s_add_co_u32 s23, s35, s23 ; GFX1250-NEXT: s_add_co_ci_u32 s21, s36, s21 @@ -2528,38 +2357,34 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) { ; GFX1250-NEXT: s_add_co_u32 s23, s38, s23 ; GFX1250-NEXT: s_add_co_ci_u32 s21, s39, s21 ; GFX1250-NEXT: s_cselect_b32 s38, 1, 0 -; GFX1250-NEXT: s_cmp_lg_u32 s29, 0 -; GFX1250-NEXT: s_mul_i32 s1, s1, s14 -; GFX1250-NEXT: s_cselect_b32 s29, 1, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s30, 0 -; GFX1250-NEXT: s_mul_i32 s2, s2, s13 +; GFX1250-NEXT: s_mul_i32 s1, s1, s14 ; GFX1250-NEXT: s_add_co_ci_u32 s29, s29, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s31, 0 -; GFX1250-NEXT: s_mul_i32 s3, s3, s12 +; GFX1250-NEXT: s_mul_i32 s2, s2, s13 ; GFX1250-NEXT: s_add_co_ci_u32 s29, s29, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s33, 0 -; GFX1250-NEXT: s_mul_i32 s4, s4, s11 +; GFX1250-NEXT: s_mul_i32 s3, s3, s12 ; GFX1250-NEXT: s_add_co_ci_u32 s29, s29, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s20, 0 -; GFX1250-NEXT: s_mul_i32 s5, s5, s10 +; GFX1250-NEXT: s_mul_i32 s4, s4, s11 ; GFX1250-NEXT: s_add_co_ci_u32 s20, s29, s23 ; GFX1250-NEXT: s_cselect_b32 s23, 1, 0 -; GFX1250-NEXT: s_cmp_lg_u32 s25, 0 -; GFX1250-NEXT: s_mul_i32 s6, s6, s9 -; GFX1250-NEXT: s_cselect_b32 s25, 1, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s26, 0 ; GFX1250-NEXT: s_mul_i32 s26, s0, s15 ; GFX1250-NEXT: s_add_co_ci_u32 s25, s25, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s27, 0 -; GFX1250-NEXT: s_mul_i32 s7, s7, s8 +; GFX1250-NEXT: s_mul_i32 s5, s5, s10 ; GFX1250-NEXT: s_add_co_ci_u32 s25, s25, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s28, 0 -; GFX1250-NEXT: s_mul_i32 s0, s0, s8 +; GFX1250-NEXT: s_mul_i32 s6, s6, s9 ; GFX1250-NEXT: s_add_co_ci_u32 s25, s25, 0 ; GFX1250-NEXT: s_cmp_lg_u32 s23, 0 +; GFX1250-NEXT: s_mul_i32 s7, s7, s8 ; GFX1250-NEXT: s_add_co_ci_u32 s15, s25, s21 ; GFX1250-NEXT: s_add_co_ci_u32 s21, s22, s26 ; GFX1250-NEXT: s_cmp_lg_u32 s38, 0 +; GFX1250-NEXT: s_mul_i32 s0, s0, s8 ; GFX1250-NEXT: s_add_co_ci_u32 s1, s21, s1 ; GFX1250-NEXT: s_cmp_lg_u32 s37, 0 ; GFX1250-NEXT: s_add_co_ci_u32 s1, s1, s2 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir index 7a7353a3a168d..6b91707328dba 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir @@ -1,5 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -run-pass='amdgpu-regbankselect,amdgpu-regbanklegalize' %s -o - | FileCheck %s +# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s --- name: mul_s32_ss @@ -107,13 +107,13 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3 ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY]](s64) ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY1]](s64) - ; CHECK-NEXT: [[MUL:%[0-9]+]]:vgpr(s32) = G_MUL [[UV]], [[UV2]] ; CHECK-NEXT: [[UMULH:%[0-9]+]]:vgpr(s32) = G_UMULH [[UV]], [[UV2]] - ; CHECK-NEXT: 
[[MUL1:%[0-9]+]]:vgpr(s32) = G_MUL [[UV]], [[UV3]] - ; CHECK-NEXT: [[MUL2:%[0-9]+]]:vgpr(s32) = G_MUL [[UV1]], [[UV2]] - ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[MUL1]], [[MUL2]] - ; CHECK-NEXT: [[ADD1:%[0-9]+]]:vgpr(s32) = G_ADD [[ADD]], [[UMULH]] - ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[MUL]](s32), [[ADD1]](s32) + ; CHECK-NEXT: [[MUL:%[0-9]+]]:vgpr(s32) = G_MUL [[UV]], [[UV3]] + ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[UMULH]], [[MUL]] + ; CHECK-NEXT: [[MUL1:%[0-9]+]]:vgpr(s32) = G_MUL [[UV1]], [[UV2]] + ; CHECK-NEXT: [[ADD1:%[0-9]+]]:vgpr(s32) = G_ADD [[ADD]], [[MUL1]] + ; CHECK-NEXT: [[MUL2:%[0-9]+]]:vgpr(s32) = G_MUL [[UV]], [[UV2]] + ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[MUL2]](s32), [[ADD1]](s32) %0:_(s64) = COPY $vgpr0_vgpr1 %1:_(s64) = COPY $vgpr2_vgpr3 %2:_(s64) = G_MUL %0, %1 @@ -129,9 +129,9 @@ body: | ; CHECK-LABEL: name: mul_s64_zext_ss ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr2_sgpr3 - ; CHECK-NEXT: [[MUL:%[0-9]+]]:sgpr(s64) = G_MUL [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr0_sgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr2_sgpr3 + ; CHECK-NEXT: [[S_MUL_U64_:%[0-9]+]]:sgpr_64(s64) = S_MUL_U64 [[COPY]](s64), [[COPY1]](s64) %0:_(s64) = COPY $sgpr0_sgpr1 %1:_(s64) = COPY $sgpr2_sgpr3 %2:_(s64) = G_AMDGPU_S_MUL_U64_U32 %0, %1 @@ -149,10 +149,10 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3 - ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s32) = G_TRUNC [[COPY]](s64) - ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s32) = G_TRUNC [[COPY1]](s64) - ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 0 - ; CHECK-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:vgpr(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:sgpr(s32) = G_AMDGPU_MAD_U64_U32 [[TRUNC]](s32), [[TRUNC1]], [[C]] + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY]](s64) + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY1]](s64) + ; CHECK-NEXT: [[C:%[0-9]+]]:vreg_64(s64) = G_CONSTANT i64 0 + ; CHECK-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:vgpr(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:vreg_64 = G_AMDGPU_MAD_U64_U32 [[TRUNC]](s32), [[TRUNC1]], [[C]] %0:_(s64) = COPY $vgpr0_vgpr1 %1:_(s64) = COPY $vgpr2_vgpr3 %2:_(s64) = G_AMDGPU_S_MUL_U64_U32 %0, %1 @@ -168,9 +168,9 @@ body: | ; CHECK-LABEL: name: mul_s64_sext_ss ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 - ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr2_sgpr3 - ; CHECK-NEXT: [[MUL:%[0-9]+]]:sgpr(s64) = G_MUL [[COPY]], [[COPY1]] + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr0_sgpr1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr2_sgpr3 + ; CHECK-NEXT: [[S_MUL_U64_:%[0-9]+]]:sgpr_64(s64) = S_MUL_U64 [[COPY]](s64), [[COPY1]](s64) %0:_(s64) = COPY $sgpr0_sgpr1 %1:_(s64) = COPY $sgpr2_sgpr3 %2:_(s64) = G_AMDGPU_S_MUL_I64_I32 %0, %1 @@ -188,10 +188,10 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3 - ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s32) = G_TRUNC [[COPY]](s64) - ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s32) = G_TRUNC [[COPY1]](s64) - ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 0 - ; CHECK-NEXT: 
[[AMDGPU_MAD_I64_I32_:%[0-9]+]]:vgpr(s64), [[AMDGPU_MAD_I64_I32_1:%[0-9]+]]:sgpr(s32) = G_AMDGPU_MAD_I64_I32 [[TRUNC]](s32), [[TRUNC1]], [[C]] + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY]](s64) + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY1]](s64) + ; CHECK-NEXT: [[C:%[0-9]+]]:vreg_64(s64) = G_CONSTANT i64 0 + ; CHECK-NEXT: [[AMDGPU_MAD_I64_I32_:%[0-9]+]]:vgpr(s64), [[AMDGPU_MAD_I64_I32_1:%[0-9]+]]:vreg_64 = G_AMDGPU_MAD_I64_I32 [[TRUNC]](s32), [[TRUNC1]], [[C]] %0:_(s64) = COPY $vgpr0_vgpr1 %1:_(s64) = COPY $vgpr2_vgpr3 %2:_(s64) = G_AMDGPU_S_MUL_I64_I32 %0, %1 diff --git a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll index 38fe2cc5e0529..d74515f19dc8d 100644 --- a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll +++ b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll @@ -1,34 +1,34 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx600 < %s | FileCheck -check-prefixes=GFX67,GFX6,GFX67-SDAG,GFX6-SDAG %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx600 < %s | FileCheck -check-prefixes=GFX67,GFX6,GFX67-GISEL,GFX6-GISEL %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx600 < %s | FileCheck -check-prefixes=GFX67,GFX6,GFX67-GISEL,GFX6-GISEL %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX67,GFX7,GFX67-SDAG,GFX7-SDAG %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX67,GFX7,GFX67-GISEL,GFX7-GISEL %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX67,GFX7,GFX67-GISEL,GFX7-GISEL %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx803 < %s | FileCheck -check-prefixes=GFX8,GFX8-SDAG %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx803 < %s | FileCheck -check-prefixes=GFX8,GFX8-GISEL %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx803 < %s | FileCheck -check-prefixes=GFX8,GFX8-GISEL %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG,GFX900-SDAG,GFX900 %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX9-GISEL,GFX900-GISEL,GFX900 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX9-GISEL,GFX900-GISEL,GFX900 %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX9,GFX90A,GFX9-SDAG,GFX90A-SDAG %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX9,GFX90A,GFX9-GISEL,GFX90A-GISEL %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX9,GFX90A,GFX9-GISEL,GFX90A-GISEL %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GFX10,GFX10-SDAG %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GFX10,GFX10-GISEL %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GFX10,GFX10-GISEL %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 
-mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG,GFX11-SDAG-TRUE16 %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG,GFX11-SDAG-FAKE16 %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-SDAG,GFX1200-SDAG-TRUE16 %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-SDAG,GFX1200-SDAG-FAKE16 %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-GISEL,GFX1200-GISEL-TRUE16 %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-GISEL,GFX1200-GISEL-FAKE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-GISEL,GFX1200-GISEL-TRUE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-GISEL,GFX1200-GISEL-FAKE16 %s ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s ; Test for integer mad formation for patterns used in clpeak @@ -6191,46 +6191,41 @@ define i64 @clpeak_imad_pat_i64(i64 %x, i64 %y) { ; GFX1200-GISEL-NEXT: s_wait_samplecnt 0x0 ; GFX1200-GISEL-NEXT: s_wait_bvhcnt 0x0 ; GFX1200-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1200-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 1 +; GFX1200-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v0, 1 ; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo -; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v4, v0, v2 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v0, v3 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v6, v1, v2 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v7, v0, v2 -; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1200-GISEL-NEXT: v_add3_u32 v4, v5, v6, v4 -; GFX1200-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v7, v0 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v1, vcc_lo +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v4, v2 +; 
GFX1200-GISEL-NEXT: v_mul_lo_u32 v6, v4, v2 +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[0:1], null, v4, v3, v[0:1] +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1200-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v6, v4 +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v7, v4, v2 +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[0:1], null, v5, v2, v[0:1] +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v1, v4, v2 ; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v4, v1, vcc_lo -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v5, v0, v2 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v3, v0, v3 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v0, v2 -; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2 -; GFX1200-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v7, 1 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, v0, v5, vcc_lo +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3) +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v4, v3, v[1:2] +; GFX1200-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v6, 1 ; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v4, vcc_lo -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v4, v0, v4 -; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX1200-GISEL-NEXT: v_add3_u32 v1, v3, v1, v5 -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v3, v0, v2 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v1, v2 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v2, v0, v2 -; GFX1200-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 1 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v6, null, 0, v0, vcc_lo +; GFX1200-GISEL-NEXT: v_add_co_u32 v8, vcc_lo, v7, 1 +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v7, v4 +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v5, v2, v[3:4] +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v7, v4 +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v7, v6, v[0:1] ; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo -; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX1200-GISEL-NEXT: v_add3_u32 v3, v4, v5, v3 -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v4, v2, v0 -; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v1, v2, v1 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v3, v3, v0 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v2, v0 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v1, vcc_lo +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v5, v8 +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v4, v[2:3] +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v5, v3, v[0:1] +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v5, v8 ; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX1200-GISEL-NEXT: v_add3_u32 v1, v1, v3, v4 +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v8, v[2:3] ; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31] ; ; GFX1250-SDAG-LABEL: clpeak_imad_pat_i64: @@ -7085,73 +7080,73 @@ define <2 x i64> 
@clpeak_imad_pat_v2i64(<2 x i64> %x, <2 x i64> %y) { ; GFX1200-GISEL-NEXT: s_wait_samplecnt 0x0 ; GFX1200-GISEL-NEXT: s_wait_bvhcnt 0x0 ; GFX1200-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1200-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 1 +; GFX1200-GISEL-NEXT: v_add_co_u32 v8, vcc_lo, v0, 1 +; GFX1200-GISEL-NEXT: v_add_co_u32 v9, s0, v2, 1 ; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo -; GFX1200-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, 1 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v10, null, 0, v1, vcc_lo +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v8, v4 +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v1, v9, v6 +; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_sdst(0) +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v3, s0 +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v12, v8, v4 +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v13, v9, v6 +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v8, v5, v[0:1] +; GFX1200-GISEL-NEXT: v_add_co_u32 v14, vcc_lo, v12, v8 +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1200-GISEL-NEXT: v_add_co_u32 v15, s0, v13, v9 +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[0:1], null, v9, v7, v[1:2] +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v10, v4, v[2:3] +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v11, v6, v[0:1] +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v14, v4 +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v3, v15, v6 ; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v8, v0, v4 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v9, v0, v5 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v10, v1, v4 -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v11, v2, v6 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v12, v2, v7 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v13, v3, v6 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v14, v0, v4 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v15, v2, v6 -; GFX1200-GISEL-NEXT: v_add3_u32 v8, v9, v10, v8 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v16, null, v1, v10, vcc_lo +; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_sdst(0) ; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX1200-GISEL-NEXT: v_add3_u32 v9, v12, v13, v11 -; GFX1200-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v14, v0 -; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v8, v1, vcc_lo -; GFX1200-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v15, v2 -; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, v9, v3, vcc_lo -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v10, v0, v4 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v0, v5 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v1, v1, v4 -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v11, v2, v6 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v7, v2, v7 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v3, v3, v6 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4 -; GFX1200-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v14, 1 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v2, v2, v6 -; GFX1200-GISEL-NEXT: v_add3_u32 v1, v5, v1, v10 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v11, null, v2, v11, s0 +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 
v[8:9], null, v14, v5, v[0:1] +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[9:10], null, v15, v7, v[3:4] +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v10, v15, v6 +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v7, v14, v4 +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v16, v4, v[8:9] +; GFX1200-GISEL-NEXT: v_add_co_u32 v8, vcc_lo, v12, 1 +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[4:5], null, v11, v6, v[9:10] ; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v8, vcc_lo -; GFX1200-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v15, 1 -; GFX1200-GISEL-NEXT: v_add3_u32 v3, v7, v3, v11 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v1, vcc_lo +; GFX1200-GISEL-NEXT: v_add_co_u32 v9, vcc_lo, v13, 1 +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v7, v8 ; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v9, vcc_lo -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v8, v0, v4 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v0, v5 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v9, v1, v4 -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v11, v2, v6 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v7, v2, v7 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v12, v3, v6 -; GFX1200-GISEL-NEXT: v_add_co_u32 v10, vcc_lo, v0, 1 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v2, null, 0, v2, vcc_lo +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v1, v10, v9 +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v15, v10, v9 +; GFX1200-GISEL-NEXT: v_add_co_u32 v12, vcc_lo, v7, 1 ; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo -; GFX1200-GISEL-NEXT: v_add3_u32 v4, v5, v9, v8 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v2, v6 -; GFX1200-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, 1 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v3, vcc_lo +; GFX1200-GISEL-NEXT: v_add_co_u32 v14, vcc_lo, v10, 1 +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v11, v7, v8 +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[5:6], null, v7, v5, v[0:1] +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v10, v2, v[1:2] +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4) +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v2, v15, v14 ; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0) -; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1200-GISEL-NEXT: v_add3_u32 v6, v7, v12, v11 -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v7, v0, v10 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v1, v0, v1 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v4, v4, v10 -; GFX1200-GISEL-NEXT: v_mul_hi_u32 v8, v5, v2 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v3, v5, v3 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v6, v6, v2 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v0, v10 -; GFX1200-GISEL-NEXT: v_mul_lo_u32 v2, v5, v2 -; GFX1200-GISEL-NEXT: v_add3_u32 v1, v1, v4, v7 -; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX1200-GISEL-NEXT: v_add3_u32 v3, v3, v6, v8 +; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v10, null, 0, v4, vcc_lo +; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v11, v12 +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[5:6], null, v3, v8, v[5:6] +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v4, v9, v[1:2] +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[6:7], null, v11, v13, 
v[0:1] +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v11, v12 +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[7:8], null, v15, v10, v[2:3] +; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v5, v12, v[6:7] +; GFX1200-GISEL-NEXT: v_mul_lo_u32 v2, v15, v14 +; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v3, v14, v[7:8] ; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31] ; ; GFX1250-SDAG-LABEL: clpeak_imad_pat_v2i64: diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll index 98c5473c55f1f..d707ab797f66c 100644 --- a/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll +++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll @@ -1,21 +1,21 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX7,GFX7-SDAG %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX7,GFX7-GISEL %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX7,GFX7-GISEL %s ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GFX8,GFX8-SDAG %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GFX8,GFX8-GISEL %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GFX8,GFX8-GISEL %s ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck -check-prefixes=GFX9,GFX9-GISEL %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck -check-prefixes=GFX9,GFX9-GISEL %s ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX10-SDAG %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX10-GISEL %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX10-GISEL %s ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG,GFX11-SDAG-TRUE16 %s ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG,GFX11-SDAG-FAKE16 %s ; FIXME-TRUE16. 
enable gisel -; XUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s +; XUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG,GFX12-SDAG-TRUE16 %s ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG,GFX12-SDAG-FAKE16 %s -; XUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-TRUE16 %s -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-FAKE16 %s +; XUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-TRUE16 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-FAKE16 %s define i8 @test_vector_reduce_mul_v2i8(<2 x i8> %v) { ; GFX7-SDAG-LABEL: test_vector_reduce_mul_v2i8: @@ -991,19 +991,33 @@ define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) { ; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; -; GFX9-LABEL: test_vector_reduce_mul_v2i16: -; GFX9: ; %bb.0: ; %entry -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_alignbit_b32 v1, s0, v0, 16 -; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1 -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX9-SDAG-LABEL: test_vector_reduce_mul_v2i16: +; GFX9-SDAG: ; %bb.0: ; %entry +; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-SDAG-NEXT: v_alignbit_b32 v1, s0, v0, 16 +; GFX9-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1 +; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31] ; -; GFX10-LABEL: test_vector_reduce_mul_v2i16: -; GFX10: ; %bb.0: ; %entry -; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_alignbit_b32 v1, s4, v0, 16 -; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1 -; GFX10-NEXT: s_setpc_b64 s[30:31] +; GFX9-GISEL-LABEL: test_vector_reduce_mul_v2i16: +; GFX9-GISEL: ; %bb.0: ; %entry +; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1 +; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-SDAG-LABEL: test_vector_reduce_mul_v2i16: +; GFX10-SDAG: ; %bb.0: ; %entry +; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-SDAG-NEXT: v_alignbit_b32 v1, s4, v0, 16 +; GFX10-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1 +; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-GISEL-LABEL: test_vector_reduce_mul_v2i16: +; GFX10-GISEL: ; %bb.0: ; %entry +; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1 +; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] ; ; 
GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_mul_v2i16:
 ; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
@@ -1026,7 +1040,7 @@ define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
 ; GFX11-GISEL-LABEL: test_vector_reduce_mul_v2i16:
 ; GFX11-GISEL: ; %bb.0: ; %entry
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX11-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1064,7 +1078,7 @@ define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
 ; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
 ; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
 ; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1247,22 +1261,39 @@ define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
 ; GFX8-GISEL-NEXT: v_mul_lo_u16_e32 v0, v2, v0
 ; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX9-LABEL: test_vector_reduce_mul_v4i16:
-; GFX9: ; %bb.0: ; %entry
-; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: v_alignbit_b32 v1, s0, v0, 16
-; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX9-NEXT: s_setpc_b64 s[30:31]
+; GFX9-SDAG-LABEL: test_vector_reduce_mul_v4i16:
+; GFX9-SDAG: ; %bb.0: ; %entry
+; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-SDAG-NEXT: s_nop 0
+; GFX9-SDAG-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX9-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX10-LABEL: test_vector_reduce_mul_v4i16:
-; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-NEXT: v_alignbit_b32 v1, s4, v0, 16
-; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-NEXT: s_setpc_b64 s[30:31]
+; GFX9-GISEL-LABEL: test_vector_reduce_mul_v4i16:
+; GFX9-GISEL: ; %bb.0: ; %entry
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-GISEL-NEXT: s_nop 0
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: test_vector_reduce_mul_v4i16:
+; GFX10-SDAG: ; %bb.0: ; %entry
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-SDAG-NEXT: v_alignbit_b32 v1, s4, v0, 16
+; GFX10-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: test_vector_reduce_mul_v4i16:
+; GFX10-GISEL: ; %bb.0: ; %entry
+; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
 ;
 ; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_mul_v4i16:
 ; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
@@ -1290,7 +1321,7 @@ define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
 ; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX11-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
 ;
@@ -1332,7 +1363,7 @@ define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
 ; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
 ; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
 entry:
@@ -1419,7 +1450,7 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
 ; GFX9-GISEL-NEXT: s_nop 0
 ; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX9-GISEL-NEXT: s_nop 0
-; GFX9-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
 ;
@@ -1439,7 +1470,7 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
 ; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
 ; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
 ; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-GISEL-NEXT: v_alignbit_b32 v1, s4, v0, 16
+; GFX10-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
 ;
@@ -1476,7 +1507,7 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
 ; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
 ; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX11-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX11-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1526,7 +1557,7 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
 ; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX12-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1662,7 +1693,7 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
 ; GFX9-GISEL-NEXT: s_nop 0
 ; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX9-GISEL-NEXT: s_nop 0
-; GFX9-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
 ;
@@ -1690,7 +1721,7 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
 ; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
 ; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
 ; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-GISEL-NEXT: v_alignbit_b32 v1, s4, v0, 16
+; GFX10-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
 ;
@@ -1742,7 +1773,7 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
 ; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
 ; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX11-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX11-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1807,7 +1838,7 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
 ; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX12-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
 ; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -2586,11 +2617,10 @@ define i64 @test_vector_reduce_mul_v2i64(<2 x i64> %v) {
 ; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
 ; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
 ; GFX12-GISEL-NEXT: v_mul_hi_u32 v4, v0, v2
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v0, v3
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v0, v3, v[4:5]
 ; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v2
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v3, v1, v4
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v2, v[3:4]
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
 entry:
   %res = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> %v)
@@ -2760,17 +2790,16 @@ define i64 @test_vector_reduce_mul_v3i64(<3 x i64> %v) {
 ; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
 ; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
 ; GFX12-GISEL-NEXT: v_mul_hi_u32 v6, v0, v2
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v0, v3
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v8, v0, v2
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v3, v1, v6
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v2, v0, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v0, v5
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v4
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v3, v1, v2
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[6:7], null, v0, v3, v[6:7]
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v0, v8, v4
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v2, v[6:7]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v8, v5, v[0:1]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v8, v4
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v4, v[2:3]
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
 entry:
   %res = call i64 @llvm.vector.reduce.mul.v3i64(<3 x i64> %v)
@@ -2983,24 +3012,22 @@ define i64 @test_vector_reduce_mul_v4i64(<4 x i64> %v) {
 ; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
 ; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
 ; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v8, v0, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v0, v5
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v9, v2, v6
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v7, v2, v7
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v3, v6
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v2, v6
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-GISEL-NEXT: v_add3_u32 v3, v7, v3, v9
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v5, v1, v8
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v8, v2, v6
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v9, v0, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v10, v0, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v11, v2, v6
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v4, v0, v2
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v0, v3
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v2
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v3, v1, v4
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[7:8], null, v2, v7, v[8:9]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[8:9], null, v0, v5, v[9:10]
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v0, v10, v11
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v3, v6, v[7:8]
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v1, v4, v[8:9]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v10, v2, v[0:1]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v10, v11
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v3, v11, v[1:2]
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
 entry:
   %res = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> %v)
@@ -3385,44 +3412,40 @@ define i64 @test_vector_reduce_mul_v8i64(<8 x i64> %v) {
 ; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
 ; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
 ; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v16, v0, v8
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v17, v0, v8
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v9
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v8
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v8, v2, v10
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v9, v2, v11
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v11, v4, v12
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v13, v4, v13
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v5, v12
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v18, v6, v14
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v15, v6, v15
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v7, v7, v14
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v3, v10
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v4, v4, v12
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v2, v10
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v6, v6, v14
-; GFX12-GISEL-NEXT: v_add3_u32 v5, v13, v5, v11
-; GFX12-GISEL-NEXT: v_add3_u32 v0, v0, v1, v17
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v15, v7, v18
-; GFX12-GISEL-NEXT: v_add3_u32 v3, v9, v3, v8
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v7, v16, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v16, v5
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v8, v2, v6
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v2, v1
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v3, v6
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v4, v16, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v2, v6
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v1, v3, v8
-; GFX12-GISEL-NEXT: v_add3_u32 v0, v5, v0, v7
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v16, v0, v8
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v17, v6, v14
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v21, v0, v8
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v22, v2, v10
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v23, v6, v14
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[18:19], null, v0, v9, v[16:17]
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v9, v2, v10
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v0, v4, v12
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[15:16], null, v6, v15, v[17:18]
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[16:17], null, v2, v11, v[9:10]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[19:20], null, v4, v13, v[0:1]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v9, v4, v12
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v0, v22, v23
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[6:7], null, v7, v14, v[15:16]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v3, v10, v[16:17]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v5, v12, v[19:20]
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v4, v21, v9
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v7, v21, v9
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[5:6], null, v22, v6, v[0:1]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v6, v22, v23
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[0:1], null, v1, v8, v[18:19]
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v3, v4, v2
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v4, v1
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v21, v3, v[4:5]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v2, v23, v[5:6]
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v2, v7, v6
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v0, v2
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v4, v2
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v1, v5, v3
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v0, v9, v[3:4]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v7, v6
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v7, v1, v[2:3]
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v3, v6, v[1:2]
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
 entry:
   %res = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %v)
@@ -4171,90 +4194,75 @@ define i64 @test_vector_reduce_mul_v16i64(<16 x i64> %v) {
 ; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
 ; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
 ; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: scratch_load_b32 v31, off, s32
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v32, v0, v16
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v33, v0, v16
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v17
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v16
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v16, v2, v18
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v17, v2, v18
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v2, v19
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v3, v18
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v18, v4, v20
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v19, v4, v20
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v4, v4, v21
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v5, v20
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v20, v6, v22
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v21, v6, v22
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v6, v6, v23
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v7, v7, v22
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v22, v8, v24
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v23, v8, v24
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v8, v8, v25
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v9, v9, v24
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v24, v10, v26
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v25, v10, v26
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v10, v10, v27
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v11, v11, v26
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v26, v12, v28
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v27, v12, v28
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v12, v12, v29
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v13, v13, v28
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v29, v14, v30
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v15, v15, v30
-; GFX12-GISEL-NEXT: v_add3_u32 v0, v0, v1, v33
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v2, v3, v17
-; GFX12-GISEL-NEXT: v_add3_u32 v2, v4, v5, v19
-; GFX12-GISEL-NEXT: v_add3_u32 v4, v8, v9, v23
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v28, v14, v30
-; GFX12-GISEL-NEXT: v_add3_u32 v3, v6, v7, v21
-; GFX12-GISEL-NEXT: v_add3_u32 v5, v10, v11, v25
-; GFX12-GISEL-NEXT: v_add3_u32 v6, v12, v13, v27
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v8, v32, v22
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v11, v16, v24
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v13, v18, v26
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v4, v32, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v16, v5
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v24
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v6, v18, v6
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v2, v26
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v3, v28
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v22
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v7, v32, v22
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v10, v16, v24
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v12, v18, v26
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v5, v1, v11
-; GFX12-GISEL-NEXT: v_add3_u32 v2, v6, v2, v13
-; GFX12-GISEL-NEXT: v_add3_u32 v0, v4, v0, v8
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v16, v7, v12
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v7, v2
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v12
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v7, v12
-; GFX12-GISEL-NEXT: v_add3_u32 v0, v2, v0, v16
+; GFX12-GISEL-NEXT: scratch_load_b32 v39, off, s32
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v31, v0, v16
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v32, v2, v18
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v33, v4, v20
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v34, v6, v22
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v35, v8, v24
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v36, v10, v26
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v37, v12, v28
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v38, v14, v30
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v51, v2, v18
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v53, v6, v22
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v55, v10, v26
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v65, v14, v30
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[48:49], null, v0, v17, v[31:32]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[31:32], null, v2, v19, v[32:33]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[32:33], null, v4, v21, v[33:34]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[33:34], null, v6, v23, v[34:35]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[34:35], null, v8, v25, v[35:36]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[35:36], null, v10, v27, v[36:37]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[36:37], null, v12, v29, v[37:38]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v50, v0, v16
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v52, v4, v20
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v54, v8, v24
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v64, v12, v28
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v23, v51, v55
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v27, v53, v65
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v6, v53, v65
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[7:8], null, v7, v22, v[33:34]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[16:17], null, v1, v16, v[48:49]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v21, v50, v54
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v25, v52, v64
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[8:9], null, v9, v24, v[34:35]
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v2, v51, v55
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[17:18], null, v3, v18, v[31:32]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[9:10], null, v11, v26, v[35:36]
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v3, v23, v27
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v4, v52, v64
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[18:19], null, v5, v20, v[32:33]
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v0, v50, v54
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v1, v21, v25
 ; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v9, v14, v31
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v14, v20, v28
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[37:38], null, v14, v39, v[38:39]
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[14:15], null, v15, v30, v[37:38]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[10:11], null, v13, v28, v[36:37]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[11:12], null, v51, v9, v[2:3]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v13, v21, v25
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[5:6], null, v53, v14, v[6:7]
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[9:10], null, v52, v10, v[4:5]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[4:5], null, v7, v65, v[5:6]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[5:6], null, v50, v8, v[0:1]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[6:7], null, v17, v55, v[11:12]
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[7:8], null, v18, v64, v[9:10]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v23, v4, v[3:4]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v8, v23, v27
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v16, v54, v[5:6]
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[0:1], null, v21, v7, v[1:2]
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v6, v27, v[2:3]
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v2, v13, v8
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_add3_u32 v9, v9, v15, v29
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v15, v20, v28
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v9, v20, v9
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v4, v10, v15
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v15
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v6, v10, v15
-; GFX12-GISEL-NEXT: v_add3_u32 v3, v9, v3, v14
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v10, v3
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v2, v5, v6
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v3, v1, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v0, v6
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v5, v6
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v5, v1
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v3, v25, v[0:1]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v13, v8
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v13, v1, v[2:3]
 ; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-GISEL-NEXT: v_add3_u32 v1, v1, v3, v2
+; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v3, v8, v[1:2]
 ; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
 entry:
   %res = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> %v)
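
A note on the i64 check changes above: both the removed v_add3_u32-based sequences and the restored v_mad_co_u64_u32-based ones compute the same divergent 64-bit product from 32-bit halves; the low half is a plain 32-bit multiply, and the high half accumulates the 32-bit high product plus the two cross products. A minimal C++ sketch of that arithmetic, assuming only standard integer types (the standalone function and its names are illustrative, not LLVM code):

// Illustrative only: the 32-bit decomposition that the check lines above
// encode. Not taken from the LLVM sources.
#include <cstdint>

uint64_t mul64_from_32bit_halves(uint64_t A, uint64_t B) {
  uint32_t ALo = (uint32_t)A, AHi = (uint32_t)(A >> 32);
  uint32_t BLo = (uint32_t)B, BHi = (uint32_t)(B >> 32);

  uint32_t Lo    = ALo * BLo;                                // v_mul_lo_u32
  uint32_t Carry = (uint32_t)(((uint64_t)ALo * BLo) >> 32);  // v_mul_hi_u32
  // The cross products only contribute to the high half; summing the three
  // 32-bit terms is what v_add3_u32 (or a v_mad_co_u64_u32 chain) does.
  uint32_t Hi    = ALo * BHi + AHi * BLo + Carry;

  return ((uint64_t)Hi << 32) | Lo;
}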
