Author: Craig Topper
Date: 2021-01-12T14:46:16-08:00
New Revision: 1730b0f66adaea6ed65d441dc2032013dd3c3664

URL: https://github.com/llvm/llvm-project/commit/1730b0f66adaea6ed65d441dc2032013dd3c3664
DIFF: https://github.com/llvm/llvm-project/commit/1730b0f66adaea6ed65d441dc2032013dd3c3664.diff

LOG: [RISCV] Remove '.mask' from vcompress intrinsic name. NFC

It has a mask argument, but isn't a masked instruction. It doesn't use
the mask policy or the v0.t syntax.
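As a quick illustration (a sketch distilled from the tests below; the
@example wrapper name is hypothetical, while the declaration matches the
renamed intrinsic's signature), a caller now writes:

    ; The operand list is unchanged by the rename: a merge vector for the
    ; tail elements, the source vector, the mask selecting which elements
    ; are packed, and the vector length (XLenVT: i32 on RV32, i64 on RV64).
    declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
      <vscale x 1 x i8>,
      <vscale x 1 x i8>,
      <vscale x 1 x i1>,
      i32);

    define <vscale x 1 x i8> @example(<vscale x 1 x i8> %merge, <vscale x 1 x i8> %src, <vscale x 1 x i1> %mask, i32 %vl) nounwind {
      %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
        <vscale x 1 x i8> %merge,
        <vscale x 1 x i8> %src,
        <vscale x 1 x i1> %mask,
        i32 %vl)
      ret <vscale x 1 x i8> %a
    }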
Added: 
    

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll

Removed: 
    

################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 8171be8a1ca8..e45be2b72796 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -740,7 +740,7 @@ let TargetPrefix = "riscv" in {
 
   defm vrgather : RISCVBinaryAAX;
 
-  def "int_riscv_vcompress_mask" : RISCVBinaryAAAMask;
 
   defm vaaddu : RISCVSaturatingBinaryAAX;
   defm vaadd : RISCVSaturatingBinaryAAX;
+  def "int_riscv_vcompress" : RISCVBinaryAAAMask;
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 3604a25b0d6a..a715676183e2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -947,7 +947,7 @@ multiclass VPseudoUnaryV_M {
 multiclass VPseudoUnaryV_V_AnyMask {
   foreach m = MxList.m in {
     let VLMul = m.value in
-    def _VM # "_" # m.MX # "_MASK" : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>;
+    def _VM # "_" # m.MX : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>;
   }
 }
 
@@ -1404,12 +1404,12 @@ class VPatUnaryAnyMask<string intrinsic,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        VReg op1_reg_class> :
-  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
+  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
                    (mask_type VR:$rs2),
                    (XLenVT GPR:$vl))),
-  (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+  (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    (result_type result_reg_class:$merge),
                    (op1_type op1_reg_class:$rs1),
                    (mask_type VR:$rs2),
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
index b8d42eeb9e6c..24b6d73d64c3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
@@ -1,20 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vcompress.mask.nxv1i8(
+declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vcompress_mask_vm_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @intrinsic_vcompress_vm_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.mask.nxv1i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
@@ -23,20 +23,20 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vcompress.mask.nxv2i8(
+declare <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vcompress_mask_vm_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @intrinsic_vcompress_vm_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.mask.nxv2i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
@@ -45,20 +45,20 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vcompress.mask.nxv4i8(
+declare <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vcompress_mask_vm_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @intrinsic_vcompress_vm_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.mask.nxv4i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
@@ -67,20 +67,20 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vcompress.mask.nxv8i8(
+declare <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vcompress_mask_vm_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @intrinsic_vcompress_vm_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.mask.nxv8i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
@@ -89,20 +89,20 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vcompress.mask.nxv16i8(
+declare <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vcompress_mask_vm_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @intrinsic_vcompress_vm_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v18, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.mask.nxv16i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
@@ -111,20 +111,20 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vcompress.mask.nxv32i8(
+declare <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vcompress_mask_vm_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @intrinsic_vcompress_vm_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v20, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.mask.nxv32i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
@@ -133,14 +133,14 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vcompress.mask.nxv64i8(
+declare <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vcompress_mask_vm_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @intrinsic_vcompress_vm_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
@@ -148,7 +148,7 @@ define <vscale x 64 x i8> @intrinsic_vcompress_mask_vm_nxv64i8_nxv64i8(<vscale x
 ; CHECK-NEXT:    vcompress.vm v16, v8, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.mask.nxv64i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
@@ -157,20 +157,20 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vcompress.mask.nxv1i16(
+declare <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vcompress_mask_vm_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @intrinsic_vcompress_vm_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.mask.nxv1i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
@@ -179,20 +179,20 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vcompress.mask.nxv2i16(
+declare <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vcompress_mask_vm_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @intrinsic_vcompress_vm_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.mask.nxv2i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
@@ -201,20 +201,20 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vcompress.mask.nxv4i16(
+declare <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vcompress_mask_vm_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @intrinsic_vcompress_vm_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.mask.nxv4i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
@@ -223,20 +223,20 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vcompress.mask.nxv8i16(
+declare <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vcompress_mask_vm_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @intrinsic_vcompress_vm_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v18, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.mask.nxv8i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
@@ -245,20 +245,20 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vcompress.mask.nxv16i16(
+declare <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vcompress_mask_vm_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @intrinsic_vcompress_vm_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v20, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.mask.nxv16i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
@@ -267,14 +267,14 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vcompress.mask.nxv32i16(
+declare <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vcompress_mask_vm_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @intrinsic_vcompress_vm_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
@@ -282,7 +282,7 @@ define <vscale x 32 x i16> @intrinsic_vcompress_mask_vm_nxv32i16_nxv32i16(<vscal
 ; CHECK-NEXT:    vcompress.vm v16, v8, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.mask.nxv32i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
@@ -291,20 +291,20 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vcompress.mask.nxv1i32(
+declare <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vcompress_mask_vm_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @intrinsic_vcompress_vm_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.mask.nxv1i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
@@ -313,20 +313,20 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vcompress.mask.nxv2i32(
+declare <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vcompress_mask_vm_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @intrinsic_vcompress_vm_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.mask.nxv2i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
@@ -335,20 +335,20 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vcompress.mask.nxv4i32(
+declare <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vcompress_mask_vm_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @intrinsic_vcompress_vm_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v18, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.mask.nxv4i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
@@ -357,20 +357,20 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vcompress.mask.nxv8i32(
+declare <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vcompress_mask_vm_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @intrinsic_vcompress_vm_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v20, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.mask.nxv8i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
@@ -379,14 +379,14 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vcompress.mask.nxv16i32(
+declare <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vcompress_mask_vm_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @intrinsic_vcompress_vm_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
@@ -394,7 +394,7 @@ define <vscale x 16 x i32> @intrinsic_vcompress_mask_vm_nxv16i32_nxv16i32(<vscal
 ; CHECK-NEXT:    vcompress.vm v16, v8, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.mask.nxv16i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
@@ -403,20 +403,20 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vcompress.mask.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vcompress_mask_vm_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1f16_nxv1f16:
+define <vscale x 1 x half> @intrinsic_vcompress_vm_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vcompress.mask.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
@@ -425,20 +425,20 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vcompress.mask.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vcompress_mask_vm_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2f16_nxv2f16:
+define <vscale x 2 x half> @intrinsic_vcompress_vm_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vcompress.mask.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
@@ -447,20 +447,20 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vcompress.mask.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vcompress_mask_vm_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4f16_nxv4f16:
+define <vscale x 4 x half> @intrinsic_vcompress_vm_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vcompress.mask.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
@@ -469,20 +469,20 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vcompress.mask.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vcompress_mask_vm_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8f16_nxv8f16:
+define <vscale x 8 x half> @intrinsic_vcompress_vm_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v18, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vcompress.mask.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
@@ -491,20 +491,20 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vcompress.mask.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vcompress_mask_vm_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv16f16_nxv16f16:
+define <vscale x 16 x half> @intrinsic_vcompress_vm_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v20, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vcompress.mask.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
@@ -513,14 +513,14 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vcompress.mask.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vcompress_mask_vm_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv32f16_nxv32f16:
+define <vscale x 32 x half> @intrinsic_vcompress_vm_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
@@ -528,7 +528,7 @@ define <vscale x 32 x half> @intrinsic_vcompress_mask_vm_nxv32f16_nxv32f16(<vsca
 ; CHECK-NEXT:    vcompress.vm v16, v8, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vcompress.mask.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
@@ -537,20 +537,20 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vcompress.mask.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vcompress_mask_vm_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1f32_nxv1f32:
+define <vscale x 1 x float> @intrinsic_vcompress_vm_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vcompress.mask.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
@@ -559,20 +559,20 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vcompress.mask.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vcompress_mask_vm_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2f32_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vcompress_vm_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vcompress.mask.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
@@ -581,20 +581,20 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vcompress.mask.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vcompress_mask_vm_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4f32_nxv4f32:
+define <vscale x 4 x float> @intrinsic_vcompress_vm_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v18, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vcompress.mask.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
@@ -603,20 +603,20 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vcompress.mask.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vcompress_mask_vm_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8f32_nxv8f32:
+define <vscale x 8 x float> @intrinsic_vcompress_vm_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v20, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vcompress.mask.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
@@ -625,14 +625,14 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vcompress.mask.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vcompress_mask_vm_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv16f32_nxv16f32:
+define <vscale x 16 x float> @intrinsic_vcompress_vm_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
@@ -640,7 +640,7 @@ define <vscale x 16 x float> @intrinsic_vcompress_mask_vm_nxv16f32_nxv16f32(<vsc
 ; CHECK-NEXT:    vcompress.vm v16, v8, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vcompress.mask.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
index a2e9df6e3fa2..dd1b48983344 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
@@ -1,20 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vcompress.mask.nxv1i8(
+declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vcompress_mask_vm_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @intrinsic_vcompress_vm_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.mask.nxv1i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
@@ -23,20 +23,20 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vcompress.mask.nxv2i8(
+declare <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vcompress_mask_vm_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @intrinsic_vcompress_vm_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.mask.nxv2i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
@@ -45,20 +45,20 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vcompress.mask.nxv4i8(
+declare <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vcompress_mask_vm_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @intrinsic_vcompress_vm_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.mask.nxv4i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
@@ -67,20 +67,20 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vcompress.mask.nxv8i8(
+declare <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vcompress_mask_vm_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @intrinsic_vcompress_vm_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.mask.nxv8i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
@@ -89,20 +89,20 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vcompress.mask.nxv16i8(
+declare <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vcompress_mask_vm_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @intrinsic_vcompress_vm_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v18, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.mask.nxv16i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
@@ -111,20 +111,20 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vcompress.mask.nxv32i8(
+declare <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vcompress_mask_vm_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @intrinsic_vcompress_vm_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v20, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.mask.nxv32i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
@@ -133,14 +133,14 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vcompress.mask.nxv64i8(
+declare <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vcompress_mask_vm_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @intrinsic_vcompress_vm_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
@@ -148,7 +148,7 @@ define <vscale x 64 x i8> @intrinsic_vcompress_mask_vm_nxv64i8_nxv64i8(<vscale x
 ; CHECK-NEXT:    vcompress.vm v16, v8, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.mask.nxv64i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
@@ -157,20 +157,20 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vcompress.mask.nxv1i16(
+declare <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vcompress_mask_vm_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @intrinsic_vcompress_vm_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.mask.nxv1i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
@@ -179,20 +179,20 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vcompress.mask.nxv2i16(
+declare <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vcompress_mask_vm_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @intrinsic_vcompress_vm_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.mask.nxv2i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
@@ -201,20 +201,20 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vcompress.mask.nxv4i16(
+declare <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vcompress_mask_vm_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @intrinsic_vcompress_vm_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.mask.nxv4i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
@@ -223,20 +223,20 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vcompress.mask.nxv8i16(
+declare <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vcompress_mask_vm_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @intrinsic_vcompress_vm_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v18, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.mask.nxv8i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
@@ -245,20 +245,20 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vcompress.mask.nxv16i16(
+declare <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vcompress_mask_vm_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @intrinsic_vcompress_vm_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v20, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.mask.nxv16i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
@@ -267,14 +267,14 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vcompress.mask.nxv32i16(
+declare <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vcompress_mask_vm_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @intrinsic_vcompress_vm_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
@@ -282,7 +282,7 @@ define <vscale x 32 x i16> @intrinsic_vcompress_mask_vm_nxv32i16_nxv32i16(<vscal
 ; CHECK-NEXT:    vcompress.vm v16, v8, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.mask.nxv32i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
@@ -291,20 +291,20 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vcompress.mask.nxv1i32(
+declare <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vcompress_mask_vm_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @intrinsic_vcompress_vm_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.mask.nxv1i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
@@ -313,20 +313,20 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vcompress.mask.nxv2i32(
+declare <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vcompress_mask_vm_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @intrinsic_vcompress_vm_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.mask.nxv2i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
@@ -335,20 +335,20 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vcompress.mask.nxv4i32(
+declare <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vcompress_mask_vm_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @intrinsic_vcompress_vm_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v18, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.mask.nxv4i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
@@ -357,20 +357,20 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vcompress.mask.nxv8i32(
+declare <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vcompress_mask_vm_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @intrinsic_vcompress_vm_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v20, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.mask.nxv8i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
@@ -379,14 +379,14 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vcompress.mask.nxv16i32(
+declare <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vcompress_mask_vm_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @intrinsic_vcompress_vm_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
@@ -394,7 +394,7 @@ define <vscale x 16 x i32> @intrinsic_vcompress_mask_vm_nxv16i32_nxv16i32(<vscal
 ; CHECK-NEXT:    vcompress.vm v16, v8, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.mask.nxv16i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
@@ -403,20 +403,20 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vcompress.mask.nxv1i64(
+declare <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vcompress_mask_vm_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1i64_nxv1i64:
+define <vscale x 1 x i64> @intrinsic_vcompress_vm_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vcompress.mask.nxv1i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
@@ -425,20 +425,20 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vcompress.mask.nxv2i64(
+declare <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vcompress_mask_vm_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2i64_nxv2i64:
+define <vscale x 2 x i64> @intrinsic_vcompress_vm_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v18, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vcompress.mask.nxv2i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
@@ -447,20 +447,20 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vcompress.mask.nxv4i64(
+declare <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vcompress_mask_vm_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4i64_nxv4i64:
+define <vscale x 4 x i64> @intrinsic_vcompress_vm_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v20, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vcompress.mask.nxv4i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
@@ -469,14 +469,14 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vcompress.mask.nxv8i64(
+declare <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vcompress_mask_vm_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8i64_nxv8i64:
+define <vscale x 8 x i64> @intrinsic_vcompress_vm_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
@@ -484,7 +484,7 @@ define <vscale x 8 x i64> @intrinsic_vcompress_mask_vm_nxv8i64_nxv8i64(<vscale x
 ; CHECK-NEXT:    vcompress.vm v16, v8, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vcompress.mask.nxv8i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
@@ -493,20 +493,20 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vcompress.mask.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x half> @intrinsic_vcompress_mask_vm_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1f16_nxv1f16:
+define <vscale x 1 x half> @intrinsic_vcompress_vm_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vcompress.mask.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
@@ -515,20 +515,20 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vcompress.mask.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x half> @intrinsic_vcompress_mask_vm_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2f16_nxv2f16:
+define <vscale x 2 x half> @intrinsic_vcompress_vm_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vcompress.mask.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
@@ -537,20 +537,20 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vcompress.mask.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x half> @intrinsic_vcompress_mask_vm_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4f16_nxv4f16:
+define <vscale x 4 x half> @intrinsic_vcompress_vm_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v16, v17, v0
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vcompress.mask.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
@@ -559,20 +559,20 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vcompress.mask.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x half> @intrinsic_vcompress_mask_vm_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8f16_nxv8f16:
+define <vscale x 8 x half> @intrinsic_vcompress_vm_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 
8 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu ; CHECK-NEXT: vcompress.vm v16, v18, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 8 x half> @llvm.riscv.vcompress.mask.nxv8f16( + %a = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16( <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, @@ -581,20 +581,20 @@ entry: ret <vscale x 8 x half> %a } -declare <vscale x 16 x half> @llvm.riscv.vcompress.mask.nxv16f16( +declare <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16( <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i64); -define <vscale x 16 x half> @intrinsic_vcompress_mask_vm_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv16f16_nxv16f16: +define <vscale x 16 x half> @intrinsic_vcompress_vm_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu ; CHECK-NEXT: vcompress.vm v16, v20, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 16 x half> @llvm.riscv.vcompress.mask.nxv16f16( + %a = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16( <vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, @@ -603,14 +603,14 @@ entry: ret <vscale x 16 x half> %a } -declare <vscale x 32 x half> @llvm.riscv.vcompress.mask.nxv32f16( +declare <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16( <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i64); -define <vscale x 32 x half> @intrinsic_vcompress_mask_vm_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv32f16_nxv32f16: +define <vscale x 32 x half> @intrinsic_vcompress_vm_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) @@ -618,7 +618,7 @@ define <vscale x 32 x half> @intrinsic_vcompress_mask_vm_nxv32f16_nxv32f16(<vsca ; CHECK-NEXT: vcompress.vm v16, v8, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 32 x half> @llvm.riscv.vcompress.mask.nxv32f16( + %a = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16( <vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, @@ -627,20 +627,20 @@ entry: ret <vscale x 32 x half> %a } -declare <vscale x 1 x float> @llvm.riscv.vcompress.mask.nxv1f32( +declare <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32( <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i64); -define <vscale x 1 x float> @intrinsic_vcompress_mask_vm_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1f32_nxv1f32: +define <vscale x 1 x float> @intrinsic_vcompress_vm_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu ; CHECK-NEXT: vcompress.vm v16, v17, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = 
call <vscale x 1 x float> @llvm.riscv.vcompress.mask.nxv1f32( + %a = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32( <vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, @@ -649,20 +649,20 @@ entry: ret <vscale x 1 x float> %a } -declare <vscale x 2 x float> @llvm.riscv.vcompress.mask.nxv2f32( +declare <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32( <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64); -define <vscale x 2 x float> @intrinsic_vcompress_mask_vm_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2f32_nxv2f32: +define <vscale x 2 x float> @intrinsic_vcompress_vm_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu ; CHECK-NEXT: vcompress.vm v16, v17, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 2 x float> @llvm.riscv.vcompress.mask.nxv2f32( + %a = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32( <vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, @@ -671,20 +671,20 @@ entry: ret <vscale x 2 x float> %a } -declare <vscale x 4 x float> @llvm.riscv.vcompress.mask.nxv4f32( +declare <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32( <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i64); -define <vscale x 4 x float> @intrinsic_vcompress_mask_vm_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4f32_nxv4f32: +define <vscale x 4 x float> @intrinsic_vcompress_vm_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu ; CHECK-NEXT: vcompress.vm v16, v18, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 4 x float> @llvm.riscv.vcompress.mask.nxv4f32( + %a = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32( <vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, @@ -693,20 +693,20 @@ entry: ret <vscale x 4 x float> %a } -declare <vscale x 8 x float> @llvm.riscv.vcompress.mask.nxv8f32( +declare <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32( <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i64); -define <vscale x 8 x float> @intrinsic_vcompress_mask_vm_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8f32_nxv8f32: +define <vscale x 8 x float> @intrinsic_vcompress_vm_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu ; CHECK-NEXT: vcompress.vm v16, v20, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 8 x float> @llvm.riscv.vcompress.mask.nxv8f32( + %a = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32( <vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, @@ -715,14 +715,14 @@ entry: ret <vscale x 8 x float> %a } -declare <vscale x 16 x float> @llvm.riscv.vcompress.mask.nxv16f32( +declare <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32( <vscale x 16 x 
float>, <vscale x 16 x float>, <vscale x 16 x i1>, i64); -define <vscale x 16 x float> @intrinsic_vcompress_mask_vm_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv16f32_nxv16f32: +define <vscale x 16 x float> @intrinsic_vcompress_vm_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) @@ -730,7 +730,7 @@ define <vscale x 16 x float> @intrinsic_vcompress_mask_vm_nxv16f32_nxv16f32(<vsc ; CHECK-NEXT: vcompress.vm v16, v8, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 16 x float> @llvm.riscv.vcompress.mask.nxv16f32( + %a = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32( <vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, @@ -739,20 +739,20 @@ entry: ret <vscale x 16 x float> %a } -declare <vscale x 1 x double> @llvm.riscv.vcompress.mask.nxv1f64( +declare <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64( <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i64); -define <vscale x 1 x double> @intrinsic_vcompress_mask_vm_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv1f64_nxv1f64: +define <vscale x 1 x double> @intrinsic_vcompress_vm_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu ; CHECK-NEXT: vcompress.vm v16, v17, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 1 x double> @llvm.riscv.vcompress.mask.nxv1f64( + %a = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64( <vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, @@ -761,20 +761,20 @@ entry: ret <vscale x 1 x double> %a } -declare <vscale x 2 x double> @llvm.riscv.vcompress.mask.nxv2f64( +declare <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64( <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i64); -define <vscale x 2 x double> @intrinsic_vcompress_mask_vm_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv2f64_nxv2f64: +define <vscale x 2 x double> @intrinsic_vcompress_vm_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu ; CHECK-NEXT: vcompress.vm v16, v18, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 2 x double> @llvm.riscv.vcompress.mask.nxv2f64( + %a = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64( <vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, @@ -783,20 +783,20 @@ entry: ret <vscale x 2 x double> %a } -declare <vscale x 4 x double> @llvm.riscv.vcompress.mask.nxv4f64( +declare <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64( <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i64); -define <vscale x 4 x double> @intrinsic_vcompress_mask_vm_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) 
nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv4f64_nxv4f64: +define <vscale x 4 x double> @intrinsic_vcompress_vm_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu ; CHECK-NEXT: vcompress.vm v16, v20, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 4 x double> @llvm.riscv.vcompress.mask.nxv4f64( + %a = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64( <vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, @@ -805,14 +805,14 @@ entry: ret <vscale x 4 x double> %a } -declare <vscale x 8 x double> @llvm.riscv.vcompress.mask.nxv8f64( +declare <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64( <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i64); -define <vscale x 8 x double> @intrinsic_vcompress_mask_vm_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vcompress_mask_vm_nxv8f64_nxv8f64: +define <vscale x 8 x double> @intrinsic_vcompress_vm_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) @@ -820,7 +820,7 @@ define <vscale x 8 x double> @intrinsic_vcompress_mask_vm_nxv8f64_nxv8f64(<vscal ; CHECK-NEXT: vcompress.vm v16, v8, v0 ; CHECK-NEXT: jalr zero, 0(ra) entry: - %a = call <vscale x 8 x double> @llvm.riscv.vcompress.mask.nxv8f64( + %a = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64( <vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, _______________________________________________ llvm-branch-commits mailing list llvm-branch-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
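
For reference, a minimal standalone .ll sketch of the new spelling. It is not part of the commit: the function @example_vcompress and its value names are hypothetical, while the intrinsic declaration follows the nxv1i64 form updated in the diff above (on rv64 the trailing vector-length operand is i64).

; Hypothetical caller of the renamed intrinsic; operand roles follow the
; vcompress.vm semantics exercised by the tests above.
declare <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
  <vscale x 1 x i64>,  ; merge operand supplying tail elements
  <vscale x 1 x i64>,  ; source vector whose active elements are packed
  <vscale x 1 x i1>,   ; mask selecting the elements to keep
  i64);                ; vector length (vl)

define <vscale x 1 x i64> @example_vcompress(<vscale x 1 x i64> %merge, <vscale x 1 x i64> %src, <vscale x 1 x i1> %mask, i64 %vl) nounwind {
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
    <vscale x 1 x i64> %merge,
    <vscale x 1 x i64> %src,
    <vscale x 1 x i1> %mask,
    i64 %vl)
  ret <vscale x 1 x i64> %a
}

As the CHECK lines throughout the diff show, llc selects a single vcompress.vm instruction for such a call, with the mask passed as an ordinary data operand in v0 rather than via the masked v0.t syntax.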