Author: Craig Topper
Date: 2020-12-10T19:48:03-08:00
New Revision: b90e2d850e780d290b554963db1cd264625a73a4

URL: https://github.com/llvm/llvm-project/commit/b90e2d850e780d290b554963db1cd264625a73a4
DIFF: https://github.com/llvm/llvm-project/commit/b90e2d850e780d290b554963db1cd264625a73a4.diff

LOG: [RISCV] Use tail agnostic policy for vsetvli instruction emitted in the custom inserter

The compiler makes no effort to preserve the upper (tail) elements. Preserving
them would require another source operand tied to the destination, and a
different intrinsic interface that gives the programmer control of that source.

This patch changes the tail policy to agnostic so that the CPU doesn't need to 
make an effort to preserve them.

This is consistent with the RVV intrinsic spec here 
https://github.com/riscv/rvv-intrinsic-doc/blob/master/rvv-intrinsic-rfc.md#configuration-setting
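
For reference, the encoded vtype immediate in the tests below changes from 12
to 76 for e64/m1; the difference is bit 6, the tail-agnostic bit that this
patch now sets. A minimal sketch of that encoding, with the field layout
inferred from the immediates in these tests (this is not the actual
RISCVVType::encodeVTYPE implementation):

    // Assumed layout: bits [1:0] = LMUL, bits [4:2] = SEW, bit 6 = tail agnostic.
    static unsigned encodeVTypeSketch(unsigned Lmul, unsigned Sew, bool TailAgnostic) {
      return (Lmul & 0x3) | ((Sew & 0x7) << 2) | (TailAgnostic ? (1u << 6) : 0u);
    }

    // e64 is SEW encoding 0b011, m1 is LMUL encoding 0b00:
    //   encodeVTypeSketch(0, 3, /*TailAgnostic=*/false) == 12  // old "tu" immediate
    //   encodeVTypeSketch(0, 3, /*TailAgnostic=*/true)  == 76  // new "ta" immediate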

Differential Revision: https://reviews.llvm.org/D93080

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
    llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 78909b2f4039..d7496b3ac7c9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1952,7 +1952,7 @@ static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
 
   // For simplicity we reuse the vtype representation here.
   MIB.addImm(RISCVVType::encodeVTYPE(Multiplier, ElementWidth,
-                                     /*TailAgnostic*/ false,
+                                     /*TailAgnostic*/ true,
                                      /*MaskAgnostic*/ false));
 
   // Remove (now) redundant operands from pseudo

diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
index bb7228871fc9..b2b07343be2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
@@ -40,23 +40,23 @@ body:             |
 
 # POST-INSERTER: %0:gpr = COPY $x13
 # POST-INSERTER: %4:vr = IMPLICIT_DEF
-# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
 # POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %4, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
 # POST-INSERTER: %6:vr = IMPLICIT_DEF
-# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
 # POST-INSERTER: %7:vr = PseudoVLE64_V_M1 %6, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 # POST-INSERTER: %8:vr = IMPLICIT_DEF
-# POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
 # POST-INSERTER: %9:vr = PseudoVADD_VV_M1 %8, killed %5, killed %7, $noreg, $noreg, -1, implicit $vl, implicit $vtype
-# POST-INSERTER: dead %13:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: dead %13:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
 # POST-INSERTER: PseudoVSE64_V_M1 killed %9, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
-# CODEGEN: vsetvli     a4, a3, e64,m1,tu,mu
+# CODEGEN: vsetvli     a4, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vle64.v        v25, (a1)
-# CODEGEN-NEXT: vsetvli        a1, a3, e64,m1,tu,mu
+# CODEGEN-NEXT: vsetvli        a1, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vle64.v        v26, (a2)
-# CODEGEN-NEXT: vsetvli        a1, a3, e64,m1,tu,mu
+# CODEGEN-NEXT: vsetvli        a1, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vadd.vv        v25, v25, v26
-# CODEGEN-NEXT: vsetvli        a1, a3, e64,m1,tu,mu
+# CODEGEN-NEXT: vsetvli        a1, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vse64.v        v25, (a0)
 # CODEGEN-NEXT: ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
index 1ac50da0858c..d88d354f6302 100644
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
@@ -29,13 +29,13 @@ define void @vadd_vint64m1(
 ; PRE-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)

 ; POST-INSERTER: %4:vr = IMPLICIT_DEF
-; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
 ; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
 ; POST-INSERTER: %6:vr = IMPLICIT_DEF
-; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
 ; POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 ; POST-INSERTER: %8:vr = IMPLICIT_DEF
-; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
 ; POST-INSERTER: %7:vr = PseudoVADD_VV_M1 %8, killed %3, killed %5, $noreg, $noreg, -1, implicit $vl, implicit $vtype
-; POST-INSERTER: dead %12:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: dead %12:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
 ; POST-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
index 68b0b4e18530..52d70831c6a9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
@@ -7,13 +7,13 @@
 define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vscale x 4 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pa
@@ -26,13 +26,13 @@ define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vs
 define void @vadd_vint16m2(<vscale x 8 x i16> *%pc, <vscale x 8 x i16> *%pa, <vscale x 8 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pa
@@ -45,13 +45,13 @@ define void @vadd_vint16m2(<vscale x 8 x i16> *%pc, <vscale x 8 x i16> *%pa, <vs
 define void @vadd_vint16m4(<vscale x 16 x i16> *%pc, <vscale x 16 x i16> *%pa, <vscale x 16 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pa
@@ -64,13 +64,13 @@ define void @vadd_vint16m4(<vscale x 16 x i16> *%pc, <vscale x 16 x i16> *%pa, <
 define void @vadd_vint16m8(<vscale x 32 x i16> *%pc, <vscale x 32 x i16> *%pa, <vscale x 32 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pa
@@ -83,13 +83,13 @@ define void @vadd_vint16m8(<vscale x 32 x i16> *%pc, <vscale x 32 x i16> *%pa, <
 define void @vadd_vint16mf2(<vscale x 2 x i16> *%pc, <vscale x 2 x i16> *%pa, <vscale x 2 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i16>, <vscale x 2 x i16>* %pa
@@ -102,13 +102,13 @@ define void @vadd_vint16mf2(<vscale x 2 x i16> *%pc, <vscale x 2 x i16> *%pa, <v
 define void @vadd_vint16mf4(<vscale x 1 x i16> *%pc, <vscale x 1 x i16> *%pa, <vscale x 1 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pa

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
index bac63747fb1e..e99432109916 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
@@ -7,13 +7,13 @@
 define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vscale x 2 x i32> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pa
@@ -26,13 +26,13 @@ define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vs
 define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vscale x 4 x i32> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pa
@@ -45,13 +45,13 @@ define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vs
 define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vscale x 8 x i32> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pa
@@ -64,13 +64,13 @@ define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vs
 define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <vscale x 16 x i32> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pa
@@ -83,13 +83,13 @@ define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <
 define void @vadd_vint32mf2(<vscale x 1 x i32> *%pc, <vscale x 1 x i32> *%pa, <vscale x 1 x i32> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint32mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pa

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
index 55eb4937cf36..74ac7adef14a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
@@ -7,13 +7,13 @@
 define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vscale x 1 x i64> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
@@ -26,13 +26,13 @@ define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vs
 define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vscale x 2 x i64> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pa
@@ -45,13 +45,13 @@ define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vs
 define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vscale x 4 x i64> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pa
@@ -64,13 +64,13 @@ define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vs
 define void @vadd_vint64m8(<vscale x 8 x i64> *%pc, <vscale x 8 x i64> *%pa, <vscale x 8 x i64> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pa

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
index 5764da47093c..7e575f8b7649 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
@@ -7,13 +7,13 @@
 define void @vadd_vint8m1(<vscale x 8 x i8> *%pc, <vscale x 8 x i8> *%pa, <vscale x 8 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pa
@@ -26,13 +26,13 @@ define void @vadd_vint8m1(<vscale x 8 x i8> *%pc, <vscale x 8 x i8> *%pa, <vscal
 define void @vadd_vint8m2(<vscale x 16 x i8> *%pc, <vscale x 16 x i8> *%pa, <vscale x 16 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pa
@@ -45,13 +45,13 @@ define void @vadd_vint8m2(<vscale x 16 x i8> *%pc, <vscale x 16 x i8> *%pa, <vsc
 define void @vadd_vint8m4(<vscale x 32 x i8> *%pc, <vscale x 32 x i8> *%pa, <vscale x 32 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pa
@@ -64,13 +64,13 @@ define void @vadd_vint8m4(<vscale x 32 x i8> *%pc, <vscale x 32 x i8> *%pa, <vsc
 define void @vadd_vint8m8(<vscale x 64 x i8> *%pc, <vscale x 64 x i8> *%pa, <vscale x 64 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pa
@@ -83,13 +83,13 @@ define void @vadd_vint8m8(<vscale x 64 x i8> *%pc, <vscale x 64 x i8> *%pa, <vsc
 define void @vadd_vint8mf2(<vscale x 4 x i8> *%pc, <vscale x 4 x i8> *%pa, <vscale x 4 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i8>, <vscale x 4 x i8>* %pa
@@ -102,13 +102,13 @@ define void @vadd_vint8mf2(<vscale x 4 x i8> *%pc, <vscale x 4 x i8> *%pa, <vsca
 define void @vadd_vint8mf4(<vscale x 2 x i8> *%pc, <vscale x 2 x i8> *%pa, <vscale x 2 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i8>, <vscale x 2 x i8>* %pa
@@ -121,13 +121,13 @@ define void @vadd_vint8mf4(<vscale x 2 x i8> *%pc, <vscale x 2 x i8> *%pa, <vsca
 define void @vadd_vint8mf8(<vscale x 1 x i8> *%pc, <vscale x 1 x i8> *%pa, <vscale x 1 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8mf8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pa


        