This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rG2f2af2d01763: [RISCV] Change the immediate argument to Zk* 
intrinsics/builtins from i8 to i32. (authored by craig.topper).

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D152627/new/

https://reviews.llvm.org/D152627
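
The functional change is confined to the type of the byte-select immediate:
at the IR level the last parameter of the affected Zk* intrinsics becomes
i32 instead of i8, while it must still be a constant (ImmArg) that fits the
2-bit field of the underlying instruction. A minimal before/after sketch,
using the aes32esi signature exercised by the tests below:

  ; previous signature (old bitcode with this form is auto-upgraded):
  declare i32 @llvm.riscv.aes32esi(i32, i32, i8)
  ; signature after this change:
  declare i32 @llvm.riscv.aes32esi(i32, i32, i32)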

Files:
  clang/include/clang/Basic/BuiltinsRISCV.def
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c
  llvm/include/llvm/IR/IntrinsicsRISCV.td
  llvm/lib/IR/AutoUpgrade.cpp
  llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
  llvm/test/CodeGen/RISCV/rv32zknd-intrinsic-autoupgrade.ll
  llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll
  llvm/test/CodeGen/RISCV/rv32zkne-intrinsic-autoupgrade.ll
  llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll
  llvm/test/CodeGen/RISCV/rv32zksed-intrinsic-autoupgrade.ll
  llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
  llvm/test/CodeGen/RISCV/rv64zksed-intrinsic-autoupgrade.ll
  llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll

Index: llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
+++ llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
@@ -2,24 +2,24 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+zksed -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64ZKSED
 
-declare i64 @llvm.riscv.sm4ks.i64(i64, i64, i8);
+declare i64 @llvm.riscv.sm4ks.i64(i64, i64, i32);
 
 define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind {
 ; RV64ZKSED-LABEL: sm4ks_i64:
 ; RV64ZKSED:       # %bb.0:
 ; RV64ZKSED-NEXT:    sm4ks a0, a0, a1, 0
 ; RV64ZKSED-NEXT:    ret
-  %val = call i64 @llvm.riscv.sm4ks.i64(i64 %a, i64 %b, i8 0)
+  %val = call i64 @llvm.riscv.sm4ks.i64(i64 %a, i64 %b, i32 0)
   ret i64 %val
 }
 
-declare i64 @llvm.riscv.sm4ed.i64(i64, i64, i8);
+declare i64 @llvm.riscv.sm4ed.i64(i64, i64, i32);
 
 define i64 @sm4ed_i64(i64 %a, i64 %b) nounwind {
 ; RV64ZKSED-LABEL: sm4ed_i64:
 ; RV64ZKSED:       # %bb.0:
 ; RV64ZKSED-NEXT:    sm4ed a0, a0, a1, 1
 ; RV64ZKSED-NEXT:    ret
-  %val = call i64 @llvm.riscv.sm4ed.i64(i64 %a, i64 %b, i8 1)
+  %val = call i64 @llvm.riscv.sm4ed.i64(i64 %a, i64 %b, i32 1)
   ret i64 %val
 }
Index: llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
+++ llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
@@ -2,24 +2,24 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+zksed -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZKSED
 
-declare i32 @llvm.riscv.sm4ks.i32(i32, i32, i8);
+declare i32 @llvm.riscv.sm4ks.i32(i32, i32, i32);
 
 define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind {
 ; RV32ZKSED-LABEL: sm4ks_i32:
 ; RV32ZKSED:       # %bb.0:
 ; RV32ZKSED-NEXT:    sm4ks a0, a0, a1, 2
 ; RV32ZKSED-NEXT:    ret
-  %val = call i32 @llvm.riscv.sm4ks.i32(i32 %a, i32 %b, i8 2)
+  %val = call i32 @llvm.riscv.sm4ks.i32(i32 %a, i32 %b, i32 2)
   ret i32 %val
 }
 
-declare i32 @llvm.riscv.sm4ed.i32(i32, i32, i8);
+declare i32 @llvm.riscv.sm4ed.i32(i32, i32, i32);
 
 define i32 @sm4ed_i32(i32 %a, i32 %b) nounwind {
 ; RV32ZKSED-LABEL: sm4ed_i32:
 ; RV32ZKSED:       # %bb.0:
 ; RV32ZKSED-NEXT:    sm4ed a0, a0, a1, 3
 ; RV32ZKSED-NEXT:    ret
-  %val = call i32 @llvm.riscv.sm4ed.i32(i32 %a, i32 %b, i8 3)
+  %val = call i32 @llvm.riscv.sm4ed.i32(i32 %a, i32 %b, i32 3)
   ret i32 %val
 }
Index: llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll
+++ llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll
@@ -2,24 +2,24 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+zkne -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZKNE
 
-declare i32 @llvm.riscv.aes32esi(i32, i32, i8);
+declare i32 @llvm.riscv.aes32esi(i32, i32, i32);
 
 define i32 @aes32esi(i32 %a, i32 %b) nounwind {
 ; RV32ZKNE-LABEL: aes32esi:
 ; RV32ZKNE:       # %bb.0:
 ; RV32ZKNE-NEXT:    aes32esi a0, a0, a1, 2
 ; RV32ZKNE-NEXT:    ret
-    %val = call i32 @llvm.riscv.aes32esi(i32 %a, i32 %b, i8 2)
+    %val = call i32 @llvm.riscv.aes32esi(i32 %a, i32 %b, i32 2)
     ret i32 %val
 }
 
-declare i32 @llvm.riscv.aes32esmi(i32, i32, i8);
+declare i32 @llvm.riscv.aes32esmi(i32, i32, i32);
 
 define i32 @aes32esmi(i32 %a, i32 %b) nounwind {
 ; RV32ZKNE-LABEL: aes32esmi:
 ; RV32ZKNE:       # %bb.0:
 ; RV32ZKNE-NEXT:    aes32esmi a0, a0, a1, 3
 ; RV32ZKNE-NEXT:    ret
-    %val = call i32 @llvm.riscv.aes32esmi(i32 %a, i32 %b, i8 3)
+    %val = call i32 @llvm.riscv.aes32esmi(i32 %a, i32 %b, i32 3)
     ret i32 %val
 }
Index: llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll
+++ llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll
@@ -2,24 +2,24 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+zknd -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZKND
 
-declare i32 @llvm.riscv.aes32dsi(i32, i32, i8);
+declare i32 @llvm.riscv.aes32dsi(i32, i32, i32);
 
 define i32 @aes32dsi(i32 %a, i32 %b) nounwind {
 ; RV32ZKND-LABEL: aes32dsi:
 ; RV32ZKND:       # %bb.0:
 ; RV32ZKND-NEXT:    aes32dsi a0, a0, a1, 0
 ; RV32ZKND-NEXT:    ret
-    %val = call i32 @llvm.riscv.aes32dsi(i32 %a, i32 %b, i8 0)
+    %val = call i32 @llvm.riscv.aes32dsi(i32 %a, i32 %b, i32 0)
     ret i32 %val
 }
 
-declare i32 @llvm.riscv.aes32dsmi(i32, i32, i8);
+declare i32 @llvm.riscv.aes32dsmi(i32, i32, i32);
 
 define i32 @aes32dsmi(i32 %a, i32 %b) nounwind {
 ; RV32ZKND-LABEL: aes32dsmi:
 ; RV32ZKND:       # %bb.0:
 ; RV32ZKND-NEXT:    aes32dsmi a0, a0, a1, 1
 ; RV32ZKND-NEXT:    ret
-    %val = call i32 @llvm.riscv.aes32dsmi(i32 %a, i32 %b, i8 1)
+    %val = call i32 @llvm.riscv.aes32dsmi(i32 %a, i32 %b, i32 1)
     ret i32 %val
 }
Index: llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
@@ -29,7 +29,7 @@
   let OperandNamespace = "RISCVOp";
 }
 
-def byteselect : Operand<i8>, TImmLeaf<i8, [{return isUInt<2>(Imm);}]> {
+def byteselect : Operand<i32>, TImmLeaf<i32, [{return isUInt<2>(Imm);}]> {
   let ParserMatchClass = UImmAsmOperand<2>;
   let DecoderMethod = "decodeUImmOperand<2>";
   let OperandType = "OPERAND_UIMM2";
@@ -134,7 +134,7 @@
 //===----------------------------------------------------------------------===//
 
 class PatGprGprByteSelect<SDPatternOperator OpNode, RVInst Inst>
-    : Pat<(OpNode GPR:$rs1, GPR:$rs2, i8:$imm),
+    : Pat<(OpNode GPR:$rs1, GPR:$rs2, byteselect:$imm),
           (Inst GPR:$rs1, GPR:$rs2, byteselect:$imm)>;
 
 // Zknd
@@ -151,7 +151,7 @@
 
 let Predicates = [HasStdExtZkndOrZkne, IsRV64] in {
 def : PatGprGpr<int_riscv_aes64ks2, AES64KS2>;
-def : Pat<(int_riscv_aes64ks1i GPR:$rs1, i32:$rnum),
+def : Pat<(int_riscv_aes64ks1i GPR:$rs1, rnum:$rnum),
           (AES64KS1I GPR:$rs1, rnum:$rnum)>;
 } // Predicates = [HasStdExtZkndOrZkne, IsRV64]
 
Index: llvm/lib/IR/AutoUpgrade.cpp
===================================================================
--- llvm/lib/IR/AutoUpgrade.cpp
+++ llvm/lib/IR/AutoUpgrade.cpp
@@ -28,6 +28,7 @@
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/IntrinsicsAArch64.h"
 #include "llvm/IR/IntrinsicsARM.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
 #include "llvm/IR/IntrinsicsWebAssembly.h"
 #include "llvm/IR/IntrinsicsX86.h"
 #include "llvm/IR/LLVMContext.h"
@@ -1127,6 +1128,47 @@
     }
     break;
 
+  case 'r':
+    if (Name == "riscv.aes32dsi" &&
+        !F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_aes32dsi);
+      return true;
+    }
+    if (Name == "riscv.aes32dsmi" &&
+        !F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_aes32dsmi);
+      return true;
+    }
+    if (Name == "riscv.aes32esi" &&
+        !F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_aes32esi);
+      return true;
+    }
+    if (Name == "riscv.aes32esmi" &&
+        !F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_aes32esmi);
+      return true;
+    }
+    if (Name.startswith("riscv.sm4ks") &&
+        !F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_sm4ks,
+                                        F->getReturnType());
+      return true;
+    }
+    if (Name.startswith("riscv.sm4ed") &&
+        !F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::riscv_sm4ed,
+                                        F->getReturnType());
+      return true;
+    }
+    break;
+
   case 's':
     if (Name == "stackprotectorcheck") {
       NewFn = nullptr;
@@ -4236,6 +4278,24 @@
     CI->eraseFromParent();
     return;
 
+  case Intrinsic::riscv_aes32dsi:
+  case Intrinsic::riscv_aes32dsmi:
+  case Intrinsic::riscv_aes32esi:
+  case Intrinsic::riscv_aes32esmi:
+  case Intrinsic::riscv_sm4ks:
+  case Intrinsic::riscv_sm4ed: {
+    // The last argument to these intrinsics used to be i8 and changed to i32.
+    Value *Arg2 = CI->getArgOperand(2);
+    if (Arg2->getType()->isIntegerTy(32))
+      return;
+
+    Arg2 = ConstantInt::get(Type::getInt32Ty(C), cast<ConstantInt>(Arg2)->getZExtValue());
+
+    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0),
+                                 CI->getArgOperand(1), Arg2});
+    break;
+  }
+
   case Intrinsic::x86_xop_vfrcz_ss:
   case Intrinsic::x86_xop_vfrcz_sd:
     NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1511,7 +1511,7 @@
 
 class ScalarCryptoByteSelect32
     : DefaultAttrsIntrinsic<[llvm_i32_ty],
-                            [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
+                            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                             [IntrNoMem, IntrSpeculatable,
                              ImmArg<ArgIndex<2>>]>;
 
@@ -1532,7 +1532,7 @@
 
 class ScalarCryptoByteSelectAny
     : DefaultAttrsIntrinsic<[llvm_anyint_ty],
-                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i8_ty],
+                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                             [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;
 
 // Zknd
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c
@@ -10,7 +10,7 @@
 // RV64ZKSED-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
 // RV64ZKSED-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
 // RV64ZKSED-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKSED-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.sm4ks.i64(i64 [[TMP0]], i64 [[TMP1]], i8 0)
+// RV64ZKSED-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.sm4ks.i64(i64 [[TMP0]], i64 [[TMP1]], i32 0)
 // RV64ZKSED-NEXT:    ret i64 [[TMP2]]
 //
 long sm4ks(long rs1, long rs2) {
@@ -25,7 +25,7 @@
 // RV64ZKSED-NEXT:    store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
 // RV64ZKSED-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
 // RV64ZKSED-NEXT:    [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKSED-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.sm4ed.i64(i64 [[TMP0]], i64 [[TMP1]], i8 0)
+// RV64ZKSED-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.sm4ed.i64(i64 [[TMP0]], i64 [[TMP1]], i32 0)
 // RV64ZKSED-NEXT:    ret i64 [[TMP2]]
 //
 long sm4ed(long rs1, long rs2) {
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c
@@ -10,7 +10,7 @@
 // RV32ZKSED-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKSED-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKSED-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ks.i32(i32 [[TMP0]], i32 [[TMP1]], i8 0)
+// RV32ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ks.i32(i32 [[TMP0]], i32 [[TMP1]], i32 0)
 // RV32ZKSED-NEXT:    ret i32 [[TMP2]]
 //
 long sm4ks(long rs1, long rs2) {
@@ -26,7 +26,7 @@
 // RV32ZKSED-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKSED-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKSED-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ed.i32(i32 [[TMP0]], i32 [[TMP1]], i8 0)
+// RV32ZKSED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ed.i32(i32 [[TMP0]], i32 [[TMP1]], i32 0)
 // RV32ZKSED-NEXT:    ret i32 [[TMP2]]
 //
 long sm4ed(long rs1, long rs2) {
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c
@@ -10,7 +10,7 @@
 // RV32ZKNE-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKNE-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKNE-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNE-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32esi(i32 [[TMP0]], i32 [[TMP1]], i8 3)
+// RV32ZKNE-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32esi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
 // RV32ZKNE-NEXT:    ret i32 [[TMP2]]
 //
 int aes32esi(int rs1, int rs2) {
@@ -25,7 +25,7 @@
 // RV32ZKNE-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKNE-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKNE-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNE-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32esmi(i32 [[TMP0]], i32 [[TMP1]], i8 3)
+// RV32ZKNE-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32esmi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
 // RV32ZKNE-NEXT:    ret i32 [[TMP2]]
 //
 int aes32esmi(int rs1, int rs2) {
Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c
@@ -10,7 +10,7 @@
 // RV32ZKND-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKND-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKND-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKND-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32dsi(i32 [[TMP0]], i32 [[TMP1]], i8 3)
+// RV32ZKND-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32dsi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
 // RV32ZKND-NEXT:    ret i32 [[TMP2]]
 //
 int aes32dsi(int rs1, int rs2) {
@@ -25,7 +25,7 @@
 // RV32ZKND-NEXT:    store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
 // RV32ZKND-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
 // RV32ZKND-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKND-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32dsmi(i32 [[TMP0]], i32 [[TMP1]], i8 3)
+// RV32ZKND-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.aes32dsmi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
 // RV32ZKND-NEXT:    ret i32 [[TMP2]]
 //
 int aes32dsmi(int rs1, int rs2) {
Index: clang/include/clang/Basic/BuiltinsRISCV.def
===================================================================
--- clang/include/clang/Basic/BuiltinsRISCV.def
+++ clang/include/clang/Basic/BuiltinsRISCV.def
@@ -38,8 +38,8 @@
 TARGET_BUILTIN(__builtin_riscv_unzip_32, "ZiZi", "nc", "zbkb,32bit")
 
 // Zknd extension
-TARGET_BUILTIN(__builtin_riscv_aes32dsi_32, "ZiZiZiIUc", "nc", "zknd,32bit")
-TARGET_BUILTIN(__builtin_riscv_aes32dsmi_32, "ZiZiZiIUc", "nc", "zknd,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes32dsi_32, "ZiZiZiIUi", "nc", "zknd,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes32dsmi_32, "ZiZiZiIUi", "nc", "zknd,32bit")
 TARGET_BUILTIN(__builtin_riscv_aes64ds_64, "WiWiWi", "nc", "zknd,64bit")
 TARGET_BUILTIN(__builtin_riscv_aes64dsm_64, "WiWiWi", "nc", "zknd,64bit")
 TARGET_BUILTIN(__builtin_riscv_aes64im_64, "WiWi", "nc", "zknd,64bit")
@@ -49,8 +49,8 @@
 TARGET_BUILTIN(__builtin_riscv_aes64ks2_64, "WiWiWi", "nc", "zknd|zkne,64bit")
 
 // Zkne extension
-TARGET_BUILTIN(__builtin_riscv_aes32esi_32, "ZiZiZiIUc", "nc", "zkne,32bit")
-TARGET_BUILTIN(__builtin_riscv_aes32esmi_32, "ZiZiZiIUc", "nc", "zkne,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes32esi_32, "ZiZiZiIUi", "nc", "zkne,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes32esmi_32, "ZiZiZiIUi", "nc", "zkne,32bit")
 TARGET_BUILTIN(__builtin_riscv_aes64es_64, "WiWiWi", "nc", "zkne,64bit")
 TARGET_BUILTIN(__builtin_riscv_aes64esm_64, "WiWiWi", "nc", "zkne,64bit")
 
@@ -72,8 +72,8 @@
 TARGET_BUILTIN(__builtin_riscv_sha512sum1_64, "WiWi", "nc", "zknh,64bit")
 
 // Zksed extension
-TARGET_BUILTIN(__builtin_riscv_sm4ed, "LiLiLiIUc", "nc", "zksed")
-TARGET_BUILTIN(__builtin_riscv_sm4ks, "LiLiLiIUc", "nc", "zksed")
+TARGET_BUILTIN(__builtin_riscv_sm4ed, "LiLiLiIUi", "nc", "zksed")
+TARGET_BUILTIN(__builtin_riscv_sm4ks, "LiLiLiIUi", "nc", "zksed")
 
 // Zksh extension
 TARGET_BUILTIN(__builtin_riscv_sm3p0, "LiLi", "nc", "zksh")
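
The *-intrinsic-autoupgrade.ll tests listed above are not shown in this
excerpt. As a sketch (assumed from the AutoUpgrade.cpp hunk above) of the
transformation they cover: when old bitcode still uses the i8 form, the
declaration is renamed and re-created with the new signature, and the
constant immediate of each call is rebuilt as i32.

  ; old bitcode using the pre-change i8 immediate:
  declare i32 @llvm.riscv.aes32dsi(i32, i32, i8)

  define i32 @caller(i32 %a, i32 %b) {
    %val = call i32 @llvm.riscv.aes32dsi(i32 %a, i32 %b, i8 3)
    ret i32 %val
  }

  ; after auto-upgrade the call is rewritten in place to:
  ;   %val = call i32 @llvm.riscv.aes32dsi(i32 %a, i32 %b, i32 3)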