LevyHsu updated this revision to Diff 340705.
LevyHsu added a comment.

1. clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbm.c
  - All test cases renamed
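A minimal usage sketch of the new builtins, mirroring the added riscv64-zbm.c test (the helper names are illustrative only; assumes an RV64 target with the experimental Zbm extension enabled):

  // Sketch based on the added tests: each call lowers to the matching
  // llvm.riscv.bmat* intrinsic and selects to a single bmator/bmatxor/bmatflip
  // instruction under Zbm, per the added llc test.
  long or_product(long a, long b)  { return __builtin_riscv_bmator(a, b); }
  long xor_product(long a, long b) { return __builtin_riscv_bmatxor(a, b); }
  long flip(long a)                { return __builtin_riscv_bmatflip(a); }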


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D101248/new/

https://reviews.llvm.org/D101248

Files:
  clang/include/clang/Basic/BuiltinsRISCV.def
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbm.c
  llvm/include/llvm/IR/IntrinsicsRISCV.td
  llvm/lib/Target/RISCV/RISCVInstrInfoB.td
  llvm/test/CodeGen/RISCV/rv64zbm-intrinsic.ll

Index: llvm/test/CodeGen/RISCV/rv64zbm-intrinsic.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv64zbm-intrinsic.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IB
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbm -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IBM
+
+declare i64 @llvm.riscv.bmator.i64(i64 %a, i64 %b)
+
+define i64 @bmator64(i64 %a, i64 %b) nounwind {
+; RV64IB-LABEL: bmator64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    bmator a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBM-LABEL: bmator64:
+; RV64IBM:       # %bb.0:
+; RV64IBM-NEXT:    bmator a0, a0, a1
+; RV64IBM-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.bmator.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.bmatxor.i64(i64 %a, i64 %b)
+
+define i64 @bmatxor64(i64 %a, i64 %b) nounwind {
+; RV64IB-LABEL: bmatxor64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    bmatxor a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBM-LABEL: bmatxor64:
+; RV64IBM:       # %bb.0:
+; RV64IBM-NEXT:    bmatxor a0, a0, a1
+; RV64IBM-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.bmatxor.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.bmatflip.i64(i64 %a)
+
+define i64 @bmatflip64(i64 %a) nounwind {
+; RV64IB-LABEL: bmatflip64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    bmatflip a0, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBM-LABEL: bmatflip64:
+; RV64IBM:       # %bb.0:
+; RV64IBM-NEXT:    bmatflip a0, a0
+; RV64IBM-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.bmatflip.i64(i64 %a)
+  ret i64 %tmp
+}
Index: llvm/lib/Target/RISCV/RISCVInstrInfoB.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -934,6 +934,12 @@
 def : PatGprGpr<int_riscv_clmulr, CLMULR>;
 } // Predicates = [HasStdExtZbc]
 
+let Predicates = [HasStdExtZbm, IsRV64] in {
+def : PatGprGpr<int_riscv_bmator, BMATOR>;
+def : PatGprGpr<int_riscv_bmatxor, BMATXOR>;
+def : PatGpr<int_riscv_bmatflip, BMATFLIP>;
+} // Predicates = [HasStdExtZbm, IsRV64]
+
 let Predicates = [HasStdExtZbr] in {
 def : PatGpr<int_riscv_crc32_b, CRC32B>;
 def : PatGpr<int_riscv_crc32_h, CRC32H>;
Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -89,6 +89,11 @@
   def int_riscv_clmulh : BitManipGPRGPRIntrinsics;
   def int_riscv_clmulr : BitManipGPRGPRIntrinsics;
 
+  // Zbm
+  def int_riscv_bmator   : BitManipGPRGPRIntrinsics;
+  def int_riscv_bmatxor  : BitManipGPRGPRIntrinsics;
+  def int_riscv_bmatflip : BitManipGPRIntrinsics;
+
   // Zbp
   def int_riscv_grev  : BitManipGPRGPRIntrinsics;
   def int_riscv_gorc  : BitManipGPRGPRIntrinsics;
Index: clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbm.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbm.c
@@ -0,0 +1,45 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-zbm -emit-llvm %s -o - \
+// RUN:     | FileCheck %s -check-prefix=RV64ZBM
+
+// RV64ZBM-LABEL: @bmator(
+// RV64ZBM-NEXT:  entry:
+// RV64ZBM-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBM-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBM-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    store i64 [[B:%.*]], i64* [[B_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP1:%.*]] = load i64, i64* [[B_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.bmator.i64(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZBM-NEXT:    ret i64 [[TMP2]]
+//
+long bmator(long a, long b) {
+  return __builtin_riscv_bmator(a, b);
+}
+
+// RV64ZBM-LABEL: @bmatxor(
+// RV64ZBM-NEXT:  entry:
+// RV64ZBM-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBM-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBM-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    store i64 [[B:%.*]], i64* [[B_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP1:%.*]] = load i64, i64* [[B_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.bmatxor.i64(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZBM-NEXT:    ret i64 [[TMP2]]
+//
+long bmatxor(long a, long b) {
+  return __builtin_riscv_bmatxor(a, b);
+}
+
+// RV64ZBM-LABEL: @bmatflip(
+// RV64ZBM-NEXT:  entry:
+// RV64ZBM-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBM-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.bmatflip.i64(i64 [[TMP0]])
+// RV64ZBM-NEXT:    ret i64 [[TMP1]]
+//
+long bmatflip(long a) {
+  return __builtin_riscv_bmatflip(a);
+}
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -17844,6 +17844,9 @@
   case RISCV::BI__builtin_riscv_clmul:
   case RISCV::BI__builtin_riscv_clmulh:
   case RISCV::BI__builtin_riscv_clmulr:
+  case RISCV::BI__builtin_riscv_bmator:
+  case RISCV::BI__builtin_riscv_bmatxor:
+  case RISCV::BI__builtin_riscv_bmatflip:
   case RISCV::BI__builtin_riscv_grev_32:
   case RISCV::BI__builtin_riscv_grev_64:
   case RISCV::BI__builtin_riscv_gorc_32:
@@ -17883,6 +17886,17 @@
       ID = Intrinsic::riscv_clmulr;
       break;
 
+    // Zbm
+    case RISCV::BI__builtin_riscv_bmator:
+      ID = Intrinsic::riscv_bmator;
+      break;
+    case RISCV::BI__builtin_riscv_bmatxor:
+      ID = Intrinsic::riscv_bmatxor;
+      break;
+    case RISCV::BI__builtin_riscv_bmatflip:
+      ID = Intrinsic::riscv_bmatflip;
+      break;
+
     // Zbp
     case RISCV::BI__builtin_riscv_grev_32:
     case RISCV::BI__builtin_riscv_grev_64:
Index: clang/include/clang/Basic/BuiltinsRISCV.def
===================================================================
--- clang/include/clang/Basic/BuiltinsRISCV.def
+++ clang/include/clang/Basic/BuiltinsRISCV.def
@@ -26,6 +26,11 @@
 TARGET_BUILTIN(__builtin_riscv_clmulh, "LiLiLi", "nc", "experimental-zbc")
 TARGET_BUILTIN(__builtin_riscv_clmulr, "LiLiLi", "nc", "experimental-zbc")
 
+// Zbm extension
+TARGET_BUILTIN(__builtin_riscv_bmator, "WiWiWi", "nc", "experimental-zbm,64bit")
+TARGET_BUILTIN(__builtin_riscv_bmatxor, "WiWiWi", "nc", "experimental-zbm,64bit")
+TARGET_BUILTIN(__builtin_riscv_bmatflip, "WiWi", "nc", "experimental-zbm,64bit")
+
 // Zbp extension
 TARGET_BUILTIN(__builtin_riscv_grev_32, "ZiZiZi", "nc", "experimental-zbp")
 TARGET_BUILTIN(__builtin_riscv_grev_64, "WiWiWi", "nc", "experimental-zbp,64bit")