This revision was automatically updated to reflect the committed changes.
Closed by commit rG9c859fc54d92: [AArch64][SVE] Add SVE2 intrinsics for bit permutation & table lookup (authored by kmclaughlin).

Changed prior to commit:
  https://reviews.llvm.org/D74912?vs=246487&id=246661#toc

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D74912/new/

https://reviews.llvm.org/D74912

Files:
  llvm/include/llvm/IR/IntrinsicsAArch64.td
  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
  llvm/lib/Target/AArch64/SVEInstrFormats.td
  llvm/test/CodeGen/AArch64/sve2-intrinsics-bit-permutation.ll
  llvm/test/CodeGen/AArch64/sve2-intrinsics-perm-tb.ll
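
For illustration (not part of the patch itself): the new tbl2 intrinsic takes
two table vectors plus an index vector, and the two tables are lowered to a
consecutive Z-register pair via REG_SEQUENCE, which is why the checks below
expect a mov to set up the { z1, z2 } pair. A minimal IR sketch, with
illustrative function and value names:

  ; Sketch only: look up bytes from the concatenated tables { %t0, %t1 } by %idx.
  define <vscale x 16 x i8> @tbl2_usage(<vscale x 16 x i8> %t0, <vscale x 16 x i8> %t1,
                                        <vscale x 16 x i8> %idx) {
    %r = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl2.nxv16i8(<vscale x 16 x i8> %t0,
                                                                <vscale x 16 x i8> %t1,
                                                                <vscale x 16 x i8> %idx)
    ret <vscale x 16 x i8> %r
  }
  declare <vscale x 16 x i8> @llvm.aarch64.sve.tbl2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)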

Index: llvm/test/CodeGen/AArch64/sve2-intrinsics-perm-tb.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve2-intrinsics-perm-tb.ll
@@ -0,0 +1,181 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
+
+;
+; TBL2
+;
+
+define <vscale x 16 x i8> @tbl2_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %unused,
+                                  <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: tbl2_b:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.b, { z1.b, z2.b }, z3.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl2.nxv16i8(<vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %b,
+                                                                <vscale x 16 x i8> %c)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @tbl2_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %unused,
+                                  <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: tbl2_h:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.h, { z1.h, z2.h }, z3.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl2.nxv8i16(<vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %b,
+                                                                <vscale x 8 x i16> %c)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @tbl2_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %unused,
+                                  <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: tbl2_s:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.s, { z1.s, z2.s }, z3.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl2.nxv4i32(<vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %b,
+                                                                <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @tbl2_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %unused,
+                                  <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: tbl2_d:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.d, { z1.d, z2.d }, z3.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl2.nxv2i64(<vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %b,
+                                                                <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 8 x half> @tbl2_fh(<vscale x 8 x half> %a, <vscale x 8 x half> %unused,
+                                    <vscale x 8 x half> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: tbl2_fh:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.h, { z1.h, z2.h }, z3.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.tbl2.nxv8f16(<vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> %b,
+                                                                 <vscale x 8 x i16> %c)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 4 x float> @tbl2_fs(<vscale x 4 x float> %a, <vscale x 4 x float> %unused,
+                                     <vscale x 4 x float> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: tbl2_fs:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.s, { z1.s, z2.s }, z3.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.tbl2.nxv4f32(<vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> %b,
+                                                                  <vscale x 4 x i32> %c)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @tbl2_fd(<vscale x 2 x double> %a, <vscale x 2 x double> %unused,
+                                      <vscale x 2 x double> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: tbl2_fd:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.d, { z1.d, z2.d }, z3.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.tbl2.nxv2f64(<vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> %b,
+                                                                   <vscale x 2 x i64> %c)
+  ret <vscale x 2 x double> %out
+}
+
+;
+; TBX
+;
+
+define <vscale x 16 x i8> @tbx_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: tbx_b:
+; CHECK: tbx z0.b, z1.b, z2.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.tbx.nxv16i8(<vscale x 16 x i8> %a,
+                                                               <vscale x 16 x i8> %b,
+                                                               <vscale x 16 x i8> %c)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @tbx_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: tbx_h:
+; CHECK: tbx z0.h, z1.h, z2.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.tbx.nxv8i16(<vscale x 8 x i16> %a,
+                                                               <vscale x 8 x i16> %b,
+                                                               <vscale x 8 x i16> %c)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x half> @ftbx_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: ftbx_h:
+; CHECK: tbx z0.h, z1.h, z2.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.tbx.nxv8f16(<vscale x 8 x half> %a,
+                                                                <vscale x 8 x half> %b,
+                                                                <vscale x 8 x i16> %c)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 4 x i32> @tbx_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: tbx_s:
+; CHECK: tbx z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.tbx.nxv4i32(<vscale x 4 x i32> %a,
+                                                               <vscale x 4 x i32> %b,
+                                                               <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x float> @ftbx_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: ftbx_s:
+; CHECK: tbx z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.tbx.nxv4f32(<vscale x 4 x float> %a,
+                                                                 <vscale x 4 x float> %b,
+                                                                 <vscale x 4 x i32> %c)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x i64> @tbx_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: tbx_d:
+; CHECK: tbx z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.tbx.nxv2i64(<vscale x 2 x i64> %a,
+                                                               <vscale x 2 x i64> %b,
+                                                               <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x double> @ftbx_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: ftbx_d:
+; CHECK: tbx z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.tbx.nxv2f64(<vscale x 2 x double> %a,
+                                                                  <vscale x 2 x double> %b,
+                                                                  <vscale x 2 x i64> %c)
+  ret <vscale x 2 x double> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.tbl2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.tbl2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.tbl2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.tbl2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 8 x half> @llvm.aarch64.sve.tbl2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i16>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.tbl2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.tbl2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.tbx.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.tbx.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.tbx.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.tbx.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 8 x half> @llvm.aarch64.sve.tbx.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i16>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.tbx.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.tbx.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i64>)
Index: llvm/test/CodeGen/AArch64/sve2-intrinsics-bit-permutation.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve2-intrinsics-bit-permutation.ll
@@ -0,0 +1,124 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2,+sve2-bitperm < %s | FileCheck %s
+
+;
+; BDEP
+;
+
+define <vscale x 16 x i8> @bdep_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: bdep_nxv16i8:
+; CHECK: bdep z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bdep.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @bdep_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: bdep_nxv8i16:
+; CHECK: bdep z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bdep.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @bdep_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: bdep_nxv4i32:
+; CHECK: bdep z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bdep.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @bdep_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: bdep_nxv2i64:
+; CHECK: bdep z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bdep.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; BEXT
+;
+
+define <vscale x 16 x i8> @bext_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: bext_nxv16i8:
+; CHECK: bext z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bext.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @bext_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: bext_nxv8i16:
+; CHECK: bext z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bext.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @bext_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: bext_nxv4i32:
+; CHECK: bext z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bext.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @bext_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: bext_nxv2i64:
+; CHECK: bext z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bext.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; BGRP
+;
+
+define <vscale x 16 x i8> @bgrp_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: bgrp_nxv16i8:
+; CHECK: bgrp z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bgrp.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @bgrp_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: bgrp_nxv8i16:
+; CHECK: bgrp z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bgrp.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @bgrp_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: bgrp_nxv4i32:
+; CHECK: bgrp z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bgrp.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @bgrp_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: bgrp_nxv2i64:
+; CHECK: bgrp z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bgrp.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.bdep.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.bdep.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.bdep.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.bdep.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.bext.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.bext.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.bext.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.bext.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.bgrp.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.bgrp.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.bgrp.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.bgrp.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
Index: llvm/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -998,11 +998,46 @@
   def : SVE_2_Op_Pat<nxv2f64, op, nxv2f64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve2_int_perm_tbl<string asm> {
+multiclass sve2_int_perm_tbl<string asm, SDPatternOperator op> {
   def _B : sve_int_perm_tbl<0b00, 0b01, asm, ZPR8,  ZZ_b>;
   def _H : sve_int_perm_tbl<0b01, 0b01, asm, ZPR16, ZZ_h>;
   def _S : sve_int_perm_tbl<0b10, 0b01, asm, ZPR32, ZZ_s>;
   def _D : sve_int_perm_tbl<0b11, 0b01, asm, ZPR64, ZZ_d>;
+
+  def : Pat<(nxv16i8 (op nxv16i8:$Op1, nxv16i8:$Op2, nxv16i8:$Op3)),
+            (nxv16i8 (!cast<Instruction>(NAME # _B) (REG_SEQUENCE ZPR2, nxv16i8:$Op1, zsub0,
+                                                                        nxv16i8:$Op2, zsub1),
+                                                     nxv16i8:$Op3))>;
+
+  def : Pat<(nxv8i16 (op nxv8i16:$Op1, nxv8i16:$Op2, nxv8i16:$Op3)),
+            (nxv8i16 (!cast<Instruction>(NAME # _H) (REG_SEQUENCE ZPR2, nxv8i16:$Op1, zsub0,
+                                                                        nxv8i16:$Op2, zsub1),
+                                                     nxv8i16:$Op3))>;
+
+  def : Pat<(nxv4i32 (op nxv4i32:$Op1, nxv4i32:$Op2, nxv4i32:$Op3)),
+            (nxv4i32 (!cast<Instruction>(NAME # _S) (REG_SEQUENCE ZPR2, nxv4i32:$Op1, zsub0,
+                                                                        nxv4i32:$Op2, zsub1),
+                                                     nxv4i32:$Op3))>;
+
+  def : Pat<(nxv2i64 (op nxv2i64:$Op1, nxv2i64:$Op2, nxv2i64:$Op3)),
+            (nxv2i64 (!cast<Instruction>(NAME # _D) (REG_SEQUENCE ZPR2, nxv2i64:$Op1, zsub0,
+                                                                        nxv2i64:$Op2, zsub1),
+                                                     nxv2i64:$Op3))>;
+
+  def : Pat<(nxv8f16 (op nxv8f16:$Op1, nxv8f16:$Op2, nxv8i16:$Op3)),
+            (nxv8f16 (!cast<Instruction>(NAME # _H) (REG_SEQUENCE ZPR2, nxv8f16:$Op1, zsub0,
+                                                                        nxv8f16:$Op2, zsub1),
+                                                     nxv8i16:$Op3))>;
+
+  def : Pat<(nxv4f32 (op nxv4f32:$Op1, nxv4f32:$Op2, nxv4i32:$Op3)),
+            (nxv4f32 (!cast<Instruction>(NAME # _S) (REG_SEQUENCE ZPR2, nxv4f32:$Op1, zsub0,
+                                                                        nxv4f32:$Op2, zsub1),
+                                                     nxv4i32:$Op3))>;
+
+  def : Pat<(nxv2f64 (op nxv2f64:$Op1, nxv2f64:$Op2, nxv2i64:$Op3)),
+            (nxv2f64 (!cast<Instruction>(NAME # _D) (REG_SEQUENCE ZPR2, nxv2f64:$Op1, zsub0,
+                                                                        nxv2f64:$Op2, zsub1),
+                                                     nxv2i64:$Op3))>;
 }
 
 class sve2_int_perm_tbx<bits<2> sz8_64, string asm, ZPRRegOp zprty>
@@ -1024,11 +1059,20 @@
   let Constraints = "$Zd = $_Zd";
 }
 
-multiclass sve2_int_perm_tbx<string asm> {
+multiclass sve2_int_perm_tbx<string asm, SDPatternOperator op> {
   def _B : sve2_int_perm_tbx<0b00, asm, ZPR8>;
   def _H : sve2_int_perm_tbx<0b01, asm, ZPR16>;
   def _S : sve2_int_perm_tbx<0b10, asm, ZPR32>;
   def _D : sve2_int_perm_tbx<0b11, asm, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
+
+  def : SVE_3_Op_Pat<nxv8f16, op, nxv8f16, nxv8f16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4f32, op, nxv4f32, nxv4f32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2f64, op, nxv2f64, nxv2f64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 class sve_int_perm_reverse_z<bits<2> sz8_64, string asm, ZPRRegOp zprty>
@@ -3024,11 +3068,16 @@
   let Inst{4-0}   = Zd;
 }
 
-multiclass sve2_misc_bitwise<bits<4> opc, string asm> {
+multiclass sve2_misc_bitwise<bits<4> opc, string asm, SDPatternOperator op> {
   def _B : sve2_misc<0b00, opc, asm, ZPR8, ZPR8>;
   def _H : sve2_misc<0b01, opc, asm, ZPR16, ZPR16>;
   def _S : sve2_misc<0b10, opc, asm, ZPR32, ZPR32>;
   def _D : sve2_misc<0b11, opc, asm, ZPR64, ZPR64>;
+
+  def : SVE_2_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_2_Op_Pat<nxv8i16, op, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_2_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve2_misc_int_addsub_long_interleaved<bits<2> opc, string asm,
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1921,8 +1921,8 @@
   defm STNT1D_ZZR_D : sve2_mem_sstnt_vs<0b110, "stnt1d", Z_d, ZPR64>;
 
   // SVE2 table lookup (three sources)
-  defm TBL_ZZZZ : sve2_int_perm_tbl<"tbl">;
-  defm TBX_ZZZ  : sve2_int_perm_tbx<"tbx">;
+  defm TBL_ZZZZ : sve2_int_perm_tbl<"tbl", int_aarch64_sve_tbl2>;
+  defm TBX_ZZZ  : sve2_int_perm_tbx<"tbx", int_aarch64_sve_tbx>;
 
   // SVE2 integer compare scalar count and limit
   defm WHILEGE_PWW : sve_int_while4_rr<0b000, "whilege", int_aarch64_sve_whilege>;
@@ -1970,7 +1970,7 @@
 
 let Predicates = [HasSVE2BitPerm] in {
   // SVE2 bitwise permute
-  defm BEXT_ZZZ : sve2_misc_bitwise<0b1100, "bext">;
-  defm BDEP_ZZZ : sve2_misc_bitwise<0b1101, "bdep">;
-  defm BGRP_ZZZ : sve2_misc_bitwise<0b1110, "bgrp">;
+  defm BEXT_ZZZ : sve2_misc_bitwise<0b1100, "bext", int_aarch64_sve_bext_x>;
+  defm BDEP_ZZZ : sve2_misc_bitwise<0b1101, "bdep", int_aarch64_sve_bdep_x>;
+  defm BGRP_ZZZ : sve2_misc_bitwise<0b1110, "bgrp", int_aarch64_sve_bgrp_x>;
 }
Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1059,6 +1059,13 @@
                  LLVMVectorOfBitcastsToInt<0>],
                 [IntrNoMem]>;
 
+  class AdvSIMD_SVE2_TBX_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>,
+                 LLVMMatchType<0>,
+                 LLVMVectorOfBitcastsToInt<0>],
+                [IntrNoMem]>;
+
   class SVE2_1VectorArg_Long_Intrinsic
     : Intrinsic<[llvm_anyvector_ty],
                 [LLVMSubdivide2VectorType<0>,
@@ -2073,5 +2080,19 @@
                               Intrinsic<[llvm_nxv4i32_ty],
                                         [llvm_nxv4i32_ty, llvm_nxv4i32_ty],
                                         [IntrNoMem]>;
+//
+// SVE2 - Extended table lookup/permute
+//
+
+def int_aarch64_sve_tbl2 : AdvSIMD_SVE2_TBX_Intrinsic;
+def int_aarch64_sve_tbx  : AdvSIMD_SVE2_TBX_Intrinsic;
+
+//
+// SVE2 - Optional bit permutation
+//
+
+def int_aarch64_sve_bdep_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_bext_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_bgrp_x : AdvSIMD_2VectorArg_Intrinsic;
 
 }
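
For reference, the bit-permutation intrinsics expose per-element bit
extract/deposit, analogous to x86 PEXT/PDEP (semantics per the Arm SVE2
specification; the byte values below are illustrative only). bext gathers the
data bits at positions where the mask has set bits and packs them from bit 0
upwards; bdep is the inverse scatter; bgrp packs the mask-selected bits at the
low end of each element and the remaining bits at the high end. For one byte
element:

  bext(data=0b10110110, mask=0b01101100) = 0b00000101  ; data bits 2,3,5,6 packed at the bottom
  bdep(data=0b00000101, mask=0b01101100) = 0b00100100  ; low data bits scattered to positions 2,3,5,6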