fpetrogalli created this revision.
fpetrogalli added reviewers: sdesmalen, kmclaughlin, efriedma.
Herald added subscribers: cfe-commits, kristof.beyls, tschuett.
Herald added a reviewer: rengolin.
Herald added a project: clang.
fpetrogalli added a parent revision: D79638: [llvm][SVE] IR intrinsics for matrix multiplication instructions.
fpetrogalli updated this revision to Diff 262923.
fpetrogalli added a comment.
I replaced the lines `Signed = !Signed` in the TableGen emitter with `Signed = false`.

Guarded by __ARM_FEATURE_SVE_MATMUL_INT8:

- svmmla_u32
- svmmla_s32
- svusmmla_s32

Guarded by __ARM_FEATURE_SVE_MATMUL_FP32:

- svmmla_f32

Guarded by __ARM_FEATURE_SVE_MATMUL_FP64:

- svmmla_f64

Extra change: replace one use of `auto` with the type returned by the function (NFC).

Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D79639

Files:
  clang/include/clang/Basic/arm_sve.td
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp32.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp64.c
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_mmla.c
  clang/utils/TableGen/SveEmitter.cpp
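As a usage sketch (illustrative only, not part of the diff): the new builtins are plain C functions exposed by arm_sve.h under the corresponding feature macro, with the prototypes exercised by the tests below. The function name accumulate_i8 is made up for the example.

  #include <arm_sve.h>

  #if defined(__ARM_FEATURE_SVE_MATMUL_INT8)
  // Accumulate the widening matrix product of two 8-bit integer tiles into
  // 32-bit accumulators (same prototype as checked in acle_sve_mmla.c).
  svint32_t accumulate_i8(svint32_t acc, svint8_t a, svint8_t b) {
    return svmmla_s32(acc, a, b);
  }
  #endif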
Index: clang/utils/TableGen/SveEmitter.cpp
===================================================================
--- clang/utils/TableGen/SveEmitter.cpp
+++ clang/utils/TableGen/SveEmitter.cpp
@@ -513,6 +513,11 @@
   case 'q':
     ElementBitwidth /= 4;
     break;
+  case 'b':
+    Signed = false;
+    Float = false;
+    ElementBitwidth /= 4;
+    break;
   case 'o':
     ElementBitwidth *= 4;
     break;
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_mmla.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_mmla.c
@@ -0,0 +1,39 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_INT8 \
+// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \
+// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \
+// RUN: -emit-llvm -o - %s | FileCheck %s
+
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_INT8 \
+// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \
+// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \
+// RUN: -emit-llvm -o - %s -DSVE_OVERLOADED_FORMS | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+svint32_t test_svmmla_s32(svint32_t x, svint8_t y, svint8_t z) {
+  // CHECK-LABEL: test_svmmla_s32
+  // CHECK: %[[RET:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smmla.nxv4i32(<vscale x 4 x i32> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z)
+  // CHECK: ret <vscale x 4 x i32> %[[RET]]
+  return SVE_ACLE_FUNC(svmmla, _s32, , )(x, y, z);
+}
+
+svuint32_t test_svmmla_u32(svuint32_t x, svuint8_t y, svuint8_t z) {
+  // CHECK-LABEL: test_svmmla_u32
+  // CHECK: %[[RET:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ummla.nxv4i32(<vscale x 4 x i32> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z)
+  // CHECK: ret <vscale x 4 x i32> %[[RET]]
+  return SVE_ACLE_FUNC(svmmla, _u32, , )(x, y, z);
+}
+
+svint32_t test_svusmmla_s32(svint32_t x, svuint8_t y, svint8_t z) {
+  // CHECK-LABEL: test_svusmmla_s32
+  // CHECK: %[[RET:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usmmla.nxv4i32(<vscale x 4 x i32> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z)
+  // CHECK: ret <vscale x 4 x i32> %[[RET]]
+  return SVE_ACLE_FUNC(svusmmla, _s32, , )(x, y, z);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp64.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp64.c
@@ -0,0 +1,25 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_FP64 \
+// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \
+// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \
+// RUN: -emit-llvm -o - %s | FileCheck %s
+
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_FP64 \
+// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \
+// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \
+// RUN: -emit-llvm -o - %s -DSVE_OVERLOADED_FORMS | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+svfloat64_t test_svmmla_f64(svfloat64_t x, svfloat64_t y, svfloat64_t z) {
+  // CHECK-LABEL: test_svmmla_f64
+  // CHECK: %[[RET:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.mmla.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %z)
+  // CHECK: ret <vscale x 2 x double> %[[RET]]
+  return SVE_ACLE_FUNC(svmmla, _f64, , )(x, y, z);
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp32.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp32.c
@@ -0,0 +1,25 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_FP32 \
+// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \
+// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \
+// RUN: -emit-llvm -o - %s | FileCheck %s
+
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_FP32 \
+// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \
+// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \
+// RUN: -emit-llvm -o - %s -DSVE_OVERLOADED_FORMS | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+svfloat32_t test_svmmla_f32(svfloat32_t x, svfloat32_t y, svfloat32_t z) {
+  // CHECK-LABEL: test_svmmla_f32
+  // CHECK: %[[RET:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.mmla.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %z)
+  // CHECK: ret <vscale x 4 x float> %[[RET]]
+  return SVE_ACLE_FUNC(svmmla, _f32, , )(x, y, z);
+}
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -7985,8 +7985,8 @@
     }
   }
 
-  auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
-                                              AArch64SVEIntrinsicsProvenSorted);
+  const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
+      AArch64SVEIntrinsicMap, BuiltinID, AArch64SVEIntrinsicsProvenSorted);
   SVETypeFlags TypeFlags(Builtin->TypeModifier);
   if (TypeFlags.isLoad())
     return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
Index: clang/include/clang/Basic/arm_sve.td
===================================================================
--- clang/include/clang/Basic/arm_sve.td
+++ clang/include/clang/Basic/arm_sve.td
@@ -71,6 +71,7 @@
 // e: 1/2 width unsigned elements, 2x element count
 // h: 1/2 width elements, 2x element count
 // q: 1/4 width elements, 4x element count
+// b: 1/4 width elements, 4x element count, integer, unsigned
 // o: 4x width elements, 1/4 element count
 //
 // w: vector of element type promoted to 64bits, vector maintains
@@ -1223,6 +1224,21 @@
 def SVQINCP_N_U32 : SInst<"svqincp[_n_u32]_{d}", "mmP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n32">;
 def SVQINCP_N_U64 : SInst<"svqincp[_n_u64]_{d}", "nnP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n64">;
 
+let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_INT8)" in {
+def SVMLLA_S32   : SInst<"svmmla[_s32]",   "ddqq", "i",  MergeNone, "aarch64_sve_smmla">;
+def SVMLLA_U32   : SInst<"svmmla[_u32]",   "ddqq", "Ui", MergeNone, "aarch64_sve_ummla">;
+def SVUSMLLA_S32 : SInst<"svusmmla[_s32]", "ddbq", "i",  MergeNone, "aarch64_sve_usmmla">;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP32)" in {
+def SVMLLA_F32 : SInst<"svmmla[_f32]", "dddd", "f", MergeNone, "aarch64_sve_mmla">;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64)" in {
+def SVMLLA_F64 : SInst<"svmmla[_f64]", "dddd", "d", MergeNone, "aarch64_sve_mmla">;
+}
+
+
 ////////////////////////////////////////////////////////////////////////////////
 // SVE2 WhileGE/GT
 let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {