Author: Kerry McLaughlin
Date: 2020-12-17T11:08:15Z
New Revision: 6d2a78996bee74611dad55b6c42b828ce1ee0953

URL: https://github.com/llvm/llvm-project/commit/6d2a78996bee74611dad55b6c42b828ce1ee0953
DIFF: https://github.com/llvm/llvm-project/commit/6d2a78996bee74611dad55b6c42b828ce1ee0953.diff

LOG: [SVE][CodeGen] Add bfloat16 support to scalable masked gather

Reviewed By: david-arm

Differential Revision: https://reviews.llvm.org/D93307

Added: 


Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll

Removed: 


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e4d1b514b776..9eeacc8df0bf 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1151,8 +1151,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
     }
 
-    for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16})
+    for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
+      setOperationAction(ISD::MGATHER, VT, Custom);
       setOperationAction(ISD::MSCATTER, VT, Custom);
+    }
 
     setOperationAction(ISD::SPLAT_VECTOR, MVT::nxv8bf16, Custom);
 
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index adbace24ee6c..fbe24460d51f 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1196,6 +1196,10 @@ let Predicates = [HasSVE] in {
               (UUNPKLO_ZZ_D ZPR:$Zs)>;
     def : Pat<(nxv2bf16 (extract_subvector (nxv4bf16 ZPR:$Zs), (i64 2))),
               (UUNPKHI_ZZ_D ZPR:$Zs)>;
+    def : Pat<(nxv4bf16 (extract_subvector (nxv8bf16 ZPR:$Zs), (i64 0))),
+              (UUNPKLO_ZZ_S ZPR:$Zs)>;
+    def : Pat<(nxv4bf16 (extract_subvector (nxv8bf16 ZPR:$Zs), (i64 4))),
+              (UUNPKHI_ZZ_S ZPR:$Zs)>;
   }
 
   def : Pat<(nxv4f16 (extract_subvector (nxv8f16 ZPR:$Zs), (i64 0))),
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
index e6b89b0070d6..25d0a471c29a 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
@@ -48,6 +48,16 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(half* %base, <vscale x 2 x i32
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -125,6 +135,16 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(half* %base, <vscale x 4 x i32
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(bfloat* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(float* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -150,10 +170,13 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32,
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
index 2d4ce50e8464..b9bf9049d46f 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
@@ -63,6 +63,17 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(i8* %base, <vscale x 2 x i32>
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
+; CHECK-NEXT:    ret
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -169,6 +180,17 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(i8* %base, <vscale x 4 x i32>
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
+; CHECK-NEXT:    ret
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
+  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x bfloat*>
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -208,6 +230,7 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32,
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
@@ -215,4 +238,6 @@ declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*>, i32, <v
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
index 41f1eb4e94d4..c7f8a7677527 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
@@ -52,6 +52,17 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(half* %base, <vscale x 2 x i32
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -136,6 +147,17 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(half* %base, <vscale x 4 x i32
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(bfloat* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(float* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -163,10 +185,13 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32,
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
index 51ab73c14ac9..fe7290fb1fe8 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
@@ -68,6 +68,18 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(i8* %base, <vscale x 2 x i32>
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -183,6 +195,18 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(i8* %base, <vscale x 4 x i32>
   ret <vscale x 4 x half> %vals
 }
 
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
+; CHECK-NEXT:    ret
+  %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x bfloat*>
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  ret <vscale x 4 x bfloat> %vals
+}
+
 define <vscale x 4 x float> @masked_gather_nxv4f32(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
@@ -225,6 +249,7 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32,
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
@@ -232,4 +257,6 @@ declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*>, i32, <v
 declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
 declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
index 15dfcc61316e..c594f2c488e3 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
@@ -44,6 +44,16 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(half* %base, <vscale x 2 x i64
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -90,5 +100,7 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32,
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
index 3320b88691ee..beb5bf3d28ba 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
@@ -59,6 +59,17 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(i8* %base, <vscale x 2 x i64>
   ret <vscale x 2 x half> %vals
 }
 
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
+; CHECK-LABEL: masked_gather_nxv2bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d]
+; CHECK-NEXT:    ret
+  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  ret <vscale x 2 x bfloat> %vals
+}
+
 define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
@@ -121,5 +132,7 @@ declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32,
 declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
 declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
 declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll
index c3746a61d875..9cb642fac8bf 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalise.ll
@@ -25,6 +25,16 @@ define void @masked_scatter_nxv8i16(<vscale x 8 x i16> %data, i16* %base, <vscal
   ret void
 }
 
+define void @masked_scatter_nxv8bf16(<vscale x 8 x bfloat> %data, bfloat* %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) #0 {
+; CHECK-LABEL: masked_scatter_nxv8bf16
+; CHECK-DAG: st1h { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #1]
+; CHECK-DAG: st1h { {{z[0-9]+}}.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, sxtw #1]
+; CHECK: ret
+  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 8 x i16> %offsets
+  call void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x bfloat*> %ptrs, i32 1, <vscale x 8 x i1> %mask)
+  ret void
+}
+
 define void @masked_scatter_nxv8f32(<vscale x 8 x float> %data, float* %base, <vscale x 8 x i32> %indexes, <vscale x 8 x i1> %masks) {
 ; CHECK-LABEL: masked_scatter_nxv8f32
 ; CHECK-DAG: st1w { z0.s }, {{p[0-9]+}}, [x0, {{z[0-9]+}}.s, uxtw #2]
@@ -56,4 +66,6 @@ define void @masked_scatter_nxv32i32(<vscale x 32 x i32> %data, i32* %base, <vsc
 declare void @llvm.masked.scatter.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8*>, i32, <vscale x 16 x i1>)
 declare void @llvm.masked.scatter.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16*>, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.scatter.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float*>, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat*>, i32, <vscale x 8 x i1>)
 declare void @llvm.masked.scatter.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32*>, i32, <vscale x 32 x i1>)
+attributes #0 = { "target-features"="+sve,+bf16" }
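For anyone wanting to try the new lowering in isolation, a standalone reproducer can be assembled from the tests above. This is only a sketch: the RUN lines of the tests are not shown in this diff, so the llc invocation below is an assumption based on the usual AArch64 SVE test setup, and the file name is illustrative.

; gather-bf16.ll
; Assumed invocation: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+bf16 -o - gather-bf16.ll
; With this patch the nxv2bf16 gather below is expected to select a single
;   ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
; as checked in sve-masked-gather-64b-scaled.ll above.
define <vscale x 2 x bfloat> @gather_nxv2bf16(bfloat* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %offsets
  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
  ret <vscale x 2 x bfloat> %vals
}

declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)

attributes #0 = { "target-features"="+sve,+bf16" }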