FreddyYe updated this revision to Diff 484998.
FreddyYe marked an inline comment as done.
FreddyYe added a comment.

Address comments.
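
For reviewers' reference, a minimal usage sketch of the new API (illustrative only, not part of the patch; the helper function names are made up). The masked forms substitute the operation's identity into masked-off lanes before reducing: 0 for add/or, 1 for mul, -1 for and, and the element type's minimum/maximum for max/min.

  // Requires -mavx512bw -mavx512vl (or equivalent target attributes).
  #include <immintrin.h>

  // Sum only the low 4 of the 8 i16 lanes; lanes cleared by the mask
  // contribute the additive identity 0.
  short sum_low4(__m128i v) {
    return _mm_mask_reduce_add_epi16(0x0F, v);
  }

  // Unsigned maximum across all 32 i8 lanes.
  unsigned char max_all(__m256i v) {
    return _mm256_reduce_max_epu8(v);
  }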


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D140531/new/

https://reviews.llvm.org/D140531

Files:
  clang/docs/ReleaseNotes.rst
  clang/lib/Headers/avx512vlbwintrin.h
  clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c

Index: clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c
@@ -0,0 +1,420 @@
+// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+#include <immintrin.h>
+
+short test_mm_reduce_add_epi16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_add_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %{{.*}})
+  return _mm_reduce_add_epi16(__W);
+}
+
+short test_mm_reduce_mul_epi16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_mul_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %{{.*}})
+  return _mm_reduce_mul_epi16(__W);
+}
+
+short test_mm_reduce_or_epi16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_or_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %{{.*}})
+  return _mm_reduce_or_epi16(__W);
+}
+
+short test_mm_reduce_and_epi16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_and_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %{{.*}})
+  return _mm_reduce_and_epi16(__W);
+}
+
+short test_mm_mask_reduce_add_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_add_epi16(
+// CHECK:    select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %{{.*}})
+  return _mm_mask_reduce_add_epi16(__M, __W);
+}
+
+short test_mm_mask_reduce_mul_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_mul_epi16(
+// CHECK:    select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %{{.*}})
+  return _mm_mask_reduce_mul_epi16(__M, __W);
+}
+
+short test_mm_mask_reduce_and_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_and_epi16(
+// CHECK:    select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %{{.*}})
+  return _mm_mask_reduce_and_epi16(__M, __W);
+}
+
+short test_mm_mask_reduce_or_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_or_epi16(
+// CHECK:    select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %{{.*}})
+  return _mm_mask_reduce_or_epi16(__M, __W);
+}
+
+short test_mm256_reduce_add_epi16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_add_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %{{.*}})
+  return _mm256_reduce_add_epi16(__W);
+}
+
+short test_mm256_reduce_mul_epi16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_mul_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %{{.*}})
+  return _mm256_reduce_mul_epi16(__W);
+}
+
+short test_mm256_reduce_or_epi16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_or_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %{{.*}})
+  return _mm256_reduce_or_epi16(__W);
+}
+
+short test_mm256_reduce_and_epi16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_and_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %{{.*}})
+  return _mm256_reduce_and_epi16(__W);
+}
+
+short test_mm256_mask_reduce_add_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_add_epi16(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %{{.*}})
+  return _mm256_mask_reduce_add_epi16(__M, __W);
+}
+
+short test_mm256_mask_reduce_mul_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_mul_epi16(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %{{.*}})
+  return _mm256_mask_reduce_mul_epi16(__M, __W);
+}
+
+short test_mm256_mask_reduce_and_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_and_epi16(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %{{.*}})
+  return _mm256_mask_reduce_and_epi16(__M, __W);
+}
+
+short test_mm256_mask_reduce_or_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_or_epi16(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %{{.*}})
+  return _mm256_mask_reduce_or_epi16(__M, __W);
+}
+
+signed char test_mm_reduce_add_epi8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_add_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %{{.*}})
+  return _mm_reduce_add_epi8(__W);
+}
+
+signed char test_mm_reduce_mul_epi8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_mul_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %{{.*}})
+  return _mm_reduce_mul_epi8(__W);
+}
+
+signed char test_mm_reduce_and_epi8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_and_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %{{.*}})
+  return _mm_reduce_and_epi8(__W);
+}
+
+signed char test_mm_reduce_or_epi8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_or_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %{{.*}})
+  return _mm_reduce_or_epi8(__W);
+}
+
+signed char test_mm_mask_reduce_add_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_add_epi8(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %{{.*}})
+  return _mm_mask_reduce_add_epi8(__M, __W);
+}
+
+signed char test_mm_mask_reduce_mul_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_mul_epi8(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %{{.*}})
+  return _mm_mask_reduce_mul_epi8(__M, __W);
+}
+
+signed char test_mm_mask_reduce_and_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_and_epi8(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %{{.*}})
+  return _mm_mask_reduce_and_epi8(__M, __W);
+}
+
+signed char test_mm_mask_reduce_or_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_or_epi8(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %{{.*}})
+  return _mm_mask_reduce_or_epi8(__M, __W);
+}
+
+signed char test_mm256_reduce_add_epi8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_add_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %{{.*}})
+  return _mm256_reduce_add_epi8(__W);
+}
+
+signed char test_mm256_reduce_mul_epi8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_mul_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %{{.*}})
+  return _mm256_reduce_mul_epi8(__W);
+}
+
+signed char test_mm256_reduce_and_epi8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_and_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %{{.*}})
+  return _mm256_reduce_and_epi8(__W);
+}
+
+signed char test_mm256_reduce_or_epi8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_or_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %{{.*}})
+  return _mm256_reduce_or_epi8(__W);
+}
+
+signed char test_mm256_mask_reduce_add_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_add_epi8(
+// CHECK:    select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %{{.*}})
+  return _mm256_mask_reduce_add_epi8(__M, __W);
+}
+
+signed char test_mm256_mask_reduce_mul_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_mul_epi8(
+// CHECK:    select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %{{.*}})
+  return _mm256_mask_reduce_mul_epi8(__M, __W);
+}
+
+signed char test_mm256_mask_reduce_and_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_and_epi8(
+// CHECK:    select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %{{.*}})
+  return _mm256_mask_reduce_and_epi8(__M, __W);
+}
+
+signed char test_mm256_mask_reduce_or_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_or_epi8(
+// CHECK:    select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %{{.*}})
+  return _mm256_mask_reduce_or_epi8(__M, __W);
+}
+
+short test_mm_reduce_max_epi16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_max_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %{{.*}})
+  return _mm_reduce_max_epi16(__W);
+}
+
+short test_mm_reduce_min_epi16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_min_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %{{.*}})
+  return _mm_reduce_min_epi16(__W);
+}
+
+unsigned short test_mm_reduce_max_epu16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_max_epu16(
+// CHECK:    call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %{{.*}})
+  return _mm_reduce_max_epu16(__W);
+}
+
+unsigned short test_mm_reduce_min_epu16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_min_epu16(
+// CHECK:    call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %{{.*}})
+  return _mm_reduce_min_epu16(__W);
+}
+
+short test_mm_mask_reduce_max_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_max_epi16(
+// CHECK:    select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %{{.*}})
+  return _mm_mask_reduce_max_epi16(__M, __W);
+}
+
+short test_mm_mask_reduce_min_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_min_epi16(
+// CHECK:    select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %{{.*}})
+  return _mm_mask_reduce_min_epi16(__M, __W);
+}
+
+unsigned short test_mm_mask_reduce_max_epu16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_max_epu16(
+// CHECK:    select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %{{.*}})
+  return _mm_mask_reduce_max_epu16(__M, __W);
+}
+
+unsigned short test_mm_mask_reduce_min_epu16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_min_epu16(
+// CHECK:    select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %{{.*}})
+  return _mm_mask_reduce_min_epu16(__M, __W);
+}
+
+short test_mm256_reduce_max_epi16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_max_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %{{.*}})
+  return _mm256_reduce_max_epi16(__W);
+}
+
+short test_mm256_reduce_min_epi16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_min_epi16(
+// CHECK:    call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %{{.*}})
+  return _mm256_reduce_min_epi16(__W);
+}
+
+unsigned short test_mm256_reduce_max_epu16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_max_epu16(
+// CHECK:    call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %{{.*}})
+  return _mm256_reduce_max_epu16(__W);
+}
+
+unsigned short test_mm256_reduce_min_epu16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_min_epu16(
+// CHECK:    call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %{{.*}})
+  return _mm256_reduce_min_epu16(__W);
+}
+
+short test_mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_max_epi16(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %{{.*}})
+  return _mm256_mask_reduce_max_epi16(__M, __W);
+}
+
+short test_mm256_mask_reduce_min_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_min_epi16(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %{{.*}})
+  return _mm256_mask_reduce_min_epi16(__M, __W);
+}
+
+unsigned short test_mm256_mask_reduce_max_epu16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_max_epu16(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %{{.*}})
+  return _mm256_mask_reduce_max_epu16(__M, __W);
+}
+
+unsigned short test_mm256_mask_reduce_min_epu16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_min_epu16(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK:    call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %{{.*}})
+  return _mm256_mask_reduce_min_epu16(__M, __W);
+}
+
+signed char test_mm_reduce_max_epi8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_max_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %{{.*}})
+  return _mm_reduce_max_epi8(__W);
+}
+
+signed char test_mm_reduce_min_epi8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_min_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %{{.*}})
+  return _mm_reduce_min_epi8(__W);
+}
+
+unsigned char test_mm_reduce_max_epu8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_max_epu8(
+// CHECK:    call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %{{.*}})
+  return _mm_reduce_max_epu8(__W);
+}
+
+unsigned char test_mm_reduce_min_epu8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_min_epu8(
+// CHECK:    call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %{{.*}})
+  return _mm_reduce_min_epu8(__W);
+}
+
+signed char test_mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_max_epi8(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %{{.*}})
+  return _mm_mask_reduce_max_epi8(__M, __W);
+}
+
+signed char test_mm_mask_reduce_min_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_min_epi8(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %{{.*}})
+  return _mm_mask_reduce_min_epi8(__M, __W);
+}
+
+unsigned char test_mm_mask_reduce_max_epu8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_max_epu8(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %{{.*}})
+  return _mm_mask_reduce_max_epu8(__M, __W);
+}
+
+unsigned char test_mm_mask_reduce_min_epu8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_min_epu8(
+// CHECK:    select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %{{.*}})
+  return _mm_mask_reduce_min_epu8(__M, __W);
+}
+
+signed char test_mm256_reduce_max_epi8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_max_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %{{.*}})
+  return _mm256_reduce_max_epi8(__W);
+}
+
+signed char test_mm256_reduce_min_epi8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_min_epi8(
+// CHECK:    call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %{{.*}})
+  return _mm256_reduce_min_epi8(__W);
+}
+
+unsigned char test_mm256_reduce_max_epu8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_max_epu8(
+// CHECK:    call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %{{.*}})
+  return _mm256_reduce_max_epu8(__W);
+}
+
+unsigned char test_mm256_reduce_min_epu8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_min_epu8(
+// CHECK:    call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %{{.*}})
+  return _mm256_reduce_min_epu8(__W);
+}
+
+signed char test_mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_max_epi8(
+// CHECK:    select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %{{.*}})
+  return _mm256_mask_reduce_max_epi8(__M, __W);
+}
+
+signed char test_mm256_mask_reduce_min_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_min_epi8(
+// CHECK:    select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %{{.*}})
+  return _mm256_mask_reduce_min_epi8(__M, __W);
+}
+
+unsigned char test_mm256_mask_reduce_max_epu8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_max_epu8(
+// CHECK:    select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %{{.*}})
+  return _mm256_mask_reduce_max_epu8(__M, __W);
+}
+
+unsigned char test_mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_min_epu8(
+// CHECK:    select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK:    call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %{{.*}})
+  return _mm256_mask_reduce_min_epu8(__M, __W);
+}
Index: clang/lib/Headers/avx512vlbwintrin.h
===================================================================
--- clang/lib/Headers/avx512vlbwintrin.h
+++ clang/lib/Headers/avx512vlbwintrin.h
@@ -2254,7 +2254,7 @@
 
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A)
+_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, signed char __A)
 {
   return (__m128i) __builtin_ia32_selectb_128(__M,
                                               (__v16qi) _mm_set1_epi8(__A),
@@ -2262,7 +2262,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_set1_epi8 (__mmask16 __M, char __A)
+_mm_maskz_set1_epi8 (__mmask16 __M, signed char __A)
 {
  return (__m128i) __builtin_ia32_selectb_128(__M,
                                              (__v16qi) _mm_set1_epi8(__A),
@@ -2270,7 +2270,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A)
+_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, signed char __A)
 {
   return (__m256i) __builtin_ia32_selectb_256(__M,
                                               (__v32qi) _mm256_set1_epi8(__A),
@@ -2278,7 +2278,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
+_mm256_maskz_set1_epi8 (__mmask32 __M, signed char __A)
 {
   return (__m256i) __builtin_ia32_selectb_256(__M,
                                               (__v32qi) _mm256_set1_epi8(__A),
@@ -2803,6 +2803,358 @@
                                   (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
                                   (__v16hi)_mm256_setzero_si256()))
 
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_add_epi16(__m128i __W) {
+  return __builtin_reduce_add((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_mul_epi16(__m128i __W) {
+  return __builtin_reduce_mul((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_and_epi16(__m128i __W) {
+  return __builtin_reduce_and((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_or_epi16(__m128i __W) {
+  return __builtin_reduce_or((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_add_epi16(__mmask8 __M, __m128i __W) {
+  __W = _mm_maskz_mov_epi16(__M, __W);
+  return __builtin_reduce_add((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_mul_epi16(__mmask8 __M, __m128i __W) {
+  __W = _mm_mask_mov_epi16(_mm_set1_epi16(1), __M, __W);
+  return __builtin_reduce_mul((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_and_epi16(__mmask8 __M, __m128i __W) {
+  __W = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __W);
+  return __builtin_reduce_and((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_or_epi16(__mmask8 __M, __m128i __W) {
+  __W = _mm_maskz_mov_epi16(__M, __W);
+  return __builtin_reduce_or((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epi16(__m128i __V) {
+  return __builtin_reduce_max((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epu16(__m128i __V) {
+  return __builtin_reduce_max((__v8hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epi16(__m128i __V) {
+  return __builtin_reduce_min((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epu16(__m128i __V) {
+  return __builtin_reduce_min((__v8hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epi16(__mmask8 __M, __m128i __V) {
+  __V = _mm_mask_mov_epi16(_mm_set1_epi16(-32767-1), __M, __V);
+  return __builtin_reduce_max((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epu16(__mmask8 __M, __m128i __V) {
+  __V = _mm_maskz_mov_epi16(__M, __V);
+  return __builtin_reduce_max((__v8hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epi16(__mmask8 __M, __m128i __V) {
+  __V = _mm_mask_mov_epi16(_mm_set1_epi16(32767), __M, __V);
+  return __builtin_reduce_min((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epu16(__mmask8 __M, __m128i __V) {
+  __V = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __V);
+  return __builtin_reduce_min((__v8hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_add_epi16(__m256i __W) {
+  return __builtin_reduce_add((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_mul_epi16(__m256i __W) {
+  return __builtin_reduce_mul((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_and_epi16(__m256i __W) {
+  return __builtin_reduce_and((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_or_epi16(__m256i __W) {
+  return __builtin_reduce_or((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_add_epi16(__mmask16 __M, __m256i __W) {
+  __W = _mm256_maskz_mov_epi16(__M, __W);
+  return __builtin_reduce_add((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_mul_epi16(__mmask16 __M, __m256i __W) {
+  __W = _mm256_mask_mov_epi16(_mm256_set1_epi16(1), __M, __W);
+  return __builtin_reduce_mul((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_and_epi16(__mmask16 __M, __m256i __W) {
+  __W = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __W);
+  return __builtin_reduce_and((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_or_epi16(__mmask16 __M, __m256i __W) {
+  __W = _mm256_maskz_mov_epi16(__M, __W);
+  return __builtin_reduce_or((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epi16(__m256i __V) {
+  return __builtin_reduce_max((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epu16(__m256i __V) {
+  return __builtin_reduce_max((__v16hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epi16(__m256i __V) {
+  return __builtin_reduce_min((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epu16(__m256i __V) {
+  return __builtin_reduce_min((__v16hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __V) {
+  __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-32767-1), __M, __V);
+  return __builtin_reduce_max((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epu16(__mmask16 __M, __m256i __V) {
+  __V = _mm256_maskz_mov_epi16(__M, __V);
+  return __builtin_reduce_max((__v16hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epi16(__mmask16 __M, __m256i __V) {
+  __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(32767), __M, __V);
+  return __builtin_reduce_min((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epu16(__mmask16 __M, __m256i __V) {
+  __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __V);
+  return __builtin_reduce_min((__v16hu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_add_epi8(__m128i __W) {
+  return __builtin_reduce_add((__v16qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_mul_epi8(__m128i __W) {
+  return __builtin_reduce_mul((__v16qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_and_epi8(__m128i __W) {
+  return __builtin_reduce_and((__v16qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_or_epi8(__m128i __W) {
+  return __builtin_reduce_or((__v16qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_add_epi8(__mmask16 __M, __m128i __W) {
+  __W = _mm_maskz_mov_epi8(__M, __W);
+  return __builtin_reduce_add((__v16qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_mul_epi8(__mmask16 __M, __m128i __W) {
+  __W = _mm_mask_mov_epi8(_mm_set1_epi8(1), __M, __W);
+  return __builtin_reduce_mul((__v16qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_and_epi8(__mmask16 __M, __m128i __W) {
+  __W = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __W);
+  return __builtin_reduce_and((__v16qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_or_epi8(__mmask16 __M, __m128i __W) {
+  __W = _mm_maskz_mov_epi8(__M, __W);
+  return __builtin_reduce_or((__v16qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epi8(__m128i __V) {
+  return __builtin_reduce_max((__v16qi)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epu8(__m128i __V) {
+  return __builtin_reduce_max((__v16qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epi8(__m128i __V) {
+  return __builtin_reduce_min((__v16qi)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epu8(__m128i __V) {
+  return __builtin_reduce_min((__v16qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __V) {
+  __V = _mm_mask_mov_epi8(_mm_set1_epi8(-127-1), __M, __V);
+  return __builtin_reduce_max((__v16qi)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epu8(__mmask16 __M, __m128i __V) {
+  __V = _mm_maskz_mov_epi8(__M, __V);
+  return __builtin_reduce_max((__v16qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epi8(__mmask16 __M, __m128i __V) {
+  __V = _mm_mask_mov_epi8(_mm_set1_epi8(127), __M, __V);
+  return __builtin_reduce_min((__v16qi)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epu8(__mmask16 __M, __m128i __V) {
+  __V = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __V);
+  return __builtin_reduce_min((__v16qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_add_epi8(__m256i __W) {
+  return __builtin_reduce_add((__v32qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_mul_epi8(__m256i __W) {
+  return __builtin_reduce_mul((__v32qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_and_epi8(__m256i __W) {
+  return __builtin_reduce_and((__v32qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_or_epi8(__m256i __W) {
+  return __builtin_reduce_or((__v32qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_add_epi8(__mmask32 __M, __m256i __W) {
+  __W = _mm256_maskz_mov_epi8(__M, __W);
+  return __builtin_reduce_add((__v32qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_mul_epi8(__mmask32 __M, __m256i __W) {
+  __W = _mm256_mask_mov_epi8(_mm256_set1_epi8(1), __M, __W);
+  return __builtin_reduce_mul((__v32qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_and_epi8( __mmask32 __M, __m256i __W) {
+  __W = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __W);
+  return __builtin_reduce_and((__v32qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_or_epi8(__mmask32 __M, __m256i __W) {
+  __W = _mm256_maskz_mov_epi8(__M, __W);
+  return __builtin_reduce_or((__v32qi)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epi8(__m256i __V) {
+  return __builtin_reduce_max((__v32qi)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epu8(__m256i __V) {
+  return __builtin_reduce_max((__v32qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epi8(__m256i __V) {
+  return __builtin_reduce_min((__v32qi)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epu8(__m256i __V) {
+  return __builtin_reduce_min((__v32qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __V) {
+  __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-127-1), __M, __V);
+  return __builtin_reduce_max((__v32qi)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epu8(__mmask32 __M, __m256i __V) {
+  __V = _mm256_maskz_mov_epi8(__M, __V);
+  return __builtin_reduce_max((__v32qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epi8(__mmask32 __M, __m256i __V) {
+  __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(127), __M, __V);
+  return __builtin_reduce_min((__v32qi)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __V) {
+  __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __V);
+  return __builtin_reduce_min((__v32qu)__V);
+}
+
 #undef __DEFAULT_FN_ATTRS128
 #undef __DEFAULT_FN_ATTRS256
 
Index: clang/docs/ReleaseNotes.rst
===================================================================
--- clang/docs/ReleaseNotes.rst
+++ clang/docs/ReleaseNotes.rst
@@ -803,6 +803,8 @@
 - ``-march=raptorlake`` and ``-march=meteorlake`` are now supported.
 - ``-march=sierraforest``, ``-march=graniterapids`` and ``-march=grandridge`` are now supported.
 - Lift _BitInt() supported max width from 128 to 8388608.
+- Add support for the ``_mm(256)_reduce_(add|mul|or|and)_epi8/16`` intrinsics.
+- Add support for the ``_mm(256)_reduce_(max|min)_ep[i|u]8/16`` intrinsics.
 
 WebAssembly Support in Clang
 ----------------------------