Author: ctopper
Date: Sun Jun 10 10:27:05 2018
New Revision: 334366

URL: http://llvm.org/viewvc/llvm-project?rev=334366&view=rev
Log:
[X86] Use target-independent masked expandload and compressstore intrinsics to implement the expandload/compressstore builtins.

Summary: We've had these target-independent intrinsics for at least a year
and a half. They do exactly what we need here, and the backend already
supports them.
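
For illustration, a minimal sketch of the effect at the source level (not
part of this patch; the function names are hypothetical, and it assumes a
compile with -mavx512f). The builtins keep their existing signatures; only
the IR they lower to changes, as the updated tests below check:

  #include <immintrin.h>

  __m512i expand_into_vector(__m512i src, __mmask8 k, const void *p) {
    // Previously lowered to @llvm.x86.avx512.mask.expand.load.q.512; now:
    //   @llvm.masked.expandload.v8i64(i64* %p, <8 x i1> %k, <8 x i64> %src)
    return _mm512_mask_expandloadu_epi64(src, k, p);
  }

  void compress_to_memory(void *p, __mmask8 k, __m512i v) {
    // Previously lowered to @llvm.x86.avx512.mask.compress.store.q.512; now:
    //   @llvm.masked.compressstore.v8i64(<8 x i64> %v, i64* %p, <8 x i1> %k)
    _mm512_mask_compressstoreu_epi64(p, k, v);
  }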

Reviewers: RKSimon, delena, spatel, GBuella

Reviewed By: RKSimon

Subscribers: cfe-commits, llvm-commits

Differential Revision: https://reviews.llvm.org/D47693

Modified:
    cfe/trunk/lib/CodeGen/CGBuiltin.cpp
    cfe/trunk/test/CodeGen/avx512f-builtins.c
    cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c
    cfe/trunk/test/CodeGen/avx512vl-builtins.c
    cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c

Modified: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGBuiltin.cpp?rev=334366&r1=334365&r2=334366&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp Sun Jun 10 10:27:05 2018
@@ -8496,6 +8496,40 @@ static Value *EmitX86MaskedLoad(CodeGenF
   return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
 }
 
+static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
+                                ArrayRef<Value *> Ops) {
+  llvm::Type *ResultTy = Ops[1]->getType();
+  llvm::Type *PtrTy = ResultTy->getVectorElementType();
+
+  // Cast the pointer to element type.
+  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
+                                         llvm::PointerType::getUnqual(PtrTy));
+
+  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
+                                   ResultTy->getVectorNumElements());
+
+  llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
+                                           ResultTy);
+  return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
+}
+
+static Value *EmitX86CompressStore(CodeGenFunction &CGF,
+                                   ArrayRef<Value *> Ops) {
+  llvm::Type *ResultTy = Ops[1]->getType();
+  llvm::Type *PtrTy = ResultTy->getVectorElementType();
+
+  // Cast the pointer to element type.
+  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
+                                         llvm::PointerType::getUnqual(PtrTy));
+
+  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
+                                   ResultTy->getVectorNumElements());
+
+  llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
+                                           ResultTy);
+  return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
+}
+
 static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
                               unsigned NumElts, ArrayRef<Value *> Ops,
                               bool InvertLHS = false) {
@@ -9219,6 +9253,46 @@ Value *CodeGenFunction::EmitX86BuiltinEx
     return EmitX86MaskedLoad(*this, Ops, Align);
   }
 
+  case X86::BI__builtin_ia32_expandloaddf128_mask:
+  case X86::BI__builtin_ia32_expandloaddf256_mask:
+  case X86::BI__builtin_ia32_expandloaddf512_mask:
+  case X86::BI__builtin_ia32_expandloadsf128_mask:
+  case X86::BI__builtin_ia32_expandloadsf256_mask:
+  case X86::BI__builtin_ia32_expandloadsf512_mask:
+  case X86::BI__builtin_ia32_expandloaddi128_mask:
+  case X86::BI__builtin_ia32_expandloaddi256_mask:
+  case X86::BI__builtin_ia32_expandloaddi512_mask:
+  case X86::BI__builtin_ia32_expandloadsi128_mask:
+  case X86::BI__builtin_ia32_expandloadsi256_mask:
+  case X86::BI__builtin_ia32_expandloadsi512_mask:
+  case X86::BI__builtin_ia32_expandloadhi128_mask:
+  case X86::BI__builtin_ia32_expandloadhi256_mask:
+  case X86::BI__builtin_ia32_expandloadhi512_mask:
+  case X86::BI__builtin_ia32_expandloadqi128_mask:
+  case X86::BI__builtin_ia32_expandloadqi256_mask:
+  case X86::BI__builtin_ia32_expandloadqi512_mask:
+    return EmitX86ExpandLoad(*this, Ops);
+
+  case X86::BI__builtin_ia32_compressstoredf128_mask:
+  case X86::BI__builtin_ia32_compressstoredf256_mask:
+  case X86::BI__builtin_ia32_compressstoredf512_mask:
+  case X86::BI__builtin_ia32_compressstoresf128_mask:
+  case X86::BI__builtin_ia32_compressstoresf256_mask:
+  case X86::BI__builtin_ia32_compressstoresf512_mask:
+  case X86::BI__builtin_ia32_compressstoredi128_mask:
+  case X86::BI__builtin_ia32_compressstoredi256_mask:
+  case X86::BI__builtin_ia32_compressstoredi512_mask:
+  case X86::BI__builtin_ia32_compressstoresi128_mask:
+  case X86::BI__builtin_ia32_compressstoresi256_mask:
+  case X86::BI__builtin_ia32_compressstoresi512_mask:
+  case X86::BI__builtin_ia32_compressstorehi128_mask:
+  case X86::BI__builtin_ia32_compressstorehi256_mask:
+  case X86::BI__builtin_ia32_compressstorehi512_mask:
+  case X86::BI__builtin_ia32_compressstoreqi128_mask:
+  case X86::BI__builtin_ia32_compressstoreqi256_mask:
+  case X86::BI__builtin_ia32_compressstoreqi512_mask:
+    return EmitX86CompressStore(*this, Ops);
+
   case X86::BI__builtin_ia32_storehps:
   case X86::BI__builtin_ia32_storelps: {
     llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);

Modified: cfe/trunk/test/CodeGen/avx512f-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512f-builtins.c?rev=334366&r1=334365&r2=334366&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512f-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512f-builtins.c Sun Jun 10 10:27:05 2018
@@ -7293,40 +7293,52 @@ __m512i test_mm512_maskz_expand_epi64(__
 }
 __m512i test_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm512_mask_expandloadu_epi64
-  // CHECK: @llvm.x86.avx512.mask.expand.load.q.512
+  // CHECK: @llvm.masked.expandload.v8i64(i64* %{{.*}}, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
   return _mm512_mask_expandloadu_epi64(__W, __U, __P); 
 }
 
 __m512i test_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm512_maskz_expandloadu_epi64
-  // CHECK: @llvm.x86.avx512.mask.expand.load.q.512
+  // CHECK: @llvm.masked.expandload.v8i64(i64* %{{.*}}, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
   return _mm512_maskz_expandloadu_epi64(__U, __P); 
 }
 
 __m512d test_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm512_mask_expandloadu_pd
-  // CHECK: @llvm.x86.avx512.mask.expand.load.pd.512
+  // CHECK: @llvm.masked.expandload.v8f64(double* %{{.*}}, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
   return _mm512_mask_expandloadu_pd(__W, __U, __P); 
 }
 
 __m512d test_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm512_maskz_expandloadu_pd
-  // CHECK: @llvm.x86.avx512.mask.expand.load.pd.512
+  // CHECK: @llvm.masked.expandload.v8f64(double* %{{.*}}, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
   return _mm512_maskz_expandloadu_pd(__U, __P); 
 }
 
 __m512i test_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P) {
   // CHECK-LABEL: @test_mm512_mask_expandloadu_epi32
-  // CHECK: @llvm.x86.avx512.mask.expand.load.d.512
+  // CHECK: @llvm.masked.expandload.v16i32(i32* %{{.*}}, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
   return _mm512_mask_expandloadu_epi32(__W, __U, __P); 
 }
 
 __m512i test_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P) {
   // CHECK-LABEL: @test_mm512_maskz_expandloadu_epi32
-  // CHECK: @llvm.x86.avx512.mask.expand.load.d.512
+  // CHECK: @llvm.masked.expandload.v16i32(i32* %{{.*}}, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
   return _mm512_maskz_expandloadu_epi32(__U, __P); 
 }
 
+__m512 test_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm512_mask_expandloadu_ps
+  // CHECK: @llvm.masked.expandload.v16f32(float* %{{.*}}, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
+  return _mm512_mask_expandloadu_ps(__W, __U, __P); 
+}
+
+__m512 test_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm512_maskz_expandloadu_ps
+  // CHECK: @llvm.masked.expandload.v16f32(float* %{{.*}}, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
+  return _mm512_maskz_expandloadu_ps(__U, __P); 
+}
+
 __m512 test_mm512_mask_expand_ps(__m512 __W, __mmask16 __U, __m512 __A) {
   // CHECK-LABEL: @test_mm512_mask_expand_ps
   // CHECK: @llvm.x86.avx512.mask.expand.ps.512
@@ -7428,25 +7440,25 @@ __m512 test_mm512_maskz_mov_ps(__mmask16
 
 void test_mm512_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m512d __A) {
   // CHECK-LABEL: @test_mm512_mask_compressstoreu_pd
-  // CHECK: @llvm.x86.avx512.mask.compress.store.pd.512
+  // CHECK: @llvm.masked.compressstore.v8f64(<8 x double> %{{.*}}, double* %{{.*}}, <8 x i1> %{{.*}})
   return _mm512_mask_compressstoreu_pd(__P, __U, __A); 
 }
 
 void test_mm512_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m512i __A) {
   // CHECK-LABEL: @test_mm512_mask_compressstoreu_epi64
-  // CHECK: @llvm.x86.avx512.mask.compress.store.q.512
+  // CHECK: @llvm.masked.compressstore.v8i64(<8 x i64> %{{.*}}, i64* %{{.*}}, <8 x i1> %{{.*}})
   return _mm512_mask_compressstoreu_epi64(__P, __U, __A); 
 }
 
 void test_mm512_mask_compressstoreu_ps(void *__P, __mmask16 __U, __m512 __A) {
   // CHECK-LABEL: @test_mm512_mask_compressstoreu_ps
-  // CHECK: @llvm.x86.avx512.mask.compress.store.ps.512
+  // CHECK: @llvm.masked.compressstore.v16f32(<16 x float> %{{.*}}, float* %{{.*}}, <16 x i1> %{{.*}})
   return _mm512_mask_compressstoreu_ps(__P, __U, __A); 
 }
 
 void test_mm512_mask_compressstoreu_epi32(void *__P, __mmask16 __U, __m512i __A) {
   // CHECK-LABEL: @test_mm512_mask_compressstoreu_epi32
-  // CHECK: @llvm.x86.avx512.mask.compress.store.d.512
+  // CHECK: @llvm.masked.compressstore.v16i32(<16 x i32> %{{.*}}, i32* %{{.*}}, <16 x i1> %{{.*}})
   return _mm512_mask_compressstoreu_epi32(__P, __U, __A); 
 }
 

Modified: cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c?rev=334366&r1=334365&r2=334366&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512vbmi2-builtins.c Sun Jun 10 10:27:05 2018
@@ -28,13 +28,13 @@ __m512i test_mm512_maskz_compress_epi8(_
 
 void test_mm512_mask_compressstoreu_epi16(void *__P, __mmask32 __U, __m512i __D) {
   // CHECK-LABEL: @test_mm512_mask_compressstoreu_epi16
-  // CHECK: @llvm.x86.avx512.mask.compress.store.w.512
+  // CHECK: @llvm.masked.compressstore.v32i16(<32 x i16> %{{.*}}, i16* %{{.*}}, <32 x i1> %{{.*}})
   _mm512_mask_compressstoreu_epi16(__P, __U, __D);
 }
 
 void test_mm512_mask_compressstoreu_epi8(void *__P, __mmask64 __U, __m512i __D) {
   // CHECK-LABEL: @test_mm512_mask_compressstoreu_epi8
-  // CHECK: @llvm.x86.avx512.mask.compress.store.b.512
+  // CHECK: @llvm.masked.compressstore.v64i8(<64 x i8> %{{.*}}, i8* %{{.*}}, <64 x i1> %{{.*}})
   _mm512_mask_compressstoreu_epi8(__P, __U, __D);
 }
 
@@ -64,25 +64,25 @@ __m512i test_mm512_maskz_expand_epi8(__m
 
 __m512i test_mm512_mask_expandloadu_epi16(__m512i __S, __mmask32 __U, void const* __P) {
   // CHECK-LABEL: @test_mm512_mask_expandloadu_epi16
-  // CHECK: @llvm.x86.avx512.mask.expand.load.w.512
+  // CHECK: @llvm.masked.expandload.v32i16(i16* %{{.*}}, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
   return _mm512_mask_expandloadu_epi16(__S, __U, __P);
 }
 
 __m512i test_mm512_maskz_expandloadu_epi16(__mmask32 __U, void const* __P) {
   // CHECK-LABEL: @test_mm512_maskz_expandloadu_epi16
-  // CHECK: @llvm.x86.avx512.mask.expand.load.w.512
+  // CHECK: @llvm.masked.expandload.v32i16(i16* %{{.*}}, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
   return _mm512_maskz_expandloadu_epi16(__U, __P);
 }
 
 __m512i test_mm512_mask_expandloadu_epi8(__m512i __S, __mmask64 __U, void const* __P) {
   // CHECK-LABEL: @test_mm512_mask_expandloadu_epi8
-  // CHECK: @llvm.x86.avx512.mask.expand.load.b.512
+  // CHECK: @llvm.masked.expandload.v64i8(i8* %{{.*}}, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
   return _mm512_mask_expandloadu_epi8(__S, __U, __P);
 }
 
 __m512i test_mm512_maskz_expandloadu_epi8(__mmask64 __U, void const* __P) {
   // CHECK-LABEL: @test_mm512_maskz_expandloadu_epi8
-  // CHECK: @llvm.x86.avx512.mask.expand.load.b.512
+  // CHECK: @llvm.masked.expandload.v64i8(i8* %{{.*}}, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
   return _mm512_maskz_expandloadu_epi8(__U, __P);
 }
 

Modified: cfe/trunk/test/CodeGen/avx512vl-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512vl-builtins.c?rev=334366&r1=334365&r2=334366&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512vl-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512vl-builtins.c Sun Jun 10 10:27:05 2018
@@ -2043,42 +2043,42 @@ __m256i test_mm256_maskz_compress_epi32(
 }
 void test_mm_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m128d __A) {
   // CHECK-LABEL: @test_mm_mask_compressstoreu_pd
-  // CHECK: @llvm.x86.avx512.mask.compress.store.pd.128
+  // CHECK: @llvm.masked.compressstore.v2f64(<2 x double> %{{.*}}, double* %{{.*}}, <2 x i1> %{{.*}})
   return _mm_mask_compressstoreu_pd(__P,__U,__A); 
 }
 void test_mm256_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m256d __A) {
   // CHECK-LABEL: @test_mm256_mask_compressstoreu_pd
-  // CHECK: @llvm.x86.avx512.mask.compress.store.pd.256
+  // CHECK: @llvm.masked.compressstore.v4f64(<4 x double> %{{.*}}, double* %{{.*}}, <4 x i1> %{{.*}})
   return _mm256_mask_compressstoreu_pd(__P,__U,__A); 
 }
 void test_mm_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm_mask_compressstoreu_epi64
-  // CHECK: @llvm.x86.avx512.mask.compress.store.q.128
+  // CHECK: @llvm.masked.compressstore.v2i64(<2 x i64> %{{.*}}, i64* %{{.*}}, <2 x i1> %{{.*}})
   return _mm_mask_compressstoreu_epi64(__P,__U,__A); 
 }
 void test_mm256_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m256i __A) {
   // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi64
-  // CHECK: @llvm.x86.avx512.mask.compress.store.q.256
+  // CHECK: @llvm.masked.compressstore.v4i64(<4 x i64> %{{.*}}, i64* %{{.*}}, <4 x i1> %{{.*}})
   return _mm256_mask_compressstoreu_epi64(__P,__U,__A); 
 }
 void test_mm_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m128 __A) {
   // CHECK-LABEL: @test_mm_mask_compressstoreu_ps
-  // CHECK: @llvm.x86.avx512.mask.compress.store.ps.128
+  // CHECK: @llvm.masked.compressstore.v4f32(<4 x float> %{{.*}}, float* %{{.*}}, <4 x i1> %{{.*}})
   return _mm_mask_compressstoreu_ps(__P,__U,__A); 
 }
 void test_mm256_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m256 __A) {
   // CHECK-LABEL: @test_mm256_mask_compressstoreu_ps
-  // CHECK: @llvm.x86.avx512.mask.compress.store.ps.256
+  // CHECK: @llvm.masked.compressstore.v8f32(<8 x float> %{{.*}}, float* %{{.*}}, <8 x i1> %{{.*}})
   return _mm256_mask_compressstoreu_ps(__P,__U,__A); 
 }
 void test_mm_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm_mask_compressstoreu_epi32
-  // CHECK: @llvm.x86.avx512.mask.compress.store.d.128
+  // CHECK: @llvm.masked.compressstore.v4i32(<4 x i32> %{{.*}}, i32* %{{.*}}, <4 x i1> %{{.*}})
   return _mm_mask_compressstoreu_epi32(__P,__U,__A); 
 }
 void test_mm256_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m256i __A) {
   // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi32
-  // CHECK: @llvm.x86.avx512.mask.compress.store.d.256
+  // CHECK: @llvm.masked.compressstore.v8i32(<8 x i32> %{{.*}}, i32* %{{.*}}, <8 x i1> %{{.*}})
   return _mm256_mask_compressstoreu_epi32(__P,__U,__A); 
 }
 __m128d test_mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
@@ -2550,82 +2550,82 @@ __m256i test_mm256_maskz_expand_epi64(__
 }
 __m128d test_mm_mask_expandloadu_pd(__m128d __W, __mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm_mask_expandloadu_pd
-  // CHECK: @llvm.x86.avx512.mask.expand.load.pd.128
+  // CHECK: @llvm.masked.expandload.v2f64(double* %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
   return _mm_mask_expandloadu_pd(__W,__U,__P); 
 }
 __m128d test_mm_maskz_expandloadu_pd(__mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm_maskz_expandloadu_pd
-  // CHECK: @llvm.x86.avx512.mask.expand.load.pd.128
+  // CHECK: @llvm.masked.expandload.v2f64(double* %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
   return _mm_maskz_expandloadu_pd(__U,__P); 
 }
 __m256d test_mm256_mask_expandloadu_pd(__m256d __W, __mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm256_mask_expandloadu_pd
-  // CHECK: @llvm.x86.avx512.mask.expand.load.pd.256
+  // CHECK: @llvm.masked.expandload.v4f64(double* %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
   return _mm256_mask_expandloadu_pd(__W,__U,__P); 
 }
 __m256d test_mm256_maskz_expandloadu_pd(__mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm256_maskz_expandloadu_pd
-  // CHECK: @llvm.x86.avx512.mask.expand.load.pd.256
+  // CHECK: @llvm.masked.expandload.v4f64(double* %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
   return _mm256_maskz_expandloadu_pd(__U,__P); 
 }
 __m128i test_mm_mask_expandloadu_epi64(__m128i __W, __mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm_mask_expandloadu_epi64
-  // CHECK: @llvm.x86.avx512.mask.expand.load.q.128
+  // CHECK: @llvm.masked.expandload.v2i64(i64* %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
   return _mm_mask_expandloadu_epi64(__W,__U,__P); 
 }
 __m128i test_mm_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm_maskz_expandloadu_epi64
-  // CHECK: @llvm.x86.avx512.mask.expand.load.q.128
+  // CHECK: @llvm.masked.expandload.v2i64(i64* %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
   return _mm_maskz_expandloadu_epi64(__U,__P); 
 }
 __m256i test_mm256_mask_expandloadu_epi64(__m256i __W, __mmask8 __U,   void const *__P) {
   // CHECK-LABEL: @test_mm256_mask_expandloadu_epi64
-  // CHECK: @llvm.x86.avx512.mask.expand.load.q.256
+  // CHECK: @llvm.masked.expandload.v4i64(i64* %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
   return _mm256_mask_expandloadu_epi64(__W,__U,__P); 
 }
 __m256i test_mm256_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi64
-  // CHECK: @llvm.x86.avx512.mask.expand.load.q.256
+  // CHECK: @llvm.masked.expandload.v4i64(i64* %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
   return _mm256_maskz_expandloadu_epi64(__U,__P); 
 }
 __m128 test_mm_mask_expandloadu_ps(__m128 __W, __mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm_mask_expandloadu_ps
-  // CHECK: @llvm.x86.avx512.mask.expand.load.ps.128
+  // CHECK: @llvm.masked.expandload.v4f32(float* %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
   return _mm_mask_expandloadu_ps(__W,__U,__P); 
 }
 __m128 test_mm_maskz_expandloadu_ps(__mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm_maskz_expandloadu_ps
-  // CHECK: @llvm.x86.avx512.mask.expand.load.ps.128
+  // CHECK: @llvm.masked.expandload.v4f32(float* %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
   return _mm_maskz_expandloadu_ps(__U,__P); 
 }
 __m256 test_mm256_mask_expandloadu_ps(__m256 __W, __mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm256_mask_expandloadu_ps
-  // CHECK: @llvm.x86.avx512.mask.expand.load.ps.256
+  // CHECK: @llvm.masked.expandload.v8f32(float* %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
   return _mm256_mask_expandloadu_ps(__W,__U,__P); 
 }
 __m256 test_mm256_maskz_expandloadu_ps(__mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm256_maskz_expandloadu_ps
-  // CHECK: @llvm.x86.avx512.mask.expand.load.ps.256
+  // CHECK: @llvm.masked.expandload.v8f32(float* %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
   return _mm256_maskz_expandloadu_ps(__U,__P); 
 }
 __m128i test_mm_mask_expandloadu_epi32(__m128i __W, __mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm_mask_expandloadu_epi32
-  // CHECK: @llvm.x86.avx512.mask.expand.load.d.128
+  // CHECK: @llvm.masked.expandload.v4i32(i32* %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
   return _mm_mask_expandloadu_epi32(__W,__U,__P); 
 }
 __m128i test_mm_maskz_expandloadu_epi32(__mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm_maskz_expandloadu_epi32
-  // CHECK: @llvm.x86.avx512.mask.expand.load.d.128
+  // CHECK: @llvm.masked.expandload.v4i32(i32* %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
   return _mm_maskz_expandloadu_epi32(__U,__P); 
 }
 __m256i test_mm256_mask_expandloadu_epi32(__m256i __W, __mmask8 __U,   void const *__P) {
   // CHECK-LABEL: @test_mm256_mask_expandloadu_epi32
-  // CHECK: @llvm.x86.avx512.mask.expand.load.d.256
+  // CHECK: @llvm.masked.expandload.v8i32(i32* %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
   return _mm256_mask_expandloadu_epi32(__W,__U,__P); 
 }
 __m256i test_mm256_maskz_expandloadu_epi32(__mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi32
-  // CHECK: @llvm.x86.avx512.mask.expand.load.d.256
+  // CHECK: @llvm.masked.expandload.v8i32(i32* %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
   return _mm256_maskz_expandloadu_epi32(__U,__P); 
 }
 __m128 test_mm_mask_expand_ps(__m128 __W, __mmask8 __U, __m128 __A) {

Modified: cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c?rev=334366&r1=334365&r2=334366&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512vlvbmi2-builtins.c Sun Jun 10 10:27:05 2018
@@ -28,13 +28,13 @@ __m128i test_mm_maskz_compress_epi8(__mm
 
 void test_mm_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D) {
   // CHECK-LABEL: @test_mm_mask_compressstoreu_epi16
-  // CHECK: @llvm.x86.avx512.mask.compress.store.w.128
+  // CHECK: @llvm.masked.compressstore.v8i16(<8 x i16> %{{.*}}, i16* %{{.*}}, <8 x i1> %{{.*}})
   _mm_mask_compressstoreu_epi16(__P, __U, __D);
 }
 
 void test_mm_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D) {
   // CHECK-LABEL: @test_mm_mask_compressstoreu_epi8
-  // CHECK: @llvm.x86.avx512.mask.compress.store.b.128
+  // CHECK: @llvm.masked.compressstore.v16i8(<16 x i8> %{{.*}}, i8* %{{.*}}, <16 x i1> %{{.*}})
   _mm_mask_compressstoreu_epi8(__P, __U, __D);
 }
 
@@ -64,25 +64,25 @@ __m128i test_mm_maskz_expand_epi8(__mmas
 
 __m128i test_mm_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const* __P) {
   // CHECK-LABEL: @test_mm_mask_expandloadu_epi16
-  // CHECK: @llvm.x86.avx512.mask.expand.load.w.128
+  // CHECK: @llvm.masked.expandload.v8i16(i16* %{{.*}}, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
   return _mm_mask_expandloadu_epi16(__S, __U, __P);
 }
 
 __m128i test_mm_maskz_expandloadu_epi16(__mmask8 __U, void const* __P) {
   // CHECK-LABEL: @test_mm_maskz_expandloadu_epi16
-  // CHECK: @llvm.x86.avx512.mask.expand.load.w.128
+  // CHECK: @llvm.masked.expandload.v8i16(i16* %{{.*}}, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
   return _mm_maskz_expandloadu_epi16(__U, __P);
 }
 
 __m128i test_mm_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const* __P) {
   // CHECK-LABEL: @test_mm_mask_expandloadu_epi8
-  // CHECK: @llvm.x86.avx512.mask.expand.load.b.128
+  // CHECK: @llvm.masked.expandload.v16i8(i8* %{{.*}}, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
   return _mm_mask_expandloadu_epi8(__S, __U, __P);
 }
 
 __m128i test_mm_maskz_expandloadu_epi8(__mmask16 __U, void const* __P) {
   // CHECK-LABEL: @test_mm_maskz_expandloadu_epi8
-  // CHECK: @llvm.x86.avx512.mask.expand.load.b.128
+  // CHECK: @llvm.masked.expandload.v16i8(i8* %{{.*}}, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
   return _mm_maskz_expandloadu_epi8(__U, __P);
 }
 
@@ -112,13 +112,13 @@ __m256i test_mm256_maskz_compress_epi8(_
 
 void test_mm256_mask_compressstoreu_epi16(void *__P, __mmask16 __U, __m256i __D) {
   // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi16
-  // CHECK: @llvm.x86.avx512.mask.compress.store.w.256
+  // CHECK: @llvm.masked.compressstore.v16i16(<16 x i16> %{{.*}}, i16* %{{.*}}, <16 x i1> %{{.*}})
   _mm256_mask_compressstoreu_epi16(__P, __U, __D);
 }
 
 void test_mm256_mask_compressstoreu_epi8(void *__P, __mmask32 __U, __m256i __D) {
   // CHECK-LABEL: @test_mm256_mask_compressstoreu_epi8
-  // CHECK: @llvm.x86.avx512.mask.compress.store.b.256
+  // CHECK: @llvm.masked.compressstore.v32i8(<32 x i8> %{{.*}}, i8* %{{.*}}, <32 x i1> %{{.*}})
   _mm256_mask_compressstoreu_epi8(__P, __U, __D);
 }
 
@@ -148,25 +148,25 @@ __m256i test_mm256_maskz_expand_epi8(__m
 
 __m256i test_mm256_mask_expandloadu_epi16(__m256i __S, __mmask16 __U, void const* __P) {
   // CHECK-LABEL: @test_mm256_mask_expandloadu_epi16
-  // CHECK: @llvm.x86.avx512.mask.expand.load.w.256
+  // CHECK: @llvm.masked.expandload.v16i16(i16* %{{.*}}, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
   return _mm256_mask_expandloadu_epi16(__S, __U, __P);
 }
 
 __m256i test_mm256_maskz_expandloadu_epi16(__mmask16 __U, void const* __P) {
   // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi16
-  // CHECK: @llvm.x86.avx512.mask.expand.load.w.256
+  // CHECK: @llvm.masked.expandload.v16i16(i16* %{{.*}}, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
   return _mm256_maskz_expandloadu_epi16(__U, __P);
 }
 
 __m256i test_mm256_mask_expandloadu_epi8(__m256i __S, __mmask32 __U, void const* __P) {
   // CHECK-LABEL: @test_mm256_mask_expandloadu_epi8
-  // CHECK: @llvm.x86.avx512.mask.expand.load.b.256
+  // CHECK: @llvm.masked.expandload.v32i8(i8* %{{.*}}, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
   return _mm256_mask_expandloadu_epi8(__S, __U, __P);
 }
 
 __m256i test_mm256_maskz_expandloadu_epi8(__mmask32 __U, void const* __P) {
   // CHECK-LABEL: @test_mm256_maskz_expandloadu_epi8
-  // CHECK: @llvm.x86.avx512.mask.expand.load.b.256
+  // CHECK: @llvm.masked.expandload.v32i8(i8* %{{.*}}, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
   return _mm256_maskz_expandloadu_epi8(__U, __P);
 }
 


_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
