On Tue, Mar 10, 2015 at 8:19 AM, Sanjay Patel <[email protected]> wrote:
> Author: spatel
> Date: Tue Mar 10 10:19:26 2015
> New Revision: 231792
>
> URL: http://llvm.org/viewvc/llvm-project?rev=231792&view=rev
> Log:
> [X86, AVX] Replace vinsertf128 intrinsics with generic shuffles.
>
> We want to replace as much custom x86 shuffling via intrinsics
> as possible because pushing the code down the generic shuffle
> optimization path allows for better codegen and less complexity
> in LLVM.
>
> This is the sibling patch for the LLVM half of this change:
> http://reviews.llvm.org/D8086
>
> Differential Revision: http://reviews.llvm.org/D8088
>
> Modified:
>     cfe/trunk/include/clang/Basic/BuiltinsX86.def
>     cfe/trunk/lib/Headers/avxintrin.h
>     cfe/trunk/lib/Sema/SemaChecking.cpp
>     cfe/trunk/test/CodeGen/avx-shuffle-builtins.c
>     cfe/trunk/test/CodeGen/builtins-x86.c
>
> Modified: cfe/trunk/include/clang/Basic/BuiltinsX86.def
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/BuiltinsX86.def?rev=231792&r1=231791&r2=231792&view=diff
> ==============================================================================
> --- cfe/trunk/include/clang/Basic/BuiltinsX86.def (original)
> +++ cfe/trunk/include/clang/Basic/BuiltinsX86.def Tue Mar 10 10:19:26 2015
> @@ -450,9 +450,6 @@ BUILTIN(__builtin_ia32_cvttps2dq256, "V8
>  BUILTIN(__builtin_ia32_vperm2f128_pd256, "V4dV4dV4dIc", "")
>  BUILTIN(__builtin_ia32_vperm2f128_ps256, "V8fV8fV8fIc", "")
>  BUILTIN(__builtin_ia32_vperm2f128_si256, "V8iV8iV8iIc", "")
> -BUILTIN(__builtin_ia32_vinsertf128_pd256, "V4dV4dV2dIc", "")
> -BUILTIN(__builtin_ia32_vinsertf128_ps256, "V8fV8fV4fIc", "")
> -BUILTIN(__builtin_ia32_vinsertf128_si256, "V8iV8iV4iIc", "")
>  BUILTIN(__builtin_ia32_sqrtpd256, "V4dV4d", "")
>  BUILTIN(__builtin_ia32_sqrtps256, "V8fV8f", "")
>  BUILTIN(__builtin_ia32_rsqrtps256, "V8fV8f", "")
>
> Modified: cfe/trunk/lib/Headers/avxintrin.h
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/avxintrin.h?rev=231792&r1=231791&r2=231792&view=diff
> ==============================================================================
> --- cfe/trunk/lib/Headers/avxintrin.h (original)
> +++ cfe/trunk/lib/Headers/avxintrin.h Tue Mar 10 10:19:26 2015
> @@ -472,22 +472,6 @@ _mm256_extract_epi64(__m256i __a, const
>  }
>  #endif
>
> -/* Vector insert */
> -#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
> -  __m256d __V1 = (V1); \
> -  __m128d __V2 = (V2); \
> -  (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })
> -
> -#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
> -  __m256 __V1 = (V1); \
> -  __m128 __V2 = (V2); \
> -  (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })
> -
> -#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
> -  __m256i __V1 = (V1); \
> -  __m128i __V2 = (V2); \
> -  (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })
> -
>  static __inline __m256i __attribute__((__always_inline__, __nodebug__))
>  _mm256_insert_epi32(__m256i __a, int __b, int const __imm)
>  {
> @@ -1166,6 +1150,42 @@ _mm256_castsi128_si256(__m128i __a)
>    return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
>  }
>
> +/*
> +   Vector insert.
> +   We use macros rather than inlines because we only want to accept
> +   invocations where the immediate M is a constant expression.
> +*/
>

I wonder if we could use the enable_if attribute
http://clang.llvm.org/docs/AttributeReference.html#enable-if together
with __builtin_constant_p to get the same effect, but with a more
readable implementation.
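Something along these lines, say (completely untested, and the helper
name is purely illustrative):

  /* Hypothetical helper: a call is well-formed only when the argument
     constant-folds, so a non-constant immediate produces the attached
     message instead of a pile of macro-expansion errors. */
  static __inline int __require_constant(int __imm)
      __attribute__((enable_if(__builtin_constant_p(__imm),
                               "the immediate must be a constant expression")));

  static __inline int __require_constant(int __imm) { return __imm; }

The __builtin_shufflevector indices would still have to be integer
constant expressions, though, so this might only buy us nicer
diagnostics rather than letting the macros become real inline
functions.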
-- Sean Silva

> +#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
> +  (__m256)__builtin_shufflevector( \
> +    (__v8sf)(V1), \
> +    (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
> +    (((M) & 1) ? 0 : 8), \
> +    (((M) & 1) ? 1 : 9), \
> +    (((M) & 1) ? 2 : 10), \
> +    (((M) & 1) ? 3 : 11), \
> +    (((M) & 1) ? 8 : 4), \
> +    (((M) & 1) ? 9 : 5), \
> +    (((M) & 1) ? 10 : 6), \
> +    (((M) & 1) ? 11 : 7) );})
> +
> +#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
> +  (__m256d)__builtin_shufflevector( \
> +    (__v4df)(V1), \
> +    (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
> +    (((M) & 1) ? 0 : 4), \
> +    (((M) & 1) ? 1 : 5), \
> +    (((M) & 1) ? 4 : 2), \
> +    (((M) & 1) ? 5 : 3) );})
> +
> +#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
> +  (__m256i)__builtin_shufflevector( \
> +    (__v4di)(V1), \
> +    (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
> +    (((M) & 1) ? 0 : 4), \
> +    (((M) & 1) ? 1 : 5), \
> +    (((M) & 1) ? 4 : 2), \
> +    (((M) & 1) ? 5 : 3) );})
> +
>  /* SIMD load ops (unaligned) */
>  static __inline __m256 __attribute__((__always_inline__, __nodebug__))
>  _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
>
> Modified: cfe/trunk/lib/Sema/SemaChecking.cpp
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Sema/SemaChecking.cpp?rev=231792&r1=231791&r2=231792&view=diff
> ==============================================================================
> --- cfe/trunk/lib/Sema/SemaChecking.cpp (original)
> +++ cfe/trunk/lib/Sema/SemaChecking.cpp Tue Mar 10 10:19:26 2015
> @@ -882,9 +882,6 @@ bool Sema::CheckX86BuiltinFunctionCall(u
>    case X86::BI__builtin_ia32_vextractf128_ps256:
>    case X86::BI__builtin_ia32_vextractf128_si256:
>    case X86::BI__builtin_ia32_extract128i256: i = 1, l = 0, u = 1; break;
> -  case X86::BI__builtin_ia32_vinsertf128_pd256:
> -  case X86::BI__builtin_ia32_vinsertf128_ps256:
> -  case X86::BI__builtin_ia32_vinsertf128_si256:
>    case X86::BI__builtin_ia32_insert128i256: i = 2, l = 0; u = 1; break;
>    case X86::BI__builtin_ia32_sha1rnds4: i = 2, l = 0; u = 3; break;
>    case X86::BI__builtin_ia32_vpermil2pd:
>
> Modified: cfe/trunk/test/CodeGen/avx-shuffle-builtins.c
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx-shuffle-builtins.c?rev=231792&r1=231791&r2=231792&view=diff
> ==============================================================================
> --- cfe/trunk/test/CodeGen/avx-shuffle-builtins.c (original)
> +++ cfe/trunk/test/CodeGen/avx-shuffle-builtins.c Tue Mar 10 10:19:26 2015
> @@ -97,3 +97,42 @@ test_mm256_broadcast_ss(float const *__a
>    // CHECK: insertelement <8 x float> {{.*}}, i32 7
>    return _mm256_broadcast_ss(__a);
>  }
> +
> +// Make sure we have the correct mask for each insertf128 case.
> +
> +__m256 test_mm256_insertf128_ps_0(__m256 a, __m128 b) {
> +  // CHECK-LABEL: @test_mm256_insertf128_ps_0
> +  // CHECK: shufflevector{{.*}}<i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
> +  return _mm256_insertf128_ps(a, b, 0);
> +}
> +
> +__m256d test_mm256_insertf128_pd_0(__m256d a, __m128d b) {
> +  // CHECK-LABEL: @test_mm256_insertf128_pd_0
> +  // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 2, i32 3>
> +  return _mm256_insertf128_pd(a, b, 0);
> +}
> +
> +__m256i test_mm256_insertf128_si256_0(__m256i a, __m128i b) {
> +  // CHECK-LABEL: @test_mm256_insertf128_si256_0
> +  // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 2, i32 3>
> +  return _mm256_insertf128_si256(a, b, 0);
> +}
> +
> +__m256 test_mm256_insertf128_ps_1(__m256 a, __m128 b) {
> +  // CHECK-LABEL: @test_mm256_insertf128_ps_1
> +  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
> +  return _mm256_insertf128_ps(a, b, 1);
> +}
> +
> +__m256d test_mm256_insertf128_pd_1(__m256d a, __m128d b) {
> +  // CHECK-LABEL: @test_mm256_insertf128_pd_1
> +  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
> +  return _mm256_insertf128_pd(a, b, 1);
> +}
> +
> +__m256i test_mm256_insertf128_si256_1(__m256i a, __m128i b) {
> +  // CHECK-LABEL: @test_mm256_insertf128_si256_1
> +  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
> +  return _mm256_insertf128_si256(a, b, 1);
> +}
> +
>
> Modified: cfe/trunk/test/CodeGen/builtins-x86.c
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-x86.c?rev=231792&r1=231791&r2=231792&view=diff
> ==============================================================================
> --- cfe/trunk/test/CodeGen/builtins-x86.c (original)
> +++ cfe/trunk/test/CodeGen/builtins-x86.c Tue Mar 10 10:19:26 2015
> @@ -419,9 +419,6 @@ void f0() {
>    tmp_V4d = __builtin_ia32_vperm2f128_pd256(tmp_V4d, tmp_V4d, 0x7);
>    tmp_V8f = __builtin_ia32_vperm2f128_ps256(tmp_V8f, tmp_V8f, 0x7);
>    tmp_V8i = __builtin_ia32_vperm2f128_si256(tmp_V8i, tmp_V8i, 0x7);
> -  tmp_V4d = __builtin_ia32_vinsertf128_pd256(tmp_V4d, tmp_V2d, 0x1);
> -  tmp_V8f = __builtin_ia32_vinsertf128_ps256(tmp_V8f, tmp_V4f, 0x1);
> -  tmp_V8i = __builtin_ia32_vinsertf128_si256(tmp_V8i, tmp_V4i, 0x1);
> -  tmp_V4d = __builtin_ia32_vinsertf128_pd256(tmp_V4d, tmp_V2d, 0x1);
>    tmp_V4d = __builtin_ia32_sqrtpd256(tmp_V4d);
>    tmp_V8f = __builtin_ia32_sqrtps256(tmp_V8f);
>    tmp_V8f = __builtin_ia32_rsqrtps256(tmp_V8f);
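To spell out my reading of the new mask logic: bit 0 of M picks the
destination lane, and indices 8 and up address the 128-bit source
after the cast widens it to 256 bits. For M == 1 in the ps case, the
macro is equivalent to this (variable names are just for
illustration):

  /* _mm256_insertf128_ps(a, b, 1): keep a's low lane, put b in the
     high lane. Indices 0..7 address a; 8..15 address the widened b. */
  __m256 r = (__m256)__builtin_shufflevector(
      (__v8sf)a,
      (__v8sf)_mm256_castps128_ps256(b),
      0, 1, 2, 3,     /* low lane: elements 0..3 of a    */
      8, 9, 10, 11);  /* high lane: the four floats of b */

which matches the <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10,
i32 11> mask the new test checks for.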
_______________________________________________
cfe-commits mailing list
[email protected]
http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits
