Updated to ignore the upper bits of the immediate operand: the new macros mask the immediate with & 1, so only bit 0 selects the destination 128-bit lane. This matches the hardware, which ignores all but the low bit of the imm8 for VINSERTF128.

http://reviews.llvm.org/D8088
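
For illustration, a minimal sketch of the new immediate handling (the
function name is hypothetical; build with -mavx):

    #include <immintrin.h>

    /* Illustrative only, not part of the patch. An immediate of 2 was
       previously rejected by Sema's range check; now only bit 0 is
       used, so this selects the low lane exactly as 0 would. */
    __m256 demo(__m256 a, __m128 b) {
      return _mm256_insertf128_ps(a, b, 2);
    }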

Files:
  include/clang/Basic/BuiltinsX86.def
  lib/Headers/avxintrin.h
  lib/Sema/SemaChecking.cpp
  test/CodeGen/avx-shuffle-builtins.c
  test/CodeGen/builtins-x86.c

Index: include/clang/Basic/BuiltinsX86.def
===================================================================
--- include/clang/Basic/BuiltinsX86.def
+++ include/clang/Basic/BuiltinsX86.def
@@ -450,9 +450,6 @@
 BUILTIN(__builtin_ia32_vperm2f128_pd256, "V4dV4dV4dIc", "")
 BUILTIN(__builtin_ia32_vperm2f128_ps256, "V8fV8fV8fIc", "")
 BUILTIN(__builtin_ia32_vperm2f128_si256, "V8iV8iV8iIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_pd256, "V4dV4dV2dIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_ps256, "V8fV8fV4fIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_si256, "V8iV8iV4iIc", "")
 BUILTIN(__builtin_ia32_sqrtpd256, "V4dV4d", "")
 BUILTIN(__builtin_ia32_sqrtps256, "V8fV8f", "")
 BUILTIN(__builtin_ia32_rsqrtps256, "V8fV8f", "")
Index: lib/Headers/avxintrin.h
===================================================================
--- lib/Headers/avxintrin.h
+++ lib/Headers/avxintrin.h
@@ -472,22 +472,6 @@
 }
 #endif
 
-/* Vector insert */
-#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
-  __m256d __V1 = (V1); \
-  __m128d __V2 = (V2); \
-  (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })
-
-#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
-  __m256 __V1 = (V1); \
-  __m128 __V2 = (V2); \
-  (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })
-
-#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
-  __m256i __V1 = (V1); \
-  __m128i __V2 = (V2); \
-  (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })
-
 static __inline __m256i __attribute__((__always_inline__, __nodebug__))
 _mm256_insert_epi32(__m256i __a, int __b, int const __imm)
 {
@@ -1166,6 +1150,42 @@
   return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
 }
 
+/*
+   Vector insert.
+   We use macros rather than inlines because we only want to accept
+   invocations where the immediate M is a constant expression.
+*/
+#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
+  __m256 __V1 = (V1); \
+  __m256 __V2 = _mm256_castps128_ps256((__m128)(V2)); \
+  (__m256)__builtin_shufflevector((__v8sf)__V1, (__v8sf)__V2, \
+    (((M) & 1) ?  0 :  8), \
+    (((M) & 1) ?  1 :  9), \
+    (((M) & 1) ?  2 : 10), \
+    (((M) & 1) ?  3 : 11), \
+    (((M) & 1) ?  8 :  4), \
+    (((M) & 1) ?  9 :  5), \
+    (((M) & 1) ? 10 :  6), \
+    (((M) & 1) ? 11 :  7) );})
+
+#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
+  __m256d __V1 = (V1); \
+  __m256d __V2 = _mm256_castpd128_pd256((__m128d)(V2)); \
+  (__m256d)__builtin_shufflevector((__v4df)__V1, (__v4df)__V2, \
+    (((M) & 1) ? 0 : 4), \
+    (((M) & 1) ? 1 : 5), \
+    (((M) & 1) ? 4 : 2), \
+    (((M) & 1) ? 5 : 3) );})
+
+#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
+  __m256i __V1 = (V1); \
+  __m256i __V2 = _mm256_castsi128_si256((__m128i)(V2)); \
+  (__m256i)__builtin_shufflevector((__v4di)__V1, (__v4di)__V2, \
+    (((M) & 1) ? 0 : 4), \
+    (((M) & 1) ? 1 : 5), \
+    (((M) & 1) ? 4 : 2), \
+    (((M) & 1) ? 5 : 3) );})
+
 /* SIMD load ops (unaligned) */
 static __inline __m256 __attribute__((__always_inline__, __nodebug__))
 _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
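
The shuffle indices above address the concatenation of __V1 (elements
0..N-1) and __V2 (elements N..2N-1): when bit 0 of M is clear the low
lane is taken from __V2, and when it is set the high lane is. A
standalone sketch of the expected results for the pd case (illustrative,
not part of the patch; build with -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256d v1 = _mm256_set_pd(3.0, 2.0, 1.0, 0.0); /* memory order: 0 1 2 3 */
      __m128d v2 = _mm_set_pd(9.0, 8.0);              /* memory order: 8 9 */
      double out[4];
      _mm256_storeu_pd(out, _mm256_insertf128_pd(v1, v2, 0));
      printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 8 9 2 3 */
      _mm256_storeu_pd(out, _mm256_insertf128_pd(v1, v2, 1));
      printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 0 1 8 9 */
      return 0;
    }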
Index: lib/Sema/SemaChecking.cpp
===================================================================
--- lib/Sema/SemaChecking.cpp
+++ lib/Sema/SemaChecking.cpp
@@ -882,9 +882,6 @@
   case X86::BI__builtin_ia32_vextractf128_ps256:
   case X86::BI__builtin_ia32_vextractf128_si256:
   case X86::BI__builtin_ia32_extract128i256: i = 1, l = 0, u = 1; break;
-  case X86::BI__builtin_ia32_vinsertf128_pd256:
-  case X86::BI__builtin_ia32_vinsertf128_ps256:
-  case X86::BI__builtin_ia32_vinsertf128_si256:
   case X86::BI__builtin_ia32_insert128i256: i = 2, l = 0; u = 1; break;
   case X86::BI__builtin_ia32_sha1rnds4: i = 2, l = 0; u = 3; break;
   case X86::BI__builtin_ia32_vpermil2pd:
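
With the builtins gone, their Sema range check goes too: an immediate
outside [0, 1] is no longer a compile-time error but is masked down to
bit 0 by the macros. A hedged sketch (the function name is
hypothetical):

    #include <immintrin.h>

    /* Hypothetical example: previously rejected by the removed l/u
       check; now accepted, and 7 & 1 == 1 selects the high lane. */
    __m256i was_an_error(__m256i a, __m128i b) {
      return _mm256_insertf128_si256(a, b, 7);
    }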
Index: test/CodeGen/avx-shuffle-builtins.c
===================================================================
--- test/CodeGen/avx-shuffle-builtins.c
+++ test/CodeGen/avx-shuffle-builtins.c
@@ -97,3 +97,42 @@
   // CHECK: insertelement <8 x float> {{.*}}, i32 7
   return _mm256_broadcast_ss(__a);
 }
+
+// Make sure we have the correct mask for each insertf128 case.
+
+__m256 test_mm256_insertf128_ps_0(__m256 a, __m128 b) {
+  // CHECK-LABEL: @test_mm256_insertf128_ps_0
+  // CHECK: shufflevector{{.*}}<i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+  return _mm256_insertf128_ps(a, b, 0);
+}
+
+__m256d test_mm256_insertf128_pd_0(__m256d a, __m128d b) {
+  // CHECK-LABEL: @test_mm256_insertf128_pd_0
+  // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 2, i32 3>
+  return _mm256_insertf128_pd(a, b, 0);
+}
+
+__m256i test_mm256_insertf128_si256_0(__m256i a, __m128i b) {
+  // CHECK-LABEL: @test_mm256_insertf128_si256_0
+  // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 2, i32 3>
+  return _mm256_insertf128_si256(a, b, 0);
+}
+
+__m256 test_mm256_insertf128_ps_1(__m256 a, __m128 b) {
+  // CHECK-LABEL: @test_mm256_insertf128_ps_1
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  return _mm256_insertf128_ps(a, b, 1);
+}
+
+__m256d test_mm256_insertf128_pd_1(__m256d a, __m128d b) {
+  // CHECK-LABEL: @test_mm256_insertf128_pd_1
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
+  return _mm256_insertf128_pd(a, b, 1);
+}
+
+__m256i test_mm256_insertf128_si256_1(__m256i a, __m128i b) {
+  // CHECK-LABEL: @test_mm256_insertf128_si256_1
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
+  return _mm256_insertf128_si256(a, b, 1);
+}
+
Index: test/CodeGen/builtins-x86.c
===================================================================
--- test/CodeGen/builtins-x86.c
+++ test/CodeGen/builtins-x86.c
@@ -419,9 +419,6 @@
   tmp_V4d = __builtin_ia32_vperm2f128_pd256(tmp_V4d, tmp_V4d, 0x7);
   tmp_V8f = __builtin_ia32_vperm2f128_ps256(tmp_V8f, tmp_V8f, 0x7);
   tmp_V8i = __builtin_ia32_vperm2f128_si256(tmp_V8i, tmp_V8i, 0x7);
-  tmp_V4d = __builtin_ia32_vinsertf128_pd256(tmp_V4d, tmp_V2d, 0x1);
-  tmp_V8f = __builtin_ia32_vinsertf128_ps256(tmp_V8f, tmp_V4f, 0x1);
-  tmp_V8i = __builtin_ia32_vinsertf128_si256(tmp_V8i, tmp_V4i, 0x1);
   tmp_V4d = __builtin_ia32_sqrtpd256(tmp_V4d);
   tmp_V8f = __builtin_ia32_sqrtps256(tmp_V8f);
   tmp_V8f = __builtin_ia32_rsqrtps256(tmp_V8f);