On Sun, 27 Mar 2016, Allan Sandfeld Jensen wrote:

> Would it be possible to add constexpr to the intrinsics headers?
>
> For instance _mm_set_XX and _mm_setzero intrinsics.

Already suggested here:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65197

A patch would be welcome (I started doing it at some point; I don't remember whether it was functional; the patch is attached).
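
For illustration, the intent is that with the patch applied, in a
translation unit compiled as C++11 or later (with, say, SSE2 enabled),
the marked intrinsics become usable in constant expressions. A minimal
sketch, assuming the patch is functional as posted; the variable names
are only examples:

  #include <emmintrin.h>

  /* __GCC_X86_CONSTEXPR11 expands to `constexpr' for C++11 and later,
     and to nothing otherwise (in particular in plain C), so C users
     see no change.  __GCC_X86_CONSTEXPR14 is defined but unused in the
     hunks below; it is presumably meant for intrinsics whose bodies
     need C++14 relaxed constexpr.  */
  constexpr __m128i zero = _mm_setzero_si128 ();
  constexpr __m128i ones = _mm_set1_epi32 (1);
  constexpr __m128i sum  = _mm_add_epi32 (zero, ones);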

> Ideally it could also be added to all intrinsics that can be evaluated
> at compile time, but it is harder to tell which those are.

> Does gcc have a C extension we can use to set constexpr?

What for?

--
Marc Glisse
Index: gcc/config/i386/avx2intrin.h
===================================================================
--- gcc/config/i386/avx2intrin.h        (revision 223886)
+++ gcc/config/i386/avx2intrin.h        (working copy)
@@ -93,41 +93,45 @@ _mm256_packus_epi32 (__m256i __A, __m256
   return (__m256i)__builtin_ia32_packusdw256 ((__v8si)__A, (__v8si)__B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_packus_epi16 (__m256i __A, __m256i __B)
 {
   return (__m256i)__builtin_ia32_packuswb256 ((__v16hi)__A, (__v16hi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_add_epi8 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v32qu)__A + (__v32qu)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_add_epi16 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v16hu)__A + (__v16hu)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_add_epi32 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v8su)__A + (__v8su)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_add_epi64 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v4du)__A + (__v4du)__B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_adds_epi8 (__m256i __A, __m256i __B)
@@ -167,20 +171,21 @@ _mm256_alignr_epi8 (__m256i __A, __m256i
 }
 #else
 /* In that case (__N*8) will be in vreg, and insn will not be matched. */
 /* Use define instead */
 #define _mm256_alignr_epi8(A, B, N)                               \
   ((__m256i) __builtin_ia32_palignr256 ((__v4di)(__m256i)(A),     \
                                        (__v4di)(__m256i)(B),      \
                                        (int)(N) * 8))
 #endif
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_and_si256 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v4du)__A & (__v4du)__B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_andnot_si256 (__m256i __A, __m256i __B)
@@ -219,69 +224,77 @@ _mm256_blend_epi16 (__m256i __X, __m256i
   return (__m256i) __builtin_ia32_pblendw256 ((__v16hi)__X,
                                              (__v16hi)__Y,
                                               __M);
 }
 #else
 #define _mm256_blend_epi16(X, Y, M)                                    \
   ((__m256i) __builtin_ia32_pblendw256 ((__v16hi)(__m256i)(X),         \
                                        (__v16hi)(__m256i)(Y), (int)(M)))
 #endif
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_cmpeq_epi8 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v32qi)__A == (__v32qi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_cmpeq_epi16 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v16hi)__A == (__v16hi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_cmpeq_epi32 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v8si)__A == (__v8si)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_cmpeq_epi64 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v4di)__A == (__v4di)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_cmpgt_epi8 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v32qi)__A > (__v32qi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_cmpgt_epi16 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v16hi)__A > (__v16hi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_cmpgt_epi32 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v8si)__A > (__v8si)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_cmpgt_epi64 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v4di)__A > (__v4di)__B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_hadd_epi16 (__m256i __X, __m256i __Y)
@@ -541,41 +554,44 @@ _mm256_mulhi_epu16 (__m256i __A, __m256i
   return (__m256i)__builtin_ia32_pmulhuw256 ((__v16hi)__A, (__v16hi)__B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mulhi_epi16 (__m256i __A, __m256i __B)
 {
   return (__m256i)__builtin_ia32_pmulhw256 ((__v16hi)__A, (__v16hi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mullo_epi16 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v16hu)__A * (__v16hu)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mullo_epi32 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v8su)__A * (__v8su)__B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mul_epu32 (__m256i __A, __m256i __B)
 {
   return (__m256i)__builtin_ia32_pmuludq256 ((__v8si)__A, (__v8si)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_or_si256 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v4du)__A | (__v4du)__B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_sad_epu8 (__m256i __A, __m256i __B)
@@ -789,41 +805,45 @@ _mm256_srli_epi64 (__m256i __A, int __B)
   return (__m256i)__builtin_ia32_psrlqi256 ((__v4di)__A, __B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_srl_epi64 (__m256i __A, __m128i __B)
 {
   return (__m256i)__builtin_ia32_psrlq256((__v4di)__A, (__v2di)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_sub_epi8 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v32qu)__A - (__v32qu)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_sub_epi16 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v16hu)__A - (__v16hu)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_sub_epi32 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v8su)__A - (__v8su)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_sub_epi64 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v4du)__A - (__v4du)__B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_subs_epi8 (__m256i __A, __m256i __B)
@@ -901,20 +921,21 @@ _mm256_unpacklo_epi32 (__m256i __A, __m2
   return (__m256i)__builtin_ia32_punpckldq256 ((__v8si)__A, (__v8si)__B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_unpacklo_epi64 (__m256i __A, __m256i __B)
 {
   return (__m256i)__builtin_ia32_punpcklqdq256 ((__v4di)__A, (__v4di)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_xor_si256 (__m256i __A, __m256i __B)
 {
   return (__m256i) ((__v4du)__A ^ (__v4du)__B);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_stream_load_si256 (__m256i const *__X)
Index: gcc/config/i386/avx512bwintrin.h
===================================================================
--- gcc/config/i386/avx512bwintrin.h    (revision 223886)
+++ gcc/config/i386/avx512bwintrin.h    (working copy)
@@ -33,34 +33,36 @@
 #pragma GCC target("avx512bw")
 #define __DISABLE_AVX512BW__
 #endif /* __AVX512BW__ */
 
 /* Internal data types for implementing the intrinsics.  */
 typedef short __v32hi __attribute__ ((__vector_size__ (64)));
 typedef char __v64qi __attribute__ ((__vector_size__ (64)));
 
 typedef unsigned long long __mmask64;
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_setzero_qi (void)
 {
   return __extension__ (__m512i)(__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0,
                                           0, 0, 0, 0, 0, 0, 0, 0,
                                           0, 0, 0, 0, 0, 0, 0, 0,
                                           0, 0, 0, 0, 0, 0, 0, 0,
                                           0, 0, 0, 0, 0, 0, 0, 0,
                                           0, 0, 0, 0, 0, 0, 0, 0,
                                           0, 0, 0, 0, 0, 0, 0, 0,
                                           0, 0, 0, 0, 0, 0, 0, 0 };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_setzero_hi (void)
 {
   return __extension__ (__m512i)(__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0,
                                           0, 0, 0, 0, 0, 0, 0, 0,
                                           0, 0, 0, 0, 0, 0, 0, 0,
                                           0, 0, 0, 0, 0, 0, 0, 0 };
 }
 
@@ -657,20 +659,21 @@ extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
 {
   return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
                                                 (__v64qi) __B,
                                                 (__v64qi)
                                                 _mm512_setzero_qi(),
                                                 (__mmask64) __U);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_add_epi8 (__m512i __A, __m512i __B)
 {
   return (__m512i) ((__v64qu) __A + (__v64qu) __B);
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mask_add_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
@@ -686,20 +689,21 @@ extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
 {
   return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
                                                 (__v64qi) __B,
                                                 (__v64qi)
                                                 _mm512_setzero_qi (),
                                                 (__mmask64) __U);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_sub_epi8 (__m512i __A, __m512i __B)
 {
   return (__m512i) ((__v64qu) __A - (__v64qu) __B);
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mask_sub_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
@@ -880,20 +884,21 @@ extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
 {
   return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
                                                   (__v64qi) __B,
                                                   (__v64qi)
                                                   _mm512_setzero_qi (),
                                                   (__mmask64) __U);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_sub_epi16 (__m512i __A, __m512i __B)
 {
   return (__m512i) ((__v32hu) __A - (__v32hu) __B);
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mask_sub_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
@@ -975,20 +980,21 @@ extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
 {
   return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
                                                   (__v32hi) __B,
                                                   (__v32hi)
                                                   _mm512_setzero_hi (),
                                                   (__mmask32) __U);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_add_epi16 (__m512i __A, __m512i __B)
 {
   return (__m512i) ((__v32hu) __A + (__v32hu) __B);
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mask_add_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
Index: gcc/config/i386/avx512fintrin.h
===================================================================
--- gcc/config/i386/avx512fintrin.h     (revision 223886)
+++ gcc/config/i386/avx512fintrin.h     (working copy)
@@ -48,52 +48,56 @@ typedef unsigned char __v64qu __attribut
 
 /* The Intel API is flexible enough that we must allow aliasing with other
    vector types, and their scalar components.  */
 typedef float __m512 __attribute__ ((__vector_size__ (64), __may_alias__));
 typedef long long __m512i __attribute__ ((__vector_size__ (64), __may_alias__));
 typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__));
 
 typedef unsigned char  __mmask8;
 typedef unsigned short __mmask16;
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_set_epi64 (long long __A, long long __B, long long __C,
                  long long __D, long long __E, long long __F,
                  long long __G, long long __H)
 {
   return __extension__ (__m512i) (__v8di)
         { __H, __G, __F, __E, __D, __C, __B, __A };
 }
 
 /* Create the vector [A B C D E F G H I J K L M N O P].  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_set_epi32 (int __A, int __B, int __C, int __D,
                  int __E, int __F, int __G, int __H,
                  int __I, int __J, int __K, int __L,
                  int __M, int __N, int __O, int __P)
 {
   return __extension__ (__m512i)(__v16si)
         { __P, __O, __N, __M, __L, __K, __J, __I,
           __H, __G, __F, __E, __D, __C, __B, __A };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512d
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_set_pd (double __A, double __B, double __C, double __D,
               double __E, double __F, double __G, double __H)
 {
   return __extension__ (__m512d)
         { __H, __G, __F, __E, __D, __C, __B, __A };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_set_ps (float __A, float __B, float __C, float __D,
               float __E, float __F, float __G, float __H,
               float __I, float __J, float __K, float __L,
               float __M, float __N, float __O, float __P)
 {
   return __extension__ (__m512)
         { __P, __O, __N, __M, __L, __K, __J, __I,
           __H, __G, __F, __E, __D, __C, __B, __A };
@@ -129,35 +133,37 @@ _mm512_undefined_pd (void)
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_undefined_si512 (void)
 {
   __m512i __Y = __Y;
   return __Y;
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_set1_epi8 (char __A)
 {
   return __extension__ (__m512i)(__v64qi)
         { __A, __A, __A, __A, __A, __A, __A, __A,
           __A, __A, __A, __A, __A, __A, __A, __A,
           __A, __A, __A, __A, __A, __A, __A, __A,
           __A, __A, __A, __A, __A, __A, __A, __A,
           __A, __A, __A, __A, __A, __A, __A, __A,
           __A, __A, __A, __A, __A, __A, __A, __A,
           __A, __A, __A, __A, __A, __A, __A, __A,
           __A, __A, __A, __A, __A, __A, __A, __A };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_set1_epi16 (short __A)
 {
   return __extension__ (__m512i)(__v32hi)
         { __A, __A, __A, __A, __A, __A, __A, __A,
           __A, __A, __A, __A, __A, __A, __A, __A,
           __A, __A, __A, __A, __A, __A, __A, __A,
           __A, __A, __A, __A, __A, __A, __A, __A };
 }
@@ -178,46 +184,50 @@ __attribute__ ((__gnu_inline__, __always
 _mm512_set1_ps (float __A)
 {
   return (__m512) __builtin_ia32_broadcastss512 (__extension__
                                                 (__v4sf) { __A, },
                                                 (__v16sf)
                                                 _mm512_undefined_ps (),
                                                 (__mmask16) -1);
 }
 
 /* Create the vector [A B C D A B C D A B C D A B C D].  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_set4_epi32 (int __A, int __B, int __C, int __D)
 {
   return __extension__ (__m512i)(__v16si)
         { __D, __C, __B, __A, __D, __C, __B, __A,
           __D, __C, __B, __A, __D, __C, __B, __A };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_set4_epi64 (long long __A, long long __B, long long __C,
                   long long __D)
 {
   return __extension__ (__m512i) (__v8di)
         { __D, __C, __B, __A, __D, __C, __B, __A };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512d
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_set4_pd (double __A, double __B, double __C, double __D)
 {
   return __extension__ (__m512d)
         { __D, __C, __B, __A, __D, __C, __B, __A };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_set4_ps (float __A, float __B, float __C, float __D)
 {
   return __extension__ (__m512)
         { __D, __C, __B, __A, __D, __C, __B, __A,
           __D, __C, __B, __A, __D, __C, __B, __A };
 }
 
 #define _mm512_setr4_epi64(e0,e1,e2,e3)                                      \
@@ -225,42 +235,46 @@ _mm512_set4_ps (float __A, float __B, fl
 
 #define _mm512_setr4_epi32(e0,e1,e2,e3)                                      \
   _mm512_set4_epi32(e3,e2,e1,e0)
 
 #define _mm512_setr4_pd(e0,e1,e2,e3)                                         \
   _mm512_set4_pd(e3,e2,e1,e0)
 
 #define _mm512_setr4_ps(e0,e1,e2,e3)                                         \
   _mm512_set4_ps(e3,e2,e1,e0)
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_setzero_ps (void)
 {
   return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512d
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_setzero_pd (void)
 {
   return __extension__ (__m512d) { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_setzero_epi32 (void)
 {
   return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_setzero_si512 (void)
 {
   return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
 }
 
 extern __inline __m512d
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
@@ -508,20 +522,21 @@ _mm512_store_epi32 (void *__P, __m512i _
 }
 
 extern __inline void
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A)
 {
   __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A,
                                        (__mmask16) __U);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mullo_epi32 (__m512i __A, __m512i __B)
 {
   return (__m512i) ((__v16su) __A * (__v16su) __B);
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_maskz_mullo_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
@@ -631,20 +646,21 @@ extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_maskz_srlv_epi32 (__mmask16 __U, __m512i __X, __m512i __Y)
 {
   return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
                                                  (__v16si) __Y,
                                                  (__v16si)
                                                  _mm512_setzero_si512 (),
                                                  (__mmask16) __U);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_add_epi64 (__m512i __A, __m512i __B)
 {
   return (__m512i) ((__v8du) __A + (__v8du) __B);
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mask_add_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
@@ -659,20 +675,21 @@ extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_maskz_add_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
 {
   return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
                                                 (__v8di) __B,
                                                 (__v8di)
                                                 _mm512_setzero_si512 (),
                                                 (__mmask8) __U);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_sub_epi64 (__m512i __A, __m512i __B)
 {
   return (__m512i) ((__v8du) __A - (__v8du) __B);
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mask_sub_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
@@ -783,20 +800,21 @@ extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_maskz_srlv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
 {
   return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
                                                 (__v8di) __Y,
                                                 (__v8di)
                                                 _mm512_setzero_si512 (),
                                                 (__mmask8) __U);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_add_epi32 (__m512i __A, __m512i __B)
 {
   return (__m512i) ((__v16su) __A + (__v16su) __B);
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mask_add_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
@@ -842,20 +860,21 @@ extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_maskz_mul_epi32 (__mmask8 __M, __m512i __X, __m512i __Y)
 {
   return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
                                                  (__v16si) __Y,
                                                  (__v8di)
                                                  _mm512_setzero_si512 (),
                                                  __M);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_sub_epi32 (__m512i __A, __m512i __B)
 {
   return (__m512i) ((__v16su) __A - (__v16su) __B);
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_mask_sub_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
Index: gcc/config/i386/avxintrin.h
===================================================================
--- gcc/config/i386/avxintrin.h (revision 223886)
+++ gcc/config/i386/avxintrin.h (working copy)
@@ -118,26 +118,28 @@ typedef double __m256d __attribute__ ((_
 #define _CMP_FALSE_OS  0x1b
 /* Not-equal (ordered, signaling)  */
 #define _CMP_NEQ_OS    0x1c
 /* Greater-than-or-equal (ordered, non-signaling)  */
 #define _CMP_GE_OQ     0x1d
 /* Greater-than (ordered, non-signaling)  */
 #define _CMP_GT_OQ     0x1e
 /* True (unordered, signaling)  */
 #define _CMP_TRUE_US   0x1f
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_add_pd (__m256d __A, __m256d __B)
 {
   return (__m256d) ((__v4df)__A + (__v4df)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_add_ps (__m256 __A, __m256 __B)
 {
   return (__m256) ((__v8sf)__A + (__v8sf)__B);
 }
 
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_addsub_pd (__m256d __A, __m256d __B)
 {
   return (__m256d) __builtin_ia32_addsubpd256 ((__v4df)__A, (__v4df)__B);
@@ -212,26 +214,28 @@ _mm256_blendv_pd (__m256d __X, __m256d _
 }
 
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_blendv_ps (__m256 __X, __m256 __Y, __m256 __M)
 {
   return (__m256) __builtin_ia32_blendvps256 ((__v8sf)__X,
                                              (__v8sf)__Y,
                                              (__v8sf)__M);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_div_pd (__m256d __A, __m256d __B)
 {
   return (__m256d) ((__v4df)__A / (__v4df)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_div_ps (__m256 __A, __m256 __B)
 {
   return (__m256) ((__v8sf)__A / (__v8sf)__B);
 }
 
 /* Dot product instructions with mask-defined summing and zeroing parts
    of result.  */
 
 #ifdef __OPTIMIZE__
@@ -289,26 +293,28 @@ _mm256_min_pd (__m256d __A, __m256d __B)
 {
   return (__m256d) __builtin_ia32_minpd256 ((__v4df)__A, (__v4df)__B);
 }
 
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_min_ps (__m256 __A, __m256 __B)
 {
   return (__m256) __builtin_ia32_minps256 ((__v8sf)__A, (__v8sf)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mul_pd (__m256d __A, __m256d __B)
 {
   return (__m256d) ((__v4df)__A * (__v4df)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mul_ps (__m256 __A, __m256 __B)
 {
   return (__m256) ((__v8sf)__A * (__v8sf)__B);
 }
 
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_or_pd (__m256d __A, __m256d __B)
 {
   return (__m256d) __builtin_ia32_orpd256 ((__v4df)__A, (__v4df)__B);
@@ -337,26 +343,28 @@ _mm256_shuffle_ps (__m256 __A, __m256 __
 #else
 #define _mm256_shuffle_pd(A, B, N)                                     \
   ((__m256d)__builtin_ia32_shufpd256 ((__v4df)(__m256d)(A),            \
                                      (__v4df)(__m256d)(B), (int)(N)))
 
 #define _mm256_shuffle_ps(A, B, N)                                     \
   ((__m256) __builtin_ia32_shufps256 ((__v8sf)(__m256)(A),             \
                                      (__v8sf)(__m256)(B), (int)(N)))
 #endif
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_sub_pd (__m256d __A, __m256d __B)
 {
   return (__m256d) ((__v4df)__A - (__v4df)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_sub_ps (__m256 __A, __m256 __B)
 {
   return (__m256) ((__v8sf)__A - (__v8sf)__B);
 }
 
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_xor_pd (__m256d __A, __m256d __B)
 {
   return (__m256d) __builtin_ia32_xorpd256 ((__v4df)__A, (__v4df)__B);
@@ -1185,201 +1193,222 @@ _mm256_undefined_ps (void)
   return __Y;
 }
 
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_undefined_si256 (void)
 {
   __m256i __Y = __Y;
   return __Y;
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_setzero_pd (void)
 {
   return __extension__ (__m256d){ 0.0, 0.0, 0.0, 0.0 };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_setzero_ps (void)
 {
   return __extension__ (__m256){ 0.0, 0.0, 0.0, 0.0,
                                 0.0, 0.0, 0.0, 0.0 };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_setzero_si256 (void)
 {
   return __extension__ (__m256i)(__v4di){ 0, 0, 0, 0 };
 }
 
 /* Create the vector [A B C D].  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set_pd (double __A, double __B, double __C, double __D)
 {
   return __extension__ (__m256d){ __D, __C, __B, __A };
 }
 
 /* Create the vector [A B C D E F G H].  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set_ps (float __A, float __B, float __C, float __D,
               float __E, float __F, float __G, float __H)
 {
   return __extension__ (__m256){ __H, __G, __F, __E,
                                 __D, __C, __B, __A };
 }
 
 /* Create the vector [A B C D E F G H].  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set_epi32 (int __A, int __B, int __C, int __D,
                  int __E, int __F, int __G, int __H)
 {
   return __extension__ (__m256i)(__v8si){ __H, __G, __F, __E,
                                          __D, __C, __B, __A };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set_epi16 (short __q15, short __q14, short __q13, short __q12,
                  short __q11, short __q10, short __q09, short __q08,
                  short __q07, short __q06, short __q05, short __q04,
                  short __q03, short __q02, short __q01, short __q00)
 {
   return __extension__ (__m256i)(__v16hi){
     __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
     __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
   };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set_epi8  (char __q31, char __q30, char __q29, char __q28,
                  char __q27, char __q26, char __q25, char __q24,
                  char __q23, char __q22, char __q21, char __q20,
                  char __q19, char __q18, char __q17, char __q16,
                  char __q15, char __q14, char __q13, char __q12,
                  char __q11, char __q10, char __q09, char __q08,
                  char __q07, char __q06, char __q05, char __q04,
                  char __q03, char __q02, char __q01, char __q00)
 {
   return __extension__ (__m256i)(__v32qi){
     __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
     __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15,
     __q16, __q17, __q18, __q19, __q20, __q21, __q22, __q23,
     __q24, __q25, __q26, __q27, __q28, __q29, __q30, __q31
   };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set_epi64x (long long __A, long long __B, long long __C,
                   long long __D)
 {
   return __extension__ (__m256i)(__v4di){ __D, __C, __B, __A };
 }
 
 /* Create a vector with all elements equal to A.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set1_pd (double __A)
 {
   return __extension__ (__m256d){ __A, __A, __A, __A };
 }
 
 /* Create a vector with all elements equal to A.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set1_ps (float __A)
 {
   return __extension__ (__m256){ __A, __A, __A, __A,
                                 __A, __A, __A, __A };
 }
 
 /* Create a vector with all elements equal to A.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set1_epi32 (int __A)
 {
   return __extension__ (__m256i)(__v8si){ __A, __A, __A, __A,
                                          __A, __A, __A, __A };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set1_epi16 (short __A)
 {
   return _mm256_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A,
                           __A, __A, __A, __A, __A, __A, __A, __A);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set1_epi8 (char __A)
 {
   return _mm256_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
                          __A, __A, __A, __A, __A, __A, __A, __A,
                          __A, __A, __A, __A, __A, __A, __A, __A,
                          __A, __A, __A, __A, __A, __A, __A, __A);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_set1_epi64x (long long __A)
 {
   return __extension__ (__m256i)(__v4di){ __A, __A, __A, __A };
 }
 
 /* Create vectors of elements in the reversed order from the
    _mm256_set_XXX functions.  */
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_setr_pd (double __A, double __B, double __C, double __D)
 {
   return _mm256_set_pd (__D, __C, __B, __A);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_setr_ps (float __A, float __B, float __C, float __D,
                float __E, float __F, float __G, float __H)
 {
   return _mm256_set_ps (__H, __G, __F, __E, __D, __C, __B, __A);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_setr_epi32 (int __A, int __B, int __C, int __D,
                   int __E, int __F, int __G, int __H)
 {
   return _mm256_set_epi32 (__H, __G, __F, __E, __D, __C, __B, __A);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_setr_epi16 (short __q15, short __q14, short __q13, short __q12,
                   short __q11, short __q10, short __q09, short __q08,
                   short __q07, short __q06, short __q05, short __q04,
                   short __q03, short __q02, short __q01, short __q00)
 {
   return _mm256_set_epi16 (__q00, __q01, __q02, __q03,
                           __q04, __q05, __q06, __q07,
                           __q08, __q09, __q10, __q11,
                           __q12, __q13, __q14, __q15);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_setr_epi8  (char __q31, char __q30, char __q29, char __q28,
                   char __q27, char __q26, char __q25, char __q24,
                   char __q23, char __q22, char __q21, char __q20,
                   char __q19, char __q18, char __q17, char __q16,
                   char __q15, char __q14, char __q13, char __q12,
                   char __q11, char __q10, char __q09, char __q08,
                   char __q07, char __q06, char __q05, char __q04,
                   char __q03, char __q02, char __q01, char __q00)
 {
   return _mm256_set_epi8 (__q00, __q01, __q02, __q03,
                          __q04, __q05, __q06, __q07,
                          __q08, __q09, __q10, __q11,
                          __q12, __q13, __q14, __q15,
                          __q16, __q17, __q18, __q19,
                          __q20, __q21, __q22, __q23,
                          __q24, __q25, __q26, __q27,
                          __q28, __q29, __q30, __q31);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_setr_epi64x (long long __A, long long __B, long long __C,
                    long long __D)
 {
   return _mm256_set_epi64x (__D, __C, __B, __A);
 }
 
 /* Casts between various SP, DP, INT vector types.  Note that these do no
    conversion of values, they just change the type.  */
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
Index: gcc/config/i386/emmintrin.h
===================================================================
--- gcc/config/i386/emmintrin.h (revision 223886)
+++ gcc/config/i386/emmintrin.h (working copy)
@@ -50,103 +50,113 @@ typedef unsigned char __v16qu __attribut
 /* The Intel API is flexible enough that we must allow aliasing with other
    vector types, and their scalar components.  */
 typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
 typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
 
 /* Create a selector for use with the SHUFPD instruction.  */
 #define _MM_SHUFFLE2(fp1,fp0) \
  (((fp1) << 1) | (fp0))
 
 /* Create a vector with element 0 as F and the rest zero.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_sd (double __F)
 {
   return __extension__ (__m128d){ __F, 0.0 };
 }
 
 /* Create a vector with both elements equal to F.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set1_pd (double __F)
 {
   return __extension__ (__m128d){ __F, __F };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_pd1 (double __F)
 {
   return _mm_set1_pd (__F);
 }
 
 /* Create a vector with the lower value X and upper value W.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_pd (double __W, double __X)
 {
   return __extension__ (__m128d){ __X, __W };
 }
 
 /* Create a vector with the lower value W and upper value X.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setr_pd (double __W, double __X)
 {
   return __extension__ (__m128d){ __W, __X };
 }
 
 /* Create an undefined vector.  */
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_undefined_pd (void)
 {
   __m128d __Y = __Y;
   return __Y;
 }
 
 /* Create a vector of zeros.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setzero_pd (void)
 {
   return __extension__ (__m128d){ 0.0, 0.0 };
 }
 
 /* Sets the low DPFP value of A from the low value of B.  */
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_move_sd (__m128d __A, __m128d __B)
 {
   return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
 }
 
 /* Load two DPFP values from P.  The address must be 16-byte aligned.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_load_pd (double const *__P)
 {
   return *(__m128d *)__P;
 }
 
 /* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_loadu_pd (double const *__P)
 {
   return __builtin_ia32_loadupd (__P);
 }
 
 /* Create a vector with all two elements equal to *P.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_load1_pd (double const *__P)
 {
   return _mm_set1_pd (*__P);
 }
 
 /* Create a vector with element 0 as *P and the rest zero.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_load_sd (double const *__P)
 {
   return _mm_set_sd (*__P);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_load_pd1 (double const *__P)
 {
   return _mm_load1_pd (__P);
 }
 
 /* Load two DPFP values in reverse order.  The address must be aligned.  */
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_loadr_pd (double const *__P)
 {
@@ -230,56 +240,60 @@ _mm_cvtsi128_si64 (__m128i __A)
 }
 
 /* Microsoft intrinsic.  */
 extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi128_si64x (__m128i __A)
 {
   return ((__v2di)__A)[0];
 }
 #endif
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_pd (__m128d __A, __m128d __B)
 {
   return (__m128d) ((__v2df)__A + (__v2df)__B);
 }
 
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_pd (__m128d __A, __m128d __B)
 {
   return (__m128d) ((__v2df)__A - (__v2df)__B);
 }
 
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mul_pd (__m128d __A, __m128d __B)
 {
   return (__m128d) ((__v2df)__A * (__v2df)__B);
 }
 
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mul_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_div_pd (__m128d __A, __m128d __B)
 {
   return (__m128d) ((__v2df)__A / (__v2df)__B);
 }
 
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_div_sd (__m128d __A, __m128d __B)
 {
   return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B);
@@ -574,113 +588,127 @@ _mm_ucomige_sd (__m128d __A, __m128d __B
 }
 
 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_ucomineq_sd (__m128d __A, __m128d __B)
 {
   return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B);
 }
 
 /* Create a vector of Qi, where i is the element number.  */
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_epi64x (long long __q1, long long __q0)
 {
   return __extension__ (__m128i)(__v2di){ __q0, __q1 };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_epi64 (__m64 __q1,  __m64 __q0)
 {
   return _mm_set_epi64x ((long long)__q1, (long long)__q0);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
 {
   return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
               short __q3, short __q2, short __q1, short __q0)
 {
   return __extension__ (__m128i)(__v8hi){
     __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
              char __q11, char __q10, char __q09, char __q08,
              char __q07, char __q06, char __q05, char __q04,
              char __q03, char __q02, char __q01, char __q00)
 {
   return __extension__ (__m128i)(__v16qi){
     __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
     __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
   };
 }
 
 /* Set all of the elements of the vector to A.  */
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set1_epi64x (long long __A)
 {
   return _mm_set_epi64x (__A, __A);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set1_epi64 (__m64 __A)
 {
   return _mm_set_epi64 (__A, __A);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set1_epi32 (int __A)
 {
   return _mm_set_epi32 (__A, __A, __A, __A);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set1_epi16 (short __A)
 {
   return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set1_epi8 (char __A)
 {
   return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
                       __A, __A, __A, __A, __A, __A, __A, __A);
 }
 
 /* Create a vector of Qi, where i is the element number.
    The parameter order is reversed from the _mm_set_epi* functions.  */
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setr_epi64 (__m64 __q0, __m64 __q1)
 {
   return _mm_set_epi64 (__q1, __q0);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
 {
   return _mm_set_epi32 (__q3, __q2, __q1, __q0);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
                short __q4, short __q5, short __q6, short __q7)
 {
   return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
               char __q04, char __q05, char __q06, char __q07,
               char __q08, char __q09, char __q10, char __q11,
               char __q12, char __q13, char __q14, char __q15)
 {
   return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
                       __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
 }
 
@@ -721,20 +749,21 @@ _mm_storel_epi64 (__m128i *__P, __m128i
 {
   *(long long *)__P = ((__v2di)__B)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_movepi64_pi64 (__m128i __B)
 {
   return (__m64) ((__v2di)__B)[0];
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_movpi64_epi64 (__m64 __A)
 {
   return _mm_set_epi64 ((__m64)0LL, __A);
 }
 
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_move_epi64 (__m128i __A)
 {
   return (__m128i)__builtin_ia32_movq128 ((__v2di) __A);
@@ -742,20 +771,21 @@ _mm_move_epi64 (__m128i __A)
 
 /* Create an undefined vector.  */
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_undefined_si128 (void)
 {
   __m128i __Y = __Y;
   return __Y;
 }
 
 /* Create a vector of zeros.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setzero_si128 (void)
 {
   return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
 }
 
 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtepi32_pd (__m128i __A)
 {
   return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A);
@@ -1000,38 +1030,42 @@ _mm_unpacklo_epi32 (__m128i __A, __m128i
 {
   return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B);
 }
 
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_unpacklo_epi64 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v16qu)__A + (__v16qu)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v8hu)__A + (__v8hu)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v4su)__A + (__v4su)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_epi64 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v2du)__A + (__v2du)__B);
 }
 
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_adds_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B);
@@ -1048,38 +1082,42 @@ _mm_adds_epu8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B);
 }
 
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_adds_epu16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v16qu)__A - (__v16qu)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v8hu)__A - (__v8hu)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v4su)__A - (__v4su)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_epi64 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v2du)__A - (__v2du)__B);
 }
 
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_subs_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B);
@@ -1108,20 +1146,21 @@ _mm_madd_epi16 (__m128i __A, __m128i __B
 {
   return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B);
 }
 
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mulhi_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mullo_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v8hu)__A * (__v8hu)__B);
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mul_su32 (__m64 __A, __m64 __B)
 {
   return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B);
@@ -1257,92 +1296,104 @@ _mm_srl_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v4si)__B);
 }
 
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_srl_epi64 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_and_si128 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v2du)__A & (__v2du)__B);
 }
 
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_andnot_si128 (__m128i __A, __m128i __B)
 {
   return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_or_si128 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v2du)__A | (__v2du)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_xor_si128 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v2du)__A ^ (__v2du)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpeq_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v16qi)__A == (__v16qi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpeq_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v8hi)__A == (__v8hi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpeq_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v4si)__A == (__v4si)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmplt_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v16qi)__A < (__v16qi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmplt_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v8hi)__A < (__v8hi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmplt_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v4si)__A < (__v4si)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpgt_epi8 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v16qi)__A > (__v16qi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpgt_epi16 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v8hi)__A > (__v8hi)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpgt_epi32 (__m128i __A, __m128i __B)
 {
   return (__m128i) ((__v4si)__A > (__v4si)__B);
 }
 
 #ifdef __OPTIMIZE__
 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_extract_epi16 (__m128i const __A, int const __N)
 {
@@ -1486,35 +1537,38 @@ _mm_lfence (void)
 {
   __builtin_ia32_lfence ();
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mfence (void)
 {
   __builtin_ia32_mfence ();
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi32_si128 (int __A)
 {
   return _mm_set_epi32 (0, 0, 0, __A);
 }
 
 #ifdef __x86_64__
 /* Intel intrinsic.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi64_si128 (long long __A)
 {
   return _mm_set_epi64x (0, __A);
 }
 
 /* Microsoft intrinsic.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi64x_si128 (long long __A)
 {
   return _mm_set_epi64x (0, __A);
 }
 #endif
 
 /* Casts between various SP, DP, INT vector types.  Note that these do no
    conversion of values, they just change the type.  */
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
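
The plain arithmetic, bitwise and comparison forms annotated above become
usable in constant expressions.  A minimal sketch of the effect, assuming
-std=c++11 and that _mm_setzero_si128 is annotated the same way in an
earlier hunk (untested, same caveat as the rest of the patch):

  #include <emmintrin.h>

  /* If the annotations work as intended, all three initializers fold
     at compile time; no pcmpeqd/pand is needed to build the masks.  */
  constexpr __m128i zero = _mm_setzero_si128 ();
  constexpr __m128i ones = _mm_cmpeq_epi32 (zero, zero); /* all bits set */
  constexpr __m128i back = _mm_and_si128 (ones, zero);   /* equals zero  */
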
Index: gcc/config/i386/mmintrin.h
===================================================================
--- gcc/config/i386/mmintrin.h  (revision 223886)
+++ gcc/config/i386/mmintrin.h  (working copy)
@@ -20,20 +20,32 @@
    a copy of the GCC Runtime Library Exception along with this program;
    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
 /* Implemented from the specification included in the Intel C++ Compiler
    User Guide and Reference, version 9.0.  */
 
 #ifndef _MMINTRIN_H_INCLUDED
 #define _MMINTRIN_H_INCLUDED
 
+#if defined __cplusplus && __cplusplus >= 201103L
+#define __GCC_X86_CONSTEXPR11 constexpr
+#if __cplusplus >= 201402L
+#define __GCC_X86_CONSTEXPR14 constexpr
+#else
+#define __GCC_X86_CONSTEXPR14
+#endif
+#else
+#define __GCC_X86_CONSTEXPR11
+#define __GCC_X86_CONSTEXPR14
+#endif
+
 #ifndef __MMX__
 #pragma GCC push_options
 #pragma GCC target("mmx")
 #define __DISABLE_MMX__
 #endif /* __MMX__ */
 
 /* The Intel API is flexible enough that we must allow aliasing with other
    vector types, and their scalar components.  */
 typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__));
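
The guard above is what keeps the headers valid in C and in pre-C++11 C++:
the macros expand to constexpr only when the selected standard supports it,
and to nothing otherwise.  The intended expansions, for illustration only:

  /* C, or C++ before 201103L : both macros expand to nothing
     -std=c++11               : __GCC_X86_CONSTEXPR11 -> constexpr
     -std=c++14 and later     : both macros           -> constexpr  */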
 
@@ -67,39 +79,43 @@ _mm_cvtsi32_si64 (int __i)
 extern __inline __m64  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_from_int (int __i)
 {
   return _mm_cvtsi32_si64 (__i);
 }
 
 #ifdef __x86_64__
 /* Convert I to a __m64 object.  */
 
 /* Intel intrinsic.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m64  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_from_int64 (long long __i)
 {
   return (__m64) __i;
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m64  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi64_m64 (long long __i)
 {
   return (__m64) __i;
 }
 
 /* Microsoft intrinsic.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m64  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi64x_si64 (long long __i)
 {
   return (__m64) __i;
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m64  __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_pi64x (long long __i)
 {
   return (__m64) __i;
 }
 #endif
 
 /* Convert the lower 32 bits of the __m64 object into an integer.  */
 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi64_si32 (__m64 __i)
@@ -110,33 +126,36 @@ _mm_cvtsi64_si32 (__m64 __i)
 extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_to_int (__m64 __i)
 {
   return _mm_cvtsi64_si32 (__i);
 }
 
 #ifdef __x86_64__
 /* Convert the __m64 object to a 64bit integer.  */
 
 /* Intel intrinsic.  */
+__GCC_X86_CONSTEXPR11
 extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_to_int64 (__m64 __i)
 {
   return (long long)__i;
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtm64_si64 (__m64 __i)
 {
   return (long long)__i;
 }
 
 /* Microsoft intrinsic.  */
+__GCC_X86_CONSTEXPR11
 extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi64_si64x (__m64 __i)
 {
   return (long long)__i;
 }
 #endif
 
 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
    the result, and the four 16-bit values from M2 into the upper four 8-bit
    values of the result, all with signed saturation.  */
@@ -858,20 +877,21 @@ _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
   return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2);
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pcmpgtd (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpgt_pi32 (__m1, __m2);
 }
 
 /* Creates a 64-bit zero.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setzero_si64 (void)
 {
   return (__m64)0LL;
 }
 
 /* Creates a vector of two 32-bit values; I0 is least significant.  */
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_pi32 (int __i1, int __i0)
 {
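
A quick smoke test for the 64-bit conversions annotated above; a sketch
assuming an x86-64 C++11 compilation and that the __m64 <-> long long
casts are accepted in constant expressions:

  #include <mmintrin.h>

  #if defined __cplusplus && __cplusplus >= 201103L && defined __x86_64__
  /* Both intrinsics are annotated in the hunks above, so the whole
     chain should fold at compile time.  */
  constexpr __m64 z = _mm_setzero_si64 ();
  static_assert (_mm_cvtm64_si64 (z) == 0, "evaluated at compile time");
  #endif
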
Index: gcc/config/i386/xmmintrin.h
===================================================================
--- gcc/config/i386/xmmintrin.h (revision 223886)
+++ gcc/config/i386/xmmintrin.h (working copy)
@@ -104,20 +104,21 @@ typedef float __v4sf __attribute__ ((__v
 
 /* Create an undefined vector.  */
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_undefined_ps (void)
 {
   __m128 __Y = __Y;
   return __Y;
 }
 
 /* Create a vector of zeros.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setzero_ps (void)
 {
   return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
 }
 
 /* Perform the respective operation on the lower SPFP (single-precision
    floating-point) values of A and B; the upper three SPFP values are
    passed through from A.  */
 
@@ -170,38 +171,42 @@ _mm_min_ss (__m128 __A, __m128 __B)
 }
 
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_max_ss (__m128 __A, __m128 __B)
 {
   return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
 }
 
 /* Perform the respective operation on the four SPFP values in A and B.  */
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_ps (__m128 __A, __m128 __B)
 {
   return (__m128) ((__v4sf)__A + (__v4sf)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_ps (__m128 __A, __m128 __B)
 {
   return (__m128) ((__v4sf)__A - (__v4sf)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mul_ps (__m128 __A, __m128 __B)
 {
   return (__m128) ((__v4sf)__A * (__v4sf)__B);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_div_ps (__m128 __A, __m128 __B)
 {
   return (__m128) ((__v4sf)__A / (__v4sf)__B);
 }
 
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, 
__artificial__))
 _mm_sqrt_ps (__m128 __A)
 {
   return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
@@ -871,53 +876,59 @@ _MM_SET_ROUNDING_MODE (unsigned int __mo
   _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
 {
   _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
 }
 
 /* Create a vector with element 0 as F and the rest zero.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_ss (float __F)
 {
   return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
 }
 
 /* Create a vector with all four elements equal to F.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set1_ps (float __F)
 {
   return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_ps1 (float __F)
 {
   return _mm_set1_ps (__F);
 }
 
 /* Create a vector with element 0 as *P and the rest zero.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_load_ss (float const *__P)
 {
   return _mm_set_ss (*__P);
 }
 
 /* Create a vector with all four elements equal to *P.  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_load1_ps (float const *__P)
 {
   return _mm_set1_ps (*__P);
 }
 
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_load_ps1 (float const *__P)
 {
   return _mm_load1_ps (__P);
 }
 
 /* Load four SPFP values from P.  The address must be 16-byte aligned.  */
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_load_ps (float const *__P)
 {
@@ -933,27 +944,29 @@ _mm_loadu_ps (float const *__P)
 
 /* Load four SPFP values in reverse order.  The address must be aligned.  */
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_loadr_ps (float const *__P)
 {
   __v4sf __tmp = *(__v4sf *)__P;
   return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
 }
 
 /* Create the vector [Z Y X W].  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
 {
   return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
 }
 
 /* Create the vector [W X Y Z].  */
+__GCC_X86_CONSTEXPR11
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setr_ps (float __Z, float __Y, float __X, float __W)
 {
   return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
 }
 
 /* Stores the lower SPFP value.  */
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_store_ss (float *__P, __m128 __A)
 {

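The float set/setzero family in xmmintrin.h gets the same treatment; note
that _mm_set_ps lists elements from high to low while _mm_setr_ps takes
them in memory order.  A sketch, assuming the compound-literal
initializers above are accepted in constant expressions:

  #include <xmmintrin.h>

  #if defined __cplusplus && __cplusplus >= 201103L
  constexpr __m128 a = _mm_set_ps  (3.0f, 2.0f, 1.0f, 0.0f); /* element 0 is 0.0f */
  constexpr __m128 b = _mm_setr_ps (0.0f, 1.0f, 2.0f, 3.0f); /* the same vector   */
  constexpr __m128 z = _mm_setzero_ps ();
  #endif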