================
@@ -0,0 +1,453 @@
+/*===----------- avx10_2satcvtdsintrin.h - AVX512SATCVTDS intrinsics 
--------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM 
Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error                                                                         
\
+    "Never use <avx10_2satcvtdsintrin.h> directly; include <immintrin.h> 
instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __AVX10_2SATCVTDSINTRIN_H
+#define __AVX10_2SATCVTDSINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS                                                     
\
+  __attribute__((__always_inline__, __nodebug__, __target__("avx10.2-256"),    
\
+                 __min_vector_width__(256)))
+
+// Scalar double -> signed 32-bit int, truncating with saturation, with
+// explicit rounding/SAE control R. The i32 and si32 spellings are aliases.
+// The source operand is a double vector, so it must be cast via __m128d
+// (not __m128) before the __v2df conversion.
+#define _mm_cvtt_roundssd_i32(A, R)                                            \
+  ((int)__builtin_ia32_vcvttssd2si32((__v2df)(__m128d)(A), (const int)(R)))
+
+#define _mm_cvtt_roundssd_si32(A, R)                                           \
+  ((int)__builtin_ia32_vcvttssd2si32((__v2df)(__m128d)(A), (const int)(R)))
+
+#ifdef __x86_64__
+// Scalar double -> signed 64-bit int with rounding control (64-bit targets
+// only; the scalar GPR forms need a 64-bit destination register).
+#define _mm_cvtt_roundssd_si64(A, R)                                           \
+  ((long long)__builtin_ia32_vcvttssd2si64((__v2df)(__m128d)(A),               \
+                                           (const int)(R)))
+
+#define _mm_cvtt_roundssd_i64(A, R)                                            \
+  ((long long)__builtin_ia32_vcvttssd2si64((__v2df)(__m128d)(A),               \
+                                           (const int)(R)))
+#endif
+
+// Scalar double -> unsigned int, truncating with saturation, with rounding
+// control.
+#define _mm_cvtt_roundssd_u32(A, R)                                            \
+  ((unsigned int)__builtin_ia32_vcvttssd2usi32((__v2df)(__m128d)(A),           \
+                                               (const int)(R)))
+
+#ifdef __x86_64__
+#define _mm_cvtt_roundssd_u64(A, R)                                            \
+  ((unsigned long long)__builtin_ia32_vcvttssd2usi64((__v2df)(__m128d)(A),     \
+                                                     (const int)(R)))
+#endif
+
+// Scalar float -> signed/unsigned int, truncating with saturation, with
+// rounding control. i32/si32 (and i64/si64) spellings are aliases.
+#define _mm_cvtt_roundsss_i32(A, R)                                            \
+  ((int)__builtin_ia32_vcvttsss2si32((__v4sf)(__m128)(A), (const int)(R)))
+
+#define _mm_cvtt_roundsss_si32(A, R)                                           \
+  ((int)__builtin_ia32_vcvttsss2si32((__v4sf)(__m128)(A), (const int)(R)))
+
+#ifdef __x86_64__
+#define _mm_cvtt_roundsss_i64(A, R)                                            \
+  ((long long)__builtin_ia32_vcvttsss2si64((__v4sf)(__m128)(A), (const int)(R)))
+
+#define _mm_cvtt_roundsss_si64(A, R)                                           \
+  ((long long)__builtin_ia32_vcvttsss2si64((__v4sf)(__m128)(A), (const int)(R)))
+#endif
+
+#define _mm_cvtt_roundsss_u32(A, R)                                            \
+  ((unsigned int)__builtin_ia32_vcvttsss2usi32((__v4sf)(__m128)(A),            \
+                                               (const int)(R)))
+
+#ifdef __x86_64__
+#define _mm_cvtt_roundsss_u64(A, R)                                            \
+  ((unsigned long long)__builtin_ia32_vcvttsss2usi64((__v4sf)(__m128)(A),      \
+                                                     (const int)(R)))
+#endif
+
+//  128 Bit : Double -> int
+// Macro arguments are parenthesized: a cast binds tighter than most
+// operators, so (__m128d)A would mis-bind for an argument like `x + y`.
+#define _mm_cvttspd_epi32(A)                                                   \
+  ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask(                                \
+      (__v2df)(__m128d)(A), (__v4si)(__m128i)_mm_undefined_si128(),            \
+      (__mmask8)(-1)))
+
+#define _mm_mask_cvttspd_epi32(W, U, A)                                        \
+  ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask(                                \
+      (__v2df)(__m128d)(A), (__v4si)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm_maskz_cvttspd_epi32(U, A)                                          \
+  ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask(                                \
+      (__v2df)(__m128d)(A), (__v4si)(__m128i)_mm_setzero_si128(),              \
+      (__mmask8)(U)))
+
+//  256 Bit : Double -> int
+// Convert 4 doubles to 4 signed 32-bit ints, truncating with saturation,
+// using the current rounding direction. Parameter uses the reserved __A
+// name: plain `A` lives in the user namespace and a prior `#define A ...`
+// would break this system header.
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm256_cvttspd_epi32(__m256d __A) {
+  return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(
+      (__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1,
+      _MM_FROUND_CUR_DIRECTION));
+}
+
+// Write-masked variant: __W supplies the passthrough elements for mask bits
+// that are clear. Reserved __-prefixed parameter names keep the header safe
+// against user macros.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttspd_epi32(__m128i __W, __mmask8 __U, __m256d __A) {
+  return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(
+      (__v4df)__A, (__v4si)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION));
+}
+
+// Zero-masked variant: elements whose mask bit is clear are zeroed.
+// Reserved __-prefixed parameter names keep the header safe against user
+// macros.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttspd_epi32(__mmask8 __U, __m256d __A) {
+  return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(
+      (__v4df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U,
+      _MM_FROUND_CUR_DIRECTION));
+}
+
+// Rounding-controlled forms. Note: macro args are parenthesized, and the
+// all-ones mask is written (__mmask8)(-1) -- `(__mmask8) - 1` reads as a
+// subtraction even though it parses as a cast of -1.
+#define _mm256_cvtts_roundpd_epi32(A, R)                                       \
+  ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(                          \
+      (__v4df)(__m256d)(A), (__v4si)(__m128i)_mm_undefined_si128(),            \
+      (__mmask8)(-1), (int)(R)))
+
+#define _mm256_mask_cvtts_roundpd_epi32(W, U, A, R)                            \
+  ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(                          \
+      (__v4df)(__m256d)(A), (__v4si)(__m128i)(W), (__mmask8)(U), (int)(R)))
+
+#define _mm256_maskz_cvtts_roundpd_epi32(U, A, R)                              \
+  ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(                          \
+      (__v4df)(__m256d)(A), (__v4si)(__m128i)_mm_setzero_si128(),              \
+      (__mmask8)(U), (int)(R)))
+
+//  128 Bit : Double -> uint
+// Macro arguments are parenthesized so expression arguments bind correctly;
+// the mask variant's extra parentheses are normalized to match its siblings.
+#define _mm_cvttspd_epu32(A)                                                   \
+  ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask(                               \
+      (__v2df)(__m128d)(A), (__v4si)(__m128i)_mm_undefined_si128(),            \
+      (__mmask8)(-1)))
+
+#define _mm_mask_cvttspd_epu32(W, U, A)                                        \
+  ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask(                               \
+      (__v2df)(__m128d)(A), (__v4si)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm_maskz_cvttspd_epu32(U, A)                                          \
+  ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask(                               \
+      (__v2df)(__m128d)(A), (__v4si)(__m128i)_mm_setzero_si128(),              \
+      (__mmask8)(U)))
+
+//  256 Bit : Double -> uint
+// Convert 4 doubles to 4 unsigned 32-bit ints, truncating with saturation,
+// using the current rounding direction. Reserved __A parameter name keeps
+// the header safe against user macros.
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm256_cvttspd_epu32(__m256d __A) {
+  return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(
+      (__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1,
+      _MM_FROUND_CUR_DIRECTION));
+}
+
+// Write-masked variant: __W supplies the passthrough elements for mask bits
+// that are clear. Reserved __-prefixed parameter names keep the header safe
+// against user macros.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttspd_epu32(__m128i __W, __mmask8 __U, __m256d __A) {
+  return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(
+      (__v4df)__A, (__v4si)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION));
+}
+
+// Zero-masked variant: elements whose mask bit is clear are zeroed.
+// Reserved __-prefixed parameter names keep the header safe against user
+// macros.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttspd_epu32(__mmask8 __U, __m256d __A) {
+  return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(
+      (__v4df)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U,
+      _MM_FROUND_CUR_DIRECTION));
+}
+
+// Rounding-controlled forms. Macro args are parenthesized, and the all-ones
+// mask is written (__mmask8)(-1) rather than the subtraction-lookalike
+// `(__mmask8) - 1`.
+#define _mm256_cvtts_roundpd_epu32(A, R)                                       \
+  ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(                         \
+      (__v4df)(__m256d)(A), (__v4si)(__m128i)_mm_undefined_si128(),            \
+      (__mmask8)(-1), (int)(R)))
+
+#define _mm256_mask_cvtts_roundpd_epu32(W, U, A, R)                            \
+  ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(                         \
+      (__v4df)(__m256d)(A), (__v4si)(__m128i)(W), (__mmask8)(U), (int)(R)))
+
+#define _mm256_maskz_cvtts_roundpd_epu32(U, A, R)                              \
+  ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(                         \
+      (__v4df)(__m256d)(A), (__v4si)(__m128i)_mm_setzero_si128(),              \
+      (__mmask8)(U), (int)(R)))
+
+//  128 Bit : Double -> long
+#ifdef __x86_64__
----------------
phoebewang wrote:

Vector instructions are available on 32-bit.

https://github.com/llvm/llvm-project/pull/102592
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to