jtony updated this revision to Diff 77037.
jtony added a comment.

I also migrated this commit from the old anonymous repository to the new jtony
repository in preparation for committing upstream (via git svn dcommit). There
should be no difference from the previous patch; I am updating it just in case.


https://reviews.llvm.org/D26282

Files:
  lib/Headers/altivec.h
  test/CodeGen/builtins-ppc-altivec.c
  test/CodeGen/builtins-ppc-quadword.c
  test/CodeGen/builtins-ppc-vsx.c

Index: test/CodeGen/builtins-ppc-vsx.c
===================================================================
--- test/CodeGen/builtins-ppc-vsx.c
+++ test/CodeGen/builtins-ppc-vsx.c
@@ -21,6 +21,7 @@
 vector signed long long vsll = { 255LL, -937LL };
 vector unsigned long long vull = { 1447LL, 2894LL };
 double d = 23.4;
+signed long long sll = 618LL;
 float af[4] = {23.4f, 56.7f, 89.0f, 12.3f};
 double ad[2] = {23.4, 56.7};
 signed char asc[16] = { -8,  9, -10, 11, -12, 13, -14, 15,
@@ -31,8 +32,8 @@
 unsigned short aus[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
 signed int asi[4] = { -1, 2, -3, 4 };
 unsigned int aui[4] = { 0, 1, 2, 3 };
-signed long asl[2] = { -1L, 2L };
-unsigned long aul[2] = { 1L, 2L };
+signed long long asll[2] = { -1L, 2L };
+unsigned long long aull[2] = { 1L, 2L };
 
 vector float res_vf;
 vector double res_vd;
@@ -1248,4 +1249,28 @@
   res_vull = vec_sro(vull, vuc);
 // CHECK: @llvm.ppc.altivec.vsro
 // CHECK-LE: @llvm.ppc.altivec.vsro
+
+  res_vsll = vec_xl(sll, asll);
+// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK-LE: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+
+  res_vull = vec_xl(sll, aull);
+// CHECK: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK-LE: load <2 x i64>, <2 x i64>* %{{[0-9]+}}, align 16
+
+  res_vd = vec_xl(sll, ad);
+// CHECK: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 16
+// CHECK-LE: load <2 x double>, <2 x double>* %{{[0-9]+}}, align 16
+
+  vec_xst(vsll, sll, asll);
+// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK-LE: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+
+  vec_xst(vull, sll, aull);
+// CHECK: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+// CHECK-LE: store <2 x i64> %{{[0-9]+}}, <2 x i64>* %{{[0-9]+}}, align 16
+
+  vec_xst(vd, sll, ad);
+// CHECK: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 16
+// CHECK-LE: store <2 x double> %{{[0-9]+}}, <2 x double>* %{{[0-9]+}}, align 16
 }
Index: test/CodeGen/builtins-ppc-quadword.c
===================================================================
--- test/CodeGen/builtins-ppc-quadword.c
+++ test/CodeGen/builtins-ppc-quadword.c
@@ -15,6 +15,12 @@
 // CHECK-PPC: error: __int128 is not supported on this target
 vector unsigned __int128 vulll = { 1 };
 
+signed long long param_sll;
+// CHECK-PPC: error: __int128 is not supported on this target
+signed __int128 param_lll;
+// CHECK-PPC: error: __int128 is not supported on this target
+unsigned __int128 param_ulll;
+
 // CHECK-PPC: error: __int128 is not supported on this target
 vector signed __int128 res_vlll;
 // CHECK-PPC: error: __int128 is not supported on this target
@@ -165,4 +171,26 @@
 // CHECK-LE: xor <16 x i8>
 // CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{%.+}}, <4 x i32> {{%.+}}, <16 x i8> {{%.+}})
 // CHECK_PPC: error: call to 'vec_revb' is ambiguous
+
+  /* vec_xl */
+  res_vlll = vec_xl(param_sll, &param_lll);
+  // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+  // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+  // CHECK-PPC: error: call to 'vec_xl' is ambiguous
+
+  res_vulll = vec_xl(param_sll, &param_ulll);
+  // CHECK: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+  // CHECK-LE: load <1 x i128>, <1 x i128>* %{{[0-9]+}}, align 16
+  // CHECK-PPC: error: call to 'vec_xl' is ambiguous
+
+  /* vec_xst */
+  vec_xst(vlll, param_sll, &param_lll);
+  // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+  // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+  // CHECK-PPC: error: call to 'vec_xst' is ambiguous
+
+  vec_xst(vulll, param_sll, &param_ulll);
+  // CHECK: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+  // CHECK-LE: store <1 x i128> %{{[0-9]+}}, <1 x i128>* %{{[0-9]+}}, align 16
+  // CHECK-PPC: error: call to 'vec_xst' is ambiguous
 }
Index: test/CodeGen/builtins-ppc-altivec.c
===================================================================
--- test/CodeGen/builtins-ppc-altivec.c
+++ test/CodeGen/builtins-ppc-altivec.c
@@ -45,6 +45,7 @@
 int param_i;
 unsigned int param_ui;
 float param_f;
+signed long long param_sll;
 
 int res_sc;
 int res_uc;
@@ -9224,3 +9225,69 @@
 // CHECK-LE: xor <16 x i8>
 // CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{%.+}}, <4 x i32> {{%.+}}, <16 x i8> {{%.+}})
 }
+
+/* ------------------------------ vec_xl ------------------------------------ */
+void test9() {
+  // CHECK-LABEL: define void @test9
+  // CHECK-LE-LABEL: define void @test9
+  res_vsc = vec_xl(param_sll, &param_sc);
+  // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+  // CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+
+  res_vuc = vec_xl(param_sll, &param_uc);
+  // CHECK: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+  // CHECK-LE: load <16 x i8>, <16 x i8>* %{{[0-9]+}}, align 16
+
+  res_vs = vec_xl(param_sll, &param_s);
+  // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+  // CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+
+  res_vus = vec_xl(param_sll, &param_us);
+  // CHECK: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+  // CHECK-LE: load <8 x i16>, <8 x i16>* %{{[0-9]+}}, align 16
+
+  res_vi = vec_xl(param_sll, &param_i);
+  // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+  // CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+
+  res_vui = vec_xl(param_sll, &param_ui);
+  // CHECK: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+  // CHECK-LE: load <4 x i32>, <4 x i32>* %{{[0-9]+}}, align 16
+
+  res_vf = vec_xl(param_sll, &param_f);
+  // CHECK: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 16
+  // CHECK-LE: load <4 x float>, <4 x float>* %{{[0-9]+}}, align 16
+}
+
+/* ------------------------------ vec_xst ------------------------------------ */
+void test10() {
+  // CHECK-LABEL: define void @test10
+  // CHECK-LE-LABEL: define void @test10
+  vec_xst(vsc, param_sll, &param_sc);
+  // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+  // CHECK-LE: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+
+  vec_xst(vuc, param_sll, &param_uc);
+  // CHECK: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+  // CHECK-LE: store <16 x i8> %{{[0-9]+}}, <16 x i8>* %{{[0-9]+}}, align 16
+
+  vec_xst(vs, param_sll, &param_s);
+  // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+  // CHECK-LE: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+
+  vec_xst(vus, param_sll, &param_us);
+  // CHECK: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+  // CHECK-LE: store <8 x i16> %{{[0-9]+}}, <8 x i16>* %{{[0-9]+}}, align 16
+
+  vec_xst(vi, param_sll, &param_i);
+  // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+  // CHECK-LE: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+
+  vec_xst(vui, param_sll, &param_ui);
+  // CHECK: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+  // CHECK-LE: store <4 x i32> %{{[0-9]+}}, <4 x i32>* %{{[0-9]+}}, align 16
+
+  vec_xst(vf, param_sll, &param_f);
+  // CHECK: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16
+  // CHECK-LE: store <4 x float> %{{[0-9]+}}, <4 x float>* %{{[0-9]+}}, align 16
+}
Index: lib/Headers/altivec.h
===================================================================
--- lib/Headers/altivec.h
+++ lib/Headers/altivec.h
@@ -15606,6 +15606,150 @@
 }
 #endif /* END __POWER8_VECTOR__ && __powerpc64__ */
 
+
+/* vec_xl */
+
+static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
+                                                     signed char *__ptr) {
+  return *(vector signed char *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_xl(signed long long __offset, unsigned char *__ptr) {
+  return *(vector unsigned char *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
+                                                      signed short *__ptr) {
+  return *(vector signed short *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_xl(signed long long __offset, unsigned short *__ptr) {
+  return *(vector unsigned short *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
+                                                    signed int *__ptr) {
+  return *(vector signed int *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
+                                                      unsigned int *__ptr) {
+  return *(vector unsigned int *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
+                                               float *__ptr) {
+  return *(vector float *)(__ptr + __offset);
+}
+
+#ifdef __VSX__
+static inline __ATTRS_o_ai vector signed long long
+vec_xl(signed long long __offset, signed long long *__ptr) {
+  return *(vector signed long long *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_xl(signed long long __offset, unsigned long long *__ptr) {
+  return *(vector unsigned long long *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
+                                                double *__ptr) {
+  return *(vector double *)(__ptr + __offset);
+}
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static inline __ATTRS_o_ai vector signed __int128
+vec_xl(signed long long __offset, signed __int128 *__ptr) {
+  return *(vector signed __int128 *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned __int128
+vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
+  return *(vector unsigned __int128 *)(__ptr + __offset);
+}
+#endif
+
+/* vec_xst */
+
+static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
+                                        signed long long __offset,
+                                        signed char *__ptr) {
+  *(vector signed char *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
+                                        signed long long __offset,
+                                        unsigned char *__ptr) {
+  *(vector unsigned char *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
+                                        signed long long __offset,
+                                        signed short *__ptr) {
+  *(vector signed short *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
+                                        signed long long __offset,
+                                        unsigned short *__ptr) {
+  *(vector unsigned short *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
+                                        signed long long __offset,
+                                        signed int *__ptr) {
+  *(vector signed int *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
+                                        signed long long __offset,
+                                        unsigned int *__ptr) {
+  *(vector unsigned int *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector float __vec,
+                                        signed long long __offset,
+                                        float *__ptr) {
+  *(vector float *)(__ptr + __offset) = __vec;
+}
+
+#ifdef __VSX__
+static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
+                                        signed long long __offset,
+                                        signed long long *__ptr) {
+  *(vector signed long long *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
+                                        signed long long __offset,
+                                        unsigned long long *__ptr) {
+  *(vector unsigned long long *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector double __vec,
+                                        signed long long __offset,
+                                        double *__ptr) {
+  *(vector double *)(__ptr + __offset) = __vec;
+}
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
+                                        signed long long __offset,
+                                        signed __int128 *__ptr) {
+  *(vector signed __int128 *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
+                                        signed long long __offset,
+                                        unsigned __int128 *__ptr) {
+  *(vector unsigned __int128 *)(__ptr + __offset) = __vec;
+}
+#endif
 #undef __ATTRS_o_ai
 
 #endif /* __ALTIVEC_H */