This patch changes the dejagnu options in the vec_adde test cases to
better restrict where they run, so that they no longer cause failures on
power7 machines.
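
For example, the int128 variant now gates on an effective-target check
in addition to a wider target triplet (directives as they appear in the
diff below):

  /* { dg-do run { target { powerpc64*-*-* } } } */
  /* { dg-require-effective-target powerpc_p8vector_ok } */

and the base vec-adde.c test drops the -mcpu=power8 override in favor of
powerpc_vsx_ok with -mvsx.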

Based on a subsequent patch, I also updated the code formatting
(indentation, etc.) of the code from the original patch (r235577), both
in the test cases and in rs6000-c.c.
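
For reference, for {un}signed ints the builtin folds to
vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
vec_and (carryv, 0x1)), per the comment in rs6000-c.c below.  A minimal
usage sketch (the function name add_extended is just for illustration):

  #include <altivec.h>

  /* Element-wise va + vb plus the low bit of each element of carryv.
     Assumes a target where vec_adde is available, e.g. -mvsx or
     -mcpu=power8 as in the tests.  */
  vector unsigned int
  add_extended (vector unsigned int va, vector unsigned int vb,
                vector unsigned int carryv)
  {
    return vec_adde (va, vb, carryv);
  }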

Bootstrapped and tested on powerpc64le-unknown-linux-gnu (on both
power7 and power8) and powerpc64-unknown-linux-gnu (power8) with no
regressions. Is this ok for trunk?

[gcc]

2016-05-20  Bill Seurer  <seu...@linux.vnet.ibm.com>

        * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin): Fix
        code formatting in ALTIVEC_BUILTIN_VEC_ADDE section.

[gcc/testsuite]

2016-05-20  Bill Seurer  <seu...@linux.vnet.ibm.com>

        * gcc.target/powerpc/vec-adde.c: Change dejagnu options, fix code
        formatting.
        * gcc.target/powerpc/vec-adde-int128.c: Change dejagnu options, fix
        code formatting.

Index: /home/seurer/gcc/gcc-checkin2/gcc/config/rs6000/rs6000-c.c
===================================================================
--- /home/seurer/gcc/gcc-checkin2/gcc/config/rs6000/rs6000-c.c  (revision 236518)
+++ /home/seurer/gcc/gcc-checkin2/gcc/config/rs6000/rs6000-c.c  (working copy)
@@ -4622,37 +4622,41 @@ assignment for unaligned loads and stores");
       /* All 3 arguments must be vectors of (signed or unsigned) (int or
          __int128) and the types must match.  */
       if ((arg0_type != arg1_type) || (arg1_type != arg2_type))
-       goto bad; 
+       goto bad;
       if (TREE_CODE (arg0_type) != VECTOR_TYPE)
-       goto bad; 
+       goto bad;
 
       switch (TYPE_MODE (TREE_TYPE (arg0_type)))
        {
-         /* For {un}signed ints, 
-            vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb), 
-                                               vec_and (carryv, 0x1)).  */
+         /* For {un}signed ints,
+            vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
+                                                  vec_and (carryv, 0x1)).  */
          case SImode:
            {
-             vec<tree, va_gc> *params = make_tree_vector();
+             vec<tree, va_gc> *params = make_tree_vector ();
              vec_safe_push (params, arg0);
              vec_safe_push (params, arg1);
-             tree call = altivec_resolve_overloaded_builtin
-                (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD], params);
-             tree const1 = build_vector_from_val (arg0_type, 
-                build_int_cstu(TREE_TYPE (arg0_type), 1));
-             tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR,
-                               arg0_type, arg2, const1);
-             params = make_tree_vector();
+             tree add_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
+             tree call = altivec_resolve_overloaded_builtin (loc, add_builtin,
+                                                             params);
+             tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
+             tree ones_vector = build_vector_from_val (arg0_type, const1);
+             tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
+                                              arg2, ones_vector);
+             params = make_tree_vector ();
              vec_safe_push (params, call);
              vec_safe_push (params, and_expr);
-             return altivec_resolve_overloaded_builtin
-                (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD], params);
+             return altivec_resolve_overloaded_builtin (loc, add_builtin,
+                                                        params);
            }
          /* For {un}signed __int128s use the vaddeuqm instruction
                directly.  */
          case TImode:
-           return altivec_resolve_overloaded_builtin
-               (loc, rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDEUQM], arglist);
+           {
+             tree adde_bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDEUQM];
+             return altivec_resolve_overloaded_builtin (loc, adde_bii,
+                                                        arglist);
+           }
 
          /* Types other than {un}signed int and {un}signed __int128
                are errors.  */
@@ -4839,9 +4843,9 @@ assignment for unaligned loads and stores");
       arg1_type = TREE_TYPE (arg1);
 
       if (TREE_CODE (arg1_type) != VECTOR_TYPE)
-       goto bad; 
+       goto bad;
       if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
-       goto bad; 
+       goto bad;
 
       /* If we are targeting little-endian, but -maltivec=be has been
         specified to override the element order, adjust the element
@@ -4942,9 +4946,9 @@ assignment for unaligned loads and stores");
       arg2 = (*arglist)[2];
 
       if (TREE_CODE (arg1_type) != VECTOR_TYPE)
-       goto bad; 
+       goto bad;
       if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
-       goto bad; 
+       goto bad;
 
       /* If we are targeting little-endian, but -maltivec=be has been
         specified to override the element order, adjust the element
Index: /home/seurer/gcc/gcc-checkin2/gcc/testsuite/gcc.target/powerpc/vec-adde-int128.c
===================================================================
--- /home/seurer/gcc/gcc-checkin2/gcc/testsuite/gcc.target/powerpc/vec-adde-int128.c  (revision 236518)
+++ /home/seurer/gcc/gcc-checkin2/gcc/testsuite/gcc.target/powerpc/vec-adde-int128.c  (working copy)
@@ -1,7 +1,9 @@
-/* { dg-do run { target { powerpc64-*-* } } } */
+/* { dg-do run { target { powerpc64*-*-* } } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
 /* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
 /* { dg-options "-mcpu=power8 -O3" } */
 
+
 /* Test that the vec_adde builtin works as expected.  */
 
 #include "altivec.h"
@@ -20,38 +22,43 @@ STYPE expected_##NAMESUFFIX[N]; \
 \
 __attribute__((noinline)) void vector_tests_##NAMESUFFIX () \
 { \
+  vector STYPE v1, v2, v3, tmp; \
   int i; \
-  vector STYPE v1, v2, v3, tmp; \
-  for (i = 0; i < N; i+=16/sizeof(STYPE)) { \
-    /* result=addend1+addend2+(carry & 0x1) */ \
-    v1 = (vector STYPE) { addend1_##NAMESUFFIX[i] }; \
-    v2 = (vector STYPE) { addend2_##NAMESUFFIX[i] }; \
-    v3 = (vector STYPE) { carry_##NAMESUFFIX[i] }; \
+  for (i = 0; i < N; i+=16/sizeof (STYPE)) \
+    { \
+      /* result=addend1+addend2+(carry & 0x1).  */ \
+      v1 = (vector STYPE) { addend1_##NAMESUFFIX[i] }; \
+      v2 = (vector STYPE) { addend2_##NAMESUFFIX[i] }; \
+      v3 = (vector STYPE) { carry_##NAMESUFFIX[i] }; \
 \
-    tmp = vec_adde (v1, v2, v3); \
-    result_##NAMESUFFIX[i] = tmp[0]; \
-  } \
+      tmp = vec_adde (v1, v2, v3); \
+      result_##NAMESUFFIX[i] = tmp[0]; \
+    } \
 } \
 \
 __attribute__((noinline)) void init_##NAMESUFFIX () \
 { \
   int i; \
-  for (i = 0; i < N; ++i) { \
-    result_##NAMESUFFIX[i] = 0; \
-    addend1_##NAMESUFFIX[i] = 1; \
-    addend2_##NAMESUFFIX[i] = 2; \
-    carry_##NAMESUFFIX[i] = (i%12); \
-    expected_##NAMESUFFIX[i] = addend1_##NAMESUFFIX[i] + \
-               addend2_##NAMESUFFIX[i] + (carry_##NAMESUFFIX[i] & 0x1); \
-  } \
+  for (i = 0; i < N; ++i) \
+    { \
+      result_##NAMESUFFIX[i] = 0; \
+      addend1_##NAMESUFFIX[i] = 1; \
+      addend2_##NAMESUFFIX[i] = 2; \
+      carry_##NAMESUFFIX[i] = (i%12); \
+      expected_##NAMESUFFIX[i] = addend1_##NAMESUFFIX[i] + \
+                                addend2_##NAMESUFFIX[i] + \
+                                (carry_##NAMESUFFIX[i] & 0x1); \
+    } \
 } \
 \
 __attribute__((noinline)) void verify_results_##NAMESUFFIX () \
 { \
-  for (int i = 0; i < N; ++i) { \
-    if (result_##NAMESUFFIX[i] != expected_##NAMESUFFIX[i]) \
-      abort(); \
-  } \
+  int i; \
+  for (i = 0; i < N; ++i) \
+    { \
+      if (result_##NAMESUFFIX[i] != expected_##NAMESUFFIX[i]) \
+       abort (); \
+    } \
 }
 
 
@@ -63,13 +70,13 @@ __attribute__((noinline)) void verify_results_##NA
 }
 
 
-define_test_functions(signed __int128, si128);
-define_test_functions(unsigned __int128, ui128);
+define_test_functions (signed __int128, si128);
+define_test_functions (unsigned __int128, ui128);
 
 int main ()
 {
-  execute_test_functions(signed __int128, si128);
-  execute_test_functions(unsigned __int128, ui128);
+  execute_test_functions (signed __int128, si128);
+  execute_test_functions (unsigned __int128, ui128);
 
   return 0;
 }
Index: /home/seurer/gcc/gcc-checkin2/gcc/testsuite/gcc.target/powerpc/vec-adde.c
===================================================================
--- /home/seurer/gcc/gcc-checkin2/gcc/testsuite/gcc.target/powerpc/vec-adde.c  (revision 236518)
+++ /home/seurer/gcc/gcc-checkin2/gcc/testsuite/gcc.target/powerpc/vec-adde.c  (working copy)
@@ -1,6 +1,6 @@
-/* { dg-do run { target { powerpc64-*-* } } } */
-/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
-/* { dg-options "-mcpu=power8 -O3" } */
+/* { dg-do run { target { powerpc64*-*-* } } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-mvsx -O3" } */
 
 /* Test that the vec_adde builtin works as expected.  */
 
@@ -20,38 +20,43 @@ STYPE expected_##NAMESUFFIX[N]; \
 \
 __attribute__((noinline)) void vector_tests_##NAMESUFFIX () \
 { \
+  vector STYPE v1, v2, v3, tmp; \
   int i; \
-  vector STYPE v1, v2, v3, tmp; \
-  for (i = 0; i < N; i+=16/sizeof(STYPE)) { \
-    /* result=addend1+addend2+(carry & 0x1) */ \
-    v1 = vec_vsx_ld (0, &addend1_##NAMESUFFIX[i]); \
-    v2 = vec_vsx_ld (0, &addend2_##NAMESUFFIX[i]); \
-    v3 = vec_vsx_ld (0, &carry_##NAMESUFFIX[i]); \
+  for (i = 0; i < N; i+=16/sizeof (STYPE)) \
+    { \
+      /* result=addend1+addend2+(carry & 0x1).  */ \
+      v1 = vec_vsx_ld (0, &addend1_##NAMESUFFIX[i]); \
+      v2 = vec_vsx_ld (0, &addend2_##NAMESUFFIX[i]); \
+      v3 = vec_vsx_ld (0, &carry_##NAMESUFFIX[i]); \
 \
-    tmp = vec_adde (v1, v2, v3); \
-    vec_vsx_st (tmp, 0, &result_##NAMESUFFIX[i]); \
-  } \
+      tmp = vec_adde (v1, v2, v3); \
+      vec_vsx_st (tmp, 0, &result_##NAMESUFFIX[i]); \
+    } \
 } \
 \
 __attribute__((noinline)) void init_##NAMESUFFIX () \
 { \
   int i; \
-  for (i = 0; i < N; ++i) { \
-    result_##NAMESUFFIX[i] = 0; \
-    addend1_##NAMESUFFIX[i] = 1; \
-    addend2_##NAMESUFFIX[i] = 2; \
-    carry_##NAMESUFFIX[i] = (i%12); \
-    expected_##NAMESUFFIX[i] = addend1_##NAMESUFFIX[i] + \
-               addend2_##NAMESUFFIX[i] + (carry_##NAMESUFFIX[i] & 0x1); \
-  } \
+  for (i = 0; i < N; ++i) \
+    { \
+      result_##NAMESUFFIX[i] = 0; \
+      addend1_##NAMESUFFIX[i] = 1; \
+      addend2_##NAMESUFFIX[i] = 2; \
+      carry_##NAMESUFFIX[i] = (i%12); \
+      expected_##NAMESUFFIX[i] = addend1_##NAMESUFFIX[i] + \
+                                addend2_##NAMESUFFIX[i] + \
+                                (carry_##NAMESUFFIX[i] & 0x1); \
+    } \
 } \
 \
 __attribute__((noinline)) void verify_results_##NAMESUFFIX () \
 { \
-  for (int i = 0; i < N; ++i) { \
-    if (result_##NAMESUFFIX[i] != expected_##NAMESUFFIX[i]) \
-      abort(); \
-  } \
+  int i; \
+  for (i = 0; i < N; ++i) \
+    { \
+      if (result_##NAMESUFFIX[i] != expected_##NAMESUFFIX[i]) \
+       abort (); \
+    } \
 }
 
 
@@ -63,13 +68,13 @@ __attribute__((noinline)) void verify_results_##NA
 }
 
 
-define_test_functions(signed int, si);
-define_test_functions(unsigned int, ui);
+define_test_functions (signed int, si);
+define_test_functions (unsigned int, ui);
 
 int main ()
 {
-  execute_test_functions(signed int, si);
-  execute_test_functions(unsigned int, ui);
+  execute_test_functions (signed int, si);
+  execute_test_functions (unsigned int, ui);
 
   return 0;
 }
-- 

-Bill Seurer
