A number of places in the rs6000 backend assume the value for a vec
splat can be found at element nunits-1 of a vector constant, which is
wrong for little-endian.  This patch fixes them and the ICE found
when running altivec-consts.c on powerpc64le.

I've also updated the testcase so that it passes for little-endian.
vspltish() and vspltisw() don't do the right thing for little-endian, nor
is it easy to make those functions work.  Consider
  v16qi v = { 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15 };
  vspltish (w, 15);
vs
  v8hi v = { 15, 15, 15, 15, 15, 15, 15, 15 };
  vspltish (w, 15);
So rather than write two or three variants to build constants the
right way, I simply use static arrays.  I've also added a
scan-assembler-not to make sure none of the vector constants are
loaded from memory, and a new test that has constants appropriate for
little-endian optimisation.

Bootstrapped and regression tested powerpc64-linux.  OK for mainline
and 4.8?

gcc/
        * config/rs6000/rs6000.c (vspltis_constant): Correct for little-endian.
        (gen_easy_altivec_constant): Likewise.
        * config/rs6000/predicates.md (easy_vector_constant_add_self,
        easy_vector_constant_msb): Likewise.
gcc/testsuite/
        * gcc.target/powerpc/altivec-consts.c: Correct for little-endian.
        Add scan-assembler-not "lvx".
        * gcc.target/powerpc/le-altivec-consts.c: New.

Index: gcc/config/rs6000/rs6000.c
===================================================================
--- gcc/config/rs6000/rs6000.c  (revision 200274)
+++ gcc/config/rs6000/rs6000.c  (working copy)
@@ -4657,7 +4662,7 @@ vspltis_constant (rtx op, unsigned step, unsigned
   bitsize = GET_MODE_BITSIZE (inner);
   mask = GET_MODE_MASK (inner);
 
-  val = const_vector_elt_as_int (op, nunits - 1);
+  val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
   splat_val = val;
   msb_val = val > 0 ? 0 : -1;
 
@@ -4697,7 +4702,7 @@ vspltis_constant (rtx op, unsigned step, unsigned
   for (i = 0; i < nunits - 1; ++i)
     {
       HOST_WIDE_INT desired_val;
-      if (((i + 1) & (step - 1)) == 0)
+      if (((BYTES_BIG_ENDIAN ? i + 1 : i) & (step - 1)) == 0)
        desired_val = val;
       else
        desired_val = msb_val;
@@ -4782,13 +4787,13 @@ gen_easy_altivec_constant (rtx op)
 {
   enum machine_mode mode = GET_MODE (op);
   int nunits = GET_MODE_NUNITS (mode);
-  rtx last = CONST_VECTOR_ELT (op, nunits - 1);
+  rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
   unsigned step = nunits / 4;
   unsigned copies = 1;
 
   /* Start with a vspltisw.  */
   if (vspltis_constant (op, step, copies))
-    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
+    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
 
   /* Then try with a vspltish.  */
   if (step == 1)
@@ -4797,7 +4802,7 @@ gen_easy_altivec_constant (rtx op)
     step >>= 1;
 
   if (vspltis_constant (op, step, copies))
-    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
+    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
 
   /* And finally a vspltisb.  */
   if (step == 1)
@@ -4806,7 +4811,7 @@ gen_easy_altivec_constant (rtx op)
     step >>= 1;
 
   if (vspltis_constant (op, step, copies))
-    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
+    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
 
   gcc_unreachable ();
 }
Index: gcc/config/rs6000/predicates.md
===================================================================
--- gcc/config/rs6000/predicates.md     (revision 200274)
+++ gcc/config/rs6000/predicates.md     (working copy)
@@ -527,9 +527,11 @@
            (match_test "easy_altivec_constant (op, mode)")))
 {
   HOST_WIDE_INT val;
+  int elt;
   if (mode == V2DImode || mode == V2DFmode)
     return 0;
-  val = const_vector_elt_as_int (op, GET_MODE_NUNITS (mode) - 1);
+  elt = BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (mode) - 1 : 0;
+  val = const_vector_elt_as_int (op, elt);
   val = ((val & 0xff) ^ 0x80) - 0x80;
   return EASY_VECTOR_15_ADD_SELF (val);
 })
@@ -541,9 +543,11 @@
            (match_test "easy_altivec_constant (op, mode)")))
 {
   HOST_WIDE_INT val;
+  int elt;
   if (mode == V2DImode || mode == V2DFmode)
     return 0;
-  val = const_vector_elt_as_int (op, GET_MODE_NUNITS (mode) - 1);
+  elt = BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (mode) - 1 : 0;
+  val = const_vector_elt_as_int (op, elt);
   return EASY_VECTOR_MSB (val, GET_MODE_INNER (mode));
 })
 
Index: gcc/testsuite/gcc.target/powerpc/le-altivec-consts.c
===================================================================
--- gcc/testsuite/gcc.target/powerpc/le-altivec-consts.c        (revision 0)
+++ gcc/testsuite/gcc.target/powerpc/le-altivec-consts.c        (revision 0)
@@ -0,0 +1,253 @@
+/* { dg-do run { target { powerpc*-*-* && vmx_hw } } } */
+/* { dg-do compile { target { powerpc*-*-* && { ! vmx_hw } } } } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -mabi=altivec -O2" } */
+
+/* Check that "easy" AltiVec constants are correctly synthesized.  */
+
+extern void abort (void);
+
+typedef __attribute__ ((vector_size (16))) unsigned char v16qi;
+typedef __attribute__ ((vector_size (16))) unsigned short v8hi;
+typedef __attribute__ ((vector_size (16))) unsigned int v4si;
+
+typedef __attribute__((aligned(16))) char c16[16];
+typedef __attribute__((aligned(16))) short s8[8];
+typedef __attribute__((aligned(16))) int i4[4];
+
+#define V16QI(V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16)  \
+  v16qi v = {V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16};  \
+  static c16 w = {V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16}; \
+  check_v16qi (v, w);
+
+#define V8HI(V1,V2,V3,V4,V5,V6,V7,V8)          \
+  v8hi v = {V1,V2,V3,V4,V5,V6,V7,V8};          \
+  static s8 w = {V1,V2,V3,V4,V5,V6,V7,V8};     \
+  check_v8hi (v, w);
+
+#define V4SI(V1,V2,V3,V4)      \
+  v4si v = {V1,V2,V3,V4};      \
+  static i4 w = {V1,V2,V3,V4}; \
+  check_v4si (v, w);
+
+
+/* Use three different check functions for each mode-instruction pair.
+   The callers have no typecasting and no addressable vectors, to make
+   the test more robust.  */
+
+void __attribute__ ((noinline)) check_v16qi (v16qi v1, char *v2)
+{
+  if (memcmp (&v1, v2, 16))
+    abort ();
+}
+
+void __attribute__ ((noinline)) check_v8hi (v8hi v1, short *v2)
+{
+  if (memcmp (&v1, v2, 16))
+    abort ();
+}
+
+void __attribute__ ((noinline)) check_v4si (v4si v1, int *v2)
+{
+  if (memcmp (&v1, v2, 16))
+    abort ();
+}
+
+
+/* V16QI tests.  */
+
+void v16qi_vspltisb ()
+{
+  V16QI (15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15);
+}
+
+void v16qi_vspltisb_neg ()
+{
+  V16QI (-5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5);
+}
+
+void v16qi_vspltisb_addself ()
+{
+  V16QI (30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30);
+}
+
+void v16qi_vspltisb_neg_addself ()
+{
+  V16QI (-24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24);
+}
+
+void v16qi_vspltish ()
+{
+  V16QI (15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0);
+}
+
+void v16qi_vspltish_addself ()
+{
+  V16QI (30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0);
+}
+
+void v16qi_vspltish_neg ()
+{
+  V16QI (-5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1);
+}
+
+void v16qi_vspltisw ()
+{
+  V16QI (15, 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0);
+}
+
+void v16qi_vspltisw_addself ()
+{
+  V16QI (30, 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0);
+}
+
+void v16qi_vspltisw_neg ()
+{
+  V16QI (-5, -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1);
+}
+
+
+/* V8HI tests. */
+
+void v8hi_vspltisb ()
+{
+  V8HI (0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F);
+}
+
+void v8hi_vspltisb_addself ()
+{
+  V8HI (0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E);
+}
+
+void v8hi_vspltisb_neg ()
+{
+  V8HI (0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB);
+}
+
+void v8hi_vspltish ()
+{
+  V8HI (15, 15, 15, 15, 15, 15, 15, 15);
+}
+
+void v8hi_vspltish_neg ()
+{
+  V8HI (-5, -5, -5, -5, -5, -5, -5, -5);
+}
+
+void v8hi_vspltish_addself ()
+{
+  V8HI (30, 30, 30, 30, 30, 30, 30, 30);
+}
+
+void v8hi_vspltish_neg_addself ()
+{
+  V8HI (-24, -24, -24, -24, -24, -24, -24, -24);
+}
+
+void v8hi_vspltisw ()
+{
+  V8HI (15, 0, 15, 0, 15, 0, 15, 0);
+}
+
+void v8hi_vspltisw_addself ()
+{
+  V8HI (30, 0, 30, 0, 30, 0, 30, 0);
+}
+
+void v8hi_vspltisw_neg ()
+{
+  V8HI (-5, -1, -5, -1, -5, -1, -5, -1);
+}
+
+/* V4SI tests. */
+
+void v4si_vspltisb ()
+{
+  V4SI (0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F);
+}
+
+void v4si_vspltisb_addself ()
+{
+  V4SI (0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E);
+}
+
+void v4si_vspltisb_neg ()
+{
+  V4SI (0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB);
+}
+
+void v4si_vspltish ()
+{
+  V4SI (0x000F000F, 0x000F000F, 0x000F000F, 0x000F000F);
+}
+
+void v4si_vspltish_addself ()
+{
+  V4SI (0x001E001E, 0x001E001E, 0x001E001E, 0x001E001E);
+}
+
+void v4si_vspltish_neg ()
+{
+  V4SI (0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB);
+}
+
+void v4si_vspltisw ()
+{
+  V4SI (15, 15, 15, 15);
+}
+
+void v4si_vspltisw_neg ()
+{
+  V4SI (-5, -5, -5, -5);
+}
+
+void v4si_vspltisw_addself ()
+{
+  V4SI (30, 30, 30, 30);
+}
+
+void v4si_vspltisw_neg_addself ()
+{
+  V4SI (-24, -24, -24, -24);
+}
+
+
+
+int main ()
+{
+  v16qi_vspltisb ();
+  v16qi_vspltisb_neg ();
+  v16qi_vspltisb_addself ();
+  v16qi_vspltisb_neg_addself ();
+  v16qi_vspltish ();
+  v16qi_vspltish_addself ();
+  v16qi_vspltish_neg ();
+  v16qi_vspltisw ();
+  v16qi_vspltisw_addself ();
+  v16qi_vspltisw_neg ();
+
+  v8hi_vspltisb ();
+  v8hi_vspltisb_addself ();
+  v8hi_vspltisb_neg ();
+  v8hi_vspltish ();
+  v8hi_vspltish_neg ();
+  v8hi_vspltish_addself ();
+  v8hi_vspltish_neg_addself ();
+  v8hi_vspltisw ();
+  v8hi_vspltisw_addself ();
+  v8hi_vspltisw_neg ();
+
+  v4si_vspltisb ();
+  v4si_vspltisb_addself ();
+  v4si_vspltisb_neg ();
+  v4si_vspltish ();
+  v4si_vspltish_addself ();
+  v4si_vspltish_neg ();
+  v4si_vspltisw ();
+  v4si_vspltisw_neg ();
+  v4si_vspltisw_addself ();
+  v4si_vspltisw_neg_addself ();
+  return 0;
+}
+
+/* { dg-final { scan-assembler-not "lvx" { target { powerpc*le-*-* } } } } */
Index: gcc/testsuite/gcc.target/powerpc/altivec-consts.c
===================================================================
--- gcc/testsuite/gcc.target/powerpc/altivec-consts.c   (revision 200274)
+++ gcc/testsuite/gcc.target/powerpc/altivec-consts.c   (working copy)
@@ -11,31 +11,24 @@ typedef __attribute__ ((vector_size (16))) unsigne
 typedef __attribute__ ((vector_size (16))) unsigned short v8hi;
 typedef __attribute__ ((vector_size (16))) unsigned int v4si;
 
-char w[16] __attribute__((aligned(16)));
- 
-
-/* Emulate the vspltis? instructions on a 16-byte array of chars.  */
+typedef __attribute__((aligned(16))) char c16[16];
+typedef __attribute__((aligned(16))) short s8[8];
+typedef __attribute__((aligned(16))) int i4[4];
 
-void vspltisb (char *v, int val)
-{
-  int i;
-  for (i = 0; i < 16; i++)
-    v[i] = val;
-}
+#define V16QI(V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16)  \
+  v16qi v = {V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16};  \
+  static c16 w = {V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16}; \
+  check_v16qi (v, w);
 
-void vspltish (char *v, int val)
-{
-  int i;
-  for (i = 0; i < 16; i += 2)
-    v[i] = val >> 7, v[i + 1] = val;
-}
+#define V8HI(V1,V2,V3,V4,V5,V6,V7,V8)          \
+  v8hi v = {V1,V2,V3,V4,V5,V6,V7,V8};          \
+  static s8 w = {V1,V2,V3,V4,V5,V6,V7,V8};     \
+  check_v8hi (v, w);
 
-void vspltisw (char *v, int val)
-{
-  int i;
-  for (i = 0; i < 16; i += 4)
-    v[i] = v[i + 1] = v[i + 2] = val >> 7, v[i + 3] = val;
-}
+#define V4SI(V1,V2,V3,V4)      \
+  v4si v = {V1,V2,V3,V4};      \
+  static i4 w = {V1,V2,V3,V4}; \
+  check_v4si (v, w);
 
 
 /* Use three different check functions for each mode-instruction pair.
@@ -48,13 +41,13 @@ void __attribute__ ((noinline)) check_v16qi (v16qi
     abort ();
 }
 
-void __attribute__ ((noinline)) check_v8hi (v8hi v1, char *v2)
+void __attribute__ ((noinline)) check_v8hi (v8hi v1, short *v2)
 {
   if (memcmp (&v1, v2, 16))
     abort ();
 }
 
-void __attribute__ ((noinline)) check_v4si (v4si v1, char *v2)
+void __attribute__ ((noinline)) check_v4si (v4si v1, int *v2)
 {
   if (memcmp (&v1, v2, 16))
     abort ();
@@ -65,72 +58,52 @@ void __attribute__ ((noinline)) check_v16qi (v16qi
 
 void v16qi_vspltisb ()
 {
-  v16qi v = { 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15 };
-  vspltisb (w, 15);
-  check_v16qi (v, w);
+  V16QI (15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15);
 }
 
 void v16qi_vspltisb_neg ()
 {
-  v16qi v = { -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5 };
-  vspltisb (w, -5);
-  check_v16qi (v, w);
+  V16QI (-5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5);
 }
 
 void v16qi_vspltisb_addself ()
 {
-  v16qi v = { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30 };
-  vspltisb (w, 30);
-  check_v16qi (v, w);
+  V16QI (30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30);
 }
 
 void v16qi_vspltisb_neg_addself ()
 {
-  v16qi v = { -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24 };
-  vspltisb (w, -24);
-  check_v16qi (v, w);
+  V16QI (-24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24);
 }
 
 void v16qi_vspltish ()
 {
-  v16qi v = { 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15 };
-  vspltish (w, 15);
-  check_v16qi (v, w);
+  V16QI (0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15);
 }
 
 void v16qi_vspltish_addself ()
 {
-  v16qi v = { 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30 };
-  vspltish (w, 30);
-  check_v16qi (v, w);
+  V16QI (0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30);
 }
 
 void v16qi_vspltish_neg ()
 {
-  v16qi v = { -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5 };
-  vspltish (w, -5);
-  check_v16qi (v, w);
+  V16QI (-1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5);
 }
 
 void v16qi_vspltisw ()
 {
-  v16qi v = { 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15 };
-  vspltisw (w, 15);
-  check_v16qi (v, w);
+  V16QI (0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15);
 }
 
 void v16qi_vspltisw_addself ()
 {
-  v16qi v = { 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30 };
-  vspltisw (w, 30);
-  check_v16qi (v, w);
+  V16QI (0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30);
 }
 
 void v16qi_vspltisw_neg ()
 {
-  v16qi v = { -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5 };
-  vspltisw (w, -5);
-  check_v16qi (v, w);
+  V16QI (-1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5);
 }
 
 
@@ -138,144 +111,104 @@ void v16qi_vspltisw_neg ()
 
 void v8hi_vspltisb ()
 {
-  v8hi v = { 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F };
-  vspltisb (w, 15);
-  check_v8hi (v, w);
+  V8HI (0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F);
 }
 
 void v8hi_vspltisb_addself ()
 {
-  v8hi v = { 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E };
-  vspltisb (w, 30);
-  check_v8hi (v, w);
+  V8HI (0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E);
 }
 
 void v8hi_vspltisb_neg ()
 {
-  v8hi v = { 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB };
-  vspltisb (w, -5);
-  check_v8hi (v, w);
+  V8HI (0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB);
 }
 
 void v8hi_vspltish ()
 {
-  v8hi v = { 15, 15, 15, 15, 15, 15, 15, 15 };
-  vspltish (w, 15);
-  check_v8hi (v, w);
+  V8HI (15, 15, 15, 15, 15, 15, 15, 15);
 }
 
 void v8hi_vspltish_neg ()
 {
-  v8hi v = { -5, -5, -5, -5, -5, -5, -5, -5 };
-  vspltish (w, -5);
-  check_v8hi (v, w);
+  V8HI (-5, -5, -5, -5, -5, -5, -5, -5);
 }
 
 void v8hi_vspltish_addself ()
 {
-  v8hi v = { 30, 30, 30, 30, 30, 30, 30, 30 };
-  vspltish (w, 30);
-  check_v8hi (v, w);
+  V8HI (30, 30, 30, 30, 30, 30, 30, 30);
 }
 
 void v8hi_vspltish_neg_addself ()
 {
-  v8hi v = { -24, -24, -24, -24, -24, -24, -24, -24 };
-  vspltish (w, -24);
-  check_v8hi (v, w);
+  V8HI (-24, -24, -24, -24, -24, -24, -24, -24);
 }
 
 void v8hi_vspltisw ()
 {
-  v8hi v = { 0, 15, 0, 15, 0, 15, 0, 15 };
-  vspltisw (w, 15);
-  check_v8hi (v, w);
+  V8HI (0, 15, 0, 15, 0, 15, 0, 15);
 }
 
 void v8hi_vspltisw_addself ()
 {
-  v8hi v = { 0, 30, 0, 30, 0, 30, 0, 30 };
-  vspltisw (w, 30);
-  check_v8hi (v, w);
+  V8HI (0, 30, 0, 30, 0, 30, 0, 30);
 }
 
 void v8hi_vspltisw_neg ()
 {
-  v8hi v = { -1, -5, -1, -5, -1, -5, -1, -5 };
-  vspltisw (w, -5);
-  check_v8hi (v, w);
+  V8HI (-1, -5, -1, -5, -1, -5, -1, -5);
 }
 
 /* V4SI tests. */
 
 void v4si_vspltisb ()
 {
-  v4si v = { 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F };
-  vspltisb (w, 15);
-  check_v4si (v, w);
+  V4SI (0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F);
 }
 
 void v4si_vspltisb_addself ()
 {
-  v4si v = { 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E };
-  vspltisb (w, 30);
-  check_v4si (v, w);
+  V4SI (0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E);
 }
 
 void v4si_vspltisb_neg ()
 {
-  v4si v = { 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB };
-  vspltisb (w, -5);
-  check_v4si (v, w);
+  V4SI (0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB);
 }
 
 void v4si_vspltish ()
 {
-  v4si v = { 0x000F000F, 0x000F000F, 0x000F000F, 0x000F000F };
-  vspltish (w, 15);
-  check_v4si (v, w);
+  V4SI (0x000F000F, 0x000F000F, 0x000F000F, 0x000F000F);
 }
 
 void v4si_vspltish_addself ()
 {
-  v4si v = { 0x001E001E, 0x001E001E, 0x001E001E, 0x001E001E };
-  vspltish (w, 30);
-  check_v4si (v, w);
+  V4SI (0x001E001E, 0x001E001E, 0x001E001E, 0x001E001E);
 }
 
 void v4si_vspltish_neg ()
 {
-  v4si v = { 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB };
-  vspltish (w, -5);
-  check_v4si (v, w);
+  V4SI (0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB);
 }
 
 void v4si_vspltisw ()
 {
-  v4si v = { 15, 15, 15, 15 };
-  vspltisw (w, 15);
-  check_v4si (v, w);
+  V4SI (15, 15, 15, 15);
 }
 
 void v4si_vspltisw_neg ()
 {
-  v4si v = { -5, -5, -5, -5 };
-  vspltisw (w, -5);
-  check_v4si (v, w);
+  V4SI (-5, -5, -5, -5);
 }
 
 void v4si_vspltisw_addself ()
 {
-  v4si v = { 30, 30, 30, 30 };
-  vspltisw (w, 30);
-  check_v4si (v, w);
+  V4SI (30, 30, 30, 30);
 }
 
 void v4si_vspltisw_neg_addself ()
 {
-  v4si v = { -24, -24, -24, -24 };
-  vspltisw (w, -24);
-  check_v4si (v, w);
+  V4SI (-24, -24, -24, -24);
 }
 
 
@@ -316,3 +249,5 @@ int main ()
   v4si_vspltisw_neg_addself ();
   return 0;
 }
+
+/* { dg-final { scan-assembler-not "lvx" { target { ! powerpc*le-*-* } } } } */

-- 
Alan Modra
Australia Development Lab, IBM

Reply via email to