Hi,

There is a set of SIMD shift intrinsics whose instructions accept only a
very tight range of immediates, but whose patterns have been written with
very loose predicates, bailing out with an error during final if the
immediate turns out to be out of range.

This is a problem if some pass figures out that the value passed to one of
the related non-immediate-form intrinsics is constant and tries to use the
immediate form instead. The result can be a bogus error.
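
For example, something along the lines of the snippet below (a minimal
sketch in the spirit of the new vqshlb_1.c test added by this patch; the
function name is just illustrative) could trip the bogus
"constant out of range" error once constant propagation at -O3 turns the
shift amount into an immediate, even though the call is perfectly valid
for the register form:

  #include <arm_neon.h>

  int8_t
  foo (void)
  {
    /* 127 is a valid run-time shift amount for the register form of
       sqshl (the result simply saturates), but it is out of range for
       the immediate form, which only accepts shifts of 0-7 for byte
       elements.  */
    int8_t shift = 127;
    return vqshlb_s8 ((int8_t) -1, shift);
  }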

This patch tightens all such predicates, preventing the compiler from
trying to emit the immediate-form instructions where they are
inappropriate.

Cross-tested for aarch64-none-elf with no issues.

OK?

Thanks,
James

---
gcc/

2014-09-11  James Greenhalgh  <james.greenha...@arm.com>

        * config/aarch64/aarch64-protos.h (aarch64_simd_const_bounds): Change
        return type to bool.
        * config/aarch64/aarch64-simd.md (aarch64_<sur>shll_n<mode>): Use
        new predicates.
        (aarch64_<sur>shll2_n<mode>): Likewise.
        (aarch64_<sur>shr_n<mode>): Likewise.
        (aarch64_<sur>sra_n<mode>): Likewise.
        (aarch64_<sur>s<lr>i_n<mode>): Likewise.
        (aarch64_<sur>qshl<u>_n<mode>): Likewise.
        (aarch64_<sur>q<r>shr<u>n_n<mode>): Likewise.
        * config/aarch64/aarch64.c (aarch64_simd_const_bounds): Change
        return type to bool; don't print errors.
        * config/aarch64/iterators.md (ve_mode): New.
        (offsetlr): Remap to infix text for use in new predicates.
        * config/aarch64/predicates.md (aarch64_simd_shift_imm_qi): New.
        (aarch64_simd_shift_imm_hi): Likewise.
        (aarch64_simd_shift_imm_si): Likewise.
        (aarch64_simd_shift_imm_di): Likewise.
        (aarch64_simd_shift_imm_offset_qi): Likewise.
        (aarch64_simd_shift_imm_offset_hi): Likewise.
        (aarch64_simd_shift_imm_offset_si): Likewise.
        (aarch64_simd_shift_imm_offset_di): Likewise.
        (aarch64_simd_shift_imm_bitsize_qi): Likewise.
        (aarch64_simd_shift_imm_bitsize_hi): Likewise.
        (aarch64_simd_shift_imm_bitsize_si): Likewise.
        (aarch64_simd_shift_imm_bitsize_di): Likewise.

gcc/testsuite/

2014-09-08  James Greenhalgh  <james.greenha...@arm.com>

        * gcc.target/aarch64/simd/vqshlb_1.c: New.
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 35f89ff..9de7af7 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -205,6 +205,7 @@ bool aarch64_regno_ok_for_base_p (int, bool);
 bool aarch64_regno_ok_for_index_p (int, bool);
 bool aarch64_simd_check_vect_par_cnst_half (rtx op, enum machine_mode mode,
 					    bool high);
+bool aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
 bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode);
 bool aarch64_simd_imm_zero_p (rtx, enum machine_mode);
 bool aarch64_simd_scalar_immediate_valid_for_move (rtx, enum machine_mode);
@@ -255,7 +256,6 @@ void aarch64_emit_call_insn (rtx);
 /* Initialize builtins for SIMD intrinsics.  */
 void init_aarch64_simd_builtins (void);
 
-void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
 void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
 
 /* Emit code to place a AdvSIMD pair result in memory locations (with equal
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 6a45e91512ffe1c8c2ecd2b1ba4336baf87f7256..9e688e310027c772cfe5ecd4a158796b143998c5 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -3715,12 +3715,12 @@ (define_insn "aarch64_<sur>q<r>shl<mode>
 (define_insn "aarch64_<sur>shll_n<mode>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
 	(unspec:<VWIDE> [(match_operand:VDW 1 "register_operand" "w")
-			 (match_operand:SI 2 "immediate_operand" "i")]
+			 (match_operand:SI 2
+			   "aarch64_simd_shift_imm_bitsize_<ve_mode>" "i")]
                          VSHLL))]
   "TARGET_SIMD"
   "*
   int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
   if (INTVAL (operands[2]) == bit_width)
   {
     return \"shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
@@ -3741,7 +3741,6 @@ (define_insn "aarch64_<sur>shll2_n<mode>
   "TARGET_SIMD"
   "*
   int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
   if (INTVAL (operands[2]) == bit_width)
   {
     return \"shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
@@ -3757,13 +3756,11 @@ (define_insn "aarch64_<sur>shll2_n<mode>
 (define_insn "aarch64_<sur>shr_n<mode>"
   [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
         (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
-			   (match_operand:SI 2 "immediate_operand" "i")]
+			   (match_operand:SI 2
+			     "aarch64_simd_shift_imm_offset_<ve_mode>" "i")]
 			  VRSHR_N))]
   "TARGET_SIMD"
-  "*
-  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
-  return \"<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+  "<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2"
   [(set_attr "type" "neon_sat_shift_imm<q>")]
 )
 
@@ -3773,13 +3770,11 @@ (define_insn "aarch64_<sur>sra_n<mode>"
   [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
 	(unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
 		       (match_operand:VSDQ_I_DI 2 "register_operand" "w")
-                       (match_operand:SI 3 "immediate_operand" "i")]
+                       (match_operand:SI 3
+			 "aarch64_simd_shift_imm_offset_<ve_mode>" "i")]
                       VSRA))]
   "TARGET_SIMD"
-  "*
-  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[3], 1, bit_width + 1);
-  return \"<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+  "<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3"
   [(set_attr "type" "neon_shift_acc<q>")]
 )
 
@@ -3789,14 +3784,11 @@ (define_insn "aarch64_<sur>s<lr>i_n<mode
   [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
 	(unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
 		       (match_operand:VSDQ_I_DI 2 "register_operand" "w")
-                       (match_operand:SI 3 "immediate_operand" "i")]
+                       (match_operand:SI 3
+			 "aarch64_simd_shift_imm_<offsetlr><ve_mode>" "i")]
                       VSLRI))]
   "TARGET_SIMD"
-  "*
-  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[3], 1 - <VSLRI:offsetlr>,
-                             bit_width - <VSLRI:offsetlr> + 1);
-  return \"s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+  "s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3"
   [(set_attr "type" "neon_shift_imm<q>")]
 )
 
@@ -3805,13 +3797,11 @@ (define_insn "aarch64_<sur>s<lr>i_n<mode
 (define_insn "aarch64_<sur>qshl<u>_n<mode>"
   [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
 	(unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "w")
-		       (match_operand:SI 2 "immediate_operand" "i")]
+		       (match_operand:SI 2
+			 "aarch64_simd_shift_imm_<ve_mode>" "i")]
                       VQSHL_N))]
   "TARGET_SIMD"
-  "*
-  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[2], 0, bit_width);
-  return \"<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+  "<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2"
   [(set_attr "type" "neon_sat_shift_imm<q>")]
 )
 
@@ -3821,13 +3811,11 @@ (define_insn "aarch64_<sur>qshl<u>_n<mod
 (define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>"
   [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
         (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")
-			    (match_operand:SI 2 "immediate_operand" "i")]
+			    (match_operand:SI 2
+			      "aarch64_simd_shift_imm_offset_<ve_mode>" "i")]
 			   VQSHRN_N))]
   "TARGET_SIMD"
-  "*
-  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
-  return \"<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2\";"
+  "<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2"
   [(set_attr "type" "neon_sat_shift_imm_narrow_q")]
 )
 
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index e020bd3..b87db36 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -7931,14 +7931,13 @@ aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
     error ("lane out of range");
 }
 
-void
+bool
 aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
 {
   gcc_assert (CONST_INT_P (operand));
-  HOST_WIDE_INT lane = INTVAL (operand);
+  HOST_WIDE_INT constant = INTVAL (operand);
 
-  if (lane < low || lane >= high)
-    error ("constant out of range");
+  return (constant >= low && constant <= high);
 }
 
 /* Emit code to reinterpret one AdvSIMD type as another,
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index a00283a04b3c916422ea65c5260e39db63d1a4b3..79f3ba58c28638924294c630ee86379cb1352053 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -538,6 +538,14 @@ (define_mode_attr v_cmp_result [(V8QI "v
 				(V2DF "v2di") (DF    "di")
 				(SF   "si")])
 
+;; Lower case element modes (as used in shift immediate patterns).
+(define_mode_attr ve_mode [(V8QI "qi") (V16QI "qi")
+			   (V4HI "hi") (V8HI  "hi")
+			   (V2SI "si") (V4SI  "si")
+			   (DI   "di") (V2DI  "di")
+			   (QI   "qi") (HI    "hi")
+			   (SI   "si")])
+
 ;; Vm for lane instructions is restricted to FP_LO_REGS.
 (define_mode_attr vwx [(V4HI "x") (V8HI "x") (HI "x")
 		       (V2SI "w") (V4SI "w") (SI "w")])
@@ -1007,8 +1015,9 @@ (define_int_attr addsub [(UNSPEC_SHADD "
 			 (UNSPEC_RADDHN2 "add")
 			 (UNSPEC_RSUBHN2 "sub")])
 
-(define_int_attr offsetlr [(UNSPEC_SSLI	"1") (UNSPEC_USLI "1")
-			   (UNSPEC_SSRI	"0") (UNSPEC_USRI "0")])
+(define_int_attr offsetlr [(UNSPEC_SSLI "") (UNSPEC_USLI "")
+			   (UNSPEC_SSRI "offset_")
+			   (UNSPEC_USRI "offset_")])
 
 ;; Standard pattern names for floating-point rounding instructions.
 (define_int_attr frint_pattern [(UNSPEC_FRINTZ "btrunc")
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
index 8191169e89b1eaf04c00ea709af70412d2cee361..3b65b5e03f3e762d62bb7b2d59e9a05dad34162b 100644
--- a/gcc/config/aarch64/predicates.md
+++ b/gcc/config/aarch64/predicates.md
@@ -279,3 +279,56 @@ (define_special_predicate "aarch64_simd_
 {
   return aarch64_const_vec_all_same_int_p (op, -1);
 })
+
+;; Predicates used by the various SIMD shift operations.  These
+;; fall into 3 categories.
+;;   Shifts with a range 0-(bit_size - 1) (aarch64_simd_shift_imm)
+;;   Shifts with a range 1-bit_size (aarch64_simd_shift_imm_offset)
+;;   Shifts with a range 0-bit_size (aarch64_simd_shift_imm_bitsize)
+(define_predicate "aarch64_simd_shift_imm_qi"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 0, 7)")))
+
+(define_predicate "aarch64_simd_shift_imm_hi"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 0, 15)")))
+
+(define_predicate "aarch64_simd_shift_imm_si"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 0, 31)")))
+
+(define_predicate "aarch64_simd_shift_imm_di"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 0, 63)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_qi"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 1, 8)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_hi"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 1, 16)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_si"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 1, 32)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_di"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 1, 64)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_qi"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 0, 8)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_hi"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 0, 16)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_si"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 0, 32)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_di"
+  (and (match_code "const_int")
+       (match_test "aarch64_simd_const_bounds (op, 0, 64)")))
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vqshlb_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vqshlb_1.c
new file mode 100644
index 0000000..ae741de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vqshlb_1.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+
+#include "arm_neon.h"
+
+extern void abort ();
+
+int
+main (int argc, char **argv)
+{
+  int8_t arg1 = -1;
+  int8_t arg2 = 127;
+  int8_t exp = -128;
+  int8_t got = vqshlb_s8 (arg1, arg2);
+
+  if (exp != got)
+    abort ();
+
+  return 0;
+}
+
