[AArch64] Add support for floating-point vcond.

2013-01-08 Thread James Greenhalgh

Hi,

The AArch64 port has support for integer vcond, but did not have
support for floating-point vcond.

This patch adds this support and enables check_effective_target_vect_cond
in the testsuite.

Regression tested on aarch64-none-elf with no regressions.

Is this OK to commit?

Thanks,
James Greenhalgh

---
gcc/

2013-01-08  James Greenhalgh  <james.greenhalgh@arm.com>

* config/aarch64/aarch64-simd.md
(aarch64_simd_bsl<mode>_internal): Add floating-point modes.
(aarch64_simd_bsl<mode>): Likewise.
(aarch64_vcond_internal<mode>): Likewise.
(vcond<mode><mode>): Likewise.
(aarch64_cm<cmp><mode>): Fix constraints, add new modes.
* config/aarch64/iterators.md (V_cmp_result): Add V2DF.

gcc/testsuite/

2013-01-08  James Greenhalgh  <james.greenhalgh@arm.com>

* gcc/testsuite/gcc.target/aarch64/vect-fcm-eq-d.c: New.
* gcc/testsuite/gcc.target/aarch64/vect-fcm-eq-f.c: Likewise.
* gcc/testsuite/gcc.target/aarch64/vect-fcm-ge-d.c: Likewise.
* gcc/testsuite/gcc.target/aarch64/vect-fcm-ge-f.c: Likewise.
* gcc/testsuite/gcc.target/aarch64/vect-fcm-gt-d.c: Likewise.
* gcc/testsuite/gcc.target/aarch64/vect-fcm-gt-f.c: Likewise.
* gcc/testsuite/gcc.target/aarch64/vect-fcm.x: Likewise.
* gcc/testsuite/lib/target-supports.exp
(check_effective_target_vect_cond): Enable for AArch64.
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index d4b52c3..e6655e8 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1463,7 +1463,7 @@
(set_attr "simd_mode" "V2SI")]
 )
 
-;; vbsl_* intrinsics may compile to any of vbsl/vbif/vbit depending on register
+;; vbsl_* intrinsics may compile to any of bsl/bif/bit depending on register
 ;; allocation.  For an intrinsic of form:
 ;;   vD = bsl_* (vS, vN, vM)
 ;; We can use any of:
@@ -1472,11 +1472,12 @@
 ;;   bif vD, vM, vS  (if D = N, so 0-bits in vS choose bits from vM, else vN)
 
(define_insn "aarch64_simd_bsl<mode>_internal"
-  [(set (match_operand:VDQ 0 "register_operand"		"=w,w,w")
-	(unspec:VDQ [(match_operand:VDQ 1 "register_operand"  "0,w,w")
-		 (match_operand:VDQ 2 "register_operand"  "w,w,0")
-		 (match_operand:VDQ 3 "register_operand"  "w,0,w")]
-		UNSPEC_BSL))]
+  [(set (match_operand:VALL 0 "register_operand"		"=w,w,w")
+	(unspec:VALL
+	 [(match_operand:<V_cmp_result> 1 "register_operand"	 "0,w,w")
+	  (match_operand:VALL 2 "register_operand"		 "w,w,0")
+	  (match_operand:VALL 3 "register_operand"		 "w,0,w")]
+	 UNSPEC_BSL))]
  "TARGET_SIMD"
  "@
  bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
@@ -1485,15 +1486,15 @@
 )
 
(define_expand "aarch64_simd_bsl<mode>"
-  [(set (match_operand:VDQ 0 "register_operand")
-	(unspec:VDQ [(match_operand:<V_cmp_result> 1 "register_operand")
-		 (match_operand:VDQ 2 "register_operand")
-		 (match_operand:VDQ 3 "register_operand")]
-		UNSPEC_BSL))]
+  [(set (match_operand:VALL 0 "register_operand")
+	(unspec:VALL [(match_operand:<V_cmp_result> 1 "register_operand")
+		  (match_operand:VALL 2 "register_operand")
+		  (match_operand:VALL 3 "register_operand")]
+		 UNSPEC_BSL))]
  "TARGET_SIMD"
{
  /* We can't alias operands together if they have different modes.  */
-  operands[1] = gen_lowpart (<MODE>mode, operands[1]);
+  operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]);
})
 
(define_expand "aarch64_vcond_internal<mode>"
@@ -1574,14 +1575,64 @@
   DONE;
 })
 
+(define_expand "aarch64_vcond_internal<mode>"
+  [(set (match_operand:VDQF 0 "register_operand")
+	(if_then_else:VDQF
+	  (match_operator 3 "comparison_operator"
+	    [(match_operand:VDQF 4 "register_operand")
+	     (match_operand:VDQF 5 "nonmemory_operand")])
+	  (match_operand:VDQF 1 "register_operand")
+	  (match_operand:VDQF 2 "register_operand")))]
+  "TARGET_SIMD"
+{
+  int inverse = 0;
+  rtx mask = gen_reg_rtx (<V_cmp_result>mode);
+
+  if (!REG_P (operands[5])
+      && (operands[5] != CONST0_RTX (<MODE>mode)))
+    operands[5] = force_reg (<MODE>mode, operands[5]);
+
+  switch (GET_CODE (operands[3]))
+    {
+    case LT:
+      inverse = 1;
+      /* Fall through.  */
+    case GE:
+      emit_insn (gen_aarch64_cmge<mode> (mask, operands[4], operands[5]));
+      break;
+    case LE:
+      inverse = 1;
+      /* Fall through.  */
+    case GT:
+      emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
+      break;
+    case NE:
+      inverse = 1;
+      /* Fall through.  */
+    case EQ:
+      emit_insn (gen_aarch64_cmeq<mode> (mask, operands[4], operands[5]));
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  if (inverse)
+    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
+					   operands[1]));
+  else
+    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
+					   operands[2]));
+  DONE;
+})
+
(define_expand "vcond<mode><mode>"
-  [(set (match_operand:VDQ 0 "register_operand")
-	(if_then_else:VDQ
+  [(set (match_operand:VALL 0 "register_operand")
+	(if_then_else:VALL
 	  

Re: [AArch64] Add support for floating-point vcond.

2013-01-08 Thread Marcus Shawcroft
On 8 January 2013 12:04, James Greenhalgh <james.greenhalgh@arm.com> wrote:

 Hi,

 The AArch64 port has support for integer vcond, but did not have
 support for floating-point vcond.

 This patch adds this support and enables check_effective_target_vect_cond
 in the testsuite.

 Regression tested on aarch64-none-elf with no regressions.

 Is this OK to commit?

OK
/Marcus