This patch extends aarch64_gimple_fold_builtin () to constant fold calls to
__builtin_fmulx* for 32-bit and 64-bit floating-point scalar modes. We fold
when both arguments are constant, as well as when only one of them is. The
special cases 0*inf, -0*inf, 0*-inf, and -0*-inf are also handled. Constant
folding of vector arguments will be dealt with in a future patch, since the
tests for it would be obscure and would unnecessarily complicate this patch.
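For reference, fmulx behaves like an ordinary multiply except in exactly those
special cases, where it returns +/-2.0 instead of the NaN a plain multiply
would produce. A minimal illustration (my own sketch, not part of the patch,
using the arm_neon.h scalar intrinsics that are implemented through these
builtins):

  #include <arm_neon.h>
  #include <math.h>

  void
  fmulx_special_cases (void)
  {
    /* (+/-0) * (+/-inf) yields +/-2.0, the sign being the XOR of the
       operand signs; every other case is a plain multiply.  */
    float64_t a = vmulxd_f64 (0.0, INFINITY);    /* +2.0  */
    float64_t b = vmulxd_f64 (-0.0, INFINITY);   /* -2.0  */
    float64_t c = vmulxd_f64 (3.0, 2.0);         /* 6.0   */
    (void) a; (void) b; (void) c;
  }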
Tests checking proper handling of the constant folding have been added. Tested
on aarch64-none-elf and aarch64_be-none-elf.
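As an illustrative sketch only (not one of the new tests, whose contents are
in the files listed below): with a single constant operand that is neither
zero nor infinite, the call folds to a plain multiply, which a test could
check along these lines:

  /* { dg-do compile } */
  /* { dg-options "-O2" } */

  #include <arm_neon.h>

  float64_t
  foo (float64_t x)
  {
    /* Folds to x * 4.0, so no fmulx instruction should remain.  */
    return vmulxd_f64 (x, 4.0);
  }

  /* { dg-final { scan-assembler-not "fmulx" } } */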
---
gcc/
2015-XX-XX Bilyan Borisov
	* config/aarch64/aarch64-builtins.c (aarch64_gimple_fold_builtin):
	Add constant folding of __builtin_fmulx* calls.
gcc/testsuite/
2015-XX-XX Bilyan Borisov
* gcc.target/aarch64/simd/vmulx.x: New.
* gcc.target/aarch64/simd/vmulx_f64_2.c: Likewise.
* gcc.target/aarch64/simd/vmulxd_f64_2.c: Likewise.
* gcc.target/aarch64/simd/vmulxs_f32_2.c: Likewise.
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index a1998ed550ac801e4d80baae122bf58e394a563f..339054d344900c942d5ce7c047479de3bbb4e61b 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -1362,7 +1362,7 @@ aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
if (fndecl)
{
int fcode = DECL_FUNCTION_CODE (fndecl);
- int nargs = gimple_call_num_args (stmt);
+ unsigned nargs = gimple_call_num_args (stmt);
tree *args = (nargs > 0
? gimple_call_arg_ptr (stmt, 0)
		      : &error_mark_node);
@@ -1386,7 +1386,54 @@ aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
REDUC_MIN_EXPR, args[0]);
break;
-
+      BUILTIN_GPF (BINOP, fmulx, 0)
+	{
+	  gcc_assert (nargs == 2);
+	  bool a0_cst_p = TREE_CODE (args[0]) == REAL_CST;
+	  bool a1_cst_p = TREE_CODE (args[1]) == REAL_CST;
+	  if (a0_cst_p || a1_cst_p)
+	    {
+	      if (a0_cst_p && a1_cst_p)
+		{
+		  tree t0 = TREE_TYPE (args[0]);
+		  real_value a0 = TREE_REAL_CST (args[0]);
+		  real_value a1 = TREE_REAL_CST (args[1]);
+		  if (real_equal (&a1, &dconst0))
+		    std::swap (a0, a1);
+		  /* According to real_equal (), +0 equals -0.  */
+		  if (real_equal (&a0, &dconst0) && real_isinf (&a1))
+		    {
+		      real_value res = dconst2;
+		      res.sign = a0.sign ^ a1.sign;
+		      new_stmt =
+			gimple_build_assign (gimple_call_lhs (stmt),
+					     REAL_CST,
+					     build_real (t0, res));
+		    }
+		  else
+		    new_stmt =
+		      gimple_build_assign (gimple_call_lhs (stmt),
+					   MULT_EXPR,
+					   args[0], args[1]);
+		}
+	      else /* a0_cst_p ^ a1_cst_p.  */
+		{
+		  real_value const_part = a0_cst_p
+		    ? TREE_REAL_CST (args[0]) : TREE_REAL_CST (args[1]);
+		  if (!real_equal (&const_part, &dconst0)
+		      && !real_isinf (&const_part))
+		    new_stmt =
+		      gimple_build_assign (gimple_call_lhs (stmt),
+					   MULT_EXPR, args[0], args[1]);
+		}
+	    }
+	  if (new_stmt)
+	    {
+	      gimple_set_vuse (new_stmt, gimple_vuse (stmt));
+	      gimple_set_vdef (new_stmt, gimple_vdef (stmt));
+	    }
+	  break;
+	}
default:
break;
}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vmulx.x b/gcc/testsuite/gcc.target/aarch64/simd/vmulx.x
new file mode 100644
index ..8968a64a95cb40a466dd77fea4e9f9f63ad707dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vmulx.x
@@ -0,0 +1,46 @@
+#define PASS_ARRAY(...) {__VA_ARGS__}
+
+#define SETUP_TEST_CASE_VEC(I, INTRINSIC, BASE_TYPE, TYPE1, TYPE2, \
+                            VALS1, VALS2, EXPS, LEN, FM, Q_LD, Q_ST, \
+                            V1, V2) \
+  do \
+    { \
+      int i##I; \
+      BASE_TYPE vec##I##_1_data[] = VALS1; \
+      BASE_TYPE vec##I##_2_data[] = VALS2; \
+      V1 TYPE1 vec##I##_1 = vld1##Q_LD##_##FM (vec##I##_1_data); \
+      V2 TYPE2 vec##I##_2 = vld1##Q_LD##_##FM (vec##I##_2_data); \
+      TYPE1 actual##I##_v = INTRINSIC (vec##I##_1, vec##I##_2); \
+      volatile BASE_TYPE expected##I[] = EXPS; \
+      BASE_TYPE actual##I[LEN]; \
+      vst1##Q_ST##_##FM (actual##I, actual##I##_v); \
+      for (i##I = 0; i##I < LEN; ++i##I) \
+        if (actual##I[i##I] != expected##I[i##I]) \
+          abort (); \
+    } \
+  while (0)
+
+#define SETUP_TEST_CASE_SCALAR(I, INTRINSIC, TYPE, VAL1, VAL2, EXP) \
+  do \
+    { \
+      TYPE vec_##I##_1 = VAL1; \
+      TYPE vec_##I##_2 = VAL2; \
+      TYPE expected_##I = EXP; \
+      volatile TYPE actual_##I = INTRINSIC (vec_##I##_1, vec_##I##_2); \
+      if (actual_##I != expected_##I) \
+        abort (); \
+    } \
+  while (0)
+
+/* Functions used to return values that won't be optimised away.  */
+float32_t __attribute__ ((noinline))
+foo32 ()
+{
+ return 1.0;
+}
+
+float64_t __attribute__