On Dec 8, 2024, Richard Biener <[email protected]> wrote:
> Like below, gimple_convert_def_p ().
>> +static bool
>> +gimple_fold_follow_convert (tree t, tree op[1])
> Since it doesn't actually fold - can you name it
> static bool
> gimple_binop_def_p (...)
>> + if (tree_swap_operands_p (op0, op1))
>> + std::swap (op0, op1);
> All stmts are canonical, you shouldn't need to swap operands here.
>> + if (uniform_integer_cst_p (op1))
> ... put this in the caller so it could be made more universal eventually?
Incremental for upcoming v4
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index 126d6c5b849e9..65f1e7eb97fae 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -7443,7 +7443,7 @@ maybe_fold_comparisons_from_match_pd (tree type, enum tree_code code,
conversion. */
static bool
-gimple_fold_follow_convert (tree t, tree op[1])
+gimple_convert_def_p (tree t, tree op[1])
{
if (TREE_CODE (t) == SSA_NAME
&& !SSA_NAME_IS_DEFAULT_DEF (t))
@@ -7466,23 +7466,16 @@ gimple_fold_follow_convert (tree t, tree op[1])
binary expression with code CODE. */
static bool
-gimple_fold_binop_cst (enum tree_code code, tree t, tree op[2])
+gimple_binop_def_p (enum tree_code code, tree t, tree op[2])
{
if (TREE_CODE (t) == SSA_NAME
&& !SSA_NAME_IS_DEFAULT_DEF (t))
if (gimple *def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (t)))
if (gimple_assign_rhs_code (def) == code)
{
- tree op0 = gimple_assign_rhs1 (def);
- tree op1 = gimple_assign_rhs2 (def);
- if (tree_swap_operands_p (op0, op1))
- std::swap (op0, op1);
- if (uniform_integer_cst_p (op1))
- {
- op[0] = op0;
- op[1] = op1;
- return true;
- }
+ op[0] = gimple_assign_rhs1 (def);
+ op[1] = gimple_assign_rhs2 (def);
+ return true;
}
return false;
}
@@ -7545,7 +7538,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
narrowing then widening casts, or vice-versa, for those that are not
essential for the compare have already been optimized out at this
point. */
- if (gimple_fold_follow_convert (exp, res_ops))
+ if (gimple_convert_def_p (exp, res_ops))
{
if (!outer_type)
{
@@ -7556,7 +7549,8 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
}
/* Recognize and save a masking operation. */
- if (pand_mask && gimple_fold_binop_cst (BIT_AND_EXPR, exp, res_ops))
+ if (pand_mask && gimple_binop_def_p (BIT_AND_EXPR, exp, res_ops)
+ && uniform_integer_cst_p (res_ops[1]))
{
loc[1] = gimple_location (SSA_NAME_DEF_STMT (exp));
exp = res_ops[0];
@@ -7564,7 +7558,8 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
}
/* Turn (a ^ b) [!]= 0 into a [!]= b. */
- if (xor_p && gimple_fold_binop_cst (BIT_XOR_EXPR, exp, res_ops))
+ if (xor_p && gimple_binop_def_p (BIT_XOR_EXPR, exp, res_ops)
+ && uniform_integer_cst_p (res_ops[1]))
{
/* No location recorded for this one, it's entirely subsumed by the
compare. */
@@ -7586,7 +7581,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
}
/* Another chance to drop conversions. */
- if (gimple_fold_follow_convert (exp, res_ops))
+ if (gimple_convert_def_p (exp, res_ops))
{
if (!outer_type)
{
@@ -7597,7 +7592,8 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
}
/* Take note of shifts. */
- if (gimple_fold_binop_cst (RSHIFT_EXPR, exp, res_ops))
+ if (gimple_binop_def_p (RSHIFT_EXPR, exp, res_ops)
+ && uniform_integer_cst_p (res_ops[1]))
{
loc[2] = gimple_location (SSA_NAME_DEF_STMT (exp));
exp = res_ops[0];
@@ -7609,7 +7605,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
}
/* Yet another chance to drop conversions. */
- if (gimple_fold_follow_convert (exp, res_ops))
+ if (gimple_convert_def_p (exp, res_ops))
{
if (!outer_type)
{
diff --git a/gcc/testsuite/gcc.target/aarch64/long_branch_1.c b/gcc/testsuite/gcc.target/aarch64/long_branch_1.c
index 49d8b6a2278ad..0b04e36873388 100644
--- a/gcc/testsuite/gcc.target/aarch64/long_branch_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/long_branch_1.c
@@ -1,6 +1,6 @@
/* { dg-do assemble } */
/* { dg-timeout-factor 2.0 } */
-/* { dg-options "-O1 -fno-reorder-blocks -fno-tree-cselim --save-temps" } */
+/* { dg-options "-O1 -fno-reorder-blocks -fno-tree-cselim -fno-tree-ifcombine --save-temps" } */
__attribute__((noinline, noclone)) int
--
Alexandre Oliva, happy hacker https://FSFLA.org/blogs/lxo/
Free Software Activist GNU Toolchain Engineer
More tolerance and less prejudice are key for inclusion and diversity
Excluding neuro-others for not behaving "normal" is *not* inclusive