Hi!

This patch adds ASSERT_EXPRs derived from

  tmp_N = var_M & cst2;
  if (tmp_N cmp val)

tests, where cst2 and val are integer constants.

Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?
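
To illustrate the kind of code this enables VRP to optimize (a made-up
fragment in the spirit of the new testcases, not the committed tests
themselves): the assert derived from the masked comparison tells VRP
that x >= 0x40 on the taken edge, so the inner test folds to false and
the call is removed.

void link_error (void);

void
foo (unsigned int x)
{
  if ((x & 0xf0) == 0x40)
    {
      /* The new ASSERT_EXPR records that x is at least 0x40 here,
         so VRP folds the test below and link_error disappears.  */
      if (x < 0x40)
        link_error ();
    }
}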
2012-03-14  Jakub Jelinek

	PR tree-optimization/52267
	* tree-vrp.c (masked_increment): New function.
	(register_edge_assert_for_2): Derive ASSERT_EXPRs
	from (X & CST1) cmp CST2 tests.

	* gcc.dg/pr52267.c: New test.
	* gcc.dg/tree-ssa/vrp65.c: New test.
	* gcc.dg/tree-ssa/vrp66.c: New test.
--- gcc/tree-vrp.c.jj 2012-03-14 09:39:41.574262693 +0100
+++ gcc/tree-vrp.c 2012-03-14 13:22:49.468537249 +0100
@@ -4338,6 +4338,34 @@ extract_code_and_val_from_cond_with_ops
   return true;
 }
 
+/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
+   (otherwise return VAL).  VAL and MASK must be zero-extended for
+   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
+   (to transform signed values into unsigned) and at the end xor
+   SGNBIT back.  */
+
+static double_int
+masked_increment (double_int val, double_int mask, double_int sgnbit,
+                  unsigned int prec)
+{
+  double_int bit = double_int_one, res;
+  unsigned int i;
+
+  val = double_int_xor (val, sgnbit);
+  for (i = 0; i < prec; i++, bit = double_int_add (bit, bit))
+    {
+      res = mask;
+      if (double_int_zero_p (double_int_and (res, bit)))
+        continue;
+      res = double_int_sub (bit, double_int_one);
+      res = double_int_and_not (double_int_add (val, bit), res);
+      res = double_int_and (res, mask);
+      if (double_int_ucmp (res, val) > 0)
+        return double_int_xor (res, sgnbit);
+    }
+  return double_int_xor (val, sgnbit);
+}
+
 /* Try to register an edge assertion for SSA name NAME on edge E for
    the condition COND contributing to the conditional jump pointed to by BSI.
    Invert the condition COND if INVERT is true.
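
To make the bit manipulation in masked_increment above easier to
follow, here is the same algorithm as a standalone sketch on plain
64-bit values instead of the double_int API (illustration only, not
part of the patch; assumes prec <= 64 and zero-extended inputs):

#include <stdint.h>

/* Smallest RES > VAL with (RES & MASK) == RES, or VAL if there is no
   such value within PREC bits.  SGNBIT, if nonzero, is xored in and
   out so signed inputs can be handled with unsigned compares.  */
static uint64_t
masked_increment_sketch (uint64_t val, uint64_t mask, uint64_t sgnbit,
                         unsigned int prec)
{
  uint64_t bit = 1, res;
  unsigned int i;

  val ^= sgnbit;
  for (i = 0; i < prec; i++, bit += bit)
    {
      if ((mask & bit) == 0)
        continue;                        /* Only bump at bits MASK has.  */
      res = (val + bit) & ~(bit - 1);    /* Round VAL up at this bit.  */
      res &= mask;                       /* Keep only MASK's bits.  */
      if (res > val)
        return res ^ sgnbit;
    }
  return val ^ sgnbit;
}

E.g. masked_increment_sketch (4, 0x123, 0, 32) returns 0x20, the
smallest value above 4 that uses only the bits in 0x123.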
@@ -4466,7 +4494,7 @@ register_edge_assert_for_2 (tree name, e
       && TREE_CODE (val) == INTEGER_CST)
     {
       gimple def_stmt = SSA_NAME_DEF_STMT (name);
-      tree name2 = NULL_TREE, cst2 = NULL_TREE;
+      tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
       tree val2 = NULL_TREE;
       double_int mask = double_int_zero;
       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
@@ -4591,6 +4619,248 @@ register_edge_assert_for_2 (tree name, e
               retval = true;
             }
         }
+
+      /* Add asserts for NAME cmp CST and NAME being defined as
+         NAME = NAME2 & CST2.
+
+         Extract CST2 from the and.  */
+      names[0] = NULL_TREE;
+      names[1] = NULL_TREE;
+      cst2 = NULL_TREE;
+      if (is_gimple_assign (def_stmt)
+          && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
+        {
+          name2 = gimple_assign_rhs1 (def_stmt);
+          cst2 = gimple_assign_rhs2 (def_stmt);
+          if (TREE_CODE (name2) == SSA_NAME
+              && INTEGRAL_TYPE_P (TREE_TYPE (name2))
+              && TREE_CODE (cst2) == INTEGER_CST
+              && !integer_zerop (cst2)
+              && prec <= 2 * HOST_BITS_PER_WIDE_INT
+              && (prec > 1
+                  || TYPE_UNSIGNED (TREE_TYPE (val))))
+            {
+              gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
+              if (gimple_assign_cast_p (def_stmt2))
+                {
+                  names[1] = gimple_assign_rhs1 (def_stmt2);
+                  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
+                      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
+                      || (TYPE_PRECISION (TREE_TYPE (name2))
+                          != TYPE_PRECISION (TREE_TYPE (names[1])))
+                      || !live_on_edge (e, names[1])
+                      || has_single_use (names[1]))
+                    names[1] = NULL_TREE;
+                }
+              if (live_on_edge (e, name2)
+                  && !has_single_use (name2))
+                names[0] = name2;
+            }
+        }
+      if (names[0] || names[1])
+        {
+          double_int minv, maxv = double_int_zero, valv, cst2v;
+          double_int tem, sgnbit;
+          bool valid_p = false, valn = false, cst2n = false;
+          enum tree_code ccode = comp_code;
+
+          valv = double_int_zext (tree_to_double_int (val), prec);
+          cst2v = double_int_zext (tree_to_double_int (cst2), prec);
+          if (!TYPE_UNSIGNED (TREE_TYPE (val)))
+            {
+              valn = double_int_negative_p (double_int_sext (valv, prec));
+              cst2n = double_int_negative_p (double_int_sext (cst2v, prec));
+            }
+          /* If CST2 doesn't have most significant bit set,
+             but VAL is negative, we have comparison like
+             if ((x & 0x123) > -4) (always true).  Just give up.  */
+          if (!cst2n && valn)
+            ccode = ERROR_MARK;
+          if (cst2n)
+            sgnbit = double_int_zext (double_int_lshift (double_int_one,
+