This fixes an endless recursion in folding that arises because we are inconsistent in how we canonicalize X & CST when some bits of CST can be omitted. The following patch makes the two places that perform this canonicalization consistent.
Bootstrap and regtest running on x86_64-unknown-linux-gnu. Richard. 2014-04-30 Richard Biener <rguent...@suse.de> PR middle-end/61010 * fold-const.c (fold_binary_loc): Consistently avoid canonicalizing X & CST away from a CST that is the mask of a mode. * gcc.dg/torture/pr61010.c: New testcase. Index: gcc/fold-const.c =================================================================== *** gcc/fold-const.c (revision 209928) --- gcc/fold-const.c (working copy) *************** fold_binary_loc (location_t loc, *** 11426,11432 **** { double_int c1, c2, c3, msk; int width = TYPE_PRECISION (type), w; - bool try_simplify = true; c1 = tree_to_double_int (TREE_OPERAND (arg0, 1)); c2 = tree_to_double_int (arg1); --- 11426,11431 ---- *************** fold_binary_loc (location_t loc, *** 11463,11482 **** } } ! /* If X is a tree of the form (Y * K1) & K2, this might conflict ! with that optimization from the BIT_AND_EXPR optimizations. ! This could end up in an infinite recursion. */ ! if (TREE_CODE (TREE_OPERAND (arg0, 0)) == MULT_EXPR ! && TREE_CODE (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)) ! == INTEGER_CST) ! { ! tree t = TREE_OPERAND (TREE_OPERAND (arg0, 0), 1); ! double_int masked = mask_with_tz (type, c3, tree_to_double_int (t)); ! ! try_simplify = (masked != c1); ! } ! ! if (try_simplify && c3 != c1) return fold_build2_loc (loc, BIT_IOR_EXPR, type, fold_build2_loc (loc, BIT_AND_EXPR, type, TREE_OPERAND (arg0, 0), --- 11462,11468 ---- } } ! if (c3 != c1) return fold_build2_loc (loc, BIT_IOR_EXPR, type, fold_build2_loc (loc, BIT_AND_EXPR, type, TREE_OPERAND (arg0, 0), *************** fold_binary_loc (location_t loc, *** 11866,11881 **** && TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST) { double_int masked ! = mask_with_tz (type, tree_to_double_int (arg1), tree_to_double_int (TREE_OPERAND (arg0, 1))); if (masked.is_zero ()) return omit_two_operands_loc (loc, type, build_zero_cst (type), arg0, arg1); ! 
else if (masked != tree_to_double_int (arg1)) ! return fold_build2_loc (loc, code, type, op0, ! double_int_to_tree (type, masked)); } /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M, --- 11852,11876 ---- && TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST) { + double_int darg1 = tree_to_double_int (arg1); double_int masked ! = mask_with_tz (type, darg1, tree_to_double_int (TREE_OPERAND (arg0, 1))); if (masked.is_zero ()) return omit_two_operands_loc (loc, type, build_zero_cst (type), arg0, arg1); ! else if (masked != darg1) ! { ! /* Avoid the transform if arg1 is a mask of some ! mode which allows further optimizations. */ ! int pop = darg1.popcount (); ! if (!(pop >= BITS_PER_UNIT ! && exact_log2 (pop) != -1 ! && double_int::mask (pop) == darg1)) ! return fold_build2_loc (loc, code, type, op0, ! double_int_to_tree (type, masked)); ! } } /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M, Index: gcc/testsuite/gcc.dg/torture/pr61010.c =================================================================== *** gcc/testsuite/gcc.dg/torture/pr61010.c (revision 0) --- gcc/testsuite/gcc.dg/torture/pr61010.c (working copy) *************** *** 0 **** --- 1,8 ---- + /* { dg-do compile } */ + + int main (void) + { + int a = 0; + unsigned b = (a * 64 & 192) | 63U; + return 0; + }