Bah. The range was being clobbered half way through the calculation. Tested on x86-64 Linux.
Pushed. On Fri, Oct 1, 2021 at 4:52 PM Aldy Hernandez <al...@redhat.com> wrote: > > Well, after talking with Andrew it seems that X << Y being non-zero > also implies X is non-zero. So we don't even need relationals here. > > So, I leave gori relationals in his capable hands, while I test this > much simpler patch which fixes the PR with no additional > infrastructure ;-). > > Will push pending tests. > Aldy > > On Fri, Oct 1, 2021 at 2:43 PM Aldy Hernandez <al...@redhat.com> wrote: > > > > Knowing that X << Y is non-zero means X is also non-zero. This patch > > teaches this to range-ops. > > > > As usual, the bit twiddling experts could come up with all sorts of > > fancy enhancements in this area, and we welcome all patches :). > > > > I will push this pending tests. > > > > gcc/ChangeLog: > > > > PR tree-optimization/102546 > > * range-op.cc (operator_lshift::op1_range): Handle EQ_EXPR > > relation. > > --- > > gcc/range-op.cc | 19 ++++++++++++++++--- > > gcc/testsuite/gcc.dg/tree-ssa/pr102546.c | 23 +++++++++++++++++++++++ > > 2 files changed, 39 insertions(+), 3 deletions(-) > > create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/pr102546.c > > > > diff --git a/gcc/range-op.cc b/gcc/range-op.cc > > index 5e37133026d..53f3be4266e 100644 > > --- a/gcc/range-op.cc > > +++ b/gcc/range-op.cc > > @@ -2075,9 +2075,14 @@ operator_lshift::op1_range (irange &r, > > tree type, > > const irange &lhs, > > const irange &op2, > > - relation_kind rel ATTRIBUTE_UNUSED) const > > + relation_kind rel) const > > { > > tree shift_amount; > > + int_range<2> adjust (type); > > + > > + if (rel == EQ_EXPR && !lhs.contains_p (build_zero_cst (type))) > > + adjust.set_nonzero (type); > > + > > if (op2.singleton_p (&shift_amount)) > > { > > wide_int shift = wi::to_wide (shift_amount); > > @@ -2086,10 +2091,11 @@ operator_lshift::op1_range (irange &r, > > if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type), > > TYPE_PRECISION (op2.type ())), > > UNSIGNED)) > > - return false; > > + goto 
done; > > if (shift == 0) > > { > > r = lhs; > > + r.intersect (adjust); > > return true; > > } > > > > @@ -2126,9 +2132,16 @@ operator_lshift::op1_range (irange &r, > > > > if (utype != type) > > range_cast (r, type); > > + r.intersect (adjust); > > return true; > > } > > - return false; > > + > > + done: > > + if (adjust.varying_p ()) > > + return false; > > + > > + r = adjust; > > + return true; > > } > > > > bool > > diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr102546.c > > b/gcc/testsuite/gcc.dg/tree-ssa/pr102546.c > > new file mode 100644 > > index 00000000000..4bd98747732 > > --- /dev/null > > +++ b/gcc/testsuite/gcc.dg/tree-ssa/pr102546.c > > @@ -0,0 +1,23 @@ > > +// { dg-do compile } > > +// { dg-options "-O3 -fdump-tree-optimized" } > > + > > +static int a; > > +static char b, c, d; > > +void bar(void); > > +void foo(void); > > + > > +int main() { > > + int f = 0; > > + for (; f <= 5; f++) { > > + bar(); > > + b = b && f; > > + d = f << f; > > + if (!(a >= d || f)) > > + foo(); > > + c = 1; > > + for (; c; c = 0) > > + ; > > + } > > +} > > + > > +// { dg-final { scan-tree-dump-not "foo" "optimized" } } > > -- > > 2.31.1 > >
From 72739a6fde0020b98cdabad7218c6d4f5ce36bce Mon Sep 17 00:00:00 2001 From: Aldy Hernandez <al...@redhat.com> Date: Sat, 2 Oct 2021 16:59:26 +0200 Subject: [PATCH] [PR102563] Do not clobber range in operator_lshift::op1_range. We're clobbering the final range before we're done calculating it. Tested on x86-64 Linux. gcc/ChangeLog: * range-op.cc (operator_lshift::op1_range): Do not clobber range. gcc/testsuite/ChangeLog: * gcc.dg/tree-ssa/pr102563.c: New test. --- gcc/range-op.cc | 12 ++++++------ gcc/testsuite/gcc.dg/tree-ssa/pr102563.c | 16 ++++++++++++++++ 2 files changed, 22 insertions(+), 6 deletions(-) create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/pr102563.c diff --git a/gcc/range-op.cc b/gcc/range-op.cc index 2baca4a197f..bbf2924f815 100644 --- a/gcc/range-op.cc +++ b/gcc/range-op.cc @@ -2112,8 +2112,6 @@ operator_lshift::op1_range (irange &r, else op_rshift.fold_range (tmp_range, utype, lhs, op2); - r.intersect (tmp_range); - // Start with ranges which can produce the LHS by right shifting the // result by the shift amount. 
// ie [0x08, 0xF0] = op1 << 2 will start with @@ -2128,13 +2126,15 @@ operator_lshift::op1_range (irange &r, unsigned low_bits = TYPE_PRECISION (utype) - TREE_INT_CST_LOW (shift_amount); wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype)); - wide_int new_ub = wi::bit_or (up_mask, r.upper_bound ()); - wide_int new_lb = wi::set_bit (r.lower_bound (), low_bits); + wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ()); + wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits); int_range<2> fill_range (utype, new_lb, new_ub); - r.union_ (fill_range); + tmp_range.union_ (fill_range); if (utype != type) - range_cast (r, type); + range_cast (tmp_range, type); + + r.intersect (tmp_range); return true; } diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr102563.c b/gcc/testsuite/gcc.dg/tree-ssa/pr102563.c new file mode 100644 index 00000000000..8871dffe24a --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/pr102563.c @@ -0,0 +1,16 @@ +// { dg-do compile } +// { dg-options "-O2 -w" } + +int _bdf_parse_glyphs_bp; +long _bdf_parse_glyphs_nibbles; + +void _bdf_parse_glyphs_p() +{ + long p_2; + + _bdf_parse_glyphs_nibbles = p_2 << 1; + + for (; 0 < _bdf_parse_glyphs_nibbles;) + if (1 < _bdf_parse_glyphs_nibbles) + _bdf_parse_glyphs_bp = _bdf_parse_glyphs_p; +} -- 2.31.1