[PATCH] A jump threading opportunity for condition branch

2019-05-21 Thread Jiufu Guo
Hi,

This patch implements a new opportunity of jump threading for PR77820.
In this optimization, conditional jumps are merged with an unconditional jump,
and the move of the CMP result to a GPR is then eliminated.

It looks like below:

  
  <P0>
  p0 = a CMP b
  goto <X>;

  <P1>
  p1 = c CMP d
  goto <X>;

  <X>
  # phi = PHI <p0 (P0), p1 (P1)>
  if (phi != 0) goto <Y>; else goto <Z>;

Could be transformed to:

  <P0>
  p0 = a CMP b
  if (p0 != 0) goto <Y>; else goto <Z>;

  <P1>
  p1 = c CMP d
  if (p1 != 0) goto <Y>; else goto <Z>;


This optimization eliminates:
1. saving CMP result: p0 = a CMP b.
2. additional CMP on branch: if (phi != 0).
3. converting the CMP result: phi = (INT_CONV) p0, if such a conversion exists.

Bootstrapped and tested on powerpc64le with no regressions (one case is improved),
and new testcases are added. Is this ok for trunk?

Thanks!
Jiufu Guo


[gcc]
2019-05-21  Jiufu Guo  
Lijia He  

PR tree-optimization/77820
* tree-ssa-threadedge.c (cmp_from_unconditional_block): New function.
(is_trivial_join_block): New function.
(thread_across_edge): Call is_trivial_join_block.

[gcc/testsuite]
2019-05-21  Jiufu Guo  
Lijia He  

PR tree-optimization/77820
* gcc.dg/tree-ssa/phi_on_compare-1.c: New testcase.
* gcc.dg/tree-ssa/phi_on_compare-2.c: New testcase.
* gcc.dg/tree-ssa/phi_on_compare-3.c: New testcase.
* gcc.dg/tree-ssa/phi_on_compare-4.c: New testcase.
* gcc.dg/tree-ssa/split-path-6.c: Update testcase.

---
 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-1.c | 32 +
 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-2.c | 27 +++
 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-3.c | 31 
 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-4.c | 40 +++
 gcc/testsuite/gcc.dg/tree-ssa/split-path-6.c |  2 +-
 gcc/tree-ssa-threadedge.c| 91 +++-
 6 files changed, 219 insertions(+), 4 deletions(-)
 create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-1.c
 create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-2.c
 create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-3.c
 create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-4.c

diff --git a/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-1.c 
b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-1.c
new file mode 100644
index 000..ad4890a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-1.c
@@ -0,0 +1,32 @@
+/* { dg-do compile } */
+/* { dg-options "-Ofast -fdump-tree-vrp1" } */
+
+void g (int);
+void g1 (int);
+
+void
+f (long a, long b, long c, long d, long x)
+{
+  _Bool t;
+  if (x)
+{
+  g (a + 1);
+  t = a < b;
+  c = d + x;
+}
+  else
+{
+  g (b + 1);
+  a = c + d;
+  t = c > d;
+}
+
+  if (t)
+{
+  g1 (c);
+}
+
+  g (a);
+}
+
+/* { dg-final { scan-tree-dump-times "Removing basic block" 1 "vrp1" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-2.c 
b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-2.c
new file mode 100644
index 000..ca67d65
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-2.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-options "-Ofast -fdump-tree-vrp1" } */
+
+void g (void);
+void g1 (void);
+
+void
+f (long a, long b, long c, long d, int x)
+{
+  _Bool t;
+  if (x)
+{
+  t = c < d;
+}
+  else
+{
+  t = a < b;
+}
+
+  if (t)
+{
+  g1 ();
+  g ();
+}
+}
+
+/* { dg-final { scan-tree-dump-times "Removing basic block" 1 "vrp1" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-3.c 
b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-3.c
new file mode 100644
index 000..a126e97
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-3.c
@@ -0,0 +1,31 @@
+/* { dg-do compile } */
+/* { dg-options "-Ofast -fdump-tree-vrp1" } */
+
+void g (void);
+void g1 (void);
+
+void
+f (long a, long b, long c, long d, int x)
+{
+  int t;
+  if (x)
+{
+  t = a < b;
+}
+  else if (d == x)
+{
+  t = c < b;
+}
+  else
+{
+  t = d > c;
+}
+
+  if (t)
+{
+  g1 ();
+  g ();
+}
+}
+
+/* { dg-final { scan-tree-dump-times "Removing basic block" 1 "vrp1" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-4.c 
b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-4.c
new file mode 100644
index 000..5a50c2d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-4.c
@@ -0,0 +1,40 @@
+/* { dg-do compile } */
+/* { dg-options "-Ofast -fdump-tree-vrp1" } */
+
+void g (int);
+void g1 (int);
+
+void
+f (long a, long b, long c, long d, int x)
+{
+  int t;
+  _Bool l1 = 0, l2 = 0;
+  if (x)
+{
+  g (a);
+  c = a + b;
+  t = a < b;
+  l1 = 1;
+}
+  else
+{
+  g1 (b);
+  t = c > d;
+  d = c + b;
+  l2 = 1;
+}
+
+  if (t)
+{
+  if (l1 | l2)
+  g1 (c);
+}
+  else
+{
+  g (d);
+  g1 (a + 

Re: [PATCH] A jump threading opportunity for condition branch

2019-05-23 Thread Jiufu Guo
Hi,

Richard Biener  writes:

> On Tue, 21 May 2019, Jiufu Guo wrote:
>
>> Hi,
>> 
>> This patch implements a new opportunity of jump threading for PR77820.
>> In this optimization, conditional jumps are merged with unconditional jump.
>> And then moving CMP result to GPR is eliminated.
>> 
>> It looks like below:
>> 
>>   
>>   p0 = a CMP b
>>   goto ;
>> 
>>   
>>   p1 = c CMP d
>>   goto ;
>> 
>>   
>>   # phi = PHI 
>>   if (phi != 0) goto ; else goto ;
>> 
>> Could be transformed to:
>> 
>>   
>>   p0 = a CMP b
>>   if (p0 != 0) goto ; else goto ;
>> 
>>   
>>   p1 = c CMP d
>>   if (p1 != 0) goto ; else goto ;
>> 
>> 
>> This optimization eliminates:
>> 1. saving CMP result: p0 = a CMP b.
>> 2. additional CMP on branch: if (phi != 0).
>> 3. converting CMP result if there is phi = (INT_CONV) p0 if there is.
>> 
>> Bootstrapped and tested on powerpc64le with no regressions(one case is 
>> improved)
>> and new testcases are added. Is this ok for trunk?
>> 
>> Thanks!
>> Jiufu Guo
>> 
...
>> diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
>> index c3ea2d6..23000f6 100644
>> --- a/gcc/tree-ssa-threadedge.c
>> +++ b/gcc/tree-ssa-threadedge.c
>> @@ -1157,6 +1157,90 @@ thread_through_normal_block (edge e,
>>return 0;
>>  }
>>  
>> +/* Return true if PHI's INDEX-th incoming value is a CMP, and the CMP is
>> +   defined in the incoming basic block. Otherwise return false.  */
>> +static bool
>> +cmp_from_unconditional_block (gphi *phi, int index)
>> +{
>> +  tree value = gimple_phi_arg_def (phi, index);
>> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
>> +return false;
>
> Not sure why we should reject a constant here but I guess we
> expect it to find a simplified condition anyways ;)
>
A constant could be accepted here, e.g. "# t_9 = PHI <5(3), t_17(4)>".  I
found that this case is already handled by other jump-threading code, such
as the 'ethread' pass.

>> +
>> +  gimple *def = SSA_NAME_DEF_STMT (value);
>> +
>> +  if (!is_gimple_assign (def))
>> +return false;
>> +
>> +  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
>> +{
>> +  value = gimple_assign_rhs1 (def);
>> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
>> +return false;
>> +
>> +  def = SSA_NAME_DEF_STMT (value);
>> +
>> +  if (!is_gimple_assign (def))
>> +return false;
>
> too much vertial space.
>
Thanks, I will refine it. 
>> +}
>> +
>> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
>> +return false;
>> +
>> +  /* Check if phi's incoming value is defined in the incoming basic_block.  
>> */
>> +  edge e = gimple_phi_arg_edge (phi, index);
>> +  if (def->bb != e->src)
>> +return false;
>
> why does this matter?
>
Through preparing paths and duplicating blocks, this transform can also
help to combine a cmp in a previous block with a gcond in the current block.
"if (def->bb != e->src)" makes sure the cmp is defined in the incoming
block of the current one; combining the "cmp with gcond" is then safe.  If
the cmp is defined far from the incoming block, it would be hard to
achieve the combining, and the transform may not be needed.

>> +
>> +  if (!single_succ_p (def->bb))
>> +return false;
>
> Or this?  The actual threading will ensure this will hold true.
>
Yes, other threading code checks this and ensures it is true, e.g. the
function thread_through_normal_block. Since this new function is invoked
outside thread_through_normal_block, checking single_succ_p is also
needed for this case.

>> +  return true;
>> +}
>> +
>> +/* There are basic blocks look like:
>> +  
>> +  p0 = a CMP b ; or p0 = (INT)( a CMP b)
>> +  goto ;
>> +
>> +  
>> +  p1 = c CMP d
>> +  goto ;
>> +
>> +  
>> +  # phi = PHI 
>> +  if (phi != 0) goto ; else goto ;
>> +
>> +  Then, : a trivial join block.
>> +
>> + Check if BB is  in like above.  */
>> +
>> +bool
>> +is_trivial_join_block (basic_block bb)
>
> I'd make this work on a specific edge.
>
> edge_forwards_conditional_to_conditional_jump_through_empty_bb_p (edge e)
> {
>   basic_block b = e->dest;
>
> maybe too elaborate name ;)
>
Thanks for helping to name the function!  It is very valuable for me ;)
>> +{
>> +  gimple *gs = last_and_only_stmt (bb);
>> +  if (gs == NULL)
>> +return false;
>> +
>> +  if (gimple_code (gs) != GIMPLE_COND)
>> +return false;
>> +
>> +  tree cond = gimple_cond_lhs (gs);
>> +
>> +  if (TREE_CODE (cond) != SSA_NAME)
>> +return false;
>
> space after if( too much vertical space in this function
> for my taste btw.
Will update this.
>
> For the forwarding to work we want a NE_EXPR or EQ_EXPR
> as gimple_cond_code and integer_one_p or integer_zero_p
> gimple_cond_rhs.
Right, checking those would be safer.  Since no issues were found during
bootstrap and regression tests, I did not add these checks.  I will
add them.
>
>> +
>> +  if (gimple_code (SSA_NAME_DEF_STMT (cond)) != GIMPLE_PHI)
>> +return false;
>> +
>> +  gphi *phi = as_a (SSA_NAME_DEF_STMT (cond));

Re: [PATCH] A jump threading opportunity for condition branch

2019-05-23 Thread Richard Biener
On Thu, 23 May 2019, Jiufu Guo wrote:

> Hi,
> 
> Richard Biener  writes:
> 
> > On Tue, 21 May 2019, Jiufu Guo wrote:
> >
> >> Hi,
> >> 
> >> This patch implements a new opportunity of jump threading for PR77820.
> >> In this optimization, conditional jumps are merged with unconditional jump.
> >> And then moving CMP result to GPR is eliminated.
> >> 
> >> It looks like below:
> >> 
> >>   
> >>   p0 = a CMP b
> >>   goto ;
> >> 
> >>   
> >>   p1 = c CMP d
> >>   goto ;
> >> 
> >>   
> >>   # phi = PHI 
> >>   if (phi != 0) goto ; else goto ;
> >> 
> >> Could be transformed to:
> >> 
> >>   
> >>   p0 = a CMP b
> >>   if (p0 != 0) goto ; else goto ;
> >> 
> >>   
> >>   p1 = c CMP d
> >>   if (p1 != 0) goto ; else goto ;
> >> 
> >> 
> >> This optimization eliminates:
> >> 1. saving CMP result: p0 = a CMP b.
> >> 2. additional CMP on branch: if (phi != 0).
> >> 3. converting CMP result if there is phi = (INT_CONV) p0 if there is.
> >> 
> >> Bootstrapped and tested on powerpc64le with no regressions(one case is 
> >> improved)
> >> and new testcases are added. Is this ok for trunk?
> >> 
> >> Thanks!
> >> Jiufu Guo
> >> 
> ...
> >> diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
> >> index c3ea2d6..23000f6 100644
> >> --- a/gcc/tree-ssa-threadedge.c
> >> +++ b/gcc/tree-ssa-threadedge.c
> >> @@ -1157,6 +1157,90 @@ thread_through_normal_block (edge e,
> >>return 0;
> >>  }
> >>  
> >> +/* Return true if PHI's INDEX-th incoming value is a CMP, and the CMP is
> >> +   defined in the incoming basic block. Otherwise return false.  */
> >> +static bool
> >> +cmp_from_unconditional_block (gphi *phi, int index)
> >> +{
> >> +  tree value = gimple_phi_arg_def (phi, index);
> >> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
> >> +return false;
> >
> > Not sure why we should reject a constant here but I guess we
> > expect it to find a simplified condition anyways ;)
> >
> Const could be accepted here, like "# t_9 = PHI <5(3), t_17(4)>". I
> found this case is already handled by other jump-threading code, like
> 'ethread' pass.
> 
> >> +
> >> +  gimple *def = SSA_NAME_DEF_STMT (value);
> >> +
> >> +  if (!is_gimple_assign (def))
> >> +return false;
> >> +
> >> +  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
> >> +{
> >> +  value = gimple_assign_rhs1 (def);
> >> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
> >> +  return false;
> >> +
> >> +  def = SSA_NAME_DEF_STMT (value);
> >> +
> >> +  if (!is_gimple_assign (def))
> >> +  return false;
> >
> > too much vertial space.
> >
> Thanks, I will refine it. 
> >> +}
> >> +
> >> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
> >> +return false;
> >> +
> >> +  /* Check if phi's incoming value is defined in the incoming 
> >> basic_block.  */
> >> +  edge e = gimple_phi_arg_edge (phi, index);
> >> +  if (def->bb != e->src)
> >> +return false;
> >
> > why does this matter?
> >
> Through preparing pathes and duplicating block, this transform can also
> help to combine a cmp in previous block and a gcond in current block.
> "if (def->bb != e->src)" make sure the cmp is define in the incoming
> block of the current; and then combining "cmp with gcond" is safe.  If
> the cmp is defined far from the incoming block, it would be hard to
> achieve the combining, and the transform may not needed.

We're in SSA form so the "combining" doesn't really care where the
definition comes from.

> >> +
> >> +  if (!single_succ_p (def->bb))
> >> +return false;
> >
> > Or this?  The actual threading will ensure this will hold true.
> >
> Yes, other thread code check this and ensure it to be true, like
> function thread_through_normal_block. Since this new function is invoked
> outside thread_through_normal_block, so, checking single_succ_p is also
> needed for this case.

I mean threading will isolate the path making this trivially true.
It's also no requirement for combining, in fact due to the single-use
check the definition can be sinked across the edge already (if
the edges dest didn't have multiple predecessors which this threading
will fix as well).

> >> +  return true;
> >> +}
> >> +
> >> +/* There are basic blocks look like:
> >> +  
> >> +  p0 = a CMP b ; or p0 = (INT)( a CMP b)
> >> +  goto ;
> >> +
> >> +  
> >> +  p1 = c CMP d
> >> +  goto ;
> >> +
> >> +  
> >> +  # phi = PHI 
> >> +  if (phi != 0) goto ; else goto ;
> >> +
> >> +  Then, : a trivial join block.
> >> +
> >> + Check if BB is  in like above.  */
> >> +
> >> +bool
> >> +is_trivial_join_block (basic_block bb)
> >
> > I'd make this work on a specific edge.
> >
> > edge_forwards_conditional_to_conditional_jump_through_empty_bb_p (edge e)
> > {
> >   basic_block b = e->dest;
> >
> > maybe too elaborate name ;)
> >
> Thanks for help to name the function!  It is very valuable for me ;)
> >> +{
> >> +  gimple *gs = last_and_only_stmt (bb);
> >> +  if (gs == NULL)
> >> +return fal

Re: [PATCH] A jump threading opportunity for condition branch

2019-05-23 Thread Jiufu Guo
Richard Biener  writes:

> On Thu, 23 May 2019, Jiufu Guo wrote:
>
>> Hi,
>> 
>> Richard Biener  writes:
>> 
>> > On Tue, 21 May 2019, Jiufu Guo wrote:
>> >
>> >> Hi,
>> >> 
>> >> This patch implements a new opportunity of jump threading for PR77820.
>> >> In this optimization, conditional jumps are merged with unconditional 
>> >> jump.
>> >> And then moving CMP result to GPR is eliminated.
>> >> 
>> >> It looks like below:
>> >> 
>> >>   
>> >>   p0 = a CMP b
>> >>   goto ;
>> >> 
>> >>   
>> >>   p1 = c CMP d
>> >>   goto ;
>> >> 
>> >>   
>> >>   # phi = PHI 
>> >>   if (phi != 0) goto ; else goto ;
>> >> 
>> >> Could be transformed to:
>> >> 
>> >>   
>> >>   p0 = a CMP b
>> >>   if (p0 != 0) goto ; else goto ;
>> >> 
>> >>   
>> >>   p1 = c CMP d
>> >>   if (p1 != 0) goto ; else goto ;
>> >> 
>> >> 
>> >> This optimization eliminates:
>> >> 1. saving CMP result: p0 = a CMP b.
>> >> 2. additional CMP on branch: if (phi != 0).
>> >> 3. converting CMP result if there is phi = (INT_CONV) p0 if there is.
>> >> 
>> >> Bootstrapped and tested on powerpc64le with no regressions(one case is 
>> >> improved)
>> >> and new testcases are added. Is this ok for trunk?
>> >> 
>> >> Thanks!
>> >> Jiufu Guo
>> >> 
>> ...
>> >> diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
>> >> index c3ea2d6..23000f6 100644
>> >> --- a/gcc/tree-ssa-threadedge.c
>> >> +++ b/gcc/tree-ssa-threadedge.c
>> >> @@ -1157,6 +1157,90 @@ thread_through_normal_block (edge e,
>> >>return 0;
>> >>  }
>> >>  
>> >> +/* Return true if PHI's INDEX-th incoming value is a CMP, and the CMP is
>> >> +   defined in the incoming basic block. Otherwise return false.  */
>> >> +static bool
>> >> +cmp_from_unconditional_block (gphi *phi, int index)
>> >> +{
>> >> +  tree value = gimple_phi_arg_def (phi, index);
>> >> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
>> >> +return false;
>> >
>> > Not sure why we should reject a constant here but I guess we
>> > expect it to find a simplified condition anyways ;)
>> >
>> Const could be accepted here, like "# t_9 = PHI <5(3), t_17(4)>". I
>> found this case is already handled by other jump-threading code, like
>> 'ethread' pass.
>> 
>> >> +
>> >> +  gimple *def = SSA_NAME_DEF_STMT (value);
>> >> +
>> >> +  if (!is_gimple_assign (def))
>> >> +return false;
>> >> +
>> >> +  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
>> >> +{
>> >> +  value = gimple_assign_rhs1 (def);
>> >> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
>> >> + return false;
>> >> +
>> >> +  def = SSA_NAME_DEF_STMT (value);
>> >> +
>> >> +  if (!is_gimple_assign (def))
>> >> + return false;
>> >
>> > too much vertial space.
>> >
>> Thanks, I will refine it. 
>> >> +}
>> >> +
>> >> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
>> >> +return false;
>> >> +
>> >> +  /* Check if phi's incoming value is defined in the incoming 
>> >> basic_block.  */
>> >> +  edge e = gimple_phi_arg_edge (phi, index);
>> >> +  if (def->bb != e->src)
>> >> +return false;
>> >
>> > why does this matter?
>> >
>> Through preparing pathes and duplicating block, this transform can also
>> help to combine a cmp in previous block and a gcond in current block.
>> "if (def->bb != e->src)" make sure the cmp is define in the incoming
>> block of the current; and then combining "cmp with gcond" is safe.  If
>> the cmp is defined far from the incoming block, it would be hard to
>> achieve the combining, and the transform may not needed.
>
> We're in SSA form so the "combining" doesn't really care where the
> definition comes from.
>
>> >> +
>> >> +  if (!single_succ_p (def->bb))
>> >> +return false;
>> >
>> > Or this?  The actual threading will ensure this will hold true.
>> >
>> Yes, other thread code check this and ensure it to be true, like
>> function thread_through_normal_block. Since this new function is invoked
>> outside thread_through_normal_block, so, checking single_succ_p is also
>> needed for this case.
>
> I mean threading will isolate the path making this trivially true.
> It's also no requirement for combining, in fact due to the single-use
> check the definition can be sinked across the edge already (if
> the edges dest didn't have multiple predecessors which this threading
> will fix as well).
>
I will relax these checks and add a test.

And I refactored the code a little, as below. Thanks for any comments!

bool
edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (edge e)
{
  basic_block bb = e->dest;

  /* See if there is only one stmt which is gcond.  */
  gimple *gs = last_and_only_stmt (bb);
  if (gs == NULL || gimple_code (gs) != GIMPLE_COND)
return false;

  /* See if gcond's condition is "(phi !=/== 0/1)".  */
  tree cond = gimple_cond_lhs (gs);
  if (TREE_CODE (cond) != SSA_NAME
  || gimple_code (SSA_NAME_DEF_STMT (cond)) != GIMPLE_PHI
  || gimple_bb (SSA_NAME_DEF_STMT (cond)) != bb)
return fal

Re: [PATCH] A jump threading opportunity for condition branch

2019-05-24 Thread Richard Biener
On Thu, 23 May 2019, Jiufu Guo wrote:

> Richard Biener  writes:
> 
> > On Thu, 23 May 2019, Jiufu Guo wrote:
> >
> >> Hi,
> >> 
> >> Richard Biener  writes:
> >> 
> >> > On Tue, 21 May 2019, Jiufu Guo wrote:
> >> >
> >> >> Hi,
> >> >> 
> >> >> This patch implements a new opportunity of jump threading for PR77820.
> >> >> In this optimization, conditional jumps are merged with unconditional 
> >> >> jump.
> >> >> And then moving CMP result to GPR is eliminated.
> >> >> 
> >> >> It looks like below:
> >> >> 
> >> >>   
> >> >>   p0 = a CMP b
> >> >>   goto ;
> >> >> 
> >> >>   
> >> >>   p1 = c CMP d
> >> >>   goto ;
> >> >> 
> >> >>   
> >> >>   # phi = PHI 
> >> >>   if (phi != 0) goto ; else goto ;
> >> >> 
> >> >> Could be transformed to:
> >> >> 
> >> >>   
> >> >>   p0 = a CMP b
> >> >>   if (p0 != 0) goto ; else goto ;
> >> >> 
> >> >>   
> >> >>   p1 = c CMP d
> >> >>   if (p1 != 0) goto ; else goto ;
> >> >> 
> >> >> 
> >> >> This optimization eliminates:
> >> >> 1. saving CMP result: p0 = a CMP b.
> >> >> 2. additional CMP on branch: if (phi != 0).
> >> >> 3. converting CMP result if there is phi = (INT_CONV) p0 if there is.
> >> >> 
> >> >> Bootstrapped and tested on powerpc64le with no regressions(one case is 
> >> >> improved)
> >> >> and new testcases are added. Is this ok for trunk?
> >> >> 
> >> >> Thanks!
> >> >> Jiufu Guo
> >> >> 
> >> ...
> >> >> diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
> >> >> index c3ea2d6..23000f6 100644
> >> >> --- a/gcc/tree-ssa-threadedge.c
> >> >> +++ b/gcc/tree-ssa-threadedge.c
> >> >> @@ -1157,6 +1157,90 @@ thread_through_normal_block (edge e,
> >> >>return 0;
> >> >>  }
> >> >>  
> >> >> +/* Return true if PHI's INDEX-th incoming value is a CMP, and the CMP 
> >> >> is
> >> >> +   defined in the incoming basic block. Otherwise return false.  */
> >> >> +static bool
> >> >> +cmp_from_unconditional_block (gphi *phi, int index)
> >> >> +{
> >> >> +  tree value = gimple_phi_arg_def (phi, index);
> >> >> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
> >> >> +return false;
> >> >
> >> > Not sure why we should reject a constant here but I guess we
> >> > expect it to find a simplified condition anyways ;)
> >> >
> >> Const could be accepted here, like "# t_9 = PHI <5(3), t_17(4)>". I
> >> found this case is already handled by other jump-threading code, like
> >> 'ethread' pass.
> >> 
> >> >> +
> >> >> +  gimple *def = SSA_NAME_DEF_STMT (value);
> >> >> +
> >> >> +  if (!is_gimple_assign (def))
> >> >> +return false;
> >> >> +
> >> >> +  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
> >> >> +{
> >> >> +  value = gimple_assign_rhs1 (def);
> >> >> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
> >> >> +   return false;
> >> >> +
> >> >> +  def = SSA_NAME_DEF_STMT (value);
> >> >> +
> >> >> +  if (!is_gimple_assign (def))
> >> >> +   return false;
> >> >
> >> > too much vertial space.
> >> >
> >> Thanks, I will refine it. 
> >> >> +}
> >> >> +
> >> >> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
> >> >> +return false;
> >> >> +
> >> >> +  /* Check if phi's incoming value is defined in the incoming 
> >> >> basic_block.  */
> >> >> +  edge e = gimple_phi_arg_edge (phi, index);
> >> >> +  if (def->bb != e->src)
> >> >> +return false;
> >> >
> >> > why does this matter?
> >> >
> >> Through preparing pathes and duplicating block, this transform can also
> >> help to combine a cmp in previous block and a gcond in current block.
> >> "if (def->bb != e->src)" make sure the cmp is define in the incoming
> >> block of the current; and then combining "cmp with gcond" is safe.  If
> >> the cmp is defined far from the incoming block, it would be hard to
> >> achieve the combining, and the transform may not needed.
> >
> > We're in SSA form so the "combining" doesn't really care where the
> > definition comes from.
> >
> >> >> +
> >> >> +  if (!single_succ_p (def->bb))
> >> >> +return false;
> >> >
> >> > Or this?  The actual threading will ensure this will hold true.
> >> >
> >> Yes, other thread code check this and ensure it to be true, like
> >> function thread_through_normal_block. Since this new function is invoked
> >> outside thread_through_normal_block, so, checking single_succ_p is also
> >> needed for this case.
> >
> > I mean threading will isolate the path making this trivially true.
> > It's also no requirement for combining, in fact due to the single-use
> > check the definition can be sinked across the edge already (if
> > the edges dest didn't have multiple predecessors which this threading
> > will fix as well).
> >
> I would relax these check and have a test.
> 
> And I refactor the code a little as below. Thanks for any comments!
> 
> bool
> edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (edge e)
> {
>   basic_block bb = e->dest;
> 
>   /* See if there is only one stmt which is gcond.  */
>   gimple

Re: [PATCH] A jump threading opportunity for condition branch

2019-05-24 Thread Jiufu Guo
Richard Biener  writes:

> On Thu, 23 May 2019, Jiufu Guo wrote:
>
>> Richard Biener  writes:
>> 
>> > On Thu, 23 May 2019, Jiufu Guo wrote:
>> >
>> >> Hi,
>> >> 
>> >> Richard Biener  writes:
>> >> 
>> >> > On Tue, 21 May 2019, Jiufu Guo wrote:
>> >> >
>> >> >> Hi,
>> >> >> 
>> >> >> This patch implements a new opportunity of jump threading for PR77820.
>> >> >> In this optimization, conditional jumps are merged with unconditional 
>> >> >> jump.
>> >> >> And then moving CMP result to GPR is eliminated.
>> >> >> 
>> >> >> It looks like below:
>> >> >> 
>> >> >>   
>> >> >>   p0 = a CMP b
>> >> >>   goto ;
>> >> >> 
>> >> >>   
>> >> >>   p1 = c CMP d
>> >> >>   goto ;
>> >> >> 
>> >> >>   
>> >> >>   # phi = PHI 
>> >> >>   if (phi != 0) goto ; else goto ;
>> >> >> 
>> >> >> Could be transformed to:
>> >> >> 
>> >> >>   
>> >> >>   p0 = a CMP b
>> >> >>   if (p0 != 0) goto ; else goto ;
>> >> >> 
>> >> >>   
>> >> >>   p1 = c CMP d
>> >> >>   if (p1 != 0) goto ; else goto ;
>> >> >> 
>> >> >> 
>> >> >> This optimization eliminates:
>> >> >> 1. saving CMP result: p0 = a CMP b.
>> >> >> 2. additional CMP on branch: if (phi != 0).
>> >> >> 3. converting CMP result if there is phi = (INT_CONV) p0 if there is.
>> >> >> 
>> >> >> Bootstrapped and tested on powerpc64le with no regressions(one case is 
>> >> >> improved)
>> >> >> and new testcases are added. Is this ok for trunk?
>> >> >> 
>> >> >> Thanks!
>> >> >> Jiufu Guo
>> >> >> 
>> >> ...
>> >> >> diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
>> >> >> index c3ea2d6..23000f6 100644
>> >> >> --- a/gcc/tree-ssa-threadedge.c
>> >> >> +++ b/gcc/tree-ssa-threadedge.c
>> >> >> @@ -1157,6 +1157,90 @@ thread_through_normal_block (edge e,
>> >> >>return 0;
>> >> >>  }
>> >> >>  
>> >> >> +/* Return true if PHI's INDEX-th incoming value is a CMP, and the CMP 
>> >> >> is
>> >> >> +   defined in the incoming basic block. Otherwise return false.  */
>> >> >> +static bool
>> >> >> +cmp_from_unconditional_block (gphi *phi, int index)
>> >> >> +{
>> >> >> +  tree value = gimple_phi_arg_def (phi, index);
>> >> >> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
>> >> >> +return false;
>> >> >
>> >> > Not sure why we should reject a constant here but I guess we
>> >> > expect it to find a simplified condition anyways ;)
>> >> >
>> >> Const could be accepted here, like "# t_9 = PHI <5(3), t_17(4)>". I
>> >> found this case is already handled by other jump-threading code, like
>> >> 'ethread' pass.
>> >> 
>> >> >> +
>> >> >> +  gimple *def = SSA_NAME_DEF_STMT (value);
>> >> >> +
>> >> >> +  if (!is_gimple_assign (def))
>> >> >> +return false;
>> >> >> +
>> >> >> +  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
>> >> >> +{
>> >> >> +  value = gimple_assign_rhs1 (def);
>> >> >> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
>> >> >> +  return false;
>> >> >> +
>> >> >> +  def = SSA_NAME_DEF_STMT (value);
>> >> >> +
>> >> >> +  if (!is_gimple_assign (def))
>> >> >> +  return false;
>> >> >
>> >> > too much vertial space.
>> >> >
>> >> Thanks, I will refine it. 
>> >> >> +}
>> >> >> +
>> >> >> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != 
>> >> >> tcc_comparison)
>> >> >> +return false;
>> >> >> +
>> >> >> +  /* Check if phi's incoming value is defined in the incoming 
>> >> >> basic_block.  */
>> >> >> +  edge e = gimple_phi_arg_edge (phi, index);
>> >> >> +  if (def->bb != e->src)
>> >> >> +return false;
>> >> >
>> >> > why does this matter?
>> >> >
>> >> Through preparing pathes and duplicating block, this transform can also
>> >> help to combine a cmp in previous block and a gcond in current block.
>> >> "if (def->bb != e->src)" make sure the cmp is define in the incoming
>> >> block of the current; and then combining "cmp with gcond" is safe.  If
>> >> the cmp is defined far from the incoming block, it would be hard to
>> >> achieve the combining, and the transform may not needed.
>> >
>> > We're in SSA form so the "combining" doesn't really care where the
>> > definition comes from.
>> >
>> >> >> +
>> >> >> +  if (!single_succ_p (def->bb))
>> >> >> +return false;
>> >> >
>> >> > Or this?  The actual threading will ensure this will hold true.
>> >> >
>> >> Yes, other thread code check this and ensure it to be true, like
>> >> function thread_through_normal_block. Since this new function is invoked
>> >> outside thread_through_normal_block, so, checking single_succ_p is also
>> >> needed for this case.
>> >
>> > I mean threading will isolate the path making this trivially true.
>> > It's also no requirement for combining, in fact due to the single-use
>> > check the definition can be sinked across the edge already (if
>> > the edges dest didn't have multiple predecessors which this threading
>> > will fix as well).
>> >
>> I would relax these check and have a test.
>> 
>> And I refactor the code a little as below. Thanks for any comments!
>> 

Re: [PATCH] A jump threading opportunity for condition branch

2019-05-29 Thread Jeff Law
On 5/21/19 7:44 AM, Jiufu Guo wrote:
> Hi,
> 
> This patch implements a new opportunity of jump threading for PR77820.
> In this optimization, conditional jumps are merged with unconditional jump.
> And then moving CMP result to GPR is eliminated.
> 
> It looks like below:
> 
>   
>   p0 = a CMP b
>   goto ;
> 
>   
>   p1 = c CMP d
>   goto ;
> 
>   
>   # phi = PHI 
>   if (phi != 0) goto ; else goto ;
> 
> Could be transformed to:
> 
>   
>   p0 = a CMP b
>   if (p0 != 0) goto ; else goto ;
> 
>   
>   p1 = c CMP d
>   if (p1 != 0) goto ; else goto ;
A few high level notes.

I think LLVM does this in their jump threading pass as well, mostly
because it enables discovering additional jump threading opportunities
IIRC.   But it appears to me to be inherently good on its own as well as
it eliminates a dynamic unconditional jump.

It's also the case that after this transformation we may be able to
combine the assignment and test resulting in something like this:

>   
>   if (a CMP b) goto ; else goto ;
>
>   
>   if (c CMP d) goto ; else goto ;
Which is inherently good *and* the blocks no longer have side effects
which can have secondary positive effects in the jump threader.

I wouldn't be surprised if this was particularly useful for chained
boolean logical tests where some of the arms collapse down to single tests.

Jeff


Re: [PATCH] A jump threading opportunity for condition branch

2019-05-29 Thread Jeff Law
On 5/23/19 6:05 AM, Jiufu Guo wrote:
> Hi,
> 
> Richard Biener  writes:
> 
>> On Tue, 21 May 2019, Jiufu Guo wrote:
>>

>>>  
>>> +/* Return true if PHI's INDEX-th incoming value is a CMP, and the CMP is
>>> +   defined in the incoming basic block. Otherwise return false.  */
>>> +static bool
>>> +cmp_from_unconditional_block (gphi *phi, int index)
>>> +{
>>> +  tree value = gimple_phi_arg_def (phi, index);
>>> +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
>>> +return false;
>> Not sure why we should reject a constant here but I guess we
>> expect it to find a simplified condition anyways ;)
>>
> Const could be accepted here, like "# t_9 = PHI <5(3), t_17(4)>". I
> found this case is already handled by other jump-threading code, like
> 'ethread' pass.
Right.  There's no need to handle constants here.  They'll result in
trivially discoverable jump threading opportunities.

>>> +  /* Check if phi's incoming value is defined in the incoming basic_block. 
>>>  */
>>> +  edge e = gimple_phi_arg_edge (phi, index);
>>> +  if (def->bb != e->src)
>>> +return false;
>> why does this matter?
>>
> Through preparing paths and duplicating blocks, this transform can also
> help to combine a cmp in previous block and a gcond in current block.
> "if (def->bb != e->src)" make sure the cmp is define in the incoming
> block of the current; and then combining "cmp with gcond" is safe.  If
> the cmp is defined far from the incoming block, it would be hard to
> achieve the combining, and the transform may not needed.
I don't think it's strictly needed in the long term and could be
addressed in a follow-up if we can find cases where it helps.  I think
we'd just need to double check insertion of the new conditional branch
to relax this if we cared.

However, I would expect sinking to have done its job here and would be
surprised if trying to handle this actually improved any real world code.
> 
>>> +
>>> +  if (!single_succ_p (def->bb))
>>> +return false;
>> Or this?  The actual threading will ensure this will hold true.
>>
> Yes, other thread code check this and ensure it to be true, like
> function thread_through_normal_block. Since this new function is invoked
> outside thread_through_normal_block, so, checking single_succ_p is also
> needed for this case.
Agreed that it's needed.  Consider if the source block has multiple
successors.  Where do we insert the copy of the conditional branch?


>>> +{
>>> +  gimple *gs = last_and_only_stmt (bb);
>>> +  if (gs == NULL)
>>> +return false;
>>> +
>>> +  if (gimple_code (gs) != GIMPLE_COND)
>>> +return false;
>>> +
>>> +  tree cond = gimple_cond_lhs (gs);
>>> +
>>> +  if (TREE_CODE (cond) != SSA_NAME)
>>> +return false;
>> space after if( too much vertical space in this function
>> for my taste btw.
> Will update this.
>> For the forwarding to work we want a NE_EXPR or EQ_EXPR
>> as gimple_cond_code and integer_one_p or integer_zero_p
>> gimple_cond_rhs.
> Right, checking those would be more safe.  Since no issue found, during
> bootstrap and regression tests, so I did not add these checking.  I will
> add this checking.
Definitely want to verify that we're dealing with an equality test
against 0/1.

Jeff


Re: [PATCH] A jump threading opportunity for condition branch

2019-05-29 Thread Jeff Law
On 5/23/19 6:11 AM, Richard Biener wrote:
> On Thu, 23 May 2019, Jiufu Guo wrote:
> 
>> Hi,
>>
>> Richard Biener  writes:
>>
>>> On Tue, 21 May 2019, Jiufu Guo wrote:

 +}
 +
 +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
 +return false;
 +
 +  /* Check if phi's incoming value is defined in the incoming 
 basic_block.  */
 +  edge e = gimple_phi_arg_edge (phi, index);
 +  if (def->bb != e->src)
 +return false;
>>> why does this matter?
>>>
>> Through preparing pathes and duplicating block, this transform can also
>> help to combine a cmp in previous block and a gcond in current block.
>> "if (def->bb != e->src)" make sure the cmp is define in the incoming
>> block of the current; and then combining "cmp with gcond" is safe.  If
>> the cmp is defined far from the incoming block, it would be hard to
>> achieve the combining, and the transform may not needed.
> We're in SSA form so the "combining" doesn't really care where the
> definition comes from.
Combining doesn't care, but we need to make sure the copy of the
conditional ends up in the right block since it wouldn't necessarily be
associated with def->bb anymore.  But I'd expect the sinking pass to
make this a non-issue in practice anyway.

> 
 +
 +  if (!single_succ_p (def->bb))
 +return false;
>>> Or this?  The actual threading will ensure this will hold true.
>>>
>> Yes, other thread code check this and ensure it to be true, like
>> function thread_through_normal_block. Since this new function is invoked
>> outside thread_through_normal_block, so, checking single_succ_p is also
>> needed for this case.
> I mean threading will isolate the path making this trivially true.
> It's also no requirement for combining, in fact due to the single-use
> check the definition can be sinked across the edge already (if
> the edges dest didn't have multiple predecessors which this threading
> will fix as well).
I don't think so.  The CMP source block could end with a call and have
an abnormal edge (for example).  We can't put the copied conditional
before the call and putting it after the call essentially means creating
a new block.

The CMP source block could also end with a conditional.  Where do we put
the one we want to copy into the CMP source block in that case? :-)

This is something else we'd want to check if we ever allowed the CMP
defining block to not be the immediate predecessor of the conditional
jump block.  If we did that we'd need to validate that the block where
we're going to insert the copy of the jump has a single successor.


Jeff


Re: [PATCH] A jump threading opportunity for condition branch

2019-05-29 Thread Jeff Law
On 5/24/19 6:45 AM, Richard Biener wrote:
[ Aggressive snipping ]

> As said in my first review I'd just check whether for the
> edge we want to thread through the definition comes from a CMP.
> Suppose you have
> 
>  # val_1 = PHI 
>  if (val_1 != 0)
> 
> and only one edge has a b_3 = d_5 != 0 condition it's still
> worth tail-duplicating the if block.
Agreed.  The cost of tail duplicating here is so small we should be
doing it highly aggressively.  About the only case where we might not
want to would be if we're optimizing for size rather than speed.  That
case isn't clearly a win either way.

jeff


Re: [PATCH] A jump threading opportunity for condition branch

2019-05-29 Thread Jiufu Guo
Jeff Law  writes:

> On 5/23/19 6:11 AM, Richard Biener wrote:
>> On Thu, 23 May 2019, Jiufu Guo wrote:
>> 
>>> Hi,
>>>
>>> Richard Biener  writes:
>>>
 On Tue, 21 May 2019, Jiufu Guo wrote:
>
> +}
> +
> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
> +return false;
> +
> +  /* Check if phi's incoming value is defined in the incoming 
> basic_block.  */
> +  edge e = gimple_phi_arg_edge (phi, index);
> +  if (def->bb != e->src)
> +return false;
 why does this matter?

>>> Through preparing pathes and duplicating block, this transform can also
>>> help to combine a cmp in previous block and a gcond in current block.
>>> "if (def->bb != e->src)" make sure the cmp is define in the incoming
>>> block of the current; and then combining "cmp with gcond" is safe.  If
>>> the cmp is defined far from the incoming block, it would be hard to
>>> achieve the combining, and the transform may not needed.
>> We're in SSA form so the "combining" doesn't really care where the
>> definition comes from.
> Combining doesn't care, but we need to make sure the copy of the
> conditional ends up in the right block since it wouldn't necessarily be
> associated with def->bb anymore.  But I'd expect the sinking pass to
> make this a non-issue in practice anyway.
>
>> 
> +
> +  if (!single_succ_p (def->bb))
> +return false;
 Or this?  The actual threading will ensure this will hold true.

>>> Yes, other thread code check this and ensure it to be true, like
>>> function thread_through_normal_block. Since this new function is invoked
>>> outside thread_through_normal_block, so, checking single_succ_p is also
>>> needed for this case.
>> I mean threading will isolate the path making this trivially true.
>> It's also no requirement for combining, in fact due to the single-use
>> check the definition can be sinked across the edge already (if
>> the edges dest didn't have multiple predecessors which this threading
>> will fix as well).
> I don't think so.  The CMP source block could end with a call and have
> an abnormal edge (for example).  We can't put the copied conditional
> before the call and putting it after the call essentially means creating
> a new block.
>
> The CMP source block could also end with a conditional.  Where do we put
> the one we want to copy into the CMP source block in that case? :-)
>
> This is something else we'd want to check if we ever allowed the CMP
> defining block to not be the immediate predecessor of the conditional
> jump block.  If we did that we'd need to validate that the block where
> we're going to insert the copy of the jump has a single successor.
OK. Adding single_succ_p (e->src) could make sure the copy of the jump is
inserted at the end of the immediate predecessor, instead of the defining
block of the CMP, if def->bb != e->src.
>
>
> Jeff



Re: [PATCH] A jump threading opportunity for condition branch

2019-05-29 Thread Richard Biener
On May 29, 2019 10:12:31 PM GMT+02:00, Jeff Law  wrote:
>On 5/23/19 6:05 AM, Jiufu Guo wrote:
>> Hi,
>> 
>> Richard Biener  writes:
>> 
>>> On Tue, 21 May 2019, Jiufu Guo wrote:
>>>
>
  
 +/* Return true if PHI's INDEX-th incoming value is a CMP, and the
>CMP is
 +   defined in the incoming basic block. Otherwise return false. 
>*/
 +static bool
 +cmp_from_unconditional_block (gphi *phi, int index)
 +{
 +  tree value = gimple_phi_arg_def (phi, index);
 +  if (!(TREE_CODE (value) == SSA_NAME && has_single_use (value)))
 +return false;
>>> Not sure why we should reject a constant here but I guess we
>>> expect it to find a simplified condition anyways ;)
>>>
>> Const could be accepted here, like "# t_9 = PHI <5(3), t_17(4)>". I
>> found this case is already handled by other jump-threading code, like
>> 'ethread' pass.
>Right.  There's no need to handle constants here.  They'll result in
>trivially discoverable jump threading opportunities.
>
 +  /* Check if phi's incoming value is defined in the incoming
>basic_block.  */
 +  edge e = gimple_phi_arg_edge (phi, index);
 +  if (def->bb != e->src)
 +return false;
>>> why does this matter?
>>>
>> Through preparing pathes and duplicating block, this transform can
>also
>> help to combine a cmp in previous block and a gcond in current block.
>> "if (def->bb != e->src)" make sure the cmp is define in the incoming
>> block of the current; and then combining "cmp with gcond" is safe. 
>If
>> the cmp is defined far from the incoming block, it would be hard to
>> achieve the combining, and the transform may not needed.
>I don't think it's strictly needed in the long term and could be
>addressed in a follow-up if we can find cases where it helps.  I think
>we'd just need to double check insertion of the new conditional branch
>to relax this if we cared.
>
>However, I would expect sinking to have done its job here and would be
>surprised if trying to handle this actually improved any real world
>code.
>> 
 +
 +  if (!single_succ_p (def->bb))
 +return false;
>>> Or this?  The actual threading will ensure this will hold true.
>>>
>> Yes, other thread code check this and ensure it to be true, like
>> function thread_through_normal_block. Since this new function is
>invoked
>> outside thread_through_normal_block, so, checking single_succ_p is
>also
>> needed for this case.
>Agreed that it's needed.  Consider if the source block has multiple
>successors.  Where do we insert the copy of the conditional branch?

We're duplicating its block? That is, we are isolating a path into a 
conditional - that's always possible? I wanted to make sure that when threading 
threads through a conditional in the block with the compare we'd add the extra 
tail duplication? AFAIK we're still looking at unmodified CFG here?

>
 +{
 +  gimple *gs = last_and_only_stmt (bb);
 +  if (gs == NULL)
 +return false;
 +
 +  if (gimple_code (gs) != GIMPLE_COND)
 +return false;
 +
 +  tree cond = gimple_cond_lhs (gs);
 +
 +  if (TREE_CODE (cond) != SSA_NAME)
 +return false;
>>> space after if( too much vertical space in this function
>>> for my taste btw.
>> Will update this.
>>> For the forwarding to work we want a NE_EXPR or EQ_EXPR
>>> as gimple_cond_code and integer_one_p or integer_zero_p
>>> gimple_cond_rhs.
>> Right, checking those would be more safe.  Since no issue found,
>during
>> bootstrap and regression tests, so I did not add these checking.  I
>will
>> add this checking.
>Definitely want to verify that we're dealing with an equality test
>against 0/1.
>
>Jeff



Re: [PATCH] A jump threading opportunity for condition branch

2019-05-29 Thread Richard Biener
On May 29, 2019 10:18:01 PM GMT+02:00, Jeff Law  wrote:
>On 5/23/19 6:11 AM, Richard Biener wrote:
>> On Thu, 23 May 2019, Jiufu Guo wrote:
>> 
>>> Hi,
>>>
>>> Richard Biener  writes:
>>>
 On Tue, 21 May 2019, Jiufu Guo wrote:
>
> +}
> +
> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) !=
>tcc_comparison)
> +return false;
> +
> +  /* Check if phi's incoming value is defined in the incoming
>basic_block.  */
> +  edge e = gimple_phi_arg_edge (phi, index);
> +  if (def->bb != e->src)
> +return false;
 why does this matter?

>>> Through preparing pathes and duplicating block, this transform can
>also
>>> help to combine a cmp in previous block and a gcond in current
>block.
>>> "if (def->bb != e->src)" make sure the cmp is define in the incoming
>>> block of the current; and then combining "cmp with gcond" is safe. 
>If
>>> the cmp is defined far from the incoming block, it would be hard to
>>> achieve the combining, and the transform may not needed.
>> We're in SSA form so the "combining" doesn't really care where the
>> definition comes from.
>Combining doesn't care, but we need to make sure the copy of the
>conditional ends up in the right block since it wouldn't necessarily be
>associated with def->bb anymore.  But I'd expect the sinking pass to
>make this a non-issue in practice anyway.
>
>> 
> +
> +  if (!single_succ_p (def->bb))
> +return false;
 Or this?  The actual threading will ensure this will hold true.

>>> Yes, other thread code check this and ensure it to be true, like
>>> function thread_through_normal_block. Since this new function is
>invoked
>>> outside thread_through_normal_block, so, checking single_succ_p is
>also
>>> needed for this case.
>> I mean threading will isolate the path making this trivially true.
>> It's also no requirement for combining, in fact due to the single-use
>> check the definition can be sinked across the edge already (if
>> the edges dest didn't have multiple predecessors which this threading
>> will fix as well).
>I don't think so.  The CMP source block could end with a call and have
>an abnormal edge (for example).  We can't put the copied conditional
>before the call and putting it after the call essentially means
>creating
>a new block.
>
>The CMP source block could also end with a conditional.  Where do we
>put
>the one we want to copy into the CMP source block in that case? :-)
>
>This is something else we'd want to check if we ever allowed the the
>CMP
>defining block to not be the immediate predecessor of the conditional
>jump block.  If we did that we'd need to validate that the block where
>we're going to insert the copy of the jump has a single successor.

But we're just isolating a path here. The actual combine job is left to followup 
cleanups. 

Richard. 

>
>Jeff



Re: [PATCH] A jump threading opportunity for condition branch

2019-05-29 Thread Richard Biener
On May 29, 2019 10:21:46 PM GMT+02:00, Jeff Law  wrote:
>On 5/24/19 6:45 AM, Richard Biener wrote:
>[ Aggressive snipping ]
>
>> As said in my first review I'd just check whether for the
>> edge we want to thread through the definition comes from a CMP.
>> Suppose you have
>> 
>>  # val_1 = PHI 
>>  if (val_1 != 0)
>> 
>> and only one edge has a b_3 = d_5 != 0 condition it's still
>> worth tail-duplicating the if block.
>Agreed.  The cost of tail duplicating here is so small we should be
>doing it highly aggressively.  About the only case where we might not
>want to would be if we're optimizing for size rather than speed.  That
>case isn't clearly a win either way.

Even there the PHI likely causes edge copies to be inserted. So I wouldn't care 
for the moment. The proper check would be ! Optimize_edge_for_size_p (e). 

Richard. 

>jeff



Re: [PATCH] A jump threading opportunity for condition branch

2019-05-29 Thread Jiufu Guo
Richard Biener  writes:

> On May 29, 2019 10:21:46 PM GMT+02:00, Jeff Law  wrote:
>>On 5/24/19 6:45 AM, Richard Biener wrote:
>>[ Aggressive snipping ]
>>
>>> As said in my first review I'd just check whether for the
>>> edge we want to thread through the definition comes from a CMP.
>>> Suppose you have
>>> 
>>>  # val_1 = PHI 
>>>  if (val_1 != 0)
>>> 
>>> and only one edge has a b_3 = d_5 != 0 condition it's still
>>> worth tail-duplicating the if block.
>>Agreed.  The cost of tail duplicating here is so small we should be
>>doing it highly aggressively.  About the only case where we might not
>>want to would be if we're optimizing for size rather than speed.  That
>>case isn't clearly a win either way.
>
> Even there the PHI likely causes edge copies to be inserted. So I
> wouldn't care for the moment. The proper check would be !
> Optimize_edge_for_size_p (e).
For most cases of this kind, where the bb contains just one conditional
jump stmt, it may not increase the size, especially since there are
combinings in followup passes -- it may save size ;)

>
> Richard. 
>
>>jeff



Re: [PATCH] A jump threading opportunity for condition branch

2019-05-30 Thread Jeff Law
On 5/30/19 12:57 AM, Jiufu Guo wrote:
> Richard Biener  writes:
> 
>> On May 29, 2019 10:21:46 PM GMT+02:00, Jeff Law  wrote:
>>> On 5/24/19 6:45 AM, Richard Biener wrote:
>>> [ Aggressive snipping ]
>>>
 As said in my first review I'd just check whether for the
 edge we want to thread through the definition comes from a CMP.
 Suppose you have

  # val_1 = PHI 
  if (val_1 != 0)

 and only one edge has a b_3 = d_5 != 0 condition it's still
 worth tail-duplicating the if block.
>>> Agreed.  The cost of tail duplicating here is so small we should be
>>> doing it highly aggressively.  About the only case where we might not
>>> want to would be if we're optimizing for size rather than speed.  That
>>> case isn't clearly a win either way.
>>
>> Even there the PHI likely causes edge copies to be inserted. So I
>> wouldn't care for the moment. The proper check would be !
>> Optimize_edge_for_size_p (e).
> For most of this kind of case where the bb contains just one conditional
> jump stmt, it may not increase the size especially for there are
> combinings in follow passes -- it may save size ;)
My point was it's not as clear cut.  Regardless I think we've gone
pretty deep into the weeds.  I think we could easily handle that case as
a follow-up.

jeff


Re: [PATCH] A jump threading opportunity for condition branch

2019-05-30 Thread Jeff Law
On 5/30/19 12:44 AM, Richard Biener wrote:
> On May 29, 2019 10:21:46 PM GMT+02:00, Jeff Law  wrote:
>> On 5/24/19 6:45 AM, Richard Biener wrote:
>> [ Aggressive snipping ]
>>
>>> As said in my first review I'd just check whether for the
>>> edge we want to thread through the definition comes from a CMP.
>>> Suppose you have
>>>
>>>  # val_1 = PHI 
>>>  if (val_1 != 0)
>>>
>>> and only one edge has a b_3 = d_5 != 0 condition it's still
>>> worth tail-duplicating the if block.
>> Agreed.  The cost of tail duplicating here is so small we should be
>> doing it highly aggressively.  About the only case where we might not
>> want to would be if we're optimizing for size rather than speed.  That
>> case isn't clearly a win either way.
> 
> Even there the PHI likely causes edge copies to be inserted. So I wouldn't 
> care for the moment. The proper check would be ! Optimize_edge_for_size_p 
> (e). 
Agreed, with capitalization fixed :-)
jeff



Re: [PATCH] A jump threading opportunity for condition branch

2019-05-30 Thread Jeff Law
On 5/30/19 12:41 AM, Richard Biener wrote:
> On May 29, 2019 10:18:01 PM GMT+02:00, Jeff Law  wrote:
>> On 5/23/19 6:11 AM, Richard Biener wrote:
>>> On Thu, 23 May 2019, Jiufu Guo wrote:
>>>
 Hi,

 Richard Biener  writes:

> On Tue, 21 May 2019, Jiufu Guo wrote:
>>
>> +}
>> +
>> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) !=
>> tcc_comparison)
>> +return false;
>> +
>> +  /* Check if phi's incoming value is defined in the incoming
>> basic_block.  */
>> +  edge e = gimple_phi_arg_edge (phi, index);
>> +  if (def->bb != e->src)
>> +return false;
> why does this matter?
>
 Through preparing pathes and duplicating block, this transform can
>> also
 help to combine a cmp in previous block and a gcond in current
>> block.
 "if (def->bb != e->src)" make sure the cmp is define in the incoming
 block of the current; and then combining "cmp with gcond" is safe. 
>> If
 the cmp is defined far from the incoming block, it would be hard to
 achieve the combining, and the transform may not needed.
>>> We're in SSA form so the "combining" doesn't really care where the
>>> definition comes from.
>> Combining doesn't care, but we need to make sure the copy of the
>> conditional ends up in the right block since it wouldn't necessarily be
>> associated with def->bb anymore.  But I'd expect the sinking pass to
>> make this a non-issue in practice anyway.
>>
>>>
>> +
>> +  if (!single_succ_p (def->bb))
>> +return false;
> Or this?  The actual threading will ensure this will hold true.
>
 Yes, other thread code check this and ensure it to be true, like
 function thread_through_normal_block. Since this new function is
>> invoked
 outside thread_through_normal_block, so, checking single_succ_p is
>> also
 needed for this case.
>>> I mean threading will isolate the path making this trivially true.
>>> It's also no requirement for combining, in fact due to the single-use
>>> check the definition can be sinked across the edge already (if
>>> the edges dest didn't have multiple predecessors which this threading
>>> will fix as well).
>> I don't think so.  The CMP source block could end with a call and have
>> an abnormal edge (for example).  We can't put the copied conditional
>> before the call and putting it after the call essentially means
>> creating
>> a new block.
>>
>> The CMP source block could also end with a conditional.  Where do we
>> put
>> the one we want to copy into the CMP source block in that case? :-)
>>
>> This is something else we'd want to check if we ever allowed the the
>> CMP
>> defining block to not be the immediate predecessor of the conditional
>> jump block.  If we did that we'd need to validate that the block where
>> we're going to insert the copy of the jump has a single successor.
> 
> But were just isolating a path here. The actual combine job is left to 
> followup cleanups. 
Absolutely agreed.  My point was that there's some additional stuff we'd
have to verify does the right thing if we wanted to allow the CMP to be
somewhere other than in the immediate predecessor of the conditional
jump block.

Jeff



Re: [PATCH] A jump threading opportunity for condition branch

2019-05-31 Thread Richard Biener
On Thu, 30 May 2019, Jeff Law wrote:

> On 5/30/19 12:41 AM, Richard Biener wrote:
> > On May 29, 2019 10:18:01 PM GMT+02:00, Jeff Law  wrote:
> >> On 5/23/19 6:11 AM, Richard Biener wrote:
> >>> On Thu, 23 May 2019, Jiufu Guo wrote:
> >>>
>  Hi,
> 
>  Richard Biener  writes:
> 
> > On Tue, 21 May 2019, Jiufu Guo wrote:
> >>
> >> +}
> >> +
> >> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) !=
> >> tcc_comparison)
> >> +return false;
> >> +
> >> +  /* Check if phi's incoming value is defined in the incoming
> >> basic_block.  */
> >> +  edge e = gimple_phi_arg_edge (phi, index);
> >> +  if (def->bb != e->src)
> >> +return false;
> > why does this matter?
> >
>  Through preparing pathes and duplicating block, this transform can
> >> also
>  help to combine a cmp in previous block and a gcond in current
> >> block.
>  "if (def->bb != e->src)" make sure the cmp is define in the incoming
>  block of the current; and then combining "cmp with gcond" is safe. 
> >> If
>  the cmp is defined far from the incoming block, it would be hard to
>  achieve the combining, and the transform may not needed.
> >>> We're in SSA form so the "combining" doesn't really care where the
> >>> definition comes from.
> >> Combining doesn't care, but we need to make sure the copy of the
> >> conditional ends up in the right block since it wouldn't necessarily be
> >> associated with def->bb anymore.  But I'd expect the sinking pass to
> >> make this a non-issue in practice anyway.
> >>
> >>>
> >> +
> >> +  if (!single_succ_p (def->bb))
> >> +return false;
> > Or this?  The actual threading will ensure this will hold true.
> >
>  Yes, other thread code check this and ensure it to be true, like
>  function thread_through_normal_block. Since this new function is
> >> invoked
>  outside thread_through_normal_block, so, checking single_succ_p is
> >> also
>  needed for this case.
> >>> I mean threading will isolate the path making this trivially true.
> >>> It's also no requirement for combining, in fact due to the single-use
> >>> check the definition can be sinked across the edge already (if
> >>> the edges dest didn't have multiple predecessors which this threading
> >>> will fix as well).
> >> I don't think so.  The CMP source block could end with a call and have
> >> an abnormal edge (for example).  We can't put the copied conditional
> >> before the call and putting it after the call essentially means
> >> creating
> >> a new block.
> >>
> >> The CMP source block could also end with a conditional.  Where do we
> >> put
> >> the one we want to copy into the CMP source block in that case? :-)
> >>
> >> This is something else we'd want to check if we ever allowed the the
> >> CMP
> >> defining block to not be the immediate predecessor of the conditional
> >> jump block.  If we did that we'd need to validate that the block where
> >> we're going to insert the copy of the jump has a single successor.
> > 
> > But were just isolating a path here. The actual combine job is left to 
> > followup cleanups. 
> Absolutely agreed.  My point was that there's some additional stuff we'd
> have to verify does the right thing if we wanted to allow the CMP to be
> somewhere other than in the immediate predecessor of the conditional
> jump block.

For correctness?  No.  For the CMP to be forwarded?  No.  For optimality
maybe - forwarding a binary operation always incurs register pressure
increase.

Btw, as you already said sinking should have sinked the CMP to the
predecessor (since we have a single use in the PHI).

So I hardly see the point of making this difference.

Richard.


Re: [PATCH] A jump threading opportunity for condition branch

2019-05-31 Thread Jeff Law
On 5/31/19 1:24 AM, Richard Biener wrote:
> On Thu, 30 May 2019, Jeff Law wrote:
> 
>> On 5/30/19 12:41 AM, Richard Biener wrote:
>>> On May 29, 2019 10:18:01 PM GMT+02:00, Jeff Law  wrote:
 On 5/23/19 6:11 AM, Richard Biener wrote:
> On Thu, 23 May 2019, Jiufu Guo wrote:
>
>> Hi,
>>
>> Richard Biener  writes:
>>
>>> On Tue, 21 May 2019, Jiufu Guo wrote:

 +}
 +
 +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) !=
 tcc_comparison)
 +return false;
 +
 +  /* Check if phi's incoming value is defined in the incoming
 basic_block.  */
 +  edge e = gimple_phi_arg_edge (phi, index);
 +  if (def->bb != e->src)
 +return false;
>>> why does this matter?
>>>
>> Through preparing pathes and duplicating block, this transform can
 also
>> help to combine a cmp in previous block and a gcond in current
 block.
>> "if (def->bb != e->src)" make sure the cmp is define in the incoming
>> block of the current; and then combining "cmp with gcond" is safe. 
 If
>> the cmp is defined far from the incoming block, it would be hard to
>> achieve the combining, and the transform may not needed.
> We're in SSA form so the "combining" doesn't really care where the
> definition comes from.
 Combining doesn't care, but we need to make sure the copy of the
 conditional ends up in the right block since it wouldn't necessarily be
 associated with def->bb anymore.  But I'd expect the sinking pass to
 make this a non-issue in practice anyway.

>
 +
 +  if (!single_succ_p (def->bb))
 +return false;
>>> Or this?  The actual threading will ensure this will hold true.
>>>
>> Yes, other thread code check this and ensure it to be true, like
>> function thread_through_normal_block. Since this new function is
 invoked
>> outside thread_through_normal_block, so, checking single_succ_p is
 also
>> needed for this case.
> I mean threading will isolate the path making this trivially true.
> It's also no requirement for combining, in fact due to the single-use
> check the definition can be sinked across the edge already (if
> the edges dest didn't have multiple predecessors which this threading
> will fix as well).
 I don't think so.  The CMP source block could end with a call and have
 an abnormal edge (for example).  We can't put the copied conditional
 before the call and putting it after the call essentially means
 creating
 a new block.

 The CMP source block could also end with a conditional.  Where do we
 put
 the one we want to copy into the CMP source block in that case? :-)

 This is something else we'd want to check if we ever allowed the the
 CMP
 defining block to not be the immediate predecessor of the conditional
 jump block.  If we did that we'd need to validate that the block where
 we're going to insert the copy of the jump has a single successor.
>>>
>>> But were just isolating a path here. The actual combine job is left to 
>>> followup cleanups. 
>> Absolutely agreed.  My point was that there's some additional stuff we'd
>> have to verify does the right thing if we wanted to allow the CMP to be
>> somewhere other than in the immediate predecessor of the conditional
>> jump block.
> 
> For correctness?  No.  For the CMP to be forwarded?  No.  For optimality
> maybe - forwarding a binary operation always incurs register pressure
> increase.
For correctness of the patch.  Conceptually I have _no_ issues with
having the CMP in a different block than an immediate predecessor of the
conditional jump block.  But the patch contains certain code which would
need to be audited with that change in mind.

> 
> Btw, as you already said sinking should have sinked the CMP to the
> predecessor (since we have a single use in the PHI).
> 
> So I hardly see the point of making this difference.
:-)

jeff


Re: [PATCH] A jump threading opportunity for condition branch

2019-06-03 Thread Jiufu Guo
Jeff Law  writes:

> On 5/31/19 1:24 AM, Richard Biener wrote:
>> On Thu, 30 May 2019, Jeff Law wrote:
>> 
>>> On 5/30/19 12:41 AM, Richard Biener wrote:
 On May 29, 2019 10:18:01 PM GMT+02:00, Jeff Law  wrote:
> On 5/23/19 6:11 AM, Richard Biener wrote:
>> On Thu, 23 May 2019, Jiufu Guo wrote:
>>
>>> Hi,
>>>
>>> Richard Biener  writes:
>>>
 On Tue, 21 May 2019, Jiufu Guo wrote:
>
> +}
> +
> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) !=
> tcc_comparison)
> +return false;
> +
> +  /* Check if phi's incoming value is defined in the incoming
> basic_block.  */
> +  edge e = gimple_phi_arg_edge (phi, index);
> +  if (def->bb != e->src)
> +return false;
 why does this matter?

>>> Through preparing pathes and duplicating block, this transform can
> also
>>> help to combine a cmp in previous block and a gcond in current
> block.
>>> "if (def->bb != e->src)" make sure the cmp is define in the incoming
>>> block of the current; and then combining "cmp with gcond" is safe. 
> If
>>> the cmp is defined far from the incoming block, it would be hard to
>>> achieve the combining, and the transform may not needed.
>> We're in SSA form so the "combining" doesn't really care where the
>> definition comes from.
> Combining doesn't care, but we need to make sure the copy of the
> conditional ends up in the right block since it wouldn't necessarily be
> associated with def->bb anymore.  But I'd expect the sinking pass to
> make this a non-issue in practice anyway.
>
>>
> +
> +  if (!single_succ_p (def->bb))
> +return false;
 Or this?  The actual threading will ensure this will hold true.

>>> Yes, other thread code check this and ensure it to be true, like
>>> function thread_through_normal_block. Since this new function is
> invoked
>>> outside thread_through_normal_block, so, checking single_succ_p is
> also
>>> needed for this case.
>> I mean threading will isolate the path making this trivially true.
>> It's also no requirement for combining, in fact due to the single-use
>> check the definition can be sinked across the edge already (if
>> the edges dest didn't have multiple predecessors which this threading
>> will fix as well).
> I don't think so.  The CMP source block could end with a call and have
> an abnormal edge (for example).  We can't put the copied conditional
> before the call and putting it after the call essentially means
> creating
> a new block.
>
> The CMP source block could also end with a conditional.  Where do we
> put
> the one we want to copy into the CMP source block in that case? :-)
>
> This is something else we'd want to check if we ever allowed the the
> CMP
> defining block to not be the immediate predecessor of the conditional
> jump block.  If we did that we'd need to validate that the block where
> we're going to insert the copy of the jump has a single successor.

 But were just isolating a path here. The actual combine job is left to 
 followup cleanups. 
>>> Absolutely agreed.  My point was that there's some additional stuff we'd
>>> have to verify does the right thing if we wanted to allow the CMP to be
>>> somewhere other than in the immediate predecessor of the conditional
>>> jump block.
>> 
>> For correctness?  No.  For the CMP to be forwarded?  No.  For optimality
>> maybe - forwarding a binary operation always incurs register pressure
>> increase.
> For correctness of the patch.  Conceptually I have _no_ issues with
> having the CMP in a different block than an immediate predecessor of the
> conditional jump block.  But the patch does certain code which would
> need to be audited with that change in mind.
Thanks for all your great comments! You are right: if the immediate predecessor
of the conditional jump block has more than one successor, the conditional
jump block can be duplicated to split the path, and the conditional jump
will stay in the duplicated block instead of being inserted into the predecessor.  From
a functionality aspect, it is still correct.  However, it does not merge the CMP
with the conditional jump in this pass, so it may not directly help to
eliminate the CMP.  I also agree this path may provide other
optimization opportunities in following passes.

I just did a check with a GCC bootstrap, and found there are ~1800 edges
where !single_succ_p (e->src), and a similar number of edges where single_succ_p
(e->src).  It would be valuable to take the opportunity for those edges with
!single_succ_p (e->src).

Jiufu Guo
>
>> 
>> Btw, as you already said sinking should have sinked the CMP to the
>> predecessor (since we have a single use in the PHI).
>> 
>> So I hardly see the point of making this difference.
> :-)
>
> jeff



Re: [PATCH] A jump threading opportunity for condition branch

2019-06-04 Thread Richard Biener
On Tue, 4 Jun 2019, Jiufu Guo wrote:

> Jeff Law  writes:
> 
> > On 5/31/19 1:24 AM, Richard Biener wrote:
> >> On Thu, 30 May 2019, Jeff Law wrote:
> >> 
> >>> On 5/30/19 12:41 AM, Richard Biener wrote:
>  On May 29, 2019 10:18:01 PM GMT+02:00, Jeff Law  wrote:
> > On 5/23/19 6:11 AM, Richard Biener wrote:
> >> On Thu, 23 May 2019, Jiufu Guo wrote:
> >>
> >>> Hi,
> >>>
> >>> Richard Biener  writes:
> >>>
>  On Tue, 21 May 2019, Jiufu Guo wrote:
> >
> > +}
> > +
> > +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) !=
> > tcc_comparison)
> > +return false;
> > +
> > +  /* Check if phi's incoming value is defined in the incoming
> > basic_block.  */
> > +  edge e = gimple_phi_arg_edge (phi, index);
> > +  if (def->bb != e->src)
> > +return false;
>  why does this matter?
> 
> >>> Through preparing pathes and duplicating block, this transform can
> > also
> >>> help to combine a cmp in previous block and a gcond in current
> > block.
> >>> "if (def->bb != e->src)" make sure the cmp is define in the incoming
> >>> block of the current; and then combining "cmp with gcond" is safe. 
> > If
> >>> the cmp is defined far from the incoming block, it would be hard to
> >>> achieve the combining, and the transform may not needed.
> >> We're in SSA form so the "combining" doesn't really care where the
> >> definition comes from.
> > Combining doesn't care, but we need to make sure the copy of the
> > conditional ends up in the right block since it wouldn't necessarily be
> > associated with def->bb anymore.  But I'd expect the sinking pass to
> > make this a non-issue in practice anyway.
> >
> >>
> > +
> > +  if (!single_succ_p (def->bb))
> > +return false;
>  Or this?  The actual threading will ensure this will hold true.
> 
> >>> Yes, other thread code check this and ensure it to be true, like
> >>> function thread_through_normal_block. Since this new function is
> > invoked
> >>> outside thread_through_normal_block, so, checking single_succ_p is
> > also
> >>> needed for this case.
> >> I mean threading will isolate the path making this trivially true.
> >> It's also no requirement for combining, in fact due to the single-use
> >> check the definition can be sinked across the edge already (if
> >> the edges dest didn't have multiple predecessors which this threading
> >> will fix as well).
> > I don't think so.  The CMP source block could end with a call and have
> > an abnormal edge (for example).  We can't put the copied conditional
> > before the call and putting it after the call essentially means
> > creating
> > a new block.
> >
> > The CMP source block could also end with a conditional.  Where do we
> > put
> > the one we want to copy into the CMP source block in that case? :-)
> >
> > This is something else we'd want to check if we ever allowed the the
> > CMP
> > defining block to not be the immediate predecessor of the conditional
> > jump block.  If we did that we'd need to validate that the block where
> > we're going to insert the copy of the jump has a single successor.
> 
>  But were just isolating a path here. The actual combine job is left to 
>  followup cleanups. 
> >>> Absolutely agreed.  My point was that there's some additional stuff we'd
> >>> have to verify does the right thing if we wanted to allow the CMP to be
> >>> somewhere other than in the immediate predecessor of the conditional
> >>> jump block.
> >> 
> >> For correctness?  No.  For the CMP to be forwarded?  No.  For optimality
> >> maybe - forwarding a binary operation always incurs register pressure
> >> increase.
> > For correctness of the patch.  Conceptually I have _no_ issues with
> > having the CMP in a different block than an immediate predecessor of the
> > conditional jump block.  But the patch does certain code which would
> > need to be audited with that change in mind.
> Thanks for all your great comments! It is right, if immediate predecessor
> of conditional jump block has more than one successors, the conditional
> jump block can be duplicated to split the path; and the condtional jump
> will keep in the duplicate block instead inserting into predecessor.  From
> functionality aspect, it is still correct. While it does not merge CMP
> with conditional jump in this pass; then it may not directly help to
> eliminate the CMP. While I also agree this path may provides other
> optimize opportunity in following passes.
> 
> I just have a check with gcc bootstrap, and find there are ~1800 edges
> as !single_succ_p (e->src).  And similar number edges are single_succ_p
> (e->src).  It would be valuable to take the opptunity for these edges of
> !sin

Re: [PATCH] A jump threading opportunity for condition branch

2019-06-06 Thread Jeff Law
On 5/31/19 9:03 AM, Jeff Law wrote:
> On 5/31/19 1:24 AM, Richard Biener wrote:
>> On Thu, 30 May 2019, Jeff Law wrote:
>>
>>> On 5/30/19 12:41 AM, Richard Biener wrote:
 On May 29, 2019 10:18:01 PM GMT+02:00, Jeff Law  wrote:
> On 5/23/19 6:11 AM, Richard Biener wrote:
>> On Thu, 23 May 2019, Jiufu Guo wrote:
>>
>>> Hi,
>>>
>>> Richard Biener  writes:
>>>
 On Tue, 21 May 2019, Jiufu Guo wrote:
>
> +}
> +
> +  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) !=
> tcc_comparison)
> +return false;
> +
> +  /* Check if phi's incoming value is defined in the incoming
> basic_block.  */
> +  edge e = gimple_phi_arg_edge (phi, index);
> +  if (def->bb != e->src)
> +return false;
 why does this matter?

>>> Through preparing pathes and duplicating block, this transform can
> also
>>> help to combine a cmp in previous block and a gcond in current
> block.
>>> "if (def->bb != e->src)" make sure the cmp is define in the incoming
>>> block of the current; and then combining "cmp with gcond" is safe. 
> If
>>> the cmp is defined far from the incoming block, it would be hard to
>>> achieve the combining, and the transform may not needed.
>> We're in SSA form so the "combining" doesn't really care where the
>> definition comes from.
> Combining doesn't care, but we need to make sure the copy of the
> conditional ends up in the right block since it wouldn't necessarily be
> associated with def->bb anymore.  But I'd expect the sinking pass to
> make this a non-issue in practice anyway.
>
>>
> +
> +  if (!single_succ_p (def->bb))
> +return false;
 Or this?  The actual threading will ensure this will hold true.

>>> Yes, other thread code check this and ensure it to be true, like
>>> function thread_through_normal_block. Since this new function is
> invoked
>>> outside thread_through_normal_block, so, checking single_succ_p is
> also
>>> needed for this case.
>> I mean threading will isolate the path making this trivially true.
>> It's also no requirement for combining, in fact due to the single-use
>> check the definition can be sinked across the edge already (if
>> the edges dest didn't have multiple predecessors which this threading
>> will fix as well).
> I don't think so.  The CMP source block could end with a call and have
> an abnormal edge (for example).  We can't put the copied conditional
> before the call and putting it after the call essentially means
> creating
> a new block.
>
> The CMP source block could also end with a conditional.  Where do we
> put
> the one we want to copy into the CMP source block in that case? :-)
>
> This is something else we'd want to check if we ever allowed the the
> CMP
> defining block to not be the immediate predecessor of the conditional
> jump block.  If we did that we'd need to validate that the block where
> we're going to insert the copy of the jump has a single successor.

 But were just isolating a path here. The actual combine job is left to 
 followup cleanups. 
>>> Absolutely agreed.  My point was that there's some additional stuff we'd
>>> have to verify does the right thing if we wanted to allow the CMP to be
>>> somewhere other than in the immediate predecessor of the conditional
>>> jump block.
>>
>> For correctness?  No.  For the CMP to be forwarded?  No.  For optimality
>> maybe - forwarding a binary operation always incurs register pressure
>> increase.
> For correctness of the patch.  Conceptually I have _no_ issues with
> having the CMP in a different block than an immediate predecessor of the
> conditional jump block.  But the patch does certain code which would
> need to be audited with that change in mind.
> 
>>
>> Btw, as you already said sinking should have sinked the CMP to the
>> predecessor (since we have a single use in the PHI).
>>
>> So I hardly see the point of making this difference.
> :-)
So just to satisfy my curiosity I put in some instrumentation to check
for cases where the CMP is not in an immediate predecessor of the
conditional branch.  It happens.  It's not terribly common though.  I'd
guess it's cases where this code is running before sinking.

I went ahead and audited the patch for this case so that we could just
eliminate that test.  The key thing is that we don't use the block
with the CMP insn at all in this code.  So there's no possibility of
duplicating the conditional into the wrong block or anything like that.

Since this code is running from within thread_across_edge it can't be
called with complex/abnormal edges or any other cases that can't be
handled since we filter those out before calling thread_across_edge.

So it should be safe to just elimin

Re: [PATCH] A jump threading opportunity for condition branch

2019-05-22 Thread Richard Biener
On Tue, 21 May 2019, Jiufu Guo wrote:

> Hi,
> 
> This patch implements a new opportunity of jump threading for PR77820.
> In this optimization, conditional jumps are merged with unconditional jump.
> And then moving CMP result to GPR is eliminated.
> 
> It looks like below:
> 
>   
>   p0 = a CMP b
>   goto ;
> 
>   
>   p1 = c CMP d
>   goto ;
> 
>   
>   # phi = PHI 
>   if (phi != 0) goto ; else goto ;
> 
> Could be transformed to:
> 
>   
>   p0 = a CMP b
>   if (p0 != 0) goto ; else goto ;
> 
>   
>   p1 = c CMP d
>   if (p1 != 0) goto ; else goto ;
> 
> 
> This optimization eliminates:
> 1. saving CMP result: p0 = a CMP b.
> 2. additional CMP on branch: if (phi != 0).
> 3. converting CMP result if there is phi = (INT_CONV) p0 if there is.
> 
> Bootstrapped and tested on powerpc64le with no regressions(one case is 
> improved)
> and new testcases are added. Is this ok for trunk?
> 
> Thanks!
> Jiufu Guo
> 
> 
> [gcc]
> 2019-05-21  Jiufu Guo  
>   Lijia He  
> 
>   PR tree-optimization/77820
>   * tree-ssa-threadedge.c (cmp_from_unconditional_block): New function.
>   * tree-ssa-threadedge.c (is_trivial_join_block): New function.
>   * tree-ssa-threadedge.c (thread_across_edge): Call 
> is_trivial_join_block.
> 
> [gcc/testsuite]
> 2019-05-21  Jiufu Guo  
>   Lijia He  
> 
>   PR tree-optimization/77820
>   * gcc.dg/tree-ssa/phi_on_compare-1.c: New testcase.
>   * gcc.dg/tree-ssa/phi_on_compare-2.c: New testcase.
>   * gcc.dg/tree-ssa/phi_on_compare-3.c: New testcase.
>   * gcc.dg/tree-ssa/phi_on_compare-4.c: New testcase.
>   * gcc.dg/tree-ssa/split-path-6.c: Update testcase.
> 
> ---
>  gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-1.c | 32 +
>  gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-2.c | 27 +++
>  gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-3.c | 31 
>  gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-4.c | 40 +++
>  gcc/testsuite/gcc.dg/tree-ssa/split-path-6.c |  2 +-
>  gcc/tree-ssa-threadedge.c| 91 
> +++-
>  6 files changed, 219 insertions(+), 4 deletions(-)
>  create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-1.c
>  create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-2.c
>  create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-3.c
>  create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-4.c
> 
> diff --git a/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-1.c 
> b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-1.c
> new file mode 100644
> index 000..ad4890a
> --- /dev/null
> +++ b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-1.c
> @@ -0,0 +1,32 @@
> +/* { dg-do compile } */
> +/* { dg-options "-Ofast -fdump-tree-vrp1" } */
> +
> +void g (int);
> +void g1 (int);
> +
> +void
> +f (long a, long b, long c, long d, long x)
> +{
> +  _Bool t;
> +  if (x)
> +{
> +  g (a + 1);
> +  t = a < b;
> +  c = d + x;
> +}
> +  else
> +{
> +  g (b + 1);
> +  a = c + d;
> +  t = c > d;
> +}
> +
> +  if (t)
> +{
> +  g1 (c);
> +}
> +
> +  g (a);
> +}
> +
> +/* { dg-final { scan-tree-dump-times "Removing basic block" 1 "vrp1" } } */
> diff --git a/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-2.c 
> b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-2.c
> new file mode 100644
> index 000..ca67d65
> --- /dev/null
> +++ b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-2.c
> @@ -0,0 +1,27 @@
> +/* { dg-do compile } */
> +/* { dg-options "-Ofast -fdump-tree-vrp1" } */
> +
> +void g (void);
> +void g1 (void);
> +
> +void
> +f (long a, long b, long c, long d, int x)
> +{
> +  _Bool t;
> +  if (x)
> +{
> +  t = c < d;
> +}
> +  else
> +{
> +  t = a < b;
> +}
> +
> +  if (t)
> +{
> +  g1 ();
> +  g ();
> +}
> +}
> +
> +/* { dg-final { scan-tree-dump-times "Removing basic block" 1 "vrp1" } } */
> diff --git a/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-3.c 
> b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-3.c
> new file mode 100644
> index 000..a126e97
> --- /dev/null
> +++ b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-3.c
> @@ -0,0 +1,31 @@
> +/* { dg-do compile } */
> +/* { dg-options "-Ofast -fdump-tree-vrp1" } */
> +
> +void g (void);
> +void g1 (void);
> +
> +void
> +f (long a, long b, long c, long d, int x)
> +{
> +  int t;
> +  if (x)
> +{
> +  t = a < b;
> +}
> +  else if (d == x)
> +{
> +  t = c < b;
> +}
> +  else
> +{
> +  t = d > c;
> +}
> +
> +  if (t)
> +{
> +  g1 ();
> +  g ();
> +}
> +}
> +
> +/* { dg-final { scan-tree-dump-times "Removing basic block" 1 "vrp1" } } */
> diff --git a/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-4.c 
> b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-4.c
> new file mode 100644
> index 000..5a50c2d
> --- /dev/null
> +++ b/gcc/testsuite/gcc.dg/tree-ssa/phi_on_compare-4.c
> @@ -0,0 +1,40 @@
> +/* { dg-do compile } */
> +/* { dg-options "-Ofast -fdump-tree-