[PATCH] Minor cleanups to backward threader.

2021-10-14 Thread Aldy Hernandez via Gcc-patches
I will commit this as obvious pending tests on x86-64 Linux.

gcc/ChangeLog:

* tree-ssa-threadbackward.c (class back_threader): Make m_imports
an auto_bitmap.
(back_threader::~back_threader): Do not release m_path.
---
 gcc/tree-ssa-threadbackward.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/gcc/tree-ssa-threadbackward.c b/gcc/tree-ssa-threadbackward.c
index 496b68e0a82..1999ccf4834 100644
--- a/gcc/tree-ssa-threadbackward.c
+++ b/gcc/tree-ssa-threadbackward.c
@@ -105,7 +105,7 @@ private:
   hash_set<basic_block> m_visited_bbs;
   // The set of SSA names, any of which could potentially change the
   // value of the final conditional in a path.
-  bitmap m_imports;
+  auto_bitmap m_imports;
   // The last statement in the path.
   gimple *m_last_stmt;
   // This is a bit of a wart.  It's used to pass the LHS SSA name to
@@ -125,13 +125,10 @@ back_threader::back_threader (bool speed_p)
 m_solver (m_ranger, /*resolve=*/false)
 {
   m_last_stmt = NULL;
-  m_imports = BITMAP_ALLOC (NULL);
 }
 
 back_threader::~back_threader ()
 {
-  m_path.release ();
-  BITMAP_FREE (m_imports);
 }
 
 // Register the current path for jump threading if it's profitable to
-- 
2.31.1
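
The point of the cleanup: auto_bitmap ties the bitmap's lifetime to its
enclosing object, so the explicit BITMAP_ALLOC in the constructor and the
matching BITMAP_FREE in the destructor both go away.  A minimal sketch of
the idiom against GCC's bitmap.h API (the classes below are illustrative,
not the actual threader code):

// Sketch only; assumes GCC-internal bitmap.h.
// Manual management: the ctor and dtor must stay in sync, or the
// bitmap leaks (or is freed twice).
struct manual_owner
{
  manual_owner () { m_imports = BITMAP_ALLOC (NULL); }
  ~manual_owner () { BITMAP_FREE (m_imports); }
  bitmap m_imports;
};

// RAII: auto_bitmap allocates in its constructor, frees in its
// destructor, and converts implicitly to bitmap, so existing
// bitmap_set_bit / bitmap_bit_p call sites compile unchanged.
struct raii_owner
{
  auto_bitmap m_imports;
};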



Re: [PATCH] Minor cleanups to forward threader.

2021-09-19 Thread Jeff Law via Gcc-patches




On 9/19/2021 9:37 AM, Aldy Hernandez wrote:

Every time we allocate a threading edge, we push it onto the path in a
distinct step.  There's no need to do this in two steps, and avoiding
the second step keeps us from exposing the internals of the registry.

I've also made some tiny cleanups in thread_across_edge, most importantly
replacing the manually managed bitmap with an auto_bitmap.

There are no functional changes.

OK pending tests?

gcc/ChangeLog:

* tree-ssa-threadbackward.c
(back_threader_registry::register_path): Use push_edge.
* tree-ssa-threadedge.c
(jump_threader::thread_around_empty_blocks): Same.
(jump_threader::thread_through_normal_block): Same.
(jump_threader::thread_across_edge): Same.  Also, use auto_bitmap.
Tidy up code.
* tree-ssa-threadupdate.c
(jt_path_registry::allocate_thread_edge): Remove.
(jt_path_registry::push_edge): New.
(dump_jump_thread_path): Make static.
* tree-ssa-threadupdate.h (allocate_thread_edge): Remove.
(push_edge): New.

OK
jeff



[PATCH] Minor cleanups to forward threader.

2021-09-19 Thread Aldy Hernandez via Gcc-patches
Every time we allocate a threading edge, we push it onto the path in a
distinct step.  There's no need to do this in two steps, and avoiding
the second step keeps us from exposing the internals of the registry.

I've also made some tiny cleanups in thread_across_edge, most importantly
replacing the manually managed bitmap with an auto_bitmap.

There are no functional changes.

OK pending tests?

gcc/ChangeLog:

* tree-ssa-threadbackward.c
(back_threader_registry::register_path): Use push_edge.
* tree-ssa-threadedge.c
(jump_threader::thread_around_empty_blocks): Same.
(jump_threader::thread_through_normal_block): Same.
(jump_threader::thread_across_edge): Same.  Also, use auto_bitmap.
Tidy up code.
* tree-ssa-threadupdate.c
(jt_path_registry::allocate_thread_edge): Remove.
(jt_path_registry::push_edge): New.
(dump_jump_thread_path): Make static.
* tree-ssa-threadupdate.h (allocate_thread_edge): Remove.
(push_edge): New.
---
 gcc/tree-ssa-threadbackward.c | 10 ++---
 gcc/tree-ssa-threadedge.c     | 69 ++-
 gcc/tree-ssa-threadupdate.c   | 12 +++---
 gcc/tree-ssa-threadupdate.h   |  2 +-
 4 files changed, 30 insertions(+), 63 deletions(-)

diff --git a/gcc/tree-ssa-threadbackward.c b/gcc/tree-ssa-threadbackward.c
index 805b7ac5ef6..c6530d3a6bb 100644
--- a/gcc/tree-ssa-threadbackward.c
+++ b/gcc/tree-ssa-threadbackward.c
@@ -902,15 +902,11 @@ back_threader_registry::register_path (const vec<basic_block> &m_path,
 
   edge e = find_edge (bb1, bb2);
   gcc_assert (e);
-  jump_thread_edge *x
-   = m_lowlevel_registry.allocate_thread_edge (e, EDGE_COPY_SRC_BLOCK);
-  jump_thread_path->safe_push (x);
+  m_lowlevel_registry.push_edge (jump_thread_path, e, EDGE_COPY_SRC_BLOCK);
 }
 
-  jump_thread_edge *x
-= m_lowlevel_registry.allocate_thread_edge (taken_edge,
-   EDGE_NO_COPY_SRC_BLOCK);
-  jump_thread_path->safe_push (x);
+  m_lowlevel_registry.push_edge (jump_thread_path,
+                                 taken_edge, EDGE_NO_COPY_SRC_BLOCK);
 
   if (m_lowlevel_registry.register_jump_thread (jump_thread_path))
 ++m_threaded_paths;
diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
index 2b9a4c31592..04138cb06fe 100644
--- a/gcc/tree-ssa-threadedge.c
+++ b/gcc/tree-ssa-threadedge.c
@@ -898,10 +898,7 @@ jump_threader::thread_around_empty_blocks (vec<jump_thread_edge *> *path,
 
  if (!bitmap_bit_p (visited, taken_edge->dest->index))
{
- jump_thread_edge *x
-   = m_registry->allocate_thread_edge (taken_edge,
-   EDGE_NO_COPY_SRC_BLOCK);
- path->safe_push (x);
+ m_registry->push_edge (path, taken_edge, EDGE_NO_COPY_SRC_BLOCK);
  bitmap_set_bit (visited, taken_edge->dest->index);
  return thread_around_empty_blocks (path, taken_edge, visited);
}
@@ -942,10 +939,7 @@ jump_threader::thread_around_empty_blocks (vec<jump_thread_edge *> *path,
return false;
   bitmap_set_bit (visited, taken_edge->dest->index);
 
-  jump_thread_edge *x
-   = m_registry->allocate_thread_edge (taken_edge,
-   EDGE_NO_COPY_SRC_BLOCK);
-  path->safe_push (x);
+  m_registry->push_edge (path, taken_edge, EDGE_NO_COPY_SRC_BLOCK);
 
   thread_around_empty_blocks (path, taken_edge, visited);
   return true;
@@ -1051,16 +1045,9 @@ jump_threader::thread_through_normal_block (vec<jump_thread_edge *> *path,
  /* Only push the EDGE_START_JUMP_THREAD marker if this is
 first edge on the path.  */
  if (path->length () == 0)
-   {
-  jump_thread_edge *x
-   = m_registry->allocate_thread_edge (e, EDGE_START_JUMP_THREAD);
- path->safe_push (x);
-   }
+   m_registry->push_edge (path, e, EDGE_START_JUMP_THREAD);
 
- jump_thread_edge *x
-   = m_registry->allocate_thread_edge (taken_edge,
-   EDGE_COPY_SRC_BLOCK);
- path->safe_push (x);
+ m_registry->push_edge (path, taken_edge, EDGE_COPY_SRC_BLOCK);
 
  /* See if we can thread through DEST as well, this helps capture
 secondary effects of threading without having to re-run DOM or
@@ -1146,53 +1133,43 @@ edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (edge e)
 void
 jump_threader::thread_across_edge (edge e)
 {
-  bitmap visited = BITMAP_ALLOC (NULL);
+  auto_bitmap visited;
 
   m_state->push (e);
 
   stmt_count = 0;
 
   vec<jump_thread_edge *> *path = m_registry->allocate_thread_path ();
-  bitmap_clear (visited);
   bitmap_set_bit (visited, e->src->index);
   bitmap_set_bit (visited, e->dest->index);
 
-  int threaded;
+  int threaded = 0;
   if ((e->flags & EDGE_DFS_BACK) == 0)
 threaded = thread_through_normal_block (path, e, visited);
-  else
-threaded = 0;
 
   if 
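
[The archived message is truncated above; the tree-ssa-threadupdate.[ch]
hunks that delete allocate_thread_edge and add push_edge are not shown.]
Going by the ChangeLog, the new helper simply fuses allocation and push
into one call.  A sketch of its likely shape follows; the signature
matches the call sites above, but the body is an assumption, not the
committed code:

// Allocate a jump_thread_edge of the given TYPE for edge E and append
// it to PATH, so callers no longer touch the registry's allocator.
void
jt_path_registry::push_edge (vec<jump_thread_edge *> *path,
                             edge e, jump_thread_edge_type type)
{
  jump_thread_edge *x = new jump_thread_edge (e, type);
  path->safe_push (x);
}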

[PATCH] Minor cleanups / PR63155

2018-10-08 Thread Richard Biener


Bootstrapped and tested on x86_64-unknown-linux-gnu, applied.

Richard.

2018-10-08  Richard Biener  

PR tree-optimization/63155
* tree-ssa-propagate.c (add_ssa_edge): Do cheap check first.
(ssa_propagation_engine::ssa_propagate): Remove redundant
bitmap bit clearing.

Index: gcc/tree-ssa-propagate.c
===================================================================
--- gcc/tree-ssa-propagate.c	(revision 264911)
+++ gcc/tree-ssa-propagate.c	(working copy)
@@ -143,10 +143,12 @@ add_ssa_edge (tree var)
   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
 {
   gimple *use_stmt = USE_STMT (use_p);
-  basic_block use_bb = gimple_bb (use_stmt);
+  if (!prop_simulate_again_p (use_stmt))
+   continue;
 
   /* If we did not yet simulate the block wait for this to happen
  and do not add the stmt to the SSA edge worklist.  */
+  basic_block use_bb = gimple_bb (use_stmt);
   if (! (use_bb->flags & BB_VISITED))
continue;
 
@@ -157,9 +159,6 @@ add_ssa_edge (tree var)
   & EDGE_EXECUTABLE))
continue;
 
-  if (!prop_simulate_again_p (use_stmt))
-   continue;
-
   bitmap worklist;
   if (bb_to_cfg_order[gimple_bb (use_stmt)->index] < curr_order)
worklist = ssa_edge_worklist_back;
@@ -804,7 +803,6 @@ ssa_propagation_engine::ssa_propagate (v
   else
{
  curr_order = next_stmt_bb_order;
- bitmap_clear_bit (ssa_edge_worklist, next_stmt_uid);
  if (dump_file && (dump_flags & TDF_DETAILS))
{
  fprintf (dump_file, "\nSimulating statement: ");
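
The first hunk is a cheapest-test-first reordering: prop_simulate_again_p
is a simple flag test on the statement, so doing it before the
block-visited and edge-executability checks avoids the basic-block and
edge lookups for uses that will not be simulated again anyway.
Schematically (illustrative shape only, not a quote of the file):

FOR_EACH_IMM_USE_FAST (use_p, iter, var)
  {
    gimple *use_stmt = USE_STMT (use_p);
    /* Cheap, most-selective test first ...  */
    if (!prop_simulate_again_p (use_stmt))
      continue;
    /* ... and only then the BB_VISITED and EDGE_EXECUTABLE checks.  */
  }

The second hunk drops a bitmap_clear_bit on ssa_edge_worklist that the
ChangeLog calls redundant, presumably because the statement's bit has
already been cleared by the time it is picked for simulation.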


[PATCH] Minor cleanups

2014-04-15 Thread Richard Biener

Bootstrapped and tested on x86_64-unknown-linux-gnu, applied.

Richard.

2014-04-15  Richard Biener  rguent...@suse.de

* tree.c (iterative_hash_expr): Use enum tree_code_class
to store TREE_CODE_CLASS.
(tree_block): Likewise.
(tree_set_block): Likewise.
* tree.h (fold_build_pointer_plus_loc): Use
convert_to_ptrofftype_loc.

Index: gcc/tree.c
===================================================================
--- gcc/tree.c	(revision 209374)
+++ gcc/tree.c	(working copy)
@@ -7387,7 +7387,7 @@ iterative_hash_expr (const_tree t, hashv
 {
   int i;
   enum tree_code code;
-  char tclass;
+  enum tree_code_class tclass;
 
   if (t == NULL_TREE)
 return iterative_hash_hashval_t (0, val);
@@ -11235,7 +11235,7 @@ walk_tree_without_duplicates_1 (tree *tp
 tree
 tree_block (tree t)
 {
-  char const c = TREE_CODE_CLASS (TREE_CODE (t));
+  const enum tree_code_class c = TREE_CODE_CLASS (TREE_CODE (t));
 
   if (IS_EXPR_CODE_CLASS (c))
     return LOCATION_BLOCK (t->exp.locus);
@@ -11246,7 +11246,7 @@ tree_block (tree t)
 void
 tree_set_block (tree t, tree b)
 {
-  char const c = TREE_CODE_CLASS (TREE_CODE (t));
+  const enum tree_code_class c = TREE_CODE_CLASS (TREE_CODE (t));
 
   if (IS_EXPR_CODE_CLASS (c))
 {
Index: gcc/tree.h
===================================================================
--- gcc/tree.h	(revision 209374)
+++ gcc/tree.h	(working copy)
@@ -4187,7 +4187,7 @@ static inline tree
 fold_build_pointer_plus_loc (location_t loc, tree ptr, tree off)
 {
   return fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (ptr),
- ptr, fold_convert_loc (loc, sizetype, off));
+ ptr, convert_to_ptrofftype_loc (loc, off));
 }
 #define fold_build_pointer_plus(p,o) \
fold_build_pointer_plus_loc (UNKNOWN_LOCATION, p, o)
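
For background: POINTER_PLUS_EXPR requires its offset operand to be of
the pointer offset type, which GCC equates with sizetype, and
convert_to_ptrofftype_loc encapsulates exactly that conversion.  Assuming
it is defined as below (worth verifying against revision 209374), the
tree.h hunk is behavior-preserving and purely an abstraction cleanup:

/* Assumed definition from gcc/tree.h: convert T to the pointer offset
   type expected as the second operand of POINTER_PLUS_EXPR.  */
#define convert_to_ptrofftype_loc(loc, t) \
  fold_convert_loc (loc, sizetype, (t))

Likewise, storing the result of TREE_CODE_CLASS in enum tree_code_class
rather than in a plain char changes no behavior; it merely lets the
compiler type-check the uses.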