FX wrote:
 The best way to test IRA is to build and use the branch.  It is easy to
compare the old RA (which is the default on the branch) with IRA (the
-fira option switches IRA on).  I'd recommend trying the following
option sets:
  -fira
  -fira -fira-algorithm=CB

OK, I've done that, and I see a 40% to 60% increase in compilation time
for the first (Fortran) testcase I tried.  Is that expected?

With the compiler from the ira branch on x86_64-linux, here are the
timings reported by "gfortran -c -time -save-temps" with and without
IRA (two timings are given for each set of options, to check
reproducibility):

With -O0
# f951 148.97 9.92
# as 3.95 0.18

# f951 137.51 7.05
# as 3.98 0.17

With -O0 -fira
# f951 223.89 10.91
# as 3.67 0.18

# f951 218.98 8.43
# as 3.61 0.19

With -O0 -fira -fira-algorithm=CB
# f951 191.32 9.03
# as 3.65 0.15

# f951 190.92 8.96
# as 3.63 0.18


(The testcase is 400k lines of preprocessed Fortran code, 16 MB in
size, available here:
http://www.pci.unizh.ch/vandevondele/tmp/all_cp2k_gfortran.f90.gz)


Thanks for the extremely interesting test (with more than 3500 big functions in one file).  It seems the difference in CPU time is not so big, but the wall-time difference really is.  The reason for this is memory consumption: even with the old allocator, gcc needs more than 1 GB for -O0; with IRA it needs more than 2 GB.  The same problem would occur if global.c were used at -O0.

The following patch brings memory consumption for -fira -O0 down to the same level as the old allocator's.  The patch decreases the difference in wall time to 13-20% (I can see a 10% discrepancy in wall time even for the old allocator on my 2 GB Core2 machine; I found it depends on whether I had started Firefox or not).  I'll commit the patch after thorough testing.

This is a temporary solution; I think I'll have a better one in 2-3 weeks that brings the wall time really close to the old allocator's.

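Before the diff itself, here is a condensed, self-contained sketch of the structural change: every IRA-specific path gains an "optimize" guard, and ira () splits into a cheap -O0 path and the full pipeline.  The function names below mirror ira.c, but the bodies are stubs for illustration only, not the actual implementation:

#include <stdio.h>

static int optimize;   /* 0 at -O0, positive otherwise */

/* Stubs standing in for the real ira.c routines.  */
static void allocate_reg_info (void)       { puts ("allocate reg info"); }
static void setup_eliminable_regset (void) { puts ("set up eliminable regset"); }
static void run_full_ira (void)            { puts ("build IR, color, emit, flatten, assign"); }
static void run_reload (int global)        { printf ("reload, global = %d\n", global); }

static void
ira_sketch (void)
{
  if (! optimize)
    {
      /* -O0: set up only the data reload needs.  The IRA IR (obstacks,
	 region tree, allocnos) is never built, so memory consumption
	 stays at the old allocator's level.  */
      allocate_reg_info ();
      setup_eliminable_regset ();
    }
  else
    /* Full IRA pipeline, exactly as before the patch.  */
    run_full_ira ();

  /* At -O0 reload is called with global == 0, i.e. its simple local
     behavior.  */
  run_reload (optimize > 0);
}

int
main (void)
{
  optimize = 0;   /* simulate -fira at -O0 */
  ira_sketch ();
  return 0;
}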
Index: cfgloopanal.c
===================================================================
--- cfgloopanal.c	(revision 134601)
+++ cfgloopanal.c	(working copy)
@@ -390,8 +390,8 @@ estimate_reg_pressure_cost (unsigned n_n
        one.  */
     cost = target_spill_cost * n_new;
 
-  if (flag_ira && (flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
-		   || flag_ira_algorithm == IRA_ALGORITHM_MIXED)
+  if (optimize && flag_ira && (flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
+			       || flag_ira_algorithm == IRA_ALGORITHM_MIXED)
       && number_of_loops () <= (unsigned) IRA_MAX_LOOPS_NUM)
     /* IRA regional allocation deals with high register pressure
        better.  So decrease the cost (to do more accurate the cost
Index: caller-save.c
===================================================================
--- caller-save.c	(revision 134601)
+++ caller-save.c	(working copy)
@@ -457,7 +457,7 @@ setup_save_areas (void)
 	unsigned int regno = reg_renumber[i];
 	unsigned int endregno
 	  = end_hard_regno (GET_MODE (regno_reg_rtx[i]), regno);
-	if (flag_ira && flag_ira_ipra)
+	if (flag_ira && optimize && flag_ira_ipra)
 	  {
 	    HARD_REG_SET clobbered_regs;
 	    
@@ -472,7 +472,7 @@ setup_save_areas (void)
 	      SET_HARD_REG_BIT (hard_regs_used, r);
       }
 
-  if (flag_ira && flag_ira_share_save_slots)
+  if (flag_ira && optimize && flag_ira_share_save_slots)
     {
       rtx insn, slot;
       struct insn_chain *chain, *next;
@@ -857,7 +857,7 @@ calculate_local_save_info (void)
 		  
 		  /* Remember live_throughout can contain spilled
 		     registers when IRA is used.  */
-		  if (flag_ira && r < 0)
+		  if (flag_ira && optimize && r < 0)
 		    continue;
 		  gcc_assert (r >= 0);
 		  nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (regno)];
@@ -1203,7 +1203,7 @@ save_call_clobbered_regs (void)
   struct insn_chain *chain, *next;
   enum machine_mode save_mode[FIRST_PSEUDO_REGISTER];
 
-  if (flag_ira && flag_ira_move_spills)
+  if (flag_ira && optimize && flag_ira_move_spills)
     {
       /* Do global analysis for better placement of spill code. */
       alloc_aux_for_blocks (sizeof (struct bb_info));
@@ -1248,7 +1248,7 @@ save_call_clobbered_regs (void)
 
 		    regno += insert_restore (chain, 1, regno, MOVE_MAX_WORDS,
 					     save_mode);
-		    if (flag_ira && flag_ira_move_spills)
+		    if (flag_ira && optimize && flag_ira_move_spills)
 		      {
 			gcc_assert (before == regno);
 			save_mode[before] = VOIDmode;
@@ -1291,7 +1291,7 @@ save_call_clobbered_regs (void)
 
 		  /* Remember live_throughout can contain spilled
 		     registers when IRA is used.  */
-		  if (flag_ira && r < 0)
+		  if (flag_ira && optimize && r < 0)
 		    continue;
 		  gcc_assert (r >= 0);
 		  nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (regno)];
@@ -1343,7 +1343,7 @@ save_call_clobbered_regs (void)
 	     remain saved.  If the last insn in the block is a JUMP_INSN, put
 	     the restore before the insn, otherwise, put it after the insn.  */
 
-	  if (flag_ira && flag_ira_move_spills)
+	  if (flag_ira && optimize && flag_ira_move_spills)
 	    set_hard_reg_saved (BB_INFO_BY_INDEX (chain->block)->save_here,
 				BB_INFO_BY_INDEX (chain->block)->save_out_mode,
 				save_mode);
@@ -1356,21 +1356,22 @@ save_call_clobbered_regs (void)
 
 		  regno += insert_restore (chain, JUMP_P (insn),
 					   regno, MOVE_MAX_WORDS, save_mode);
-		  if (flag_ira && flag_ira_move_spills)
+		  if (flag_ira && optimize && flag_ira_move_spills)
 		    {
 		      gcc_assert (before == regno);
 		      save_mode[before] = VOIDmode;
 		    }
 		}
 
-	  if (flag_ira && flag_ira_move_spills && next_bb_info != NULL)
+	  if (flag_ira && optimize
+	      && flag_ira_move_spills && next_bb_info != NULL)
 	    set_hard_reg_saved (next_bb_info->save_in,
 				next_bb_info->save_in_mode, save_mode);
 
 	}
     }
 
-  if (flag_ira && flag_ira_move_spills)
+  if (flag_ira && optimize && flag_ira_move_spills)
     free_aux_for_blocks ();
 }
 
Index: global.c
===================================================================
--- global.c	(revision 134601)
+++ global.c	(working copy)
@@ -1209,6 +1209,19 @@ find_reg (int num, HARD_REG_SET losers, 
 	      return;
 	    }
 	}
+      else if (dump_file
+	       && ! CALLER_SAVE_PROFITABLE (optimize_size
+					    ? allocno[num].n_refs
+					    : allocno[num].freq,
+					    optimize_size
+					    ? allocno[num].calls_crossed
+					    : allocno[num].freq_calls_crossed))
+	fprintf (dump_file,
+		 "callee-clobbered reg is not profitable for %d (%d vs %d)\n",
+		 allocno[num].reg,
+		 optimize_size ? allocno[num].n_refs : allocno[num].freq,
+		 optimize_size
+		 ? allocno[num].calls_crossed : allocno[num].freq_calls_crossed);
     }
 
   /* If we haven't succeeded yet,
@@ -1455,7 +1468,7 @@ build_insn_chain (void)
 	  /* Consider spilled pseudos too for IRA because they still
 	     have a chance to get hard-registers in the reload when
 	     IRA is used.  */
-	  if (reg_renumber[i] >= 0 || flag_ira)
+	  if (reg_renumber[i] >= 0 || (flag_ira && optimize))
 	    bitmap_set_bit (live_relevant_regs, i);
 	}
 
@@ -1496,12 +1509,14 @@ build_insn_chain (void)
 			   because they still have a chance to get
 			   hard-registers in the reload when IRA is
 			   used.  */
-			else if (reg_renumber[regno] >= 0 || flag_ira)
+			else if (reg_renumber[regno] >= 0
+				 || (flag_ira && optimize))
 			  bitmap_set_bit (&c->dead_or_set, regno);
 		      }
 
 		    if ((regno < FIRST_PSEUDO_REGISTER
-			 || reg_renumber[regno] >= 0 || flag_ira)
+			 || reg_renumber[regno] >= 0
+			 || (flag_ira && optimize))
 			&& (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)))
 		      {
 			rtx reg = DF_REF_REG (def);
@@ -1601,7 +1616,8 @@ build_insn_chain (void)
 			   because they still have a chance to get
 			   hard-registers in the reload when IRA is
 			   used.  */
-			else if (reg_renumber[regno] >= 0 || flag_ira)
+			else if (reg_renumber[regno] >= 0
+				 || (flag_ira && optimize))
 			  bitmap_set_bit (&c->dead_or_set, regno);
 		      }
 		    
@@ -1610,7 +1626,8 @@ build_insn_chain (void)
 			   because they still have a chance to get
 			   hard-registers in the reload when IRA is
 			   used.  */
-			|| reg_renumber[regno] >= 0 || flag_ira)
+			|| reg_renumber[regno] >= 0
+			|| (flag_ira && optimize))
 		      {
 			if (GET_CODE (reg) == SUBREG
 			    && !DF_REF_FLAGS_IS_SET (use,
Index: alias.c
===================================================================
--- alias.c	(revision 134601)
+++ alias.c	(working copy)
@@ -2013,7 +2013,7 @@ nonoverlapping_memrefs_p (const_rtx x, c
   rtx moffsetx, moffsety;
   HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
 
-  if (flag_ira && reload_completed)
+  if (flag_ira && optimize && reload_completed)
     {
       /* We need this code for IRA because of stack slot sharing.  RTL
 	 in decl can be different than RTL used in insns.  It is a
Index: ira.c
===================================================================
--- ira.c	(revision 134601)
+++ ira.c	(working copy)
@@ -1800,92 +1800,100 @@ ira (FILE *f)
   rebuild_p = update_equiv_regs ();
   regstat_free_n_sets_and_refs ();
   regstat_free_ri ();
-    
-#ifndef IRA_NO_OBSTACK
-  gcc_obstack_init (&ira_obstack);
-#endif
-  bitmap_obstack_initialize (&ira_bitmap_obstack);
 
-  max_regno = max_reg_num ();
-  reg_equiv_len = max_regno;
-  reg_equiv_invariant_p = ira_allocate (max_regno * sizeof (int));
-  memset (reg_equiv_invariant_p, 0, max_regno * sizeof (int));
-  reg_equiv_const = ira_allocate (max_regno * sizeof (rtx));
-  memset (reg_equiv_const, 0, max_regno * sizeof (rtx));
-  find_reg_equiv_invariant_const ();
-  if (rebuild_p)
+  if (! optimize)
     {
-      timevar_push (TV_JUMP);
-      rebuild_jump_labels (get_insns ());
-      purge_all_dead_edges ();
-      timevar_pop (TV_JUMP);
+      allocate_reg_info ();
+      setup_eliminable_regset ();
     }
-  max_regno_before_ira = allocated_reg_info_size = max_reg_num ();
-  allocate_reg_info ();
-  setup_eliminable_regset ();
-
-  overall_cost = reg_cost = mem_cost = 0;
-  load_cost = store_cost = shuffle_cost = 0;
-  move_loops_num = additional_jumps_num = 0;
-
-  ira_assert (current_loops == NULL);
-  flow_loops_find (&ira_loops);
-  current_loops = &ira_loops;
-  saved_flag_ira_algorithm = flag_ira_algorithm;
-  if (number_of_loops () > (unsigned) IRA_MAX_LOOPS_NUM)
-    flag_ira_algorithm = IRA_ALGORITHM_CB;
-
-  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
-    fprintf (ira_dump_file, "Building IRA IR\n");
-  loops_p = ira_build (flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
-		       || flag_ira_algorithm == IRA_ALGORITHM_MIXED);
-  ira_color ();
-
-  max_point_before_emit = max_point;
-
-  ira_emit (loops_p);
-
-  max_regno = max_reg_num ();
-  
-  if (! loops_p)
-    initiate_ira_assign ();
   else
     {
-      expand_reg_info (allocated_reg_info_size);
-      allocated_reg_info_size = max_regno;
- 
+#ifndef IRA_NO_OBSTACK
+      gcc_obstack_init (&ira_obstack);
+#endif
+      bitmap_obstack_initialize (&ira_bitmap_obstack);
+      
+      max_regno = max_reg_num ();
+      reg_equiv_len = max_regno;
+      reg_equiv_invariant_p = ira_allocate (max_regno * sizeof (int));
+      memset (reg_equiv_invariant_p, 0, max_regno * sizeof (int));
+      reg_equiv_const = ira_allocate (max_regno * sizeof (rtx));
+      memset (reg_equiv_const, 0, max_regno * sizeof (rtx));
+      find_reg_equiv_invariant_const ();
+      if (rebuild_p)
+	{
+	  timevar_push (TV_JUMP);
+	  rebuild_jump_labels (get_insns ());
+	  purge_all_dead_edges ();
+	  timevar_pop (TV_JUMP);
+	}
+      max_regno_before_ira = allocated_reg_info_size = max_reg_num ();
+      allocate_reg_info ();
+      setup_eliminable_regset ();
+      
+      overall_cost = reg_cost = mem_cost = 0;
+      load_cost = store_cost = shuffle_cost = 0;
+      move_loops_num = additional_jumps_num = 0;
+      
+      ira_assert (current_loops == NULL);
+      flow_loops_find (&ira_loops);
+      current_loops = &ira_loops;
+      saved_flag_ira_algorithm = flag_ira_algorithm;
+      if (number_of_loops () > (unsigned) IRA_MAX_LOOPS_NUM)
+	flag_ira_algorithm = IRA_ALGORITHM_CB;
+      
       if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
-	fprintf (ira_dump_file, "Flattening IR\n");
-      ira_flattening (max_regno_before_ira, max_point_before_emit);
-      /* New insns were generated: add notes and recalculate live
-	 info.  */
-      df_analyze ();
-
-      {
-	basic_block bb;
-	
-	FOR_ALL_BB (bb)
-	  bb->loop_father = NULL;
-	current_loops = NULL;
-      }
-
-      setup_allocno_assignment_flags ();
-      initiate_ira_assign ();
-      reassign_conflict_allocnos (max_regno);
-    }
-
-  setup_reg_renumber ();
-
-  calculate_allocation_cost ();
+	fprintf (ira_dump_file, "Building IRA IR\n");
+      loops_p = ira_build (flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
+			   || flag_ira_algorithm == IRA_ALGORITHM_MIXED);
+      ira_color ();
+      
+      max_point_before_emit = max_point;
+      
+      ira_emit (loops_p);
+      
+      max_regno = max_reg_num ();
+      
+      if (! loops_p)
+	initiate_ira_assign ();
+      else
+	{
+	  expand_reg_info (allocated_reg_info_size);
+	  allocated_reg_info_size = max_regno;
+	  
+	  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
+	    fprintf (ira_dump_file, "Flattening IR\n");
+	  ira_flattening (max_regno_before_ira, max_point_before_emit);
+	  /* New insns were generated: add notes and recalculate live
+	     info.  */
+	  df_analyze ();
+	  
+	  {
+	    basic_block bb;
+	    
+	    FOR_ALL_BB (bb)
+	      bb->loop_father = NULL;
+	    current_loops = NULL;
+	  }
+	  
+	  setup_allocno_assignment_flags ();
+	  initiate_ira_assign ();
+	  reassign_conflict_allocnos (max_regno);
+	}
+      
+      setup_reg_renumber ();
 
+      calculate_allocation_cost ();
+      
 #ifdef ENABLE_IRA_CHECKING
-  check_allocation ();
+      check_allocation ();
 #endif
-
-  setup_preferred_alternate_classes ();
-
-  delete_trivially_dead_insns (get_insns (), max_reg_num ());
-  max_regno = max_reg_num ();
+      
+      setup_preferred_alternate_classes ();
+      
+      delete_trivially_dead_insns (get_insns (), max_reg_num ());
+      max_regno = max_reg_num ();
+    }
   
   /* Determine if the current function is a leaf before running IRA
      since this can impact optimizations done by the prologue and
@@ -1897,62 +1905,74 @@ ira (FILE *f)
   memset (VEC_address (rtx, reg_equiv_memory_loc_vec), 0,
 	  sizeof (rtx) * max_regno);
   reg_equiv_memory_loc = VEC_address (rtx, reg_equiv_memory_loc_vec);
-  
+
   regstat_init_n_sets_and_refs ();
   regstat_compute_ri ();
 
   allocate_initial_values (reg_equiv_memory_loc);
-  
-  fix_reg_equiv_init ();
 
+  if (optimize)
+    {
+      fix_reg_equiv_init ();
+      
 #ifdef ENABLE_IRA_CHECKING
-  print_redundant_copies ();
+      print_redundant_copies ();
 #endif
-
-  overall_cost_before = overall_cost;
-
-  spilled_reg_stack_slots_num = 0;
-  spilled_reg_stack_slots
-    = ira_allocate (max_regno * sizeof (struct spilled_reg_stack_slot));
-  memset (spilled_reg_stack_slots, 0,
-	  max_regno * sizeof (struct spilled_reg_stack_slot));
-
+      
+      overall_cost_before = overall_cost;
+      
+      spilled_reg_stack_slots_num = 0;
+      spilled_reg_stack_slots
+	= ira_allocate (max_regno * sizeof (struct spilled_reg_stack_slot));
+      memset (spilled_reg_stack_slots, 0,
+	      max_regno * sizeof (struct spilled_reg_stack_slot));
+    }
+      
   df_set_flags (DF_NO_INSN_RESCAN);
   build_insn_chain ();
-  sort_insn_chain (TRUE);
-  reload_completed = ! reload (get_insns (), 1);
 
-  ira_free (spilled_reg_stack_slots);
-
-  finish_ira_assign ();
-
-  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
-      && overall_cost_before != overall_cost)
-    fprintf (ira_dump_file, "+++Overall after reload %d\n", overall_cost);
+  if (optimize)
+    sort_insn_chain (TRUE);
 
-  ira_destroy ();
+  reload_completed = ! reload (get_insns (), optimize > 0);
 
-  flow_loops_free (&ira_loops);
-  free_dominance_info (CDI_DOMINATORS);
-  FOR_ALL_BB (bb)
-    bb->loop_father = NULL;
-  current_loops = NULL;
+  if (optimize)
+    {
+      ira_free (spilled_reg_stack_slots);
+      
+      finish_ira_assign ();
+      
+      if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
+	  && overall_cost_before != overall_cost)
+	fprintf (ira_dump_file, "+++Overall after reload %d\n", overall_cost);
+      
+      ira_destroy ();
+      
+      flow_loops_free (&ira_loops);
+      free_dominance_info (CDI_DOMINATORS);
+      FOR_ALL_BB (bb)
+	bb->loop_father = NULL;
+      current_loops = NULL;
+    }
 
   flag_ira_algorithm = saved_flag_ira_algorithm;
 
-  cleanup_cfg (CLEANUP_EXPENSIVE);
-
   regstat_free_ri ();
   regstat_free_n_sets_and_refs ();
+      
+  if (optimize)
+    {
+      cleanup_cfg (CLEANUP_EXPENSIVE);
+      
+      ira_free (reg_equiv_invariant_p);
+      ira_free (reg_equiv_const);
 
-  ira_free (reg_equiv_invariant_p);
-  ira_free (reg_equiv_const);
-
-  bitmap_obstack_release (&ira_bitmap_obstack);
+      bitmap_obstack_release (&ira_bitmap_obstack);
 #ifndef IRA_NO_OBSTACK
-  obstack_free (&ira_obstack, NULL);
+      obstack_free (&ira_obstack, NULL);
 #endif
-  
+    }
+
   /* The code after the reload has changed so much that at this point
      we might as well just rescan everything.  Not that
      df_rescan_all_insns is not going to help here because it does not
Index: reload1.c
===================================================================
--- reload1.c	(revision 134601)
+++ reload1.c	(working copy)
@@ -553,7 +553,7 @@ compute_use_by_pseudos (HARD_REG_SET *to
 	     which might still contain registers that have not
 	     actually been allocated since they have an
 	     equivalence.  */
-	  gcc_assert (flag_ira || reload_completed);
+	  gcc_assert ((flag_ira && optimize) || reload_completed);
 	}
       else
 	add_to_hard_reg_set (to, PSEUDO_REGNO_MODE (regno), r);
@@ -897,7 +897,7 @@ reload (rtx first, int global)
   for (n = 0, i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
     temp_pseudo_reg_arr[n++] = i;
   
-  if (flag_ira)
+  if (flag_ira && optimize)
     /* Ask IRA to order pseudo-registers for better stack slot
        sharing.  */
     sort_regnos_for_alter_reg (temp_pseudo_reg_arr, n, reg_max_ref_width);
@@ -1051,7 +1051,7 @@ reload (rtx first, int global)
 
       calculate_needs_all_insns (global);
 
-      if (! flag_ira)
+      if (! flag_ira || ! optimize)
 	/* Don't do it for IRA.  We need this info because we don't
 	   change live_throughout and dead_or_set for chains when IRA
 	   is used.  */
@@ -1114,7 +1114,7 @@ reload (rtx first, int global)
       obstack_free (&reload_obstack, reload_firstobj);
     }
 
-  if (flag_ira)
+  if (flag_ira && optimize)
     /* Restore the original insn chain order for correct reload
        work.  */
     sort_insn_chain (FALSE);
@@ -1624,7 +1624,7 @@ calculate_needs_all_insns (int global)
 				       reg_equiv_memory_loc
 				       [REGNO (SET_DEST (set))]))))
 		{
-		  if (flag_ira)
+		  if (flag_ira && optimize)
 		    /* Inform IRA about the insn deletion.  */
 		    mark_memory_move_deletion (REGNO (SET_DEST (set)),
 					       REGNO (SET_SRC (set)));
@@ -1733,7 +1733,7 @@ count_pseudo (int reg)
       || REGNO_REG_SET_P (&spilled_pseudos, reg)
       /* Ignore spilled pseudo-registers which can be here only if IRA
 	 is used.  */
-      || (flag_ira && r < 0))
+      || (flag_ira && optimize && r < 0))
     return;
 
   SET_REGNO_REG_SET (&pseudos_counted, reg);
@@ -1814,7 +1814,7 @@ count_spilled_pseudo (int spilled, int s
 
   /* Ignore spilled pseudo-registers which can be here only if IRA is
      used.  */
-  if ((flag_ira && r < 0)
+  if ((flag_ira && optimize && r < 0)
       || REGNO_REG_SET_P (&spilled_pseudos, reg)
       || spilled + spilled_nregs <= r || r + nregs <= spilled)
     return;
@@ -1882,7 +1882,7 @@ find_reg (struct insn_chain *chain, int 
 	  if (! ok)
 	    continue;
 
-	  if (flag_ira)
+	  if (flag_ira && optimize)
 	    {
 	      /* Ask IRA to find a better pseudo-register for
 		 spilling.  */
@@ -2165,10 +2165,10 @@ alter_reg (int i, int from_reg, bool don
       int adjust = 0;
       bool shared_p = false;
 
-      if (flag_ira)
+      if (flag_ira && optimize)
 	/* Mark the spill for IRA.  */
 	SET_REGNO_REG_SET (&spilled_pseudos, i);
-      x = (dont_share_p || ! flag_ira
+      x = (dont_share_p || ! flag_ira || ! optimize
 	   ? NULL_RTX : reuse_stack_slot (i, inherent_size, total_size));
       if (x)
 	shared_p = true;
@@ -2180,7 +2180,7 @@ alter_reg (int i, int from_reg, bool don
 	 enough inherent space and enough total space.
 	 Otherwise, we allocate a new slot, making sure that it has no less
 	 inherent space, and no less total space, then the previous slot.  */
-      else if (from_reg == -1 || (! dont_share_p && flag_ira))
+      else if (from_reg == -1 || (! dont_share_p && flag_ira && optimize))
 	{
 	  alias_set_type alias_set = new_alias_set ();
 
@@ -2199,7 +2199,7 @@ alter_reg (int i, int from_reg, bool don
 	  set_mem_alias_set (x, alias_set);
 	  dse_record_singleton_alias_set (alias_set, mode);
 
-	  if (! dont_share_p && flag_ira)
+	  if (! dont_share_p && flag_ira && optimize)
 	    /* Inform IRA about allocation a new stack slot.  */
 	    mark_new_stack_slot (x, i, total_size);
 	}
@@ -3944,7 +3944,7 @@ finish_spills (int global)
       spill_reg_order[i] = -1;
 
   EXECUTE_IF_SET_IN_REG_SET (&spilled_pseudos, FIRST_PSEUDO_REGISTER, i, rsi)
-    if (! flag_ira || reg_renumber[i] >= 0)
+    if (! flag_ira || ! optimize || reg_renumber[i] >= 0)
       {
 	/* Record the current hard register the pseudo is allocated to
 	   in pseudo_previous_regs so we avoid reallocating it to the
@@ -3954,7 +3954,7 @@ finish_spills (int global)
 	SET_HARD_REG_BIT (pseudo_previous_regs[i], reg_renumber[i]);
 	/* Mark it as no longer having a hard register home.  */
 	reg_renumber[i] = -1;
-	if (flag_ira)
+	if (flag_ira && optimize)
 	  /* Inform IRA about the change.  */
 	  mark_allocation_change (i);
 	/* We will need to scan everything again.  */
@@ -3984,7 +3984,7 @@ finish_spills (int global)
 	    }
 	}
 
-      if (! flag_ira)
+      if (! flag_ira || ! optimize)
 	{
 	  /* Retry allocating the spilled pseudos.  For each reg,
 	     merge the various reg sets that indicate which hard regs
@@ -4035,7 +4035,7 @@ finish_spills (int global)
       HARD_REG_SET used_by_pseudos;
       HARD_REG_SET used_by_pseudos2;
 
-      if (! flag_ira)
+      if (! flag_ira || ! optimize)
 	{
 	  /* Don't do it for IRA because IRA and the reload still can
 	     assign hard registers to the spilled pseudos on next
@@ -5131,6 +5131,7 @@ reloads_unique_chain_p (int r1, int r2)
   return true;
 }
 
+
 /* The recursive function change all occurrences of WHAT in *WHERE
    onto REPL.  */
 static void
@@ -7008,7 +7009,7 @@ emit_input_reload_insns (struct insn_cha
 		  && REG_N_SETS (REGNO (old)) == 1)
 		{
 		  reg_renumber[REGNO (old)] = REGNO (reloadreg);
-		  if (flag_ira)
+		  if (flag_ira && optimize)
 		    /* Inform IRA about the change.  */
 		    mark_allocation_change (REGNO (old));
 		  alter_reg (REGNO (old), -1, false);
@@ -8547,7 +8548,7 @@ delete_output_reload (rtx insn, int j, i
 
       /* For the debugging info, say the pseudo lives in this reload reg.  */
       reg_renumber[REGNO (reg)] = REGNO (new_reload_reg);
-      if (flag_ira)
+      if (flag_ira && optimize)
 	/* Inform IRA about the change.  */
 	mark_allocation_change (REGNO (reg));
       alter_reg (REGNO (reg), -1, false);
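
A note on the recurring guard in the caller-save.c, global.c, alias.c, and reload1.c hunks above: the IRA-only paths now test both flags, e.g. (taken verbatim from the save_call_clobbered_regs hunk)

  if (flag_ira && optimize && flag_ira_move_spills)
    ...

so -fira at -O0 falls back to the old allocator's cheap behavior, while builds without -fira are unaffected at any optimization level.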