diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
old mode 100644
new mode 100755
index 69c8165..2d0831f
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -17407,11 +17407,11 @@ ix86_lea_outperforms (rtx insn, unsigned int regno0, unsigned int regno1,
   if (ix86_tune == PROCESSOR_SLM)
     {
       if (has_scale)
-        return true;
+	return true;
       if (split_cost < 1)
-        return false;
+	return false;
       if (regno0 == regno1 || regno0 == regno2)
-        return false;
+	return false;
       return true;
     }
 
@@ -17605,7 +17605,7 @@ ix86_avoid_lea_for_addr (rtx insn, rtx operands[])
     }
 
   return !ix86_lea_outperforms (insn, regno0, regno1, regno2, split_cost,
-                                parts.scale > 1);
+				parts.scale > 1);
 }
 
 /* Emit x86 binary operand CODE in mode MODE, where the first operand
@@ -24402,7 +24402,7 @@ exact_dependency_1 (rtx addr, rtx insn)
     {
     case MEM:
       if (rtx_equal_p (addr, insn))
-        return true;
+	return true;
       break;
     case REG:
     CASE_CONST_ANY:
@@ -24420,17 +24420,17 @@ exact_dependency_1 (rtx addr, rtx insn)
   for (i = 0; i < GET_RTX_LENGTH (code); i++)
     {
       switch (*format_ptr++)
-       {
-       case 'e':
-         if (exact_dependency_1 (addr, XEXP (insn, i)))
-           return true;
-         break;
-       case 'E':
-         for (j = 0; j < XVECLEN (insn, i); j++)
-           if (exact_dependency_1 (addr, XVECEXP (insn, i, j)))
-             return true;
-         break;
-       }
+	{
+	case 'e':
+	  if (exact_dependency_1 (addr, XEXP (insn, i)))
+	    return true;
+	  break;
+	case 'E':
+	  for (j = 0; j < XVECLEN (insn, i); j++)
+	    if (exact_dependency_1 (addr, XVECEXP (insn, i, j)))
+	      return true;
+	  break;
+	}
     }
   return false;
 }
@@ -24610,35 +24610,35 @@ ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
 
     case PROCESSOR_SLM:
       if (!reload_completed)
-        return cost;
+	return cost;
 
       /* Increase cost of integer loads.  */
       memory = get_attr_memory (dep_insn);
       if (memory == MEMORY_LOAD || memory == MEMORY_BOTH)
-        {
-          enum attr_unit unit = get_attr_unit (dep_insn);
-          if (unit == UNIT_INTEGER && cost == 1)
-            {
-              if (memory == MEMORY_LOAD)
-                cost = 3;
-              else
-                {
-                  /* Increase cost of ld/st for short int types only
-                     because of store forwarding issue.  */
-                  rtx set = single_set (dep_insn);
-                  if (set && (GET_MODE (SET_DEST (set)) == QImode
-                              || GET_MODE (SET_DEST (set)) == HImode))
-                    {
-                      /* Increase cost of store/load insn if exact
-                         dependence exists and it is load insn.  */
-                      enum attr_memory insn_memory = get_attr_memory (insn);
-                      if (insn_memory == MEMORY_LOAD
-                          && exact_store_load_dependency (dep_insn, insn))
-                        cost = 3;
-                    }
-                }
-            }
-        }
+	{
+	  enum attr_unit unit = get_attr_unit (dep_insn);
+	  if (unit == UNIT_INTEGER && cost == 1)
+	    {
+	      if (memory == MEMORY_LOAD)
+		cost = 3;
+	      else
+		{
+		  /* Increase cost of ld/st for short int types only
+		     because of store forwarding issue.  */
+		  rtx set = single_set (dep_insn);
+		  if (set && (GET_MODE (SET_DEST (set)) == QImode
+			      || GET_MODE (SET_DEST (set)) == HImode))
+		    {
+		      /* Increase cost of store/load insn if exact
+			 dependence exists and it is load insn.  */
+		      enum attr_memory insn_memory = get_attr_memory (insn);
+		      if (insn_memory == MEMORY_LOAD
+			  && exact_store_load_dependency (dep_insn, insn))
+			cost = 3;
+		    }
+		}
+	    }
+	}
 
     default:
       break;
@@ -24688,7 +24688,7 @@ ia32_multipass_dfa_lookahead (void)
        ready list.
    Return index of IMUL producer if it was found and -1 otherwise.  */
 static int
-do_reoder_for_imul(rtx *ready, int n_ready)
+do_reorder_for_imul (rtx *ready, int n_ready)
 {
   rtx insn, set, insn1, insn2;
   sd_iterator_def sd_it;
@@ -24699,10 +24699,6 @@ do_reoder_for_imul(rtx *ready, int n_ready)
   if (ix86_tune != PROCESSOR_ATOM)
     return index;
 
-  /* Do not perform ready list reodering for pre-reload schedule pass.  */
-  if (!reload_completed)
-    return index;
-
   /* Check that IMUL instruction is on the top of ready list.  */
   insn = ready[n_ready - 1];
   set = single_set (insn);
@@ -24713,53 +24709,53 @@ do_reoder_for_imul(rtx *ready, int n_ready)
     return index;
 
   /* Search for producer of independent IMUL instruction.  */
-  for (i = n_ready - 2; i>= 0; i--)
+  for (i = n_ready - 2; i >= 0; i--)
     {
       insn = ready[i];
       if (!NONDEBUG_INSN_P (insn))
-        continue;
+	continue;
       /* Skip IMUL instruction.  */
       insn2 = PATTERN (insn);
       if (GET_CODE (insn2) == PARALLEL)
-        insn2 = XVECEXP (insn2, 0, 0);
+	insn2 = XVECEXP (insn2, 0, 0);
       if (GET_CODE (insn2) == SET
-          && GET_CODE (SET_SRC (insn2)) == MULT
-          && GET_MODE (SET_SRC (insn2)) == SImode)
-        continue;
+	  && GET_CODE (SET_SRC (insn2)) == MULT
+	  && GET_MODE (SET_SRC (insn2)) == SImode)
+	continue;
 
       FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
-        {
-          rtx con;
+	{
+	  rtx con;
 	  con = DEP_CON (dep);
 	  if (!NONDEBUG_INSN_P (con))
 	    continue;
-          insn1 = PATTERN (con);
-          if (GET_CODE (insn1) == PARALLEL)
-            insn1 = XVECEXP (insn1, 0, 0);
-
-          if (GET_CODE (insn1) == SET
-              && GET_CODE (SET_SRC (insn1)) == MULT
-              && GET_MODE (SET_SRC (insn1)) == SImode)
-            {
-              sd_iterator_def sd_it1;
-              dep_t dep1;
-              /* Check if there is no other dependee for IMUL.  */
-              index = i;
-              FOR_EACH_DEP (con, SD_LIST_BACK, sd_it1, dep1)
-                {
-                  rtx pro;
-                  pro = DEP_PRO (dep1);
+	  insn1 = PATTERN (con);
+	  if (GET_CODE (insn1) == PARALLEL)
+	    insn1 = XVECEXP (insn1, 0, 0);
+
+	  if (GET_CODE (insn1) == SET
+	      && GET_CODE (SET_SRC (insn1)) == MULT
+	      && GET_MODE (SET_SRC (insn1)) == SImode)
+	    {
+	      sd_iterator_def sd_it1;
+	      dep_t dep1;
+	      /* Check if there is no other dependee for IMUL.  */
+	      index = i;
+	      FOR_EACH_DEP (con, SD_LIST_BACK, sd_it1, dep1)
+		{
+		  rtx pro;
+		  pro = DEP_PRO (dep1);
 		  if (!NONDEBUG_INSN_P (pro))
 		    continue;
-                  if (pro != insn)
-                    index = -1;
-                }
-              if (index >= 0)
-                break;
-            }
-        }
+		  if (pro != insn)
+		    index = -1;
+		}
+	      if (index >= 0)
+		break;
+	    }
+	}
       if (index >= 0)
-        break;
+	break;
     }
   return index;
 }
@@ -24769,7 +24765,7 @@ do_reoder_for_imul(rtx *ready, int n_ready)
    scheduled earlier. Applied for Silvermont only.
    Return true if top 2 insns must be interchanged.  */
 static bool
-swap_top_of_ready_list(rtx *ready, int n_ready)
+swap_top_of_ready_list (rtx *ready, int n_ready)
 {
   rtx top = ready[n_ready - 1];
   rtx next = ready[n_ready - 2];
@@ -24782,8 +24778,6 @@ swap_top_of_ready_list(rtx *ready, int n_ready)
 
   if (ix86_tune != PROCESSOR_SLM)
     return false;
-  if (!reload_completed)
-    return false;
 
   if (!NONDEBUG_INSN_P (top))
     return false;
@@ -24803,37 +24797,37 @@ swap_top_of_ready_list(rtx *ready, int n_ready)
   if (INSN_PRIORITY_KNOWN (top) && INSN_PRIORITY_KNOWN (next))
     {
       if (INSN_PRIORITY (top) != INSN_PRIORITY (next))
-        return false;
+	return false;
       /* Determine winner more precise.  */
       FOR_EACH_DEP (top, SD_LIST_RES_BACK, sd_it, dep)
-        {
-          rtx pro;
-          pro = DEP_PRO (dep);
-          if (!NONDEBUG_INSN_P (pro))
-            continue;
-          if (INSN_TICK (pro) > clock1)
-            clock1 = INSN_TICK (pro);
-        }
+	{
+	  rtx pro;
+	  pro = DEP_PRO (dep);
+	  if (!NONDEBUG_INSN_P (pro))
+	    continue;
+	  if (INSN_TICK (pro) > clock1)
+	    clock1 = INSN_TICK (pro);
+	}
       FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
-        {
-          rtx pro;
-          pro = DEP_PRO (dep);
-          if (!NONDEBUG_INSN_P (pro))
-            continue;
-          if (INSN_TICK (pro) > clock2)
-            clock2 = INSN_TICK (pro);
-        }
+	{
+	  rtx pro;
+	  pro = DEP_PRO (dep);
+	  if (!NONDEBUG_INSN_P (pro))
+	    continue;
+	  if (INSN_TICK (pro) > clock2)
+	    clock2 = INSN_TICK (pro);
+	}
 
       if (clock1 == clock2)
-      {
-        /* Determine winner - load must win. */
-        enum attr_memory memory1, memory2;
-        memory1 = get_attr_memory (top);
-        memory2 = get_attr_memory (next);
-        if (memory2 == MEMORY_LOAD && memory1 != MEMORY_LOAD)
-          return true;
-      }
-      return (bool) (clock2 < clock1);
+	{
+	  /* Determine winner - load must win.  */
+	  enum attr_memory memory1, memory2;
+	  memory1 = get_attr_memory (top);
+	  memory2 = get_attr_memory (next);
+	  if (memory2 == MEMORY_LOAD && memory1 != MEMORY_LOAD)
+	    return true;
+	}
+      return (bool) (clock2 < clock1);
     }
   return false;
   #undef INSN_TICK
@@ -24842,7 +24836,7 @@ swap_top_of_ready_list(rtx *ready, int n_ready)
-/* Perform possible reodering of ready list for Atom/Silvermont only.
+/* Perform possible reordering of ready list for Atom/Silvermont only.
    Return issue rate.  */
 static int
-ix86_sched_reorder(FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
+ix86_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
-                   int clock_var)
+		    int clock_var)
 {
   int issue_rate = -1;
@@ -24852,7 +24846,7 @@ ix86_sched_reorder(FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
   int index = -1;
 
   /* Set up issue rate.  */
-  issue_rate = ix86_issue_rate();
+  issue_rate = ix86_issue_rate ();
 
-  /* Do reodering for Atom/SLM only.  */
+  /* Do reordering for Atom/SLM only.  */
   if (ix86_tune != PROCESSOR_ATOM && ix86_tune != PROCESSOR_SLM)
@@ -24866,24 +24860,24 @@ ix86_sched_reorder(FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
   if (!reload_completed)
     return issue_rate;
 
-  if ((index = do_reoder_for_imul (ready, n_ready)) >= 0)
+  if ((index = do_reorder_for_imul (ready, n_ready)) >= 0)
     {
       if (sched_verbose > 1)
-        fprintf(dump, ";;\tatom sched_reorder: put %d insn on top\n",
-                INSN_UID (ready[index]));
+	fprintf (dump, ";;\tatom sched_reorder: put %d insn on top\n",
+		 INSN_UID (ready[index]));
 
       /* Put IMUL producer (ready[index]) at the top of ready list.  */
-      insn= ready[index];
+      insn = ready[index];
       for (i = index; i < n_ready - 1; i++)
-        ready[i] = ready[i + 1];
+	ready[i] = ready[i + 1];
       ready[n_ready - 1] = insn;
       return issue_rate;
     }
   if (clock_var != 0 && swap_top_of_ready_list (ready, n_ready))
     {
       if (sched_verbose > 1)
-        fprintf(dump, ";;\tslm sched_reorder: swap %d and %d insns\n",
-                INSN_UID (ready[n_ready - 1]), INSN_UID (ready[n_ready - 2]));
+	fprintf (dump, ";;\tslm sched_reorder: swap %d and %d insns\n",
+		 INSN_UID (ready[n_ready - 1]), INSN_UID (ready[n_ready - 2]));
       /* Swap 2 top elements of ready list.  */
       insn = ready[n_ready - 1];
       ready[n_ready - 1] = ready[n_ready - 2];
@@ -30054,11 +30048,11 @@ fold_builtin_cpu (tree fndecl, tree *args)
     M_AMD,
     M_CPU_TYPE_START,
     M_INTEL_ATOM,
-    M_INTEL_SLM,
     M_INTEL_CORE2,
     M_INTEL_COREI7,
     M_AMDFAM10H,
     M_AMDFAM15H,
+    M_INTEL_SLM,
     M_CPU_SUBTYPE_START,
     M_INTEL_COREI7_NEHALEM,
     M_INTEL_COREI7_WESTMERE,
diff --git a/libgcc/config/i386/cpuinfo.c b/libgcc/config/i386/cpuinfo.c
index 39b9d11..b7e64c3 100644
--- a/libgcc/config/i386/cpuinfo.c
+++ b/libgcc/config/i386/cpuinfo.c
@@ -47,14 +47,16 @@ enum processor_vendor
   VENDOR_MAX
 };
 
+/* Any new types or subtypes have to be inserted at the end.  */
+
 enum processor_types
 {
   INTEL_ATOM = 1,
-  INTEL_SLM,
   INTEL_CORE2,
   INTEL_COREI7,
   AMDFAM10H,
   AMDFAM15H,
+  INTEL_SLM,
   CPU_TYPE_MAX
 };
 
