ix86_compute_frame_layout will now populate fields added to structs
machine_function and ix86_frame, and modify the frame layout as needed to
facilitate the use of save & restore stubs.
---
 gcc/config/i386/i386.c | 117 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 116 insertions(+), 1 deletion(-)

diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index cb4e688..f3149ef 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -12516,6 +12516,8 @@ ix86_compute_frame_layout (struct ix86_frame *frame)
 
   frame->nregs = ix86_nsaved_regs ();
   frame->nsseregs = ix86_nsaved_sseregs ();
+  m->outline_ms_sysv_pad_in = 0;
+  m->outline_ms_sysv_pad_out = 0;
   CLEAR_HARD_REG_SET (stub_managed_regs);
 
   /* 64-bit MS ABI seem to require stack alignment to be always 16,
@@ -12531,6 +12533,61 @@ ix86_compute_frame_layout (struct ix86_frame *frame)
       crtl->stack_alignment_needed = 128;
     }
 
+  /* m->outline_ms_sysv is initially enabled in ix86_expand_call for all
+     64-bit ms_abi functions that call a sysv function.  So this is where
+     we prune away cases where we actually don't want to out-of-line the
+     pro/epilogues.  */
+  if (m->outline_ms_sysv)
+  {
+    gcc_assert (TARGET_64BIT_MS_ABI);
+    gcc_assert (flag_outline_msabi_xlogues);
+
+    /* Do we need to handle SEH and disable the optimization? */
+    gcc_assert (!TARGET_SEH);
+
+    if (!TARGET_SSE)
+      m->outline_ms_sysv = false;
+
+    /* Don't break hot-patched functions.  */
+    else if (ix86_function_ms_hook_prologue (current_function_decl))
+      m->outline_ms_sysv = false;
+
+    /* TODO: Still need to add support for hard frame pointers when stack
+       realignment is not needed.  */
+    else if (crtl->stack_realign_finalized
+            && (frame_pointer_needed && !crtl->stack_realign_needed))
+      {
+       static bool warned = false;
+       if (!warned)
+         {
+           warned = true;
+           warning (OPT_foutline_msabi_xlogues,
+                    "not currently supported with hard frame pointers when "
+                    "not realigning stack.");
+         }
+       m->outline_ms_sysv = false;
+      }
+
+    /* TODO: Cases that have not yet been examined.  */
+    else if (crtl->calls_eh_return
+            || crtl->need_drap
+            || m->static_chain_on_stack
+            || ix86_using_red_zone ()
+            || flag_split_stack)
+      {
+       static bool warned = false;
+       if (!warned)
+         {
+           warned = true;
+           warning (OPT_foutline_msabi_xlogues,
+                    "not currently supported with the following: "
+                    "eh returns, DRAP, static call chains on the stack, "
+                    "red zones or split stack.");
+         }
+       m->outline_ms_sysv = false;
+      }
+  }
+
   stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
   preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
 
@@ -12599,6 +12656,60 @@ ix86_compute_frame_layout (struct ix86_frame *frame)
   /* The traditional frame pointer location is at the top of the frame.  */
   frame->hard_frame_pointer_offset = offset;
 
+  if (m->outline_ms_sysv)
+    {
+      unsigned i;
+      HOST_WIDE_INT offset_after_int_regs;
+
+      gcc_assert (!(offset & 7));
+
+      /* Select an appropriate layout for incoming stack offset.  */
+      m->outline_ms_sysv_pad_in = (!crtl->stack_realign_needed && (offset & 8));
+      const struct xlogue_layout &xlogue = xlogue_layout::get_instance ();
+
+      gcc_assert (frame->nregs >= 2);
+      gcc_assert (frame->nsseregs >= 10);
+
+      for (i = 0; i < xlogue.get_nregs (); ++i)
+       {
+         unsigned regno = xlogue.get_reginfo (i).regno;
+
+         if (ix86_save_reg (regno, false, false))
+           {
+             add_to_hard_reg_set (&stub_managed_regs, DImode, regno);
+             /* For the purposes of pro/epilogue generation, we'll only count
+                regs that aren't saved/restored by out-of-line stubs.  */
+             if (SSE_REGNO_P (regno))
+               --frame->nsseregs;
+             else
+               --frame->nregs;
+           }
+         else
+           break;
+       }
+
+      gcc_assert (i >= xlogue_layout::MIN_REGS);
+      gcc_assert (i <= xlogue_layout::MAX_REGS);
+      gcc_assert (frame->nregs >= 0);
+      gcc_assert (frame->nsseregs >= 0);
+      m->outline_ms_sysv_extra_regs = i - xlogue_layout::MIN_REGS;
+
+      /* If, after saving any remaining int regs we need padding for
+        16-byte alignment, we insert that padding prior to remaining int
+        reg saves.  */
+      offset_after_int_regs = xlogue.get_stack_space_used ()
+                             + frame->nregs * UNITS_PER_WORD;
+      if (offset_after_int_regs & 8)
+      {
+       m->outline_ms_sysv_pad_out = 1;
+       offset_after_int_regs += UNITS_PER_WORD;
+      }
+
+      gcc_assert (!(offset_after_int_regs & 15));
+      offset += xlogue.get_stack_space_used ();
+      frame->outlined_save_offset = offset;
+    }
+
   /* Register save area */
   offset += frame->nregs * UNITS_PER_WORD;
   frame->reg_save_offset = offset;
@@ -12611,6 +12722,10 @@ ix86_compute_frame_layout (struct ix86_frame *frame)
   /* Align and set SSE register save area.  */
   if (frame->nsseregs)
     {
+      if (m->outline_ms_sysv)
+       /* If stack is not 16-byte aligned here, then bug.  */
+       gcc_assert (!(offset & 15));
+
       /* The only ABI that has saved SSE registers (Win64) also has a
         16-byte aligned default stack, and thus we don't need to be
         within the re-aligned local stack frame to save them.  In case
@@ -12618,7 +12733,7 @@ ix86_compute_frame_layout (struct ix86_frame *frame)
         unaligned move of SSE register will be emitted, so there is
         no point to round up the SSE register save area outside the
         re-aligned local stack frame to 16 bytes.  */
-      if (ix86_incoming_stack_boundary >= 128)
+      else if (ix86_incoming_stack_boundary >= 128)
        offset = ROUND_UP (offset, 16);
       offset += frame->nsseregs * 16;
     }
-- 
2.9.0

Reply via email to