Reviewers: ulan, Rodolph Perfetta (ARM),

Message:
PTAL, thanks.

Description:
Arm64: Remove forced csp alignment to 16-byte values for Nvidia chips.

Remove the forced alignment of csp to 16-byte values on Nvidia chips.
Benchmarks on current devices show that this alignment is no longer required.
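
For context, the alignment being removed worked by clearing the low four bits
of csp (the Bic(csp, temp, 0xf) pattern deleted below), rounding the system
stack pointer down to a 16-byte boundary. A minimal C++ sketch of that
rounding, with AlignDown16 as a hypothetical name:

#include <cstdint>

// Round a stack pointer value down to a multiple of 16 by clearing its low
// four bits; this is the effect the bic instruction had on csp in the
// removed code paths.
uint64_t AlignDown16(uint64_t sp) {
  return sp & ~uint64_t{0xf};
}

// e.g. AlignDown16(0x1008) == 0x1000; already-aligned values are unchanged.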

Please review this at https://codereview.chromium.org/710613002/

Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files (+17, -69 lines):
  M src/arm64/assembler-arm64.cc
  M src/arm64/code-stubs-arm64.cc
  M src/arm64/macro-assembler-arm64.h
  M src/arm64/macro-assembler-arm64.cc
  M src/arm64/macro-assembler-arm64-inl.h
  M src/flag-definitions.h


Index: src/arm64/assembler-arm64.cc
diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index 076e143bc8120c97f74dbf11f2b3c20616ae3143..524be154c53bef81a410c3c648f456fc32f85bd3 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -44,17 +44,8 @@ namespace internal {
 // CpuFeatures implementation.

 void CpuFeatures::ProbeImpl(bool cross_compile) {
-  if (cross_compile) {
-    // Always align csp in cross compiled code - this is safe and ensures that
-    // csp will always be aligned if it is enabled by probing at runtime.
-    if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
-  } else {
-    base::CPU cpu;
-    if (FLAG_enable_always_align_csp &&
-        (cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) {
-      supported_ |= 1u << ALWAYS_ALIGN_CSP;
-    }
-  }
+  // AArch64 has no configuration options; no further probing is required.
+  supported_ = 0;
 }


Index: src/arm64/code-stubs-arm64.cc
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index 6583775a6601bfd87b1570d7991ffad921d6b1b0..cdc273cec7d78ad38a8ddadddaeb6860ad6d2f36 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -4293,18 +4293,10 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
 }


-static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
-  // The entry hook is a "BumpSystemStackPointer" instruction (sub),
-  // followed by a "Push lr" instruction, followed by a call.
-  unsigned int size =
-      Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
-  if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
-    // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
-    // "BumpSystemStackPointer".
-    size += kInstructionSize;
-  }
-  return size;
-}
+// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
+// a "Push lr" instruction, followed by a call.
+static const unsigned int kProfileEntryHookCallSize =
+    Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);


 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -4317,7 +4309,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
     __ Push(lr);
     __ CallStub(&stub);
     DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
-           GetProfileEntryHookCallSize(masm));
+           kProfileEntryHookCallSize);

     __ Pop(lr);
   }
@@ -4335,7 +4327,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   const int kNumSavedRegs = kCallerSaved.Count();

   // Compute the function's address as the first argument.
-  __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
+  __ Sub(x0, lr, kProfileEntryHookCallSize);

 #if V8_HOST_ARCH_ARM64
   uintptr_t entry_hook =
Index: src/arm64/macro-assembler-arm64-inl.h
diff --git a/src/arm64/macro-assembler-arm64-inl.h b/src/arm64/macro-assembler-arm64-inl.h
index 4a4d644daebba183245b40365ba519dc23fb58cf..cef7da5df0f90cf7878211f09715ea8f8f89665d 100644
--- a/src/arm64/macro-assembler-arm64-inl.h
+++ b/src/arm64/macro-assembler-arm64-inl.h
@@ -1244,14 +1244,7 @@ void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
 void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
   DCHECK(!csp.Is(sp_));
   if (!TmpList()->IsEmpty()) {
-    if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
-      UseScratchRegisterScope temps(this);
-      Register temp = temps.AcquireX();
-      Sub(temp, StackPointer(), space);
-      Bic(csp, temp, 0xf);
-    } else {
-      Sub(csp, StackPointer(), space);
-    }
+    Sub(csp, StackPointer(), space);
   } else {
     // TODO(jbramley): Several callers rely on this not using scratch
     // registers, so we use the assembler directly here. However, this means
@@ -1284,20 +1277,6 @@ void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
 }


-void MacroAssembler::SyncSystemStackPointer() {
-  DCHECK(emit_debug_code());
-  DCHECK(!csp.Is(sp_));
-  { InstructionAccurateScope scope(this);
-    if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
-      bic(csp, StackPointer(), 0xf);
-    } else {
-      mov(csp, StackPointer());
-    }
-  }
-  AssertStackConsistency();
-}
-
-
 void MacroAssembler::InitializeRootRegister() {
   ExternalReference roots_array_start =
       ExternalReference::roots_array_start(isolate());
@@ -1586,7 +1565,7 @@ void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
     // It is safe to leave csp where it is when unwinding the JavaScript stack,
     // but if we keep it matching StackPointer, the simulator can detect memory
     // accesses in the now-free part of the stack.
-    SyncSystemStackPointer();
+    Mov(csp, StackPointer());
   }
 }

@@ -1608,7 +1587,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
     // It is safe to leave csp where it is when unwinding the JavaScript stack,
     // but if we keep it matching StackPointer, the simulator can detect memory
     // accesses in the now-free part of the stack.
-    SyncSystemStackPointer();
+    Mov(csp, StackPointer());
   }
 }

@@ -1630,7 +1609,7 @@ void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
     // It is safe to leave csp where it is when unwinding the JavaScript stack,
     // but if we keep it matching StackPointer, the simulator can detect memory
     // accesses in the now-free part of the stack.
-    SyncSystemStackPointer();
+    Mov(csp, StackPointer());
   }
 }

Index: src/arm64/macro-assembler-arm64.cc
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index e0a2190f2825a34af71bccaca1ebbc5af02a2d2d..71e35d4726fb2f48c7306c334118bdd8260deeff 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -1209,7 +1209,7 @@ void MacroAssembler::PopPostamble(Operand total_size) {
     // It is safe to leave csp where it is when unwinding the JavaScript stack,
     // but if we keep it matching StackPointer, the simulator can detect memory
     // accesses in the now-free part of the stack.
-    SyncSystemStackPointer();
+    Mov(csp, StackPointer());
   }
 }

@@ -1308,7 +1308,7 @@ void MacroAssembler::AssertStackConsistency() {
   // Avoid emitting code when !use_real_abort() since non-real aborts cause too
   // much code to be generated.
   if (emit_debug_code() && use_real_aborts()) {
-    if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+    if (csp.Is(StackPointer())) {
       // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true.  We
       // can't check the alignment of csp without using a scratch register (or
       // clobbering the flags), but the processor (or simulator) will abort if
Index: src/arm64/macro-assembler-arm64.h
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index cff42d7df74a638404c66b5f8e9a538ef07d4a3a..3469568d7663fe04bc0017f0eb353b46dab7f721 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -761,9 +761,9 @@ class MacroAssembler : public Assembler {
   // it can be evidence of a potential bug because the ABI forbids accesses
   // below csp.
   //
-  // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
-  // enabled, then csp will be dereferenced to cause the processor
-  // (or simulator) to abort if it is not properly aligned.
+  // If StackPointer() is the system stack pointer (csp), then csp will be
+  // dereferenced to cause the processor (or simulator) to abort if it is not
+  // properly aligned.
   //
   // If emit_debug_code() is false, this emits no code.
   void AssertStackConsistency();
@@ -830,15 +830,6 @@ class MacroAssembler : public Assembler {
   // not make sense in that context.
   inline void BumpSystemStackPointer(const Operand& space);

-  // Re-synchronizes the system stack pointer (csp) with the current stack
-  // pointer (according to StackPointer()).  This function will ensure the
-  // new value of the system stack pointer is remains aligned to 16 bytes, and
-  // is lower than or equal to the value of the current stack pointer.
-  //
-  // This method asserts that StackPointer() is not csp, since the call does
-  // not make sense in that context.
-  inline void SyncSystemStackPointer();
-
   // Helpers ------------------------------------------------------------------
   // Root register.
   inline void InitializeRootRegister();
Index: src/flag-definitions.h
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index ca82ce709da5ac6d8e43e1672ee08b1ddf893c6d..9f2fc38cb23d2af07533c905350eb799405591d0 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -438,11 +438,6 @@ DEFINE_BOOL(enable_vldr_imm, false,
 DEFINE_BOOL(force_long_branches, false,
             "force all emitted branches to be in long mode (MIPS only)")

-// cpu-arm64.cc
-DEFINE_BOOL(enable_always_align_csp, true,
- "enable alignment of csp to 16 bytes on platforms which prefer "
-            "the register to always be aligned (ARM64 only)")
-
 // bootstrapper.cc
 DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
 DEFINE_STRING(expose_debug_as, NULL, "expose debug in global object")
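
For reference, the fixed call size introduced in the code-stubs hunk above can
be checked by hand. This is a sketch, not V8 code; kInstructionSize is 4 bytes
on AArch64, and the value of kCallSizeWithRelocation (two instructions) is an
assumption about this revision:

// Hypothetical stand-ins for the V8 constants named in the diff.
constexpr unsigned kInstructionSize = 4;  // every AArch64 instruction is 4 bytes
constexpr unsigned kCallSizeWithRelocation = 2 * kInstructionSize;  // assumed

// One sub (BumpSystemStackPointer) plus one "Push lr", then the call itself.
constexpr unsigned kProfileEntryHookCallSize =
    kCallSizeWithRelocation + (2 * kInstructionSize);

static_assert(kProfileEntryHookCallSize == 16,
              "the size is constant now that no conditional bic is emitted");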

