Revision: 17191
Author:   yu...@chromium.org
Date:     Mon Oct 14 12:41:28 2013 UTC
Log: Track JS allocations as they arrive, with no effect on performance when tracking is switched off.

BUG=277984
R=hpa...@chromium.org

Review URL: https://codereview.chromium.org/22852024

Patch from Alexandra Mikhaylova <amikhayl...@google.com>.
http://code.google.com/p/v8/source/detail?r=17191

Modified:
 /branches/bleeding_edge/include/v8-profiler.h
 /branches/bleeding_edge/src/api.cc
 /branches/bleeding_edge/src/assembler.cc
 /branches/bleeding_edge/src/assembler.h
 /branches/bleeding_edge/src/builtins.cc
 /branches/bleeding_edge/src/heap-profiler.cc
 /branches/bleeding_edge/src/heap-profiler.h
 /branches/bleeding_edge/src/heap-snapshot-generator.cc
 /branches/bleeding_edge/src/heap-snapshot-generator.h
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/objects.cc
 /branches/bleeding_edge/src/serialize.cc
 /branches/bleeding_edge/src/serialize.h
 /branches/bleeding_edge/src/spaces-inl.h
 /branches/bleeding_edge/src/spaces.h
 /branches/bleeding_edge/src/x64/code-stubs-x64.cc
 /branches/bleeding_edge/src/x64/macro-assembler-x64.cc
 /branches/bleeding_edge/src/x64/macro-assembler-x64.h
 /branches/bleeding_edge/test/cctest/cctest.h
 /branches/bleeding_edge/test/cctest/test-heap-profiler.cc

=======================================
--- /branches/bleeding_edge/include/v8-profiler.h Thu Oct 10 13:15:47 2013 UTC
+++ /branches/bleeding_edge/include/v8-profiler.h Mon Oct 14 12:41:28 2013 UTC
@@ -475,6 +475,19 @@
    */
   void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);

+  /**
+   * Starts recording JS allocations as they arrive and tracking of heap
+   * object population statistics.
+   */
+  void StartRecordingHeapAllocations();
+
+  /**
+   * Stops recording JS allocations and tracking of heap object population
+   * statistics, and clears all collected population statistics data.
+   */
+  void StopRecordingHeapAllocations();
+
+
  private:
   HeapProfiler();
   ~HeapProfiler();
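
For embedders, a minimal usage sketch of the new public API (hypothetical:
`isolate` stands for any live v8::Isolate; setup and error handling omitted):

  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  profiler->StartRecordingHeapAllocations();
  // ... run the JS workload whose allocations should be tracked ...
  // Stopping also clears the collected population statistics.
  profiler->StopRecordingHeapAllocations();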
=======================================
--- /branches/bleeding_edge/src/api.cc  Thu Oct 10 13:15:47 2013 UTC
+++ /branches/bleeding_edge/src/api.cc  Mon Oct 14 12:41:28 2013 UTC
@@ -7298,6 +7298,16 @@
                                          RetainedObjectInfo* info) {
   reinterpret_cast<i::HeapProfiler*>(this)->SetRetainedObjectInfo(id, info);
 }
+
+
+void HeapProfiler::StartRecordingHeapAllocations() {
+  reinterpret_cast<i::HeapProfiler*>(this)->StartHeapAllocationsRecording();
+}
+
+
+void HeapProfiler::StopRecordingHeapAllocations() {
+  reinterpret_cast<i::HeapProfiler*>(this)->StopHeapAllocationsRecording();
+}


 v8::Testing::StressType internal::Testing::stress_type_ =
=======================================
--- /branches/bleeding_edge/src/assembler.cc    Thu Oct 10 10:37:18 2013 UTC
+++ /branches/bleeding_edge/src/assembler.cc    Mon Oct 14 12:41:28 2013 UTC
@@ -1331,6 +1331,14 @@
   return ExternalReference(
       reinterpret_cast<void*>(&double_constants.the_hole_nan));
 }
+
+
+ExternalReference ExternalReference::record_object_allocation_function(
+    Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate,
+               FUNCTION_ADDR(HeapProfiler::RecordObjectAllocationFromMasm)));
+}


 #ifndef V8_INTERPRETED_REGEXP
=======================================
--- /branches/bleeding_edge/src/assembler.h     Thu Oct 10 08:45:40 2013 UTC
+++ /branches/bleeding_edge/src/assembler.h     Mon Oct 14 12:41:28 2013 UTC
@@ -728,6 +728,9 @@

   static ExternalReference get_make_code_young_function(Isolate* isolate);

+  // New heap objects tracking support.
+  static ExternalReference record_object_allocation_function(Isolate* isolate);
+
   // Deoptimization support.
   static ExternalReference new_deoptimizer_function(Isolate* isolate);
   static ExternalReference compute_output_frames_function(Isolate* isolate);
=======================================
--- /branches/bleeding_edge/src/builtins.cc     Fri Oct 11 14:05:23 2013 UTC
+++ /branches/bleeding_edge/src/builtins.cc     Mon Oct 14 12:41:28 2013 UTC
@@ -274,7 +274,8 @@
   }

   HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
-                                     elms->address() + size_delta));
+                                     elms->address() + size_delta,
+                                     elms->Size()));
   return FixedArrayBase::cast(HeapObject::FromAddress(
       elms->address() + to_trim * entry_size));
 }
=======================================
--- /branches/bleeding_edge/src/heap-profiler.cc Fri Jul  5 09:52:11 2013 UTC
+++ /branches/bleeding_edge/src/heap-profiler.cc Mon Oct 14 12:41:28 2013 UTC
@@ -27,6 +27,7 @@

 #include "v8.h"

+#include "deoptimizer.h"
 #include "heap-profiler.h"
 #include "heap-snapshot-generator-inl.h"

@@ -35,7 +36,8 @@

 HeapProfiler::HeapProfiler(Heap* heap)
     : snapshots_(new HeapSnapshotsCollection(heap)),
-      next_snapshot_uid_(1) {
+      next_snapshot_uid_(1),
+      is_tracking_allocations_(false) {
 }


@@ -132,14 +134,86 @@
 }


-void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
-  snapshots_->ObjectMoveEvent(from, to);
+void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
+  snapshots_->ObjectMoveEvent(from, to, size);
 }
+
+
+void HeapProfiler::NewObjectEvent(Address addr, int size) {
+  snapshots_->NewObjectEvent(addr, size);
+}
+
+
+void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
+  snapshots_->UpdateObjectSizeEvent(addr, size);
+}
+

 void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
                                          RetainedObjectInfo* info) {
   // TODO(yurus, marja): Don't route this information through GlobalHandles.
   heap()->isolate()->global_handles()->SetRetainedObjectInfo(id, info);
 }
+
+
+void HeapProfiler::StartHeapAllocationsRecording() {
+  StartHeapObjectsTracking();
+  is_tracking_allocations_ = true;
+  DropCompiledCode();
+  snapshots_->UpdateHeapObjectsMap();
+}
+
+
+void HeapProfiler::StopHeapAllocationsRecording() {
+  StopHeapObjectsTracking();
+  is_tracking_allocations_ = false;
+  DropCompiledCode();
+}
+
+
+void HeapProfiler::RecordObjectAllocationFromMasm(Isolate* isolate,
+                                                  Address obj,
+                                                  int size) {
+  isolate->heap_profiler()->NewObjectEvent(obj, size);
+}
+
+
+void HeapProfiler::DropCompiledCode() {
+  Isolate* isolate = heap()->isolate();
+  HandleScope scope(isolate);
+
+  if (FLAG_concurrent_recompilation) {
+    isolate->optimizing_compiler_thread()->Flush();
+  }
+
+  Deoptimizer::DeoptimizeAll(isolate);
+
+  Handle<Code> lazy_compile =
+      Handle<Code>(isolate->builtins()->builtin(Builtins::kLazyCompile));
+
+  heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                            "switch allocations tracking");
+
+  DisallowHeapAllocation no_allocation;
+
+  HeapIterator iterator(heap());
+  HeapObject* obj = NULL;
+  while (((obj = iterator.next()) != NULL)) {
+    if (obj->IsJSFunction()) {
+      JSFunction* function = JSFunction::cast(obj);
+      SharedFunctionInfo* shared = function->shared();
+
+      if (!shared->allows_lazy_compilation()) continue;
+      if (!shared->script()->IsScript()) continue;
+
+      Code::Kind kind = function->code()->kind();
+      if (kind == Code::FUNCTION || kind == Code::BUILTIN) {
+        function->set_code(*lazy_compile);
+        shared->set_code(*lazy_compile);
+      }
+    }
+  }
+}
+

 } }  // namespace v8::internal
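
A reading aid (an inference from the patch, not part of it): the path an
allocation reported from generated code takes through the new hooks:

  // generated code --CallCFunction--> RecordObjectAllocationFromMasm(isolate, obj, size)
  //   -> isolate->heap_profiler()->NewObjectEvent(obj, size)
  //   -> snapshots_->NewObjectEvent(addr, size)    // HeapSnapshotsCollection
  //   -> ids_.NewObject(addr, size)                // HeapObjectsMap
  //   -> FindOrAddEntry(addr, size, /* accessed */ false)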
=======================================
--- /branches/bleeding_edge/src/heap-profiler.h Thu Jul  4 16:34:07 2013 UTC
+++ /branches/bleeding_edge/src/heap-profiler.h Mon Oct 14 12:41:28 2013 UTC
@@ -63,13 +63,22 @@

   void StartHeapObjectsTracking();
   void StopHeapObjectsTracking();
+
+  static void RecordObjectAllocationFromMasm(Isolate* isolate,
+                                             Address obj,
+                                             int size);
+
   SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
   int GetSnapshotsCount();
   HeapSnapshot* GetSnapshot(int index);
   SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
   void DeleteAllSnapshots();

-  void ObjectMoveEvent(Address from, Address to);
+  void ObjectMoveEvent(Address from, Address to, int size);
+
+  void NewObjectEvent(Address addr, int size);
+
+  void UpdateObjectSizeEvent(Address addr, int size);

   void DefineWrapperClass(
       uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
@@ -82,12 +91,26 @@

   void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);

+  bool is_tracking_allocations() {
+    return is_tracking_allocations_;
+  }
+
+  void StartHeapAllocationsRecording();
+  void StopHeapAllocationsRecording();
+
+  int FindUntrackedObjects() {
+    return snapshots_->FindUntrackedObjects();
+  }
+
+  void DropCompiledCode();
+
  private:
   Heap* heap() const { return snapshots_->heap(); }

   HeapSnapshotsCollection* snapshots_;
   unsigned next_snapshot_uid_;
   List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
+  bool is_tracking_allocations_;
 };

 } }  // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/heap-snapshot-generator.cc Tue Sep 24 10:30:41 2013 UTC
+++ /branches/bleeding_edge/src/heap-snapshot-generator.cc Mon Oct 14 12:41:28 2013 UTC
@@ -397,7 +397,7 @@
 }


-void HeapObjectsMap::MoveObject(Address from, Address to) {
+void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
   ASSERT(to != NULL);
   ASSERT(from != NULL);
   if (from == to) return;
@@ -428,9 +428,24 @@
     int from_entry_info_index =
         static_cast<int>(reinterpret_cast<intptr_t>(from_value));
     entries_.at(from_entry_info_index).addr = to;
+    // The size of an object can change during its life, so to keep
+    // information about the object in entries_ consistent, we have to adjust
+    // its size when the object is migrated.
+    entries_.at(from_entry_info_index).size = object_size;
     to_entry->value = from_value;
   }
 }
+
+
+void HeapObjectsMap::NewObject(Address addr, int size) {
+  ASSERT(addr != NULL);
+  FindOrAddEntry(addr, size, false);
+}
+
+
+void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
+  FindOrAddEntry(addr, size, false);
+}


 SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
@@ -445,7 +460,8 @@


 SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
-                                                unsigned int size) {
+                                                unsigned int size,
+                                                bool accessed) {
   ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
   HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
                                               true);
@@ -453,14 +469,14 @@
     int entry_index =
         static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
     EntryInfo& entry_info = entries_.at(entry_index);
-    entry_info.accessed = true;
+    entry_info.accessed = accessed;
     entry_info.size = size;
     return entry_info.id;
   }
   entry->value = reinterpret_cast<void*>(entries_.length());
   SnapshotObjectId id = next_id_;
   next_id_ += kObjectIdStep;
-  entries_.Add(EntryInfo(id, addr, size));
+  entries_.Add(EntryInfo(id, addr, size, accessed));
   ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
   return id;
 }
@@ -482,6 +498,27 @@
   }
   RemoveDeadEntries();
 }
+
+
+int HeapObjectsMap::FindUntrackedObjects() {
+  HeapIterator iterator(heap_);
+  int untracked = 0;
+  for (HeapObject* obj = iterator.next();
+       obj != NULL;
+       obj = iterator.next()) {
+    HashMap::Entry* entry = entries_map_.Lookup(
+      obj->address(), ComputePointerHash(obj->address()), false);
+    if (entry == NULL) {
+      untracked++;
+    } else {
+      int entry_index = static_cast<int>(
+          reinterpret_cast<intptr_t>(entry->value));
+      EntryInfo& entry_info = entries_.at(entry_index);
+      CHECK_EQ(obj->Size(), static_cast<int>(entry_info.size));
+    }
+  }
+  return untracked;
+}


SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
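
A minimal sketch of how the new consistency check is meant to be used in a
test (mirroring the HeapObjectsTracker helper added to cctest.h below;
`heap_profiler` is an i::HeapProfiler* obtained from the isolate):

  heap_profiler->StartHeapAllocationsRecording();
  // ... allocate from JS or C++ ...
  // (the cctest helper first forces a full GC to flush pending objects)
  // Every live object should now have an entry with the correct size:
  CHECK_EQ(0, heap_profiler->FindUntrackedObjects());
  heap_profiler->StopHeapAllocationsRecording();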
=======================================
--- /branches/bleeding_edge/src/heap-snapshot-generator.h Mon Sep 16 13:13:42 2013 UTC
+++ /branches/bleeding_edge/src/heap-snapshot-generator.h Mon Oct 14 12:41:28 2013 UTC
@@ -227,8 +227,12 @@

   void SnapshotGenerationFinished();
   SnapshotObjectId FindEntry(Address addr);
-  SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
-  void MoveObject(Address from, Address to);
+  SnapshotObjectId FindOrAddEntry(Address addr,
+                                  unsigned int size,
+                                  bool accessed = true);
+  void MoveObject(Address from, Address to, int size);
+  void NewObject(Address addr, int size);
+  void UpdateObjectSize(Address addr, int size);
   SnapshotObjectId last_assigned_id() const {
     return next_id_ - kObjectIdStep;
   }
@@ -247,6 +251,10 @@
   static const SnapshotObjectId kGcRootsFirstSubrootId;
   static const SnapshotObjectId kFirstAvailableObjectId;

+  int FindUntrackedObjects();
+
+  void UpdateHeapObjectsMap();
+
  private:
   struct EntryInfo {
   EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
@@ -265,7 +273,6 @@
     uint32_t count;
   };

-  void UpdateHeapObjectsMap();
   void RemoveDeadEntries();

   SnapshotObjectId next_id_;
@@ -306,11 +313,21 @@
     return ids_.FindOrAddEntry(object_addr, object_size);
   }
   Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
-  void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
+  void ObjectMoveEvent(Address from, Address to, int size) {
+    ids_.MoveObject(from, to, size);
+  }
+  void NewObjectEvent(Address addr, int size) { ids_.NewObject(addr, size); }
+  void UpdateObjectSizeEvent(Address addr, int size) {
+    ids_.UpdateObjectSize(addr, size);
+  }
   SnapshotObjectId last_assigned_id() const {
     return ids_.last_assigned_id();
   }
   size_t GetUsedMemorySize() const;
+
+  int FindUntrackedObjects() { return ids_.FindUntrackedObjects(); }
+
+  void UpdateHeapObjectsMap() { ids_.UpdateHeapObjectsMap(); }

  private:
   bool is_tracking_objects_;  // Whether tracking object moves is needed.
=======================================
--- /branches/bleeding_edge/src/heap.cc Thu Oct 10 13:14:37 2013 UTC
+++ /branches/bleeding_edge/src/heap.cc Mon Oct 14 12:41:28 2013 UTC
@@ -2101,7 +2101,8 @@
     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
       // Update NewSpace stats if necessary.
       RecordCopiedObject(heap, target);
-      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
+      HEAP_PROFILE(heap,
+                   ObjectMoveEvent(source->address(), target->address(), size));
       Isolate* isolate = heap->isolate();
       if (isolate->logger()->is_logging_code_events() ||
           isolate->cpu_profiler()->is_profiling()) {
@@ -4927,6 +4928,13 @@
       alloc_memento->set_map_no_write_barrier(allocation_memento_map());
       ASSERT(site->map() == allocation_site_map());
       alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+      HeapProfiler* profiler = isolate()->heap_profiler();
+      if (profiler->is_tracking_allocations()) {
+        profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
+                                        object_size);
+        profiler->NewObjectEvent(alloc_memento->address(),
+                                 AllocationMemento::kSize);
+      }
     }
   }

=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Fri Oct  4 07:25:24 2013 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Mon Oct 14 12:41:28 2013 UTC
@@ -2759,7 +2759,7 @@
                                          Address src,
                                          int size,
                                          AllocationSpace dest) {
-  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
+  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst, size));
   ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
   ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
   if (dest == OLD_POINTER_SPACE) {
@@ -2942,7 +2942,9 @@
   ASSERT(target_space == heap()->old_pointer_space() ||
          target_space == heap()->old_data_space());
   Object* result;
-  MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
+  MaybeObject* maybe_result = target_space->AllocateRaw(
+      object_size,
+      PagedSpace::MOVE_OBJECT);
   if (maybe_result->ToObject(&result)) {
     HeapObject* target = HeapObject::cast(result);
     MigrateObject(target->address(),
@@ -3015,7 +3017,7 @@

       int size = object->Size();

-      MaybeObject* target = space->AllocateRaw(size);
+      MaybeObject* target = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT);
       if (target->IsFailure()) {
         // OS refused to give us memory.
         V8::FatalProcessOutOfMemory("Evacuation");
=======================================
--- /branches/bleeding_edge/src/objects.cc      Thu Oct 10 15:38:52 2013 UTC
+++ /branches/bleeding_edge/src/objects.cc      Mon Oct 14 12:41:28 2013 UTC
@@ -2284,6 +2284,13 @@
       MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
     }
   }
+
+  // The array may not be moved during GC, but its size has to be adjusted
+  // nevertheless.
+  HeapProfiler* profiler = heap->isolate()->heap_profiler();
+  if (profiler->is_tracking_allocations()) {
+    profiler->UpdateObjectSizeEvent(elms->address(), elms->Size());
+  }
 }


=======================================
--- /branches/bleeding_edge/src/serialize.cc    Fri Oct  4 07:25:24 2013 UTC
+++ /branches/bleeding_edge/src/serialize.cc    Mon Oct 14 12:41:28 2013 UTC
@@ -581,6 +581,10 @@
       UNCLASSIFIED,
       63,
       "Heap::allocation_sites_list_address()");
+  Add(ExternalReference::record_object_allocation_function(isolate).address(),
+      UNCLASSIFIED,
+      64,
+      "HeapProfiler::RecordObjectAllocationFromMasm");

   // Add a small set of deopt entry addresses to encoder without generating the
   // deopt table code, which isn't possible at deserialization time.
@@ -591,7 +595,7 @@
         entry,
         Deoptimizer::LAZY,
         Deoptimizer::CALCULATE_ENTRY_ADDRESS);
-    Add(address, LAZY_DEOPTIMIZATION, 64 + entry, "lazy_deopt");
+    Add(address, LAZY_DEOPTIMIZATION, 65 + entry, "lazy_deopt");
   }
 }

=======================================
--- /branches/bleeding_edge/src/serialize.h     Wed Sep 11 07:14:41 2013 UTC
+++ /branches/bleeding_edge/src/serialize.h     Mon Oct 14 12:41:28 2013 UTC
@@ -366,6 +366,10 @@
   Address Allocate(int space_index, int size) {
     Address address = high_water_[space_index];
     high_water_[space_index] = address + size;
+    HeapProfiler* profiler = isolate_->heap_profiler();
+    if (profiler->is_tracking_allocations()) {
+      profiler->NewObjectEvent(address, size);
+    }
     return address;
   }

=======================================
--- /branches/bleeding_edge/src/spaces-inl.h    Wed Sep 11 18:30:01 2013 UTC
+++ /branches/bleeding_edge/src/spaces-inl.h    Mon Oct 14 12:41:28 2013 UTC
@@ -28,6 +28,7 @@
 #ifndef V8_SPACES_INL_H_
 #define V8_SPACES_INL_H_

+#include "heap-profiler.h"
 #include "isolate.h"
 #include "spaces.h"
 #include "v8memory.h"
@@ -273,12 +274,18 @@


 // Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
+MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
+                                     AllocationType event) {
+  HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+
   HeapObject* object = AllocateLinearly(size_in_bytes);
   if (object != NULL) {
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
+    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+      profiler->NewObjectEvent(object->address(), size_in_bytes);
+    }
     return object;
   }

@@ -291,6 +298,9 @@
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
+    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+      profiler->NewObjectEvent(object->address(), size_in_bytes);
+    }
     return object;
   }

@@ -299,6 +309,9 @@
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
+    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+      profiler->NewObjectEvent(object->address(), size_in_bytes);
+    }
     return object;
   }

@@ -332,9 +345,14 @@
     return SlowAllocateRaw(size_in_bytes);
   }

-  Object* obj = HeapObject::FromAddress(old_top);
+  HeapObject* obj = HeapObject::FromAddress(old_top);
   allocation_info_.top += size_in_bytes;
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+  if (profiler != NULL && profiler->is_tracking_allocations()) {
+    profiler->NewObjectEvent(obj->address(), size_in_bytes);
+  }

   return obj;
 }
=======================================
--- /branches/bleeding_edge/src/spaces.h        Thu Sep 12 08:57:10 2013 UTC
+++ /branches/bleeding_edge/src/spaces.h        Mon Oct 14 12:41:28 2013 UTC
@@ -1714,9 +1714,16 @@
   Address* allocation_top_address() { return &allocation_info_.top; }
   Address* allocation_limit_address() { return &allocation_info_.limit; }

+  enum AllocationType {
+    NEW_OBJECT,
+    MOVE_OBJECT
+  };
+
// Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
-  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT inline MaybeObject* AllocateRaw(
+      int size_in_bytes,
+      AllocationType event = NEW_OBJECT);

   virtual bool ReserveSpace(int bytes);
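
How call sites choose the allocation type (a sketch distilled from the
mark-compact.cc changes above; `space` and `size` are assumptions):

  // Ordinary allocation: defaults to NEW_OBJECT and, when tracking is on,
  // reports a NewObjectEvent to the heap profiler.
  MaybeObject* fresh = space->AllocateRaw(size);
  // Evacuation during GC: the object already has an entry, so the migration
  // is reported separately via ObjectMoveEvent rather than as a new object.
  MaybeObject* moved = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT);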

=======================================
--- /branches/bleeding_edge/src/x64/code-stubs-x64.cc Fri Oct  4 08:17:11 2013 UTC
+++ /branches/bleeding_edge/src/x64/code-stubs-x64.cc Mon Oct 14 12:41:28 2013 UTC
@@ -4226,9 +4226,15 @@
     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ testb(rbx, Immediate(kStringEncodingMask));
-    __ j(zero, &two_byte_slice, Label::kNear);
+    // Use far jumps when allocation tracking is on, since the extra
+    // RecordObjectAllocation call inside MacroAssembler::Allocate makes the
+    // code in between too large for near jumps.
+    Label::Distance jump_distance =
+        masm->isolate()->heap_profiler()->is_tracking_allocations()
+        ? Label::kFar
+        : Label::kNear;
+    __ j(zero, &two_byte_slice, jump_distance);
     __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
-    __ jmp(&set_slice_header, Label::kNear);
+    __ jmp(&set_slice_header, jump_distance);
     __ bind(&two_byte_slice);
     __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
     __ bind(&set_slice_header);
=======================================
--- /branches/bleeding_edge/src/x64/macro-assembler-x64.cc Fri Oct 11 07:12:06 2013 UTC
+++ /branches/bleeding_edge/src/x64/macro-assembler-x64.cc Mon Oct 14 12:41:28 2013 UTC
@@ -4093,6 +4093,10 @@
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);

+  if (isolate()->heap_profiler()->is_tracking_allocations()) {
+    RecordObjectAllocation(isolate(), result, object_size);
+  }
+
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
@@ -4172,6 +4176,10 @@
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);

+  if (isolate()->heap_profiler()->is_tracking_allocations()) {
+    RecordObjectAllocation(isolate(), result, object_size);
+  }
+
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
@@ -4931,6 +4939,38 @@
               Heap::kAllocationMementoMapRootIndex);
   bind(&no_memento_available);
 }
+
+
+void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
+                                            Register object,
+                                            Register object_size) {
+  FrameScope frame(this, StackFrame::EXIT);
+  PushSafepointRegisters();
+  PrepareCallCFunction(3);
+  // The object register may be rdx, which is clobbered when setting up the
+  // C call arguments below, so stash it in kScratchRegister first.
+  movq(kScratchRegister, object);
+  movq(arg_reg_3, object_size);
+  movq(arg_reg_2, kScratchRegister);
+  movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
+  CallCFunction(
+      ExternalReference::record_object_allocation_function(isolate), 3);
+  PopSafepointRegisters();
+}
+
+
+void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
+                                            Register object,
+                                            int object_size) {
+  FrameScope frame(this, StackFrame::EXIT);
+  PushSafepointRegisters();
+  PrepareCallCFunction(3);
+  movq(arg_reg_2, object);
+  movq(arg_reg_3, Immediate(object_size));
+  movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
+  CallCFunction(
+      ExternalReference::record_object_allocation_function(isolate), 3);
+  PopSafepointRegisters();
+}


 } }  // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/x64/macro-assembler-x64.h Fri Oct  4 07:13:43 2013 UTC
+++ /branches/bleeding_edge/src/x64/macro-assembler-x64.h Mon Oct 14 12:41:28 2013 UTC
@@ -1115,6 +1115,15 @@
                 Label* gc_required,
                 AllocationFlags flags);

+  // Record a JS object allocation if allocations tracking mode is on.
+  void RecordObjectAllocation(Isolate* isolate,
+                              Register object,
+                              Register object_size);
+
+  void RecordObjectAllocation(Isolate* isolate,
+                              Register object,
+                              int object_size);
+
   // Undo allocation in new space. The object passed and objects allocated after
   // it will no longer be allocated. Make sure that no pointers are left to the
   // object(s) no longer allocated as they would be invalid when allocation is
=======================================
--- /branches/bleeding_edge/test/cctest/cctest.h Mon Sep 23 11:25:52 2013 UTC
+++ /branches/bleeding_edge/test/cctest/cctest.h Mon Oct 14 12:41:28 2013 UTC
@@ -346,6 +346,28 @@
   space->ResetFreeList();
   space->ClearStats();
 }
+
+
+// Helper class for tracking and checking new allocations.
+// To check JS allocation tracking in a test, just create an instance of
+// this class in the test's scope.
+class HeapObjectsTracker {
+ public:
+  HeapObjectsTracker() {
+    heap_profiler_ = i::Isolate::Current()->heap_profiler();
+    CHECK_NE(NULL, heap_profiler_);
+    heap_profiler_->StartHeapAllocationsRecording();
+  }
+
+  ~HeapObjectsTracker() {
+    i::Isolate::Current()->heap()->CollectAllAvailableGarbage();
+    CHECK_EQ(0, heap_profiler_->FindUntrackedObjects());
+    heap_profiler_->StopHeapAllocationsRecording();
+  }
+
+ private:
+  i::HeapProfiler* heap_profiler_;
+};


 #endif  // ifndef CCTEST_H_
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap-profiler.cc Thu Sep 19 09:46:15 2013 UTC
+++ /branches/bleeding_edge/test/cctest/test-heap-profiler.cc Mon Oct 14 12:41:28 2013 UTC
@@ -2005,3 +2005,19 @@
       GetProperty(foo_func, v8::HeapGraphEdge::kInternal, "code");
   CHECK_NE(NULL, code);
 }
+
+
+// This is an example of using the JS allocation tracking checks in a test.
+TEST(HeapObjectsTracker) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  HeapObjectsTracker tracker;
+  CompileRun("var a = 1.2");
+  CompileRun("var a = 1.2; var b = 1.0; var c = 1.0;");
+  CompileRun(
+    "var a = [];"
+    "for (var i = 0; i < 5; ++i)"
+    "    a[i] = i;\n"
+    "for (var i = 0; i < 3; ++i)"
+    "    a.shift();\n");
+}
