Revision: 15465
Author:   bmeu...@chromium.org
Date:     Wed Jul  3 04:40:30 2013
Log:      Reintroduce runtime zone to Isolate.

When tcmalloc is not in use, the malloc()/free() overhead
can be significant for several runtime functions such as StringReplace.
Therefore we reintroduce the runtime_zone into Isolate and re-enable
the segment caching functionality of Zone.

There's now also a simpler version of ZoneScope w/o nesting capabilities.

BUG=v8:2759
R=da...@chromium.org, yang...@chromium.org

Review URL: https://codereview.chromium.org/18635003
http://code.google.com/p/v8/source/detail?r=15465
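
For context, the calling pattern this patch introduces in runtime.cc looks
roughly like the sketch below. The function and its body are hypothetical
(made up for illustration); only ZoneScope, Isolate::runtime_zone() and
ZoneList come from the patch. A ZoneScope on the stack hands out the
per-isolate runtime zone, and its destructor calls Zone::DeleteAll(), which
keeps one small segment cached so the next runtime call usually avoids a
fresh malloc():

  // Hypothetical runtime function, for illustration only.
  static MaybeObject* Runtime_ExampleHelper(Isolate* isolate, String* subject) {
    // Scratch memory below comes from the shared, per-isolate runtime zone.
    ZoneScope zone_scope(isolate->runtime_zone());

    ZoneList<int> indices(8, zone_scope.zone());
    indices.Add(subject->length(), zone_scope.zone());

    // ... do the actual work using the zone-allocated scratch data ...

    return isolate->heap()->undefined_value();
    // ~ZoneScope() calls DeleteAll() on the runtime zone here; at most one
    // segment no larger than kMaximumKeptSegmentSize stays cached.
  }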

Modified:
 /branches/bleeding_edge/src/isolate.cc
 /branches/bleeding_edge/src/isolate.h
 /branches/bleeding_edge/src/runtime.cc
 /branches/bleeding_edge/src/zone.cc
 /branches/bleeding_edge/src/zone.h

=======================================
--- /branches/bleeding_edge/src/isolate.cc      Tue Jul  2 00:21:07 2013
+++ /branches/bleeding_edge/src/isolate.cc      Wed Jul  3 04:40:30 2013
@@ -1762,6 +1762,7 @@
       descriptor_lookup_cache_(NULL),
       handle_scope_implementer_(NULL),
       unicode_cache_(NULL),
+      runtime_zone_(this),
       in_use_list_(0),
       free_list_(0),
       preallocated_storage_preallocated_(false),
@@ -1960,6 +1961,9 @@
 Isolate::~Isolate() {
   TRACE_ISOLATE(destructor);

+  // Has to be called while counters_ are still alive
+  runtime_zone_.DeleteKeptSegment();
+
   // The entry stack must be empty when we get here,
   // except for the default isolate, where it can
   // still contain up to one entry stack item
=======================================
--- /branches/bleeding_edge/src/isolate.h       Mon Jul  1 03:54:39 2013
+++ /branches/bleeding_edge/src/isolate.h       Wed Jul  3 04:40:30 2013
@@ -896,6 +896,7 @@
     ASSERT(handle_scope_implementer_);
     return handle_scope_implementer_;
   }
+  Zone* runtime_zone() { return &runtime_zone_; }

   UnicodeCache* unicode_cache() {
     return unicode_cache_;
@@ -1270,6 +1271,7 @@
   v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
   HandleScopeImplementer* handle_scope_implementer_;
   UnicodeCache* unicode_cache_;
+  Zone runtime_zone_;
   PreallocatedStorage in_use_list_;
   PreallocatedStorage free_list_;
   bool preallocated_storage_preallocated_;
=======================================
--- /branches/bleeding_edge/src/runtime.cc      Tue Jul  2 08:32:46 2013
+++ /branches/bleeding_edge/src/runtime.cc      Wed Jul  3 04:40:30 2013
@@ -3598,8 +3598,8 @@
   ASSERT(subject->IsFlat());
   ASSERT(replacement->IsFlat());

-  Zone zone(isolate);
-  ZoneList<int> indices(8, &zone);
+  ZoneScope zone_scope(isolate->runtime_zone());
+  ZoneList<int> indices(8, zone_scope.zone());
   ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
   String* pattern =
       String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -3608,7 +3608,7 @@
   int replacement_len = replacement->length();

   FindStringIndicesDispatch(
-      isolate, *subject, pattern, &indices, 0xffffffff, &zone);
+      isolate, *subject, pattern, &indices, 0xffffffff, zone_scope.zone());

   int matches = indices.length();
   if (matches == 0) return *subject;
@@ -3684,8 +3684,8 @@
   int subject_length = subject->length();

   // CompiledReplacement uses zone allocation.
-  Zone zone(isolate);
-  CompiledReplacement compiled_replacement(&zone);
+  ZoneScope zone_scope(isolate->runtime_zone());
+  CompiledReplacement compiled_replacement(zone_scope.zone());
   bool simple_replace = compiled_replacement.Compile(replacement,
                                                      capture_count,
                                                      subject_length);
@@ -4218,14 +4218,14 @@

   int capture_count = regexp->CaptureCount();

-  Zone zone(isolate);
-  ZoneList<int> offsets(8, &zone);
+  ZoneScope zone_scope(isolate->runtime_zone());
+  ZoneList<int> offsets(8, zone_scope.zone());

   while (true) {
     int32_t* match = global_cache.FetchNext();
     if (match == NULL) break;
-    offsets.Add(match[0], &zone);  // start
-    offsets.Add(match[1], &zone);  // end
+    offsets.Add(match[0], zone_scope.zone());  // start
+    offsets.Add(match[1], zone_scope.zone());  // end
   }

   if (global_cache.HasException()) return Failure::Exception();
@@ -6310,18 +6310,18 @@

   static const int kMaxInitialListCapacity = 16;

-  Zone zone(isolate);
+  ZoneScope zone_scope(isolate->runtime_zone());

   // Find (up to limit) indices of separator and end-of-string in subject
   int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
-  ZoneList<int> indices(initial_capacity, &zone);
+  ZoneList<int> indices(initial_capacity, zone_scope.zone());
   if (!pattern->IsFlat()) FlattenString(pattern);

   FindStringIndicesDispatch(isolate, *subject, *pattern,
-                            &indices, limit, &zone);
+                            &indices, limit, zone_scope.zone());

   if (static_cast<uint32_t>(indices.length()) < limit) {
-    indices.Add(subject_length, &zone);
+    indices.Add(subject_length, zone_scope.zone());
   }

   // The list indices now contains the end of each part to create.
=======================================
--- /branches/bleeding_edge/src/zone.cc Thu Jun 27 06:10:43 2013
+++ /branches/bleeding_edge/src/zone.cc Wed Jul  3 04:40:30 2013
@@ -78,31 +78,82 @@


 Zone::~Zone() {
+  DeleteAll();
+  DeleteKeptSegment();
+
+  ASSERT(segment_bytes_allocated_ == 0);
+}
+
+
+void Zone::DeleteAll() {
 #ifdef DEBUG
   // Constant byte value used for zapping dead memory in debug mode.
   static const unsigned char kZapDeadByte = 0xcd;
 #endif

-  // Traverse the chained list of segments, zapping
-  // (in debug mode) and freeing every segment
-  Segment* current = segment_head_;
-  while (current != NULL) {
+  // Find a segment with a suitable size to keep around.
+  Segment* keep = segment_head_;
+  while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
+    keep = keep->next();
+  }
+
+  // Traverse the chained list of segments, zapping (in debug mode)
+  // and freeing every segment except the one we wish to keep.
+  for (Segment* current = segment_head_; current != NULL; ) {
     Segment* next = current->next();
-    int size = current->size();
+    if (current == keep) {
+      // Unlink the segment we wish to keep from the list.
+      current->clear_next();
+    } else {
+      int size = current->size();
 #ifdef DEBUG
-    // Zap the entire current segment (including the header).
-    memset(current, kZapDeadByte, size);
+      // Zap the entire current segment (including the header).
+      memset(current, kZapDeadByte, size);
 #endif
-    DeleteSegment(current, size);
+      DeleteSegment(current, size);
+    }
     current = next;
   }

-  // We must clear the position and limit to force
-  // a new segment to be allocated on demand.
-  position_ = limit_ = 0;
+  // If we have found a segment we want to keep, we must recompute the
+  // variables 'position' and 'limit' to prepare for future allocate
+  // attempts. Otherwise, we must clear the position and limit to
+  // force a new segment to be allocated on demand.
+  if (keep != NULL) {
+    Address start = keep->start();
+    position_ = RoundUp(start, kAlignment);
+    limit_ = keep->end();
+#ifdef DEBUG
+    // Zap the contents of the kept segment (but not the header).
+    memset(start, kZapDeadByte, keep->capacity());
+#endif
+  } else {
+    position_ = limit_ = 0;
+  }

-  // Update the head segment.
-  segment_head_ = NULL;
+  // Update the head segment to be the kept segment (if any).
+  segment_head_ = keep;
+}
+
+
+void Zone::DeleteKeptSegment() {
+#ifdef DEBUG
+  // Constant byte value used for zapping dead memory in debug mode.
+  static const unsigned char kZapDeadByte = 0xcd;
+#endif
+
+  ASSERT(segment_head_ == NULL || segment_head_->next() == NULL);
+  if (segment_head_ != NULL) {
+    int size = segment_head_->size();
+#ifdef DEBUG
+    // Zap the entire kept segment (including the header).
+    memset(segment_head_, kZapDeadByte, size);
+#endif
+    DeleteSegment(segment_head_, size);
+    segment_head_ = NULL;
+  }
+
+  ASSERT(segment_bytes_allocated_ == 0);
 }


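The caching policy in DeleteAll()/DeleteKeptSegment() is small enough to model
in isolation. The stand-alone sketch below is not V8 code (ToyZone, Segment and
the member names are invented for illustration; only the 64 KB limit mirrors
kMaximumKeptSegmentSize): a reset frees every segment except the first one that
is small enough to keep, and teardown releases that cached segment as well.

  #include <cstddef>
  #include <cstdlib>

  // Simplified stand-in for the segment caching in v8::internal::Zone.
  struct Segment {
    Segment* next;
    size_t size;  // total size of the backing allocation
  };

  class ToyZone {
   public:
    ToyZone() : head_(NULL) {}

    static const size_t kMaximumKeptSegmentSize = 64 * 1024;

    // Free every segment except (at most) one that is small enough to keep,
    // so the next burst of allocations can reuse it without calling malloc().
    void DeleteAll() {
      Segment* keep = head_;
      while (keep != NULL && keep->size > kMaximumKeptSegmentSize) {
        keep = keep->next;
      }
      for (Segment* current = head_; current != NULL; ) {
        Segment* next = current->next;
        if (current == keep) {
          current->next = NULL;  // unlink the kept segment from the chain
        } else {
          free(current);
        }
        current = next;
      }
      head_ = keep;  // NULL if no segment was small enough to keep
    }

    // Release the cached segment; after this the zone holds no memory.
    void DeleteKeptSegment() {
      if (head_ != NULL) {
        free(head_);
        head_ = NULL;
      }
    }

   private:
    Segment* head_;
  };

Keeping the first sufficiently small segment rather than the largest one bounds
the memory a dormant runtime zone holds on to at 64 KB, which looks like the
trade-off the patch makes between allocation cost and idle footprint.
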
=======================================
--- /branches/bleeding_edge/src/zone.h  Thu Jun 27 06:10:43 2013
+++ /branches/bleeding_edge/src/zone.h  Wed Jul  3 04:40:30 2013
@@ -66,6 +66,14 @@
   template <typename T>
   inline T* NewArray(int length);

+  // Deletes all objects and free all memory allocated in the Zone. Keeps one
+  // small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
+  void DeleteAll();
+
+  // Deletes the last small segment kept around by DeleteAll(). You
+  // may no longer allocate in the Zone after a call to this method.
+  void DeleteKeptSegment();
+
   // Returns true if more memory has been allocated in zones than
   // the limit allows.
   inline bool excess_allocation();
@@ -90,6 +98,9 @@
   // Never allocate segments larger than this size in bytes.
   static const int kMaximumSegmentSize = 1 * MB;

+  // Never keep segments larger than this size in bytes around.
+  static const int kMaximumKeptSegmentSize = 64 * KB;
+
   // Report zone excess when allocation exceeds this limit.
   static const int kExcessLimit = 256 * MB;

@@ -109,10 +120,10 @@

   // Creates a new segment, sets its size, and pushes it to the front
   // of the segment chain. Returns the new segment.
-  Segment* NewSegment(int size);
+  INLINE(Segment* NewSegment(int size));

   // Deletes the given segment. Does not touch the segment chain.
-  void DeleteSegment(Segment* segment, int size);
+  INLINE(void DeleteSegment(Segment* segment, int size));

   // The free region in the current (front) segment is represented as
   // the half-open interval [position, limit). The 'position' variable
@@ -145,6 +156,20 @@
 };


+// The ZoneScope is used to automatically call DeleteAll() on a
+// Zone when the ZoneScope is destroyed (i.e. goes out of scope)
+struct ZoneScope {
+ public:
+  explicit ZoneScope(Zone* zone) : zone_(zone) { }
+  ~ZoneScope() { zone_->DeleteAll(); }
+
+  Zone* zone() { return zone_; }
+
+ private:
+  Zone* zone_;
+};
+
+
 // The ZoneAllocationPolicy is used to specialize generic data
 // structures to allocate themselves and their elements in the Zone.
 struct ZoneAllocationPolicy {
