Revision: 22471
Author:   [email protected]
Date:     Fri Jul 18 11:04:20 2014 UTC
Log:      Concurrent/parallel precise sweeping.

BUG=
[email protected]

Review URL: https://codereview.chromium.org/398333002
http://code.google.com/p/v8/source/detail?r=22471

Modified:
 /branches/bleeding_edge/src/flag-definitions.h
 /branches/bleeding_edge/src/heap-snapshot-generator.cc
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/mark-compact.h
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/src/spaces.h
 /branches/bleeding_edge/test/cctest/test-heap.cc

=======================================
--- /branches/bleeding_edge/src/flag-definitions.h Fri Jul 18 07:17:21 2014 UTC
+++ /branches/bleeding_edge/src/flag-definitions.h Fri Jul 18 11:04:20 2014 UTC
@@ -516,6 +516,7 @@
             "trace progress of the incremental marking")
 DEFINE_BOOL(track_gc_object_stats, false,
             "track object counts and memory usage")
+DEFINE_BOOL(always_precise_sweeping, false, "always sweep precisely")
 DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping")
 DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping")
 DEFINE_INT(sweeper_threads, 0,
=======================================
--- /branches/bleeding_edge/src/heap-snapshot-generator.cc Thu Jul 17 09:44:37 2014 UTC
+++ /branches/bleeding_edge/src/heap-snapshot-generator.cc Fri Jul 18 11:04:20 2014 UTC
@@ -2600,12 +2600,12 @@

 #ifdef VERIFY_HEAP
   Heap* debug_heap = heap_;
-  CHECK(debug_heap->old_data_space()->is_iterable());
-  CHECK(debug_heap->old_pointer_space()->is_iterable());
-  CHECK(debug_heap->code_space()->is_iterable());
-  CHECK(debug_heap->cell_space()->is_iterable());
-  CHECK(debug_heap->property_cell_space()->is_iterable());
-  CHECK(debug_heap->map_space()->is_iterable());
+  CHECK(debug_heap->old_data_space()->swept_precisely());
+  CHECK(debug_heap->old_pointer_space()->swept_precisely());
+  CHECK(debug_heap->code_space()->swept_precisely());
+  CHECK(debug_heap->cell_space()->swept_precisely());
+  CHECK(debug_heap->property_cell_space()->swept_precisely());
+  CHECK(debug_heap->map_space()->swept_precisely());
 #endif

 #ifdef VERIFY_HEAP
=======================================
--- /branches/bleeding_edge/src/heap.cc Fri Jul 18 08:55:40 2014 UTC
+++ /branches/bleeding_edge/src/heap.cc Fri Jul 18 11:04:20 2014 UTC
@@ -1297,7 +1297,7 @@

   // The old data space was normally swept conservatively so that the iterator
   // doesn't work, so we normally skip the next bit.
-  if (heap->old_data_space()->is_iterable()) {
+  if (heap->old_data_space()->swept_precisely()) {
     HeapObjectIterator data_it(heap->old_data_space());
     for (HeapObject* object = data_it.Next();
          object != NULL; object = data_it.Next())
@@ -3304,8 +3304,7 @@
   // pages is set after sweeping all pages.
   return (!is_in_old_pointer_space && !is_in_old_data_space) ||
          page->WasSwept() ||
-         (page->parallel_sweeping() <=
-             MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+         (page->parallel_sweeping() <= MemoryChunk::SWEEPING_FINALIZE);
 }


@@ -4209,8 +4208,8 @@


 bool Heap::IsHeapIterable() {
-  return (old_pointer_space()->is_iterable() &&
-          old_data_space()->is_iterable() &&
+  return (old_pointer_space()->swept_precisely() &&
+          old_data_space()->swept_precisely() &&
           new_space_top_after_last_gc_ == new_space()->top());
 }

=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Fri Jul 18 08:55:40 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Fri Jul 18 11:04:20 2014 UTC
@@ -207,7 +207,7 @@
   // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently
   // swept pages.
   if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
-      !space->is_iterable()) return;
+      !space->swept_precisely()) return;
   PageIterator it(space);

   while (it.has_next()) {
@@ -2044,7 +2044,7 @@
 static void DiscoverGreyObjectsInSpace(Heap* heap,
                                        MarkingDeque* marking_deque,
                                        PagedSpace* space) {
-  if (space->is_iterable()) {
+  if (space->swept_precisely()) {
     HeapObjectIterator it(space);
     DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
   } else {
@@ -3127,7 +3127,7 @@
     ASSERT(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
     ASSERT(static_cast<int>(p->parallel_sweeping()) ==
-           MemoryChunk::PARALLEL_SWEEPING_DONE);
+           MemoryChunk::SWEEPING_DONE);
     if (p->IsEvacuationCandidate()) {
       // During compaction we might have to request a new page.
       // Check that space still have room for that.
@@ -3221,6 +3221,21 @@
   IGNORE_FREE_SPACE,
   ZAP_FREE_SPACE
 };
+
+
+template<MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space,
+                     FreeList* free_list,
+                     Address start,
+                     int size) {
+  if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
+    ASSERT(free_list == NULL);
+    return space->Free(start, size);
+  } else {
+    // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
+    return size - free_list->Free(start, size);
+  }
+}


 // Sweep a space precisely.  After this has been done the space can
@@ -3229,23 +3244,32 @@
 // over it.  Map space is swept precisely, because it is not compacted.
 // Slots in live objects pointing into evacuation candidates are updated
 // if requested.
+// Returns the size of the biggest continuous freed memory chunk in bytes.
 template<SweepingMode sweeping_mode,
+         MarkCompactCollector::SweepingParallelism parallelism,
          SkipListRebuildingMode skip_list_mode,
          FreeSpaceTreatmentMode free_space_mode>
-static void SweepPrecisely(PagedSpace* space,
+static int SweepPrecisely(PagedSpace* space,
+                           FreeList* free_list,
                            Page* p,
                            ObjectVisitor* v) {
   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
   ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
             space->identity() == CODE_SPACE);
   ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
+  ASSERT(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
+         sweeping_mode == SWEEP_ONLY);

   double start_time = 0.0;
   if (FLAG_print_cumulative_gc_stat) {
     start_time = base::OS::TimeCurrentMillis();
   }

-  p->MarkSweptPrecisely();
+  if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
+    p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
+  } else {
+    p->MarkSweptPrecisely();
+  }

   Address free_start = p->area_start();
   ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
@@ -3256,6 +3280,9 @@
   if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
     skip_list->Clear();
   }
+
+  intptr_t freed_bytes = 0;
+  intptr_t max_freed_bytes = 0;

   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
     Address cell_base = it.CurrentCellBase();
@@ -3265,10 +3292,12 @@
     for ( ; live_objects != 0; live_objects--) {
       Address free_end = cell_base + offsets[live_index++] * kPointerSize;
       if (free_end != free_start) {
+        int size = static_cast<int>(free_end - free_start);
         if (free_space_mode == ZAP_FREE_SPACE) {
-          memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
+          memset(free_start, 0xcc, size);
         }
-        space->Free(free_start, static_cast<int>(free_end - free_start));
+        freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
 #ifdef ENABLE_GDB_JIT_INTERFACE
         if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
           GDBJITInterface::RemoveCodeRange(free_start, free_end);
@@ -3299,10 +3328,12 @@
     *cell = 0;
   }
   if (free_start != p->area_end()) {
+    int size = static_cast<int>(p->area_end() - free_start);
     if (free_space_mode == ZAP_FREE_SPACE) {
-      memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
+      memset(free_start, 0xcc, size);
     }
-    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
+    freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
 #ifdef ENABLE_GDB_JIT_INTERFACE
     if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
       GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
@@ -3313,6 +3344,7 @@
   if (FLAG_print_cumulative_gc_stat) {
     space->heap()->AddSweepingTime(base::OS::TimeCurrentMillis() - start_time);
   }
+  return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }


@@ -3550,21 +3582,24 @@
             break;
           case OLD_POINTER_SPACE:
             SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+                           SWEEP_ON_MAIN_THREAD,
                            IGNORE_SKIP_LIST,
                            IGNORE_FREE_SPACE>(
-                space, p, &updating_visitor);
+                space, NULL, p, &updating_visitor);
             break;
           case CODE_SPACE:
             if (FLAG_zap_code_space) {
               SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+                             SWEEP_ON_MAIN_THREAD,
                              REBUILD_SKIP_LIST,
                              ZAP_FREE_SPACE>(
-                  space, p, &updating_visitor);
+                  space, NULL, p, &updating_visitor);
             } else {
               SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+                             SWEEP_ON_MAIN_THREAD,
                              REBUILD_SKIP_LIST,
                              IGNORE_FREE_SPACE>(
-                  space, p, &updating_visitor);
+                  space, NULL, p, &updating_visitor);
             }
             break;
           default:
@@ -3935,20 +3970,6 @@
   USE(live_objects);
   return block_address + offsets[0] * kPointerSize;
 }
-
-
-template<MarkCompactCollector::SweepingParallelism mode>
-static intptr_t Free(PagedSpace* space,
-                     FreeList* free_list,
-                     Address start,
-                     int size) {
-  if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
-    return space->Free(start, size);
-  } else {
-    // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
-    return size - free_list->Free(start, size);
-  }
-}


 // Force instantiation of templatized SweepConservatively method for
@@ -3973,9 +3994,9 @@
 // memory that can be ignored when scanning.  Dead objects other than free
 // spaces will not contain the free space map.
 template<MarkCompactCollector::SweepingParallelism mode>
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
-                                                   FreeList* free_list,
-                                                   Page* p) {
+int MarkCompactCollector::SweepConservatively(PagedSpace* space,
+                                              FreeList* free_list,
+                                              Page* p) {
   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
   ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
          free_list != NULL) ||
@@ -3985,7 +4006,7 @@
   // When parallel sweeping is active, the page will be marked after
   // sweeping by the main thread.
   if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
-    p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+    p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
   } else {
     p->MarkSweptConservatively();
   }
@@ -4082,10 +4103,17 @@
   int max_freed_overall = 0;
   while (it.has_next()) {
     Page* p = it.next();
-
     if (p->TryParallelSweeping()) {
-      max_freed = static_cast<int>(SweepConservatively<SWEEP_IN_PARALLEL>(
-          space, &private_free_list, p));
+      if (space->swept_precisely()) {
+        max_freed = SweepPrecisely<SWEEP_ONLY,
+                                   SWEEP_IN_PARALLEL,
+                                   IGNORE_SKIP_LIST,
+                                   IGNORE_FREE_SPACE>(
+                                       space, &private_free_list, p, NULL);
+      } else {
+        max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
+            space, &private_free_list, p);
+      }
       ASSERT(max_freed >= 0);
       free_list->Concatenate(&private_free_list);
       if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
@@ -4100,7 +4128,9 @@


void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
-  space->set_is_iterable(sweeper == PRECISE);
+  space->set_swept_precisely(sweeper == PRECISE ||
+                             sweeper == CONCURRENT_PRECISE ||
+                             sweeper == PARALLEL_PRECISE);
   space->ClearStats();

// We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4115,7 +4145,7 @@

   while (it.has_next()) {
     Page* p = it.next();
-    ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
+    ASSERT(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);

     // Clear sweeping flags indicating that marking bits are still intact.
     p->ClearSweptPrecisely();
@@ -4160,26 +4190,55 @@
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
                    reinterpret_cast<intptr_t>(p));
           }
-          p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
+          p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
           space->IncreaseUnsweptFreeBytes(p);
         }
         space->set_end_of_unswept_pages(p);
         break;
       }
+      case CONCURRENT_PRECISE:
+      case PARALLEL_PRECISE:
+        if (!parallel_sweeping_active) {
+          if (FLAG_gc_verbose) {
+            PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
+                   reinterpret_cast<intptr_t>(p));
+          }
+          SweepPrecisely<SWEEP_ONLY,
+                         SWEEP_ON_MAIN_THREAD,
+                         IGNORE_SKIP_LIST,
+                         IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+          pages_swept++;
+          parallel_sweeping_active = true;
+        } else {
+          if (FLAG_gc_verbose) {
+            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+                   reinterpret_cast<intptr_t>(p));
+          }
+          p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
+          space->IncreaseUnsweptFreeBytes(p);
+        }
+        space->set_end_of_unswept_pages(p);
+        break;
       case PRECISE: {
         if (FLAG_gc_verbose) {
           PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
                  reinterpret_cast<intptr_t>(p));
         }
         if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
-          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
-              space, p, NULL);
+          SweepPrecisely<SWEEP_ONLY,
+                         SWEEP_ON_MAIN_THREAD,
+                         REBUILD_SKIP_LIST,
+                         ZAP_FREE_SPACE>(space, NULL, p, NULL);
         } else if (space->identity() == CODE_SPACE) {
-          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
-              space, p, NULL);
+          SweepPrecisely<SWEEP_ONLY,
+                         SWEEP_ON_MAIN_THREAD,
+                         REBUILD_SKIP_LIST,
+                         IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         } else {
-          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
-              space, p, NULL);
+          SweepPrecisely<SWEEP_ONLY,
+                         SWEEP_ON_MAIN_THREAD,
+                         IGNORE_SKIP_LIST,
+                         IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         }
         pages_swept++;
         break;
@@ -4199,6 +4258,21 @@
   // Give pages that are queued to be freed back to the OS.
   heap()->FreeQueuedChunks();
 }
+
+
+static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
+  return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
+         type == MarkCompactCollector::CONCURRENT_CONSERVATIVE ||
+         type == MarkCompactCollector::PARALLEL_PRECISE ||
+         type == MarkCompactCollector::CONCURRENT_PRECISE;
+}
+
+
+static bool ShouldWaitForSweeperThreads(
+    MarkCompactCollector::SweeperType type) {
+  return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
+         type == MarkCompactCollector::PARALLEL_PRECISE;
+}


 void MarkCompactCollector::SweepSpaces() {
@@ -4209,7 +4283,12 @@
   SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
   if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
   if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
-
+  if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) {
+    how_to_sweep = PARALLEL_PRECISE;
+  }
+  if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) {
+    how_to_sweep = CONCURRENT_PRECISE;
+  }
   if (sweep_precisely_) how_to_sweep = PRECISE;

   MoveEvacuationCandidatesToEndOfPagesList();
@@ -4226,12 +4305,11 @@
       SweepSpace(heap()->old_data_space(), how_to_sweep);
     }

-    if (how_to_sweep == PARALLEL_CONSERVATIVE ||
-        how_to_sweep == CONCURRENT_CONSERVATIVE) {
+    if (ShouldStartSweeperThreads(how_to_sweep)) {
       StartSweeperThreads();
     }

-    if (how_to_sweep == PARALLEL_CONSERVATIVE) {
+    if (ShouldWaitForSweeperThreads(how_to_sweep)) {
       EnsureSweepingCompleted();
     }
   }
@@ -4270,11 +4348,15 @@
   PageIterator it(space);
   while (it.has_next()) {
     Page* p = it.next();
-    if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
-      p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
-      p->MarkSweptConservatively();
+    if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
+      p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
+      if (space->swept_precisely()) {
+        p->MarkSweptPrecisely();
+      } else {
+        p->MarkSweptConservatively();
+      }
     }
-    ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
+    ASSERT(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
   }
 }

=======================================
--- /branches/bleeding_edge/src/mark-compact.h  Fri Jul 18 08:55:40 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.h  Fri Jul 18 11:04:20 2014 UTC
@@ -567,6 +567,8 @@
   enum SweeperType {
     PARALLEL_CONSERVATIVE,
     CONCURRENT_CONSERVATIVE,
+    PARALLEL_PRECISE,
+    CONCURRENT_PRECISE,
     PRECISE
   };

@@ -586,7 +588,7 @@
   // Sweep a single page from the given space conservatively.
// Returns the size of the biggest continuous freed memory chunk in bytes.
   template<SweepingParallelism type>
-  static intptr_t SweepConservatively(PagedSpace* space,
+  static int SweepConservatively(PagedSpace* space,
                                       FreeList* free_list,
                                       Page* p);

=======================================
--- /branches/bleeding_edge/src/spaces.cc       Thu Jul 17 10:03:30 2014 UTC
+++ /branches/bleeding_edge/src/spaces.cc       Fri Jul 18 11:04:20 2014 UTC
@@ -67,7 +67,7 @@
                                     HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
   // Check that we actually can iterate this space.
-  ASSERT(space->is_iterable());
+  ASSERT(space->swept_precisely());

   space_ = space;
   cur_addr_ = cur;
@@ -479,7 +479,7 @@
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_ = static_cast<int>(area_start - base);
-  chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
+  chunk->set_parallel_sweeping(SWEEPING_DONE);
   chunk->available_in_small_free_list_ = 0;
   chunk->available_in_medium_free_list_ = 0;
   chunk->available_in_large_free_list_ = 0;
@@ -935,7 +935,7 @@
                        Executability executable)
     : Space(heap, id, executable),
       free_list_(this),
-      is_iterable_(true),
+      swept_precisely_(true),
       unswept_free_bytes_(0),
       end_of_unswept_pages_(NULL) {
   if (id == CODE_SPACE) {
@@ -1157,7 +1157,7 @@
 #ifdef VERIFY_HEAP
 void PagedSpace::Verify(ObjectVisitor* visitor) {
   // We can only iterate over the pages if they were swept precisely.
-  if (!is_iterable_) return;
+  if (!swept_precisely_) return;

   bool allocation_pointer_found_in_space =
       (allocation_info_.top() == allocation_info_.limit());
@@ -2775,7 +2775,7 @@
              ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);

-  if (!is_iterable_) return;
+  if (!swept_precisely_) return;
   ClearHistograms(heap()->isolate());
   HeapObjectIterator obj_it(this);
   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
=======================================
--- /branches/bleeding_edge/src/spaces.h        Wed Jul 16 13:36:46 2014 UTC
+++ /branches/bleeding_edge/src/spaces.h        Fri Jul 18 11:04:20 2014 UTC
@@ -450,18 +450,17 @@
   intptr_t GetFlags() { return flags_; }


-  // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
-  // sweeping must not be performed on that page.
-  // PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
-  // page and will not touch the page memory anymore.
-  // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
-  // sweeper thread.
-  // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
+  // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
+  // not be performed on that page.
+  // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
+  // not touch the page memory anymore.
+  // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
+  // SWEEPING_PENDING - This page is ready for parallel sweeping.
   enum ParallelSweepingState {
-    PARALLEL_SWEEPING_DONE,
-    PARALLEL_SWEEPING_FINALIZE,
-    PARALLEL_SWEEPING_IN_PROGRESS,
-    PARALLEL_SWEEPING_PENDING
+    SWEEPING_DONE,
+    SWEEPING_FINALIZE,
+    SWEEPING_IN_PROGRESS,
+    SWEEPING_PENDING
   };

   ParallelSweepingState parallel_sweeping() {
@@ -475,8 +474,8 @@

   bool TryParallelSweeping() {
     return base::Acquire_CompareAndSwap(
-               &parallel_sweeping_, PARALLEL_SWEEPING_PENDING,
-               PARALLEL_SWEEPING_IN_PROGRESS) == PARALLEL_SWEEPING_PENDING;
+        &parallel_sweeping_, SWEEPING_PENDING, SWEEPING_IN_PROGRESS) ==
+            SWEEPING_PENDING;
   }

   // Manage live byte count (count of bytes known to be live,
@@ -1917,8 +1916,8 @@
   static void ResetCodeStatistics(Isolate* isolate);
 #endif

-  bool is_iterable() { return is_iterable_; }
-  void set_is_iterable(bool b) { is_iterable_ = b; }
+  bool swept_precisely() { return swept_precisely_; }
+  void set_swept_precisely(bool b) { swept_precisely_ = b; }

   // Evacuation candidates are swept by evacuator.  Needs to return a valid
   // result before _and_ after evacuation has finished.
@@ -2002,7 +2001,7 @@
   AllocationInfo allocation_info_;

   // This space was swept precisely, hence it is iterable.
-  bool is_iterable_;
+  bool swept_precisely_;

   // The number of free bytes which could be reclaimed by advancing the
// concurrent sweeper threads. This is only an estimation because concurrent
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Tue Jul 15 08:24:44 2014 UTC
+++ /branches/bleeding_edge/test/cctest/test-heap.cc Fri Jul 18 11:04:20 2014 UTC
@@ -4321,7 +4321,7 @@
   CHECK(heap->InOldPointerSpace(o->elements()));
   CHECK(heap->InOldPointerSpace(*o));
   Page* page = Page::FromAddress(o->elements()->address());
-  CHECK(page->parallel_sweeping() <= MemoryChunk::PARALLEL_SWEEPING_FINALIZE ||
+  CHECK(page->parallel_sweeping() <= MemoryChunk::SWEEPING_FINALIZE ||
         Marking::IsBlack(Marking::MarkBitFrom(o->elements())));
 }

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to