Revision: 5299
Author: sgje...@chromium.org
Date: Wed Aug 18 03:45:15 2010
Log: Tracks the maximum usage of executable memory allocated by V8 and allows the histogram data to be gathered and reported.

This patch contains only the usage tracking logic from 3030048 (already LGTM'd). It does not implement the RWX Limit.

BUG=52122
TEST=Check the V8.ExecutableMemoryMax histogram in the Chrome about:histograms page

Review URL: http://codereview.chromium.org/3161015

http://code.google.com/p/v8/source/detail?r=5299
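
A minimal standalone sketch of the tracking pattern this patch adds in MemoryAllocator::AllocateRawMemory and FreeRawMemory: a running count of executable bytes plus a high-water mark, with a histogram sample emitted only when a new maximum is reached. The names OnExecutableAllocation, OnExecutableFree, and ReportExecutableMemoryMax are hypothetical stand-ins; the real code reports through StatsTable::CreateHistogram("V8.ExecutableMemoryMax", ...) and StatsTable::AddHistogramSample.

// Simplified sketch of the executable-memory high-water-mark tracking.
#include <cstdio>

namespace {

int size_executable = 0;               // Currently allocated executable bytes.
int size_executable_max_observed = 0;  // High-water mark reported so far.

// Stand-in for StatsTable::AddHistogramSample on the V8.ExecutableMemoryMax
// histogram.
void ReportExecutableMemoryMax(int bytes) {
  std::printf("V8.ExecutableMemoryMax sample: %d bytes\n", bytes);
}

void OnExecutableAllocation(int bytes) {
  size_executable += bytes;
  // Only emit a sample when a new maximum is reached, so the histogram
  // records the peak executable footprint rather than every change.
  if (size_executable > size_executable_max_observed) {
    size_executable_max_observed = size_executable;
    ReportExecutableMemoryMax(size_executable);
  }
}

void OnExecutableFree(int bytes) {
  size_executable -= bytes;  // Frees lower the current count, not the maximum.
}

}  // namespace

int main() {
  OnExecutableAllocation(1 << 20);    // new max: 1 MB, sample emitted
  OnExecutableAllocation(2 << 20);    // new max: 3 MB, sample emitted
  OnExecutableFree(1 << 20);          // current drops to 2 MB, max stays 3 MB
  OnExecutableAllocation(512 << 10);  // 2.5 MB, below max: no sample
  return 0;
}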

Modified:
 /branches/bleeding_edge/src/spaces-inl.h
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/src/spaces.h

=======================================
--- /branches/bleeding_edge/src/spaces-inl.h    Fri Jun 11 10:03:19 2010
+++ /branches/bleeding_edge/src/spaces-inl.h    Wed Aug 18 03:45:15 2010
@@ -220,21 +220,22 @@


 void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
-  watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
+  watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
 }


 bool Page::IsWatermarkValid() {
-  return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
+  return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_;
 }


 void Page::InvalidateWatermark(bool value) {
   if (value) {
-    flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
+    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+             watermark_invalidated_mark_;
   } else {
-    flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
-             (watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
+    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+             (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED));
   }

   ASSERT(IsWatermarkValid() == !value);
@@ -242,15 +243,15 @@


 bool Page::GetPageFlag(PageFlag flag) {
-  return (flags_ & flag) != 0;
+  return (flags_ & (1 << flag)) != 0;
 }


 void Page::SetPageFlag(PageFlag flag, bool value) {
   if (value) {
-    flags_ |= flag;
+    flags_ |= (1 << flag);
   } else {
-    flags_ &= ~flag;
+    flags_ &= ~(1 << flag);
   }
 }

@@ -288,6 +289,15 @@
 void Page::SetIsLargeObjectPage(bool is_large_object_page) {
   SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
 }
+
+bool Page::IsPageExecutable() {
+  return GetPageFlag(IS_EXECUTABLE);
+}
+
+
+void Page::SetIsPageExecutable(bool is_page_executable) {
+  SetPageFlag(IS_EXECUTABLE, is_page_executable);
+}


// -----------------------------------------------------------------------------
=======================================
--- /branches/bleeding_edge/src/spaces.cc       Mon Jul 19 03:02:11 2010
+++ /branches/bleeding_edge/src/spaces.cc       Wed Aug 18 03:45:15 2010
@@ -41,7 +41,7 @@
          && (info).top <= (space).high()              \
          && (info).limit == (space).high())

-intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
+intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;

// ----------------------------------------------------------------------------
 // HeapObjectIterator
@@ -266,6 +266,7 @@
 //
 int MemoryAllocator::capacity_   = 0;
 int MemoryAllocator::size_       = 0;
+int MemoryAllocator::size_executable_ = 0;

 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

@@ -292,6 +293,8 @@
 }


+void *executable_memory_histogram = NULL;
+
 bool MemoryAllocator::Setup(int capacity) {
   capacity_ = RoundUp(capacity, Page::kPageSize);

@@ -308,6 +311,9 @@
   if (max_nof_chunks_ > kMaxNofChunks) return false;

   size_ = 0;
+  size_executable_ = 0;
+  executable_memory_histogram =
+      StatsTable::CreateHistogram("V8.ExecutableMemoryMax", 0, MB * 512, 50);
   ChunkInfo info;  // uninitialized element.
   for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
     chunks_.Add(info);
@@ -353,6 +359,16 @@
   }
   int alloced = static_cast<int>(*allocated);
   size_ += alloced;
+
+  if (executable == EXECUTABLE) {
+    size_executable_ += alloced;
+    static int size_executable_max_observed_ = 0;
+    if (size_executable_max_observed_ < size_executable_) {
+      size_executable_max_observed_ = size_executable_;
+      StatsTable::AddHistogramSample(executable_memory_histogram,
+          size_executable_);
+    }
+  }
 #ifdef DEBUG
   ZapBlock(reinterpret_cast<Address>(mem), alloced);
 #endif
@@ -361,7 +377,9 @@
 }


-void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+void MemoryAllocator::FreeRawMemory(void* mem,
+                                    size_t length,
+                                    Executability executable) {
 #ifdef DEBUG
   ZapBlock(reinterpret_cast<Address>(mem), length);
 #endif
@@ -372,6 +390,7 @@
   }
   Counters::memory_allocated.Decrement(static_cast<int>(length));
   size_ -= static_cast<int>(length);
+  if (executable == EXECUTABLE) size_executable_ -= length;
   ASSERT(size_ >= 0);
 }

@@ -425,7 +444,7 @@

   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
   if (*allocated_pages == 0) {
-    FreeRawMemory(chunk, chunk_size);
+    FreeRawMemory(chunk, chunk_size, owner->executable());
     LOG(DeleteEvent("PagedChunk", chunk));
     return Page::FromAddress(NULL);
   }
@@ -591,7 +610,7 @@
     Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
   } else {
     LOG(DeleteEvent("PagedChunk", c.address()));
-    FreeRawMemory(c.address(), c.size());
+    FreeRawMemory(c.address(), c.size(), c.owner()->executable());
   }
   c.init(NULL, 0, NULL);
   Push(chunk_id);
@@ -2552,7 +2571,7 @@
   if (mem == NULL) return NULL;
   LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
   if (*chunk_size < requested) {
-    MemoryAllocator::FreeRawMemory(mem, *chunk_size);
+    MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
     LOG(DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }
@@ -2590,7 +2609,12 @@
     LargeObjectChunk* chunk = first_chunk_;
     first_chunk_ = first_chunk_->next();
     LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
-    MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
+    Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+    Executability executable =
+        page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
+    MemoryAllocator::FreeRawMemory(chunk->address(),
+                                   chunk->size(),
+                                   executable);
   }

   size_ = 0;
@@ -2654,6 +2678,7 @@
   // low order bit should already be clear.
   ASSERT((chunk_size & 0x1) == 0);
   page->SetIsLargeObjectPage(true);
+  page->SetIsPageExecutable(executable);
   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
   return HeapObject::FromAddress(object_address);
 }
@@ -2768,6 +2793,10 @@
       previous = current;
       current = current->next();
     } else {
+      Page* page = Page::FromAddress(RoundUp(current->address(),
+                                     Page::kPageSize));
+      Executability executable =
+          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
       Address chunk_address = current->address();
       size_t chunk_size = current->size();

@@ -2783,7 +2812,7 @@
       MarkCompactCollector::ReportDeleteIfNeeded(object);
       size_ -= static_cast<int>(chunk_size);
       page_count_--;
-      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
+      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
       LOG(DeleteEvent("LargeObjectChunk", chunk_address));
     }
   }
=======================================
--- /branches/bleeding_edge/src/spaces.h        Fri Jun 11 10:03:19 2010
+++ /branches/bleeding_edge/src/spaces.h        Wed Aug 18 03:45:15 2010
@@ -197,6 +197,10 @@

   inline void SetIsLargeObjectPage(bool is_large_object_page);

+  inline bool IsPageExecutable();
+
+  inline void SetIsPageExecutable(bool is_page_executable);
+
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
     int offset = static_cast<int>(a - address());
@@ -256,13 +260,16 @@
   STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);

   enum PageFlag {
-    IS_NORMAL_PAGE = 1 << 0,
-    WAS_IN_USE_BEFORE_MC = 1 << 1,
+    IS_NORMAL_PAGE = 0,
+    WAS_IN_USE_BEFORE_MC,

     // Page allocation watermark was bumped by preallocation during scavenge.
     // Correct watermark can be retrieved by CachedAllocationWatermark() method
-    WATERMARK_INVALIDATED = 1 << 2
+    WATERMARK_INVALIDATED,
+    IS_EXECUTABLE,
+    NUM_PAGE_FLAGS  // Must be last
   };
+  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;

   // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
   // scavenge we just invalidate the watermark on each old space page after
@@ -291,7 +298,7 @@

   inline void ClearGCFields();

-  static const int kAllocationWatermarkOffsetShift = 3;
+  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
   static const int kAllocationWatermarkOffsetBits  = kPageSizeBits + 1;
   static const uint32_t kAllocationWatermarkOffsetMask =
       ((1 << kAllocationWatermarkOffsetBits) - 1) <<
@@ -557,13 +564,18 @@
   static void* AllocateRawMemory(const size_t requested,
                                  size_t* allocated,
                                  Executability executable);
-  static void FreeRawMemory(void* buf, size_t length);
+  static void FreeRawMemory(void* buf,
+                            size_t length,
+                            Executability executable);

   // Returns the maximum available bytes of heaps.
   static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

   // Returns allocated spaces in bytes.
   static int Size() { return size_; }
+
+  // Returns allocated executable spaces in bytes.
+  static int SizeExecutable() { return size_executable_; }

   // Returns maximum available bytes that the old space can have.
   static int MaxAvailable() {
@@ -628,6 +640,8 @@

   // Allocated space size in bytes.
   static int size_;
+  // Allocated executable space size in bytes.
+  static int size_executable_;

   // The initial chunk of virtual memory.
   static VirtualMemory* initial_chunk_;
@@ -2058,7 +2072,7 @@
   LargeObjectChunk* next() { return next_; }
   void set_next(LargeObjectChunk* chunk) { next_ = chunk; }

-  size_t size() { return size_; }
+  size_t size() { return size_ & ~Page::kPageFlagMask; }
   void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }

   // Returns the object in this chunk.
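
Side note on the spaces.h hunk above: the PageFlag enum switches from pre-shifted masks (1 << 0, 1 << 1, ...) to plain bit indices, so every flag test and update now shifts explicitly, and NUM_PAGE_FLAGS / kPageFlagMask describe how many low bits the flags occupy. A minimal standalone sketch of that idiom, using a hypothetical PageBits struct in place of the real Page class:

#include <cassert>
#include <cstdint>

// Flags are bit positions, not masks; NUM_PAGE_FLAGS marks how many low bits
// are reserved, mirroring the new enum layout in spaces.h.
enum PageFlag {
  IS_NORMAL_PAGE = 0,
  WAS_IN_USE_BEFORE_MC,
  WATERMARK_INVALIDATED,
  IS_EXECUTABLE,
  NUM_PAGE_FLAGS  // Must be last
};

const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;

struct PageBits {
  intptr_t flags_ = 0;

  // Flag values are indices, so tests and updates shift them into masks.
  bool GetPageFlag(PageFlag flag) { return (flags_ & (1 << flag)) != 0; }

  void SetPageFlag(PageFlag flag, bool value) {
    if (value) {
      flags_ |= (1 << flag);
    } else {
      flags_ &= ~(1 << flag);
    }
  }
};

int main() {
  PageBits page;
  page.SetPageFlag(IS_EXECUTABLE, true);
  assert(page.GetPageFlag(IS_EXECUTABLE));
  assert(!page.GetPageFlag(WATERMARK_INVALIDATED));
  assert(kPageFlagMask == 0xF);  // four flags reserve the low four bits
  return 0;
}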
