Revision: 3004
Author: [email protected]
Date: Thu Oct  1 02:29:38 2009
Log: Allocate all executable code within a 2 GB code range.
Review URL: http://codereview.chromium.org/244022
http://code.google.com/p/v8/source/detail?r=3004

Modified:
  /branches/bleeding_edge/src/heap.cc
  /branches/bleeding_edge/src/heap.h
  /branches/bleeding_edge/src/spaces.cc
  /branches/bleeding_edge/src/spaces.h
  /branches/bleeding_edge/test/cctest/test-alloc.cc

=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Sep 30 05:25:46 2009
+++ /branches/bleeding_edge/src/heap.cc Thu Oct  1 02:29:38 2009
@@ -77,14 +77,17 @@
  int Heap::semispace_size_  = 512*KB;
  int Heap::old_generation_size_ = 128*MB;
  int Heap::initial_semispace_size_ = 128*KB;
+size_t Heap::code_range_size_ = 0;
  #elif defined(V8_TARGET_ARCH_X64)
  int Heap::semispace_size_  = 16*MB;
  int Heap::old_generation_size_ = 1*GB;
  int Heap::initial_semispace_size_ = 1*MB;
+size_t Heap::code_range_size_ = V8_UINT64_C(2)*GB;
  #else
  int Heap::semispace_size_  = 8*MB;
  int Heap::old_generation_size_ = 512*MB;
  int Heap::initial_semispace_size_ = 512*KB;
+size_t Heap::code_range_size_ = 0;
  #endif

  GCCallback Heap::global_gc_prologue_callback_ = NULL;
@@ -1923,6 +1926,7 @@
    // Initialize the object
    HeapObject::cast(result)->set_map(code_map());
    Code* code = Code::cast(result);
+  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
    code->set_instruction_size(desc.instr_size);
    code->set_relocation_size(desc.reloc_size);
    code->set_sinfo_size(sinfo_size);
@@ -1967,6 +1971,7 @@
              obj_size);
    // Relocate the copy.
    Code* new_code = Code::cast(result);
+  ASSERT(!CodeRange::exists() || CodeRange::contains(new_code->address()));
    new_code->Relocate(new_addr - old_addr);
    return new_code;
  }
@@ -3214,6 +3219,14 @@

    // Initialize the code space, set its maximum capacity to the old
    // generation size. It needs executable memory.
+  // On 64-bit platforms, we put all code objects in a 2 GB range of
+  // virtual address space, so that they can call each other with near
+  // calls.
+  if (code_range_size_ > 0) {
+    if (!CodeRange::Setup(code_range_size_)) {
+      return false;
+    }
+  }
+
    code_space_ =
        new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
    if (code_space_ == NULL) return false;
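
For context on the 2 GB figure: an x64 near call encodes its target as a
signed 32-bit displacement, so any two code objects inside a single 2 GB
reservation can always reach each other.  A standalone sketch of that
arithmetic (the helper below is illustrative only, not code from this
change):

  #include <cassert>
  #include <stdint.h>

  // Hypothetical helper: the rel32 displacement an x64 near call would
  // use between two addresses that both lie in one 2 GB code range.
  int32_t NearCallDisplacement(uintptr_t call_end, uintptr_t target) {
    intptr_t delta = static_cast<intptr_t>(target - call_end);
    // Both addresses sit in the same 2 GB reservation, so |delta| < 2 GB
    // and always fits in a signed 32-bit integer.
    assert(delta == static_cast<int32_t>(delta));
    return static_cast<int32_t>(delta);
  }
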
=======================================
--- /branches/bleeding_edge/src/heap.h  Wed Sep 16 06:41:24 2009
+++ /branches/bleeding_edge/src/heap.h  Thu Oct  1 02:29:38 2009
@@ -887,6 +887,7 @@
    static int initial_semispace_size_;
    static int young_generation_size_;
    static int old_generation_size_;
+  static size_t code_range_size_;

    // For keeping track of how much data has survived
    // scavenge since last new space expansion.
=======================================
--- /branches/bleeding_edge/src/spaces.cc       Mon Sep 21 03:35:47 2009
+++ /branches/bleeding_edge/src/spaces.cc       Thu Oct  1 02:29:38 2009
@@ -144,6 +144,128 @@
  Page::RSetState Page::rset_state_ = Page::IN_USE;
  #endif

+// -----------------------------------------------------------------------------
+// CodeRange
+
+List<CodeRange::FreeBlock> CodeRange::free_list_(0);
+List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
+int CodeRange::current_allocation_block_index_ = 0;
+VirtualMemory* CodeRange::code_range_ = NULL;
+
+
+bool CodeRange::Setup(const size_t requested) {
+  ASSERT(code_range_ == NULL);
+
+  code_range_ = new VirtualMemory(requested);
+  CHECK(code_range_ != NULL);
+  if (!code_range_->IsReserved()) {
+    delete code_range_;
+    code_range_ = NULL;
+    return false;
+  }
+
+  // We are sure that we have mapped a block of the requested size.
+  ASSERT(code_range_->size() == requested);
+  LOG(NewEvent("CodeRange", code_range_->address(), requested));
+  allocation_list_.Add(FreeBlock(code_range_->address(),
+                                 code_range_->size()));
+  current_allocation_block_index_ = 0;
+  return true;
+}
+
+
+int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
+                                       const FreeBlock* right) {
+  // The entire point of CodeRange is that the difference between two
+  // addresses in the range can be represented as a signed 32-bit int,
+  // so the cast is semantically correct.
+  return static_cast<int>(left->start - right->start);
+}
+
+
+void CodeRange::GetNextAllocationBlock(size_t requested) {
+  for (current_allocation_block_index_++;
+       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_++) {
+    if (requested <= allocation_list_[current_allocation_block_index_].size) {
+      return;  // Found a large enough allocation block.
+    }
+  }
+
+  // Sort and merge the free blocks on the free list and the allocation list.
+  free_list_.AddAll(allocation_list_);
+  allocation_list_.Clear();
+  free_list_.Sort(&CompareFreeBlockAddress);
+  for (int i = 0; i < free_list_.length();) {
+    FreeBlock merged = free_list_[i];
+    i++;
+    // Add adjacent free blocks to the current merged block.
+    while (i < free_list_.length() &&
+           free_list_[i].start == merged.start + merged.size) {
+      merged.size += free_list_[i].size;
+      i++;
+    }
+    if (merged.size > 0) {
+      allocation_list_.Add(merged);
+    }
+  }
+  free_list_.Clear();
+
+  for (current_allocation_block_index_ = 0;
+       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_++) {
+    if (requested <= allocation_list_[current_allocation_block_index_].size) {
+      return;  // Found a large enough allocation block.
+    }
+  }
+
+  // Code range is full or too fragmented.
+  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+}
+
+
+void* CodeRange::AllocateRawMemory(const size_t requested,
+                                   size_t* allocated) {
+  ASSERT(current_allocation_block_index_ < allocation_list_.length());
+  if (requested > allocation_list_[current_allocation_block_index_].size) {
+    // Find an allocation block large enough.  This function call may
+    // call V8::FatalProcessOutOfMemory if it cannot find a large enough
+    // block.
+    GetNextAllocationBlock(requested);
+  }
+  // Commit the requested memory at the start of the current allocation
+  // block.
+  *allocated = RoundUp(requested, Page::kPageSize);
+  FreeBlock current = allocation_list_[current_allocation_block_index_];
+  if (*allocated >= current.size - Page::kPageSize) {
+    // Don't leave a small free block, useless for a large object or chunk.
+    *allocated = current.size;
+  }
+  ASSERT(*allocated <= current.size);
+  if (!code_range_->Commit(current.start, *allocated, true)) {
+    *allocated = 0;
+    return NULL;
+  }
+  allocation_list_[current_allocation_block_index_].start += *allocated;
+  allocation_list_[current_allocation_block_index_].size -= *allocated;
+  if (*allocated == current.size) {
+    GetNextAllocationBlock(0);  // This block is used up, get the next one.
+  }
+  return current.start;
+}
+
+
+void CodeRange::FreeRawMemory(void* address, size_t length) {
+  free_list_.Add(FreeBlock(address, length));
+  code_range_->Uncommit(address, length);
+}
+
+
+void CodeRange::TearDown() {
+  delete code_range_;  // Frees all memory in the virtual memory range.
+  code_range_ = NULL;
+  free_list_.Free();
+  allocation_list_.Free();
+}
+
+
  // -----------------------------------------------------------------------------
  // MemoryAllocator
  //
@@ -226,8 +348,12 @@
                                           size_t* allocated,
                                           Executability executable) {
    if (size_ + static_cast<int>(requested) > capacity_) return NULL;
-
-  void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
+  void* mem;
+  if (executable == EXECUTABLE && CodeRange::exists()) {
+    mem = CodeRange::AllocateRawMemory(requested, allocated);
+  } else {
+    mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
+  }
    int alloced = *allocated;
    size_ += alloced;
    Counters::memory_allocated.Increment(alloced);
@@ -236,7 +362,11 @@


  void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
-  OS::Free(mem, length);
+  if (CodeRange::contains(static_cast<Address>(mem))) {
+    CodeRange::FreeRawMemory(mem, length);
+  } else {
+    OS::Free(mem, length);
+  }
    Counters::memory_allocated.Decrement(length);
    size_ -= length;
    ASSERT(size_ >= 0);
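
The block recycling in CodeRange::GetNextAllocationBlock above is
address-ordered coalescing: the free list is appended to the allocation
list, sorted by start address, and runs of adjacent blocks are merged.  A
self-contained sketch of the same pass, using std::vector in place of V8's
List (illustrative only):

  #include <algorithm>
  #include <stddef.h>
  #include <stdint.h>
  #include <vector>

  struct FreeBlock {
    uintptr_t start;
    size_t size;
  };

  static bool ByStartAddress(const FreeBlock& a, const FreeBlock& b) {
    return a.start < b.start;
  }

  // Sort blocks by address, then merge each block into the previous one
  // whenever the two are exactly adjacent.
  std::vector<FreeBlock> Coalesce(std::vector<FreeBlock> blocks) {
    std::sort(blocks.begin(), blocks.end(), ByStartAddress);
    std::vector<FreeBlock> merged;
    for (size_t i = 0; i < blocks.size(); i++) {
      if (!merged.empty() &&
          merged.back().start + merged.back().size == blocks[i].start) {
        merged.back().size += blocks[i].size;  // Extend the previous block.
      } else {
        merged.push_back(blocks[i]);
      }
    }
    return merged;
  }
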
=======================================
--- /branches/bleeding_edge/src/spaces.h        Sun Sep 13 23:39:54 2009
+++ /branches/bleeding_edge/src/spaces.h        Thu Oct  1 02:29:38 2009
@@ -314,6 +314,72 @@
  };


+// ----------------------------------------------------------------------------
+// All heap objects containing executable code (code objects) must be
+// allocated from a 2 GB range of memory, so that they can call each other
+// using 32-bit displacements.  This happens automatically on 32-bit
+// platforms, where 32-bit displacements cover the entire 4 GB virtual
+// address space.  On 64-bit platforms, we support this using the CodeRange
+// object, which reserves and manages a range of virtual memory.
+class CodeRange : public AllStatic {
+ public:
+  // Reserves a range of virtual memory, but does not commit any of it.
+  // Can only be called once, at heap initialization time.
+  // Returns false on failure.
+  static bool Setup(const size_t requested_size);
+
+  // Frees the range of virtual memory, and frees the data structures used
+  // to manage it.
+  static void TearDown();
+
+  static bool exists() { return code_range_ != NULL; }
+  static bool contains(Address address) {
+    if (code_range_ == NULL) return false;
+    Address start = static_cast<Address>(code_range_->address());
+    return start <= address && address < start + code_range_->size();
+  }
+
+  // Allocates a chunk of memory from the large-object portion of
+  // the code range.  On platforms with no separate code range, should
+  // not be called.
+  static void* AllocateRawMemory(const size_t requested, size_t* allocated);
+  static void FreeRawMemory(void* buf, size_t length);
+
+ private:
+  // The reserved range of virtual memory that all code objects are put in.
+  static VirtualMemory* code_range_;
+  // Plain old data class, just a struct plus a constructor.
+  class FreeBlock {
+   public:
+    FreeBlock(Address start_arg, size_t size_arg)
+        : start(start_arg), size(size_arg) {}
+    FreeBlock(void* start_arg, size_t size_arg)
+        : start(static_cast<Address>(start_arg)), size(size_arg) {}
+
+    Address start;
+    size_t size;
+  };
+
+  // Freed blocks of memory are added to the free list.  When the
+  // allocation list is exhausted, the free list is sorted and merged to
+  // make the new allocation list.
+  static List<FreeBlock> free_list_;
+  // Memory is allocated from the free blocks on the allocation list.
+  // The block at current_allocation_block_index_ is the current block.
+  static List<FreeBlock> allocation_list_;
+  static int current_allocation_block_index_;
+
+  // Finds a block on the allocation list that contains at least the
+  // requested amount of memory.  If none is found, sorts and merges
+  // the existing free memory blocks, and searches again.
+  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
+  static void GetNextAllocationBlock(size_t requested);
+  // Compares the start addresses of two free blocks.
+  static int CompareFreeBlockAddress(const FreeBlock* left,
+                                     const FreeBlock* right);
+};
+
+
  // ----------------------------------------------------------------------------
  // A space acquires chunks of memory from the operating system. The memory
  // allocator manages chunks for the paged heap spaces (old space and map
@@ -380,8 +446,9 @@
    // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()).  It is
    // guaranteed that allocated pages have contiguous addresses.  The actual
-  // number of allocated page is returned in the output parameter
-  // allocated_pages.
+  // number of allocated pages is returned in the output parameter
+  // allocated_pages.  If the PagedSpace owner is executable and there is
+  // a code range, the pages are allocated from the code range.
    static Page* AllocatePages(int requested_pages, int* allocated_pages,
                               PagedSpace* owner);

@@ -395,6 +462,9 @@
    // Allocates and frees raw memory of certain size.
    // These are just thin wrappers around OS::Allocate and OS::Free,
    // but keep track of allocated bytes as part of heap.
+  // If the flag is EXECUTABLE and a code range exists, the requested
+  // memory is allocated from the code range.  If a code range exists
+  // and the freed memory is in it, the code range manages the freed memory.
    static void* AllocateRawMemory(const size_t requested,
                                   size_t* allocated,
                                   Executability executable);
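
On the AllocateRawMemory contract above: requests are rounded up to a whole
number of pages, and a leftover tail of at most one page is absorbed into
the allocation, so the range never collects fragments too small to hold a
page-aligned chunk.  A sketch of just that sizing rule, with an assumed
8 KB page size (kPageSize below is illustrative, not V8's constant):

  #include <stddef.h>

  const size_t kPageSize = 8 * 1024;  // Assumed for illustration.

  size_t RoundUpToPage(size_t n) {
    return (n + kPageSize - 1) & ~(kPageSize - 1);
  }

  // Mirrors the sizing in CodeRange::AllocateRawMemory, assuming the
  // chosen block is at least one page.  Example: a 20 KB request rounds
  // up to 24 KB; if the current free block is 28 KB, the 4 KB tail is
  // absorbed and the whole 28 KB block is returned.
  size_t ChooseAllocationSize(size_t requested, size_t block_size) {
    size_t allocated = RoundUpToPage(requested);
    if (allocated >= block_size - kPageSize) allocated = block_size;
    return allocated;
  }
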
=======================================
--- /branches/bleeding_edge/test/cctest/test-alloc.cc   Thu Oct 30 07:16:02 2008
+++ /branches/bleeding_edge/test/cctest/test-alloc.cc   Thu Oct  1 02:29:38 2009
@@ -144,3 +144,65 @@
    CHECK_EQ(42, result->Int32Value());
    env->Exit();
  }
+
+
+// CodeRange test.
+// Tests memory management in a CodeRange by allocating and freeing blocks,
+// using a pseudorandom generator to choose block sizes geometrically
+// distributed between Page::kPageSize and 2^5 * Page::kPageSize, plus a
+// random offset of up to 5000 bytes.
+// Ensure that the freed chunks are collected and reused by allocating (in
+// total) more than the size of the CodeRange.
+
+// This pseudorandom generator does not need to be particularly good.
+// Use the lower half of the V8::Random() generator.
+unsigned int Pseudorandom() {
+  static uint32_t lo = 2345;
+  lo = 18273 * (lo & 0xFFFF) + (lo >> 16);  // Provably not 0.
+  return lo & 0xFFFF;
+}
+
+
+// Plain old data class.  Represents a block of allocated memory.
+class Block {
+ public:
+  Block(void* base_arg, int size_arg)
+      : base(base_arg), size(size_arg) {}
+
+  void* base;
+  int size;
+};
+
+
+TEST(CodeRange) {
+  const int code_range_size = 16*MB;
+  CodeRange::Setup(code_range_size);
+  int current_allocated = 0;
+  int total_allocated = 0;
+  List<Block> blocks(1000);
+
+  while (total_allocated < 5 * code_range_size) {
+    if (current_allocated < code_range_size / 10) {
+      // Allocate a block.
+      // Geometrically distributed sizes, greater than Page::kPageSize.
+      size_t requested = (Page::kPageSize << (Pseudorandom() % 6)) +
+                         Pseudorandom() % 5000 + 1;
+      size_t allocated = 0;
+      void* base = CodeRange::AllocateRawMemory(requested, &allocated);
+      blocks.Add(Block(base, allocated));
+      current_allocated += allocated;
+      total_allocated += allocated;
+    } else {
+      // Free a block.
+      int index = Pseudorandom() % blocks.length();
+      CodeRange::FreeRawMemory(blocks[index].base, blocks[index].size);
+      current_allocated -= blocks[index].size;
+      if (index < blocks.length() - 1) {
+        blocks[index] = blocks.RemoveLast();
+      } else {
+        blocks.RemoveLast();
+      }
+    }
+  }
+
+  CodeRange::TearDown();
+}
