- Revision
- 183959
- Author
- gga...@apple.com
- Date
- 2015-05-07 16:29:15 -0700 (Thu, 07 May 2015)
Log Message
Release assert in com.apple.WebKit.WebContent under JavaScriptCore: JSC::JSONProtoFuncStringify
https://bugs.webkit.org/show_bug.cgi?id=144758
Reviewed by Andreas Kling.
This was an out-of-memory error when trying to shrink a string builder.
bmalloc was missing the optimization that allowed realloc() to shrink
without copying. So, let's add it.
* bmalloc/Allocator.cpp:
(bmalloc::Allocator::reallocate): Added Large and XLarge cases for
shrinking without copying. This isn't possible for small and medium
objects, and probably not very profitable, either.
* bmalloc/Heap.cpp:
(bmalloc::Heap::findXLarge):
(bmalloc::Heap::deallocateXLarge):
* bmalloc/Heap.h: Refactored this code to return a reference to an
XLarge range. This makes the code reusable, and also makes it easier
for realloc() to update metadata.
* bmalloc/LargeObject.h:
(bmalloc::LargeObject::split): Allow allocated objects to split because
that's what realloc() wants to do, and there's nothing intrinsically
wrong with it.
Modified Paths
Diff
Modified: trunk/Source/bmalloc/ChangeLog (183958 => 183959)
--- trunk/Source/bmalloc/ChangeLog 2015-05-07 23:08:53 UTC (rev 183958)
+++ trunk/Source/bmalloc/ChangeLog 2015-05-07 23:29:15 UTC (rev 183959)
@@ -1,3 +1,31 @@
+2015-05-07 Geoffrey Garen <gga...@apple.com>
+
+ Release assert in com.apple.WebKit.WebContent under JavaScriptCore: JSC::JSONProtoFuncStringify
+ https://bugs.webkit.org/show_bug.cgi?id=144758
+
+ Reviewed by Andreas Kling.
+
+ This was an out-of-memory error when trying to shrink a string builder.
+ bmalloc was missing the optimization that allowed realloc() to shrink
+ without copying. So, let's add it.
+
+ * bmalloc/Allocator.cpp:
+ (bmalloc::Allocator::reallocate): Added Large and XLarge cases for
+ shrinking without copying. This isn't possible for small and medium
+ objects, and probably not very profitable, either.
+
+ * bmalloc/Heap.cpp:
+ (bmalloc::Heap::findXLarge):
+ (bmalloc::Heap::deallocateXLarge):
+ * bmalloc/Heap.h: Refactored this code to return a reference to an
+ XLarge range. This makes the code reusable, and also makes it easier
+ for realloc() to update metadata.
+
+ * bmalloc/LargeObject.h:
+ (bmalloc::LargeObject::split): Allow allocated objects to split because
+ that's what realloc() wants to do, and there's nothing intrinsically
+ wrong with it.
+
2015-05-07 Dan Bernstein <m...@apple.com>
<rdar://problem/19317140> [Xcode] Remove usage of AspenFamily.xcconfig in Source/
Modified: trunk/Source/bmalloc/bmalloc/Allocator.cpp (183958 => 183959)
--- trunk/Source/bmalloc/bmalloc/Allocator.cpp 2015-05-07 23:08:53 UTC (rev 183958)
+++ trunk/Source/bmalloc/bmalloc/Allocator.cpp 2015-05-07 23:29:15 UTC (rev 183959)
@@ -112,10 +112,6 @@
if (!m_isBmallocEnabled)
return realloc(object, newSize);
- void* result = allocate(newSize);
- if (!object)
- return result;
-
size_t oldSize = 0;
switch (objectType(object)) {
case Small: {
@@ -129,20 +125,48 @@
break;
}
case Large: {
- std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
LargeObject largeObject(object);
oldSize = largeObject.size();
+
+ if (newSize < oldSize && newSize > mediumMax) {
+ newSize = roundUpToMultipleOf<largeAlignment>(newSize);
+ if (oldSize - newSize >= largeMin) {
+ std::pair<LargeObject, LargeObject> split = largeObject.split(newSize);
+
+ lock.unlock();
+ m_deallocator.deallocate(split.second.begin());
+ lock.lock();
+ }
+ return object;
+ }
break;
}
case XLarge: {
- std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
- Range range = PerProcess<Heap>::getFastCase()->findXLarge(lock, object);
- RELEASE_BASSERT(range);
+ BASSERT(objectType(nullptr) == XLarge);
+ if (!object)
+ break;
+
+ std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
+ Range& range = PerProcess<Heap>::getFastCase()->findXLarge(lock, object);
oldSize = range.size();
+
+ if (newSize < oldSize && newSize > largeMax) {
+ newSize = roundUpToMultipleOf<xLargeAlignment>(newSize);
+ if (oldSize - newSize >= xLargeAlignment) {
+ lock.unlock();
+ vmDeallocate(static_cast<char*>(object) + oldSize, oldSize - newSize);
+ lock.lock();
+
+ range = Range(object, newSize);
+ }
+ return object;
+ }
break;
}
}
+ void* result = allocate(newSize);
size_t copySize = std::min(oldSize, newSize);
memcpy(result, object, copySize);
m_deallocator.deallocate(object);
Modified: trunk/Source/bmalloc/bmalloc/Deallocator.cpp (183958 => 183959)
--- trunk/Source/bmalloc/bmalloc/Deallocator.cpp 2015-05-07 23:08:53 UTC (rev 183958)
+++ trunk/Source/bmalloc/bmalloc/Deallocator.cpp 2015-05-07 23:29:15 UTC (rev 183959)
@@ -99,6 +99,7 @@
return;
}
+ BASSERT(objectType(nullptr) == XLarge);
if (!object)
return;
Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (183958 => 183959)
--- trunk/Source/bmalloc/bmalloc/Heap.cpp 2015-05-07 23:08:53 UTC (rev 183958)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp 2015-05-07 23:29:15 UTC (rev 183959)
@@ -306,7 +306,7 @@
return result;
}
-Range Heap::findXLarge(std::lock_guard<StaticMutex>&, void* object)
+Range& Heap::findXLarge(std::unique_lock<StaticMutex>&, void* object)
{
for (auto& range : m_xLargeObjects) {
if (range.begin() != object)
@@ -314,23 +314,17 @@
return range;
}
- return Range();
+ RELEASE_BASSERT(false);
+ return *static_cast<Range*>(nullptr); // Silence compiler error.
}
void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
{
- for (auto& range : m_xLargeObjects) {
- if (range.begin() != object)
- continue;
+ Range toDeallocate = m_xLargeObjects.pop(&findXLarge(lock, object));
- Range toDeallocate = m_xLargeObjects.pop(&range);
-
- lock.unlock();
- vmDeallocate(toDeallocate.begin(), toDeallocate.size());
- lock.lock();
-
- break;
- }
+ lock.unlock();
+ vmDeallocate(toDeallocate.begin(), toDeallocate.size());
+ lock.lock();
}
void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, LargeObject& largeObject, size_t size)
Modified: trunk/Source/bmalloc/bmalloc/Heap.h (183958 => 183959)
--- trunk/Source/bmalloc/bmalloc/Heap.h 2015-05-07 23:08:53 UTC (rev 183958)
+++ trunk/Source/bmalloc/bmalloc/Heap.h 2015-05-07 23:29:15 UTC (rev 183959)
@@ -66,7 +66,7 @@
void* allocateXLarge(std::lock_guard<StaticMutex>&, size_t);
void* allocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t);
void* tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t);
- Range findXLarge(std::lock_guard<StaticMutex>&, void*);
+ Range& findXLarge(std::unique_lock<StaticMutex>&, void*);
void deallocateXLarge(std::unique_lock<StaticMutex>&, void*);
void scavenge(std::unique_lock<StaticMutex>&, std::chrono::milliseconds sleepDuration);
Modified: trunk/Source/bmalloc/bmalloc/LargeObject.h (183958 => 183959)
--- trunk/Source/bmalloc/bmalloc/LargeObject.h 2015-05-07 23:08:53 UTC (rev 183958)
+++ trunk/Source/bmalloc/bmalloc/LargeObject.h 2015-05-07 23:29:15 UTC (rev 183959)
@@ -206,8 +206,6 @@
inline std::pair<LargeObject, LargeObject> LargeObject::split(size_t size) const
{
- BASSERT(isFree());
-
Range split(begin(), size);
Range leftover = Range(split.end(), this->size() - size);
BASSERT(leftover.size() >= largeMin);