Title: [240175] trunk/Source
Revision
240175
Author
keith_mil...@apple.com
Date
2019-01-18 14:48:22 -0800 (Fri, 18 Jan 2019)

Log Message

Gigacages should start allocations from a slide
https://bugs.webkit.org/show_bug.cgi?id=193523

Reviewed by Mark Lam.

Source/bmalloc:

This patch makes it so that Gigacage Heaps slide the start of the
cage by some random amount. We still ensure that there is always
at least 4GB/2GB, on macOS/iOS respectively, of VA space available
for allocation.

Also, this patch changes some macros into constants since macros
are the devil.

* bmalloc/Gigacage.cpp:
(Gigacage::bmalloc::protectGigacageBasePtrs):
(Gigacage::bmalloc::unprotectGigacageBasePtrs):
(Gigacage::bmalloc::runwaySize):
(Gigacage::ensureGigacage):
(Gigacage::shouldBeEnabled):
* bmalloc/Gigacage.h:
(Gigacage::name):
(Gigacage::gigacageSizeToMask):
(Gigacage::size):
(Gigacage::mask):
(Gigacage::basePtr):
(Gigacage::ensureGigacage):
(Gigacage::wasEnabled):
(Gigacage::isCaged):
(Gigacage::isEnabled):
(Gigacage::caged):
(Gigacage::disableDisablingPrimitiveGigacageIfShouldBeEnabled):
(Gigacage::canPrimitiveGigacageBeDisabled):
(Gigacage::disablePrimitiveGigacage):
(Gigacage::addPrimitiveDisableCallback):
(Gigacage::removePrimitiveDisableCallback):
* bmalloc/Heap.cpp:
(bmalloc::Heap::Heap):
* bmalloc/Sizes.h:
(bmalloc::Sizes::maskSizeClass):
(bmalloc::Sizes::maskObjectSize):
(bmalloc::Sizes::logSizeClass):
(bmalloc::Sizes::logObjectSize):
(bmalloc::Sizes::sizeClass):
(bmalloc::Sizes::objectSize):
(bmalloc::Sizes::pageSize):

Source/JavaScriptCore:

This patch changes some macros into constants since macros are the
devil.

* ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::caged):
* llint/LowLevelInterpreter64.asm:

Source/WTF:

This patch changes some macros into constants since macros are the
devil.

* wtf/Gigacage.cpp:
* wtf/Gigacage.h:

Modified Paths

Diff

Modified: trunk/Source/JavaScriptCore/ChangeLog (240174 => 240175)


--- trunk/Source/JavaScriptCore/ChangeLog	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/JavaScriptCore/ChangeLog	2019-01-18 22:48:22 UTC (rev 240175)
@@ -1,3 +1,17 @@
+2019-01-18  Keith Miller  <keith_mil...@apple.com>
+
+        Gigacages should start allocations from a slide
+        https://bugs.webkit.org/show_bug.cgi?id=193523
+
+        Reviewed by Mark Lam.
+
+        This patch changes some macros into constants since macros are the
+        devil.
+
+        * ftl/FTLLowerDFGToB3.cpp:
+        (JSC::FTL::DFG::LowerDFGToB3::caged):
+        * llint/LowLevelInterpreter64.asm:
+
 2019-01-18  Matt Lewis  <jlew...@apple.com>
 
         Unreviewed, rolling out r240160.

Modified: trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp (240174 => 240175)


--- trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp	2019-01-18 22:48:22 UTC (rev 240175)
@@ -13852,6 +13852,7 @@
     
     LValue caged(Gigacage::Kind kind, LValue ptr)
     {
+#if GIGACAGE_ENABLED
         if (!Gigacage::isEnabled(kind))
             return ptr;
         
@@ -13880,6 +13881,9 @@
         // and possibly other smart things if we want to be able to remove this opaque.
         // https://bugs.webkit.org/show_bug.cgi?id=175493
         return m_out.opaque(result);
+#else
+        return ptr;
+#endif
     }
     
     void buildSwitch(SwitchData* data, LType type, LValue switchValue)

Modified: trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm (240174 => 240175)


--- trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm	2019-01-18 22:48:22 UTC (rev 240175)
@@ -1315,7 +1315,7 @@
     arrayProfile(OpGetById::Metadata::m_modeMetadata.arrayLengthMode.arrayProfile, t0, t2, t5)
     btiz t0, IsArray, .opGetByIdSlow
     btiz t0, IndexingShapeMask, .opGetByIdSlow
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t3], t0, t1)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t3], t0, t1)
     loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
     bilt t0, 0, .opGetByIdSlow
     orq tagTypeNumber, t0
@@ -1438,7 +1438,7 @@
     loadConstantOrVariableInt32(size, t3, t1, .opGetByValSlow)
     sxi2q t1, t1
 
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t0], t3, tagTypeNumber)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t0], t3, tagTypeNumber)
     move TagTypeNumber, tagTypeNumber
 
     andi IndexingShapeMask, t2
@@ -1504,7 +1504,7 @@
     bia t2, Int8ArrayType - FirstTypedArrayType, .opGetByValUint8ArrayOrUint8ClampedArray
 
     # We have Int8ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
     loadbs [t3, t1], t0
     finishIntGetByVal(t0, t1)
 
@@ -1512,13 +1512,13 @@
     bia t2, Uint8ArrayType - FirstTypedArrayType, .opGetByValUint8ClampedArray
 
     # We have Uint8ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
     loadb [t3, t1], t0
     finishIntGetByVal(t0, t1)
 
 .opGetByValUint8ClampedArray:
     # We have Uint8ClampedArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
     loadb [t3, t1], t0
     finishIntGetByVal(t0, t1)
 
@@ -1527,13 +1527,13 @@
     bia t2, Int16ArrayType - FirstTypedArrayType, .opGetByValUint16Array
 
     # We have Int16ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
     loadhs [t3, t1, 2], t0
     finishIntGetByVal(t0, t1)
 
 .opGetByValUint16Array:
     # We have Uint16ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
     loadh [t3, t1, 2], t0
     finishIntGetByVal(t0, t1)
 
@@ -1545,13 +1545,13 @@
     bia t2, Int32ArrayType - FirstTypedArrayType, .opGetByValUint32Array
 
     # We have Int32ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
     loadi [t3, t1, 4], t0
     finishIntGetByVal(t0, t1)
 
 .opGetByValUint32Array:
     # We have Uint32ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
     # This is the hardest part because of large unsigned values.
     loadi [t3, t1, 4], t0
     bilt t0, 0, .opGetByValSlow # This case is still awkward to implement in LLInt.
@@ -1563,7 +1563,7 @@
     bieq t2, Float32ArrayType - FirstTypedArrayType, .opGetByValSlow
 
     # We have Float64ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
     loadd [t3, t1, 8], ft0
     bdnequn ft0, ft0, .opGetByValSlow
     finishDoubleGetByVal(ft0, t0, t1)
@@ -1599,7 +1599,7 @@
         get(m_property, t0)
         loadConstantOrVariableInt32(size, t0, t3, .opPutByValSlow)
         sxi2q t3, t3
-        loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t1], t0, tagTypeNumber)
+        loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t1], t0, tagTypeNumber)
         move TagTypeNumber, tagTypeNumber
         btinz t2, CopyOnWrite, .opPutByValSlow
         andi IndexingShapeMask, t2

Modified: trunk/Source/WTF/ChangeLog (240174 => 240175)


--- trunk/Source/WTF/ChangeLog	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/WTF/ChangeLog	2019-01-18 22:48:22 UTC (rev 240175)
@@ -1,3 +1,16 @@
+2019-01-18  Keith Miller  <keith_mil...@apple.com>
+
+        Gigacages should start allocations from a slide
+        https://bugs.webkit.org/show_bug.cgi?id=193523
+
+        Reviewed by Mark Lam.
+
+        This patch changes some macros into constants since macros are the
+        devil.
+
+        * wtf/Gigacage.cpp:
+        * wtf/Gigacage.h:
+
 2019-01-18  Matt Lewis  <jlew...@apple.com>
 
         Unreviewed, rolling out r240160.

Modified: trunk/Source/WTF/wtf/Gigacage.cpp (240174 => 240175)


--- trunk/Source/WTF/wtf/Gigacage.cpp	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/WTF/wtf/Gigacage.cpp	2019-01-18 22:48:22 UTC (rev 240175)
@@ -32,10 +32,10 @@
 
 #if defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
 
-alignas(void*) char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
-
 namespace Gigacage {
 
+alignas(void*) char g_gigacageBasePtrs[gigacageBasePtrsSize];
+
 void* tryMalloc(Kind, size_t size)
 {
     return FastMalloc::tryMalloc(size);
@@ -61,7 +61,7 @@
 }
 
 } // namespace Gigacage
-#else
+#else // defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
 #include <bmalloc/bmalloc.h>
 
 namespace Gigacage {

Modified: trunk/Source/WTF/wtf/Gigacage.h (240174 => 240175)


--- trunk/Source/WTF/wtf/Gigacage.h	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/WTF/wtf/Gigacage.h	2019-01-18 22:48:22 UTC (rev 240175)
@@ -26,19 +26,19 @@
 #pragma once
 
 #include <wtf/FastMalloc.h>
+#include <wtf/StdLibExtras.h>
 
 #if defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
 #define GIGACAGE_ENABLED 0
-#define PRIMITIVE_GIGACAGE_MASK 0
-#define JSVALUE_GIGACAGE_MASK 0
-#define GIGACAGE_BASE_PTRS_SIZE 8192
 
-extern "C" {
-alignas(void*) extern WTF_EXPORT_PRIVATE char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
-}
-
 namespace Gigacage {
 
+const size_t primitiveGigacageMask = 0;
+const size_t jsValueGigacageMask = 0;
+const size_t gigacageBasePtrsSize = 8 * KB;
+
+extern "C" alignas(void*) WTF_EXPORT_PRIVATE char g_gigacageBasePtrs[gigacageBasePtrsSize];
+
 struct BasePtrs {
     uintptr_t reservedForFlags;
     void* primitive;

Modified: trunk/Source/bmalloc/ChangeLog (240174 => 240175)


--- trunk/Source/bmalloc/ChangeLog	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/bmalloc/ChangeLog	2019-01-18 22:48:22 UTC (rev 240175)
@@ -1,3 +1,51 @@
+2019-01-18  Keith Miller  <keith_mil...@apple.com>
+
+        Gigacages should start allocations from a slide
+        https://bugs.webkit.org/show_bug.cgi?id=193523
+
+        Reviewed by Mark Lam.
+
+        This patch makes it so that Gigacage Heaps slide the start of the
+        cage by some random amount. We still ensure that there is always
+        at least 4GB/2GB, on macOS/iOS respectively, of VA space available
+        for allocation.
+
+        Also, this patch changes some macros into constants since macros
+        are the devil.
+
+        * bmalloc/Gigacage.cpp:
+        (Gigacage::bmalloc::protectGigacageBasePtrs):
+        (Gigacage::bmalloc::unprotectGigacageBasePtrs):
+        (Gigacage::bmalloc::runwaySize):
+        (Gigacage::ensureGigacage):
+        (Gigacage::shouldBeEnabled):
+        * bmalloc/Gigacage.h:
+        (Gigacage::name):
+        (Gigacage::gigacageSizeToMask):
+        (Gigacage::size):
+        (Gigacage::mask):
+        (Gigacage::basePtr):
+        (Gigacage::ensureGigacage):
+        (Gigacage::wasEnabled):
+        (Gigacage::isCaged):
+        (Gigacage::isEnabled):
+        (Gigacage::caged):
+        (Gigacage::disableDisablingPrimitiveGigacageIfShouldBeEnabled):
+        (Gigacage::canPrimitiveGigacageBeDisabled):
+        (Gigacage::disablePrimitiveGigacage):
+        (Gigacage::addPrimitiveDisableCallback):
+        (Gigacage::removePrimitiveDisableCallback):
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::Heap):
+        * bmalloc/Sizes.h:
+        (bmalloc::Sizes::maskSizeClass):
+        (bmalloc::Sizes::maskObjectSize):
+        (bmalloc::Sizes::logSizeClass):
+        (bmalloc::Sizes::logObjectSize):
+        (bmalloc::Sizes::sizeClass):
+        (bmalloc::Sizes::objectSize):
+        (bmalloc::Sizes::pageSize):
+
 2019-01-18  Matt Lewis  <jlew...@apple.com>
 
         Unreviewed, rolling out r240160.

Modified: trunk/Source/bmalloc/bmalloc/Gigacage.cpp (240174 => 240175)


--- trunk/Source/bmalloc/bmalloc/Gigacage.cpp	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/bmalloc/bmalloc/Gigacage.cpp	2019-01-18 22:48:22 UTC (rev 240175)
@@ -35,23 +35,25 @@
 #include <cstdio>
 #include <mutex>
 
+#if GIGACAGE_ENABLED
+
+namespace Gigacage {
+
 // This is exactly 32GB because inside JSC, indexed accesses for arrays, typed arrays, etc,
 // use unsigned 32-bit ints as indices. The items those indices access are 8 bytes or less
 // in size. 2^32 * 8 = 32GB. This means if an access on a caged type happens to go out of
 // bounds, the access is guaranteed to land somewhere else in the cage or inside the runway.
 // If this were less than 32GB, those OOB accesses could reach outside of the cage.
-#define GIGACAGE_RUNWAY (32llu * 1024 * 1024 * 1024)
+constexpr size_t gigacageRunway = 32llu * 1024 * 1024 * 1024;
 
 // Note: g_gigacageBasePtrs[0] is reserved for storing the wasEnabled flag.
 // The first gigacageBasePtr will start at g_gigacageBasePtrs[sizeof(void*)].
 // This is done so that the wasEnabled flag will also be protected along with the
 // gigacageBasePtrs.
-alignas(GIGACAGE_BASE_PTRS_SIZE) char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
+alignas(gigacageBasePtrsSize) char g_gigacageBasePtrs[gigacageBasePtrsSize];
 
 using namespace bmalloc;
 
-namespace Gigacage {
-
 namespace {
 
 bool s_isDisablingPrimitiveGigacageDisabled;
@@ -61,12 +63,12 @@
     uintptr_t basePtrs = reinterpret_cast<uintptr_t>(g_gigacageBasePtrs);
     // We might only get page size alignment, but that's also the minimum we need.
     RELEASE_BASSERT(!(basePtrs & (vmPageSize() - 1)));
-    mprotect(g_gigacageBasePtrs, GIGACAGE_BASE_PTRS_SIZE, PROT_READ);
+    mprotect(g_gigacageBasePtrs, gigacageBasePtrsSize, PROT_READ);
 }
 
 void unprotectGigacageBasePtrs()
 {
-    mprotect(g_gigacageBasePtrs, GIGACAGE_BASE_PTRS_SIZE, PROT_READ | PROT_WRITE);
+    mprotect(g_gigacageBasePtrs, gigacageBasePtrsSize, PROT_READ | PROT_WRITE);
 }
 
 class UnprotectGigacageBasePtrsScope {
@@ -101,7 +103,6 @@
     Vector<Callback> callbacks;
 };
 
-#if GIGACAGE_ENABLED
 size_t runwaySize(Kind kind)
 {
     switch (kind) {
@@ -108,19 +109,17 @@
     case Kind::ReservedForFlagsAndNotABasePtr:
         RELEASE_BASSERT_NOT_REACHED();
     case Kind::Primitive:
-        return static_cast<size_t>(GIGACAGE_RUNWAY);
+        return gigacageRunway;
     case Kind::JSValue:
-        return static_cast<size_t>(0);
+        return 0;
     }
-    return static_cast<size_t>(0);
+    return 0;
 }
-#endif
 
 } // anonymous namespace
 
 void ensureGigacage()
 {
-#if GIGACAGE_ENABLED
     static std::once_flag onceFlag;
     std::call_once(
         onceFlag,
@@ -189,7 +188,6 @@
             setWasEnabled();
             protectGigacageBasePtrs();
         });
-#endif // GIGACAGE_ENABLED
 }
 
 void disablePrimitiveGigacage()
@@ -265,8 +263,6 @@
 bool shouldBeEnabled()
 {
     static bool cached = false;
-
-#if GIGACAGE_ENABLED
     static std::once_flag onceFlag;
     std::call_once(
         onceFlag,
@@ -288,12 +284,11 @@
             
             cached = true;
         });
-#endif // GIGACAGE_ENABLED
-    
     return cached;
 }
 
 } // namespace Gigacage
 
+#endif // GIGACAGE_ENABLED
 
 

Modified: trunk/Source/bmalloc/bmalloc/Gigacage.h (240174 => 240175)


--- trunk/Source/bmalloc/bmalloc/Gigacage.h	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/bmalloc/bmalloc/Gigacage.h	2019-01-18 22:48:22 UTC (rev 240175)
@@ -30,16 +30,53 @@
 #include "BExport.h"
 #include "BInline.h"
 #include "BPlatform.h"
+#include "Sizes.h"
 #include <cstddef>
 #include <inttypes.h>
 
+#if ((BOS(DARWIN) || BOS(LINUX)) && \
+(BCPU(X86_64) || (BCPU(ARM64) && !defined(__ILP32__) && (!BPLATFORM(IOS_FAMILY) || BPLATFORM(IOS)))))
+#define GIGACAGE_ENABLED 1
+#else
+#define GIGACAGE_ENABLED 0
+#endif
+
+
+namespace Gigacage {
+
+enum Kind {
+    ReservedForFlagsAndNotABasePtr = 0,
+    Primitive,
+    JSValue,
+};
+
+BINLINE const char* name(Kind kind)
+{
+    switch (kind) {
+    case ReservedForFlagsAndNotABasePtr:
+        RELEASE_BASSERT_NOT_REACHED();
+    case Primitive:
+        return "Primitive";
+    case JSValue:
+        return "JSValue";
+    }
+    BCRASH();
+    return nullptr;
+}
+
+#if GIGACAGE_ENABLED
+
 #if BCPU(ARM64)
-#define PRIMITIVE_GIGACAGE_SIZE 0x80000000llu
-#define JSVALUE_GIGACAGE_SIZE 0x40000000llu
+constexpr size_t primitiveGigacageSize = 2 * bmalloc::Sizes::GB;
+constexpr size_t jsValueGigacageSize = 1 * bmalloc::Sizes::GB;
+constexpr size_t gigacageBasePtrsSize = 16 * bmalloc::Sizes::kB;
+constexpr size_t minimumCageSizeAfterSlide = bmalloc::Sizes::GB / 2;
 #define GIGACAGE_ALLOCATION_CAN_FAIL 1
 #else
-#define PRIMITIVE_GIGACAGE_SIZE 0x800000000llu
-#define JSVALUE_GIGACAGE_SIZE 0x400000000llu
+constexpr size_t primitiveGigacageSize = 32 * bmalloc::Sizes::GB;
+constexpr size_t jsValueGigacageSize = 16 * bmalloc::Sizes::GB;
+constexpr size_t gigacageBasePtrsSize = 4 * bmalloc::Sizes::kB;
+constexpr size_t minimumCageSizeAfterSlide = 4 * bmalloc::Sizes::GB;
 #define GIGACAGE_ALLOCATION_CAN_FAIL 0
 #endif
 
@@ -50,31 +87,19 @@
 #define GIGACAGE_ALLOCATION_CAN_FAIL 1
 #endif
 
-static_assert(bmalloc::isPowerOfTwo(PRIMITIVE_GIGACAGE_SIZE), "");
-static_assert(bmalloc::isPowerOfTwo(JSVALUE_GIGACAGE_SIZE), "");
 
-#define GIGACAGE_SIZE_TO_MASK(size) ((size) - 1)
+static_assert(bmalloc::isPowerOfTwo(primitiveGigacageSize), "");
+static_assert(bmalloc::isPowerOfTwo(jsValueGigacageSize), "");
+static_assert(primitiveGigacageSize > minimumCageSizeAfterSlide, "");
+static_assert(jsValueGigacageSize > minimumCageSizeAfterSlide, "");
 
-#define PRIMITIVE_GIGACAGE_MASK GIGACAGE_SIZE_TO_MASK(PRIMITIVE_GIGACAGE_SIZE)
-#define JSVALUE_GIGACAGE_MASK GIGACAGE_SIZE_TO_MASK(JSVALUE_GIGACAGE_SIZE)
+constexpr size_t gigacageSizeToMask(size_t size) { return size - 1; }
 
-#if ((BOS(DARWIN) || BOS(LINUX)) && \
-    (BCPU(X86_64) || (BCPU(ARM64) && !defined(__ILP32__) && (!BPLATFORM(IOS_FAMILY) || BPLATFORM(IOS)))))
-#define GIGACAGE_ENABLED 1
-#else
-#define GIGACAGE_ENABLED 0
-#endif
+constexpr size_t primitiveGigacageMask = gigacageSizeToMask(primitiveGigacageSize);
+constexpr size_t jsValueGigacageMask = gigacageSizeToMask(jsValueGigacageSize);
 
-#if BCPU(ARM64)
-#define GIGACAGE_BASE_PTRS_SIZE 16384
-#else
-#define GIGACAGE_BASE_PTRS_SIZE 4096
-#endif
+extern "C" alignas(gigacageBasePtrsSize) BEXPORT char g_gigacageBasePtrs[gigacageBasePtrsSize];
 
-extern "C" alignas(GIGACAGE_BASE_PTRS_SIZE) BEXPORT char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
-
-namespace Gigacage {
-
 BINLINE bool wasEnabled() { return g_gigacageBasePtrs[0]; }
 BINLINE void setWasEnabled() { g_gigacageBasePtrs[0] = true; }
 
@@ -84,16 +109,10 @@
     void* jsValue;
 };
 
-enum Kind {
-    ReservedForFlagsAndNotABasePtr = 0,
-    Primitive,
-    JSValue,
-};
-
 static_assert(offsetof(BasePtrs, primitive) == Kind::Primitive * sizeof(void*), "");
 static_assert(offsetof(BasePtrs, jsValue) == Kind::JSValue * sizeof(void*), "");
 
-static constexpr unsigned numKinds = 2;
+constexpr unsigned numKinds = 2;
 
 BEXPORT void ensureGigacage();
 
@@ -109,20 +128,6 @@
 inline bool isPrimitiveGigacagePermanentlyEnabled() { return isDisablingPrimitiveGigacageDisabled(); }
 inline bool canPrimitiveGigacageBeDisabled() { return !isDisablingPrimitiveGigacageDisabled(); }
 
-BINLINE const char* name(Kind kind)
-{
-    switch (kind) {
-    case ReservedForFlagsAndNotABasePtr:
-        RELEASE_BASSERT_NOT_REACHED();
-    case Primitive:
-        return "Primitive";
-    case JSValue:
-        return "JSValue";
-    }
-    BCRASH();
-    return nullptr;
-}
-
 BINLINE void*& basePtr(BasePtrs& basePtrs, Kind kind)
 {
     switch (kind) {
@@ -158,9 +163,9 @@
     case ReservedForFlagsAndNotABasePtr:
         RELEASE_BASSERT_NOT_REACHED();
     case Primitive:
-        return static_cast<size_t>(PRIMITIVE_GIGACAGE_SIZE);
+        return static_cast<size_t>(primitiveGigacageSize);
     case JSValue:
-        return static_cast<size_t>(JSVALUE_GIGACAGE_SIZE);
+        return static_cast<size_t>(jsValueGigacageSize);
     }
     BCRASH();
     return 0;
@@ -173,7 +178,7 @@
 
 BINLINE size_t mask(Kind kind)
 {
-    return GIGACAGE_SIZE_TO_MASK(size(kind));
+    return gigacageSizeToMask(size(kind));
 }
 
 template<typename Func>
@@ -202,6 +207,29 @@
 
 BEXPORT bool shouldBeEnabled();
 
+#else // GIGACAGE_ENABLED
+
+BINLINE void*& basePtr(Kind)
+{
+    BCRASH();
+    static void* unreachable;
+    return unreachable;
+}
+BINLINE size_t size(Kind) { BCRASH(); return 0; }
+BINLINE void ensureGigacage() { }
+BINLINE bool wasEnabled() { return false; }
+BINLINE bool isCaged(Kind, const void*) { return true; }
+BINLINE bool isEnabled() { return false; }
+template<typename T> BINLINE T* caged(Kind, T* ptr) { return ptr; }
+BINLINE void disableDisablingPrimitiveGigacageIfShouldBeEnabled() { }
+BINLINE bool canPrimitiveGigacageBeDisabled() { return false; }
+BINLINE void disablePrimitiveGigacage() { }
+BINLINE void addPrimitiveDisableCallback(void (*)(void*), void*) { }
+BINLINE void removePrimitiveDisableCallback(void (*)(void*), void*) { }
+
+#endif // GIGACAGE_ENABLED
+
 } // namespace Gigacage
 
 
+

Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (240174 => 240175)


--- trunk/Source/bmalloc/bmalloc/Heap.cpp	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp	2019-01-18 22:48:22 UTC (rev 240175)
@@ -29,6 +29,7 @@
 #include "BulkDecommit.h"
 #include "BumpAllocator.h"
 #include "Chunk.h"
+#include "CryptoRandom.h"
 #include "Environment.h"
 #include "Gigacage.h"
 #include "DebugHeap.h"
@@ -61,7 +62,12 @@
 #if GIGACAGE_ENABLED
         if (usingGigacage()) {
             RELEASE_BASSERT(gigacageBasePtr());
-            m_largeFree.add(LargeRange(gigacageBasePtr(), gigacageSize(), 0, 0));
+            uint64_t random;
+            cryptoRandom(reinterpret_cast<unsigned char*>(&random), sizeof(random));
+            ptrdiff_t offset = random % (gigacageSize() - Gigacage::minimumCageSizeAfterSlide);
+            offset = reinterpret_cast<ptrdiff_t>(roundDownToMultipleOf(vmPageSize(), reinterpret_cast<void*>(offset)));
+            void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset;
+            m_largeFree.add(LargeRange(base, gigacageSize() - offset, 0, 0));
         }
 #endif
     }

Modified: trunk/Source/bmalloc/bmalloc/Sizes.h (240174 => 240175)


--- trunk/Source/bmalloc/bmalloc/Sizes.h	2019-01-18 22:26:37 UTC (rev 240174)
+++ trunk/Source/bmalloc/bmalloc/Sizes.h	2019-01-18 22:48:22 UTC (rev 240175)
@@ -40,90 +40,91 @@
 // Repository for malloc sizing constants and calculations.
 
 namespace Sizes {
-    static const size_t kB = 1024;
-    static const size_t MB = kB * kB;
+static constexpr size_t kB = 1024;
+static constexpr size_t MB = kB * kB;
+static constexpr size_t GB = kB * kB * kB;
 
-    static const size_t alignment = 8;
-    static const size_t alignmentMask = alignment - 1ul;
+static constexpr size_t alignment = 8;
+static constexpr size_t alignmentMask = alignment - 1ul;
 
-    static const size_t chunkSize = 1 * MB;
-    static const size_t chunkMask = ~(chunkSize - 1ul);
+static constexpr size_t chunkSize = 1 * MB;
+static constexpr size_t chunkMask = ~(chunkSize - 1ul);
 
-    static const size_t smallLineSize = 256;
-    static const size_t smallPageSize = 4 * kB;
-    static const size_t smallPageLineCount = smallPageSize / smallLineSize;
+static constexpr size_t smallLineSize = 256;
+static constexpr size_t smallPageSize = 4 * kB;
+static constexpr size_t smallPageLineCount = smallPageSize / smallLineSize;
 
-    static const size_t maskSizeClassMax = 512;
-    static const size_t smallMax = 32 * kB;
+static constexpr size_t maskSizeClassMax = 512;
+static constexpr size_t smallMax = 32 * kB;
 
-    static const size_t pageSizeMax = smallMax * 2;
-    static const size_t pageClassCount = pageSizeMax / smallPageSize;
+static constexpr size_t pageSizeMax = smallMax * 2;
+static constexpr size_t pageClassCount = pageSizeMax / smallPageSize;
 
-    static const size_t pageSizeWasteFactor = 8;
-    static const size_t logWasteFactor = 8;
+static constexpr size_t pageSizeWasteFactor = 8;
+static constexpr size_t logWasteFactor = 8;
 
-    static const size_t largeAlignment = smallMax / pageSizeWasteFactor;
-    static const size_t largeAlignmentMask = largeAlignment - 1;
+static constexpr size_t largeAlignment = smallMax / pageSizeWasteFactor;
+static constexpr size_t largeAlignmentMask = largeAlignment - 1;
 
-    static const size_t deallocatorLogCapacity = 512;
-    static const size_t bumpRangeCacheCapacity = 3;
-    
-    static const size_t scavengerBytesPerMemoryPressureCheck = 16 * MB;
-    static const double memoryPressureThreshold = 0.75;
-    
-    static const size_t maskSizeClassCount = maskSizeClassMax / alignment;
+static constexpr size_t deallocatorLogCapacity = 512;
+static constexpr size_t bumpRangeCacheCapacity = 3;
 
-    constexpr size_t maskSizeClass(size_t size)
-    {
-        // We mask to accommodate zero.
-        return mask((size - 1) / alignment, maskSizeClassCount - 1);
-    }
+static constexpr size_t scavengerBytesPerMemoryPressureCheck = 16 * MB;
+static constexpr double memoryPressureThreshold = 0.75;
 
-    inline size_t maskObjectSize(size_t maskSizeClass)
-    {
-        return (maskSizeClass + 1) * alignment;
-    }
+static constexpr size_t maskSizeClassCount = maskSizeClassMax / alignment;
 
-    static const size_t logAlignmentMin = maskSizeClassMax / logWasteFactor;
+constexpr size_t maskSizeClass(size_t size)
+{
+    // We mask to accommodate zero.
+    return mask((size - 1) / alignment, maskSizeClassCount - 1);
+}
 
-    static const size_t logSizeClassCount = (log2(smallMax) - log2(maskSizeClassMax)) * logWasteFactor;
+inline size_t maskObjectSize(size_t maskSizeClass)
+{
+    return (maskSizeClass + 1) * alignment;
+}
 
-    inline size_t logSizeClass(size_t size)
-    {
-        size_t base = log2(size - 1) - log2(maskSizeClassMax);
-        size_t offset = (size - 1 - (maskSizeClassMax << base));
-        return base * logWasteFactor + offset / (logAlignmentMin << base);
-    }
+static constexpr size_t logAlignmentMin = maskSizeClassMax / logWasteFactor;
 
-    inline size_t logObjectSize(size_t logSizeClass)
-    {
-        size_t base = logSizeClass / logWasteFactor;
-        size_t offset = logSizeClass % logWasteFactor;
-        return (maskSizeClassMax << base) + (offset + 1) * (logAlignmentMin << base);
-    }
+static constexpr size_t logSizeClassCount = (log2(smallMax) - log2(maskSizeClassMax)) * logWasteFactor;
 
-    static const size_t sizeClassCount = maskSizeClassCount + logSizeClassCount;
+inline size_t logSizeClass(size_t size)
+{
+    size_t base = log2(size - 1) - log2(maskSizeClassMax);
+    size_t offset = (size - 1 - (maskSizeClassMax << base));
+    return base * logWasteFactor + offset / (logAlignmentMin << base);
+}
 
-    inline size_t sizeClass(size_t size)
-    {
-        if (size <= maskSizeClassMax)
-            return maskSizeClass(size);
-        return maskSizeClassCount + logSizeClass(size);
-    }
+inline size_t logObjectSize(size_t logSizeClass)
+{
+    size_t base = logSizeClass / logWasteFactor;
+    size_t offset = logSizeClass % logWasteFactor;
+    return (maskSizeClassMax << base) + (offset + 1) * (logAlignmentMin << base);
+}
 
-    inline size_t objectSize(size_t sizeClass)
-    {
-        if (sizeClass < maskSizeClassCount)
-            return maskObjectSize(sizeClass);
-        return logObjectSize(sizeClass - maskSizeClassCount);
-    }
-    
-    inline size_t pageSize(size_t pageClass)
-    {
-        return (pageClass + 1) * smallPageSize;
-    }
+static constexpr size_t sizeClassCount = maskSizeClassCount + logSizeClassCount;
+
+inline size_t sizeClass(size_t size)
+{
+    if (size <= maskSizeClassMax)
+        return maskSizeClass(size);
+    return maskSizeClassCount + logSizeClass(size);
 }
 
+inline size_t objectSize(size_t sizeClass)
+{
+    if (sizeClass < maskSizeClassCount)
+        return maskObjectSize(sizeClass);
+    return logObjectSize(sizeClass - maskSizeClassCount);
+}
+
+inline size_t pageSize(size_t pageClass)
+{
+    return (pageClass + 1) * smallPageSize;
+}
+} // namespace Sizes
+
 using namespace Sizes;
 
 } // namespace bmalloc
_______________________________________________
webkit-changes mailing list
webkit-changes@lists.webkit.org
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to