Title: [240171] trunk/Source
Revision: 240171
Author: jlew...@apple.com
Date: 2019-01-18 13:32:32 -0800 (Fri, 18 Jan 2019)

Log Message

Unreviewed, rolling out r240160.

This broke multiple internal builds.

Reverted changeset:

"Gigacages should start allocations from a slide"
https://bugs.webkit.org/show_bug.cgi?id=193523
https://trac.webkit.org/changeset/240160

Modified Paths

    trunk/Source/JavaScriptCore/ChangeLog
    trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
    trunk/Source/WTF/ChangeLog
    trunk/Source/WTF/wtf/Gigacage.cpp
    trunk/Source/WTF/wtf/Gigacage.h
    trunk/Source/bmalloc/ChangeLog
    trunk/Source/bmalloc/bmalloc/Gigacage.cpp
    trunk/Source/bmalloc/bmalloc/Gigacage.h
    trunk/Source/bmalloc/bmalloc/Heap.cpp
    trunk/Source/bmalloc/bmalloc/Sizes.h

Diff

Modified: trunk/Source/JavaScriptCore/ChangeLog (240170 => 240171)


--- trunk/Source/JavaScriptCore/ChangeLog	2019-01-18 21:30:07 UTC (rev 240170)
+++ trunk/Source/JavaScriptCore/ChangeLog	2019-01-18 21:32:32 UTC (rev 240171)
@@ -1,3 +1,15 @@
+2019-01-18  Matt Lewis  <jlew...@apple.com>
+
+        Unreviewed, rolling out r240160.
+
+        This broke multiple internal builds.
+
+        Reverted changeset:
+
+        "Gigacages should start allocations from a slide"
+        https://bugs.webkit.org/show_bug.cgi?id=193523
+        https://trac.webkit.org/changeset/240160
+
 2019-01-18  Keith Miller  <keith_mil...@apple.com>
 
         Gigacages should start allocations from a slide

Modified: trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm (240170 => 240171)


--- trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm	2019-01-18 21:30:07 UTC (rev 240170)
+++ trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm	2019-01-18 21:32:32 UTC (rev 240171)
@@ -1315,7 +1315,7 @@
     arrayProfile(OpGetById::Metadata::m_modeMetadata.arrayLengthMode.arrayProfile, t0, t2, t5)
     btiz t0, IsArray, .opGetByIdSlow
     btiz t0, IndexingShapeMask, .opGetByIdSlow
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t3], t0, t1)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t3], t0, t1)
     loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
     bilt t0, 0, .opGetByIdSlow
     orq tagTypeNumber, t0
@@ -1438,7 +1438,7 @@
     loadConstantOrVariableInt32(size, t3, t1, .opGetByValSlow)
     sxi2q t1, t1
 
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t0], t3, tagTypeNumber)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t0], t3, tagTypeNumber)
     move TagTypeNumber, tagTypeNumber
 
     andi IndexingShapeMask, t2
@@ -1504,7 +1504,7 @@
     bia t2, Int8ArrayType - FirstTypedArrayType, .opGetByValUint8ArrayOrUint8ClampedArray
 
     # We have Int8ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
     loadbs [t3, t1], t0
     finishIntGetByVal(t0, t1)
 
@@ -1512,13 +1512,13 @@
     bia t2, Uint8ArrayType - FirstTypedArrayType, .opGetByValUint8ClampedArray
 
     # We have Uint8ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
     loadb [t3, t1], t0
     finishIntGetByVal(t0, t1)
 
 .opGetByValUint8ClampedArray:
     # We have Uint8ClampedArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
     loadb [t3, t1], t0
     finishIntGetByVal(t0, t1)
 
@@ -1527,13 +1527,13 @@
     bia t2, Int16ArrayType - FirstTypedArrayType, .opGetByValUint16Array
 
     # We have Int16ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
     loadhs [t3, t1, 2], t0
     finishIntGetByVal(t0, t1)
 
 .opGetByValUint16Array:
     # We have Uint16ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
     loadh [t3, t1, 2], t0
     finishIntGetByVal(t0, t1)
 
@@ -1545,13 +1545,13 @@
     bia t2, Int32ArrayType - FirstTypedArrayType, .opGetByValUint32Array
 
     # We have Int32ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
     loadi [t3, t1, 4], t0
     finishIntGetByVal(t0, t1)
 
 .opGetByValUint32Array:
     # We have Uint32ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
     # This is the hardest part because of large unsigned values.
     loadi [t3, t1, 4], t0
     bilt t0, 0, .opGetByValSlow # This case is still awkward to implement in LLInt.
@@ -1563,7 +1563,7 @@
     bieq t2, Float32ArrayType - FirstTypedArrayType, .opGetByValSlow
 
     # We have Float64ArrayType.
-    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+    loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
     loadd [t3, t1, 8], ft0
     bdnequn ft0, ft0, .opGetByValSlow
     finishDoubleGetByVal(ft0, t0, t1)
@@ -1599,7 +1599,7 @@
         get(m_property, t0)
         loadConstantOrVariableInt32(size, t0, t3, .opPutByValSlow)
         sxi2q t3, t3
-        loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t1], t0, tagTypeNumber)
+        loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t1], t0, tagTypeNumber)
         move TagTypeNumber, tagTypeNumber
         btinz t2, CopyOnWrite, .opPutByValSlow
         andi IndexingShapeMask, t2
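
Note for readers unfamiliar with the loadCaged macro used throughout these hunks: the hunks above change only the spelling of the mask constant (namespace-scoped constexpr versus preprocessor macro); the caging itself is untouched. Conceptually, caging keeps only the low bits of a loaded pointer (its offset within the cage) and rebases that offset onto the cage's base pointer. A minimal C++ sketch of that idea, using a hypothetical helper name purely for illustration (this is not the offlineasm macro itself):

    #include <cstdint>

    // Hypothetical illustration of the caging idea behind loadCaged: mask the
    // pointer down to an in-cage offset, then rebase it onto the cage base.
    static inline void* cagePointer(void* cageBase, uintptr_t cageMask, void* rawPtr)
    {
        uintptr_t offset = reinterpret_cast<uintptr_t>(rawPtr) & cageMask; // offset within the cage
        return static_cast<char*>(cageBase) + offset;                      // rebased, always inside the cage
    }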

Modified: trunk/Source/WTF/ChangeLog (240170 => 240171)


--- trunk/Source/WTF/ChangeLog	2019-01-18 21:30:07 UTC (rev 240170)
+++ trunk/Source/WTF/ChangeLog	2019-01-18 21:32:32 UTC (rev 240171)
@@ -1,3 +1,15 @@
+2019-01-18  Matt Lewis  <jlew...@apple.com>
+
+        Unreviewed, rolling out r240160.
+
+        This broke multiple internal builds.
+
+        Reverted changeset:
+
+        "Gigacages should start allocations from a slide"
+        https://bugs.webkit.org/show_bug.cgi?id=193523
+        https://trac.webkit.org/changeset/240160
+
 2019-01-18  Keith Miller  <keith_mil...@apple.com>
 
         Gigacages should start allocations from a slide

Modified: trunk/Source/WTF/wtf/Gigacage.cpp (240170 => 240171)


--- trunk/Source/WTF/wtf/Gigacage.cpp	2019-01-18 21:30:07 UTC (rev 240170)
+++ trunk/Source/WTF/wtf/Gigacage.cpp	2019-01-18 21:32:32 UTC (rev 240171)
@@ -32,10 +32,10 @@
 
 #if defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
 
+alignas(void*) char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
+
 namespace Gigacage {
 
-alignas(void*) char g_gigacageBasePtrs[gigacageBasePtrsSize];
-
 void* tryMalloc(Kind, size_t size)
 {
     return FastMalloc::tryMalloc(size);
@@ -61,7 +61,7 @@
 }
 
 } // namespace Gigacage
-#else // defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
+#else
 #include <bmalloc/bmalloc.h>
 
 namespace Gigacage {

Modified: trunk/Source/WTF/wtf/Gigacage.h (240170 => 240171)


--- trunk/Source/WTF/wtf/Gigacage.h	2019-01-18 21:30:07 UTC (rev 240170)
+++ trunk/Source/WTF/wtf/Gigacage.h	2019-01-18 21:32:32 UTC (rev 240171)
@@ -26,19 +26,19 @@
 #pragma once
 
 #include <wtf/FastMalloc.h>
-#include <wtf/StdLibExtras.h>
 
 #if defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
 #define GIGACAGE_ENABLED 0
+#define PRIMITIVE_GIGACAGE_MASK 0
+#define JSVALUE_GIGACAGE_MASK 0
+#define GIGACAGE_BASE_PTRS_SIZE 8192
 
+extern "C" {
+alignas(void*) extern WTF_EXPORT_PRIVATE char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
+}
+
 namespace Gigacage {
 
-const size_t primitiveGigacageMask = 0;
-const size_t jsValueGigacageMask = 0;
-const size_t gigacageBasePtrsSize = 8 * KB;
-
-extern "C" alignas(void*) WTF_EXPORT_PRIVATE char g_gigacageBasePtrs[gigacageBasePtrsSize];
-
 struct BasePtrs {
     uintptr_t reservedForFlags;
     void* primitive;

Modified: trunk/Source/bmalloc/ChangeLog (240170 => 240171)


--- trunk/Source/bmalloc/ChangeLog	2019-01-18 21:30:07 UTC (rev 240170)
+++ trunk/Source/bmalloc/ChangeLog	2019-01-18 21:32:32 UTC (rev 240171)
@@ -1,3 +1,15 @@
+2019-01-18  Matt Lewis  <jlew...@apple.com>
+
+        Unreviewed, rolling out r240160.
+
+        This broke multiple internal builds.
+
+        Reverted changeset:
+
+        "Gigacages should start allocations from a slide"
+        https://bugs.webkit.org/show_bug.cgi?id=193523
+        https://trac.webkit.org/changeset/240160
+
 2019-01-18  Keith Miller  <keith_mil...@apple.com>
 
         Gigacages should start allocations from a slide

Modified: trunk/Source/bmalloc/bmalloc/Gigacage.cpp (240170 => 240171)


--- trunk/Source/bmalloc/bmalloc/Gigacage.cpp	2019-01-18 21:30:07 UTC (rev 240170)
+++ trunk/Source/bmalloc/bmalloc/Gigacage.cpp	2019-01-18 21:32:32 UTC (rev 240171)
@@ -35,25 +35,23 @@
 #include <cstdio>
 #include <mutex>
 
-#if GIGACAGE_ENABLED
-
-namespace Gigacage {
-
 // This is exactly 32GB because inside JSC, indexed accesses for arrays, typed arrays, etc,
 // use unsigned 32-bit ints as indices. The items those indices access are 8 bytes or less
 // in size. 2^32 * 8 = 32GB. This means if an access on a caged type happens to go out of
 // bounds, the access is guaranteed to land somewhere else in the cage or inside the runway.
 // If this were less than 32GB, those OOB accesses could reach outside of the cage.
-constexpr size_t gigacageRunway = 32llu * 1024 * 1024 * 1024;
+#define GIGACAGE_RUNWAY (32llu * 1024 * 1024 * 1024)
 
 // Note: g_gigacageBasePtrs[0] is reserved for storing the wasEnabled flag.
 // The first gigacageBasePtr will start at g_gigacageBasePtrs[sizeof(void*)].
 // This is done so that the wasEnabled flag will also be protected along with the
 // gigacageBasePtrs.
-alignas(gigacageBasePtrsSize) char g_gigacageBasePtrs[gigacageBasePtrsSize];
+alignas(GIGACAGE_BASE_PTRS_SIZE) char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
 
 using namespace bmalloc;
 
+namespace Gigacage {
+
 namespace {
 
 bool s_isDisablingPrimitiveGigacageDisabled;
@@ -63,12 +61,12 @@
     uintptr_t basePtrs = reinterpret_cast<uintptr_t>(g_gigacageBasePtrs);
     // We might only get page size alignment, but that's also the minimum we need.
     RELEASE_BASSERT(!(basePtrs & (vmPageSize() - 1)));
-    mprotect(g_gigacageBasePtrs, gigacageBasePtrsSize, PROT_READ);
+    mprotect(g_gigacageBasePtrs, GIGACAGE_BASE_PTRS_SIZE, PROT_READ);
 }
 
 void unprotectGigacageBasePtrs()
 {
-    mprotect(g_gigacageBasePtrs, gigacageBasePtrsSize, PROT_READ | PROT_WRITE);
+    mprotect(g_gigacageBasePtrs, GIGACAGE_BASE_PTRS_SIZE, PROT_READ | PROT_WRITE);
 }
 
 class UnprotectGigacageBasePtrsScope {
@@ -103,6 +101,7 @@
     Vector<Callback> callbacks;
 };
 
+#if GIGACAGE_ENABLED
 size_t runwaySize(Kind kind)
 {
     switch (kind) {
@@ -109,17 +108,19 @@
     case Kind::ReservedForFlagsAndNotABasePtr:
         RELEASE_BASSERT_NOT_REACHED();
     case Kind::Primitive:
-        return gigacageRunway;
+        return static_cast<size_t>(GIGACAGE_RUNWAY);
     case Kind::JSValue:
-        return 0;
+        return static_cast<size_t>(0);
     }
-    return 0;
+    return static_cast<size_t>(0);
 }
+#endif
 
 } // anonymous namespace
 
 void ensureGigacage()
 {
+#if GIGACAGE_ENABLED
     static std::once_flag onceFlag;
     std::call_once(
         onceFlag,
@@ -188,6 +189,7 @@
             setWasEnabled();
             protectGigacageBasePtrs();
         });
+#endif // GIGACAGE_ENABLED
 }
 
 void disablePrimitiveGigacage()
@@ -263,6 +265,8 @@
 bool shouldBeEnabled()
 {
     static bool cached = false;
+
+#if GIGACAGE_ENABLED
     static std::once_flag onceFlag;
     std::call_once(
         onceFlag,
@@ -284,11 +288,12 @@
             
             cached = true;
         });
+#endif // GIGACAGE_ENABLED
+    
     return cached;
 }
 
 } // namespace Gigacage
 
-#endif // GIGACAGE_ENABLED
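
The comment preserved in this hunk explains why the runway is exactly 32GB. As a quick, illustrative sanity check of that arithmetic (not part of the patch):

    #include <cstdint>

    // 32-bit indices (2^32 slots) times at most 8 bytes per caged element
    // equals 2^35 bytes, i.e. 32GB, matching GIGACAGE_RUNWAY above.
    static_assert((1ull << 32) * 8 == 32ull * 1024 * 1024 * 1024,
                  "runway must cover the largest possible 32-bit indexed access");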
 
 

Modified: trunk/Source/bmalloc/bmalloc/Gigacage.h (240170 => 240171)


--- trunk/Source/bmalloc/bmalloc/Gigacage.h	2019-01-18 21:30:07 UTC (rev 240170)
+++ trunk/Source/bmalloc/bmalloc/Gigacage.h	2019-01-18 21:32:32 UTC (rev 240171)
@@ -30,53 +30,16 @@
 #include "BExport.h"
 #include "BInline.h"
 #include "BPlatform.h"
-#include "Sizes.h"
 #include <cstddef>
 #include <inttypes.h>
 
-#if ((BOS(DARWIN) || BOS(LINUX)) && \
-(BCPU(X86_64) || (BCPU(ARM64) && !defined(__ILP32__) && (!BPLATFORM(IOS_FAMILY) || BPLATFORM(IOS)))))
-#define GIGACAGE_ENABLED 1
-#else
-#define GIGACAGE_ENABLED 0
-#endif
-
-
-namespace Gigacage {
-
-enum Kind {
-    ReservedForFlagsAndNotABasePtr = 0,
-    Primitive,
-    JSValue,
-};
-
-BINLINE const char* name(Kind kind)
-{
-    switch (kind) {
-    case ReservedForFlagsAndNotABasePtr:
-        RELEASE_BASSERT_NOT_REACHED();
-    case Primitive:
-        return "Primitive";
-    case JSValue:
-        return "JSValue";
-    }
-    BCRASH();
-    return nullptr;
-}
-
-#if GIGACAGE_ENABLED
-
 #if BCPU(ARM64)
-constexpr size_t primitiveGigacageSize = 2 * bmalloc::Sizes::GB;
-constexpr size_t jsValueGigacageSize = 1 * bmalloc::Sizes::GB;
-constexpr size_t gigacageBasePtrsSize = 16 * bmalloc::Sizes::kB;
-constexpr size_t minimumCageSizeAfterSlide = bmalloc::Sizes::GB / 2;
+#define PRIMITIVE_GIGACAGE_SIZE 0x80000000llu
+#define JSVALUE_GIGACAGE_SIZE 0x40000000llu
 #define GIGACAGE_ALLOCATION_CAN_FAIL 1
 #else
-constexpr size_t primitiveGigacageSize = 32 * bmalloc::Sizes::GB;
-constexpr size_t jsValueGigacageSize = 16 * bmalloc::Sizes::GB;
-constexpr size_t gigacageBasePtrsSize = 4 * bmalloc::Sizes::kB;
-constexpr size_t minimumCageSizeAfterSlide = 4 * bmalloc::Sizes::GB;
+#define PRIMITIVE_GIGACAGE_SIZE 0x800000000llu
+#define JSVALUE_GIGACAGE_SIZE 0x400000000llu
 #define GIGACAGE_ALLOCATION_CAN_FAIL 0
 #endif
 
@@ -87,19 +50,31 @@
 #define GIGACAGE_ALLOCATION_CAN_FAIL 1
 #endif
 
+static_assert(bmalloc::isPowerOfTwo(PRIMITIVE_GIGACAGE_SIZE), "");
+static_assert(bmalloc::isPowerOfTwo(JSVALUE_GIGACAGE_SIZE), "");
 
-static_assert(bmalloc::isPowerOfTwo(primitiveGigacageSize), "");
-static_assert(bmalloc::isPowerOfTwo(jsValueGigacageSize), "");
-static_assert(primitiveGigacageSize > minimumCageSizeAfterSlide, "");
-static_assert(jsValueGigacageSize > minimumCageSizeAfterSlide, "");
+#define GIGACAGE_SIZE_TO_MASK(size) ((size) - 1)
 
-constexpr size_t gigacageSizeToMask(size_t size) { return size - 1; }
+#define PRIMITIVE_GIGACAGE_MASK GIGACAGE_SIZE_TO_MASK(PRIMITIVE_GIGACAGE_SIZE)
+#define JSVALUE_GIGACAGE_MASK GIGACAGE_SIZE_TO_MASK(JSVALUE_GIGACAGE_SIZE)
 
-constexpr size_t primitiveGigacageMask = gigacageSizeToMask(primitiveGigacageSize);
-constexpr size_t jsValueGigacageMask = gigacageSizeToMask(jsValueGigacageSize);
+#if ((BOS(DARWIN) || BOS(LINUX)) && \
+    (BCPU(X86_64) || (BCPU(ARM64) && !defined(__ILP32__) && (!BPLATFORM(IOS_FAMILY) || BPLATFORM(IOS)))))
+#define GIGACAGE_ENABLED 1
+#else
+#define GIGACAGE_ENABLED 0
+#endif
 
-extern "C" alignas(gigacageBasePtrsSize) BEXPORT char g_gigacageBasePtrs[gigacageBasePtrsSize];
+#if BCPU(ARM64)
+#define GIGACAGE_BASE_PTRS_SIZE 16384
+#else
+#define GIGACAGE_BASE_PTRS_SIZE 4096
+#endif
 
+extern "C" alignas(GIGACAGE_BASE_PTRS_SIZE) BEXPORT char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
+
+namespace Gigacage {
+
 BINLINE bool wasEnabled() { return g_gigacageBasePtrs[0]; }
 BINLINE void setWasEnabled() { g_gigacageBasePtrs[0] = true; }
 
@@ -109,10 +84,16 @@
     void* jsValue;
 };
 
+enum Kind {
+    ReservedForFlagsAndNotABasePtr = 0,
+    Primitive,
+    JSValue,
+};
+
 static_assert(offsetof(BasePtrs, primitive) == Kind::Primitive * sizeof(void*), "");
 static_assert(offsetof(BasePtrs, jsValue) == Kind::JSValue * sizeof(void*), "");
 
-constexpr unsigned numKinds = 2;
+static constexpr unsigned numKinds = 2;
 
 BEXPORT void ensureGigacage();
 
@@ -128,6 +109,20 @@
 inline bool isPrimitiveGigacagePermanentlyEnabled() { return isDisablingPrimitiveGigacageDisabled(); }
 inline bool canPrimitiveGigacageBeDisabled() { return !isDisablingPrimitiveGigacageDisabled(); }
 
+BINLINE const char* name(Kind kind)
+{
+    switch (kind) {
+    case ReservedForFlagsAndNotABasePtr:
+        RELEASE_BASSERT_NOT_REACHED();
+    case Primitive:
+        return "Primitive";
+    case JSValue:
+        return "JSValue";
+    }
+    BCRASH();
+    return nullptr;
+}
+
 BINLINE void*& basePtr(BasePtrs& basePtrs, Kind kind)
 {
     switch (kind) {
@@ -163,9 +158,9 @@
     case ReservedForFlagsAndNotABasePtr:
         RELEASE_BASSERT_NOT_REACHED();
     case Primitive:
-        return static_cast<size_t>(primitiveGigacageSize);
+        return static_cast<size_t>(PRIMITIVE_GIGACAGE_SIZE);
     case JSValue:
-        return static_cast<size_t>(jsValueGigacageSize);
+        return static_cast<size_t>(JSVALUE_GIGACAGE_SIZE);
     }
     BCRASH();
     return 0;
@@ -178,7 +173,7 @@
 
 BINLINE size_t mask(Kind kind)
 {
-    return gigacageSizeToMask(size(kind));
+    return GIGACAGE_SIZE_TO_MASK(size(kind));
 }
 
 template<typename Func>
@@ -207,27 +202,6 @@
 
 BEXPORT bool shouldBeEnabled();
 
-#else // GIGACAGE_ENABLED
-
-BINLINE void*& basePtr(Kind)
-{
-    BCRASH();
-    static void* unreachable;
-    return unreachable;
-}
-BINLINE size_t size(Kind) { BCRASH(); return 0; }
-BINLINE void ensureGigacage() { }
-BINLINE bool wasEnabled() { return false; }
-BINLINE bool isCaged(Kind, const void*) { return true; }
-template<typename T> BINLINE T* caged(Kind, T* ptr) { return ptr; }
-BINLINE void disableDisablingPrimitiveGigacageIfShouldBeEnabled() { }
-BINLINE void disablePrimitiveGigacage() { }
-BINLINE void addPrimitiveDisableCallback(void (*)(void*), void*) { }
-BINLINE void removePrimitiveDisableCallback(void (*)(void*), void*) { }
-
-#endif // GIGACAGE_ENABLED
-
 } // namespace Gigacage
 
 
-
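
The GIGACAGE_SIZE_TO_MASK definition reintroduced above, like the gigacageSizeToMask() it replaces, relies on the cage size being a power of two: for S = 2^n, S - 1 sets exactly the low n bits, so masking any offset with it can never reach outside the cage. That is what the static_asserts in this header enforce. An illustrative check, using the ARM64 JSVALUE_GIGACAGE_SIZE value from the hunk above (not part of the patch):

    #include <cstdint>

    constexpr uint64_t kSize = 0x40000000ull; // JSVALUE_GIGACAGE_SIZE on ARM64
    constexpr uint64_t kMask = kSize - 1;     // GIGACAGE_SIZE_TO_MASK(kSize)
    static_assert((kSize & (kSize - 1)) == 0, "cage size must be a power of two");
    static_assert((0x123456789abcdefull & kMask) < kSize, "masked offsets always land inside the cage");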

Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (240170 => 240171)


--- trunk/Source/bmalloc/bmalloc/Heap.cpp	2019-01-18 21:30:07 UTC (rev 240170)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp	2019-01-18 21:32:32 UTC (rev 240171)
@@ -29,7 +29,6 @@
 #include "BulkDecommit.h"
 #include "BumpAllocator.h"
 #include "Chunk.h"
-#include "CryptoRandom.h"
 #include "Environment.h"
 #include "Gigacage.h"
 #include "DebugHeap.h"
@@ -62,12 +61,7 @@
 #if GIGACAGE_ENABLED
         if (usingGigacage()) {
             RELEASE_BASSERT(gigacageBasePtr());
-            uint64_t random;
-            cryptoRandom(reinterpret_cast<unsigned char*>(&random), sizeof(random));
-            ptrdiff_t offset = random % (gigacageSize() - Gigacage::minimumCageSizeAfterSlide);
-            offset = reinterpret_cast<ptrdiff_t>(roundDownToMultipleOf(vmPageSize(), reinterpret_cast<void*>(offset)));
-            void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset;
-            m_largeFree.add(LargeRange(base, gigacageSize() - offset, 0, 0));
+            m_largeFree.add(LargeRange(gigacageBasePtr(), gigacageSize(), 0, 0));
         }
 #endif
     }
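
The Heap.cpp hunk above removes the "slide": instead of seeding the large free list at the cage base, r240160 picked a random, page-aligned offset into the cage (leaving at least minimumCageSizeAfterSlide of usable space) and started allocations there; this rollout returns to using the cage base directly. A standalone, simplified sketch of the removed logic, with placeholder parameters standing in for cryptoRandom, vmPageSize() and the cage geometry:

    #include <cstddef>
    #include <cstdint>

    // Simplified sketch of the reverted slide computation; all parameters are
    // placeholders for the real bmalloc helpers, for illustration only.
    static char* slideCageBase(char* cageBase, size_t cageSize, size_t minSizeAfterSlide,
                               uint64_t randomBits, size_t pageSize)
    {
        size_t offset = randomBits % (cageSize - minSizeAfterSlide); // keep enough cage after the slide
        offset -= offset % pageSize;                                 // round down to a page boundary
        return cageBase + offset;                                    // the heap's large free range began here
    }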

Modified: trunk/Source/bmalloc/bmalloc/Sizes.h (240170 => 240171)


--- trunk/Source/bmalloc/bmalloc/Sizes.h	2019-01-18 21:30:07 UTC (rev 240170)
+++ trunk/Source/bmalloc/bmalloc/Sizes.h	2019-01-18 21:32:32 UTC (rev 240171)
@@ -40,91 +40,90 @@
 // Repository for malloc sizing constants and calculations.
 
 namespace Sizes {
-static constexpr size_t kB = 1024;
-static constexpr size_t MB = kB * kB;
-static constexpr size_t GB = kB * kB * kB;
+    static const size_t kB = 1024;
+    static const size_t MB = kB * kB;
 
-static constexpr size_t alignment = 8;
-static constexpr size_t alignmentMask = alignment - 1ul;
+    static const size_t alignment = 8;
+    static const size_t alignmentMask = alignment - 1ul;
 
-static constexpr size_t chunkSize = 1 * MB;
-static constexpr size_t chunkMask = ~(chunkSize - 1ul);
+    static const size_t chunkSize = 1 * MB;
+    static const size_t chunkMask = ~(chunkSize - 1ul);
 
-static constexpr size_t smallLineSize = 256;
-static constexpr size_t smallPageSize = 4 * kB;
-static constexpr size_t smallPageLineCount = smallPageSize / smallLineSize;
+    static const size_t smallLineSize = 256;
+    static const size_t smallPageSize = 4 * kB;
+    static const size_t smallPageLineCount = smallPageSize / smallLineSize;
 
-static constexpr size_t maskSizeClassMax = 512;
-static constexpr size_t smallMax = 32 * kB;
+    static const size_t maskSizeClassMax = 512;
+    static const size_t smallMax = 32 * kB;
 
-static constexpr size_t pageSizeMax = smallMax * 2;
-static constexpr size_t pageClassCount = pageSizeMax / smallPageSize;
+    static const size_t pageSizeMax = smallMax * 2;
+    static const size_t pageClassCount = pageSizeMax / smallPageSize;
 
-static constexpr size_t pageSizeWasteFactor = 8;
-static constexpr size_t logWasteFactor = 8;
+    static const size_t pageSizeWasteFactor = 8;
+    static const size_t logWasteFactor = 8;
 
-static constexpr size_t largeAlignment = smallMax / pageSizeWasteFactor;
-static constexpr size_t largeAlignmentMask = largeAlignment - 1;
+    static const size_t largeAlignment = smallMax / pageSizeWasteFactor;
+    static const size_t largeAlignmentMask = largeAlignment - 1;
 
-static constexpr size_t deallocatorLogCapacity = 512;
-static constexpr size_t bumpRangeCacheCapacity = 3;
+    static const size_t deallocatorLogCapacity = 512;
+    static const size_t bumpRangeCacheCapacity = 3;
+    
+    static const size_t scavengerBytesPerMemoryPressureCheck = 16 * MB;
+    static const double memoryPressureThreshold = 0.75;
+    
+    static const size_t maskSizeClassCount = maskSizeClassMax / alignment;
 
-static constexpr size_t scavengerBytesPerMemoryPressureCheck = 16 * MB;
-static constexpr double memoryPressureThreshold = 0.75;
+    constexpr size_t maskSizeClass(size_t size)
+    {
+        // We mask to accommodate zero.
+        return mask((size - 1) / alignment, maskSizeClassCount - 1);
+    }
 
-static constexpr size_t maskSizeClassCount = maskSizeClassMax / alignment;
+    inline size_t maskObjectSize(size_t maskSizeClass)
+    {
+        return (maskSizeClass + 1) * alignment;
+    }
 
-constexpr size_t maskSizeClass(size_t size)
-{
-    // We mask to accommodate zero.
-    return mask((size - 1) / alignment, maskSizeClassCount - 1);
-}
+    static const size_t logAlignmentMin = maskSizeClassMax / logWasteFactor;
 
-inline size_t maskObjectSize(size_t maskSizeClass)
-{
-    return (maskSizeClass + 1) * alignment;
-}
+    static const size_t logSizeClassCount = (log2(smallMax) - log2(maskSizeClassMax)) * logWasteFactor;
 
-static constexpr size_t logAlignmentMin = maskSizeClassMax / logWasteFactor;
+    inline size_t logSizeClass(size_t size)
+    {
+        size_t base = log2(size - 1) - log2(maskSizeClassMax);
+        size_t offset = (size - 1 - (maskSizeClassMax << base));
+        return base * logWasteFactor + offset / (logAlignmentMin << base);
+    }
 
-static constexpr size_t logSizeClassCount = (log2(smallMax) - log2(maskSizeClassMax)) * logWasteFactor;
+    inline size_t logObjectSize(size_t logSizeClass)
+    {
+        size_t base = logSizeClass / logWasteFactor;
+        size_t offset = logSizeClass % logWasteFactor;
+        return (maskSizeClassMax << base) + (offset + 1) * (logAlignmentMin << base);
+    }
 
-inline size_t logSizeClass(size_t size)
-{
-    size_t base = log2(size - 1) - log2(maskSizeClassMax);
-    size_t offset = (size - 1 - (maskSizeClassMax << base));
-    return base * logWasteFactor + offset / (logAlignmentMin << base);
-}
+    static const size_t sizeClassCount = maskSizeClassCount + logSizeClassCount;
 
-inline size_t logObjectSize(size_t logSizeClass)
-{
-    size_t base = logSizeClass / logWasteFactor;
-    size_t offset = logSizeClass % logWasteFactor;
-    return (maskSizeClassMax << base) + (offset + 1) * (logAlignmentMin << base);
-}
+    inline size_t sizeClass(size_t size)
+    {
+        if (size <= maskSizeClassMax)
+            return maskSizeClass(size);
+        return maskSizeClassCount + logSizeClass(size);
+    }
 
-static constexpr size_t sizeClassCount = maskSizeClassCount + logSizeClassCount;
-
-inline size_t sizeClass(size_t size)
-{
-    if (size <= maskSizeClassMax)
-        return maskSizeClass(size);
-    return maskSizeClassCount + logSizeClass(size);
+    inline size_t objectSize(size_t sizeClass)
+    {
+        if (sizeClass < maskSizeClassCount)
+            return maskObjectSize(sizeClass);
+        return logObjectSize(sizeClass - maskSizeClassCount);
+    }
+    
+    inline size_t pageSize(size_t pageClass)
+    {
+        return (pageClass + 1) * smallPageSize;
+    }
 }
 
-inline size_t objectSize(size_t sizeClass)
-{
-    if (sizeClass < maskSizeClassCount)
-        return maskObjectSize(sizeClass);
-    return logObjectSize(sizeClass - maskSizeClassCount);
-}
-
-inline size_t pageSize(size_t pageClass)
-{
-    return (pageClass + 1) * smallPageSize;
-}
-} // namespace Sizes
-
 using namespace Sizes;
 
 } // namespace bmalloc