Mirror of https://github.com/darlinghq/darling-bmalloc.git, synced 2024-11-23 04:19:41 +00:00

Merge pull request #2 from darlinghq/update_sources_11.5: Update Sources 11.5 - bmalloc
Commit 66c88bc0b9
@@ -1,6 +1,6 @@
 project(bmalloc)
 
-set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_CXX_STANDARD 17)
 
 include_directories(
     bmalloc
@@ -12,7 +12,6 @@ add_darling_static_library(bmalloc FAT
     bmalloc/AllIsoHeaps.cpp
    bmalloc/Allocator.cpp
     bmalloc/AvailableMemory.cpp
-    bmalloc/bmalloc.cpp
     bmalloc/Cache.cpp
     bmalloc/CryptoRandom.cpp
     bmalloc/Deallocator.cpp
@@ -21,6 +20,7 @@ add_darling_static_library(bmalloc FAT
     bmalloc/FreeList.cpp
     bmalloc/Gigacage.cpp
     bmalloc/Heap.cpp
+    bmalloc/HeapConstants.cpp
     bmalloc/HeapKind.cpp
     bmalloc/IsoHeapImpl.cpp
     bmalloc/IsoPage.cpp
@@ -33,12 +33,12 @@ add_darling_static_library(bmalloc FAT
     bmalloc/Logging.cpp
     bmalloc/Mutex.cpp
     bmalloc/ObjectType.cpp
+    bmalloc/ObjectTypeTable.cpp
     bmalloc/PerProcess.cpp
     bmalloc/PerThread.cpp
     bmalloc/ProcessCheck.mm
     bmalloc/Scavenger.cpp
     bmalloc/VMHeap.cpp
     bmalloc/Zone.cpp
+    bmalloc/bmalloc.cpp
 )
 
 add_darling_library(mbmalloc
@@ -16,6 +16,7 @@ set(bmalloc_SOURCES
     bmalloc/FreeList.cpp
     bmalloc/Gigacage.cpp
     bmalloc/Heap.cpp
+    bmalloc/HeapConstants.cpp
     bmalloc/HeapKind.cpp
     bmalloc/IsoHeapImpl.cpp
     bmalloc/IsoPage.cpp
@@ -28,12 +29,10 @@ set(bmalloc_SOURCES
     bmalloc/Logging.cpp
     bmalloc/Mutex.cpp
     bmalloc/ObjectType.cpp
+    bmalloc/ObjectTypeTable.cpp
     bmalloc/PerProcess.cpp
     bmalloc/PerThread.cpp
     bmalloc/Scavenger.cpp
-    bmalloc/VMHeap.cpp
     bmalloc/bmalloc.cpp
     bmalloc/mbmalloc.cpp
 )
 
 set(bmalloc_PUBLIC_HEADERS
@@ -65,11 +64,15 @@ set(bmalloc_PUBLIC_HEADERS
     bmalloc/EligibilityResult.h
     bmalloc/EligibilityResultInlines.h
     bmalloc/Environment.h
+    bmalloc/FailureAction.h
     bmalloc/FixedVector.h
     bmalloc/FreeList.h
     bmalloc/FreeListInlines.h
     bmalloc/Gigacage.h
+    bmalloc/GigacageConfig.h
+    bmalloc/GigacageKind.h
     bmalloc/Heap.h
+    bmalloc/HeapConstants.h
     bmalloc/HeapKind.h
     bmalloc/IsoAllocator.h
     bmalloc/IsoAllocatorInlines.h
@@ -110,6 +113,8 @@ set(bmalloc_PUBLIC_HEADERS
     bmalloc/Mutex.h
     bmalloc/Object.h
     bmalloc/ObjectType.h
+    bmalloc/ObjectTypeTable.h
+    bmalloc/Packed.h
     bmalloc/PerHeapKind.h
     bmalloc/PerProcess.h
     bmalloc/PerThread.h
@@ -125,7 +130,6 @@ set(bmalloc_PUBLIC_HEADERS
     bmalloc/StdLibExtras.h
     bmalloc/Syscall.h
     bmalloc/VMAllocate.h
-    bmalloc/VMHeap.h
     bmalloc/Vector.h
     bmalloc/Zone.h
     bmalloc/bmalloc.h
@@ -145,8 +149,6 @@ set(bmalloc_INTERFACE_LIBRARIES bmalloc)
 set(bmalloc_INTERFACE_INCLUDE_DIRECTORIES ${bmalloc_FRAMEWORK_HEADERS_DIR})
 set(bmalloc_INTERFACE_DEPENDENCIES bmalloc_CopyHeaders)
 
-set(mbmalloc_LIBRARIES bmalloc)
 
 WEBKIT_FRAMEWORK_DECLARE(bmalloc)
 WEBKIT_INCLUDE_CONFIG_FILES_IF_EXISTS()
@@ -155,13 +157,13 @@ WEBKIT_COPY_FILES(bmalloc_CopyHeaders
     FILES ${bmalloc_PUBLIC_HEADERS}
 )
 
 WEBKIT_WRAP_SOURCELIST(${bmalloc_SOURCES})
 WEBKIT_FRAMEWORK(bmalloc)
 WEBKIT_FRAMEWORK_TARGET(bmalloc)
 
-if (DEVELOPER_MODE)
+# Only build mbmalloc on platforms that MallocBench supports
+if (DEVELOPER_MODE AND (APPLE OR HAVE_MALLOC_TRIM))
     add_library(mbmalloc SHARED bmalloc/mbmalloc.cpp)
     target_include_directories(mbmalloc PRIVATE ${bmalloc_PRIVATE_INCLUDE_DIRECTORIES})
-    target_link_libraries(mbmalloc ${CMAKE_THREAD_LIBS_INIT} ${mbmalloc_LIBRARIES} ${bmalloc_LIBRARIES})
+    target_link_libraries(mbmalloc Threads::Threads bmalloc)
     set_target_properties(mbmalloc PROPERTIES COMPILE_DEFINITIONS "BUILDING_mbmalloc")
 endif ()
@@ -1,4 +1,4 @@
-// Copyright (C) 2009-2019 Apple Inc. All rights reserved.
+// Copyright (C) 2009-2020 Apple Inc. All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
@@ -94,59 +94,60 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES;
 GCC_WARN_UNUSED_FUNCTION = YES;
 GCC_WARN_UNUSED_VARIABLE = YES;
 PREBINDING = NO;
-WARNING_CFLAGS = -Wall -Wextra -Wcast-qual -Wchar-subscripts -Wextra-tokens -Wformat=2 -Winit-self -Wmissing-format-attribute -Wmissing-noreturn -Wpacked -Wpointer-arith -Wredundant-decls -Wundef -Wwrite-strings -Wexit-time-destructors -Wglobal-constructors -Wtautological-compare -Wimplicit-fallthrough;
+WARNING_CFLAGS = -Wall -Wextra -Wcast-qual -Wchar-subscripts -Wconditional-uninitialized -Wextra-tokens -Wformat=2 -Winit-self -Wmissing-format-attribute -Wmissing-noreturn -Wpacked -Wpointer-arith -Wredundant-decls -Wundef -Wwrite-strings -Wexit-time-destructors -Wglobal-constructors -Wtautological-compare -Wimplicit-fallthrough -Wvla -Wliteral-conversion;
 
-TARGET_MAC_OS_X_VERSION_MAJOR = $(TARGET_MAC_OS_X_VERSION_MAJOR$(MACOSX_DEPLOYMENT_TARGET:suffix:identifier));
-TARGET_MAC_OS_X_VERSION_MAJOR_12 = 101200;
-TARGET_MAC_OS_X_VERSION_MAJOR_13 = 101300;
-TARGET_MAC_OS_X_VERSION_MAJOR_14 = 101400;
-TARGET_MAC_OS_X_VERSION_MAJOR_15 = 101500;
-TARGET_MAC_OS_X_VERSION_MAJOR_16 = 101600;
+TARGET_MACOS_LEGACY_VERSION_IDENTIFIER = $(TARGET_MACOS_LEGACY_VERSION_IDENTIFIER_$(MACOSX_DEPLOYMENT_TARGET:base))
+TARGET_MACOS_LEGACY_VERSION_IDENTIFIER_10 = 10$(MACOSX_DEPLOYMENT_TARGET:suffix:identifier)
+
+TARGET_MAC_OS_X_VERSION_MAJOR = $(TARGET_MAC_OS_X_VERSION_MAJOR_$(TARGET_MACOS_LEGACY_VERSION_IDENTIFIER))
+TARGET_MAC_OS_X_VERSION_MAJOR_ = $(MACOSX_DEPLOYMENT_TARGET:base:base)0000
+TARGET_MAC_OS_X_VERSION_MAJOR_10_13 = 101300
+TARGET_MAC_OS_X_VERSION_MAJOR_10_14 = 101400
+TARGET_MAC_OS_X_VERSION_MAJOR_10_15 = 101500
 
 SUPPORTED_PLATFORMS = iphoneos iphonesimulator macosx appletvos appletvsimulator watchos watchsimulator;
 SUPPORTS_MACCATALYST = YES;
 
-// DEBUG_DEFINES, GCC_OPTIMIZATION_LEVEL, STRIP_INSTALLED_PRODUCT and DEAD_CODE_STRIPPING vary between the debug and normal variants.
-// We set up the values for each variant here, and have the Debug configuration in the Xcode project use the _debug variant.
-DEBUG_DEFINES_debug = ;
-DEBUG_DEFINES_normal = NDEBUG;
-DEBUG_DEFINES = $(DEBUG_DEFINES_$(CURRENT_VARIANT));
+DEAD_CODE_STRIPPING = YES;
+DEAD_CODE_STRIPPING[config=Debug] = NO;
 
-GCC_OPTIMIZATION_LEVEL = $(GCC_OPTIMIZATION_LEVEL_$(CURRENT_VARIANT));
-GCC_OPTIMIZATION_LEVEL_normal = 3;
-GCC_OPTIMIZATION_LEVEL_debug = 0;
+DEBUG_DEFINES = NDEBUG;
+DEBUG_DEFINES[config=Debug] = ;
 
-STRIP_INSTALLED_PRODUCT = $(STRIP_INSTALLED_PRODUCT_$(CURRENT_VARIANT));
-STRIP_INSTALLED_PRODUCT_normal = YES;
-STRIP_INSTALLED_PRODUCT_debug = NO;
+GCC_OPTIMIZATION_LEVEL = 3;
+GCC_OPTIMIZATION_LEVEL[config=Debug] = 0;
 
-DEAD_CODE_STRIPPING_debug = NO;
-DEAD_CODE_STRIPPING_normal = YES;
-DEAD_CODE_STRIPPING = $(DEAD_CODE_STRIPPING_$(CURRENT_VARIANT));
+STRIP_INSTALLED_PRODUCT = YES;
+STRIP_INSTALLED_PRODUCT[config=Debug] = NO;
 
 SDKROOT = macosx.internal;
 
 OTHER_CFLAGS = $(ASAN_OTHER_CFLAGS);
 OTHER_CPLUSPLUSFLAGS = $(ASAN_OTHER_CPLUSPLUSFLAGS);
 OTHER_LDFLAGS = $(ASAN_OTHER_LDFLAGS);
 
 BMALLOC_INSTALL_PATH_PREFIX = $(BMALLOC_INSTALL_PATH_PREFIX_DEPLOYMENT_$(DEPLOYMENT_LOCATION));
 BMALLOC_INSTALL_PATH_PREFIX_DEPLOYMENT_YES = $(BMALLOC_INSTALL_PATH_PREFIX_DEPLOYMENT_YES_USE_ALTERNATE_$(WK_USE_ALTERNATE_FRAMEWORKS_DIR));
 BMALLOC_INSTALL_PATH_PREFIX_DEPLOYMENT_YES_USE_ALTERNATE_YES = $(WK_ALTERNATE_FRAMEWORKS_DIR)/;
 
-// Disable on all platforms other than macOS, due to <rdar://problem/49013399>.
-LLVM_LTO = NO;
-LLVM_LTO[sdk=macosx*] = $(WK_LLVM_LTO_$(WK_XCODE_SUPPORTS_LTO));
+LLVM_LTO = $(WK_LLVM_LTO_$(WK_XCODE_SUPPORTS_LTO));
 WK_LLVM_LTO_NO = NO;
 WK_LLVM_LTO_YES = $(WK_USER_LTO_MODE);
 
 WK_XCODE_SUPPORTS_LTO = $(WK_NOT_$(WK_XCODE_VERSION_BEFORE_10_2_$(XCODE_VERSION_MAJOR)));
+WK_XCODE_SUPPORTS_LTO[arch=arm*] = $(WK_NOT_$(WK_XCODE_VERSION_BEFORE_11_4_$(XCODE_VERSION_MAJOR)));
 
 WK_XCODE_VERSION_BEFORE_10_2_0700 = YES;
 WK_XCODE_VERSION_BEFORE_10_2_0800 = YES;
 WK_XCODE_VERSION_BEFORE_10_2_0900 = YES;
 WK_XCODE_VERSION_BEFORE_10_2_1000 = $(WK_XCODE_VERSION_BEFORE_10_2_1000_$(XCODE_VERSION_MINOR));
 WK_XCODE_VERSION_BEFORE_10_2_1000_1000 = YES;
 WK_XCODE_VERSION_BEFORE_10_2_1000_1010 = YES;
 
+WK_XCODE_VERSION_BEFORE_11_4_0800 = YES;
+WK_XCODE_VERSION_BEFORE_11_4_0900 = YES;
+WK_XCODE_VERSION_BEFORE_11_4_1000 = YES;
+WK_XCODE_VERSION_BEFORE_11_4_1100 = $(WK_XCODE_VERSION_BEFORE_11_4_1100_$(XCODE_VERSION_MINOR));
+WK_XCODE_VERSION_BEFORE_11_4_1100_1100 = YES;
+WK_XCODE_VERSION_BEFORE_11_4_1100_1110 = YES;
+WK_XCODE_VERSION_BEFORE_11_4_1100_1120 = YES;
+WK_XCODE_VERSION_BEFORE_11_4_1100_1130 = YES;
 
 WK_USER_LTO_MODE = $(WK_USER_LTO_MODE_$(WK_LTO_MODE));
 WK_USER_LTO_MODE_full = YES;
 WK_USER_LTO_MODE_thin = YES_THIN;
@@ -27,14 +27,26 @@
 ARCHS = $(ARCHS_STANDARD_32_64_BIT);
 ONLY_ACTIVE_ARCH = YES;
 
-TARGET_MAC_OS_X_VERSION_MAJOR = $(MAC_OS_X_VERSION_MAJOR);
+// FIXME: Once <rdar://problem/70185899> is fixed, replace the following with
+// TARGET_MAC_OS_X_VERSION_MAJOR = $(MAC_OS_X_VERSION_MAJOR)
+TARGET_MACOS_11_VERSION_MAJOR = $(TARGET_MACOS_11_VERSION_MAJOR_$(MAC_OS_X_VERSION_MAJOR))
+TARGET_MACOS_11_VERSION_MAJOR_110100 = 110000
+TARGET_MACOS_11_VERSION_MAJOR_110200 = 110000
+TARGET_MACOS_11_VERSION_MAJOR_110300 = 110000
+TARGET_MACOS_11_VERSION_MAJOR_110400 = 110000
+TARGET_MACOS_11_VERSION_MAJOR_110500 = 110000
+TARGET_MACOS_11_VERSION_MAJOR_110600 = 110000
 
-MACOSX_DEPLOYMENT_TARGET = $(MACOSX_DEPLOYMENT_TARGET_$(PLATFORM_NAME)_$(TARGET_MAC_OS_X_VERSION_MAJOR));
-MACOSX_DEPLOYMENT_TARGET_macosx_101200 = 10.12;
-MACOSX_DEPLOYMENT_TARGET_macosx_101300 = 10.13;
-MACOSX_DEPLOYMENT_TARGET_macosx_101400 = 10.14;
-MACOSX_DEPLOYMENT_TARGET_macosx_101500 = 10.15;
-MACOSX_DEPLOYMENT_TARGET_macosx_101600 = 10.16;
+TARGET_MAC_OS_X_VERSION_MAJOR = $(TARGET_MAC_OS_X_VERSION_MAJOR_$(TARGET_MACOS_11_VERSION_MAJOR))
+TARGET_MAC_OS_X_VERSION_MAJOR_110000 = 110000
+TARGET_MAC_OS_X_VERSION_MAJOR_ = $(MAC_OS_X_VERSION_MAJOR)
+
+MACOSX_DEPLOYMENT_TARGET = $(MACOSX_DEPLOYMENT_TARGET_$(TARGET_MAC_OS_X_VERSION_MAJOR));
+MACOSX_DEPLOYMENT_TARGET_101300 = 10.13;
+MACOSX_DEPLOYMENT_TARGET_101400 = 10.14;
+MACOSX_DEPLOYMENT_TARGET_101500 = 10.15;
+MACOSX_DEPLOYMENT_TARGET_110000 = 11.0;
+MACOSX_DEPLOYMENT_TARGET_120000 = 12.0;
 
 GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = YES;
 DEBUG_INFORMATION_FORMAT = dwarf;
@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Apple Inc. All rights reserved.
+// Copyright (C) 2019-2020 Apple Inc. All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
@@ -25,6 +25,10 @@ WK_EMPTY_ = YES;
 WK_NOT_ = YES;
 WK_NOT_YES = NO;
 
+WK_DEFAULT_PLATFORM_NAME = $(WK_DEFAULT_PLATFORM_NAME_$(WK_EMPTY_$(FALLBACK_PLATFORM)));
+WK_DEFAULT_PLATFORM_NAME_YES = $(PLATFORM_NAME);
+WK_DEFAULT_PLATFORM_NAME_ = $(FALLBACK_PLATFORM);
+
 WK_ALTERNATE_FRAMEWORKS_DIR = $(WK_ALTERNATE_FRAMEWORKS_DIR_$(SDK_VARIANT));
 WK_ALTERNATE_FRAMEWORKS_DIR_iosmac = /System/iOSSupport;
 WK_USE_ALTERNATE_FRAMEWORKS_DIR = $(WK_NOT_$(WK_EMPTY_$(WK_ALTERNATE_FRAMEWORKS_DIR)));
@@ -38,7 +42,7 @@ WK_ALTERNATE_WEBKIT_SDK_PATH_YES = $(WK_ALTERNATE_FRAMEWORKS_DIR)/;
 
 WK_PLATFORM_NAME = $(WK_PLATFORM_NAME_ALTERNATE_$(WK_USE_ALTERNATE_PLATFORM_NAME));
 WK_PLATFORM_NAME_ALTERNATE_YES = $(WK_ALTERNATE_PLATFORM_NAME);
-WK_PLATFORM_NAME_ALTERNATE_NO = $(PLATFORM_NAME);
+WK_PLATFORM_NAME_ALTERNATE_NO = $(WK_DEFAULT_PLATFORM_NAME);
 
 EFFECTIVE_PLATFORM_NAME = $(EFFECTIVE_PLATFORM_NAME_ALTERNATE_$(WK_USE_ALTERNATE_PLATFORM_NAME));
 EFFECTIVE_PLATFORM_NAME_ALTERNATE_YES = -$(WK_ALTERNATE_PLATFORM_NAME);
@@ -5,6 +5,5 @@ list(APPEND bmalloc_SOURCES
 )
 
 list(APPEND bmalloc_PUBLIC_HEADERS
-    bmalloc/darwin/BSoftLinking.h
     bmalloc/darwin/MemoryStatusSPI.h
 )
README.md (new file, 9 lines)
@@ -0,0 +1,9 @@
+# darling-bmalloc
+
+## Upstream URL
+
+https://github.com/apple-oss-distributions/bmalloc/tree/bmalloc-7611.3.10.1.3
+
+## Updating Source
+
+When updating the source code, make sure to refer to the [`Updating Sources`](https://docs.darlinghq.org/contributing/updating-sources/index.html#updating-sources) section from the Darling Docs website. Additional details (if any) can be found in [`darling/notes/UPDATE_SOURCE.md`](darling/notes/UPDATE_SOURCE.md).
@@ -78,7 +78,6 @@
 0F7EB85A1F955A1100F1ABCB /* DeferredDecommitInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F7EB8591F955A0F00F1ABCB /* DeferredDecommitInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
 0FD557331F7EDB7B00B1F0A3 /* HeapKind.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD557321F7EDB7B00B1F0A3 /* HeapKind.cpp */; };
 1400274918F89C1300115C97 /* Heap.h in Headers */ = {isa = PBXBuildFile; fileRef = 14DA320C18875B09007269E0 /* Heap.h */; settings = {ATTRIBUTES = (Private, ); }; };
-1400274A18F89C2300115C97 /* VMHeap.h in Headers */ = {isa = PBXBuildFile; fileRef = 144F7BFC18BFC517003537F3 /* VMHeap.h */; settings = {ATTRIBUTES = (Private, ); }; };
 140FA00319CE429C00FFD3C8 /* BumpRange.h in Headers */ = {isa = PBXBuildFile; fileRef = 140FA00219CE429C00FFD3C8 /* BumpRange.h */; settings = {ATTRIBUTES = (Private, ); }; };
 140FA00519CE4B6800FFD3C8 /* LineMetadata.h in Headers */ = {isa = PBXBuildFile; fileRef = 140FA00419CE4B6800FFD3C8 /* LineMetadata.h */; settings = {ATTRIBUTES = (Private, ); }; };
 141D9B001C8E51C0000ABBA0 /* List.h in Headers */ = {isa = PBXBuildFile; fileRef = 141D9AFF1C8E51C0000ABBA0 /* List.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -125,12 +124,9 @@
 14F271C518EA397E008C152F /* Deallocator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 145F6859179DC90200D65598 /* Deallocator.cpp */; };
 14F271C718EA3990008C152F /* Heap.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14DA320E18875D9F007269E0 /* Heap.cpp */; };
 14F271C818EA3990008C152F /* ObjectType.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14105E8318E14374003A106E /* ObjectType.cpp */; };
-14F271C918EA3990008C152F /* VMHeap.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 144F7BFB18BFC517003537F3 /* VMHeap.cpp */; };
 4426E2801C838EE0008EB042 /* Logging.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4426E27E1C838EE0008EB042 /* Logging.cpp */; };
 4426E2811C838EE0008EB042 /* Logging.h in Headers */ = {isa = PBXBuildFile; fileRef = 4426E27F1C838EE0008EB042 /* Logging.h */; settings = {ATTRIBUTES = (Private, ); }; };
-4426E2831C839547008EB042 /* BSoftLinking.h in Headers */ = {isa = PBXBuildFile; fileRef = 4426E2821C839547008EB042 /* BSoftLinking.h */; };
 52F47249210BA30200B730BB /* MemoryStatusSPI.h in Headers */ = {isa = PBXBuildFile; fileRef = 52F47248210BA2F500B730BB /* MemoryStatusSPI.h */; settings = {ATTRIBUTES = (Private, ); }; };
 6543DDB420EEAEF3003B23D8 /* PerThread.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 6543DDB320EEAEF3003B23D8 /* PerThread.cpp */; };
 6599C5CC1EC3F15900A2F7BB /* AvailableMemory.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 6599C5CA1EC3F15900A2F7BB /* AvailableMemory.cpp */; };
 6599C5CD1EC3F15900A2F7BB /* AvailableMemory.h in Headers */ = {isa = PBXBuildFile; fileRef = 6599C5CB1EC3F15900A2F7BB /* AvailableMemory.h */; settings = {ATTRIBUTES = (Private, ); }; };
 7939885B2076EEB60074A2E7 /* BulkDecommit.h in Headers */ = {isa = PBXBuildFile; fileRef = 7939885A2076EEB50074A2E7 /* BulkDecommit.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -141,6 +137,9 @@
 AD14AD2A202529C700890E3B /* ProcessCheck.mm in Sources */ = {isa = PBXBuildFile; fileRef = AD14AD28202529B000890E3B /* ProcessCheck.mm */; };
 DE8B13B321CC5D9F00A63FCD /* BVMTags.h in Headers */ = {isa = PBXBuildFile; fileRef = DE8B13B221CC5D9F00A63FCD /* BVMTags.h */; settings = {ATTRIBUTES = (Private, ); }; };
 E31E74802238CA5C005D084A /* StaticPerProcess.h in Headers */ = {isa = PBXBuildFile; fileRef = E31E747F2238CA5B005D084A /* StaticPerProcess.h */; settings = {ATTRIBUTES = (Private, ); }; };
+E328D84D23CEB38900545B18 /* Packed.h in Headers */ = {isa = PBXBuildFile; fileRef = E328D84C23CEB38900545B18 /* Packed.h */; settings = {ATTRIBUTES = (Private, ); }; };
+E378A9DF246B68720029C2BB /* ObjectTypeTable.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E378A9DE246B686A0029C2BB /* ObjectTypeTable.cpp */; };
+E378A9E0246B68750029C2BB /* ObjectTypeTable.h in Headers */ = {isa = PBXBuildFile; fileRef = E378A9DD246B686A0029C2BB /* ObjectTypeTable.h */; settings = {ATTRIBUTES = (Private, ); }; };
 E3A413C9226061140037F470 /* IsoSharedPageInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = E3A413C8226061140037F470 /* IsoSharedPageInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
 E3F24402225D2C0100A0E0C3 /* IsoSharedPage.h in Headers */ = {isa = PBXBuildFile; fileRef = E3F24401225D2C0100A0E0C3 /* IsoSharedPage.h */; settings = {ATTRIBUTES = (Private, ); }; };
 E3F24404225D2C7600A0E0C3 /* IsoSharedPage.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E3F24403225D2C7600A0E0C3 /* IsoSharedPage.cpp */; };
@@ -148,6 +147,11 @@
 E3FBB5A1225EADB000DB6FBD /* IsoSharedHeap.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E3FBB59E225EADB000DB6FBD /* IsoSharedHeap.cpp */; };
 E3FBB5A2225EADB000DB6FBD /* IsoSharedHeap.h in Headers */ = {isa = PBXBuildFile; fileRef = E3FBB59F225EADB000DB6FBD /* IsoSharedHeap.h */; settings = {ATTRIBUTES = (Private, ); }; };
 E3FBB5A4225ECAD200DB6FBD /* IsoSharedHeapInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = E3FBB5A3225ECAD200DB6FBD /* IsoSharedHeapInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
+EB17D11123BFCD42002093A7 /* HeapConstants.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EB17D11023BFC8C4002093A7 /* HeapConstants.cpp */; };
+EB17D11223BFCD7A002093A7 /* HeapConstants.h in Headers */ = {isa = PBXBuildFile; fileRef = EB17D10E23BE691D002093A7 /* HeapConstants.h */; settings = {ATTRIBUTES = (Private, ); }; };
+FE48BD3B2321E8D700F136D0 /* FailureAction.h in Headers */ = {isa = PBXBuildFile; fileRef = FE48BD3A2321E8CC00F136D0 /* FailureAction.h */; settings = {ATTRIBUTES = (Private, ); }; };
+FEC3A39324846A8100395B54 /* GigacageConfig.h in Headers */ = {isa = PBXBuildFile; fileRef = FEC3A39224846A6D00395B54 /* GigacageConfig.h */; settings = {ATTRIBUTES = (Private, ); }; };
+FEC3A395248471FE00395B54 /* GigacageKind.h in Headers */ = {isa = PBXBuildFile; fileRef = FEC3A394248471FE00395B54 /* GigacageKind.h */; settings = {ATTRIBUTES = (Private, ); }; };
 /* End PBXBuildFile section */
 
 /* Begin PBXContainerItemProxy section */
@@ -250,8 +254,6 @@
 144BE11E1CA346520099C8C0 /* Object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Object.h; path = bmalloc/Object.h; sourceTree = "<group>"; };
 144C07F21C7B70260051BB6A /* LargeMap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LargeMap.cpp; path = bmalloc/LargeMap.cpp; sourceTree = "<group>"; };
 144C07F31C7B70260051BB6A /* LargeMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LargeMap.h; path = bmalloc/LargeMap.h; sourceTree = "<group>"; };
-144F7BFB18BFC517003537F3 /* VMHeap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = VMHeap.cpp; path = bmalloc/VMHeap.cpp; sourceTree = "<group>"; };
-144F7BFC18BFC517003537F3 /* VMHeap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = VMHeap.h; path = bmalloc/VMHeap.h; sourceTree = "<group>"; };
 1452478618BC757C00F80098 /* SmallLine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = SmallLine.h; path = bmalloc/SmallLine.h; sourceTree = "<group>"; };
 145F6855179DC8CA00D65598 /* Allocator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; lineEnding = 0; name = Allocator.cpp; path = bmalloc/Allocator.cpp; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.cpp; };
 145F6856179DC8CA00D65598 /* Allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; lineEnding = 0; name = Allocator.h; path = bmalloc/Allocator.h; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.objcpp; };
@@ -280,9 +282,7 @@
 14F271BE18EA3963008C152F /* libbmalloc.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libbmalloc.a; sourceTree = BUILT_PRODUCTS_DIR; };
 4426E27E1C838EE0008EB042 /* Logging.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = Logging.cpp; path = bmalloc/Logging.cpp; sourceTree = "<group>"; };
 4426E27F1C838EE0008EB042 /* Logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Logging.h; path = bmalloc/Logging.h; sourceTree = "<group>"; };
-4426E2821C839547008EB042 /* BSoftLinking.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = BSoftLinking.h; path = bmalloc/darwin/BSoftLinking.h; sourceTree = "<group>"; };
 52F47248210BA2F500B730BB /* MemoryStatusSPI.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = MemoryStatusSPI.h; path = bmalloc/darwin/MemoryStatusSPI.h; sourceTree = "<group>"; };
 6543DDB320EEAEF3003B23D8 /* PerThread.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = PerThread.cpp; path = bmalloc/PerThread.cpp; sourceTree = "<group>"; };
 6599C5CA1EC3F15900A2F7BB /* AvailableMemory.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = AvailableMemory.cpp; path = bmalloc/AvailableMemory.cpp; sourceTree = "<group>"; };
 6599C5CB1EC3F15900A2F7BB /* AvailableMemory.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AvailableMemory.h; path = bmalloc/AvailableMemory.h; sourceTree = "<group>"; };
 7939885A2076EEB50074A2E7 /* BulkDecommit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = BulkDecommit.h; path = bmalloc/BulkDecommit.h; sourceTree = "<group>"; };
@@ -294,6 +294,9 @@
 AD14AD28202529B000890E3B /* ProcessCheck.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = ProcessCheck.mm; path = bmalloc/ProcessCheck.mm; sourceTree = "<group>"; };
 DE8B13B221CC5D9F00A63FCD /* BVMTags.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = BVMTags.h; path = bmalloc/BVMTags.h; sourceTree = "<group>"; };
 E31E747F2238CA5B005D084A /* StaticPerProcess.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = StaticPerProcess.h; path = bmalloc/StaticPerProcess.h; sourceTree = "<group>"; };
+E328D84C23CEB38900545B18 /* Packed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Packed.h; path = bmalloc/Packed.h; sourceTree = "<group>"; };
+E378A9DD246B686A0029C2BB /* ObjectTypeTable.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = ObjectTypeTable.h; path = bmalloc/ObjectTypeTable.h; sourceTree = "<group>"; };
+E378A9DE246B686A0029C2BB /* ObjectTypeTable.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = ObjectTypeTable.cpp; path = bmalloc/ObjectTypeTable.cpp; sourceTree = "<group>"; };
 E3A413C8226061140037F470 /* IsoSharedPageInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedPageInlines.h; path = bmalloc/IsoSharedPageInlines.h; sourceTree = "<group>"; };
 E3F24401225D2C0100A0E0C3 /* IsoSharedPage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedPage.h; path = bmalloc/IsoSharedPage.h; sourceTree = "<group>"; };
 E3F24403225D2C7600A0E0C3 /* IsoSharedPage.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = IsoSharedPage.cpp; path = bmalloc/IsoSharedPage.cpp; sourceTree = "<group>"; };
@@ -301,6 +304,11 @@
 E3FBB59E225EADB000DB6FBD /* IsoSharedHeap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = IsoSharedHeap.cpp; path = bmalloc/IsoSharedHeap.cpp; sourceTree = "<group>"; };
 E3FBB59F225EADB000DB6FBD /* IsoSharedHeap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedHeap.h; path = bmalloc/IsoSharedHeap.h; sourceTree = "<group>"; };
 E3FBB5A3225ECAD200DB6FBD /* IsoSharedHeapInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedHeapInlines.h; path = bmalloc/IsoSharedHeapInlines.h; sourceTree = "<group>"; };
+EB17D10E23BE691D002093A7 /* HeapConstants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = HeapConstants.h; path = bmalloc/HeapConstants.h; sourceTree = "<group>"; };
+EB17D11023BFC8C4002093A7 /* HeapConstants.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = HeapConstants.cpp; path = bmalloc/HeapConstants.cpp; sourceTree = "<group>"; };
+FE48BD3A2321E8CC00F136D0 /* FailureAction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = FailureAction.h; path = bmalloc/FailureAction.h; sourceTree = "<group>"; };
+FEC3A39224846A6D00395B54 /* GigacageConfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = GigacageConfig.h; path = bmalloc/GigacageConfig.h; sourceTree = "<group>"; };
+FEC3A394248471FE00395B54 /* GigacageKind.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = GigacageKind.h; path = bmalloc/GigacageKind.h; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
@@ -477,22 +485,27 @@
 142B44351E2839E7001DA6E9 /* DebugHeap.h */,
 14895D8F1A3A319C0006235D /* Environment.cpp */,
 14895D901A3A319C0006235D /* Environment.h */,
+FE48BD3A2321E8CC00F136D0 /* FailureAction.h */,
 0F5BF14E1F22DEAF0029D91D /* Gigacage.cpp */,
 0F5BF14C1F22B0C30029D91D /* Gigacage.h */,
+FEC3A39224846A6D00395B54 /* GigacageConfig.h */,
+FEC3A394248471FE00395B54 /* GigacageKind.h */,
 14DA320E18875D9F007269E0 /* Heap.cpp */,
 14DA320C18875B09007269E0 /* Heap.h */,
+EB17D11023BFC8C4002093A7 /* HeapConstants.cpp */,
+EB17D10E23BE691D002093A7 /* HeapConstants.h */,
 140FA00419CE4B6800FFD3C8 /* LineMetadata.h */,
 144BE11E1CA346520099C8C0 /* Object.h */,
 14105E8318E14374003A106E /* ObjectType.cpp */,
 1485656018A43DBA00ED6942 /* ObjectType.h */,
+E378A9DE246B686A0029C2BB /* ObjectTypeTable.cpp */,
+E378A9DD246B686A0029C2BB /* ObjectTypeTable.h */,
 795AB3C6206E0D250074FE76 /* PhysicalPageMap.h */,
 AD14AD27202529A600890E3B /* ProcessCheck.h */,
 AD14AD28202529B000890E3B /* ProcessCheck.mm */,
 0F5BF1501F22E1570029D91D /* Scavenger.cpp */,
 0F5BF1511F22E1570029D91D /* Scavenger.h */,
 145F6874179DF84100D65598 /* Sizes.h */,
-144F7BFB18BFC517003537F3 /* VMHeap.cpp */,
-144F7BFC18BFC517003537F3 /* VMHeap.h */,
 1440AFCC1A9527AF00837FAA /* Zone.cpp */,
 1440AFCA1A95261100837FAA /* Zone.h */,
 );
@@ -525,10 +538,10 @@
 14C8992A1CC485E70027A057 /* Map.h */,
 143CB81A19022BC900B16A45 /* Mutex.cpp */,
 143CB81B19022BC900B16A45 /* Mutex.h */,
+E328D84C23CEB38900545B18 /* Packed.h */,
 0F5BF1481F22A8D80029D91D /* PerHeapKind.h */,
 0F26A7A42054830D0090A141 /* PerProcess.cpp */,
 14446A0717A61FA400F9EA1D /* PerProcess.h */,
 6543DDB320EEAEF3003B23D8 /* PerThread.cpp */,
 144469FD17A61F1F00F9EA1D /* PerThread.h */,
 145F6878179E3A4400D65598 /* Range.h */,
 148EFAE61D6B953B008E721E /* ScopeExit.h */,
@@ -544,7 +557,6 @@
 4408F2961C9896C40012EC64 /* darwin */ = {
 isa = PBXGroup;
 children = (
-4426E2821C839547008EB042 /* BSoftLinking.h */,
 52F47248210BA2F500B730BB /* MemoryStatusSPI.h */,
 );
 name = darwin;
@@ -577,7 +589,6 @@
 1448C30118F3754C00502839 /* bmalloc.h in Headers */,
 0F7EB84D1F9541C700F1ABCB /* BMalloced.h in Headers */,
 14C919C918FCC59F0028DB43 /* BPlatform.h in Headers */,
-4426E2831C839547008EB042 /* BSoftLinking.h in Headers */,
 7939885B2076EEB60074A2E7 /* BulkDecommit.h in Headers */,
 14DD789C18F48D4A00950702 /* BumpAllocator.h in Headers */,
 140FA00319CE429C00FFD3C8 /* BumpRange.h in Headers */,
@@ -594,11 +605,15 @@
 0F7EB8231F9541B000F1ABCB /* EligibilityResult.h in Headers */,
 0F7EB8381F9541B000F1ABCB /* EligibilityResultInlines.h in Headers */,
 14895D921A3A319C0006235D /* Environment.h in Headers */,
+FE48BD3B2321E8D700F136D0 /* FailureAction.h in Headers */,
 14DD78C818F48D7500950702 /* FixedVector.h in Headers */,
 0F7EB8441F9541B000F1ABCB /* FreeList.h in Headers */,
 0F7EB8291F9541B000F1ABCB /* FreeListInlines.h in Headers */,
 0F5BF14D1F22B0C30029D91D /* Gigacage.h in Headers */,
+FEC3A39324846A8100395B54 /* GigacageConfig.h in Headers */,
+FEC3A395248471FE00395B54 /* GigacageKind.h in Headers */,
 1400274918F89C1300115C97 /* Heap.h in Headers */,
+EB17D11223BFCD7A002093A7 /* HeapConstants.h in Headers */,
 0F5BF1471F22A8B10029D91D /* HeapKind.h in Headers */,
 0F7EB83C1F9541B000F1ABCB /* IsoAllocator.h in Headers */,
 0F7EB8261F9541B000F1ABCB /* IsoAllocatorInlines.h in Headers */,
@@ -640,6 +655,8 @@
 143CB81D19022BC900B16A45 /* Mutex.h in Headers */,
 144BE11F1CA346520099C8C0 /* Object.h in Headers */,
 14DD789318F48D0F00950702 /* ObjectType.h in Headers */,
+E378A9E0246B68750029C2BB /* ObjectTypeTable.h in Headers */,
+E328D84D23CEB38900545B18 /* Packed.h in Headers */,
 0F5BF1491F22A8D80029D91D /* PerHeapKind.h in Headers */,
 14DD78CB18F48D7500950702 /* PerProcess.h in Headers */,
 14DD78CC18F48D7500950702 /* PerThread.h in Headers */,
@@ -656,7 +673,6 @@
 14DD78CE18F48D7500950702 /* Syscall.h in Headers */,
 14DD78CF18F48D7500950702 /* Vector.h in Headers */,
 14DD78D018F48D7500950702 /* VMAllocate.h in Headers */,
-1400274A18F89C2300115C97 /* VMHeap.h in Headers */,
 1440AFCB1A95261100837FAA /* Zone.h in Headers */,
 );
 runOnlyForDeploymentPostprocessing = 0;
@@ -706,7 +722,7 @@
 isa = PBXProject;
 attributes = {
 LastSwiftUpdateCheck = 0700;
-LastUpgradeCheck = 1000;
+LastUpgradeCheck = 1140;
 TargetAttributes = {
 0F7EB8501F95504B00F1ABCB = {
 CreatedOnToolsVersion = 9.0;
@@ -720,6 +736,7 @@
 hasScannedForEncodings = 0;
 knownRegions = (
 en,
+Base,
 );
 mainGroup = 145F6836179DC45F00D65598;
 productRefGroup = 145F6840179DC45F00D65598 /* Products */;
@@ -758,6 +775,7 @@
 0F7EB83E1F9541B000F1ABCB /* FreeList.cpp in Sources */,
 0F5BF14F1F22DEAF0029D91D /* Gigacage.cpp in Sources */,
 14F271C718EA3990008C152F /* Heap.cpp in Sources */,
+EB17D11123BFCD42002093A7 /* HeapConstants.cpp in Sources */,
 0FD557331F7EDB7B00B1F0A3 /* HeapKind.cpp in Sources */,
 0F7EB83B1F9541B000F1ABCB /* IsoHeapImpl.cpp in Sources */,
 0F5549EF1FB54704007FF75A /* IsoPage.cpp in Sources */,
@@ -770,11 +788,10 @@
 4426E2801C838EE0008EB042 /* Logging.cpp in Sources */,
 143CB81C19022BC900B16A45 /* Mutex.cpp in Sources */,
 14F271C818EA3990008C152F /* ObjectType.cpp in Sources */,
+E378A9DF246B68720029C2BB /* ObjectTypeTable.cpp in Sources */,
 0F26A7A5205483130090A141 /* PerProcess.cpp in Sources */,
 6543DDB420EEAEF3003B23D8 /* PerThread.cpp in Sources */,
 AD14AD2A202529C700890E3B /* ProcessCheck.mm in Sources */,
 0F5BF1521F22E1570029D91D /* Scavenger.cpp in Sources */,
-14F271C918EA3990008C152F /* VMHeap.cpp in Sources */,
 1440AFCD1A9527AF00837FAA /* Zone.cpp in Sources */,
 );
 runOnlyForDeploymentPostprocessing = 0;
@@ -844,10 +861,6 @@
 isa = XCBuildConfiguration;
 baseConfigurationReference = 14B650C718F39F4800751968 /* DebugRelease.xcconfig */;
 buildSettings = {
-DEAD_CODE_STRIPPING = "$(DEAD_CODE_STRIPPING_debug)";
-DEBUG_DEFINES = "$(DEBUG_DEFINES_debug)";
-GCC_OPTIMIZATION_LEVEL = "$(GCC_OPTIMIZATION_LEVEL_debug)";
-STRIP_INSTALLED_PRODUCT = "$(STRIP_INSTALLED_PRODUCT_debug)";
 };
 name = Debug;
 };
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -28,6 +28,7 @@
 
 #include "BAssert.h"
 #include <algorithm>
+#include <climits>
 #include <cstdint>
 #include <cstddef>
 #include <limits>
@@ -129,7 +130,7 @@ template<typename T> inline void divideRoundingUp(T numerator, T denominator, T&
     quotient += 1;
 }
 
-template<typename T> inline T divideRoundingUp(T numerator, T denominator)
+template<typename T> constexpr T divideRoundingUp(T numerator, T denominator)
 {
     return (numerator + denominator - 1) / denominator;
 }
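Switching divideRoundingUp from inline to constexpr lets callers evaluate it at compile time, for example when sizing fixed arrays from byte counts. A minimal sketch of what the constexpr form enables; the static_asserts are illustrative and not part of the source:

#include <cstddef>

template<typename T> constexpr T divideRoundingUp(T numerator, T denominator)
{
    return (numerator + denominator - 1) / denominator;
}

// constexpr results can now feed compile-time constants such as array bounds.
static_assert(divideRoundingUp<std::size_t>(10, 4) == 3, "10/4 rounds up to 3");
static_assert(divideRoundingUp<std::size_t>(8, 4) == 2, "exact division is unchanged");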
@@ -175,24 +176,105 @@ constexpr unsigned long log2(unsigned long value)
 
 #define BOFFSETOF(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
 
+template <typename T>
+constexpr unsigned ctzConstexpr(T value)
+{
+    constexpr unsigned bitSize = sizeof(T) * CHAR_BIT;
+
+    using UT = typename std::make_unsigned<T>::type;
+    UT uValue = value;
+
+    unsigned zeroCount = 0;
+    for (unsigned i = 0; i < bitSize; i++) {
+        if (uValue & 1)
+            break;
+
+        zeroCount++;
+        uValue >>= 1;
+    }
+    return zeroCount;
+}
+
 template<typename T>
-bool findBitInWord(T word, size_t& index, size_t endIndex, bool value)
+inline unsigned ctz(T value)
+{
+    constexpr unsigned bitSize = sizeof(T) * CHAR_BIT;
+
+    using UT = typename std::make_unsigned<T>::type;
+    UT uValue = value;
+
+#if BCOMPILER(GCC_COMPATIBLE)
+    if (uValue)
+        return __builtin_ctzll(uValue);
+    return bitSize;
+#elif BCOMPILER(MSVC) && !BCPU(X86)
+    unsigned long ret = 0;
+    if (_BitScanForward64(&ret, uValue))
+        return ret;
+    return bitSize;
+#else
+    UNUSED_PARAM(bitSize);
+    UNUSED_PARAM(uValue);
+    return ctzConstexpr(value);
+#endif
+}
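ctzConstexpr() gives a portable, constant-evaluable count of trailing zeros, while ctz() prefers hardware instructions (__builtin_ctzll on GCC-compatible compilers, _BitScanForward64 on 64-bit MSVC) and falls back to the loop otherwise. A standalone sketch of the portable path, with illustrative checks that are not in the source:

#include <climits>
#include <cstdint>
#include <type_traits>

template<typename T>
constexpr unsigned countTrailingZeros(T value)
{
    constexpr unsigned bitSize = sizeof(T) * CHAR_BIT;
    using UT = typename std::make_unsigned<T>::type;
    UT uValue = value;
    unsigned zeroCount = 0;
    for (unsigned i = 0; i < bitSize; i++) {
        if (uValue & 1)
            break;
        zeroCount++;
        uValue >>= 1;
    }
    return zeroCount; // equals bitSize when value is 0, matching ctz()'s convention
}

static_assert(countTrailingZeros<uint32_t>(0x8) == 3, "0b1000 has three trailing zeros");
static_assert(countTrailingZeros<uint32_t>(0) == 32, "zero exhausts the whole word");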
 template<typename T>
+bool findBitInWord(T word, size_t& startOrResultIndex, size_t endIndex, bool value)
 {
     static_assert(std::is_unsigned<T>::value, "Type used in findBitInWord must be unsigned");
+    constexpr size_t bitsInWord = sizeof(word) * 8;
+    BASSERT(startOrResultIndex <= bitsInWord && endIndex <= bitsInWord);
+    BUNUSED(bitsInWord);
 
+    size_t index = startOrResultIndex;
     word >>= index;
 
+#if BCOMPILER(GCC_COMPATIBLE) && (BCPU(X86_64) || BCPU(ARM64))
+    // We should only use ctz() when we know that ctz() is implemented using
+    // a fast hardware instruction. Otherwise, this will actually result in
+    // worse performance.
+
+    word ^= (static_cast<T>(value) - 1);
+    index += ctz(word);
+    if (index < endIndex) {
+        startOrResultIndex = index;
+        return true;
+    }
+#else
     while (index < endIndex) {
-        if ((word & 1) == static_cast<T>(value))
+        if ((word & 1) == static_cast<T>(value)) {
+            startOrResultIndex = index;
             return true;
+        }
         index++;
         word >>= 1;
     }
 
     index = endIndex;
+#endif
 
+    startOrResultIndex = endIndex;
     return false;
 }
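The rewritten findBitInWord() reports the hit through startOrResultIndex instead of mutating the caller's index as it scans, and gains a ctz()-based fast path on x86_64/ARM64. A hypothetical caller scanning a free-bit mask (the word value and names below are invented for illustration):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Same contract as bmalloc's portable branch: scan [start, endIndex) for a bit
// equal to `value`; on success the in/out index is overwritten with the hit.
bool findBitInWord(uint64_t word, std::size_t& startOrResultIndex, std::size_t endIndex, bool value)
{
    std::size_t index = startOrResultIndex;
    word >>= index;
    while (index < endIndex) {
        if ((word & 1) == static_cast<uint64_t>(value)) {
            startOrResultIndex = index;
            return true;
        }
        index++;
        word >>= 1;
    }
    startOrResultIndex = endIndex;
    return false;
}

int main()
{
    uint64_t freeBits = 0b10100; // bits 2 and 4 are set
    std::size_t index = 0;
    if (findBitInWord(freeBits, index, 64, true))
        std::printf("first set bit at %zu\n", index); // prints 2
}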
+template<typename T>
+constexpr unsigned getLSBSetNonZeroConstexpr(T t)
+{
+    return ctzConstexpr(t);
+}
+
+// From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+constexpr uint32_t roundUpToPowerOfTwo(uint32_t v)
+{
+    v--;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v++;
+    return v;
+}
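The bit-smearing in roundUpToPowerOfTwo() propagates the highest set bit into every lower position, so the final increment lands on the next power of two. Since it is constexpr, the behavior can be checked at compile time; these asserts are illustrative, not from the source:

#include <cstdint>

constexpr uint32_t roundUpToPowerOfTwo(uint32_t v)
{
    v--;          // so that exact powers of two map to themselves
    v |= v >> 1;  // smear the top bit downward...
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16; // ...until all lower bits are set
    v++;          // now carry into the next power of two
    return v;
}

static_assert(roundUpToPowerOfTwo(1) == 1, "");
static_assert(roundUpToPowerOfTwo(17) == 32, "");
static_assert(roundUpToPowerOfTwo(4096) == 4096, "powers of two are unchanged");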
 
 } // namespace bmalloc
 
 #endif // Algorithm_h
@@ -29,20 +29,20 @@ namespace bmalloc {
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(AllIsoHeaps);
 
-AllIsoHeaps::AllIsoHeaps(const std::lock_guard<Mutex>&)
+AllIsoHeaps::AllIsoHeaps(const LockHolder&)
 {
 }
 
 void AllIsoHeaps::add(IsoHeapImplBase* heap)
 {
-    std::lock_guard<Mutex> locker(mutex());
+    LockHolder locker(mutex());
     heap->m_next = m_head;
     m_head = heap;
 }
 
 IsoHeapImplBase* AllIsoHeaps::head()
 {
-    std::lock_guard<Mutex> locker(mutex());
+    LockHolder locker(mutex());
     return m_head;
 }
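Throughout this update, the spelled-out std::lock_guard<Mutex>/std::unique_lock<Mutex> give way to LockHolder/UniqueLockHolder aliases. The definitions live in Mutex.h, which is not part of this diff, so the sketch below only shows the assumed shape of the aliases and the scoped-locking idiom they preserve:

#include <mutex>

// Stand-in for bmalloc::Mutex; the real class is a custom lightweight mutex.
using Mutex = std::mutex;

// Assumed shape of the aliases this diff switches to.
using UniqueLockHolder = std::unique_lock<Mutex>;
using LockHolder = std::lock_guard<Mutex>;

static Mutex s_mutex;
static int s_head;

int head()
{
    LockHolder locker(s_mutex); // same scoped-locking idiom as AllIsoHeaps::head()
    return s_head;
}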
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -31,9 +31,9 @@
 
 namespace bmalloc {
 
-class AllIsoHeaps : public StaticPerProcess<AllIsoHeaps> {
+class BEXPORT AllIsoHeaps : public StaticPerProcess<AllIsoHeaps> {
 public:
-    AllIsoHeaps(const std::lock_guard<Mutex>&);
+    AllIsoHeaps(const LockHolder&);
 
     void add(IsoHeapImplBase*);
     IsoHeapImplBase* head();
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -50,28 +50,7 @@ Allocator::~Allocator()
     scavenge();
 }
 
-void* Allocator::tryAllocate(size_t size)
-{
-    if (size <= smallMax)
-        return allocate(size);
-
-    std::unique_lock<Mutex> lock(Heap::mutex());
-    return m_heap.tryAllocateLarge(lock, alignment, size);
-}
-
-void* Allocator::allocate(size_t alignment, size_t size)
-{
-    bool crashOnFailure = true;
-    return allocateImpl(alignment, size, crashOnFailure);
-}
-
-void* Allocator::tryAllocate(size_t alignment, size_t size)
-{
-    bool crashOnFailure = false;
-    return allocateImpl(alignment, size, crashOnFailure);
-}
-
-void* Allocator::allocateImpl(size_t alignment, size_t size, bool crashOnFailure)
+void* Allocator::allocateImpl(size_t alignment, size_t size, FailureAction action)
 {
     BASSERT(isPowerOfTwo(alignment));
 
@@ -79,41 +58,25 @@ void* Allocator::allocateImpl(size_t alignment, size_t size, bool crashOnFailure)
         size = alignment;
 
     if (size <= smallMax && alignment <= smallMax)
-        return allocate(roundUpToMultipleOf(alignment, size));
+        return allocateImpl(roundUpToMultipleOf(alignment, size), action);
 
-    std::unique_lock<Mutex> lock(Heap::mutex());
-    if (crashOnFailure)
-        return m_heap.allocateLarge(lock, alignment, size);
-    return m_heap.tryAllocateLarge(lock, alignment, size);
+    return allocateLarge(size, action);
 }
 
-void* Allocator::reallocate(void* object, size_t newSize)
+void* Allocator::reallocateImpl(void* object, size_t newSize, FailureAction action)
 {
-    bool crashOnFailure = true;
-    return reallocateImpl(object, newSize, crashOnFailure);
-}
+    if (!object)
+        return allocateImpl(newSize, action);
 
-void* Allocator::tryReallocate(void* object, size_t newSize)
-{
-    bool crashOnFailure = false;
-    return reallocateImpl(object, newSize, crashOnFailure);
-}
-
-void* Allocator::reallocateImpl(void* object, size_t newSize, bool crashOnFailure)
-{
     size_t oldSize = 0;
     switch (objectType(m_heap, object)) {
     case ObjectType::Small: {
         BASSERT(objectType(m_heap, nullptr) == ObjectType::Small);
         if (!object)
             break;
 
         size_t sizeClass = Object(object).page()->sizeClass();
         oldSize = objectSize(sizeClass);
         break;
     }
     case ObjectType::Large: {
-        std::unique_lock<Mutex> lock(Heap::mutex());
+        UniqueLockHolder lock(Heap::mutex());
         oldSize = m_heap.largeSize(lock, object);
 
         if (newSize < oldSize && newSize > smallMax) {
@@ -125,12 +88,10 @@ void* Allocator::reallocateImpl(void* object, size_t newSize, bool crashOnFailure)
     }
 
     void* result = nullptr;
-    if (crashOnFailure)
-        result = allocate(newSize);
-    else {
-        result = tryAllocate(newSize);
-        if (!result)
-            return nullptr;
+    result = allocateImpl(newSize, action);
+    if (!result) {
+        BASSERT(action == FailureAction::ReturnNull);
+        return nullptr;
     }
     size_t copySize = std::min(oldSize, newSize);
     memcpy(result, object, copySize);
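reallocateImpl() now folds the old reallocate()/tryReallocate() pair into one path: allocate the new block (crashing or returning null per FailureAction), copy min(oldSize, newSize) bytes, then release the old block. A sketch of that copy rule using plain malloc, purely for illustration and not bmalloc's actual code path:

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <cstring>

// Illustrative only: grows or shrinks a block the way reallocateImpl does,
// copying no more than the smaller of the two sizes.
void* reallocateByCopy(void* object, std::size_t oldSize, std::size_t newSize)
{
    void* result = std::malloc(newSize);
    if (!result)
        return nullptr; // the FailureAction::ReturnNull branch; Crash would abort instead
    std::memcpy(result, object, std::min(oldSize, newSize));
    std::free(object);
    return result;
}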
@@ -157,51 +118,55 @@ void Allocator::scavenge()
     }
 }
 
-BNO_INLINE void Allocator::refillAllocatorSlowCase(BumpAllocator& allocator, size_t sizeClass)
+BNO_INLINE void Allocator::refillAllocatorSlowCase(BumpAllocator& allocator, size_t sizeClass, FailureAction action)
 {
     BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];
 
-    std::unique_lock<Mutex> lock(Heap::mutex());
+    UniqueLockHolder lock(Heap::mutex());
     m_deallocator.processObjectLog(lock);
-    m_heap.allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache, m_deallocator.lineCache(lock));
+    m_heap.allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache, m_deallocator.lineCache(lock), action);
 }
 
-BINLINE void Allocator::refillAllocator(BumpAllocator& allocator, size_t sizeClass)
+BINLINE void Allocator::refillAllocator(BumpAllocator& allocator, size_t sizeClass, FailureAction action)
 {
     BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];
     if (!bumpRangeCache.size())
-        return refillAllocatorSlowCase(allocator, sizeClass);
+        return refillAllocatorSlowCase(allocator, sizeClass, action);
     return allocator.refill(bumpRangeCache.pop());
 }
 
-BNO_INLINE void* Allocator::allocateLarge(size_t size)
+BNO_INLINE void* Allocator::allocateLarge(size_t size, FailureAction action)
 {
-    std::unique_lock<Mutex> lock(Heap::mutex());
-    return m_heap.allocateLarge(lock, alignment, size);
+    UniqueLockHolder lock(Heap::mutex());
+    return m_heap.allocateLarge(lock, alignment, size, action);
 }
 
-BNO_INLINE void* Allocator::allocateLogSizeClass(size_t size)
+BNO_INLINE void* Allocator::allocateLogSizeClass(size_t size, FailureAction action)
 {
     size_t sizeClass = bmalloc::sizeClass(size);
     BumpAllocator& allocator = m_bumpAllocators[sizeClass];
     if (!allocator.canAllocate())
-        refillAllocator(allocator, sizeClass);
+        refillAllocator(allocator, sizeClass, action);
+    if (action == FailureAction::ReturnNull && !allocator.canAllocate())
+        return nullptr;
     return allocator.allocate();
 }
 
-void* Allocator::allocateSlowCase(size_t size)
+void* Allocator::allocateSlowCase(size_t size, FailureAction action)
 {
     if (size <= maskSizeClassMax) {
         size_t sizeClass = bmalloc::maskSizeClass(size);
         BumpAllocator& allocator = m_bumpAllocators[sizeClass];
-        refillAllocator(allocator, sizeClass);
+        refillAllocator(allocator, sizeClass, action);
+        if (action == FailureAction::ReturnNull && !allocator.canAllocate())
+            return nullptr;
        return allocator.allocate();
     }
 
     if (size <= smallMax)
-        return allocateLogSizeClass(size);
+        return allocateLogSizeClass(size, action);
 
-    return allocateLarge(size);
+    return allocateLarge(size, action);
 }
 
 } // namespace bmalloc
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -28,6 +28,7 @@
 
 #include "BExport.h"
 #include "BumpAllocator.h"
+#include "FailureAction.h"
 #include <array>
 
 namespace bmalloc {
@@ -42,27 +43,28 @@ public:
     Allocator(Heap&, Deallocator&);
     ~Allocator();
 
-    BEXPORT void* tryAllocate(size_t);
-    void* allocate(size_t);
-    void* tryAllocate(size_t alignment, size_t);
-    void* allocate(size_t alignment, size_t);
-    void* tryReallocate(void*, size_t);
-    void* reallocate(void*, size_t);
+    void* tryAllocate(size_t size) { return allocateImpl(size, FailureAction::ReturnNull); }
+    void* allocate(size_t size) { return allocateImpl(size, FailureAction::Crash); }
+    void* tryAllocate(size_t alignment, size_t size) { return allocateImpl(alignment, size, FailureAction::ReturnNull); }
+    void* allocate(size_t alignment, size_t size) { return allocateImpl(alignment, size, FailureAction::Crash); }
+    void* tryReallocate(void* object, size_t newSize) { return reallocateImpl(object, newSize, FailureAction::ReturnNull); }
+    void* reallocate(void* object, size_t newSize) { return reallocateImpl(object, newSize, FailureAction::Crash); }
 
     void scavenge();
 
 private:
-    void* allocateImpl(size_t alignment, size_t, bool crashOnFailure);
-    void* reallocateImpl(void*, size_t, bool crashOnFailure);
+    void* allocateImpl(size_t, FailureAction);
+    BEXPORT void* allocateImpl(size_t alignment, size_t, FailureAction);
+    BEXPORT void* reallocateImpl(void*, size_t, FailureAction);
 
     bool allocateFastCase(size_t, void*&);
-    BEXPORT void* allocateSlowCase(size_t);
+    BEXPORT void* allocateSlowCase(size_t, FailureAction);
 
-    void* allocateLogSizeClass(size_t);
-    void* allocateLarge(size_t);
+    void* allocateLogSizeClass(size_t, FailureAction);
+    void* allocateLarge(size_t, FailureAction);
 
-    void refillAllocator(BumpAllocator&, size_t sizeClass);
-    void refillAllocatorSlowCase(BumpAllocator&, size_t sizeClass);
+    inline void refillAllocator(BumpAllocator&, size_t sizeClass, FailureAction);
+    void refillAllocatorSlowCase(BumpAllocator&, size_t sizeClass, FailureAction);
 
     std::array<BumpAllocator, sizeClassCount> m_bumpAllocators;
     std::array<BumpRangeCache, sizeClassCount> m_bumpRangeCaches;
@@ -84,11 +86,11 @@ inline bool Allocator::allocateFastCase(size_t size, void*& object)
     return true;
 }
 
-inline void* Allocator::allocate(size_t size)
+inline void* Allocator::allocateImpl(size_t size, FailureAction action)
 {
     void* object;
     if (!allocateFastCase(size, object))
-        return allocateSlowCase(size);
+        return allocateSlowCase(size, action);
     return object;
 }
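The header now expresses the whole public allocation API as one-line forwarders over allocateImpl/reallocateImpl, so the "try" and crashing variants differ only in the FailureAction argument. A compact sketch of the pattern; FailureAction mirrors the shape of bmalloc/FailureAction.h, while the malloc-based body below is hypothetical:

#include <cstdlib>

enum class FailureAction { Crash, ReturnNull };

struct Allocator {
    void* allocate(std::size_t size) { return allocateImpl(size, FailureAction::Crash); }
    void* tryAllocate(std::size_t size) { return allocateImpl(size, FailureAction::ReturnNull); }

private:
    void* allocateImpl(std::size_t size, FailureAction action)
    {
        void* result = std::malloc(size);
        if (!result && action == FailureAction::Crash)
            std::abort(); // bmalloc reports the failure and crashes deliberately here
        return result;    // may be null only under FailureAction::ReturnNull
    }
};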
@@ -47,13 +47,19 @@
 #if BOS(LINUX)
 #include <algorithm>
 #include <fcntl.h>
+#elif BOS(FREEBSD)
+#include "VMAllocate.h"
+#include <sys/sysctl.h>
+#include <sys/sysinfo.h>
+#include <sys/types.h>
+#include <sys/user.h>
 #endif
 #include <unistd.h>
 #endif
 
 namespace bmalloc {
 
-static const size_t availableMemoryGuess = 512 * bmalloc::MB;
+static constexpr size_t availableMemoryGuess = 512 * bmalloc::MB;
 
 #if BOS(DARWIN)
 static size_t memorySizeAccordingToKernel()
@@ -163,6 +169,11 @@ static size_t computeAvailableMemory()
     return ((sizeAccordingToKernel + multiple - 1) / multiple) * multiple;
 #elif BOS(LINUX)
     return LinuxMemory::singleton().availableMemory;
+#elif BOS(FREEBSD)
+    struct sysinfo info;
+    if (!sysinfo(&info))
+        return info.totalram * info.mem_unit;
+    return availableMemoryGuess;
 #elif BOS(UNIX)
     long pages = sysconf(_SC_PHYS_PAGES);
     long pageSize = sysconf(_SC_PAGE_SIZE);
@@ -184,7 +195,7 @@ size_t availableMemory()
     return availableMemory;
 }
 
-#if BPLATFORM(IOS_FAMILY) || BOS(LINUX)
+#if BPLATFORM(IOS_FAMILY) || BOS(LINUX) || BOS(FREEBSD)
 MemoryStatus memoryStatus()
 {
 #if BPLATFORM(IOS_FAMILY)
@@ -200,6 +211,21 @@ MemoryStatus memoryStatus()
     auto& memory = LinuxMemory::singleton();
     size_t memoryFootprint = memory.footprint();
     double percentInUse = static_cast<double>(memoryFootprint) / static_cast<double>(memory.availableMemory);
+#elif BOS(FREEBSD)
+    struct kinfo_proc info;
+    size_t infolen = sizeof(info);
+
+    int mib[4];
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC;
+    mib[2] = KERN_PROC_PID;
+    mib[3] = getpid();
+
+    size_t memoryFootprint = 0;
+    if (!sysctl(mib, 4, &info, &infolen, nullptr, 0))
+        memoryFootprint = static_cast<size_t>(info.ki_rssize) * vmPageSize();
+
+    double percentInUse = static_cast<double>(memoryFootprint) / static_cast<double>(availableMemory());
 #endif
 
     double percentAvailableMemoryInUse = std::min(percentInUse, 1.0);
@@ -30,9 +30,9 @@
 
 namespace bmalloc {
 
-size_t availableMemory();
+BEXPORT size_t availableMemory();
 
-#if BPLATFORM(IOS_FAMILY) || BOS(LINUX)
+#if BPLATFORM(IOS_FAMILY) || BOS(LINUX) || BOS(FREEBSD)
 struct MemoryStatus {
     MemoryStatus(size_t memoryFootprint, double percentAvailableMemoryInUse)
         : memoryFootprint(memoryFootprint)
@@ -61,7 +61,7 @@ inline double percentAvailableMemoryInUse()
 
 inline bool isUnderMemoryPressure()
 {
-#if BPLATFORM(IOS_FAMILY) || BOS(LINUX)
+#if BPLATFORM(IOS_FAMILY) || BOS(LINUX) || BOS(FREEBSD)
     return percentAvailableMemoryInUse() > memoryPressureThreshold;
 #else
     return false;
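With the FreeBSD branch added, isUnderMemoryPressure() works the same way on all three platforms: the process footprint divided by total memory, clamped to 1.0, compared against a threshold. A hypothetical standalone illustration of that arithmetic (the byte counts are invented):

#include <algorithm>

// Mirrors the percentInUse computation in memoryStatus().
constexpr double percentInUse(double footprintBytes, double availableBytes)
{
    return footprintBytes / availableBytes;
}

static_assert(std::min(percentInUse(256.0, 512.0), 1.0) == 0.5, "half of memory in use");
static_assert(std::min(percentInUse(1024.0, 512.0), 1.0) == 1.0, "usage is clamped at 100%");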
@@ -38,6 +38,14 @@
 
 #define BASAN_ENABLED BCOMPILER_HAS_CLANG_FEATURE(address_sanitizer)
 
+/* BCOMPILER_HAS_CLANG_DECLSPEC() - whether the compiler supports a Microsoft style __declspec attribute. */
+/* https://clang.llvm.org/docs/LanguageExtensions.html#has-declspec-attribute */
+#ifdef __has_declspec_attribute
+#define BCOMPILER_HAS_CLANG_DECLSPEC(x) __has_declspec_attribute(x)
+#else
+#define BCOMPILER_HAS_CLANG_DECLSPEC(x) 0
+#endif
+
 /* BCOMPILER(GCC_COMPATIBLE) - GNU Compiler Collection or compatibles */
 
 #if defined(__GNUC__)
@@ -27,10 +27,25 @@
 
 #include "BPlatform.h"
 
-#if BUSE(EXPORT_MACROS)
-#define BEXPORT __attribute__((visibility("default")))
+#if BUSE(DECLSPEC_ATTRIBUTE)
+#define BEXPORT_DECLARATION __declspec(dllexport)
+#define BIMPORT_DECLARATION __declspec(dllimport)
+#elif BUSE(VISIBILITY_ATTRIBUTE)
+#define BEXPORT_DECLARATION __attribute__((visibility("default")))
+#define BIMPORT_DECLARATION BEXPORT_DECLARATION
 #else
-#define BEXPORT
+#define BEXPORT_DECLARATION
+#define BIMPORT_DECLARATION
 #endif
 
+#if !defined(BEXPORT)
+
+#if defined(BUILDING_bmalloc) || defined(STATICALLY_LINKED_WITH_bmalloc)
+#define BEXPORT BEXPORT_DECLARATION
+#else
+#define BEXPORT BIMPORT_DECLARATION
+#endif
+
+#endif
+
 #define BNOEXPORT
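The rewritten BExport.h separates "export" from "import": translation units building bmalloc itself (BUILDING_bmalloc) mark symbols for export, clients get the import form, and on GCC-compatible toolchains both collapse to default visibility. A reduced sketch of the dispatch, with the BUSE() platform checks simplified to a compiler check for illustration:

// Reduced model of the new BExport.h logic; macro names here are generic stand-ins.
#if defined(_MSC_VER)
#define EXPORT_DECLARATION __declspec(dllexport)
#define IMPORT_DECLARATION __declspec(dllimport)
#else
#define EXPORT_DECLARATION __attribute__((visibility("default")))
#define IMPORT_DECLARATION EXPORT_DECLARATION
#endif

#if defined(BUILDING_bmalloc) || defined(STATICALLY_LINKED_WITH_bmalloc)
#define EXPORT EXPORT_DECLARATION // inside the library: export the symbol
#else
#define EXPORT IMPORT_DECLARATION // in clients: import it
#endif

EXPORT void* exportedEntryPoint(); // visible across the shared-library boundary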
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2014-2018 Apple Inc. All rights reserved.
|
||||
* Copyright (C) 2014-2020 Apple Inc. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
@ -40,7 +40,7 @@
|
||||
#define BOS_DARWIN 1
|
||||
#endif
|
||||
|
||||
#ifdef __unix
|
||||
#if defined(__unix) || defined(__unix__)
|
||||
#define BOS_UNIX 1
|
||||
#endif
|
||||
|
||||
@ -58,10 +58,14 @@
|
||||
|
||||
#if BOS(DARWIN) && !defined(BUILDING_WITH_CMAKE)
|
||||
#if TARGET_OS_IOS
|
||||
#define BOS_IOS 1
|
||||
#define BPLATFORM_IOS 1
|
||||
#if TARGET_OS_SIMULATOR
|
||||
#define BPLATFORM_IOS_SIMULATOR 1
|
||||
#endif
|
||||
#if defined(TARGET_OS_MACCATALYST) && TARGET_OS_MACCATALYST
|
||||
#define BPLATFORM_MACCATALYST 1
|
||||
#endif
|
||||
#endif
|
||||
#if TARGET_OS_IPHONE
|
||||
#define BPLATFORM_IOS_FAMILY 1
|
||||
@ -69,6 +73,7 @@
|
||||
#define BPLATFORM_IOS_FAMILY_SIMULATOR 1
|
||||
#endif
|
||||
#elif TARGET_OS_MAC
|
||||
#define BOS_MAC 1
|
||||
#define BPLATFORM_MAC 1
|
||||
#endif
|
||||
#endif
|
||||
@ -78,13 +83,23 @@
|
||||
#endif
|
||||
|
||||
#if defined(TARGET_OS_WATCH) && TARGET_OS_WATCH
|
||||
#define BOS_WATCHOS 1
|
||||
#define BPLATFORM_WATCHOS 1
|
||||
#endif
|
||||
|
||||
#if defined(TARGET_OS_TV) && TARGET_OS_TV
|
||||
#define BOS_APPLETV 1
|
||||
#define BPLATFORM_APPLETV 1
|
||||
#endif
|
||||
|
||||
#if defined(__SCE__)
|
||||
#define BPLATORM_PLAYSTATION 1
|
||||
#endif
|
||||
|
||||
/* ==== Feature decision macros: these define feature choices for a particular port. ==== */
|
||||
|
||||
#define BENABLE(WTF_FEATURE) (defined BENABLE_##WTF_FEATURE && BENABLE_##WTF_FEATURE)
|
||||
|
||||
/* ==== Policy decision macros: these define policy choices for a particular port. ==== */
|
||||
|
||||
/* BUSE() - use a particular third-party library or optional OS service */
|
||||
@ -115,8 +130,8 @@
#define BCPU_X86_64 1
#endif

/* BCPU(ARM64) - Apple */
#if (defined(__arm64__) && defined(__APPLE__)) || defined(__aarch64__)
/* BCPU(ARM64) */
#if defined(__arm64__) || defined(__aarch64__)
#define BCPU_ARM64 1
#endif

@ -230,27 +245,98 @@

#endif /* ARM */

#if BCOMPILER(GCC_COMPATIBLE)
/* __LP64__ is not defined on 64bit Windows since it uses LLP64. Using __SIZEOF_POINTER__ is simpler. */
#if __SIZEOF_POINTER__ == 8
#define BCPU_ADDRESS64 1
#elif __SIZEOF_POINTER__ == 4
#define BCPU_ADDRESS32 1
#else
#error "Unsupported pointer width"
#endif
#else
#error "Unsupported compiler for bmalloc"
#endif

#if BCOMPILER(GCC_COMPATIBLE)
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define BCPU_BIG_ENDIAN 1
#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BCPU_LITTLE_ENDIAN 1
#elif __BYTE_ORDER__ == __ORDER_PDP_ENDIAN__
#define BCPU_MIDDLE_ENDIAN 1
#else
#error "Unknown endian"
#endif
#else
#error "Unsupported compiler for bmalloc"
#endif

#if BCPU(ADDRESS64)
#if (BOS(IOS) || BOS(TVOS) || BOS(WATCHOS)) && BCPU(ARM64)
#define BOS_EFFECTIVE_ADDRESS_WIDTH 36
#else
/* We strongly assume that effective address width is <= 48 in 64bit architectures (e.g. NaN boxing). */
#define BOS_EFFECTIVE_ADDRESS_WIDTH 48
#endif
#else
#define BOS_EFFECTIVE_ADDRESS_WIDTH 32
#endif
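
A hedged sketch of why the 48-bit assumption above matters for NaN boxing: a 64-bit double has a 52-bit mantissa, and engines in the WebKit family hide pointers inside the payload of a NaN, so pointers must fit below the tag bits. Everything here (constants, function names) is illustrative, not bmalloc code:

#include <cassert>
#include <cstdint>

constexpr uint64_t kTagShift = 48;                      /* no valid pointer bits above this */
constexpr uint64_t kPointerMask = (1ull << kTagShift) - 1;

inline uint64_t box(void* p, uint64_t tag)
{
    uint64_t bits = reinterpret_cast<uintptr_t>(p);
    assert(!(bits & ~kPointerMask));                    /* relies on effective address width <= 48 */
    return (tag << kTagShift) | bits;
}

inline void* unbox(uint64_t boxed)
{
    return reinterpret_cast<void*>(boxed & kPointerMask);
}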

#define BATTRIBUTE_PRINTF(formatStringArgument, extraArguments) __attribute__((__format__(printf, formatStringArgument, extraArguments)))

/* Export macro support. Detects the attributes available for shared library symbol export
   decorations. */
#if BOS(WINDOWS) || (BCOMPILER_HAS_CLANG_DECLSPEC(dllimport) && BCOMPILER_HAS_CLANG_DECLSPEC(dllexport))
#define BUSE_DECLSPEC_ATTRIBUTE 1
#elif BCOMPILER(GCC_COMPATIBLE)
#define BUSE_VISIBILITY_ATTRIBUTE 1
#endif

#if BPLATFORM(MAC) || BPLATFORM(IOS_FAMILY)
#define BUSE_OS_LOG 1
#endif

#if !defined(BUSE_EXPORT_MACROS) && (BPLATFORM(MAC) || BPLATFORM(IOS_FAMILY))
#define BUSE_EXPORT_MACROS 1
#endif

/* BUNUSED_PARAM */
#if !defined(BUNUSED_PARAM)
#define BUNUSED_PARAM(variable) (void)variable
#endif

/* Enable this to put each IsoHeap and other allocation categories into their own malloc heaps, so that tools like vmmap can show how big each heap is. */
#define BENABLE_MALLOC_HEAP_BREAKDOWN 0

/* This is used for debugging when hacking on how bmalloc calculates its physical footprint. */
#define ENABLE_PHYSICAL_PAGE_MAP 0

#if BPLATFORM(IOS_FAMILY) && (BCPU(ARM64) || BCPU(ARM))
#define BUSE_CHECK_NANO_MALLOC 1
#if BPLATFORM(MAC)
#define BUSE_PARTIAL_SCAVENGE 1
#else
#define BUSE_CHECK_NANO_MALLOC 0
#define BUSE_PARTIAL_SCAVENGE 0
#endif

#if !defined(BUSE_PRECOMPUTED_CONSTANTS_VMPAGE4K)
#define BUSE_PRECOMPUTED_CONSTANTS_VMPAGE4K 1
#endif

#if !defined(BUSE_PRECOMPUTED_CONSTANTS_VMPAGE16K)
#define BUSE_PRECOMPUTED_CONSTANTS_VMPAGE16K 1
#endif

/* The unified Config record feature is not available for Windows because the
   Windows port puts WTF in a separate DLL, and the offlineasm code accessing
   the config record expects the config record to be directly accessible like
   a global variable (and not have to go thru DLL shenanigans). C++ code would
   resolve these DLL bindings automatically, but offlineasm does not.

   The permanently freezing feature also currently relies on the Config records
   being unified, and the Windows port also does not currently have an
   implementation for the freezing mechanism anyway. For simplicity, we just
   disable both the use of unified Config record and config freezing for the
   Windows port.
*/
#if BOS(WINDOWS)
#define BENABLE_UNIFIED_AND_FREEZABLE_CONFIG_RECORD 0
#else
#define BENABLE_UNIFIED_AND_FREEZABLE_CONFIG_RECORD 1
#endif

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
 * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@ -53,37 +53,29 @@ Cache::Cache(HeapKind heapKind)

BNO_INLINE void* Cache::tryAllocateSlowCaseNullCache(HeapKind heapKind, size_t size)
{
    if (auto* debugHeap = DebugHeap::tryGet()) {
        constexpr bool crashOnFailure = false;
        return debugHeap->malloc(size, crashOnFailure);
    }
    if (auto* debugHeap = DebugHeap::tryGet())
        return debugHeap->malloc(size, FailureAction::ReturnNull);
    return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().tryAllocate(size);
}

BNO_INLINE void* Cache::allocateSlowCaseNullCache(HeapKind heapKind, size_t size)
{
    if (auto* debugHeap = DebugHeap::tryGet()) {
        constexpr bool crashOnFailure = true;
        return debugHeap->malloc(size, crashOnFailure);
    }
    if (auto* debugHeap = DebugHeap::tryGet())
        return debugHeap->malloc(size, FailureAction::Crash);
    return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().allocate(size);
}

BNO_INLINE void* Cache::tryAllocateSlowCaseNullCache(HeapKind heapKind, size_t alignment, size_t size)
{
    if (auto* debugHeap = DebugHeap::tryGet()) {
        constexpr bool crashOnFailure = false;
        return debugHeap->memalign(alignment, size, crashOnFailure);
    }
    if (auto* debugHeap = DebugHeap::tryGet())
        return debugHeap->memalign(alignment, size, FailureAction::ReturnNull);
    return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().tryAllocate(alignment, size);
}

BNO_INLINE void* Cache::allocateSlowCaseNullCache(HeapKind heapKind, size_t alignment, size_t size)
{
    if (auto* debugHeap = DebugHeap::tryGet()) {
        constexpr bool crashOnFailure = true;
        return debugHeap->memalign(alignment, size, crashOnFailure);
    }
    if (auto* debugHeap = DebugHeap::tryGet())
        return debugHeap->memalign(alignment, size, FailureAction::Crash);
    return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().allocate(alignment, size);
}

@ -98,19 +90,15 @@ BNO_INLINE void Cache::deallocateSlowCaseNullCache(HeapKind heapKind, void* obje

BNO_INLINE void* Cache::tryReallocateSlowCaseNullCache(HeapKind heapKind, void* object, size_t newSize)
{
    if (auto* debugHeap = DebugHeap::tryGet()) {
        constexpr bool crashOnFailure = false;
        return debugHeap->realloc(object, newSize, crashOnFailure);
    }
    if (auto* debugHeap = DebugHeap::tryGet())
        return debugHeap->realloc(object, newSize, FailureAction::ReturnNull);
    return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().tryReallocate(object, newSize);
}

BNO_INLINE void* Cache::reallocateSlowCaseNullCache(HeapKind heapKind, void* object, size_t newSize)
{
    if (auto* debugHeap = DebugHeap::tryGet()) {
        constexpr bool crashOnFailure = true;
        return debugHeap->realloc(object, newSize, crashOnFailure);
    }
    if (auto* debugHeap = DebugHeap::tryGet())
        return debugHeap->realloc(object, newSize, FailureAction::Crash);
    return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().reallocate(object, newSize);
}
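
The Cache hunks above replace the boolean crashOnFailure parameter with the FailureAction enum introduced by this commit. A self-contained sketch of the pattern, with RELEASE_BASSERT stubbed for the example (bmalloc's real macro lives elsewhere):

#include <cstdlib>

#define RELEASE_BASSERT(x) do { if (!(x)) abort(); } while (0)

enum class FailureAction { Crash, ReturnNull };

void* allocateExample(size_t size, FailureAction action)
{
    void* result = std::malloc(size);
    /* Crash only when the caller asked for Crash; otherwise nullptr flows out. */
    RELEASE_BASSERT(action == FailureAction::ReturnNull || result);
    return result;
}

Call sites now read allocateExample(n, FailureAction::ReturnNull) or allocateExample(n, FailureAction::Crash), which is harder to misuse than a bare true/false argument.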
@ -46,7 +46,7 @@ public:
    static void* tryReallocate(HeapKind, void*, size_t);
    static void* reallocate(HeapKind, void*, size_t);

    static void scavenge(HeapKind);
    BEXPORT static void scavenge(HeapKind);

    Cache(HeapKind);

@ -38,6 +38,7 @@ namespace bmalloc {
class Chunk : public ListNode<Chunk> {
public:
    static Chunk* get(void*);
    static size_t metadataSize(size_t pageSize);

    Chunk(size_t pageSize);

@ -73,13 +74,16 @@ struct ChunkHash {
    }
};

template<typename Function> void forEachPage(Chunk* chunk, size_t pageSize, Function function)
inline size_t Chunk::metadataSize(size_t pageSize)
{
    // We align to at least the page size so we can service aligned allocations
    // at equal and smaller powers of two, and also so we can vmDeallocatePhysicalPages().
    size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(pageSize, sizeof(Chunk));
    return roundUpToMultipleOfNonPowerOfTwo(pageSize, sizeof(Chunk));
}

    Object begin(chunk, metadataSize);
template<typename Function> void forEachPage(Chunk* chunk, size_t pageSize, Function function)
{
    Object begin(chunk, Chunk::metadataSize(pageSize));
    Object end(chunk, chunkSize);

    for (auto it = begin; it + pageSize <= end; it = it + pageSize)
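
A sketch of roundUpToMultipleOfNonPowerOfTwo as used by Chunk::metadataSize above: round x up to the next multiple of a divisor that need not be a power of two. The body here is an assumption; only the name and argument order come from the diff:

#include <cstddef>

constexpr size_t roundUpToMultipleOfNonPowerOfTwo(size_t divisor, size_t x)
{
    return ((x + divisor - 1) / divisor) * divisor;
}

static_assert(roundUpToMultipleOfNonPowerOfTwo(4096, 400) == 4096, "metadata smaller than a page rounds to one page");
static_assert(roundUpToMultipleOfNonPowerOfTwo(4096, 5000) == 8192, "otherwise rounds to the next page multiple");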
@ -61,7 +61,7 @@ public:

class ARC4RandomNumberGenerator : public StaticPerProcess<ARC4RandomNumberGenerator> {
public:
    ARC4RandomNumberGenerator(const std::lock_guard<Mutex>&);
    ARC4RandomNumberGenerator(const LockHolder&);

    uint32_t randomNumber();
    void randomValues(void* buffer, size_t length);
@ -86,7 +86,7 @@ ARC4Stream::ARC4Stream()
    j = 0;
}

ARC4RandomNumberGenerator::ARC4RandomNumberGenerator(const std::lock_guard<Mutex>&)
ARC4RandomNumberGenerator::ARC4RandomNumberGenerator(const LockHolder&)
    : m_count(0)
{
}
@ -164,7 +164,7 @@ uint8_t ARC4RandomNumberGenerator::getByte()

void ARC4RandomNumberGenerator::randomValues(void* buffer, size_t length)
{
    std::lock_guard<Mutex> lock(mutex());
    LockHolder lock(mutex());

    unsigned char* result = reinterpret_cast<unsigned char*>(buffer);
    stirIfNeeded();
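
These hunks are part of a commit-wide migration from raw std::lock_guard<Mutex> parameters to LockHolder, the "lock witness" idiom: a reference to the held lock is threaded through APIs purely to prove the caller owns the mutex. A standalone sketch using standard types:

#include <mutex>

using ExampleLockHolder = std::scoped_lock<std::mutex>;

inline std::mutex& exampleMutex() { static std::mutex m; return m; }

class ExampleGenerator {
public:
    explicit ExampleGenerator(const ExampleLockHolder&) { } /* witness only, never used */
    void stir(const ExampleLockHolder&) { ++m_count; }      /* must be called under the mutex */
private:
    unsigned m_count { 0 };
};

void useExample()
{
    ExampleLockHolder lock(exampleMutex());
    static ExampleGenerator generator(lock);
    generator.stir(lock);
}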
@ -50,13 +50,13 @@ Deallocator::~Deallocator()

void Deallocator::scavenge()
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    UniqueLockHolder lock(Heap::mutex());

    processObjectLog(lock);
    m_heap.deallocateLineCache(lock, lineCache(lock));
}

void Deallocator::processObjectLog(std::unique_lock<Mutex>& lock)
void Deallocator::processObjectLog(UniqueLockHolder& lock)
{
    for (Object object : m_objectLog)
        m_heap.derefSmallLine(lock, object, lineCache(lock));
@ -68,14 +68,16 @@ void Deallocator::deallocateSlowCase(void* object)
    if (!object)
        return;

    std::unique_lock<Mutex> lock(Heap::mutex());
    if (m_heap.isLarge(lock, object)) {
    if (m_heap.isLarge(object)) {
        UniqueLockHolder lock(Heap::mutex());
        m_heap.deallocateLarge(lock, object);
        return;
    }

    if (m_objectLog.size() == m_objectLog.capacity())
    if (m_objectLog.size() == m_objectLog.capacity()) {
        UniqueLockHolder lock(Heap::mutex());
        processObjectLog(lock);
    }

    m_objectLog.push(object);
}
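
The deallocateSlowCase hunk narrows the scope of the heap lock: isLarge() no longer takes it, and the lock is acquired only on the branches that actually touch shared heap state. A sketch of the resulting shape, with stubbed helpers:

#include <mutex>
#include <vector>

struct ExampleDeallocator {
    std::mutex heapMutex;
    std::vector<void*> objectLog; /* per-thread in the real code, hence lock-free */

    bool isLarge(void*) { return false; } /* stub */
    void deallocateLarge(std::unique_lock<std::mutex>&, void*) { } /* stub */
    void processObjectLog(std::unique_lock<std::mutex>&) { objectLog.clear(); }

    void deallocateSlowCase(void* object)
    {
        if (!object)
            return;
        if (isLarge(object)) { /* checked without holding the heap lock */
            std::unique_lock<std::mutex> lock(heapMutex);
            deallocateLarge(lock, object);
            return;
        }
        if (objectLog.size() == objectLog.capacity()) {
            std::unique_lock<std::mutex> lock(heapMutex);
            processObjectLog(lock);
        }
        objectLog.push_back(object);
    }
};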
@ -46,9 +46,9 @@ public:
    void deallocate(void*);
    void scavenge();

    void processObjectLog(std::unique_lock<Mutex>&);
    void processObjectLog(UniqueLockHolder&);

    LineCache& lineCache(std::unique_lock<Mutex>&) { return m_lineCache; }
    LineCache& lineCache(UniqueLockHolder&) { return m_lineCache; }

private:
    bool deallocateFastCase(void*);

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
 * Copyright (C) 2016-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@ -40,34 +40,31 @@ DEFINE_STATIC_PER_PROCESS_STORAGE(DebugHeap);

#if BOS(DARWIN)

DebugHeap::DebugHeap(std::lock_guard<Mutex>&)
DebugHeap::DebugHeap(const LockHolder&)
    : m_zone(malloc_create_zone(0, 0))
    , m_pageSize(vmPageSize())
{
    malloc_set_zone_name(m_zone, "WebKit Using System Malloc");
}

void* DebugHeap::malloc(size_t size, bool crashOnFailure)
void* DebugHeap::malloc(size_t size, FailureAction action)
{
    void* result = malloc_zone_malloc(m_zone, size);
    if (!result && crashOnFailure)
        BCRASH();
    RELEASE_BASSERT(action == FailureAction::ReturnNull || result);
    return result;
}

void* DebugHeap::memalign(size_t alignment, size_t size, bool crashOnFailure)
void* DebugHeap::memalign(size_t alignment, size_t size, FailureAction action)
{
    void* result = malloc_zone_memalign(m_zone, alignment, size);
    if (!result && crashOnFailure)
        BCRASH();
    RELEASE_BASSERT(action == FailureAction::ReturnNull || result);
    return result;
}

void* DebugHeap::realloc(void* object, size_t size, bool crashOnFailure)
void* DebugHeap::realloc(void* object, size_t size, FailureAction action)
{
    void* result = malloc_zone_realloc(m_zone, object, size);
    if (!result && crashOnFailure)
        BCRASH();
    RELEASE_BASSERT(action == FailureAction::ReturnNull || result);
    return result;
}

@ -91,35 +88,30 @@ void DebugHeap::dump()

#else

DebugHeap::DebugHeap(std::lock_guard<Mutex>&)
DebugHeap::DebugHeap(const LockHolder&)
    : m_pageSize(vmPageSize())
{
}

void* DebugHeap::malloc(size_t size, bool crashOnFailure)
void* DebugHeap::malloc(size_t size, FailureAction action)
{
    void* result = ::malloc(size);
    if (!result && crashOnFailure)
        BCRASH();
    RELEASE_BASSERT(action == FailureAction::ReturnNull || result);
    return result;
}

void* DebugHeap::memalign(size_t alignment, size_t size, bool crashOnFailure)
void* DebugHeap::memalign(size_t alignment, size_t size, FailureAction action)
{
    void* result;
    if (posix_memalign(&result, alignment, size)) {
        if (crashOnFailure)
            BCRASH();
        return nullptr;
    }
    if (posix_memalign(&result, alignment, size))
        RELEASE_BASSERT(action == FailureAction::ReturnNull || result);
    return result;
}

void* DebugHeap::realloc(void* object, size_t size, bool crashOnFailure)
void* DebugHeap::realloc(void* object, size_t size, FailureAction action)
{
    void* result = ::realloc(object, size);
    if (!result && crashOnFailure)
        BCRASH();
    RELEASE_BASSERT(action == FailureAction::ReturnNull || result);
    return result;
}

@ -149,7 +141,7 @@ void* DebugHeap::memalignLarge(size_t alignment, size_t size)
    if (!result)
        return nullptr;
    {
        std::lock_guard<Mutex> locker(mutex());
        LockHolder locker(mutex());
        m_sizeMap[result] = size;
    }
    return result;
@ -162,7 +154,7 @@ void DebugHeap::freeLarge(void* base)

    size_t size;
    {
        std::lock_guard<Mutex> locker(mutex());
        LockHolder locker(mutex());
        size = m_sizeMap[base];
        size_t numErased = m_sizeMap.erase(base);
        RELEASE_BASSERT(numErased == 1);

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
 * Copyright (C) 2016-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@ -26,6 +26,7 @@
#pragma once

#include "Environment.h"
#include "FailureAction.h"
#include "Mutex.h"
#include "StaticPerProcess.h"
#include <mutex>
@ -39,11 +40,11 @@ namespace bmalloc {

class DebugHeap : private StaticPerProcess<DebugHeap> {
public:
    DebugHeap(std::lock_guard<Mutex>&);
    DebugHeap(const LockHolder&);

    void* malloc(size_t, bool crashOnFailure);
    void* memalign(size_t alignment, size_t, bool crashOnFailure);
    void* realloc(void*, size_t, bool crashOnFailure);
    void* malloc(size_t, FailureAction);
    void* memalign(size_t alignment, size_t, FailureAction);
    void* realloc(void*, size_t, FailureAction);
    void free(void*);

    void* memalignLarge(size_t alignment, size_t);

@ -26,6 +26,8 @@
#pragma once

#include "IsoPageTrigger.h"
#include "Mutex.h"
#include <mutex>

namespace bmalloc {

@ -37,10 +39,10 @@ public:
    DeferredTrigger() { }

    template<typename Config>
    void didBecome(IsoPage<Config>&);
    void didBecome(const LockHolder&, IsoPage<Config>&);

    template<typename Config>
    void handleDeferral(IsoPage<Config>&);
    void handleDeferral(const LockHolder&, IsoPage<Config>&);

private:
    bool m_hasBeenDeferred { false };

@ -25,28 +25,29 @@

#pragma once

#include "BAssert.h"
#include "DeferredTrigger.h"

namespace bmalloc {

template<IsoPageTrigger trigger>
template<typename Config>
void DeferredTrigger<trigger>::didBecome(IsoPage<Config>& page)
void DeferredTrigger<trigger>::didBecome(const LockHolder& locker, IsoPage<Config>& page)
{
    if (page.isInUseForAllocation())
        m_hasBeenDeferred = true;
    else
        page.directory().didBecome(&page, trigger);
        page.directory().didBecome(locker, &page, trigger);
}

template<IsoPageTrigger trigger>
template<typename Config>
void DeferredTrigger<trigger>::handleDeferral(IsoPage<Config>& page)
void DeferredTrigger<trigger>::handleDeferral(const LockHolder& locker, IsoPage<Config>& page)
{
    RELEASE_BASSERT(!page.isInUseForAllocation());

    if (m_hasBeenDeferred) {
        page.directory().didBecome(&page, trigger);
        page.directory().didBecome(locker, &page, trigger);
        m_hasBeenDeferred = false;
    }
}
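
The DeferredTrigger change threads the same lock witness through the deferral pattern: an event that fires while a page is busy allocating is remembered and replayed once allocation finishes. A simplified sketch of that pattern (names are illustrative):

struct ExamplePage {
    bool inUseForAllocation { false };
    bool hasBeenDeferred { false };

    void didBecomeInteresting()
    {
        if (inUseForAllocation)
            hasBeenDeferred = true; /* remember the event for later */
        else
            notifyDirectory();
    }

    void finishAllocating()
    {
        inUseForAllocation = false;
        if (hasBeenDeferred) {
            notifyDirectory(); /* replay the deferred event */
            hasBeenDeferred = false;
        }
    }

    void notifyDirectory() { /* page.directory().didBecome(...) in the real code */ }
};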
@ -34,6 +34,12 @@
#include <dlfcn.h>
#endif

#if BPLATFORM(IOS_FAMILY) && !BPLATFORM(MACCATALYST) && !BPLATFORM(IOS_FAMILY_SIMULATOR)
#define BUSE_CHECK_NANO_MALLOC 1
#else
#define BUSE_CHECK_NANO_MALLOC 0
#endif

#if BUSE(CHECK_NANO_MALLOC)
extern "C" {
#if __has_include(<malloc_private.h>)
@ -127,7 +133,7 @@ static bool isNanoMallocEnabled()

DEFINE_STATIC_PER_PROCESS_STORAGE(Environment);

Environment::Environment(std::lock_guard<Mutex>&)
Environment::Environment(const LockHolder&)
    : m_isDebugHeapEnabled(computeIsDebugHeapEnabled())
{
}
@ -140,10 +146,16 @@ bool Environment::computeIsDebugHeapEnabled()
        return true;
    if (isSanitizerEnabled())
        return true;

#if BUSE(CHECK_NANO_MALLOC)
    if (!isNanoMallocEnabled() && !shouldProcessUnconditionallyUseBmalloc())
        return true;
#endif

#if BENABLE_MALLOC_HEAP_BREAKDOWN
    return true;
#endif

    return false;
}
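
computeIsDebugHeapEnabled() layers several checks: explicit environment switches, sanitizers, the nano-malloc probe guarded above, and the heap-breakdown build flag. A sketch of the environment-switch part only; EXAMPLE_FORCE_DEBUG_HEAP is a made-up variable, not one bmalloc reads:

#include <cstdlib>
#include <cstring>

static bool isExampleSwitchSet()
{
    const char* value = std::getenv("EXAMPLE_FORCE_DEBUG_HEAP");
    return value && (!std::strcmp(value, "1") || !std::strcmp(value, "true"));
}

bool computeIsDebugHeapEnabledExample()
{
    if (isExampleSwitchSet())
        return true;
    /* ... platform checks follow, as in the real function above ... */
    return false;
}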

@ -33,7 +33,7 @@ namespace bmalloc {

class Environment : public StaticPerProcess<Environment> {
public:
    BEXPORT Environment(std::lock_guard<Mutex>&);
    BEXPORT Environment(const LockHolder&);

    bool isDebugHeapEnabled() { return m_isDebugHeapEnabled; }

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
 * Copyright (C) 2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@ -20,39 +20,13 @@
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef VMHeap_h
#define VMHeap_h

#include "Chunk.h"
#include "FixedVector.h"
#include "HeapKind.h"
#include "LargeRange.h"
#include "Map.h"
#include "StaticPerProcess.h"
#include "Vector.h"
#if BOS(DARWIN)
#include "Zone.h"
#endif
#pragma once

namespace bmalloc {

class BeginTag;
class EndTag;
class Heap;

typedef enum { Sync, Async } ScavengeMode;

class VMHeap : public StaticPerProcess<VMHeap> {
public:
    VMHeap(std::lock_guard<Mutex>&);

    LargeRange tryAllocateLargeChunk(size_t alignment, size_t);
};
DECLARE_STATIC_PER_PROCESS_STORAGE(VMHeap);
enum class FailureAction { Crash, ReturnNull };

} // namespace bmalloc

#endif // VMHeap_h
@ -1,5 +1,5 @@
/*
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 * Copyright (C) 2017-2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@ -27,18 +27,26 @@

#include "CryptoRandom.h"
#include "Environment.h"
#include "Mutex.h"
#include "ProcessCheck.h"
#include "StaticPerProcess.h"
#include "VMAllocate.h"
#include "Vector.h"
#include "bmalloc.h"
#include <cstdio>
#include <mutex>

#if BOS(DARWIN)
#include <mach/mach.h>
#endif

#if GIGACAGE_ENABLED

namespace Gigacage {

#if !BENABLE(UNIFIED_AND_FREEZABLE_CONFIG_RECORD)
Config g_gigacageConfig;
#endif

struct Callback {
    Callback() { }

@ -52,12 +60,12 @@ struct Callback {
    void* argument { nullptr };
};

}
} // namespace Gigacage

namespace bmalloc {

struct PrimitiveDisableCallbacks : public StaticPerProcess<PrimitiveDisableCallbacks> {
    PrimitiveDisableCallbacks(std::lock_guard<Mutex>&) { }
    PrimitiveDisableCallbacks(const LockHolder&) { }

    Vector<Gigacage::Callback> callbacks;
};
@ -73,55 +81,23 @@ namespace Gigacage {
// in size. 2^32 * 8 = 32GB. This means if an access on a caged type happens to go out of
// bounds, the access is guaranteed to land somewhere else in the cage or inside the runway.
// If this were less than 32GB, those OOB accesses could reach outside of the cage.
constexpr size_t gigacageRunway = 32llu * 1024 * 1024 * 1024;
constexpr size_t gigacageRunway = 32llu * bmalloc::Sizes::GB;

// Note: g_gigacageBasePtrs[0] is reserved for storing the wasEnabled flag.
// The first gigacageBasePtr will start at g_gigacageBasePtrs[sizeof(void*)].
// This is done so that the wasEnabled flag will also be protected along with the
// gigacageBasePtrs.
alignas(gigacageBasePtrsSize) char g_gigacageBasePtrs[gigacageBasePtrsSize];
bool disablePrimitiveGigacageRequested = false;

using namespace bmalloc;

namespace {

bool s_isDisablingPrimitiveGigacageDisabled;

void protectGigacageBasePtrs()
{
    uintptr_t basePtrs = reinterpret_cast<uintptr_t>(g_gigacageBasePtrs);
    // We might only get page size alignment, but that's also the minimum we need.
    RELEASE_BASSERT(!(basePtrs & (vmPageSize() - 1)));
    mprotect(g_gigacageBasePtrs, gigacageBasePtrsSize, PROT_READ);
}

void unprotectGigacageBasePtrs()
{
    mprotect(g_gigacageBasePtrs, gigacageBasePtrsSize, PROT_READ | PROT_WRITE);
}

class UnprotectGigacageBasePtrsScope {
public:
    UnprotectGigacageBasePtrsScope()
    {
        unprotectGigacageBasePtrs();
    }

    ~UnprotectGigacageBasePtrsScope()
    {
        protectGigacageBasePtrs();
    }
};

size_t runwaySize(Kind kind)
{
    switch (kind) {
    case Kind::ReservedForFlagsAndNotABasePtr:
        RELEASE_BASSERT_NOT_REACHED();
    case Kind::Primitive:
        return gigacageRunway;
    case Kind::JSValue:
        return 0;
    case Kind::NumberOfKinds:
        RELEASE_BASSERT_NOT_REACHED();
    }
    return 0;
}
@ -134,20 +110,29 @@ void ensureGigacage()
    std::call_once(
        onceFlag,
        [] {
            RELEASE_BASSERT(!g_gigacageConfig.ensureGigacageHasBeenCalled);
            g_gigacageConfig.ensureGigacageHasBeenCalled = true;

            if (!shouldBeEnabled())
                return;

            Kind shuffledKinds[numKinds];
            for (unsigned i = 0; i < numKinds; ++i)
                shuffledKinds[i] = static_cast<Kind>(i + 1); // + 1 to skip Kind::ReservedForFlagsAndNotABasePtr.

#if BENABLE(UNIFIED_AND_FREEZABLE_CONFIG_RECORD)
            // We might only get page size alignment, but that's also the minimum
            // alignment we need for freezing the Config.
            RELEASE_BASSERT(!(reinterpret_cast<size_t>(&g_gigacageConfig) & (vmPageSize() - 1)));
#endif

            Kind shuffledKinds[NumberOfKinds];
            for (unsigned i = 0; i < NumberOfKinds; ++i)
                shuffledKinds[i] = static_cast<Kind>(i);

            // We just go ahead and assume that 64 bits is enough randomness. That's trivially true right
            // now, but would stop being true if we went crazy with gigacages. Based on my math, 21 is the
            // largest value of n so that n! <= 2^64.
            static_assert(numKinds <= 21, "too many kinds");
            static_assert(NumberOfKinds <= 21, "too many kinds");
            uint64_t random;
            cryptoRandom(reinterpret_cast<unsigned char*>(&random), sizeof(random));
            for (unsigned i = numKinds; i--;) {
            for (unsigned i = NumberOfKinds; i--;) {
                unsigned limit = i + 1;
                unsigned j = static_cast<unsigned>(random % limit);
                random /= limit;
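
The loop above is a Fisher-Yates shuffle driven by a single 64-bit random value, peeling one choice out of 'limit' off per iteration with modulo and divide. All n! orderings are reachable only while n! <= 2^64; for reference, 20! is about 2.4e18 and fits, while 21! is about 5.1e19 and does not, so the static_assert bound sits right at the edge. A standalone sketch:

#include <cstddef>
#include <cstdint>
#include <utility>

void shuffleExample(int* values, size_t n, uint64_t random)
{
    for (size_t i = n; i-- > 0;) {
        size_t limit = i + 1;
        size_t j = static_cast<size_t>(random % limit);
        random /= limit;
        std::swap(values[i], values[j]);
    }
}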
@ -158,7 +143,7 @@ void ensureGigacage()
                return roundUpToMultipleOf(alignment(kind), totalSize);
            };
            auto bump = [] (Kind kind, size_t totalSize) -> size_t {
                return totalSize + size(kind);
                return totalSize + maxSize(kind);
            };

            size_t totalSize = 0;
@ -184,7 +169,7 @@ void ensureGigacage()
            size_t nextCage = 0;
            for (Kind kind : shuffledKinds) {
                nextCage = alignTo(kind, nextCage);
                basePtr(kind) = reinterpret_cast<char*>(base) + nextCage;
                g_gigacageConfig.setBasePtr(kind, reinterpret_cast<char*>(base) + nextCage);
                nextCage = bump(kind, nextCage);
                if (runwaySize(kind) > 0) {
                    char* runway = reinterpret_cast<char*>(base) + nextCage;
@ -193,49 +178,54 @@ void ensureGigacage()
                    nextCage += runwaySize(kind);
                }
            }

            g_gigacageConfig.start = base;
            g_gigacageConfig.totalSize = totalSize;
            vmDeallocatePhysicalPages(base, totalSize);
            setWasEnabled();
            protectGigacageBasePtrs();
            g_gigacageConfig.isEnabled = true;
        });
}

void disablePrimitiveGigacage()
{
    if (g_gigacageConfig.disablingPrimitiveGigacageIsForbidden)
        fprintf(stderr, "FATAL: Disabling Primitive gigacage is forbidden, but we don't want that in this process.\n");

    RELEASE_BASSERT(!g_gigacageConfig.disablingPrimitiveGigacageIsForbidden);

    ensureGigacage();
    if (!basePtrs().primitive) {
    disablePrimitiveGigacageRequested = true;
    if (!g_gigacageConfig.basePtrs[Primitive]) {
        // It was never enabled. That means that we never even saved any callbacks. Or, we had already disabled
        // it before, and already called the callbacks.
        return;
    }

    PrimitiveDisableCallbacks& callbacks = *PrimitiveDisableCallbacks::get();
    std::unique_lock<Mutex> lock(PrimitiveDisableCallbacks::mutex());
    UniqueLockHolder lock(PrimitiveDisableCallbacks::mutex());
    for (Callback& callback : callbacks.callbacks)
        callback.function(callback.argument);
    callbacks.callbacks.shrink(0);
    UnprotectGigacageBasePtrsScope unprotectScope;
    basePtrs().primitive = nullptr;
}

void addPrimitiveDisableCallback(void (*function)(void*), void* argument)
{
    ensureGigacage();
    if (!basePtrs().primitive) {
    if (!g_gigacageConfig.basePtrs[Primitive]) {
        // It was already disabled or we were never able to enable it.
        function(argument);
        return;
    }

    PrimitiveDisableCallbacks& callbacks = *PrimitiveDisableCallbacks::get();
    std::unique_lock<Mutex> lock(PrimitiveDisableCallbacks::mutex());
    UniqueLockHolder lock(PrimitiveDisableCallbacks::mutex());
    callbacks.callbacks.push(Callback(function, argument));
}

void removePrimitiveDisableCallback(void (*function)(void*), void* argument)
{
    PrimitiveDisableCallbacks& callbacks = *PrimitiveDisableCallbacks::get();
    std::unique_lock<Mutex> lock(PrimitiveDisableCallbacks::mutex());
    UniqueLockHolder lock(PrimitiveDisableCallbacks::mutex());
    for (size_t i = 0; i < callbacks.callbacks.size(); ++i) {
        if (callbacks.callbacks[i].function == function
            && callbacks.callbacks[i].argument == argument) {
@ -246,37 +236,36 @@ void removePrimitiveDisableCallback(void (*function)(void*), void* argument)
    }
}

static void primitiveGigacageDisabled(void*)
static bool verifyGigacageIsEnabled()
{
    if (GIGACAGE_ALLOCATION_CAN_FAIL && !wasEnabled())
        return;

    static bool s_false;
    fprintf(stderr, "FATAL: Primitive gigacage disabled, but we don't want that in this process.\n");
    if (!s_false)
        BCRASH();
    bool isEnabled = g_gigacageConfig.isEnabled;
    for (size_t i = 0; i < NumberOfKinds; ++i)
        isEnabled = isEnabled && g_gigacageConfig.basePtrs[i];
    isEnabled = isEnabled && g_gigacageConfig.start;
    isEnabled = isEnabled && g_gigacageConfig.totalSize;
    return isEnabled;
}

void disableDisablingPrimitiveGigacageIfShouldBeEnabled()
void forbidDisablingPrimitiveGigacage()
{
    if (shouldBeEnabled()) {
        addPrimitiveDisableCallback(primitiveGigacageDisabled, nullptr);
        s_isDisablingPrimitiveGigacageDisabled = true;
    }
}
    ensureGigacage();
    RELEASE_BASSERT(g_gigacageConfig.shouldBeEnabledHasBeenCalled
        && (GIGACAGE_ALLOCATION_CAN_FAIL || !g_gigacageConfig.shouldBeEnabled || verifyGigacageIsEnabled()));

bool isDisablingPrimitiveGigacageDisabled()
{
    return s_isDisablingPrimitiveGigacageDisabled;
    if (!g_gigacageConfig.disablingPrimitiveGigacageIsForbidden)
        g_gigacageConfig.disablingPrimitiveGigacageIsForbidden = true;
    RELEASE_BASSERT(disablingPrimitiveGigacageIsForbidden());
}

bool shouldBeEnabled()
{
    static bool cached = false;
    static std::once_flag onceFlag;
    std::call_once(
        onceFlag,
        [] {
            RELEASE_BASSERT(!g_gigacageConfig.shouldBeEnabledHasBeenCalled);
            g_gigacageConfig.shouldBeEnabledHasBeenCalled = true;

            bool debugHeapEnabled = Environment::get()->isDebugHeapEnabled();
            if (debugHeapEnabled)
                return;
@ -292,13 +281,21 @@ bool shouldBeEnabled()
                fprintf(stderr, "Warning: invalid argument to GIGACAGE_ENABLED: %s\n", gigacageEnabled);
            }

            cached = true;
            g_gigacageConfig.shouldBeEnabled = true;
        });
    return cached;
    return g_gigacageConfig.shouldBeEnabled;
}

size_t size(Kind kind)
{
    return PerProcess<PerHeapKind<Heap>>::get()->at(heapKind(kind)).gigacageSize();
}

size_t footprint(Kind kind)
{
    return PerProcess<PerHeapKind<Heap>>::get()->at(heapKind(kind)).footprint();
}

} // namespace Gigacage

#endif // GIGACAGE_ENABLED

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 * Copyright (C) 2017-2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@ -25,12 +25,13 @@

#pragma once

#include "Algorithm.h"
#include "BAssert.h"
#include "BExport.h"
#include "BInline.h"
#include "BPlatform.h"
#include "GigacageConfig.h"
#include "Sizes.h"
#include "StdLibExtras.h"
#include <cstddef>
#include <inttypes.h>

@ -44,21 +45,15 @@

namespace Gigacage {

enum Kind {
    ReservedForFlagsAndNotABasePtr = 0,
    Primitive,
    JSValue,
};

BINLINE const char* name(Kind kind)
{
    switch (kind) {
    case ReservedForFlagsAndNotABasePtr:
        RELEASE_BASSERT_NOT_REACHED();
    case Primitive:
        return "Primitive";
    case JSValue:
        return "JSValue";
    case NumberOfKinds:
        break;
    }
    BCRASH();
    return nullptr;
@ -66,25 +61,22 @@ BINLINE const char* name(Kind kind)

#if GIGACAGE_ENABLED

#if BCPU(ARM64)
#if BOS_EFFECTIVE_ADDRESS_WIDTH < 48
constexpr size_t primitiveGigacageSize = 2 * bmalloc::Sizes::GB;
constexpr size_t jsValueGigacageSize = 2 * bmalloc::Sizes::GB;
constexpr size_t gigacageBasePtrsSize = 16 * bmalloc::Sizes::kB;
constexpr size_t maximumCageSizeReductionForSlide = bmalloc::Sizes::GB / 4;
#define GIGACAGE_ALLOCATION_CAN_FAIL 1
#else
constexpr size_t primitiveGigacageSize = 32 * bmalloc::Sizes::GB;
constexpr size_t jsValueGigacageSize = 16 * bmalloc::Sizes::GB;
constexpr size_t gigacageBasePtrsSize = 4 * bmalloc::Sizes::kB;
constexpr size_t maximumCageSizeReductionForSlide = 4 * bmalloc::Sizes::GB;
#define GIGACAGE_ALLOCATION_CAN_FAIL 0
#endif

// In Linux, if `vm.overcommit_memory = 2` is specified, mmap with large size can fail if it exceeds the size of RAM.
// So we specify GIGACAGE_ALLOCATION_CAN_FAIL = 1.
#if BOS(LINUX) || defined(DARLING)
#undef GIGACAGE_ALLOCATION_CAN_FAIL
#define GIGACAGE_ALLOCATION_CAN_FAIL 1
#else
#define GIGACAGE_ALLOCATION_CAN_FAIL 0
#endif

@ -98,21 +90,13 @@ constexpr size_t gigacageSizeToMask(size_t size) { return size - 1; }
constexpr size_t primitiveGigacageMask = gigacageSizeToMask(primitiveGigacageSize);
constexpr size_t jsValueGigacageMask = gigacageSizeToMask(jsValueGigacageSize);

extern "C" alignas(gigacageBasePtrsSize) BEXPORT char g_gigacageBasePtrs[gigacageBasePtrsSize];
// These constants are needed by the LLInt.
constexpr ptrdiff_t offsetOfPrimitiveGigacageBasePtr = Kind::Primitive * sizeof(void*);
constexpr ptrdiff_t offsetOfJSValueGigacageBasePtr = Kind::JSValue * sizeof(void*);

BINLINE bool wasEnabled() { return g_gigacageBasePtrs[0]; }
BINLINE void setWasEnabled() { g_gigacageBasePtrs[0] = true; }
extern "C" BEXPORT bool disablePrimitiveGigacageRequested;

struct BasePtrs {
    uintptr_t reservedForFlags;
    void* primitive;
    void* jsValue;
};

static_assert(offsetof(BasePtrs, primitive) == Kind::Primitive * sizeof(void*), "");
static_assert(offsetof(BasePtrs, jsValue) == Kind::JSValue * sizeof(void*), "");

constexpr unsigned numKinds = 2;
BINLINE bool isEnabled() { return g_gigacageConfig.isEnabled; }

BEXPORT void ensureGigacage();

@ -122,50 +106,46 @@ BEXPORT void disablePrimitiveGigacage();
BEXPORT void addPrimitiveDisableCallback(void (*)(void*), void*);
BEXPORT void removePrimitiveDisableCallback(void (*)(void*), void*);

BEXPORT void disableDisablingPrimitiveGigacageIfShouldBeEnabled();
BEXPORT void forbidDisablingPrimitiveGigacage();

BEXPORT bool isDisablingPrimitiveGigacageDisabled();
inline bool isPrimitiveGigacagePermanentlyEnabled() { return isDisablingPrimitiveGigacageDisabled(); }
inline bool canPrimitiveGigacageBeDisabled() { return !isDisablingPrimitiveGigacageDisabled(); }

BINLINE void*& basePtr(BasePtrs& basePtrs, Kind kind)
BINLINE bool disablingPrimitiveGigacageIsForbidden()
{
    switch (kind) {
    case ReservedForFlagsAndNotABasePtr:
        RELEASE_BASSERT_NOT_REACHED();
    case Primitive:
        return basePtrs.primitive;
    case JSValue:
        return basePtrs.jsValue;
    }
    BCRASH();
    return basePtrs.primitive;
    return g_gigacageConfig.disablingPrimitiveGigacageIsForbidden;
}

BINLINE BasePtrs& basePtrs()
BINLINE bool disableNotRequestedForPrimitiveGigacage()
{
    return *reinterpret_cast<BasePtrs*>(reinterpret_cast<void*>(g_gigacageBasePtrs));
}

BINLINE void*& basePtr(Kind kind)
{
    return basePtr(basePtrs(), kind);
    return !disablePrimitiveGigacageRequested;
}

BINLINE bool isEnabled(Kind kind)
{
    return !!basePtr(kind);
    if (kind == Primitive)
        return g_gigacageConfig.basePtr(Primitive) && (disablingPrimitiveGigacageIsForbidden() || disableNotRequestedForPrimitiveGigacage());
    return g_gigacageConfig.basePtr(kind);
}

BINLINE size_t size(Kind kind)
BINLINE void* basePtr(Kind kind)
{
    BASSERT(isEnabled(kind));
    return g_gigacageConfig.basePtr(kind);
}

BINLINE void* addressOfBasePtr(Kind kind)
{
    RELEASE_BASSERT(kind < NumberOfKinds);
    return &g_gigacageConfig.basePtrs[kind];
}

BINLINE size_t maxSize(Kind kind)
{
    switch (kind) {
    case ReservedForFlagsAndNotABasePtr:
        RELEASE_BASSERT_NOT_REACHED();
    case Primitive:
        return static_cast<size_t>(primitiveGigacageSize);
    case JSValue:
        return static_cast<size_t>(jsValueGigacageSize);
    case NumberOfKinds:
        break;
    }
    BCRASH();
    return 0;
@ -173,14 +153,17 @@ BINLINE size_t size(Kind kind)

BINLINE size_t alignment(Kind kind)
{
    return size(kind);
    return maxSize(kind);
}

BINLINE size_t mask(Kind kind)
{
    return gigacageSizeToMask(size(kind));
    return gigacageSizeToMask(maxSize(kind));
}

BEXPORT size_t size(Kind);
BEXPORT size_t footprint(Kind);

template<typename Func>
void forEachKind(const Func& func)
{
@ -192,9 +175,9 @@ template<typename T>
BINLINE T* caged(Kind kind, T* ptr)
{
    BASSERT(ptr);
    void* gigacageBasePtr = basePtr(kind);
    if (!gigacageBasePtr)
    if (!isEnabled(kind))
        return ptr;
    void* gigacageBasePtr = basePtr(kind);
    return reinterpret_cast<T*>(
        reinterpret_cast<uintptr_t>(gigacageBasePtr) + (
            reinterpret_cast<uintptr_t>(ptr) & mask(kind)));
@ -213,25 +196,35 @@ BINLINE bool isCaged(Kind kind, const void* ptr)
    return caged(kind, ptr) == ptr;
}

BINLINE bool contains(const void* ptr)
{
    auto* start = reinterpret_cast<const uint8_t*>(g_gigacageConfig.start);
    auto* p = reinterpret_cast<const uint8_t*>(ptr);
    return static_cast<size_t>(p - start) < g_gigacageConfig.totalSize;
}
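
A worked example of the caged() masking above, with toy numbers: since the cage size is a power of two and the base is cage-size aligned, base + (ptr & mask) maps any pointer back into [base, base + cageSize), which combined with the runway keeps out-of-bounds accesses inside reserved address space:

#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t cageSize = 32ull * 1024 * 1024 * 1024; /* power of two, as required */
    const uint64_t mask = cageSize - 1;                   /* gigacageSizeToMask */
    const uint64_t base = 0x200000000000ull;              /* example cage-size-aligned base */

    uint64_t inBounds = base + 0x1234;
    uint64_t outOfBounds = base + cageSize + 0x1234;      /* walked off the end of the cage */

    assert(base + (inBounds & mask) == inBounds);         /* legal pointers are unchanged */
    assert(base + (outOfBounds & mask) == base + 0x1234); /* OOB pointers are forced back inside */
    return 0;
}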
BEXPORT bool shouldBeEnabled();

#else // GIGACAGE_ENABLED

BINLINE void*& basePtr(Kind)
BINLINE void* basePtr(Kind)
{
    BCRASH();
    static void* unreachable;
    return unreachable;
}
BINLINE size_t size(Kind) { BCRASH(); return 0; }
BINLINE size_t maxSize(Kind) { BCRASH(); return 0; }
BINLINE size_t size(Kind) { return 0; }
BINLINE size_t footprint(Kind) { return 0; }
BINLINE void ensureGigacage() { }
BINLINE bool wasEnabled() { return false; }
BINLINE bool contains(const void*) { return false; }
BINLINE bool disablingPrimitiveGigacageIsForbidden() { return false; }
BINLINE bool isEnabled() { return false; }
BINLINE bool isCaged(Kind, const void*) { return true; }
BINLINE bool isEnabled(Kind) { return false; }
template<typename T> BINLINE T* caged(Kind, T* ptr) { return ptr; }
template<typename T> BINLINE T* cagedMayBeNull(Kind, T* ptr) { return ptr; }
BINLINE void disableDisablingPrimitiveGigacageIfShouldBeEnabled() { }
BINLINE bool canPrimitiveGigacageBeDisabled() { return false; }
BINLINE void forbidDisablingPrimitiveGigacage() { }
BINLINE void disablePrimitiveGigacage() { }
BINLINE void addPrimitiveDisableCallback(void (*)(void*), void*) { }
BINLINE void removePrimitiveDisableCallback(void (*)(void*), void*) { }

bmalloc/GigacageConfig.h (new file, 104 lines)
@ -0,0 +1,104 @@
/*
 * Copyright (C) 2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "Algorithm.h"
#include "GigacageKind.h"
#include "StdLibExtras.h"
#include <inttypes.h>

#if BENABLE(UNIFIED_AND_FREEZABLE_CONFIG_RECORD)

namespace WebConfig {

using Slot = uint64_t;
extern "C" Slot g_config[];

} // namespace WebConfig

#endif // BENABLE(UNIFIED_AND_FREEZABLE_CONFIG_RECORD)

namespace Gigacage {

struct Config {
    void* basePtr(Kind kind) const
    {
        RELEASE_BASSERT(kind < NumberOfKinds);
        return basePtrs[kind];
    }

    void setBasePtr(Kind kind, void* ptr)
    {
        RELEASE_BASSERT(kind < NumberOfKinds);
        basePtrs[kind] = ptr;
    }

    // All the fields in this struct should be chosen such that their
    // initial value is 0 / null / falsy because Config is instantiated
    // as a global singleton.

    bool isPermanentlyFrozen; // Will be set by the client if the Config gets frozen.
    bool isEnabled;
    bool disablingPrimitiveGigacageIsForbidden;
    bool shouldBeEnabled;

    // We would like to just put the std::once_flag for these functions
    // here, but we can't because std::once_flag has an implicitly-deleted
    // default constructor. So, we use a boolean instead.
    bool shouldBeEnabledHasBeenCalled;
    bool ensureGigacageHasBeenCalled;

    void* start;
    size_t totalSize;
    void* basePtrs[NumberOfKinds];
};

#if BENABLE(UNIFIED_AND_FREEZABLE_CONFIG_RECORD)

constexpr size_t startSlotOfGigacageConfig = 0;
constexpr size_t startOffsetOfGigacageConfig = startSlotOfGigacageConfig * sizeof(WebConfig::Slot);

constexpr size_t reservedSlotsForGigacageConfig = 6;
constexpr size_t reservedBytesForGigacageConfig = reservedSlotsForGigacageConfig * sizeof(WebConfig::Slot);

constexpr size_t alignmentOfGigacageConfig = std::alignment_of<Gigacage::Config>::value;

static_assert(sizeof(Gigacage::Config) <= reservedBytesForGigacageConfig);
static_assert(bmalloc::roundUpToMultipleOf<alignmentOfGigacageConfig>(startOffsetOfGigacageConfig) == startOffsetOfGigacageConfig);

#define g_gigacageConfig (*bmalloc::bitwise_cast<Gigacage::Config*>(&WebConfig::g_config[Gigacage::startSlotOfGigacageConfig]))

#else // not BENABLE(UNIFIED_AND_FREEZABLE_CONFIG_RECORD)

extern "C" BEXPORT Config g_gigacageConfig;

#endif // BENABLE(UNIFIED_AND_FREEZABLE_CONFIG_RECORD)

} // namespace Gigacage

#if !BENABLE(UNIFIED_AND_FREEZABLE_CONFIG_RECORD)
using Gigacage::g_gigacageConfig;
#endif
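
The new header overlays the Config struct onto a range of 64-bit slots in a shared, freezable array. A sketch of that slot-overlay trick with assumed names; the real header additionally verifies size and alignment with the static_asserts shown above:

#include <cstdint>

using Slot = uint64_t;
Slot g_exampleSlots[16]; /* zero-initialized, like WebConfig::g_config */

struct ExampleConfig {
    bool isEnabled;
    void* start;
    uint64_t totalSize;
};

static_assert(sizeof(ExampleConfig) <= 6 * sizeof(Slot), "struct must fit the reserved slots");

/* Aliasing through a cast mirrors the real g_gigacageConfig macro; freezing the
   page containing g_exampleSlots then makes the whole record read-only. */
#define g_exampleConfig (*reinterpret_cast<ExampleConfig*>(&g_exampleSlots[0]))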
@ -1,5 +1,5 @@
/*
 * Copyright (C) 2018 Apple Inc. All rights reserved.
 * Copyright (C) 2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@ -23,24 +23,14 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "PerThread.h"
#pragma once

#include "BExport.h"
#include "Cache.h"
#include "Heap.h"
namespace Gigacage {

namespace bmalloc {
enum Kind {
    Primitive,
    JSValue,
    NumberOfKinds
};

#if !HAVE_PTHREAD_MACHDEP_H

template<> BEXPORT bool PerThreadStorage<PerHeapKind<Cache>>::s_didInitialize = false;
template<> BEXPORT pthread_key_t PerThreadStorage<PerHeapKind<Cache>>::s_key = 0;
template<> BEXPORT std::once_flag PerThreadStorage<PerHeapKind<Cache>>::s_onceFlag = { };

template<> BEXPORT bool PerThreadStorage<PerHeapKind<Heap>>::s_didInitialize = false;
template<> BEXPORT pthread_key_t PerThreadStorage<PerHeapKind<Heap>>::s_key = 0;
template<> BEXPORT std::once_flag PerThreadStorage<PerHeapKind<Heap>>::s_onceFlag = { };

#endif

} // namespace bmalloc
} // namespace Gigacage

bmalloc/Heap.cpp (306 lines)
@ -1,5 +1,5 @@
/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@ -30,41 +30,41 @@
#include "BumpAllocator.h"
#include "Chunk.h"
#include "CryptoRandom.h"
#include "DebugHeap.h"
#include "Environment.h"
#include "Gigacage.h"
#include "DebugHeap.h"
#include "HeapConstants.h"
#include "PerProcess.h"
#include "Scavenger.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include "VMHeap.h"
#include "bmalloc.h"
#include <thread>
#include <vector>

#if BOS(DARWIN)
#include "Zone.h"
#endif

namespace bmalloc {

Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&)
    : m_kind(kind)
    , m_vmPageSizePhysical(vmPageSizePhysical())
Heap::Heap(HeapKind kind, LockHolder&)
    : m_kind { kind }, m_constants { *HeapConstants::get() }
{
    RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
    RELEASE_BASSERT(vmPageSize() >= vmPageSizePhysical());

    initializeLineMetadata();
    initializePageMetadata();

    BASSERT(!Environment::get()->isDebugHeapEnabled());

    Gigacage::ensureGigacage();
#if GIGACAGE_ENABLED
    if (usingGigacage()) {
        RELEASE_BASSERT(gigacageBasePtr());
        void* gigacageBasePtr = this->gigacageBasePtr();
        RELEASE_BASSERT(gigacageBasePtr);
        uint64_t random[2];
        cryptoRandom(reinterpret_cast<unsigned char*>(random), sizeof(random));
        size_t size = roundDownToMultipleOf(vmPageSize(), gigacageSize() - (random[0] % Gigacage::maximumCageSizeReductionForSlide));
        ptrdiff_t offset = roundDownToMultipleOf(vmPageSize(), random[1] % (gigacageSize() - size));
        void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset;
        size_t gigacageSize = Gigacage::maxSize(gigacageKind(kind));
        size_t size = roundDownToMultipleOf(vmPageSize(), gigacageSize - (random[0] % Gigacage::maximumCageSizeReductionForSlide));
        m_gigacageSize = size;
        ptrdiff_t offset = roundDownToMultipleOf(vmPageSize(), random[1] % (gigacageSize - size));
        void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr) + offset;
        m_largeFree.add(LargeRange(base, size, 0, 0));
    }
#endif
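
The constructor hunk implements the cage "slide": the usable region is shrunk by a random amount and then placed at a random page-aligned offset inside the cage, so the heap's exact position is unpredictable. A sketch of the arithmetic with assumed helper names:

#include <cstdint>

inline uint64_t roundDownToMultipleOfExample(uint64_t m, uint64_t x) { return x - (x % m); }

void slideExample(uint64_t cageSize, uint64_t pageSize, uint64_t r0, uint64_t r1,
    uint64_t maxReduction, uint64_t& size, uint64_t& offset)
{
    size = roundDownToMultipleOfExample(pageSize, cageSize - (r0 % maxReduction));
    offset = roundDownToMultipleOfExample(pageSize, r1 % (cageSize - size));
    /* the heap then spans [base + offset, base + offset + size) */
}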
@ -74,7 +74,7 @@ Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&)

bool Heap::usingGigacage()
{
    return isGigacage(m_kind) && gigacageBasePtr();
    return isGigacage(m_kind) && Gigacage::isEnabled(gigacageKind(m_kind));
}

void* Heap::gigacageBasePtr()
@ -84,66 +84,10 @@ void* Heap::gigacageBasePtr()

size_t Heap::gigacageSize()
{
    return Gigacage::size(gigacageKind(m_kind));
    return m_gigacageSize;
}

void Heap::initializeLineMetadata()
{
    size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    m_smallLineMetadata.grow(sizeClassCount * smallLineCount);

    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        size_t size = objectSize(sizeClass);
        LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

        size_t object = 0;
        size_t line = 0;
        while (object < m_vmPageSizePhysical) {
            line = object / smallLineSize;
            size_t leftover = object % smallLineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);

            pageMetadata[line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > m_vmPageSizePhysical) {
            BASSERT(pageMetadata[line].objectCount);
            --pageMetadata[line].objectCount;
        }
    }
}

void Heap::initializePageMetadata()
{
    auto computePageSize = [&](size_t sizeClass) {
        size_t size = objectSize(sizeClass);
        if (sizeClass < bmalloc::sizeClass(smallLineSize))
            return m_vmPageSizePhysical;

        for (size_t pageSize = m_vmPageSizePhysical;
            pageSize < pageSizeMax;
            pageSize += m_vmPageSizePhysical) {
            RELEASE_BASSERT(pageSize <= chunkSize / 2);
            size_t waste = pageSize % size;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }

        return pageSizeMax;
    };

    for (size_t i = 0; i < sizeClassCount; ++i)
        m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
}

size_t Heap::freeableMemory(std::lock_guard<Mutex>&)
size_t Heap::freeableMemory(UniqueLockHolder&)
{
    return m_freeableMemory;
}
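
The removed initializeLineMetadata() (now hosted by HeapConstants) packs objects of one size class into fixed-size small lines, using divideRoundingUp to ask how many whole objects start in the remainder of a line. A sketch matching the call signature above; the body is an assumption:

#include <cstddef>

inline void divideRoundingUp(size_t dividend, size_t divisor, size_t& quotient, size_t& remainder)
{
    quotient = dividend / divisor;
    remainder = dividend % divisor;
    if (remainder)
        ++quotient;
}

/* Example: a 256-byte line tail with 96-byte objects holds ceil(256/96) = 3 object starts. */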
@ -153,14 +97,14 @@ size_t Heap::footprint()
|
||||
return m_footprint;
|
||||
}
|
||||
|
||||
void Heap::markAllLargeAsEligibile(std::lock_guard<Mutex>&)
|
||||
void Heap::markAllLargeAsEligibile(const LockHolder&)
|
||||
{
|
||||
m_largeFree.markAllAsEligibile();
|
||||
m_hasPendingDecommits = false;
|
||||
m_condition.notify_all();
|
||||
}
|
||||
|
||||
void Heap::decommitLargeRange(std::lock_guard<Mutex>&, LargeRange& range, BulkDecommit& decommitter)
|
||||
void Heap::decommitLargeRange(UniqueLockHolder&, LargeRange& range, BulkDecommit& decommitter)
|
||||
{
|
||||
m_footprint -= range.totalPhysicalSize();
|
||||
m_freeableMemory -= range.totalPhysicalSize();
|
||||
@ -175,10 +119,10 @@ void Heap::decommitLargeRange(std::lock_guard<Mutex>&, LargeRange& range, BulkDe
|
||||
#endif
|
||||
}
|
||||
|
||||
#if BPLATFORM(MAC)
|
||||
void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
|
||||
#if BUSE(PARTIAL_SCAVENGE)
|
||||
void Heap::scavenge(UniqueLockHolder& lock, BulkDecommit& decommitter)
|
||||
#else
|
||||
void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
|
||||
void Heap::scavenge(UniqueLockHolder& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
|
||||
#endif
|
||||
{
|
||||
for (auto& list : m_freePages) {
|
||||
@ -186,7 +130,7 @@ void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, siz
|
||||
for (auto* page : chunk->freePages()) {
|
||||
if (!page->hasPhysicalPages())
|
||||
continue;
|
||||
#if !BPLATFORM(MAC)
|
||||
#if !BUSE(PARTIAL_SCAVENGE)
|
||||
if (page->usedSinceLastScavenge()) {
|
||||
page->clearUsedSinceLastScavenge();
|
||||
deferredDecommits++;
|
||||
@ -209,11 +153,11 @@ void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, siz
|
||||
|
||||
for (auto& list : m_chunkCache) {
|
||||
while (!list.isEmpty())
|
||||
deallocateSmallChunk(list.pop(), &list - &m_chunkCache[0]);
|
||||
deallocateSmallChunk(lock, list.pop(), &list - &m_chunkCache[0]);
|
||||
}
|
||||
|
||||
for (LargeRange& range : m_largeFree) {
|
||||
#if BPLATFORM(MAC)
|
||||
#if BUSE(PARTIAL_SCAVENGE)
|
||||
m_highWatermark = std::min(m_highWatermark, static_cast<void*>(range.begin()));
|
||||
#else
|
||||
if (range.usedSinceLastScavenge()) {
|
||||
@ -225,13 +169,13 @@ void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, siz
|
||||
decommitLargeRange(lock, range, decommitter);
|
||||
}
|
||||
|
||||
#if BPLATFORM(MAC)
|
||||
#if BUSE(PARTIAL_SCAVENGE)
|
||||
m_freeableMemory = 0;
|
||||
#endif
|
||||
}

#if BPLATFORM(MAC)
void Heap::scavengeToHighWatermark(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
#if BUSE(PARTIAL_SCAVENGE)
void Heap::scavengeToHighWatermark(UniqueLockHolder& lock, BulkDecommit& decommitter)
{
void* newHighWaterMark = nullptr;
for (LargeRange& range : m_largeFree) {
@ -244,7 +188,7 @@ void Heap::scavengeToHighWatermark(std::lock_guard<Mutex>& lock, BulkDecommit& d
}
#endif

void Heap::deallocateLineCache(std::unique_lock<Mutex>&, LineCache& lineCache)
void Heap::deallocateLineCache(UniqueLockHolder&, LineCache& lineCache)
{
for (auto& list : lineCache) {
while (!list.isEmpty()) {
@ -254,44 +198,58 @@ void Heap::deallocateLineCache(std::unique_lock<Mutex>&, LineCache& lineCache)
}
}

void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass)
void Heap::allocateSmallChunk(UniqueLockHolder& lock, size_t pageClass, FailureAction action)
{
RELEASE_BASSERT(isActiveHeapKind(m_kind));

size_t pageSize = bmalloc::pageSize(pageClass);

Chunk* chunk = [&]() {
Chunk* chunk = [&]() -> Chunk* {
if (!m_chunkCache[pageClass].isEmpty())
return m_chunkCache[pageClass].pop();

void* memory = allocateLarge(lock, chunkSize, chunkSize);
void* memory = allocateLarge(lock, chunkSize, chunkSize, action);
if (!memory) {
BASSERT(action == FailureAction::ReturnNull);
return nullptr;
}

Chunk* chunk = new (memory) Chunk(pageSize);

m_objectTypes.set(chunk, ObjectType::Small);
m_objectTypes.set(lock, chunk, ObjectType::Small);

size_t accountedInFreeable = 0;
forEachPage(chunk, pageSize, [&](SmallPage* page) {
page->setHasPhysicalPages(true);
#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
page->setUsedSinceLastScavenge();
#endif
page->setHasFreeLines(lock, true);
chunk->freePages().push(page);
accountedInFreeable += pageSize;
});

m_freeableMemory += chunkSize;

m_freeableMemory += accountedInFreeable;

auto metadataSize = Chunk::metadataSize(pageSize);
vmDeallocatePhysicalPagesSloppy(chunk->address(sizeof(Chunk)), metadataSize - sizeof(Chunk));

auto decommitSize = chunkSize - metadataSize - accountedInFreeable;
if (decommitSize > 0)
vmDeallocatePhysicalPagesSloppy(chunk->address(chunkSize - decommitSize), decommitSize);

m_scavenger->schedule(0);

return chunk;
}();

m_freePages[pageClass].push(chunk);
if (chunk)
m_freePages[pageClass].push(chunk);
}
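Editorial note: this function shows the FailureAction parameter that this patch threads through the whole small- and large-allocation path. It encodes one policy decision: crash on failure (the malloc-style entry point) or return null (the try-style entry point). A minimal model of the pattern:

    #include <cstdlib>

    enum class FailureAction { Crash, ReturnNull };

    void* allocateOrFail(size_t size, FailureAction action)
    {
        void* memory = std::malloc(size); // stand-in for the real VM allocation
        if (!memory && action == FailureAction::Crash)
            std::abort(); // RELEASE_BASSERT in bmalloc proper
        return memory;    // may be null only when action == FailureAction::ReturnNull
    }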

void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
void Heap::deallocateSmallChunk(UniqueLockHolder& lock, Chunk* chunk, size_t pageClass)
{
m_objectTypes.set(chunk, ObjectType::Large);
m_objectTypes.set(lock, chunk, ObjectType::Large);

size_t size = m_largeAllocated.remove(chunk);
size_t totalPhysicalSize = size;
@ -315,7 +273,7 @@ void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
m_largeFree.add(LargeRange(chunk, size, startPhysicalSize, totalPhysicalSize));
}

SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache)
SmallPage* Heap::allocateSmallPage(UniqueLockHolder& lock, size_t sizeClass, LineCache& lineCache, FailureAction action)
{
RELEASE_BASSERT(isActiveHeapKind(m_kind));

@ -327,11 +285,13 @@ SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeCla

m_scavenger->didStartGrowing();

SmallPage* page = [&]() {
size_t pageClass = m_pageClasses[sizeClass];
SmallPage* page = [&]() -> SmallPage* {
size_t pageClass = m_constants.pageClass(sizeClass);

if (m_freePages[pageClass].isEmpty())
allocateSmallChunk(lock, pageClass);
allocateSmallChunk(lock, pageClass, action);
if (action == FailureAction::ReturnNull && m_freePages[pageClass].isEmpty())
return nullptr;

Chunk* chunk = m_freePages[pageClass].tail();

@ -354,18 +314,22 @@ SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeCla
m_physicalPageMap.commit(page->begin()->begin(), pageSize);
#endif
}
#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
page->setUsedSinceLastScavenge();
#endif

return page;
}();
if (!page) {
BASSERT(action == FailureAction::ReturnNull);
return nullptr;
}

page->setSizeClass(sizeClass);
return page;
}
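Editorial note: both lambdas in this file change from [&]() { ... } to [&]() -> Chunk* / -> SmallPage*. Once a body gains a `return nullptr;` alongside `return page;`, the return type must be spelled out, because deduction would see std::nullptr_t and the pointer type disagree. In miniature:

    struct Widget {};

    Widget* pick(bool haveOne, Widget* candidate)
    {
        auto* chosen = [&]() -> Widget* { // explicit type: the branches would otherwise deduce differently
            if (!haveOne)
                return nullptr;
            return candidate;
        }();
        return chosen;
    }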

void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
void Heap::deallocateSmallLine(UniqueLockHolder& lock, Object object, LineCache& lineCache)
{
BASSERT(!object.line()->refCount(lock));
SmallPage* page = object.page();
@ -379,8 +343,7 @@ void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, Lin
if (page->refCount(lock))
return;

size_t sizeClass = page->sizeClass();
size_t pageClass = m_pageClasses[sizeClass];
size_t pageClass = m_constants.pageClass(page->sizeClass());

m_freeableMemory += physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));

@ -397,7 +360,7 @@ void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, Lin
m_freePages[pageClass].remove(chunk);

if (!m_chunkCache[pageClass].isEmpty())
deallocateSmallChunk(m_chunkCache[pageClass].pop(), pageClass);
deallocateSmallChunk(lock, m_chunkCache[pageClass].pop(), pageClass);

m_chunkCache[pageClass].push(chunk);
}
@ -406,22 +369,25 @@ void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, Lin
}

void Heap::allocateSmallBumpRangesByMetadata(
std::unique_lock<Mutex>& lock, size_t sizeClass,
UniqueLockHolder& lock, size_t sizeClass,
BumpAllocator& allocator, BumpRangeCache& rangeCache,
LineCache& lineCache)
LineCache& lineCache, FailureAction action)
{
BUNUSED(action);
RELEASE_BASSERT(isActiveHeapKind(m_kind));

SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache, action);
if (!page) {
BASSERT(action == FailureAction::ReturnNull);
return;
}
SmallLine* lines = page->begin();
BASSERT(page->hasFreeLines(lock));
size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];


auto findSmallBumpRange = [&](size_t& lineNumber) {
for ( ; lineNumber < smallLineCount; ++lineNumber) {
for ( ; lineNumber < m_constants.smallLineCount(); ++lineNumber) {
if (!lines[lineNumber].refCount(lock)) {
if (pageMetadata[lineNumber].objectCount)
if (m_constants.objectCount(sizeClass, lineNumber))
return true;
}
}
@ -429,18 +395,19 @@ void Heap::allocateSmallBumpRangesByMetadata(
};

auto allocateSmallBumpRange = [&](size_t& lineNumber) -> BumpRange {
char* begin = lines[lineNumber].begin() + pageMetadata[lineNumber].startOffset;
char* begin = lines[lineNumber].begin() + m_constants.startOffset(sizeClass, lineNumber);
unsigned short objectCount = 0;

for ( ; lineNumber < smallLineCount; ++lineNumber) {
for ( ; lineNumber < m_constants.smallLineCount(); ++lineNumber) {
if (lines[lineNumber].refCount(lock))
break;

if (!pageMetadata[lineNumber].objectCount)
auto lineObjectCount = m_constants.objectCount(sizeClass, lineNumber);
if (!lineObjectCount)
continue;

objectCount += pageMetadata[lineNumber].objectCount;
lines[lineNumber].ref(lock, pageMetadata[lineNumber].objectCount);
objectCount += lineObjectCount;
lines[lineNumber].ref(lock, lineObjectCount);
page->ref(lock);
}
return { begin, objectCount };
@ -450,14 +417,14 @@ void Heap::allocateSmallBumpRangesByMetadata(
for (;;) {
if (!findSmallBumpRange(lineNumber)) {
page->setHasFreeLines(lock, false);
BASSERT(allocator.canAllocate());
BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate());
return;
}

// In a fragmented page, some free ranges might not fit in the cache.
if (rangeCache.size() == rangeCache.capacity()) {
lineCache[sizeClass].push(page);
BASSERT(allocator.canAllocate());
BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate());
return;
}

@ -470,14 +437,19 @@ void Heap::allocateSmallBumpRangesByMetadata(
}
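Editorial note: the metadata-driven scan above relies on each (sizeClass, lineNumber) pair having a precomputed start offset and object count, so finding a bump range is a walk over free lines rather than per-object arithmetic. A simplified rendition of the scan (types reduced; not bmalloc's actual structures):

    #include <cstddef>
    #include <vector>

    struct LineMeta { unsigned char startOffset; unsigned char objectCount; };

    // Returns the index of the next line that is unreferenced and actually holds
    // object starts, or lines.size() if the page is exhausted.
    size_t findBumpStart(const std::vector<unsigned>& refCounts,
        const std::vector<LineMeta>& lines, size_t from)
    {
        for (size_t i = from; i < lines.size(); ++i) {
            if (!refCounts[i] && lines[i].objectCount)
                return i;
        }
        return lines.size();
    }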

void Heap::allocateSmallBumpRangesByObject(
std::unique_lock<Mutex>& lock, size_t sizeClass,
UniqueLockHolder& lock, size_t sizeClass,
BumpAllocator& allocator, BumpRangeCache& rangeCache,
LineCache& lineCache)
LineCache& lineCache, FailureAction action)
{
BUNUSED(action);
RELEASE_BASSERT(isActiveHeapKind(m_kind));

size_t size = allocator.size();
SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache, action);
if (!page) {
BASSERT(action == FailureAction::ReturnNull);
return;
}
BASSERT(page->hasFreeLines(lock));

auto findSmallBumpRange = [&](Object& it, Object& end) {
@ -503,18 +475,18 @@ void Heap::allocateSmallBumpRangesByObject(
};

Object it(page->begin()->begin());
Object end(it + pageSize(m_pageClasses[sizeClass]));
Object end(it + pageSize(m_constants.pageClass(page->sizeClass())));
for (;;) {
if (!findSmallBumpRange(it, end)) {
page->setHasFreeLines(lock, false);
BASSERT(allocator.canAllocate());
BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate());
return;
}

// In a fragmented page, some free ranges might not fit in the cache.
if (rangeCache.size() == rangeCache.capacity()) {
lineCache[sizeClass].push(page);
BASSERT(allocator.canAllocate());
BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate());
return;
}

@ -526,7 +498,7 @@ void Heap::allocateSmallBumpRangesByObject(
}
}

LargeRange Heap::splitAndAllocate(std::unique_lock<Mutex>&, LargeRange& range, size_t alignment, size_t size)
LargeRange Heap::splitAndAllocate(UniqueLockHolder& lock, LargeRange& range, size_t alignment, size_t size)
{
RELEASE_BASSERT(isActiveHeapKind(m_kind));

@ -568,14 +540,22 @@ LargeRange Heap::splitAndAllocate(std::unique_lock<Mutex>&, LargeRange& range, s
m_largeFree.add(next);
}

m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large);
m_objectTypes.set(lock, Chunk::get(range.begin()), ObjectType::Large);

m_largeAllocated.set(range.begin(), range.size());
return range;
}

void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
void* Heap::allocateLarge(UniqueLockHolder& lock, size_t alignment, size_t size, FailureAction action)
{
#define ASSERT_OR_RETURN_ON_FAILURE(cond) do { \
if (action == FailureAction::Crash) \
RELEASE_BASSERT(cond); \
else if (!(cond)) \
return nullptr; \
} while (false)


RELEASE_BASSERT(isActiveHeapKind(m_kind));

BASSERT(isPowerOfTwo(alignment));
@ -583,13 +563,11 @@ void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, si
m_scavenger->didStartGrowing();

size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
if (roundedSize < size) // Check for overflow
return nullptr;
ASSERT_OR_RETURN_ON_FAILURE(roundedSize >= size); // Check for overflow
size = roundedSize;

size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
if (roundedAlignment < alignment) // Check for overflow
return nullptr;
ASSERT_OR_RETURN_ON_FAILURE(roundedAlignment >= alignment); // Check for overflow
alignment = roundedAlignment;

LargeRange range = m_largeFree.remove(alignment, size);
@ -597,15 +575,13 @@ void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, si
if (m_hasPendingDecommits) {
m_condition.wait(lock, [&]() { return !m_hasPendingDecommits; });
// Now we're guaranteed we're looking at all available memory.
return tryAllocateLarge(lock, alignment, size);
return allocateLarge(lock, alignment, size, action);
}

if (usingGigacage())
return nullptr;
ASSERT_OR_RETURN_ON_FAILURE(!usingGigacage());

range = VMHeap::get()->tryAllocateLargeChunk(alignment, size);
if (!range)
return nullptr;
range = tryAllocateLargeChunk(alignment, size);
ASSERT_OR_RETURN_ON_FAILURE(range);

m_largeFree.add(range);
range = m_largeFree.remove(alignment, size);
@ -614,30 +590,46 @@ void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, si
m_freeableMemory -= range.totalPhysicalSize();

void* result = splitAndAllocate(lock, range, alignment, size).begin();
#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
m_highWatermark = std::max(m_highWatermark, result);
#endif
ASSERT_OR_RETURN_ON_FAILURE(result);
return result;

#undef ASSERT_OR_RETURN_ON_FAILURE
}
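Editorial note: the function-local ASSERT_OR_RETURN_ON_FAILURE macro is what lets one body replace the old allocateLarge/tryAllocateLarge pair. The do/while(false) wrapper keeps it statement-shaped, and the trailing #undef confines it to this function. A generic rendition of the same device (names hypothetical):

    #include <cstdlib>

    enum class Action { Crash, ReturnNull };

    void* allocateChecked(bool condition, void* value, Action action)
    {
    #define CHECK_OR_BAIL(cond) do { \
            if (action == Action::Crash && !(cond)) \
                std::abort(); /* crash policy */ \
            else if (!(cond)) \
                return nullptr; /* try policy */ \
        } while (false)

        CHECK_OR_BAIL(condition);
        return value;
    #undef CHECK_OR_BAIL
    }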

void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
LargeRange Heap::tryAllocateLargeChunk(size_t alignment, size_t size)
{
void* result = tryAllocateLarge(lock, alignment, size);
RELEASE_BASSERT(result);
return result;
// We allocate VM in aligned multiples to increase the chances that
// the OS will provide contiguous ranges that we can merge.
size_t roundedAlignment = roundUpToMultipleOf<chunkSize>(alignment);
if (roundedAlignment < alignment) // Check for overflow
return LargeRange();
alignment = roundedAlignment;

size_t roundedSize = roundUpToMultipleOf<chunkSize>(size);
if (roundedSize < size) // Check for overflow
return LargeRange();
size = roundedSize;

void* memory = tryVMAllocate(alignment, size);
if (!memory)
return LargeRange();

#if BOS(DARWIN)
PerProcess<Zone>::get()->addRange(Range(memory, size));
#endif

return LargeRange(memory, size, 0, 0);
}
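Editorial note: the overflow checks above (here and in allocateLarge) rely on unsigned wraparound: rounding up past SIZE_MAX wraps to a smaller value, so `rounded < original` detects it. A standalone sketch of that check, assuming a power-of-two step as chunkSize is:

    #include <cstddef>

    // Round `value` up to a multiple of `step` (a power of two, enabling the
    // mask trick); returns 0 on overflow, mirroring the LargeRange() early-outs.
    size_t roundUpChecked(size_t value, size_t step)
    {
        size_t rounded = (value + step - 1) & ~(step - 1);
        if (rounded < value) // wrapped past SIZE_MAX
            return 0;
        return rounded;
    }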

bool Heap::isLarge(std::unique_lock<Mutex>&, void* object)
{
return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
}

size_t Heap::largeSize(std::unique_lock<Mutex>&, void* object)
size_t Heap::largeSize(UniqueLockHolder&, void* object)
{
return m_largeAllocated.get(object);
}

void Heap::shrinkLarge(std::unique_lock<Mutex>& lock, const Range& object, size_t newSize)
void Heap::shrinkLarge(UniqueLockHolder& lock, const Range& object, size_t newSize)
{
BASSERT(object.size() > newSize);

@ -648,7 +640,7 @@ void Heap::shrinkLarge(std::unique_lock<Mutex>& lock, const Range& object, size_
m_scavenger->schedule(size);
}

void Heap::deallocateLarge(std::unique_lock<Mutex>&, void* object)
void Heap::deallocateLarge(UniqueLockHolder&, void* object)
{
size_t size = m_largeAllocated.remove(object);
m_largeFree.add(LargeRange(object, size, size, size));
@ -658,11 +650,11 @@ void Heap::deallocateLarge(std::unique_lock<Mutex>&, void* object)

void Heap::externalCommit(void* ptr, size_t size)
{
std::unique_lock<Mutex> lock(Heap::mutex());
UniqueLockHolder lock(Heap::mutex());
externalCommit(lock, ptr, size);
}

void Heap::externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
void Heap::externalCommit(UniqueLockHolder&, void* ptr, size_t size)
{
BUNUSED_PARAM(ptr);

@ -674,11 +666,11 @@ void Heap::externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t size)

void Heap::externalDecommit(void* ptr, size_t size)
{
std::unique_lock<Mutex> lock(Heap::mutex());
UniqueLockHolder lock(Heap::mutex());
externalDecommit(lock, ptr, size);
}

void Heap::externalDecommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
void Heap::externalDecommit(UniqueLockHolder&, void* ptr, size_t size)
{
BUNUSED_PARAM(ptr);

bmalloc/Heap.h (103 lines changed)
@ -1,5 +1,5 @@
/*
* Copyright (C) 2014-2018 Apple Inc. All rights reserved.
* Copyright (C) 2014-2019 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -28,19 +28,19 @@

#include "BumpRange.h"
#include "Chunk.h"
#include "FailureAction.h"
#include "HeapKind.h"
#include "LargeMap.h"
#include "LineMetadata.h"
#include "List.h"
#include "Map.h"
#include "Mutex.h"
#include "Object.h"
#include "ObjectTypeTable.h"
#include "PerHeapKind.h"
#include "PerProcess.h"
#include "PhysicalPageMap.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include "Vector.h"
#include <array>
#include <condition_variable>
#include <mutex>
@ -48,54 +48,53 @@

namespace bmalloc {

class BeginTag;
class BulkDecommit;
class BumpAllocator;
class DebugHeap;
class EndTag;
class HeapConstants;
class Scavenger;

class Heap {
public:
Heap(HeapKind, std::lock_guard<Mutex>&);
Heap(HeapKind, LockHolder&);

static Mutex& mutex() { return PerProcess<PerHeapKind<Heap>>::mutex(); }

HeapKind kind() const { return m_kind; }

void allocateSmallBumpRanges(std::unique_lock<Mutex>&, size_t sizeClass,
BumpAllocator&, BumpRangeCache&, LineCache&);
void derefSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);
void deallocateLineCache(std::unique_lock<Mutex>&, LineCache&);
void allocateSmallBumpRanges(UniqueLockHolder&, size_t sizeClass,
BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction);
void derefSmallLine(UniqueLockHolder&, Object, LineCache&);
void deallocateLineCache(UniqueLockHolder&, LineCache&);

void* allocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t);
void* tryAllocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t);
void deallocateLarge(std::unique_lock<Mutex>&, void*);
void* allocateLarge(UniqueLockHolder&, size_t alignment, size_t, FailureAction);
void deallocateLarge(UniqueLockHolder&, void*);

bool isLarge(std::unique_lock<Mutex>&, void*);
size_t largeSize(std::unique_lock<Mutex>&, void*);
void shrinkLarge(std::unique_lock<Mutex>&, const Range&, size_t);
bool isLarge(void*);
size_t largeSize(UniqueLockHolder&, void*);
void shrinkLarge(UniqueLockHolder&, const Range&, size_t);

#if BPLATFORM(MAC)
void scavengeToHighWatermark(std::lock_guard<Mutex>&, BulkDecommit&);
void scavenge(std::lock_guard<Mutex>&, BulkDecommit&);
#if BUSE(PARTIAL_SCAVENGE)
void scavengeToHighWatermark(UniqueLockHolder&, BulkDecommit&);
void scavenge(UniqueLockHolder&, BulkDecommit&);
#else
void scavenge(std::lock_guard<Mutex>&, BulkDecommit&, size_t& deferredDecommits);
void scavenge(UniqueLockHolder&, BulkDecommit&, size_t& deferredDecommits);
#endif
void scavenge(std::lock_guard<Mutex>&, BulkDecommit&, size_t& freed, size_t goal);
void scavenge(UniqueLockHolder&, BulkDecommit&, size_t& freed, size_t goal);

size_t freeableMemory(std::lock_guard<Mutex>&);
size_t freeableMemory(UniqueLockHolder&);
size_t footprint();
size_t gigacageSize();

void externalDecommit(void* ptr, size_t);
void externalDecommit(std::unique_lock<Mutex>&, void* ptr, size_t);
void externalDecommit(UniqueLockHolder&, void* ptr, size_t);
void externalCommit(void* ptr, size_t);
void externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t);
void externalCommit(UniqueLockHolder&, void* ptr, size_t);

void markAllLargeAsEligibile(std::lock_guard<Mutex>&);
void markAllLargeAsEligibile(const LockHolder&);

private:
void decommitLargeRange(std::lock_guard<Mutex>&, LargeRange&, BulkDecommit&);
void decommitLargeRange(UniqueLockHolder&, LargeRange&, BulkDecommit&);

struct LargeObjectHash {
static unsigned hash(void* key)
@ -109,37 +108,27 @@ private:

bool usingGigacage();
void* gigacageBasePtr(); // May crash if !usingGigacage().
size_t gigacageSize();

void initializeLineMetadata();
void initializePageMetadata();

void allocateSmallBumpRangesByMetadata(std::unique_lock<Mutex>&,
size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&);
void allocateSmallBumpRangesByObject(std::unique_lock<Mutex>&,
size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&);
void allocateSmallBumpRangesByMetadata(UniqueLockHolder&,
size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction);
void allocateSmallBumpRangesByObject(UniqueLockHolder&,
size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction);

SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&);
void deallocateSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);
SmallPage* allocateSmallPage(UniqueLockHolder&, size_t sizeClass, LineCache&, FailureAction);
void deallocateSmallLine(UniqueLockHolder&, Object, LineCache&);

void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass);
void deallocateSmallChunk(Chunk*, size_t pageClass);
void allocateSmallChunk(UniqueLockHolder&, size_t pageClass, FailureAction);
void deallocateSmallChunk(UniqueLockHolder&, Chunk*, size_t pageClass);

void mergeLarge(BeginTag*&, EndTag*&, Range&);
void mergeLargeLeft(EndTag*&, BeginTag*&, Range&, bool& inVMHeap);
void mergeLargeRight(EndTag*&, BeginTag*&, Range&, bool& inVMHeap);

LargeRange splitAndAllocate(std::unique_lock<Mutex>&, LargeRange&, size_t alignment, size_t);
LargeRange tryAllocateLargeChunk(size_t alignment, size_t);
LargeRange splitAndAllocate(UniqueLockHolder&, LargeRange&, size_t alignment, size_t);

HeapKind m_kind;
HeapConstants& m_constants;

bool m_hasPendingDecommits { false };
std::condition_variable_any m_condition;

size_t m_vmPageSizePhysical;
Vector<LineMetadata> m_smallLineMetadata;
std::array<size_t, sizeClassCount> m_pageClasses;

LineCache m_lineCache;
std::array<List<Chunk>, pageClassCount> m_freePages;
std::array<List<Chunk>, pageClassCount> m_chunkCache;
@ -147,10 +136,11 @@ private:
Map<void*, size_t, LargeObjectHash> m_largeAllocated;
LargeMap m_largeFree;

Map<Chunk*, ObjectType, ChunkHash> m_objectTypes;
ObjectTypeTable m_objectTypes;

Scavenger* m_scavenger { nullptr };

size_t m_gigacageSize { 0 };
size_t m_footprint { 0 };
size_t m_freeableMemory { 0 };

@ -158,28 +148,33 @@ private:
PhysicalPageMap m_physicalPageMap;
#endif

#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
void* m_highWatermark { nullptr };
#endif
};

inline void Heap::allocateSmallBumpRanges(
std::unique_lock<Mutex>& lock, size_t sizeClass,
UniqueLockHolder& lock, size_t sizeClass,
BumpAllocator& allocator, BumpRangeCache& rangeCache,
LineCache& lineCache)
LineCache& lineCache, FailureAction action)
{
if (sizeClass < bmalloc::sizeClass(smallLineSize))
return allocateSmallBumpRangesByMetadata(lock, sizeClass, allocator, rangeCache, lineCache);
return allocateSmallBumpRangesByObject(lock, sizeClass, allocator, rangeCache, lineCache);
return allocateSmallBumpRangesByMetadata(lock, sizeClass, allocator, rangeCache, lineCache, action);
return allocateSmallBumpRangesByObject(lock, sizeClass, allocator, rangeCache, lineCache, action);
}

inline void Heap::derefSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
inline void Heap::derefSmallLine(UniqueLockHolder& lock, Object object, LineCache& lineCache)
{
if (!object.line()->deref(lock))
return;
deallocateSmallLine(lock, object, lineCache);
}

inline bool Heap::isLarge(void* object)
{
return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
}

} // namespace bmalloc

#endif // Heap_h
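Editorial note: throughout this patch, std::lock_guard<Mutex> and std::unique_lock<Mutex> spellings give way to LockHolder and UniqueLockHolder. Their definitions are not part of this diff; in all likelihood they are thin aliases over the standard holders, roughly like this (an assumption, not quoted from the source):

    #include <mutex>

    using Mutex = std::mutex; // stand-in: bmalloc's Mutex is its own lock type

    using LockHolder = std::scoped_lock<Mutex>;       // scope-only holder
    using UniqueLockHolder = std::unique_lock<Mutex>; // movable; works with condition variables

Centralizing the spelling lets the project swap holder semantics in one header instead of at every call site.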
bmalloc/HeapConstants.cpp (new file, 131 lines)
@ -0,0 +1,131 @@
/*
* Copyright (C) 2014-2019 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "HeapConstants.h"
#include <algorithm>

namespace bmalloc {

DEFINE_STATIC_PER_PROCESS_STORAGE(HeapConstants);

HeapConstants::HeapConstants(const LockHolder&)
: m_vmPageSizePhysical { vmPageSizePhysical() }
{
RELEASE_BASSERT(m_vmPageSizePhysical >= smallPageSize);
RELEASE_BASSERT(vmPageSize() >= m_vmPageSizePhysical);

initializeLineMetadata();
initializePageMetadata();
}

template <class C>
constexpr void fillLineMetadata(C& container, size_t VMPageSize)
{
constexpr size_t clsCount = sizeClass(smallLineSize);
size_t lineCount = smallLineCount(VMPageSize);

for (size_t cls = 0; cls < clsCount; ++cls) {
size_t size = objectSize(cls);
size_t baseIndex = cls * lineCount;
size_t object = 0;
while (object < VMPageSize) {
size_t line = object / smallLineSize;
size_t leftover = object % smallLineSize;

auto objectCount = divideRoundingUp(smallLineSize - leftover, size);

object += objectCount * size;

// Don't allow the last object in a page to escape the page.
if (object > VMPageSize) {
BASSERT(objectCount);
--objectCount;
}

container[baseIndex + line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };
}
}
}

template <size_t VMPageSize>
constexpr auto computeLineMetadata()
{
std::array<LineMetadata, sizeClass(smallLineSize) * smallLineCount(VMPageSize)> result;
fillLineMetadata(result, VMPageSize);
return result;
}

#if BUSE(PRECOMPUTED_CONSTANTS_VMPAGE4K)
constexpr auto kPrecalcuratedLineMetadata4k = computeLineMetadata<4 * kB>();
#endif

#if BUSE(PRECOMPUTED_CONSTANTS_VMPAGE16K)
constexpr auto kPrecalcuratedLineMetadata16k = computeLineMetadata<16 * kB>();
#endif

void HeapConstants::initializeLineMetadata()
{
#if BUSE(PRECOMPUTED_CONSTANTS_VMPAGE4K)
if (m_vmPageSizePhysical == 4 * kB) {
m_smallLineMetadata = &kPrecalcuratedLineMetadata4k[0];
return;
}
#endif

#if BUSE(PRECOMPUTED_CONSTANTS_VMPAGE16K)
if (m_vmPageSizePhysical == 16 * kB) {
m_smallLineMetadata = &kPrecalcuratedLineMetadata16k[0];
return;
}
#endif

size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
m_smallLineMetadataStorage.grow(sizeClassCount * smallLineCount());
fillLineMetadata(m_smallLineMetadataStorage, m_vmPageSizePhysical);
m_smallLineMetadata = &m_smallLineMetadataStorage[0];
}

void HeapConstants::initializePageMetadata()
{
auto computePageSize = [&](size_t sizeClass) {
size_t size = objectSize(sizeClass);
if (sizeClass < bmalloc::sizeClass(smallLineSize))
return m_vmPageSizePhysical;

for (size_t pageSize = m_vmPageSizePhysical; pageSize < pageSizeMax; pageSize += m_vmPageSizePhysical) {
RELEASE_BASSERT(pageSize <= chunkSize / 2);
size_t waste = pageSize % size;
if (waste <= pageSize / pageSizeWasteFactor)
return pageSize;
}

return pageSizeMax;
};

for (size_t i = 0; i < sizeClassCount; ++i)
m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
}

} // namespace bmalloc
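Editorial note: the point of making fillLineMetadata constexpr-friendly is that for the common 4K/16K page sizes the whole table is burned into the binary at compile time, leaving the Vector-backed path only for unusual configurations. A reduced demonstration of the same C++17 technique:

    #include <array>

    constexpr std::array<unsigned, 8> makeSquares()
    {
        std::array<unsigned, 8> table {};
        for (unsigned i = 0; i < 8; ++i)
            table[i] = i * i; // evaluated entirely at compile time
        return table;
    }

    constexpr auto kSquares = makeSquares(); // lives in read-only data, no init code
    static_assert(kSquares[3] == 9);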
bmalloc/HeapConstants.h (new file, 64 lines)
@ -0,0 +1,64 @@
/*
* Copyright (C) 2014-2019 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#pragma once

#include "LineMetadata.h"
#include "Mutex.h"
#include "Sizes.h"
#include "StaticPerProcess.h"
#include "Vector.h"
#include <array>
#include <mutex>

namespace bmalloc {

class HeapConstants : public StaticPerProcess<HeapConstants> {
public:
HeapConstants(const LockHolder&);
~HeapConstants() = delete;

inline size_t pageClass(size_t sizeClass) const { return m_pageClasses[sizeClass]; }
inline size_t smallLineCount() const { return bmalloc::smallLineCount(m_vmPageSizePhysical); }
inline unsigned char startOffset(size_t sizeClass, size_t lineNumber) const { return lineMetadata(sizeClass, lineNumber).startOffset; }
inline unsigned char objectCount(size_t sizeClass, size_t lineNumber) const { return lineMetadata(sizeClass, lineNumber).objectCount; }

private:
void initializeLineMetadata();
void initializePageMetadata();

inline const LineMetadata& lineMetadata(size_t sizeClass, size_t lineNumber) const
{
return m_smallLineMetadata[sizeClass * smallLineCount() + lineNumber];
}

size_t m_vmPageSizePhysical;
const LineMetadata* m_smallLineMetadata { };
Vector<LineMetadata> m_smallLineMetadataStorage;
std::array<size_t, sizeClassCount> m_pageClasses;
};
DECLARE_STATIC_PER_PROCESS_STORAGE(HeapConstants);

} // namespace bmalloc
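Editorial note: call sites in Heap.cpp now go through these accessors instead of indexing raw arrays, as the earlier hunks show, e.g.:

    size_t pageClass = m_constants.pageClass(sizeClass);
    auto count = m_constants.objectCount(sizeClass, lineNumber);

Centralizing the lookup behind HeapConstants lets the same Heap code run against either the precomputed constexpr tables or the runtime-built Vector storage, and lets every Heap instance share one per-process copy of the metadata.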
@ -70,12 +70,12 @@ BINLINE Gigacage::Kind gigacageKind(HeapKind kind)
BINLINE HeapKind heapKind(Gigacage::Kind kind)
{
switch (kind) {
case Gigacage::ReservedForFlagsAndNotABasePtr:
RELEASE_BASSERT_NOT_REACHED();
case Gigacage::Primitive:
return HeapKind::PrimitiveGigacage;
case Gigacage::JSValue:
return HeapKind::JSValueGigacage;
case Gigacage::NumberOfKinds:
break;
}
BCRASH();
return HeapKind::Primary;
@ -86,7 +86,7 @@ BINLINE bool isActiveHeapKindAfterEnsuringGigacage(HeapKind kind)
switch (kind) {
case HeapKind::PrimitiveGigacage:
case HeapKind::JSValueGigacage:
if (Gigacage::wasEnabled())
if (Gigacage::isEnabled())
return true;
return false;
default:
@ -101,7 +101,7 @@ BINLINE HeapKind mapToActiveHeapKindAfterEnsuringGigacage(HeapKind kind)
switch (kind) {
case HeapKind::PrimitiveGigacage:
case HeapKind::JSValueGigacage:
if (Gigacage::wasEnabled())
if (Gigacage::isEnabled())
return kind;
return HeapKind::Primary;
default:
@ -40,13 +40,12 @@ public:
IsoAllocator(IsoHeapImpl<Config>&);
~IsoAllocator();

void* allocate(bool abortOnFailure);
void scavenge();
void* allocate(IsoHeapImpl<Config>&, bool abortOnFailure);
void scavenge(IsoHeapImpl<Config>&);

private:
void* allocateSlow(bool abortOnFailure);
void* allocateSlow(IsoHeapImpl<Config>&, bool abortOnFailure);

IsoHeapImpl<Config>* m_heap { nullptr };
FreeList m_freeList;
IsoPage<Config>* m_currentPage { nullptr };
};
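Editorial note: dropping the stored m_heap back-pointer and passing IsoHeapImpl<Config>& into allocate()/scavenge() shrinks every per-thread IsoAllocator by one pointer and makes the dependency explicit at each call site. The shape of the change, reduced to declarations:

    template<typename Heap>
    struct AllocatorBefore {
        Heap* m_heap;          // stored once at construction, read implicitly
        void* allocate();      // uses m_heap internally
    };

    template<typename Heap>
    struct AllocatorAfter {
        void* allocate(Heap&); // caller supplies the heap on every call
    };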
@ -34,8 +34,7 @@
namespace bmalloc {

template<typename Config>
IsoAllocator<Config>::IsoAllocator(IsoHeapImpl<Config>& heap)
: m_heap(&heap)
IsoAllocator<Config>::IsoAllocator(IsoHeapImpl<Config>&)
{
}

@ -45,36 +44,36 @@ IsoAllocator<Config>::~IsoAllocator()
}

template<typename Config>
void* IsoAllocator<Config>::allocate(bool abortOnFailure)
void* IsoAllocator<Config>::allocate(IsoHeapImpl<Config>& heap, bool abortOnFailure)
{
static constexpr bool verbose = false;
void* result = m_freeList.allocate<Config>(
[&] () -> void* {
return allocateSlow(abortOnFailure);
return allocateSlow(heap, abortOnFailure);
});
if (verbose)
fprintf(stderr, "%p: allocated %p of size %u\n", m_heap, result, Config::objectSize);
fprintf(stderr, "%p: allocated %p of size %u\n", &heap, result, Config::objectSize);
return result;
}

template<typename Config>
BNO_INLINE void* IsoAllocator<Config>::allocateSlow(bool abortOnFailure)
BNO_INLINE void* IsoAllocator<Config>::allocateSlow(IsoHeapImpl<Config>& heap, bool abortOnFailure)
{
std::lock_guard<Mutex> locker(m_heap->lock);
LockHolder locker(heap.lock);

AllocationMode allocationMode = m_heap->updateAllocationMode();
AllocationMode allocationMode = heap.updateAllocationMode();
if (allocationMode == AllocationMode::Shared) {
if (m_currentPage) {
m_currentPage->stopAllocating(m_freeList);
m_currentPage->stopAllocating(locker, m_freeList);
m_currentPage = nullptr;
m_freeList.clear();
}
return m_heap->allocateFromShared(locker, abortOnFailure);
return heap.allocateFromShared(locker, abortOnFailure);
}

BASSERT(allocationMode == AllocationMode::Fast);

EligibilityResult<Config> result = m_heap->takeFirstEligible();
EligibilityResult<Config> result = heap.takeFirstEligible(locker);
if (result.kind != EligibilityKind::Success) {
RELEASE_BASSERT(result.kind == EligibilityKind::OutOfMemory);
RELEASE_BASSERT(!abortOnFailure);
@ -82,20 +81,20 @@ BNO_INLINE void* IsoAllocator<Config>::allocateSlow(bool abortOnFailure)
}

if (m_currentPage)
m_currentPage->stopAllocating(m_freeList);
m_currentPage->stopAllocating(locker, m_freeList);

m_currentPage = result.page;
m_freeList = m_currentPage->startAllocating();
m_freeList = m_currentPage->startAllocating(locker);

return m_freeList.allocate<Config>([] () { BCRASH(); return nullptr; });
}

template<typename Config>
void IsoAllocator<Config>::scavenge()
void IsoAllocator<Config>::scavenge(IsoHeapImpl<Config>& heap)
{
if (m_currentPage) {
std::lock_guard<Mutex> locker(m_heap->lock);
m_currentPage->stopAllocating(m_freeList);
LockHolder locker(heap.lock);
m_currentPage->stopAllocating(locker, m_freeList);
m_currentPage = nullptr;
m_freeList.clear();
}
@ -59,7 +59,7 @@ void IsoDeallocator<Config>::deallocate(api::IsoHeap<Type>& handle, void* ptr)
// should be rarely taken. If we see frequent malloc-and-free pattern, we tier up the allocator from shared mode to fast mode.
IsoPageBase* page = IsoPageBase::pageFor(ptr);
if (page->isShared()) {
std::lock_guard<Mutex> locker(*m_lock);
LockHolder locker(*m_lock);
static_cast<IsoSharedPage*>(page)->free<Config>(locker, handle, ptr);
return;
}
@ -73,10 +73,10 @@ void IsoDeallocator<Config>::deallocate(api::IsoHeap<Type>& handle, void* ptr)
template<typename Config>
BNO_INLINE void IsoDeallocator<Config>::scavenge()
{
std::lock_guard<Mutex> locker(*m_lock);
LockHolder locker(*m_lock);

for (void* ptr : m_objectLog)
IsoPage<Config>::pageFor(ptr)->free(ptr);
IsoPage<Config>::pageFor(ptr)->free(locker, ptr);
m_objectLog.clear();
}
@ -28,6 +28,7 @@
|
||||
#include "Bits.h"
|
||||
#include "EligibilityResult.h"
|
||||
#include "IsoPage.h"
|
||||
#include "Packed.h"
|
||||
#include "Vector.h"
|
||||
|
||||
namespace bmalloc {
|
||||
@ -49,7 +50,7 @@ public:
|
||||
|
||||
IsoHeapImpl<Config>& heap() { return m_heap; }
|
||||
|
||||
virtual void didBecome(IsoPage<Config>*, IsoPageTrigger) = 0;
|
||||
virtual void didBecome(const LockHolder&, IsoPage<Config>*, IsoPageTrigger) = 0;
|
||||
|
||||
protected:
|
||||
IsoHeapImpl<Config>& m_heap;
|
||||
@ -64,9 +65,9 @@ public:
|
||||
|
||||
// Find the first page that is eligible for allocation and return it. May return null if there is no
|
||||
// such thing. May allocate a new page if we have an uncommitted page.
|
||||
EligibilityResult<Config> takeFirstEligible();
|
||||
EligibilityResult<Config> takeFirstEligible(const LockHolder&);
|
||||
|
||||
void didBecome(IsoPage<Config>*, IsoPageTrigger) override;
|
||||
void didBecome(const LockHolder&, IsoPage<Config>*, IsoPageTrigger) override;
|
||||
|
||||
// This gets called from a bulk decommit function in the Scavenger, so no locks are held. This function
|
||||
// needs to get the heap lock.
|
||||
@ -74,25 +75,25 @@ public:
|
||||
|
||||
// Iterate over all empty and committed pages, and put them into the vector. This also records the
|
||||
// pages as being decommitted. It's the caller's job to do the actual decommitting.
|
||||
void scavenge(Vector<DeferredDecommit>&);
|
||||
#if BPLATFORM(MAC)
|
||||
void scavengeToHighWatermark(Vector<DeferredDecommit>&);
|
||||
void scavenge(const LockHolder&, Vector<DeferredDecommit>&);
|
||||
#if BUSE(PARTIAL_SCAVENGE)
|
||||
void scavengeToHighWatermark(const LockHolder&, Vector<DeferredDecommit>&);
|
||||
#endif
|
||||
|
||||
template<typename Func>
|
||||
void forEachCommittedPage(const Func&);
|
||||
void forEachCommittedPage(const LockHolder&, const Func&);
|
||||
|
||||
private:
|
||||
void scavengePage(size_t, Vector<DeferredDecommit>&);
|
||||
void scavengePage(const LockHolder&, size_t, Vector<DeferredDecommit>&);
|
||||
|
||||
std::array<PackedAlignedPtr<IsoPage<Config>, IsoPage<Config>::pageSize>, numPages> m_pages { };
|
||||
// NOTE: I suppose that this could be two bitvectors. But from working on the GC, I found that the
|
||||
// number of bitvectors does not matter as much as whether or not they make intuitive sense.
|
||||
Bits<numPages> m_eligible;
|
||||
Bits<numPages> m_empty;
|
||||
Bits<numPages> m_committed;
|
||||
std::array<IsoPage<Config>*, numPages> m_pages;
|
||||
unsigned m_firstEligibleOrDecommitted { 0 };
|
||||
#if BPLATFORM(MAC)
|
||||
#if BUSE(PARTIAL_SCAVENGE)
|
||||
unsigned m_highWatermark { 0 };
|
||||
#endif
|
||||
};
|
||||
|
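Editorial note: m_pages becomes an array of PackedAlignedPtr, which exploits the fact that every IsoPage is pageSize-aligned: the low bits of the address are always zero, so the pointer can be stored shifted down and reconstructed on .get(). A toy version of the trick (bmalloc's real Packed.h is more elaborate; this only illustrates the alignment idea):

    #include <cstdint>

    template<typename T, uintptr_t alignment>
    class PackedAlignedPtrSketch {
    public:
        void set(T* ptr) { m_bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr) / alignment); }
        T* get() const { return reinterpret_cast<T*>(static_cast<uintptr_t>(m_bits) * alignment); }

    private:
        uint32_t m_bits { 0 }; // fits whenever address / alignment < 2^32
    };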
@ -39,12 +39,10 @@ template<typename Config, unsigned passedNumPages>
IsoDirectory<Config, passedNumPages>::IsoDirectory(IsoHeapImpl<Config>& heap)
: IsoDirectoryBase<Config>(heap)
{
for (unsigned i = numPages; i--;)
m_pages[i] = nullptr;
}

template<typename Config, unsigned passedNumPages>
EligibilityResult<Config> IsoDirectory<Config, passedNumPages>::takeFirstEligible()
EligibilityResult<Config> IsoDirectory<Config, passedNumPages>::takeFirstEligible(const LockHolder&)
{
unsigned pageIndex = (m_eligible | ~m_committed).findBit(m_firstEligibleOrDecommitted, true);
m_firstEligibleOrDecommitted = pageIndex;
@ -52,14 +50,14 @@ EligibilityResult<Config> IsoDirectory<Config, passedNumPages>::takeFirstEligibl
if (pageIndex >= numPages)
return EligibilityKind::Full;

#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
m_highWatermark = std::max(pageIndex, m_highWatermark);
#endif

Scavenger& scavenger = *Scavenger::get();
scavenger.didStartGrowing();

IsoPage<Config>* page = m_pages[pageIndex];
IsoPage<Config>* page = m_pages[pageIndex].get();

if (!m_committed[pageIndex]) {
scavenger.scheduleIfUnderMemoryPressure(IsoPageBase::pageSize);
@ -93,7 +91,7 @@ EligibilityResult<Config> IsoDirectory<Config, passedNumPages>::takeFirstEligibl
}

template<typename Config, unsigned passedNumPages>
void IsoDirectory<Config, passedNumPages>::didBecome(IsoPage<Config>* page, IsoPageTrigger trigger)
void IsoDirectory<Config, passedNumPages>::didBecome(const LockHolder& locker, IsoPage<Config>* page, IsoPageTrigger trigger)
{
static constexpr bool verbose = false;
unsigned pageIndex = page->index();
@ -103,7 +101,7 @@ void IsoDirectory<Config, passedNumPages>::didBecome(IsoPage<Config>* page, IsoP
fprintf(stderr, "%p: %p did become eligible.\n", this, page);
m_eligible[pageIndex] = true;
m_firstEligibleOrDecommitted = std::min(m_firstEligibleOrDecommitted, pageIndex);
this->m_heap.didBecomeEligibleOrDecommited(this);
this->m_heap.didBecomeEligibleOrDecommited(locker, this);
return;
case IsoPageTrigger::Empty:
if (verbose)
@ -123,44 +121,44 @@ void IsoDirectory<Config, passedNumPages>::didDecommit(unsigned index)
// FIXME: We could do this without grabbing the lock. I just doubt that it matters. This is not going
// to be a frequently executed path, in the sense that decommitting perf will be dominated by the
// syscall itself (which has to do many hard things).
std::lock_guard<Mutex> locker(this->m_heap.lock);
LockHolder locker(this->m_heap.lock);
BASSERT(!!m_committed[index]);
this->m_heap.isNoLongerFreeable(m_pages[index], IsoPageBase::pageSize);
this->m_heap.isNoLongerFreeable(m_pages[index].get(), IsoPageBase::pageSize);
m_committed[index] = false;
m_firstEligibleOrDecommitted = std::min(m_firstEligibleOrDecommitted, index);
this->m_heap.didBecomeEligibleOrDecommited(this);
this->m_heap.didDecommit(m_pages[index], IsoPageBase::pageSize);
this->m_heap.didBecomeEligibleOrDecommited(locker, this);
this->m_heap.didDecommit(m_pages[index].get(), IsoPageBase::pageSize);
}

template<typename Config, unsigned passedNumPages>
void IsoDirectory<Config, passedNumPages>::scavengePage(size_t index, Vector<DeferredDecommit>& decommits)
void IsoDirectory<Config, passedNumPages>::scavengePage(const LockHolder&, size_t index, Vector<DeferredDecommit>& decommits)
{
// Make sure that this page is now off limits.
m_empty[index] = false;
m_eligible[index] = false;
decommits.push(DeferredDecommit(this, m_pages[index], index));
decommits.push(DeferredDecommit(this, m_pages[index].get(), index));
}

template<typename Config, unsigned passedNumPages>
void IsoDirectory<Config, passedNumPages>::scavenge(Vector<DeferredDecommit>& decommits)
void IsoDirectory<Config, passedNumPages>::scavenge(const LockHolder& locker, Vector<DeferredDecommit>& decommits)
{
(m_empty & m_committed).forEachSetBit(
[&] (size_t index) {
scavengePage(index, decommits);
scavengePage(locker, index, decommits);
});
#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
m_highWatermark = 0;
#endif
}

#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
template<typename Config, unsigned passedNumPages>
void IsoDirectory<Config, passedNumPages>::scavengeToHighWatermark(Vector<DeferredDecommit>& decommits)
void IsoDirectory<Config, passedNumPages>::scavengeToHighWatermark(const LockHolder& locker, Vector<DeferredDecommit>& decommits)
{
(m_empty & m_committed).forEachSetBit(
[&] (size_t index) {
if (index > m_highWatermark)
scavengePage(index, decommits);
scavengePage(locker, index, decommits);
});
m_highWatermark = 0;
}
@ -168,11 +166,11 @@ void IsoDirectory<Config, passedNumPages>::scavengeToHighWatermark(Vector<Deferr

template<typename Config, unsigned passedNumPages>
template<typename Func>
void IsoDirectory<Config, passedNumPages>::forEachCommittedPage(const Func& func)
void IsoDirectory<Config, passedNumPages>::forEachCommittedPage(const LockHolder&, const Func& func)
{
m_committed.forEachSetBit(
[&] (size_t index) {
func(*m_pages[index]);
func(*(m_pages[index].get()));
});
}
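Editorial note: takeFirstEligible's scan folds two states into one bit query: a page index is interesting if it is eligible OR not yet committed ((m_eligible | ~m_committed)), and m_firstEligibleOrDecommitted caches the scan cursor. Equivalent logic over plain standard bitsets:

    #include <bitset>
    #include <cstddef>

    template<size_t N>
    size_t firstEligibleOrDecommitted(const std::bitset<N>& eligible,
        const std::bitset<N>& committed, size_t from)
    {
        for (size_t i = from; i < N; ++i) {
            if (eligible[i] || !committed[i]) // same predicate as eligible | ~committed
                return i;
        }
        return N; // treated as EligibilityKind::Full above
    }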
@ -28,6 +28,10 @@
#include "IsoConfig.h"
#include "Mutex.h"

#if BENABLE_MALLOC_HEAP_BREAKDOWN
#include <malloc/malloc.h>
#endif

namespace bmalloc {

template<typename Config> class IsoHeapImpl;
@ -44,8 +48,12 @@ template<typename Type>
struct IsoHeap {
typedef IsoConfig<sizeof(Type)> Config;

constexpr IsoHeap() = default;

#if BENABLE_MALLOC_HEAP_BREAKDOWN
IsoHeap(const char* = nullptr);
#else
constexpr IsoHeap(const char* = nullptr) { }
#endif

void* allocate();
void* tryAllocate();
void deallocate(void* p);
@ -67,6 +75,10 @@ struct IsoHeap {
unsigned m_allocatorOffsetPlusOne { 0 };
unsigned m_deallocatorOffsetPlusOne { 0 };
IsoHeapImpl<Config>* m_impl { nullptr };

#if BENABLE_MALLOC_HEAP_BREAKDOWN
malloc_zone_t* m_zone;
#endif
};

// Use this together with MAKE_BISO_MALLOCED_IMPL.
@ -82,7 +94,8 @@ public: \
\
void* operator new[](size_t size) = delete; \
void operator delete[](void* p) = delete; \
using webkitFastMalloced = int; \
private: \
typedef int __makeBisoMallocedMacroSemicolonifier
using __makeBisoMallocedMacroSemicolonifier = int
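Editorial note: the last line of the macro switches from `typedef int X` to `using X = int`. Both exist solely so that a macro invocation can end with a semicolon without producing an extra-semicolon warning; the alias spelling just matches modern style. A self-contained illustration (the double-underscore name mimics WebKit's, though such names are formally reserved):

    #define DEMO_MACRO \
        void demoHelper(); \
        using __demoMacroSemicolonifier = int

    struct Demo {
        DEMO_MACRO; // the trailing alias declaration "absorbs" this semicolon
    };

} } // namespace bmalloc::api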
@ -31,7 +31,8 @@

namespace bmalloc {

IsoHeapImplBase::IsoHeapImplBase()
IsoHeapImplBase::IsoHeapImplBase(Mutex& lock)
: lock(lock)
{
}
@ -29,6 +29,7 @@
|
||||
#include "IsoAllocator.h"
|
||||
#include "IsoDirectoryPage.h"
|
||||
#include "IsoTLSAllocatorEntry.h"
|
||||
#include "Packed.h"
|
||||
#include "PhysicalPageMap.h"
|
||||
|
||||
namespace bmalloc {
|
||||
@ -37,6 +38,8 @@ class AllIsoHeaps;
|
||||
|
||||
class BEXPORT IsoHeapImplBase {
|
||||
MAKE_BMALLOCED;
|
||||
IsoHeapImplBase(const IsoHeapImplBase&) = delete;
|
||||
IsoHeapImplBase& operator=(const IsoHeapImplBase&) = delete;
|
||||
public:
|
||||
static constexpr unsigned maxAllocationFromShared = 8;
|
||||
static constexpr unsigned maxAllocationFromSharedMask = (1U << maxAllocationFromShared) - 1U;
|
||||
@ -46,29 +49,51 @@ public:
|
||||
virtual ~IsoHeapImplBase();
|
||||
|
||||
virtual void scavenge(Vector<DeferredDecommit>&) = 0;
|
||||
#if BPLATFORM(MAC)
|
||||
#if BUSE(PARTIAL_SCAVENGE)
|
||||
virtual void scavengeToHighWatermark(Vector<DeferredDecommit>&) = 0;
|
||||
#endif
|
||||
virtual size_t freeableMemory() = 0;
|
||||
virtual size_t footprint() = 0;
|
||||
|
||||
void scavengeNow();
|
||||
static void finishScavenging(Vector<DeferredDecommit>&);
|
||||
|
||||
protected:
|
||||
IsoHeapImplBase();
|
||||
void didCommit(void* ptr, size_t bytes);
|
||||
void didDecommit(void* ptr, size_t bytes);
|
||||
|
||||
void isNowFreeable(void* ptr, size_t bytes);
|
||||
void isNoLongerFreeable(void* ptr, size_t bytes);
|
||||
|
||||
size_t freeableMemory();
|
||||
size_t footprint();
|
||||
|
||||
void addToAllIsoHeaps();
|
||||
|
||||
protected:
|
||||
IsoHeapImplBase(Mutex&);
|
||||
|
||||
friend class IsoSharedPage;
|
||||
friend class AllIsoHeaps;
|
||||
|
||||
public:
|
||||
// It's almost always the caller's responsibility to grab the lock. This lock comes from the
|
||||
// (*PerProcess<IsoTLSEntryHolder<IsoTLSDeallocatorEntry<Config>>>::get())->lock. That's pretty weird, and we don't
|
||||
// try to disguise the fact that it's weird. We only do that because heaps in the same size class
|
||||
// share the same deallocator log, so it makes sense for them to also share the same lock to
|
||||
// amortize lock acquisition costs.
|
||||
Mutex& lock;
|
||||
protected:
|
||||
IsoHeapImplBase* m_next { nullptr };
|
||||
std::chrono::steady_clock::time_point m_lastSlowPathTime;
|
||||
std::array<void*, maxAllocationFromShared> m_sharedCells { };
|
||||
size_t m_footprint { 0 };
|
||||
size_t m_freeableMemory { 0 };
|
||||
#if ENABLE_PHYSICAL_PAGE_MAP
|
||||
PhysicalPageMap m_physicalPageMap;
|
||||
#endif
|
||||
std::array<PackedAlignedPtr<uint8_t, bmalloc::alignment>, maxAllocationFromShared> m_sharedCells { };
|
||||
protected:
|
||||
unsigned m_numberOfAllocationsFromSharedInOneCycle { 0 };
|
||||
unsigned m_availableShared { maxAllocationFromSharedMask };
|
||||
AllocationMode m_allocationMode { AllocationMode::Init };
|
||||
|
||||
bool m_isInlineDirectoryEligibleOrDecommitted { true };
|
||||
static_assert(sizeof(m_availableShared) * 8 >= maxAllocationFromShared, "");
|
};

@ -80,21 +105,17 @@ class IsoHeapImpl final : public IsoHeapImplBase {
public:
    IsoHeapImpl();

    EligibilityResult<Config> takeFirstEligible();
    EligibilityResult<Config> takeFirstEligible(const LockHolder&);

    // Callbacks from directory.
    void didBecomeEligibleOrDecommited(IsoDirectory<Config, numPagesInInlineDirectory>*);
    void didBecomeEligibleOrDecommited(IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>*);
    void didBecomeEligibleOrDecommited(const LockHolder&, IsoDirectory<Config, numPagesInInlineDirectory>*);
    void didBecomeEligibleOrDecommited(const LockHolder&, IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>*);

    void scavenge(Vector<DeferredDecommit>&) override;
#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
    void scavengeToHighWatermark(Vector<DeferredDecommit>&) override;
#endif

    size_t freeableMemory() override;

    size_t footprint() override;

    unsigned allocatorOffset();
    unsigned deallocatorOffset();

@ -103,47 +124,26 @@ public:
    unsigned numCommittedPages();

    template<typename Func>
    void forEachDirectory(const Func&);
    void forEachDirectory(const LockHolder&, const Func&);

    template<typename Func>
    void forEachCommittedPage(const Func&);
    void forEachCommittedPage(const LockHolder&, const Func&);

    // This is only accurate when all threads are scavenged. Otherwise it will overestimate.
    template<typename Func>
    void forEachLiveObject(const Func&);

    void didCommit(void* ptr, size_t bytes);
    void didDecommit(void* ptr, size_t bytes);

    void isNowFreeable(void* ptr, size_t bytes);
    void isNoLongerFreeable(void* ptr, size_t bytes);
    void forEachLiveObject(const LockHolder&, const Func&);

    AllocationMode updateAllocationMode();
    void* allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure);

    // It's almost always the caller's responsibility to grab the lock. This lock comes from the
    // PerProcess<IsoTLSDeallocatorEntry<Config>>::get()->lock. That's pretty weird, and we don't
    // try to disguise the fact that it's weird. We only do that because heaps in the same size class
    // share the same deallocator log, so it makes sense for them to also share the same lock to
    // amortize lock acquisition costs.
    Mutex& lock;
    void* allocateFromShared(const LockHolder&, bool abortOnFailure);

private:
    PackedPtr<IsoDirectoryPage<Config>> m_headDirectory { nullptr };
    PackedPtr<IsoDirectoryPage<Config>> m_tailDirectory { nullptr };
    PackedPtr<IsoDirectoryPage<Config>> m_firstEligibleOrDecommitedDirectory { nullptr };
    IsoDirectory<Config, numPagesInInlineDirectory> m_inlineDirectory;
    IsoDirectoryPage<Config>* m_headDirectory { nullptr };
    IsoDirectoryPage<Config>* m_tailDirectory { nullptr };
    size_t m_footprint { 0 };
    size_t m_freeableMemory { 0 };
#if ENABLE_PHYSICAL_PAGE_MAP
    PhysicalPageMap m_physicalPageMap;
#endif
    unsigned m_nextDirectoryPageIndex { 1 }; // We start at 1 so that the high water mark being zero means we've only allocated in the inline directory since the last scavenge.
    unsigned m_directoryHighWatermark { 0 };

    bool m_isInlineDirectoryEligibleOrDecommitted { true };
    IsoDirectoryPage<Config>* m_firstEligibleOrDecommitedDirectory { nullptr };

    IsoTLSAllocatorEntry<Config> m_allocator;
    IsoTLSEntryHolder<IsoTLSAllocatorEntry<Config>> m_allocator;
};

} // namespace bmalloc
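
The recurring change in this commit is the lock-token pattern: functions that used to take the heap lock internally now accept a const LockHolder& so the caller acquires once and callees get compile-time proof the lock is held. A minimal standalone sketch of the idea, with std::mutex standing in for bmalloc's Mutex and illustrative Heap/Directory types:

#include <mutex>

// Sketch only: LockHolder matches the alias this commit adds to Mutex.h
// (std::scoped_lock<Mutex>); everything else here is illustrative.
using Mutex = std::mutex;
using LockHolder = std::scoped_lock<Mutex>;

struct Directory {
    void scavenge(const LockHolder&) { /* safe: caller holds the lock */ }
};

struct Heap {
    Mutex& lock; // heaps in the same size class share one lock
    Directory directory;

    void scavenge()
    {
        LockHolder locker(lock); // acquire once at the public boundary
        directory.scavenge(locker); // pass the proof down instead of relocking
    }
};
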
@ -34,65 +34,74 @@ namespace bmalloc {

template<typename Config>
IsoHeapImpl<Config>::IsoHeapImpl()
    : lock(PerProcess<IsoTLSDeallocatorEntry<Config>>::get()->lock)
    : IsoHeapImplBase((*PerProcess<IsoTLSEntryHolder<IsoTLSDeallocatorEntry<Config>>>::get())->lock)
    , m_inlineDirectory(*this)
    , m_allocator(*this)
{
    addToAllIsoHeaps();
}

template<typename Config>
EligibilityResult<Config> IsoHeapImpl<Config>::takeFirstEligible()
EligibilityResult<Config> IsoHeapImpl<Config>::takeFirstEligible(const LockHolder& locker)
{
    if (m_isInlineDirectoryEligibleOrDecommitted) {
        EligibilityResult<Config> result = m_inlineDirectory.takeFirstEligible();
        EligibilityResult<Config> result = m_inlineDirectory.takeFirstEligible(locker);
        if (result.kind == EligibilityKind::Full)
            m_isInlineDirectoryEligibleOrDecommitted = false;
        else
            return result;
    }

    if (!m_firstEligibleOrDecommitedDirectory) {
        // If nothing is eligible, it can only be because we have no directories. It wouldn't be the end
        // of the world if we broke this invariant. It would only mean that didBecomeEligibleOrDecommited() would need
        // a null check.
        RELEASE_BASSERT(!m_headDirectory);
        RELEASE_BASSERT(!m_tailDirectory);
    }

    for (; m_firstEligibleOrDecommitedDirectory; m_firstEligibleOrDecommitedDirectory = m_firstEligibleOrDecommitedDirectory->next) {
        EligibilityResult<Config> result = m_firstEligibleOrDecommitedDirectory->payload.takeFirstEligible();
        if (result.kind != EligibilityKind::Full) {
            m_directoryHighWatermark = std::max(m_directoryHighWatermark, m_firstEligibleOrDecommitedDirectory->index());
            return result;
    {
        auto* cursor = m_firstEligibleOrDecommitedDirectory.get();
        if (!cursor) {
            // If nothing is eligible, it can only be because we have no directories. It wouldn't be the end
            // of the world if we broke this invariant. It would only mean that didBecomeEligibleOrDecommited() would need
            // a null check.
            RELEASE_BASSERT(!m_headDirectory.get());
            RELEASE_BASSERT(!m_tailDirectory.get());
        } else {
            auto* originalCursor = cursor;
            BUNUSED(originalCursor);
            for (; cursor; cursor = cursor->next) {
                EligibilityResult<Config> result = cursor->payload.takeFirstEligible(locker);
                // While iterating, m_firstEligibleOrDecommitedDirectory is never changed. We are holding a lock,
                // and IsoDirectory::takeFirstEligible must not populate new eligible / decommitted pages.
                BASSERT(m_firstEligibleOrDecommitedDirectory.get() == originalCursor);
                if (result.kind != EligibilityKind::Full) {
                    m_directoryHighWatermark = std::max(m_directoryHighWatermark, cursor->index());
                    m_firstEligibleOrDecommitedDirectory = cursor;
                    return result;
                }
            }
            m_firstEligibleOrDecommitedDirectory = nullptr;
        }
    }

    auto* newDirectory = new IsoDirectoryPage<Config>(*this, m_nextDirectoryPageIndex++);
    if (m_headDirectory) {
    if (m_headDirectory.get()) {
        m_tailDirectory->next = newDirectory;
        m_tailDirectory = newDirectory;
    } else {
        RELEASE_BASSERT(!m_tailDirectory);
        RELEASE_BASSERT(!m_tailDirectory.get());
        m_headDirectory = newDirectory;
        m_tailDirectory = newDirectory;
    }
    m_directoryHighWatermark = newDirectory->index();
    m_firstEligibleOrDecommitedDirectory = newDirectory;
    EligibilityResult<Config> result = newDirectory->payload.takeFirstEligible();
    EligibilityResult<Config> result = newDirectory->payload.takeFirstEligible(locker);
    RELEASE_BASSERT(result.kind != EligibilityKind::Full);
    return result;
}

template<typename Config>
void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(IsoDirectory<Config, numPagesInInlineDirectory>* directory)
void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(const LockHolder&, IsoDirectory<Config, numPagesInInlineDirectory>* directory)
{
    RELEASE_BASSERT(directory == &m_inlineDirectory);
    m_isInlineDirectoryEligibleOrDecommitted = true;
}

template<typename Config>
void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>* directory)
void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(const LockHolder&, IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>* directory)
{
    RELEASE_BASSERT(m_firstEligibleOrDecommitedDirectory);
    auto* directoryPage = IsoDirectoryPage<Config>::pageFor(directory);

@ -103,31 +112,31 @@ void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(IsoDirectory<Config, Iso
template<typename Config>
void IsoHeapImpl<Config>::scavenge(Vector<DeferredDecommit>& decommits)
{
    std::lock_guard<Mutex> locker(this->lock);
    LockHolder locker(this->lock);
    forEachDirectory(
        locker,
        [&] (auto& directory) {
            directory.scavenge(decommits);
            directory.scavenge(locker, decommits);
        });
    m_directoryHighWatermark = 0;
}

#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
template<typename Config>
void IsoHeapImpl<Config>::scavengeToHighWatermark(Vector<DeferredDecommit>& decommits)
{
    std::lock_guard<Mutex> locker(this->lock);
    LockHolder locker(this->lock);
    if (!m_directoryHighWatermark)
        m_inlineDirectory.scavengeToHighWatermark(decommits);
    for (IsoDirectoryPage<Config>* page = m_headDirectory; page; page = page->next) {
        m_inlineDirectory.scavengeToHighWatermark(locker, decommits);
    for (IsoDirectoryPage<Config>* page = m_headDirectory.get(); page; page = page->next) {
        if (page->index() >= m_directoryHighWatermark)
            page->payload.scavengeToHighWatermark(decommits);
            page->payload.scavengeToHighWatermark(locker, decommits);
    }
    m_directoryHighWatermark = 0;
}
#endif

template<typename Config>
size_t IsoHeapImpl<Config>::freeableMemory()
inline size_t IsoHeapImplBase::freeableMemory()
{
    return m_freeableMemory;
}

@ -135,20 +144,22 @@ size_t IsoHeapImpl<Config>::freeableMemory()
template<typename Config>
unsigned IsoHeapImpl<Config>::allocatorOffset()
{
    return m_allocator.offset();
    return m_allocator->offset();
}

template<typename Config>
unsigned IsoHeapImpl<Config>::deallocatorOffset()
{
    return PerProcess<IsoTLSDeallocatorEntry<Config>>::get()->offset();
    return (*PerProcess<IsoTLSEntryHolder<IsoTLSDeallocatorEntry<Config>>>::get())->offset();
}

template<typename Config>
unsigned IsoHeapImpl<Config>::numLiveObjects()
{
    LockHolder locker(this->lock);
    unsigned result = 0;
    forEachLiveObject(
        locker,
        [&] (void*) {
            result++;
        });

@ -158,8 +169,10 @@ unsigned IsoHeapImpl<Config>::numLiveObjects()
template<typename Config>
unsigned IsoHeapImpl<Config>::numCommittedPages()
{
    LockHolder locker(this->lock);
    unsigned result = 0;
    forEachCommittedPage(
        locker,
        [&] (IsoPage<Config>&) {
            result++;
        });

@ -168,40 +181,41 @@ unsigned IsoHeapImpl<Config>::numCommittedPages()

template<typename Config>
template<typename Func>
void IsoHeapImpl<Config>::forEachDirectory(const Func& func)
void IsoHeapImpl<Config>::forEachDirectory(const LockHolder&, const Func& func)
{
    func(m_inlineDirectory);
    for (IsoDirectoryPage<Config>* page = m_headDirectory; page; page = page->next)
    for (IsoDirectoryPage<Config>* page = m_headDirectory.get(); page; page = page->next)
        func(page->payload);
}

template<typename Config>
template<typename Func>
void IsoHeapImpl<Config>::forEachCommittedPage(const Func& func)
void IsoHeapImpl<Config>::forEachCommittedPage(const LockHolder& locker, const Func& func)
{
    forEachDirectory(
        locker,
        [&] (auto& directory) {
            directory.forEachCommittedPage(func);
            directory.forEachCommittedPage(locker, func);
        });
}

template<typename Config>
template<typename Func>
void IsoHeapImpl<Config>::forEachLiveObject(const Func& func)
void IsoHeapImpl<Config>::forEachLiveObject(const LockHolder& locker, const Func& func)
{
    forEachCommittedPage(
        locker,
        [&] (IsoPage<Config>& page) {
            page.forEachLiveObject(func);
            page.forEachLiveObject(locker, func);
        });
    for (unsigned index = 0; index < maxAllocationFromShared; ++index) {
        void* pointer = m_sharedCells[index];
        void* pointer = m_sharedCells[index].get();
        if (pointer && !(m_availableShared & (1U << index)))
            func(pointer);
    }
}

template<typename Config>
size_t IsoHeapImpl<Config>::footprint()
inline size_t IsoHeapImplBase::footprint()
{
#if ENABLE_PHYSICAL_PAGE_MAP
    RELEASE_BASSERT(m_footprint == m_physicalPageMap.footprint());

@ -209,8 +223,7 @@ size_t IsoHeapImpl<Config>::footprint()
    return m_footprint;
}

template<typename Config>
void IsoHeapImpl<Config>::didCommit(void* ptr, size_t bytes)
inline void IsoHeapImplBase::didCommit(void* ptr, size_t bytes)
{
    BUNUSED_PARAM(ptr);
    m_footprint += bytes;

@ -219,8 +232,7 @@ void IsoHeapImpl<Config>::didCommit(void* ptr, size_t bytes)
#endif
}

template<typename Config>
void IsoHeapImpl<Config>::didDecommit(void* ptr, size_t bytes)
inline void IsoHeapImplBase::didDecommit(void* ptr, size_t bytes)
{
    BUNUSED_PARAM(ptr);
    m_footprint -= bytes;

@ -229,15 +241,13 @@ void IsoHeapImpl<Config>::didDecommit(void* ptr, size_t bytes)
#endif
}

template<typename Config>
void IsoHeapImpl<Config>::isNowFreeable(void* ptr, size_t bytes)
inline void IsoHeapImplBase::isNowFreeable(void* ptr, size_t bytes)
{
    BUNUSED_PARAM(ptr);
    m_freeableMemory += bytes;
}

template<typename Config>
void IsoHeapImpl<Config>::isNoLongerFreeable(void* ptr, size_t bytes)
inline void IsoHeapImplBase::isNoLongerFreeable(void* ptr, size_t bytes)
{
    BUNUSED_PARAM(ptr);
    m_freeableMemory -= bytes;

@ -295,14 +305,14 @@ AllocationMode IsoHeapImpl<Config>::updateAllocationMode()
}

template<typename Config>
void* IsoHeapImpl<Config>::allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure)
void* IsoHeapImpl<Config>::allocateFromShared(const LockHolder&, bool abortOnFailure)
{
    static constexpr bool verbose = false;

    unsigned indexPlusOne = __builtin_ffs(m_availableShared);
    BASSERT(indexPlusOne);
    unsigned index = indexPlusOne - 1;
    void* result = m_sharedCells[index];
    void* result = m_sharedCells[index].get();
    if (result) {
        if (verbose)
            fprintf(stderr, "%p: allocated %p from shared again of size %u\n", this, result, Config::objectSize);

@ -315,7 +325,7 @@ void* IsoHeapImpl<Config>::allocateFromShared(const std::lock_guard<Mutex>&, boo
            fprintf(stderr, "%p: allocated %p from shared of size %u\n", this, result, Config::objectSize);
        BASSERT(index < IsoHeapImplBase::maxAllocationFromShared);
        *indexSlotFor<Config>(result) = index;
        m_sharedCells[index] = result;
        m_sharedCells[index] = bitwise_cast<uint8_t*>(result);
    }
    BASSERT(result);
    m_availableShared &= ~(1U << index);
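
allocateFromShared() above leans on a small bitmap trick: m_availableShared holds one bit per shared cell, and __builtin_ffs returns the 1-based position of the lowest set bit, i.e. the first free slot. The trick in isolation (names illustrative):

#include <cstdint>

// Returns the index of the first free slot and marks it used. The caller
// must guarantee at least one bit is set, mirroring BASSERT(indexPlusOne).
inline unsigned takeFirstFreeSlot(uint32_t& availableBits)
{
    int indexPlusOne = __builtin_ffs(static_cast<int>(availableBits));
    unsigned index = static_cast<unsigned>(indexPlusOne) - 1;
    availableBits &= ~(1U << index); // slot is now in use
    return index;
}
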

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

@ -43,6 +43,16 @@

namespace bmalloc { namespace api {

#if BENABLE_MALLOC_HEAP_BREAKDOWN
template<typename Type>
IsoHeap<Type>::IsoHeap(const char* heapClass)
    : m_zone(malloc_create_zone(0, 0))
{
    if (heapClass)
        malloc_set_zone_name(m_zone, heapClass);
}
#endif

template<typename Type>
void* IsoHeap<Type>::allocate()
{

@ -85,6 +95,7 @@ void IsoHeap<Type>::initialize()
    // when IsoHeap::isInitialized returns true, we need to store the value to m_impl *after*
    // all the initialization finishes.
    auto* heap = new IsoHeapImpl<Config>();
    heap->addToAllIsoHeaps();
    setAllocatorOffset(heap->allocatorOffset());
    setDeallocatorOffset(heap->deallocatorOffset());
    auto* atomic = reinterpret_cast<std::atomic<IsoHeapImpl<Config>*>*>(&m_impl);

@ -103,7 +114,7 @@ auto IsoHeap<Type>::impl() -> IsoHeapImpl<Config>&
public: \
    static ::bmalloc::api::IsoHeap<isoType>& bisoHeap() \
    { \
        static ::bmalloc::api::IsoHeap<isoType> heap; \
        static ::bmalloc::api::IsoHeap<isoType> heap("WebKit_"#isoType); \
        return heap; \
    } \
    \

@ -123,13 +134,14 @@ public: \
    \
    void* operator new[](size_t size) = delete; \
    void operator delete[](void* p) = delete; \
    using webkitFastMalloced = int; \
private: \
    typedef int __makeBisoMallocedInlineMacroSemicolonifier
    using __makeBisoMallocedInlineMacroSemicolonifier = int

#define MAKE_BISO_MALLOCED_IMPL(isoType) \
::bmalloc::api::IsoHeap<isoType>& isoType::bisoHeap() \
{ \
    static ::bmalloc::api::IsoHeap<isoType> heap; \
    static ::bmalloc::api::IsoHeap<isoType> heap("WebKit "#isoType); \
    return heap; \
} \
\

@ -150,7 +162,7 @@ struct MakeBisoMallocedImplMacroSemicolonifier##isoType { }
template<> \
::bmalloc::api::IsoHeap<isoType>& isoType::bisoHeap() \
{ \
    static ::bmalloc::api::IsoHeap<isoType> heap; \
    static ::bmalloc::api::IsoHeap<isoType> heap("WebKit_"#isoType); \
    return heap; \
} \
\
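
The initialize() comment above is about publication order: because isInitialized() readers take no lock, the impl pointer may only become visible after construction fully completes. A hedged sketch of that pattern with stand-in types:

#include <atomic>
#include <mutex>

struct HeapImpl { /* stand-in for IsoHeapImpl<Config> */ };

std::atomic<HeapImpl*> g_impl { nullptr };
std::mutex g_initLock;

HeapImpl& ensureHeap()
{
    if (HeapImpl* impl = g_impl.load(std::memory_order_acquire))
        return *impl;
    std::scoped_lock locker(g_initLock);
    if (HeapImpl* impl = g_impl.load(std::memory_order_acquire))
        return *impl;
    auto* heap = new HeapImpl(); // finish all initialization first...
    g_impl.store(heap, std::memory_order_release); // ...then publish
    return *heap;
}
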

@ -26,7 +26,7 @@
#include "IsoPage.h"

#include "PerProcess.h"
#include "VMHeap.h"
#include "VMAllocate.h"

namespace bmalloc {

@ -28,7 +28,9 @@
#include "Bits.h"
#include "DeferredTrigger.h"
#include "FreeList.h"
#include "Mutex.h"
#include <climits>
#include <mutex>

namespace bmalloc {

@ -42,6 +44,8 @@ public:

    explicit IsoPageBase(bool isShared)
        : m_isShared(isShared)
        , m_eligibilityHasBeenNoted(true)
        , m_isInUseForAllocation(false)
    {
    }

@ -52,7 +56,9 @@ public:
protected:
    BEXPORT static void* allocatePageMemory();

    bool m_isShared { false };
    bool m_isShared : 1;
    bool m_eligibilityHasBeenNoted : 1;
    bool m_isInUseForAllocation : 1;
};

template<typename Config>

@ -71,19 +77,19 @@ public:

    unsigned index() const { return m_index; }

    void free(void*);
    void free(const LockHolder&, void*);

    // Called after this page is already selected for allocation.
    FreeList startAllocating();
    FreeList startAllocating(const LockHolder&);

    // Called after the allocator picks another page to replace this one.
    void stopAllocating(FreeList freeList);
    void stopAllocating(const LockHolder&, FreeList);

    IsoDirectoryBase<Config>& directory() { return m_directory; }
    bool isInUseForAllocation() const { return m_isInUseForAllocation; }

    template<typename Func>
    void forEachLiveObject(const Func&);
    void forEachLiveObject(const LockHolder&, const Func&);

    IsoHeapImpl<Config>& heap();

@ -111,16 +117,13 @@ private:

    // This must have a trivial destructor.

    bool m_eligibilityHasBeenNoted { true };
    bool m_isInUseForAllocation { false };
    DeferredTrigger<IsoPageTrigger::Eligible> m_eligibilityTrigger;
    DeferredTrigger<IsoPageTrigger::Empty> m_emptyTrigger;

    IsoDirectoryBase<Config>& m_directory;
    uint8_t m_numNonEmptyWords { 0 };
    static_assert(bitsArrayLength(numObjects) <= UINT8_MAX);
    unsigned m_index { UINT_MAX };

    IsoDirectoryBase<Config>& m_directory;
    unsigned m_allocBits[bitsArrayLength(numObjects)];
    unsigned m_numNonEmptyWords { 0 };
};

} // namespace bmalloc
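
The IsoPageBase change above turns three plain bools into one-bit bitfields so the flags share a single byte. A compilable illustration of the saving (the sizes hold on common ABIs where bool is one byte):

struct UnpackedFlags {
    bool isShared;
    bool eligibilityHasBeenNoted;
    bool isInUseForAllocation;
};

struct PackedFlags {
    bool isShared : 1;
    bool eligibilityHasBeenNoted : 1;
    bool isInUseForAllocation : 1;
};

static_assert(sizeof(UnpackedFlags) == 3, "one byte per bool");
static_assert(sizeof(PackedFlags) == 1, "three bits share one byte");
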

@ -47,8 +47,8 @@ IsoPage<Config>* IsoPage<Config>::tryCreate(IsoDirectoryBase<Config>& directory,
template<typename Config>
IsoPage<Config>::IsoPage(IsoDirectoryBase<Config>& directory, unsigned index)
    : IsoPageBase(false)
    , m_directory(directory)
    , m_index(index)
    , m_directory(directory)
{
    memset(m_allocBits, 0, sizeof(m_allocBits));
}

@ -65,14 +65,14 @@ IsoPage<Config>* IsoPage<Config>::pageFor(void* ptr)
}

template<typename Config>
void IsoPage<Config>::free(void* passedPtr)
void IsoPage<Config>::free(const LockHolder& locker, void* passedPtr)
{
    BASSERT(!m_isShared);
    unsigned offset = static_cast<char*>(passedPtr) - reinterpret_cast<char*>(this);
    unsigned index = offset / Config::objectSize;

    if (!m_eligibilityHasBeenNoted) {
        m_eligibilityTrigger.didBecome(*this);
        m_eligibilityTrigger.didBecome(locker, *this);
        m_eligibilityHasBeenNoted = true;
    }

@ -82,12 +82,12 @@ void IsoPage<Config>::free(void* passedPtr)
    unsigned newWord = m_allocBits[wordIndex] &= ~(1 << bitIndex);
    if (!newWord) {
        if (!--m_numNonEmptyWords)
            m_emptyTrigger.didBecome(*this);
            m_emptyTrigger.didBecome(locker, *this);
    }
}

template<typename Config>
FreeList IsoPage<Config>::startAllocating()
FreeList IsoPage<Config>::startAllocating(const LockHolder&)
{
    static constexpr bool verbose = false;

@ -208,7 +208,7 @@ FreeList IsoPage<Config>::startAllocating()
}

template<typename Config>
void IsoPage<Config>::stopAllocating(FreeList freeList)
void IsoPage<Config>::stopAllocating(const LockHolder& locker, FreeList freeList)
{
    static constexpr bool verbose = false;

@ -217,19 +217,19 @@ void IsoPage<Config>::stopAllocating(FreeList freeList)

    freeList.forEach<Config>(
        [&] (void* ptr) {
            free(ptr);
            free(locker, ptr);
        });

    RELEASE_BASSERT(m_isInUseForAllocation);
    m_isInUseForAllocation = false;

    m_eligibilityTrigger.handleDeferral(*this);
    m_emptyTrigger.handleDeferral(*this);
    m_eligibilityTrigger.handleDeferral(locker, *this);
    m_emptyTrigger.handleDeferral(locker, *this);
}

template<typename Config>
template<typename Func>
void IsoPage<Config>::forEachLiveObject(const Func& func)
void IsoPage<Config>::forEachLiveObject(const LockHolder&, const Func& func)
{
    for (unsigned wordIndex = 0; wordIndex < bitsArrayLength(numObjects); ++wordIndex) {
        unsigned word = m_allocBits[wordIndex];
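
IsoPage<Config>::free() above derives the object's index from its byte offset within the page, clears its bit in m_allocBits, and fires the empty trigger once the count of non-empty words drops to zero. The bookkeeping distilled (constants are illustrative):

#include <cstdint>

constexpr unsigned objectSize = 32; // stand-in for Config::objectSize
constexpr unsigned bitsPerWord = 32;

// Clears the alloc bit for `object` and reports whether the page emptied.
inline bool freeObjectBit(uint32_t* allocBits, const char* pageBase, const char* object, unsigned& numNonEmptyWords)
{
    unsigned index = static_cast<unsigned>(object - pageBase) / objectSize;
    uint32_t newWord = allocBits[index / bitsPerWord] &= ~(1U << (index % bitsPerWord));
    return !newWord && !--numNonEmptyWords; // true: page is now completely empty
}
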

@ -53,7 +53,7 @@ private:

class IsoSharedHeap : public StaticPerProcess<IsoSharedHeap> {
public:
    IsoSharedHeap(std::lock_guard<Mutex>&)
    IsoSharedHeap(const LockHolder&)
    {
    }

@ -62,7 +62,7 @@ public:

private:
    template<unsigned>
    void* allocateSlow(bool abortOnFailure);
    void* allocateSlow(const LockHolder&, bool abortOnFailure);

    IsoSharedPage* m_currentPage { nullptr };
    VariadicBumpAllocator m_allocator;

@ -51,16 +51,16 @@ inline constexpr unsigned computeObjectSizeForSharedCell(unsigned objectSize)
template<unsigned passedObjectSize>
void* IsoSharedHeap::allocateNew(bool abortOnFailure)
{
    std::lock_guard<Mutex> locker(mutex());
    LockHolder locker(mutex());
    constexpr unsigned objectSize = computeObjectSizeForSharedCell(passedObjectSize);
    return m_allocator.template allocate<objectSize>(
        [&] () -> void* {
            return allocateSlow<passedObjectSize>(abortOnFailure);
            return allocateSlow<passedObjectSize>(locker, abortOnFailure);
        });
}

template<unsigned passedObjectSize>
BNO_INLINE void* IsoSharedHeap::allocateSlow(bool abortOnFailure)
BNO_INLINE void* IsoSharedHeap::allocateSlow(const LockHolder& locker, bool abortOnFailure)
{
    Scavenger& scavenger = *Scavenger::get();
    scavenger.didStartGrowing();

@ -73,10 +73,10 @@ BNO_INLINE void* IsoSharedHeap::allocateSlow(bool abortOnFailure)
    }

    if (m_currentPage)
        m_currentPage->stopAllocating();
        m_currentPage->stopAllocating(locker);

    m_currentPage = page;
    m_allocator = m_currentPage->startAllocating();
    m_allocator = m_currentPage->startAllocating(locker);

    constexpr unsigned objectSize = computeObjectSizeForSharedCell(passedObjectSize);
    return m_allocator.allocate<objectSize>([] () { BCRASH(); return nullptr; });
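
allocateNew() above is a fast-path/slow-path split around a bump allocator: bump while the current page has room, otherwise allocateSlow() wires up a fresh page. A minimal sketch in the spirit of VariadicBumpAllocator (not its real layout):

struct BumpAllocator {
    char* payloadEnd { nullptr };
    unsigned remaining { 0 };

    // Carve one object off the remaining payload, or defer to the slow path.
    template<unsigned objectSize, typename SlowPath>
    void* allocate(const SlowPath& slowPath)
    {
        if (remaining >= objectSize) {
            remaining -= objectSize;
            return payloadEnd - remaining - objectSize;
        }
        return slowPath();
    }
};
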

@ -38,9 +38,9 @@ public:
    BEXPORT static IsoSharedPage* tryCreate();

    template<typename Config, typename Type>
    void free(const std::lock_guard<Mutex>&, api::IsoHeap<Type>&, void*);
    VariadicBumpAllocator startAllocating();
    void stopAllocating();
    void free(const LockHolder&, api::IsoHeap<Type>&, void*);
    VariadicBumpAllocator startAllocating(const LockHolder&);
    void stopAllocating(const LockHolder&);

private:
    IsoSharedPage()

@ -35,18 +35,18 @@ namespace bmalloc {
// This is because an empty IsoSharedPage is still split into various different objects that should keep some part of the virtual memory region dedicated.
// We cannot set up bump allocation for such a page. Not freeing IsoSharedPages is OK since IsoSharedPage is only used for the lower tier of IsoHeap.
template<typename Config, typename Type>
void IsoSharedPage::free(const std::lock_guard<Mutex>&, api::IsoHeap<Type>& handle, void* ptr)
void IsoSharedPage::free(const LockHolder&, api::IsoHeap<Type>& handle, void* ptr)
{
    auto& heapImpl = handle.impl();
    uint8_t index = *indexSlotFor<Config>(ptr) & IsoHeapImplBase::maxAllocationFromSharedMask;
    // IsoDeallocator::deallocate is called from the delete operator. This is dispatched by vtable if a virtual destructor exists.
    // If the vptr is replaced with another vptr, we may accidentally chain this pointer to the incorrect HeapImplBase, which totally breaks the IsoHeap's goal.
    // To harden against that, we validate that this pointer was actually allocated for a specific HeapImplBase by checking whether it is listed in the HeapImplBase's shared cells.
    RELEASE_BASSERT(heapImpl.m_sharedCells[index] == ptr);
    RELEASE_BASSERT(heapImpl.m_sharedCells[index].get() == ptr);
    heapImpl.m_availableShared |= (1U << index);
}

inline VariadicBumpAllocator IsoSharedPage::startAllocating()
inline VariadicBumpAllocator IsoSharedPage::startAllocating(const LockHolder&)
{
    static constexpr bool verbose = false;

@ -61,7 +61,7 @@ inline VariadicBumpAllocator IsoSharedPage::startAllocating()
    return VariadicBumpAllocator(payloadEnd, remaining);
}

inline void IsoSharedPage::stopAllocating()
inline void IsoSharedPage::stopAllocating(const LockHolder&)
{
    static constexpr bool verbose = false;

@ -26,7 +26,6 @@
#include "IsoTLS.h"

#include "Environment.h"
#include "Gigacage.h"
#include "IsoTLSEntryInlines.h"
#include "IsoTLSInlines.h"
#include "IsoTLSLayout.h"

@ -184,21 +183,16 @@ void IsoTLS::determineMallocFallbackState()
        if (s_mallocFallbackState != MallocFallbackState::Undecided)
            return;

#if GIGACAGE_ENABLED || BCPU(ARM64)
#if !BCPU(ARM64)
        if (!Gigacage::shouldBeEnabled()) {
        if (Environment::get()->isDebugHeapEnabled()) {
            s_mallocFallbackState = MallocFallbackState::FallBackToMalloc;
            return;
        }
#endif

        const char* env = getenv("bmalloc_IsoHeap");
        if (env && (!strcasecmp(env, "false") || !strcasecmp(env, "no") || !strcmp(env, "0")))
            s_mallocFallbackState = MallocFallbackState::FallBackToMalloc;
        else
            s_mallocFallbackState = MallocFallbackState::DoNotFallBack;
#else
        s_mallocFallbackState = MallocFallbackState::FallBackToMalloc;
#endif
    });
}

@ -49,7 +49,7 @@ public:
    template<typename Type>
    static void ensureHeap(api::IsoHeap<Type>&);

    static void scavenge();
    BEXPORT static void scavenge();

    template<typename Type>
    static void scavenge(api::IsoHeap<Type>&);

@ -60,8 +60,8 @@ private:
    template<typename Config, typename Type>
    static void* allocateImpl(api::IsoHeap<Type>&, bool abortOnFailure);

    template<typename Config>
    void* allocateFast(unsigned offset, bool abortOnFailure);
    template<typename Config, typename Type>
    void* allocateFast(api::IsoHeap<Type>&, unsigned offset, bool abortOnFailure);

    template<typename Config, typename Type>
    static void* allocateSlow(api::IsoHeap<Type>&, bool abortOnFailure);

@ -109,7 +109,7 @@ private:
    char m_data[1];

#if HAVE_PTHREAD_MACHDEP_H
    static const pthread_key_t tlsKey = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY1;
    static constexpr pthread_key_t tlsKey = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY1;
#else
    BEXPORT static bool s_didInitialize;
    BEXPORT static pthread_key_t s_tlsKey;

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

@ -33,13 +33,17 @@ namespace bmalloc {
template<typename Config> class IsoHeapImpl;

template<typename Config>
class IsoTLSAllocatorEntry : public DefaultIsoTLSEntry<IsoAllocator<Config>> {
class IsoTLSAllocatorEntry final : public DefaultIsoTLSEntry<IsoAllocator<Config>> {
public:
    IsoTLSAllocatorEntry(IsoHeapImpl<Config>&);
    template<typename T> friend class IsoTLSEntryHolder;
    ~IsoTLSAllocatorEntry();

private:
    IsoTLSAllocatorEntry(IsoHeapImpl<Config>&);

    void construct(void* dst) override;

    void scavenge(void* entry) override;

    IsoHeapImpl<Config>& m_heap;
};

@ -46,5 +46,11 @@ void IsoTLSAllocatorEntry<Config>::construct(void* dst)
    new (dst) IsoAllocator<Config>(m_heap);
}

template<typename Config>
void IsoTLSAllocatorEntry<Config>::scavenge(void* entry)
{
    static_cast<IsoAllocator<Config>*>(entry)->scavenge(m_heap);
}

} // namespace bmalloc

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

@ -33,9 +33,9 @@
namespace bmalloc {

template<typename Config>
class IsoTLSDeallocatorEntry : public DefaultIsoTLSEntry<IsoDeallocator<Config>> {
class IsoTLSDeallocatorEntry final : public DefaultIsoTLSEntry<IsoDeallocator<Config>> {
public:
    IsoTLSDeallocatorEntry(const std::lock_guard<Mutex>&);
    template<typename T> friend class IsoTLSEntryHolder;
    ~IsoTLSDeallocatorEntry();

    // This is used as the heap lock, since heaps in the same size class share the same deallocator

@ -43,7 +43,10 @@ public:
    Mutex lock;

private:
    IsoTLSDeallocatorEntry(const LockHolder&);

    void construct(void* entry) override;
    void scavenge(void* entry) override;
};

} // namespace bmalloc

@ -28,7 +28,7 @@
namespace bmalloc {

template<typename Config>
IsoTLSDeallocatorEntry<Config>::IsoTLSDeallocatorEntry(const std::lock_guard<Mutex>&)
IsoTLSDeallocatorEntry<Config>::IsoTLSDeallocatorEntry(const LockHolder&)
{
}

@ -43,5 +43,11 @@ void IsoTLSDeallocatorEntry<Config>::construct(void* entry)
    new (entry) IsoDeallocator<Config>(lock);
}

template<typename Config>
void IsoTLSDeallocatorEntry<Config>::scavenge(void* entry)
{
    static_cast<IsoDeallocator<Config>*>(entry)->scavenge();
}

} // namespace bmalloc

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

@ -26,19 +26,14 @@
#include "IsoTLSEntry.h"

#include "BAssert.h"
#include "IsoTLSLayout.h"
#include "PerProcess.h"
#include <climits>

namespace bmalloc {

IsoTLSEntry::IsoTLSEntry(size_t alignment, size_t size)
    : m_offset(UINT_MAX)
    , m_alignment(alignment)
    , m_size(size)
IsoTLSEntry::IsoTLSEntry(size_t size)
    : m_size(size)
{
    IsoTLSLayout::get()->add(this);
    RELEASE_BASSERT(m_offset != UINT_MAX);
}

IsoTLSEntry::~IsoTLSEntry()

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

@ -25,21 +25,47 @@

#pragma once

#include "BAssert.h"
#include "BMalloced.h"
#include "IsoTLSLayout.h"
#include <climits>

namespace bmalloc {

class IsoTLS;
class IsoTLSLayout;

template<typename Entry>
class IsoTLSEntryHolder {
    MAKE_BMALLOCED;
    IsoTLSEntryHolder(const IsoTLSEntryHolder&) = delete;
    IsoTLSEntryHolder& operator=(const IsoTLSEntryHolder&) = delete;
public:
    template<typename... Args>
    IsoTLSEntryHolder(Args&&... args)
        : m_entry(std::forward<Args>(args)...)
    {
        IsoTLSLayout::get()->add(&m_entry);
        RELEASE_BASSERT(m_entry.offset() != UINT_MAX);
    }

    inline const Entry& operator*() const { return m_entry; }
    inline Entry& operator*() { return m_entry; }
    inline const Entry* operator->() const { return &m_entry; }
    inline Entry* operator->() { return &m_entry; }

private:
    Entry m_entry;
};

class BEXPORT IsoTLSEntry {
    MAKE_BMALLOCED;
    IsoTLSEntry(const IsoTLSEntry&) = delete;
    IsoTLSEntry& operator=(const IsoTLSEntry&) = delete;
public:
    IsoTLSEntry(size_t alignment, size_t size);
    virtual ~IsoTLSEntry();

    size_t offset() const { return m_offset; }
    size_t alignment() const { return m_alignment; }
    size_t alignment() const { return sizeof(void*); }
    size_t size() const { return m_size; }
    size_t extent() const { return m_offset + m_size; }

@ -50,6 +76,9 @@ public:

    template<typename Func>
    void walkUpToInclusive(IsoTLSEntry*, const Func&);

protected:
    IsoTLSEntry(size_t size);

private:
    friend class IsoTLS;

@ -57,18 +86,18 @@ private:

    IsoTLSEntry* m_next { nullptr };

    size_t m_offset; // Computed in constructor.
    size_t m_alignment;
    size_t m_size;
    unsigned m_offset { UINT_MAX }; // Computed in constructor.
    unsigned m_size;
};

template<typename EntryType>
class DefaultIsoTLSEntry : public IsoTLSEntry {
public:
    DefaultIsoTLSEntry();
    ~DefaultIsoTLSEntry();
    ~DefaultIsoTLSEntry() = default;

protected:
    DefaultIsoTLSEntry();

    // This clones src onto dst and then destructs src. Therefore, entry destructors cannot do
    // scavenging.
    void move(void* src, void* dst) override;

@ -76,8 +105,6 @@ protected:
    // Likewise, this is separate from scavenging. When the TLS is shutting down, we will be asked to
    // scavenge and then we will be asked to destruct.
    void destruct(void* entry) override;

    void scavenge(void* entry) override;
};

} // namespace bmalloc

@ -43,13 +43,10 @@ void IsoTLSEntry::walkUpToInclusive(IsoTLSEntry* last, const Func& func)

template<typename EntryType>
DefaultIsoTLSEntry<EntryType>::DefaultIsoTLSEntry()
    : IsoTLSEntry(alignof(EntryType), sizeof(EntryType))
{
}

template<typename EntryType>
DefaultIsoTLSEntry<EntryType>::~DefaultIsoTLSEntry()
    : IsoTLSEntry(sizeof(EntryType))
{
    static_assert(sizeof(EntryType) <= UINT32_MAX);
    static_assert(sizeof(void*) == alignof(EntryType), "Because IsoTLSEntry includes a vtable, its alignment should be the same as a pointer's");
}

template<typename EntryType>

@ -67,12 +64,5 @@ void DefaultIsoTLSEntry<EntryType>::destruct(void* passedEntry)
    entry->~EntryType();
}

template<typename EntryType>
void DefaultIsoTLSEntry<EntryType>::scavenge(void* passedEntry)
{
    EntryType* entry = static_cast<EntryType*>(passedEntry);
    entry->scavenge();
}

} // namespace bmalloc

@ -30,6 +30,10 @@
#include "IsoTLS.h"
#include "bmalloc.h"

#if BOS(DARWIN)
#include <malloc/malloc.h>
#endif

namespace bmalloc {

template<typename Type>

@ -56,7 +60,7 @@ void IsoTLS::scavenge(api::IsoHeap<Type>& handle)
        return;
    unsigned offset = handle.allocatorOffset();
    if (offset < tls->m_extent)
        reinterpret_cast<IsoAllocator<typename api::IsoHeap<Type>::Config>*>(tls->m_data + offset)->scavenge();
        reinterpret_cast<IsoAllocator<typename api::IsoHeap<Type>::Config>*>(tls->m_data + offset)->scavenge(handle.impl());
    offset = handle.deallocatorOffset();
    if (offset < tls->m_extent)
        reinterpret_cast<IsoDeallocator<typename api::IsoHeap<Type>::Config>*>(tls->m_data + offset)->scavenge();

@ -70,13 +74,13 @@ void* IsoTLS::allocateImpl(api::IsoHeap<Type>& handle, bool abortOnFailure)
    IsoTLS* tls = get();
    if (!tls || offset >= tls->m_extent)
        return allocateSlow<Config>(handle, abortOnFailure);
    return tls->allocateFast<Config>(offset, abortOnFailure);
    return tls->allocateFast<Config>(handle, offset, abortOnFailure);
}

template<typename Config>
void* IsoTLS::allocateFast(unsigned offset, bool abortOnFailure)
template<typename Config, typename Type>
void* IsoTLS::allocateFast(api::IsoHeap<Type>& handle, unsigned offset, bool abortOnFailure)
{
    return reinterpret_cast<IsoAllocator<Config>*>(m_data + offset)->allocate(abortOnFailure);
    return reinterpret_cast<IsoAllocator<Config>*>(m_data + offset)->allocate(handle.impl(), abortOnFailure);
}

template<typename Config, typename Type>

@ -88,7 +92,11 @@ BNO_INLINE void* IsoTLS::allocateSlow(api::IsoHeap<Type>& handle, bool abortOnFa
        determineMallocFallbackState();
        continue;
    case MallocFallbackState::FallBackToMalloc:
#if BENABLE_MALLOC_HEAP_BREAKDOWN
        return malloc_zone_malloc(handle.m_zone, Config::objectSize);
#else
        return api::tryMalloc(Config::objectSize);
#endif
    case MallocFallbackState::DoNotFallBack:
        break;
    }

@ -100,7 +108,7 @@ BNO_INLINE void* IsoTLS::allocateSlow(api::IsoHeap<Type>& handle, bool abortOnFa

    IsoTLS* tls = ensureHeapAndEntries(handle);

    return tls->allocateFast<Config>(handle.allocatorOffset(), abortOnFailure);
    return tls->allocateFast<Config>(handle, handle.allocatorOffset(), abortOnFailure);
}

template<typename Config, typename Type>

@ -131,7 +139,11 @@ BNO_INLINE void IsoTLS::deallocateSlow(api::IsoHeap<Type>& handle, void* p)
        determineMallocFallbackState();
        continue;
    case MallocFallbackState::FallBackToMalloc:
#if BENABLE_MALLOC_HEAP_BREAKDOWN
        return malloc_zone_free(handle.m_zone, p);
#else
        return api::free(p);
#endif
    case MallocFallbackState::DoNotFallBack:
        break;
    }

@ -172,7 +184,7 @@ template<typename Type>
void IsoTLS::ensureHeap(api::IsoHeap<Type>& handle)
{
    if (!handle.isInitialized()) {
        std::lock_guard<Mutex> locker(handle.m_initializationLock);
        LockHolder locker(handle.m_initializationLock);
        if (!handle.isInitialized())
            handle.initialize();
    }

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

@ -31,7 +31,7 @@ namespace bmalloc {

DEFINE_STATIC_PER_PROCESS_STORAGE(IsoTLSLayout);

IsoTLSLayout::IsoTLSLayout(const std::lock_guard<Mutex>&)
IsoTLSLayout::IsoTLSLayout(const LockHolder&)
{
}

@ -39,15 +39,21 @@ void IsoTLSLayout::add(IsoTLSEntry* entry)
{
    static Mutex addingMutex;
    RELEASE_BASSERT(!entry->m_next);
    std::lock_guard<Mutex> locking(addingMutex);
    // IsoTLSLayout::head() does not take a lock, so we should emit a memory fence to make sure that the newly added entry is initialized when it is chained to this linked list.
    // Emitting a memory fence here is OK since this function is not called frequently.
    LockHolder locking(addingMutex);
    if (m_head) {
        RELEASE_BASSERT(m_tail);
        entry->m_offset = roundUpToMultipleOf(entry->alignment(), m_tail->extent());
        size_t offset = roundUpToMultipleOf(entry->alignment(), m_tail->extent());
        RELEASE_BASSERT(offset < UINT_MAX);
        entry->m_offset = offset;
        std::atomic_thread_fence(std::memory_order_seq_cst);
        m_tail->m_next = entry;
        m_tail = entry;
    } else {
        RELEASE_BASSERT(!m_tail);
        entry->m_offset = 0;
        std::atomic_thread_fence(std::memory_order_seq_cst);
        m_head = entry;
        m_tail = entry;
    }
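
The add() change above encodes a publish-before-link rule: head() is read without the lock, so an entry's fields must be visible before the entry becomes reachable. The same ordering with stand-in types:

#include <atomic>

struct Entry {
    unsigned offset { 0 };
    Entry* next { nullptr };
};

Entry* g_head { nullptr };
Entry* g_tail { nullptr };

// Mirrors IsoTLSLayout::add: initialize fully, fence, then link, so lock-free
// readers walking from g_head never see a half-initialized entry.
void append(Entry* entry, unsigned offset)
{
    entry->offset = offset;
    std::atomic_thread_fence(std::memory_order_seq_cst);
    if (g_tail)
        g_tail->next = entry;
    else
        g_head = entry;
    g_tail = entry;
}
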

@ -1,5 +1,5 @@
/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

@ -35,9 +35,9 @@ class IsoTLSEntry;

class IsoTLSLayout : public StaticPerProcess<IsoTLSLayout> {
public:
    IsoTLSLayout(const std::lock_guard<Mutex>&);
    BEXPORT IsoTLSLayout(const LockHolder&);

    void add(IsoTLSEntry*);
    BEXPORT void add(IsoTLSEntry*);

    IsoTLSEntry* head() const { return m_head; }

@ -76,7 +76,7 @@ void LargeMap::add(const LargeRange& range)
        merged = merge(merged, m_free.pop(i--));
    }

#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
    merged.setUsedSinceLastScavenge();
#endif
    m_free.push(merged);

@ -37,7 +37,7 @@ public:
        : Range()
        , m_startPhysicalSize(0)
        , m_totalPhysicalSize(0)
#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
        , m_isEligible(true)
        , m_usedSinceLastScavenge(false)
#endif

@ -48,7 +48,7 @@ public:
        : Range(other)
        , m_startPhysicalSize(startPhysicalSize)
        , m_totalPhysicalSize(totalPhysicalSize)
#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
        , m_isEligible(true)
        , m_usedSinceLastScavenge(false)
#endif

@ -57,7 +57,7 @@ public:
        BASSERT(this->totalPhysicalSize() >= this->startPhysicalSize());
    }

#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
    LargeRange(void* begin, size_t size, size_t startPhysicalSize, size_t totalPhysicalSize)
        : Range(begin, size)
        , m_startPhysicalSize(startPhysicalSize)

@ -104,7 +104,7 @@ public:
    void setEligible(bool eligible) { m_isEligible = eligible; }
    bool isEligibile() const { return m_isEligible; }

#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
    bool usedSinceLastScavenge() const { return m_usedSinceLastScavenge; }
    void clearUsedSinceLastScavenge() { m_usedSinceLastScavenge = false; }
    void setUsedSinceLastScavenge() { m_usedSinceLastScavenge = true; }

@ -116,7 +116,7 @@ public:
private:
    size_t m_startPhysicalSize;
    size_t m_totalPhysicalSize;
#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
    bool m_isEligible { true };
#else
    unsigned m_isEligible: 1;

@ -144,7 +144,7 @@ inline bool canMerge(const LargeRange& a, const LargeRange& b)
inline LargeRange merge(const LargeRange& a, const LargeRange& b)
{
    const LargeRange& left = std::min(a, b);
#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
    bool mergedUsedSinceLastScavenge = a.usedSinceLastScavenge() || b.usedSinceLastScavenge();
#endif
    if (left.size() == left.startPhysicalSize()) {

@ -153,7 +153,7 @@ inline LargeRange merge(const LargeRange& a, const LargeRange& b)
            a.size() + b.size(),
            a.startPhysicalSize() + b.startPhysicalSize(),
            a.totalPhysicalSize() + b.totalPhysicalSize()
#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
            , mergedUsedSinceLastScavenge
#endif
        );

@ -165,7 +165,7 @@ inline LargeRange merge(const LargeRange& a, const LargeRange& b)
            a.size() + b.size(),
            left.startPhysicalSize(),
            a.totalPhysicalSize() + b.totalPhysicalSize()
#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
            , mergedUsedSinceLastScavenge
#endif
        );

@ -23,14 +23,15 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef LineMetadata_h
#define LineMetadata_h
#pragma once

#include "Sizes.h"

namespace bmalloc {

struct LineMetadata {
    unsigned char startOffset;
    unsigned char objectCount;
    unsigned char startOffset { };
    unsigned char objectCount { };
};

static_assert(

@ -42,5 +43,3 @@ static_assert(
    "maximum object count must fit in LineMetadata::objectCount");

} // namespace bmalloc

#endif // LineMetadata_h

@ -31,31 +31,8 @@
#include <stdio.h>
#endif

#if BPLATFORM(IOS_FAMILY)
#include <CoreFoundation/CoreFoundation.h>
#include <mach/exception_types.h>
#include <objc/objc.h>
#include <unistd.h>

#include "BSoftLinking.h"
BSOFT_LINK_PRIVATE_FRAMEWORK(CrashReporterSupport);
BSOFT_LINK_FUNCTION(CrashReporterSupport, SimulateCrash, BOOL, (pid_t pid, mach_exception_data_type_t exceptionCode, CFStringRef description), (pid, exceptionCode, description));
#endif

namespace bmalloc {

void logVMFailure(size_t vmSize)
{
#if BPLATFORM(IOS_FAMILY)
    const mach_exception_data_type_t kExceptionCode = 0xc105ca11;
    CFStringRef description = CFStringCreateWithFormat(kCFAllocatorDefault, nullptr, CFSTR("bmalloc failed to mmap %lu bytes"), vmSize);
    SimulateCrash(getpid(), kExceptionCode, description);
    CFRelease(description);
#else
    BUNUSED_PARAM(vmSize);
#endif
}

#if !BUSE(OS_LOG)
void reportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* format, ...)
{

@ -31,8 +31,6 @@

namespace bmalloc {

BEXPORT void logVMFailure(size_t vmSize);

#if !BUSE(OS_LOG)
void reportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* format, ...) BATTRIBUTE_PRINTF(4, 5);
#endif

@ -80,10 +80,10 @@ public:
    }

private:
    static const unsigned minCapacity = 16;
    static const unsigned maxLoad = 2;
    static const unsigned rehashLoad = 4;
    static const unsigned minLoad = 8;
    static constexpr unsigned minCapacity = 16;
    static constexpr unsigned maxLoad = 2;
    static constexpr unsigned rehashLoad = 4;
    static constexpr unsigned minLoad = 8;

    bool shouldGrow() { return m_keyCount * maxLoad >= capacity(); }
    bool shouldShrink() { return m_keyCount * minLoad <= capacity() && capacity() > minCapacity; }

@ -27,16 +27,30 @@
#include "Mutex.h"

#include "ScopeExit.h"
#if BOS(DARWIN)
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#endif
#include <thread>

namespace bmalloc {

static inline void yield()
{
#if BOS(DARWIN)
    constexpr mach_msg_timeout_t timeoutInMS = 1;
    thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, timeoutInMS);
#else
    sched_yield();
#endif
}

void Mutex::lockSlowCase()
{
    // The longest critical section in bmalloc is much shorter than the
    // time it takes to make a system call to yield to the OS scheduler.
    // So, we try again a lot before we yield.
    static const size_t aLot = 256;
    static constexpr size_t aLot = 256;

    if (!m_isSpinning.exchange(true)) {
        auto clear = makeScopeExit([&] { m_isSpinning.store(false); });

@ -49,7 +63,7 @@ void Mutex::lockSlowCase()

    // Avoid spinning pathologically.
    while (!try_lock())
        sched_yield();
        yield();
}

} // namespace bmalloc
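
lockSlowCase() above encodes a bet: bmalloc's critical sections are shorter than a syscall, so it retries a bounded number of times before yielding to the scheduler. A self-contained sketch of that strategy (std::atomic_flag standing in for bmalloc's lock word, POSIX sched_yield for the portable fallback):

#include <atomic>
#include <cstddef>
#include <sched.h>

struct SpinThenYieldLock {
    std::atomic_flag flag = ATOMIC_FLAG_INIT;

    bool tryLock() { return !flag.test_and_set(std::memory_order_acquire); }

    void lock()
    {
        constexpr size_t aLot = 256; // same bound the code above uses
        for (size_t i = 0; i < aLot; ++i) {
            if (tryLock())
                return;
        }
        while (!tryLock())
            sched_yield(); // avoid spinning pathologically
    }

    void unlock() { flag.clear(std::memory_order_release); }
};
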

@ -35,6 +35,11 @@

namespace bmalloc {

class Mutex;

using UniqueLockHolder = std::unique_lock<Mutex>;
using LockHolder = std::scoped_lock<Mutex>;

class Mutex {
public:
    constexpr Mutex() = default;

@ -51,7 +56,7 @@ private:
};

static inline void sleep(
    std::unique_lock<Mutex>& lock, std::chrono::milliseconds duration)
    UniqueLockHolder& lock, std::chrono::milliseconds duration)
{
    if (duration == std::chrono::milliseconds(0))
        return;

@ -62,7 +67,7 @@ static inline void sleep(
}

static inline void waitUntilFalse(
    std::unique_lock<Mutex>& lock, std::chrono::milliseconds sleepDuration,
    UniqueLockHolder& lock, std::chrono::milliseconds sleepDuration,
    bool& condition)
{
    while (condition) {

@ -38,8 +38,7 @@ ObjectType objectType(Heap& heap, void* object)
        if (!object)
            return ObjectType::Small;

        std::unique_lock<Mutex> lock(Heap::mutex());
        if (heap.isLarge(lock, object))
        if (heap.isLarge(object))
            return ObjectType::Large;
    }

bmalloc/ObjectTypeTable.cpp (new file, 105 lines)
@ -0,0 +1,105 @@
/*
 * Copyright (C) 2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ObjectTypeTable.h"

#include "VMAllocate.h"

namespace bmalloc {

ObjectTypeTable::Bits sentinelBits { nullptr, 0, 0 };

void ObjectTypeTable::set(UniqueLockHolder&, Chunk* chunk, ObjectType objectType)
{
    unsigned index = convertToIndex(chunk);
    Bits* bits = m_bits;
    if (!(bits->begin() <= index && index < bits->end())) {
        unsigned newBegin = 0;
        unsigned newEnd = 0;
        if (bits == &sentinelBits) {
            // This is the initial allocation of the ObjectTypeTable. In this case, it is possible that some VAs
            // were already allocated for a different purpose before the first registration and will later be
            // reused for bmalloc, so we may soon see a smaller index request than this initial one. We subtract
            // a 128MB offset from the initial newBegin to cover such patterns without extending the table too
            // quickly; if we can't subtract 128MB, we set newBegin to 0.
            constexpr unsigned offsetForInitialAllocation = ObjectTypeTable::Bits::bitCountPerWord * 4;
            if (index < offsetForInitialAllocation)
                newBegin = 0;
            else
                newBegin = index - offsetForInitialAllocation;
            newEnd = index + 1;
        } else if (index < bits->begin()) {
            BASSERT(bits->begin());
            BASSERT(bits->end());
            // We need to verify that "bits->begin() - bits->count()" doesn't underflow;
            // otherwise we would set "newBegin" to "index", which creates a pathological
            // case that keeps growing the BitVector every time we access
            // "index < bits->begin()".
            if (bits->begin() < bits->count())
                newBegin = 0;
            else
                newBegin = std::min<unsigned>(index, bits->begin() - bits->count());
            newEnd = bits->end();
        } else {
            BASSERT(bits->begin());
            BASSERT(bits->end());
            newBegin = bits->begin();
            // We need to verify that "bits->end() + bits->count()" doesn't overflow;
            // otherwise we would set "newEnd" to "index + 1", which creates a
            // pathological case that keeps growing the BitVector every time we access
            // "index > bits->end()".
            if (std::numeric_limits<unsigned>::max() - bits->count() < bits->end())
                newEnd = std::numeric_limits<unsigned>::max();
            else
                newEnd = std::max<unsigned>(index + 1, bits->end() + bits->count());
        }
        newBegin = static_cast<unsigned>(roundDownToMultipleOf<size_t>(ObjectTypeTable::Bits::bitCountPerWord, newBegin));
        BASSERT(newEnd > newBegin);

        unsigned count = newEnd - newBegin;
        size_t size = vmSize(sizeof(Bits) + (roundUpToMultipleOf<size_t>(ObjectTypeTable::Bits::bitCountPerWord, count) / 8));
        RELEASE_BASSERT(size <= 0x80000000U); // The bitvector would be too large; treat this as out-of-memory.
        size = roundUpToPowerOfTwo(size);
        newEnd = newBegin + ((size - sizeof(Bits)) / sizeof(ObjectTypeTable::Bits::WordType)) * ObjectTypeTable::Bits::bitCountPerWord;
        BASSERT(newEnd > newBegin);
        void* allocated = vmAllocate(size);
        memset(allocated, 0, size);
        auto* newBits = new (allocated) Bits(bits, newBegin, newEnd);

        memcpy(newBits->wordForIndex(bits->begin()), bits->words(), bits->sizeInBytes());
#if !defined(NDEBUG)
        for (unsigned index = bits->begin(); index < bits->end(); ++index)
            BASSERT(bits->get(index) == newBits->get(index));
#endif
        std::atomic_thread_fence(std::memory_order_seq_cst); // Ensure the table is valid when it becomes visible to other threads, since ObjectTypeTable::get does not take a lock.
        m_bits = newBits;
        bits = newBits;
    }
    bool value = !!static_cast<std::underlying_type_t<ObjectType>>(objectType);
    BASSERT(static_cast<ObjectType>(value) == objectType);
    bits->set(index, value);
}

} // namespace bmalloc
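
set() above never grows the live table in place: get() runs with no lock, so growth builds a new Bits aside, copies the old contents, fences, and only then swings m_bits, keeping the old table reachable through m_previous for any racing reader. The discipline with stand-in types:

#include <atomic>
#include <cstring>

struct Table {
    Table* previous { nullptr }; // keeps superseded tables alive for racing readers
    unsigned size { 0 };         // in words
    unsigned* words { nullptr };
};

std::atomic<Table*> g_table { nullptr };

// Build the replacement completely, fence, then publish; never free the old.
void growAndPublish(Table* old, unsigned newSize)
{
    auto* bigger = new Table { old, newSize, new unsigned[newSize]() };
    std::memcpy(bigger->words, old->words, old->size * sizeof(unsigned));
    std::atomic_thread_fence(std::memory_order_seq_cst); // contents first...
    g_table.store(bigger, std::memory_order_relaxed);    // ...pointer second
}
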
132
bmalloc/ObjectTypeTable.h
Normal file
132
bmalloc/ObjectTypeTable.h
Normal file
@ -0,0 +1,132 @@
/*
 * Copyright (C) 2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "Mutex.h"
#include "ObjectType.h"
#include "Sizes.h"

namespace bmalloc {

class Chunk;

// Querying ObjectType for Chunk without locking.
class ObjectTypeTable {
public:
    ObjectTypeTable();

    static constexpr unsigned shiftAmount = 20;
    static_assert((1ULL << shiftAmount) == chunkSize);
    static_assert((BOS_EFFECTIVE_ADDRESS_WIDTH - shiftAmount) <= 32);

    class Bits;

    ObjectType get(Chunk*);
    void set(UniqueLockHolder&, Chunk*, ObjectType);

private:
    static unsigned convertToIndex(Chunk* chunk)
    {
        uintptr_t address = reinterpret_cast<uintptr_t>(chunk);
        BASSERT(!(address & (~chunkMask)));
        return static_cast<unsigned>(address >> shiftAmount);
    }

    Bits* m_bits;
};

class ObjectTypeTable::Bits {
public:
    using WordType = unsigned;
    static constexpr unsigned bitCountPerWord = sizeof(WordType) * 8;
    static constexpr WordType one = 1;

    constexpr Bits(Bits* previous, unsigned begin, unsigned end)
        : m_previous(previous)
        , m_begin(begin)
        , m_end(end)
    {
    }

    bool get(unsigned index);
    void set(unsigned index, bool);

    Bits* previous() const { return m_previous; }
    unsigned begin() const { return m_begin; }
    unsigned end() const { return m_end; }
    unsigned count() const { return m_end - m_begin; }
    unsigned sizeInBytes() const { return count() / 8; }

    const WordType* words() const { return const_cast<Bits*>(this)->words(); }
    WordType* words() { return reinterpret_cast<WordType*>(reinterpret_cast<uintptr_t>(this) + sizeof(Bits)); }

    WordType* wordForIndex(unsigned);

private:
    Bits* m_previous { nullptr }; // Keeping the previous Bits* just to suppress Leaks warnings.
    unsigned m_begin { 0 };
    unsigned m_end { 0 };
};
static_assert(!(sizeof(ObjectTypeTable::Bits) % sizeof(ObjectTypeTable::Bits::WordType)));

extern BEXPORT ObjectTypeTable::Bits sentinelBits;

inline ObjectTypeTable::ObjectTypeTable()
    : m_bits(&sentinelBits)
{
}

inline ObjectType ObjectTypeTable::get(Chunk* chunk)
{
    Bits* bits = m_bits;
    unsigned index = convertToIndex(chunk);
    BASSERT(bits);
    if (bits->begin() <= index && index < bits->end())
        return static_cast<ObjectType>(bits->get(index));
    return { };
}

inline bool ObjectTypeTable::Bits::get(unsigned index)
{
    unsigned n = index - begin();
    return words()[n / bitCountPerWord] & (one << (n % bitCountPerWord));
}

inline void ObjectTypeTable::Bits::set(unsigned index, bool value)
{
    unsigned n = index - begin();
    if (value)
        words()[n / bitCountPerWord] |= (one << (n % bitCountPerWord));
    else
        words()[n / bitCountPerWord] &= ~(one << (n % bitCountPerWord));
}

inline ObjectTypeTable::Bits::WordType* ObjectTypeTable::Bits::wordForIndex(unsigned index)
{
    unsigned n = index - begin();
    return &words()[n / bitCountPerWord];
}

} // namespace bmalloc
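Bits::get/set above split an index into a word and a bit offset; a worked example of that arithmetic with the 32-bit WordType used here:

    #include <cassert>

    int main()
    {
        constexpr unsigned bitCountPerWord = 32;
        unsigned words[2] = { 0, 0 };

        unsigned n = 37; // bit 37 lives in word 37 / 32 = 1, at bit 37 % 32 = 5
        words[n / bitCountPerWord] |= (1u << (n % bitCountPerWord));

        assert(words[1] == (1u << 5));
        assert(words[n / bitCountPerWord] & (1u << (n % bitCountPerWord)));
        return 0;
    }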
235  bmalloc/Packed.h  (new file)
@ -0,0 +1,235 @@
/*
 * Copyright (C) 2019-2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "Algorithm.h"
#include "StdLibExtras.h"
#include <array>

namespace bmalloc {

template<typename T>
class Packed {
public:
    static_assert(std::is_trivial<T>::value);
    static constexpr bool isPackedType = true;

    Packed()
        : Packed(T { })
    {
    }

    Packed(const T& value)
    {
        memcpy(m_storage.data(), &value, sizeof(T));
    }

    T get() const
    {
        T value { };
        memcpy(&value, m_storage.data(), sizeof(T));
        return value;
    }

    void set(const T& value)
    {
        memcpy(m_storage.data(), &value, sizeof(T));
    }

    Packed<T>& operator=(const T& value)
    {
        set(value);
        return *this;
    }

    template<class U>
    T exchange(U&& newValue)
    {
        T oldValue = get();
        set(std::forward<U>(newValue));
        return oldValue;
    }

    void swap(Packed& other)
    {
        m_storage.swap(other.m_storage);
    }

    template<typename Other, typename = std::enable_if_t<Other::isPackedType>>
    void swap(Other& other)
    {
        T t1 = get();
        T t2 = other.get();
        set(t2);
        other.set(t1);
    }

    void swap(T& t2)
    {
        T t1 = get();
        std::swap(t1, t2);
        set(t1);
    }

private:
    std::array<uint8_t, sizeof(T)> m_storage;
};

// PackedAlignedPtr can take alignment parameter too. PackedAlignedPtr only uses this alignment information if it is profitable: we use
// alignment information only when we can reduce the size of the storage.
template<typename T, size_t alignment = alignof(T)>
class PackedAlignedPtr {
public:
    static_assert(isPowerOfTwo(alignment), "Alignment needs to be power-of-two");
    static constexpr bool isPackedType = true;
    static constexpr unsigned alignmentShiftSizeIfProfitable = getLSBSetNonZeroConstexpr(alignment);
    static constexpr unsigned storageSizeWithoutAlignmentShift = roundUpToMultipleOf<8, uintptr_t>(BOS_EFFECTIVE_ADDRESS_WIDTH) / 8;
    static constexpr unsigned storageSizeWithAlignmentShift = roundUpToMultipleOf<8, uintptr_t>(BOS_EFFECTIVE_ADDRESS_WIDTH - alignmentShiftSizeIfProfitable) / 8;
    static constexpr bool isAlignmentShiftProfitable = storageSizeWithoutAlignmentShift > storageSizeWithAlignmentShift;
    static constexpr unsigned alignmentShiftSize = isAlignmentShiftProfitable ? alignmentShiftSizeIfProfitable : 0;
    static constexpr unsigned storageSize = storageSizeWithAlignmentShift;

    constexpr PackedAlignedPtr()
        : m_storage()
    {
    }

    constexpr PackedAlignedPtr(std::nullptr_t)
        : m_storage()
    {
    }

    PackedAlignedPtr(T* value)
    {
        set(value);
    }

    T* get() const
    {
        // FIXME: PackedPtr<> can load memory with one mov by checking page boundary.
        // https://bugs.webkit.org/show_bug.cgi?id=197754
        uintptr_t value = 0;
#if BCPU(LITTLE_ENDIAN)
        memcpy(&value, m_storage.data(), storageSize);
#else
        memcpy(bitwise_cast<uint8_t*>(&value) + (sizeof(void*) - storageSize), m_storage.data(), storageSize);
#endif
        if (isAlignmentShiftProfitable)
            value <<= alignmentShiftSize;
        return bitwise_cast<T*>(value);
    }

    void set(T* passedValue)
    {
        uintptr_t value = bitwise_cast<uintptr_t>(passedValue);
        if (isAlignmentShiftProfitable)
            value >>= alignmentShiftSize;
#if BCPU(LITTLE_ENDIAN)
        memcpy(m_storage.data(), &value, storageSize);
#else
        memcpy(m_storage.data(), bitwise_cast<uint8_t*>(&value) + (sizeof(void*) - storageSize), storageSize);
#endif
    }

    void clear()
    {
        set(nullptr);
    }

    T* operator->() const { return get(); }
    T& operator*() const { return *get(); }
    bool operator!() const { return !get(); }

    // This conversion operator allows implicit conversion to bool but not to other integer types.
    typedef T* (PackedAlignedPtr::*UnspecifiedBoolType);
    operator UnspecifiedBoolType() const { return get() ? &PackedAlignedPtr::m_storage : nullptr; }
    explicit operator bool() const { return get(); }

    PackedAlignedPtr& operator=(T* value)
    {
        set(value);
        return *this;
    }

    template<class U>
    T* exchange(U&& newValue)
    {
        T* oldValue = get();
        set(std::forward<U>(newValue));
        return oldValue;
    }

    void swap(std::nullptr_t) { clear(); }

    void swap(PackedAlignedPtr& other)
    {
        m_storage.swap(other.m_storage);
    }

    template<typename Other, typename = std::enable_if_t<Other::isPackedType>>
    void swap(Other& other)
    {
        T* t1 = get();
        T* t2 = other.get();
        set(t2);
        other.set(t1);
    }

    void swap(T* t2)
    {
        T* t1 = get();
        std::swap(t1, t2);
        set(t1);
    }

private:
    std::array<uint8_t, storageSize> m_storage;
};

template<typename T>
class Packed<T*> : public PackedAlignedPtr<T, 1> {
public:
    using Base = PackedAlignedPtr<T, 1>;
    using Base::Base;
};

template<typename T>
using PackedPtr = Packed<T*>;

template<typename T>
struct PackedPtrTraits {
    template<typename U> using RebindTraits = PackedPtrTraits<U>;

    using StorageType = PackedPtr<T>;

    template<class U> static T* exchange(StorageType& ptr, U&& newValue) { return ptr.exchange(newValue); }

    template<typename Other> static void swap(PackedPtr<T>& a, Other& b) { a.swap(b); }

    static T* unwrap(const StorageType& ptr) { return ptr.get(); }
};

} // namespace bmalloc
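A quick constexpr check of PackedAlignedPtr's storage-size arithmetic, with the address width hard-coded to 48 bits purely as an assumption for this sketch (the real value comes from BOS_EFFECTIVE_ADDRESS_WIDTH and varies by platform):

    #include <cstddef>

    constexpr size_t roundUpTo8(size_t bits) { return (bits + 7) & ~size_t(7); }

    constexpr unsigned addressWidth = 48; // assumed for illustration
    constexpr unsigned shiftFor4K = 12;   // 4096 = 1 << 12

    static_assert(roundUpTo8(addressWidth) / 8 == 6);              // PackedPtr<T>: 6 bytes instead of 8
    static_assert(roundUpTo8(addressWidth - shiftFor4K) / 8 == 5); // PackedAlignedPtr<T, 4096>: 5 bytes
    static_assert(6 > 5, "the alignment shift is profitable here, so stored values are shifted right by 12");

    int main() { return 0; }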
@ -60,7 +60,7 @@ static void* allocate(size_t size, size_t alignment)

PerProcessData* getPerProcessData(unsigned hash, const char* disambiguator, size_t size, size_t alignment)
{
    std::lock_guard<Mutex> lock(s_mutex);
    LockHolder lock(s_mutex);

    PerProcessData*& bucket = s_table[hash % tableSize];
@ -46,7 +46,7 @@ namespace bmalloc {
//     x = object->m_field; // OK
//     if (globalFlag) { ... } // Undefined behavior.
//
//     std::lock_guard<Mutex> lock(PerProcess<Object>::mutex());
//     LockHolder lock(PerProcess<Object>::mutex());
//     Object* object = PerProcess<Object>::get(lock);
//     if (globalFlag) { ... } // OK.
@ -105,7 +105,7 @@ private:

    BNO_INLINE static T* getSlowCase()
    {
        std::lock_guard<Mutex> lock(mutex());
        LockHolder lock(mutex());
        if (!s_object.load()) {
            if (s_data->isInitialized)
                s_object.store(static_cast<T*>(s_data->memory));
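The getSlowCase() change above keeps the same double-checked locking shape and only swaps the lock-holder type. The pattern in isolation (illustrative names, not the PerProcess API):

    #include <atomic>
    #include <mutex>

    template<typename T>
    T* lazySingleton()
    {
        static std::atomic<T*> s_instance { nullptr };
        static std::mutex s_mutex;

        if (T* instance = s_instance.load(std::memory_order_acquire))
            return instance; // fast path: no lock taken

        std::lock_guard<std::mutex> lock(s_mutex);
        if (!s_instance.load(std::memory_order_relaxed)) {
            // Re-checked under the lock, so exactly one thread constructs.
            s_instance.store(new T(), std::memory_order_release);
        }
        return s_instance.load(std::memory_order_relaxed);
    }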
@ -1,5 +1,5 @@
/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@ -68,7 +68,7 @@ template<typename T> struct PerThreadStorage;
// For now, we only support PerThread<PerHeapKind<Cache>>. We can expand to other types by
// using more keys.
template<> struct PerThreadStorage<PerHeapKind<Cache>> {
    static const pthread_key_t key = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0;
    static constexpr pthread_key_t key = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0;

    static void* get()
    {
@ -108,16 +108,9 @@ template<typename T> struct PerThreadStorage {
    }
};

class Cache;
class Heap;

template<> bool PerThreadStorage<PerHeapKind<Cache>>::s_didInitialize;
template<> pthread_key_t PerThreadStorage<PerHeapKind<Cache>>::s_key;
template<> std::once_flag PerThreadStorage<PerHeapKind<Cache>>::s_onceFlag;

template<> bool PerThreadStorage<PerHeapKind<Heap>>::s_didInitialize;
template<> pthread_key_t PerThreadStorage<PerHeapKind<Heap>>::s_key;
template<> std::once_flag PerThreadStorage<PerHeapKind<Heap>>::s_onceFlag;
template<typename T> bool PerThreadStorage<T>::s_didInitialize;
template<typename T> pthread_key_t PerThreadStorage<T>::s_key;
template<typename T> std::once_flag PerThreadStorage<T>::s_onceFlag;

#endif
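The generic PerThreadStorage<T> fallback above builds per-thread storage out of pthread keys; a minimal standalone version of that mechanism (illustrative only, C++17 for the inline statics):

    #include <mutex>
    #include <pthread.h>

    template<typename T>
    struct ThreadLocalSlot {
        static T* get()
        {
            std::call_once(s_onceFlag, [] {
                // The destructor runs at thread exit for every thread that set the slot.
                pthread_key_create(&s_key, [](void* p) { delete static_cast<T*>(p); });
            });
            T* value = static_cast<T*>(pthread_getspecific(s_key));
            if (!value) {
                value = new T();
                pthread_setspecific(s_key, value);
            }
            return value;
        }

        static inline pthread_key_t s_key;
        static inline std::once_flag s_onceFlag;
    };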
@ -39,10 +39,8 @@ bool gigacageEnabledForProcess();
inline bool gigacageEnabledForProcess() { return true; }
#endif

#if BUSE(CHECK_NANO_MALLOC)
#if BPLATFORM(IOS_FAMILY)
bool shouldProcessUnconditionallyUseBmalloc();
#else
inline bool shouldProcessUnconditionallyUseBmalloc() { return true; }
#endif

}
@ -53,7 +53,7 @@ bool gigacageEnabledForProcess()
}
#endif // !BPLATFORM(WATCHOS)

#if BUSE(CHECK_NANO_MALLOC)
#if BPLATFORM(IOS_FAMILY)
bool shouldProcessUnconditionallyUseBmalloc()
{
    static bool result;
@ -74,6 +74,6 @@ bool shouldProcessUnconditionallyUseBmalloc()

    return result;
}
#endif // BUSE(CHECK_NANO_MALLOC)
#endif // BPLATFORM(IOS_FAMILY)

}
@ -30,6 +30,7 @@
#include "BulkDecommit.h"
#include "Environment.h"
#include "Heap.h"
#include "IsoHeapImplInlines.h"
#if BOS(DARWIN)
#import <dispatch/dispatch.h>
#import <mach/host_info.h>
@ -39,6 +40,10 @@
#include <stdio.h>
#include <thread>

#if BPLATFORM(PLAYSTATION)
#include <pthread_np.h>
#endif

namespace bmalloc {

static constexpr bool verbose = false;
@ -67,7 +72,7 @@ struct PrintTime {

DEFINE_STATIC_PER_PROCESS_STORAGE(Scavenger);

Scavenger::Scavenger(std::lock_guard<Mutex>&)
Scavenger::Scavenger(const LockHolder&)
{
    BASSERT(!Environment::get()->isDebugHeapEnabled());

@ -80,7 +85,7 @@ Scavenger::Scavenger(std::lock_guard<Mutex>&)
    dispatch_resume(m_pressureHandlerDispatchSource);
    dispatch_release(queue);
#endif
#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
    m_waitTime = std::chrono::milliseconds(m_isInMiniMode ? 200 : 2000);
#else
    m_waitTime = std::chrono::milliseconds(10);
@ -91,11 +96,11 @@ Scavenger::Scavenger(std::lock_guard<Mutex>&)

void Scavenger::run()
{
    std::lock_guard<Mutex> lock(mutex());
    runHoldingLock();
    LockHolder lock(mutex());
    run(lock);
}

void Scavenger::runHoldingLock()
void Scavenger::run(const LockHolder&)
{
    m_state = State::Run;
    m_condition.notify_all();
@ -103,11 +108,11 @@ void Scavenger::runHoldingLock()

void Scavenger::runSoon()
{
    std::lock_guard<Mutex> lock(mutex());
    runSoonHoldingLock();
    LockHolder lock(mutex());
    runSoon(lock);
}

void Scavenger::runSoonHoldingLock()
void Scavenger::runSoon(const LockHolder&)
{
    if (willRunSoon())
        return;
@ -123,11 +128,11 @@ void Scavenger::didStartGrowing()

void Scavenger::scheduleIfUnderMemoryPressure(size_t bytes)
{
    std::lock_guard<Mutex> lock(mutex());
    scheduleIfUnderMemoryPressureHoldingLock(bytes);
    LockHolder lock(mutex());
    scheduleIfUnderMemoryPressure(lock, bytes);
}

void Scavenger::scheduleIfUnderMemoryPressureHoldingLock(size_t bytes)
void Scavenger::scheduleIfUnderMemoryPressure(const LockHolder& lock, size_t bytes)
{
    m_scavengerBytes += bytes;
    if (m_scavengerBytes < scavengerBytesPerMemoryPressureCheck)
@ -142,19 +147,19 @@ void Scavenger::scheduleIfUnderMemoryPressureHoldingLock(size_t bytes)
        return;

    m_isProbablyGrowing = false;
    runHoldingLock();
    run(lock);
}

void Scavenger::schedule(size_t bytes)
{
    std::lock_guard<Mutex> lock(mutex());
    scheduleIfUnderMemoryPressureHoldingLock(bytes);
    LockHolder lock(mutex());
    scheduleIfUnderMemoryPressure(lock, bytes);

    if (willRunSoon())
        return;

    m_isProbablyGrowing = false;
    runSoonHoldingLock();
    runSoon(lock);
}

inline void dumpStats()
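The renames in this file follow one pattern: every "FooHoldingLock()" private method becomes an overload of the public method taking the lock holder as a witness parameter, so calling it without a live lock no longer compiles. The shape in miniature (a sketch; bmalloc's actual LockHolder/UniqueLockHolder aliases live in Mutex.h):

    #include <mutex>

    class Widget {
        using Mutex = std::mutex;
        using LockHolder = std::scoped_lock<Mutex>;

    public:
        void run()
        {
            LockHolder lock(m_mutex);
            run(lock); // the witness must be passed along
        }

    private:
        // Reachable only while some LockHolder exists.
        void run(const LockHolder&)
        {
            // ... state transitions protected by m_mutex ...
        }

        Mutex m_mutex;
    };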
@ -178,14 +183,14 @@ inline void dumpStats()

std::chrono::milliseconds Scavenger::timeSinceLastFullScavenge()
{
    std::unique_lock<Mutex> lock(mutex());
    UniqueLockHolder lock(mutex());
    return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - m_lastFullScavengeTime);
}

#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
std::chrono::milliseconds Scavenger::timeSinceLastPartialScavenge()
{
    std::unique_lock<Mutex> lock(mutex());
    UniqueLockHolder lock(mutex());
    return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - m_lastPartialScavengeTime);
}
#endif
@ -199,7 +204,10 @@ void Scavenger::enableMiniMode()

void Scavenger::scavenge()
{
    std::unique_lock<Mutex> lock(m_scavengingMutex);
    if (!m_isEnabled)
        return;

    UniqueLockHolder lock(m_scavengingMutex);

    if (verbose) {
        fprintf(stderr, "--------------------------------\n");
@ -212,14 +220,14 @@ void Scavenger::scavenge()

    {
        PrintTime printTime("\nfull scavenge under lock time");
#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
        size_t deferredDecommits = 0;
#endif
        std::lock_guard<Mutex> lock(Heap::mutex());
        UniqueLockHolder lock(Heap::mutex());
        for (unsigned i = numHeaps; i--;) {
            if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                continue;
#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
            PerProcess<PerHeapKind<Heap>>::get()->at(i).scavenge(lock, decommitter);
#else
            PerProcess<PerHeapKind<Heap>>::get()->at(i).scavenge(lock, decommitter, deferredDecommits);
@ -227,7 +235,7 @@ void Scavenger::scavenge()
        }
        decommitter.processEager();

#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
        if (deferredDecommits)
            m_state = State::RunSoon;
#endif
@ -240,7 +248,7 @@ void Scavenger::scavenge()

    {
        PrintTime printTime("full scavenge mark all as eligible time");
        std::lock_guard<Mutex> lock(Heap::mutex());
        LockHolder lock(Heap::mutex());
        for (unsigned i = numHeaps; i--;) {
            if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                continue;
@ -266,15 +274,18 @@ void Scavenger::scavenge()
    }

    {
        std::unique_lock<Mutex> lock(mutex());
        UniqueLockHolder lock(mutex());
        m_lastFullScavengeTime = std::chrono::steady_clock::now();
    }
}

#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
void Scavenger::partialScavenge()
{
    std::unique_lock<Mutex> lock(m_scavengingMutex);
    if (!m_isEnabled)
        return;

    UniqueLockHolder lock(m_scavengingMutex);

    if (verbose) {
        fprintf(stderr, "--------------------------------\n");
@ -286,7 +297,7 @@ void Scavenger::partialScavenge()
    BulkDecommit decommitter;
    {
        PrintTime printTime("\npartialScavenge under lock time");
        std::lock_guard<Mutex> lock(Heap::mutex());
        UniqueLockHolder lock(Heap::mutex());
        for (unsigned i = numHeaps; i--;) {
            if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                continue;
@ -307,7 +318,7 @@ void Scavenger::partialScavenge()

    {
        PrintTime printTime("partialScavenge mark all as eligible time");
        std::lock_guard<Mutex> lock(Heap::mutex());
        LockHolder lock(Heap::mutex());
        for (unsigned i = numHeaps; i--;) {
            if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                continue;
@ -334,7 +345,7 @@ void Scavenger::partialScavenge()
    }

    {
        std::unique_lock<Mutex> lock(mutex());
        UniqueLockHolder lock(mutex());
        m_lastPartialScavengeTime = std::chrono::steady_clock::now();
    }
}
@ -344,7 +355,7 @@ size_t Scavenger::freeableMemory()
{
    size_t result = 0;
    {
        std::lock_guard<Mutex> lock(Heap::mutex());
        UniqueLockHolder lock(Heap::mutex());
        for (unsigned i = numHeaps; i--;) {
            if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                continue;
@ -401,12 +412,12 @@ void Scavenger::threadRunLoop()

    while (true) {
        if (m_state == State::Sleep) {
            std::unique_lock<Mutex> lock(mutex());
            UniqueLockHolder lock(mutex());
            m_condition.wait(lock, [&]() { return m_state != State::Sleep; });
        }

        if (m_state == State::RunSoon) {
            std::unique_lock<Mutex> lock(mutex());
            UniqueLockHolder lock(mutex());
            m_condition.wait_for(lock, m_waitTime, [&]() { return m_state != State::RunSoon; });
        }
@ -421,7 +432,7 @@ void Scavenger::threadRunLoop()
            fprintf(stderr, "--------------------------------\n");
        }

#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
        enum class ScavengeMode {
            None,
            Partial,
@ -495,15 +506,11 @@ void Scavenger::threadRunLoop()
                static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(timeSpentScavenging).count()) / 1000);
        }

        std::chrono::milliseconds newWaitTime;

        if (m_isInMiniMode) {
            timeSpentScavenging *= 50;
            newWaitTime = std::chrono::duration_cast<std::chrono::milliseconds>(timeSpentScavenging);
            newWaitTime = std::min(std::max(newWaitTime, std::chrono::milliseconds(25)), std::chrono::milliseconds(500));
        } else {
        // FIXME: We need to investigate mini-mode's adjustment.
        // https://bugs.webkit.org/show_bug.cgi?id=203987
        if (!m_isInMiniMode) {
            timeSpentScavenging *= 150;
            newWaitTime = std::chrono::duration_cast<std::chrono::milliseconds>(timeSpentScavenging);
            std::chrono::milliseconds newWaitTime = std::chrono::duration_cast<std::chrono::milliseconds>(timeSpentScavenging);
            m_waitTime = std::min(std::max(newWaitTime, std::chrono::milliseconds(100)), std::chrono::milliseconds(10000));
        }

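Restating the non-mini-mode rescheduling arithmetic above: the scavenger sleeps roughly 150x as long as the scavenge took, clamped to [100 ms, 10 s]. As a standalone function (illustrative, not bmalloc API):

    #include <algorithm>
    #include <chrono>

    std::chrono::milliseconds nextWaitTime(std::chrono::microseconds timeSpentScavenging)
    {
        auto scaled = std::chrono::duration_cast<std::chrono::milliseconds>(timeSpentScavenging * 150);
        return std::min(std::max(scaled, std::chrono::milliseconds(100)), std::chrono::milliseconds(10000));
    }

    // e.g. a 2 ms scavenge yields a 300 ms wait; a 50 us scavenge clamps to 100 ms.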
@ -516,7 +523,7 @@ void Scavenger::threadRunLoop()
void Scavenger::setThreadName(const char* name)
{
    BUNUSED(name);
#if BOS(DARWIN)
#if BOS(DARWIN) || BPLATFORM(PLAYSTATION)
    pthread_setname_np(name);
#elif BOS(LINUX)
    // Truncate the given name since Linux limits the thread name to 16 bytes, including the null terminator.
@ -42,7 +42,7 @@ namespace bmalloc {

class Scavenger : public StaticPerProcess<Scavenger> {
public:
    BEXPORT Scavenger(std::lock_guard<Mutex>&);
    BEXPORT Scavenger(const LockHolder&);

    ~Scavenger() = delete;

@ -74,13 +74,16 @@ public:

    void enableMiniMode();

    // Used for debugging only.
    void disable() { m_isEnabled = false; }

private:
    enum class State { Sleep, Run, RunSoon };

    void runHoldingLock();
    void runSoonHoldingLock();
    void run(const LockHolder&);
    void runSoon(const LockHolder&);

    void scheduleIfUnderMemoryPressureHoldingLock(size_t bytes);
    void scheduleIfUnderMemoryPressure(const LockHolder&, size_t bytes);

    BNO_RETURN static void threadEntryPoint(Scavenger*);
    BNO_RETURN void threadRunLoop();
@ -89,7 +92,7 @@ private:
    void setThreadName(const char*);

    std::chrono::milliseconds timeSinceLastFullScavenge();
#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
    std::chrono::milliseconds timeSinceLastPartialScavenge();
    void partialScavenge();
#endif
@ -105,7 +108,7 @@ private:

    std::thread m_thread;
    std::chrono::steady_clock::time_point m_lastFullScavengeTime { std::chrono::steady_clock::now() };
#if BPLATFORM(MAC)
#if BUSE(PARTIAL_SCAVENGE)
    std::chrono::steady_clock::time_point m_lastPartialScavengeTime { std::chrono::steady_clock::now() };
#endif

@ -115,6 +118,7 @@ private:
#endif

    Vector<DeferredDecommit> m_deferredDecommits;
    bool m_isEnabled { true };
};
DECLARE_STATIC_PER_PROCESS_STORAGE(Scavenger);
@ -1,5 +1,5 @@
/*
 * Copyright (C) 2016 Apple Inc. All rights reserved.
 * Copyright (C) 2016-2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@ -23,6 +23,8 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <type_traits>
#include <utility>
@ -80,7 +80,7 @@ constexpr size_t maskSizeClass(size_t size)
    return mask((size - 1) / alignment, maskSizeClassCount - 1);
}

inline size_t maskObjectSize(size_t maskSizeClass)
constexpr size_t maskObjectSize(size_t maskSizeClass)
{
    return (maskSizeClass + 1) * alignment;
}
@ -89,14 +89,14 @@ static constexpr size_t logAlignmentMin = maskSizeClassMax / logWasteFactor;

static constexpr size_t logSizeClassCount = (log2(smallMax) - log2(maskSizeClassMax)) * logWasteFactor;

inline size_t logSizeClass(size_t size)
constexpr size_t logSizeClass(size_t size)
{
    size_t base = log2(size - 1) - log2(maskSizeClassMax);
    size_t offset = (size - 1 - (maskSizeClassMax << base));
    return base * logWasteFactor + offset / (logAlignmentMin << base);
}

inline size_t logObjectSize(size_t logSizeClass)
constexpr size_t logObjectSize(size_t logSizeClass)
{
    size_t base = logSizeClass / logWasteFactor;
    size_t offset = logSizeClass % logWasteFactor;
@ -105,24 +105,30 @@ inline size_t logObjectSize(size_t logSizeClass)

static constexpr size_t sizeClassCount = maskSizeClassCount + logSizeClassCount;

inline size_t sizeClass(size_t size)
constexpr size_t sizeClass(size_t size)
{
    if (size <= maskSizeClassMax)
        return maskSizeClass(size);
    return maskSizeClassCount + logSizeClass(size);
}

inline size_t objectSize(size_t sizeClass)
constexpr size_t objectSize(size_t sizeClass)
{
    if (sizeClass < maskSizeClassCount)
        return maskObjectSize(sizeClass);
    return logObjectSize(sizeClass - maskSizeClassCount);
}

inline size_t pageSize(size_t pageClass)
constexpr size_t pageSize(size_t pageClass)
{
    return (pageClass + 1) * smallPageSize;
}

constexpr size_t smallLineCount(size_t vmPageSize)
{
    return vmPageSize / smallLineSize;
}

} // namespace Sizes

using namespace Sizes;
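What the inline-to-constexpr moves above buy: size-class mapping can now be evaluated at compile time, e.g. in static_asserts and constant-folded fast paths. A sketch of the round-trip property for the mask classes, with a stand-in alignment constant and the mask() clamp omitted (the real values live earlier in Sizes.h):

    #include <cstddef>

    constexpr size_t alignment = 8; // stand-in value for illustration

    constexpr size_t maskSizeClass(size_t size) { return (size - 1) / alignment; }
    constexpr size_t maskObjectSize(size_t sizeClass) { return (sizeClass + 1) * alignment; }

    // Every size maps to a class whose object size can hold it.
    static_assert(maskObjectSize(maskSizeClass(24)) == 24);
    static_assert(maskObjectSize(maskSizeClass(25)) == 32);
    static_assert(maskObjectSize(maskSizeClass(25)) >= 25);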
@ -35,9 +35,9 @@ namespace bmalloc {

class SmallLine {
public:
    void ref(std::unique_lock<Mutex>&, unsigned char = 1);
    bool deref(std::unique_lock<Mutex>&);
    unsigned refCount(std::unique_lock<Mutex>&) { return m_refCount; }
    void ref(UniqueLockHolder&, unsigned char = 1);
    bool deref(UniqueLockHolder&);
    unsigned refCount(UniqueLockHolder&) { return m_refCount; }

    char* begin();
    char* end();
@ -51,13 +51,13 @@ static_assert(

};

inline void SmallLine::ref(std::unique_lock<Mutex>&, unsigned char refCount)
inline void SmallLine::ref(UniqueLockHolder&, unsigned char refCount)
{
    BASSERT(!m_refCount);
    m_refCount = refCount;
}

inline bool SmallLine::deref(std::unique_lock<Mutex>&)
inline bool SmallLine::deref(UniqueLockHolder&)
{
    BASSERT(m_refCount);
    --m_refCount;
@ -38,20 +38,20 @@ class SmallLine;

class SmallPage : public ListNode<SmallPage> {
public:
    void ref(std::unique_lock<Mutex>&);
    bool deref(std::unique_lock<Mutex>&);
    unsigned refCount(std::unique_lock<Mutex>&) { return m_refCount; }
    void ref(UniqueLockHolder&);
    bool deref(UniqueLockHolder&);
    unsigned refCount(UniqueLockHolder&) { return m_refCount; }

    size_t sizeClass() { return m_sizeClass; }
    void setSizeClass(size_t sizeClass) { m_sizeClass = sizeClass; }

    bool hasFreeLines(std::unique_lock<Mutex>&) const { return m_hasFreeLines; }
    void setHasFreeLines(std::unique_lock<Mutex>&, bool hasFreeLines) { m_hasFreeLines = hasFreeLines; }
    bool hasFreeLines(UniqueLockHolder&) const { return m_hasFreeLines; }
    void setHasFreeLines(UniqueLockHolder&, bool hasFreeLines) { m_hasFreeLines = hasFreeLines; }

    bool hasPhysicalPages() { return m_hasPhysicalPages; }
    void setHasPhysicalPages(bool hasPhysicalPages) { m_hasPhysicalPages = hasPhysicalPages; }

#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
    bool usedSinceLastScavenge() { return m_usedSinceLastScavenge; }
    void clearUsedSinceLastScavenge() { m_usedSinceLastScavenge = false; }
    void setUsedSinceLastScavenge() { m_usedSinceLastScavenge = true; }
@ -65,7 +65,7 @@ public:
private:
    unsigned char m_hasFreeLines: 1;
    unsigned char m_hasPhysicalPages: 1;
#if !BPLATFORM(MAC)
#if !BUSE(PARTIAL_SCAVENGE)
    unsigned char m_usedSinceLastScavenge: 1;
#endif
    unsigned char m_refCount: 7;
@ -79,14 +79,14 @@ static_assert(

using LineCache = std::array<List<SmallPage>, sizeClassCount>;

inline void SmallPage::ref(std::unique_lock<Mutex>&)
inline void SmallPage::ref(UniqueLockHolder&)
{
    BASSERT(!m_slide);
    ++m_refCount;
    BASSERT(m_refCount);
}

inline bool SmallPage::deref(std::unique_lock<Mutex>&)
inline bool SmallPage::deref(UniqueLockHolder&)
{
    BASSERT(!m_slide);
    BASSERT(m_refCount);
@ -25,6 +25,7 @@

#pragma once

#include "BExport.h"
#include "BInline.h"
#include "Mutex.h"
#include "Sizes.h"
@ -53,7 +54,7 @@ namespace bmalloc {
template<typename T> struct StaticPerProcessStorageTraits;

template<typename T>
class BEXPORT StaticPerProcess {
class StaticPerProcess {
public:
    static T* get()
    {
@ -79,7 +80,7 @@ private:
    BNO_INLINE static T* getSlowCase()
    {
        using Storage = typename StaticPerProcessStorageTraits<T>::Storage;
        std::lock_guard<Mutex> lock(Storage::s_mutex);
        LockHolder lock(Storage::s_mutex);
        if (!Storage::s_object.load(std::memory_order_consume)) {
            T* t = new (&Storage::s_memory) T(lock);
            Storage::s_object.store(t, std::memory_order_release);
@ -92,9 +93,9 @@ private:
    template<> struct StaticPerProcessStorageTraits<Type> { \
        using Memory = typename std::aligned_storage<sizeof(Type), std::alignment_of<Type>::value>::type; \
        struct BEXPORT Storage { \
            BEXPORT static std::atomic<Type*> s_object; \
            BEXPORT static Mutex s_mutex; \
            BEXPORT static Memory s_memory; \
            static std::atomic<Type*> s_object; \
            static Mutex s_mutex; \
            static Memory s_memory; \
        }; \
    };
@ -95,7 +95,7 @@ inline void vmValidate(void* p, size_t vmSize)

inline size_t vmPageSizePhysical()
{
#if BPLATFORM(IOS_FAMILY)
#if BOS(DARWIN) && (BCPU(ARM64) || BCPU(ARM))
    return vm_kernel_page_size;
#else
    static size_t cached;
@ -1,65 +0,0 @@
/*
 * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "PerProcess.h"
#include "VMHeap.h"
#include <thread>

namespace bmalloc {

DEFINE_STATIC_PER_PROCESS_STORAGE(VMHeap);

VMHeap::VMHeap(std::lock_guard<Mutex>&)
{
}

LargeRange VMHeap::tryAllocateLargeChunk(size_t alignment, size_t size)
{
    // We allocate VM in aligned multiples to increase the chances that
    // the OS will provide contiguous ranges that we can merge.
    size_t roundedAlignment = roundUpToMultipleOf<chunkSize>(alignment);
    if (roundedAlignment < alignment) // Check for overflow
        return LargeRange();
    alignment = roundedAlignment;

    size_t roundedSize = roundUpToMultipleOf<chunkSize>(size);
    if (roundedSize < size) // Check for overflow
        return LargeRange();
    size = roundedSize;

    void* memory = tryVMAllocate(alignment, size);
    if (!memory)
        return LargeRange();

    Chunk* chunk = static_cast<Chunk*>(memory);

#if BOS(DARWIN)
    PerProcess<Zone>::get()->addRange(Range(chunk->bytes(), size));
#endif

    return LargeRange(chunk->bytes(), size, 0, 0);
}

} // namespace bmalloc
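The deleted tryAllocateLargeChunk() above shows an overflow idiom worth noting: rounding up to a multiple can wrap, and a result smaller than the input is the failure signal. Isolated, with a stand-in rounding function (the real one is a template over a power-of-two constant):

    #include <cstddef>

    constexpr size_t roundUpToMultipleOf(size_t divisor, size_t x)
    {
        return ((x + divisor - 1) / divisor) * divisor;
    }

    // Returns false when the rounding wrapped; callers treat that as out-of-memory.
    bool tryRoundUp(size_t divisor, size_t size, size_t& rounded)
    {
        rounded = roundUpToMultipleOf(divisor, size);
        return rounded >= size;
    }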
@ -72,8 +72,8 @@ public:
    void shrinkToFit();

private:
    static const size_t growFactor = 2;
    static const size_t shrinkFactor = 4;
    static constexpr size_t growFactor = 2;
    static constexpr size_t shrinkFactor = 4;
    static size_t initialCapacity() { return vmPageSize() / sizeof(T); }

    void growCapacity();
@ -115,7 +115,7 @@ static const malloc_introspection_t zoneIntrospect = {
    .statistics = bmalloc::statistics
};

Zone::Zone(std::lock_guard<Mutex>&)
Zone::Zone(const LockHolder&)
{
    malloc_zone_t::size = &bmalloc::zoneSize;
    malloc_zone_t::zone_name = "WebKit Malloc";
@ -40,9 +40,9 @@ class Chunk;
class Zone : public malloc_zone_t {
public:
    // Enough capacity to track a 64GB heap, so probably enough for anything.
    static const size_t capacity = 2048;
    static constexpr size_t capacity = 2048;

    Zone(std::lock_guard<Mutex>&);
    Zone(const LockHolder&);
    Zone(task_t, memory_reader_t, vm_address_t);

    void addRange(Range);
Some files were not shown because too many files have changed in this diff.