Support Share GC Part2:

Issue: https://gitee.com/openharmony/arkcompiler_ets_runtime/issues/I91D19?from=project-issue
1. implement share gc marker and sweeper
2. adapt local gc to skip objects in the shared heap (see the editorial sketch after the commit header)

Signed-off-by: lukai <lukai25@huawei.com>
Change-Id: Id73c9d2c52b11adfef36fff032c926aa5ed3f7cc
lukai 2024-02-02 19:36:16 +08:00
parent d8f421a6e3
commit a72dd481f6
50 changed files with 1827 additions and 472 deletions
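
Editorial note: a minimal sketch (not part of the diff) of the two rules this change wires in. Region, InSharedHeap, InSharedSweepableSpace, and AtomicInsertLocalToShareRset come from the runtime sources changed below; the two Sketch functions themselves are hypothetical.

// Rule 1: local GC marking skips objects living in the shared heap;
// the shared GC collects them instead.
inline void MarkObjectSketch(Region *objectRegion)
{
    if (objectRegion->InSharedHeap()) {
        return;
    }
    // ... normal local marking ...
}

// Rule 2: the write barrier records local -> shared references in the
// region's local-to-share remembered set, which ShareGCMarker later
// scans as extra roots.
inline void WriteBarrierSketch(Region *objectRegion, Region *valueRegion, uintptr_t slotAddr)
{
    if (!objectRegion->InSharedHeap() && valueRegion->InSharedSweepableSpace()) {
        objectRegion->AtomicInsertLocalToShareRset(slotAddr);
    }
}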

View File

@ -769,6 +769,9 @@ ecma_source = [
"ecmascript/mem/parallel_marker.cpp",
"ecmascript/mem/partial_gc.cpp",
"ecmascript/mem/regexp_cached_chunk.cpp",
"ecmascript/mem/shared_heap/shared_concurrent_sweeper.cpp",
"ecmascript/mem/shared_heap/share_gc.cpp",
"ecmascript/mem/shared_heap/shared_space.cpp",
"ecmascript/mem/stw_young_gc.cpp",
"ecmascript/mem/space.cpp",
"ecmascript/mem/sparse_space.cpp",

View File

@ -223,7 +223,6 @@ JSHandle<JSHClass> Builtins::CreateSFunctionPrototypeHClass(const JSHandle<JSTag
attributes.SetOffset(index);
attributes.SetIsAccessor(each.second);
if (each.first == "[Symbol.hasInstance]") {
// todo(lukai) globalruntime.env
keyString = env->GetHasInstanceSymbol();
} else {
keyString = JSHandle<JSTaggedValue>(factory_->NewFromUtf8(each.first));
@ -275,7 +274,6 @@ JSHandle<JSFunction> Builtins::NewSFunction(const JSHandle<GlobalEnv> &env, cons
EcmaEntrypoint func, int length,
kungfu::BuiltinsStubCSigns::ID builtinId) const
{
// todo(lukai) globalruntime.globalenv?
JSHandle<JSHClass> hclass = JSHandle<JSHClass>::Cast(env->GetSFunctionClassWithoutAccessor());
JSHandle<JSFunction> function = factory_->NewSFunctionByHClass(reinterpret_cast<void *>(func),
hclass, FunctionKind::NORMAL_FUNCTION, builtinId, MemSpaceType::SHARED_NON_MOVABLE);
@ -321,7 +319,6 @@ JSHandle<JSTaggedValue> Builtins::CreateSGetterSetter(const JSHandle<GlobalEnv>
void Builtins::SharedStrictModeForbiddenAccessCallerArguments(const JSHandle<GlobalEnv> &env, uint32_t &index,
const JSHandle<JSObject> &prototype) const
{
// todo(lukai) globalruntime.env?
JSHandle<JSHClass> hclass = JSHandle<JSHClass>::Cast(env->GetSFunctionClassWithoutProto());
JSHandle<JSFunction> func =
factory_->NewSFunctionWithAccessor(

View File

@ -2496,7 +2496,7 @@ inline GateRef StubBuilder::InYoungGeneration(GateRef region)
}
}
inline GateRef StubBuilder::InSharedSpace(GateRef region)
inline GateRef StubBuilder::InSharedHeap(GateRef region)
{
auto offset = Region::PackedData::GetFlagOffset(env_->Is32Bit());
GateRef x = Load(VariableType::NATIVE_POINTER(), PtrAdd(IntPtr(offset), region),
@ -2514,6 +2514,24 @@ inline GateRef StubBuilder::InSharedSpace(GateRef region)
}
}
inline GateRef StubBuilder::InSharedSweepableSpace(GateRef region)
{
auto offset = Region::PackedData::GetFlagOffset(env_->Is32Bit());
GateRef x = Load(VariableType::NATIVE_POINTER(), PtrAdd(IntPtr(offset), region),
IntPtr(0));
if (env_->Is32Bit()) {
GateRef spaceType = Int32And(x, Int32(RegionSpaceFlag::VALID_SPACE_MASK));
GateRef greater = Int32GreaterThanOrEqual(spaceType, Int32(RegionSpaceFlag::SHARED_SWEEPABLE_SPACE_BEGIN));
GateRef less = Int32LessThanOrEqual(spaceType, Int32(RegionSpaceFlag::SHARED_SWEEPABLE_SPACE_END));
return BoolAnd(greater, less);
} else {
GateRef spaceType = Int64And(x, Int64(RegionSpaceFlag::VALID_SPACE_MASK));
GateRef greater = Int64GreaterThanOrEqual(spaceType, Int64(RegionSpaceFlag::SHARED_SWEEPABLE_SPACE_BEGIN));
GateRef less = Int64LessThanOrEqual(spaceType, Int64(RegionSpaceFlag::SHARED_SWEEPABLE_SPACE_END));
return BoolAnd(greater, less);
}
}
inline GateRef StubBuilder::GetParentEnv(GateRef object)
{
GateRef index = Int32(LexicalEnv::PARENT_ENV_INDEX);

View File

@ -1257,8 +1257,8 @@ void StubBuilder::SetValueWithBarrier(GateRef glue, GateRef obj, GateRef offset,
GateRef objectRegion = ObjectAddressToRange(obj);
GateRef valueRegion = ObjectAddressToRange(value);
GateRef slotAddr = PtrAdd(TaggedCastToIntPtr(obj), offset);
GateRef objectNotInShare = BoolNot(InSharedSpace(objectRegion));
GateRef valueRegionInShare = InSharedSpace(valueRegion);
GateRef objectNotInShare = BoolNot(InSharedHeap(objectRegion));
GateRef valueRegionInShare = InSharedSweepableSpace(valueRegion);
Branch(BoolAnd(objectNotInShare, valueRegionInShare), &shareBarrier, &shareBarrierExit);
Bind(&shareBarrier);
{

View File

@ -221,7 +221,8 @@ public:
GateRef TaggedIsAccessor(GateRef x);
GateRef ObjectAddressToRange(GateRef x);
GateRef InYoungGeneration(GateRef region);
GateRef InSharedSpace(GateRef region);
GateRef InSharedHeap(GateRef region);
GateRef InSharedSweepableSpace(GateRef region);
GateRef TaggedIsGeneratorObject(GateRef x);
GateRef TaggedIsJSArray(GateRef x);
GateRef TaggedIsAsyncGeneratorObject(GateRef x);

View File

@ -479,6 +479,7 @@ void EcmaVM::ProcessReferences(const WeakRootVisitor &visitor)
void EcmaVM::PushToNativePointerList(JSNativePointer *pointer)
{
ASSERT(!JSTaggedValue(pointer).IsInSharedHeap());
nativePointerList_.emplace_back(pointer);
}

View File

@ -81,7 +81,6 @@ void JSFunction::InitializeJSFunction(JSThread *thread, const JSHandle<JSFunctio
void JSFunction::InitializeSFunction(JSThread *thread, const JSHandle<JSFunction> &func, FunctionKind kind)
{
InitializeWithDefaultValue(thread, func);
// todo(lukai) gobalruntime.const
auto globalConst = thread->GlobalConstants();
if (HasAccessor(kind)) {
JSHandle<JSTaggedValue> accessor = globalConst->GetHandledFunctionNameAccessor();

View File

@ -909,7 +909,7 @@ public:
enableContext_ = value;
}
bool IsEnableContext()
bool IsEnableContext() const
{
return enableContext_;
}
@ -919,7 +919,7 @@ public:
enablePrintExecuteTime_ = value;
}
bool IsEnablePrintExecuteTime()
bool IsEnablePrintExecuteTime() const
{
return enablePrintExecuteTime_;
}

View File

@ -58,12 +58,20 @@ JSHandle<EcmaString> GetTypeString(JSThread *thread, PreferredPrimitiveType type
return JSHandle<EcmaString>::Cast(globalConst->GetHandledStringString());
}
// todo(lukai) maybe add new tag: hclass.sharedTag == 1
bool JSTaggedValue::IsInSharedSpace() const
bool JSTaggedValue::IsInSharedHeap() const
{
if (IsHeapObject()) {
Region *region = Region::ObjectAddressToRange(GetTaggedObject());
return region->InSharedSpace();
Region *region = Region::ObjectAddressToRange(value_);
return region->InSharedHeap();
}
return false;
}
bool JSTaggedValue::IsInSharedSweepableSpace() const
{
if (IsHeapObject()) {
Region *region = Region::ObjectAddressToRange(value_);
return region->InSharedSweepableSpace();
}
return false;
}

View File

@ -650,7 +650,8 @@ public:
bool IsJSSharedObject() const;
bool IsJSSharedFunction() const;
bool IsJSShared() const;
bool IsInSharedSpace() const;
bool IsInSharedHeap() const;
bool IsInSharedSweepableSpace() const;
static bool IsSameTypeOrHClass(JSTaggedValue x, JSTaggedValue y);
static ComparisonResult Compare(JSThread *thread, const JSHandle<JSTaggedValue> &x,

View File

@ -575,7 +575,9 @@ bool JSThread::CheckSafepoint()
}
if (IsSuspended()) {
interruptMutex_.Unlock();
WaitSuspension();
interruptMutex_.Lock();
}
// vmThreadControl_'s thread_ is this JSThread.

View File

@ -40,11 +40,11 @@ static ARK_INLINE void WriteBarrier(const JSThread *thread, void *obj, size_t of
ASSERT((slotAddr % static_cast<uint8_t>(MemAlignment::MEM_ALIGN_OBJECT)) == 0);
objectRegion->InsertOldToNewRSet(slotAddr);
}
if (!objectRegion->InSharedSpace() && valueRegion->InSharedSpace()) {
if (!objectRegion->InSharedHeap() && valueRegion->InSharedSweepableSpace()) {
objectRegion->AtomicInsertLocalToShareRset(slotAddr);
}
// todo(lukai) remove this check in the future, when all references are allocated in the shared heap.
if (objectRegion->InSharedSpace() && !valueRegion->InSharedSpace()) {
if (objectRegion->InSharedHeap() && !valueRegion->InSharedHeap()) {
LOG_FULL(ERROR) << "Shared space reference to " << valueRegion->GetSpaceTypeName();
}
if (thread->IsConcurrentMarkingOrFinished()) {

View File

@ -21,6 +21,9 @@ namespace panda::ecmascript {
void Barriers::Update(const JSThread *thread, uintptr_t slotAddr, Region *objectRegion, TaggedObject *value,
Region *valueRegion, bool onDeserialize)
{
if (valueRegion->InSharedHeap()) {
return;
}
auto heap = thread->GetEcmaVM()->GetHeap();
if (heap->IsFullMark()) {
if (valueRegion->InCollectSet() && !objectRegion->InYoungSpaceOrCSet()) {

View File

@ -75,7 +75,6 @@ void FullGC::Initialize()
auto callback = [](Region *current) {
current->ResetAliveObject();
current->ClearOldToNewRSet();
current->ClearLocalToShareRSet();
};
heap_->EnumerateNonMovableRegions(callback);
heap_->GetAppSpawnSpace()->EnumerateRegions([](Region *current) {
@ -125,7 +124,7 @@ void FullGC::Sweep()
Region *objectRegion = Region::ObjectAddressToRange(header);
if (!HasEvacuated(objectRegion)) {
if (!objectRegion->Test(header)) {
if (!objectRegion->InSharedHeap() && !objectRegion->Test(header)) {
slot.Clear();
}
} else {
@ -150,7 +149,7 @@ void FullGC::Sweep()
return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
}
if (!HasEvacuated(objectRegion)) {
if (objectRegion->Test(header)) {
if (objectRegion->InSharedHeap() || objectRegion->Test(header)) {
return header;
}
return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));

View File

@ -47,7 +47,7 @@ namespace panda::ecmascript {
if (UNLIKELY((object) == nullptr)) { \
size_t oomOvershootSize = GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize(); \
(space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize); \
(object) = reinterpret_cast<TaggedObject *>((space)->ConcurrentAllocate(size)); \
(object) = reinterpret_cast<TaggedObject *>((space)->Allocate(thread, size)); \
ThrowOutOfMemoryError(thread, size, message); \
}
@ -59,6 +59,14 @@ void SharedHeap::EnumerateOldSpaceRegions(const Callback &cb) const
sHugeObjectSpace_->EnumerateRegions(cb);
}
template<class Callback>
void SharedHeap::EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const
{
sOldSpace_->EnumerateRegionsWithRecord(cb);
sNonMovableSpace_->EnumerateRegionsWithRecord(cb);
sHugeObjectSpace_->EnumerateRegionsWithRecord(cb);
}
template<class Callback>
void Heap::EnumerateOldSpaceRegions(const Callback &cb, Region *region) const
{
@ -278,7 +286,7 @@ TaggedObject *Heap::AllocateClassClass(JSHClass *hclass, size_t size)
TaggedObject *SharedHeap::AllocateClassClass(JSHClass *hclass, size_t size)
{
size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
auto object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(size));
auto object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->AllocateWithoutGC(size));
if (UNLIKELY(object == nullptr)) {
LOG_ECMA_MEM(FATAL) << "Heap::AllocateClassClass can not allocate any space";
UNREACHABLE();
@ -473,7 +481,7 @@ TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHCl
if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
return AllocateHugeObject(thread, hclass, size);
}
auto object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->ConcurrentAllocate(size));
auto object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
"SharedHeap::AllocateNonMovableOrHugeObject");
object->SetClass(thread, hclass);
@ -494,7 +502,7 @@ TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hc
if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
return AllocateHugeObject(thread, hclass, size);
}
auto object = reinterpret_cast<TaggedObject *>(sOldSpace_->ConcurrentAllocate(size));
auto object = reinterpret_cast<TaggedObject *>(sOldSpace_->Allocate(thread, size));
CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
object->SetClass(thread, hclass);
// todo(lukai)
@ -517,7 +525,7 @@ TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, size_t size)
TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
{
// Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
CheckAndTriggerOldGC(size);
CheckAndTriggerOldGC(thread, size);
auto object = AllocateHugeObject(thread, size);
object->SetClass(thread, hclass);
// todo(lukai)
@ -528,17 +536,17 @@ TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, JSHClass *hclass,
TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, size_t size)
{
// Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
CheckAndTriggerOldGC(size);
CheckAndTriggerOldGC(thread, size);
auto *object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->ConcurrentAllocate(size));
auto *object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(size));
if (UNLIKELY(object == nullptr)) {
CollectGarbage(TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT);
object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->ConcurrentAllocate(size));
CollectGarbage(thread, TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT);
object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(size));
if (UNLIKELY(object == nullptr)) {
// If huge object allocation hits OOM, temporarily increase the space size to avoid a VM crash
size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
sOldSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->ConcurrentAllocate(size));
object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(size));
// todo(lukai)
// DumpHeapSnapshotBeforeOOM();
ThrowOutOfMemoryError(thread, size, "SharedHeap::AllocateHugeObject");
@ -562,7 +570,7 @@ TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClas
if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
return AllocateHugeObject(thread, hclass, size);
}
auto object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->ConcurrentAllocate(size));
auto object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sReadOnlySpace_, "SharedHeap::AllocateReadOnlyOrHugeObject");
object->SetClass(thread, hclass);
return object;

View File

@ -19,6 +19,8 @@
#include <thread>
#include "ecmascript/base/block_hook_scope.h"
#include "ecmascript/checkpoint/thread_state_transition.h"
#include "ecmascript/ecma_string_table.h"
#include "ecmascript/ecma_vm.h"
#include "ecmascript/free_object.h"
#include "ecmascript/js_finalization_registry.h"
@ -31,15 +33,16 @@
#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/mem_controller.h"
#include "ecmascript/mem/partial_gc.h"
#include "ecmascript/mem/native_area_allocator.h"
#include "ecmascript/mem/partial_gc.h"
#include "ecmascript/mem/parallel_evacuator.h"
#include "ecmascript/mem/parallel_marker-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/mem/shared_heap/share_gc.h"
#include "ecmascript/mem/stw_young_gc.h"
#include "ecmascript/mem/verification.h"
#include "ecmascript/mem/work_manager.h"
#include "ecmascript/mem/gc_stats.h"
#include "ecmascript/ecma_string_table.h"
#include "ecmascript/runtime_call_id.h"
#if !WIN_OR_MAC_OR_IOS_PLATFORM
#include "ecmascript/dfx/hprof/heap_profiler_interface.h"
@ -54,40 +57,124 @@
#endif
namespace panda::ecmascript {
bool SharedHeap::CheckAndTriggerOldGC(size_t size)
bool SharedHeap::CheckAndTriggerOldGC(JSThread *thread, size_t size)
{
if ((OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) ||
GetHeapObjectSize() > globalSpaceAllocLimit_ ) && !NeedStopCollection()) {
CollectGarbage(TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT);
CollectGarbage(thread, TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT);
return true;
}
return false;
}
void SharedHeap::Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator)
void SharedHeap::Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
const JSRuntimeOptions &option)
{
nativeAreaAllocator_ = nativeAreaAllocator;
heapRegionAllocator_ = heapRegionAllocator;
parallelGC_ = option.EnableParallelGC();
size_t maxHeapSize = config_.GetMaxHeapSize();
size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
sNonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity,
MemSpaceType::SHARED_NON_MOVABLE);
sNonMovableSpace_ = new SharedNonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
sNonMovableSpace_->Initialize();
size_t oldSpaceCapacity = maxHeapSize - nonmovableSpaceCapacity;
globalSpaceAllocLimit_ = maxHeapSize;
sOldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity, MemSpaceType::SHARED_OLD_SPACE);
sOldSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
sOldSpace_->Initialize();
size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
sReadOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity,
MemSpaceType::SHARED_READ_ONLY_SPACE);
sReadOnlySpace_ = new SharedReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
sHugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity,
MemSpaceType::SHARED_HUGE_OBJECT_SPACE);
}
void SharedHeap::PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option)
{
globalEnvConstants_ = globalEnvConstants;
uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
maxMarkTaskCount_ = totalThreadNum - 1;
sWorkManager_ = new ShareGCWorkManager(this, totalThreadNum + 1);
shareGCMarker_ = new ShareGCMarker(sWorkManager_);
sSweeper_ = new SharedConcurrentSweeper(this, option.EnableConcurrentSweep() ?
EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
shareGC_ = new ShareGC(this);
}
void SharedHeap::PostGCMarkingTask()
{
IncreaseTaskCount();
Taskpool::GetCurrentTaskpool()->PostTask(std::make_unique<ParallelMarkTask>(-1, this));
}
bool SharedHeap::ParallelMarkTask::Run(uint32_t threadIndex)
{
// Synchronizes-with. Ensures that WorkManager::Initialize is visible to the marker threads.
while (!sHeap_->GetWorkManager()->HasInitialized());
sHeap_->GetShareGCMarker()->ProcessMarkStack(threadIndex);
sHeap_->ReduceTaskCount();
return true;
}
bool SharedHeap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{
sHeap_->ReclaimRegions();
return true;
}
void SharedHeap::CollectGarbage(JSThread *thread, [[maybe_unused]]TriggerGCType gcType, [[maybe_unused]]GCReason reason)
{
ASSERT(gcType == TriggerGCType::SHARED_GC);
CHECK_NO_GC
Prepare();
SuspendAllScope scope(thread);
shareGC_->RunPhases();
// Weak node nativeFinalizeCallback will be called after the local GC
}
void SharedHeap::Prepare()
{
WaitRunningTaskFinished();
sSweeper_->EnsureAllTaskFinished();
WaitClearTaskFinished();
}
void SharedHeap::PrepareRecordRegionsForReclaim()
{
sOldSpace_->SetRecordRegion();
sNonMovableSpace_->SetRecordRegion();
sHugeObjectSpace_->SetRecordRegion();
}
void SharedHeap::Resume()
{
sHugeObjectSpace_->ReclaimHugeRegion();
PrepareRecordRegionsForReclaim();
if (parallelGC_) {
clearTaskFinished_ = false;
Taskpool::GetCurrentTaskpool()->PostTask(
std::make_unique<AsyncClearTask>(JSThread::GetCurrentThreadId(), this));
} else {
ReclaimRegions();
}
}
void SharedHeap::ReclaimRegions()
{
sSweeper_->WaitAllTaskFinished();
EnumerateOldSpaceRegionsWithRecord([] (Region *region) {
region->ClearMarkGCBitset();
region->ClearCrossRegionRSet();
region->ResetAliveObject();
});
if (!clearTaskFinished_) {
LockHolder holder(waitClearTaskFinishedMutex_);
clearTaskFinished_ = true;
waitClearTaskFinishedCV_.SignalAll();
}
}
Heap::Heap(EcmaVM *ecmaVm)
: BaseHeap(ecmaVm->GetEcmaParamConfiguration()),
ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()) {}
@ -312,12 +399,6 @@ void Heap::Resume(TriggerGCType gcType)
PrepareRecordRegionsForReclaim();
hugeObjectSpace_->ReclaimHugeRegion();
hugeMachineCodeSpace_->ReclaimHugeRegion();
// todo(lukai) onlyfortest, delete this after all references of sharedobject are in shared space.
SharedHeap::GetInstance()->EnumerateOldSpaceRegions([] (Region *region) {
region->ClearMarkGCBitset();
region->ClearCrossRegionRSet();
region->ResetAliveObject();
});
if (parallelGC_) {
clearTaskFinished_ = false;
Taskpool::GetCurrentTaskpool()->PostTask(
@ -1182,22 +1263,6 @@ void Heap::TriggerConcurrentMarking()
}
}
void Heap::WaitRunningTaskFinished()
{
LockHolder holder(waitTaskFinishedMutex_);
while (runningTaskCount_ > 0) {
waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
}
}
void Heap::WaitClearTaskFinished()
{
LockHolder holder(waitClearTaskFinishedMutex_);
while (!clearTaskFinished_) {
waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
}
}
void Heap::WaitAllTasksFinished()
{
WaitRunningTaskFinished();
@ -1220,12 +1285,6 @@ void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
}
void Heap::IncreaseTaskCount()
{
LockHolder holder(waitTaskFinishedMutex_);
runningTaskCount_++;
}
void Heap::ChangeGCParams(bool inBackground)
{
inBackground_ = inBackground;
@ -1418,21 +1477,6 @@ bool Heap::NeedStopCollection()
return false;
}
bool Heap::CheckCanDistributeTask()
{
LockHolder holder(waitTaskFinishedMutex_);
return runningTaskCount_ < maxMarkTaskCount_;
}
void Heap::ReduceTaskCount()
{
LockHolder holder(waitTaskFinishedMutex_);
runningTaskCount_--;
if (runningTaskCount_ == 0) {
waitTaskFinishedCV_.SignalAll();
}
}
bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
{
// Synchronizes-with. Ensures that WorkManager::Initialize is visible to the marker threads.
@ -1673,4 +1717,41 @@ std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> Heap::CalCal
}
return code->CalCallSiteInfo(retAddr);
};
void BaseHeap::IncreaseTaskCount()
{
LockHolder holder(waitTaskFinishedMutex_);
runningTaskCount_++;
}
void BaseHeap::WaitRunningTaskFinished()
{
LockHolder holder(waitTaskFinishedMutex_);
while (runningTaskCount_ > 0) {
waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
}
}
bool BaseHeap::CheckCanDistributeTask()
{
LockHolder holder(waitTaskFinishedMutex_);
return runningTaskCount_ < maxMarkTaskCount_;
}
void BaseHeap::ReduceTaskCount()
{
LockHolder holder(waitTaskFinishedMutex_);
runningTaskCount_--;
if (runningTaskCount_ == 0) {
waitTaskFinishedCV_.SignalAll();
}
}
void BaseHeap::WaitClearTaskFinished()
{
LockHolder holder(waitClearTaskFinishedMutex_);
while (!clearTaskFinished_) {
waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
}
}
} // namespace panda::ecmascript

View File

@ -21,6 +21,7 @@
#include "ecmascript/js_thread.h"
#include "ecmascript/mem/linear_space.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/shared_heap/shared_space.h"
#include "ecmascript/mem/sparse_space.h"
#include "ecmascript/mem/work_manager.h"
#include "ecmascript/taskpool/taskpool.h"
@ -44,6 +45,9 @@ class MemController;
class NativeAreaAllocator;
class ParallelEvacuator;
class PartialGC;
class SharedConcurrentSweeper;
class ShareGC;
class ShareGCMarker;
class STWYoungGC;
using IdleNotifyStatusCallback = std::function<void(bool)>;
@ -91,55 +95,22 @@ public:
virtual bool NeedStopCollection() = 0;
virtual bool CheckAndTriggerOldGC(size_t size = 0) = 0;
virtual bool IsEmptyIdleTask() = 0;
virtual size_t CalculateLinearSpaceOverShoot() = 0;
virtual void TryTriggerIncrementalMarking() = 0;
virtual void TryTriggerIdleCollection() = 0;
virtual void TryTriggerConcurrentMarking() = 0;
virtual bool OldSpaceExceedCapacity(size_t size) const = 0;
virtual bool OldSpaceExceedLimit() const = 0;
virtual ConcurrentSweeper *GetSweeper() const = 0;
virtual OldSpace *GetOldSpace() const = 0;
virtual NonMovableSpace *GetNonMovableSpace() const = 0;
virtual HugeObjectSpace *GetHugeObjectSpace() const = 0;
virtual ReadOnlySpace *GetReadOnlySpace() const = 0;
virtual void CollectGarbage(TriggerGCType gcType, GCReason reason) = 0;
virtual inline size_t GetCommittedSize() const = 0;
virtual inline size_t GetHeapObjectSize() const = 0;
virtual size_t GetRegionCachedSize() const = 0;
virtual void ChangeGCParams(bool inBackground) = 0;
virtual const GlobalEnvConstants *GetGlobalConst() const = 0;
virtual JSObjectResizingStrategy *GetJSObjectResizingStrategy() = 0;
virtual GCStats *GetEcmaGCStats() = 0;
MemController *GetMemController() const
{
return memController_;
}
/*
* Functions invoked during GC.
*/
void SetMarkType(MarkType markType)
{
markType_ = markType;
@ -200,17 +171,22 @@ public:
return heapAliveSizeAfterGC_;
}
uint32_t GetMaxMarkTaskCount() const
{
return maxMarkTaskCount_;
}
bool CheckCanDistributeTask();
void IncreaseTaskCount();
void ReduceTaskCount();
void WaitRunningTaskFinished();
void WaitClearTaskFinished();
protected:
void ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
bool NonMovableObjNearOOM = false);
void FatalOutOfMemoryError(size_t size, std::string functionName);
const EcmaParamConfiguration config_;
/*
* The memory controller providing memory statistics (by allocations and collections),
* which is used for GC heuristics.
*/
MemController *memController_ {nullptr};
MarkType markType_ {MarkType::MARK_YOUNG};
// Region allocators.
NativeAreaAllocator *nativeAreaAllocator_ {nullptr};
@ -218,6 +194,14 @@ protected:
size_t heapAliveSizeAfterGC_ {0};
size_t globalSpaceAllocLimit_ {0};
// parallel marker task count.
uint32_t runningTaskCount_ {0};
uint32_t maxMarkTaskCount_ {0};
Mutex waitTaskFinishedMutex_;
ConditionVariable waitTaskFinishedCV_;
Mutex waitClearTaskFinishedMutex_;
ConditionVariable waitClearTaskFinishedCV_;
bool clearTaskFinished_ {true};
bool inBackground_ {false};
bool shouldThrowOOMError_ {false};
bool oldGCRequested_ {false};
@ -229,7 +213,6 @@ public:
SharedHeap(const EcmaParamConfiguration &config) : BaseHeap(config) {}
virtual ~SharedHeap() = default;
// todo(lukai) SharedHeap should be initialized in GlobalRuntime initialize
static SharedHeap* GetInstance()
{
EcmaParamConfiguration config(false, DEFAULT_HEAP_SIZE);
@ -237,8 +220,37 @@ public:
return shareHeap;
}
void Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator);
void Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
const JSRuntimeOptions &option);
void PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option);
class ParallelMarkTask : public Task {
public:
ParallelMarkTask(int32_t id, SharedHeap *heap)
: Task(id), sHeap_(heap) {};
~ParallelMarkTask() override = default;
bool Run(uint32_t threadIndex) override;
NO_COPY_SEMANTIC(ParallelMarkTask);
NO_MOVE_SEMANTIC(ParallelMarkTask);
private:
SharedHeap *sHeap_ {nullptr};
};
class AsyncClearTask : public Task {
public:
AsyncClearTask(int32_t id, SharedHeap *heap)
: Task(id), sHeap_(heap) {}
~AsyncClearTask() override = default;
bool Run(uint32_t threadIndex) override;
NO_COPY_SEMANTIC(AsyncClearTask);
NO_MOVE_SEMANTIC(AsyncClearTask);
private:
SharedHeap *sHeap_;
};
bool IsMarking() const override
{
LOG_FULL(ERROR) << "SharedHeap IsMarking() not support yet";
@ -247,8 +259,7 @@ public:
bool IsReadyToMark() const override
{
LOG_FULL(ERROR) << "SharedHeap IsReadyToMark() not support yet";
return false;
return true;
}
bool NeedStopCollection() override
@ -257,30 +268,7 @@ public:
return onSerializeEvent_;
}
bool CheckAndTriggerOldGC(size_t size = 0) override;
bool IsEmptyIdleTask() override
{
LOG_FULL(ERROR) << "SharedHeap IsEmptyIdleTask() not support yet";
return true;
}
size_t CalculateLinearSpaceOverShoot() override
{
LOG_FULL(ERROR) << "SharedHeap CalculateLinearSpaceOverShoot() not support yet";
return 0;
}
void TryTriggerIncrementalMarking() override
{
LOG_FULL(ERROR) << "SharedHeap TryTriggerIncrementalMarking() not support yet";
return;
}
void TryTriggerIdleCollection() override
{
LOG_FULL(ERROR) << "SharedHeap TryTriggerIdleCollection() not support yet";
return;
}
bool CheckAndTriggerOldGC(JSThread *thread, size_t size = 0);
void TryTriggerConcurrentMarking() override
{
@ -291,45 +279,50 @@ public:
bool OldSpaceExceedCapacity(size_t size) const override
{
size_t totalSize = sOldSpace_->GetCommittedSize() + sHugeObjectSpace_->GetCommittedSize() + size;
return totalSize >= sOldSpace_->GetMaximumCapacity() + sOldSpace_->GetOvershootSize() +
sOldSpace_->GetOutOfMemoryOvershootSize();
return totalSize >= sOldSpace_->GetMaximumCapacity() + sOldSpace_->GetOutOfMemoryOvershootSize();
}
bool OldSpaceExceedLimit() const override
{
size_t totalSize = sOldSpace_->GetHeapObjectSize() + sHugeObjectSpace_->GetHeapObjectSize();
return totalSize >= sOldSpace_->GetInitialCapacity() + sOldSpace_->GetOvershootSize();
return totalSize >= sOldSpace_->GetInitialCapacity();
}
ConcurrentSweeper *GetSweeper() const override
SharedConcurrentSweeper *GetSweeper() const
{
LOG_FULL(FATAL) << "SharedHeap ChangeGCParams() not support yet";
return nullptr;
return sSweeper_;
}
OldSpace *GetOldSpace() const override
bool IsParallelGCEnabled() const
{
return parallelGC_;
}
SharedOldSpace *GetOldSpace() const
{
return sOldSpace_;
}
NonMovableSpace *GetNonMovableSpace() const override
SharedNonMovableSpace *GetNonMovableSpace() const
{
return sNonMovableSpace_;
}
HugeObjectSpace *GetHugeObjectSpace() const override
HugeObjectSpace *GetHugeObjectSpace() const
{
return sHugeObjectSpace_;
}
ReadOnlySpace *GetReadOnlySpace() const override
SharedReadOnlySpace *GetReadOnlySpace() const
{
return sReadOnlySpace_;
}
void CollectGarbage([[maybe_unused]]TriggerGCType gcType, [[maybe_unused]]GCReason reason) override
void CollectGarbage(JSThread *thread, TriggerGCType gcType, GCReason reason);
void SetMaxMarkTaskCount(uint32_t maxTaskCount)
{
LOG_FULL(ERROR) << "SharedHeap CollectGarbage() not support yet";
maxMarkTaskCount_ = maxTaskCount;
}
inline size_t GetCommittedSize() const override
@ -350,12 +343,6 @@ public:
return result;
}
size_t GetRegionCachedSize() const override
{
LOG_FULL(ERROR) << "SharedHeap GetRegionCachedSize() not support yet";
return 0;
}
void ChangeGCParams([[maybe_unused]]bool inBackground) override
{
LOG_FULL(ERROR) << "SharedHeap ChangeGCParams() not support yet";
@ -368,12 +355,6 @@ public:
return nullptr;
}
JSObjectResizingStrategy *GetJSObjectResizingStrategy() override
{
LOG_FULL(ERROR) << "SharedHeap GetJSObjectResizingStrategy() not support yet";
return nullptr;
}
inline void SetGlobalEnvConstants(const GlobalEnvConstants *globalEnvConstants)
{
globalEnvConstants_ = globalEnvConstants;
@ -384,9 +365,43 @@ public:
return globalEnvConstants_;
}
SharedSparseSpace *GetSpaceWithType(MemSpaceType type) const
{
switch (type) {
case MemSpaceType::SHARED_OLD_SPACE:
return sOldSpace_;
case MemSpaceType::SHARED_NON_MOVABLE:
return sNonMovableSpace_;
default:
LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE();
break;
}
}
void Prepare();
void Resume();
void ReclaimRegions();
void PostGCMarkingTask();
ShareGCWorkManager *GetWorkManager() const
{
return sWorkManager_;
}
ShareGCMarker *GetShareGCMarker() const
{
return shareGCMarker_;
}
void PrepareRecordRegionsForReclaim();
template<class Callback>
void EnumerateOldSpaceRegions(const Callback &cb) const;
template<class Callback>
void EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const;
inline TaggedObject *AllocateClassClass(JSHClass *hclass, size_t size);
inline TaggedObject *AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass);
@ -404,13 +419,19 @@ public:
inline TaggedObject *AllocateHugeObject(JSThread *thread, size_t size);
inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass);
inline TaggedObject *AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size);
private:
bool parallelGC_ {true};
const GlobalEnvConstants *globalEnvConstants_ {nullptr};
OldSpace *sOldSpace_ {nullptr};
NonMovableSpace *sNonMovableSpace_ {nullptr};
ReadOnlySpace *sReadOnlySpace_{nullptr};
SharedOldSpace *sOldSpace_ {nullptr};
SharedNonMovableSpace *sNonMovableSpace_ {nullptr};
SharedReadOnlySpace *sReadOnlySpace_ {nullptr};
HugeObjectSpace *sHugeObjectSpace_ {nullptr};
ShareGCWorkManager *sWorkManager_ {nullptr};
SharedConcurrentSweeper *sSweeper_ {nullptr};
ShareGC *shareGC_ {nullptr};
ShareGCMarker *shareGCMarker_ {nullptr};
};
class Heap : public BaseHeap {
@ -445,17 +466,17 @@ public:
return inactiveSemiSpace_;
}
OldSpace *GetOldSpace() const override
OldSpace *GetOldSpace() const
{
return oldSpace_;
}
NonMovableSpace *GetNonMovableSpace() const override
NonMovableSpace *GetNonMovableSpace() const
{
return nonMovableSpace_;
}
HugeObjectSpace *GetHugeObjectSpace() const override
HugeObjectSpace *GetHugeObjectSpace() const
{
return hugeObjectSpace_;
}
@ -475,7 +496,7 @@ public:
return snapshotSpace_;
}
ReadOnlySpace *GetReadOnlySpace() const override
ReadOnlySpace *GetReadOnlySpace() const
{
return readOnlySpace_;
}
@ -516,7 +537,7 @@ public:
return fullGC_;
}
ConcurrentSweeper *GetSweeper() const override
ConcurrentSweeper *GetSweeper() const
{
return sweeper_;
}
@ -604,6 +625,11 @@ public:
LOG_GC(INFO) << "SmartGC: enter app cold start";
}
MemController *GetMemController() const
{
return memController_;
}
/*
* For object allocations.
*/
@ -635,8 +661,8 @@ public:
/*
* GC triggers.
*/
void CollectGarbage(TriggerGCType gcType, GCReason reason = GCReason::OTHER) override;
bool CheckAndTriggerOldGC(size_t size = 0) override;
void CollectGarbage(TriggerGCType gcType, GCReason reason = GCReason::OTHER);
bool CheckAndTriggerOldGC(size_t size = 0);
bool CheckAndTriggerHintGC();
TriggerGCType SelectGCType() const;
/*
@ -653,21 +679,18 @@ public:
GCStats *GetEcmaGCStats() override;
JSObjectResizingStrategy *GetJSObjectResizingStrategy() override;
JSObjectResizingStrategy *GetJSObjectResizingStrategy();
void TriggerIdleCollection(int idleMicroSec);
void NotifyMemoryPressure(bool inHighMemoryPressure);
bool CheckCanDistributeTask();
void WaitRunningTaskFinished();
void TryTriggerConcurrentMarking() override;
void AdjustBySurvivalRate(size_t originalNewSpaceSize);
void TriggerConcurrentMarking();
bool CheckCanTriggerConcurrentMarking();
void TryTriggerIdleCollection() override;
void TryTriggerIncrementalMarking() override;
void TryTriggerIdleCollection();
void TryTriggerIncrementalMarking();
void CalculateIdleDuration();
void UpdateWorkManager(WorkManager *workManager);
/*
@ -718,7 +741,7 @@ public:
memGrowingtype_ = memGrowingType;
}
size_t CalculateLinearSpaceOverShoot() override
size_t CalculateLinearSpaceOverShoot()
{
return oldSpace_->GetMaximumCapacity() - oldSpace_->GetInitialCapacity();
}
@ -727,7 +750,7 @@ public:
inline size_t GetHeapObjectSize() const override;
size_t GetRegionCachedSize() const override
size_t GetRegionCachedSize() const
{
return activeSemiSpace_->GetInitialCapacity();
}
@ -745,11 +768,6 @@ public:
size_t GetHeapLimitSize() const;
uint32_t GetMaxMarkTaskCount() const
{
return maxMarkTaskCount_;
}
uint32_t GetMaxEvacuateTaskCount() const
{
return maxEvacuateTaskCount_;
@ -781,7 +799,7 @@ public:
void ClearIdleTask();
bool IsEmptyIdleTask() override
bool IsEmptyIdleTask()
{
return idleTask_ == IdleTaskType::NO_TASK;
}
@ -929,9 +947,6 @@ private:
void AdjustOldSpaceLimit();
// record lastRegion for each space, which will be used in ReclaimRegions()
void PrepareRecordRegionsForReclaim();
void IncreaseTaskCount();
void ReduceTaskCount();
void WaitClearTaskFinished();
void InvokeWeakNodeNativeFinalizeCallback();
void DumpHeapSnapshotBeforeOOM(bool isFullGC = true);
inline void ReclaimRegions(TriggerGCType gcType);
@ -1073,6 +1088,11 @@ private:
bool enableIdleGC_ {false};
HeapMode mode_ { HeapMode::NORMAL };
/*
* The memory controller providing memory statistics (by allocations and collections),
* which is used for GC heuristics.
*/
MemController *memController_ {nullptr};
size_t promotedSize_ {0};
size_t semiSpaceCopiedSize_ {0};
size_t nativeBindingSize_{0};
@ -1080,17 +1100,9 @@ private:
MemGrowingType memGrowingtype_ {MemGrowingType::HIGH_THROUGHPUT};
TriggerGCType gcType_ {TriggerGCType::YOUNG_GC};
bool clearTaskFinished_ {true};
Mutex waitClearTaskFinishedMutex_;
ConditionVariable waitClearTaskFinishedCV_;
uint32_t runningTaskCount_ {0};
// parallel marker task number.
uint32_t maxMarkTaskCount_ {0};
// parallel evacuator task number.
uint32_t maxEvacuateTaskCount_ {0};
Mutex finishColdStartMutex_;
Mutex waitTaskFinishedMutex_;
ConditionVariable waitTaskFinishedCV_;
// Application status

View File

@ -23,8 +23,9 @@
#include "ecmascript/mem/mem_controller.h"
namespace panda::ecmascript {
LinearSpace::LinearSpace(BaseHeap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
LinearSpace::LinearSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
: Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
localHeap_(heap),
waterLine_(0)
{
}
@ -42,18 +43,18 @@ uintptr_t LinearSpace::Allocate(size_t size, bool isPromoted)
return object;
}
if (Expand(isPromoted)) {
if (!isPromoted && !heap_->NeedStopCollection()) {
heap_->TryTriggerIncrementalMarking();
heap_->TryTriggerIdleCollection();
heap_->TryTriggerConcurrentMarking();
if (!isPromoted && !localHeap_->NeedStopCollection()) {
localHeap_->TryTriggerIncrementalMarking();
localHeap_->TryTriggerIdleCollection();
localHeap_->TryTriggerConcurrentMarking();
}
object = allocator_.Allocate(size);
} else if (heap_->IsMarking() || !heap_->IsEmptyIdleTask()) {
} else if (localHeap_->IsMarking() || !localHeap_->IsEmptyIdleTask()) {
// Temporarily adjust the semi space capacity
if (heap_->IsFullMark()) {
overShootSize_ = heap_->CalculateLinearSpaceOverShoot();
if (localHeap_->IsFullMark()) {
overShootSize_ = localHeap_->CalculateLinearSpaceOverShoot();
} else {
size_t stepOverShootSize = heap_->GetEcmaParamConfiguration().GetSemiSpaceStepOvershootSize();
size_t stepOverShootSize = localHeap_->GetEcmaParamConfiguration().GetSemiSpaceStepOvershootSize();
size_t maxOverShootSize = std::max(initialCapacity_ / 2, stepOverShootSize); // 2: half
if (overShootSize_ < maxOverShootSize) {
overShootSize_ += stepOverShootSize;
@ -75,7 +76,7 @@ uintptr_t LinearSpace::Allocate(size_t size, bool isPromoted)
bool LinearSpace::Expand(bool isPromoted)
{
if (committedSize_ >= initialCapacity_ + overShootSize_ + outOfMemoryOvershootSize_ &&
!heap_->NeedStopCollection()) {
!localHeap_->NeedStopCollection()) {
return false;
}
@ -95,7 +96,7 @@ bool LinearSpace::Expand(bool isPromoted)
}
currentRegion->SetHighWaterMark(top);
}
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, heap_);
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, localHeap_);
allocator_.Reset(region->GetBegin(), region->GetEnd());
AddRegion(region);
@ -163,13 +164,13 @@ void LinearSpace::InvokeAllocationInspector(Address object, size_t size, size_t
allocationCounter_.AdvanceAllocationInspector(alignedSize);
}
SemiSpace::SemiSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity)
SemiSpace::SemiSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
: LinearSpace(heap, MemSpaceType::SEMI_SPACE, initialCapacity, maximumCapacity),
minimumCapacity_(initialCapacity) {}
void SemiSpace::Initialize()
{
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, heap_);
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, localHeap_);
AddRegion(region);
allocator_.Reset(region->GetBegin(), region->GetEnd());
}
@ -271,7 +272,7 @@ bool SemiSpace::AdjustCapacity(size_t allocatedSizeSinceGC, JSThread *thread)
size_t newCapacity = initialCapacity_ * GROWING_FACTOR;
SetInitialCapacity(std::min(newCapacity, maximumCapacity_));
if (newCapacity == maximumCapacity_) {
heap_->GetJSObjectResizingStrategy()->UpdateGrowStep(
localHeap_->GetJSObjectResizingStrategy()->UpdateGrowStep(
thread,
JSObjectResizingStrategy::PROPERTIES_GROW_SIZE * 2); // 2: double
}
@ -280,13 +281,13 @@ bool SemiSpace::AdjustCapacity(size_t allocatedSizeSinceGC, JSThread *thread)
if (initialCapacity_ <= minimumCapacity_) {
return false;
}
double speed = heap_->GetMemController()->GetNewSpaceAllocationThroughputPerMS();
double speed = localHeap_->GetMemController()->GetNewSpaceAllocationThroughputPerMS();
if (speed > LOW_ALLOCATION_SPEED_PER_MS) {
return false;
}
size_t newCapacity = initialCapacity_ / GROWING_FACTOR;
SetInitialCapacity(std::max(newCapacity, minimumCapacity_));
heap_->GetJSObjectResizingStrategy()->UpdateGrowStep(thread);
localHeap_->GetJSObjectResizingStrategy()->UpdateGrowStep(thread);
return true;
}
return false;
@ -305,22 +306,9 @@ size_t SemiSpace::GetAllocatedSizeSinceGC(uintptr_t top) const
return allocateAfterLastGC_ + currentRegionSize;
}
SnapshotSpace::SnapshotSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity)
SnapshotSpace::SnapshotSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
: LinearSpace(heap, MemSpaceType::SNAPSHOT_SPACE, initialCapacity, maximumCapacity) {}
ReadOnlySpace::ReadOnlySpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity, MemSpaceType type)
ReadOnlySpace::ReadOnlySpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity, MemSpaceType type)
: LinearSpace(heap, type, initialCapacity, maximumCapacity) {}
uintptr_t ReadOnlySpace::ConcurrentAllocate(size_t size)
{
LockHolder holder(allocateLock_);
auto object = allocator_.Allocate(size);
if (object != 0) {
return object;
}
if (Expand(false)) {
object = allocator_.Allocate(size);
}
return object;
}
} // namespace panda::ecmascript

View File

@ -21,7 +21,7 @@
namespace panda::ecmascript {
class LinearSpace : public Space {
public:
LinearSpace(BaseHeap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity);
LinearSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity);
NO_COPY_SEMANTIC(LinearSpace);
NO_MOVE_SEMANTIC(LinearSpace);
uintptr_t Allocate(size_t size, bool isPromoted = false);
@ -45,6 +45,7 @@ public:
void InvokeAllocationInspector(Address object, size_t size, size_t alignedSize);
protected:
Heap *localHeap_;
BumpPointerAllocator allocator_;
size_t overShootSize_ {0};
size_t allocateAfterLastGC_ {0};
@ -54,7 +55,7 @@ protected:
class SemiSpace : public LinearSpace {
public:
SemiSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity);
SemiSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity);
~SemiSpace() override = default;
NO_COPY_SEMANTIC(SemiSpace);
NO_MOVE_SEMANTIC(SemiSpace);
@ -91,7 +92,7 @@ private:
class SnapshotSpace : public LinearSpace {
public:
SnapshotSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity);
SnapshotSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity);
~SnapshotSpace() override = default;
NO_COPY_SEMANTIC(SnapshotSpace);
NO_MOVE_SEMANTIC(SnapshotSpace);
@ -112,7 +113,7 @@ private:
class ReadOnlySpace : public LinearSpace {
public:
ReadOnlySpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity,
ReadOnlySpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity,
MemSpaceType type = MemSpaceType::READ_ONLY_SPACE);
~ReadOnlySpace() override = default;
void SetReadOnly()
@ -131,13 +132,8 @@ public:
EnumerateRegions(cb);
}
uintptr_t ConcurrentAllocate(size_t size);
NO_COPY_SEMANTIC(ReadOnlySpace);
NO_MOVE_SEMANTIC(ReadOnlySpace);
private:
Mutex allocateLock_;
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_LINEAR_SPACE_H

View File

@ -97,14 +97,12 @@ template<class T>
class ContinuousStack : public Stack {
public:
ContinuousStack() = default;
explicit ContinuousStack(Heap *heap) : heap_(heap) {}
~ContinuousStack() override = default;
NO_COPY_SEMANTIC(ContinuousStack);
NO_MOVE_SEMANTIC(ContinuousStack);
inline void BeginMarking(Heap *heap, ContinuousStack<T> *other)
inline void BeginMarking(ContinuousStack<T> *other)
{
heap_ = heap;
currentArea_ = other->currentArea_;
if (currentArea_ == nullptr) {
currentArea_ = NativeAreaAllocator::AllocateSpace(DEFAULT_MARK_STACK_SIZE);
@ -161,7 +159,6 @@ private:
ResetBegin(currentArea_->GetBegin(), currentArea_->GetEnd());
}
Heap *heap_ {nullptr};
Area *currentArea_ {nullptr};
EcmaList<Area> areaList_ {};
EcmaList<Area> unusedList_ {};

View File

@ -98,6 +98,9 @@ void ParallelEvacuator::UpdateObjectSlot(ObjectSlot &slot)
{
JSTaggedValue value(slot.GetTaggedType());
if (value.IsHeapObject()) {
if (value.IsInSharedHeap()) {
return;
}
if (value.IsWeakForHeapObject()) {
return UpdateWeakObjectSlot(value.GetTaggedWeakRef(), slot);
}
@ -129,9 +132,12 @@ void ParallelEvacuator::UpdateWeakObjectSlot(TaggedObject *value, ObjectSlot &sl
slot.Clear();
}
return;
} else if (objectRegion->InSharedHeap()) {
return;
}
if (heap_->IsFullMark()) {
ASSERT(!objectRegion->InSharedHeap());
if (!objectRegion->Test(value)) {
slot.Clear();
}
@ -141,7 +147,7 @@ void ParallelEvacuator::UpdateWeakObjectSlot(TaggedObject *value, ObjectSlot &sl
void ParallelEvacuator::UpdateLocalToShareRSet(TaggedObject *object, JSHClass *cls)
{
Region *region = Region::ObjectAddressToRange(object);
ASSERT(!region->InSharedSpace());
ASSERT(!region->InSharedHeap());
auto callbackWithCSet = [this, region](TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area) {
if (area == VisitObjectArea::IN_OBJECT) {
if (VisitBodyInObj(root, start, end, [&](ObjectSlot slot) { SetLocalToShareRSet(slot, region); })) {
@ -157,13 +163,13 @@ void ParallelEvacuator::UpdateLocalToShareRSet(TaggedObject *object, JSHClass *c
void ParallelEvacuator::SetLocalToShareRSet(ObjectSlot slot, Region *region)
{
ASSERT(!region->InSharedSpace());
ASSERT(!region->InSharedHeap());
JSTaggedType value = slot.GetTaggedType();
if (!JSTaggedValue(value).IsHeapObject()) {
return;
}
Region *valueRegion = Region::ObjectAddressToRange(value);
if (valueRegion->InSharedSpace()) {
if (valueRegion->InSharedSweepableSpace()) {
region->InsertLocalToShareRset(slot.SlotAddress());
}
}

View File

@ -205,6 +205,9 @@ void ParallelEvacuator::VerifyValue(TaggedObject *object, ObjectSlot slot)
return;
}
Region *objectRegion = Region::ObjectAddressToRange(value.GetTaggedObject());
if (objectRegion->InSharedHeap()) {
return;
}
if (!heap_->IsFullMark() && !objectRegion->InYoungSpace()) {
return;
}
@ -243,10 +246,6 @@ void ParallelEvacuator::UpdateReference()
heap_->EnumerateSnapshotSpaceRegions([this] (Region *current) {
AddWorkload(std::make_unique<UpdateRSetWorkload>(this, current));
});
// todo(lukai) onlyfortest, delete this after all references of sharedobject are in shared space.
SharedHeap::GetInstance()->EnumerateOldSpaceRegions([this] (Region *current) {
AddWorkload(std::make_unique<UpdateRSetWorkload>(this, current));
});
LOG_GC(DEBUG) << "UpdatePointers statistic: young space region compact moving count:"
<< youngeRegionMoveCount
<< " young space region compact copying count:" << youngeRegionCopyCount
@ -322,6 +321,9 @@ void ParallelEvacuator::UpdateWeakReference()
LOG_GC(ERROR) << "PartialGC updateWeakReference: region is nullptr, header is " << header;
return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
}
if (objectRegion->InSharedHeap()) {
return header;
}
if (objectRegion->InYoungSpaceOrCSet()) {
if (objectRegion->InNewToNewSet()) {
if (objectRegion->Test(header)) {

View File

@ -72,7 +72,8 @@ inline void NonMovableMarker::MarkObject(uint32_t threadId, TaggedObject *object
{
Region *objectRegion = Region::ObjectAddressToRange(object);
if (!heap_->IsFullMark() && !objectRegion->InYoungSpace()) {
if ((!heap_->IsFullMark() && !objectRegion->InYoungSpace()) ||
objectRegion->InSharedHeap()) {
return;
}
@ -262,7 +263,7 @@ inline bool MovableMarker::UpdateForwardAddressIfFailed(TaggedObject *object, ui
void MovableMarker::UpdateLocalToShareRSet(TaggedObject *object, JSHClass *cls)
{
Region *region = Region::ObjectAddressToRange(object);
ASSERT(!region->InSharedSpace());
ASSERT(!region->InSharedHeap());
auto callbackWithCSet = [this, region](TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area) {
if (area == VisitObjectArea::IN_OBJECT) {
if (VisitBodyInObj(root, start, end,
@ -281,13 +282,13 @@ void MovableMarker::UpdateLocalToShareRSet(TaggedObject *object, JSHClass *cls)
void MovableMarker::SetLocalToShareRSet(ObjectSlot slot, Region *region)
{
ASSERT(!region->InSharedSpace());
ASSERT(!region->InSharedHeap());
JSTaggedType value = slot.GetTaggedType();
if (!JSTaggedValue(value).IsHeapObject()) {
return;
}
Region *valueRegion = Region::ObjectAddressToRange(value);
if (valueRegion->InSharedSpace()) {
if (valueRegion->InSharedSweepableSpace()) {
region->InsertLocalToShareRset(slot.SlotAddress());
}
}
@ -377,7 +378,7 @@ inline SlotStatus CompressGCMarker::MarkObject(uint32_t threadId, TaggedObject *
{
Region *objectRegion = Region::ObjectAddressToRange(object);
if (!NeedEvacuate(objectRegion)) {
if (objectRegion->AtomicMark(object)) {
if (!objectRegion->InSharedHeap() && objectRegion->AtomicMark(object)) {
workManager_->Push(threadId, object);
}
return SlotStatus::CLEAR_SLOT;
@ -450,9 +451,111 @@ inline bool CompressGCMarker::NeedEvacuate(Region *region)
{
if (isAppSpawn_) {
return !region->InHugeObjectSpace() && !region->InReadOnlySpace() && !region->InNonMovableSpace() &&
!region->InSharedSpace();
!region->InSharedHeap();
}
return region->InYoungOrOldSpace();
}
inline void ShareGCMarker::MarkObject(uint32_t threadId, TaggedObject *object)
{
Region *objectRegion = Region::ObjectAddressToRange(object);
ASSERT(objectRegion->InSharedSweepableSpace());
if (objectRegion->AtomicMark(object)) {
sWorkManager_->Push(threadId, object);
}
}
inline void ShareGCMarker::MarkValue(uint32_t threadId, ObjectSlot &slot)
{
JSTaggedValue value(slot.GetTaggedType());
if (value.IsInSharedSweepableSpace()) {
if (!value.IsWeakForHeapObject()) {
MarkObject(threadId, value.GetTaggedObject());
} else {
RecordWeakReference(threadId, reinterpret_cast<JSTaggedType *>(slot.SlotAddress()));
}
}
}
inline void ShareGCMarker::HandleRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot slot)
{
JSTaggedValue value(slot.GetTaggedType());
if (value.IsInSharedSweepableSpace()) {
MarkObject(threadId, value.GetTaggedObject());
}
}
inline void ShareGCMarker::HandleRangeRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot start,
ObjectSlot end)
{
for (ObjectSlot slot = start; slot < end; slot++) {
JSTaggedValue value(slot.GetTaggedType());
if (value.IsInSharedSweepableSpace()) {
if (value.IsWeakForHeapObject()) {
LOG_ECMA_MEM(FATAL) << "Weak Reference in ShareGCMarker roots";
}
MarkObject(threadId, value.GetTaggedObject());
}
}
}
inline void ShareGCMarker::HandleDerivedRoots([[maybe_unused]] Root type, [[maybe_unused]] ObjectSlot base,
[[maybe_unused]] ObjectSlot derived,
[[maybe_unused]] uintptr_t baseOldObject)
{
// Only used to update derived values; the share GC mark phase does not need to update slots
}
template <typename Callback>
ARK_INLINE bool ShareGCMarker::VisitBodyInObj(TaggedObject *root, ObjectSlot start, ObjectSlot end,
Callback callback)
{
auto hclass = root->SynchronizedGetClass();
int index = 0;
auto layout = LayoutInfo::UncheckCast(hclass->GetLayout().GetTaggedObject());
ObjectSlot realEnd = start;
realEnd += layout->GetPropertiesCapacity();
end = end > realEnd ? realEnd : end;
for (ObjectSlot slot = start; slot < end; slot++) {
auto attr = layout->GetAttr(index++);
if (attr.IsTaggedRep()) {
callback(slot);
}
}
return true;
}
inline void ShareGCMarker::ProcessLocalToShare(uint32_t threadId, Heap *localHeap)
{
localHeap->EnumerateRegions(std::bind(&ShareGCMarker::HandleLocalToShareRSet, this, threadId,
std::placeholders::_1));
ProcessMarkStack(threadId);
}
inline void ShareGCMarker::RecordWeakReference(uint32_t threadId, JSTaggedType *slot)
{
sWorkManager_->PushWeakReference(threadId, slot);
}
// Don't call this function while mutator threads are running.
inline void ShareGCMarker::HandleLocalToShareRSet(uint32_t threadId, Region *region)
{
// If mem does not point to an object in shared sweepable space, the corresponding bit in the localToShareRSet is cleared.
region->AtomicIterateAllLocalToShareBits([this, threadId](void *mem) -> bool {
ObjectSlot slot(ToUintPtr(mem));
JSTaggedValue value(slot.GetTaggedType());
if (value.IsInSharedSweepableSpace()) {
if (value.IsWeakForHeapObject()) {
RecordWeakReference(threadId, reinterpret_cast<JSTaggedType *>(mem));
} else {
MarkObject(threadId, value.GetTaggedObject());
}
return true;
} else {
// clear bit.
return false;
}
});
}
} // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_PARALLEL_MARKER_INL_H

View File

@ -218,4 +218,49 @@ uintptr_t CompressGCMarker::AllocateForwardAddress(uint32_t threadId, size_t siz
return AllocateAppSpawnSpace(size);
}
}
void ShareGCMarker::MarkRoots(uint32_t threadId, EcmaVM *localVm)
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ShareGCMarker::MarkRoots");
ObjectXRay::VisitVMRoots(
localVm,
std::bind(&ShareGCMarker::HandleRoots, this, threadId, std::placeholders::_1, std::placeholders::_2),
std::bind(&ShareGCMarker::HandleRangeRoots, this, threadId, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3),
std::bind(&ShareGCMarker::HandleDerivedRoots, this, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, std::placeholders::_4));
sWorkManager_->PushWorkNodeToGlobal(threadId, false);
}
void ShareGCMarker::ProcessMarkStack(uint32_t threadId)
{
auto cb = [&](ObjectSlot slot) {
MarkValue(threadId, slot);
};
auto visitor = [this, threadId, cb](TaggedObject *root, ObjectSlot start, ObjectSlot end,
VisitObjectArea area) {
if (area == VisitObjectArea::IN_OBJECT) {
if (VisitBodyInObj(root, start, end, cb)) {
return;
}
}
for (ObjectSlot slot = start; slot < end; slot++) {
MarkValue(threadId, slot);
}
};
TaggedObject *obj = nullptr;
while (true) {
obj = nullptr;
if (!sWorkManager_->Pop(threadId, &obj)) {
break;
}
JSHClass *hclass = obj->SynchronizedGetClass();
auto size = hclass->SizeFromJSHClass(obj);
Region *region = Region::ObjectAddressToRange(obj);
ASSERT(region->InSharedSweepableSpace());
region->IncreaseAliveObjectSafe(size);
MarkObject(threadId, hclass);
ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, hclass, visitor);
}
}
} // namespace panda::ecmascript
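
Editorial note: a hedged sketch of how these marker entry points are driven during the stop-the-world mark phase; it mirrors ShareGC::Mark in the share_gc.cpp diff below. MarkAllVmsSketch is hypothetical; the calls it makes are the ones added in this commit.

// Every initialized VM contributes its VM roots and its local-to-share
// remembered set; the shared mark stack is then drained, optionally with
// help from ParallelMarkTask workers posted via PostGCMarkingTask().
void MarkAllVmsSketch(SharedHeap *sHeap)
{
    for (JSThread *thread : Runtime::GetInstance()->ThreadList()) {
        EcmaVM *vm = thread->GetEcmaVM();
        if (!vm->IsInitialized()) {
            continue;
        }
        sHeap->GetShareGCMarker()->MarkRoots(MAIN_THREAD_INDEX, vm);
        sHeap->GetShareGCMarker()->ProcessLocalToShare(MAIN_THREAD_INDEX, const_cast<Heap *>(vm->GetHeap()));
    }
    sHeap->WaitRunningTaskFinished();
}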

View File

@ -189,5 +189,30 @@ private:
bool isAppSpawn_ {false};
Mutex mutex_;
};
class ShareGCMarker {
public:
explicit ShareGCMarker(ShareGCWorkManager *workManger) : sWorkManager_(workManger) {}
~ShareGCMarker() = default;
void MarkRoots(uint32_t threadId, EcmaVM *localVm);
void ProcessMarkStack(uint32_t threadId);
template <typename Callback>
inline bool VisitBodyInObj(TaggedObject *root, ObjectSlot start, ObjectSlot end, Callback callback);
inline void MarkValue(uint32_t threadId, ObjectSlot &slot);
inline void MarkObject(uint32_t threadId, TaggedObject *object);
inline void HandleRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot slot);
inline void HandleRangeRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot start,
ObjectSlot end);
inline void HandleDerivedRoots(Root type, ObjectSlot base, ObjectSlot derived,
uintptr_t baseOldObject);
inline void ProcessLocalToShare(uint32_t threadId, Heap *localHeap);
inline void HandleLocalToShareRSet(uint32_t threadId, Region *region);
inline void RecordWeakReference(uint32_t threadId, JSTaggedType *ref);
private:
ShareGCWorkManager *sWorkManager_ { nullptr };
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_PARALLEL_MARKER_H

View File

@ -147,6 +147,7 @@ void PartialGC::ProcessNativeDelete()
TRACE_GC(GCStats::Scope::ScopeId::ClearNativeObject, heap_->GetEcmaVM()->GetEcmaGCStats());
WeakRootVisitor gcUpdateWeak = [this](TaggedObject *header) {
Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(header));
ASSERT(!objectRegion->InSharedHeap());
if (!objectRegion->InYoungSpaceOrCSet() && !heap_->IsFullMark()) {
return header;
}

View File

@ -162,13 +162,6 @@ inline void Region::AtomicClearLocalToShareRSetInRange(uintptr_t start, uintptr_
}
}
inline void Region::ClearLocalToShareRSet()
{
if (localToShareSet_ != nullptr) {
localToShareSet_->ClearAll();
}
}
inline void Region::DeleteLocalToShareRSet()
{
if (localToShareSet_ != nullptr) {

View File

@ -50,12 +50,14 @@ enum RegionSpaceFlag {
IN_HUGE_MACHINE_CODE_SPACE = 0x10,
IN_SHARED_NON_MOVABLE = 0x11,
IN_SHARED_OLD_SPACE = 0x12,
IN_SHARED_READ_ONLY_SPACE = 0x13,
IN_SHARED_HUGE_OBJECT_SPACE = 0x14,
IN_SHARED_HUGE_OBJECT_SPACE = 0x13,
IN_SHARED_READ_ONLY_SPACE = 0x14,
VALID_SPACE_MASK = 0xFF,
SHARED_SPACE_BEGIN = IN_SHARED_NON_MOVABLE,
SHARED_SPACE_END = IN_SHARED_HUGE_OBJECT_SPACE,
SHARED_SPACE_END = IN_SHARED_READ_ONLY_SPACE,
SHARED_SWEEPABLE_SPACE_BEGIN = IN_SHARED_NON_MOVABLE,
SHARED_SWEEPABLE_SPACE_END = IN_SHARED_HUGE_OBJECT_SPACE,
};
enum RegionGCFlags {
@ -216,7 +218,6 @@ public:
void AtomicClearLocalToShareRSetInRange(uintptr_t start, uintptr_t end);
template <typename Visitor>
void AtomicIterateAllLocalToShareBits(Visitor visitor);
void ClearLocalToShareRSet();
void DeleteLocalToShareRSet();
// Cross region remembered set
void InsertCrossRegionRSet(uintptr_t addr);
@ -325,7 +326,14 @@ public:
return packedData_.flags_.spaceFlag_ == RegionSpaceFlag::IN_APPSPAWN_SPACE;
}
bool InSharedSpace() const
// Excludes the shared read-only space.
bool InSharedSweepableSpace() const
{
auto flag = packedData_.flags_.spaceFlag_;
return flag >= RegionSpaceFlag::SHARED_SWEEPABLE_SPACE_BEGIN && flag <= RegionSpaceFlag::SHARED_SWEEPABLE_SPACE_END;
}
bool InSharedHeap() const
{
auto flag = packedData_.flags_.spaceFlag_;
return flag >= RegionSpaceFlag::SHARED_SPACE_BEGIN && flag <= RegionSpaceFlag::SHARED_SPACE_END;
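The enumerator swap above (huge-object now 0x13, read-only now 0x14) is what lets both predicates stay single range checks: the sweepable spaces are contiguous and the read-only space sits just past their end. An illustrative compile-time restatement of that invariant (not part of the commit):
static_assert(RegionSpaceFlag::SHARED_SWEEPABLE_SPACE_BEGIN == RegionSpaceFlag::IN_SHARED_NON_MOVABLE,
              "sweepable range starts at the first shared space");
static_assert(RegionSpaceFlag::SHARED_SWEEPABLE_SPACE_END == RegionSpaceFlag::IN_SHARED_HUGE_OBJECT_SPACE,
              "sweepable range ends before the read-only space");
static_assert(RegionSpaceFlag::IN_SHARED_READ_ONLY_SPACE == RegionSpaceFlag::SHARED_SPACE_END,
              "read-only is the last shared space and is excluded from sweeping");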

View File

@ -0,0 +1,125 @@
/*
* Copyright (c) 2024 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ecmascript/mem/shared_heap/share_gc.h"
#include "ecmascript/ecma_string_table.h"
#include "ecmascript/ecma_vm.h"
#include "ecmascript/mem/barriers-inl.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/parallel_marker-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/mem/space-inl.h"
#include "ecmascript/mem/visitor.h"
#include "ecmascript/mem/gc_stats.h"
#include "ecmascript/runtime.h"
namespace panda::ecmascript {
void ShareGC::RunPhases()
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ShareGC::RunPhases");
Initialize();
Mark();
Sweep();
Finish();
}
void ShareGC::Initialize()
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ShareGC::Initialize");
sHeap_->EnumerateOldSpaceRegions([](Region *current) {
ASSERT(current->InSharedSweepableSpace());
current->ResetAliveObject();
});
sWorkManager_->Initialize();
}
void ShareGC::Mark()
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ShareGC::Mark");
auto threads = Runtime::GetInstance()->ThreadList();
for (auto &thread : threads) {
auto vm = thread->GetEcmaVM();
if (!vm->IsInitialized()) {
continue;
}
sHeap_->GetShareGCMarker()->MarkRoots(MAIN_THREAD_INDEX, vm);
sHeap_->GetShareGCMarker()->ProcessLocalToShare(MAIN_THREAD_INDEX, const_cast<Heap*>(vm->GetHeap()));
}
sHeap_->WaitRunningTaskFinished();
}
void ShareGC::Sweep()
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ShareGC::Sweep");
UpdateRecordWeakReference();
WeakRootVisitor gcUpdateWeak = [](TaggedObject *header) {
Region *objectRegion = Region::ObjectAddressToRange(header);
if (!objectRegion) {
LOG_GC(ERROR) << "ShareGC updateWeakReference: region is nullptr, header is " << header;
return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
}
if (objectRegion->Test(header)) {
return header;
}
return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
};
// todo(lukai) wait for stringtable.
// EcmaStringTable::GetInstance()->SweepWeakReference(gcUpdateWeak);
auto threads = Runtime::GetInstance()->ThreadList();
for (auto &thread : threads) {
if (!thread->GetEcmaVM()->IsInitialized()) {
continue;
}
thread->IterateWeakEcmaGlobalStorage(gcUpdateWeak);
}
sHeap_->GetSweeper()->Sweep();
sHeap_->GetSweeper()->PostTask();
}
void ShareGC::Finish()
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ShareGC::Finish");
sHeap_->Resume();
sWorkManager_->Finish();
sHeap_->GetSweeper()->TryFillSweptRegion();
}
void ShareGC::UpdateRecordWeakReference()
{
auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
for (uint32_t i = 0; i < totalThreadCount; i++) {
ProcessQueue *queue = sHeap_->GetWorkManager()->GetWeakReferenceQueue(i);
while (true) {
auto obj = queue->PopBack();
if (UNLIKELY(obj == nullptr)) {
break;
}
ObjectSlot slot(ToUintPtr(obj));
JSTaggedValue value(slot.GetTaggedType());
if (value.IsWeak()) {
auto header = value.GetTaggedWeakRef();
Region *objectRegion = Region::ObjectAddressToRange(header);
if (!objectRegion->Test(header)) {
slot.Clear();
}
}
}
}
}
} // namespace panda::ecmascript
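For orientation, a mutator reaches RunPhases through the same CollectGarbage entry point used by the shared allocation slow path and NewSObjectHook later in this commit; the suspend-all inside CollectGarbage is assumed here, since it is not shown in this hunk:
// Sketch: any initialized thread can request a shared collection.
void RequestSharedGC(SharedHeap *sHeap, JSThread *thread)
{
    sHeap->CollectGarbage(thread, TriggerGCType::SHARED_GC, GCReason::OTHER);
    // Internally this is expected to stop the world, then run the phases
    // above in order: Initialize() -> Mark() -> Sweep() -> Finish().
}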

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2024 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ECMASCRIPT_MEM_SHARED_HEAP_SHARE_GC_H
#define ECMASCRIPT_MEM_SHARED_HEAP_SHARE_GC_H
#include "ecmascript/mem/allocator.h"
#include "ecmascript/mem/garbage_collector.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/mark_word.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/work_manager.h"
namespace panda::ecmascript {
class ShareGC : public GarbageCollector {
public:
explicit ShareGC(SharedHeap *heap) : sHeap_(heap), sWorkManager_(heap->GetWorkManager()) {}
~ShareGC() override = default;
NO_COPY_SEMANTIC(ShareGC);
NO_MOVE_SEMANTIC(ShareGC);
void RunPhases() override;
protected:
void Initialize() override;
void Mark() override;
void Sweep() override;
void Finish() override;
private:
void UpdateRecordWeakReference();
SharedHeap *sHeap_;
ShareGCWorkManager *sWorkManager_ {nullptr};
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_SHARED_HEAP_SHARE_GC_H

View File

@ -0,0 +1,162 @@
/*
* Copyright (c) 2024 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/ecma_macros.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/region-inl.h"
#include "ecmascript/mem/space-inl.h"
#include "ecmascript/taskpool/taskpool.h"
namespace panda::ecmascript {
SharedConcurrentSweeper::SharedConcurrentSweeper(SharedHeap *heap, EnableConcurrentSweepType type)
: sHeap_(heap),
enableType_(type)
{
}
void SharedConcurrentSweeper::PostTask()
{
auto tid = JSThread::GetCurrentThreadId();
if (ConcurrentSweepEnabled()) {
Taskpool::GetCurrentTaskpool()->PostTask(
std::make_unique<SweeperTask>(tid, this, SHARED_OLD_SPACE));
Taskpool::GetCurrentTaskpool()->PostTask(
std::make_unique<SweeperTask>(tid, this, SHARED_NON_MOVABLE));
}
}
void SharedConcurrentSweeper::Sweep()
{
if (ConcurrentSweepEnabled()) {
// Add all regions to the sweeping list; any previously posted sweep tasks must already have finished.
sHeap_->GetOldSpace()->PrepareSweeping();
sHeap_->GetNonMovableSpace()->PrepareSweeping();
// Mark sweeping in progress and reset the per-space pending-task counters.
isSweeping_ = true;
for (int type = SHARED_SWEEPING_SPACE_BEGIN; type <= SHARED_SWEEPING_SPACE_END; type++) {
    int spaceIndex = type - SHARED_SWEEPING_SPACE_BEGIN;
    remainingTaskNum_[spaceIndex] = SHARED_SWEEPING_SPACE_NUM;
}
} else {
sHeap_->GetOldSpace()->Sweep();
sHeap_->GetNonMovableSpace()->Sweep();
}
sHeap_->GetHugeObjectSpace()->Sweep();
}
void SharedConcurrentSweeper::AsyncSweepSpace(MemSpaceType type, bool isMain)
{
auto space = sHeap_->GetSpaceWithType(type);
space->AsyncSweep(isMain);
int spaceIndex = type - SHARED_SWEEPING_SPACE_BEGIN;
LockHolder holder(mutexs_[spaceIndex]);
if (--remainingTaskNum_[spaceIndex] == 0) {
cvs_[spaceIndex].SignalAll();
}
}
void SharedConcurrentSweeper::WaitAllTaskFinished()
{
if (!isSweeping_) {
return;
}
for (int type = SHARED_SWEEPING_SPACE_BEGIN; type <= SHARED_SWEEPING_SPACE_END; type++) {
int spaceIndex = type - SHARED_SWEEPING_SPACE_BEGIN;
if (remainingTaskNum_[spaceIndex] > 0) {
LockHolder holder(mutexs_[spaceIndex]);
while (remainingTaskNum_[spaceIndex] > 0) {
cvs_[spaceIndex].Wait(&mutexs_[spaceIndex]);
}
}
}
}
// Called from the suspend-all thread.
void SharedConcurrentSweeper::EnsureAllTaskFinished()
{
if (!isSweeping_) {
return;
}
for (int type = SHARED_SWEEPING_SPACE_BEGIN; type <= SHARED_SWEEPING_SPACE_END; type++) {
    WaitingTaskFinish(static_cast<MemSpaceType>(type));
}
isSweeping_ = false;
if (IsRequestDisabled()) {
enableType_ = EnableConcurrentSweepType::DISABLE;
}
}
// Called from a mutator thread.
void SharedConcurrentSweeper::EnsureTaskFinished(MemSpaceType type)
{
if (!isSweeping_) {
return;
}
WaitingTaskFinish(type);
}
void SharedConcurrentSweeper::WaitingTaskFinish(MemSpaceType type)
{
int spaceIndex = type - SHARED_SWEEPING_SPACE_BEGIN;
if (remainingTaskNum_[spaceIndex] > 0) {
{
LockHolder holder(mutexs_[spaceIndex]);
remainingTaskNum_[spaceIndex]++;
}
AsyncSweepSpace(type, true);
LockHolder holder(mutexs_[spaceIndex]);
while (remainingTaskNum_[spaceIndex] > 0) {
cvs_[spaceIndex].Wait(&mutexs_[spaceIndex]);
}
}
SharedSparseSpace *space = sHeap_->GetSpaceWithType(type);
space->FinishFillSweptRegion();
}
void SharedConcurrentSweeper::TryFillSweptRegion()
{
sHeap_->GetOldSpace()->TryFillSweptRegion();
sHeap_->GetNonMovableSpace()->TryFillSweptRegion();
}
bool SharedConcurrentSweeper::SweeperTask::Run([[maybe_unused]] uint32_t threadIndex)
{
if (type_ == SHARED_NON_MOVABLE) {
sweeper_->AsyncSweepSpace(SHARED_NON_MOVABLE, false);
sweeper_->AsyncSweepSpace(SHARED_OLD_SPACE, false);
} else {
ASSERT(type_ == SHARED_OLD_SPACE);
sweeper_->AsyncSweepSpace(SHARED_OLD_SPACE, false);
sweeper_->AsyncSweepSpace(SHARED_NON_MOVABLE, false);
}
return true;
}
void SharedConcurrentSweeper::EnableConcurrentSweep(EnableConcurrentSweepType type)
{
if (IsConfigDisabled()) {
return;
}
if (ConcurrentSweepEnabled() && isSweeping_ && type == EnableConcurrentSweepType::DISABLE) {
enableType_ = EnableConcurrentSweepType::REQUEST_DISABLE;
} else {
enableType_ = type;
}
}
} // namespace panda::ecmascript
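Taken together with share_gc.cpp above, the sweeper lifecycle is driven in the following order; this is a restatement of calls that already exist in this commit, not a new API:
// 1. ShareGC::Sweep:    partition regions and mark sweeping in progress.
// 2. ShareGC::Sweep:    hand the per-space work to the taskpool.
// 3. ShareGC::Finish:   reclaim already-swept regions into the free lists.
// 4. Mutator slow path: block on one space when allocating mid-sweep.
void SweeperLifecycleSketch(SharedConcurrentSweeper *sweeper)
{
    sweeper->Sweep();
    sweeper->PostTask();
    sweeper->TryFillSweptRegion();
    sweeper->EnsureTaskFinished(MemSpaceType::SHARED_OLD_SPACE);
}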

View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2024 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ECMASCRIPT_MEM_SHARED_HEAP_SHARED_CONCURRENT_SWEEPER_H
#define ECMASCRIPT_MEM_SHARED_HEAP_SHARED_CONCURRENT_SWEEPER_H
#include "ecmascript/mem/concurrent_sweeper.h"
namespace panda::ecmascript {
class SharedHeap;
class SharedConcurrentSweeper {
public:
SharedConcurrentSweeper(SharedHeap *heap, EnableConcurrentSweepType type);
~SharedConcurrentSweeper() = default;
NO_COPY_SEMANTIC(SharedConcurrentSweeper);
NO_MOVE_SEMANTIC(SharedConcurrentSweeper);
void PostTask();
void Sweep();
void WaitAllTaskFinished();
// Helps finish the remaining sweeping tasks; can be called from a JS thread.
void EnsureAllTaskFinished();
// Ensures sweeping of the given space has finished; can be called from a JS thread.
void EnsureTaskFinished(MemSpaceType type);
void TryFillSweptRegion();
void EnableConcurrentSweep(EnableConcurrentSweepType type);
bool IsSweeping()
{
return isSweeping_;
}
bool ConcurrentSweepEnabled()
{
return !IsDisabled();
}
void ConfigConcurrentSweep(bool enabled)
{
enableType_ = enabled ? EnableConcurrentSweepType::ENABLE :
EnableConcurrentSweepType::CONFIG_DISABLE;
}
bool IsDisabled() const
{
return enableType_ == EnableConcurrentSweepType::DISABLE ||
enableType_ == EnableConcurrentSweepType::CONFIG_DISABLE;
}
bool IsRequestDisabled() const
{
return enableType_ == EnableConcurrentSweepType::REQUEST_DISABLE;
}
bool IsConfigDisabled() const
{
return enableType_ == EnableConcurrentSweepType::CONFIG_DISABLE;
}
private:
class SweeperTask : public Task {
public:
SweeperTask(int32_t id, SharedConcurrentSweeper *sweeper, MemSpaceType type)
: Task(id), sweeper_(sweeper), type_(type) {}
~SweeperTask() override = default;
bool Run(uint32_t threadIndex) override;
NO_COPY_SEMANTIC(SweeperTask);
NO_MOVE_SEMANTIC(SweeperTask);
private:
SharedConcurrentSweeper *sweeper_;
MemSpaceType type_;
};
void AsyncSweepSpace(MemSpaceType type, bool isMain);
void WaitingTaskFinish(MemSpaceType type);
std::array<Mutex, SHARED_SWEEPING_SPACE_NUM> mutexs_;
std::array<ConditionVariable, SHARED_SWEEPING_SPACE_NUM> cvs_;
std::array<std::atomic_int, SHARED_SWEEPING_SPACE_NUM> remainingTaskNum_ = {0, 0};
SharedHeap *sHeap_;
EnableConcurrentSweepType enableType_ {EnableConcurrentSweepType::CONFIG_DISABLE};
bool isSweeping_ {false};
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_SHARED_HEAP_SHARED_CONCURRENT_SWEEPER_H

View File

@ -0,0 +1,325 @@
/*
* Copyright (c) 2024 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ecmascript/mem/shared_heap/shared_space.h"
#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/allocator-inl.h"
#include "ecmascript/mem/free_object_set.h"
#include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
namespace panda::ecmascript {
SharedSparseSpace::SharedSparseSpace(SharedHeap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
: Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
sweepState_(SweepState::NO_SWEEP),
sHeap_(heap),
liveObjectSize_(0)
{
allocator_ = new FreeListAllocator(heap);
}
void SharedSparseSpace::Initialize()
{
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, sHeap_);
region->InitializeFreeObjectSets();
AddRegion(region);
allocator_->Initialize(region);
}
void SharedSparseSpace::Reset()
{
allocator_->RebuildFreeList();
ReclaimRegions();
liveObjectSize_ = 0;
}
// Only used during shared heap initialization, before the first vm thread is created.
uintptr_t SharedSparseSpace::AllocateWithoutGC(size_t size)
{
uintptr_t object = TryAllocate(size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
object = AllocateWithExpand(size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
return object;
}
uintptr_t SharedSparseSpace::Allocate(JSThread *thread, size_t size, bool allowGC)
{
uintptr_t object = TryAllocate(size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
if (sweepState_ == SweepState::SWEEPING) {
object = AllocateAfterSweepingCompleted(size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
}
// Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
if (allowGC && sHeap_->CheckAndTriggerOldGC(thread)) {
object = TryAllocate(size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
}
object = AllocateWithExpand(size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
if (allowGC) {
sHeap_->CollectGarbage(thread, TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED);
object = Allocate(thread, size, false);
}
return object;
}
uintptr_t SharedSparseSpace::TryAllocate(size_t size)
{
LockHolder lock(allocateLock_);
return allocator_->Allocate(size);
}
uintptr_t SharedSparseSpace::AllocateWithExpand(size_t size)
{
LockHolder lock(allocateLock_);
// Try to allocate first, so that two threads do not both expand the space.
auto object = allocator_->Allocate(size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
if (Expand()) {
object = allocator_->Allocate(size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
}
return object;
}
bool SharedSparseSpace::Expand()
{
if (committedSize_ >= maximumCapacity_ + outOfMemoryOvershootSize_) {
LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
return false;
}
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, sHeap_);
region->InitializeFreeObjectSets();
AddRegion(region);
allocator_->AddFree(region);
return true;
}
uintptr_t SharedSparseSpace::AllocateAfterSweepingCompleted(size_t size)
{
LockHolder lock(allocateLock_);
if (sweepState_ != SweepState::SWEEPING) {
return allocator_->Allocate(size);
}
if (TryFillSweptRegion()) {
auto object = allocator_->Allocate(size);
if (object != 0) {
return object;
}
}
// Parallel sweep and fill
sHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
return allocator_->Allocate(size);
}
void SharedSparseSpace::PrepareSweeping()
{
liveObjectSize_ = 0;
EnumerateRegions([this](Region *current) {
IncreaseLiveObjectSize(current->AliveObject());
current->ResetWasted();
AddSweepingRegion(current);
});
SortSweepingRegion();
sweepState_ = SweepState::SWEEPING;
allocator_->RebuildFreeList();
}
void SharedSparseSpace::AsyncSweep(bool isMain)
{
Region *current = GetSweepingRegionSafe();
while (current != nullptr) {
FreeRegion(current, isMain);
// Regions swept by the main thread are consumed directly and are not added to the swept list.
if (!isMain) {
AddSweptRegionSafe(current);
}
current = GetSweepingRegionSafe();
}
}
void SharedSparseSpace::Sweep()
{
liveObjectSize_ = 0;
allocator_->RebuildFreeList();
EnumerateRegions([this](Region *current) {
IncreaseLiveObjectSize(current->AliveObject());
current->ResetWasted();
FreeRegion(current);
});
}
bool SharedSparseSpace::TryFillSweptRegion()
{
if (sweptList_.empty()) {
return false;
}
Region *region = nullptr;
while ((region = GetSweptRegionSafe()) != nullptr) {
allocator_->CollectFreeObjectSet(region);
region->ResetSwept();
}
return true;
}
bool SharedSparseSpace::FinishFillSweptRegion()
{
bool ret = TryFillSweptRegion();
sweepState_ = SweepState::SWEPT;
return ret;
}
void SharedSparseSpace::AddSweepingRegion(Region *region)
{
sweepingList_.emplace_back(region);
}
void SharedSparseSpace::SortSweepingRegion()
{
// Sweep regions with the smallest live object size first
std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
return first->AliveObject() < second->AliveObject();
});
}
Region *SharedSparseSpace::GetSweepingRegionSafe()
{
LockHolder holder(lock_);
Region *region = nullptr;
if (!sweepingList_.empty()) {
region = sweepingList_.back();
sweepingList_.pop_back();
}
return region;
}
void SharedSparseSpace::AddSweptRegionSafe(Region *region)
{
LockHolder holder(lock_);
sweptList_.emplace_back(region);
}
Region *SharedSparseSpace::GetSweptRegionSafe()
{
LockHolder holder(lock_);
Region *region = nullptr;
if (!sweptList_.empty()) {
region = sweptList_.back();
sweptList_.pop_back();
}
return region;
}
void SharedSparseSpace::FreeRegion(Region *current, bool isMain)
{
uintptr_t freeStart = current->GetBegin();
current->IterateAllMarkedBits([this, &freeStart, isMain](void *mem) {
auto header = reinterpret_cast<TaggedObject *>(mem);
auto klass = header->GetClass();
auto size = klass->SizeFromJSHClass(header);
uintptr_t freeEnd = ToUintPtr(mem);
if (freeStart != freeEnd) {
FreeLiveRange(freeStart, freeEnd, isMain);
}
freeStart = freeEnd + size;
});
uintptr_t freeEnd = current->GetEnd();
if (freeStart != freeEnd) {
FreeLiveRange(freeStart, freeEnd, isMain);
}
}
void SharedSparseSpace::FreeLiveRange(uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
// No need to clear a remembered set here: shared regions currently have none.
allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}
size_t SharedSparseSpace::GetHeapObjectSize() const
{
return liveObjectSize_;
}
void SharedSparseSpace::IncreaseAllocatedSize(size_t size)
{
allocator_->IncreaseAllocatedSize(size);
}
size_t SharedSparseSpace::GetTotalAllocatedSize() const
{
return allocator_->GetAllocatedSize();
}
void SharedSparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
{
ASSERT(size <= alignedSize);
if (LIKELY(!allocationCounter_.IsActive())) {
return;
}
if (alignedSize >= allocationCounter_.NextBytes()) {
allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
}
allocationCounter_.AdvanceAllocationInspector(alignedSize);
}
SharedNonMovableSpace::SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
: SharedSparseSpace(heap, MemSpaceType::SHARED_NON_MOVABLE, initialCapacity, maximumCapacity)
{
}
SharedOldSpace::SharedOldSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
: SharedSparseSpace(heap, MemSpaceType::SHARED_OLD_SPACE, initialCapacity, maximumCapacity)
{
}
SharedReadOnlySpace::SharedReadOnlySpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
: Space(heap, heap->GetHeapRegionAllocator(), MemSpaceType::SHARED_READ_ONLY_SPACE, initialCapacity, maximumCapacity) {}
bool SharedReadOnlySpace::Expand()
{
if (committedSize_ >= initialCapacity_ + outOfMemoryOvershootSize_ &&
!heap_->NeedStopCollection()) {
return false;
}
uintptr_t top = allocator_.GetTop();
auto currentRegion = GetCurrentRegion();
if (currentRegion != nullptr) {
currentRegion->SetHighWaterMark(top);
}
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, heap_);
allocator_.Reset(region->GetBegin(), region->GetEnd());
AddRegion(region);
return true;
}
uintptr_t SharedReadOnlySpace::Allocate([[maybe_unused]]JSThread *thread, size_t size)
{
LockHolder holder(allocateLock_);
auto object = allocator_.Allocate(size);
if (object != 0) {
return object;
}
if (Expand()) {
object = allocator_.Allocate(size);
}
return object;
}
} // namespace panda::ecmascript
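A short usage sketch of the new space API, assuming a reachable SharedHeap instance; the factory methods later in this commit are the real call sites:
// Allocate from the shared old space; with allowGC defaulted to true the
// call escalates through swept regions, expansion, and finally a shared GC.
uintptr_t AllocateSharedOld(SharedHeap *sHeap, JSThread *thread, size_t size)
{
    uintptr_t object = sHeap->GetOldSpace()->Allocate(thread, size);
    // A zero result after the post-GC retry indicates shared-heap OOM.
    return object;
}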

View File

@ -0,0 +1,166 @@
/*
* Copyright (c) 2024 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ECMASCRIPT_MEM_SHARED_SHARED_SPACE_H
#define ECMASCRIPT_MEM_SHARED_SHARED_SPACE_H
#include "ecmascript/mem/mem_common.h"
#include "ecmascript/mem/sparse_space.h"
namespace panda::ecmascript {
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
#define CHECK_SOBJECT_AND_INC_OBJ_SIZE(size) \
if (object != 0) { \
IncreaseLiveObjectSize(size); \
if (sHeap_->IsReadyToMark()) { \
Region::ObjectAddressToRange(object)->IncreaseAliveObject(size); \
} \
InvokeAllocationInspector(object, size, size); \
return object; \
}
#else
#define CHECK_SOBJECT_AND_INC_OBJ_SIZE(size) \
if (object != 0) { \
IncreaseLiveObjectSize(size); \
if (sHeap_->IsReadyToMark()) { \
Region::ObjectAddressToRange(object)->IncreaseAliveObject(size); \
} \
return object; \
}
#endif
class SharedHeap;
class SharedSparseSpace : public Space {
public:
SharedSparseSpace(SharedHeap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity);
~SharedSparseSpace() override
{
delete allocator_;
}
NO_COPY_SEMANTIC(SharedSparseSpace);
NO_MOVE_SEMANTIC(SharedSparseSpace);
void Initialize() override;
void Reset();
uintptr_t AllocateWithoutGC(size_t size);
uintptr_t Allocate(JSThread *thread, size_t size, bool allowGC = true);
// For sweeping
void PrepareSweeping();
void AsyncSweep(bool isMain);
void Sweep();
bool TryFillSweptRegion();
// Ensure all regions have finished sweeping
bool FinishFillSweptRegion();
void AddSweepingRegion(Region *region);
void SortSweepingRegion();
Region *GetSweepingRegionSafe();
void AddSweptRegionSafe(Region *region);
Region *GetSweptRegionSafe();
void FreeRegion(Region *current, bool isMain = true);
void FreeLiveRange(uintptr_t freeStart, uintptr_t freeEnd, bool isMain);
size_t GetHeapObjectSize() const;
void IncreaseAllocatedSize(size_t size);
void IncreaseLiveObjectSize(size_t size)
{
liveObjectSize_ += size;
}
void DecreaseLiveObjectSize(size_t size)
{
liveObjectSize_ -= size;
}
size_t GetTotalAllocatedSize() const;
void InvokeAllocationInspector(Address object, size_t size, size_t alignedSize);
protected:
FreeListAllocator *allocator_;
SweepState sweepState_ = SweepState::NO_SWEEP;
private:
uintptr_t AllocateWithExpand(size_t size);
uintptr_t TryAllocate(size_t size);
bool Expand();
// For sweeping
uintptr_t AllocateAfterSweepingCompleted(size_t size);
Mutex lock_;
Mutex allocateLock_;
SharedHeap *sHeap_ {nullptr};
std::vector<Region *> sweepingList_;
std::vector<Region *> sweptList_;
size_t liveObjectSize_ {0};
};
class SharedNonMovableSpace : public SharedSparseSpace {
public:
SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity);
~SharedNonMovableSpace() override = default;
NO_COPY_SEMANTIC(SharedNonMovableSpace);
NO_MOVE_SEMANTIC(SharedNonMovableSpace);
};
class SharedOldSpace : public SharedSparseSpace {
public:
SharedOldSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity);
~SharedOldSpace() override = default;
NO_COPY_SEMANTIC(SharedOldSpace);
NO_MOVE_SEMANTIC(SharedOldSpace);
};
class SharedReadOnlySpace : public Space {
public:
SharedReadOnlySpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity);
~SharedReadOnlySpace() override = default;
void SetReadOnly()
{
auto cb = [](Region *region) {
region->SetReadOnlyAndMarked();
};
EnumerateRegions(cb);
}
void ClearReadOnly()
{
auto cb = [](Region *region) {
region->ClearReadOnly();
};
EnumerateRegions(cb);
}
bool Expand();
uintptr_t Allocate(JSThread *thread, size_t size);
NO_COPY_SEMANTIC(SharedReadOnlySpace);
NO_MOVE_SEMANTIC(SharedReadOnlySpace);
private:
Mutex allocateLock_;
BumpPointerAllocator allocator_;
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_SHARED_SHARED_SPACE_H

View File

@ -49,6 +49,9 @@ enum MemSpaceType {
SHARED_END = SHARED_HUGE_OBJECT_SPACE,
// A free-list region's memory may remain in use indefinitely and cannot be evacuated
FREE_LIST_NUM = MACHINE_CODE_SPACE - OLD_SPACE + 1,
SHARED_SWEEPING_SPACE_BEGIN = SHARED_NON_MOVABLE,
SHARED_SWEEPING_SPACE_END = SHARED_OLD_SPACE,
SHARED_SWEEPING_SPACE_NUM = SHARED_SWEEPING_SPACE_END - SHARED_SWEEPING_SPACE_BEGIN + 1,
};
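These three enumerators give the sweeper a dense zero-based index for its per-space arrays; the mapping used throughout shared_concurrent_sweeper.cpp above is simply the offset from the range start (shown here as an illustrative helper, not part of the commit):
// SHARED_NON_MOVABLE maps to 0 and SHARED_OLD_SPACE to 1, matching the size
// of the sweeper's mutexs_/cvs_/remainingTaskNum_ arrays.
static_assert(SHARED_SWEEPING_SPACE_NUM == 2, "two sweepable shared spaces");
constexpr int SweepingSpaceIndex(MemSpaceType type)
{
    return static_cast<int>(type) - static_cast<int>(SHARED_SWEEPING_SPACE_BEGIN);
}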
static inline bool IsSMemSpace(MemSpaceType type)

View File

@ -24,9 +24,10 @@
#include "ecmascript/runtime_call_id.h"
namespace panda::ecmascript {
SparseSpace::SparseSpace(BaseHeap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
SparseSpace::SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
: Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
sweepState_(SweepState::NO_SWEEP),
localHeap_(heap),
liveObjectSize_(0)
{
allocator_ = new FreeListAllocator(heap);
@ -34,7 +35,7 @@ SparseSpace::SparseSpace(BaseHeap *heap, MemSpaceType type, size_t initialCapaci
void SparseSpace::Initialize()
{
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, heap_);
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, localHeap_);
region->InitializeFreeObjectSets();
AddRegion(region);
@ -64,7 +65,7 @@ uintptr_t SparseSpace::Allocate(size_t size, bool allowGC)
}
// Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
if (allowGC && heap_->CheckAndTriggerOldGC()) {
if (allowGC && localHeap_->CheckAndTriggerOldGC()) {
object = allocator_->Allocate(size);
CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
}
@ -75,32 +76,7 @@ uintptr_t SparseSpace::Allocate(size_t size, bool allowGC)
}
if (allowGC) {
heap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
object = Allocate(size, false);
// Size has already been incremented
}
return object;
}
uintptr_t SparseSpace::ConcurrentAllocate(size_t size, bool allowGC)
{
LockHolder holder(allocateLock_);
auto object = allocator_->Allocate(size);
CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
// Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
if (allowGC && heap_->CheckAndTriggerOldGC()) {
object = allocator_->Allocate(size);
CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
}
if (Expand()) {
object = allocator_->Allocate(size);
CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
}
if (allowGC) {
heap_->CollectGarbage(TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED);
localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
object = Allocate(size, false);
// Size has already been incremented
}
@ -114,7 +90,7 @@ bool SparseSpace::Expand()
return false;
}
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, heap_);
Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, localHeap_);
region->InitializeFreeObjectSets();
AddRegion(region);
allocator_->AddFree(region);
@ -131,7 +107,7 @@ uintptr_t SparseSpace::AllocateAfterSweepingCompleted(size_t size)
}
}
// Parallel sweep and fill
heap_->GetSweeper()->EnsureTaskFinished(spaceType_);
localHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
return allocator_->Allocate(size);
}
@ -293,7 +269,7 @@ void SparseSpace::FreeRegion(Region *current, bool isMain)
void SparseSpace::FreeLiveRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
heap_->GetSweeper()->ClearRSetInRange(current, freeStart, freeEnd);
localHeap_->GetSweeper()->ClearRSetInRange(current, freeStart, freeEnd);
allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}
@ -373,12 +349,9 @@ void SparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t
allocationCounter_.AdvanceAllocationInspector(alignedSize);
}
OldSpace::OldSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity)
OldSpace::OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
: SparseSpace(heap, OLD_SPACE, initialCapacity, maximumCapacity) {}
OldSpace::OldSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity, MemSpaceType type)
: SparseSpace(heap, type, initialCapacity, maximumCapacity) {}
Region *OldSpace::TrySweepToGetSuitableRegion(size_t size)
{
// Try Sweeping region to get space for allocation
@ -437,10 +410,10 @@ void OldSpace::Merge(LocalSpace *localSpace)
IncreaseLiveObjectSize(region->AliveObject());
allocator_->CollectFreeObjectSet(region);
});
size_t hugeSpaceCommitSize = heap_->GetHugeObjectSpace()->GetCommittedSize();
size_t hugeSpaceCommitSize = localHeap_->GetHugeObjectSpace()->GetCommittedSize();
if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
heap_->ShouldThrowOOMError(true);
localHeap_->ShouldThrowOOMError(true);
IncreaseMergeSize(committedSize_ - oldCommittedSize);
// If OOM will be thrown, temporarily increase the space size to avoid a vm crash
IncreaseOutOfMemoryOvershootSize(committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity());
@ -452,13 +425,13 @@ void OldSpace::Merge(LocalSpace *localSpace)
void OldSpace::SelectCSet()
{
if (heap_->IsMarking()) {
heap_->GetEcmaGCStats()->RecordStatisticBeforeGC(TriggerGCType::OLD_GC, GCReason::OTHER);
if (localHeap_->IsMarking()) {
localHeap_->GetEcmaGCStats()->RecordStatisticBeforeGC(TriggerGCType::OLD_GC, GCReason::OTHER);
}
CheckRegionSize();
// 1. Select regions whose live object size exceeds the limit
int64_t evacuateSizeLimit = 0;
if (!heap_->IsInBackground()) {
if (!localHeap_->IsInBackground()) {
evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_FOREGROUND;
EnumerateRegions([this](Region *region) {
if (!region->MostObjectAlive()) {
@ -485,7 +458,7 @@ void OldSpace::SelectCSet()
// Limit cset size
unsigned long selectedRegionNumber = 0;
int64_t expectFreeSize = static_cast<int64_t>(heap_->GetCommittedSize() - heap_->GetHeapAliveSizeAfterGC());
int64_t expectFreeSize = static_cast<int64_t>(localHeap_->GetCommittedSize() - localHeap_->GetHeapAliveSizeAfterGC());
int64_t evacuateSize = std::min(evacuateSizeLimit, expectFreeSize);
EnumerateCollectRegionSet([&](Region *current) {
if (evacuateSize > 0) {
@ -502,7 +475,7 @@ void OldSpace::SelectCSet()
collectRegionSet_.resize(selectedRegionNumber);
}
heap_->GetEcmaGCStats()->SetRecordData(
localHeap_->GetEcmaGCStats()->SetRecordData(
RecordData::COLLECT_REGION_SET_SIZE, collectRegionSet_.size() * Region::AVERAGE_REGION_EVACUATE_SIZE);
EnumerateCollectRegionSet([&](Region *current) {
RemoveRegion(current);
@ -518,7 +491,7 @@ void OldSpace::CheckRegionSize()
{
#ifndef NDEBUG
if (sweepState_ == SweepState::SWEEPING) {
heap_->GetSweeper()->EnsureTaskFinished(spaceType_);
localHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
}
size_t available = allocator_->GetAvailableSize();
size_t wasted = allocator_->GetWastedSize();
@ -544,7 +517,7 @@ void OldSpace::RevertCSet()
void OldSpace::ReclaimCSet()
{
size_t cachedSize = heap_->GetRegionCachedSize();
size_t cachedSize = localHeap_->GetRegionCachedSize();
EnumerateCollectRegionSet([this, &cachedSize](Region *region) {
region->DeleteCrossRegionRSet();
region->DeleteOldToNewRSet();
@ -556,7 +529,7 @@ void OldSpace::ReclaimCSet()
collectRegionSet_.clear();
}
LocalSpace::LocalSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity)
LocalSpace::LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
: SparseSpace(heap, LOCAL_SPACE, initialCapacity, maximumCapacity) {}
bool LocalSpace::AddRegionToList(Region *region)
@ -586,23 +559,18 @@ void LocalSpace::Stop()
uintptr_t NonMovableSpace::CheckAndAllocate(size_t size)
{
if (maximumCapacity_ == committedSize_ && GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE &&
!heap_->GetOldGCRequested()) {
heap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
!localHeap_->GetOldGCRequested()) {
localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
}
return Allocate(size);
}
NonMovableSpace::NonMovableSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity)
NonMovableSpace::NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
: SparseSpace(heap, MemSpaceType::NON_MOVABLE, initialCapacity, maximumCapacity)
{
}
NonMovableSpace::NonMovableSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity, MemSpaceType type)
: SparseSpace(heap, type, initialCapacity, maximumCapacity)
{
}
AppSpawnSpace::AppSpawnSpace(BaseHeap *heap, size_t initialCapacity)
AppSpawnSpace::AppSpawnSpace(Heap *heap, size_t initialCapacity)
: SparseSpace(heap, MemSpaceType::APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}
@ -631,7 +599,7 @@ uintptr_t LocalSpace::Allocate(size_t size, bool isExpand)
return object;
}
MachineCodeSpace::MachineCodeSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity)
MachineCodeSpace::MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
: SparseSpace(heap, MemSpaceType::MACHINE_CODE_SPACE, initialCapacity, maximumCapacity)
{
}

View File

@ -51,7 +51,7 @@ class LocalSpace;
class SparseSpace : public Space {
public:
SparseSpace(BaseHeap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity);
SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity);
~SparseSpace() override
{
delete allocator_;
@ -64,7 +64,6 @@ public:
void ResetTopPointer(uintptr_t top);
uintptr_t Allocate(size_t size, bool allowGC = true);
uintptr_t ConcurrentAllocate(size_t size, bool allowGC = true);
bool Expand();
// For sweeping
@ -132,13 +131,13 @@ public:
protected:
FreeListAllocator *allocator_;
SweepState sweepState_ = SweepState::NO_SWEEP;
Heap *localHeap_ {nullptr};
private:
// For sweeping
uintptr_t AllocateAfterSweepingCompleted(size_t size);
Mutex lock_;
Mutex allocateLock_;
std::vector<Region *> sweepingList_;
std::vector<Region *> sweptList_;
size_t liveObjectSize_ {0};
@ -147,8 +146,7 @@ private:
class OldSpace : public SparseSpace {
public:
OldSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity);
OldSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity, MemSpaceType type);
OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity);
~OldSpace() override = default;
NO_COPY_SEMANTIC(OldSpace);
NO_MOVE_SEMANTIC(OldSpace);
@ -207,8 +205,7 @@ private:
class NonMovableSpace : public SparseSpace {
public:
NonMovableSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity);
NonMovableSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity, MemSpaceType type);
NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity);
~NonMovableSpace() override = default;
NO_COPY_SEMANTIC(NonMovableSpace);
NO_MOVE_SEMANTIC(NonMovableSpace);
@ -218,7 +215,7 @@ public:
class AppSpawnSpace : public SparseSpace {
public:
AppSpawnSpace(BaseHeap *heap, size_t initialCapacity);
AppSpawnSpace(Heap *heap, size_t initialCapacity);
~AppSpawnSpace() override = default;
NO_COPY_SEMANTIC(AppSpawnSpace);
NO_MOVE_SEMANTIC(AppSpawnSpace);
@ -229,7 +226,7 @@ public:
class LocalSpace : public SparseSpace {
public:
LocalSpace() = delete;
LocalSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity);
LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity);
~LocalSpace() override = default;
NO_COPY_SEMANTIC(LocalSpace);
NO_MOVE_SEMANTIC(LocalSpace);
@ -242,7 +239,7 @@ public:
class MachineCodeSpace : public SparseSpace {
public:
MachineCodeSpace(BaseHeap *heap, size_t initialCapacity, size_t maximumCapacity);
MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity);
~MachineCodeSpace() override = default;
NO_COPY_SEMANTIC(MachineCodeSpace);
NO_MOVE_SEMANTIC(MachineCodeSpace); // Note: Expand() left for define

View File

@ -28,14 +28,55 @@
#include "ecmascript/mem/tlab_allocator-inl.h"
namespace panda::ecmascript {
WorkManagerBase::WorkManagerBase(NativeAreaAllocator *allocator)
: spaceChunk_(allocator), workSpace_(0), spaceStart_(0), spaceEnd_(0)
{
workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
}
WorkNode *WorkManagerBase::AllocateWorkNode()
{
size_t totalSize = sizeof(WorkNode) + sizeof(Stack) + STACK_AREA_SIZE;
ASSERT(totalSize < WORKNODE_SPACE_SIZE);
// CAS bump-pointer allocation: reserve totalSize bytes from the current work space,
// falling back to a locked refill of a fresh chunk when the space is exhausted.
volatile auto atomicField = reinterpret_cast<volatile std::atomic<uintptr_t> *>(&spaceStart_);
bool result = false;
uintptr_t begin = 0;
do {
begin = atomicField->load(std::memory_order_acquire);
if (begin + totalSize >= spaceEnd_) {
LockHolder lock(mtx_);
begin = atomicField->load(std::memory_order_acquire);
if (begin + totalSize >= spaceEnd_) {
agedSpaces_.emplace_back(workSpace_);
workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
spaceStart_ = workSpace_;
spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
begin = spaceStart_;
}
}
result = std::atomic_compare_exchange_strong_explicit(atomicField, &begin, begin + totalSize,
std::memory_order_release, std::memory_order_relaxed);
} while (!result);
Stack *stack = reinterpret_cast<Stack *>(begin + sizeof(WorkNode));
stack->ResetBegin(begin + sizeof(WorkNode) + sizeof(Stack), begin + totalSize);
WorkNode *work = reinterpret_cast<WorkNode *>(begin);
return new (work) WorkNode(stack);
}
WorkManagerBase::~WorkManagerBase()
{
GetSpaceChunk()->Free(reinterpret_cast<void *>(workSpace_));
}
WorkManager::WorkManager(Heap *heap, uint32_t threadNum)
: heap_(heap), threadNum_(threadNum), spaceChunk_(heap_->GetNativeAreaAllocator()), continuousQueue_ { nullptr },
workSpace_(0), spaceStart_(0), spaceEnd_(0), parallelGCTaskPhase_(UNDEFINED_TASK)
: WorkManagerBase(heap->GetNativeAreaAllocator()), heap_(heap), threadNum_(threadNum),
continuousQueue_ { nullptr }, parallelGCTaskPhase_(UNDEFINED_TASK)
{
for (uint32_t i = 0; i < threadNum_; i++) {
continuousQueue_.at(i) = new ProcessQueue(heap);
continuousQueue_.at(i) = new ProcessQueue();
}
workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
}
WorkManager::~WorkManager()
@ -46,8 +87,6 @@ WorkManager::~WorkManager()
delete continuousQueue_.at(i);
continuousQueue_.at(i) = nullptr;
}
GetSpaceChunk()->Free(reinterpret_cast<void *>(workSpace_));
}
bool WorkManager::Push(uint32_t threadId, TaggedObject *object)
@ -125,10 +164,7 @@ size_t WorkManager::Finish()
aliveSize += holder.aliveSize_;
}
while (!agedSpaces_.empty()) {
GetSpaceChunk()->Free(reinterpret_cast<void *>(agedSpaces_.back()));
agedSpaces_.pop_back();
}
FinishBase();
initialized_.store(false, std::memory_order_release);
return aliveSize;
}
@ -146,14 +182,13 @@ void WorkManager::Finish(size_t &aliveSize, size_t &promotedSize)
void WorkManager::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase)
{
parallelGCTaskPhase_ = taskPhase;
spaceStart_ = workSpace_;
spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
InitializeBase();
for (uint32_t i = 0; i < threadNum_; i++) {
WorkNodeHolder &holder = works_.at(i);
holder.inNode_ = AllocateWorkNode();
holder.outNode_ = AllocateWorkNode();
holder.weakQueue_ = new ProcessQueue();
holder.weakQueue_->BeginMarking(heap_, continuousQueue_.at(i));
holder.weakQueue_->BeginMarking(continuousQueue_.at(i));
holder.aliveSize_ = 0;
holder.promotedSize_ = 0;
if (gcType != TriggerGCType::OLD_GC) {
@ -167,34 +202,97 @@ void WorkManager::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase
initialized_.store(true, std::memory_order_release);
}
WorkNode *WorkManager::AllocateWorkNode()
ShareGCWorkManager::ShareGCWorkManager(SharedHeap *heap, uint32_t threadNum)
: WorkManagerBase(heap->GetNativeAreaAllocator()), sHeap_(heap), threadNum_(threadNum),
continuousQueue_ { nullptr }
{
size_t totalSize = sizeof(WorkNode) + sizeof(Stack) + STACK_AREA_SIZE;
ASSERT(totalSize < WORKNODE_SPACE_SIZE);
for (uint32_t i = 0; i < threadNum_; i++) {
continuousQueue_.at(i) = new ProcessQueue();
}
}
// CAS
volatile auto atomicField = reinterpret_cast<volatile std::atomic<uintptr_t> *>(&spaceStart_);
bool result = false;
uintptr_t begin = 0;
do {
begin = atomicField->load(std::memory_order_acquire);
if (begin + totalSize >= spaceEnd_) {
LockHolder lock(mtx_);
begin = atomicField->load(std::memory_order_acquire);
if (begin + totalSize >= spaceEnd_) {
agedSpaces_.emplace_back(workSpace_);
workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
spaceStart_ = workSpace_;
spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
begin = spaceStart_;
}
ShareGCWorkManager::~ShareGCWorkManager()
{
Finish();
for (uint32_t i = 0; i < threadNum_; i++) {
continuousQueue_.at(i)->Destroy();
delete continuousQueue_.at(i);
continuousQueue_.at(i) = nullptr;
}
}
void ShareGCWorkManager::Initialize()
{
InitializeBase();
for (uint32_t i = 0; i < threadNum_; i++) {
ShareGCWorkNodeHolder &holder = works_.at(i);
holder.inNode_ = AllocateWorkNode();
holder.outNode_ = AllocateWorkNode();
holder.weakQueue_ = new ProcessQueue();
holder.weakQueue_->BeginMarking(continuousQueue_.at(i));
}
if (initialized_.load(std::memory_order_relaxed)) {
LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE();
}
initialized_.store(true, std::memory_order_release);
}
void ShareGCWorkManager::Finish()
{
for (uint32_t i = 0; i < threadNum_; i++) {
ShareGCWorkNodeHolder &holder = works_.at(i);
if (holder.weakQueue_ != nullptr) {
holder.weakQueue_->FinishMarking(continuousQueue_.at(i));
delete holder.weakQueue_;
holder.weakQueue_ = nullptr;
}
result = std::atomic_compare_exchange_strong_explicit(atomicField, &begin, begin + totalSize,
std::memory_order_release, std::memory_order_relaxed);
} while (!result);
Stack *stack = reinterpret_cast<Stack *>(begin + sizeof(WorkNode));
stack->ResetBegin(begin + sizeof(WorkNode) + sizeof(Stack), begin + totalSize);
WorkNode *work = reinterpret_cast<WorkNode *>(begin);
return new (work) WorkNode(stack);
}
FinishBase();
initialized_.store(false, std::memory_order_release);
}
bool ShareGCWorkManager::Push(uint32_t threadId, TaggedObject *object)
{
WorkNode *&inNode = works_.at(threadId).inNode_;
if (!inNode->PushObject(ToUintPtr(object))) {
PushWorkNodeToGlobal(threadId);
return inNode->PushObject(ToUintPtr(object));
}
return true;
}
void ShareGCWorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
WorkNode *&inNode = works_.at(threadId).inNode_;
if (!inNode->IsEmpty()) {
workStack_.Push(inNode);
inNode = AllocateWorkNode();
if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
sHeap_->PostGCMarkingTask();
}
}
}
bool ShareGCWorkManager::Pop(uint32_t threadId, TaggedObject **object)
{
WorkNode *&outNode = works_.at(threadId).outNode_;
WorkNode *&inNode = works_.at(threadId).inNode_;
if (!outNode->PopObject(reinterpret_cast<uintptr_t *>(object))) {
if (!inNode->IsEmpty()) {
WorkNode *tmp = outNode;
outNode = inNode;
inNode = tmp;
} else if (!PopWorkNodeFromGlobal(threadId)) {
return false;
}
return outNode->PopObject(reinterpret_cast<uintptr_t *>(object));
}
return true;
}
bool ShareGCWorkManager::PopWorkNodeFromGlobal(uint32_t threadId)
{
return workStack_.Pop(&works_.at(threadId).outNode_);
}
} // namespace panda::ecmascript
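The AllocateWorkNode body hoisted into WorkManagerBase above is a lock-free bump allocator with a locked refill; the same pattern as a self-contained sketch, with simplified chunk handling (the real code retires old chunks into agedSpaces_ instead of leaking them):
#include <atomic>
#include <cstdint>
#include <mutex>
class BumpChunkSketch {
public:
    uintptr_t Allocate(size_t size)
    {
        uintptr_t begin = start_.load(std::memory_order_acquire);
        do {
            if (begin + size >= end_) {
                std::lock_guard<std::mutex> lock(refillMutex_);
                begin = start_.load(std::memory_order_acquire);
                if (begin + size >= end_) {  // re-check: another thread may have refilled
                    Refill();
                    begin = start_.load(std::memory_order_acquire);
                }
            }
            // CAS claims [begin, begin + size); on failure, begin is reloaded.
        } while (!start_.compare_exchange_strong(begin, begin + size,
                                                 std::memory_order_release,
                                                 std::memory_order_relaxed));
        return begin;
    }
private:
    static constexpr size_t CHUNK_SIZE = 4096;  // stands in for WORKNODE_SPACE_SIZE
    void Refill()  // stands in for GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE)
    {
        auto fresh = reinterpret_cast<uintptr_t>(new char[CHUNK_SIZE]);
        start_.store(fresh, std::memory_order_release);
        end_ = fresh + CHUNK_SIZE;  // the old chunk is intentionally not freed in this sketch
    }
    std::atomic<uintptr_t> start_ {0};
    uintptr_t end_ {0};
    std::mutex refillMutex_;
};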

View File

@ -28,6 +28,7 @@ static constexpr uint32_t MARKSTACK_MAX_SIZE = 100;
static constexpr uint32_t STACK_AREA_SIZE = sizeof(uintptr_t) * MARKSTACK_MAX_SIZE;
class Heap;
class SharedHeap;
class Stack;
class SemiSpaceCollector;
class TlabAllocator;
@ -132,11 +133,49 @@ struct WorkNodeHolder {
size_t promotedSize_ = 0;
};
class WorkManager final {
class WorkManagerBase {
public:
WorkManagerBase(NativeAreaAllocator *allocator);
virtual ~WorkManagerBase();
WorkSpaceChunk *GetSpaceChunk() const
{
return const_cast<WorkSpaceChunk *>(&spaceChunk_);
}
void InitializeBase()
{
spaceStart_ = workSpace_;
spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
}
void FinishBase()
{
while (!agedSpaces_.empty()) {
GetSpaceChunk()->Free(reinterpret_cast<void *>(agedSpaces_.back()));
agedSpaces_.pop_back();
}
}
WorkNode *AllocateWorkNode();
Mutex mtx_;
private:
NO_COPY_SEMANTIC(WorkManagerBase);
NO_MOVE_SEMANTIC(WorkManagerBase);
WorkSpaceChunk spaceChunk_;
uintptr_t workSpace_;
uintptr_t spaceStart_;
uintptr_t spaceEnd_;
std::vector<uintptr_t> agedSpaces_;
};
class WorkManager : public WorkManagerBase {
public:
WorkManager() = delete;
WorkManager(Heap *heap, uint32_t threadNum);
~WorkManager();
~WorkManager() override;
void Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase);
size_t Finish();
@ -194,35 +233,75 @@ public:
{
return threadNum_;
}
inline bool HasInitialized() const
{
return initialized_.load(std::memory_order_acquire);
}
WorkSpaceChunk *GetSpaceChunk() const
{
return const_cast<WorkSpaceChunk *>(&spaceChunk_);
}
private:
NO_COPY_SEMANTIC(WorkManager);
NO_MOVE_SEMANTIC(WorkManager);
WorkNode *AllocateWorkNode();
Heap *heap_;
uint32_t threadNum_;
WorkSpaceChunk spaceChunk_;
std::array<WorkNodeHolder, MAX_TASKPOOL_THREAD_NUM + 1> works_;
std::array<ContinuousStack<JSTaggedType> *, MAX_TASKPOOL_THREAD_NUM + 1> continuousQueue_;
GlobalWorkStack workStack_;
uintptr_t workSpace_;
uintptr_t spaceStart_;
uintptr_t spaceEnd_;
std::vector<uintptr_t> agedSpaces_;
Mutex mtx_;
ParallelGCTaskPhase parallelGCTaskPhase_;
std::atomic<bool> initialized_ {false};
};
struct ShareGCWorkNodeHolder {
WorkNode *inNode_ {nullptr};
WorkNode *outNode_ {nullptr};
ProcessQueue *weakQueue_ {nullptr};
};
class ShareGCWorkManager : public WorkManagerBase {
public:
ShareGCWorkManager(SharedHeap *heap, uint32_t threadNum);
~ShareGCWorkManager() override;
void Initialize();
void Finish();
bool Push(uint32_t threadId, TaggedObject *object);
bool Pop(uint32_t threadId, TaggedObject **object);
bool PopWorkNodeFromGlobal(uint32_t threadId);
void PushWorkNodeToGlobal(uint32_t threadId, bool postTask = true);
inline void PushWeakReference(uint32_t threadId, JSTaggedType *weak)
{
works_.at(threadId).weakQueue_->PushBack(weak);
}
inline ProcessQueue *GetWeakReferenceQueue(uint32_t threadId) const
{
return works_.at(threadId).weakQueue_;
}
inline uint32_t GetTotalThreadNum()
{
return threadNum_;
}
inline bool HasInitialized() const
{
return initialized_.load(std::memory_order_acquire);
}
private:
NO_COPY_SEMANTIC(ShareGCWorkManager);
NO_MOVE_SEMANTIC(ShareGCWorkManager);
SharedHeap *sHeap_;
uint32_t threadNum_;
std::array<ShareGCWorkNodeHolder, MAX_TASKPOOL_THREAD_NUM + 1> works_;
std::array<ContinuousStack<JSTaggedType> *, MAX_TASKPOOL_THREAD_NUM + 1> continuousQueue_;
GlobalWorkStack workStack_;
std::atomic<bool> initialized_ {false};
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_WORK_MANAGER_H
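A usage sketch for the new manager from a marking thread, mirroring ShareGC and ProcessMarkStack earlier in this commit; note that Initialize/Finish bracket the whole collection, not each thread:
void ShareGCDrainSketch(ShareGCWorkManager *wm, uint32_t threadId)
{
    // Done once by ShareGC::Initialize before marking starts.
    wm->Initialize();
    TaggedObject *obj = nullptr;
    // Per-thread drain loop, as in ShareGCMarker::ProcessMarkStack:
    // Pop swaps the in/out nodes locally and falls back to the global stack.
    while (wm->Pop(threadId, &obj)) {
        // visit obj's fields; newly marked targets go back via wm->Push(threadId, ...)
    }
    // Done once by ShareGC::Finish after sweeping has been kicked off.
    wm->Finish();
}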

View File

@ -2376,18 +2376,9 @@ JSHandle<TaggedArray> ObjectFactory::NewAndCopyTaggedArray(JSHandle<TaggedArray>
if (newLength == 0) {
return dstElements;
}
// Region *region = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(*dstElements));
// if (region->InYoungSpace() && !thread_->IsConcurrentMarkingOrFinished()) {
// size_t size = oldLength * sizeof(JSTaggedType);
// if (memcpy_s(reinterpret_cast<void *>(dstElements->GetData()), size,
// reinterpret_cast<void *>(srcElements->GetData() + k), size) != EOK) {
// LOG_FULL(FATAL) << "memcpy_s failed";
// }
// } else {
for (uint32_t i = 0; i < oldLength; i++) {
dstElements->Set(thread_, i, srcElements->Get(i + k));
}
// }
for (uint32_t i = 0; i < oldLength; i++) {
dstElements->Set(thread_, i, srcElements->Get(i + k));
}
for (uint32_t i = oldLength; i < newLength; i++) {
dstElements->Set(thread_, i, JSTaggedValue::Hole());
}
@ -2435,18 +2426,9 @@ JSHandle<TaggedArray> ObjectFactory::NewAndCopyTaggedArrayByObject(JSHandle<JSOb
if (newLength == 0) {
return dstElements;
}
// Region *region = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(*dstElements));
// if (region->InYoungSpace() && !thread_->IsConcurrentMarkingOrFinished()) {
// size_t size = oldLength * sizeof(JSTaggedType);
// if (memcpy_s(reinterpret_cast<void *>(dstElements->GetData()), size,
// reinterpret_cast<void *>(srcElements->GetData() + k), size) != EOK) {
// LOG_FULL(FATAL) << "memcpy_s failed";
// }
// } else {
for (uint32_t i = 0; i < oldLength; i++) {
dstElements->Set(thread_, i, ElementAccessor::Get(thisObjHandle, i + k));
}
// }
for (uint32_t i = 0; i < oldLength; i++) {
dstElements->Set(thread_, i, ElementAccessor::Get(thisObjHandle, i + k));
}
for (uint32_t i = oldLength; i < newLength; i++) {
dstElements->Set(thread_, i, JSTaggedValue::Hole());
}
@ -2464,21 +2446,12 @@ JSHandle<MutantTaggedArray> ObjectFactory::NewAndCopyMutantTaggedArrayByObject(J
if (newLength == 0) {
return dstElements;
}
// Region *region = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(*dstElements));
// if (region->InYoungSpace() && !thread_->IsConcurrentMarkingOrFinished()) {
// size_t size = oldLength * sizeof(JSTaggedType);
// if (memcpy_s(reinterpret_cast<void *>(dstElements->GetData()), size,
// reinterpret_cast<void *>(srcElements->GetData() + k), size) != EOK) {
// LOG_FULL(FATAL) << "memcpy_s failed";
// }
// } else {
for (uint32_t i = 0; i < oldLength; i++) {
ElementsKind kind = thisObjHandle->GetClass()->GetElementsKind();
JSTaggedValue value = JSTaggedValue(ElementAccessor::ConvertTaggedValueWithElementsKind(
ElementAccessor::Get(thisObjHandle, i + k), kind));
dstElements->Set<false>(thread_, i, value);
}
// }
for (uint32_t i = 0; i < oldLength; i++) {
ElementsKind kind = thisObjHandle->GetClass()->GetElementsKind();
JSTaggedValue value = JSTaggedValue(ElementAccessor::ConvertTaggedValueWithElementsKind(
ElementAccessor::Get(thisObjHandle, i + k), kind));
dstElements->Set<false>(thread_, i, value);
}
for (uint32_t i = oldLength; i < newLength; i++) {
ElementsKind kind = thisObjHandle->GetClass()->GetElementsKind();
JSTaggedValue value = JSTaggedValue(ElementAccessor::ConvertTaggedValueWithElementsKind(JSTaggedValue::Hole(),

View File

@ -785,7 +785,7 @@ private:
NO_MOVE_SEMANTIC(ObjectFactory);
void NewObjectHook() const;
void NewSObjectHook() const;
// used for creating jshclass in GlobalEnv, EcmaVM
JSHandle<JSHClass> NewEcmaHClassClass(JSHClass *hclass, uint32_t size, JSType type);

View File

@ -58,7 +58,7 @@ void Runtime::InitializeIfFirstVm(EcmaVM *vm)
if (++vmCount_ == 1) {
PreInitialization(vm);
vm->Initialize();
PostInitialization();
PostInitialization(vm);
} else {
vm->Initialize();
}
@ -70,15 +70,16 @@ void Runtime::PreInitialization(const EcmaVM *vm)
nativeAreaAllocator_ = std::make_unique<NativeAreaAllocator>();
heapRegionAllocator_ = std::make_unique<HeapRegionAllocator>();
stringTable_ = std::make_unique<EcmaStringTable>();
SharedHeap::GetInstance()->Initialize(nativeAreaAllocator_.get(), heapRegionAllocator_.get());
SharedHeap::GetInstance()->Initialize(nativeAreaAllocator_.get(), heapRegionAllocator_.get(),
const_cast<EcmaVM*>(vm)->GetJSOptions());
}
void Runtime::PostInitialization()
void Runtime::PostInitialization(const EcmaVM *vm)
{
// Use the main thread's globalconst after it has initialized,
// and copy the shared parts to the other threads later.
globalConstants_ = mainThread_->GlobalConstants();
SharedHeap::GetInstance()->SetGlobalEnvConstants(globalConstants_);
SharedHeap::GetInstance()->PostInitialization(globalConstants_, const_cast<EcmaVM*>(vm)->GetJSOptions());
}
void Runtime::DestroyIfLastVm()

View File

@ -54,6 +54,12 @@ public:
return &mutatorLock_;
}
std::list<JSThread*> ThreadList()
{
LockHolder lock(threadsLock_);
return threads_;
}
inline const GlobalEnvConstants *GetGlobalEnvConstants()
{
return globalConstants_;
@ -71,7 +77,7 @@ private:
void ResumeAllThreadsImpl(JSThread *current);
void PreInitialization(const EcmaVM *vm);
void PostInitialization();
void PostInitialization(const EcmaVM *vm);
Mutex threadsLock_;
std::list<JSThread*> threads_;

View File

@ -412,7 +412,7 @@ void BaseDeserializer::UpdateBarrier(uintptr_t addr, ObjectSlot slot)
ASSERT(slot.SlotAddress() % static_cast<uint8_t>(MemAlignment::MEM_ALIGN_OBJECT) == 0);
rootRegion->InsertOldToNewRSet(slot.SlotAddress());
}
if (!rootRegion->InSharedSpace() && valueRegion->InSharedSpace()) {
if (!rootRegion->InSharedHeap() && valueRegion->InSharedSweepableSpace()) {
rootRegion->AtomicInsertLocalToShareRset(slot.SlotAddress());
}
if (thread_->IsConcurrentMarkingOrFinished()) {

View File

@ -25,7 +25,7 @@ SerializedObjectSpace BaseSerializer::GetSerializedObjectSpace(TaggedObject *obj
{
auto region = Region::ObjectAddressToRange(object);
// todo(lukai) allocateToSharedSpace please!
if (region->InYoungOrOldSpace() || region->InAppSpawnSpace() || region->InSharedSpace()) {
if (region->InYoungOrOldSpace() || region->InAppSpawnSpace() || region->InSharedHeap()) {
return SerializedObjectSpace::OLD_SPACE;
}
if (region->InNonMovableSpace() || region->InReadOnlySpace()) {

View File

@ -25,6 +25,15 @@
#include "ecmascript/jspandafile/program_object.h"
namespace panda::ecmascript {
void ObjectFactory::NewSObjectHook() const
{
#ifndef NDEBUG
if (vm_->GetJSOptions().EnableForceGC() && vm_->IsInitialized() && thread_->IsAllContextsInitialized()) {
sHeap_->CollectGarbage(thread_, TriggerGCType::SHARED_GC, GCReason::OTHER);
}
#endif
}
JSHandle<JSHClass> ObjectFactory::CreateSFunctionClass(uint32_t size, JSType type,
const JSHandle<JSTaggedValue> &prototype, bool isAccessor)
{
@ -63,7 +72,7 @@ JSHandle<JSHClass> ObjectFactory::NewSEcmaHClass(uint32_t size, JSType type, uin
JSHandle<JSHClass> ObjectFactory::NewSEcmaHClass(JSHClass *hclass, uint32_t size, JSType type, uint32_t inlinedProps)
{
NewObjectHook();
NewSObjectHook();
uint32_t classSize = JSHClass::SIZE;
auto *newClass = static_cast<JSHClass *>(sHeap_->AllocateNonMovableOrHugeObject(thread_, hclass, classSize));
newClass->Initialize(thread_, size, type, inlinedProps, thread_->GlobalConstants()->GetHandledEmptySLayoutInfo());
@ -74,7 +83,7 @@ JSHandle<JSHClass> ObjectFactory::NewSEcmaHClass(JSHClass *hclass, uint32_t size
JSHandle<JSHClass> ObjectFactory::NewSEcmaHClass(uint32_t size, uint32_t inlinedProps, JSType type,
const JSHandle<JSTaggedValue> &prototype, const JSHandle<JSTaggedValue> &layout)
{
NewObjectHook();
NewSObjectHook();
uint32_t classSize = JSHClass::SIZE;
auto *newClass = static_cast<JSHClass *>(sHeap_->AllocateNonMovableOrHugeObject(
thread_, JSHClass::Cast(thread_->GlobalConstants()->GetHClassClass().GetTaggedObject()), classSize));
@ -91,7 +100,7 @@ JSHandle<JSHClass> ObjectFactory::NewSEcmaHClass(uint32_t size, uint32_t inlined
JSHandle<JSHClass> ObjectFactory::NewSEcmaHClassClass(JSHClass *hclass, uint32_t size, JSType type)
{
NewObjectHook();
NewSObjectHook();
uint32_t classSize = JSHClass::SIZE;
auto *newClass = static_cast<JSHClass *>(sHeap_->AllocateClassClass(hclass, classSize));
newClass->Initialize(thread_, size, type, 0, thread_->GlobalConstants()->GetHandledEmptySLayoutInfo());
@ -99,9 +108,9 @@ JSHandle<JSHClass> ObjectFactory::NewSEcmaHClassClass(JSHClass *hclass, uint32_t
}
JSHandle<JSHClass> ObjectFactory::NewSEcmaReadOnlyHClass(JSHClass *hclass, uint32_t size, JSType type,
uint32_t inlinedProps)
uint32_t inlinedProps)
{
NewObjectHook();
NewSObjectHook();
uint32_t classSize = JSHClass::SIZE;
auto *newClass = static_cast<JSHClass *>(sHeap_->AllocateReadOnlyOrHugeObject(thread_, hclass, classSize));
newClass->Initialize(thread_, size, type, inlinedProps, thread_->GlobalConstants()->GetHandledEmptySLayoutInfo());
@ -118,7 +127,7 @@ JSHandle<JSHClass> ObjectFactory::InitSClassClass()
JSHandle<AccessorData> ObjectFactory::NewSAccessorData()
{
NewObjectHook();
NewSObjectHook();
TaggedObject *header = sHeap_->AllocateOldOrHugeObject(
thread_, JSHClass::Cast(thread_->GlobalConstants()->GetAccessorDataClass().GetTaggedObject()));
JSHandle<AccessorData> acc(thread_, AccessorData::Cast(header));
@ -151,7 +160,7 @@ JSHandle<Method> ObjectFactory::NewSMethod(const JSPandaFile *jsPandaFile, Metho
JSHandle<Method> ObjectFactory::NewSMethod(const MethodLiteral *methodLiteral, MemSpaceType spaceType)
{
ASSERT(spaceType == SHARED_NON_MOVABLE || spaceType == SHARED_OLD_SPACE);
NewObjectHook();
NewSObjectHook();
TaggedObject *header = nullptr;
if (spaceType == SHARED_NON_MOVABLE) {
header = sHeap_->AllocateNonMovableOrHugeObject(thread_,
@ -221,7 +230,7 @@ JSHandle<JSFunction> ObjectFactory::NewSFunctionByHClass(const void *func, const
TaggedObject *ObjectFactory::NewSharedOldSpaceObject(const JSHandle<JSHClass> &hclass)
{
NewObjectHook();
NewSObjectHook();
TaggedObject *header = sHeap_->AllocateOldOrHugeObject(thread_, *hclass);
uint32_t inobjPropCount = hclass->GetInlinedProperties();
if (inobjPropCount > 0) {
@ -247,7 +256,7 @@ JSHandle<TaggedArray> ObjectFactory::SharedEmptyArray() const
JSHandle<TaggedArray> ObjectFactory::NewSTaggedArrayWithoutInit(uint32_t length)
{
NewObjectHook();
NewSObjectHook();
size_t size = TaggedArray::ComputeSize(JSTaggedValue::TaggedTypeSize(), length);
auto arrayClass = JSHClass::Cast(thread_->GlobalConstants()->GetArrayClass().GetTaggedObject());
TaggedObject *header = sHeap_->AllocateOldOrHugeObject(thread_, arrayClass, size);
@ -277,7 +286,7 @@ JSHandle<LayoutInfo> ObjectFactory::CopyAndReSortSLayoutInfo(const JSHandle<Layo
JSHandle<TaggedArray> ObjectFactory::NewSDictionaryArray(uint32_t length)
{
NewObjectHook();
NewSObjectHook();
ASSERT(length > 0);
size_t size = TaggedArray::ComputeSize(JSTaggedValue::TaggedTypeSize(), length);
auto header = sHeap_->AllocateOldOrHugeObject(

View File

@ -1666,7 +1666,7 @@ void SnapshotProcessor::DeserializeTaggedField(uint64_t *value, TaggedObject *ro
ASSERT((ToUintPtr(value) % static_cast<uint8_t>(MemAlignment::MEM_ALIGN_OBJECT)) == 0);
rootRegion->InsertOldToNewRSet((uintptr_t)value);
}
if (!rootRegion->InSharedSpace() && valueRegion->InSharedSpace()) {
if (!rootRegion->InSharedHeap() && valueRegion->InSharedSweepableSpace()) {
rootRegion->AtomicInsertLocalToShareRset((uintptr_t)value);
}
*value = taggedObjectAddr;

View File

@ -2887,7 +2887,7 @@ void RuntimeStubs::StoreBarrier([[maybe_unused]] uintptr_t argGlue,
ASSERT((slotAddr % static_cast<uint8_t>(MemAlignment::MEM_ALIGN_OBJECT)) == 0);
objectRegion->InsertOldToNewRSet(slotAddr);
}
if (!objectRegion->InSharedSpace() && valueRegion->InSharedSpace()) {
if (!objectRegion->InSharedHeap() && valueRegion->InSharedSweepableSpace()) {
objectRegion->AtomicInsertLocalToShareRset(slotAddr);
}
if (!thread->IsConcurrentMarkingOrFinished()) {

View File

@ -222,18 +222,9 @@ void TaggedArray::CopyTaggedArrayElement(const JSThread *thread, JSHandle<Tagged
{
ASSERT(effectiveLength <= srcElements->GetLength());
ASSERT(effectiveLength <= dstElements->GetLength());
// Region *region = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(*dstElements));
// if (region->InYoungSpace() && !thread->IsConcurrentMarkingOrFinished()) {
// size_t size = effectiveLength * sizeof(JSTaggedType);
// if (memcpy_s(reinterpret_cast<void *>(dstElements->GetData()), size,
// reinterpret_cast<void *>(srcElements->GetData()), size) != EOK) {
// LOG_FULL(FATAL) << "memcpy_s failed" << " size: " << size;
// }
// } else {
for (uint32_t i = 0; i < effectiveLength; i++) {
dstElements->Set(thread, i, srcElements->Get(i));
}
// }
for (uint32_t i = 0; i < effectiveLength; i++) {
dstElements->Set(thread, i, srcElements->Get(i));
}
}
inline bool TaggedArray::IsDictionaryMode() const
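
The deleted fast path copied raw bits with memcpy_s; the surviving loop stores through Set(), which runs the write barrier. With a shared heap that distinction plausibly becomes load-bearing: a shared-heap value copied into a local array must be registered in the local-to-share remembered set, and a raw memcpy records nothing. The loop from the hunk above, annotated:

    for (uint32_t i = 0; i < effectiveLength; i++) {
        // Set() is a barrier-aware store: it updates the old-to-new and
        // local-to-share remembered sets as needed, which memcpy_s skipped.
        dstElements->Set(thread, i, srcElements->Get(i));
    }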

View File

@ -69,7 +69,7 @@ public:
int freeSize = table->Size() - table->EntriesCount() - numOfAddedElements;
if (table->HoleEntriesCount() > freeSize / 2) { // 2: half
int copyLength = Derived::GetEntryIndex(table->Size());
JSHandle<Derived> copyTable = table.GetTaggedValue().IsInSharedSpace() ?
JSHandle<Derived> copyTable = table.GetTaggedValue().IsInSharedHeap() ?
JSHandle<Derived>(thread->GetEcmaVM()->GetFactory()->NewSDictionaryArray(copyLength)) :
JSHandle<Derived>(thread->GetEcmaVM()->GetFactory()->NewDictionaryArray(copyLength));
copyTable->SetHashTableSize(thread, table->Size());
@ -82,7 +82,7 @@ public:
table->Size(), numOfAddedElements);
newSize = std::max(newSize, MIN_SHRINK_SIZE);
int length = Derived::GetEntryIndex(newSize);
JSHandle<Derived> newTable = table.GetTaggedValue().IsInSharedSpace() ?
JSHandle<Derived> newTable = table.GetTaggedValue().IsInSharedHeap() ?
JSHandle<Derived>(thread->GetEcmaVM()->GetFactory()->NewSDictionaryArray(length)) :
JSHandle<Derived>(thread->GetEcmaVM()->GetFactory()->NewDictionaryArray(length));
newTable->SetHashTableSize(thread, newSize);
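
Both rehash paths now branch on IsInSharedHeap(): a table living in the shared heap must get its replacement backing array from the shared factory as well, otherwise a shared dictionary would end up referencing a local, per-VM heap that other threads' collectors do not track. A sketch of that choice factored into one helper (PickBackingStore is a hypothetical name; the factory calls are the ones above):

    template <typename Derived>
    static JSHandle<Derived> PickBackingStore(JSThread *thread, const JSHandle<Derived> &table, int length)
    {
        ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
        // Keep the backing store in the same heap as the table itself: shared
        // tables stay visible to all threads, local tables stay local.
        return table.GetTaggedValue().IsInSharedHeap()
            ? JSHandle<Derived>(factory->NewSDictionaryArray(length))
            : JSHandle<Derived>(factory->NewDictionaryArray(length));
    }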