Enable shared full GC

Issue: https://gitee.com/openharmony/arkcompiler_ets_runtime/issues/IAMN77?from=project-issue

Signed-off-by: xiongluo <xiongluo@huawei.com>
Change-Id: I26a07b167f0d799d8f73f3b2fdbc78adbe16157a
This commit is contained in:
xiongluo 2024-09-19 18:49:45 +08:00
parent 1fad367a6f
commit 9f034b1314
80 changed files with 1969 additions and 736 deletions

View File

@ -839,6 +839,7 @@ ecma_source = [
"ecmascript/mem/shared_heap/shared_concurrent_marker.cpp", "ecmascript/mem/shared_heap/shared_concurrent_marker.cpp",
"ecmascript/mem/shared_heap/shared_concurrent_sweeper.cpp", "ecmascript/mem/shared_heap/shared_concurrent_sweeper.cpp",
"ecmascript/mem/shared_heap/shared_gc.cpp", "ecmascript/mem/shared_heap/shared_gc.cpp",
"ecmascript/mem/shared_heap/shared_full_gc.cpp",
"ecmascript/mem/shared_heap/shared_gc_marker.cpp", "ecmascript/mem/shared_heap/shared_gc_marker.cpp",
"ecmascript/mem/shared_heap/shared_space.cpp", "ecmascript/mem/shared_heap/shared_space.cpp",
"ecmascript/mem/stw_young_gc.cpp", "ecmascript/mem/stw_young_gc.cpp",

View File

@ -624,7 +624,7 @@ JSTaggedValue TypedArrayHelper::CreateSharedFromTypedArray(EcmaRuntimeCallInfo *
if (allocateResult == JSTaggedValue::Exception()) { if (allocateResult == JSTaggedValue::Exception()) {
return allocateResult; return allocateResult;
} }
JSTaggedValue checkResult = CheckBufferAndType(buffer, thread, obj, srcArray); JSTaggedValue checkResult = CheckBufferAndType(srcData.GetTaggedValue(), thread, obj, srcArray);
if (checkResult == JSTaggedValue::Exception()) { if (checkResult == JSTaggedValue::Exception()) {
return checkResult; return checkResult;
} }

View File

@ -217,7 +217,8 @@ JSTaggedValue BuiltinsArkTools::ForceFullGC(EcmaRuntimeCallInfo *info)
auto heap = const_cast<Heap *>(info->GetThread()->GetEcmaVM()->GetHeap()); auto heap = const_cast<Heap *>(info->GetThread()->GetEcmaVM()->GetHeap());
heap->CollectGarbage( heap->CollectGarbage(
TriggerGCType::FULL_GC, GCReason::EXTERNAL_TRIGGER); TriggerGCType::FULL_GC, GCReason::EXTERNAL_TRIGGER);
SharedHeap::GetInstance()->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::EXTERNAL_TRIGGER>(info->GetThread()); SharedHeap::GetInstance()->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::EXTERNAL_TRIGGER>(
info->GetThread());
heap->GetHeapPrepare(); heap->GetHeapPrepare();
return JSTaggedValue::True(); return JSTaggedValue::True();
} }

View File

@ -414,7 +414,7 @@ JSTaggedValue BuiltinsSharedArray::Concat(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -529,7 +529,7 @@ JSTaggedValue BuiltinsSharedArray::Entries(EcmaRuntimeCallInfo *argv)
// 1. Let O be ToObject(this value). // 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
JSHandle<JSObject> self = JSTaggedValue::ToObject(thread, GetThis(argv)); JSHandle<JSObject> self = JSTaggedValue::ToObject(thread, GetThis(argv));
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
// 3. Return CreateArrayIterator(O, "key+value"). // 3. Return CreateArrayIterator(O, "key+value").
JSHandle<JSSharedArrayIterator> iter(factory->NewJSSharedArrayIterator(self, IterationKind::KEY_AND_VALUE)); JSHandle<JSSharedArrayIterator> iter(factory->NewJSSharedArrayIterator(self, IterationKind::KEY_AND_VALUE));
@ -551,8 +551,7 @@ JSTaggedValue BuiltinsSharedArray::Fill(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisObjVal); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisObjVal);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(thread, thisObjVal);
thread, thisObjHandle.GetTaggedValue().GetTaggedObject());
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
if (thisObjVal->IsJSSharedArray()) { if (thisObjVal->IsJSSharedArray()) {
bool isDictionary = thisObjHandle->GetJSHClass()->IsDictionaryElement(); bool isDictionary = thisObjHandle->GetJSHClass()->IsDictionaryElement();
@ -702,7 +701,7 @@ JSTaggedValue BuiltinsSharedArray::Filter(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -773,7 +772,7 @@ JSTaggedValue BuiltinsSharedArray::Find(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -840,7 +839,7 @@ JSTaggedValue BuiltinsSharedArray::FindIndex(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -915,7 +914,7 @@ JSTaggedValue BuiltinsSharedArray::ForEach(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -1059,7 +1058,7 @@ JSTaggedValue BuiltinsSharedArray::IndexOf(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The indexOf method cannot be bound."); auto error = ContainerError::BindError(thread, "The indexOf method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSTaggedValue opResult; JSTaggedValue opResult;
if (thisHandle->IsStableJSArray(thread)) { if (thisHandle->IsStableJSArray(thread)) {
@ -1082,7 +1081,7 @@ JSTaggedValue BuiltinsSharedArray::Join(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The join method cannot be bound."); auto error = ContainerError::BindError(thread, "The join method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
auto opResult = BuiltinsArray::Join(argv); auto opResult = BuiltinsArray::Join(argv);
return opResult; return opResult;
@ -1099,7 +1098,7 @@ JSTaggedValue BuiltinsSharedArray::Keys(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The keys method cannot be bound."); auto error = ContainerError::BindError(thread, "The keys method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
auto opResult = BuiltinsArray::Keys(argv); auto opResult = BuiltinsArray::Keys(argv);
return opResult; return opResult;
@ -1191,7 +1190,7 @@ JSTaggedValue BuiltinsSharedArray::Map(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -1287,8 +1286,7 @@ JSTaggedValue BuiltinsSharedArray::Pop(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSTaggedValue opResult = PopInner(argv, thisHandle, thisObjHandle); JSTaggedValue opResult = PopInner(argv, thisHandle, thisObjHandle);
@ -1360,8 +1358,7 @@ JSTaggedValue BuiltinsSharedArray::Push(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The push method cannot be bound."); auto error = ContainerError::BindError(thread, "The push method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
if (thisHandle->IsStableJSArray(thread)) { if (thisHandle->IsStableJSArray(thread)) {
auto opResult = JSStableArray::Push(JSHandle<JSSharedArray>::Cast(thisHandle), argv); auto opResult = JSStableArray::Push(JSHandle<JSSharedArray>::Cast(thisHandle), argv);
@ -1462,7 +1459,7 @@ JSTaggedValue BuiltinsSharedArray::Reduce(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -1541,8 +1538,7 @@ JSTaggedValue BuiltinsSharedArray::Shift(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
if (thisHandle->IsStableJSArray(thread) && JSObject::IsArrayLengthWritable(thread, thisObjHandle)) { if (thisHandle->IsStableJSArray(thread) && JSObject::IsArrayLengthWritable(thread, thisObjHandle)) {
@ -1636,7 +1632,7 @@ JSTaggedValue BuiltinsSharedArray::Slice(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -1770,8 +1766,7 @@ JSTaggedValue BuiltinsSharedArray::Sort(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
// Array sort // Array sort
@ -1801,7 +1796,7 @@ JSTaggedValue BuiltinsSharedArray::Splice(EcmaRuntimeCallInfo *argv)
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(
thread, thisHandle.GetTaggedValue().GetTaggedObject()); thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -2016,7 +2011,7 @@ JSTaggedValue BuiltinsSharedArray::ToString(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
// 2. ReturnIfAbrupt(array). // 2. ReturnIfAbrupt(array).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -2064,8 +2059,7 @@ JSTaggedValue BuiltinsSharedArray::Unshift(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
@ -2164,7 +2158,7 @@ JSTaggedValue BuiltinsSharedArray::Values(EcmaRuntimeCallInfo *argv)
// 1. Let O be ToObject(this value). // 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
JSHandle<JSObject> self = JSTaggedValue::ToObject(thread, GetThis(argv)); JSHandle<JSObject> self = JSTaggedValue::ToObject(thread, GetThis(argv));
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
// 3. Return CreateArrayIterator(O, "value"). // 3. Return CreateArrayIterator(O, "value").
JSHandle<JSSharedArrayIterator> iter(factory->NewJSSharedArrayIterator(self, IterationKind::VALUE)); JSHandle<JSSharedArrayIterator> iter(factory->NewJSSharedArrayIterator(self, IterationKind::VALUE));
@ -2247,7 +2241,7 @@ JSTaggedValue BuiltinsSharedArray::Includes(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
uint32_t argc = argv->GetArgsNumber(); uint32_t argc = argv->GetArgsNumber();
@ -2322,7 +2316,7 @@ JSTaggedValue BuiltinsSharedArray::At(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The at method cannot be bound."); auto error = ContainerError::BindError(thread, "The at method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, thisHandle);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
if (thisHandle->IsStableJSArray(thread)) { if (thisHandle->IsStableJSArray(thread)) {
auto opResult = JSStableArray::At(JSHandle<JSSharedArray>::Cast(thisHandle), argv); auto opResult = JSStableArray::At(JSHandle<JSSharedArray>::Cast(thisHandle), argv);
@ -2383,8 +2377,7 @@ JSTaggedValue BuiltinsSharedArray::ShrinkTo(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
JSHandle<JSTaggedValue> newLengthValue = GetCallArg(argv, 0); JSHandle<JSTaggedValue> newLengthValue = GetCallArg(argv, 0);
if (!newLengthValue->IsNumber()) { if (!newLengthValue->IsNumber()) {
auto error = ContainerError::ParamError(thread, "Parameter error.Invalid array length."); auto error = ContainerError::ParamError(thread, "Parameter error.Invalid array length.");
@ -2423,8 +2416,7 @@ JSTaggedValue BuiltinsSharedArray::ExtendTo(EcmaRuntimeCallInfo *argv)
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
JSHandle<JSTaggedValue> newLengthValue = GetCallArg(argv, 0); JSHandle<JSTaggedValue> newLengthValue = GetCallArg(argv, 0);
if (!newLengthValue->IsNumber()) { if (!newLengthValue->IsNumber()) {
auto error = ContainerError::ParamError(thread, "Parameter error.Invalid array length."); auto error = ContainerError::ParamError(thread, "Parameter error.Invalid array length.");

View File

@ -131,9 +131,9 @@ JSTaggedValue BuiltinsSharedMap::Has(EcmaRuntimeCallInfo *argv)
"The has method cannot be bound."); "The has method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSSharedMap *jsMap = JSSharedMap::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle<JSSharedMap> map(self);
JSHandle<JSTaggedValue> key = GetCallArg(argv, 0); JSHandle<JSTaggedValue> key = GetCallArg(argv, 0);
bool flag = jsMap->Has(thread, key.GetTaggedValue()); bool flag = JSSharedMap::Has(thread, map, key.GetTaggedValue());
return GetTaggedBoolean(flag); return GetTaggedBoolean(flag);
} }
@ -148,9 +148,9 @@ JSTaggedValue BuiltinsSharedMap::Get(EcmaRuntimeCallInfo *argv)
"The get method cannot be bound."); "The get method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSSharedMap *jsMap = JSSharedMap::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle<JSSharedMap> map(self);
JSHandle<JSTaggedValue> key = GetCallArg(argv, 0); JSHandle<JSTaggedValue> key = GetCallArg(argv, 0);
JSTaggedValue value = jsMap->Get(thread, key.GetTaggedValue()); JSTaggedValue value = JSSharedMap::Get(thread, map, key.GetTaggedValue());
return value; return value;
} }
@ -165,7 +165,7 @@ JSTaggedValue BuiltinsSharedMap::ForEach(EcmaRuntimeCallInfo *argv)
"The forEach method cannot be bound."); "The forEach method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, self.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, self);
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception());
JSHandle<JSSharedMap> map(self); JSHandle<JSSharedMap> map(self);
JSHandle<JSTaggedValue> func(GetCallArg(argv, 0)); JSHandle<JSTaggedValue> func(GetCallArg(argv, 0));
@ -210,8 +210,8 @@ JSTaggedValue BuiltinsSharedMap::GetSize(EcmaRuntimeCallInfo *argv)
if (!self->IsJSSharedMap()) { if (!self->IsJSSharedMap()) {
THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not SharedMap", JSTaggedValue::Exception()); THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not SharedMap", JSTaggedValue::Exception());
} }
JSSharedMap *jsMap = JSSharedMap::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle<JSSharedMap> map(self);
uint32_t size = jsMap->GetSize(thread); uint32_t size = JSSharedMap::GetSize(thread, map);
return JSTaggedValue(size); return JSTaggedValue(size);
} }

View File

@ -172,9 +172,9 @@ JSTaggedValue BuiltinsSharedSet::Has(EcmaRuntimeCallInfo *argv)
"The has method cannot be bound."); "The has method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
JSSharedSet* jsSet = JSSharedSet::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle<JSSharedSet> set(self);
JSHandle<JSTaggedValue> value = GetCallArg(argv, 0); JSHandle<JSTaggedValue> value = GetCallArg(argv, 0);
bool flag = jsSet->Has(thread, value.GetTaggedValue()); bool flag = JSSharedSet::Has(thread, set, value.GetTaggedValue());
return GetTaggedBoolean(flag); return GetTaggedBoolean(flag);
} }
@ -189,7 +189,7 @@ JSTaggedValue BuiltinsSharedSet::ForEach(EcmaRuntimeCallInfo *argv)
"The forEach method cannot be bound."); "The forEach method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedSet> scope(thread, self.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSSharedSet> scope(thread, self);
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception());
JSHandle<JSSharedSet> set(self); JSHandle<JSSharedSet> set(self);
@ -233,8 +233,8 @@ JSTaggedValue BuiltinsSharedSet::GetSize(EcmaRuntimeCallInfo *argv)
if (!self->IsJSSharedSet()) { if (!self->IsJSSharedSet()) {
THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not SharedSet", JSTaggedValue::Exception()); THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not SharedSet", JSTaggedValue::Exception());
} }
JSSharedSet* jsSet = JSSharedSet::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle<JSSharedSet> set(self);
uint32_t size = jsSet->GetSize(thread); uint32_t size = JSSharedSet::GetSize(thread, set);
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue(0)); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue(0));
return JSTaggedValue(size); return JSTaggedValue(size);
} }

View File

@ -450,8 +450,7 @@ JSTaggedValue BuiltinsSharedTypedArray::CopyWithin(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The copyWithin method cannot be bound."); auto error = ContainerError::BindError(thread, "The copyWithin method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
return BuiltinsArray::CopyWithin(argv); return BuiltinsArray::CopyWithin(argv);
} }
@ -493,8 +492,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Every(EcmaRuntimeCallInfo *argv)
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
// 3. Let len be ToLength(Get(O, "length")). // 3. Let len be ToLength(Get(O, "length")).
@ -559,8 +557,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Fill(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The fill method cannot be bound."); auto error = ContainerError::BindError(thread, "The fill method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
return BuiltinsArray::Fill(argv); return BuiltinsArray::Fill(argv);
} }
@ -590,8 +587,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Filter(EcmaRuntimeCallInfo *argv)
if (!callbackFnHandle->IsCallable()) { if (!callbackFnHandle->IsCallable()) {
THROW_TYPE_ERROR_AND_RETURN(thread, "the callbackfun is not callable.", JSTaggedValue::Exception()); THROW_TYPE_ERROR_AND_RETURN(thread, "the callbackfun is not callable.", JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
// 6. If thisArg was supplied, let T be thisArg; else let T be undefined. // 6. If thisArg was supplied, let T be thisArg; else let T be undefined.
JSHandle<JSTaggedValue> thisArgHandle = GetCallArg(argv, 1); JSHandle<JSTaggedValue> thisArgHandle = GetCallArg(argv, 1);
@ -665,8 +661,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Find(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The find method cannot be bound."); auto error = ContainerError::BindError(thread, "The find method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
return BuiltinsArray::Find(argv); return BuiltinsArray::Find(argv);
} }
@ -680,8 +675,7 @@ JSTaggedValue BuiltinsSharedTypedArray::FindIndex(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The findIndex method cannot be bound."); auto error = ContainerError::BindError(thread, "The findIndex method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
return BuiltinsArray::FindIndex(argv); return BuiltinsArray::FindIndex(argv);
} }
@ -701,8 +695,7 @@ JSTaggedValue BuiltinsSharedTypedArray::ForEach(EcmaRuntimeCallInfo *argv)
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
// 3. Let len be ToLength(Get(O, "length")). // 3. Let len be ToLength(Get(O, "length")).
@ -761,8 +754,7 @@ JSTaggedValue BuiltinsSharedTypedArray::IndexOf(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The indexOf method cannot be bound."); auto error = ContainerError::BindError(thread, "The indexOf method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
return BuiltinsArray::IndexOf(argv); return BuiltinsArray::IndexOf(argv);
} }
@ -778,8 +770,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Join(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The join method cannot be bound."); auto error = ContainerError::BindError(thread, "The join method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
uint32_t length = JSHandle<JSTypedArray>::Cast(thisHandle)->GetArrayLength(); uint32_t length = JSHandle<JSTypedArray>::Cast(thisHandle)->GetArrayLength();
JSHandle<JSTaggedValue> sepHandle = GetCallArg(argv, 0); JSHandle<JSTaggedValue> sepHandle = GetCallArg(argv, 0);
@ -934,8 +925,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Map(EcmaRuntimeCallInfo *argv)
} }
// 3. ReturnIfAbrupt(valid). // 3. ReturnIfAbrupt(valid).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
JSHandle<JSTypedArray> thisObj(thisHandle); JSHandle<JSTypedArray> thisObj(thisHandle);
// 4. Let len be the value of Os [[ArrayLength]] internal slot. // 4. Let len be the value of Os [[ArrayLength]] internal slot.
@ -1000,8 +990,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Reduce(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The reduce method cannot be bound."); auto error = ContainerError::BindError(thread, "The reduce method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
return BuiltinsArray::Reduce(argv); return BuiltinsArray::Reduce(argv);
} }
@ -1021,8 +1010,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Reverse(EcmaRuntimeCallInfo *argv)
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
// 2. ReturnIfAbrupt(O). // 2. ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
// 3. Let len be O.[[ArrayLength]] // 3. Let len be O.[[ArrayLength]]
@ -1087,8 +1075,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Set(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The set method cannot be bound."); auto error = ContainerError::BindError(thread, "The set method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray, ModType::WRITE> scope(thread, target);
thread, target.GetTaggedValue().GetTaggedObject());
// 5. Assert: target has a [[ViewedArrayBuffer]] internal slot. // 5. Assert: target has a [[ViewedArrayBuffer]] internal slot.
// 6. Let targetOffset be ToInteger (offset). // 6. Let targetOffset be ToInteger (offset).
@ -1343,8 +1330,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Slice(EcmaRuntimeCallInfo *argv)
// 3. ReturnIfAbrupt(valid). // 3. ReturnIfAbrupt(valid).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
JSHandle<JSTypedArray> thisObj(thisHandle); JSHandle<JSTypedArray> thisObj(thisHandle);
// 4. Let len be the value of Os [[ArrayLength]] internal slot. // 4. Let len be the value of Os [[ArrayLength]] internal slot.
@ -1476,8 +1462,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Some(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The some method cannot be bound."); auto error = ContainerError::BindError(thread, "The some method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
return BuiltinsArray::Some(argv); return BuiltinsArray::Some(argv);
} }
@ -1497,8 +1482,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Sort(EcmaRuntimeCallInfo *argv)
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray, ModType::WRITE> scope(thread, thisHandle);
thread, thisHandle.GetTaggedValue().GetTaggedObject());
JSHandle<JSTaggedValue> thisObjVal(thisObjHandle); JSHandle<JSTaggedValue> thisObjVal(thisObjHandle);
JSHandle<JSTaggedValue> buffer; JSHandle<JSTaggedValue> buffer;
@ -1573,8 +1557,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Subarray(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The subarray method cannot be bound."); auto error = ContainerError::BindError(thread, "The subarray method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
// 4. Assert: O has a [[ViewedArrayBuffer]] internal slot. // 4. Assert: O has a [[ViewedArrayBuffer]] internal slot.
// 6. Let srcLength be the value of Os [[ArrayLength]] internal slot. // 6. Let srcLength be the value of Os [[ArrayLength]] internal slot.
@ -1654,8 +1637,7 @@ JSTaggedValue BuiltinsSharedTypedArray::ToLocaleString(EcmaRuntimeCallInfo *argv
auto error = ContainerError::BindError(thread, "The toLocaleString method cannot be bound."); auto error = ContainerError::BindError(thread, "The toLocaleString method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
return BuiltinsArray::ToLocaleString(argv); return BuiltinsArray::ToLocaleString(argv);
} }
@ -1669,8 +1651,7 @@ JSTaggedValue BuiltinsSharedTypedArray::ToString(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The toString method cannot be bound."); auto error = ContainerError::BindError(thread, "The toString method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
return BuiltinsArray::ToString(argv); return BuiltinsArray::ToString(argv);
} }
@ -1737,8 +1718,7 @@ JSTaggedValue BuiltinsSharedTypedArray::At(EcmaRuntimeCallInfo *argv)
JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); JSHandle<JSObject> thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
// ReturnIfAbrupt(O). // ReturnIfAbrupt(O).
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
// 3. Let len be O.[[ArrayLength]]. // 3. Let len be O.[[ArrayLength]].
uint32_t len = JSHandle<JSTypedArray>::Cast(thisObjHandle)->GetArrayLength(); uint32_t len = JSHandle<JSTypedArray>::Cast(thisObjHandle)->GetArrayLength();
@ -1774,8 +1754,7 @@ JSTaggedValue BuiltinsSharedTypedArray::Includes(EcmaRuntimeCallInfo *argv)
auto error = ContainerError::BindError(thread, "The includes method cannot be bound."); auto error = ContainerError::BindError(thread, "The includes method cannot be bound.");
THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, thisHandle);
thisHandle.GetTaggedValue().GetTaggedObject());
return BuiltinsArray::Includes(argv); return BuiltinsArray::Includes(argv);
} }
} // namespace panda::ecmascript::builtins } // namespace panda::ecmascript::builtins

View File

@ -944,7 +944,7 @@ HWTEST_F_L0(BuiltinsStringTest, Replace)
JSHandle<EcmaString> replaceStr1 = factory->NewFromASCII("abc$$"); JSHandle<EcmaString> replaceStr1 = factory->NewFromASCII("abc$$");
JSHandle<EcmaString> expected1 = factory->NewFromASCII("Twas the night before abc$..."); JSHandle<EcmaString> expected1 = factory->NewFromASCII("Twas the night before abc$...");
args[0] = searchStr.GetTaggedValue();
args[1] = replaceStr1.GetTaggedValue(); args[1] = replaceStr1.GetTaggedValue();
auto result1 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE); auto result1 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE);
@ -954,7 +954,7 @@ HWTEST_F_L0(BuiltinsStringTest, Replace)
JSHandle<EcmaString> replaceStr2 = factory->NewFromASCII("abc$$dd"); JSHandle<EcmaString> replaceStr2 = factory->NewFromASCII("abc$$dd");
JSHandle<EcmaString> expected2 = factory->NewFromASCII("Twas the night before abc$dd..."); JSHandle<EcmaString> expected2 = factory->NewFromASCII("Twas the night before abc$dd...");
args[0] = searchStr.GetTaggedValue();
args[1] = replaceStr2.GetTaggedValue(); args[1] = replaceStr2.GetTaggedValue();
auto result2 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE); auto result2 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE);
@ -964,7 +964,7 @@ HWTEST_F_L0(BuiltinsStringTest, Replace)
JSHandle<EcmaString> replaceStr3 = factory->NewFromASCII("abc$&dd"); JSHandle<EcmaString> replaceStr3 = factory->NewFromASCII("abc$&dd");
JSHandle<EcmaString> expected3 = factory->NewFromASCII("Twas the night before abcXmasdd..."); JSHandle<EcmaString> expected3 = factory->NewFromASCII("Twas the night before abcXmasdd...");
args[0] = searchStr.GetTaggedValue();
args[1] = replaceStr3.GetTaggedValue(); args[1] = replaceStr3.GetTaggedValue();
auto result3 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE); auto result3 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE);
@ -975,7 +975,7 @@ HWTEST_F_L0(BuiltinsStringTest, Replace)
JSHandle<EcmaString> replaceStr4 = factory->NewFromASCII("abc$`dd"); JSHandle<EcmaString> replaceStr4 = factory->NewFromASCII("abc$`dd");
JSHandle<EcmaString> expected4 = JSHandle<EcmaString> expected4 =
factory->NewFromASCII("Twas the night before abcTwas the night before dd..."); factory->NewFromASCII("Twas the night before abcTwas the night before dd...");
args[0] = searchStr.GetTaggedValue();
args[1] = replaceStr4.GetTaggedValue(); args[1] = replaceStr4.GetTaggedValue();
auto result4 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE); auto result4 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE);
@ -1001,7 +1001,7 @@ HWTEST_F_L0(BuiltinsStringTest, Replace2)
JSHandle<EcmaString> replaceStr2 = factory->NewFromASCII("abc$`dd$\'$ff"); JSHandle<EcmaString> replaceStr2 = factory->NewFromASCII("abc$`dd$\'$ff");
JSHandle<EcmaString> expected2 = JSHandle<EcmaString> expected2 =
factory->NewFromASCII("Twas the night before abcTwas the night before dd...$ff..."); factory->NewFromASCII("Twas the night before abcTwas the night before dd...$ff...");
args[0] = searchStr.GetTaggedValue();
args[1] = replaceStr2.GetTaggedValue(); args[1] = replaceStr2.GetTaggedValue();
auto result2 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE); auto result2 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE);
@ -1013,7 +1013,7 @@ HWTEST_F_L0(BuiltinsStringTest, Replace2)
JSHandle<EcmaString> replaceStr3 = factory->NewFromASCII("abc$`dd$\'$"); JSHandle<EcmaString> replaceStr3 = factory->NewFromASCII("abc$`dd$\'$");
JSHandle<EcmaString> expected3 = JSHandle<EcmaString> expected3 =
factory->NewFromASCII("Twas the night before abcTwas the night before dd...$..."); factory->NewFromASCII("Twas the night before abcTwas the night before dd...$...");
args[0] = searchStr.GetTaggedValue();
args[1] = replaceStr3.GetTaggedValue(); args[1] = replaceStr3.GetTaggedValue();
auto result3 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE); auto result3 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE);
@ -1024,7 +1024,7 @@ HWTEST_F_L0(BuiltinsStringTest, Replace2)
JSHandle<EcmaString> replaceStr4 = factory->NewFromASCII("abc$`dd$$"); JSHandle<EcmaString> replaceStr4 = factory->NewFromASCII("abc$`dd$$");
JSHandle<EcmaString> expected4 = JSHandle<EcmaString> expected4 =
factory->NewFromASCII("Twas the night before abcTwas the night before dd$..."); factory->NewFromASCII("Twas the night before abcTwas the night before dd$...");
args[0] = searchStr.GetTaggedValue();
args[1] = replaceStr4.GetTaggedValue(); args[1] = replaceStr4.GetTaggedValue();
auto result4 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE); auto result4 = StringAlgorithm(thread, thisStr.GetTaggedValue(), args, 8, AlgorithmType::REPLACE);

View File

@ -40,6 +40,8 @@ enum TriggerGCType {
// GC is expected to compress objects into appspawn space; // GC is expected to compress objects into appspawn space;
APPSPAWN_FULL_GC, APPSPAWN_FULL_GC,
SHARED_GC, SHARED_GC,
SHARED_FULL_GC,
APPSPAWN_SHARED_FULL_GC,
GC_TYPE_LAST GC_TYPE_LAST
}; };

View File

@ -106,7 +106,7 @@ GateRef NewObjectStubBuilder::NewJSArrayWithSize(GateRef hclass, GateRef size)
return result; return result;
} }
void NewObjectStubBuilder::NewJSObject(Variable *result, Label *exit, GateRef hclass, MemoryAttribute mAttr) void NewObjectStubBuilder::NewJSObject(Variable *result, Label *exit, GateRef hclass)
{ {
auto env = GetEnvironment(); auto env = GetEnvironment();
@ -117,11 +117,7 @@ void NewObjectStubBuilder::NewJSObject(Variable *result, Label *exit, GateRef hc
AllocateInYoung(result, &hasPendingException, &noException, hclass); AllocateInYoung(result, &hasPendingException, &noException, hclass);
Bind(&noException); Bind(&noException);
{ {
if (mAttr.Value() == MemoryAttribute::NoBarrier().Value()) { StoreHClass(glue_, result->ReadVariable(), hclass);
StoreHClassWithoutBarrier(glue_, result->ReadVariable(), hclass);
} else {
StoreHClass(glue_, result->ReadVariable(), hclass);
}
DEFVARIABLE(initValue, VariableType::JS_ANY(), Undefined()); DEFVARIABLE(initValue, VariableType::JS_ANY(), Undefined());
Label isTS(env); Label isTS(env);
Label initialize(env); Label initialize(env);
@ -136,7 +132,8 @@ void NewObjectStubBuilder::NewJSObject(Variable *result, Label *exit, GateRef hc
Bind(&initialize); Bind(&initialize);
Label afterInitialize(env); Label afterInitialize(env);
InitializeWithSpeicalValue(&afterInitialize, InitializeWithSpeicalValue(&afterInitialize,
result->ReadVariable(), *initValue, Int32(JSObject::SIZE), ChangeIntPtrToInt32(size_), mAttr); result->ReadVariable(), *initValue, Int32(JSObject::SIZE), ChangeIntPtrToInt32(size_),
MemoryAttribute::NoBarrier());
Bind(&afterInitialize); Bind(&afterInitialize);
auto emptyArray = GetGlobalConstantValue( auto emptyArray = GetGlobalConstantValue(
VariableType::JS_POINTER(), glue_, ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX); VariableType::JS_POINTER(), glue_, ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX);
@ -153,7 +150,7 @@ void NewObjectStubBuilder::NewJSObject(Variable *result, Label *exit, GateRef hc
} }
} }
void NewObjectStubBuilder::NewSObject(Variable *result, Label *exit, GateRef hclass, MemoryAttribute mAttr) void NewObjectStubBuilder::NewSObject(Variable *result, Label *exit, GateRef hclass)
{ {
auto env = GetEnvironment(); auto env = GetEnvironment();
@ -162,11 +159,7 @@ void NewObjectStubBuilder::NewSObject(Variable *result, Label *exit, GateRef hcl
AllocateInSOld(result, &afterAllocate, hclass); AllocateInSOld(result, &afterAllocate, hclass);
Bind(&afterAllocate); Bind(&afterAllocate);
{ {
if (mAttr.Value() == MemoryAttribute::NoBarrier().Value()) { StoreHClass(glue_, result->ReadVariable(), hclass);
StoreHClassWithoutBarrier(glue_, result->ReadVariable(), hclass);
} else {
StoreHClass(glue_, result->ReadVariable(), hclass);
}
DEFVARIABLE(initValue, VariableType::JS_ANY(), Undefined()); DEFVARIABLE(initValue, VariableType::JS_ANY(), Undefined());
Label isTS(env); Label isTS(env);
Label initialize(env); Label initialize(env);
@ -181,7 +174,8 @@ void NewObjectStubBuilder::NewSObject(Variable *result, Label *exit, GateRef hcl
Bind(&initialize); Bind(&initialize);
Label afterInitialize(env); Label afterInitialize(env);
InitializeWithSpeicalValue(&afterInitialize, InitializeWithSpeicalValue(&afterInitialize,
result->ReadVariable(), *initValue, Int32(JSObject::SIZE), ChangeIntPtrToInt32(size_), mAttr); result->ReadVariable(), *initValue, Int32(JSObject::SIZE), ChangeIntPtrToInt32(size_),
MemoryAttribute::NoBarrier());
Bind(&afterInitialize); Bind(&afterInitialize);
auto emptyArray = GetGlobalConstantValue( auto emptyArray = GetGlobalConstantValue(
VariableType::JS_POINTER(), glue_, ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX); VariableType::JS_POINTER(), glue_, ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX);
@ -274,7 +268,8 @@ void NewObjectStubBuilder::NewJSObject(Variable *result, Label *exit, GateRef hc
DEFVARIABLE(initValue, VariableType::JS_ANY(), Undefined()); DEFVARIABLE(initValue, VariableType::JS_ANY(), Undefined());
Label afterInitialize(env); Label afterInitialize(env);
InitializeWithSpeicalValue(&afterInitialize, InitializeWithSpeicalValue(&afterInitialize,
result->ReadVariable(), *initValue, Int32(JSObject::SIZE), ChangeIntPtrToInt32(size_)); result->ReadVariable(), *initValue, Int32(JSObject::SIZE), ChangeIntPtrToInt32(size_),
MemoryAttribute::NoBarrier());
Bind(&afterInitialize); Bind(&afterInitialize);
auto emptyArray = GetGlobalConstantValue( auto emptyArray = GetGlobalConstantValue(
VariableType::JS_POINTER(), glue_, ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX); VariableType::JS_POINTER(), glue_, ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX);
@ -286,7 +281,7 @@ void NewObjectStubBuilder::NewJSObject(Variable *result, Label *exit, GateRef hc
Jump(exit); Jump(exit);
} }
GateRef NewObjectStubBuilder::NewJSObject(GateRef glue, GateRef hclass, MemoryAttribute mAttr) GateRef NewObjectStubBuilder::NewJSObject(GateRef glue, GateRef hclass)
{ {
auto env = GetEnvironment(); auto env = GetEnvironment();
Label entry(env); Label entry(env);
@ -295,7 +290,7 @@ GateRef NewObjectStubBuilder::NewJSObject(GateRef glue, GateRef hclass, MemoryAt
DEFVARIABLE(result, VariableType::JS_ANY(), Undefined()); DEFVARIABLE(result, VariableType::JS_ANY(), Undefined());
SetGlue(glue); SetGlue(glue);
NewJSObject(&result, &exit, hclass, mAttr); NewJSObject(&result, &exit, hclass);
Bind(&exit); Bind(&exit);
auto ret = *result; auto ret = *result;
@ -303,7 +298,7 @@ GateRef NewObjectStubBuilder::NewJSObject(GateRef glue, GateRef hclass, MemoryAt
return ret; return ret;
} }
GateRef NewObjectStubBuilder::NewSObject(GateRef glue, GateRef hclass, MemoryAttribute mAttr) GateRef NewObjectStubBuilder::NewSObject(GateRef glue, GateRef hclass)
{ {
auto env = GetEnvironment(); auto env = GetEnvironment();
Label entry(env); Label entry(env);
@ -312,7 +307,7 @@ GateRef NewObjectStubBuilder::NewSObject(GateRef glue, GateRef hclass, MemoryAtt
DEFVARIABLE(result, VariableType::JS_ANY(), Undefined()); DEFVARIABLE(result, VariableType::JS_ANY(), Undefined());
SetGlue(glue); SetGlue(glue);
NewSObject(&result, &exit, hclass, mAttr); NewSObject(&result, &exit, hclass);
Bind(&exit); Bind(&exit);
auto ret = *result; auto ret = *result;
@ -843,8 +838,6 @@ GateRef NewObjectStubBuilder::NewJSFunction(GateRef glue, GateRef constpool, Gat
Bind(&afterAOTLiteral); Bind(&afterAOTLiteral);
GateRef method = GetMethodFromConstPool(glue, constpool, index); GateRef method = GetMethodFromConstPool(glue, constpool, index);
DEFVARIABLE(hclass, VariableType::JS_ANY(), Undefined()); DEFVARIABLE(hclass, VariableType::JS_ANY(), Undefined());
bool knownKind = JSFunction::IsNormalFunctionAndCanSkipWbWhenInitialization(targetKind);
Label isSendableFunc(env); Label isSendableFunc(env);
Label isNotSendableFunc(env); Label isNotSendableFunc(env);
Label afterSendableFunc(env); Label afterSendableFunc(env);
@ -852,7 +845,7 @@ GateRef NewObjectStubBuilder::NewJSFunction(GateRef glue, GateRef constpool, Gat
Bind(&isSendableFunc); Bind(&isSendableFunc);
{ {
hclass = LoadSHClassFromMethod(glue, method); hclass = LoadSHClassFromMethod(glue, method);
result = NewSObject(glue, *hclass, knownKind ? MemoryAttribute::NoBarrier() : MemoryAttribute::Default()); result = NewSObject(glue, *hclass);
GateRef kind = GetFuncKind(method); GateRef kind = GetFuncKind(method);
InitializeSFunction(glue, *result, kind, targetKind); InitializeSFunction(glue, *result, kind, targetKind);
Jump(&afterSendableFunc); Jump(&afterSendableFunc);
@ -860,7 +853,7 @@ GateRef NewObjectStubBuilder::NewJSFunction(GateRef glue, GateRef constpool, Gat
Bind(&isNotSendableFunc); Bind(&isNotSendableFunc);
{ {
hclass = LoadHClassFromMethod(glue, method); hclass = LoadHClassFromMethod(glue, method);
result = NewJSObject(glue, *hclass, knownKind ? MemoryAttribute::NoBarrier() : MemoryAttribute::Default()); result = NewJSObject(glue, *hclass);
SetExtensibleToBitfield(glue, *hclass, true); SetExtensibleToBitfield(glue, *hclass, true);
GateRef kind = GetFuncKind(method); GateRef kind = GetFuncKind(method);
InitializeJSFunction(glue, *result, kind, targetKind); InitializeJSFunction(glue, *result, kind, targetKind);
@ -868,7 +861,7 @@ GateRef NewObjectStubBuilder::NewJSFunction(GateRef glue, GateRef constpool, Gat
} }
Bind(&afterSendableFunc); Bind(&afterSendableFunc);
SetCallableToBitfield(glue, *hclass, true); SetCallableToBitfield(glue, *hclass, true);
SetMethodToFunction(glue, *result, method, knownKind ? MemoryAttribute::NoBarrier() : MemoryAttribute::Default()); SetMethodToFunction(glue, *result, method);
SetCompiledCodeFlagToFunction(glue, *result, Int32(0)); SetCompiledCodeFlagToFunction(glue, *result, Int32(0));
SetMachineCodeToFunction(glue, *result, Undefined(), MemoryAttribute::NoBarrier()); SetMachineCodeToFunction(glue, *result, Undefined(), MemoryAttribute::NoBarrier());
@ -924,27 +917,21 @@ void NewObjectStubBuilder::NewJSFunction(GateRef glue, GateRef jsFunc, GateRef i
} }
Bind(&notException); Bind(&notException);
{ {
bool knownKind = JSFunction::IsNormalFunctionAndCanSkipWbWhenInitialization(targetKind);
GateRef module = GetModuleFromFunction(jsFunc); GateRef module = GetModuleFromFunction(jsFunc);
SetLengthToFunction(glue_, result->ReadVariable(), length); SetLengthToFunction(glue_, result->ReadVariable(), length);
BRANCH(IsSendableFunction(GetMethodFromFunction(result->ReadVariable())), &isSendableFunc, &isNotSendableFunc); BRANCH(IsSendableFunction(GetMethodFromFunction(result->ReadVariable())), &isSendableFunc, &isNotSendableFunc);
Bind(&isSendableFunc); Bind(&isSendableFunc);
{ {
GateRef smodule = CallRuntime(glue, RTSTUB_ID(GetSharedModule), { module }); GateRef smodule = CallRuntime(glue, RTSTUB_ID(GetSharedModule), { module });
SetSendableEnvToModule(glue, smodule, GetSendableEnvFromModule(module), SetSendableEnvToModule(glue, smodule, GetSendableEnvFromModule(module));
knownKind ? MemoryAttribute::NoBarrier() : MemoryAttribute::Default()); SetModuleToFunction(glue, result->ReadVariable(), smodule);
SetModuleToFunction(glue, result->ReadVariable(), smodule,
knownKind ? MemoryAttribute::NoBarrier() : MemoryAttribute::Default());
Jump(&afterSendableFunc); Jump(&afterSendableFunc);
} }
Bind(&isNotSendableFunc); Bind(&isNotSendableFunc);
{ {
SetLexicalEnvToFunction(glue_, result->ReadVariable(), lexEnv, SetLexicalEnvToFunction(glue_, result->ReadVariable(), lexEnv);
knownKind ? MemoryAttribute::NoBarrier() : MemoryAttribute::Default()); SetModuleToFunction(glue_, result->ReadVariable(), module);
SetModuleToFunction(glue_, result->ReadVariable(), module, SetHomeObjectToFunction(glue_, result->ReadVariable(), GetHomeObjectFromFunction(jsFunc));
knownKind ? MemoryAttribute::NoBarrier() : MemoryAttribute::Default());
SetHomeObjectToFunction(glue_, result->ReadVariable(), GetHomeObjectFromFunction(jsFunc),
knownKind ? MemoryAttribute::NoBarrier() : MemoryAttribute::Default());
#if ECMASCRIPT_ENABLE_IC #if ECMASCRIPT_ENABLE_IC
SetProfileTypeInfoCellToFunction(jsFunc, result->ReadVariable(), slotId); SetProfileTypeInfoCellToFunction(jsFunc, result->ReadVariable(), slotId);
#endif #endif
@ -1007,13 +994,11 @@ void NewObjectStubBuilder::InitializeSFunction(GateRef glue, GateRef func, GateR
auto funcAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, auto funcAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue,
ConstantIndex::FUNCTION_NAME_ACCESSOR); ConstantIndex::FUNCTION_NAME_ACCESSOR);
SetPropertyInlinedProps(glue, func, hclass, funcAccessor, Int32(JSFunction::NAME_INLINE_PROPERTY_INDEX), SetPropertyInlinedProps(glue, func, hclass, funcAccessor, Int32(JSFunction::NAME_INLINE_PROPERTY_INDEX),
VariableType::JS_ANY(), VariableType::JS_ANY(), MemoryAttribute::NoBarrier());
MemoryAttribute::NoBarrier());
funcAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, funcAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue,
ConstantIndex::FUNCTION_LENGTH_ACCESSOR); ConstantIndex::FUNCTION_LENGTH_ACCESSOR);
SetPropertyInlinedProps(glue, func, hclass, funcAccessor, Int32(JSFunction::LENGTH_INLINE_PROPERTY_INDEX), SetPropertyInlinedProps(glue, func, hclass, funcAccessor, Int32(JSFunction::LENGTH_INLINE_PROPERTY_INDEX),
VariableType::JS_ANY(), VariableType::JS_ANY(), MemoryAttribute::NoBarrier());
MemoryAttribute::NoBarrier());
Jump(&exit); Jump(&exit);
} }
} else { } else {
@ -1109,13 +1094,11 @@ void NewObjectStubBuilder::InitializeJSFunction(GateRef glue, GateRef func, Gate
auto funcAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, auto funcAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue,
ConstantIndex::FUNCTION_NAME_ACCESSOR); ConstantIndex::FUNCTION_NAME_ACCESSOR);
SetPropertyInlinedProps(glue, func, hclass, funcAccessor, Int32(JSFunction::NAME_INLINE_PROPERTY_INDEX), SetPropertyInlinedProps(glue, func, hclass, funcAccessor, Int32(JSFunction::NAME_INLINE_PROPERTY_INDEX),
VariableType::JS_ANY(), VariableType::JS_ANY(), MemoryAttribute::NoBarrier());
MemoryAttribute::NoBarrier());
funcAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, funcAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue,
ConstantIndex::FUNCTION_LENGTH_ACCESSOR); ConstantIndex::FUNCTION_LENGTH_ACCESSOR);
SetPropertyInlinedProps(glue, func, hclass, funcAccessor, Int32(JSFunction::LENGTH_INLINE_PROPERTY_INDEX), SetPropertyInlinedProps(glue, func, hclass, funcAccessor, Int32(JSFunction::LENGTH_INLINE_PROPERTY_INDEX),
VariableType::JS_ANY(), VariableType::JS_ANY(), MemoryAttribute::NoBarrier());
MemoryAttribute::NoBarrier());
Jump(&exit); Jump(&exit);
} }
} else { } else {
@ -1206,11 +1189,13 @@ GateRef NewObjectStubBuilder::NewJSBoundFunction(GateRef glue, GateRef target, G
GateRef nameAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, GateRef nameAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue,
ConstantIndex::FUNCTION_NAME_ACCESSOR); ConstantIndex::FUNCTION_NAME_ACCESSOR);
SetPropertyInlinedProps(glue, *result, hclass, nameAccessor, SetPropertyInlinedProps(glue, *result, hclass, nameAccessor,
Int32(JSFunction::NAME_INLINE_PROPERTY_INDEX)); Int32(JSFunction::NAME_INLINE_PROPERTY_INDEX), VariableType::JS_ANY(),
MemoryAttribute::NoBarrier());
GateRef lengthAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, GateRef lengthAccessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue,
ConstantIndex::FUNCTION_LENGTH_ACCESSOR); ConstantIndex::FUNCTION_LENGTH_ACCESSOR);
SetPropertyInlinedProps(glue, *result, hclass, lengthAccessor, SetPropertyInlinedProps(glue, *result, hclass, lengthAccessor,
Int32(JSFunction::LENGTH_INLINE_PROPERTY_INDEX)); Int32(JSFunction::LENGTH_INLINE_PROPERTY_INDEX), VariableType::JS_ANY(),
MemoryAttribute::NoBarrier());
SetJSObjectTaggedField(glue, *result, JSBoundFunction::BOUND_TARGET_OFFSET, target); SetJSObjectTaggedField(glue, *result, JSBoundFunction::BOUND_TARGET_OFFSET, target);
SetJSObjectTaggedField(glue, *result, JSBoundFunction::BOUND_THIS_OFFSET, boundThis); SetJSObjectTaggedField(glue, *result, JSBoundFunction::BOUND_THIS_OFFSET, boundThis);
SetJSObjectTaggedField(glue, *result, JSBoundFunction::BOUND_ARGUMENTS_OFFSET, args); SetJSObjectTaggedField(glue, *result, JSBoundFunction::BOUND_ARGUMENTS_OFFSET, args);
@ -1396,7 +1381,7 @@ void NewObjectStubBuilder::NewJSArrayLiteral(Variable *result, Label *exit, Regi
Bind(&initializeArray); Bind(&initializeArray);
Store(VariableType::JS_POINTER(), glue_, result->ReadVariable(), IntPtr(0), hclass); Store(VariableType::JS_POINTER(), glue_, result->ReadVariable(), IntPtr(0), hclass);
InitializeWithSpeicalValue(&afterInitialize, result->ReadVariable(), Undefined(), Int32(JSArray::SIZE), InitializeWithSpeicalValue(&afterInitialize, result->ReadVariable(), Undefined(), Int32(JSArray::SIZE),
TruncInt64ToInt32(size_)); TruncInt64ToInt32(size_), MemoryAttribute::NoBarrier());
Bind(&afterInitialize); Bind(&afterInitialize);
GateRef hashOffset = IntPtr(ECMAObject::HASH_OFFSET); GateRef hashOffset = IntPtr(ECMAObject::HASH_OFFSET);
Store(VariableType::INT64(), glue_, result->ReadVariable(), hashOffset, Int64(JSTaggedValue(0).GetRawData())); Store(VariableType::INT64(), glue_, result->ReadVariable(), hashOffset, Int64(JSTaggedValue(0).GetRawData()));
@ -1423,7 +1408,7 @@ void NewObjectStubBuilder::NewJSArrayLiteral(Variable *result, Label *exit, Regi
auto accessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue_, ConstantIndex::ARRAY_LENGTH_ACCESSOR); auto accessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue_, ConstantIndex::ARRAY_LENGTH_ACCESSOR);
SetPropertyInlinedProps(glue_, result->ReadVariable(), hclass, accessor, SetPropertyInlinedProps(glue_, result->ReadVariable(), hclass, accessor,
Int32(JSArray::LENGTH_INLINE_PROPERTY_INDEX), VariableType::JS_POINTER()); Int32(JSArray::LENGTH_INLINE_PROPERTY_INDEX), VariableType::JS_POINTER(), MemoryAttribute::NoBarrier());
Jump(exit); Jump(exit);
} }

View File

@ -52,12 +52,10 @@ public:
void NewLexicalEnv(Variable *result, Label *exit, GateRef numSlots, GateRef parent); void NewLexicalEnv(Variable *result, Label *exit, GateRef numSlots, GateRef parent);
void NewJSObject(Variable *result, Label *exit, GateRef hclass, GateRef size); void NewJSObject(Variable *result, Label *exit, GateRef hclass, GateRef size);
void NewJSObject(Variable *result, Label *exit, GateRef hclass, void NewJSObject(Variable *result, Label *exit, GateRef hclass);
MemoryAttribute mAttr = MemoryAttribute::Default()); void NewSObject(Variable *result, Label *exit, GateRef hclass);
void NewSObject(Variable *result, Label *exit, GateRef hclass, GateRef NewJSObject(GateRef glue, GateRef hclass);
MemoryAttribute mAttr = MemoryAttribute::Default()); GateRef NewSObject(GateRef glue, GateRef hclass);
GateRef NewJSObject(GateRef glue, GateRef hclass, MemoryAttribute mAttr = MemoryAttribute::Default());
GateRef NewSObject(GateRef glue, GateRef hclass, MemoryAttribute mAttr = MemoryAttribute::Default());
GateRef NewJSProxy(GateRef glue, GateRef target, GateRef handler); GateRef NewJSProxy(GateRef glue, GateRef target, GateRef handler);
GateRef NewJSArray(GateRef glue, GateRef hclass); GateRef NewJSArray(GateRef glue, GateRef hclass);
GateRef NewTaggedArray(GateRef glue, GateRef len); GateRef NewTaggedArray(GateRef glue, GateRef len);

View File

@ -32,7 +32,7 @@ static constexpr uint32_t DAEMON_THREAD_INDEX = 0;
class DaemonThread : public JSThread { class DaemonThread : public JSThread {
public: public:
static void CreateNewInstance(); static void CreateNewInstance();
static DaemonThread *GetInstance(); static DaemonThread *PUBLIC_API GetInstance();
static void DestroyInstance(); static void DestroyInstance();
using ThreadId = uint32_t; using ThreadId = uint32_t;

View File

@ -959,6 +959,9 @@ void EcmaContext::Iterate(const RootVisitor &v, const RootRangeVisitor &rv)
if (propertiesCache_ != nullptr) { if (propertiesCache_ != nullptr) {
propertiesCache_->Clear(); propertiesCache_->Clear();
} }
if (regExpParserCache_ != nullptr) {
regExpParserCache_->Clear();
}
if (!vm_->GetJSOptions().EnableGlobalLeakCheck() && currentHandleStorageIndex_ != -1) { if (!vm_->GetJSOptions().EnableGlobalLeakCheck() && currentHandleStorageIndex_ != -1) {
// IterateHandle when disableGlobalLeakCheck. // IterateHandle when disableGlobalLeakCheck.
int32_t nid = currentHandleStorageIndex_; int32_t nid = currentHandleStorageIndex_;

View File

@ -629,6 +629,12 @@ public:
{ {
return hasKeptObjects_; return hasKeptObjects_;
} }
void ClearCachedConstantPool()
{
cachedSharedConstpools_.clear();
}
private: private:
void CJSExecution(JSHandle<JSFunction> &func, JSHandle<JSTaggedValue> &thisArg, void CJSExecution(JSHandle<JSFunction> &func, JSHandle<JSTaggedValue> &thisArg,
const JSPandaFile *jsPandaFile, std::string_view entryPoint); const JSPandaFile *jsPandaFile, std::string_view entryPoint);

View File

@ -1015,7 +1015,7 @@ EcmaString *EcmaString::ToLower(const EcmaVM *vm, const JSHandle<EcmaString> &sr
std::string res = base::StringHelper::ToLower(u16str); std::string res = base::StringHelper::ToLower(u16str);
return *(factory->NewFromStdString(res)); return *(factory->NewFromStdString(res));
} else { } else {
return ConvertUtf8ToLowerOrUpper(vm, src, true, srcFlat); return ConvertUtf8ToLowerOrUpper(vm, src, true);
} }
} }
@ -1037,7 +1037,7 @@ EcmaString *EcmaString::TryToLower(const EcmaVM *vm, const JSHandle<EcmaString>
if (upperIndex == srcLength) { if (upperIndex == srcLength) {
return *src; return *src;
} }
return ConvertUtf8ToLowerOrUpper(vm, src, true, srcFlat, upperIndex); return ConvertUtf8ToLowerOrUpper(vm, src, true, upperIndex);
} }
/* static */ /* static */
@ -1058,17 +1058,18 @@ EcmaString *EcmaString::TryToUpper(const EcmaVM *vm, const JSHandle<EcmaString>
if (lowerIndex == srcLength) { if (lowerIndex == srcLength) {
return *src; return *src;
} }
return ConvertUtf8ToLowerOrUpper(vm, src, false, srcFlat, lowerIndex); return ConvertUtf8ToLowerOrUpper(vm, src, false, lowerIndex);
} }
/* static */ /* static */
EcmaString *EcmaString::ConvertUtf8ToLowerOrUpper(const EcmaVM *vm, const JSHandle<EcmaString> &src, EcmaString *EcmaString::ConvertUtf8ToLowerOrUpper(const EcmaVM *vm, const JSHandle<EcmaString> &src,
bool toLower, FlatStringInfo &srcFlat, uint32_t startIndex) bool toLower, uint32_t startIndex)
{ {
const char start = toLower ? 'A' : 'a'; const char start = toLower ? 'A' : 'a';
const char end = toLower ? 'Z' : 'z'; const char end = toLower ? 'Z' : 'z';
uint32_t srcLength = src->GetLength(); uint32_t srcLength = src->GetLength();
JSHandle<EcmaString> newString(vm->GetJSThread(), CreateLineString(vm, srcLength, true)); JSHandle<EcmaString> newString(vm->GetJSThread(), CreateLineString(vm, srcLength, true));
auto srcFlat = FlattenAllString(vm, src);
Span<uint8_t> data(srcFlat.GetDataUtf8Writable(), srcLength); Span<uint8_t> data(srcFlat.GetDataUtf8Writable(), srcLength);
auto newStringPtr = newString->GetDataUtf8Writable(); auto newStringPtr = newString->GetDataUtf8Writable();
if (startIndex > 0) { if (startIndex > 0) {
@ -1098,7 +1099,7 @@ EcmaString *EcmaString::ToUpper(const EcmaVM *vm, const JSHandle<EcmaString> &sr
std::string res = base::StringHelper::ToUpper(u16str); std::string res = base::StringHelper::ToUpper(u16str);
return *(factory->NewFromStdString(res)); return *(factory->NewFromStdString(res));
} else { } else {
return ConvertUtf8ToLowerOrUpper(vm, src, false, srcFlat); return ConvertUtf8ToLowerOrUpper(vm, src, false);
} }
} }

View File

@ -750,7 +750,7 @@ private:
static EcmaString *TryToUpper(const EcmaVM *vm, const JSHandle<EcmaString> &src); static EcmaString *TryToUpper(const EcmaVM *vm, const JSHandle<EcmaString> &src);
static EcmaString *ConvertUtf8ToLowerOrUpper(const EcmaVM *vm, const JSHandle<EcmaString> &src, static EcmaString *ConvertUtf8ToLowerOrUpper(const EcmaVM *vm, const JSHandle<EcmaString> &src,
bool toLower, FlatStringInfo &srcFlat, uint32_t startIndex = 0); bool toLower, uint32_t startIndex = 0);
}; };
// The LineEcmaString abstract class captures sequential string values, only LineEcmaString can store chars data // The LineEcmaString abstract class captures sequential string values, only LineEcmaString can store chars data

View File

@ -161,7 +161,10 @@ void EcmaVM::PreFork()
heap_->GetReadOnlySpace()->SetReadOnly(); heap_->GetReadOnlySpace()->SetReadOnly();
heap_->DisableParallelGC(); heap_->DisableParallelGC();
SetPostForked(false); SetPostForked(false);
SharedHeap::GetInstance()->DisableParallelGC(thread_);
auto sHeap = SharedHeap::GetInstance();
sHeap->CompactHeapBeforeFork(thread_);
sHeap->DisableParallelGC(thread_);
} }
void EcmaVM::PostFork() void EcmaVM::PostFork()
@ -593,6 +596,9 @@ void EcmaVM::ProcessSharedNativeDelete(const WeakRootVisitor &visitor)
std::make_pair(object->GetDeleter(), std::make_pair(object->GetExternalPointer(), object->GetData()))); std::make_pair(object->GetDeleter(), std::make_pair(object->GetExternalPointer(), object->GetData())));
sharedIter = sharedNativePointerList_.erase(sharedIter); sharedIter = sharedNativePointerList_.erase(sharedIter);
} else { } else {
if (fwd != reinterpret_cast<TaggedObject *>(object)) {
*sharedIter = reinterpret_cast<JSNativePointer *>(fwd);
}
++sharedIter; ++sharedIter;
} }
} }
@ -600,9 +606,6 @@ void EcmaVM::ProcessSharedNativeDelete(const WeakRootVisitor &visitor)
void EcmaVM::ProcessReferences(const WeakRootVisitor &visitor) void EcmaVM::ProcessReferences(const WeakRootVisitor &visitor)
{ {
if (thread_->GetCurrentEcmaContext()->GetRegExpParserCache() != nullptr) {
thread_->GetCurrentEcmaContext()->GetRegExpParserCache()->Clear();
}
// process native ref should be limited to OldGC or FullGC only // process native ref should be limited to OldGC or FullGC only
if (!heap_->IsGeneralYoungGC()) { if (!heap_->IsGeneralYoungGC()) {
heap_->ResetNativeBindingSize(); heap_->ResetNativeBindingSize();

View File

@ -195,13 +195,13 @@ public:
return const_cast<EcmaVM *>(vm); return const_cast<EcmaVM *>(vm);
} }
void CheckThread() const void PUBLIC_API CheckThread() const
{ {
// Exclude GC thread // Exclude GC thread
if (thread_ == nullptr) { if (thread_ == nullptr) {
LOG_FULL(FATAL) << "Fatal: ecma_vm has been destructed! vm address is: " << this; LOG_FULL(FATAL) << "Fatal: ecma_vm has been destructed! vm address is: " << this;
} }
if (!Taskpool::GetCurrentTaskpool()->IsInThreadPool(std::this_thread::get_id()) && if (!Taskpool::GetCurrentTaskpool()->IsDaemonThreadOrInThreadPool(std::this_thread::get_id()) &&
thread_->GetThreadId() != JSThread::GetCurrentThreadId() && !thread_->IsCrossThreadExecutionEnable()) { thread_->GetThreadId() != JSThread::GetCurrentThreadId() && !thread_->IsCrossThreadExecutionEnable()) {
LOG_FULL(FATAL) << "Fatal: ecma_vm cannot run in multi-thread!" LOG_FULL(FATAL) << "Fatal: ecma_vm cannot run in multi-thread!"
<< " thread:" << thread_->GetThreadId() << " thread:" << thread_->GetThreadId()

View File

@ -193,18 +193,28 @@ public:
{ {
ASSERT(!thread->IsJitThread()); ASSERT(!thread->IsJitThread());
if (Jit::GetInstance()->IsEnableFastJit() || Jit::GetInstance()->IsEnableBaselineJit()) { if (Jit::GetInstance()->IsEnableFastJit() || Jit::GetInstance()->IsEnableBaselineJit()) {
Clock::time_point start = Clock::now(); LockJit(thread_);
thread_->GetJitLock()->Lock();
Jit::GetInstance()->GetJitDfx()->SetLockHoldingTime(
std::chrono::duration_cast<std::chrono::microseconds>(Clock::now() - start).count());
locked_ = true; locked_ = true;
} }
} }
static void LockJit(JSThread *thread)
{
Clock::time_point start = Clock::now();
thread->GetJitLock()->Lock();
Jit::GetInstance()->GetJitDfx()->SetLockHoldingTime(
std::chrono::duration_cast<std::chrono::microseconds>(Clock::now() - start).count());
}
static void UnlockJit(JSThread *thread)
{
thread->GetJitLock()->Unlock();
}
~JitGCLockHolder() ~JitGCLockHolder()
{ {
if (locked_) { if (locked_) {
thread_->GetJitLock()->Unlock(); UnlockJit(thread_);
locked_ = false; locked_ = false;
} }
} }

View File

@ -33,7 +33,7 @@ bool JSAPIBitVector::Push(
JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector, const JSHandle<JSTaggedValue>& value) JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector, const JSHandle<JSTaggedValue>& value)
{ {
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread,
bitVector.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(bitVector));
uint32_t length = bitVector->GetLength(); uint32_t length = bitVector->GetLength();
JSHandle<JSNativePointer> np(thread, bitVector->GetNativePointer()); JSHandle<JSNativePointer> np(thread, bitVector->GetNativePointer());
auto elements = reinterpret_cast<std::vector<std::bitset<BIT_SET_LENGTH>>*>(np->GetExternalPointer()); auto elements = reinterpret_cast<std::vector<std::bitset<BIT_SET_LENGTH>>*>(np->GetExternalPointer());
@ -53,7 +53,7 @@ bool JSAPIBitVector::Push(
JSTaggedValue JSAPIBitVector::Pop(JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector) JSTaggedValue JSAPIBitVector::Pop(JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector)
{ {
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread,
bitVector.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(bitVector));
uint32_t lastIndex = bitVector->GetLength() - 1; uint32_t lastIndex = bitVector->GetLength() - 1;
if (lastIndex < 0) { if (lastIndex < 0) {
return JSTaggedValue::Undefined(); return JSTaggedValue::Undefined();
@ -164,7 +164,7 @@ JSTaggedValue JSAPIBitVector::SetBitsByRange(JSThread* thread, const JSHandle<JS
const JSHandle<JSTaggedValue>& value, const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end) const JSHandle<JSTaggedValue>& value, const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end)
{ {
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread,
bitVector.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(bitVector));
int32_t startIndex = JSTaggedValue::ToInt32(thread, start); int32_t startIndex = JSTaggedValue::ToInt32(thread, start);
int32_t endIndex = JSTaggedValue::ToInt32(thread, end); int32_t endIndex = JSTaggedValue::ToInt32(thread, end);
int32_t length = bitVector->GetLength(); int32_t length = bitVector->GetLength();
@ -196,7 +196,7 @@ JSTaggedValue JSAPIBitVector::GetBitsByRange(JSThread* thread, const JSHandle<JS
const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end) const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end)
{ {
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread,
bitVector.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(bitVector));
int32_t startIndex = JSTaggedValue::ToInt32(thread, start); int32_t startIndex = JSTaggedValue::ToInt32(thread, start);
int32_t endIndex = JSTaggedValue::ToInt32(thread, end); int32_t endIndex = JSTaggedValue::ToInt32(thread, end);
int32_t length = bitVector->GetLength(); int32_t length = bitVector->GetLength();
@ -238,7 +238,7 @@ JSTaggedValue JSAPIBitVector::SetAllBits(
JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector, const JSHandle<JSTaggedValue>& value) JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector, const JSHandle<JSTaggedValue>& value)
{ {
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread,
bitVector.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(bitVector));
JSHandle<JSNativePointer> np(thread, bitVector->GetNativePointer()); JSHandle<JSNativePointer> np(thread, bitVector->GetNativePointer());
auto elements = reinterpret_cast<std::vector<std::bitset<BIT_SET_LENGTH>>*>(np->GetExternalPointer()); auto elements = reinterpret_cast<std::vector<std::bitset<BIT_SET_LENGTH>>*>(np->GetExternalPointer());
int size = static_cast<int>(elements->size()); int size = static_cast<int>(elements->size());
@ -257,7 +257,7 @@ JSTaggedValue JSAPIBitVector::SetAllBits(
JSTaggedValue JSAPIBitVector::GetBitCountByRange(JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector, JSTaggedValue JSAPIBitVector::GetBitCountByRange(JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector,
const JSHandle<JSTaggedValue>& value, const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end) const JSHandle<JSTaggedValue>& value, const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end)
{ {
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector> scope(thread, bitVector.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector> scope(thread, JSHandle<JSTaggedValue>::Cast(bitVector));
int32_t startIndex = JSTaggedValue::ToInt32(thread, start); int32_t startIndex = JSTaggedValue::ToInt32(thread, start);
int32_t endIndex = JSTaggedValue::ToInt32(thread, end); int32_t endIndex = JSTaggedValue::ToInt32(thread, end);
int32_t length = bitVector->GetLength(); int32_t length = bitVector->GetLength();
@ -294,7 +294,7 @@ JSTaggedValue JSAPIBitVector::GetBitCountByRange(JSThread* thread, const JSHandl
int JSAPIBitVector::GetIndexOf(JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector, int JSAPIBitVector::GetIndexOf(JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector,
const JSHandle<JSTaggedValue>& value, const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end) const JSHandle<JSTaggedValue>& value, const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end)
{ {
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector> scope(thread, bitVector.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector> scope(thread, JSHandle<JSTaggedValue>::Cast(bitVector));
int32_t startIndex = JSTaggedValue::ToInt32(thread, start); int32_t startIndex = JSTaggedValue::ToInt32(thread, start);
int32_t endIndex = JSTaggedValue::ToInt32(thread, end); int32_t endIndex = JSTaggedValue::ToInt32(thread, end);
int32_t length = bitVector->GetLength(); int32_t length = bitVector->GetLength();
@ -330,7 +330,7 @@ int JSAPIBitVector::GetIndexOf(JSThread* thread, const JSHandle<JSAPIBitVector>&
int JSAPIBitVector::GetLastIndexOf(JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector, int JSAPIBitVector::GetLastIndexOf(JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector,
const JSHandle<JSTaggedValue>& value, const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end) const JSHandle<JSTaggedValue>& value, const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end)
{ {
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector> scope(thread, bitVector.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector> scope(thread, JSHandle<JSTaggedValue>::Cast(bitVector));
int32_t startIndex = JSTaggedValue::ToInt32(thread, start); int32_t startIndex = JSTaggedValue::ToInt32(thread, start);
int32_t endIndex = JSTaggedValue::ToInt32(thread, end); int32_t endIndex = JSTaggedValue::ToInt32(thread, end);
int32_t length = bitVector->GetLength(); int32_t length = bitVector->GetLength();
@ -366,7 +366,7 @@ int JSAPIBitVector::GetLastIndexOf(JSThread* thread, const JSHandle<JSAPIBitVect
JSTaggedValue JSAPIBitVector::FlipBitByIndex(JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector, int index) JSTaggedValue JSAPIBitVector::FlipBitByIndex(JSThread* thread, const JSHandle<JSAPIBitVector>& bitVector, int index)
{ {
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread,
bitVector.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(bitVector));
if (index >= bitVector->GetLength()) { if (index >= bitVector->GetLength()) {
std::ostringstream oss; std::ostringstream oss;
oss << "The value of \"index\" is out of range. It must be >= 0 && <= " << (bitVector->GetLength() - 1) oss << "The value of \"index\" is out of range. It must be >= 0 && <= " << (bitVector->GetLength() - 1)
@ -389,7 +389,7 @@ JSTaggedValue JSAPIBitVector::FlipBitsByRange(JSThread* thread, const JSHandle<J
const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end) const JSHandle<JSTaggedValue>& start, const JSHandle<JSTaggedValue>& end)
{ {
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread,
bitVector.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(bitVector));
int32_t startIndex = JSTaggedValue::ToInt32(thread, start); int32_t startIndex = JSTaggedValue::ToInt32(thread, start);
int32_t endIndex = JSTaggedValue::ToInt32(thread, end); int32_t endIndex = JSTaggedValue::ToInt32(thread, end);
int32_t length = bitVector->GetLength(); int32_t length = bitVector->GetLength();
@ -428,7 +428,7 @@ void JSAPIBitVector::Resize(JSThread* thread, const JSHandle<JSAPIBitVector>& bi
THROW_NEW_ERROR_AND_RETURN(thread, error); THROW_NEW_ERROR_AND_RETURN(thread, error);
} }
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector, ModType::WRITE> scope(thread,
bitVector.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(bitVector));
int length = bitVector->GetLength(); int length = bitVector->GetLength();
uint32_t elementsLength = ((length - 1) / BIT_SET_LENGTH) + 1; uint32_t elementsLength = ((length - 1) / BIT_SET_LENGTH) + 1;
uint32_t newElementsLength = ((newSize - 1) / BIT_SET_LENGTH) + 1; uint32_t newElementsLength = ((newSize - 1) / BIT_SET_LENGTH) + 1;

View File

@ -47,7 +47,7 @@ JSTaggedValue JSAPIBitVectorIterator::Next(EcmaRuntimeCallInfo* argv)
// If a has a [[TypedBitVectorName]] internal slot, then // If a has a [[TypedBitVectorName]] internal slot, then
// Let len be the value of Os [[BitVectorLength]] internal slot. // Let len be the value of Os [[BitVectorLength]] internal slot.
ASSERT(bitVector->IsJSAPIBitVector()); ASSERT(bitVector->IsJSAPIBitVector());
[[maybe_unused]] ConcurrentApiScope<JSAPIBitVector> scope(thread, bitVector.GetTaggedValue().GetTaggedObject()); [[maybe_unused]] ConcurrentApiScope<JSAPIBitVector> scope(thread, bitVector);
const uint32_t length = static_cast<uint32_t>(JSHandle<JSAPIBitVector>::Cast(bitVector)->GetSize()); const uint32_t length = static_cast<uint32_t>(JSHandle<JSAPIBitVector>::Cast(bitVector)->GetSize());
// If index >= len, then // If index >= len, then
if (index >= length) { if (index >= length) {

View File

@ -1113,12 +1113,12 @@ JSTaggedValue JSFunction::GetNativeFunctionExtraInfo() const
return JSTaggedValue::Undefined(); return JSTaggedValue::Undefined();
} }
void JSFunction::InitializeForConcurrentFunction(JSThread *thread) void JSFunction::InitializeForConcurrentFunction(JSThread *thread, JSHandle<JSFunction> &func)
{ {
JSHandle<Method> method(thread, this->GetMethod()); JSHandle<Method> method(thread, func->GetMethod());
JSTaggedValue sendableEnv = JSTaggedValue::Undefined(); JSMutableHandle<JSTaggedValue> sendableEnv(thread, JSTaggedValue::Undefined());
if (this->IsSharedFunction() && !this->GetModule().IsUndefined()) { if (func->IsSharedFunction() && !func->GetModule().IsUndefined()) {
sendableEnv = SourceTextModule::Cast(this->GetModule())->GetSendableEnv(); sendableEnv.Update(SourceTextModule::Cast(func->GetModule())->GetSendableEnv());
} }
const JSPandaFile *jsPandaFile = method->GetJSPandaFile(); const JSPandaFile *jsPandaFile = method->GetJSPandaFile();
if (jsPandaFile == nullptr) { if (jsPandaFile == nullptr) {
@ -1155,12 +1155,12 @@ void JSFunction::InitializeForConcurrentFunction(JSThread *thread)
JSHandle<ecmascript::SourceTextModule> module = JSHandle<ecmascript::SourceTextModule>::Cast(moduleRecord); JSHandle<ecmascript::SourceTextModule> module = JSHandle<ecmascript::SourceTextModule>::Cast(moduleRecord);
module->SetStatus(ecmascript::ModuleStatus::INSTANTIATED); module->SetStatus(ecmascript::ModuleStatus::INSTANTIATED);
ecmascript::SourceTextModule::EvaluateForConcurrent(thread, module, method); ecmascript::SourceTextModule::EvaluateForConcurrent(thread, module, method);
if (this->IsSharedFunction()) { if (func->IsSharedFunction()) {
JSHandle<JSTaggedValue> sendableClassRecord = moduleManager->GenerateSendableFuncModule(moduleRecord); JSHandle<JSTaggedValue> sendableClassRecord = moduleManager->GenerateSendableFuncModule(moduleRecord);
SourceTextModule::Cast(sendableClassRecord.GetTaggedValue())->SetSendableEnv(thread, sendableEnv); SourceTextModule::Cast(sendableClassRecord.GetTaggedValue())->SetSendableEnv(thread, sendableEnv);
this->SetModule(thread, sendableClassRecord); func->SetModule(thread, sendableClassRecord);
} else { } else {
this->SetModule(thread, moduleRecord); func->SetModule(thread, moduleRecord);
} }
// for debugger, to notify the script loaded and parsed which the concurrent function is in // for debugger, to notify the script loaded and parsed which the concurrent function is in

View File

@ -334,7 +334,7 @@ public:
void SetJitCompiledFuncEntry(JSThread *thread, JSHandle<MachineCode> &machineCode, bool isFastCall); void SetJitCompiledFuncEntry(JSThread *thread, JSHandle<MachineCode> &machineCode, bool isFastCall);
void InitializeForConcurrentFunction(JSThread *thread); static void InitializeForConcurrentFunction(JSThread *thread, JSHandle<JSFunction> &func);
bool IsSendableOrConcurrentFunction() const; bool IsSendableOrConcurrentFunction() const;
bool IsSharedFunction() const; bool IsSharedFunction() const;

View File

@ -1048,6 +1048,13 @@ bool JSThread::EraseContext(EcmaContext *context)
return false; return false;
} }
void JSThread::ClearContextCachedConstantPool()
{
for (EcmaContext *context : contexts_) {
context->ClearCachedConstantPool();
}
}
PropertiesCache *JSThread::GetPropertiesCache() const PropertiesCache *JSThread::GetPropertiesCache() const
{ {
return glueData_.currentContext_->GetPropertiesCache(); return glueData_.currentContext_->GetPropertiesCache();

View File

@ -1309,6 +1309,7 @@ public:
bool IsPropertyCacheCleared() const; bool IsPropertyCacheCleared() const;
bool EraseContext(EcmaContext *context); bool EraseContext(EcmaContext *context);
void ClearContextCachedConstantPool();
const GlobalEnvConstants *GetFirstGlobalConst() const; const GlobalEnvConstants *GetFirstGlobalConst() const;
bool IsAllContextsInitialized() const; bool IsAllContextsInitialized() const;

View File

@ -524,7 +524,7 @@ public:
JSHandle<Method> method = factory->NewSMethod( JSHandle<Method> method = factory->NewSMethod(
jsPandaFile, methodLiteral, constpoolHandle, entryIndex, isLoadedAOT && hasEntryIndex); jsPandaFile, methodLiteral, constpoolHandle, entryIndex, isLoadedAOT && hasEntryIndex);
CASSetObjectToCache(thread, constpool, index, method.GetTaggedValue()); CASSetObjectToCache(thread, constpoolHandle.GetTaggedValue(), index, method.GetTaggedValue());
return method.GetTaggedValue(); return method.GetTaggedValue();
} }

View File

@ -753,7 +753,11 @@ void SharedGCStats::PrintGCMemoryStatistic()
<< "SharedHugeObjectSpace used:" << "SharedHugeObjectSpace used:"
<< STATS_DATA_FORMAT(sizeToKB(sHeap_->GetHugeObjectSpace()->GetHeapObjectSize())) << "KB" << STATS_DATA_FORMAT(sizeToKB(sHeap_->GetHugeObjectSpace()->GetHeapObjectSize())) << "KB"
<< " committed:" << " committed:"
<< STATS_DATA_FORMAT(sizeToKB(sHeap_->GetHugeObjectSpace()->GetCommittedSize())) << "KB\n"; << STATS_DATA_FORMAT(sizeToKB(sHeap_->GetHugeObjectSpace()->GetCommittedSize())) << "KB\n"
<< "SharedAppSpawnSpace used:"
<< STATS_DATA_FORMAT(sizeToKB(sHeap_->GetAppSpawnSpace()->GetHeapObjectSize())) << "KB"
<< " committed:"
<< STATS_DATA_FORMAT(sizeToKB(sHeap_->GetAppSpawnSpace()->GetCommittedSize())) << "KB";
LOG_GC(INFO) << STATS_DESCRIPTION_FORMAT("Anno memory usage size:") LOG_GC(INFO) << STATS_DESCRIPTION_FORMAT("Anno memory usage size:")
<< STATS_DATA_FORMAT(sizeToMB(heapRegionAllocator->GetAnnoMemoryUsage())) << "MB\n" << STATS_DATA_FORMAT(sizeToMB(heapRegionAllocator->GetAnnoMemoryUsage())) << "MB\n"

View File

@ -78,6 +78,7 @@ void SharedHeap::EnumerateOldSpaceRegions(const Callback &cb) const
sOldSpace_->EnumerateRegions(cb); sOldSpace_->EnumerateRegions(cb);
sNonMovableSpace_->EnumerateRegions(cb); sNonMovableSpace_->EnumerateRegions(cb);
sHugeObjectSpace_->EnumerateRegions(cb); sHugeObjectSpace_->EnumerateRegions(cb);
sAppSpawnSpace_->EnumerateRegions(cb);
} }
template<class Callback> template<class Callback>
@ -94,6 +95,7 @@ void SharedHeap::IterateOverObjects(const Callback &cb) const
sOldSpace_->IterateOverObjects(cb); sOldSpace_->IterateOverObjects(cb);
sNonMovableSpace_->IterateOverObjects(cb); sNonMovableSpace_->IterateOverObjects(cb);
sHugeObjectSpace_->IterateOverObjects(cb); sHugeObjectSpace_->IterateOverObjects(cb);
sAppSpawnSpace_->IterateOverMarkedObjects(cb);
} }
template<class Callback> template<class Callback>
@ -275,6 +277,11 @@ bool Heap::InHeapProfiler()
#endif #endif
} }
void SharedHeap::MergeToOldSpaceSync(SharedLocalSpace *localSpace)
{
sOldSpace_->Merge(localSpace);
}
TaggedObject *Heap::TryAllocateYoungGeneration(JSHClass *hclass, size_t size) TaggedObject *Heap::TryAllocateYoungGeneration(JSHClass *hclass, size_t size)
{ {
size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT)); size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
@ -606,6 +613,17 @@ void Heap::SwapOldSpace()
#endif #endif
} }
void SharedHeap::SwapOldSpace()
{
sCompressSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity());
auto *oldSpace = sCompressSpace_;
sCompressSpace_ = sOldSpace_;
sOldSpace_ = oldSpace;
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
sOldSpace_->SwapAllocationCounter(sCompressSpace_);
#endif
}
void Heap::ReclaimRegions(TriggerGCType gcType) void Heap::ReclaimRegions(TriggerGCType gcType)
{ {
activeSemiSpace_->EnumerateRegionsWithRecord([] (Region *region) { activeSemiSpace_->EnumerateRegionsWithRecord([] (Region *region) {
@ -756,12 +774,14 @@ void SharedHeap::CollectGarbageFinish(bool inDaemon)
// so do not need lock. // so do not need lock.
smartGCStats_.forceGC_ = false; smartGCStats_.forceGC_ = false;
} }
localFullMarkTriggered_ = false;
// Record alive object size after shared gc // Record alive object size after shared gc
NotifyHeapAliveSizeAfterGC(GetHeapObjectSize()); NotifyHeapAliveSizeAfterGC(GetHeapObjectSize());
// Adjust shared gc trigger threshold // Adjust shared gc trigger threshold
AdjustGlobalSpaceAllocLimit(); AdjustGlobalSpaceAllocLimit();
GetEcmaGCStats()->RecordStatisticAfterGC(); GetEcmaGCStats()->RecordStatisticAfterGC();
GetEcmaGCStats()->PrintGCStatistic(); GetEcmaGCStats()->PrintGCStatistic();
ProcessAllGCListeners();
} }
TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass) TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass)
@ -828,7 +848,7 @@ TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hc
TaggedObject *object = thread->IsJitThread() ? nullptr : TaggedObject *object = thread->IsJitThread() ? nullptr :
const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size); const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
if (object == nullptr) { if (object == nullptr) {
object = reinterpret_cast<TaggedObject *>(sOldSpace_->Allocate(thread, size)); object = AllocateInSOldSpace(thread, size);
CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject"); CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
object->SetClass(thread, hclass); object->SetClass(thread, hclass);
TryTriggerConcurrentMarking(thread); TryTriggerConcurrentMarking(thread);
@ -850,13 +870,38 @@ TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, size_t size)
TaggedObject *object = thread->IsJitThread() ? nullptr : TaggedObject *object = thread->IsJitThread() ? nullptr :
const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size); const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
if (object == nullptr) { if (object == nullptr) {
object = reinterpret_cast<TaggedObject *>(sOldSpace_->Allocate(thread, size)); object = AllocateInSOldSpace(thread, size);
CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject"); CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
TryTriggerConcurrentMarking(thread); TryTriggerConcurrentMarking(thread);
} }
return object; return object;
} }
TaggedObject *SharedHeap::AllocateInSOldSpace(JSThread *thread, size_t size)
{
// jit thread no heap
bool allowGC = !thread->IsJitThread();
if (allowGC) {
auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
localHeap->TryTriggerFullMarkBySharedSize(size);
}
TaggedObject *object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, false));
// Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
if (object == nullptr) {
if (allowGC) {
CheckAndTriggerSharedGC(thread);
}
object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
if (object == nullptr) {
if (allowGC) {
CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
}
object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
}
}
return object;
}
TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size) TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
{ {
auto object = AllocateHugeObject(thread, size); auto object = AllocateHugeObject(thread, size);
@ -919,7 +964,7 @@ TaggedObject *SharedHeap::AllocateSOldTlab(JSThread *thread, size_t size)
if (sOldSpace_->GetCommittedSize() > sOldSpace_->GetInitialCapacity() / 2) { // 2: half if (sOldSpace_->GetCommittedSize() > sOldSpace_->GetInitialCapacity() / 2) { // 2: half
object = reinterpret_cast<TaggedObject *>(sOldSpace_->AllocateNoGCAndExpand(thread, size)); object = reinterpret_cast<TaggedObject *>(sOldSpace_->AllocateNoGCAndExpand(thread, size));
} else { } else {
object = reinterpret_cast<TaggedObject *>(sOldSpace_->Allocate(thread, size)); object = AllocateInSOldSpace(thread, size);
} }
return object; return object;
} }
@ -951,7 +996,7 @@ void SharedHeap::TriggerConcurrentMarking(JSThread *thread)
template<TriggerGCType gcType, GCReason gcReason> template<TriggerGCType gcType, GCReason gcReason>
void SharedHeap::CollectGarbage(JSThread *thread) void SharedHeap::CollectGarbage(JSThread *thread)
{ {
ASSERT(gcType == TriggerGCType::SHARED_GC); ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_FULL_GC);
#ifndef NDEBUG #ifndef NDEBUG
ASSERT(!thread->HasLaunchedSuspendAll()); ASSERT(!thread->HasLaunchedSuspendAll());
#endif #endif

View File

@ -20,6 +20,10 @@
#include "ecmascript/base/block_hook_scope.h" #include "ecmascript/base/block_hook_scope.h"
#include "ecmascript/checkpoint/thread_state_transition.h" #include "ecmascript/checkpoint/thread_state_transition.h"
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
#include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
#endif
#include "ecmascript/daemon/daemon_thread.h"
#include "ecmascript/ecma_string_table.h" #include "ecmascript/ecma_string_table.h"
#include "ecmascript/ecma_vm.h" #include "ecmascript/ecma_vm.h"
#include "ecmascript/free_object.h" #include "ecmascript/free_object.h"
@ -40,6 +44,7 @@
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h" #include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/mem/shared_heap/shared_gc_marker-inl.h" #include "ecmascript/mem/shared_heap/shared_gc_marker-inl.h"
#include "ecmascript/mem/shared_heap/shared_gc.h" #include "ecmascript/mem/shared_heap/shared_gc.h"
#include "ecmascript/mem/shared_heap/shared_full_gc.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_marker.h" #include "ecmascript/mem/shared_heap/shared_concurrent_marker.h"
#include "ecmascript/mem/stw_young_gc.h" #include "ecmascript/mem/stw_young_gc.h"
#include "ecmascript/mem/verification.h" #include "ecmascript/mem/verification.h"
@ -102,17 +107,31 @@ void SharedHeap::DestroyInstance()
void SharedHeap::ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread) void SharedHeap::ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread)
{ {
ASSERT(gcType == TriggerGCType::SHARED_GC);
ASSERT(!dThread_->IsRunning()); ASSERT(!dThread_->IsRunning());
SuspendAllScope scope(thread); SuspendAllScope scope(thread);
SharedGCScope sharedGCScope; // SharedGCScope should be after SuspendAllScope.
RecursionScope recurScope(this, HeapType::SHARED_HEAP); RecursionScope recurScope(this, HeapType::SHARED_HEAP);
GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason); GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
if (UNLIKELY(ShouldVerifyHeap())) { if (UNLIKELY(ShouldVerifyHeap())) {
// pre gc heap verify // pre gc heap verify
LOG_ECMA(DEBUG) << "pre gc shared heap verify"; LOG_ECMA(DEBUG) << "pre gc shared heap verify";
sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll(); SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
} }
sharedGC_->RunPhases(); switch (gcType) {
case TriggerGCType::SHARED_GC: {
sharedGC_->RunPhases();
break;
}
case TriggerGCType::SHARED_FULL_GC: {
sharedFullGC_->RunPhases();
break;
}
default:
LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE();
break;
}
if (UNLIKELY(ShouldVerifyHeap())) { if (UNLIKELY(ShouldVerifyHeap())) {
// pre gc heap verify // pre gc heap verify
LOG_ECMA(DEBUG) << "after gc shared heap verify"; LOG_ECMA(DEBUG) << "after gc shared heap verify";
@ -197,8 +216,10 @@ void SharedHeap::Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegion
TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE); TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);
sOldSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity); sOldSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
sCompressSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
sReadOnlySpace_ = new SharedReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity); sReadOnlySpace_ = new SharedReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
sHugeObjectSpace_ = new SharedHugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity); sHugeObjectSpace_ = new SharedHugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
sAppSpawnSpace_ = new SharedAppSpawnSpace(this, oldSpaceCapacity);
growingFactor_ = config_.GetSharedHeapLimitGrowingFactor(); growingFactor_ = config_.GetSharedHeapLimitGrowingFactor();
growingStep_ = config_.GetSharedHeapLimitGrowingStep(); growingStep_ = config_.GetSharedHeapLimitGrowingStep();
incNativeSizeTriggerSharedCM_= config_.GetStepNativeSizeInc(); incNativeSizeTriggerSharedCM_= config_.GetStepNativeSizeInc();
@ -217,6 +238,11 @@ void SharedHeap::Destroy()
delete sOldSpace_; delete sOldSpace_;
sOldSpace_ = nullptr; sOldSpace_ = nullptr;
} }
if (sCompressSpace_ != nullptr) {
sCompressSpace_->Reset();
delete sCompressSpace_;
sCompressSpace_ = nullptr;
}
if (sNonMovableSpace_ != nullptr) { if (sNonMovableSpace_ != nullptr) {
sNonMovableSpace_->Reset(); sNonMovableSpace_->Reset();
delete sNonMovableSpace_; delete sNonMovableSpace_;
@ -233,10 +259,19 @@ void SharedHeap::Destroy()
delete sReadOnlySpace_; delete sReadOnlySpace_;
sReadOnlySpace_ = nullptr; sReadOnlySpace_ = nullptr;
} }
if (sAppSpawnSpace_ != nullptr) {
sAppSpawnSpace_->Reset();
delete sAppSpawnSpace_;
sAppSpawnSpace_ = nullptr;
}
if (sharedGC_ != nullptr) { if (sharedGC_ != nullptr) {
delete sharedGC_; delete sharedGC_;
sharedGC_ = nullptr; sharedGC_ = nullptr;
} }
if (sharedFullGC_ != nullptr) {
delete sharedFullGC_;
sharedFullGC_ = nullptr;
}
nativeAreaAllocator_ = nullptr; nativeAreaAllocator_ = nullptr;
heapRegionAllocator_ = nullptr; heapRegionAllocator_ = nullptr;
@ -253,7 +288,10 @@ void SharedHeap::Destroy()
delete sharedGCMarker_; delete sharedGCMarker_;
sharedGCMarker_ = nullptr; sharedGCMarker_ = nullptr;
} }
if (sharedGCMovableMarker_ != nullptr) {
delete sharedGCMovableMarker_;
sharedGCMovableMarker_ = nullptr;
}
dThread_ = nullptr; dThread_ = nullptr;
} }
@ -264,31 +302,43 @@ void SharedHeap::PostInitialization(const GlobalEnvConstants *globalEnvConstants
maxMarkTaskCount_ = totalThreadNum - 1; maxMarkTaskCount_ = totalThreadNum - 1;
sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1); sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
sharedGCMarker_ = new SharedGCMarker(sWorkManager_); sharedGCMarker_ = new SharedGCMarker(sWorkManager_);
sharedGCMovableMarker_ = new SharedGCMovableMarker(sWorkManager_, this);
sConcurrentMarker_ = new SharedConcurrentMarker(option.EnableSharedConcurrentMark() ? sConcurrentMarker_ = new SharedConcurrentMarker(option.EnableSharedConcurrentMark() ?
EnableConcurrentMarkType::ENABLE : EnableConcurrentMarkType::CONFIG_DISABLE); EnableConcurrentMarkType::ENABLE : EnableConcurrentMarkType::CONFIG_DISABLE);
sSweeper_ = new SharedConcurrentSweeper(this, option.EnableConcurrentSweep() ? sSweeper_ = new SharedConcurrentSweeper(this, option.EnableConcurrentSweep() ?
EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE); EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
sharedGC_ = new SharedGC(this); sharedGC_ = new SharedGC(this);
sharedFullGC_ = new SharedFullGC(this);
} }
void SharedHeap::PostGCMarkingTask() void SharedHeap::PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase)
{ {
IncreaseTaskCount(); IncreaseTaskCount();
Taskpool::GetCurrentTaskpool()->PostTask(std::make_unique<ParallelMarkTask>(dThread_->GetThreadId(), this)); Taskpool::GetCurrentTaskpool()->PostTask(std::make_unique<ParallelMarkTask>(dThread_->GetThreadId(),
this, sharedTaskPhase));
} }
bool SharedHeap::ParallelMarkTask::Run(uint32_t threadIndex) bool SharedHeap::ParallelMarkTask::Run(uint32_t threadIndex)
{ {
// Synchronizes-with. Ensure that WorkManager::Initialize must be seen by MarkerThreads. // Synchronizes-with. Ensure that WorkManager::Initialize must be seen by MarkerThreads.
while (!sHeap_->GetWorkManager()->HasInitialized()); while (!sHeap_->GetWorkManager()->HasInitialized());
sHeap_->GetSharedGCMarker()->ProcessMarkStack(threadIndex); switch (taskPhase_) {
case SharedParallelMarkPhase::SHARED_MARK_TASK:
sHeap_->GetSharedGCMarker()->ProcessMarkStack(threadIndex);
break;
case SharedParallelMarkPhase::SHARED_COMPRESS_TASK:
sHeap_->GetSharedGCMovableMarker()->ProcessMarkStack(threadIndex);
break;
default:
break;
}
sHeap_->ReduceTaskCount(); sHeap_->ReduceTaskCount();
return true; return true;
} }
bool SharedHeap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex) bool SharedHeap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
{ {
sHeap_->ReclaimRegions(); sHeap_->ReclaimRegions(gcType_);
return true; return true;
} }
@ -324,21 +374,37 @@ void SharedHeap::WaitGCFinishedAfterAllJSThreadEliminated()
void SharedHeap::DaemonCollectGarbage([[maybe_unused]]TriggerGCType gcType, [[maybe_unused]]GCReason gcReason) void SharedHeap::DaemonCollectGarbage([[maybe_unused]]TriggerGCType gcType, [[maybe_unused]]GCReason gcReason)
{ {
RecursionScope recurScope(this, HeapType::SHARED_HEAP); RecursionScope recurScope(this, HeapType::SHARED_HEAP);
ASSERT(gcType == TriggerGCType::SHARED_GC); ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_FULL_GC);
ASSERT(JSThread::GetCurrent() == dThread_); ASSERT(JSThread::GetCurrent() == dThread_);
{ {
ThreadManagedScope runningScope(dThread_); ThreadManagedScope runningScope(dThread_);
SuspendAllScope scope(dThread_); SuspendAllScope scope(dThread_);
SharedGCScope sharedGCScope; // SharedGCScope should be after SuspendAllScope.
gcType_ = gcType; gcType_ = gcType;
GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason); GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
if (UNLIKELY(ShouldVerifyHeap())) { if (UNLIKELY(ShouldVerifyHeap())) {
// pre gc heap verify // pre gc heap verify
LOG_ECMA(DEBUG) << "pre gc shared heap verify"; LOG_ECMA(DEBUG) << "pre gc shared heap verify";
sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll(); SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
} }
sharedGC_->RunPhases(); switch (gcType) {
case TriggerGCType::SHARED_GC: {
sharedGC_->RunPhases();
break;
}
case TriggerGCType::SHARED_FULL_GC: {
sharedFullGC_->RunPhases();
break;
}
default:
LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE();
break;
}
if (UNLIKELY(ShouldVerifyHeap())) { if (UNLIKELY(ShouldVerifyHeap())) {
// pre gc heap verify // after gc heap verify
LOG_ECMA(DEBUG) << "after gc shared heap verify"; LOG_ECMA(DEBUG) << "after gc shared heap verify";
SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll(); SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
} }
@ -382,6 +448,34 @@ void SharedHeap::Prepare(bool inTriggerGCThread)
WaitClearTaskFinished(); WaitClearTaskFinished();
} }
SharedHeap::SharedGCScope::SharedGCScope()
{
Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
std::shared_ptr<pgo::PGOProfiler> pgoProfiler = thread->GetEcmaVM()->GetPGOProfiler();
if (pgoProfiler != nullptr) {
pgoProfiler->SuspendByGC();
}
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
thread->SetGcState(true);
#endif
});
}
SharedHeap::SharedGCScope::~SharedGCScope()
{
Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
ASSERT(!thread->IsInRunningState());
const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
std::shared_ptr<pgo::PGOProfiler> pgoProfiler = thread->GetEcmaVM()->GetPGOProfiler();
if (pgoProfiler != nullptr) {
pgoProfiler->ResumeByGC();
}
#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
thread->SetGcState(false);
#endif
});
}
void SharedHeap::PrepareRecordRegionsForReclaim() void SharedHeap::PrepareRecordRegionsForReclaim()
{ {
sOldSpace_->SetRecordRegion(); sOldSpace_->SetRecordRegion();
@ -389,23 +483,25 @@ void SharedHeap::PrepareRecordRegionsForReclaim()
sHugeObjectSpace_->SetRecordRegion(); sHugeObjectSpace_->SetRecordRegion();
} }
void SharedHeap::Reclaim() void SharedHeap::Reclaim(TriggerGCType gcType)
{ {
PrepareRecordRegionsForReclaim(); PrepareRecordRegionsForReclaim();
sHugeObjectSpace_->ReclaimHugeRegion(); sHugeObjectSpace_->ReclaimHugeRegion();
if (parallelGC_) { if (parallelGC_) {
clearTaskFinished_ = false; clearTaskFinished_ = false;
Taskpool::GetCurrentTaskpool()->PostTask( Taskpool::GetCurrentTaskpool()->PostTask(
std::make_unique<AsyncClearTask>(dThread_->GetThreadId(), this)); std::make_unique<AsyncClearTask>(dThread_->GetThreadId(), this, gcType));
} else { } else {
ReclaimRegions(); ReclaimRegions(gcType);
} }
} }
void SharedHeap::ReclaimRegions() void SharedHeap::ReclaimRegions(TriggerGCType gcType)
{ {
sOldSpace_->ReclaimRegions(); if (gcType == TriggerGCType::SHARED_FULL_GC) {
sNonMovableSpace_->ReclaimRegions(); sCompressSpace_->Reset();
}
sSweeper_->WaitAllTaskFinished(); sSweeper_->WaitAllTaskFinished();
EnumerateOldSpaceRegionsWithRecord([] (Region *region) { EnumerateOldSpaceRegionsWithRecord([] (Region *region) {
region->ClearMarkGCBitset(); region->ClearMarkGCBitset();
@ -449,24 +545,24 @@ void SharedHeap::UpdateWorkManager(SharedGCWorkManager *sWorkManager)
{ {
sConcurrentMarker_->ResetWorkManager(sWorkManager); sConcurrentMarker_->ResetWorkManager(sWorkManager);
sharedGCMarker_->ResetWorkManager(sWorkManager); sharedGCMarker_->ResetWorkManager(sWorkManager);
sharedGCMovableMarker_->ResetWorkManager(sWorkManager);
sharedGC_->ResetWorkManager(sWorkManager); sharedGC_->ResetWorkManager(sWorkManager);
sharedFullGC_->ResetWorkManager(sWorkManager);
} }
void SharedHeap::TryTriggerLocalConcurrentMarking(JSThread *thread) void SharedHeap::TryTriggerLocalConcurrentMarking()
{ {
if (localFullMarkTriggered_) { if (localFullMarkTriggered_) {
return; return;
} }
{ if (reinterpret_cast<std::atomic<bool>*>(&localFullMarkTriggered_)->exchange(true, std::memory_order_relaxed)
SuspendAllScope scope(thread); != false) {
if (!localFullMarkTriggered_) { return;
localFullMarkTriggered_ = true;
Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
ASSERT(!thread->IsInRunningState());
thread->SetFullMarkRequest();
});
}
} }
ASSERT(localFullMarkTriggered_ == true);
Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
thread->SetFullMarkRequest();
});
} }
size_t SharedHeap::VerifyHeapObjects(VerifyKind verifyKind) const size_t SharedHeap::VerifyHeapObjects(VerifyKind verifyKind) const
@ -484,6 +580,10 @@ size_t SharedHeap::VerifyHeapObjects(VerifyKind verifyKind) const
VerifyObjectVisitor verifier(this, &failCount, verifyKind); VerifyObjectVisitor verifier(this, &failCount, verifyKind);
sHugeObjectSpace_->IterateOverObjects(verifier); sHugeObjectSpace_->IterateOverObjects(verifier);
} }
{
VerifyObjectVisitor verifier(this, &failCount, verifyKind);
sAppSpawnSpace_->IterateOverMarkedObjects(verifier);
}
return failCount; return failCount;
} }
@ -499,6 +599,50 @@ bool SharedHeap::NeedStopCollection()
return false; return false;
} }
void SharedHeap::CompactHeapBeforeFork(JSThread *thread)
{
ThreadManagedScope managedScope(thread);
WaitGCFinished(thread);
sharedFullGC_->SetForAppSpawn(true);
CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
sharedFullGC_->SetForAppSpawn(false);
}
void SharedHeap::MoveOldSpaceToAppspawn()
{
auto committedSize = sOldSpace_->GetCommittedSize();
sAppSpawnSpace_->SetInitialCapacity(committedSize);
sAppSpawnSpace_->SetMaximumCapacity(committedSize);
sOldSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity() - committedSize);
sOldSpace_->SetMaximumCapacity(sOldSpace_->GetMaximumCapacity() - committedSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
sAppSpawnSpace_->SwapAllocationCounter(sOldSpace_);
#endif
auto threadId = Runtime::GetInstance()->GetMainThread()->GetThreadId();
sOldSpace_->EnumerateRegions([&](Region *region) {
region->SetRegionSpaceFlag(RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE);
PageTag(region, region->GetCapacity(), PageTagType::HEAP, region->GetSpaceTypeName(), threadId);
sAppSpawnSpace_->AddRegion(region);
sAppSpawnSpace_->IncreaseLiveObjectSize(region->AliveObject());
});
sOldSpace_->GetRegionList().Clear();
sOldSpace_->Reset();
}
void SharedHeap::ReclaimForAppSpawn()
{
sSweeper_->WaitAllTaskFinished();
sHugeObjectSpace_->ReclaimHugeRegion();
sCompressSpace_->Reset();
MoveOldSpaceToAppspawn();
auto cb = [] (Region *region) {
region->ClearMarkGCBitset();
region->ResetAliveObject();
};
sNonMovableSpace_->EnumerateRegions(cb);
sHugeObjectSpace_->EnumerateRegions(cb);
}
void SharedHeap::DumpHeapSnapshotBeforeOOM([[maybe_unused]]bool isFullGC, [[maybe_unused]]JSThread *thread) void SharedHeap::DumpHeapSnapshotBeforeOOM([[maybe_unused]]bool isFullGC, [[maybe_unused]]JSThread *thread)
{ {
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
@ -953,6 +1097,7 @@ void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
if (UNLIKELY(ShouldVerifyHeap())) { if (UNLIKELY(ShouldVerifyHeap())) {
// pre gc heap verify // pre gc heap verify
LOG_ECMA(DEBUG) << "pre gc heap verify"; LOG_ECMA(DEBUG) << "pre gc heap verify";
ProcessSharedGCRSetWorkList();
Verification(this, VerifyKind::VERIFY_PRE_GC).VerifyAll(); Verification(this, VerifyKind::VERIFY_PRE_GC).VerifyAll();
} }
@ -1922,7 +2067,7 @@ void Heap::ChangeGCParams(bool inBackground)
if (sHeap_->GetHeapObjectSize() - sHeap_->GetHeapAliveSizeAfterGC() > BACKGROUND_GROW_LIMIT && if (sHeap_->GetHeapObjectSize() - sHeap_->GetHeapAliveSizeAfterGC() > BACKGROUND_GROW_LIMIT &&
sHeap_->GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT && sHeap_->GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
doubleOne * sHeap_->GetHeapObjectSize() / sHeap_->GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) { doubleOne * sHeap_->GetHeapObjectSize() / sHeap_->GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
sHeap_->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::SWITCH_BACKGROUND>(thread_); sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::SWITCH_BACKGROUND>(thread_);
} }
if (GetMemGrowingType() != MemGrowingType::PRESSURE) { if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
SetMemGrowingType(MemGrowingType::CONSERVATIVE); SetMemGrowingType(MemGrowingType::CONSERVATIVE);
@ -2418,6 +2563,14 @@ void Heap::ProcessGCListeners()
} }
} }
void SharedHeap::ProcessAllGCListeners()
{
Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
ASSERT(!thread->IsInRunningState());
const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
});
}
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT) #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
uint64_t Heap::GetCurrentTickMillseconds() uint64_t Heap::GetCurrentTickMillseconds()
{ {

View File

@ -53,7 +53,10 @@ class RSetWorkListHandler;
class SharedConcurrentMarker; class SharedConcurrentMarker;
class SharedConcurrentSweeper; class SharedConcurrentSweeper;
class SharedGC; class SharedGC;
class SharedGCMarkerBase;
class SharedGCMarker; class SharedGCMarker;
class SharedFullGC;
class SharedGCMovableMarker;
class STWYoungGC; class STWYoungGC;
class ThreadLocalAllocationBuffer; class ThreadLocalAllocationBuffer;
class IdleGCTrigger; class IdleGCTrigger;
@ -110,6 +113,7 @@ enum class VerifyKind {
VERIFY_POST_SHARED_GC, VERIFY_POST_SHARED_GC,
VERIFY_SHARED_GC_MARK, VERIFY_SHARED_GC_MARK,
VERIFY_SHARED_GC_SWEEP, VERIFY_SHARED_GC_SWEEP,
VERIFY_END,
}; };
class BaseHeap { class BaseHeap {
@ -392,8 +396,8 @@ public:
class ParallelMarkTask : public Task { class ParallelMarkTask : public Task {
public: public:
ParallelMarkTask(int32_t id, SharedHeap *heap) ParallelMarkTask(int32_t id, SharedHeap *heap, SharedParallelMarkPhase taskPhase)
: Task(id), sHeap_(heap) {}; : Task(id), sHeap_(heap), taskPhase_(taskPhase) {};
~ParallelMarkTask() override = default; ~ParallelMarkTask() override = default;
bool Run(uint32_t threadIndex) override; bool Run(uint32_t threadIndex) override;
@ -402,12 +406,13 @@ public:
private: private:
SharedHeap *sHeap_ {nullptr}; SharedHeap *sHeap_ {nullptr};
SharedParallelMarkPhase taskPhase_;
}; };
class AsyncClearTask : public Task { class AsyncClearTask : public Task {
public: public:
AsyncClearTask(int32_t id, SharedHeap *heap) AsyncClearTask(int32_t id, SharedHeap *heap, TriggerGCType type)
: Task(id), sHeap_(heap) {} : Task(id), sHeap_(heap), gcType_(type) {}
~AsyncClearTask() override = default; ~AsyncClearTask() override = default;
bool Run(uint32_t threadIndex) override; bool Run(uint32_t threadIndex) override;
@ -415,6 +420,7 @@ public:
NO_MOVE_SEMANTIC(AsyncClearTask); NO_MOVE_SEMANTIC(AsyncClearTask);
private: private:
SharedHeap *sHeap_; SharedHeap *sHeap_;
TriggerGCType gcType_;
}; };
bool IsMarking() const override bool IsMarking() const override
{ {
@ -483,7 +489,7 @@ public:
bool CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size); bool CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size);
void TryTriggerLocalConcurrentMarking(JSThread *currentThread); void TryTriggerLocalConcurrentMarking();
// Called when all vm is destroyed, and try to destroy daemon thread. // Called when all vm is destroyed, and try to destroy daemon thread.
void WaitAllTasksFinishedAfterAllJSThreadEliminated(); void WaitAllTasksFinishedAfterAllJSThreadEliminated();
@ -557,6 +563,11 @@ public:
return sReadOnlySpace_; return sReadOnlySpace_;
} }
SharedAppSpawnSpace *GetAppSpawnSpace() const
{
return sAppSpawnSpace_;
}
void SetForceGC(bool forceGC) void SetForceGC(bool forceGC)
{ {
LockHolder lock(smartGCStats_.sensitiveStatusMutex_); LockHolder lock(smartGCStats_.sensitiveStatusMutex_);
@ -678,8 +689,10 @@ public:
} }
void Prepare(bool inTriggerGCThread); void Prepare(bool inTriggerGCThread);
void Reclaim(); void Reclaim(TriggerGCType gcType);
void PostGCMarkingTask(); void PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase);
void CompactHeapBeforeFork(JSThread *thread);
void ReclaimForAppSpawn();
SharedGCWorkManager *GetWorkManager() const SharedGCWorkManager *GetWorkManager() const
{ {
@ -691,6 +704,12 @@ public:
return sharedGCMarker_; return sharedGCMarker_;
} }
SharedGCMovableMarker *GetSharedGCMovableMarker() const
{
return sharedGCMovableMarker_;
}
inline void SwapOldSpace();
void PrepareRecordRegionsForReclaim(); void PrepareRecordRegionsForReclaim();
template<class Callback> template<class Callback>
@ -730,15 +749,26 @@ public:
size_t VerifyHeapObjects(VerifyKind verifyKind) const; size_t VerifyHeapObjects(VerifyKind verifyKind) const;
inline void MergeToOldSpaceSync(SharedLocalSpace *localSpace);
void DumpHeapSnapshotBeforeOOM(bool isFullGC, JSThread *thread); void DumpHeapSnapshotBeforeOOM(bool isFullGC, JSThread *thread);
class SharedGCScope {
public:
SharedGCScope();
~SharedGCScope();
};
private: private:
void ProcessAllGCListeners();
inline void CollectGarbageFinish(bool inDaemon); inline void CollectGarbageFinish(bool inDaemon);
void MoveOldSpaceToAppspawn();
void ReclaimRegions(); void ReclaimRegions(TriggerGCType type);
void ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread); void ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread);
inline TaggedObject *AllocateInSOldSpace(JSThread *thread, size_t size);
struct SharedHeapSmartGCStats { struct SharedHeapSmartGCStats {
/** /**
* For SmartGC. * For SmartGC.
@ -775,14 +805,18 @@ private:
DaemonThread *dThread_ {nullptr}; DaemonThread *dThread_ {nullptr};
const GlobalEnvConstants *globalEnvConstants_ {nullptr}; const GlobalEnvConstants *globalEnvConstants_ {nullptr};
SharedOldSpace *sOldSpace_ {nullptr}; SharedOldSpace *sOldSpace_ {nullptr};
SharedOldSpace *sCompressSpace_ {nullptr};
SharedNonMovableSpace *sNonMovableSpace_ {nullptr}; SharedNonMovableSpace *sNonMovableSpace_ {nullptr};
SharedReadOnlySpace *sReadOnlySpace_ {nullptr}; SharedReadOnlySpace *sReadOnlySpace_ {nullptr};
SharedHugeObjectSpace *sHugeObjectSpace_ {nullptr}; SharedHugeObjectSpace *sHugeObjectSpace_ {nullptr};
SharedAppSpawnSpace *sAppSpawnSpace_ {nullptr};
SharedGCWorkManager *sWorkManager_ {nullptr}; SharedGCWorkManager *sWorkManager_ {nullptr};
SharedConcurrentMarker *sConcurrentMarker_ {nullptr}; SharedConcurrentMarker *sConcurrentMarker_ {nullptr};
SharedConcurrentSweeper *sSweeper_ {nullptr}; SharedConcurrentSweeper *sSweeper_ {nullptr};
SharedGC *sharedGC_ {nullptr}; SharedGC *sharedGC_ {nullptr};
SharedFullGC *sharedFullGC_ {nullptr};
SharedGCMarker *sharedGCMarker_ {nullptr}; SharedGCMarker *sharedGCMarker_ {nullptr};
SharedGCMovableMarker *sharedGCMovableMarker_ {nullptr};
size_t growingFactor_ {0}; size_t growingFactor_ {0};
size_t growingStep_ {0}; size_t growingStep_ {0};
size_t incNativeSizeTriggerSharedCM_ {0}; size_t incNativeSizeTriggerSharedCM_ {0};
@ -1435,6 +1469,7 @@ public:
PUBLIC_API GCListenerId AddGCListener(FinishGCListener listener, void *data); PUBLIC_API GCListenerId AddGCListener(FinishGCListener listener, void *data);
PUBLIC_API void RemoveGCListener(GCListenerId listenerId); PUBLIC_API void RemoveGCListener(GCListenerId listenerId);
void ProcessGCListeners();
private: private:
inline TaggedObject *AllocateHugeObject(size_t size); inline TaggedObject *AllocateHugeObject(size_t size);
@ -1456,7 +1491,6 @@ private:
void PrepareRecordRegionsForReclaim(); void PrepareRecordRegionsForReclaim();
inline void ReclaimRegions(TriggerGCType gcType); inline void ReclaimRegions(TriggerGCType gcType);
inline size_t CalculateCommittedCacheSize(); inline size_t CalculateCommittedCacheSize();
void ProcessGCListeners();
#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT) #if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
uint64_t GetCurrentTickMillseconds(); uint64_t GetCurrentTickMillseconds();
void ThresholdReachedDump(); void ThresholdReachedDump();

View File

@ -97,6 +97,7 @@ static constexpr size_t IDLE_SPACE_SIZE_MIN_INC_STEP_FULL = 1_MB;
using TaggedType = uint64_t; using TaggedType = uint64_t;
static constexpr uint32_t TAGGED_TYPE_SIZE = sizeof(TaggedType); static constexpr uint32_t TAGGED_TYPE_SIZE = sizeof(TaggedType);
static constexpr uint32_t TAGGED_TYPE_SIZE_LOG = base::MathHelper::GetIntLog2(TAGGED_TYPE_SIZE); static constexpr uint32_t TAGGED_TYPE_SIZE_LOG = base::MathHelper::GetIntLog2(TAGGED_TYPE_SIZE);
constexpr size_t HEAD_SIZE = TaggedObject::TaggedObjectSize();
template<typename T> template<typename T>
constexpr inline bool IsAligned(T value, size_t alignment) constexpr inline bool IsAligned(T value, size_t alignment)

View File

@ -25,7 +25,6 @@
#include "ecmascript/mem/tlab_allocator-inl.h" #include "ecmascript/mem/tlab_allocator-inl.h"
namespace panda::ecmascript { namespace panda::ecmascript {
constexpr size_t HEAD_SIZE = TaggedObject::TaggedObjectSize();
template <typename Callback> template <typename Callback>
ARK_INLINE bool NonMovableMarker::VisitBodyInObj(TaggedObject *root, ObjectSlot start, ObjectSlot end, ARK_INLINE bool NonMovableMarker::VisitBodyInObj(TaggedObject *root, ObjectSlot start, ObjectSlot end,

View File

@ -51,8 +51,10 @@ enum RegionSpaceFlag {
IN_HUGE_MACHINE_CODE_SPACE = 0x11, IN_HUGE_MACHINE_CODE_SPACE = 0x11,
IN_SHARED_NON_MOVABLE = 0x12, IN_SHARED_NON_MOVABLE = 0x12,
IN_SHARED_OLD_SPACE = 0x13, IN_SHARED_OLD_SPACE = 0x13,
IN_SHARED_HUGE_OBJECT_SPACE = 0x14, IN_SHARED_APPSPAWN_SPACE = 0X14,
IN_SHARED_READ_ONLY_SPACE = 0x15, IN_SHARED_HUGE_OBJECT_SPACE = 0x15,
IN_SHARED_READ_ONLY_SPACE = 0x16,
VALID_SPACE_MASK = 0xFF, VALID_SPACE_MASK = 0xFF,
GENERAL_YOUNG_BEGIN = IN_EDEN_SPACE, GENERAL_YOUNG_BEGIN = IN_EDEN_SPACE,
@ -135,6 +137,8 @@ static inline std::string ToSpaceTypeName(uint8_t space)
return "shared read only space"; return "shared read only space";
case RegionSpaceFlag::IN_SHARED_HUGE_OBJECT_SPACE: case RegionSpaceFlag::IN_SHARED_HUGE_OBJECT_SPACE:
return "shared huge object space"; return "shared huge object space";
case RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE:
return "shared appspawn space";
default: default:
return "invalid space"; return "invalid space";
} }
@ -356,6 +360,10 @@ public:
uint8_t GetRegionSpaceFlag(); uint8_t GetRegionSpaceFlag();
void SetRegionSpaceFlag(RegionSpaceFlag flag)
{
packedData_.flags_.spaceFlag_ = flag;
}
bool InEdenSpace() const bool InEdenSpace() const
{ {
return packedData_.flags_.spaceFlag_ == RegionSpaceFlag::IN_EDEN_SPACE; return packedData_.flags_.spaceFlag_ == RegionSpaceFlag::IN_EDEN_SPACE;
@ -439,6 +447,11 @@ public:
return packedData_.flags_.spaceFlag_ == RegionSpaceFlag::IN_SHARED_READ_ONLY_SPACE; return packedData_.flags_.spaceFlag_ == RegionSpaceFlag::IN_SHARED_READ_ONLY_SPACE;
} }
bool InSharedAppSpawnSpace() const
{
return packedData_.flags_.spaceFlag_ == RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE;
}
bool InAppSpawnSpace() const bool InAppSpawnSpace() const
{ {
return packedData_.flags_.spaceFlag_ == RegionSpaceFlag::IN_APPSPAWN_SPACE; return packedData_.flags_.spaceFlag_ == RegionSpaceFlag::IN_APPSPAWN_SPACE;

View File

@ -64,10 +64,10 @@ void SharedConcurrentMarker::Mark(TriggerGCType gcType, GCReason gcReason)
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedConcurrentMarker::Mark"); ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedConcurrentMarker::Mark");
CHECK_DAEMON_THREAD(); CHECK_DAEMON_THREAD();
// TODO: support shared runtime state // TODO: support shared runtime state
InitializeMarking();
if (UNLIKELY(sHeap_->ShouldVerifyHeap())) { if (UNLIKELY(sHeap_->ShouldVerifyHeap())) {
SharedHeapVerification(sHeap_, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll(); SharedHeapVerification(sHeap_, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
} }
InitializeMarking();
} }
// Daemon thread do not need to post task to GC_Thread // Daemon thread do not need to post task to GC_Thread
ASSERT(!dThread_->IsInRunningState()); ASSERT(!dThread_->IsInRunningState());
@ -127,11 +127,14 @@ void SharedConcurrentMarker::InitializeMarking()
dThread_->SetSharedMarkStatus(SharedMarkStatus::CONCURRENT_MARKING_OR_FINISHED); dThread_->SetSharedMarkStatus(SharedMarkStatus::CONCURRENT_MARKING_OR_FINISHED);
sHeapObjectSize_ = sHeap_->GetHeapObjectSize(); sHeapObjectSize_ = sHeap_->GetHeapObjectSize();
sHeap_->GetAppSpawnSpace()->EnumerateRegions([](Region *current) {
current->ClearMarkGCBitset();
});
sHeap_->EnumerateOldSpaceRegions([](Region *current) { sHeap_->EnumerateOldSpaceRegions([](Region *current) {
ASSERT(current->InSharedSweepableSpace()); ASSERT(current->InSharedSweepableSpace());
current->ResetAliveObject(); current->ResetAliveObject();
}); });
sWorkManager_->Initialize(); sWorkManager_->Initialize(TriggerGCType::SHARED_GC, SharedParallelMarkPhase::SHARED_MARK_TASK);
sHeap_->GetSharedGCMarker()->MarkRoots(DAEMON_THREAD_INDEX, SharedMarkType::CONCURRENT_MARK_INITIAL_MARK); sHeap_->GetSharedGCMarker()->MarkRoots(DAEMON_THREAD_INDEX, SharedMarkType::CONCURRENT_MARK_INITIAL_MARK);
} }

View File

@ -27,30 +27,40 @@ SharedConcurrentSweeper::SharedConcurrentSweeper(SharedHeap *heap, EnableConcurr
{ {
} }
void SharedConcurrentSweeper::PostTask() void SharedConcurrentSweeper::PostTask(bool isFullGC)
{ {
auto tid = DaemonThread::GetInstance()->GetThreadId(); auto tid = DaemonThread::GetInstance()->GetThreadId();
if (ConcurrentSweepEnabled()) { if (ConcurrentSweepEnabled()) {
Taskpool::GetCurrentTaskpool()->PostTask( if (!isFullGC) {
std::make_unique<SweeperTask>(tid, this, SHARED_OLD_SPACE)); Taskpool::GetCurrentTaskpool()->PostTask(
std::make_unique<SweeperTask>(tid, this, SHARED_OLD_SPACE));
}
Taskpool::GetCurrentTaskpool()->PostTask( Taskpool::GetCurrentTaskpool()->PostTask(
std::make_unique<SweeperTask>(tid, this, SHARED_NON_MOVABLE)); std::make_unique<SweeperTask>(tid, this, SHARED_NON_MOVABLE));
} }
} }
void SharedConcurrentSweeper::Sweep() void SharedConcurrentSweeper::Sweep(bool isFullGC)
{ {
isFullGC_ = isFullGC;
if (ConcurrentSweepEnabled()) { if (ConcurrentSweepEnabled()) {
// Add all region to region list. Ensure all task finish // Add all region to region list. Ensure all task finish
sHeap_->GetOldSpace()->PrepareSweeping(); if (!isFullGC_) {
sHeap_->GetOldSpace()->PrepareSweeping();
for (int spaceIndex = 0; spaceIndex < SHARED_SWEEPING_SPACE_NUM; spaceIndex++) {
remainingTaskNum_[spaceIndex] = SHARED_SWEEPING_SPACE_NUM;
}
} else {
remainingTaskNum_[0] = 0; // No need sweep shared old space in FullGC.
remainingTaskNum_[1] = 1; // Need sweep nonmovable space in FullGC.
}
sHeap_->GetNonMovableSpace()->PrepareSweeping(); sHeap_->GetNonMovableSpace()->PrepareSweeping();
// Prepare // Prepare
isSweeping_ = true; isSweeping_ = true;
for (int spaceIndex = 0; spaceIndex < SHARED_SWEEPING_SPACE_NUM; spaceIndex++) {
remainingTaskNum_[spaceIndex] = SHARED_SWEEPING_SPACE_NUM;
}
} else { } else {
sHeap_->GetOldSpace()->Sweep(); if (!isFullGC_) {
sHeap_->GetOldSpace()->Sweep();
}
sHeap_->GetNonMovableSpace()->Sweep(); sHeap_->GetNonMovableSpace()->Sweep();
} }
sHeap_->GetHugeObjectSpace()->Sweep(); sHeap_->GetHugeObjectSpace()->Sweep();
@ -72,7 +82,8 @@ void SharedConcurrentSweeper::WaitAllTaskFinished()
if (!isSweeping_) { if (!isSweeping_) {
return; return;
} }
for (int spaceIndex = 0; spaceIndex < SHARED_SWEEPING_SPACE_NUM; spaceIndex++) { int spaceIndex = isFullGC_ ? 1 : 0;
for (; spaceIndex < SHARED_SWEEPING_SPACE_NUM; spaceIndex++) {
if (remainingTaskNum_[spaceIndex] > 0) { if (remainingTaskNum_[spaceIndex] > 0) {
LockHolder holder(mutexs_[spaceIndex]); LockHolder holder(mutexs_[spaceIndex]);
while (remainingTaskNum_[spaceIndex] > 0) { while (remainingTaskNum_[spaceIndex] > 0) {
@ -88,7 +99,8 @@ void SharedConcurrentSweeper::EnsureAllTaskFinished()
if (!isSweeping_) { if (!isSweeping_) {
return; return;
} }
for (int spaceIndex = 0; spaceIndex < SHARED_SWEEPING_SPACE_NUM; spaceIndex++) { int spaceIndex = isFullGC_ ? 1 : 0;
for (; spaceIndex < SHARED_SWEEPING_SPACE_NUM; spaceIndex++) {
int type = spaceIndex + SHARED_SWEEPING_SPACE_BEGIN; int type = spaceIndex + SHARED_SWEEPING_SPACE_BEGIN;
WaitingTaskFinish(static_cast<MemSpaceType>(type)); WaitingTaskFinish(static_cast<MemSpaceType>(type));
} }
@ -127,7 +139,9 @@ void SharedConcurrentSweeper::WaitingTaskFinish(MemSpaceType type)
void SharedConcurrentSweeper::TryFillSweptRegion() void SharedConcurrentSweeper::TryFillSweptRegion()
{ {
sHeap_->GetOldSpace()->TryFillSweptRegion(); if (!isFullGC_) {
sHeap_->GetOldSpace()->TryFillSweptRegion();
}
sHeap_->GetNonMovableSpace()->TryFillSweptRegion(); sHeap_->GetNonMovableSpace()->TryFillSweptRegion();
} }
@ -135,10 +149,14 @@ bool SharedConcurrentSweeper::SweeperTask::Run([[maybe_unused]] uint32_t threadI
{ {
if (type_ == SHARED_NON_MOVABLE) { if (type_ == SHARED_NON_MOVABLE) {
sweeper_->AsyncSweepSpace(SHARED_NON_MOVABLE, false); sweeper_->AsyncSweepSpace(SHARED_NON_MOVABLE, false);
sweeper_->AsyncSweepSpace(SHARED_OLD_SPACE, false); if (!sweeper_->isFullGC_) {
sweeper_->AsyncSweepSpace(SHARED_OLD_SPACE, false);
}
} else { } else {
ASSERT(type_ == SHARED_OLD_SPACE); ASSERT(type_ == SHARED_OLD_SPACE);
sweeper_->AsyncSweepSpace(SHARED_OLD_SPACE, false); if (!sweeper_->isFullGC_) {
sweeper_->AsyncSweepSpace(SHARED_OLD_SPACE, false);
}
sweeper_->AsyncSweepSpace(SHARED_NON_MOVABLE, false); sweeper_->AsyncSweepSpace(SHARED_NON_MOVABLE, false);
} }

View File

@ -29,8 +29,8 @@ public:
NO_COPY_SEMANTIC(SharedConcurrentSweeper); NO_COPY_SEMANTIC(SharedConcurrentSweeper);
NO_MOVE_SEMANTIC(SharedConcurrentSweeper); NO_MOVE_SEMANTIC(SharedConcurrentSweeper);
void PostTask(); void PostTask(bool isFullGC);
void Sweep(); void Sweep(bool isFullGC);
void WaitAllTaskFinished(); void WaitAllTaskFinished();
// Help to finish sweeping task. It can be called through js thread // Help to finish sweeping task. It can be called through js thread
@ -100,6 +100,7 @@ private:
SharedHeap *sHeap_; SharedHeap *sHeap_;
EnableConcurrentSweepType enableType_ {EnableConcurrentSweepType::CONFIG_DISABLE}; EnableConcurrentSweepType enableType_ {EnableConcurrentSweepType::CONFIG_DISABLE};
bool isSweeping_ {false}; bool isSweeping_ {false};
bool isFullGC_ {false};
}; };
} // namespace panda::ecmascript } // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_SHARED_HEAP_SHARED_CONCURRENT_SWEEPER_H #endif // ECMASCRIPT_MEM_SHARED_HEAP_SHARED_CONCURRENT_SWEEPER_H

View File

@ -0,0 +1,184 @@
/*
* Copyright (c) 2024 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ecmascript/mem/shared_heap/shared_full_gc.h"
#include "ecmascript/checkpoint/thread_state_transition.h"
#include "ecmascript/ecma_string_table.h"
#include "ecmascript/ecma_vm.h"
#include "ecmascript/mem/barriers-inl.h"
#include "ecmascript/mem/gc_stats.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/object_xray.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_marker.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/mem/shared_heap/shared_gc_marker-inl.h"
#include "ecmascript/mem/slots.h"
#include "ecmascript/mem/space-inl.h"
#include "ecmascript/mem/verification.h"
#include "ecmascript/mem/visitor.h"
#include "ecmascript/runtime.h"
namespace panda::ecmascript {
void SharedFullGC::RunPhases()
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedFullGC::RunPhases"
+ std::to_string(static_cast<int>(sHeap_->GetEcmaGCStats()->GetGCReason()))
+ ";Sensitive" + std::to_string(static_cast<int>(sHeap_->GetSensitiveStatus()))
+ ";IsInBackground" + std::to_string(sHeap_->IsInBackground())
+ ";Startup" + std::to_string(sHeap_->OnStartupEvent())
+ ";Old" + std::to_string(sHeap_->GetOldSpace()->GetCommittedSize())
+ ";huge" + std::to_string(sHeap_->GetHugeObjectSpace()->GetCommittedSize())
+ ";NonMov" + std::to_string(sHeap_->GetNonMovableSpace()->GetCommittedSize())
+ ";TotCommit" + std::to_string(sHeap_->GetCommittedSize()));
TRACE_GC(GCStats::Scope::ScopeId::TotalGC, sHeap_->GetEcmaGCStats());
Initialize();
Mark();
Sweep();
Finish();
}
void SharedFullGC::Initialize()
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedFullGC::Initialize");
TRACE_GC(GCStats::Scope::ScopeId::Initialize, sHeap_->GetEcmaGCStats());
sHeap_->Prepare(true);
if (UNLIKELY(sHeap_->CheckOngoingConcurrentMarking())) {
// Concurrent shared mark should always trigger shared gc without moving.
sHeap_->GetConcurrentMarker()->Reset(true);
}
sHeap_->GetAppSpawnSpace()->EnumerateRegions([](Region *current) {
current->ClearMarkGCBitset();
});
sHeap_->EnumerateOldSpaceRegions([](Region *current) {
ASSERT(current->InSharedSweepableSpace());
current->ResetAliveObject();
});
sWorkManager_->Initialize(TriggerGCType::SHARED_FULL_GC, SharedParallelMarkPhase::SHARED_COMPRESS_TASK);
}
void SharedFullGC::Mark()
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedFullGC::Mark");
TRACE_GC(GCStats::Scope::ScopeId::Mark, sHeap_->GetEcmaGCStats());
SharedGCMovableMarker *marker = sHeap_->GetSharedGCMovableMarker();
marker->MarkRoots(DAEMON_THREAD_INDEX, SharedMarkType::NOT_CONCURRENT_MARK, VMRootVisitType::UPDATE_ROOT);
marker->DoMark<SharedMarkType::NOT_CONCURRENT_MARK>(DAEMON_THREAD_INDEX);
marker->MergeBackAndResetRSetWorkListHandler();
sHeap_->WaitRunningTaskFinished();
}
void SharedFullGC::Sweep()
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedFullGC::Sweep");
TRACE_GC(GCStats::Scope::ScopeId::Sweep, sHeap_->GetEcmaGCStats());
UpdateRecordWeakReference();
WeakRootVisitor gcUpdateWeak = [](TaggedObject *header) {
Region *objectRegion = Region::ObjectAddressToRange(header);
if (!objectRegion) {
LOG_GC(ERROR) << "SharedFullGC updateWeakReference: region is nullptr, header is " << header;
return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
}
if (objectRegion->InSharedOldSpace()) {
MarkWord markWord(header);
if (markWord.IsForwardingAddress()) {
return markWord.ToForwardingAddress();
}
return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
}
if (!objectRegion->InSharedSweepableSpace() || objectRegion->Test(header)) {
return header;
}
return reinterpret_cast<TaggedObject *>(ToUintPtr(nullptr));
};
auto stringTableCleaner = Runtime::GetInstance()->GetEcmaStringTable()->GetCleaner();
stringTableCleaner->PostSweepWeakRefTask(gcUpdateWeak);
Runtime::GetInstance()->ProcessNativeDeleteInSharedGC(gcUpdateWeak);
Runtime::GetInstance()->GCIterateThreadList([&](JSThread *thread) {
ASSERT(!thread->IsInRunningState());
thread->IterateWeakEcmaGlobalStorage(gcUpdateWeak, GCKind::SHARED_GC);
thread->GetEcmaVM()->ProcessSharedNativeDelete(gcUpdateWeak);
const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->ResetTlab();
thread->ClearContextCachedConstantPool();
});
stringTableCleaner->JoinAndWaitSweepWeakRefTask(gcUpdateWeak);
sHeap_->GetSweeper()->Sweep(true);
sHeap_->GetSweeper()->PostTask(true);
}
void SharedFullGC::Finish()
{
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedFullGC::Finish");
TRACE_GC(GCStats::Scope::ScopeId::Finish, sHeap_->GetEcmaGCStats());
sHeap_->SwapOldSpace();
sWorkManager_->Finish();
if (!isAppspawn_) {
sHeap_->Reclaim(TriggerGCType::SHARED_FULL_GC);
} else {
sHeap_->ReclaimForAppSpawn();
}
sHeap_->GetSweeper()->TryFillSweptRegion();
}
void SharedFullGC::UpdateRecordWeakReference()
{
auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
for (uint32_t i = 0; i < totalThreadCount; i++) {
ProcessQueue *queue = sHeap_->GetWorkManager()->GetWeakReferenceQueue(i);
while (true) {
auto obj = queue->PopBack();
if (UNLIKELY(obj == nullptr)) {
break;
}
ObjectSlot slot(ToUintPtr(obj));
JSTaggedValue value(slot.GetTaggedType());
ASSERT(value.IsWeak());
auto header = value.GetTaggedWeakRef();
Region *objectRegion = Region::ObjectAddressToRange(header);
if (!objectRegion->InSharedOldSpace()) {
if (!objectRegion->Test(header)) {
slot.Clear();
}
} else {
MarkWord markWord(header);
if (markWord.IsForwardingAddress()) {
TaggedObject *dst = markWord.ToForwardingAddress();
auto weakRef = JSTaggedValue(JSTaggedValue(dst).CreateAndGetWeakRef()).GetRawTaggedObject();
slot.Update(weakRef);
} else {
slot.Clear();
}
}
}
}
}
bool SharedFullGC::HasEvacuated(Region *region)
{
auto marker = reinterpret_cast<SharedGCMovableMarker *>(sHeap_->GetSharedGCMovableMarker());
return marker->NeedEvacuate(region);
}
void SharedFullGC::ResetWorkManager(SharedGCWorkManager *sWorkManager)
{
sWorkManager_ = sWorkManager;
}
} // namespace panda::ecmascript

View File

@ -0,0 +1,56 @@
/*
* Copyright (c) 2024 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ECMASCRIPT_MEM_SHARED_HEAP_SHARED_FULL_GC_H
#define ECMASCRIPT_MEM_SHARED_HEAP_SHARED_FULL_GC_H
#include "ecmascript/mem/garbage_collector.h"
#include "ecmascript/mem/heap.h"
#include "ecmascript/mem/mark_stack.h"
#include "ecmascript/mem/mark_word.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/work_manager.h"
namespace panda::ecmascript {
class SharedFullGC : public GarbageCollector {
public:
explicit SharedFullGC(SharedHeap *heap) : sHeap_(heap), sWorkManager_(heap->GetWorkManager()) {}
~SharedFullGC() override = default;
NO_COPY_SEMANTIC(SharedFullGC);
NO_MOVE_SEMANTIC(SharedFullGC);
void RunPhases() override;
void ResetWorkManager(SharedGCWorkManager *workManager);
void SetForAppSpawn(bool flag)
{
isAppspawn_ = flag;
}
protected:
void Initialize() override;
void Mark() override;
void Sweep() override;
void Finish() override;
private:
void UpdateRecordWeakReference();
bool HasEvacuated(Region *region);
SharedHeap *sHeap_ {nullptr};
SharedGCWorkManager *sWorkManager_ {nullptr};
bool isAppspawn_ {false};
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_SHARED_HEAP_SHARED_FULL_GC_H

View File

@ -70,11 +70,14 @@ void SharedGC::Initialize()
TRACE_GC(GCStats::Scope::ScopeId::Initialize, sHeap_->GetEcmaGCStats()); TRACE_GC(GCStats::Scope::ScopeId::Initialize, sHeap_->GetEcmaGCStats());
if (!markingInProgress_) { if (!markingInProgress_) {
sHeap_->Prepare(true); sHeap_->Prepare(true);
sHeap_->GetAppSpawnSpace()->EnumerateRegions([](Region *current) {
current->ClearMarkGCBitset();
});
sHeap_->EnumerateOldSpaceRegions([](Region *current) { sHeap_->EnumerateOldSpaceRegions([](Region *current) {
ASSERT(current->InSharedSweepableSpace()); ASSERT(current->InSharedSweepableSpace());
current->ResetAliveObject(); current->ResetAliveObject();
}); });
sWorkManager_->Initialize(); sWorkManager_->Initialize(TriggerGCType::SHARED_GC, SharedParallelMarkPhase::SHARED_MARK_TASK);
} }
} }
void SharedGC::Mark() void SharedGC::Mark()
@ -120,15 +123,15 @@ void SharedGC::Sweep()
}); });
stringTableCleaner->JoinAndWaitSweepWeakRefTask(gcUpdateWeak); stringTableCleaner->JoinAndWaitSweepWeakRefTask(gcUpdateWeak);
sHeap_->GetSweeper()->Sweep(); sHeap_->GetSweeper()->Sweep(false);
sHeap_->GetSweeper()->PostTask(); sHeap_->GetSweeper()->PostTask(false);
} }
void SharedGC::Finish() void SharedGC::Finish()
{ {
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGC::Finish"); ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGC::Finish");
TRACE_GC(GCStats::Scope::ScopeId::Finish, sHeap_->GetEcmaGCStats()); TRACE_GC(GCStats::Scope::ScopeId::Finish, sHeap_->GetEcmaGCStats());
sHeap_->Reclaim(); sHeap_->Reclaim(TriggerGCType::SHARED_GC);
if (markingInProgress_) { if (markingInProgress_) {
sHeap_->GetConcurrentMarker()->Reset(false); sHeap_->GetConcurrentMarker()->Reset(false);
} else { } else {

View File

@ -21,18 +21,20 @@
#include "ecmascript/js_hclass-inl.h" #include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/heap-inl.h" #include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/region-inl.h" #include "ecmascript/mem/region-inl.h"
#include "ecmascript/mem/tlab_allocator-inl.h"
namespace panda::ecmascript { namespace panda::ecmascript {
inline void SharedGCMarker::MarkObject(uint32_t threadId, TaggedObject *object) inline void SharedGCMarker::MarkObject(uint32_t threadId, TaggedObject *object, [[maybe_unused]] ObjectSlot &slot)
{ {
Region *objectRegion = Region::ObjectAddressToRange(object); Region *objectRegion = Region::ObjectAddressToRange(object);
ASSERT(objectRegion->InSharedHeap()); ASSERT(objectRegion->InSharedHeap());
if (!objectRegion->InSharedReadOnlySpace() && objectRegion->AtomicMark(object)) { if (!objectRegion->InSharedReadOnlySpace() && objectRegion->AtomicMark(object)) {
ASSERT(objectRegion->InSharedSweepableSpace());
sWorkManager_->Push(threadId, object); sWorkManager_->Push(threadId, object);
} }
} }
inline void SharedGCMarker::MarkObjectFromJSThread(WorkNode *&localBuffer, TaggedObject *object) inline void SharedGCMarkerBase::MarkObjectFromJSThread(WorkNode *&localBuffer, TaggedObject *object)
{ {
Region *objectRegion = Region::ObjectAddressToRange(object); Region *objectRegion = Region::ObjectAddressToRange(object);
ASSERT(objectRegion->InSharedHeap()); ASSERT(objectRegion->InSharedHeap());
@ -46,30 +48,30 @@ inline void SharedGCMarker::MarkValue(uint32_t threadId, ObjectSlot &slot)
JSTaggedValue value(slot.GetTaggedType()); JSTaggedValue value(slot.GetTaggedType());
if (value.IsInSharedSweepableSpace()) { if (value.IsInSharedSweepableSpace()) {
if (!value.IsWeakForHeapObject()) { if (!value.IsWeakForHeapObject()) {
MarkObject(threadId, value.GetTaggedObject()); MarkObject(threadId, value.GetTaggedObject(), slot);
} else { } else {
RecordWeakReference(threadId, reinterpret_cast<JSTaggedType *>(slot.SlotAddress())); RecordWeakReference(threadId, reinterpret_cast<JSTaggedType *>(slot.SlotAddress()));
} }
} }
} }
inline void SharedGCMarker::HandleRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot slot) inline void SharedGCMarkerBase::HandleRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot slot)
{ {
JSTaggedValue value(slot.GetTaggedType()); JSTaggedValue value(slot.GetTaggedType());
if (value.IsInSharedSweepableSpace()) { if (value.IsInSharedSweepableSpace()) {
MarkObject(threadId, value.GetTaggedObject()); MarkObject(threadId, value.GetTaggedObject(), slot);
} }
} }
inline void SharedGCMarker::HandleLocalRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot slot) inline void SharedGCMarkerBase::HandleLocalRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot slot)
{ {
JSTaggedValue value(slot.GetTaggedType()); JSTaggedValue value(slot.GetTaggedType());
if (value.IsInSharedSweepableSpace()) { if (value.IsInSharedSweepableSpace()) {
MarkObject(threadId, value.GetTaggedObject()); MarkObject(threadId, value.GetTaggedObject(), slot);
} }
} }
inline void SharedGCMarker::HandleLocalRangeRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot start, inline void SharedGCMarkerBase::HandleLocalRangeRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot start,
ObjectSlot end) ObjectSlot end)
{ {
for (ObjectSlot slot = start; slot < end; slot++) { for (ObjectSlot slot = start; slot < end; slot++) {
@ -78,21 +80,29 @@ inline void SharedGCMarker::HandleLocalRangeRoots(uint32_t threadId, [[maybe_unu
if (value.IsWeakForHeapObject()) { if (value.IsWeakForHeapObject()) {
LOG_ECMA_MEM(FATAL) << "Weak Reference in SharedGCMarker roots"; LOG_ECMA_MEM(FATAL) << "Weak Reference in SharedGCMarker roots";
} }
MarkObject(threadId, value.GetTaggedObject()); MarkObject(threadId, value.GetTaggedObject(), slot);
} }
} }
} }
inline void SharedGCMarker::HandleLocalDerivedRoots([[maybe_unused]] Root type, [[maybe_unused]] ObjectSlot base, void SharedGCMarker::HandleLocalDerivedRoots([[maybe_unused]] Root type, [[maybe_unused]] ObjectSlot base,
[[maybe_unused]] ObjectSlot derived, [[maybe_unused]] ObjectSlot derived,
[[maybe_unused]] uintptr_t baseOldObject) [[maybe_unused]] uintptr_t baseOldObject)
{ {
// It is only used to update the derived value. The mark of share GC does not need to update slot // It is only used to update the derived value. The mark of share GC does not need to update slot
} }
void SharedGCMovableMarker::HandleLocalDerivedRoots([[maybe_unused]] Root type, ObjectSlot base,
ObjectSlot derived, uintptr_t baseOldObject)
{
if (JSTaggedValue(base.GetTaggedType()).IsHeapObject()) {
derived.Update(base.GetTaggedType() + derived.GetTaggedType() - baseOldObject);
}
}
template <typename Callback> template <typename Callback>
ARK_INLINE bool SharedGCMarker::VisitBodyInObj(TaggedObject *root, ObjectSlot start, ObjectSlot end, ARK_INLINE bool SharedGCMarkerBase::VisitBodyInObj(TaggedObject *root, ObjectSlot start, ObjectSlot end,
Callback callback) Callback callback)
{ {
auto hclass = root->SynchronizedGetClass(); auto hclass = root->SynchronizedGetClass();
int index = 0; int index = 0;
@ -109,28 +119,30 @@ ARK_INLINE bool SharedGCMarker::VisitBodyInObj(TaggedObject *root, ObjectSlot st
return true; return true;
} }
inline void SharedGCMarker::RecordWeakReference(uint32_t threadId, JSTaggedType *slot) inline void SharedGCMarkerBase::RecordWeakReference(uint32_t threadId, JSTaggedType *slot)
{ {
sWorkManager_->PushWeakReference(threadId, slot); sWorkManager_->PushWeakReference(threadId, slot);
} }
inline void SharedGCMarker::RecordObject(JSTaggedValue value, uint32_t threadId, void *mem) inline void SharedGCMarkerBase::RecordObject(JSTaggedValue value, uint32_t threadId, void *mem)
{ {
if (value.IsWeakForHeapObject()) { if (value.IsWeakForHeapObject()) {
RecordWeakReference(threadId, reinterpret_cast<JSTaggedType *>(mem)); RecordWeakReference(threadId, reinterpret_cast<JSTaggedType *>(mem));
} else { } else {
MarkObject(threadId, value.GetTaggedObject()); ObjectSlot slot(ToUintPtr(mem));
MarkObject(threadId, value.GetTaggedObject(), slot);
} }
} }
template<SharedMarkType markType> template<SharedMarkType markType>
inline bool SharedGCMarker::GetVisitor(JSTaggedValue value, uint32_t threadId, void *mem) inline bool SharedGCMarkerBase::GetVisitor(JSTaggedValue value, uint32_t threadId, void *mem)
{ {
if (value.IsInSharedSweepableSpace()) { if (value.IsInSharedSweepableSpace()) {
if constexpr (markType == SharedMarkType::CONCURRENT_MARK_INITIAL_MARK) { if constexpr (markType == SharedMarkType::CONCURRENT_MARK_INITIAL_MARK) {
// For now if record weak references from local to share in marking root, the slots // For now if record weak references from local to share in marking root, the slots
// may be invalid due to LocalGC, so just mark them as strong-reference. // may be invalid due to LocalGC, so just mark them as strong-reference.
MarkObject(threadId, value.GetHeapObject()); ObjectSlot slot(ToUintPtr(mem));
MarkObject(threadId, value.GetHeapObject(), slot);
} else { } else {
static_assert(markType == SharedMarkType::NOT_CONCURRENT_MARK); static_assert(markType == SharedMarkType::NOT_CONCURRENT_MARK);
RecordObject(value, threadId, mem); RecordObject(value, threadId, mem);
@ -141,19 +153,18 @@ inline bool SharedGCMarker::GetVisitor(JSTaggedValue value, uint32_t threadId, v
} }
template<SharedMarkType markType> template<SharedMarkType markType>
inline auto SharedGCMarker::GenerateRSetVisitor(uint32_t threadId) inline auto SharedGCMarkerBase::GenerateRSetVisitor(uint32_t threadId)
{ {
auto visitor = [this, threadId](void *mem) -> bool { auto visitor = [this, threadId](void *mem) -> bool {
ObjectSlot slot(ToUintPtr(mem)); ObjectSlot slot(ToUintPtr(mem));
JSTaggedValue value(slot.GetTaggedType()); JSTaggedValue value(slot.GetTaggedType());
return GetVisitor<markType>(value, threadId, mem); return GetVisitor<markType>(value, threadId, mem);
}; };
return visitor; return visitor;
} }
template<SharedMarkType markType> template<SharedMarkType markType>
inline void SharedGCMarker::ProcessVisitorOfDoMark(uint32_t threadId) inline void SharedGCMarkerBase::ProcessVisitorOfDoMark(uint32_t threadId)
{ {
auto rSetVisitor = GenerateRSetVisitor<markType>(threadId); auto rSetVisitor = GenerateRSetVisitor<markType>(threadId);
auto visitor = [rSetVisitor](Region *region, RememberedSet *rSet) { auto visitor = [rSetVisitor](Region *region, RememberedSet *rSet) {
@ -166,7 +177,7 @@ inline void SharedGCMarker::ProcessVisitorOfDoMark(uint32_t threadId)
} }
template<SharedMarkType markType> template<SharedMarkType markType>
inline void SharedGCMarker::DoMark(uint32_t threadId) inline void SharedGCMarkerBase::DoMark(uint32_t threadId)
{ {
if constexpr (markType != SharedMarkType::CONCURRENT_MARK_REMARK) { if constexpr (markType != SharedMarkType::CONCURRENT_MARK_REMARK) {
ProcessVisitorOfDoMark<markType>(threadId); ProcessVisitorOfDoMark<markType>(threadId);
@ -174,7 +185,7 @@ inline void SharedGCMarker::DoMark(uint32_t threadId)
ProcessMarkStack(threadId); ProcessMarkStack(threadId);
} }
inline bool SharedGCMarker::MarkObjectOfProcessVisitor(void *mem, WorkNode *&localBuffer) inline bool SharedGCMarkerBase::MarkObjectOfProcessVisitor(void *mem, WorkNode *&localBuffer)
{ {
ObjectSlot slot(ToUintPtr(mem)); ObjectSlot slot(ToUintPtr(mem));
JSTaggedValue value(slot.GetTaggedType()); JSTaggedValue value(slot.GetTaggedType());
@ -189,7 +200,7 @@ inline bool SharedGCMarker::MarkObjectOfProcessVisitor(void *mem, WorkNode *&loc
return false; return false;
} }
inline void SharedGCMarker::ProcessVisitor(RSetWorkListHandler *handler) inline void SharedGCMarkerBase::ProcessVisitor(RSetWorkListHandler *handler)
{ {
WorkNode *&localBuffer = handler->GetHeap()->GetMarkingObjectLocalBuffer(); WorkNode *&localBuffer = handler->GetHeap()->GetMarkingObjectLocalBuffer();
auto rSetVisitor = [this, &localBuffer](void *mem) -> bool { auto rSetVisitor = [this, &localBuffer](void *mem) -> bool {
@ -201,7 +212,7 @@ inline void SharedGCMarker::ProcessVisitor(RSetWorkListHandler *handler)
handler->ProcessAll(visitor); handler->ProcessAll(visitor);
} }
inline void SharedGCMarker::ProcessThenMergeBackRSetFromBoundJSThread(RSetWorkListHandler *handler) inline void SharedGCMarkerBase::ProcessThenMergeBackRSetFromBoundJSThread(RSetWorkListHandler *handler)
{ {
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarker::ProcessRSet"); ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarker::ProcessRSet");
ASSERT(JSThread::GetCurrent() == handler->GetHeap()->GetEcmaVM()->GetJSThread()); ASSERT(JSThread::GetCurrent() == handler->GetHeap()->GetEcmaVM()->GetJSThread());
@ -209,5 +220,108 @@ inline void SharedGCMarker::ProcessThenMergeBackRSetFromBoundJSThread(RSetWorkLi
ProcessVisitor(handler); ProcessVisitor(handler);
handler->WaitFinishedThenMergeBack(); handler->WaitFinishedThenMergeBack();
} }
void SharedGCMovableMarker::MarkObject(uint32_t threadId, TaggedObject *object, ObjectSlot &slot)
{
Region *objectRegion = Region::ObjectAddressToRange(object);
ASSERT(objectRegion->InSharedHeap());
if (!NeedEvacuate(objectRegion)) {
if (!objectRegion->InSharedReadOnlySpace() && objectRegion->AtomicMark(object)) {
auto hclass = object->GetClass();
auto size = hclass->SizeFromJSHClass(object);
objectRegion->IncreaseAliveObject(size);
sWorkManager_->Push(threadId, object);
}
return;
}
MarkWord markWord(object);
if (markWord.IsForwardingAddress()) {
TaggedObject *dst = markWord.ToForwardingAddress();
slot.Update(dst);
return;
}
return EvacuateObject(threadId, object, markWord, slot);
}
void SharedGCMovableMarker::MarkValue(uint32_t threadId, ObjectSlot &slot)
{
JSTaggedValue value(slot.GetTaggedType());
if (value.IsInSharedSweepableSpace()) {
if (!value.IsWeakForHeapObject()) {
MarkObject(threadId, value.GetTaggedObject(), slot);
} else {
RecordWeakReference(threadId, reinterpret_cast<JSTaggedType *>(slot.SlotAddress()));
}
}
}
bool SharedGCMovableMarker::NeedEvacuate(Region *region)
{
return region->InSharedOldSpace();
}
void SharedGCMovableMarker::EvacuateObject(uint32_t threadId, TaggedObject *object,
const MarkWord &markWord, ObjectSlot slot)
{
JSHClass *klass = markWord.GetJSHClass();
size_t size = klass->SizeFromJSHClass(object);
uintptr_t forwardAddress = AllocateForwardAddress(threadId, size);
RawCopyObject(ToUintPtr(object), forwardAddress, size, markWord);
auto oldValue = markWord.GetValue();
auto result = Barriers::AtomicSetPrimitive(object, 0, oldValue,
MarkWord::FromForwardingAddress(forwardAddress));
if (result == oldValue) {
UpdateForwardAddressIfSuccess(threadId, klass, forwardAddress, size, slot);
return;
}
UpdateForwardAddressIfFailed(object, forwardAddress, size, slot);
}
uintptr_t SharedGCMovableMarker::AllocateDstSpace(uint32_t threadId, size_t size)
{
uintptr_t forwardAddress = 0;
forwardAddress = sWorkManager_->GetTlabAllocator(threadId)->Allocate(size, SHARED_COMPRESS_SPACE);
if (UNLIKELY(forwardAddress == 0)) {
LOG_ECMA_MEM(FATAL) << "EvacuateObject alloc failed: "
<< " size: " << size;
UNREACHABLE();
}
return forwardAddress;
}
inline void SharedGCMovableMarker::RawCopyObject(uintptr_t fromAddress, uintptr_t toAddress, size_t size,
const MarkWord &markWord)
{
if (memcpy_s(ToVoidPtr(toAddress + HEAD_SIZE), size - HEAD_SIZE, ToVoidPtr(fromAddress + HEAD_SIZE),
size - HEAD_SIZE) != EOK) {
LOG_FULL(FATAL) << "memcpy_s failed";
}
*reinterpret_cast<MarkWordType *>(toAddress) = markWord.GetValue();
}
// Called by the worker that won the evacuation CAS: account the surviving
// bytes, queue the new copy for body scanning when it contains reference
// fields, and finally redirect |slot| to the new location.
void SharedGCMovableMarker::UpdateForwardAddressIfSuccess(uint32_t threadId, JSHClass *klass, uintptr_t toAddress,
    size_t size, ObjectSlot slot)
{
    sWorkManager_->IncreaseAliveSize(threadId, size);
    auto *forwardedObject = reinterpret_cast<TaggedObject *>(toAddress);
    if (klass->HasReferenceField()) {
        sWorkManager_->Push(threadId, forwardedObject);
    }
    slot.Update(forwardedObject);
}
// Called by a worker that lost the evacuation CAS: reclaim its speculative
// copy as free space and point |slot| at the copy made by the winning worker.
void SharedGCMovableMarker::UpdateForwardAddressIfFailed(TaggedObject *object, uintptr_t toAddress, size_t size,
    ObjectSlot slot)
{
    // Turn the unused destination copy back into a free object.
    FreeObject::FillFreeObject(sHeap_, toAddress, size);
    // The winner already installed a forwarding pointer in the original
    // object's mark word; follow it.
    TaggedObject *forwarded = MarkWord(object).ToForwardingAddress();
    slot.Update(forwarded);
}
// Allocate destination space for an evacuated object. Thin wrapper over
// AllocateDstSpace, kept as a separate hook on the evacuation path.
uintptr_t SharedGCMovableMarker::AllocateForwardAddress(uint32_t threadId, size_t size)
{
    return AllocateDstSpace(threadId, size);
}
} // namespace panda::ecmascript } // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_SHARED_HEAP_SHARED_GC_MARKER_INL_H #endif // ECMASCRIPT_MEM_SHARED_HEAP_SHARED_GC_MARKER_INL_H

View File

@ -22,9 +22,9 @@
#include "ecmascript/runtime.h" #include "ecmascript/runtime.h"
namespace panda::ecmascript { namespace panda::ecmascript {
void SharedGCMarker::MarkRoots(uint32_t threadId, SharedMarkType markType) void SharedGCMarkerBase::MarkRoots(uint32_t threadId, SharedMarkType markType, VMRootVisitType type)
{ {
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarker::MarkRoots"); ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarkerBase::MarkRoots");
MarkSerializeRoots(threadId); MarkSerializeRoots(threadId);
MarkSharedModule(threadId); MarkSharedModule(threadId);
MarkStringCache(threadId); MarkStringCache(threadId);
@ -38,16 +38,17 @@ void SharedGCMarker::MarkRoots(uint32_t threadId, SharedMarkType markType)
runtime->GCIterateThreadList([&](JSThread *thread) { runtime->GCIterateThreadList([&](JSThread *thread) {
ASSERT(!thread->IsInRunningState()); ASSERT(!thread->IsInRunningState());
auto vm = thread->GetEcmaVM(); auto vm = thread->GetEcmaVM();
MarkLocalVMRoots(threadId, vm, markType); MarkLocalVMRoots(threadId, vm, markType, type);
if (markType != SharedMarkType::CONCURRENT_MARK_REMARK) { if (markType != SharedMarkType::CONCURRENT_MARK_REMARK) {
CollectLocalVMRSet(vm); CollectLocalVMRSet(vm);
} }
}); });
} }
void SharedGCMarker::MarkLocalVMRoots(uint32_t threadId, EcmaVM *localVm, SharedMarkType markType) void SharedGCMarkerBase::MarkLocalVMRoots(uint32_t threadId, EcmaVM *localVm, SharedMarkType markType,
VMRootVisitType type)
{ {
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarker::MarkLocalVMRoots"); ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarkerBase::MarkLocalVMRoots");
Heap *heap = const_cast<Heap*>(localVm->GetHeap()); Heap *heap = const_cast<Heap*>(localVm->GetHeap());
if (markType != SharedMarkType::CONCURRENT_MARK_REMARK) { if (markType != SharedMarkType::CONCURRENT_MARK_REMARK) {
heap->GetSweeper()->EnsureAllTaskFinished(); heap->GetSweeper()->EnsureAllTaskFinished();
@ -61,38 +62,38 @@ void SharedGCMarker::MarkLocalVMRoots(uint32_t threadId, EcmaVM *localVm, Shared
}, },
[this](Root type, ObjectSlot base, ObjectSlot derived, uintptr_t baseOldObject) { [this](Root type, ObjectSlot base, ObjectSlot derived, uintptr_t baseOldObject) {
this->HandleLocalDerivedRoots(type, base, derived, baseOldObject); this->HandleLocalDerivedRoots(type, base, derived, baseOldObject);
}, VMRootVisitType::MARK); }, type);
heap->ProcessSharedGCMarkingLocalBuffer(); heap->ProcessSharedGCMarkingLocalBuffer();
} }
void SharedGCMarker::CollectLocalVMRSet(EcmaVM *localVm) void SharedGCMarkerBase::CollectLocalVMRSet(EcmaVM *localVm)
{ {
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarker::CollectLocalVMRSet"); ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarkerBase::CollectLocalVMRSet");
Heap *heap = const_cast<Heap*>(localVm->GetHeap()); Heap *heap = const_cast<Heap*>(localVm->GetHeap());
RSetWorkListHandler *handler = new RSetWorkListHandler(heap); RSetWorkListHandler *handler = new RSetWorkListHandler(heap);
heap->SetRSetWorkListHandler(handler); heap->SetRSetWorkListHandler(handler);
rSetHandlers_.emplace_back(handler); rSetHandlers_.emplace_back(handler);
} }
void SharedGCMarker::MarkSerializeRoots(uint32_t threadId) void SharedGCMarkerBase::MarkSerializeRoots(uint32_t threadId)
{ {
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarker::MarkSerializeRoots"); ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarkerBase::MarkSerializeRoots");
auto callback = [this, threadId](Root type, ObjectSlot slot) {this->HandleRoots(threadId, type, slot);}; auto callback = [this, threadId](Root type, ObjectSlot slot) {this->HandleRoots(threadId, type, slot);};
Runtime::GetInstance()->IterateSerializeRoot(callback); Runtime::GetInstance()->IterateSerializeRoot(callback);
} }
void SharedGCMarker::MarkStringCache(uint32_t threadId) void SharedGCMarkerBase::MarkStringCache(uint32_t threadId)
{ {
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarker::MarkStringCache"); ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarkerBase::MarkStringCache");
auto cacheStringCallback = [this, threadId](Root type, ObjectSlot start, ObjectSlot end) { auto cacheStringCallback = [this, threadId](Root type, ObjectSlot start, ObjectSlot end) {
this->HandleLocalRangeRoots(threadId, type, start, end); this->HandleLocalRangeRoots(threadId, type, start, end);
}; };
Runtime::GetInstance()->IterateCachedStringRoot(cacheStringCallback); Runtime::GetInstance()->IterateCachedStringRoot(cacheStringCallback);
} }
void SharedGCMarker::MarkSharedModule(uint32_t threadId) void SharedGCMarkerBase::MarkSharedModule(uint32_t threadId)
{ {
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarker::MarkSharedModule"); ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarkerBase::MarkSharedModule");
auto visitor = [this, threadId](Root type, ObjectSlot slot) {this->HandleRoots(threadId, type, slot);}; auto visitor = [this, threadId](Root type, ObjectSlot slot) {this->HandleRoots(threadId, type, slot);};
SharedModuleManager::GetInstance()->Iterate(visitor); SharedModuleManager::GetInstance()->Iterate(visitor);
} }
@ -139,12 +140,58 @@ void SharedGCMarker::ProcessMarkStack(uint32_t threadId)
Region *region = Region::ObjectAddressToRange(obj); Region *region = Region::ObjectAddressToRange(obj);
ASSERT(region->InSharedSweepableSpace()); ASSERT(region->InSharedSweepableSpace());
region->IncreaseAliveObjectSafe(size); region->IncreaseAliveObjectSafe(size);
MarkObject(threadId, hclass); ObjectSlot objectSlot(ToUintPtr(obj));
MarkObject(threadId, hclass, objectSlot);
ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, hclass, visitor); ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, hclass, visitor);
} }
} }
void SharedGCMarker::MergeBackAndResetRSetWorkListHandler() void SharedGCMovableMarker::ProcessMarkStack(uint32_t threadId)
{
#ifndef NDEBUG
DaemonThread *dThread = DaemonThread::GetInstance();
if (UNLIKELY(!dThread->IsRunning())) {
// This DAEMON_THREAD_INDEX not means in daemon thread, but the daemon thread is terminated, and
// SharedGC is directly running in the current js thread, this maybe happen only AppSpawn
// trigger GC after PreFork (which is not expected), and at this time ParallelGC is disabled
ASSERT(threadId == DAEMON_THREAD_INDEX);
} else {
if (os::thread::GetCurrentThreadId() != dThread->GetThreadId()) {
ASSERT(threadId != 0);
} else {
ASSERT(threadId == 0);
}
}
#endif
auto cb = [&](ObjectSlot slot) {
MarkValue(threadId, slot);
};
EcmaObjectRangeVisitor visitor = [this, threadId, cb](TaggedObject *root, ObjectSlot start, ObjectSlot end,
VisitObjectArea area) {
if (area == VisitObjectArea::IN_OBJECT) {
if (VisitBodyInObj(root, start, end, cb)) {
return;
}
}
for (ObjectSlot slot = start; slot < end; slot++) {
MarkValue(threadId, slot);
}
};
TaggedObject *obj = nullptr;
while (true) {
obj = nullptr;
if (!sWorkManager_->Pop(threadId, &obj)) {
break;
}
JSHClass *hclass = obj->SynchronizedGetClass();
[[maybe_unused]] Region *region = Region::ObjectAddressToRange(obj);
ObjectSlot objectSlot(ToUintPtr(obj));
MarkObject(threadId, hclass, objectSlot);
ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, hclass, visitor);
}
}
void SharedGCMarkerBase::MergeBackAndResetRSetWorkListHandler()
{ {
for (RSetWorkListHandler *handler : rSetHandlers_) { for (RSetWorkListHandler *handler : rSetHandlers_) {
handler->MergeBack(); handler->MergeBack();
@ -153,8 +200,15 @@ void SharedGCMarker::MergeBackAndResetRSetWorkListHandler()
rSetHandlers_.clear(); rSetHandlers_.clear();
} }
void SharedGCMarker::ResetWorkManager(SharedGCWorkManager *workManager) void SharedGCMarkerBase::ResetWorkManager(SharedGCWorkManager *workManager)
{ {
sWorkManager_ = workManager; sWorkManager_ = workManager;
} }
SharedGCMarker::SharedGCMarker(SharedGCWorkManager *workManger)
: SharedGCMarkerBase(workManger) {}
SharedGCMovableMarker::SharedGCMovableMarker(SharedGCWorkManager *workManger, SharedHeap *sHeap)
: SharedGCMarkerBase(workManger), sHeap_(sHeap) {}
} // namespace panda::ecmascript } // namespace panda::ecmascript

View File

@ -25,6 +25,8 @@ namespace panda::ecmascript {
class Region; class Region;
class TaggedObject; class TaggedObject;
class SharedGCMarker; class SharedGCMarker;
class JSHClass;
enum class Root;
enum class SharedMarkType : uint8_t { enum class SharedMarkType : uint8_t {
NOT_CONCURRENT_MARK, NOT_CONCURRENT_MARK,
@ -32,32 +34,28 @@ enum class SharedMarkType : uint8_t {
CONCURRENT_MARK_REMARK, CONCURRENT_MARK_REMARK,
}; };
class SharedGCMarker { class SharedGCMarkerBase {
public: public:
explicit SharedGCMarker(SharedGCWorkManager *workManger) : sWorkManager_(workManger) {} explicit SharedGCMarkerBase(SharedGCWorkManager *workManger) : sWorkManager_(workManger) {}
~SharedGCMarker() = default; virtual ~SharedGCMarkerBase() = default;
void ResetWorkManager(SharedGCWorkManager *workManager); void ResetWorkManager(SharedGCWorkManager *workManager);
void MarkRoots(uint32_t threadId, SharedMarkType markType); void MarkRoots(uint32_t threadId, SharedMarkType markType, VMRootVisitType type = VMRootVisitType::MARK);
void MarkLocalVMRoots(uint32_t threadId, EcmaVM *localVm, SharedMarkType markType); void MarkLocalVMRoots(uint32_t threadId, EcmaVM *localVm, SharedMarkType markType,
VMRootVisitType type = VMRootVisitType::MARK);
void CollectLocalVMRSet(EcmaVM *localVm); void CollectLocalVMRSet(EcmaVM *localVm);
void MarkStringCache(uint32_t threadId); void MarkStringCache(uint32_t threadId);
void MarkSerializeRoots(uint32_t threadId); void MarkSerializeRoots(uint32_t threadId);
void MarkSharedModule(uint32_t threadId); void MarkSharedModule(uint32_t threadId);
void ProcessMarkStack(uint32_t threadId);
inline void ProcessThenMergeBackRSetFromBoundJSThread(RSetWorkListHandler *handler); inline void ProcessThenMergeBackRSetFromBoundJSThread(RSetWorkListHandler *handler);
template<SharedMarkType markType> template<SharedMarkType markType>
inline void DoMark(uint32_t threadId); inline void DoMark(uint32_t threadId);
template <typename Callback> template <typename Callback>
inline bool VisitBodyInObj(TaggedObject *root, ObjectSlot start, ObjectSlot end, Callback callback); inline bool VisitBodyInObj(TaggedObject *root, ObjectSlot start, ObjectSlot end, Callback callback);
inline void MarkValue(uint32_t threadId, ObjectSlot &slot);
inline void MarkObject(uint32_t threadId, TaggedObject *object);
inline void HandleRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot slot); inline void HandleRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot slot);
inline void HandleLocalRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot slot); inline void HandleLocalRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot slot);
inline void HandleLocalRangeRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot start, inline void HandleLocalRangeRoots(uint32_t threadId, [[maybe_unused]] Root type, ObjectSlot start,
ObjectSlot end); ObjectSlot end);
inline void HandleLocalDerivedRoots(Root type, ObjectSlot base, ObjectSlot derived,
uintptr_t baseOldObject);
inline void RecordWeakReference(uint32_t threadId, JSTaggedType *ref); inline void RecordWeakReference(uint32_t threadId, JSTaggedType *ref);
void MergeBackAndResetRSetWorkListHandler(); void MergeBackAndResetRSetWorkListHandler();
template<SharedMarkType markType> template<SharedMarkType markType>
@ -65,16 +63,78 @@ public:
inline void ProcessVisitor(RSetWorkListHandler *handler); inline void ProcessVisitor(RSetWorkListHandler *handler);
inline bool MarkObjectOfProcessVisitor(void *mem, WorkNode *&localBuffer); inline bool MarkObjectOfProcessVisitor(void *mem, WorkNode *&localBuffer);
private:
inline void MarkObjectFromJSThread(WorkNode *&localBuffer, TaggedObject *object); inline void MarkObjectFromJSThread(WorkNode *&localBuffer, TaggedObject *object);
virtual inline void MarkValue([[maybe_unused]] uint32_t threadId, [[maybe_unused]] ObjectSlot &slot)
{
LOG_GC(FATAL) << " can not call this method";
}
virtual inline void MarkObject([[maybe_unused]] uint32_t threadId, [[maybe_unused]] TaggedObject *object,
[[maybe_unused]] ObjectSlot &slot)
{
LOG_GC(FATAL) << " can not call this method";
}
virtual inline void HandleLocalDerivedRoots([[maybe_unused]] Root type, [[maybe_unused]] ObjectSlot base,
[[maybe_unused]] ObjectSlot derived,
[[maybe_unused]] uintptr_t baseOldObject)
{
LOG_GC(FATAL) << " can not call this method";
}
virtual void ProcessMarkStack([[maybe_unused]] uint32_t threadId)
{
LOG_GC(FATAL) << " can not call this method";
}
protected:
SharedGCWorkManager *sWorkManager_ {nullptr};
private:
template<SharedMarkType markType> template<SharedMarkType markType>
inline auto GenerateRSetVisitor(uint32_t threadId); inline auto GenerateRSetVisitor(uint32_t threadId);
inline void RecordObject(JSTaggedValue value, uint32_t threadId, void *mem); inline void RecordObject(JSTaggedValue value, uint32_t threadId, void *mem);
template<SharedMarkType markType> template<SharedMarkType markType>
inline bool GetVisitor(JSTaggedValue value, uint32_t threadId, void *mem); inline bool GetVisitor(JSTaggedValue value, uint32_t threadId, void *mem);
SharedGCWorkManager *sWorkManager_ {nullptr};
std::vector<RSetWorkListHandler*> rSetHandlers_; std::vector<RSetWorkListHandler*> rSetHandlers_;
}; };
class SharedGCMarker : public SharedGCMarkerBase {
public:
explicit SharedGCMarker(SharedGCWorkManager *workManger);
~SharedGCMarker() override = default;
void ProcessMarkStack(uint32_t threadId) override;
protected:
inline void MarkValue(uint32_t threadId, ObjectSlot &slot) override;
inline void MarkObject(uint32_t threadId, TaggedObject *object, ObjectSlot &slot) override;
inline void HandleLocalDerivedRoots(Root type, ObjectSlot base, ObjectSlot derived,
uintptr_t baseOldObject) override;
};
class SharedGCMovableMarker : public SharedGCMarkerBase {
public:
explicit SharedGCMovableMarker(SharedGCWorkManager *workManger, SharedHeap *sHeap);
~SharedGCMovableMarker() override = default;
inline bool NeedEvacuate(Region *region);
void ProcessMarkStack(uint32_t threadId) override;
protected:
inline void HandleLocalDerivedRoots(Root type, ObjectSlot base, ObjectSlot derived,
uintptr_t baseOldObject) override;
inline void MarkValue(uint32_t threadId, ObjectSlot &slot) override;
inline void MarkObject(uint32_t threadId, TaggedObject *object, ObjectSlot &slot) override;
inline uintptr_t AllocateForwardAddress(uint32_t threadId, size_t size);
inline void EvacuateObject(uint32_t threadId, TaggedObject *object, const MarkWord &markWord, ObjectSlot slot);
inline uintptr_t AllocateDstSpace(uint32_t threadId, size_t size);
inline void RawCopyObject(uintptr_t fromAddress, uintptr_t toAddress, size_t size, const MarkWord &markWord);
inline void UpdateForwardAddressIfSuccess(uint32_t threadId, JSHClass *klass, uintptr_t toAddress, size_t size,
ObjectSlot slot);
inline void UpdateForwardAddressIfFailed(TaggedObject *object, uintptr_t toAddress, size_t size, ObjectSlot slot);
private:
SharedHeap *sHeap_;
};
} // namespace panda::ecmascript } // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_SHARED_HEAP_SHARED_GC_MARKER_H #endif // ECMASCRIPT_MEM_SHARED_HEAP_SHARED_GC_MARKER_H

View File

@ -68,7 +68,8 @@ uintptr_t SharedSparseSpace::Allocate(JSThread *thread, size_t size, bool allowG
UNREACHABLE(); UNREACHABLE();
} }
#endif #endif
thread->CheckSafepointIfSuspended(); // Shared old space cannot use this allocate func. Shared full gc may happen in trigger and thread state update.
// Shared old space pointer might change by shared full gc.
// jit thread no heap // jit thread no heap
allowGC = allowGC && (!thread->IsJitThread()); allowGC = allowGC && (!thread->IsJitThread());
if (allowGC) { if (allowGC) {
@ -95,6 +96,21 @@ uintptr_t SharedSparseSpace::Allocate(JSThread *thread, size_t size, bool allowG
return object; return object;
} }
uintptr_t SharedSparseSpace::TryAllocateAndExpand(JSThread *thread, size_t size, bool expand)
{
uintptr_t object = TryAllocate(thread, size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
if (sweepState_ == SweepState::SWEEPING) {
object = AllocateAfterSweepingCompleted(thread, size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
}
if (expand) {
object = AllocateWithExpand(thread, size);
CHECK_SOBJECT_AND_INC_OBJ_SIZE(size);
}
return object;
}
uintptr_t SharedSparseSpace::AllocateNoGCAndExpand(JSThread *thread, size_t size) uintptr_t SharedSparseSpace::AllocateNoGCAndExpand(JSThread *thread, size_t size)
{ {
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK #if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
@ -112,17 +128,17 @@ uintptr_t SharedSparseSpace::AllocateNoGCAndExpand(JSThread *thread, size_t size
return object; return object;
} }
uintptr_t SharedSparseSpace::TryAllocate(JSThread *thread, size_t size) uintptr_t SharedSparseSpace::TryAllocate([[maybe_unused]] JSThread *thread, size_t size)
{ {
RuntimeLockHolder lock(thread, allocateLock_); LockHolder lock(allocateLock_);
return allocator_->Allocate(size); return allocator_->Allocate(size);
} }
uintptr_t SharedSparseSpace::AllocateWithExpand(JSThread *thread, size_t size) uintptr_t SharedSparseSpace::AllocateWithExpand(JSThread *thread, size_t size)
{ {
RuntimeLockHolder lock(thread, allocateLock_); LockHolder lock(allocateLock_);
// In order to avoid expand twice by different threads, try allocate first. // In order to avoid expand twice by different threads, try allocate first.
CheckAndTriggerLocalFullMark(thread); CheckAndTriggerLocalFullMark();
auto object = allocator_->Allocate(size); auto object = allocator_->Allocate(size);
if (object == 0 && Expand(thread)) { if (object == 0 && Expand(thread)) {
object = allocator_->Allocate(size); object = allocator_->Allocate(size);
@ -165,9 +181,9 @@ void SharedSparseSpace::MergeDeserializeAllocateRegions(const std::vector<Region
} }
} }
uintptr_t SharedSparseSpace::AllocateAfterSweepingCompleted(JSThread *thread, size_t size) uintptr_t SharedSparseSpace::AllocateAfterSweepingCompleted([[maybe_unused]] JSThread *thread, size_t size)
{ {
RuntimeLockHolder lock(thread, allocateLock_); LockHolder lock(allocateLock_);
if (sweepState_ != SweepState::SWEEPING) { if (sweepState_ != SweepState::SWEEPING) {
return allocator_->Allocate(size); return allocator_->Allocate(size);
} }
@ -182,29 +198,10 @@ uintptr_t SharedSparseSpace::AllocateAfterSweepingCompleted(JSThread *thread, si
return allocator_->Allocate(size); return allocator_->Allocate(size);
} }
void SharedSparseSpace::ReclaimRegions()
{
EnumerateReclaimRegions([this](Region *region) {
region->DeleteCrossRegionRSet();
region->DeleteOldToNewRSet();
region->DeleteLocalToShareRSet();
region->DeleteSweepingOldToNewRSet();
region->DeleteSweepingLocalToShareRSet();
region->DestroyFreeObjectSets();
heapRegionAllocator_->FreeRegion(region, 0);
});
reclaimRegionList_.clear();
}
void SharedSparseSpace::PrepareSweeping() void SharedSparseSpace::PrepareSweeping()
{ {
liveObjectSize_ = 0; liveObjectSize_ = 0;
EnumerateRegions([this](Region *current) { EnumerateRegions([this](Region *current) {
if (current->AliveObject() == 0) {
RemoveRegion(current);
reclaimRegionList_.emplace(current);
return;
}
IncreaseLiveObjectSize(current->AliveObject()); IncreaseLiveObjectSize(current->AliveObject());
current->ResetWasted(); current->ResetWasted();
AddSweepingRegion(current); AddSweepingRegion(current);
@ -319,6 +316,11 @@ void SharedSparseSpace::FreeRegion(Region *current, bool isMain)
} }
} }
void SharedSparseSpace::DetachFreeObjectSet(Region *region)
{
allocator_->DetachFreeObjectSet(region);
}
void SharedSparseSpace::FreeLiveRange(uintptr_t freeStart, uintptr_t freeEnd, bool isMain) void SharedSparseSpace::FreeLiveRange(uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{ {
// No need to clear rememberset here, because shared region has no remember set now. // No need to clear rememberset here, because shared region has no remember set now.
@ -379,13 +381,28 @@ void SharedSparseSpace::InvokeAllocationInspector(Address object, size_t size, s
allocationCounter_.AdvanceAllocationInspector(alignedSize); allocationCounter_.AdvanceAllocationInspector(alignedSize);
} }
void SharedSparseSpace::CheckAndTriggerLocalFullMark(JSThread *thread) void SharedSparseSpace::CheckAndTriggerLocalFullMark()
{ {
if (liveObjectSize_ >= triggerLocalFullMarkLimit_) { if (liveObjectSize_ >= triggerLocalFullMarkLimit_) {
sHeap_->TryTriggerLocalConcurrentMarking(thread); sHeap_->TryTriggerLocalConcurrentMarking();
} }
} }
SharedAppSpawnSpace::SharedAppSpawnSpace(SharedHeap *heap, size_t initialCapacity)
: SharedSparseSpace(heap, MemSpaceType::SHARED_APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}
void SharedAppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
EnumerateRegions([&](Region *current) {
current->IterateAllMarkedBits([&](void *mem) {
ASSERT(current->InRange(ToUintPtr(mem)));
visitor(reinterpret_cast<TaggedObject *>(mem));
});
});
}
SharedNonMovableSpace::SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity) SharedNonMovableSpace::SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
: SharedSparseSpace(heap, MemSpaceType::SHARED_NON_MOVABLE, initialCapacity, maximumCapacity) : SharedSparseSpace(heap, MemSpaceType::SHARED_NON_MOVABLE, initialCapacity, maximumCapacity)
{ {
@ -396,6 +413,77 @@ SharedOldSpace::SharedOldSpace(SharedHeap *heap, size_t initialCapacity, size_t
{ {
} }
void SharedOldSpace::Merge(SharedLocalSpace *localSpace)
{
localSpace->FreeBumpPoint();
LockHolder lock(lock_);
size_t oldCommittedSize = committedSize_;
localSpace->EnumerateRegions([&](Region *region) {
localSpace->DetachFreeObjectSet(region);
localSpace->RemoveRegion(region);
localSpace->DecreaseLiveObjectSize(region->AliveObject());
AddRegion(region);
IncreaseLiveObjectSize(region->AliveObject());
allocator_->CollectFreeObjectSet(region);
});
size_t hugeSpaceCommitSize = sHeap_->GetHugeObjectSpace()->GetCommittedSize();
if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
if (sHeap_->CanThrowOOMError()) {
sHeap_->ShouldThrowOOMError(true);
}
IncreaseMergeSize(committedSize_ - oldCommittedSize);
// if throw OOM, temporarily increase space size to avoid vm crash
IncreaseOutOfMemoryOvershootSize(committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity());
}
localSpace->GetRegionList().Clear();
allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}
SharedLocalSpace::SharedLocalSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
: SharedSparseSpace(heap, MemSpaceType::SHARED_LOCAL_SPACE, initialCapacity, maximumCapacity) {}
bool SharedLocalSpace::AddRegionToList(Region *region)
{
if (committedSize_ >= maximumCapacity_) {
LOG_ECMA_MEM(FATAL) << "AddRegionTotList::Committed size " << committedSize_ << " of local space is too big.";
return false;
}
AddRegion(region);
allocator_->CollectFreeObjectSet(region);
IncreaseLiveObjectSize(region->AliveObject());
return true;
}
void SharedLocalSpace::FreeBumpPoint()
{
allocator_->FreeBumpPoint();
}
void SharedLocalSpace::Stop()
{
Region *currentRegion = GetCurrentRegion();
if (currentRegion != nullptr) {
currentRegion->SetHighWaterMark(currentRegion->GetBegin() + currentRegion->AliveObject());
}
}
uintptr_t SharedLocalSpace::Allocate(size_t size, bool isExpand)
{
auto object = allocator_->Allocate(size);
if (object == 0) {
// Shared Full GC will compress all regions and cannot recognize all threads' region.
if (isExpand && Expand(Runtime::GetInstance()->GetMainThread())) {
object = allocator_->Allocate(size);
}
}
if (object != 0) {
Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
}
return object;
}
SharedReadOnlySpace::SharedReadOnlySpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity) SharedReadOnlySpace::SharedReadOnlySpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
: Space( : Space(
heap, heap->GetHeapRegionAllocator(), MemSpaceType::SHARED_READ_ONLY_SPACE, initialCapacity, maximumCapacity) heap, heap->GetHeapRegionAllocator(), MemSpaceType::SHARED_READ_ONLY_SPACE, initialCapacity, maximumCapacity)
@ -536,7 +624,7 @@ void SharedHugeObjectSpace::InvokeAllocationInspector(Address object, size_t obj
void SharedHugeObjectSpace::CheckAndTriggerLocalFullMark(JSThread *thread, size_t size) void SharedHugeObjectSpace::CheckAndTriggerLocalFullMark(JSThread *thread, size_t size)
{ {
if (committedSize_ >= triggerLocalFullMarkLimit_) { if (committedSize_ >= triggerLocalFullMarkLimit_) {
reinterpret_cast<SharedHeap*>(heap_)->TryTriggerLocalConcurrentMarking(thread); reinterpret_cast<SharedHeap*>(heap_)->TryTriggerLocalConcurrentMarking();
} else { } else {
auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap()); auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
if (!thread->IsJitThread()) { if (!thread->IsJitThread()) {

View File

@ -42,6 +42,7 @@ namespace panda::ecmascript {
#endif #endif
class SharedHeap; class SharedHeap;
class SharedLocalSpace;
class SharedSparseSpace : public Space { class SharedSparseSpace : public Space {
public: public:
@ -58,6 +59,7 @@ public:
uintptr_t AllocateWithoutGC(JSThread *thread, size_t size); uintptr_t AllocateWithoutGC(JSThread *thread, size_t size);
uintptr_t Allocate(JSThread *thread, size_t size, bool allowGC = true); uintptr_t Allocate(JSThread *thread, size_t size, bool allowGC = true);
uintptr_t TryAllocateAndExpand(JSThread *thread, size_t size, bool expand);
// For work deserialize // For work deserialize
void ResetTopPointer(uintptr_t top); void ResetTopPointer(uintptr_t top);
@ -104,46 +106,46 @@ public:
return committedSize_ >= maximumCapacity_ + outOfMemoryOvershootSize_; return committedSize_ >= maximumCapacity_ + outOfMemoryOvershootSize_;
} }
void CheckAndTriggerLocalFullMark(JSThread *thread); void CheckAndTriggerLocalFullMark();
size_t GetTotalAllocatedSize() const; size_t GetTotalAllocatedSize() const;
void InvokeAllocationInspector(Address object, size_t size, size_t alignedSize); void InvokeAllocationInspector(Address object, size_t size, size_t alignedSize);
template<class Callback> void DetachFreeObjectSet(Region *region);
void EnumerateReclaimRegions(const Callback &cb) const
{
for (Region *current : reclaimRegionList_) {
if (current != nullptr) {
cb(current);
}
}
}
void ReclaimRegions();
protected: protected:
bool Expand(JSThread *thread);
FreeListAllocator<FreeObject> *allocator_; FreeListAllocator<FreeObject> *allocator_;
SweepState sweepState_ = SweepState::NO_SWEEP; SweepState sweepState_ = SweepState::NO_SWEEP;
SharedHeap *sHeap_ {nullptr};
private: private:
static constexpr double LIVE_OBJECT_SIZE_RATIO = 0.8; static constexpr double LIVE_OBJECT_SIZE_RATIO = 0.8;
uintptr_t AllocateWithExpand(JSThread *thread, size_t size); uintptr_t AllocateWithExpand(JSThread *thread, size_t size);
uintptr_t TryAllocate(JSThread *thread, size_t size); uintptr_t TryAllocate(JSThread *thread, size_t size);
bool Expand(JSThread *thread);
// For sweeping // For sweeping
uintptr_t AllocateAfterSweepingCompleted(JSThread *thread, size_t size); uintptr_t AllocateAfterSweepingCompleted(JSThread *thread, size_t size);
Mutex lock_; Mutex lock_;
Mutex allocateLock_; Mutex allocateLock_;
SharedHeap *sHeap_ {nullptr};
std::vector<Region *> sweepingList_; std::vector<Region *> sweepingList_;
std::vector<Region *> sweptList_; std::vector<Region *> sweptList_;
std::set<Region*> reclaimRegionList_;
size_t liveObjectSize_ {0}; size_t liveObjectSize_ {0};
size_t triggerLocalFullMarkLimit_ {0}; size_t triggerLocalFullMarkLimit_ {0};
}; };
class SharedAppSpawnSpace : public SharedSparseSpace {
public:
SharedAppSpawnSpace(SharedHeap *heap, size_t initialCapacity);
~SharedAppSpawnSpace() override = default;
NO_COPY_SEMANTIC(SharedAppSpawnSpace);
NO_MOVE_SEMANTIC(SharedAppSpawnSpace);
void IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const;
};
class SharedNonMovableSpace : public SharedSparseSpace { class SharedNonMovableSpace : public SharedSparseSpace {
public: public:
SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity); SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity);
@ -156,8 +158,40 @@ class SharedOldSpace : public SharedSparseSpace {
public: public:
SharedOldSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity); SharedOldSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity);
~SharedOldSpace() override = default; ~SharedOldSpace() override = default;
size_t GetMergeSize() const
{
return mergeSize_;
}
void IncreaseMergeSize(size_t size)
{
mergeSize_ += size;
}
void ResetMergeSize()
{
mergeSize_ = 0;
}
void Merge(SharedLocalSpace *localSpace);
NO_COPY_SEMANTIC(SharedOldSpace); NO_COPY_SEMANTIC(SharedOldSpace);
NO_MOVE_SEMANTIC(SharedOldSpace); NO_MOVE_SEMANTIC(SharedOldSpace);
Mutex lock_;
size_t mergeSize_ {0};
};
class SharedLocalSpace : public SharedSparseSpace {
public:
SharedLocalSpace() = delete;
SharedLocalSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity);
~SharedLocalSpace() override = default;
NO_COPY_SEMANTIC(SharedLocalSpace);
NO_MOVE_SEMANTIC(SharedLocalSpace);
uintptr_t Allocate(size_t size, bool isExpand = true);
bool AddRegionToList(Region *region);
void FreeBumpPoint();
void Stop();
}; };
class SharedReadOnlySpace : public Space { class SharedReadOnlySpace : public Space {

View File

@ -99,10 +99,15 @@ RegionSpaceFlag Space::GetRegionFlag() const
case MemSpaceType::APPSPAWN_SPACE: case MemSpaceType::APPSPAWN_SPACE:
flags = RegionSpaceFlag::IN_APPSPAWN_SPACE; flags = RegionSpaceFlag::IN_APPSPAWN_SPACE;
break; break;
case MemSpaceType::SHARED_APPSPAWN_SPACE:
flags = RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE;
break;
case MemSpaceType::SHARED_NON_MOVABLE: case MemSpaceType::SHARED_NON_MOVABLE:
flags = RegionSpaceFlag::IN_SHARED_NON_MOVABLE; flags = RegionSpaceFlag::IN_SHARED_NON_MOVABLE;
break; break;
case MemSpaceType::SHARED_OLD_SPACE: case MemSpaceType::SHARED_OLD_SPACE:
case MemSpaceType::SHARED_LOCAL_SPACE:
case MemSpaceType::SHARED_COMPRESS_SPACE:
flags = RegionSpaceFlag::IN_SHARED_OLD_SPACE; flags = RegionSpaceFlag::IN_SHARED_OLD_SPACE;
break; break;
case MemSpaceType::SHARED_READ_ONLY_SPACE: case MemSpaceType::SHARED_READ_ONLY_SPACE:

View File

@ -79,7 +79,7 @@ void Space::ClearAndFreeRegion(Region *region, size_t cachedSize)
if (spaceType_ == MemSpaceType::OLD_SPACE || spaceType_ == MemSpaceType::NON_MOVABLE || if (spaceType_ == MemSpaceType::OLD_SPACE || spaceType_ == MemSpaceType::NON_MOVABLE ||
spaceType_ == MemSpaceType::MACHINE_CODE_SPACE || spaceType_ == MemSpaceType::LOCAL_SPACE || spaceType_ == MemSpaceType::MACHINE_CODE_SPACE || spaceType_ == MemSpaceType::LOCAL_SPACE ||
spaceType_ == MemSpaceType::APPSPAWN_SPACE || spaceType_ == MemSpaceType::SHARED_NON_MOVABLE || spaceType_ == MemSpaceType::APPSPAWN_SPACE || spaceType_ == MemSpaceType::SHARED_NON_MOVABLE ||
spaceType_ == MemSpaceType::SHARED_OLD_SPACE) { spaceType_ == MemSpaceType::SHARED_OLD_SPACE || spaceType_ == MemSpaceType::SHARED_LOCAL_SPACE) {
region->DestroyFreeObjectSets(); region->DestroyFreeObjectSets();
} }
// regions of EdenSpace are allocated in EdenSpace constructor and fixed, not allocate by heapRegionAllocator_ // regions of EdenSpace are allocated in EdenSpace constructor and fixed, not allocate by heapRegionAllocator_

View File

@ -40,18 +40,21 @@ enum MemSpaceType {
READ_ONLY_SPACE, READ_ONLY_SPACE,
APPSPAWN_SPACE, APPSPAWN_SPACE,
HUGE_MACHINE_CODE_SPACE, HUGE_MACHINE_CODE_SPACE,
SHARED_NON_MOVABLE,
SHARED_OLD_SPACE, SHARED_OLD_SPACE,
SHARED_NON_MOVABLE,
SHARED_READ_ONLY_SPACE, SHARED_READ_ONLY_SPACE,
SHARED_HUGE_OBJECT_SPACE, SHARED_HUGE_OBJECT_SPACE,
SHARED_LOCAL_SPACE,
SHARED_COMPRESS_SPACE,
SHARED_APPSPAWN_SPACE,
SPACE_TYPE_LAST, // Count of different types SPACE_TYPE_LAST, // Count of different types
SHARED_BEGIN = SHARED_NON_MOVABLE, SHARED_BEGIN = SHARED_OLD_SPACE,
SHARED_END = SHARED_HUGE_OBJECT_SPACE, SHARED_END = SHARED_HUGE_OBJECT_SPACE,
// Free region means memory maybe always in use and can not be evacuated // Free region means memory maybe always in use and can not be evacuated
FREE_LIST_NUM = MACHINE_CODE_SPACE - OLD_SPACE + 1, FREE_LIST_NUM = MACHINE_CODE_SPACE - OLD_SPACE + 1,
SHARED_SWEEPING_SPACE_BEGIN = SHARED_NON_MOVABLE, SHARED_SWEEPING_SPACE_BEGIN = SHARED_OLD_SPACE,
SHARED_SWEEPING_SPACE_END = SHARED_OLD_SPACE, SHARED_SWEEPING_SPACE_END = SHARED_NON_MOVABLE,
SHARED_SWEEPING_SPACE_NUM = SHARED_SWEEPING_SPACE_END - SHARED_SWEEPING_SPACE_BEGIN + 1, SHARED_SWEEPING_SPACE_NUM = SHARED_SWEEPING_SPACE_END - SHARED_SWEEPING_SPACE_BEGIN + 1,
}; };
@ -105,6 +108,12 @@ static inline std::string ToSpaceTypeName(MemSpaceType type)
return "shared read only space"; return "shared read only space";
case SHARED_HUGE_OBJECT_SPACE: case SHARED_HUGE_OBJECT_SPACE:
return "shared huge object space"; return "shared huge object space";
case SHARED_COMPRESS_SPACE:
return "compress space";
case SHARED_LOCAL_SPACE:
return "shared local space";
case SHARED_APPSPAWN_SPACE:
return "shared appspawn space";
default: default:
return "unknown space"; return "unknown space";
} }

View File

@ -139,5 +139,41 @@ bool TlabAllocator::ExpandCompressFromOld(size_t size)
} }
return false; return false;
} }
SharedTlabAllocator::SharedTlabAllocator(SharedHeap *sHeap)
: sHeap_(sHeap)
{
size_t maxOldSpaceCapacity = sHeap->GetOldSpace()->GetMaximumCapacity();
sLocalSpace_ = new SharedLocalSpace(sHeap, maxOldSpaceCapacity, maxOldSpaceCapacity);
}
inline void SharedTlabAllocator::Finalize()
{
sHeap_->MergeToOldSpaceSync(sLocalSpace_);
}
uintptr_t SharedTlabAllocator::Allocate(size_t size, MemSpaceType space)
{
uintptr_t result = 0;
switch (space) {
case SHARED_COMPRESS_SPACE:
result = AllocateInCompressSpace(size);
break;
default:
LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE();
}
return result;
}
uintptr_t SharedTlabAllocator::AllocateInCompressSpace(size_t size)
{
ASSERT(AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT)) == size);
size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
uintptr_t result = sLocalSpace_->Allocate(size, true);
ASSERT(result != 0);
return result;
}
} // namespace panda::ecmascript } // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_TLAB_ALLOCATOR_INL_H #endif // ECMASCRIPT_MEM_TLAB_ALLOCATOR_INL_H

View File

@ -21,7 +21,15 @@
namespace panda::ecmascript { namespace panda::ecmascript {
class Heap; class Heap;
class TlabAllocator { class TlabAllocatorBase {
public:
TlabAllocatorBase() = default;
~TlabAllocatorBase() = default;
NO_COPY_SEMANTIC(TlabAllocatorBase);
NO_MOVE_SEMANTIC(TlabAllocatorBase);
};
class TlabAllocator : public TlabAllocatorBase {
public: public:
TlabAllocator() = delete; TlabAllocator() = delete;
inline explicit TlabAllocator(Heap *heap); inline explicit TlabAllocator(Heap *heap);
@ -53,6 +61,30 @@ private:
LocalSpace *localSpace_; LocalSpace *localSpace_;
}; };
class SharedTlabAllocator : public TlabAllocatorBase {
public:
SharedTlabAllocator() = delete;
inline explicit SharedTlabAllocator(SharedHeap *sHeap);
~SharedTlabAllocator()
{
delete sLocalSpace_;
}
NO_COPY_SEMANTIC(SharedTlabAllocator);
NO_MOVE_SEMANTIC(SharedTlabAllocator);
inline void Finalize();
inline uintptr_t Allocate(size_t size, MemSpaceType space);
private:
inline uintptr_t AllocateInCompressSpace(size_t size);
inline bool ExpandCompressFromOld(size_t size);
SharedHeap *sHeap_;
SharedLocalSpace *sLocalSpace_;
};
} // namespace panda::ecmascript } // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_TLAB_ALLOCATOR_H #endif // ECMASCRIPT_MEM_TLAB_ALLOCATOR_H

View File

@ -790,6 +790,14 @@ size_t SharedHeapVerification::VerifyRoot() const
[]([[maybe_unused]] Root type, [[maybe_unused]] ObjectSlot base, [[maybe_unused]] ObjectSlot derived, []([[maybe_unused]] Root type, [[maybe_unused]] ObjectSlot base, [[maybe_unused]] ObjectSlot derived,
[[maybe_unused]] uintptr_t baseOldObject) { [[maybe_unused]] uintptr_t baseOldObject) {
}; };
RootVisitor serializeVisitor = [this, &failCount]([[maybe_unused]] Root type, ObjectSlot slot) {
JSTaggedValue value(slot.GetTaggedType());
if (!sHeap_->IsAlive(value.GetTaggedObject())) {
LOG_ECMA(ERROR) << "Serialize Heap verify detected a dead object. " << value.GetTaggedObject();
++failCount;
}
};
Runtime::GetInstance()->IterateSerializeRoot(serializeVisitor);
Runtime::GetInstance()->GCIterateThreadList([&](JSThread *thread) { Runtime::GetInstance()->GCIterateThreadList([&](JSThread *thread) {
ASSERT(!thread->IsInRunningState()); ASSERT(!thread->IsInRunningState());
auto vm = thread->GetEcmaVM(); auto vm = thread->GetEcmaVM();
@ -813,6 +821,25 @@ size_t SharedHeapVerification::VerifyHeap() const
if (failCount > 0) { if (failCount > 0) {
LOG_GC(ERROR) << "SharedHeap VerifyHeap detects deadObject count is " << failCount; LOG_GC(ERROR) << "SharedHeap VerifyHeap detects deadObject count is " << failCount;
} }
VerifyKind localVerifyKind = VerifyKind::VERIFY_END;
if (verifyKind_ == VerifyKind::VERIFY_PRE_SHARED_GC) {
localVerifyKind = VerifyKind::VERIFY_PRE_GC;
} else if (verifyKind_ == VerifyKind::VERIFY_POST_SHARED_GC) {
localVerifyKind = VerifyKind::VERIFY_POST_GC;
}
Runtime::GetInstance()->GCIterateThreadList([&, localVerifyKind](JSThread *thread) {
ASSERT(!thread->IsInRunningState());
auto vm = thread->GetEcmaVM();
auto localHeap = const_cast<Heap*>(vm->GetHeap());
localHeap->GetSweeper()->EnsureAllTaskFinished();
if (localVerifyKind != VerifyKind::VERIFY_END) {
Verification(localHeap, localVerifyKind).VerifyAll();
}
if (failCount > 0) {
LOG_GC(ERROR) << "SharedHeap VerifyRoot detects deadObject in local heap count is " << failCount;
}
});
return failCount; return failCount;
} }

View File

@ -154,6 +154,11 @@ void WorkManager::Finish(size_t &aliveSize, size_t &promotedSize)
for (uint32_t i = 0; i < threadNum_; i++) { for (uint32_t i = 0; i < threadNum_; i++) {
WorkNodeHolder &holder = works_.at(i); WorkNodeHolder &holder = works_.at(i);
promotedSize += holder.promotedSize_; promotedSize += holder.promotedSize_;
if (holder.allocator_ != nullptr) {
holder.allocator_->Finalize();
delete holder.allocator_;
holder.allocator_ = nullptr;
}
} }
initialized_.store(false, std::memory_order_release); initialized_.store(false, std::memory_order_release);
} }
@ -183,7 +188,7 @@ void WorkManager::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase
SharedGCWorkManager::SharedGCWorkManager(SharedHeap *heap, uint32_t threadNum) SharedGCWorkManager::SharedGCWorkManager(SharedHeap *heap, uint32_t threadNum)
: WorkManagerBase(heap->GetNativeAreaAllocator()), sHeap_(heap), threadNum_(threadNum), : WorkManagerBase(heap->GetNativeAreaAllocator()), sHeap_(heap), threadNum_(threadNum),
continuousQueue_ { nullptr } continuousQueue_ { nullptr }, sharedTaskPhase_(SHARED_UNDEFINED_TASK)
{ {
for (uint32_t i = 0; i < threadNum_; i++) { for (uint32_t i = 0; i < threadNum_; i++) {
continuousQueue_.at(i) = new ProcessQueue(); continuousQueue_.at(i) = new ProcessQueue();
@ -200,8 +205,9 @@ SharedGCWorkManager::~SharedGCWorkManager()
} }
} }
void SharedGCWorkManager::Initialize() void SharedGCWorkManager::Initialize(TriggerGCType gcType, SharedParallelMarkPhase taskPhase)
{ {
sharedTaskPhase_ = taskPhase;
InitializeBase(); InitializeBase();
for (uint32_t i = 0; i < threadNum_; i++) { for (uint32_t i = 0; i < threadNum_; i++) {
SharedGCWorkNodeHolder &holder = works_.at(i); SharedGCWorkNodeHolder &holder = works_.at(i);
@ -209,6 +215,9 @@ void SharedGCWorkManager::Initialize()
holder.outNode_ = AllocateWorkNode(); holder.outNode_ = AllocateWorkNode();
holder.weakQueue_ = new ProcessQueue(); holder.weakQueue_ = new ProcessQueue();
holder.weakQueue_->BeginMarking(continuousQueue_.at(i)); holder.weakQueue_->BeginMarking(continuousQueue_.at(i));
if (gcType == TriggerGCType::SHARED_FULL_GC) {
holder.allocator_ = new SharedTlabAllocator(sHeap_);
}
} }
if (initialized_.load(std::memory_order_acquire)) { if (initialized_.load(std::memory_order_acquire)) {
LOG_ECMA(FATAL) << "this branch is unreachable"; LOG_ECMA(FATAL) << "this branch is unreachable";
@ -217,8 +226,9 @@ void SharedGCWorkManager::Initialize()
initialized_.store(true, std::memory_order_release); initialized_.store(true, std::memory_order_release);
} }
void SharedGCWorkManager::Finish() size_t SharedGCWorkManager::Finish()
{ {
size_t aliveSize = 0;
for (uint32_t i = 0; i < threadNum_; i++) { for (uint32_t i = 0; i < threadNum_; i++) {
SharedGCWorkNodeHolder &holder = works_.at(i); SharedGCWorkNodeHolder &holder = works_.at(i);
if (holder.weakQueue_ != nullptr) { if (holder.weakQueue_ != nullptr) {
@ -226,9 +236,16 @@ void SharedGCWorkManager::Finish()
delete holder.weakQueue_; delete holder.weakQueue_;
holder.weakQueue_ = nullptr; holder.weakQueue_ = nullptr;
} }
aliveSize += holder.aliveSize_;
if (holder.allocator_ != nullptr) {
holder.allocator_->Finalize();
delete holder.allocator_;
holder.allocator_ = nullptr;
}
} }
FinishBase(); FinishBase();
initialized_.store(false, std::memory_order_release); initialized_.store(false, std::memory_order_release);
return aliveSize;
} }
bool SharedGCWorkManager::Push(uint32_t threadId, TaggedObject *object) bool SharedGCWorkManager::Push(uint32_t threadId, TaggedObject *object)
@ -263,7 +280,7 @@ void SharedGCWorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
workStack_.Push(inNode); workStack_.Push(inNode);
inNode = AllocateWorkNode(); inNode = AllocateWorkNode();
if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) { if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
sHeap_->PostGCMarkingTask(); sHeap_->PostGCMarkingTask(sharedTaskPhase_);
} }
} }
} }
@ -274,7 +291,7 @@ void SharedGCWorkManager::PushLocalBufferToGlobal(WorkNode *&node, bool postTask
ASSERT(!node->IsEmpty()); ASSERT(!node->IsEmpty());
workStack_.Push(node); workStack_.Push(node);
if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) { if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
sHeap_->PostGCMarkingTask(); sHeap_->PostGCMarkingTask(sharedTaskPhase_);
} }
node = nullptr; node = nullptr;
} }

View File

@ -32,6 +32,7 @@ class SharedHeap;
class Stack; class Stack;
class SemiSpaceCollector; class SemiSpaceCollector;
class TlabAllocator; class TlabAllocator;
class SharedTlabAllocator;
class Region; class Region;
class WorkSpaceChunk; class WorkSpaceChunk;
@ -47,6 +48,13 @@ enum ParallelGCTaskPhase {
TASK_LAST // Count of different Task phase TASK_LAST // Count of different Task phase
}; };
enum SharedParallelMarkPhase {
SHARED_MARK_TASK,
SHARED_COMPRESS_TASK,
SHARED_UNDEFINED_TASK,
SHARED_TASK_LAST // Count of different Task phase
};
class WorkNode { class WorkNode {
public: public:
explicit WorkNode(Stack *stack) : next_(nullptr), stack_(stack) {} explicit WorkNode(Stack *stack) : next_(nullptr), stack_(stack) {}
@ -157,6 +165,11 @@ public:
} }
WorkNode *AllocateWorkNode(); WorkNode *AllocateWorkNode();
virtual size_t Finish()
{
LOG_ECMA(FATAL) << " WorkManagerBase Finish";
return 0;
}
Mutex mtx_; Mutex mtx_;
private: private:
@ -177,7 +190,7 @@ public:
~WorkManager() override; ~WorkManager() override;
void Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase); void Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase);
size_t Finish(); size_t Finish() override;
void Finish(size_t &aliveSize, size_t &promotedSize); void Finish(size_t &aliveSize, size_t &promotedSize);
bool Push(uint32_t threadId, TaggedObject *object); bool Push(uint32_t threadId, TaggedObject *object);
@ -253,6 +266,8 @@ struct SharedGCWorkNodeHolder {
WorkNode *inNode_ {nullptr}; WorkNode *inNode_ {nullptr};
WorkNode *outNode_ {nullptr}; WorkNode *outNode_ {nullptr};
ProcessQueue *weakQueue_ {nullptr}; ProcessQueue *weakQueue_ {nullptr};
SharedTlabAllocator *allocator_ {nullptr};
size_t aliveSize_ = 0;
}; };
class SharedGCWorkManager : public WorkManagerBase { class SharedGCWorkManager : public WorkManagerBase {
@ -260,8 +275,18 @@ public:
SharedGCWorkManager(SharedHeap *heap, uint32_t threadNum); SharedGCWorkManager(SharedHeap *heap, uint32_t threadNum);
~SharedGCWorkManager() override; ~SharedGCWorkManager() override;
void Initialize(); void Initialize(TriggerGCType gcType, SharedParallelMarkPhase taskPhase);
void Finish(); size_t Finish() override;
inline SharedTlabAllocator *GetTlabAllocator(uint32_t threadId) const
{
return works_.at(threadId).allocator_;
}
inline void IncreaseAliveSize(uint32_t threadId, size_t size)
{
works_.at(threadId).aliveSize_ += size;
}
bool Push(uint32_t threadId, TaggedObject *object); bool Push(uint32_t threadId, TaggedObject *object);
bool PushToLocalMarkingBuffer(WorkNode *&markingBuffer, TaggedObject *object); bool PushToLocalMarkingBuffer(WorkNode *&markingBuffer, TaggedObject *object);
@ -301,6 +326,7 @@ private:
std::array<ContinuousStack<JSTaggedType> *, MAX_TASKPOOL_THREAD_NUM + 1> continuousQueue_; std::array<ContinuousStack<JSTaggedType> *, MAX_TASKPOOL_THREAD_NUM + 1> continuousQueue_;
GlobalWorkStack workStack_; GlobalWorkStack workStack_;
std::atomic<bool> initialized_ {false}; std::atomic<bool> initialized_ {false};
SharedParallelMarkPhase sharedTaskPhase_;
}; };
} // namespace panda::ecmascript } // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_WORK_MANAGER_H #endif // ECMASCRIPT_MEM_WORK_MANAGER_H

View File

@ -301,6 +301,7 @@ Local<StringRef> RegExpRef::GetOriginalSource(const EcmaVM *vm)
std::string RegExpRef::GetOriginalFlags([[maybe_unused]] const EcmaVM *vm) std::string RegExpRef::GetOriginalFlags([[maybe_unused]] const EcmaVM *vm)
{ {
DCHECK_SPECIAL_VALUE_WITH_RETURN(this, ""); DCHECK_SPECIAL_VALUE_WITH_RETURN(this, "");
ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this)); JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this));
JSTaggedValue regExpFlags = regExp->GetOriginalFlags(); JSTaggedValue regExpFlags = regExp->GetOriginalFlags();
uint32_t regExpFlagsInt = static_cast<uint32_t>(regExpFlags.GetInt()); uint32_t regExpFlagsInt = static_cast<uint32_t>(regExpFlags.GetInt());
@ -330,6 +331,7 @@ std::string RegExpRef::GetOriginalFlags([[maybe_unused]] const EcmaVM *vm)
Local<JSValueRef> RegExpRef::IsGlobal(const EcmaVM *vm) Local<JSValueRef> RegExpRef::IsGlobal(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this)); JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(regExp, FATAL); LOG_IF_SPECIAL(regExp, FATAL);
JSTaggedValue flags = regExp->GetOriginalFlags(); JSTaggedValue flags = regExp->GetOriginalFlags();
@ -341,6 +343,7 @@ Local<JSValueRef> RegExpRef::IsGlobal(const EcmaVM *vm)
Local<JSValueRef> RegExpRef::IsIgnoreCase(const EcmaVM *vm) Local<JSValueRef> RegExpRef::IsIgnoreCase(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this)); JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(regExp, FATAL); LOG_IF_SPECIAL(regExp, FATAL);
JSTaggedValue flags = regExp->GetOriginalFlags(); JSTaggedValue flags = regExp->GetOriginalFlags();
@ -352,6 +355,7 @@ Local<JSValueRef> RegExpRef::IsIgnoreCase(const EcmaVM *vm)
Local<JSValueRef> RegExpRef::IsMultiline(const EcmaVM *vm) Local<JSValueRef> RegExpRef::IsMultiline(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this)); JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(regExp, FATAL); LOG_IF_SPECIAL(regExp, FATAL);
JSTaggedValue flags = regExp->GetOriginalFlags(); JSTaggedValue flags = regExp->GetOriginalFlags();
@ -363,6 +367,7 @@ Local<JSValueRef> RegExpRef::IsMultiline(const EcmaVM *vm)
Local<JSValueRef> RegExpRef::IsDotAll(const EcmaVM *vm) Local<JSValueRef> RegExpRef::IsDotAll(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this)); JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(regExp, FATAL); LOG_IF_SPECIAL(regExp, FATAL);
JSTaggedValue flags = regExp->GetOriginalFlags(); JSTaggedValue flags = regExp->GetOriginalFlags();
@ -374,6 +379,7 @@ Local<JSValueRef> RegExpRef::IsDotAll(const EcmaVM *vm)
Local<JSValueRef> RegExpRef::IsUtf16(const EcmaVM *vm) Local<JSValueRef> RegExpRef::IsUtf16(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this)); JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(regExp, FATAL); LOG_IF_SPECIAL(regExp, FATAL);
JSTaggedValue flags = regExp->GetOriginalFlags(); JSTaggedValue flags = regExp->GetOriginalFlags();
@ -385,6 +391,7 @@ Local<JSValueRef> RegExpRef::IsUtf16(const EcmaVM *vm)
Local<JSValueRef> RegExpRef::IsStick(const EcmaVM *vm) Local<JSValueRef> RegExpRef::IsStick(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this)); JSHandle<JSRegExp> regExp(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(regExp, FATAL); LOG_IF_SPECIAL(regExp, FATAL);
JSTaggedValue flags = regExp->GetOriginalFlags(); JSTaggedValue flags = regExp->GetOriginalFlags();
@ -395,6 +402,7 @@ Local<JSValueRef> RegExpRef::IsStick(const EcmaVM *vm)
bool GeneratorFunctionRef::IsGenerator(const EcmaVM *vm) bool GeneratorFunctionRef::IsGenerator(const EcmaVM *vm)
{ {
ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
// Omit exception check because ark calls here may not // Omit exception check because ark calls here may not
// cause side effect even pending exception exists. // cause side effect even pending exception exists.
return IsGeneratorFunction(vm); return IsGeneratorFunction(vm);
@ -403,6 +411,7 @@ bool GeneratorFunctionRef::IsGenerator(const EcmaVM *vm)
Local<JSValueRef> GeneratorObjectRef::GetGeneratorState(const EcmaVM *vm) Local<JSValueRef> GeneratorObjectRef::GetGeneratorState(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
JSHandle<JSGeneratorObject> jsGenerator(JSNApiHelper::ToJSHandle(this)); JSHandle<JSGeneratorObject> jsGenerator(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(jsGenerator, FATAL); LOG_IF_SPECIAL(jsGenerator, FATAL);
if (jsGenerator->GetGeneratorState() == JSGeneratorState::COMPLETED) { if (jsGenerator->GetGeneratorState() == JSGeneratorState::COMPLETED) {
@ -414,7 +423,7 @@ Local<JSValueRef> GeneratorObjectRef::GetGeneratorState(const EcmaVM *vm)
Local<JSValueRef> GeneratorObjectRef::GetGeneratorFunction(const EcmaVM *vm) Local<JSValueRef> GeneratorObjectRef::GetGeneratorFunction(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(thread); ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
JSHandle<JSGeneratorObject> jsGenerator(JSNApiHelper::ToJSHandle(this)); JSHandle<JSGeneratorObject> jsGenerator(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(jsGenerator, FATAL); LOG_IF_SPECIAL(jsGenerator, FATAL);
JSHandle<GeneratorContext> generatorContext(thread, jsGenerator->GetGeneratorContext()); JSHandle<GeneratorContext> generatorContext(thread, jsGenerator->GetGeneratorContext());
@ -425,7 +434,7 @@ Local<JSValueRef> GeneratorObjectRef::GetGeneratorFunction(const EcmaVM *vm)
Local<JSValueRef> GeneratorObjectRef::GetGeneratorReceiver(const EcmaVM *vm) Local<JSValueRef> GeneratorObjectRef::GetGeneratorReceiver(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(thread); ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
JSHandle<JSGeneratorObject> jsGenerator(JSNApiHelper::ToJSHandle(this)); JSHandle<JSGeneratorObject> jsGenerator(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(jsGenerator, FATAL); LOG_IF_SPECIAL(jsGenerator, FATAL);
JSHandle<GeneratorContext> generatorContext(thread, jsGenerator->GetGeneratorContext()); JSHandle<GeneratorContext> generatorContext(thread, jsGenerator->GetGeneratorContext());
@ -436,7 +445,7 @@ Local<JSValueRef> GeneratorObjectRef::GetGeneratorReceiver(const EcmaVM *vm)
Local<JSValueRef> CollatorRef::GetCompareFunction(const EcmaVM *vm) Local<JSValueRef> CollatorRef::GetCompareFunction(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(thread); ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
#ifdef ARK_SUPPORT_INTL #ifdef ARK_SUPPORT_INTL
JSHandle<JSCollator> jsCollator(JSNApiHelper::ToJSHandle(this)); JSHandle<JSCollator> jsCollator(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(jsCollator, FATAL); LOG_IF_SPECIAL(jsCollator, FATAL);
@ -451,7 +460,7 @@ Local<JSValueRef> CollatorRef::GetCompareFunction(const EcmaVM *vm)
Local<JSValueRef> DataTimeFormatRef::GetFormatFunction(const EcmaVM *vm) Local<JSValueRef> DataTimeFormatRef::GetFormatFunction(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(thread); ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
#ifdef ARK_SUPPORT_INTL #ifdef ARK_SUPPORT_INTL
JSHandle<JSDateTimeFormat> jsDateTimeFormat(JSNApiHelper::ToJSHandle(this)); JSHandle<JSDateTimeFormat> jsDateTimeFormat(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(jsDateTimeFormat, FATAL); LOG_IF_SPECIAL(jsDateTimeFormat, FATAL);
@ -466,7 +475,7 @@ Local<JSValueRef> DataTimeFormatRef::GetFormatFunction(const EcmaVM *vm)
Local<JSValueRef> NumberFormatRef::GetFormatFunction(const EcmaVM *vm) Local<JSValueRef> NumberFormatRef::GetFormatFunction(const EcmaVM *vm)
{ {
CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm)); CROSS_THREAD_AND_EXCEPTION_CHECK_WITH_RETURN(vm, JSValueRef::Undefined(vm));
ecmascript::ThreadManagedScope managedScope(thread); ecmascript::ThreadManagedScope managedScope(vm->GetJSThread());
#ifdef ARK_SUPPORT_INTL #ifdef ARK_SUPPORT_INTL
JSHandle<JSNumberFormat> jsNumberFormat(JSNApiHelper::ToJSHandle(this)); JSHandle<JSNumberFormat> jsNumberFormat(JSNApiHelper::ToJSHandle(this));
LOG_IF_SPECIAL(jsNumberFormat, FATAL); LOG_IF_SPECIAL(jsNumberFormat, FATAL);
@ -483,6 +492,7 @@ JSTaggedValue Callback::RegisterCallback(ecmascript::EcmaRuntimeCallInfo *ecmaRu
{ {
// Constructor // Constructor
JSThread *thread = ecmaRuntimeCallInfo->GetThread(); JSThread *thread = ecmaRuntimeCallInfo->GetThread();
ecmascript::ThreadManagedScope managedScope(thread);
JSHandle<JSTaggedValue> constructor = BuiltinsBase::GetConstructor(ecmaRuntimeCallInfo); JSHandle<JSTaggedValue> constructor = BuiltinsBase::GetConstructor(ecmaRuntimeCallInfo);
if (!constructor->IsJSFunction()) { if (!constructor->IsJSFunction()) {
return JSTaggedValue::False(); return JSTaggedValue::False();

File diff suppressed because it is too large Load Diff

View File

@ -34,7 +34,7 @@ EcmaString *ObjectFactory::AllocLineStringObjectNoGC(size_t size)
if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) { if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
object = reinterpret_cast<TaggedObject *>(sHeap_->GetHugeObjectSpace()->Allocate(thread_, size)); object = reinterpret_cast<TaggedObject *>(sHeap_->GetHugeObjectSpace()->Allocate(thread_, size));
} else { } else {
object = reinterpret_cast<TaggedObject *>(sHeap_->GetOldSpace()->Allocate(thread_, size, false)); object = reinterpret_cast<TaggedObject *>(sHeap_->GetOldSpace()->TryAllocateAndExpand(thread_, size, true));
} }
ASSERT(object != nullptr); ASSERT(object != nullptr);
object->SetClass(thread_, JSHClass::Cast(thread_->GlobalConstants()->GetLineStringClass().GetTaggedObject())); object->SetClass(thread_, JSHClass::Cast(thread_->GlobalConstants()->GetLineStringClass().GetTaggedObject()));

View File

@ -873,6 +873,8 @@ public:
size_t nativeBindingsize = 0, size_t nativeBindingsize = 0,
NativeFlag flag = NativeFlag::NO_DIV); NativeFlag flag = NativeFlag::NO_DIV);
JSHandle<JSNativePointer> NewSReadOnlyJSNativePointer(void *externalPointer);
JSHandle<AccessorData> NewSInternalAccessor(void *setter, void *getter); JSHandle<AccessorData> NewSInternalAccessor(void *setter, void *getter);
JSHandle<JSSymbol> NewSWellKnownSymbol(const JSHandle<JSTaggedValue> &name); JSHandle<JSSymbol> NewSWellKnownSymbol(const JSHandle<JSTaggedValue> &name);

View File

@ -334,6 +334,9 @@ void Runtime::ProcessNativeDeleteInSharedGC(const WeakRootVisitor &visitor)
freeSharedConstpoolIndex_.insert(constpoolIndex); freeSharedConstpoolIndex_.insert(constpoolIndex);
continue; continue;
} }
if (fwd != reinterpret_cast<TaggedObject *>(obj)) {
constpoolIter->second = JSTaggedValue(fwd);
}
} }
++constpoolIter; ++constpoolIter;
} }

View File

@ -98,7 +98,19 @@ public:
return stringTable_.get(); return stringTable_.get();
} }
uint32_t PushSerializationRoot([[maybe_unused]] JSThread *thread, std::vector<TaggedObject *> &rootSet) inline std::pair<JSTaggedType *, size_t> GetSerializeRootMapValue([[maybe_unused]] JSThread *thread,
uint32_t dataIndex)
{
ASSERT(thread->IsInManagedState());
LockHolder lock(serializeLock_);
auto iter = serializeRootMap_.find(dataIndex);
if (iter == serializeRootMap_.end()) {
return std::make_pair(nullptr, 0);
}
return std::make_pair(iter->second.data(), iter->second.size());
}
uint32_t PushSerializationRoot([[maybe_unused]] JSThread *thread, std::vector<JSTaggedType> &rootSet)
{ {
ASSERT(thread->IsInManagedState()); ASSERT(thread->IsInManagedState());
LockHolder lock(serializeLock_); LockHolder lock(serializeLock_);
@ -251,7 +263,7 @@ private:
std::unique_ptr<HeapRegionAllocator> heapRegionAllocator_; std::unique_ptr<HeapRegionAllocator> heapRegionAllocator_;
// for stringTable. // for stringTable.
std::unique_ptr<EcmaStringTable> stringTable_; std::unique_ptr<EcmaStringTable> stringTable_;
std::unordered_map<uint32_t, std::vector<TaggedObject *>> serializeRootMap_; std::unordered_map<uint32_t, std::vector<JSTaggedType>> serializeRootMap_;
std::vector<uint32_t> serializeDataIndexVector_; std::vector<uint32_t> serializeDataIndexVector_;
// Shared constantpool cache // Shared constantpool cache

View File

@ -16,6 +16,7 @@
#include "ecmascript/runtime_lock.h" #include "ecmascript/runtime_lock.h"
#include "ecmascript/checkpoint/thread_state_transition.h" #include "ecmascript/checkpoint/thread_state_transition.h"
#include "ecmascript/js_thread.h" #include "ecmascript/js_thread.h"
#include "ecmascript/mem/heap-inl.h"
namespace panda::ecmascript { namespace panda::ecmascript {
RuntimeLockHolder::RuntimeLockHolder(JSThread *thread, Mutex &mtx) RuntimeLockHolder::RuntimeLockHolder(JSThread *thread, Mutex &mtx)
@ -24,6 +25,9 @@ RuntimeLockHolder::RuntimeLockHolder(JSThread *thread, Mutex &mtx)
if (mtx_.TryLock()) { if (mtx_.TryLock()) {
return; return;
} }
#ifndef NDEBUG
SharedHeap::GetInstance()->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread_);
#endif
ThreadStateTransitionScope<JSThread, ThreadState::WAIT> ts(thread_); ThreadStateTransitionScope<JSThread, ThreadState::WAIT> ts(thread_);
mtx.Lock(); mtx.Lock();
} }

View File

@ -41,6 +41,24 @@ namespace panda::ecmascript {
case (uint8_t)SerializedObjectSpace::SHARED_NON_MOVABLE_SPACE: \ case (uint8_t)SerializedObjectSpace::SHARED_NON_MOVABLE_SPACE: \
case (uint8_t)SerializedObjectSpace::SHARED_HUGE_SPACE case (uint8_t)SerializedObjectSpace::SHARED_HUGE_SPACE
BaseDeserializer::BaseDeserializer(JSThread *thread, SerializeData *data, void *hint)
: thread_(thread), heap_(const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())), data_(data), engine_(hint)
{
sheap_ = SharedHeap::GetInstance();
uint32_t index = data_->GetDataIndex();
if (index != 0) {
std::pair<JSTaggedType *, size_t> dataVectorPair = Runtime::GetInstance()->GetSerializeRootMapValue(thread_,
index);
if (dataVectorPair.first == nullptr) {
LOG_ECMA(FATAL) << "Unknown serializer root index: " << index;
UNREACHABLE();
}
// ValueVector is the pointer to the data from serialize vector and must be const.
valueVector_ = dataVectorPair.first;
vectorSize_ = dataVectorPair.second;
}
}
JSHandle<JSTaggedValue> BaseDeserializer::ReadValue() JSHandle<JSTaggedValue> BaseDeserializer::ReadValue()
{ {
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Deserialize dataSize: " + std::to_string(data_->Size())); ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Deserialize dataSize: " + std::to_string(data_->Size()));
@ -67,7 +85,7 @@ JSHandle<JSTaggedValue> BaseDeserializer::DeserializeJSTaggedValue()
// initialize concurrent func here // initialize concurrent func here
for (auto func : concurrentFunctions_) { for (auto func : concurrentFunctions_) {
func->InitializeForConcurrentFunction(thread_); JSFunction::InitializeForConcurrentFunction(thread_, func);
} }
concurrentFunctions_.clear(); concurrentFunctions_.clear();
@ -95,7 +113,7 @@ uintptr_t BaseDeserializer::DeserializeTaggedObject(SerializedObjectSpace space)
{ {
size_t objSize = data_->ReadUint32(position_); size_t objSize = data_->ReadUint32(position_);
uintptr_t res = RelocateObjectAddr(space, objSize); uintptr_t res = RelocateObjectAddr(space, objSize);
objectVector_.push_back(res); objectVector_.push_back(static_cast<JSTaggedType>(res));
DeserializeObjectField(res, res + objSize); DeserializeObjectField(res, res + objSize);
return res; return res;
} }
@ -182,7 +200,8 @@ void BaseDeserializer::HandleNewObjectEncodeFlag(SerializedObjectSpace space, u
FunctionKind funcKind = func->GetFunctionKind(); FunctionKind funcKind = func->GetFunctionKind();
if (funcKind == FunctionKind::CONCURRENT_FUNCTION || object->GetClass()->IsJSSharedFunction()) { if (funcKind == FunctionKind::CONCURRENT_FUNCTION || object->GetClass()->IsJSSharedFunction()) {
// defer initialize concurrent function // defer initialize concurrent function
concurrentFunctions_.push_back(reinterpret_cast<JSFunction *>(object)); JSHandle<JSFunction> funcHandle(thread_, func);
concurrentFunctions_.push_back(funcHandle);
} }
func->SetRawProfileTypeInfo(thread_, thread_->GlobalConstants()->GetEmptyProfileTypeInfoCell(), SKIP_BARRIER); func->SetRawProfileTypeInfo(thread_, thread_->GlobalConstants()->GetEmptyProfileTypeInfoCell(), SKIP_BARRIER);
func->SetWorkNodePointer(reinterpret_cast<uintptr_t>(nullptr)); func->SetWorkNodePointer(reinterpret_cast<uintptr_t>(nullptr));
@ -250,10 +269,10 @@ size_t BaseDeserializer::ReadSingleEncodeData(uint8_t encodeFlag, uintptr_t objA
} }
case (uint8_t)EncodeFlag::REFERENCE: { case (uint8_t)EncodeFlag::REFERENCE: {
uint32_t valueIndex = data_->ReadUint32(position_); uint32_t valueIndex = data_->ReadUint32(position_);
uintptr_t valueAddr = objectVector_[valueIndex]; JSTaggedType valueAddr = objectVector_[valueIndex];
UpdateMaybeWeak(slot, valueAddr, GetAndResetWeak()); UpdateMaybeWeak(slot, valueAddr, GetAndResetWeak());
WriteBarrier<WriteBarrierType::DESERIALIZE>(thread_, reinterpret_cast<void *>(objAddr), fieldOffset, WriteBarrier<WriteBarrierType::DESERIALIZE>(thread_, reinterpret_cast<void *>(objAddr), fieldOffset,
static_cast<JSTaggedType>(valueAddr)); valueAddr);
break; break;
} }
case (uint8_t)EncodeFlag::WEAK: { case (uint8_t)EncodeFlag::WEAK: {
@ -340,7 +359,16 @@ size_t BaseDeserializer::ReadSingleEncodeData(uint8_t encodeFlag, uintptr_t objA
break; break;
} }
case (uint8_t)EncodeFlag::SHARED_OBJECT: { case (uint8_t)EncodeFlag::SHARED_OBJECT: {
JSTaggedType value = data_->ReadJSTaggedType(position_); uint32_t index = data_->ReadUint32(position_);
if (UNLIKELY(valueVector_ == nullptr)) {
LOG_ECMA(FATAL) << "Deserializer valueVector is nullptr.";
UNREACHABLE();
}
if (UNLIKELY(index >= vectorSize_)) {
LOG_ECMA(FATAL) << "Shared object index invalid, index: " << index << " vectorSize: " << vectorSize_;
UNREACHABLE();
}
JSTaggedType value = valueVector_[index];
objectVector_.push_back(value); objectVector_.push_back(value);
bool isErrorMsg = GetAndResetIsErrorMsg(); bool isErrorMsg = GetAndResetIsErrorMsg();
if (isErrorMsg) { if (isErrorMsg) {

View File

@ -78,11 +78,8 @@ struct JSErrorInfo {
class BaseDeserializer { class BaseDeserializer {
public: public:
explicit BaseDeserializer(JSThread *thread, SerializeData *data, void *hint = nullptr) explicit BaseDeserializer(JSThread *thread, SerializeData *data, void *hint = nullptr);
: thread_(thread), heap_(const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())), data_(data), engine_(hint)
{
sheap_ = SharedHeap::GetInstance();
}
~BaseDeserializer() ~BaseDeserializer()
{ {
objectVector_.clear(); objectVector_.clear();
@ -225,7 +222,9 @@ private:
uintptr_t machineCodeSpaceBeginAddr_ {0}; uintptr_t machineCodeSpaceBeginAddr_ {0};
uintptr_t sOldSpaceBeginAddr_ {0}; uintptr_t sOldSpaceBeginAddr_ {0};
uintptr_t sNonMovableSpaceBeginAddr_ {0}; uintptr_t sNonMovableSpaceBeginAddr_ {0};
CVector<uintptr_t> objectVector_; JSTaggedType const *valueVector_ {nullptr};
size_t vectorSize_ {0};
CVector<JSTaggedType> objectVector_;
CVector<Region *> regionVector_; CVector<Region *> regionVector_;
size_t oldRegionIndex_ {0}; size_t oldRegionIndex_ {0};
size_t nonMovableRegionIndex_ {0}; size_t nonMovableRegionIndex_ {0};
@ -241,7 +240,7 @@ private:
bool functionInShared_ {false}; bool functionInShared_ {false};
CVector<NativeBindingInfo *> nativeBindingInfos_; CVector<NativeBindingInfo *> nativeBindingInfos_;
CVector<JSErrorInfo *> jsErrorInfos_; CVector<JSErrorInfo *> jsErrorInfos_;
CVector<JSFunction *> concurrentFunctions_; CVector<JSHandle<JSFunction>> concurrentFunctions_;
size_t position_ {0}; size_t position_ {0};
}; };
} }

View File

@ -39,6 +39,7 @@ SerializedObjectSpace BaseSerializer::GetSerializedObjectSpace(TaggedObject *obj
return SerializedObjectSpace::MACHINE_CODE_SPACE; return SerializedObjectSpace::MACHINE_CODE_SPACE;
case RegionSpaceFlag::IN_HUGE_OBJECT_SPACE: case RegionSpaceFlag::IN_HUGE_OBJECT_SPACE:
return SerializedObjectSpace::HUGE_SPACE; return SerializedObjectSpace::HUGE_SPACE;
case RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE:
case RegionSpaceFlag::IN_SHARED_OLD_SPACE: case RegionSpaceFlag::IN_SHARED_OLD_SPACE:
return SerializedObjectSpace::SHARED_OLD_SPACE; return SerializedObjectSpace::SHARED_OLD_SPACE;
case RegionSpaceFlag::IN_SHARED_NON_MOVABLE: case RegionSpaceFlag::IN_SHARED_NON_MOVABLE:
@ -101,9 +102,9 @@ bool BaseSerializer::SerializeRootObject(TaggedObject *object)
void BaseSerializer::SerializeSharedObject(TaggedObject *object) void BaseSerializer::SerializeSharedObject(TaggedObject *object)
{ {
data_->WriteEncodeFlag(EncodeFlag::SHARED_OBJECT); data_->WriteEncodeFlag(EncodeFlag::SHARED_OBJECT);
data_->WriteJSTaggedType(reinterpret_cast<JSTaggedType>(object)); data_->WriteUint32(sharedObjects_.size());
referenceMap_.emplace(object, objectIndex_++); referenceMap_.emplace(object, objectIndex_++);
sharedObjects_.emplace_back(object); sharedObjects_.emplace_back(static_cast<JSTaggedType>(ToUintPtr(object)));
} }
bool BaseSerializer::SerializeSpecialObjIndividually(JSType objectType, TaggedObject *root, bool BaseSerializer::SerializeSpecialObjIndividually(JSType objectType, TaggedObject *root,

View File

@ -70,7 +70,7 @@ protected:
EcmaVM *vm_; EcmaVM *vm_;
std::unique_ptr<SerializeData> data_; std::unique_ptr<SerializeData> data_;
CUnorderedMap<TaggedObject *, uint32_t> referenceMap_; CUnorderedMap<TaggedObject *, uint32_t> referenceMap_;
std::vector<TaggedObject *> sharedObjects_; std::vector<JSTaggedType> sharedObjects_;
size_t objectIndex_ {0}; size_t objectIndex_ {0};
static constexpr size_t PARENT_ENV_SLOT = sizeof(TaggedObject); static constexpr size_t PARENT_ENV_SLOT = sizeof(TaggedObject);
static constexpr size_t SCOPE_INFO_SLOT = PARENT_ENV_SLOT * 2; // 2: the second object slot of lexical env static constexpr size_t SCOPE_INFO_SLOT = PARENT_ENV_SLOT * 2; // 2: the second object slot of lexical env

View File

@ -569,7 +569,7 @@ public:
EXPECT_TRUE(!res.IsEmpty()) << "[Empty] Deserialize JSSharedSet failed"; EXPECT_TRUE(!res.IsEmpty()) << "[Empty] Deserialize JSSharedSet failed";
EXPECT_TRUE(res->IsJSSharedSet()) << "[NotJSSharedSet] Deserialize JSSharedSet failed"; EXPECT_TRUE(res->IsJSSharedSet()) << "[NotJSSharedSet] Deserialize JSSharedSet failed";
JSHandle<JSSharedSet> jsSet = JSHandle<JSSharedSet>::Cast(res); JSHandle<JSSharedSet> jsSet = JSHandle<JSSharedSet>::Cast(res);
auto size = jsSet->GetSize(thread); auto size = JSSharedSet::GetSize(thread, jsSet);
EXPECT_TRUE(size == INITIALIZE_SIZE); EXPECT_TRUE(size == INITIALIZE_SIZE);
JSSharedSet::Clear(thread, jsSet); JSSharedSet::Clear(thread, jsSet);
Destroy(); Destroy();
@ -584,10 +584,10 @@ public:
EXPECT_TRUE(res->IsJSSharedSet()) << "[NotJSSharedSet] Deserialize JSSharedSet failed"; EXPECT_TRUE(res->IsJSSharedSet()) << "[NotJSSharedSet] Deserialize JSSharedSet failed";
JSHandle<JSSharedSet> jsSet = JSHandle<JSSharedSet>::Cast(res); JSHandle<JSSharedSet> jsSet = JSHandle<JSSharedSet>::Cast(res);
auto size = jsSet->GetSize(thread); auto size = JSSharedSet::GetSize(thread, jsSet);
EXPECT_TRUE(size == INITIALIZE_SIZE); EXPECT_TRUE(size == INITIALIZE_SIZE);
for (int32_t i = 0; i < size; i++) { for (int32_t i = 0; i < size; i++) {
EXPECT_TRUE(jsSet->Has(thread, JSTaggedValue(i))); EXPECT_TRUE(JSSharedSet::Has(thread, jsSet, JSTaggedValue(i)));
} }
JSSharedSet::Add(thread, jsSet, JSHandle<JSTaggedValue>(thread, JSTaggedValue(INITIALIZE_SIZE))); JSSharedSet::Add(thread, jsSet, JSHandle<JSTaggedValue>(thread, JSTaggedValue(INITIALIZE_SIZE)));
bool result = JSSharedSet::Delete(thread, jsSet, JSHandle<JSTaggedValue>(thread, JSTaggedValue(0))); bool result = JSSharedSet::Delete(thread, jsSet, JSHandle<JSTaggedValue>(thread, JSTaggedValue(0)));
@ -604,29 +604,9 @@ public:
EXPECT_TRUE(!res.IsEmpty()) << "[Empty] Deserialize JSSharedSet fail"; EXPECT_TRUE(!res.IsEmpty()) << "[Empty] Deserialize JSSharedSet fail";
EXPECT_TRUE(res->IsJSSharedSet()) << "[NotJSSharedSet] Deserialize JSSharedSet fail"; EXPECT_TRUE(res->IsJSSharedSet()) << "[NotJSSharedSet] Deserialize JSSharedSet fail";
JSHandle<JSSharedSet> jsSet = JSHandle<JSSharedSet>::Cast(res); JSHandle<JSSharedSet> jsSet = JSHandle<JSSharedSet>::Cast(res);
EXPECT_TRUE(jsSet->GetSize(thread) == INITIALIZE_SIZE); EXPECT_TRUE(JSSharedSet::GetSize(thread, jsSet) == INITIALIZE_SIZE);
for (int i = 0; i < INITIALIZE_SIZE; i++) { for (int i = 0; i < INITIALIZE_SIZE; i++) {
EXPECT_TRUE(jsSet->Has(thread, JSTaggedValue(i))); EXPECT_TRUE(JSSharedSet::Has(thread, jsSet, JSTaggedValue(i)));
}
Destroy();
}
void JSSharedSetMultiThreadTest2(SerializeData *data, std::pair<int32_t, int32_t> range,
std::atomic<uint32_t> &pendingExceptions)
{
EXPECT_TRUE(data != nullptr);
Init();
BaseDeserializer deserializer(thread, data);
JSHandle<JSTaggedValue> res = deserializer.ReadValue();
EXPECT_TRUE(!res.IsEmpty()) << "[Empty] Deserialize JSSharedSet fail";
EXPECT_TRUE(res->IsJSSharedSet()) << "[NotJSSharedSet] Deserialize JSSharedSet fail";
JSHandle<JSSharedSet> jsSet = JSHandle<JSSharedSet>::Cast(res);
for (int32_t i = range.first; i < range.second; i++) {
JSSharedSet::Add(thread, jsSet, JSHandle<JSTaggedValue>(thread, JSTaggedValue(i)));
if (thread->HasPendingException()) {
pendingExceptions++;
break;
}
} }
Destroy(); Destroy();
} }
@ -639,7 +619,7 @@ public:
EXPECT_TRUE(!res.IsEmpty()) << "[Empty] Deserialize JSSharedMap failed"; EXPECT_TRUE(!res.IsEmpty()) << "[Empty] Deserialize JSSharedMap failed";
EXPECT_TRUE(res->IsJSSharedMap()) << "[NotJSSharedMap] Deserialize JSSharedMap failed"; EXPECT_TRUE(res->IsJSSharedMap()) << "[NotJSSharedMap] Deserialize JSSharedMap failed";
JSHandle<JSSharedMap> jsMap = JSHandle<JSSharedMap>::Cast(res); JSHandle<JSSharedMap> jsMap = JSHandle<JSSharedMap>::Cast(res);
auto size = jsMap->GetSize(thread); auto size = JSSharedMap::GetSize(thread, jsMap);
EXPECT_TRUE(size == INITIALIZE_SIZE); EXPECT_TRUE(size == INITIALIZE_SIZE);
JSSharedMap::Clear(thread, jsMap); JSSharedMap::Clear(thread, jsMap);
Destroy(); Destroy();
@ -654,10 +634,10 @@ public:
EXPECT_TRUE(res->IsJSSharedMap()) << "[NotJSSharedMap] Deserialize JSSharedMap failed"; EXPECT_TRUE(res->IsJSSharedMap()) << "[NotJSSharedMap] Deserialize JSSharedMap failed";
JSHandle<JSSharedMap> jsMap = JSHandle<JSSharedMap>::Cast(res); JSHandle<JSSharedMap> jsMap = JSHandle<JSSharedMap>::Cast(res);
auto size = jsMap->GetSize(thread); auto size = JSSharedMap::GetSize(thread, jsMap);
EXPECT_TRUE(size == INITIALIZE_SIZE); EXPECT_TRUE(size == INITIALIZE_SIZE);
for (int32_t i = 0; i < size; i++) { for (int32_t i = 0; i < size; i++) {
EXPECT_TRUE(jsMap->Has(thread, JSTaggedValue(i))); EXPECT_TRUE(JSSharedMap::Has(thread, jsMap, JSTaggedValue(i)));
} }
JSSharedMap::Set(thread, jsMap, JSHandle<JSTaggedValue>(thread, JSTaggedValue(INITIALIZE_SIZE)), JSSharedMap::Set(thread, jsMap, JSHandle<JSTaggedValue>(thread, JSTaggedValue(INITIALIZE_SIZE)),
JSHandle<JSTaggedValue>(thread, JSTaggedValue(INITIALIZE_SIZE))); JSHandle<JSTaggedValue>(thread, JSTaggedValue(INITIALIZE_SIZE)));
@ -666,27 +646,6 @@ public:
Destroy(); Destroy();
} }
void JSSharedMapMultiThreadTest(SerializeData *data, std::pair<int32_t, int32_t> range,
std::atomic<uint32_t> &pendingExceptions)
{
EXPECT_TRUE(data != nullptr);
Init();
BaseDeserializer deserializer(thread, data);
JSHandle<JSTaggedValue> res = deserializer.ReadValue();
EXPECT_TRUE(!res.IsEmpty()) << "[Empty] Deserialize JSSharedMap fail";
EXPECT_TRUE(res->IsJSSharedMap()) << "[NotJSSharedMap] Deserialize JSSharedMap fail";
JSHandle<JSSharedMap> jsMap = JSHandle<JSSharedMap>::Cast(res);
for (int32_t i = range.first; i < range.second; i++) {
JSSharedMap::Set(thread, jsMap, JSHandle<JSTaggedValue>(thread, JSTaggedValue(i)),
JSHandle<JSTaggedValue>(thread, JSTaggedValue(i)));
if (thread->HasPendingException()) {
pendingExceptions++;
break;
}
}
Destroy();
}
void JSRegexpTest(SerializeData *data) void JSRegexpTest(SerializeData *data)
{ {
Init(); Init();
@ -1430,7 +1389,6 @@ HWTEST_F_L0(JSSerializerTest, SerializeNativeBindingObject2)
delete serializer; delete serializer;
} }
HWTEST_F_L0(JSSerializerTest, TestSerializeJSSet) HWTEST_F_L0(JSSerializerTest, TestSerializeJSSet)
{ {
ObjectFactory *factory = ecmaVm->GetFactory(); ObjectFactory *factory = ecmaVm->GetFactory();
@ -2116,7 +2074,8 @@ JSHandle<JSObject> CreateSObject(JSThread *thread)
JSHandle<JSObject> object = factory->NewSharedOldSpaceJSObject(hclass); JSHandle<JSObject> object = factory->NewSharedOldSpaceJSObject(hclass);
uint32_t fieldIndex = 0; uint32_t fieldIndex = 0;
while (fieldIndex < length) { while (fieldIndex < length) {
object->SetPropertyInlinedProps(thread, fieldIndex++, CreateEmptySObject(thread).GetTaggedValue()); JSHandle<JSObject> emptyObject = CreateEmptySObject(thread);
object->SetPropertyInlinedProps(thread, fieldIndex++, emptyObject.GetTaggedValue());
} }
return object; return object;
} }
@ -2230,26 +2189,26 @@ HWTEST_F_L0(JSSerializerTest, SerializeJSSharedSetBasic1)
jsDeserializerTest, data.get()); jsDeserializerTest, data.get());
ThreadSuspensionScope scope(thread); ThreadSuspensionScope scope(thread);
t1.join(); t1.join();
EXPECT_TRUE(jsSet->GetSize(thread) == 0); EXPECT_TRUE(JSSharedSet::GetSize(thread, jsSet) == 0);
} }
{ {
for (int i = 0; i < INITIALIZE_SIZE; i++) { for (int i = 0; i < INITIALIZE_SIZE; i++) {
JSSharedSet::Add(thread, jsSet, JSHandle<JSTaggedValue>(thread, JSTaggedValue(i))); JSSharedSet::Add(thread, jsSet, JSHandle<JSTaggedValue>(thread, JSTaggedValue(i)));
} }
EXPECT_TRUE(!jsSet->Has(thread, JSTaggedValue(INITIALIZE_SIZE))); EXPECT_TRUE(!JSSharedSet::Has(thread, jsSet, JSTaggedValue(INITIALIZE_SIZE)));
JSDeserializerTest jsDeserializerTest; JSDeserializerTest jsDeserializerTest;
// The Deserializer thread will add and delete a element // The Deserializer thread will add and delete a element
std::thread t1(&JSDeserializerTest::JSSharedSetBasicTest2, std::thread t1(&JSDeserializerTest::JSSharedSetBasicTest2,
jsDeserializerTest, data.get()); jsDeserializerTest, data.get());
ThreadSuspensionScope scope(thread); ThreadSuspensionScope scope(thread);
t1.join(); t1.join();
EXPECT_TRUE(!jsSet->Has(thread, JSTaggedValue(0))); EXPECT_TRUE(!JSSharedSet::Has(thread, jsSet, JSTaggedValue(0)));
EXPECT_TRUE(jsSet->Has(thread, JSTaggedValue(INITIALIZE_SIZE))); EXPECT_TRUE(JSSharedSet::Has(thread, jsSet, JSTaggedValue(INITIALIZE_SIZE)));
} }
delete serializer; delete serializer;
}; };
HWTEST_F_L0(JSSerializerTest, SerializeMultiThreadJSSharedSet1) HWTEST_F_L0(JSSerializerTest, SerializeMultiThreadJSSharedSet)
{ {
JSHandle<JSSharedSet> jsSet = CreateSSet(thread); JSHandle<JSSharedSet> jsSet = CreateSSet(thread);
ValueSerializer *serializer = new ValueSerializer(thread); ValueSerializer *serializer = new ValueSerializer(thread);
@ -2276,38 +2235,7 @@ HWTEST_F_L0(JSSerializerTest, SerializeMultiThreadJSSharedSet1)
delete serializer; delete serializer;
}; };
HWTEST_F_L0(JSSerializerTest, SerializeMultiThreadJSSharedSet2) HWTEST_F_L0(JSSerializerTest, SerializeJSSharedMapBasic)
{
JSHandle<JSSharedSet> jsSet = CreateSSet(thread);
ValueSerializer *serializer = new ValueSerializer(thread);
bool success = serializer->WriteValue(thread, JSHandle<JSTaggedValue>(jsSet),
JSHandle<JSTaggedValue>(thread, JSTaggedValue::Undefined()),
JSHandle<JSTaggedValue>(thread, JSTaggedValue::Undefined()));
EXPECT_TRUE(success) << "Serialize JSSharedSet fail";
std::unique_ptr<SerializeData> data = serializer->Release();
constexpr uint32_t maxNumDeserialziers = 10;
std::atomic<uint32_t> pendingExceptions = 0;
JSDeserializerTest jsDeserializerTests[maxNumDeserialziers];
std::thread threads[maxNumDeserialziers];
for (int32_t i = 0; i < maxNumDeserialziers; i++) {
threads[i] = std::thread(&JSDeserializerTest::JSSharedSetMultiThreadTest2,
jsDeserializerTests[i], data.get(),
std::make_pair<int32_t, int32_t>(i * maxNumDeserialziers, (i + 1) * maxNumDeserialziers),
std::ref(pendingExceptions));
}
ThreadSuspensionScope scope(thread);
for (int i = 0; i < maxNumDeserialziers; i++) {
threads[i].join();
}
if (pendingExceptions != 0) {
EXPECT_TRUE(jsSet->GetSize(thread) != maxNumDeserialziers * maxNumDeserialziers);
} else {
EXPECT_TRUE(jsSet->GetSize(thread) == maxNumDeserialziers * maxNumDeserialziers);
}
delete serializer;
};
HWTEST_F_L0(JSSerializerTest, SerializeJSSharedMapBasic1)
{ {
JSHandle<JSSharedMap> jsMap = CreateSMap(thread); JSHandle<JSSharedMap> jsMap = CreateSMap(thread);
ValueSerializer *serializer = new ValueSerializer(thread); ValueSerializer *serializer = new ValueSerializer(thread);
@ -2327,53 +2255,22 @@ HWTEST_F_L0(JSSerializerTest, SerializeJSSharedMapBasic1)
jsDeserializerTest, data.get()); jsDeserializerTest, data.get());
ThreadSuspensionScope scope(thread); ThreadSuspensionScope scope(thread);
t1.join(); t1.join();
EXPECT_TRUE(jsMap->GetSize(thread) == 0); EXPECT_TRUE(JSSharedMap::GetSize(thread, jsMap) == 0);
} }
{ {
for (int i = 0; i < INITIALIZE_SIZE; i++) { for (int i = 0; i < INITIALIZE_SIZE; i++) {
JSSharedMap::Set(thread, jsMap, JSHandle<JSTaggedValue>(thread, JSTaggedValue(i)), JSSharedMap::Set(thread, jsMap, JSHandle<JSTaggedValue>(thread, JSTaggedValue(i)),
JSHandle<JSTaggedValue>(thread, JSTaggedValue(i))); JSHandle<JSTaggedValue>(thread, JSTaggedValue(i)));
} }
EXPECT_TRUE(!jsMap->Has(thread, JSTaggedValue(INITIALIZE_SIZE))); EXPECT_TRUE(!JSSharedMap::Has(thread, jsMap, JSTaggedValue(INITIALIZE_SIZE)));
JSDeserializerTest jsDeserializerTest; JSDeserializerTest jsDeserializerTest;
// The Deserializer thread will add and delete a element // The Deserializer thread will add and delete a element
std::thread t1(&JSDeserializerTest::JSSharedMapBasicTest2, std::thread t1(&JSDeserializerTest::JSSharedMapBasicTest2,
jsDeserializerTest, data.get()); jsDeserializerTest, data.get());
ThreadSuspensionScope scope(thread); ThreadSuspensionScope scope(thread);
t1.join(); t1.join();
EXPECT_TRUE(!jsMap->Has(thread, JSTaggedValue(0))); EXPECT_TRUE(!JSSharedMap::Has(thread, jsMap, JSTaggedValue(0)));
EXPECT_TRUE(jsMap->Has(thread, JSTaggedValue(INITIALIZE_SIZE))); EXPECT_TRUE(JSSharedMap::Has(thread, jsMap, JSTaggedValue(INITIALIZE_SIZE)));
}
delete serializer;
};
HWTEST_F_L0(JSSerializerTest, SerializeMultiThreadJSSharedMap)
{
JSHandle<JSSharedMap> jsMap = CreateSMap(thread);
ValueSerializer *serializer = new ValueSerializer(thread);
bool success = serializer->WriteValue(thread, JSHandle<JSTaggedValue>(jsMap),
JSHandle<JSTaggedValue>(thread, JSTaggedValue::Undefined()),
JSHandle<JSTaggedValue>(thread, JSTaggedValue::Undefined()));
EXPECT_TRUE(success) << "Serialize JSSharedMap fail";
std::unique_ptr<SerializeData> data = serializer->Release();
constexpr uint32_t maxNumDeserialziers = 10;
std::atomic<uint32_t> pendingExceptions = 0;
JSDeserializerTest jsDeserializerTests[maxNumDeserialziers];
std::thread threads[maxNumDeserialziers];
for (int32_t i = 0; i < maxNumDeserialziers; i++) {
threads[i] = std::thread(&JSDeserializerTest::JSSharedMapMultiThreadTest,
jsDeserializerTests[i], data.get(),
std::make_pair<int32_t, int32_t>(i * maxNumDeserialziers, (i + 1) * maxNumDeserialziers),
std::ref(pendingExceptions));
}
ThreadSuspensionScope scope(thread);
for (int i = 0; i < maxNumDeserialziers; i++) {
threads[i].join();
}
if (pendingExceptions != 0) {
EXPECT_TRUE(jsMap->GetSize(thread) != maxNumDeserialziers * maxNumDeserialziers);
} else {
EXPECT_TRUE(jsMap->GetSize(thread) == maxNumDeserialziers * maxNumDeserialziers);
} }
delete serializer; delete serializer;
}; };

View File

@ -120,6 +120,12 @@ bool ValueSerializer::WriteValue(JSThread *thread,
return false; return false;
} }
SerializeJSTaggedValue(value.GetTaggedValue()); SerializeJSTaggedValue(value.GetTaggedValue());
// ThreadNativeScope may trigger moving gc, so PushSerializationRoot must do before native state.
// Push share root object to runtime map
uint32_t index = data_->GetDataIndex();
if (!sharedObjects_.empty()) {
index = Runtime::GetInstance()->PushSerializationRoot(thread_, sharedObjects_);
}
{ {
ThreadNativeScope nativeScope(thread); ThreadNativeScope nativeScope(thread);
for (auto &entry : detachCallbackInfo_) { for (auto &entry : detachCallbackInfo_) {
@ -128,7 +134,7 @@ bool ValueSerializer::WriteValue(JSThread *thread,
if (detachNative == nullptr || entry.first < 0) { if (detachNative == nullptr || entry.first < 0) {
LOG_ECMA(ERROR) << "ValueSerialize: SerializeNativeBindingObject detachNative == nullptr"; LOG_ECMA(ERROR) << "ValueSerialize: SerializeNativeBindingObject detachNative == nullptr";
notSupport_ = true; notSupport_ = true;
return false; break;
} }
void *buffer = detachNative(info->env, info->nativeValue, info->hint, info->detachData); void *buffer = detachNative(info->env, info->nativeValue, info->hint, info->detachData);
data_->EmitU64(reinterpret_cast<uint64_t>(buffer), static_cast<size_t>(entry.first)); data_->EmitU64(reinterpret_cast<uint64_t>(buffer), static_cast<size_t>(entry.first));
@ -137,11 +143,13 @@ bool ValueSerializer::WriteValue(JSThread *thread,
if (notSupport_) { if (notSupport_) {
LOG_ECMA(ERROR) << "ValueSerialize: serialize data is incomplete"; LOG_ECMA(ERROR) << "ValueSerialize: serialize data is incomplete";
data_->SetIncompleteData(true); data_->SetIncompleteData(true);
if (!sharedObjects_.empty()) {
// If notSupport, serializeRoot should be removed.
Runtime::GetInstance()->RemoveSerializationRoot(thread_, index);
}
return false; return false;
} }
// Push share root object to runtime map
if (!sharedObjects_.empty()) { if (!sharedObjects_.empty()) {
uint32_t index = Runtime::GetInstance()->PushSerializationRoot(thread_, sharedObjects_);
data_->SetDataIndex(index); data_->SetDataIndex(index);
} }
size_t maxSerializerSize = vm_->GetEcmaParamConfiguration().GetMaxJSSerializerSize(); size_t maxSerializerSize = vm_->GetEcmaParamConfiguration().GetMaxJSSerializerSize();

View File

@ -52,6 +52,10 @@ void ObjectFactory::NewSObjectHook() const
} else { } else {
sHeap_->TriggerConcurrentMarking<TriggerGCType::SHARED_GC, GCReason::OTHER>(thread_); sHeap_->TriggerConcurrentMarking<TriggerGCType::SHARED_GC, GCReason::OTHER>(thread_);
} }
if (!ecmascript::AnFileDataManager::GetInstance()->IsEnable()) {
sHeap_->WaitGCFinished(thread_);
sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread_);
}
} }
#endif #endif
} }
@ -539,20 +543,36 @@ JSHandle<JSNativePointer> ObjectFactory::NewSJSNativePointer(void *externalPoint
return obj; return obj;
} }
JSHandle<JSNativePointer> ObjectFactory::NewSReadOnlyJSNativePointer(void* externalPointer)
{
NewSObjectHook();
auto jsNativePointerClass =
JSHClass::Cast(thread_->GlobalConstants()->GetSJSNativePointerClass().GetTaggedObject());
jsNativePointerClass->SetIsJSShared(true);
TaggedObject* header = sHeap_->AllocateReadOnlyOrHugeObject(thread_, jsNativePointerClass);
JSHandle<JSNativePointer> obj(thread_, header);
obj->SetExternalPointer(externalPointer);
obj->SetDeleter(nullptr);
obj->SetData(nullptr);
obj->SetBindingSize(0);
obj->SetNativeFlag(NativeFlag::NO_DIV);
return obj;
}
JSHandle<AccessorData> ObjectFactory::NewSInternalAccessor(void *setter, void *getter) JSHandle<AccessorData> ObjectFactory::NewSInternalAccessor(void *setter, void *getter)
{ {
NewSObjectHook(); NewSObjectHook();
TaggedObject *header = sHeap_->AllocateNonMovableOrHugeObject(thread_, TaggedObject *header = sHeap_->AllocateReadOnlyOrHugeObject(thread_,
JSHClass::Cast(thread_->GlobalConstants()->GetInternalAccessorClass().GetTaggedObject())); JSHClass::Cast(thread_->GlobalConstants()->GetInternalAccessorClass().GetTaggedObject()));
JSHandle<AccessorData> obj(thread_, AccessorData::Cast(header)); JSHandle<AccessorData> obj(thread_, AccessorData::Cast(header));
obj->SetGetter(thread_, JSTaggedValue::Undefined()); obj->SetGetter(thread_, JSTaggedValue::Undefined());
obj->SetSetter(thread_, JSTaggedValue::Undefined()); obj->SetSetter(thread_, JSTaggedValue::Undefined());
if (setter != nullptr) { if (setter != nullptr) {
JSHandle<JSNativePointer> setFunc = NewSJSNativePointer(setter, nullptr, nullptr, true); JSHandle<JSNativePointer> setFunc = NewSReadOnlyJSNativePointer(setter);
obj->SetSetter(thread_, setFunc.GetTaggedValue()); obj->SetSetter(thread_, setFunc.GetTaggedValue());
} }
if (getter != nullptr) { if (getter != nullptr) {
JSHandle<JSNativePointer> getFunc = NewSJSNativePointer(getter, nullptr, nullptr, true); JSHandle<JSNativePointer> getFunc = NewSReadOnlyJSNativePointer(getter);
obj->SetGetter(thread_, getFunc); obj->SetGetter(thread_, getFunc);
} }
return obj; return obj;

View File

@ -31,8 +31,8 @@ enum class ModType : uint8_t {
template<typename Container, ModType modType = ModType::READ> template<typename Container, ModType modType = ModType::READ>
class ConcurrentApiScope final { class ConcurrentApiScope final {
public: public:
ConcurrentApiScope(JSThread *thread, const TaggedObject *obj, SCheckMode mode = SCheckMode::CHECK) ConcurrentApiScope(JSThread *thread, const JSHandle<JSTaggedValue> &objHandle, SCheckMode mode = SCheckMode::CHECK)
: thread_(thread), obj_(obj), checkMode_(mode) : thread_(thread), objHandle_(objHandle), checkMode_(mode)
{ {
if (checkMode_ == SCheckMode::SKIP) { if (checkMode_ == SCheckMode::SKIP) {
return; return;
@ -64,7 +64,8 @@ private:
inline uint32_t GetModRecord() inline uint32_t GetModRecord()
{ {
return reinterpret_cast<volatile std::atomic<uint32_t> *>( return reinterpret_cast<volatile std::atomic<uint32_t> *>(
ToUintPtr(obj_) + Container::MOD_RECORD_OFFSET)->load(std::memory_order_acquire); ToUintPtr(objHandle_->GetTaggedObject()) +
Container::MOD_RECORD_OFFSET)->load(std::memory_order_acquire);
} }
inline void CanWrite() inline void CanWrite()
@ -72,8 +73,8 @@ private:
// Set to ModType::WRITE, expect no writer and readers // Set to ModType::WRITE, expect no writer and readers
constexpr uint32_t expectedModRecord = 0; constexpr uint32_t expectedModRecord = 0;
constexpr uint32_t desiredModRecord = WRITE_MOD_MASK; constexpr uint32_t desiredModRecord = WRITE_MOD_MASK;
uint32_t ret = Barriers::AtomicSetPrimitive(const_cast<TaggedObject *>(obj_), Container::MOD_RECORD_OFFSET, uint32_t ret = Barriers::AtomicSetPrimitive(objHandle_->GetTaggedObject(),
expectedModRecord, desiredModRecord); Container::MOD_RECORD_OFFSET, expectedModRecord, desiredModRecord);
if (ret != expectedModRecord) { if (ret != expectedModRecord) {
auto error = containers::ContainerError::BusinessError( auto error = containers::ContainerError::BusinessError(
thread_, containers::ErrorFlag::CONCURRENT_MODIFICATION_ERROR, "Concurrent modification exception"); thread_, containers::ErrorFlag::CONCURRENT_MODIFICATION_ERROR, "Concurrent modification exception");
@ -85,8 +86,8 @@ private:
{ {
constexpr uint32_t expectedModRecord = WRITE_MOD_MASK; constexpr uint32_t expectedModRecord = WRITE_MOD_MASK;
constexpr uint32_t desiredModRecord = 0u; constexpr uint32_t desiredModRecord = 0u;
uint32_t ret = Barriers::AtomicSetPrimitive(const_cast<TaggedObject *>(obj_), Container::MOD_RECORD_OFFSET, uint32_t ret = Barriers::AtomicSetPrimitive(objHandle_->GetTaggedObject(),
expectedModRecord, desiredModRecord); Container::MOD_RECORD_OFFSET, expectedModRecord, desiredModRecord);
if (ret != expectedModRecord) { if (ret != expectedModRecord) {
auto error = containers::ContainerError::BusinessError( auto error = containers::ContainerError::BusinessError(
thread_, containers::ErrorFlag::CONCURRENT_MODIFICATION_ERROR, "Concurrent modification exception"); thread_, containers::ErrorFlag::CONCURRENT_MODIFICATION_ERROR, "Concurrent modification exception");
@ -106,8 +107,8 @@ private:
} }
// Increase readers by 1 // Increase readers by 1
desiredModRecord_ = expectModRecord_ + 1; desiredModRecord_ = expectModRecord_ + 1;
auto ret = Barriers::AtomicSetPrimitive(const_cast<TaggedObject *>(obj_), Container::MOD_RECORD_OFFSET, auto ret = Barriers::AtomicSetPrimitive(objHandle_->GetTaggedObject(),
expectModRecord_, desiredModRecord_); Container::MOD_RECORD_OFFSET, expectModRecord_, desiredModRecord_);
if (ret == expectModRecord_) { if (ret == expectModRecord_) {
break; break;
} }
@ -118,8 +119,8 @@ private:
{ {
std::swap(expectModRecord_, desiredModRecord_); std::swap(expectModRecord_, desiredModRecord_);
while (true) { while (true) {
auto ret = Barriers::AtomicSetPrimitive(const_cast<TaggedObject *>(obj_), Container::MOD_RECORD_OFFSET, auto ret = Barriers::AtomicSetPrimitive(objHandle_->GetTaggedObject(),
expectModRecord_, desiredModRecord_); Container::MOD_RECORD_OFFSET, expectModRecord_, desiredModRecord_);
if (ret == expectModRecord_) { if (ret == expectModRecord_) {
break; break;
} }
@ -136,7 +137,7 @@ private:
} }
JSThread *thread_ {nullptr}; JSThread *thread_ {nullptr};
const TaggedObject *obj_ {nullptr}; JSHandle<JSTaggedValue> objHandle_;
SCheckMode checkMode_ { SCheckMode::CHECK }; SCheckMode checkMode_ { SCheckMode::CHECK };
// For readers // For readers
uint32_t expectModRecord_ {0}; uint32_t expectModRecord_ {0};

View File

@ -36,7 +36,7 @@ using base::ArrayHelper;
JSTaggedValue JSSharedArray::LengthGetter([[maybe_unused]] JSThread *thread, const JSHandle<JSObject> &self, JSTaggedValue JSSharedArray::LengthGetter([[maybe_unused]] JSThread *thread, const JSHandle<JSObject> &self,
SCheckMode checkMode) SCheckMode checkMode)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, self.GetTaggedValue().GetTaggedObject(), [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, JSHandle<JSTaggedValue>::Cast(self),
checkMode); checkMode);
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception());
return JSTaggedValue(JSSharedArray::Cast(*self)->GetLength()); return JSTaggedValue(JSSharedArray::Cast(*self)->GetLength());
@ -459,7 +459,7 @@ OperationResult JSSharedArray::GetProperty(JSThread *thread, const JSHandle<JSTa
const JSHandle<JSTaggedValue> &key, SCheckMode sCheckMode) const JSHandle<JSTaggedValue> &key, SCheckMode sCheckMode)
{ {
// Add Concurrent check for shared array // Add Concurrent check for shared array
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, obj.GetTaggedValue().GetTaggedObject(), [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, obj,
sCheckMode); sCheckMode);
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, RETURN_VALUE_IF_ABRUPT_COMPLETION(thread,
OperationResult(thread, JSTaggedValue::Exception(), PropertyMetaData(false))); OperationResult(thread, JSTaggedValue::Exception(), PropertyMetaData(false)));
@ -478,7 +478,7 @@ bool JSSharedArray::SetProperty(JSThread *thread, const JSHandle<JSTaggedValue>
{ {
// Concurrent check for shared array // Concurrent check for shared array
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(
thread, obj.GetTaggedValue().GetTaggedObject(), sCheckMode); thread, obj, sCheckMode);
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
// 2 ~ 4 findProperty in Receiver, Obj and its parents // 2 ~ 4 findProperty in Receiver, Obj and its parents
ObjectOperator op(thread, obj, key); ObjectOperator op(thread, obj, key);
@ -497,7 +497,7 @@ bool JSSharedArray::SetProperty(JSThread *thread, const JSHandle<JSTaggedValue>
{ {
// Concurrent check for shared array // Concurrent check for shared array
[[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope( [[maybe_unused]] ConcurrentApiScope<JSSharedArray, ModType::WRITE> scope(
thread, obj.GetTaggedValue().GetTaggedObject(), sCheckMode); thread, obj, sCheckMode);
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
// 2 ~ 4 findProperty in Receiver, Obj and its parents // 2 ~ 4 findProperty in Receiver, Obj and its parents
ObjectOperator op(thread, obj, index); ObjectOperator op(thread, obj, index);

View File

@ -48,13 +48,11 @@ JSTaggedValue JSSharedArrayIterator::Next(EcmaRuntimeCallInfo *argv)
JSHandle<JSSharedArrayIterator> iter(thisObj); JSHandle<JSSharedArrayIterator> iter(thisObj);
JSHandle<JSTaggedValue> array(thread, iter->GetIteratedArray()); JSHandle<JSTaggedValue> array(thread, iter->GetIteratedArray());
if (array->IsJSSharedArray()) { if (array->IsJSSharedArray()) {
JSHandle<JSSharedArray> iteratedArray(thread, iter->GetIteratedArray()); [[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, array);
[[maybe_unused]] ConcurrentApiScope<JSSharedArray> scope(thread, *iteratedArray);
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception());
return NextInternal(thread, iter, array); return NextInternal(thread, iter, array);
} else if (array->IsSharedTypedArray()) { } else if (array->IsSharedTypedArray()) {
JSHandle<JSSharedTypedArray> iteratedArray(thread, iter->GetIteratedArray()); [[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, array);
[[maybe_unused]] ConcurrentApiScope<JSSharedTypedArray> scope(thread, *iteratedArray);
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception());
return NextInternal(thread, iter, array); return NextInternal(thread, iter, array);
} }

View File

@ -30,7 +30,7 @@ void JSSharedMap::Set(JSThread *thread, const JSHandle<JSSharedMap> &map,
THROW_NEW_ERROR_AND_RETURN(thread, error); THROW_NEW_ERROR_AND_RETURN(thread, error);
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedMap, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedMap, ModType::WRITE> scope(thread,
map.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(map));
RETURN_IF_ABRUPT_COMPLETION(thread); RETURN_IF_ABRUPT_COMPLETION(thread);
JSHandle<LinkedHashMap> mapHandle(thread, LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject())); JSHandle<LinkedHashMap> mapHandle(thread, LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject()));
@ -41,7 +41,7 @@ void JSSharedMap::Set(JSThread *thread, const JSHandle<JSSharedMap> &map,
bool JSSharedMap::Delete(JSThread *thread, const JSHandle<JSSharedMap> &map, const JSHandle<JSTaggedValue> &key) bool JSSharedMap::Delete(JSThread *thread, const JSHandle<JSSharedMap> &map, const JSHandle<JSTaggedValue> &key)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedMap, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedMap, ModType::WRITE> scope(thread,
map.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(map));
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
JSHandle<LinkedHashMap> mapHandle(thread, LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject())); JSHandle<LinkedHashMap> mapHandle(thread, LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject()));
int entry = mapHandle->FindElement(thread, key.GetTaggedValue()); int entry = mapHandle->FindElement(thread, key.GetTaggedValue());
@ -55,47 +55,47 @@ bool JSSharedMap::Delete(JSThread *thread, const JSHandle<JSSharedMap> &map, con
void JSSharedMap::Clear(JSThread *thread, const JSHandle<JSSharedMap> &map) void JSSharedMap::Clear(JSThread *thread, const JSHandle<JSSharedMap> &map)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedMap, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedMap, ModType::WRITE> scope(thread,
map.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(map));
RETURN_IF_ABRUPT_COMPLETION(thread); RETURN_IF_ABRUPT_COMPLETION(thread);
JSHandle<LinkedHashMap> mapHandle(thread, LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject())); JSHandle<LinkedHashMap> mapHandle(thread, LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject()));
JSHandle<LinkedHashMap> newMap = LinkedHashMap::Clear(thread, mapHandle); JSHandle<LinkedHashMap> newMap = LinkedHashMap::Clear(thread, mapHandle);
map->SetLinkedMap(thread, newMap); map->SetLinkedMap(thread, newMap);
} }
bool JSSharedMap::Has(JSThread *thread, JSTaggedValue key) const bool JSSharedMap::Has(JSThread *thread, const JSHandle<JSSharedMap> &map, JSTaggedValue key)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, reinterpret_cast<const TaggedObject*>(this)); [[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, JSHandle<JSTaggedValue>::Cast(map));
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->Has(thread, key); return LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject())->Has(thread, key);
} }
JSTaggedValue JSSharedMap::Get(JSThread *thread, JSTaggedValue key) const JSTaggedValue JSSharedMap::Get(JSThread *thread, const JSHandle<JSSharedMap> &map, JSTaggedValue key)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, reinterpret_cast<const TaggedObject*>(this)); [[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, JSHandle<JSTaggedValue>::Cast(map));
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Undefined()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Undefined());
return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->Get(thread, key); return LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject())->Get(thread, key);
} }
uint32_t JSSharedMap::GetSize(JSThread *thread) const uint32_t JSSharedMap::GetSize(JSThread *thread, const JSHandle<JSSharedMap> &map)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, reinterpret_cast<const TaggedObject*>(this)); [[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, JSHandle<JSTaggedValue>::Cast(map));
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0);
return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->NumberOfElements(); return LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject())->NumberOfElements();
} }
JSTaggedValue JSSharedMap::GetKey(JSThread *thread, uint32_t entry) const JSTaggedValue JSSharedMap::GetKey(JSThread *thread, const JSHandle<JSSharedMap> &map, uint32_t entry)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, reinterpret_cast<const TaggedObject*>(this)); [[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, JSHandle<JSTaggedValue>::Cast(map));
ASSERT_PRINT(entry >= 0 && entry < GetSize(thread), "entry must be non-negative integer less than capacity"); ASSERT_PRINT(entry >= 0 && entry < GetSize(thread, map), "entry must be non-negative integer less than capacity");
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Undefined()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Undefined());
return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->GetKey(entry); return LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject())->GetKey(entry);
} }
JSTaggedValue JSSharedMap::GetValue(JSThread *thread, uint32_t entry) const JSTaggedValue JSSharedMap::GetValue(JSThread *thread, const JSHandle<JSSharedMap> &map, uint32_t entry)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, reinterpret_cast<const TaggedObject*>(this)); [[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, JSHandle<JSTaggedValue>::Cast(map));
ASSERT_PRINT(entry >= 0 && entry < GetSize(thread), "entry must be non-negative integer less than capacity"); ASSERT_PRINT(entry >= 0 && entry < GetSize(thread, map), "entry must be non-negative integer less than capacity");
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Undefined()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Undefined());
return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->GetValue(entry); return LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject())->GetValue(entry);
} }
} // namespace panda::ecmascript } // namespace panda::ecmascript

View File

@ -30,15 +30,15 @@ public:
const JSHandle<JSTaggedValue> &value); const JSHandle<JSTaggedValue> &value);
static void Clear(JSThread *thread, const JSHandle<JSSharedMap> &map); static void Clear(JSThread *thread, const JSHandle<JSSharedMap> &map);
bool Has(JSThread *thread, JSTaggedValue key) const; static bool Has(JSThread *thread, const JSHandle<JSSharedMap> &map, JSTaggedValue key);
JSTaggedValue Get(JSThread *thread, JSTaggedValue key) const; static JSTaggedValue Get(JSThread *thread, const JSHandle<JSSharedMap> &map, JSTaggedValue key);
uint32_t GetSize(JSThread *thread) const; static uint32_t GetSize(JSThread *thread, const JSHandle<JSSharedMap> &map);
JSTaggedValue GetKey(JSThread *thread, uint32_t entry) const; static JSTaggedValue GetKey(JSThread *thread, const JSHandle<JSSharedMap> &map, uint32_t entry);
JSTaggedValue GetValue(JSThread *thread, uint32_t entry) const; static JSTaggedValue GetValue(JSThread *thread, const JSHandle<JSSharedMap> &map, uint32_t entry);
static constexpr size_t LINKED_MAP_OFFSET = JSObject::SIZE; static constexpr size_t LINKED_MAP_OFFSET = JSObject::SIZE;
ACCESSORS(LinkedMap, LINKED_MAP_OFFSET, MOD_RECORD_OFFSET) ACCESSORS(LinkedMap, LINKED_MAP_OFFSET, MOD_RECORD_OFFSET)

View File

@ -45,7 +45,7 @@ JSTaggedValue JSSharedMapIterator::NextInternal(JSThread *thread, JSHandle<JSTag
return JSIterator::CreateIterResultObject(thread, undefinedHandle, true).GetTaggedValue(); return JSIterator::CreateIterResultObject(thread, undefinedHandle, true).GetTaggedValue();
}; };
JSHandle<JSSharedMap> iteratedMap(thread, iter->GetIteratedMap()); JSHandle<JSSharedMap> iteratedMap(thread, iter->GetIteratedMap());
[[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, *iteratedMap); [[maybe_unused]] ConcurrentApiScope<JSSharedMap> scope(thread, JSHandle<JSTaggedValue>::Cast(iteratedMap));
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception());
JSHandle<LinkedHashMap> map(thread, iteratedMap->GetLinkedMap()); JSHandle<LinkedHashMap> map(thread, iteratedMap->GetLinkedMap());

View File

@ -29,7 +29,7 @@ void JSSharedSet::Add(JSThread *thread, const JSHandle<JSSharedSet> &set, const
THROW_NEW_ERROR_AND_RETURN(thread, error); THROW_NEW_ERROR_AND_RETURN(thread, error);
} }
[[maybe_unused]] ConcurrentApiScope<JSSharedSet, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedSet, ModType::WRITE> scope(thread,
set.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(set));
RETURN_IF_ABRUPT_COMPLETION(thread); RETURN_IF_ABRUPT_COMPLETION(thread);
JSHandle<LinkedHashSet> setHandle(thread, LinkedHashSet::Cast(set->GetLinkedSet().GetTaggedObject())); JSHandle<LinkedHashSet> setHandle(thread, LinkedHashSet::Cast(set->GetLinkedSet().GetTaggedObject()));
@ -40,7 +40,7 @@ void JSSharedSet::Add(JSThread *thread, const JSHandle<JSSharedSet> &set, const
bool JSSharedSet::Delete(JSThread *thread, const JSHandle<JSSharedSet> &set, const JSHandle<JSTaggedValue> &value) bool JSSharedSet::Delete(JSThread *thread, const JSHandle<JSSharedSet> &set, const JSHandle<JSTaggedValue> &value)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedSet, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedSet, ModType::WRITE> scope(thread,
set.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(set));
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
JSHandle<LinkedHashSet> setHandle(thread, LinkedHashSet::Cast(set->GetLinkedSet().GetTaggedObject())); JSHandle<LinkedHashSet> setHandle(thread, LinkedHashSet::Cast(set->GetLinkedSet().GetTaggedObject()));
int entry = setHandle->FindElement(thread, value.GetTaggedValue()); int entry = setHandle->FindElement(thread, value.GetTaggedValue());
@ -54,33 +54,33 @@ bool JSSharedSet::Delete(JSThread *thread, const JSHandle<JSSharedSet> &set, con
void JSSharedSet::Clear(JSThread *thread, const JSHandle<JSSharedSet> &set) void JSSharedSet::Clear(JSThread *thread, const JSHandle<JSSharedSet> &set)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedSet, ModType::WRITE> scope(thread, [[maybe_unused]] ConcurrentApiScope<JSSharedSet, ModType::WRITE> scope(thread,
set.GetTaggedValue().GetTaggedObject()); JSHandle<JSTaggedValue>::Cast(set));
RETURN_IF_ABRUPT_COMPLETION(thread); RETURN_IF_ABRUPT_COMPLETION(thread);
JSHandle<LinkedHashSet> setHandle(thread, LinkedHashSet::Cast(set->GetLinkedSet().GetTaggedObject())); JSHandle<LinkedHashSet> setHandle(thread, LinkedHashSet::Cast(set->GetLinkedSet().GetTaggedObject()));
JSHandle<LinkedHashSet> newSet = LinkedHashSet::Clear(thread, setHandle); JSHandle<LinkedHashSet> newSet = LinkedHashSet::Clear(thread, setHandle);
set->SetLinkedSet(thread, newSet); set->SetLinkedSet(thread, newSet);
} }
bool JSSharedSet::Has(JSThread *thread, JSTaggedValue value) const bool JSSharedSet::Has(JSThread *thread, const JSHandle<JSSharedSet> &set, JSTaggedValue value)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedSet> scope(thread, reinterpret_cast<const TaggedObject*>(this)); [[maybe_unused]] ConcurrentApiScope<JSSharedSet> scope(thread, JSHandle<JSTaggedValue>::Cast(set));
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
return LinkedHashSet::Cast(GetLinkedSet().GetTaggedObject())->Has(thread, value); return LinkedHashSet::Cast(set->GetLinkedSet().GetTaggedObject())->Has(thread, value);
} }
uint32_t JSSharedSet::GetSize(JSThread *thread) const uint32_t JSSharedSet::GetSize(JSThread *thread, const JSHandle<JSSharedSet> &set)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedSet> scope(thread, reinterpret_cast<const TaggedObject*>(this)); [[maybe_unused]] ConcurrentApiScope<JSSharedSet> scope(thread, JSHandle<JSTaggedValue>::Cast(set));
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0);
return LinkedHashSet::Cast(GetLinkedSet().GetTaggedObject())->NumberOfElements(); return LinkedHashSet::Cast(set->GetLinkedSet().GetTaggedObject())->NumberOfElements();
} }
JSTaggedValue JSSharedSet::GetValue(JSThread *thread, int entry) const JSTaggedValue JSSharedSet::GetValue(JSThread *thread, const JSHandle<JSSharedSet> &set, int entry)
{ {
[[maybe_unused]] ConcurrentApiScope<JSSharedSet> scope(thread, reinterpret_cast<const TaggedObject*>(this)); [[maybe_unused]] ConcurrentApiScope<JSSharedSet> scope(thread, JSHandle<JSTaggedValue>::Cast(set));
ASSERT_PRINT(entry >= 0 && static_cast<uint32_t>(entry) < GetSize(thread), ASSERT_PRINT(entry >= 0 && static_cast<uint32_t>(entry) < GetSize(thread, set),
"entry must be non-negative integer less than capacity"); "entry must be non-negative integer less than capacity");
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Undefined()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Undefined());
return LinkedHashSet::Cast(GetLinkedSet().GetTaggedObject())->GetValue(entry); return LinkedHashSet::Cast(set->GetLinkedSet().GetTaggedObject())->GetValue(entry);
} }
} // namespace panda::ecmascript } // namespace panda::ecmascript

View File

@ -30,11 +30,11 @@ public:
static void Clear(JSThread *thread, const JSHandle<JSSharedSet> &set); static void Clear(JSThread *thread, const JSHandle<JSSharedSet> &set);
bool Has(JSThread *thread, JSTaggedValue value) const; static bool Has(JSThread *thread, const JSHandle<JSSharedSet> &set, JSTaggedValue value);
uint32_t GetSize(JSThread *thread) const; static uint32_t GetSize(JSThread *thread, const JSHandle<JSSharedSet> &set);
JSTaggedValue GetValue(JSThread *thread, int entry) const; static JSTaggedValue GetValue(JSThread *thread, const JSHandle<JSSharedSet> &set, int entry);
static constexpr size_t LINKED_SET_OFFSET = JSObject::SIZE; static constexpr size_t LINKED_SET_OFFSET = JSObject::SIZE;
ACCESSORS_SYNCHRONIZED(LinkedSet, LINKED_SET_OFFSET, MOD_RECORD_OFFSET) ACCESSORS_SYNCHRONIZED(LinkedSet, LINKED_SET_OFFSET, MOD_RECORD_OFFSET)

View File

@ -45,7 +45,7 @@ JSTaggedValue JSSharedSetIterator::NextInternal(JSThread *thread, JSHandle<JSTag
return JSIterator::CreateIterResultObject(thread, undefinedHandle, true).GetTaggedValue(); return JSIterator::CreateIterResultObject(thread, undefinedHandle, true).GetTaggedValue();
} }
JSHandle<JSSharedSet> iteratedSet(thread, iter->GetIteratedSet()); JSHandle<JSSharedSet> iteratedSet(thread, iter->GetIteratedSet());
[[maybe_unused]] ConcurrentApiScope<JSSharedSet> scope(thread, *iteratedSet); [[maybe_unused]] ConcurrentApiScope<JSSharedSet> scope(thread, JSHandle<JSTaggedValue>::Cast(iteratedSet));
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception());
JSHandle<LinkedHashSet> set(thread, iteratedSet->GetLinkedSet()); JSHandle<LinkedHashSet> set(thread, iteratedSet->GetLinkedSet());

View File

@ -1338,12 +1338,12 @@ void SnapshotProcessor::DeserializeString(uintptr_t stringBegin, uintptr_t strin
EcmaStringTable *stringTable = vm_->GetEcmaStringTable(); EcmaStringTable *stringTable = vm_->GetEcmaStringTable();
JSThread *thread = vm_->GetJSThread(); JSThread *thread = vm_->GetJSThread();
ASSERT(deserializeStringVector_.empty()); ASSERT(deserializeStringVector_.empty());
auto oldSpace = sHeap_->GetOldSpace();
auto hugeSpace = sHeap_->GetHugeObjectSpace(); auto hugeSpace = sHeap_->GetHugeObjectSpace();
auto globalConst = const_cast<GlobalEnvConstants *>(thread->GlobalConstants()); auto globalConst = const_cast<GlobalEnvConstants *>(thread->GlobalConstants());
auto lineStringClass = globalConst->GetLineStringClass(); auto lineStringClass = globalConst->GetLineStringClass();
auto constantStringClass = globalConst->GetConstantStringClass(); auto constantStringClass = globalConst->GetConstantStringClass();
while (stringBegin < stringEnd) { while (stringBegin < stringEnd) {
// str is from snapshot file, which is in native heap.
EcmaString *str = reinterpret_cast<EcmaString *>(stringBegin); EcmaString *str = reinterpret_cast<EcmaString *>(stringBegin);
int index = JSTaggedValue(*(reinterpret_cast<JSTaggedType *>(str))).GetInt(); int index = JSTaggedValue(*(reinterpret_cast<JSTaggedType *>(str))).GetInt();
if (index == 1) { if (index == 1) {
@ -1376,7 +1376,7 @@ void SnapshotProcessor::DeserializeString(uintptr_t stringBegin, uintptr_t strin
if (UNLIKELY(strSize > MAX_REGULAR_HEAP_OBJECT_SIZE)) { if (UNLIKELY(strSize > MAX_REGULAR_HEAP_OBJECT_SIZE)) {
newObj = hugeSpace->Allocate(thread, strSize); newObj = hugeSpace->Allocate(thread, strSize);
} else { } else {
newObj = oldSpace->Allocate(thread, strSize, false); newObj = sHeap_->GetOldSpace()->TryAllocateAndExpand(thread, strSize, true);
} }
if (newObj == 0) { if (newObj == 0) {
LOG_ECMA_MEM(FATAL) << "Snapshot Allocate OldSharedSpace OOM"; LOG_ECMA_MEM(FATAL) << "Snapshot Allocate OldSharedSpace OOM";

View File

@ -21,6 +21,7 @@
#include "ecmascript/common.h" #include "ecmascript/common.h"
#include "ecmascript/taskpool/runner.h" #include "ecmascript/taskpool/runner.h"
#include "ecmascript/platform/mutex.h" #include "ecmascript/platform/mutex.h"
#include "ecmascript/daemon/daemon_thread.h"
namespace panda::ecmascript { namespace panda::ecmascript {
class PUBLIC_API Taskpool { class PUBLIC_API Taskpool {
@ -63,6 +64,13 @@ public:
return runner_->IsInThreadPool(id); return runner_->IsInThreadPool(id);
} }
bool PUBLIC_API IsDaemonThreadOrInThreadPool(std::thread::id id) const
{
DaemonThread *dThread = DaemonThread::GetInstance();
return IsInThreadPool(id) || (dThread != nullptr
&& dThread->GetThreadId() == JSThread::GetCurrentThreadId());
}
void SetThreadPriority(PriorityMode mode) void SetThreadPriority(PriorityMode mode)
{ {
runner_->SetQosPriority(mode); runner_->SetQosPriority(mode);

View File

@ -174,6 +174,7 @@ HWTEST_F_L0(GCTest, NativeBindingCheckGCTest)
HWTEST_F_L0(GCTest, SharedGC) HWTEST_F_L0(GCTest, SharedGC)
{ {
#ifdef NDEBUG
constexpr size_t ALLOCATE_COUNT = 100; constexpr size_t ALLOCATE_COUNT = 100;
constexpr size_t ALLOCATE_SIZE = 512; constexpr size_t ALLOCATE_SIZE = 512;
ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
@ -189,7 +190,57 @@ HWTEST_F_L0(GCTest, SharedGC)
size_t oldSizeBefore = sHeap->GetOldSpace()->GetHeapObjectSize(); size_t oldSizeBefore = sHeap->GetOldSpace()->GetHeapObjectSize();
EXPECT_TRUE(oldSizeBefore > oldSizebase); EXPECT_TRUE(oldSizeBefore > oldSizebase);
sHeap->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::OTHER>(thread); sHeap->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::OTHER>(thread);
sHeap->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::OTHER>(thread); auto oldSizeAfter = sHeap->GetOldSpace()->GetHeapObjectSize();
EXPECT_TRUE(oldSizeBefore > oldSizeAfter);
EXPECT_EQ(oldSizebase, oldSizeAfter);
#endif
}
HWTEST_F_L0(GCTest, SharedFullGC)
{
constexpr size_t ALLOCATE_COUNT = 100;
constexpr size_t ALLOCATE_SIZE = 512;
ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
auto sHeap = SharedHeap::GetInstance();
sHeap->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
auto oldSizebase = sHeap->GetOldSpace()->GetHeapObjectSize();
EXPECT_TRUE(oldSizebase > 0);
{
[[maybe_unused]] ecmascript::EcmaHandleScope baseScope(thread);
for (int i = 0; i < ALLOCATE_COUNT; i++) {
factory->NewSOldSpaceTaggedArray(ALLOCATE_SIZE, JSTaggedValue::Undefined());
}
}
size_t oldSizeBefore = sHeap->GetOldSpace()->GetHeapObjectSize();
EXPECT_TRUE(oldSizeBefore > oldSizebase);
EXPECT_TRUE(oldSizeBefore > TaggedArray::ComputeSize(JSTaggedValue::TaggedTypeSize(), ALLOCATE_SIZE));
sHeap->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
auto oldSizeAfter = sHeap->GetOldSpace()->GetHeapObjectSize();
EXPECT_TRUE(oldSizeBefore > oldSizeAfter);
}
HWTEST_F_L0(GCTest, SharedFullGCInAppspawn)
{
constexpr size_t ALLOCATE_COUNT = 10;
constexpr size_t ALLOCATE_SIZE = 512;
ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
auto sHeap = SharedHeap::GetInstance();
sHeap->CompactHeapBeforeFork(thread);
EXPECT_TRUE(sHeap->GetOldSpace()->GetHeapObjectSize() == 0);
auto oldSizebase = sHeap->GetOldSpace()->GetHeapObjectSize();
{
[[maybe_unused]] ecmascript::EcmaHandleScope baseScope(thread);
for (int i = 0; i < ALLOCATE_COUNT; i++) {
factory->NewSOldSpaceTaggedArray(ALLOCATE_SIZE, JSTaggedValue::Undefined());
}
}
size_t oldSizeBefore = sHeap->GetOldSpace()->GetHeapObjectSize();
EXPECT_TRUE(oldSizeBefore > oldSizebase);
sHeap->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
sHeap->GetAppSpawnSpace()->IterateOverMarkedObjects([](TaggedObject *object) {
Region *objectRegion = Region::ObjectAddressToRange(object);
EXPECT_TRUE(objectRegion->InSharedAppSpawnSpace());
});
auto oldSizeAfter = sHeap->GetOldSpace()->GetHeapObjectSize(); auto oldSizeAfter = sHeap->GetOldSpace()->GetHeapObjectSize();
EXPECT_TRUE(oldSizeBefore > oldSizeAfter); EXPECT_TRUE(oldSizeBefore > oldSizeAfter);
EXPECT_EQ(oldSizebase, oldSizeAfter); EXPECT_EQ(oldSizebase, oldSizeAfter);
@ -204,4 +255,23 @@ HWTEST_F_L0(GCTest, SharedGCSuspendAll)
} }
EXPECT_TRUE(thread->IsInRunningState()); EXPECT_TRUE(thread->IsInRunningState());
} }
HWTEST_F_L0(GCTest, SerializeGCCheck)
{
ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
{
[[maybe_unused]] ecmascript::EcmaHandleScope baseScope(thread);
JSHandle<EcmaString> key1(factory->NewFromASCII("error1"));
JSHandle<EcmaString> key2(factory->NewFromASCII("error2"));
JSHandle<EcmaString> msg(factory->NewFromASCII("this is error"));
std::vector<JSTaggedType> stringVec;
stringVec.push_back(reinterpret_cast<JSTaggedType>(key1.GetTaggedValue().GetTaggedObject()));
stringVec.push_back(reinterpret_cast<JSTaggedType>(key2.GetTaggedValue().GetTaggedObject()));
stringVec.push_back(reinterpret_cast<JSTaggedType>(msg.GetTaggedValue().GetTaggedObject()));
Runtime::GetInstance()->PushSerializationRoot(thread, stringVec);
}
auto sHeap = SharedHeap::GetInstance();
sHeap->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
};
} // namespace panda::test } // namespace panda::test

View File

@ -81,4 +81,25 @@ HWTEST_F_L0(JSVerificationTest, VerifyHeapObjects)
VerifyObjectVisitor objVerifier(heap, &failCount); VerifyObjectVisitor objVerifier(heap, &failCount);
const_cast<SemiSpace *>(heap->GetNewSpace())->IterateOverObjects(objVerifier); // newspace reference the old space const_cast<SemiSpace *>(heap->GetNewSpace())->IterateOverObjects(objVerifier); // newspace reference the old space
} }
} // namespace panda::test
HWTEST_F_L0(JSVerificationTest, NoBarrierInternalAccessor)
{
auto ecmaVm = thread->GetEcmaVM();
auto heap = const_cast<Heap*>(ecmaVm->GetHeap());
auto objectFactory = ecmaVm->GetFactory();
EXPECT_EQ(heap->VerifyHeapObjects(), 0U);
size_t failCount = 0;
{
EcmaHandleScope handleScope(thread);
auto newArray = objectFactory->NewTaggedArray(5, JSTaggedValue::Undefined(), MemSpaceType::SEMI_SPACE);
newArray->Set<false>(thread, 0, thread->GlobalConstants()->GetFunctionNameAccessor());
newArray->Set<false>(thread, 1, thread->GlobalConstants()->GetFunctionPrototypeAccessor());
newArray->Set<false>(thread, 2, thread->GlobalConstants()->GetFunctionLengthAccessor());
newArray->Set<false>(thread, 3, thread->GlobalConstants()->GetArrayLengthAccessor());
newArray->Set<false>(thread, 4, thread->GlobalConstants()->GetSharedArrayLengthAccessor());
VerifyObjectVisitor(heap, &failCount, VerifyKind::VERIFY_MARK_YOUNG)(
newArray.GetTaggedValue().GetTaggedObject());
}
EXPECT_EQ(failCount, 0U);
}
} // namespace panda::test

View File

@ -134,6 +134,7 @@
panda::ecmascript::ClassHelper::MatchFieldType*; panda::ecmascript::ClassHelper::MatchFieldType*;
panda::ecmascript::ConvertToStdString*; panda::ecmascript::ConvertToStdString*;
panda::ecmascript::ConvertToString*; panda::ecmascript::ConvertToString*;
panda::ecmascript::DaemonThread::GetInstance*;
panda::ecmascript::Deoptimizier::ComputeShift*; panda::ecmascript::Deoptimizier::ComputeShift*;
panda::ecmascript::Deoptimizier::DisplayItems*; panda::ecmascript::Deoptimizier::DisplayItems*;
panda::ecmascript::Deoptimizier::EncodeDeoptVregIndex*; panda::ecmascript::Deoptimizier::EncodeDeoptVregIndex*;
@ -156,6 +157,7 @@
panda::ecmascript::EcmaStringAccessor::CreateLineString*; panda::ecmascript::EcmaStringAccessor::CreateLineString*;
panda::ecmascript::EcmaStringAccessor::EcmaStringAccessor*; panda::ecmascript::EcmaStringAccessor::EcmaStringAccessor*;
panda::ecmascript::EcmaStringAccessor::ToStdString*; panda::ecmascript::EcmaStringAccessor::ToStdString*;
panda::ecmascript::EcmaVM::CheckThread*;
panda::ecmascript::EcmaVM::GetGlobalEnv*; panda::ecmascript::EcmaVM::GetGlobalEnv*;
panda::ecmascript::EcmaVM::IsEnableElementsKind*; panda::ecmascript::EcmaVM::IsEnableElementsKind*;
panda::ecmascript::EcmaVM::IsEnablePGOProfiler*; panda::ecmascript::EcmaVM::IsEnablePGOProfiler*;