Mirror of https://gitee.com/openharmony/arkcompiler_ets_runtime, synced 2025-02-17 02:10:00 +00:00

!9355 Bugfix of Wrong Disposal of Weak Reference During Partial GC

Merge pull request !9355 from 张博虓/bugfix_weakreference

Commit afaffae213
@@ -318,8 +318,13 @@ void ParallelEvacuator::SetObjectRSet(ObjectSlot slot, Region *region)
             region->InsertOldToNewRSet(slot.SlotAddress());
         } else if (valueRegion->InSharedSweepableSpace()) {
             region->InsertLocalToShareRSet(slot.SlotAddress());
-        } else if (valueRegion->InCollectSet() || JSTaggedValue(value).IsWeakForHeapObject()) {
+        } else if (valueRegion->InCollectSet()) {
             region->InsertCrossRegionRSet(slot.SlotAddress());
+        } else if (JSTaggedValue(value).IsWeakForHeapObject()) {
+            if (heap_->IsConcurrentFullMark() && !valueRegion->InSharedHeap() &&
+                (valueRegion->GetMarkGCBitset() == nullptr || !valueRegion->Test(value))) {
+                slot.Clear();
+            }
         }
     }
 }
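
For illustration only (not part of the patch): a minimal, self-contained C++ sketch of the decision the new `else if (JSTaggedValue(value).IsWeakForHeapObject())` branch encodes. `SimpleRegion` and `ShouldClearWeakSlot` are illustrative stand-ins, not the runtime's Region/ObjectSlot API. During a concurrent full mark, a weak slot is cleared only when its target lives in a non-shared region and has not been marked.

#include <cstdint>
#include <iostream>

// Illustrative stand-ins only; these are not the runtime's Region/ObjectSlot types.
struct SimpleRegion {
    bool inSharedHeap = false;
    const uint8_t *markBitset = nullptr;  // nullptr models a region that was never marked
    bool marked = false;                  // stands in for Region::Test(value)
};

// Shape of the new branch: during a concurrent full mark, a weak slot is cleared
// only when its target is local (non-shared) and not marked.
bool ShouldClearWeakSlot(bool concurrentFullMark, const SimpleRegion &valueRegion)
{
    return concurrentFullMark && !valueRegion.inSharedHeap &&
           (valueRegion.markBitset == nullptr || !valueRegion.marked);
}

int main()
{
    SimpleRegion deadTarget;   // unmarked, no bitset: the weak slot would be cleared
    SimpleRegion liveTarget;
    static const uint8_t bits[1] = {0x1};
    liveTarget.markBitset = bits;
    liveTarget.marked = true;  // survived marking: the weak slot is kept

    std::cout << ShouldClearWeakSlot(true, deadTarget) << '\n';  // 1
    std::cout << ShouldClearWeakSlot(true, liveTarget) << '\n';  // 0
    return 0;
}
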
@@ -94,14 +94,18 @@ void ParallelEvacuator::EvacuateSpace()
     if (heap_->IsParallelGCEnabled()) {
         LockHolder holder(mutex_);
         parallel_ = CalculateEvacuationThreadNum();
-        for (int i = 0; i < parallel_; i++) {
+        ASSERT(parallel_ >= 0);
+        evacuateTaskNum_ = static_cast<uint32_t>(parallel_);
+        for (uint32_t i = 1; i <= evacuateTaskNum_; i++) {
             Taskpool::GetCurrentTaskpool()->PostTask(
-                std::make_unique<EvacuationTask>(heap_->GetJSThread()->GetThreadId(), this));
+                std::make_unique<EvacuationTask>(heap_->GetJSThread()->GetThreadId(), i, this));
         }
+    } else {
+        evacuateTaskNum_ = 0;
     }
     {
         GCStats::Scope sp2(GCStats::Scope::ScopeId::EvacuateRegion, heap_->GetEcmaVM()->GetEcmaGCStats());
-        EvacuateSpace(allocator_, MAIN_THREAD_INDEX, true);
+        EvacuateSpace(allocator_, MAIN_THREAD_INDEX, 0, true);
     }

     {
@@ -114,8 +118,10 @@ void ParallelEvacuator::EvacuateSpace()
     }
 }

-bool ParallelEvacuator::EvacuateSpace(TlabAllocator *allocator, uint32_t threadIndex, bool isMain)
+bool ParallelEvacuator::EvacuateSpace(TlabAllocator *allocator, uint32_t threadIndex, uint32_t idOrder, bool isMain)
 {
+    UpdateRecordWeakReferenceInParallel(idOrder);
+
     auto &arrayTrackInfoSet = ArrayTrackInfoSet(threadIndex);
     DrainWorkloads(evacuateWorkloadSet_, [&](std::unique_ptr<Workload> &region) {
         EvacuateRegion(allocator, region->GetRegion(), arrayTrackInfoSet);
@@ -130,6 +136,30 @@ bool ParallelEvacuator::EvacuateSpace(TlabAllocator *allocator, uint32_t threadI
     return true;
 }

+void ParallelEvacuator::UpdateRecordWeakReferenceInParallel(uint32_t idOrder)
+{
+    auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
+    for (uint32_t i = idOrder; i < totalThreadCount; i += (evacuateTaskNum_ + 1)) {
+        ProcessQueue *queue = heap_->GetWorkManager()->GetWeakReferenceQueue(i);
+        while (true) {
+            auto obj = queue->PopBack();
+            if (UNLIKELY(obj == nullptr)) {
+                break;
+            }
+            ObjectSlot slot(ToUintPtr(obj));
+            JSTaggedType value = slot.GetTaggedType();
+            if (JSTaggedValue(value).IsWeak()) {
+                ASSERT(heap_->IsConcurrentFullMark());
+                Region *objectRegion = Region::ObjectAddressToRange(value);
+                if (!objectRegion->InGeneralNewSpaceOrCSet() && !objectRegion->InSharedHeap() &&
+                    (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(value))) {
+                    slot.Clear();
+                }
+            }
+        }
+    }
+}
+
 void ParallelEvacuator::EvacuateRegion(TlabAllocator *allocator, Region *region,
                                        std::unordered_set<JSTaggedType> &trackSet)
 {
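
For illustration only (not part of the patch): the new `UpdateRecordWeakReferenceInParallel` walks the weak-reference queues with a stride of `evacuateTaskNum_ + 1`, so the main thread (idOrder 0) and the posted tasks (idOrder 1..N) partition the queue indices below `totalThreadCount` without overlap. A small stand-alone sketch, with queue draining replaced by a print, that demonstrates the partition:

#include <cstdint>
#include <iostream>
#include <vector>

// Shows which weak-reference queue indices each evacuation task would drain,
// given the stride used by UpdateRecordWeakReferenceInParallel.
int main()
{
    const uint32_t evacuateTaskNum = 3;   // worker tasks posted (idOrder 1..3)
    const uint32_t totalThreadCount = 8;  // taskpool threads + 1, as in the patch

    std::vector<uint32_t> owner(totalThreadCount, UINT32_MAX);
    for (uint32_t idOrder = 0; idOrder <= evacuateTaskNum; idOrder++) {        // 0 is the main thread
        for (uint32_t i = idOrder; i < totalThreadCount; i += (evacuateTaskNum + 1)) {
            owner[i] = idOrder;           // each queue index is claimed exactly once
        }
    }
    for (uint32_t i = 0; i < totalThreadCount; i++) {
        std::cout << "queue " << i << " -> task " << owner[i] << '\n';
    }
    return 0;
}
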
@@ -258,7 +288,7 @@ void ParallelEvacuator::UpdateReference()
     {
         GCStats::Scope sp2(GCStats::Scope::ScopeId::UpdateWeekRef, heap_->GetEcmaVM()->GetEcmaGCStats());
         if (heap_->IsEdenMark()) {
-            UpdateWeakReference();
+            UpdateWeakReferenceOpt<TriggerGCType::EDEN_GC>();
         } else if (heap_->IsYoungMark()) {
             UpdateWeakReferenceOpt<TriggerGCType::YOUNG_GC>();
         } else {
@@ -368,37 +398,24 @@ void ParallelEvacuator::UpdateWeakReference()
     heap_->GetEcmaVM()->GetJSThread()->UpdateJitCodeMapReference(gcUpdateWeak);
 }

-template<TriggerGCType gcType>
-void ParallelEvacuator::UpdateRecordWeakReferenceOpt()
-{
-    auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
-    for (uint32_t i = 0; i < totalThreadCount; i++) {
-        ProcessQueue *queue = heap_->GetWorkManager()->GetWeakReferenceQueue(i);
-
-        while (true) {
-            auto obj = queue->PopBack();
-            if (UNLIKELY(obj == nullptr)) {
-                break;
-            }
-            ObjectSlot slot(ToUintPtr(obj));
-            JSTaggedValue value(slot.GetTaggedType());
-            if (value.IsHeapObject()) {
-                UpdateWeakObjectSlotOpt<gcType>(value, slot);
-            }
-        }
-    }
-}
-
 template<TriggerGCType gcType>
 void ParallelEvacuator::UpdateWeakReferenceOpt()
 {
     MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateWeakReference);
     ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateWeakReference");
-    UpdateRecordWeakReferenceOpt<gcType>();
     WeakRootVisitor gcUpdateWeak = [](TaggedObject *header) -> TaggedObject* {
         Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(header));
         ASSERT(objectRegion != nullptr);
-        if constexpr (gcType == TriggerGCType::YOUNG_GC) {
+        if constexpr (gcType == TriggerGCType::EDEN_GC) {
+            if (!objectRegion->InEdenSpace()) {
+                return header;
+            }
+            MarkWord markWord(header);
+            if (markWord.IsForwardingAddress()) {
+                return markWord.ToForwardingAddress();
+            }
+            return nullptr;
+        } else if constexpr (gcType == TriggerGCType::YOUNG_GC) {
             if (!objectRegion->InGeneralNewSpace()) {
                 return header;
             }
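
For illustration only (not part of the patch): a compact sketch of the shape of the `gcUpdateWeak` visitor that the templated `UpdateWeakReferenceOpt` builds. `FakeObject` and `UpdateWeakRoot` are simplified stand-ins for `TaggedObject`/`MarkWord`/`Region`: objects outside the space being collected are kept, evacuated objects are rewritten to their forwarding address, and everything else is reported dead so the caller clears the weak root.

#include <iostream>

// Simplified stand-ins; not the runtime's TaggedObject/MarkWord/Region API.
struct FakeObject {
    bool inCollectedSpace = false;            // e.g. eden/new space for an eden/young GC
    FakeObject *forwardingAddress = nullptr;  // set when the object was evacuated
};

// Mirrors the shape of the gcUpdateWeak lambda: keep, forward, or drop a weak root.
FakeObject *UpdateWeakRoot(FakeObject *header)
{
    if (!header->inCollectedSpace) {
        return header;                        // untouched by this GC: keep as-is
    }
    if (header->forwardingAddress != nullptr) {
        return header->forwardingAddress;     // survived and moved: follow the forwarding pointer
    }
    return nullptr;                           // dead: the caller clears the weak root
}

int main()
{
    FakeObject stable;                        // lives outside the collected space
    FakeObject moved;
    FakeObject target;
    moved.inCollectedSpace = true;
    moved.forwardingAddress = &target;
    FakeObject dead;
    dead.inCollectedSpace = true;

    std::cout << (UpdateWeakRoot(&stable) == &stable) << '\n';  // 1
    std::cout << (UpdateWeakRoot(&moved) == &target) << '\n';   // 1
    std::cout << (UpdateWeakRoot(&dead) == nullptr) << '\n';    // 1
    return 0;
}
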
@@ -451,7 +468,6 @@ void ParallelEvacuator::UpdateRSet(Region *region)
     }
     region->IterateAllOldToNewBits(cb);
     if (heap_->IsYoungMark()) {
-        region->DeleteCrossRegionRSet();
         return;
     }
     if constexpr (IsEdenGC) {
@@ -462,7 +478,10 @@ void ParallelEvacuator::UpdateRSet(Region *region)
     } else {
         region->IterateAllCrossRegionBits([this](void *mem) {
             ObjectSlot slot(ToUintPtr(mem));
-            UpdateObjectSlotOpt<TriggerGCType::OLD_GC>(slot);
+            JSTaggedType value = slot.GetTaggedType();
+            if (JSTaggedValue(value).IsHeapObject() && Region::ObjectAddressToRange(value)->InCollectSet()) {
+                UpdateObjectSlotOpt<TriggerGCType::OLD_GC>(slot);
+            }
         });
     }
     region->DeleteCrossRegionRSet();
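
For illustration only (not part of the patch): in the old-GC branch above, a recorded cross-region slot is now rewritten only when its value is still a heap object inside the collect set; slots whose targets sit outside the collect set, including weak references into stable old space, are no longer touched by this walk. A tiny model of that gate (`Slot` and its fields are illustrative, not the runtime's types):

#include <iostream>

// Illustrative model of the gate added to the old-GC cross-region walk.
struct Slot {
    bool holdsHeapObject = false;
    bool targetInCollectSet = false;  // whether the referenced region is being evacuated
};

// Only slots that still reference a heap object inside the collect set are rewritten;
// everything else is left alone.
bool NeedsUpdate(const Slot &slot)
{
    return slot.holdsHeapObject && slot.targetInCollectSet;
}

int main()
{
    Slot toEvacuated;
    toEvacuated.holdsHeapObject = true;
    toEvacuated.targetInCollectSet = true;

    Slot toStableOld;
    toStableOld.holdsHeapObject = true;   // e.g. a weak ref into old space outside the collect set

    std::cout << NeedsUpdate(toEvacuated) << '\n';  // 1: follow the forwarding information
    std::cout << NeedsUpdate(toStableOld) << '\n';  // 0: leave the slot untouched
    return 0;
}
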
@@ -659,8 +678,9 @@ void ParallelEvacuator::WorkloadSet::Clear()
     indexCursor_.store(0, std::memory_order_relaxed);
     remainingWorkloadNum_.store(0, std::memory_order_relaxed);
 }
-ParallelEvacuator::EvacuationTask::EvacuationTask(int32_t id, ParallelEvacuator *evacuator)
-    : Task(id), evacuator_(evacuator)
+
+ParallelEvacuator::EvacuationTask::EvacuationTask(int32_t id, uint32_t idOrder, ParallelEvacuator *evacuator)
+    : Task(id), idOrder_(idOrder), evacuator_(evacuator)
 {
     allocator_ = new TlabAllocator(evacuator->heap_);
 }
@@ -672,7 +692,7 @@ ParallelEvacuator::EvacuationTask::~EvacuationTask()

 bool ParallelEvacuator::EvacuationTask::Run(uint32_t threadIndex)
 {
-    return evacuator_->EvacuateSpace(allocator_, threadIndex);
+    return evacuator_->EvacuateSpace(allocator_, threadIndex, idOrder_);
 }

 bool ParallelEvacuator::UpdateReferenceTask::Run([[maybe_unused]] uint32_t threadIndex)
@@ -51,7 +51,7 @@ public:
 private:
     class EvacuationTask : public Task {
     public:
-        EvacuationTask(int32_t id, ParallelEvacuator *evacuator);
+        EvacuationTask(int32_t id, uint32_t idOrder, ParallelEvacuator *evacuator);
         ~EvacuationTask() override;
         bool Run(uint32_t threadIndex) override;

@@ -59,6 +59,7 @@ private:
         NO_MOVE_SEMANTIC(EvacuationTask);

     private:
+        uint32_t idOrder_;
         ParallelEvacuator *evacuator_;
         TlabAllocator *allocator_ {nullptr};
     };
@@ -182,7 +183,8 @@ private:
     bool ProcessWorkloads(bool isMain = false);

     void EvacuateSpace();
-    bool EvacuateSpace(TlabAllocator *allocation, uint32_t threadIndex, bool isMain = false);
+    bool EvacuateSpace(TlabAllocator *allocation, uint32_t threadIndex, uint32_t idOrder, bool isMain = false);
+    void UpdateRecordWeakReferenceInParallel(uint32_t idOrder);
     void EvacuateRegion(TlabAllocator *allocator, Region *region, std::unordered_set<JSTaggedType> &trackSet);
     template<bool SetEdenObject>
     inline void SetObjectFieldRSet(TaggedObject *object, JSHClass *cls);
@@ -203,8 +205,6 @@ private:
     void UpdateRecordWeakReference();
     template<TriggerGCType gcType>
     void UpdateWeakReferenceOpt();
-    template<TriggerGCType gcType>
-    void UpdateRecordWeakReferenceOpt();
     template<bool IsEdenGC>
     void UpdateRSet(Region *region);
     void UpdateNewToEdenRSetReference(Region *region);
@@ -237,6 +237,7 @@ private:

     uintptr_t waterLine_ = 0;
     std::unordered_set<JSTaggedType> arrayTrackInfoSets_[MAX_TASKPOOL_THREAD_NUM + 1];
+    uint32_t evacuateTaskNum_ = 0;
     std::atomic_int parallel_ = 0;
     Mutex mutex_;
     ConditionVariable condition_;
@@ -313,4 +313,73 @@ HWTEST_F_L0(GCTest, Destroy)
     heap = nullptr;
 };

+HWTEST_F_L0(GCTest, WeakRefTest)
+{
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    std::vector<JSHandle<TaggedArray>> srcArrayHandleRecord;
+    std::vector<JSHandle<TaggedArray>> dstOldArrayHandleRecord;
+    std::vector<JSHandle<TaggedArray>> dstNewArrayHandleRecord;
+    for (int i = 0; i < 1024; i++) {
+        JSHandle<TaggedArray> arrayHandle = factory->NewTaggedArray(64, JSTaggedValue::True(),
+                                                                    MemSpaceType::OLD_SPACE);
+        srcArrayHandleRecord.emplace_back(arrayHandle);
+    }
+    for (int i = 0; i < 1024; i++) {
+        JSHandle<TaggedArray> arrayHandle = factory->NewTaggedArray(128, JSTaggedValue::True(),
+                                                                    MemSpaceType::OLD_SPACE);
+        dstOldArrayHandleRecord.emplace_back(arrayHandle);
+    }
+    for (int i = 0; i < 1024; i++) {
+        JSHandle<TaggedArray> arrayHandle = factory->NewTaggedArray(128, JSTaggedValue::True(),
+                                                                    MemSpaceType::SEMI_SPACE);
+        dstNewArrayHandleRecord.emplace_back(arrayHandle);
+    }
+    for (auto it : srcArrayHandleRecord) {
+        uint32_t countIndex = 0;
+        std::random_device randomDevice;
+        std::shuffle(dstOldArrayHandleRecord.begin(), dstOldArrayHandleRecord.end(), std::mt19937(randomDevice()));
+        for (auto it2 : dstOldArrayHandleRecord) {
+            if (Region::ObjectAddressToRange(it2.GetTaggedValue().GetTaggedObject())->InGeneralOldSpace()) {
+                JSTaggedValue valueWeak = it2.GetTaggedValue().CreateAndGetWeakRef();
+                it->Set<true>(thread, countIndex, valueWeak);
+                if (++countIndex >= 40) {
+                    break;
+                }
+            }
+        }
+    }
+    auto heap = const_cast<Heap *>(thread->GetEcmaVM()->GetHeap());
+    heap->SetMarkType(MarkType::MARK_FULL);
+    auto concurrentMarker = heap->GetConcurrentMarker();
+    concurrentMarker->Mark();
+    std::this_thread::sleep_for(std::chrono::seconds(5));
+    for (auto it : srcArrayHandleRecord) {
+        uint32_t countIndex = 0;
+        std::random_device randomDevice;
+        std::shuffle(dstNewArrayHandleRecord.begin(), dstNewArrayHandleRecord.end(), std::mt19937(randomDevice()));
+        for (auto it2 : dstNewArrayHandleRecord) {
+            if (Region::ObjectAddressToRange(it2.GetTaggedValue().GetTaggedObject())->InGeneralNewSpace()) {
+                JSTaggedValue valueWeak = it2.GetTaggedValue().CreateAndGetWeakRef();
+                it->Set<true>(thread, countIndex, valueWeak);
+                if (++countIndex >= 40) {
+                    break;
+                }
+            }
+        }
+    }
+    auto partialGc = heap->GetPartialGC();
+    partialGc->RunPhases();
+    for (auto it : dstOldArrayHandleRecord) {
+        EXPECT_TRUE(it.GetTaggedValue() != JSTaggedValue::Undefined());
+    }
+    for (auto it : dstNewArrayHandleRecord) {
+        EXPECT_TRUE(it.GetTaggedValue() != JSTaggedValue::Undefined());
+    }
+    for (auto it : srcArrayHandleRecord) {
+        for (uint32_t i = 0; i < 40; i++) {
+            EXPECT_TRUE(it->Get(i) != JSTaggedValue::Undefined());
+        }
+    }
+}
+
 } // namespace panda::test