mirror of
https://gitee.com/openharmony/arkcompiler_ets_runtime
synced 2024-11-23 10:09:54 +00:00
!10142 setProcessRset标志位修正到jsthread
Merge pull request !10142 from jinjiawei/waitsharedgc
This commit is contained in:
commit
69816e2241
@ -810,7 +810,7 @@ bool JSThread::CheckSafepoint()
|
||||
heap->HandleExitHighSensitiveEvent();
|
||||
|
||||
// Do not trigger local gc during the shared gc processRset process.
|
||||
if (heap->IsProcessingRset()) {
|
||||
if (IsProcessingLocalToSharedRset()) {
|
||||
return false;
|
||||
}
|
||||
// After concurrent mark finish, should trigger gc here to avoid create much floating garbage
|
||||
|
@ -1334,6 +1334,16 @@ public:
|
||||
fullMarkRequest_ = false;
|
||||
}
|
||||
|
||||
// Marks whether a shared GC is currently processing this thread's
// local-to-shared remembered set. While the flag is true,
// JSThread::CheckSafepoint refuses to trigger a local GC, so the rset
// snapshot stays stable. Set from both the js thread and the daemon
// thread (see SharedGCMarkerBase::NotifyThreadProcessRset*).
void SetProcessingLocalToSharedRset(bool processing)
{
    processingLocalToSharedRset_ = processing;
}
|
||||
|
||||
// True while a shared GC is processing this thread's local-to-shared
// remembered set; queried in CheckSafepoint to suppress local GC during
// that window.
bool IsProcessingLocalToSharedRset() const
{
    return processingLocalToSharedRset_;
}
|
||||
|
||||
inline bool IsThreadSafe() const
|
||||
{
|
||||
return IsMainThread() || HasSuspendRequest();
|
||||
@ -1615,6 +1625,8 @@ private:
|
||||
// Shared heap
|
||||
bool isMainThread_ {false};
|
||||
bool fullMarkRequest_ {false};
|
||||
// Shared heap collect local heap Rset
|
||||
bool processingLocalToSharedRset_ {false};
|
||||
|
||||
// { ElementsKind, (hclass, hclassWithProto) }
|
||||
CMap<ElementsKind, std::pair<ConstantIndex, ConstantIndex>> arrayHClassIndexMap_;
|
||||
|
@ -833,6 +833,10 @@ void Heap::ProcessSharedGCRSetWorkList()
|
||||
ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
|
||||
ASSERT(this == sharedGCData_.rSetWorkListHandler_->GetHeap());
|
||||
sHeap_->GetSharedGCMarker()->ProcessThenMergeBackRSetFromBoundJSThread(sharedGCData_.rSetWorkListHandler_);
|
||||
// The current thread may end earlier than the daemon thread.
|
||||
// To ensure the accuracy of the state range, the flag update is executed on both the js thread and the daemon thread.
|
||||
// Reentrant does not cause exceptions because all the values are set to false.
|
||||
thread_->SetProcessingLocalToSharedRset(false);
|
||||
ASSERT(sharedGCData_.rSetWorkListHandler_ == nullptr);
|
||||
}
|
||||
}
|
||||
|
@ -1538,16 +1538,6 @@ public:
|
||||
return gcType_ == TriggerGCType::YOUNG_GC || gcType_ == TriggerGCType::EDEN_GC;
|
||||
}
|
||||
|
||||
// True while the shared GC marker is still working on this heap's rset
// work list (set in CollectLocalVMRSet, cleared via
// RSetWorkListHandler::NotifyProcessRsetFinished).
bool IsProcessingRset() const
{
    return isProcessingRset_;
}
|
||||
|
||||
// Records whether the shared GC is currently processing this heap's rset
// work list. NOTE(review): plain bool written under the handler's mutex_
// in NotifyProcessRsetFinished — confirm all writers hold that lock.
void SetProcessingRset(bool processing)
{
    isProcessingRset_ = processing;
}
|
||||
|
||||
void EnableEdenGC();
|
||||
|
||||
void TryEnableEdenGC();
|
||||
@ -1771,7 +1761,6 @@ private:
|
||||
bool fullMarkRequested_ {false};
|
||||
bool oldSpaceLimitAdjusted_ {false};
|
||||
bool enableIdleGC_ {false};
|
||||
bool isProcessingRset_ {false};
|
||||
std::atomic_bool isCSetClearing_ {false};
|
||||
HeapMode mode_ { HeapMode::NORMAL };
|
||||
|
||||
|
@ -23,7 +23,7 @@
|
||||
#include "ecmascript/mem/heap.h"
|
||||
|
||||
namespace panda::ecmascript {
|
||||
inline RSetWorkListHandler::RSetWorkListHandler(Heap *heap) : heap_(heap)
|
||||
inline RSetWorkListHandler::RSetWorkListHandler(Heap *heap, JSThread *thread) : heap_(heap), ownerThread_(thread)
|
||||
{
|
||||
CollectRSetItemsInHeap(heap);
|
||||
}
|
||||
@ -140,15 +140,6 @@ inline void RSetWorkListHandler::MergeBackForAllItem()
|
||||
}
|
||||
}
|
||||
|
||||
inline void RSetWorkListHandler::NotifyProcessRsetFinished()
|
||||
{
|
||||
LockHolder lock(mutex_);
|
||||
if (!processRsetFinished_) {
|
||||
heap_->SetProcessingRset(false);
|
||||
processRsetFinished_ = true;
|
||||
}
|
||||
}
|
||||
|
||||
inline bool RSetWorkListHandler::MergeBack()
|
||||
{
|
||||
ASSERT((JSThread::GetCurrent()->IsJSThread() && JSThread::GetCurrent()->IsInRunningState()) ||
|
||||
|
@ -40,7 +40,7 @@ private:
|
||||
|
||||
class RSetWorkListHandler {
|
||||
public:
|
||||
explicit RSetWorkListHandler(Heap *heap);
|
||||
explicit RSetWorkListHandler(Heap *heap, JSThread *thread);
|
||||
~RSetWorkListHandler() = default;
|
||||
|
||||
inline void Initialize();
|
||||
@ -62,7 +62,10 @@ public:
|
||||
|
||||
inline void MergeBackForAllItem();
|
||||
|
||||
inline void NotifyProcessRsetFinished();
|
||||
// Returns the js thread bound to this handler at construction time.
// "Unsafe": the thread is not guaranteed to still be alive — the caller
// must establish liveness before dereferencing (e.g. by scanning the
// runtime's thread list, as NotifyThreadProcessRsetFinished does).
JSThread *GetOwnerThreadUnsafe() const
{
    return ownerThread_;
}
|
||||
|
||||
private:
|
||||
inline void CollectRSetItemsInHeap(const Heap *heap);
|
||||
@ -73,6 +76,8 @@ private:
|
||||
inline bool TryMergeBack();
|
||||
|
||||
Heap *heap_ {nullptr};
|
||||
// The thread is not guaranteed to be alive. The caller must ensure that the thread is alive.
|
||||
JSThread *ownerThread_ {nullptr};
|
||||
/**
|
||||
* This value represent whether there are some items to process, this is set to true in Initialize when collecting
|
||||
* the RSet in heap(call from daemon thread in SuspendAll), and use CAS to set to false when try to merge back and
|
||||
@ -85,7 +90,6 @@ private:
|
||||
* And thus WaitFinishedThenMergeBack should ONLY be called from the bound js thread in RUNNING state.
|
||||
*/
|
||||
bool initialized_ {false};
|
||||
bool processRsetFinished_ {false};
|
||||
std::vector<RSetItem> items_;
|
||||
std::atomic<int> nextItemIndex_ {-1};
|
||||
int remainItems_ {0};
|
||||
|
@ -173,7 +173,9 @@ inline void SharedGCMarkerBase::ProcessVisitorOfDoMark(uint32_t threadId)
|
||||
for (RSetWorkListHandler *handler : rSetHandlers_) {
|
||||
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarker::ProcessRSet");
|
||||
handler->ProcessAll(visitor);
|
||||
handler->NotifyProcessRsetFinished();
|
||||
// To ensure the accuracy of the state range, the finish notification is executed on both the js thread and the daemon thread.
|
||||
// Reentrant does not cause exceptions because all the values are set to false.
|
||||
NotifyThreadProcessRsetFinished(handler->GetOwnerThreadUnsafe());
|
||||
}
|
||||
}
|
||||
|
||||
@ -220,7 +222,25 @@ inline void SharedGCMarkerBase::ProcessThenMergeBackRSetFromBoundJSThread(RSetWo
|
||||
ASSERT(JSThread::GetCurrent()->IsInRunningState());
|
||||
ProcessVisitor(handler);
|
||||
handler->WaitFinishedThenMergeBack();
|
||||
handler->NotifyProcessRsetFinished();
|
||||
}
|
||||
|
||||
// Flags |localThread| as having its local-to-shared rset under processing,
// which makes JSThread::CheckSafepoint skip local GC until the matching
// NotifyThreadProcessRsetFinished call.
inline void SharedGCMarkerBase::NotifyThreadProcessRsetStart(JSThread *localThread)
{
    // This method is called within the GCIterateThreadList method,
    // so the thread lock problem does not need to be considered.
    ASSERT(localThread != nullptr);
    localThread->SetProcessingLocalToSharedRset(true);
}
|
||||
|
||||
// Clears the local-to-shared rset-processing flag on |localThread|.
// The localThread may have been released or reused, so it is never
// dereferenced directly: the live thread list is scanned and the flag is
// cleared only if the thread is still registered.
inline void SharedGCMarkerBase::NotifyThreadProcessRsetFinished(JSThread *localThread)
{
    Runtime::GetInstance()->GCIterateThreadList([localThread](JSThread *thread) {
        if (localThread == thread) {
            thread->SetProcessingLocalToSharedRset(false);
            // Returns from the lambda only; iteration over the remaining
            // threads continues (the iterator API offers no early exit).
            return;
        }
    });
}
|
||||
|
||||
void SharedGCMovableMarker::MarkObject(uint32_t threadId, TaggedObject *object, ObjectSlot &slot)
|
||||
|
@ -66,9 +66,9 @@ void SharedGCMarkerBase::CollectLocalVMRSet(EcmaVM *localVm)
|
||||
{
|
||||
ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SharedGCMarkerBase::CollectLocalVMRSet");
|
||||
Heap *heap = const_cast<Heap*>(localVm->GetHeap());
|
||||
RSetWorkListHandler *handler = new RSetWorkListHandler(heap);
|
||||
RSetWorkListHandler *handler = new RSetWorkListHandler(heap, localVm->GetJSThreadNoCheck());
|
||||
heap->SetRSetWorkListHandler(handler);
|
||||
heap->SetProcessingRset(true);
|
||||
NotifyThreadProcessRsetStart(handler->GetOwnerThreadUnsafe());
|
||||
rSetHandlers_.emplace_back(handler);
|
||||
}
|
||||
|
||||
|
@ -91,6 +91,11 @@ protected:
|
||||
SharedGCWorkManager *sWorkManager_ {nullptr};
|
||||
|
||||
private:
|
||||
// This method is called within the GCIterateThreadList method,
|
||||
// so the thread lock problem does not need to be considered.
|
||||
inline void NotifyThreadProcessRsetStart(JSThread *localThread);
|
||||
inline void NotifyThreadProcessRsetFinished(JSThread *localThread);
|
||||
|
||||
template<SharedMarkType markType>
|
||||
inline auto GenerateRSetVisitor(uint32_t threadId);
|
||||
inline void RecordObject(JSTaggedValue value, uint32_t threadId, void *mem);
|
||||
|
Loading…
Reference in New Issue
Block a user