Mirror of https://gitee.com/openharmony/arkcompiler_ets_runtime
Synced 2024-11-27 04:00:37 +00:00
Change the mutex implementation to be based on pthread_mutex

Signed-off-by: xiongluo <xiongluo@huawei.com>
Change-Id: I232c1378a20dcef26e61cee76b7b40cccc5b5e9f
This commit is contained in:
parent 4906107bce
commit c7ab7dddd8

BUILD.gn (5 changed lines)
@@ -834,7 +834,10 @@ ecma_profiler_source += [

ecma_platform_source = []
-ecma_platform_source += [ "ecmascript/platform/common/map.cpp" ]
+ecma_platform_source += [
+  "ecmascript/platform/common/map.cpp",
+  "ecmascript/platform/common/mutex.cpp",
+]

config("include_llvm") {
  if (compile_llvm_online) {
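The source hunks that follow all apply the same mechanical substitution: the libpandabase os::memory:: synchronization types are replaced with the panda::ecmascript types declared in the new ecmascript/platform/mutex.h, while the surrounding RAII usage stays unchanged. A minimal before/after sketch of the pattern (the Counter class and its members are illustrative, not taken from the diff):

// Before this commit: libpandabase primitives.
#include "libpandabase/os/mutex.h"

class Counter {
public:
    void Add()
    {
        os::memory::LockHolder lock(mutex_);  // RAII guard around the shared counter
        value_++;
    }

private:
    os::memory::Mutex mutex_;
    int value_ {0};
};

// After this commit: platform primitives backed by pthread_mutex.
#include "ecmascript/platform/mutex.h"

class Counter {
public:
    void Add()
    {
        LockHolder lock(mutex_);  // same guard shape, now panda::ecmascript::LockHolder
        value_++;
    }

private:
    Mutex mutex_;
    int value_ {0};
};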
@@ -38,7 +38,7 @@ void AnFileDataManager::DestroyFileMapMem(MemMap &fileMapMem)

void AnFileDataManager::SafeDestroyAllData()
{
-    os::memory::WriteLockHolder lock(lock_);
+    WriteLockHolder lock(lock_);
    if (loadedStub_ != nullptr) {
        ExecutedMemoryAllocator::DestroyBuf(loadedStub_->GetStubsMem());
        loadedStub_ = nullptr;

@@ -53,7 +53,7 @@ void AnFileDataManager::SafeDestroyAllData()

void AnFileDataManager::SafeDestroyAnData(const std::string &fileName)
{
-    os::memory::WriteLockHolder lock(lock_);
+    WriteLockHolder lock(lock_);
    std::string anBasename = JSFilePath::GetBaseName(fileName);
    auto index = UnSafeGetFileInfoIndex(anBasename);
    if (index == INVALID_INDEX) {

@@ -65,7 +65,7 @@ void AnFileDataManager::SafeDestroyAnData(const std::string &fileName)

bool AnFileDataManager::SafeLoad(const std::string &fileName, Type type)
{
-    os::memory::WriteLockHolder lock(lock_);
+    WriteLockHolder lock(lock_);
    if (type == Type::STUB) {
        if (loadedStub_ != nullptr) {
            return true;

@@ -133,19 +133,19 @@ uint32_t AnFileDataManager::UnSafeGetFileInfoIndex(const std::string &fileName)

uint32_t AnFileDataManager::SafeGetFileInfoIndex(const std::string &fileName)
{
-    os::memory::ReadLockHolder lock(lock_);
+    ReadLockHolder lock(lock_);
    return UnSafeGetFileInfoIndex(fileName);
}

std::shared_ptr<AnFileInfo> AnFileDataManager::SafeGetAnFileInfo(uint32_t index)
{
-    os::memory::ReadLockHolder lock(lock_);
+    ReadLockHolder lock(lock_);
    return UnSafeGetAnFileInfo(index);
}

std::shared_ptr<StubFileInfo> AnFileDataManager::SafeGetStubFileInfo()
{
-    os::memory::ReadLockHolder lock(lock_);
+    ReadLockHolder lock(lock_);
    return loadedStub_;
}

@@ -165,7 +165,7 @@ bool AnFileDataManager::SafeTryReadLock()

bool AnFileDataManager::SafeInsideStub(uintptr_t pc)
{
-    os::memory::ReadLockHolder lock(lock_);
+    ReadLockHolder lock(lock_);
    if (loadedStub_ == nullptr) {
        LOG_COMPILER(ERROR) << "SafeInsideStub: The stub file is not loaded.";
        return false;

@@ -189,7 +189,7 @@ bool AnFileDataManager::SafeInsideStub(uintptr_t pc)

bool AnFileDataManager::SafeInsideAOT(uintptr_t pc)
{
-    os::memory::ReadLockHolder lock(lock_);
+    ReadLockHolder lock(lock_);
    for (auto &info : loadedAn_) {
        const std::vector<ModuleSectionDes> &des = info->GetCodeUnits();
        for (const auto &curDes : des) {

@@ -203,7 +203,7 @@ bool AnFileDataManager::SafeInsideAOT(uintptr_t pc)

AOTFileInfo::CallSiteInfo AnFileDataManager::SafeCalCallSiteInfo(uintptr_t retAddr)
{
-    os::memory::ReadLockHolder lock(lock_);
+    ReadLockHolder lock(lock_);
    AOTFileInfo::CallSiteInfo callsiteInfo;

    bool ans = false;

@@ -76,7 +76,7 @@ private:
        return loadedAn_.at(index);
    }

-    os::memory::RWLock lock_ {};
+    RWLock lock_ {};
    std::unordered_map<std::string, uint32_t> anFileNameToIndexMap_ {};
    std::vector<std::shared_ptr<AnFileInfo>> loadedAn_ {};
    std::shared_ptr<StubFileInfo> loadedStub_ {nullptr};
@@ -28,7 +28,7 @@
#include "ecmascript/taskpool/taskpool.h"

namespace panda::ecmascript {
-os::memory::Mutex CpuProfiler::synchronizationMutex_;
+Mutex CpuProfiler::synchronizationMutex_;
CMap<pthread_t, const EcmaVM *> CpuProfiler::profilerMap_ = CMap<pthread_t, const EcmaVM *>();
CpuProfiler::CpuProfiler(const EcmaVM *vm, const int interval) : vm_(vm), interval_(interval)
{

@@ -71,7 +71,7 @@ void CpuProfiler::StartCpuProfilerForInfo()
    }
    tid_ = static_cast<pthread_t>(syscall(SYS_gettid));
    {
-        os::memory::LockHolder lock(synchronizationMutex_);
+        LockHolder lock(synchronizationMutex_);
        profilerMap_[tid_] = vm_;
    }

@@ -133,7 +133,7 @@ void CpuProfiler::StartCpuProfilerForFile(const std::string &fileName)
    }
    tid_ = static_cast<pthread_t>(syscall(SYS_gettid));
    {
-        os::memory::LockHolder lock(synchronizationMutex_);
+        LockHolder lock(synchronizationMutex_);
        profilerMap_[tid_] = vm_;
    }
    outToFile_ = true;

@@ -388,7 +388,7 @@ void CpuProfiler::GetStackSignalHandler(int signal, [[maybe_unused]] siginfo_t *
    CpuProfiler *profiler = nullptr;
    JSThread *thread = nullptr;
    {
-        os::memory::LockHolder lock(synchronizationMutex_);
+        LockHolder lock(synchronizationMutex_);
        pthread_t tid = static_cast<pthread_t>(syscall(SYS_gettid));
        const EcmaVM *vm = profilerMap_[tid];
        if (vm == nullptr) {

@@ -98,7 +98,7 @@ public:

    static CMap<pthread_t, const EcmaVM *> profilerMap_;
private:
-    static os::memory::Mutex synchronizationMutex_;
+    static Mutex synchronizationMutex_;

    void GetStack(FrameIterator &it);
    static uint64_t GetPcFromContext(void *context);
@@ -801,7 +801,7 @@ void SamplesRecord::TranslateUrlPositionBySourceMap(struct FrameInfo &codeEntry)
void SamplesQueue::PostFrame(FrameInfoTemp *frameInfoTemps, MethodKey *frameStack,
    int frameInfoTempsLength, int frameStackLength)
{
-    os::memory::LockHolder holder(mtx_);
+    LockHolder holder(mtx_);
    if (!IsFull()) {
        // frameInfoTemps
        for (int i = 0; i < frameInfoTempsLength; i++) {

@@ -836,7 +836,7 @@ void SamplesQueue::PostFrame(FrameInfoTemp *frameInfoTemps, MethodKey *frameStac
void SamplesQueue::PostNapiFrame(CVector<FrameInfoTemp> &napiFrameInfoTemps,
    CVector<MethodKey> &napiFrameStack)
{
-    os::memory::LockHolder holder(mtx_);
+    LockHolder holder(mtx_);
    if (!IsFull()) {
        size_t frameInfoTempsLength = napiFrameInfoTemps.size();
        size_t frameStackLength = napiFrameStack.size();

@@ -873,7 +873,7 @@ void SamplesQueue::PostNapiFrame(CVector<FrameInfoTemp> &napiFrameInfoTemps,

FrameStackAndInfo *SamplesQueue::PopFrame()
{
-    os::memory::LockHolder holder(mtx_);
+    LockHolder holder(mtx_);
    if (!IsEmpty()) {
        FrameStackAndInfo *frame = &frames_[front_];
        front_ = (front_ + 1) % QUEUE_CAPACITY;

@@ -26,7 +26,7 @@
#include "ecmascript/js_thread.h"
#include "ecmascript/jspandafile/method_literal.h"
#include "ecmascript/mem/c_containers.h"
-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
const int MAX_STACK_SIZE = 128; // 128:the maximum size of the js stack

@@ -108,7 +108,7 @@ private:
    FrameStackAndInfo frames_[QUEUE_CAPACITY] = {};
    int front_ = 0;
    int rear_ = 0;
-    os::memory::Mutex mtx_;
+    Mutex mtx_;
};

class SamplesRecord {
@@ -110,7 +110,7 @@ void HeapProfiler::AllocationEvent(TaggedObject *address, size_t size)

void HeapProfiler::MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    if (isProfiling_) {
        entryIdMap_->Move(address, reinterpret_cast<Address>(forwardAddress));
        if (heapTracker_ != nullptr) {

@@ -124,7 +124,7 @@ private:
    std::unique_ptr<HeapTracker> heapTracker_;
    Chunk chunk_;
    std::unique_ptr<HeapSampling> heapSampling_ {nullptr};
-    os::memory::Mutex mutex_;
+    Mutex mutex_;
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_DFX_HPROF_HEAP_PROFILER_H
@@ -25,7 +25,7 @@ bool VmThreadControl::NotifyVMThreadSuspension() // block caller thread
    }
    SetVMNeedSuspension(true);
    thread_->SetCheckSafePointStatus();
-    os::memory::LockHolder lock(vmThreadSuspensionMutex_);
+    LockHolder lock(vmThreadSuspensionMutex_);
    while (!IsSuspended()) {
        if (vmThreadNeedSuspensionCV_.TimedWait(&vmThreadSuspensionMutex_, TIME_OUT_MS)) {
            SetVMNeedSuspension(false);

@@ -58,7 +58,7 @@ bool VmThreadControl::IsSuspended() const

void VmThreadControl::SuspendVM() // block vm thread
{
-    os::memory::LockHolder lock(vmThreadSuspensionMutex_);
+    LockHolder lock(vmThreadSuspensionMutex_);
    SetVMSuspended(true);
    vmThreadNeedSuspensionCV_.Signal(); // wake up the thread who needs suspend vmthread
    while (VMNeedSuspension()) {

@@ -69,7 +69,7 @@ void VmThreadControl::SuspendVM() // block vm thread

void VmThreadControl::ResumeVM()
{
-    os::memory::LockHolder lock(vmThreadSuspensionMutex_);
+    LockHolder lock(vmThreadSuspensionMutex_);
    SetVMNeedSuspension(false);
    vmThreadHasSuspendedCV_.Signal();
}

@@ -17,7 +17,7 @@
#define ECMASCRIPT_VM_THREAD_CONTROL_H

#include "ecmascript/js_thread.h"
-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"
#include "libpandabase/utils/bit_field.h"

namespace panda::ecmascript {

@@ -46,9 +46,9 @@ public:

private:
    JSThread *thread_;
-    os::memory::Mutex vmThreadSuspensionMutex_;
-    os::memory::ConditionVariable vmThreadNeedSuspensionCV_;
-    os::memory::ConditionVariable vmThreadHasSuspendedCV_;
+    Mutex vmThreadSuspensionMutex_;
+    ConditionVariable vmThreadNeedSuspensionCV_;
+    ConditionVariable vmThreadHasSuspendedCV_;
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_VM_THREAD_CONTROL_H
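The VmThreadControl hunks above are the only call sites in this diff that use a timed wait. With the new classes the suspension handshake keeps the usual mutex-plus-condition-variable shape; a simplified sketch is below. Everything except Mutex, ConditionVariable, LockHolder and TimedWait is illustrative, and a true result from TimedWait is treated as a timeout, matching the implementation added later in this commit.

#include "ecmascript/platform/mutex.h"

class SuspensionDemo {
public:
    // Caller side: block until suspended_ is set, or give up after TIME_OUT_MS.
    bool WaitForSuspension()
    {
        panda::ecmascript::LockHolder lock(mutex_);
        while (!suspended_) {
            if (cv_.TimedWait(&mutex_, TIME_OUT_MS)) {  // true means the wait timed out
                return false;
            }
        }
        return true;
    }

    // VM side: publish the state change and wake the waiter.
    void MarkSuspended()
    {
        panda::ecmascript::LockHolder lock(mutex_);
        suspended_ = true;
        cv_.Signal();
    }

private:
    static constexpr uint64_t TIME_OUT_MS = 500;  // illustrative value, not the constant used by VmThreadControl
    panda::ecmascript::Mutex mutex_;
    panda::ecmascript::ConditionVariable cv_;
    bool suspended_ {false};
};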
@@ -635,7 +635,7 @@ void EcmaVM::DumpCallTimeInfo()

void EcmaVM::WorkersetInfo(EcmaVM *workerVm)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    auto thread = workerVm->GetJSThread();
    if (thread != nullptr) {
        auto tid = thread->GetThreadId();

@@ -647,7 +647,7 @@ void EcmaVM::WorkersetInfo(EcmaVM *workerVm)

EcmaVM *EcmaVM::GetWorkerVm(uint32_t tid)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    EcmaVM *workerVm = nullptr;
    if (!workerList_.empty()) {
        auto iter = workerList_.find(tid);

@@ -660,7 +660,7 @@ EcmaVM *EcmaVM::GetWorkerVm(uint32_t tid)

bool EcmaVM::DeleteWorker(EcmaVM *workerVm)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    auto thread = workerVm->GetJSThread();
    if (thread != nullptr) {
        auto tid = thread->GetThreadId();

@@ -679,7 +679,7 @@ bool EcmaVM::DeleteWorker(EcmaVM *workerVm)

bool EcmaVM::SuspendWorkerVm(uint32_t tid)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    if (!workerList_.empty()) {
        auto iter = workerList_.find(tid);
        if (iter != workerList_.end()) {

@@ -691,7 +691,7 @@ bool EcmaVM::SuspendWorkerVm(uint32_t tid)

void EcmaVM::ResumeWorkerVm(uint32_t tid)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    if (!workerList_.empty()) {
        auto iter = workerList_.find(tid);
        if (iter != workerList_.end()) {

@@ -311,7 +311,7 @@ public:
    void EnumerateWorkerVm(Callback cb)
    {
        // since there is a lock, so cannot mark function const
-        os::memory::LockHolder lock(mutex_);
+        LockHolder lock(mutex_);
        for (const auto &item : workerList_) {
            cb(item.second);
        }

@@ -566,7 +566,7 @@ private:
    friend class JSPandaFileExecutor;
    friend class EcmaContext;
    CMap<uint32_t, EcmaVM *> workerList_ {};
-    os::memory::Mutex mutex_;
+    Mutex mutex_;
};
} // namespace ecmascript
} // namespace panda
@@ -36,7 +36,7 @@ JSPandaFileManager *JSPandaFileManager::GetInstance()

JSPandaFileManager::~JSPandaFileManager()
{
-    os::memory::LockHolder lock(jsPandaFileLock_);
+    LockHolder lock(jsPandaFileLock_);
    extractors_.clear();
    oldJSPandaFiles_.clear();
    loadedJSPandaFiles_.clear();

@@ -46,7 +46,7 @@ std::shared_ptr<JSPandaFile> JSPandaFileManager::LoadJSPandaFile(JSThread *threa
    std::string_view entryPoint, bool needUpdate)
{
    {
-        os::memory::LockHolder lock(jsPandaFileLock_);
+        LockHolder lock(jsPandaFileLock_);
        std::shared_ptr<JSPandaFile> jsPandaFile;
        if (needUpdate) {
            auto pf = panda_file::OpenPandaFileOrZip(filename, panda_file::File::READ_WRITE);

@@ -121,7 +121,7 @@ std::shared_ptr<JSPandaFile> JSPandaFileManager::LoadJSPandaFile(JSThread *threa
        return nullptr;
    }
    {
-        os::memory::LockHolder lock(jsPandaFileLock_);
+        LockHolder lock(jsPandaFileLock_);
        std::shared_ptr<JSPandaFile> jsPandaFile;
        if (needUpdate) {
            auto pf = panda_file::OpenPandaFileFromMemory(buffer, size);

@@ -165,7 +165,7 @@ std::shared_ptr<JSPandaFile> JSPandaFileManager::LoadJSPandaFileSecure(JSThread
        return nullptr;
    }
    {
-        os::memory::LockHolder lock(jsPandaFileLock_);
+        LockHolder lock(jsPandaFileLock_);
        std::shared_ptr<JSPandaFile> jsPandaFile;
        if (needUpdate) {
            auto pf = panda_file::OpenPandaFileFromSecureMemory(buffer, size);

@@ -226,7 +226,7 @@ std::shared_ptr<JSPandaFile> JSPandaFileManager::FindJSPandaFileWithChecksum(con

std::shared_ptr<JSPandaFile> JSPandaFileManager::FindMergedJSPandaFile()
{
-    os::memory::LockHolder lock(jsPandaFileLock_);
+    LockHolder lock(jsPandaFileLock_);
    for (const auto &iter : loadedJSPandaFiles_) {
        const std::shared_ptr<JSPandaFile> &jsPandafile = iter.second.first;
        if (jsPandafile->IsFirstMergedAbc()) {

@@ -250,13 +250,13 @@ std::shared_ptr<JSPandaFile> JSPandaFileManager::FindJSPandaFileUnlocked(const C

std::shared_ptr<JSPandaFile> JSPandaFileManager::FindJSPandaFile(const CString &filename)
{
-    os::memory::LockHolder lock(jsPandaFileLock_);
+    LockHolder lock(jsPandaFileLock_);
    return FindJSPandaFileUnlocked(filename);
}

std::shared_ptr<JSPandaFile> JSPandaFileManager::GetJSPandaFile(const panda_file::File *pf)
{
-    os::memory::LockHolder lock(jsPandaFileLock_);
+    LockHolder lock(jsPandaFileLock_);
    for (const auto &iter : loadedJSPandaFiles_) {
        const std::shared_ptr<JSPandaFile> &jsPandafile = iter.second.first;
        if (jsPandafile->GetPandaFile() == pf) {

@@ -269,7 +269,7 @@ std::shared_ptr<JSPandaFile> JSPandaFileManager::GetJSPandaFile(const panda_file
void JSPandaFileManager::AddJSPandaFileVm(const EcmaVM *vm, const std::shared_ptr<JSPandaFile> &jsPandaFile)
{
    const auto &filename = jsPandaFile->GetJSPandaFileDesc();
-    os::memory::LockHolder lock(jsPandaFileLock_);
+    LockHolder lock(jsPandaFileLock_);
    if (loadedJSPandaFiles_.find(filename) != loadedJSPandaFiles_.end()) {
        LOG_ECMA(FATAL) << "add failed, file already exist: " << filename;
        UNREACHABLE();

@@ -301,7 +301,7 @@ void JSPandaFileManager::RemoveJSPandaFileVm(const EcmaVM *vm, const JSPandaFile
        return;
    }

-    os::memory::LockHolder lock(jsPandaFileLock_);
+    LockHolder lock(jsPandaFileLock_);
    auto iterOld = oldJSPandaFiles_.begin();
    while (iterOld != oldJSPandaFiles_.end()) {
        if (iterOld->first.get() == jsPandaFile) {

@@ -379,7 +379,7 @@ DebugInfoExtractor *JSPandaFileManager::GetJSPtExtractor(const JSPandaFile *jsPa
{
    LOG_ECMA_IF(jsPandaFile == nullptr, FATAL) << "GetJSPtExtractor error, js pandafile is nullptr";

-    os::memory::LockHolder lock(jsPandaFileLock_);
+    LockHolder lock(jsPandaFileLock_);
    const auto &filename = jsPandaFile->GetJSPandaFileDesc();
    if (loadedJSPandaFiles_.find(filename) == loadedJSPandaFiles_.end()) {
        LOG_ECMA(FATAL) << "get extractor failed, file not exist: " << filename;

@@ -401,7 +401,7 @@ DebugInfoExtractor *JSPandaFileManager::GetJSPtExtractorAndExtract(const JSPanda
{
    LOG_ECMA_IF(jsPandaFile == nullptr, FATAL) << "GetJSPtExtractor error, js pandafile is nullptr";

-    os::memory::LockHolder lock(jsPandaFileLock_);
+    LockHolder lock(jsPandaFileLock_);
    const auto &filename = jsPandaFile->GetJSPandaFileDesc();
    if (loadedJSPandaFiles_.find(filename) == loadedJSPandaFiles_.end()) {
        LOG_ECMA(FATAL) << "get extractor failed, file not exist: " << filename;

@@ -424,7 +424,7 @@ DebugInfoExtractor *JSPandaFileManager::CpuProfilerGetJSPtExtractor(const JSPand
{
    LOG_ECMA_IF(jsPandaFile == nullptr, FATAL) << "GetJSPtExtractor error, js pandafile is nullptr";

-    os::memory::LockHolder lock(jsPandaFileLock_);
+    LockHolder lock(jsPandaFileLock_);
    const auto &filename = jsPandaFile->GetJSPandaFileDesc();
    if (loadedJSPandaFiles_.find(filename) == loadedJSPandaFiles_.end()) {
        LOG_ECMA(FATAL) << "get extractor failed, file not exist: " << filename;

@@ -467,7 +467,7 @@ std::shared_ptr<JSPandaFile> JSPandaFileManager::GenerateJSPandaFile(JSThread *t

    {
        // For worker, JSPandaFile may be created by another vm.
-        os::memory::LockHolder lock(jsPandaFileLock_);
+        LockHolder lock(jsPandaFileLock_);
        std::shared_ptr<JSPandaFile> jsPandaFile = FindJSPandaFileUnlocked(desc);
        if (jsPandaFile != nullptr) {
            InsertJSPandaFileVmUnlocked(vm, jsPandaFile);

@@ -19,6 +19,7 @@
#include "ecmascript/jspandafile/js_pandafile.h"
#include "ecmascript/jspandafile/panda_file_translator.h"
#include "ecmascript/jspandafile/debug_info_extractor.h"
+#include "ecmascript/platform/mutex.h"

namespace panda {
namespace ecmascript {

@@ -59,7 +60,7 @@ public:
    template<typename Callback>
    void EnumerateJSPandaFiles(Callback cb)
    {
-        os::memory::LockHolder lock(jsPandaFileLock_);
+        LockHolder lock(jsPandaFileLock_);
        for (const auto &item : loadedJSPandaFiles_) {
            if (!cb(item.second.first.get())) {
                return;

@@ -97,7 +98,7 @@ private:
    static void *AllocateBuffer(size_t size);
    static void FreeBuffer(void *mem);

-    os::memory::RecursiveMutex jsPandaFileLock_;
+    RecursiveMutex jsPandaFileLock_;
    // JSPandaFile was hold by ecma vm list.
    using JSPandaFileVmsPair = std::pair<std::shared_ptr<JSPandaFile>, std::set<const EcmaVM *>>;
    std::unordered_map<const CString, JSPandaFileVmsPair, CStringHash> loadedJSPandaFiles_;
@@ -31,7 +31,7 @@

namespace panda::ecmascript {
size_t ConcurrentMarker::taskCounts_ = 0;
-os::memory::Mutex ConcurrentMarker::taskCountMutex_;
+Mutex ConcurrentMarker::taskCountMutex_;

ConcurrentMarker::ConcurrentMarker(Heap *heap, EnableConcurrentMarkType type)
    : heap_(heap),

@@ -83,7 +83,7 @@ void ConcurrentMarker::ReMark()

void ConcurrentMarker::HandleMarkingFinished() // js-thread wait for sweep
{
-    os::memory::LockHolder lock(waitMarkingFinishedMutex_);
+    LockHolder lock(waitMarkingFinishedMutex_);
    if (notifyMarkingFinished_) {
        heap_->CollectGarbage(heap_->IsFullMark() ? TriggerGCType::OLD_GC : TriggerGCType::YOUNG_GC,
            GCReason::ALLOCATION_LIMIT);

@@ -92,7 +92,7 @@ void ConcurrentMarker::HandleMarkingFinished() // js-thread wait for sweep

void ConcurrentMarker::WaitMarkingFinished() // call in EcmaVm thread, wait for mark finished
{
-    os::memory::LockHolder lock(waitMarkingFinishedMutex_);
+    LockHolder lock(waitMarkingFinishedMutex_);
    if (!notifyMarkingFinished_) {
        vmThreadWaitMarkingFinished_ = true;
        waitMarkingFinishedCV_.Wait(&waitMarkingFinishedMutex_);

@@ -164,7 +164,7 @@ bool ConcurrentMarker::MarkerTask::Run(uint32_t threadId)

void ConcurrentMarker::FinishMarking(float spendTime)
{
-    os::memory::LockHolder lock(waitMarkingFinishedMutex_);
+    LockHolder lock(waitMarkingFinishedMutex_);
    thread_->SetMarkStatus(MarkStatus::MARK_FINISHED);
    thread_->SetCheckSafePointStatus();
    if (vmThreadWaitMarkingFinished_) {

@@ -24,7 +24,7 @@
#include "ecmascript/mem/work_manager.h"
#include "ecmascript/taskpool/task.h"

-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
class EcmaVM;

@@ -48,7 +48,7 @@ public:
    {
        size_t taskPoolSize = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
        {
-            os::memory::LockHolder holder(taskCountMutex_);
+            LockHolder holder(taskCountMutex_);
            // total counts of running concurrent mark tasks should be less than taskPoolSize
            if (taskCounts_ + 1 < taskPoolSize) {
                taskCounts_++;

@@ -61,7 +61,7 @@ public:

    static void DecreaseTaskCounts()
    {
-        os::memory::LockHolder holder(taskCountMutex_);
+        LockHolder holder(taskCountMutex_);
        taskCounts_--;
    }

@@ -145,7 +145,7 @@ private:
    void FinishMarking(float spendTime);

    static size_t taskCounts_;
-    static os::memory::Mutex taskCountMutex_;
+    static Mutex taskCountMutex_;

    Heap *heap_ {nullptr};
    EcmaVM *vm_ {nullptr};

@@ -160,8 +160,8 @@ private:
    bool notifyMarkingFinished_ {false}; // notify js-thread that marking is finished and sweeping is needed
    bool vmThreadWaitMarkingFinished_ {false}; // jsMainThread waiting for concurrentGC FINISHED
    bool isConcurrentMarking_ {false};
-    os::memory::Mutex waitMarkingFinishedMutex_;
-    os::memory::ConditionVariable waitMarkingFinishedCV_;
+    Mutex waitMarkingFinishedMutex_;
+    ConditionVariable waitMarkingFinishedCV_;
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_CONCURRENT_MARKER_H
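The ConcurrentMarker hunks above guard a static task counter with a static Mutex so that no more marker tasks are posted than the taskpool has threads. A condensed sketch of that check-and-increment idiom with the new types (the free functions and globals here are illustrative stand-ins for the class members shown in the diff):

#include "ecmascript/platform/mutex.h"

namespace demo {
static size_t g_taskCounts = 0;
static panda::ecmascript::Mutex g_taskCountMutex;

// Mirrors the guarded increment in the @@ -48,7 hunk: only admit a task while the count stays below the pool size.
bool TryIncreaseTaskCounts(size_t taskPoolSize)
{
    panda::ecmascript::LockHolder holder(g_taskCountMutex);
    if (g_taskCounts + 1 < taskPoolSize) {
        g_taskCounts++;
        return true;
    }
    return false;
}

void DecreaseTaskCounts()
{
    panda::ecmascript::LockHolder holder(g_taskCountMutex);
    g_taskCounts--;
}
}  // namespace demo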
@@ -74,7 +74,7 @@ void ConcurrentSweeper::AsyncSweepSpace(MemSpaceType type, bool isMain)
    auto space = heap_->GetSpaceWithType(type);
    space->AsyncSweep(isMain);

-    os::memory::LockHolder holder(mutexs_[type]);
+    LockHolder holder(mutexs_[type]);
    if (--remainingTaskNum_[type] == 0) {
        cvs_[type].SignalAll();
    }

@@ -87,7 +87,7 @@ void ConcurrentSweeper::WaitAllTaskFinished()
    }
    for (int i = startSpaceType_; i < FREE_LIST_NUM; i++) {
        if (remainingTaskNum_[i] > 0) {
-            os::memory::LockHolder holder(mutexs_[i]);
+            LockHolder holder(mutexs_[i]);
            while (remainingTaskNum_[i] > 0) {
                cvs_[i].Wait(&mutexs_[i]);
            }

@@ -123,11 +123,11 @@ void ConcurrentSweeper::WaitingTaskFinish(MemSpaceType type)
{
    if (remainingTaskNum_[type] > 0) {
        {
-            os::memory::LockHolder holder(mutexs_[type]);
+            LockHolder holder(mutexs_[type]);
            remainingTaskNum_[type]++;
        }
        AsyncSweepSpace(type, true);
-        os::memory::LockHolder holder(mutexs_[type]);
+        LockHolder holder(mutexs_[type]);
        while (remainingTaskNum_[type] > 0) {
            cvs_[type].Wait(&mutexs_[type]);
        }

@@ -22,7 +22,7 @@
#include "ecmascript/mem/space.h"
#include "ecmascript/taskpool/task.h"

-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
// CONFIG_DISABLE means concurrent sweeper is disabled by options or macros and cannot be changed.

@@ -108,8 +108,8 @@ private:

    void WaitingTaskFinish(MemSpaceType type);

-    std::array<os::memory::Mutex, FREE_LIST_NUM> mutexs_;
-    std::array<os::memory::ConditionVariable, FREE_LIST_NUM> cvs_;
+    std::array<Mutex, FREE_LIST_NUM> mutexs_;
+    std::array<ConditionVariable, FREE_LIST_NUM> cvs_;
    std::array<std::atomic_int, FREE_LIST_NUM> remainingTaskNum_ = {0, 0, 0};

    Heap *heap_;
@@ -353,7 +353,7 @@ void Heap::ReclaimRegions(TriggerGCType gcType)
        region->ClearCrossRegionRSet();
    });
    if (!clearTaskFinished_) {
-        os::memory::LockHolder holder(waitClearTaskFinishedMutex_);
+        LockHolder holder(waitClearTaskFinishedMutex_);
        clearTaskFinished_ = true;
        waitClearTaskFinishedCV_.SignalAll();
    }

@@ -988,7 +988,7 @@ void Heap::TriggerConcurrentMarking()

void Heap::WaitRunningTaskFinished()
{
-    os::memory::LockHolder holder(waitTaskFinishedMutex_);
+    LockHolder holder(waitTaskFinishedMutex_);
    while (runningTaskCount_ > 0) {
        waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
    }

@@ -996,7 +996,7 @@ void Heap::WaitRunningTaskFinished()

void Heap::WaitClearTaskFinished()
{
-    os::memory::LockHolder holder(waitClearTaskFinishedMutex_);
+    LockHolder holder(waitClearTaskFinishedMutex_);
    while (!clearTaskFinished_) {
        waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
    }

@@ -1026,7 +1026,7 @@ void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)

void Heap::IncreaseTaskCount()
{
-    os::memory::LockHolder holder(waitTaskFinishedMutex_);
+    LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_++;
}

@@ -1120,7 +1120,7 @@ void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
void Heap::NotifyFinishColdStart(bool isMainThread)
{
    {
-        os::memory::LockHolder holder(finishColdStartMutex_);
+        LockHolder holder(finishColdStartMutex_);
        if (!onStartupEvent_) {
            return;
        }

@@ -1187,13 +1187,13 @@ bool Heap::NeedStopCollection()

bool Heap::CheckCanDistributeTask()
{
-    os::memory::LockHolder holder(waitTaskFinishedMutex_);
+    LockHolder holder(waitTaskFinishedMutex_);
    return runningTaskCount_ < maxMarkTaskCount_;
}

void Heap::ReduceTaskCount()
{
-    os::memory::LockHolder holder(waitTaskFinishedMutex_);
+    LockHolder holder(waitTaskFinishedMutex_);
    runningTaskCount_--;
    if (runningTaskCount_ == 0) {
        waitTaskFinishedCV_.SignalAll();

@@ -231,7 +231,7 @@ public:

    void NotifyPostFork()
    {
-        os::memory::LockHolder holder(finishColdStartMutex_);
+        LockHolder holder(finishColdStartMutex_);
        onStartupEvent_ = true;
    }

@@ -714,16 +714,16 @@ private:
    TriggerGCType gcType_ {TriggerGCType::YOUNG_GC};

    bool clearTaskFinished_ {true};
-    os::memory::Mutex waitClearTaskFinishedMutex_;
-    os::memory::ConditionVariable waitClearTaskFinishedCV_;
+    Mutex waitClearTaskFinishedMutex_;
+    ConditionVariable waitClearTaskFinishedCV_;
    uint32_t runningTaskCount_ {0};
    // parallel marker task number.
    uint32_t maxMarkTaskCount_ {0};
    // parallel evacuator task number.
    uint32_t maxEvacuateTaskCount_ {0};
-    os::memory::Mutex finishColdStartMutex_;
-    os::memory::Mutex waitTaskFinishedMutex_;
-    os::memory::ConditionVariable waitTaskFinishedCV_;
+    Mutex finishColdStartMutex_;
+    Mutex waitTaskFinishedMutex_;
+    ConditionVariable waitTaskFinishedCV_;

    /*
     * The memory controller providing memory statistics (by allocations and coleections),
@@ -185,13 +185,13 @@ void SemiSpace::Restart()

uintptr_t SemiSpace::AllocateSync(size_t size)
{
-    os::memory::LockHolder lock(lock_);
+    LockHolder lock(lock_);
    return Allocate(size, true);
}

bool SemiSpace::SwapRegion(Region *region, SemiSpace *fromSpace)
{
-    os::memory::LockHolder lock(lock_);
+    LockHolder lock(lock_);
    if (committedSize_ + region->GetCapacity() > maximumCapacity_ + overShootSize_) {
        return false;
    }

@@ -86,7 +86,7 @@ public:

private:
    static constexpr int GROWING_FACTOR = 2;
-    os::memory::Mutex lock_;
+    Mutex lock_;
    size_t minimumCapacity_;
};
@@ -25,7 +25,7 @@
#include "ecmascript/mem/mem_common.h"
#include "ecmascript/log_wrapper.h"

-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
// Regular region with length of DEFAULT_REGION_SIZE(256kb)

@@ -36,7 +36,7 @@ public:

    void Finalize()
    {
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        for (auto &it : memMapVector_) {
            PageUnmap(it);
        }

@@ -54,7 +54,7 @@ public:
    MemMap GetMemFromCache([[maybe_unused]] size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        if (!memMapCache_.empty()) {
            MemMap mem = memMapCache_.front();
            memMapCache_.pop_front();

@@ -66,7 +66,7 @@ public:
    MemMap GetRegularMemFromCommitted([[maybe_unused]] size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        if (!regularMapCommitted_.empty()) {
            MemMap mem = regularMapCommitted_.back();
            regularMapCommitted_.pop_back();

@@ -76,13 +76,13 @@ public:
    }

    bool IsRegularCommittedFull(size_t cachedSize) {
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        size_t size = regularMapCommitted_.size();
        return size > (cachedSize / REGULAR_MMAP_SIZE) ? true : false;
    }

    int ShouldFreeMore(size_t cachedSize) {
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        int result = regularMapCommitted_.size();
        return result - static_cast<int>(cachedSize / REGULAR_MMAP_SIZE);
    }

@@ -90,7 +90,7 @@ public:
    void AddMemToCommittedCache(void *mem, size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        regularMapCommitted_.emplace_back(mem, size);
    }

@@ -98,13 +98,13 @@ public:
    void AddMemToCache(void *mem, size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        memMapCache_.emplace_back(mem, size);
    }

    MemMap SplitMemFromCache(MemMap memMap)
    {
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        auto remainderMem = reinterpret_cast<uintptr_t>(memMap.GetMem()) + REGULAR_MMAP_SIZE;
        size_t remainderSize = AlignDown(memMap.GetSize() - REGULAR_MMAP_SIZE, REGULAR_MMAP_SIZE);
        size_t count = remainderSize / REGULAR_MMAP_SIZE;

@@ -117,13 +117,13 @@ public:

    void InsertMemMap(MemMap memMap)
    {
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        memMapVector_.emplace_back(memMap);
    }

private:
    static constexpr size_t REGULAR_MMAP_SIZE = 256_KB;
-    os::memory::Mutex lock_;
+    Mutex lock_;
    std::deque<MemMap> memMapCache_;
    std::vector<MemMap> regularMapCommitted_;
    std::vector<MemMap> memMapVector_;

@@ -183,7 +183,7 @@ public:
            LOG_GC(ERROR) << "Freelist pool oom: overflow(" << freeListPoolSize_ << ")";
            return MemMap();
        }
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        auto iterate = freeList_.lower_bound(size);
        if (iterate == freeList_.end()) {
            MergeList();

@@ -213,7 +213,7 @@ public:

    void AddMemToList(MemMap memMap)
    {
-        os::memory::LockHolder lock(lock_);
+        LockHolder lock(lock_);
        auto search = freeSet_.find(reinterpret_cast<uintptr_t>(memMap.GetMem()));
        if (UNLIKELY(search != freeSet_.end())) {
            freeSetPoolSize_ -= memMap.GetSize();

@@ -226,7 +226,7 @@ public:
    }

private:
-    os::memory::Mutex lock_;
+    Mutex lock_;
    MemMap memMap_;
    std::multimap<size_t, MemMap> freeList_;
    std::set<uintptr_t> freeSet_;
@@ -173,7 +173,7 @@ void ParallelEvacuator::SetObjectRSet(ObjectSlot slot, Region *region)

std::unique_ptr<ParallelEvacuator::Workload> ParallelEvacuator::GetWorkloadSafe()
{
-    os::memory::LockHolder holder(mutex_);
+    LockHolder holder(mutex_);
    std::unique_ptr<Workload> unit;
    if (!workloads_.empty()) {
        unit = std::move(workloads_.back());

@@ -65,7 +65,7 @@ void ParallelEvacuator::EvacuateSpace()
        AddWorkload(std::make_unique<EvacuateWorkload>(this, current));
    });
    if (heap_->IsParallelGCEnabled()) {
-        os::memory::LockHolder holder(mutex_);
+        LockHolder holder(mutex_);
        parallel_ = CalculateEvacuationThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(

@@ -86,7 +86,7 @@ bool ParallelEvacuator::EvacuateSpace(TlabAllocator *allocator, bool isMain)
    }
    allocator->Finalize();
    if (!isMain) {
-        os::memory::LockHolder holder(mutex_);
+        LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }

@@ -214,7 +214,7 @@ void ParallelEvacuator::UpdateReference()
        << "old space region count:" << oldRegionCount;

    if (heap_->IsParallelGCEnabled()) {
-        os::memory::LockHolder holder(mutex_);
+        LockHolder holder(mutex_);
        parallel_ = CalculateUpdateThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(

@@ -410,7 +410,7 @@ void ParallelEvacuator::WaitFinished()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), WaitUpdateFinished);
    if (parallel_ > 0) {
-        os::memory::LockHolder holder(mutex_);
+        LockHolder holder(mutex_);
        while (parallel_ > 0) {
            condition_.Wait(&mutex_);
        }

@@ -425,7 +425,7 @@ bool ParallelEvacuator::ProcessWorkloads(bool isMain)
        region = GetWorkloadSafe();
    }
    if (!isMain) {
-        os::memory::LockHolder holder(mutex_);
+        LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }

@@ -28,7 +28,7 @@
#include "ecmascript/mem/tlab_allocator.h"
#include "ecmascript/taskpool/task.h"

-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
class ParallelEvacuator {

@@ -162,8 +162,8 @@ private:
    uintptr_t waterLine_ = 0;
    std::vector<std::unique_ptr<Workload>> workloads_;
    std::atomic_int parallel_ = 0;
-    os::memory::Mutex mutex_;
-    os::memory::ConditionVariable condition_;
+    Mutex mutex_;
+    ConditionVariable condition_;
    std::atomic<size_t> promotedSize_ = 0;
};
} // namespace panda::ecmascript
@@ -363,7 +363,7 @@ inline SlotStatus CompressGCMarker::MarkObject(uint32_t threadId, TaggedObject *

inline uintptr_t CompressGCMarker::AllocateReadOnlySpace(size_t size)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    uintptr_t forwardAddress = heap_->GetReadOnlySpace()->Allocate(size);
    if (UNLIKELY(forwardAddress == 0)) {
        LOG_ECMA_MEM(FATAL) << "Evacuate Read only Object: alloc failed: "

@@ -375,7 +375,7 @@ inline uintptr_t CompressGCMarker::AllocateReadOnlySpace(size_t size)

inline uintptr_t CompressGCMarker::AllocateAppSpawnSpace(size_t size)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    uintptr_t forwardAddress = heap_->GetAppSpawnSpace()->Allocate(size);
    if (UNLIKELY(forwardAddress == 0)) {
        LOG_ECMA_MEM(FATAL) << "Evacuate AppSpawn Object: alloc failed: "

@@ -181,7 +181,7 @@ protected:

private:
    bool isAppSpawn_ {false};
-    os::memory::Mutex mutex_;
+    Mutex mutex_;
};
} // namespace panda::ecmascript
#endif // ECMASCRIPT_MEM_PARALLEL_MARKER_H
@@ -35,7 +35,7 @@ inline RememberedSet *Region::CreateRememberedSet()
inline RememberedSet *Region::GetOrCreateCrossRegionRememberedSet()
{
    if (UNLIKELY(crossRegionSet_ == nullptr)) {
-        os::memory::LockHolder lock(*lock_);
+        LockHolder lock(*lock_);
        if (crossRegionSet_ == nullptr) {
            crossRegionSet_ = CreateRememberedSet();
        }

@@ -46,7 +46,7 @@ inline RememberedSet *Region::GetOrCreateCrossRegionRememberedSet()
inline RememberedSet *Region::GetOrCreateOldToNewRememberedSet()
{
    if (UNLIKELY(packedData_.oldToNewSet_ == nullptr)) {
-        os::memory::LockHolder lock(*lock_);
+        LockHolder lock(*lock_);
        if (packedData_.oldToNewSet_ == nullptr) {
            if (sweepingRSet_ != nullptr && IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT)) {
                packedData_.oldToNewSet_ = sweepingRSet_;

@@ -25,7 +25,7 @@
#include "ecmascript/mem/mem_common.h"
#include "ecmascript/platform/map.h"

-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

#include "securec.h"

@@ -109,7 +109,7 @@ public:
          wasted_(0),
          snapshotData_(0)
    {
-        lock_ = new os::memory::Mutex();
+        lock_ = new Mutex();
    }

    ~Region() = default;

@@ -593,7 +593,7 @@ private:
    RememberedSet *crossRegionSet_ {nullptr};
    RememberedSet *sweepingRSet_ {nullptr};
    Span<FreeObjectSet *> freeObjectSets_;
-    os::memory::Mutex *lock_ {nullptr};
+    Mutex *lock_ {nullptr};
    uint64_t wasted_;
    // snapshotdata_ is used to encode the region for snapshot. Its upper 32 bits are used to store the size of
    // the huge object, and the lower 32 bits are used to store the region index
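The Region hunks above keep the double-checked locking idiom: test the pointer, take the per-region Mutex only on the slow path, then test again before creating the remembered set. A minimal sketch of that idiom with the new LockHolder (RememberedSetStub and RegionDemo are illustrative stand-ins):

#include "ecmascript/platform/mutex.h"

struct RememberedSetStub {};  // stand-in for the real RememberedSet

class RegionDemo {
public:
    RememberedSetStub *GetOrCreateSet()
    {
        if (set_ == nullptr) {                             // fast path, no lock taken
            panda::ecmascript::LockHolder lock(*lock_);
            if (set_ == nullptr) {                         // re-check under the lock
                set_ = new RememberedSetStub();
            }
        }
        return set_;
    }

private:
    RememberedSetStub *set_ {nullptr};
    panda::ecmascript::Mutex *lock_ {new panda::ecmascript::Mutex()};  // heap-allocated, as Region does
};

As in the original code, the fast-path read is an ordinary non-atomic load; only the creation itself is serialized by the lock.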
@@ -189,7 +189,7 @@ void SparseSpace::SortSweepingRegion()

Region *SparseSpace::GetSweepingRegionSafe()
{
-    os::memory::LockHolder holder(lock_);
+    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweepingList_.empty()) {
        region = sweepingList_.back();

@@ -200,13 +200,13 @@ Region *SparseSpace::GetSweepingRegionSafe()

void SparseSpace::AddSweptRegionSafe(Region *region)
{
-    os::memory::LockHolder holder(lock_);
+    LockHolder holder(lock_);
    sweptList_.emplace_back(region);
}

Region *SparseSpace::GetSweptRegionSafe()
{
-    os::memory::LockHolder holder(lock_);
+    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweptList_.empty()) {
        region = sweptList_.back();

@@ -231,7 +231,7 @@ Region *SparseSpace::TryToGetSuitableSweptRegion(size_t size)
    if (sweptList_.empty()) {
        return nullptr;
    }
-    os::memory::LockHolder holder(lock_);
+    LockHolder holder(lock_);
    for (auto iter = sweptList_.begin(); iter != sweptList_.end(); iter++) {
        if (allocator_->MatchFreeObjectSet(*iter, size)) {
            Region *region = *iter;

@@ -373,7 +373,7 @@ Region *OldSpace::TrySweepToGetSuitableRegion(size_t size)

Region *OldSpace::TryToGetExclusiveRegion(size_t size)
{
-    os::memory::LockHolder lock(lock_);
+    LockHolder lock(lock_);
    uintptr_t result = allocator_->LookupSuitableFreeObject(size);
    if (result != 0) {
        // Remove region from global old space

@@ -397,7 +397,7 @@ Region *OldSpace::TryToGetExclusiveRegion(size_t size)
void OldSpace::Merge(LocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
-    os::memory::LockHolder lock(lock_);
+    LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);

@@ -136,7 +136,7 @@ private:
    // For sweeping
    uintptr_t AllocateAfterSweepingCompleted(size_t size);

-    os::memory::Mutex lock_;
+    Mutex lock_;
    std::vector<Region *> sweepingList_;
    std::vector<Region *> sweptList_;
    size_t liveObjectSize_ {0};

@@ -198,7 +198,7 @@ private:
    static constexpr size_t PARTIAL_GC_MIN_COLLECT_REGION_SIZE = 5;

    CVector<Region *> collectRegionSet_;
-    os::memory::Mutex lock_;
+    Mutex lock_;
    size_t mergeSize_ {0};
};
@@ -28,7 +28,7 @@
#include "ecmascript/mem/visitor.h"
#include "ecmascript/mem/work_manager.h"

-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda {
namespace ecmascript {

@@ -178,7 +178,7 @@ WorkNode *WorkManager::AllocateWorkNode()
    do {
        begin = atomicField->load(std::memory_order_acquire);
        if (begin + totalSize >= spaceEnd_) {
-            os::memory::LockHolder lock(mtx_);
+            LockHolder lock(mtx_);
            begin = atomicField->load(std::memory_order_acquire);
            if (begin + totalSize >= spaceEnd_) {
                agedSpaces_.emplace_back(workSpace_);

@@ -108,14 +108,14 @@ public:
        if (node == nullptr) {
            return;
        }
-        os::memory::LockHolder lock(mtx_);
+        LockHolder lock(mtx_);
        node->SetNext(top_);
        top_ = node;
    }

    bool Pop(WorkNode **node)
    {
-        os::memory::LockHolder lock(mtx_);
+        LockHolder lock(mtx_);
        if (top_ == nullptr) {
            return false;
        }

@@ -126,7 +126,7 @@ public:

private:
    WorkNode *top_ {nullptr};
-    os::memory::Mutex mtx_;
+    Mutex mtx_;
};

struct WorkNodeHolder {

@@ -221,7 +221,7 @@ private:
    uintptr_t spaceStart_;
    uintptr_t spaceEnd_;
    std::vector<uintptr_t> agedSpaces_;
-    os::memory::Mutex mtx_;
+    Mutex mtx_;
    ParallelGCTaskPhase parallelGCTaskPhase_;
    std::atomic<bool> initialized_ {false};
};
@@ -92,7 +92,7 @@

#include "ohos/init_data.h"

-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

#if defined(ECMASCRIPT_SUPPORT_DEBUGGER) && defined(PANDA_TARGET_IOS)
namespace OHOS::ArkCompiler::Toolchain {

@@ -160,6 +160,8 @@ using ecmascript::job::QueueType;
using ecmascript::JSRuntimeOptions;
using ecmascript::BigInt;
using ecmascript::MemMapAllocator;
+using ecmascript::Mutex;
+using ecmascript::LockHolder;
using ecmascript::JSMapIterator;
using ecmascript::JSSetIterator;
using ecmascript::IterationKind;

@@ -197,7 +199,7 @@ constexpr std::string_view ENTRY_POINTER = "_GLOBAL::func_main_0";
}
int JSNApi::vmCount_ = 0;
bool JSNApi::initialize_ = false;
-static os::memory::Mutex *mutex = new panda::os::memory::Mutex();
+static Mutex *mutex = new panda::Mutex();
#define XPM_PROC_PREFIX "/proc/"
#define XPM_PROC_SUFFIX "/xpm_region"
#define XPM_PROC_LENGTH 50

@@ -332,7 +334,7 @@ void JSNApi::DestroyJSContext(EcmaVM *vm, EcmaContext *context)
EcmaVM *JSNApi::CreateEcmaVM(const JSRuntimeOptions &options)
{
    {
-        os::memory::LockHolder lock(*mutex);
+        LockHolder lock(*mutex);
        vmCount_++;
        if (!initialize_) {
            ecmascript::Log::Initialize(options);

@@ -351,7 +353,7 @@ EcmaVM *JSNApi::CreateEcmaVM(const JSRuntimeOptions &options)

void JSNApi::DestroyJSVM(EcmaVM *ecmaVm)
{
-    os::memory::LockHolder lock(*mutex);
+    LockHolder lock(*mutex);
    if (!initialize_) {
        return;
    }
@@ -129,7 +129,7 @@ void PGOProfiler::PGODump(JSTaggedType func)
        return;
    }
    {
-        os::memory::LockHolder lock(mutex_);
+        LockHolder lock(mutex_);
        dumpWorkList_.emplace_back(func);
        if (state_ == State::STOP) {
            state_ = State::START;

@@ -144,7 +144,7 @@ void PGOProfiler::PausePGODump()
    if (!isEnable_) {
        return;
    }
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    if (state_ == State::START) {
        state_ = State::PAUSE;
        condition_.Wait(&mutex_);

@@ -156,7 +156,7 @@ void PGOProfiler::ResumePGODump()
    if (!isEnable_) {
        return;
    }
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    if (state_ == State::PAUSE) {
        state_ = State::START;
        Taskpool::GetCurrentTaskpool()->PostTask(

@@ -169,7 +169,7 @@ void PGOProfiler::WaitPGODumpFinish()
    if (!isEnable_) {
        return;
    }
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    while (state_ == State::START) {
        condition_.Wait(&mutex_);
    }

@@ -180,7 +180,7 @@ void PGOProfiler::PGOPreDump(JSTaggedType func)
    if (!isEnable_) {
        return;
    }
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    preDumpWorkList_.emplace(func);
}

@@ -192,7 +192,7 @@ void PGOProfiler::HandlePGOPreDump()
    DISALLOW_GARBAGE_COLLECTION;
    CSet<JSTaggedType> preDumpWorkList;
    {
-        os::memory::LockHolder lock(mutex_);
+        LockHolder lock(mutex_);
        preDumpWorkList = preDumpWorkList_;
    }
    for (auto iter : preDumpWorkList) {

@@ -256,7 +256,7 @@ void PGOProfiler::HandlePGODump()

JSTaggedValue PGOProfiler::PopFromProfileQueue()
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    auto result = JSTaggedValue::Undefined();
    while (result.IsUndefined()) {
        if (dumpWorkList_.empty()) {

@@ -25,6 +25,7 @@
#include "ecmascript/jspandafile/method_literal.h"
#include "ecmascript/mem/c_containers.h"
#include "ecmascript/mem/visitor.h"
+#include "ecmascript/platform/mutex.h"
#include "ecmascript/taskpool/task.h"
#include "ecmascript/pgo_profiler/pgo_utils.h"
#include "ecmascript/pgo_profiler/types/pgo_profile_type.h"

@@ -153,8 +154,8 @@ private:
    State state_ { State::STOP };
    uint32_t methodCount_ { 0 };
    std::chrono::system_clock::time_point saveTimestamp_;
-    os::memory::Mutex mutex_;
-    os::memory::ConditionVariable condition_;
+    Mutex mutex_;
+    ConditionVariable condition_;
    CList<JSTaggedType> dumpWorkList_;
    CSet<JSTaggedType> preDumpWorkList_;
    CMap<JSTaggedType, ProfileType> tracedProfiles_;
@@ -46,7 +46,7 @@ void PGOProfilerEncoder::Destroy()

bool PGOProfilerEncoder::ResetOutPathByModuleName(const std::string &moduleName)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    // only first assign takes effect
    if (!moduleName_.empty() || moduleName.empty()) {
        return false;

@@ -88,7 +88,7 @@ void PGOProfilerEncoder::SamplePandaFileInfo(uint32_t checksum, const CString &a
    if (!isInitialized_) {
        return;
    }
-    os::memory::WriteLockHolder lock(rwLock_);
+    WriteLockHolder lock(rwLock_);
    pandaFileInfos_->Sample(checksum);
    ApEntityId entryId(0);
    abcFilePool_->TryAdd(abcName, entryId);

@@ -99,7 +99,7 @@ bool PGOProfilerEncoder::GetPandaFileId(const CString &abcName, ApEntityId &entr
    if (!isInitialized_) {
        return false;
    }
-    os::memory::ReadLockHolder lock(rwLock_);
+    ReadLockHolder lock(rwLock_);
    return abcFilePool_->GetEntryId(abcName, entryId);
}

@@ -108,7 +108,7 @@ void PGOProfilerEncoder::Merge(const PGORecordDetailInfos &recordInfos)
    if (!isInitialized_) {
        return;
    }
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    globalRecordInfos_->Merge(recordInfos);
}

@@ -134,7 +134,7 @@ bool PGOProfilerEncoder::Save()
    if (!isInitialized_) {
        return false;
    }
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    return InternalSave();
}

@@ -151,7 +151,7 @@ bool PGOProfilerEncoder::SaveAndRename(const SaveTask *task)
    pandaFileInfos_->ProcessToBinary(fileStream, header_->GetPandaInfoSection());
    globalRecordInfos_->ProcessToBinary(task, fileStream, header_);
    {
-        os::memory::ReadLockHolder lock(rwLock_);
+        ReadLockHolder lock(rwLock_);
        PGOFileSectionInterface::ProcessSectionToBinary(fileStream, header_, *abcFilePool_->GetPool());
    }
    header_->SetFileSize(static_cast<uint32_t>(fileStream.tellp()));

@@ -234,7 +234,7 @@ void PGOProfilerEncoder::StartSaveTask(const SaveTask *task)
        LOG_ECMA(ERROR) << "StartSaveTask: task is already terminate";
        return;
    }
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    InternalSave(task);
}

@@ -78,8 +78,8 @@ private:
    std::unique_ptr<PGOPandaFileInfos> pandaFileInfos_;
    std::shared_ptr<PGOAbcFilePool> abcFilePool_;
    std::shared_ptr<PGORecordDetailInfos> globalRecordInfos_;
-    os::memory::Mutex mutex_;
-    os::memory::RWLock rwLock_;
+    Mutex mutex_;
+    RWLock rwLock_;
    std::string moduleName_;
    ApGenMode mode_ {OVERWRITE};
    friend SaveTask;
ecmascript/platform/common/mutex.cpp (new file, 199 lines)
@@ -0,0 +1,199 @@
/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/platform/mutex.h"

#include "ecmascript/log_wrapper.h"

#include <cstring>
#include <ctime>

namespace panda::ecmascript {
inline void FatalIfError(const char *f, int rc)
{
    if (rc != 0) {
        LOG_ECMA(FATAL)<< f << " failed: " << rc;
    }
}

Mutex::Mutex(bool is_init) : mutex_()
{
    if (is_init) {
        Init(nullptr);
    }
}

Mutex::~Mutex()
{
    int rc = pthread_mutex_destroy(&mutex_);
    FatalIfError("pthread_mutex_destroy", rc);
}

void Mutex::Init(pthread_mutexattr_t *attrs)
{
    int rc = pthread_mutex_init(&mutex_, attrs);
    FatalIfError("pthread_mutex_init", rc);
}

void Mutex::Lock()
{
    int rc = pthread_mutex_lock(&mutex_);
    FatalIfError("pthread_mutex_lock", rc);
}

bool Mutex::TryLock()
{
    int rc = pthread_mutex_trylock(&mutex_);
    if (rc == EBUSY) {
        return false;
    }

    FatalIfError("pthread_mutex_trylock", rc);

    return true;
}

void Mutex::Unlock()
{
    int rc = pthread_mutex_unlock(&mutex_);
    FatalIfError("pthread_mutex_unlock", rc);
}

RecursiveMutex::RecursiveMutex() : Mutex(false)
{
    pthread_mutexattr_t attrs;
    pthread_mutexattr_init(&attrs);
    pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    Init(&attrs);
}

RWLock::RWLock() : rwlock_()
{
    int rc = pthread_rwlock_init(&rwlock_, nullptr);
    FatalIfError("pthread_rwlock_init", rc);
}

RWLock::~RWLock()
{
    int rc = pthread_rwlock_destroy(&rwlock_);
    FatalIfError("pthread_rwlock_destroy", rc);
}

void RWLock::ReadLock()
{
    int rc = pthread_rwlock_rdlock(&rwlock_);
    FatalIfError("pthread_rwlock_rdlock", rc);
}

void RWLock::WriteLock()
{
    int rc = pthread_rwlock_wrlock(&rwlock_);
    FatalIfError("pthread_rwlock_wrlock", rc);
}

bool RWLock::TryReadLock()
{
    int rc = pthread_rwlock_tryrdlock(&rwlock_);
    if (rc == EBUSY) {
        return false;
    }

    FatalIfError("pthread_rwlock_tryrdlock", rc);

    return true;
}

bool RWLock::TryWriteLock()
{
    int rc = pthread_rwlock_trywrlock(&rwlock_);
    if (rc == EBUSY) {
        return false;
    }

    FatalIfError("pthread_rwlock_trywrlock", rc);

    return true;
}

void RWLock::Unlock()
{
    int rc = pthread_rwlock_unlock(&rwlock_);
    FatalIfError("pthread_rwlock_unlock", rc);
}

ConditionVariable::ConditionVariable() : cond_()
{
    int rc = pthread_cond_init(&cond_, nullptr);
    FatalIfError("pthread_cond_init", rc);
}

ConditionVariable::~ConditionVariable()
{
    int rc = pthread_cond_destroy(&cond_);
    FatalIfError("pthread_cond_destroy", rc);
}

void ConditionVariable::Signal()
{
    int rc = pthread_cond_signal(&cond_);
    FatalIfError("pthread_cond_signal", rc);
}

void ConditionVariable::SignalAll()
{
    int rc = pthread_cond_broadcast(&cond_);
    FatalIfError("pthread_cond_broadcast", rc);
}

void ConditionVariable::Wait(Mutex *mutex)
{
    int rc = pthread_cond_wait(&cond_, &mutex->mutex_);
    FatalIfError("pthread_cond_wait", rc);
}

struct timespec ConvertTime(uint64_t ms, uint64_t ns, bool is_absolute)
{
    struct timespec abs_time = {0, 0};
    if (!is_absolute) {
        clock_gettime(CLOCK_REALTIME, &abs_time);
    }
    const int64_t MILLISECONDS_PER_SEC = 1000;
    const int64_t NANOSECONDS_PER_MILLISEC = 1000000;
    const int64_t NANOSECONDS_PER_SEC = 1000000000;
    time_t seconds = ms / MILLISECONDS_PER_SEC;
    time_t nanoseconds = (ms % MILLISECONDS_PER_SEC) * NANOSECONDS_PER_MILLISEC + ns;
    abs_time.tv_sec += seconds;
    abs_time.tv_nsec += nanoseconds;
    if (abs_time.tv_nsec >= NANOSECONDS_PER_SEC) {
        abs_time.tv_nsec -= NANOSECONDS_PER_SEC;
        abs_time.tv_sec++;
    }
    return abs_time;
}

bool ConditionVariable::TimedWait(Mutex *mutex, uint64_t ms, uint64_t ns, bool is_absolute)
{
    struct timespec abs_time = ConvertTime(ms, ns, is_absolute);
    int rc = pthread_cond_timedwait(&cond_, &mutex->mutex_, &abs_time);
    if (rc != 0) {
        if (rc == ETIMEDOUT) {
            // interrupted
            return true;
        }
    }
    FatalIfError("pthread_cond_timedwait", rc);
    return false;
}
} // namespace panda::ecmascript
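Note the return convention in TimedWait above: it returns true when pthread_cond_timedwait reports ETIMEDOUT and false when the wait was signalled, which is why callers such as VmThreadControl treat a true result as a timeout. A small usage sketch (only the platform classes come from this commit; WaitForFlag is illustrative):

#include "ecmascript/platform/mutex.h"

using panda::ecmascript::ConditionVariable;
using panda::ecmascript::LockHolder;
using panda::ecmascript::Mutex;

bool WaitForFlag(Mutex &mtx, ConditionVariable &cv, bool &flag, uint64_t timeoutMs)
{
    LockHolder lock(mtx);
    while (!flag) {
        if (cv.TimedWait(&mtx, timeoutMs)) {  // true: the relative timeout expired
            return false;                     // timed out before the flag was set
        }
    }
    return true;
}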
ecmascript/platform/mutex.h (new file, 160 lines)
@ -0,0 +1,160 @@
|
||||
/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_PLATFORM_MUTEX_H
#define ECMASCRIPT_PLATFORM_MUTEX_H

#include <pthread.h>

#include "ecmascript/common.h"

namespace panda::ecmascript {

class PUBLIC_API Mutex {
public:
    explicit Mutex(bool is_init = true);

    ~Mutex();

    void Lock();

    bool TryLock();

    void Unlock();

protected:
    void Init(pthread_mutexattr_t *attrs);

private:
    pthread_mutex_t mutex_;

    NO_COPY_SEMANTIC(Mutex);
    NO_MOVE_SEMANTIC(Mutex);

    friend class ConditionVariable;
};

class RecursiveMutex : public Mutex {
public:
    RecursiveMutex();

    ~RecursiveMutex() = default;

    NO_COPY_SEMANTIC(RecursiveMutex);
    NO_MOVE_SEMANTIC(RecursiveMutex);
};

class RWLock {
public:
    RWLock();

    ~RWLock();

    void ReadLock();

    void WriteLock();

    bool TryReadLock();

    bool TryWriteLock();

    void Unlock();

private:
    pthread_rwlock_t rwlock_;

    NO_COPY_SEMANTIC(RWLock);
    NO_MOVE_SEMANTIC(RWLock);
};

class PUBLIC_API ConditionVariable {
public:
    ConditionVariable();

    ~ConditionVariable();

    void Signal();

    void SignalAll();

    void Wait(Mutex *mutex);

    bool TimedWait(Mutex *mutex, uint64_t ms, uint64_t ns = 0, bool is_absolute = false);

private:
    pthread_cond_t cond_;

    NO_COPY_SEMANTIC(ConditionVariable);
    NO_MOVE_SEMANTIC(ConditionVariable);
};

class LockHolder {
public:
    explicit LockHolder(Mutex &mtx) : lock_(mtx)
    {
        lock_.Lock();
    }

    ~LockHolder()
    {
        lock_.Unlock();
    }

private:
    Mutex &lock_;

    NO_COPY_SEMANTIC(LockHolder);
    NO_MOVE_SEMANTIC(LockHolder);
};

class ReadLockHolder {
public:
    explicit ReadLockHolder(RWLock &lock) : lock_(lock)
    {
        lock_.ReadLock();
    }

    ~ReadLockHolder()
    {
        lock_.Unlock();
    }

private:
    RWLock &lock_;

    NO_COPY_SEMANTIC(ReadLockHolder);
    NO_MOVE_SEMANTIC(ReadLockHolder);
};

class WriteLockHolder {
public:
    explicit WriteLockHolder(RWLock &lock) : lock_(lock)
    {
        lock_.WriteLock();
    }

    ~WriteLockHolder()
    {
        lock_.Unlock();
    }

private:
    RWLock &lock_;

    NO_COPY_SEMANTIC(WriteLockHolder);
    NO_MOVE_SEMANTIC(WriteLockHolder);
};
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_PLATFORM_MUTEX_H
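The three holders above are plain RAII wrappers: LockHolder pairs Lock()/Unlock() on a Mutex, while ReadLockHolder and WriteLockHolder take the shared and exclusive sides of an RWLock. An illustrative sketch with an invented class, not taken from this commit:

// Illustrative reader/writer usage of the new holders (hypothetical class).
#include <string>
#include <unordered_map>

#include "ecmascript/platform/mutex.h"

using namespace panda::ecmascript;

class NameTable {
public:
    bool Has(const std::string &name)
    {
        ReadLockHolder lock(rwlock_);   // shared: concurrent readers are allowed
        return table_.count(name) != 0;
    }

    void Add(const std::string &name, uint32_t id)
    {
        WriteLockHolder lock(rwlock_);  // exclusive: blocks readers and other writers
        table_[name] = id;
    }

private:
    RWLock rwlock_;
    std::unordered_map<std::string, uint32_t> table_;
};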
@ -20,7 +20,7 @@ static constexpr size_t MALLOC_SIZE_LIMIT = 2147483648; // Max internal memory u

JSSharedMemoryManager::~JSSharedMemoryManager()
{
-    os::memory::LockHolder lock(jsSharedMemoryLock_);
+    LockHolder lock(jsSharedMemoryLock_);
    auto iter = loadedJSSharedMemory_.begin();
    while (iter != loadedJSSharedMemory_.end()) {
        const void *pointer = ToVoidPtr(iter->first);
@ -44,7 +44,7 @@ bool JSSharedMemoryManager::CreateOrLoad(void **pointer, size_t size)

void JSSharedMemoryManager::InsertSharedMemory(const void *pointer)
{
-    os::memory::LockHolder lock(jsSharedMemoryLock_);
+    LockHolder lock(jsSharedMemoryLock_);
    if (loadedJSSharedMemory_.find((uint64_t)pointer) == loadedJSSharedMemory_.end()) {
        loadedJSSharedMemory_[(uint64_t)pointer] = 1;
    }
@ -52,7 +52,7 @@ void JSSharedMemoryManager::InsertSharedMemory(const void *pointer)

void JSSharedMemoryManager::IncreaseRefSharedMemory(const void *pointer)
{
-    os::memory::LockHolder lock(jsSharedMemoryLock_);
+    LockHolder lock(jsSharedMemoryLock_);
    if (loadedJSSharedMemory_.find((uint64_t)pointer) != loadedJSSharedMemory_.end()) {
        loadedJSSharedMemory_[(uint64_t)pointer]++;
    }
@ -60,7 +60,7 @@ void JSSharedMemoryManager::IncreaseRefSharedMemory(const void *pointer)

void JSSharedMemoryManager::DecreaseRefSharedMemory(const void *pointer)
{
-    os::memory::LockHolder lock(jsSharedMemoryLock_);
+    LockHolder lock(jsSharedMemoryLock_);
    auto iter = loadedJSSharedMemory_.find((uint64_t)pointer);
    if (iter != loadedJSSharedMemory_.end()) {
        if (iter->second > 1) {
@ -17,7 +17,7 @@
#define ECMASCRIPT_SHARED_MEMORY_MANAGER_MANAGER_H

#include "ecmascript/mem/c_containers.h"
-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda {
class EcmaVm;
@ -41,7 +41,7 @@ private:
    void DecreaseRefSharedMemory(const void *pointer);
    void FreeBuffer(void *mem);
    void *AllocateBuffer(size_t size);
-    os::memory::RecursiveMutex jsSharedMemoryLock_;
+    RecursiveMutex jsSharedMemoryLock_;
    CMap<const uint64_t, int32_t> loadedJSSharedMemory_;
};
}  // namespace ecmascript
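jsSharedMemoryLock_ stays a RecursiveMutex after the type swap, so the thread that already owns it may take it again. A minimal sketch of that property with the new type (hypothetical functions, not from this commit):

// Hypothetical sketch: re-entrant locking on the new RecursiveMutex.
#include "ecmascript/platform/mutex.h"

using namespace panda::ecmascript;

static RecursiveMutex g_lock;   // presumably built on PTHREAD_MUTEX_RECURSIVE in mutex.cpp

static void Inner()
{
    LockHolder lock(g_lock);    // second acquisition by the owning thread: count goes to 2
    // ... touch shared state ...
}

static void Outer()
{
    LockHolder lock(g_lock);    // first acquisition
    Inner();                    // safe only because the mutex is recursive
}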
@ -15,7 +15,7 @@

#include "ecmascript/taskpool/runner.h"

-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"
#include "libpandabase/os/thread.h"
#ifdef ENABLE_QOS
#include "qos.h"
@ -38,7 +38,7 @@ Runner::Runner(uint32_t threadNum) : totalThreadNum_(threadNum)
void Runner::TerminateTask(int32_t id, TaskType type)
{
    taskQueue_.TerminateTask(id, type);
-    os::memory::LockHolder holder(mtx_);
+    LockHolder holder(mtx_);
    for (uint32_t i = 0; i < runningTask_.size(); i++) {
        if (runningTask_[i] != nullptr) {
            if (id != ALL_TASK_ID && id != runningTask_[i]->GetId()) {
@ -57,7 +57,7 @@ void Runner::TerminateThread()
    TerminateTask(ALL_TASK_ID, TaskType::ALL);
    taskQueue_.Terminate();

-    os::memory::LockHolder holder(mtxPool_);
+    LockHolder holder(mtxPool_);
    uint32_t threadNum = threadPool_.size();
    for (uint32_t i = 0; i < threadNum; i++) {
        threadPool_.at(i)->join();
@ -82,13 +82,13 @@ void Runner::SetQosPriority([[maybe_unused]] bool isForeground)

void Runner::RecordThreadId()
{
-    os::memory::LockHolder holder(mtx_);
+    LockHolder holder(mtx_);
    gcThreadId_.emplace_back(os::thread::GetCurrentThreadId());
}

void Runner::SetRunTask(uint32_t threadId, Task *task)
{
-    os::memory::LockHolder holder(mtx_);
+    LockHolder holder(mtx_);
    runningTask_[threadId] = task;
}

@ -23,7 +23,7 @@

#include "ecmascript/common.h"
#include "ecmascript/taskpool/task_queue.h"
-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
static constexpr uint32_t MIN_TASKPOOL_THREAD_NUM = 3;
@ -55,7 +55,7 @@ public:

    bool IsInThreadPool(std::thread::id id)
    {
-        os::memory::LockHolder holder(mtxPool_);
+        LockHolder holder(mtxPool_);
        for (auto &thread : threadPool_) {
            if (thread->get_id() == id) {
                return true;
@ -73,8 +73,8 @@ private:
    std::array<Task*, MAX_TASKPOOL_THREAD_NUM + 1> runningTask_;
    uint32_t totalThreadNum_ {0};
    std::vector<uint32_t> gcThreadId_ {};
-    os::memory::Mutex mtx_;
-    os::memory::Mutex mtxPool_;
+    Mutex mtx_;
+    Mutex mtxPool_;
};
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_TASKPOOL_RUNNER_H
@ -18,7 +18,7 @@
namespace panda::ecmascript {
void TaskQueue::PostTask(std::unique_ptr<Task> task)
{
-    os::memory::LockHolder holder(mtx_);
+    LockHolder holder(mtx_);
    ASSERT(!terminate_);
    tasks_.push_back(std::move(task));
    cv_.Signal();
@ -26,7 +26,7 @@ void TaskQueue::PostTask(std::unique_ptr<Task> task)

std::unique_ptr<Task> TaskQueue::PopTask()
{
-    os::memory::LockHolder holder(mtx_);
+    LockHolder holder(mtx_);
    while (true) {
        if (!tasks_.empty()) {
            std::unique_ptr<Task> task = std::move(tasks_.front());
@ -43,7 +43,7 @@ std::unique_ptr<Task> TaskQueue::PopTask()

void TaskQueue::TerminateTask(int32_t id, TaskType type)
{
-    os::memory::LockHolder holder(mtx_);
+    LockHolder holder(mtx_);
    for (auto &task : tasks_) {
        if (id != ALL_TASK_ID && id != task->GetId()) {
            continue;
@ -57,7 +57,7 @@ void TaskQueue::TerminateTask(int32_t id, TaskType type)

void TaskQueue::Terminate()
{
-    os::memory::LockHolder holder(mtx_);
+    LockHolder holder(mtx_);
    terminate_ = true;
    cv_.SignalAll();
}
@ -22,7 +22,7 @@
#include <memory>

#include "ecmascript/taskpool/task.h"
-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
class TaskQueue {
@ -43,8 +43,8 @@ private:
    std::deque<std::unique_ptr<Task>> tasks_;

    std::atomic_bool terminate_ = false;
-    os::memory::Mutex mtx_;
-    os::memory::ConditionVariable cv_;
+    Mutex mtx_;
+    ConditionVariable cv_;
};
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_TASKPOOL_TASK_QUEUE_H
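The TaskQueue changes above keep the usual mutex-plus-condition-variable queue shape: PostTask() signals under the lock, PopTask() waits on cv_ until work or termination arrives. A condensed stand-in using the new types (simplified, not the real TaskQueue):

// Simplified stand-in for the queue locking protocol (invented class, int payload).
#include <deque>

#include "ecmascript/platform/mutex.h"

using namespace panda::ecmascript;

class IntQueue {
public:
    void Post(int value)
    {
        LockHolder holder(mtx_);
        items_.push_back(value);
        cv_.Signal();             // wake one waiting consumer
    }

    // Blocks until an item arrives or Terminate() is called; returns false on termination.
    bool Pop(int &out)
    {
        LockHolder holder(mtx_);
        while (items_.empty() && !terminate_) {
            cv_.Wait(&mtx_);      // releases mtx_ while sleeping, re-acquires before returning
        }
        if (items_.empty()) {
            return false;
        }
        out = items_.front();
        items_.pop_front();
        return true;
    }

    void Terminate()
    {
        LockHolder holder(mtx_);
        terminate_ = true;
        cv_.SignalAll();          // wake every consumer so it can observe terminate_
    }

private:
    std::deque<int> items_;
    bool terminate_ {false};
    Mutex mtx_;
    ConditionVariable cv_;
};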
@ -26,7 +26,7 @@ Taskpool *Taskpool::GetCurrentTaskpool()

void Taskpool::Initialize(int threadNum)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    if (isInitialized_++ <= 0) {
        runner_ = std::make_unique<Runner>(TheMostSuitableThreadNum(threadNum));
    }
@ -34,7 +34,7 @@ void Taskpool::Initialize(int threadNum)

void Taskpool::Destroy(int32_t id)
{
-    os::memory::LockHolder lock(mutex_);
+    LockHolder lock(mutex_);
    if (isInitialized_ <= 0) {
        return;
    }
@ -20,7 +20,7 @@

#include "ecmascript/common.h"
#include "ecmascript/taskpool/runner.h"
-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
class Taskpool {
@ -30,7 +30,7 @@ public:
    Taskpool() = default;
    PUBLIC_API ~Taskpool()
    {
-        os::memory::LockHolder lock(mutex_);
+        LockHolder lock(mutex_);
        runner_->TerminateThread();
        isInitialized_ = 0;
    }
@ -71,7 +71,7 @@ private:

    std::unique_ptr<Runner> runner_;
    volatile int isInitialized_ = 0;
-    os::memory::Mutex mutex_;
+    Mutex mutex_;
};
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_PALTFORM_PLATFORM_H
@ -19,12 +19,9 @@
#include "ecmascript/ecma_macros.h"
#include "ecmascript/mem/c_containers.h"

-#include "libpandabase/os/mutex.h"
+#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
-using Mutex = os::memory::Mutex;
-using LockHolder = os::memory::LockHolder<Mutex>;

class WaiterListNode {
public:
    WaiterListNode() = default;
@ -36,7 +33,7 @@ public:
    WaiterListNode *prev_ {nullptr};
    WaiterListNode *next_ {nullptr};
    // Used to call wait or Signal() to unlock wait and wake up
-    os::memory::ConditionVariable cond_;
+    ConditionVariable cond_;

    // Managed Arraybuffer or SharedArrayBuffer memory data
    void *date_ {nullptr};
@ -88,20 +85,19 @@ private:
    Singleton() = default;
};

-class SCOPED_CAPABILITY MutexGuard
-{
+class MutexGuard {
public:
    explicit MutexGuard(Mutex *mutex) : mutex_(mutex), lockHolder_(*mutex) {}
-    void Unlock() RELEASE()
+    void Unlock()
    {
        mutex_->Unlock();
    }

-    void Lock() ACQUIRE()
+    void Lock()
    {
        mutex_->Lock();
    }

private:
    Mutex *mutex_;
    LockHolder lockHolder_;
};
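SCOPED_CAPABILITY, ACQUIRE() and RELEASE() are clang thread-safety annotation macros used with the old os::memory types; the platform Mutex does not provide them, so MutexGuard drops the annotations while keeping the same behaviour: construction locks through the lockHolder_ member, and Unlock()/Lock() release and re-take the same mutex mid-scope. A hypothetical caller (include path assumed, names invented):

// Hypothetical caller of MutexGuard; the include path is assumed.
#include "ecmascript/waiter_list.h"

using namespace panda::ecmascript;

static Mutex g_listLock;

static void Example()
{
    MutexGuard guard(&g_listLock);   // lockHolder_ locks on construction
    // ... mutate the waiter list ...
    guard.Unlock();                  // release before a call that must not hold the lock
    // ... blocking call ...
    guard.Lock();                    // re-acquire before touching shared state again
}                                    // lockHolder_'s destructor unlocks at scope exit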
@ -81,6 +81,13 @@
    panda::os::unix::memory::*;
    panda::ecmascript::JSHClass::*;
    panda::ecmascript::EcmaHandleScope::*;
+    panda::ecmascript::Mutex::*;
+    panda::ecmascript::RecursiveMutex::*;
+    panda::ecmascript::RWLock::*;
+    panda::ecmascript::ConditionVariable::*;
+    panda::ecmascript::LockHolder::*;
+    panda::ecmascript::ReadLockHolder::*;
+    panda::ecmascript::WriteLockHolder::*;
};
extern "C" {
    get_ark_js_heap_crash_info;