Mirror of https://gitee.com/openharmony/arkcompiler_runtime_core (synced 2025-04-17 10:00:42 +00:00)

Fix codecheck runtime and platforms

Issue: #IAIB9Z
Signed-off-by: Sergey Khil <khil.sergey@huawei.com>

This commit is contained in:
parent e9c85f30a0
commit cf8c323774
@@ -18,6 +18,7 @@
#include "utils/logger.h"
#include "utils/type_helpers.h"

#include <cstdint>
#include <cstring>
#include <cerrno>
#include <ctime>
@@ -129,6 +130,24 @@ RWLock::~RWLock()
#endif  // PANDA_TARGET_MOBILE
}

void RWLock::FutexWait(int32_t curState)
{
    IncrementWaiters();
    // Retry the wait until the lock is no longer held. If we have more than one reader, a failed curState
    // check does not mean this lock is unlocked.
    while (curState != UNLOCKED) {
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, curState, nullptr, nullptr, 0) != 0) {
            if ((errno != EAGAIN) && (errno != EINTR)) {
                LOG(FATAL, COMMON) << "Futex wait failed!";
            }
        }
        // Atomic with relaxed order reason: mutex synchronization
        curState = state_.load(std::memory_order_relaxed);
    }
    DecrementWaiters();
}

void RWLock::WriteLock()
{
    if (current_tid == 0) {
@@ -147,20 +166,7 @@ void RWLock::WriteLock()
        if (!WaitBrieflyFor(&state_, [](int32_t state) { return state == UNLOCKED; })) {
            // WaitBrieflyFor failed, go to futex wait
            // Increment waiters count.
            IncrementWaiters();
            // Retry the wait until the lock is no longer held. If we have more than one reader, a failed curState
            // check does not mean this lock is unlocked.
            while (curState != UNLOCKED) {
                // NOLINTNEXTLINE(hicpp-signed-bitwise)
                if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, curState, nullptr, nullptr, 0) != 0) {
                    if ((errno != EAGAIN) && (errno != EINTR)) {
                        LOG(FATAL, COMMON) << "Futex wait failed!";
                    }
                }
                // Atomic with relaxed order reason: mutex synchronization
                curState = state_.load(std::memory_order_relaxed);
            }
            DecrementWaiters();
            FutexWait(curState);
        }
    }
}
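For context, the retry loop extracted into FutexWait() follows the usual Linux futex protocol: FUTEX_WAIT only blocks while the lock word still equals the expected value, and it can return early with EAGAIN (the word changed) or EINTR (a signal arrived), so the caller re-reads the state and decides again. Below is a minimal standalone sketch of that wait/wake pattern, assuming Linux and raw syscall(2); the names g_state, FutexWord and WaitUntilUnlocked are illustrative and are not the Panda futex()/GetStateAddr() helpers.

#include <atomic>
#include <cerrno>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static constexpr int32_t UNLOCKED = 0;

// Hypothetical lock word; in the RWLock above this role is played by state_.
static std::atomic<int32_t> g_state {1};

static long FutexWord(std::atomic<int32_t> *addr, int op, int32_t val)
{
    // FUTEX_WAIT_PRIVATE blocks only while *addr == val; no timeout is passed.
    return syscall(SYS_futex, reinterpret_cast<int32_t *>(addr), op, val, nullptr, nullptr, 0);
}

static void WaitUntilUnlocked(int32_t curState)
{
    while (curState != UNLOCKED) {
        if (FutexWord(&g_state, FUTEX_WAIT_PRIVATE, curState) != 0) {
            // EAGAIN: the word no longer equals curState; EINTR: a signal interrupted the wait.
            if (errno != EAGAIN && errno != EINTR) {
                std::perror("futex wait failed");
                return;
            }
        }
        // A wakeup does not guarantee the lock is free: re-read and retry.
        curState = g_state.load(std::memory_order_relaxed);
    }
}

int main()
{
    std::thread waker([] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        g_state.store(UNLOCKED, std::memory_order_relaxed);
        FutexWord(&g_state, FUTEX_WAKE_PRIVATE, 1);  // wake one blocked waiter
    });
    WaitUntilUnlocked(g_state.load(std::memory_order_relaxed));
    std::puts("lock word observed as UNLOCKED");
    waker.join();
    return 0;
}

Build on Linux with something like clang++ -std=c++17 -pthread futex_sketch.cpp. The relaxed re-read mirrors the diff above, where the source comment attributes the ordering guarantees to the mutex synchronization itself rather than to this load.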
@@ -165,6 +165,7 @@ public:
        }
    }

    void FutexWait(int32_t curState) ACQUIRE();
    PANDA_PUBLIC_API void WriteLock() ACQUIRE();

    PANDA_PUBLIC_API bool TryReadLock() TRY_ACQUIRE_SHARED(true);
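The ACQUIRE()/TRY_ACQUIRE_SHARED() markers on the declarations above are presumably wrappers over Clang's thread-safety-analysis attributes, which is why the new FutexWait() declaration carries the same annotation as WriteLock(). A minimal sketch of how such macros are commonly defined and checked, assuming Clang with -Wthread-safety; RWLockSketch, CAPABILITY and Demo are illustrative names, not the Panda definitions.

// Sketch only: compile with  clang++ -std=c++17 -Wthread-safety -c tsa_sketch.cpp
#include <shared_mutex>

#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#define TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define ACQUIRE(...)
#define RELEASE(...)
#define TRY_ACQUIRE_SHARED(...)
#endif

// Illustrative lock type, not the Panda RWLock.
class CAPABILITY("mutex") RWLockSketch {
public:
    void WriteLock() ACQUIRE() { mtx_.lock(); }
    void Unlock() RELEASE() { mtx_.unlock(); }
    bool TryReadLock() TRY_ACQUIRE_SHARED(true) { return mtx_.try_lock_shared(); }

private:
    std::shared_mutex mtx_;
};

void Demo(RWLockSketch &lock)
{
    lock.WriteLock();
    // Removing the Unlock() below makes -Wthread-safety report that
    // 'lock' is still held at the end of the function.
    lock.Unlock();
}

Keeping the annotation on the new helper means callers that go through FutexWait() on the acquisition path stay visible to the same static analysis.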
@@ -15,8 +15,12 @@

#include "runtime/deoptimization.h"

#include "include/cframe.h"
#include "include/managed_thread.h"
#include "include/stack_walker.h"
#include "libpandabase/events/events.h"
#include "libpandafile/file_items.h"
#include "macros.h"
#include "runtime/include/locks.h"
#include "runtime/include/runtime.h"
#include "runtime/include/panda_vm.h"
@@ -115,68 +119,9 @@ void InvalidateCompiledEntryPoint(const PandaSet<Method *> &methods, bool isCha)
    }
}

[[noreturn]] NO_ADDRESS_SANITIZE void Deoptimize(StackWalker *stack, const uint8_t *pc, bool hasException,
                                                 Method *destroyMethod)
void PrevFrameDeopt(FrameKind prevFrameKind, ManagedThread *thread, StackWalker *stack, const uint8_t *pc,
                    Frame *lastIframe, Frame *iframe, CFrame &cframe)
{
    ASSERT(stack != nullptr);
    auto *thread = ManagedThread::GetCurrent();
    ASSERT(thread != nullptr);
    ASSERT(stack->IsCFrame());
    auto &cframe = stack->GetCFrame();
    UnpoisonAsanStack(cframe.GetFrameOrigin());
    auto method = stack->GetMethod();
    if (pc == nullptr) {
        ASSERT(method != nullptr);
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        pc = method->GetInstructions() + stack->GetBytecodePc();
    }

    LOG(INFO, INTEROP) << "Deoptimize frame: " << method->GetFullName() << ", pc=" << std::hex
                       << pc - method->GetInstructions() << std::dec;

    thread->GetVM()->ClearInteropHandleScopes(thread->GetCurrentFrame());

    auto context = thread->GetVM()->GetLanguageContext();
    // We must run InvalidateCompiledEntryPoint before we convert the frame, because the GC may already be in the
    // collecting phase and could move objects in the deoptimized frame.
    if (destroyMethod != nullptr) {
        LOG(DEBUG, INTEROP) << "Destroy compiled method: " << destroyMethod->GetFullName();
        destroyMethod->SetDestroyed();
        PandaSet<Method *> destroyMethods;
        destroyMethods.insert(destroyMethod);
        InvalidateCompiledEntryPoint(destroyMethods, false);
    }

    FrameKind prevFrameKind;
    // We need to execute (find the catch block) in all inlined methods, so we calculate the number of inlined
    // methods. Otherwise we could execute previous interpreter frames and call FreeFrames in the wrong order.
    uint32_t numInlinedMethods = 0;
    Frame *iframe = stack->ConvertToIFrame(&prevFrameKind, &numInlinedMethods);
    ASSERT(iframe != nullptr);

    Frame *lastIframe = iframe;
    while (numInlinedMethods-- != 0) {
        EVENT_METHOD_EXIT(lastIframe->GetMethod()->GetFullName() + "(deopt)", events::MethodExitKind::INLINED,
                          thread->RecordMethodExit());
        lastIframe = lastIframe->GetPrevFrame();
        ASSERT(!StackWalker::IsBoundaryFrame<FrameKind::INTERPRETER>(lastIframe));
    }

    EVENT_METHOD_EXIT(lastIframe->GetMethod()->GetFullName() + "(deopt)", events::MethodExitKind::COMPILED,
                      thread->RecordMethodExit());

    if (thread->HasPendingException()) {
        LOG(DEBUG, INTEROP) << "Deoptimization has pending exception: "
                            << thread->GetException()->ClassAddr<Class>()->GetName();
        context.SetExceptionToVReg(iframe->GetAcc(), thread->GetException());
    }

    if (!hasException) {
        thread->ClearException();
    } else {
        ASSERT(thread->HasPendingException());
    }

    switch (prevFrameKind) {
        case FrameKind::COMPILER:
            LOG(DEBUG, INTEROP) << "Deoptimize after cframe";
@@ -209,6 +154,76 @@ void InvalidateCompiledEntryPoint(const PandaSet<Method *> &methods, bool isCha)
            DeoptimizeAfterIFrame(thread, pc, iframe, cframe.GetFrameOrigin(), lastIframe,
                                  stack->GetCalleeRegsForDeoptimize().end());
    }
}

NO_ADDRESS_SANITIZE void DestroyMethodWithInvalidatingEP(Method *destroyMethod)
{
    LOG(DEBUG, INTEROP) << "Destroy compiled method: " << destroyMethod->GetFullName();
    destroyMethod->SetDestroyed();
    PandaSet<Method *> destroyMethods;
    destroyMethods.insert(destroyMethod);
    InvalidateCompiledEntryPoint(destroyMethods, false);
}

[[noreturn]] NO_ADDRESS_SANITIZE void Deoptimize(StackWalker *stack, const uint8_t *pc, bool hasException,
                                                 Method *destroyMethod)
{
    ASSERT(stack != nullptr);
    auto *thread = ManagedThread::GetCurrent();
    ASSERT(thread != nullptr);
    ASSERT(stack->IsCFrame());
    auto &cframe = stack->GetCFrame();
    UnpoisonAsanStack(cframe.GetFrameOrigin());
    auto method = stack->GetMethod();
    if (pc == nullptr) {
        ASSERT(method != nullptr);
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        pc = method->GetInstructions() + stack->GetBytecodePc();
    }

    LOG(INFO, INTEROP) << "Deoptimize frame: " << method->GetFullName() << ", pc=" << std::hex
                       << pc - method->GetInstructions() << std::dec;

    thread->GetVM()->ClearInteropHandleScopes(thread->GetCurrentFrame());

    auto context = thread->GetVM()->GetLanguageContext();
    // We must run InvalidateCompiledEntryPoint before we convert the frame, because the GC may already be in the
    // collecting phase and could move objects in the deoptimized frame.
    if (destroyMethod != nullptr) {
        DestroyMethodWithInvalidatingEP(destroyMethod);
    }

    FrameKind prevFrameKind;
    // We need to execute (find the catch block) in all inlined methods, so we calculate the number of inlined
    // methods. Otherwise we could execute previous interpreter frames and call FreeFrames in the wrong order.
    uint32_t numInlinedMethods = 0;
    Frame *iframe = stack->ConvertToIFrame(&prevFrameKind, &numInlinedMethods);
    ASSERT(iframe != nullptr);

    Frame *lastIframe = iframe;
    while (numInlinedMethods-- != 0) {
        EVENT_METHOD_EXIT(lastIframe->GetMethod()->GetFullName() + "(deopt)", events::MethodExitKind::INLINED,
                          thread->RecordMethodExit());
        lastIframe = lastIframe->GetPrevFrame();
        ASSERT(!StackWalker::IsBoundaryFrame<FrameKind::INTERPRETER>(lastIframe));
    }

    EVENT_METHOD_EXIT(lastIframe->GetMethod()->GetFullName() + "(deopt)", events::MethodExitKind::COMPILED,
                      thread->RecordMethodExit());

    if (thread->HasPendingException()) {
        LOG(DEBUG, INTEROP) << "Deoptimization has pending exception: "
                            << thread->GetException()->ClassAddr<Class>()->GetName();
        context.SetExceptionToVReg(iframe->GetAcc(), thread->GetException());
    }

    if (!hasException) {
        thread->ClearException();
    } else {
        ASSERT(thread->HasPendingException());
    }

    PrevFrameDeopt(prevFrameKind, thread, stack, pc, lastIframe, iframe, cframe);
    UNREACHABLE();
}
@@ -385,7 +385,6 @@ Type ArkHotreloadBase::MethodChangesCheck(ClassContainment *hCls)

    auto oldMethods = runtimeClass->GetMethods();
    auto newMethods = tmpClass->GetMethods();

    if (newMethods.size() > oldMethods.size() ||
        tmpClass->GetNumVirtualMethods() > runtimeClass->GetNumVirtualMethods()) {
        hCls->fChanges |= ChangesFlags::F_METHOD_ADDED;
@@ -654,4 +653,4 @@ void ArkHotreloadBase::AddObsoleteClassesToRuntime(ClassLinker *classLinker)
    classes_.clear();
}

}  // namespace ark::hotreload
}  // namespace ark::hotreload
@@ -601,19 +601,17 @@ Frame *StackWalker::ConvertToIFrame(FrameKind *prevFrameKind, uint32_t *numInlin
    if (prev == nullptr) {
        *prevFrameKind = FrameKind::NONE;
        prevFrame = nullptr;
    } else if (IsCompilerBoundFrame(prev)) {
        isInvoke = true;
        prevFrame =
            reinterpret_cast<Frame *>(StackWalker::GetPrevFromBoundary<FrameKind::COMPILER>(cframe.GetPrevFrame()));
        if (prevFrameKind != nullptr) {
            *prevFrameKind = FrameKind::INTERPRETER;
        }
    } else {
        if (IsCompilerBoundFrame(prev)) {
            isInvoke = true;
            prevFrame = reinterpret_cast<Frame *>(
                StackWalker::GetPrevFromBoundary<FrameKind::COMPILER>(cframe.GetPrevFrame()));
            if (prevFrameKind != nullptr) {
                *prevFrameKind = FrameKind::INTERPRETER;
            }
        } else {
            prevFrame = cframe.GetPrevFrame();
            if (prevFrameKind != nullptr) {
                *prevFrameKind = FrameKind::COMPILER;
            }
        prevFrame = cframe.GetPrevFrame();
        if (prevFrameKind != nullptr) {
            *prevFrameKind = FrameKind::COMPILER;
        }
        }
    }
@@ -97,6 +97,39 @@ inline T InvokeEntryPoint(Method *method)
    return GetInvokeHelper<T>()(gprData.data(), fprData.data(), stack.data(), 0, thread);
}

inline void CountMethodTypes(panda_file::ShortyIterator &it, arch::ArgCounter<RUNTIME_ARCH> &counter)
{
    switch ((*it).GetId()) {
        case panda_file::Type::TypeId::U1:
        case panda_file::Type::TypeId::U8:
        case panda_file::Type::TypeId::I8:
        case panda_file::Type::TypeId::I16:
        case panda_file::Type::TypeId::U16:
        case panda_file::Type::TypeId::I32:
        case panda_file::Type::TypeId::U32:
            counter.Count<int32_t>();
            break;
        case panda_file::Type::TypeId::F32:
            counter.Count<float>();
            break;
        case panda_file::Type::TypeId::F64:
            counter.Count<double>();
            break;
        case panda_file::Type::TypeId::I64:
        case panda_file::Type::TypeId::U64:
            counter.Count<int64_t>();
            break;
        case panda_file::Type::TypeId::REFERENCE:
            counter.Count<ObjectHeader *>();
            break;
        case panda_file::Type::TypeId::TAGGED:
            counter.Count<coretypes::TaggedValue>();
            break;
        default:
            UNREACHABLE();
    }
}

template <typename T, typename... Args>
inline T InvokeEntryPoint(Method *method, Args... args)
{
@@ -108,35 +141,7 @@ inline T InvokeEntryPoint(Method *method, Args... args)
    panda_file::ShortyIterator it(method->GetShorty());
    ++it;  // skip return type
    while (it != panda_file::ShortyIterator()) {
        switch ((*it).GetId()) {
            case panda_file::Type::TypeId::U1:
            case panda_file::Type::TypeId::U8:
            case panda_file::Type::TypeId::I8:
            case panda_file::Type::TypeId::I16:
            case panda_file::Type::TypeId::U16:
            case panda_file::Type::TypeId::I32:
            case panda_file::Type::TypeId::U32:
                counter.Count<int32_t>();
                break;
            case panda_file::Type::TypeId::F32:
                counter.Count<float>();
                break;
            case panda_file::Type::TypeId::F64:
                counter.Count<double>();
                break;
            case panda_file::Type::TypeId::I64:
            case panda_file::Type::TypeId::U64:
                counter.Count<int64_t>();
                break;
            case panda_file::Type::TypeId::REFERENCE:
                counter.Count<ObjectHeader *>();
                break;
            case panda_file::Type::TypeId::TAGGED:
                counter.Count<coretypes::TaggedValue>();
                break;
            default:
                UNREACHABLE();
        }
        CountMethodTypes(it, counter);
        ++it;
    }
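A helper like the extracted CountMethodTypes() only accumulates anything if the counter is passed by reference; a by-value copy would silently discard every Count() call and leave the caller's argument sizing at zero. A small self-contained sketch of the pattern, with a simplified TypeId and a hypothetical SlotCounter standing in for panda_file::Type::TypeId and arch::ArgCounter<RUNTIME_ARCH>.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-ins for panda_file::Type::TypeId and arch::ArgCounter<RUNTIME_ARCH>.
enum class TypeId { I32, F64, I64, REFERENCE };

class SlotCounter {
public:
    template <typename T>
    void Count()
    {
        slots_ += (sizeof(T) + 7U) / 8U;  // size arguments in 8-byte stack slots
    }
    size_t GetSlots() const
    {
        return slots_;
    }

private:
    size_t slots_ {0};
};

// The counter must be taken by reference: a copy would leave the caller's counter untouched.
void CountMethodType(TypeId id, SlotCounter &counter)
{
    switch (id) {
        case TypeId::I32:
            counter.Count<int32_t>();
            break;
        case TypeId::F64:
            counter.Count<double>();
            break;
        case TypeId::I64:
            counter.Count<int64_t>();
            break;
        case TypeId::REFERENCE:
            counter.Count<void *>();
            break;
    }
}

int main()
{
    // Analogue of walking a method shorty and sizing its arguments before the invoke.
    std::vector<TypeId> shorty {TypeId::I32, TypeId::F64, TypeId::REFERENCE};
    SlotCounter counter;
    for (TypeId id : shorty) {
        CountMethodType(id, counter);
    }
    std::printf("argument slots: %zu\n", counter.GetSlots());
    return 0;
}

Passing SlotCounter by value in CountMethodType() would still compile cleanly but always report zero slots, which is exactly the kind of silent regression the by-reference signature guards against.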