2018-01-03 05:16:05 +00:00
|
|
|
/*
|
2022-10-23 02:55:20 +00:00
|
|
|
* Copyright (C) 2017-2020 Apple Inc. All rights reserved.
|
2018-01-03 05:16:05 +00:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
|
|
|
|
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
|
|
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
|
|
|
|
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
|
|
|
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
|
|
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
|
|
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
|
|
|
|
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "config.h"
|
|
|
|
#include "VMTraps.h"
|
|
|
|
|
2022-10-23 02:55:20 +00:00
|
|
|
#include "CallFrameInlines.h"
|
2018-01-03 05:16:05 +00:00
|
|
|
#include "CodeBlock.h"
|
|
|
|
#include "CodeBlockSet.h"
|
|
|
|
#include "DFGCommonData.h"
|
|
|
|
#include "ExceptionHelpers.h"
|
|
|
|
#include "HeapInlines.h"
|
2022-10-23 02:55:20 +00:00
|
|
|
#include "JSCJSValueInlines.h"
|
2018-01-03 05:16:05 +00:00
|
|
|
#include "LLIntPCRanges.h"
|
|
|
|
#include "MachineContext.h"
|
2020-08-29 13:27:11 +00:00
|
|
|
#include "MacroAssemblerCodeRef.h"
|
2018-01-03 05:16:05 +00:00
|
|
|
#include "VM.h"
|
|
|
|
#include "Watchdog.h"
|
|
|
|
#include <wtf/ProcessID.h>
|
|
|
|
#include <wtf/ThreadMessage.h>
|
|
|
|
#include <wtf/threads/Signals.h>
|
|
|
|
|
|
|
|
namespace JSC {
|
|
|
|
|
|
|
|
// Recovers the owning VM from this VMTraps instance. VMTraps is embedded in VM
// as its m_traps member, so subtracting the member's offset from this object's
// address yields the enclosing VM.
ALWAYS_INLINE VM& VMTraps::vm() const
{
    return *bitwise_cast<VM*>(bitwise_cast<uintptr_t>(this) - OBJECT_OFFSETOF(VM, m_traps));
}
|
|
|
|
|
|
|
|
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
|
|
|
|
|
|
|
|
// Snapshot of the interrupted thread's register state, captured when the
// mutator is suspended by a thread message or signal. Bundles the raw
// PlatformRegisters with the extracted PC/SP/FP so stack walking below does
// not have to re-query MachineContext.
struct SignalContext {
private:
    SignalContext(PlatformRegisters& registers, MacroAssemblerCodePtr<PlatformRegistersPCPtrTag> trapPC)
        : registers(registers)
        , trapPC(trapPC)
        , stackPointer(MachineContext::stackPointer(registers))
        , framePointer(MachineContext::framePointer(registers))
    { }

public:
    // Returns WTF::nullopt when the instruction pointer cannot be extracted
    // from the register set; otherwise builds a full context snapshot.
    static Optional<SignalContext> tryCreate(PlatformRegisters& registers)
    {
        auto instructionPointer = MachineContext::instructionPointer(registers);
        if (!instructionPointer)
            return WTF::nullopt;
        return SignalContext(registers, *instructionPointer);
    }

    PlatformRegisters& registers; // Borrowed; must not outlive the signal/message scope.
    MacroAssemblerCodePtr<PlatformRegistersPCPtrTag> trapPC; // PC at the moment of interruption.
    void* stackPointer;
    void* framePointer;
};
|
|
|
|
|
|
|
|
// A VM counts as inactive when it has no entry scope (it has not entered JS
// execution) and no thread currently owns it.
inline static bool vmIsInactive(VM& vm)
{
    bool hasEntryScope = !!vm.entryScope;
    bool hasOwner = !!vm.ownerThread();
    return !hasEntryScope && !hasOwner;
}
|
|
|
|
|
2020-08-29 13:27:11 +00:00
|
|
|
// Sanity-checks a candidate call frame discovered while walking a suspended
// thread's stack: it must sit strictly below its entry frame, strictly above
// the callee frame that led to it, and inside the thread's stack bounds.
static bool isSaneFrame(CallFrame* frame, CallFrame* calleeFrame, EntryFrame* entryFrame, StackBounds stackBounds)
{
    bool belowEntryFrame = reinterpret_cast<void*>(frame) < reinterpret_cast<void*>(entryFrame);
    bool aboveCalleeFrame = calleeFrame < frame;
    return belowEntryFrame && aboveCalleeFrame && stackBounds.contains(frame);
}
|
|
|
|
|
|
|
|
// Runs on the SignalSender thread while the mutator thread is suspended.
// Walks the mutator's stack (using the captured frame pointer) to find the
// CodeBlock currently executing; if it is optimizing-JIT code, installs
// breakpoints at its invalidation points so the mutator will trap and service
// the pending event. Bails out conservatively whenever the suspended thread's
// state cannot be trusted; the SignalSender will simply retry later.
void VMTraps::tryInstallTrapBreakpoints(SignalContext& context, StackBounds stackBounds)
{
    // This must be the initial signal to get the mutator thread's attention.
    // Let's get the thread to break at invalidation points if needed.
    VM& vm = this->vm();
    void* trapPC = context.trapPC.untaggedExecutableAddress();
    // We must ensure we're in JIT/LLint code. If we are, we know a few things:
    // - The JS thread isn't holding the malloc lock. Therefore, it's safe to malloc below.
    // - The JS thread isn't holding the CodeBlockSet lock.
    // If we're not in JIT/LLInt code, we can't run the C++ code below because it
    // mallocs, and we must prove the JS thread isn't holding the malloc lock
    // to be able to do that without risking a deadlock.
    if (!isJITPC(trapPC) && !LLInt::isLLIntPC(trapPC))
        return;

    CallFrame* callFrame = reinterpret_cast<CallFrame*>(context.framePointer);

    auto codeBlockSetLocker = holdLock(vm.heap.codeBlockSet().getLock());

    CodeBlock* foundCodeBlock = nullptr;
    EntryFrame* entryFrame = vm.topEntryFrame;

    // We don't have a callee to start with. So, use the end of the stack to keep the
    // isSaneFrame() checker below happy for the first iteration. It will still check
    // to ensure that the address is in the stackBounds.
    CallFrame* calleeFrame = reinterpret_cast<CallFrame*>(stackBounds.end());

    if (!entryFrame || !callFrame)
        return; // Not running JS code. Let the SignalSender try again later.

    do {
        // The mutator was interrupted asynchronously, so validate each frame
        // pointer before dereferencing it.
        if (!isSaneFrame(callFrame, calleeFrame, entryFrame, stackBounds))
            return; // Let the SignalSender try again later.

        CodeBlock* candidateCodeBlock = callFrame->unsafeCodeBlock();
        if (candidateCodeBlock && vm.heap.codeBlockSet().contains(codeBlockSetLocker, candidateCodeBlock)) {
            foundCodeBlock = candidateCodeBlock;
            break;
        }

        calleeFrame = callFrame;
        callFrame = callFrame->callerFrame(entryFrame);

    } while (callFrame && entryFrame);

    if (!foundCodeBlock) {
        // We may have just entered the frame and the codeBlock pointer is not
        // initialized yet. Just bail and let the SignalSender try again later.
        return;
    }

    if (JITCode::isOptimizingJIT(foundCodeBlock->jitType())) {
        // tryHoldLock (not holdLock): the suspended mutator could itself hold
        // m_lock; blocking on it here would deadlock.
        auto locker = tryHoldLock(*m_lock);
        if (!locker)
            return; // Let the SignalSender try again later.

        if (!needTrapHandling()) {
            // Too late. Someone else already handled the trap.
            return;
        }

        if (!foundCodeBlock->hasInstalledVMTrapBreakpoints())
            foundCodeBlock->installVMTrapBreakpoints();
        return;
    }
}
|
|
|
|
|
|
|
|
// Convenience overload: invalidates optimized CodeBlocks on the owning VM's
// current stack, starting at its topCallFrame.
void VMTraps::invalidateCodeBlocksOnStack()
{
    invalidateCodeBlocksOnStack(vm().topCallFrame);
}
|
|
|
|
|
2022-10-23 02:55:20 +00:00
|
|
|
// Acquires the CodeBlockSet lock, then delegates to the locked overload to
// invalidate optimized CodeBlocks on the stack rooted at topCallFrame.
void VMTraps::invalidateCodeBlocksOnStack(CallFrame* topCallFrame)
{
    auto codeBlockSetLocker = holdLock(vm().heap.codeBlockSet().getLock());
    invalidateCodeBlocksOnStack(codeBlockSetLocker, topCallFrame);
}
|
|
|
|
|
2022-10-23 02:55:20 +00:00
|
|
|
// Jettisons every optimizing-JIT CodeBlock on the stack rooted at topCallFrame.
// The unnamed Locker parameter witnesses that the caller holds the
// CodeBlockSet lock. m_needToInvalidatedCodeBlocks is cleared up front so the
// walk happens at most once per fireTrap() request.
void VMTraps::invalidateCodeBlocksOnStack(Locker<Lock>&, CallFrame* topCallFrame)
{
    if (!m_needToInvalidatedCodeBlocks)
        return;
    m_needToInvalidatedCodeBlocks = false;

    EntryFrame* currentEntryFrame = vm().topEntryFrame;
    if (!currentEntryFrame)
        return; // Not running JS code. Nothing to invalidate.

    for (CallFrame* frame = topCallFrame; frame; frame = frame->callerFrame(currentEntryFrame)) {
        CodeBlock* currentBlock = frame->codeBlock();
        if (currentBlock && JITCode::isOptimizingJIT(currentBlock->jitType()))
            currentBlock->jettison(Profiler::JettisonDueToVMTraps);
    }
}
|
|
|
|
|
|
|
|
// Background AutomaticThread that repeatedly suspends the mutator (via a
// thread message) and attempts to install VM trap breakpoints until the
// pending trap is handled. Its initializeSignals() also registers the
// process-wide AccessFault handler that fires when an installed breakpoint is
// actually hit, and jettisons the affected CodeBlocks.
class VMTraps::SignalSender final : public AutomaticThread {
public:
    using Base = AutomaticThread;
    // Shares VMTraps' lock and condition with the AutomaticThread base so that
    // fireTrap()/willDestroyVM() can wake or stop this thread.
    SignalSender(const AbstractLocker& locker, VM& vm)
        : Base(locker, vm.traps().m_lock, vm.traps().m_condition.copyRef())
        , m_vm(vm)
    {
        activateSignalHandlersFor(Signal::AccessFault);
    }

    // Registers the AccessFault handler exactly once per process.
    static void initializeSignals()
    {
        static std::once_flag once;
        std::call_once(once, [] {
            addSignalHandler(Signal::AccessFault, [] (Signal, SigInfo&, PlatformRegisters& registers) -> SignalAction {
                auto signalContext = SignalContext::tryCreate(registers);
                if (!signalContext)
                    return SignalAction::NotHandled;

                // Only faults whose PC lies in JIT code can be our trap
                // breakpoints; anything else is not ours to handle.
                void* trapPC = signalContext->trapPC.untaggedExecutableAddress();
                if (!isJITPC(trapPC))
                    return SignalAction::NotHandled;

                CodeBlock* currentCodeBlock = DFG::codeBlockForVMTrapPC(trapPC);
                if (!currentCodeBlock) {
                    // Either we trapped for some other reason, e.g. Wasm OOB, or we didn't properly monitor the PC. Regardless, we can't do much now...
                    return SignalAction::NotHandled;
                }
                ASSERT(currentCodeBlock->hasInstalledVMTrapBreakpoints());
                VM& vm = currentCodeBlock->vm();

                // We are in JIT code so it's safe to acquire this lock.
                auto codeBlockSetLocker = holdLock(vm.heap.codeBlockSet().getLock());
                bool sawCurrentCodeBlock = false;
                vm.heap.forEachCodeBlockIgnoringJITPlans(codeBlockSetLocker, [&] (CodeBlock* codeBlock) {
                    // We want to jettison all code blocks that have vm traps breakpoints, otherwise we could hit them later.
                    if (codeBlock->hasInstalledVMTrapBreakpoints()) {
                        if (currentCodeBlock == codeBlock)
                            sawCurrentCodeBlock = true;

                        codeBlock->jettison(Profiler::JettisonDueToVMTraps);
                    }
                });
                RELEASE_ASSERT(sawCurrentCodeBlock);

                return SignalAction::Handled; // We've successfully jettisoned the codeBlocks.
            });
        });
    }

    const char* name() const final
    {
        return "JSC VMTraps Signal Sender Thread";
    }

    VMTraps& traps() { return m_vm.traps(); }

private:
    // Called with the shared lock held by the AutomaticThread machinery.
    PollResult poll(const AbstractLocker&) final
    {
        if (traps().m_isShuttingDown)
            return PollResult::Stop;

        if (!traps().needTrapHandling())
            return PollResult::Wait;

        // We know that no trap could have been processed and re-added because we are holding the lock.
        if (vmIsInactive(m_vm))
            return PollResult::Wait;
        return PollResult::Work;
    }

    // One attempt: suspend the mutator, try to install trap breakpoints, then
    // wait briefly before the next poll/work cycle.
    WorkResult work() final
    {
        VM& vm = m_vm;

        auto optionalOwnerThread = vm.ownerThread();
        if (optionalOwnerThread) {
            sendMessage(*optionalOwnerThread.value().get(), [&] (PlatformRegisters& registers) -> void {
                auto signalContext = SignalContext::tryCreate(registers);
                if (!signalContext)
                    return;

                auto ownerThread = vm.apiLock().ownerThread();
                // We can't mess with a thread unless it's the one we suspended.
                if (!ownerThread || ownerThread != optionalOwnerThread)
                    return;

                Thread& thread = *ownerThread->get();
                vm.traps().tryInstallTrapBreakpoints(*signalContext, thread.stack());
            });
        }

        {
            auto locker = holdLock(*traps().m_lock);
            if (traps().m_isShuttingDown)
                return WorkResult::Stop;
            // Short timed wait: retry soon even if nobody notifies us, since
            // the breakpoint install above may have failed and needs retrying.
            traps().m_condition->waitFor(*traps().m_lock, 1_ms);
        }
        return WorkResult::Continue;
    }

    VM& m_vm;
};
|
|
|
|
|
|
|
|
#endif // ENABLE(SIGNAL_BASED_VM_TRAPS)
|
|
|
|
|
2022-10-23 02:55:20 +00:00
|
|
|
// One-time process setup for signal-based VM traps. A no-op when polling traps
// are in use; signal-based traps require the JIT (asserted below).
void VMTraps::initializeSignals()
{
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    if (!Options::usePollingTraps()) {
        ASSERT(Options::useJIT());
        SignalSender::initializeSignals();
    }
#endif
}
|
|
|
|
|
2018-01-03 05:16:05 +00:00
|
|
|
// Shutdown handshake: marks this VMTraps as shutting down and, if a
// SignalSender thread exists, stops it (or wakes it so it observes
// m_isShuttingDown) and joins it before the VM is torn down.
void VMTraps::willDestroyVM()
{
    m_isShuttingDown = true;
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    if (m_signalSender) {
        {
            auto locker = holdLock(*m_lock);
            // If the thread is mid-work and can't be stopped synchronously,
            // notify it so its next wait wakes up and sees the shutdown flag.
            if (!m_signalSender->tryStop(locker))
                m_condition->notifyAll(locker);
        }
        m_signalSender->join();
        m_signalSender = nullptr;
    }
#endif
}
|
|
|
|
|
|
|
|
// Requests that the mutator thread service an event of the given type at its
// next trap check. Must be called from a thread that does not hold the API
// lock (asserted). With signal-based traps, also (lazily) starts and wakes the
// SignalSender thread, which delivers the request asynchronously.
void VMTraps::fireTrap(VMTraps::EventType eventType)
{
    ASSERT(!vm().currentThreadIsHoldingAPILock());
    {
        auto locker = holdLock(*m_lock);
        ASSERT(!m_isShuttingDown);
        setTrapForEvent(locker, eventType);
        // Ensure optimized code on the stack is invalidated when the trap is
        // handled, so the mutator reaches a checkpoint.
        m_needToInvalidatedCodeBlocks = true;
    }

#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    if (!Options::usePollingTraps()) {
        // sendSignal() can loop until it has confirmation that the mutator thread
        // has received the trap request. We'll call it from another thread so that
        // fireTrap() does not block.
        auto locker = holdLock(*m_lock);
        if (!m_signalSender)
            m_signalSender = adoptRef(new SignalSender(locker, vm()));
        m_condition->notifyAll(locker);
    }
#endif
}
|
|
|
|
|
2022-10-23 02:55:20 +00:00
|
|
|
// Runs on the mutator thread when it reaches a trap check. First jettisons any
// CodeBlocks that had trap breakpoints installed (so they aren't hit again),
// then drains and dispatches every pending trap event matching mask.
void VMTraps::handleTraps(JSGlobalObject* globalObject, CallFrame* callFrame, VMTraps::Mask mask)
{
    VM& vm = this->vm();
    auto scope = DECLARE_THROW_SCOPE(vm);

    {
        auto codeBlockSetLocker = holdLock(vm.heap.codeBlockSet().getLock());
        vm.heap.forEachCodeBlockIgnoringJITPlans(codeBlockSetLocker, [&] (CodeBlock* codeBlock) {
            // We want to jettison all code blocks that have vm traps breakpoints, otherwise we could hit them later.
            if (codeBlock->hasInstalledVMTrapBreakpoints())
                codeBlock->jettison(Profiler::JettisonDueToVMTraps);
        });
    }

    ASSERT(needTrapHandling(mask));
    while (needTrapHandling(mask)) {
        auto eventType = takeTopPriorityTrap(mask);
        switch (eventType) {
        case NeedDebuggerBreak:
            dataLog("VM ", RawPointer(&vm), " on pid ", getCurrentProcessID(), " received NeedDebuggerBreak trap\n");
            invalidateCodeBlocksOnStack(callFrame);
            break;

        case NeedShellTimeoutCheck:
            RELEASE_ASSERT(g_jscConfig.shellTimeoutCheckCallback);
            g_jscConfig.shellTimeoutCheckCallback(vm);
            break;

        case NeedWatchdogCheck:
            ASSERT(vm.watchdog());
            if (LIKELY(!vm.watchdog()->shouldTerminate(globalObject)))
                continue;
            // Watchdog expired: fall through and terminate execution.
            FALLTHROUGH;

        case NeedTermination:
            throwException(globalObject, scope, createTerminatedExecutionException(&vm));
            return;

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
}
|
|
|
|
|
|
|
|
// Removes and returns the highest-priority pending trap event matching mask,
// or Invalid when none is pending. Event types with lower enum values are
// scanned first and therefore take priority.
auto VMTraps::takeTopPriorityTrap(VMTraps::Mask mask) -> EventType
{
    auto locker = holdLock(*m_lock);
    int index = 0;
    while (index < NumberOfEventTypes) {
        auto candidate = static_cast<EventType>(index);
        if (hasTrapForEvent(locker, candidate, mask)) {
            clearTrapForEvent(locker, candidate);
            return candidate;
        }
        ++index;
    }
    return Invalid;
}
|
|
|
|
|
|
|
|
// The lock and condition are heap-allocated (Box<Lock> / RefPtr) because they
// are shared with the SignalSender AutomaticThread via copyRef().
VMTraps::VMTraps()
    : m_lock(Box<Lock>::create())
    , m_condition(AutomaticThreadCondition::create())
{
}
|
|
|
|
|
|
|
|
VMTraps::~VMTraps()
{
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    // willDestroyVM() must already have stopped, joined, and released the
    // SignalSender thread before this destructor runs.
    ASSERT(!m_signalSender);
#endif
}
|
|
|
|
|
|
|
|
} // namespace JSC
|