Bug 1288944 - Baldr: move the Instance* into TlsData (r=jolesen)

MozReview-Commit-ID: H0lsvvPIPmo

--HG--
extra : rebase_source : 730e0b01178453ab3a336c3beb1542397aa68e37
This commit is contained in:
Luke Wagner 2016-08-02 10:14:30 -05:00
parent 4d1575de95
commit 49ef4fdbc1
12 changed files with 114 additions and 96 deletions

View File

@ -91,12 +91,6 @@ Instance::addressOfContextPtr() const
return (JSContext**)(codeSegment().globalData() + ContextPtrGlobalDataOffset);
}
// Returns the slot in this instance's global data area that holds the
// back-pointer to the owning Instance (at InstancePtrGlobalDataOffset).
// NOTE(review): this accessor is being removed by this change — the
// Instance* now lives in TlsData instead of global data.
Instance**
Instance::addressOfInstancePtr() const
{
return (Instance**)(codeSegment().globalData() + InstancePtrGlobalDataOffset);
}
uint8_t**
Instance::addressOfMemoryBase() const
{
@ -300,7 +294,9 @@ Instance::Instance(JSContext* cx,
MOZ_ASSERT(tables_.length() == metadata().tables.length());
*addressOfContextPtr() = cx;
*addressOfInstancePtr() = this;
tlsData_.instance = this;
tlsData_.stackLimit = *(void**)cx->stackLimitAddressForJitCode(StackForUntrustedScript);
for (size_t i = 0; i < metadata().funcImports.length(); i++) {
const FuncImport& fi = metadata().funcImports[i];
@ -349,8 +345,6 @@ Instance::Instance(JSContext* cx,
for (size_t i = 0; i < tables_.length(); i++)
*addressOfTableBase(i) = tables_[i]->array();
updateStackLimit(cx);
}
bool
@ -416,13 +410,6 @@ Instance::memoryLength() const
return memory_->buffer().byteLength();
}
// Refreshes tlsData_.stackLimit with the current stack limit of cx's
// thread, so wasm function prologues check against an up-to-date bound.
// NOTE(review): removed by this change; the limit is now captured once in
// the Instance constructor.
void
Instance::updateStackLimit(JSContext* cx)
{
// Capture the stack limit for cx's thread. stackLimitAddressForJitCode
// returns the address of the limit word, which is dereferenced here.
tlsData_.stackLimit = *(void**)cx->stackLimitAddressForJitCode(StackForUntrustedScript);
}
bool
Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args)
{

View File

@ -109,9 +109,6 @@ class Instance
MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled);
// Update the instance's copy of the stack limit.
void updateStackLimit(JSContext*);
// about:memory reporting:
void addSizeOfMisc(MallocSizeOf mallocSizeOf,

View File

@ -672,7 +672,7 @@ class FunctionCompiler
if (inDeadCode())
return nullptr;
auto* cas = MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv);
auto* cas = MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv, tlsPointer_);
curBlock_->add(cas);
return cas;
}
@ -683,7 +683,7 @@ class FunctionCompiler
if (inDeadCode())
return nullptr;
auto* cas = MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value);
auto* cas = MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value, tlsPointer_);
curBlock_->add(cas);
return cas;
}
@ -695,7 +695,7 @@ class FunctionCompiler
if (inDeadCode())
return nullptr;
auto* binop = MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v);
auto* binop = MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v, tlsPointer_);
curBlock_->add(binop);
return binop;
}
@ -789,7 +789,6 @@ class FunctionCompiler
MAsmJSCall::Args regArgs_;
Vector<MAsmJSPassStackArg*, 0, SystemAllocPolicy> stackArgs_;
bool childClobbers_;
bool preservesTlsReg_;
friend class FunctionCompiler;
@ -798,8 +797,7 @@ class FunctionCompiler
: lineOrBytecode_(lineOrBytecode),
maxChildStackBytes_(0),
spIncrement_(0),
childClobbers_(false),
preservesTlsReg_(false)
childClobbers_(false)
{ }
};
@ -840,16 +838,6 @@ class FunctionCompiler
}
}
// Add the hidden TLS pointer argument to CallArgs, and assume that it will
// be preserved by the call.
bool passTlsPointer(CallArgs* args)
{
if (inDeadCode())
return true;
args->preservesTlsReg_ = true;
return args->regArgs_.append(MAsmJSCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_));
}
void propagateMaxStackArgBytes(uint32_t stackBytes)
{
if (callStack_.empty()) {
@ -865,13 +853,20 @@ class FunctionCompiler
outer->childClobbers_ = true;
}
void finishCallArgs(CallArgs* args)
enum class PassTls { False = false, True = true };
bool finishCallArgs(CallArgs* args, PassTls passTls)
{
MOZ_ALWAYS_TRUE(callStack_.popCopy() == args);
if (inDeadCode()) {
propagateMaxStackArgBytes(args->maxChildStackBytes_);
return;
return true;
}
if (passTls == PassTls::True) {
if (!args->regArgs_.append(MAsmJSCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_)))
return false;
}
uint32_t stackBytes = args->abi_.stackBytesConsumedSoFar();
@ -887,15 +882,14 @@ class FunctionCompiler
}
propagateMaxStackArgBytes(stackBytes);
return true;
}
private:
bool callPrivate(MAsmJSCall::Callee callee, const CallArgs& args, ExprType ret, MDefinition** def)
bool callPrivate(MAsmJSCall::Callee callee, MAsmJSCall::PreservesTlsReg preservesTlsReg,
const CallArgs& args, ExprType ret, MDefinition** def)
{
if (inDeadCode()) {
*def = nullptr;
return true;
}
MOZ_ASSERT(!inDeadCode());
CallSiteDesc::Kind kind = CallSiteDesc::Kind(-1);
switch (callee.which()) {
@ -906,7 +900,7 @@ class FunctionCompiler
MAsmJSCall* ins =
MAsmJSCall::New(alloc(), CallSiteDesc(args.lineOrBytecode_, kind), callee, args.regArgs_,
ToMIRType(ret), args.spIncrement_, args.preservesTlsReg_);
ToMIRType(ret), args.spIncrement_, preservesTlsReg);
if (!ins)
return false;
@ -918,7 +912,13 @@ class FunctionCompiler
public:
bool internalCall(const Sig& sig, uint32_t funcIndex, const CallArgs& args, MDefinition** def)
{
return callPrivate(MAsmJSCall::Callee(funcIndex), args, sig.ret(), def);
if (inDeadCode()) {
*def = nullptr;
return true;
}
return callPrivate(MAsmJSCall::Callee(funcIndex), MAsmJSCall::PreservesTlsReg::True, args,
sig.ret(), def);
}
bool funcPtrCall(uint32_t sigIndex, uint32_t length, uint32_t globalDataOffset,
@ -946,7 +946,8 @@ class FunctionCompiler
callee = MAsmJSCall::Callee(ptrFun, mg_.sigs[sigIndex].id);
}
return callPrivate(callee, args, mg_.sigs[sigIndex].ret(), def);
return callPrivate(callee, MAsmJSCall::PreservesTlsReg::True, args,
mg_.sigs[sigIndex].ret(), def);
}
bool ffiCall(unsigned globalDataOffset, const CallArgs& args, ExprType ret, MDefinition** def)
@ -959,12 +960,14 @@ class FunctionCompiler
MAsmJSLoadFFIFunc* ptrFun = MAsmJSLoadFFIFunc::New(alloc(), globalDataOffset);
curBlock_->add(ptrFun);
return callPrivate(MAsmJSCall::Callee(ptrFun), args, ret, def);
return callPrivate(MAsmJSCall::Callee(ptrFun), MAsmJSCall::PreservesTlsReg::False,
args, ret, def);
}
bool builtinCall(SymbolicAddress builtin, const CallArgs& args, ValType type, MDefinition** def)
{
return callPrivate(MAsmJSCall::Callee(builtin), args, ToExprType(type), def);
return callPrivate(MAsmJSCall::Callee(builtin), MAsmJSCall::PreservesTlsReg::False,
args, ToExprType(type), def);
}
/*********************************************** Control flow generation */
@ -1696,16 +1699,8 @@ EmitReturn(FunctionCompiler& f)
return true;
}
// Is a callee within the same module instance?
enum class IntraModule
{
False,
True
};
static bool
EmitCallArgs(FunctionCompiler& f, const Sig& sig, IntraModule intraModule,
FunctionCompiler::CallArgs* args)
EmitCallArgs(FunctionCompiler& f, const Sig& sig, FunctionCompiler::CallArgs* args)
{
if (!f.startCallArgs(args))
return false;
@ -1724,13 +1719,7 @@ EmitCallArgs(FunctionCompiler& f, const Sig& sig, IntraModule intraModule,
if (!f.iter().readCallArgsEnd(numArgs))
return false;
// Calls within the module pass the module's TLS pointer.
// Calls to other modules go through stubs that set up their TLS pointers.
if (intraModule == IntraModule::True)
f.passTlsPointer(args);
f.finishCallArgs(args);
return true;
return f.finishCallArgs(args, FunctionCompiler::PassTls::True);
}
static bool
@ -1746,7 +1735,7 @@ EmitCall(FunctionCompiler& f, uint32_t callOffset)
const Sig& sig = *f.mg().funcSigs[calleeIndex];
FunctionCompiler::CallArgs args(f, lineOrBytecode);
if (!EmitCallArgs(f, sig, IntraModule::True, &args))
if (!EmitCallArgs(f, sig, &args))
return false;
if (!f.iter().readCallReturn(sig.ret()))
@ -1776,7 +1765,7 @@ EmitCallIndirect(FunctionCompiler& f, uint32_t callOffset)
const Sig& sig = f.mg().sigs[sigIndex];
FunctionCompiler::CallArgs args(f, lineOrBytecode);
if (!EmitCallArgs(f, sig, IntraModule::True, &args))
if (!EmitCallArgs(f, sig, &args))
return false;
MDefinition* callee;
@ -1815,7 +1804,7 @@ EmitCallImport(FunctionCompiler& f, uint32_t callOffset)
const Sig& sig = *funcImport.sig;
FunctionCompiler::CallArgs args(f, lineOrBytecode);
if (!EmitCallArgs(f, sig, IntraModule::False, &args))
if (!EmitCallArgs(f, sig, &args))
return false;
if (!f.iter().readCallReturn(sig.ret()))
@ -2230,7 +2219,8 @@ EmitUnaryMathBuiltinCall(FunctionCompiler& f, uint32_t callOffset, SymbolicAddre
if (!f.passArg(input, operandType, &args))
return false;
f.finishCallArgs(&args);
if (!f.finishCallArgs(&args, FunctionCompiler::PassTls::False))
return false;
MDefinition* def;
if (!f.builtinCall(callee, args, operandType, &def))
@ -2261,7 +2251,8 @@ EmitBinaryMathBuiltinCall(FunctionCompiler& f, uint32_t callOffset, SymbolicAddr
if (!f.passArg(rhs, operandType, &args))
return false;
f.finishCallArgs(&args);
if (!f.finishCallArgs(&args, FunctionCompiler::PassTls::False))
return false;
MDefinition* def;
if (!f.builtinCall(callee, args, operandType, &def))

View File

@ -482,10 +482,11 @@ wasm::GenerateInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t fu
ABIArgMIRTypeIter i(invokeArgTypes);
// argument 0: Instance*
Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
if (i->kind() == ABIArg::GPR) {
masm.loadWasmGlobalPtr(InstancePtrGlobalDataOffset, i->gpr());
masm.loadPtr(instancePtr, i->gpr());
} else {
masm.loadWasmGlobalPtr(InstancePtrGlobalDataOffset, scratch);
masm.loadPtr(instancePtr, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
}
i++;

View File

@ -1044,6 +1044,9 @@ struct ExportArg
//
struct TlsData
{
// Pointer to the Instance that contains this TLS data.
Instance* instance;
// Stack limit for the current thread. This limit is checked against the
// stack pointer in the prologue of functions that allocate stack space. See
// `CodeGenerator::generateWasm`.
@ -1064,8 +1067,7 @@ static const uint64_t MappedSize = 2 * Uint32Range + PageSize;
#endif
static const unsigned ContextPtrGlobalDataOffset = 0;
static const unsigned InstancePtrGlobalDataOffset = ContextPtrGlobalDataOffset + sizeof(void*);
static const unsigned HeapGlobalDataOffset = InstancePtrGlobalDataOffset + sizeof(void*);
static const unsigned HeapGlobalDataOffset = ContextPtrGlobalDataOffset + sizeof(void*);
static const unsigned NaN64GlobalDataOffset = HeapGlobalDataOffset + sizeof(void*);
static const unsigned NaN32GlobalDataOffset = NaN64GlobalDataOffset + sizeof(double);
static const unsigned InitialGlobalDataBytes = NaN32GlobalDataOffset + sizeof(float);

View File

@ -5362,7 +5362,7 @@ MAsmJSUnsignedToFloat32::foldsTo(TempAllocator& alloc)
MAsmJSCall*
MAsmJSCall::New(TempAllocator& alloc, const wasm::CallSiteDesc& desc, Callee callee,
const Args& args, MIRType resultType, size_t spIncrement,
bool preservesTlsReg)
PreservesTlsReg preservesTlsReg)
{
MAsmJSCall* call = new(alloc) MAsmJSCall(desc, callee, spIncrement, preservesTlsReg);
call->setResultType(resultType);

View File

@ -13223,13 +13223,13 @@ class MAsmJSStoreHeap
};
class MAsmJSCompareExchangeHeap
: public MTernaryInstruction,
: public MQuaternaryInstruction,
public MWasmMemoryAccess,
public NoTypePolicy::Data
{
MAsmJSCompareExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
MDefinition* oldv, MDefinition* newv)
: MTernaryInstruction(base, oldv, newv),
MDefinition* oldv, MDefinition* newv, MDefinition* tls)
: MQuaternaryInstruction(base, oldv, newv, tls),
MWasmMemoryAccess(access)
{
setGuard(); // Not removable
@ -13243,6 +13243,7 @@ class MAsmJSCompareExchangeHeap
MDefinition* base() const { return getOperand(0); }
MDefinition* oldValue() const { return getOperand(1); }
MDefinition* newValue() const { return getOperand(2); }
MDefinition* tls() const { return getOperand(3); }
AliasSet getAliasSet() const override {
return AliasSet::Store(AliasSet::AsmJSHeap);
@ -13250,13 +13251,13 @@ class MAsmJSCompareExchangeHeap
};
class MAsmJSAtomicExchangeHeap
: public MBinaryInstruction,
: public MTernaryInstruction,
public MWasmMemoryAccess,
public NoTypePolicy::Data
{
MAsmJSAtomicExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
MDefinition* value)
: MBinaryInstruction(base, value),
MDefinition* value, MDefinition* tls)
: MTernaryInstruction(base, value, tls),
MWasmMemoryAccess(access)
{
setGuard(); // Not removable
@ -13269,6 +13270,7 @@ class MAsmJSAtomicExchangeHeap
MDefinition* base() const { return getOperand(0); }
MDefinition* value() const { return getOperand(1); }
MDefinition* tls() const { return getOperand(2); }
AliasSet getAliasSet() const override {
return AliasSet::Store(AliasSet::AsmJSHeap);
@ -13276,15 +13278,15 @@ class MAsmJSAtomicExchangeHeap
};
class MAsmJSAtomicBinopHeap
: public MBinaryInstruction,
: public MTernaryInstruction,
public MWasmMemoryAccess,
public NoTypePolicy::Data
{
AtomicOp op_;
MAsmJSAtomicBinopHeap(AtomicOp op, MDefinition* base, const MWasmMemoryAccess& access,
MDefinition* v)
: MBinaryInstruction(base, v),
MDefinition* v, MDefinition* tls)
: MTernaryInstruction(base, v, tls),
MWasmMemoryAccess(access),
op_(op)
{
@ -13299,6 +13301,7 @@ class MAsmJSAtomicBinopHeap
AtomicOp operation() const { return op_; }
MDefinition* base() const { return getOperand(0); }
MDefinition* value() const { return getOperand(1); }
MDefinition* tls() const { return getOperand(2); }
AliasSet getAliasSet() const override {
return AliasSet::Store(AliasSet::AsmJSHeap);
@ -13539,6 +13542,11 @@ class MAsmJSCall final
}
};
enum PreservesTlsReg {
False = false,
True = true
};
private:
wasm::CallSiteDesc desc_;
Callee callee_;
@ -13547,11 +13555,11 @@ class MAsmJSCall final
bool preservesTlsReg_;
MAsmJSCall(const wasm::CallSiteDesc& desc, Callee callee, size_t spIncrement,
bool preservesTlsReg)
PreservesTlsReg preservesTlsReg)
: desc_(desc)
, callee_(callee)
, spIncrement_(spIncrement)
, preservesTlsReg_(preservesTlsReg)
, preservesTlsReg_(bool(preservesTlsReg))
{ }
public:
@ -13566,7 +13574,7 @@ class MAsmJSCall final
static MAsmJSCall* New(TempAllocator& alloc, const wasm::CallSiteDesc& desc, Callee callee,
const Args& args, MIRType resultType, size_t spIncrement,
bool preservesTlsReg);
PreservesTlsReg preservesTlsReg);
size_t numArgs() const {
return argRegs_.length();

View File

@ -2557,12 +2557,13 @@ CodeGeneratorARM::visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout*
Register ptr = ToRegister(ins->ptr());
Register oldval = ToRegister(ins->oldval());
Register newval = ToRegister(ins->newval());
Register tls = ToRegister(ins->tls());
Register instance = ToRegister(ins->getTemp(0));
Register viewType = ToRegister(ins->getTemp(1));
MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
masm.loadWasmGlobalPtr(wasm::InstancePtrGlobalDataOffset, instance);
masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
masm.ma_mov(Imm32(mir->accessType()), viewType);
masm.setupAlignedABICall();
@ -2600,12 +2601,13 @@ CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* i
const MAsmJSAtomicExchangeHeap* mir = ins->mir();
Register ptr = ToRegister(ins->ptr());
Register value = ToRegister(ins->value());
Register tls = ToRegister(ins->tls());
Register instance = ToRegister(ins->getTemp(0));
Register viewType = ToRegister(ins->getTemp(1));
MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
masm.loadWasmGlobalPtr(wasm::InstancePtrGlobalDataOffset, instance);
masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
masm.ma_mov(Imm32(mir->accessType()), viewType);
masm.setupAlignedABICall();
@ -2680,10 +2682,11 @@ CodeGeneratorARM::visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins)
const MAsmJSAtomicBinopHeap* mir = ins->mir();
Register ptr = ToRegister(ins->ptr());
Register value = ToRegister(ins->value());
Register tls = ToRegister(ins->tls());
Register instance = ToRegister(ins->getTemp(0));
Register viewType = ToRegister(ins->getTemp(1));
masm.loadWasmGlobalPtr(wasm::InstancePtrGlobalDataOffset, instance);
masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
masm.move32(Imm32(mir->accessType()), viewType);
masm.setupAlignedABICall();

View File

@ -472,17 +472,18 @@ class LSoftUDivOrMod : public LBinaryMath<3>
}
};
class LAsmJSCompareExchangeCallout : public LCallInstructionHelper<1, 3, 2>
class LAsmJSCompareExchangeCallout : public LCallInstructionHelper<1, 4, 2>
{
public:
LIR_HEADER(AsmJSCompareExchangeCallout)
LAsmJSCompareExchangeCallout(const LAllocation& ptr, const LAllocation& oldval,
const LAllocation& newval, const LDefinition& temp1,
const LDefinition& temp2)
const LAllocation& newval, const LAllocation& tls,
const LDefinition& temp1, const LDefinition& temp2)
{
setOperand(0, ptr);
setOperand(1, oldval);
setOperand(2, newval);
setOperand(3, tls);
setTemp(0, temp1);
setTemp(1, temp2);
}
@ -495,22 +496,27 @@ class LAsmJSCompareExchangeCallout : public LCallInstructionHelper<1, 3, 2>
const LAllocation* newval() {
return getOperand(2);
}
const LAllocation* tls() {
return getOperand(3);
}
const MAsmJSCompareExchangeHeap* mir() const {
return mir_->toAsmJSCompareExchangeHeap();
}
};
class LAsmJSAtomicExchangeCallout : public LCallInstructionHelper<1, 2, 2>
class LAsmJSAtomicExchangeCallout : public LCallInstructionHelper<1, 3, 2>
{
public:
LIR_HEADER(AsmJSAtomicExchangeCallout)
LAsmJSAtomicExchangeCallout(const LAllocation& ptr, const LAllocation& value,
const LDefinition& temp1, const LDefinition& temp2)
const LAllocation& tls, const LDefinition& temp1,
const LDefinition& temp2)
{
setOperand(0, ptr);
setOperand(1, value);
setOperand(2, tls);
setTemp(0, temp1);
setTemp(1, temp2);
}
@ -520,21 +526,26 @@ class LAsmJSAtomicExchangeCallout : public LCallInstructionHelper<1, 2, 2>
const LAllocation* value() {
return getOperand(1);
}
const LAllocation* tls() {
return getOperand(2);
}
const MAsmJSAtomicExchangeHeap* mir() const {
return mir_->toAsmJSAtomicExchangeHeap();
}
};
class LAsmJSAtomicBinopCallout : public LCallInstructionHelper<1, 2, 2>
class LAsmJSAtomicBinopCallout : public LCallInstructionHelper<1, 3, 2>
{
public:
LIR_HEADER(AsmJSAtomicBinopCallout)
LAsmJSAtomicBinopCallout(const LAllocation& ptr, const LAllocation& value,
const LDefinition& temp1, const LDefinition& temp2)
const LAllocation& tls, const LDefinition& temp1,
const LDefinition& temp2)
{
setOperand(0, ptr);
setOperand(1, value);
setOperand(2, tls);
setTemp(0, temp1);
setTemp(1, temp2);
}
@ -544,6 +555,9 @@ class LAsmJSAtomicBinopCallout : public LCallInstructionHelper<1, 2, 2>
const LAllocation* value() {
return getOperand(1);
}
const LAllocation* tls() {
return getOperand(2);
}
const MAsmJSAtomicBinopHeap* mir() const {
return mir_->toAsmJSAtomicBinopHeap();

View File

@ -838,6 +838,7 @@ LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
new(alloc()) LAsmJSCompareExchangeCallout(useRegisterAtStart(base),
useRegisterAtStart(ins->oldValue()),
useRegisterAtStart(ins->newValue()),
useFixed(ins->tls(), WasmTlsReg),
temp(), temp());
defineReturn(lir, ins);
return;
@ -863,7 +864,9 @@ LIRGeneratorARM::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
if (byteSize(ins->accessType()) < 4 && !HasLDSTREXBHD()) {
// Call out on ARMv6.
defineReturn(new(alloc()) LAsmJSAtomicExchangeCallout(base, value, temp(), temp()), ins);
defineReturn(new(alloc()) LAsmJSAtomicExchangeCallout(base, value,
useFixed(ins->tls(), WasmTlsReg),
temp(), temp()), ins);
return;
}
@ -883,6 +886,7 @@ LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
LAsmJSAtomicBinopCallout* lir =
new(alloc()) LAsmJSAtomicBinopCallout(useRegisterAtStart(base),
useRegisterAtStart(ins->value()),
useFixed(ins->tls(), WasmTlsReg),
temp(), temp());
defineReturn(lir, ins);
return;

View File

@ -1038,6 +1038,16 @@ ExclusiveContext::stackLimitAddressForJitCode(StackKind kind)
#endif
}
// Returns the stack limit that JIT code should check against for the given
// stack kind. Under the ARM/MIPS simulator (JS_SIMULATOR), JIT code runs on
// the simulator's own stack, so the simulator's limit is returned instead of
// the native thread's limit.
uintptr_t
ExclusiveContext::stackLimitForJitCode(StackKind kind)
{
#ifdef JS_SIMULATOR
return runtime_->simulator()->stackLimit();
#else
return stackLimit(kind);
#endif
}
JSVersion
JSContext::findVersion() const
{

View File

@ -217,6 +217,7 @@ class ExclusiveContext : public ContextFriendFields,
void* stackLimitAddress(StackKind kind) { return &runtime_->mainThread.nativeStackLimit[kind]; }
void* stackLimitAddressForJitCode(StackKind kind);
uintptr_t stackLimit(StackKind kind) { return runtime_->mainThread.nativeStackLimit[kind]; }
uintptr_t stackLimitForJitCode(StackKind kind);
size_t gcSystemPageSize() { return gc::SystemPageSize(); }
bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; }
bool jitSupportsUnalignedAccesses() const { return runtime_->jitSupportsUnalignedAccesses; }