Bug 1310125 part 2 - Port Baseline scripted getter IC stub to CacheIR. r=h4writer

Jan de Mooij 2016-11-15 15:54:14 +01:00
parent 0146e7a2b0
commit f8afc80a70
13 changed files with 706 additions and 322 deletions


@ -15,6 +15,8 @@
using namespace js;
using namespace js::jit;
using mozilla::Maybe;
// OperandLocation represents the location of an OperandId. The operand is
// either in a register or on the stack, and is either boxed or unboxed.
class OperandLocation
@ -146,6 +148,8 @@ class MOZ_RAII CacheRegisterAllocator
// clobbering it for something else, while we're still holding on to it.
LiveGeneralRegisterSet currentOpRegs_;
const AllocatableGeneralRegisterSet allocatableRegs_;
// Registers that are currently unused and available.
AllocatableGeneralRegisterSet availableRegs_;
@ -160,11 +164,15 @@ class MOZ_RAII CacheRegisterAllocator
CacheRegisterAllocator(const CacheRegisterAllocator&) = delete;
CacheRegisterAllocator& operator=(const CacheRegisterAllocator&) = delete;
void freeDeadOperandRegisters();
public:
friend class AutoScratchRegister;
friend class AutoScratchRegisterExcluding;
explicit CacheRegisterAllocator(const CacheIRWriter& writer)
: stackPushed_(0),
: allocatableRegs_(GeneralRegisterSet::All()),
stackPushed_(0),
currentInstruction_(0),
writer_(writer)
{}
@ -198,9 +206,24 @@ class MOZ_RAII CacheRegisterAllocator
return stackPushed_;
}
bool isAllocatable(Register reg) const {
return allocatableRegs_.has(reg);
}
// Allocates a new register.
Register allocateRegister(MacroAssembler& masm);
ValueOperand allocateValueRegister(MacroAssembler& masm);
void allocateFixedRegister(MacroAssembler& masm, Register reg);
// Releases a register so it can be reused later.
void releaseRegister(Register reg) {
MOZ_ASSERT(currentOpRegs_.has(reg));
availableRegs_.add(reg);
}
// Removes spilled values from the native stack. This should only be
// called after all registers have been allocated.
void discardStack(MacroAssembler& masm);
// Returns the register for the given operand. If the operand is currently
// not in a register, it will load it into one.
@ -211,23 +234,59 @@ class MOZ_RAII CacheRegisterAllocator
Register defineRegister(MacroAssembler& masm, ObjOperandId obj);
};
// RAII class to put a scratch register back in the allocator's availableRegs
// set when we're done with it.
// RAII class to allocate a scratch register and release it when we're done
// with it.
class MOZ_RAII AutoScratchRegister
{
CacheRegisterAllocator& alloc_;
Register reg_;
public:
AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm)
AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm,
Register reg = InvalidReg)
: alloc_(alloc)
{
reg_ = alloc.allocateRegister(masm);
if (reg != InvalidReg) {
alloc.allocateFixedRegister(masm, reg);
reg_ = reg;
} else {
reg_ = alloc.allocateRegister(masm);
}
MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
}
~AutoScratchRegister() {
alloc_.releaseRegister(reg_);
}
operator Register() const { return reg_; }
};
// Like AutoScratchRegister, but lets the caller specify a register that should
// not be allocated here.
class MOZ_RAII AutoScratchRegisterExcluding
{
CacheRegisterAllocator& alloc_;
Register reg_;
public:
AutoScratchRegisterExcluding(CacheRegisterAllocator& alloc, MacroAssembler& masm,
Register excluding)
: alloc_(alloc)
{
MOZ_ASSERT(excluding != InvalidReg);
reg_ = alloc.allocateRegister(masm);
if (reg_ == excluding) {
// We need a different register, so try again.
reg_ = alloc.allocateRegister(masm);
MOZ_ASSERT(reg_ != excluding);
alloc_.releaseRegister(excluding);
}
MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
alloc_.availableRegs_.add(reg_);
}
~AutoScratchRegisterExcluding() {
alloc_.releaseRegister(reg_);
}
operator Register() const { return reg_; }
};
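
The two classes above follow a plain RAII discipline: take a register out of the allocator's free set on construction and hand it back on destruction, with an optional fixed-register variant. A minimal standalone sketch of that discipline, using toy allocator types rather than the real CacheRegisterAllocator/MacroAssembler API:

```cpp
// Standalone sketch of the acquire/release discipline used by
// AutoScratchRegister: the guard takes a register out of the free set in its
// constructor and puts it back in its destructor. Toy types only; the real
// allocator also spills operands and tracks per-instruction register use.
#include <cassert>
#include <set>

struct ToyAllocator {
    std::set<int> available{0, 1, 2, 3};   // free "registers"
    std::set<int> inUse;

    int allocate() {
        assert(!available.empty());
        int reg = *available.begin();
        available.erase(reg);
        inUse.insert(reg);
        return reg;
    }
    void allocateFixed(int reg) {          // like allocateFixedRegister
        assert(available.count(reg));
        available.erase(reg);
        inUse.insert(reg);
    }
    void release(int reg) {                // like releaseRegister
        assert(inUse.count(reg));
        inUse.erase(reg);
        available.insert(reg);
    }
};

class ScratchRegister {                    // like AutoScratchRegister
    ToyAllocator& alloc_;
    int reg_;
  public:
    explicit ScratchRegister(ToyAllocator& alloc, int fixed = -1)
      : alloc_(alloc)
    {
        if (fixed >= 0) {
            alloc_.allocateFixed(fixed);   // reserve a specific register
            reg_ = fixed;
        } else {
            reg_ = alloc_.allocate();
        }
    }
    ~ScratchRegister() { alloc_.release(reg_); }
    operator int() const { return reg_; }
};

int main() {
    ToyAllocator alloc;
    {
        ScratchRegister scratch(alloc);    // acquired here
        ScratchRegister fixed(alloc, 3);   // fixed-register variant
        assert(alloc.inUse.size() == 2);
    }                                      // both released here
    assert(alloc.inUse.empty());
}
```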
@ -380,25 +439,47 @@ CacheIRCompiler::emitFailurePath(size_t i)
}
}
if (stackPushed > 0)
masm.addToStackPtr(Imm32(stackPushed));
allocator.discardStack(masm);
}
// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
{
// Some Baseline IC stubs can be used in IonMonkey through SharedStubs.
// Those stubs have different machine code, so we need to track whether
// we're compiling for Baseline or Ion.
ICStubEngine engine_;
#ifdef DEBUG
uint32_t framePushedAtEnterStubFrame_;
#endif
uint32_t stubDataOffset_;
bool inStubFrame_;
bool makesGCCalls_;
void enterStubFrame(MacroAssembler& masm, Register scratch);
void leaveStubFrame(MacroAssembler& masm, bool calledIntoIon);
public:
BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, uint32_t stubDataOffset)
BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, ICStubEngine engine,
uint32_t stubDataOffset)
: CacheIRCompiler(cx, writer),
stubDataOffset_(stubDataOffset)
engine_(engine),
#ifdef DEBUG
framePushedAtEnterStubFrame_(0),
#endif
stubDataOffset_(stubDataOffset),
inStubFrame_(false),
makesGCCalls_(false)
{}
MOZ_MUST_USE bool init(CacheKind kind);
JitCode* compile();
bool makesGCCalls() const { return makesGCCalls_; }
private:
#define DEFINE_OP(op) MOZ_MUST_USE bool emit##op();
CACHE_IR_OPS(DEFINE_OP)
@ -430,17 +511,51 @@ class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
return true;
}
void emitEnterTypeMonitorIC() {
if (allocator.stackPushed() > 0)
masm.addToStackPtr(Imm32(allocator.stackPushed()));
allocator.discardStack(masm);
EmitEnterTypeMonitorIC(masm);
}
void emitReturnFromIC() {
if (allocator.stackPushed() > 0)
masm.addToStackPtr(Imm32(allocator.stackPushed()));
allocator.discardStack(masm);
EmitReturnFromIC(masm);
}
};
void
BaselineCacheIRCompiler::enterStubFrame(MacroAssembler& masm, Register scratch)
{
if (engine_ == ICStubEngine::Baseline) {
EmitBaselineEnterStubFrame(masm, scratch);
#ifdef DEBUG
framePushedAtEnterStubFrame_ = masm.framePushed();
#endif
} else {
EmitIonEnterStubFrame(masm, scratch);
}
MOZ_ASSERT(!inStubFrame_);
inStubFrame_ = true;
makesGCCalls_ = true;
}
void
BaselineCacheIRCompiler::leaveStubFrame(MacroAssembler& masm, bool calledIntoIon)
{
MOZ_ASSERT(inStubFrame_);
inStubFrame_ = false;
if (engine_ == ICStubEngine::Baseline) {
#ifdef DEBUG
masm.setFramePushed(framePushedAtEnterStubFrame_);
if (calledIntoIon)
masm.adjustFrame(sizeof(intptr_t)); // Calls into ion have this extra.
#endif
EmitBaselineLeaveStubFrame(masm, calledIntoIon);
} else {
EmitIonLeaveStubFrame(masm);
}
}
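
The stub-frame helpers are intentionally strict about nesting, and entering a stub frame is what flips makesGCCalls_. A small standalone sketch of that bookkeeping (hypothetical names, no real frame layout):

```cpp
// Minimal sketch of the bookkeeping around enterStubFrame/leaveStubFrame:
// frames must be strictly nested, and entering a stub frame implies the
// stub makes non-tail (GC) calls, which later affects where it is allocated.
#include <cassert>

struct StubCompilerState {
    bool inStubFrame = false;
    bool makesGCCalls = false;

    void enterStubFrame() {
        assert(!inStubFrame);   // no nested stub frames
        inStubFrame = true;
        makesGCCalls = true;    // remembered for stub-space selection
    }
    void leaveStubFrame() {
        assert(inStubFrame);    // every leave matches an enter
        inStubFrame = false;
    }
};

int main() {
    StubCompilerState s;
    s.enterStubFrame();
    s.leaveStubFrame();
    assert(s.makesGCCalls);
}
```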
JitCode*
BaselineCacheIRCompiler::compile()
{
@ -602,33 +717,53 @@ CacheRegisterAllocator::defineRegister(MacroAssembler& masm, ObjOperandId op)
return reg;
}
void
CacheRegisterAllocator::freeDeadOperandRegisters()
{
// See if any operands are dead so we can reuse their registers. Note that
// we skip the input operands, as those are also used by failure paths, and
// we currently don't track those uses.
for (size_t i = writer_.numInputOperands(); i < operandLocations_.length(); i++) {
if (!writer_.operandIsDead(i, currentInstruction_))
continue;
OperandLocation& loc = operandLocations_[i];
switch (loc.kind()) {
case OperandLocation::PayloadReg:
availableRegs_.add(loc.payloadReg());
break;
case OperandLocation::ValueReg:
availableRegs_.add(loc.valueReg());
break;
case OperandLocation::Uninitialized:
case OperandLocation::PayloadStack:
case OperandLocation::ValueStack:
break;
}
loc.setUninitialized();
}
}
void
CacheRegisterAllocator::discardStack(MacroAssembler& masm)
{
// This should only be called when we are no longer using the operands,
// as we're discarding everything from the native stack. Set all operand
// locations to Uninitialized to catch bugs.
for (size_t i = 0; i < operandLocations_.length(); i++)
operandLocations_[i].setUninitialized();
if (stackPushed_ > 0) {
masm.addToStackPtr(Imm32(stackPushed_));
stackPushed_ = 0;
}
}
Register
CacheRegisterAllocator::allocateRegister(MacroAssembler& masm)
{
if (availableRegs_.empty()) {
// No registers available. See if any operands are dead so we can reuse
// their registers. Note that we skip the input operands, as those are
// also used by failure paths, and we currently don't track those uses.
for (size_t i = writer_.numInputOperands(); i < operandLocations_.length(); i++) {
if (!writer_.operandIsDead(i, currentInstruction_))
continue;
OperandLocation& loc = operandLocations_[i];
switch (loc.kind()) {
case OperandLocation::PayloadReg:
availableRegs_.add(loc.payloadReg());
break;
case OperandLocation::ValueReg:
availableRegs_.add(loc.valueReg());
break;
case OperandLocation::Uninitialized:
case OperandLocation::PayloadStack:
case OperandLocation::ValueStack:
break;
}
loc.setUninitialized();
}
}
if (availableRegs_.empty())
freeDeadOperandRegisters();
if (availableRegs_.empty()) {
// Still no registers available, try to spill unused operands to
@ -670,6 +805,51 @@ CacheRegisterAllocator::allocateRegister(MacroAssembler& masm)
return reg;
}
void
CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm, Register reg)
{
// Fixed registers should be allocated first, to ensure they're
// still available.
MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");
freeDeadOperandRegisters();
if (availableRegs_.has(reg)) {
availableRegs_.take(reg);
currentOpRegs_.add(reg);
return;
}
// The register must be used by some operand. Spill it to the stack.
for (size_t i = 0; i < operandLocations_.length(); i++) {
OperandLocation& loc = operandLocations_[i];
if (loc.kind() == OperandLocation::PayloadReg) {
if (loc.payloadReg() != reg)
continue;
masm.push(reg);
stackPushed_ += sizeof(uintptr_t);
loc.setPayloadStack(stackPushed_, loc.payloadType());
currentOpRegs_.add(reg);
return;
}
if (loc.kind() == OperandLocation::ValueReg) {
if (!loc.valueReg().aliases(reg))
continue;
masm.pushValue(loc.valueReg());
stackPushed_ += sizeof(js::Value);
loc.setValueStack(stackPushed_);
availableRegs_.add(loc.valueReg());
availableRegs_.take(reg);
currentOpRegs_.add(reg);
return;
}
}
MOZ_CRASH("Invalid register");
}
ValueOperand
CacheRegisterAllocator::allocateValueRegister(MacroAssembler& masm)
{
@ -873,6 +1053,78 @@ BaselineCacheIRCompiler::emitLoadDynamicSlotResult()
return true;
}
bool
BaselineCacheIRCompiler::emitCallScriptedGetterResult()
{
MOZ_ASSERT(engine_ == ICStubEngine::Baseline);
// We use ICTailCallReg when entering the stub frame, so ensure it's not
// used for something else.
Maybe<AutoScratchRegister> tail;
if (allocator.isAllocatable(ICTailCallReg))
tail.emplace(allocator, masm, ICTailCallReg);
Register obj = allocator.useRegister(masm, reader.objOperandId());
Address getterAddr(stubAddress(reader.stubOffset()));
AutoScratchRegisterExcluding code(allocator, masm, ArgumentsRectifierReg);
AutoScratchRegister callee(allocator, masm);
AutoScratchRegister scratch(allocator, masm);
// First, ensure our getter is non-lazy and has JIT code.
{
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
masm.loadPtr(getterAddr, callee);
masm.branchIfFunctionHasNoScript(callee, failure->label());
masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
masm.loadBaselineOrIonRaw(code, code, failure->label());
}
allocator.discardStack(masm);
// Push a stub frame so that we can perform a non-tail call.
enterStubFrame(masm, scratch);
// Align the stack such that the JitFrameLayout is aligned on
// JitStackAlignment.
masm.alignJitStackBasedOnNArgs(0);
// Getter is called with 0 arguments, just |obj| as thisv.
// Note that we use Push, not push, so that callJit will align the stack
// properly on ARM.
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
masm.Push(Imm32(0)); // ActualArgc is 0
masm.Push(callee);
masm.Push(scratch);
// Handle arguments underflow.
Label noUnderflow;
masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), callee);
masm.branch32(Assembler::Equal, callee, Imm32(0), &noUnderflow);
{
// Call the arguments rectifier.
MOZ_ASSERT(ArgumentsRectifierReg != code);
JitCode* argumentsRectifier = cx_->runtime()->jitRuntime()->getArgumentsRectifier();
masm.movePtr(ImmGCPtr(argumentsRectifier), code);
masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
masm.movePtr(ImmWord(0), ArgumentsRectifierReg);
}
masm.bind(&noUnderflow);
masm.callJit(code);
leaveStubFrame(masm, true);
emitEnterTypeMonitorIC();
return true;
}
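
The underflow handling above exists because the IC always passes zero actual arguments (just |this|), so a getter that declares formal parameters is routed through the arguments rectifier, which pads the missing arguments with undefined. A toy sketch of that decision, with stand-in value types rather than real JS values:

```cpp
// Sketch of the argument-underflow path in the scripted-getter call: with
// ActualArgc == 0, any getter whose nargs != 0 goes through the rectifier.
#include <cstdint>
#include <vector>

struct ToyValue { bool isUndefined; int32_t payload; };

std::vector<ToyValue> rectifyArguments(const std::vector<ToyValue>& actual, uint16_t nargs) {
    std::vector<ToyValue> padded = actual;
    while (padded.size() < nargs)
        padded.push_back(ToyValue{true, 0});       // pad with undefined
    return padded;
}

int main() {
    uint16_t nargs = 2;                            // getter declares two formals
    std::vector<ToyValue> actual;                  // IC passes ActualArgc == 0
    bool needsRectifier = nargs > actual.size();   // the branch32 check above
    std::vector<ToyValue> args = needsRectifier ? rectifyArguments(actual, nargs) : actual;
    return args.size() == nargs ? 0 : 1;
}
```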
bool
BaselineCacheIRCompiler::emitLoadUnboxedPropertyResult()
{
@ -1131,7 +1383,9 @@ HashNumber
CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l)
{
HashNumber hash = mozilla::HashBytes(l.code, l.length);
return mozilla::AddToHash(hash, uint32_t(l.kind));
hash = mozilla::AddToHash(hash, uint32_t(l.kind));
hash = mozilla::AddToHash(hash, uint32_t(l.engine));
return hash;
}
bool
@ -1140,6 +1394,9 @@ CacheIRStubKey::match(const CacheIRStubKey& entry, const CacheIRStubKey::Lookup&
if (entry.stubInfo->kind() != l.kind)
return false;
if (entry.stubInfo->engine() != l.engine)
return false;
if (entry.stubInfo->codeLength() != l.length)
return false;
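
Adding the engine to CacheIRStubKey means two stubs with identical CacheIR but different engines never share JitCode. A simplified standalone sketch of such a composite key (toy hashing, not mozilla::HashBytes/AddToHash):

```cpp
// Sketch of a stub-code cache key that incorporates kind, engine, and the
// serialized CacheIR: equality requires all three to match.
#include <cstdint>
#include <functional>
#include <string>

enum class CacheKind : uint8_t { GetProp };
enum class ICStubEngine : uint8_t { Baseline, IonMonkey };

struct StubKey {
    CacheKind kind;
    ICStubEngine engine;
    std::string code;   // serialized CacheIR

    size_t hash() const {
        size_t h = std::hash<std::string>{}(code);
        h = h * 31 + static_cast<size_t>(kind);
        h = h * 31 + static_cast<size_t>(engine);
        return h;
    }
    bool operator==(const StubKey& other) const {
        return kind == other.kind &&
               engine == other.engine &&   // new: engine is part of the key
               code == other.code;
    }
};

int main() {
    StubKey a{CacheKind::GetProp, ICStubEngine::Baseline, "GuardIsObject ..."};
    StubKey b{CacheKind::GetProp, ICStubEngine::IonMonkey, "GuardIsObject ..."};
    return (a == b) ? 1 : 0;   // different engines: no code sharing
}
```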
@ -1154,7 +1411,8 @@ CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
{}
CacheIRStubInfo*
CacheIRStubInfo::New(CacheKind kind, uint32_t stubDataOffset, const CacheIRWriter& writer)
CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
uint32_t stubDataOffset, const CacheIRWriter& writer)
{
size_t numStubFields = writer.numStubFields();
size_t bytesNeeded = sizeof(CacheIRStubInfo) +
@ -1177,13 +1435,15 @@ CacheIRStubInfo::New(CacheKind kind, uint32_t stubDataOffset, const CacheIRWrite
gcTypes[i] = uint8_t(writer.stubFieldGCType(i));
gcTypes[numStubFields] = uint8_t(StubField::GCType::Limit);
return new(p) CacheIRStubInfo(kind, stubDataOffset, codeStart, writer.codeLength(), gcTypes);
return new(p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset, codeStart,
writer.codeLength(), gcTypes);
}
static const size_t MaxOptimizedCacheIRStubs = 16;
ICStub*
jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
CacheKind kind, ICStubEngine engine, JSScript* outerScript,
ICFallbackStub* stub)
{
// We shouldn't GC or report OOM (or any other exception) here.
@ -1204,12 +1464,12 @@ jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, Cache
// Check if we already have JitCode for this stub.
CacheIRStubInfo* stubInfo;
CacheIRStubKey::Lookup lookup(kind, writer.codeStart(), writer.codeLength());
CacheIRStubKey::Lookup lookup(kind, engine, writer.codeStart(), writer.codeLength());
JitCode* code = jitCompartment->getCacheIRStubCode(lookup, &stubInfo);
if (!code) {
// We have to generate stub code.
JitContext jctx(cx, nullptr);
BaselineCacheIRCompiler comp(cx, writer, stubDataOffset);
BaselineCacheIRCompiler comp(cx, writer, engine, stubDataOffset);
if (!comp.init(kind))
return nullptr;
@ -1221,7 +1481,7 @@ jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, Cache
// call below will transfer ownership to the stub code HashMap, so we
// don't have to worry about freeing it below.
MOZ_ASSERT(!stubInfo);
stubInfo = CacheIRStubInfo::New(kind, stubDataOffset, writer);
stubInfo = CacheIRStubInfo::New(kind, engine, comp.makesGCCalls(), stubDataOffset, writer);
if (!stubInfo)
return nullptr;
@ -1236,12 +1496,13 @@ jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, Cache
MOZ_ASSERT(code);
MOZ_ASSERT(stubInfo);
MOZ_ASSERT(stub->isMonitoredFallback());
MOZ_ASSERT(stubInfo->stubDataSize() == writer.stubDataSize());
size_t bytesNeeded = stubInfo->stubDataOffset() + writer.stubDataSize();
size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
// For now, no stubs can make calls so they are all allocated in the
// optimized stub space.
void* newStub = cx->zone()->jitZone()->optimizedStubSpace()->alloc(bytesNeeded);
ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(stubInfo->makesGCCalls(),
outerScript, engine);
void* newStub = stubSpace->alloc(bytesNeeded);
if (!newStub)
return nullptr;
@ -1281,3 +1542,70 @@ jit::TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo
field++;
}
}
size_t
CacheIRStubInfo::stubDataSize() const
{
size_t field = 0;
size_t size = 0;
while (true) {
switch (gcType(field++)) {
case StubField::GCType::NoGCThing:
case StubField::GCType::Shape:
case StubField::GCType::ObjectGroup:
case StubField::GCType::JSObject:
size += sizeof(uintptr_t);
continue;
case StubField::GCType::Limit:
return size;
}
MOZ_CRASH("unreachable");
}
}
void
CacheIRStubInfo::copyStubData(ICStub* src, ICStub* dest) const
{
uintptr_t* srcWords = reinterpret_cast<uintptr_t*>(src);
uintptr_t* destWords = reinterpret_cast<uintptr_t*>(dest);
size_t field = 0;
while (true) {
switch (gcType(field)) {
case StubField::GCType::NoGCThing:
destWords[field] = srcWords[field];
break;
case StubField::GCType::Shape:
getStubField<Shape*>(dest, field).init(getStubField<Shape*>(src, field));
break;
case StubField::GCType::JSObject:
getStubField<JSObject*>(dest, field).init(getStubField<JSObject*>(src, field));
break;
case StubField::GCType::ObjectGroup:
getStubField<ObjectGroup*>(dest, field).init(getStubField<ObjectGroup*>(src, field));
break;
case StubField::GCType::Limit:
return; // Done.
}
field++;
}
}
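
Both stubDataSize() and copyStubData() walk the Limit-terminated GC-type array that describes the stub's data words. A standalone sketch of that walk, with plain word copies standing in for the GCPtr initialization the real code performs:

```cpp
// Sketch of the Limit-terminated GC-type array driving stub data handling:
// every stub field is one word, and its GC type tells the copier whether a
// GC-visible GCPtr initialization would be needed in the real code.
#include <cstddef>
#include <cstdint>
#include <vector>

enum class GCType : uint8_t { NoGCThing, Shape, ObjectGroup, JSObject, Limit };

size_t stubDataSize(const std::vector<GCType>& gcTypes) {
    size_t size = 0;
    for (GCType t : gcTypes) {
        if (t == GCType::Limit)
            return size;           // the array is terminated by Limit
        size += sizeof(uintptr_t); // every field occupies one word
    }
    return size;
}

void copyStubData(const std::vector<GCType>& gcTypes,
                  const uintptr_t* src, uintptr_t* dest)
{
    for (size_t field = 0; gcTypes[field] != GCType::Limit; field++) {
        // GC-thing fields would go through GCPtr<T>::init() in the real code;
        // a raw word copy is enough for this toy version.
        dest[field] = src[field];
    }
}

int main() {
    std::vector<GCType> types{GCType::NoGCThing, GCType::JSObject, GCType::Limit};
    uintptr_t src[2] = {42, 7}, dest[2] = {0, 0};
    copyStubData(types, src, dest);
    return (stubDataSize(types) == 2 * sizeof(uintptr_t) && dest[1] == 7) ? 0 : 1;
}
```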
/* static */ ICCacheIR_Monitored*
ICCacheIR_Monitored::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
ICCacheIR_Monitored& other)
{
const CacheIRStubInfo* stubInfo = other.stubInfo();
MOZ_ASSERT(stubInfo->makesGCCalls());
size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
void* newStub = space->alloc(bytesNeeded);
if (!newStub)
return nullptr;
ICCacheIR_Monitored* res = new(newStub) ICCacheIR_Monitored(other.jitCode(), firstMonitorStub,
stubInfo);
stubInfo->copyStubData(&other, res);
return res;
}


@ -20,20 +20,30 @@ class ICStub;
// of this class.
class CacheIRStubInfo
{
CacheKind kind_;
// These fields don't require 8 bits, but GCC complains if these fields are
// smaller than the size of the enums.
CacheKind kind_ : 8;
ICStubEngine engine_ : 8;
bool makesGCCalls_ : 1;
uint8_t stubDataOffset_;
const uint8_t* code_;
uint32_t length_;
const uint8_t* gcTypes_;
CacheIRStubInfo(CacheKind kind, uint32_t stubDataOffset, const uint8_t* code, uint32_t codeLength,
CacheIRStubInfo(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
uint32_t stubDataOffset, const uint8_t* code, uint32_t codeLength,
const uint8_t* gcTypes)
: kind_(kind),
engine_(engine),
makesGCCalls_(makesGCCalls),
stubDataOffset_(stubDataOffset),
code_(code),
length_(codeLength),
gcTypes_(gcTypes)
{
MOZ_ASSERT(kind_ == kind, "Kind must fit in bitfield");
MOZ_ASSERT(engine_ == engine, "Engine must fit in bitfield");
MOZ_ASSERT(stubDataOffset_ == stubDataOffset, "stubDataOffset must fit in uint8_t");
}
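
The constructor asserts exist because the enum values are squeezed into 8-bit bitfields; the same fit-check idiom in a standalone form (hypothetical PackedInfo type, not the real CacheIRStubInfo):

```cpp
// Sketch of the "enum in a narrow bitfield" pattern: store the value in a
// small field and assert in the constructor that it round-trips, so a value
// that no longer fits is caught immediately in debug builds.
#include <cassert>
#include <cstdint>

enum class Engine : uint8_t { Baseline, IonMonkey };

struct PackedInfo {
    Engine engine_ : 8;
    uint8_t offset_ : 8;

    PackedInfo(Engine engine, uint32_t offset)
      : engine_(engine), offset_(static_cast<uint8_t>(offset))
    {
        assert(engine_ == engine && "Engine must fit in bitfield");
        assert(offset_ == offset && "offset must fit in uint8_t");
    }
};

int main() {
    PackedInfo info(Engine::IonMonkey, 40);
    return info.offset_ == 40 ? 0 : 1;
}
```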
@ -42,23 +52,30 @@ class CacheIRStubInfo
public:
CacheKind kind() const { return kind_; }
ICStubEngine engine() const { return engine_; }
bool makesGCCalls() const { return makesGCCalls_; }
const uint8_t* code() const { return code_; }
uint32_t codeLength() const { return length_; }
uint32_t stubDataOffset() const { return stubDataOffset_; }
size_t stubDataSize() const;
StubField::GCType gcType(uint32_t i) const { return (StubField::GCType)gcTypes_[i]; }
static CacheIRStubInfo* New(CacheKind kind, uint32_t stubDataOffset,
const CacheIRWriter& writer);
static CacheIRStubInfo* New(CacheKind kind, ICStubEngine engine, bool canMakeCalls,
uint32_t stubDataOffset, const CacheIRWriter& writer);
template <class T>
js::GCPtr<T>& getStubField(ICStub* stub, uint32_t field) const;
void copyStubData(ICStub* src, ICStub* dest) const;
};
void TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo);
ICStub* AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
ICStub* AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
CacheKind kind, ICStubEngine engine, JSScript* outerScript,
ICFallbackStub* stub);
} // namespace jit


@ -690,6 +690,7 @@ RecompileBaselineScriptForDebugMode(JSContext* cx, JSScript* script,
}
#define PATCHABLE_ICSTUB_KIND_LIST(_) \
_(CacheIR_Monitored) \
_(Call_Scripted) \
_(Call_AnyScripted) \
_(Call_Native) \
@ -701,7 +702,6 @@ RecompileBaselineScriptForDebugMode(JSContext* cx, JSScript* script,
_(GetElem_NativePrototypeCallNativeSymbol) \
_(GetElem_NativePrototypeCallScriptedName) \
_(GetElem_NativePrototypeCallScriptedSymbol) \
_(GetProp_CallScripted) \
_(GetProp_CallNative) \
_(GetProp_CallNativeGlobal) \
_(GetProp_CallDOMProxyNative) \
@ -719,7 +719,7 @@ CloneOldBaselineStub(JSContext* cx, DebugModeOSREntryVector& entries, size_t ent
return true;
ICStub* oldStub = entry.oldStub;
MOZ_ASSERT(ICStub::CanMakeCalls(oldStub->kind()));
MOZ_ASSERT(oldStub->makesGCCalls());
if (entry.frameKind == ICEntry::Kind_Invalid) {
// The exception handler can modify the frame's override pc while
@ -763,7 +763,7 @@ CloneOldBaselineStub(JSContext* cx, DebugModeOSREntryVector& entries, size_t ent
} else {
firstMonitorStub = nullptr;
}
ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForKind(oldStub->kind(), entry.script,
ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(oldStub->makesGCCalls(), entry.script,
ICStubCompiler::Engine::Baseline);
// Clone the existing stub into the recompiled IC.


@ -696,6 +696,141 @@ GlobalShapeForGetPropFunction(ICStub* stub)
return nullptr;
}
static bool
MatchCacheIRReceiverGuard(CacheIRReader& reader, ICCacheIR_Monitored* stub, ObjOperandId objId,
ReceiverGuard* receiver)
{
// This matches the CacheIR emitted in TestMatchingReceiver.
//
// Either:
//
// GuardShape objId
//
// or:
//
// GuardGroup objId
// [GuardNoUnboxedExpando objId]
//
// or:
//
// GuardGroup objId
// expandoId: GuardAndLoadUnboxedExpando
// GuardShape expandoId
*receiver = ReceiverGuard();
if (reader.matchOp(CacheOp::GuardShape, objId)) {
// The first case.
receiver->shape = stub->stubInfo()->getStubField<Shape*>(stub, reader.stubOffset());
return true;
}
if (!reader.matchOp(CacheOp::GuardGroup, objId))
return false;
receiver->group = stub->stubInfo()->getStubField<ObjectGroup*>(stub, reader.stubOffset());
if (!reader.matchOp(CacheOp::GuardAndLoadUnboxedExpando, objId)) {
// Second case, just a group guard.
reader.matchOp(CacheOp::GuardNoUnboxedExpando, objId);
return true;
}
// Third case.
ObjOperandId expandoId = reader.objOperandId();
if (!reader.matchOp(CacheOp::GuardShape, expandoId))
return false;
receiver->shape = stub->stubInfo()->getStubField<Shape*>(stub, reader.stubOffset());
return true;
}
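
The matching code above relies on CacheIRReader::matchOp, which only consumes an op when it is the expected op applied to the expected operand. A toy reader showing that peek-and-consume behaviour (simplified op stream, not the real CacheIRReader encoding):

```cpp
// Sketch of matchOp-style pattern matching over a CacheIR-like op stream:
// the reader advances only when the next (op, operandId) pair matches.
#include <cstdint>
#include <vector>

enum class CacheOp : uint8_t { GuardIsObject, GuardShape, GuardGroup, LoadObject };

struct ToyReader {
    struct Entry { CacheOp op; uint8_t operandId; };
    std::vector<Entry> ops;
    size_t pos = 0;

    // Consume the next entry only if it is `op` applied to `operandId`.
    bool matchOp(CacheOp op, uint8_t operandId) {
        if (pos < ops.size() && ops[pos].op == op && ops[pos].operandId == operandId) {
            pos++;
            return true;
        }
        return false;
    }
};

int main() {
    ToyReader reader;
    reader.ops = {{CacheOp::GuardIsObject, 0}, {CacheOp::GuardShape, 0}};
    bool objGuarded   = reader.matchOp(CacheOp::GuardIsObject, 0);  // true, consumed
    bool groupGuarded = reader.matchOp(CacheOp::GuardGroup, 0);     // false, not consumed
    bool shapeGuarded = reader.matchOp(CacheOp::GuardShape, 0);     // true
    return (objGuarded && !groupGuarded && shapeGuarded) ? 0 : 1;
}
```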
static bool
AddCacheIRGetPropFunction(ICCacheIR_Monitored* stub, JSObject** holder, Shape** holderShape,
JSFunction** commonGetter, Shape** globalShape, bool* isOwnProperty,
BaselineInspector::ReceiverVector& receivers,
BaselineInspector::ObjectGroupVector& convertUnboxedGroups)
{
// We match either an own getter:
//
// GuardIsObject objId
// <GuardReceiver objId>
// CallScriptedGetterResult objId
//
// Or a getter on the prototype:
//
// GuardIsObject objId
// <GuardReceiver objId>
// LoadObject holderId
// GuardShape holderId
// CallScriptedGetterResult objId
CacheIRReader reader(stub->stubInfo());
ObjOperandId objId = ObjOperandId(0);
if (!reader.matchOp(CacheOp::GuardIsObject, objId))
return false;
ReceiverGuard receiver;
if (!MatchCacheIRReceiverGuard(reader, stub, objId, &receiver))
return false;
if (reader.matchOp(CacheOp::CallScriptedGetterResult, objId)) {
// This is an own property getter, the first case.
MOZ_ASSERT(receiver.shape);
MOZ_ASSERT(!receiver.group);
size_t offset = reader.stubOffset();
JSFunction* getter =
&stub->stubInfo()->getStubField<JSObject*>(stub, offset)->as<JSFunction>();
if (*commonGetter && (!*isOwnProperty || *globalShape || *holderShape != receiver.shape))
return false;
MOZ_ASSERT_IF(*commonGetter, *commonGetter == getter);
*holder = nullptr;
*holderShape = receiver.shape;
*commonGetter = getter;
*isOwnProperty = true;
return true;
}
if (!reader.matchOp(CacheOp::LoadObject))
return false;
ObjOperandId holderId = reader.objOperandId();
JSObject* obj = stub->stubInfo()->getStubField<JSObject*>(stub, reader.stubOffset());
if (!reader.matchOp(CacheOp::GuardShape, holderId))
return false;
Shape* objShape = stub->stubInfo()->getStubField<Shape*>(stub, reader.stubOffset());
if (!reader.matchOp(CacheOp::CallScriptedGetterResult, objId))
return false;
// A getter on the prototype.
size_t offset = reader.stubOffset();
JSFunction* getter =
&stub->stubInfo()->getStubField<JSObject*>(stub, offset)->as<JSFunction>();
if (*commonGetter && (*isOwnProperty || *globalShape || *holderShape != objShape))
return false;
MOZ_ASSERT_IF(*commonGetter, *commonGetter == getter);
if (!AddReceiver(receiver, receivers, convertUnboxedGroups))
return false;
if (obj->as<NativeObject>().lastProperty() != objShape) {
// Skip this stub as the shape is no longer correct.
return true;
}
*holder = obj;
*holderShape = objShape;
*commonGetter = getter;
*isOwnProperty = false;
return true;
}
bool
BaselineInspector::commonGetPropFunction(jsbytecode* pc, JSObject** holder, Shape** holderShape,
JSFunction** commonGetter, Shape** globalShape,
@ -709,12 +844,12 @@ BaselineInspector::commonGetPropFunction(jsbytecode* pc, JSObject** holder, Shap
MOZ_ASSERT(receivers.empty());
MOZ_ASSERT(convertUnboxedGroups.empty());
*globalShape = nullptr;
*commonGetter = nullptr;
const ICEntry& entry = icEntryFromPC(pc);
for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
if (stub->isGetProp_CallScripted() ||
stub->isGetProp_CallNative() ||
if (stub->isGetProp_CallNative() ||
stub->isGetProp_CallNativeGlobal())
{
ICGetPropCallGetter* nstub = static_cast<ICGetPropCallGetter*>(stub);
@ -736,6 +871,13 @@ BaselineInspector::commonGetPropFunction(jsbytecode* pc, JSObject** holder, Shap
} else {
MOZ_ASSERT(*commonGetter == nstub->getter());
}
} else if (stub->isCacheIR_Monitored()) {
if (!AddCacheIRGetPropFunction(stub->toCacheIR_Monitored(), holder, holderShape,
commonGetter, globalShape, isOwnProperty, receivers,
convertUnboxedGroups))
{
return false;
}
} else if (stub->isGetProp_Fallback()) {
// If we have an unoptimizable access, don't try to optimize.
if (stub->toGetProp_Fallback()->hadUnoptimizableAccess())
@ -847,7 +989,6 @@ BaselineInspector::expectedPropertyAccessInputType(jsbytecode* pc)
// Either an object or magic arguments.
return MIRType::Value;
case ICStub::GetProp_CallScripted:
case ICStub::GetProp_CallNative:
case ICStub::GetProp_CallDOMProxyNative:
case ICStub::GetProp_CallDOMProxyWithGenerationNative:


@ -18,13 +18,17 @@ using namespace js::jit;
using mozilla::Maybe;
GetPropIRGenerator::GetPropIRGenerator(JSContext* cx, jsbytecode* pc, HandleValue val, HandlePropertyName name,
GetPropIRGenerator::GetPropIRGenerator(JSContext* cx, jsbytecode* pc, ICStubEngine engine,
bool* isTemporarilyUnoptimizable,
HandleValue val, HandlePropertyName name,
MutableHandleValue res)
: cx_(cx),
pc_(pc),
val_(val),
name_(name),
res_(res),
engine_(engine),
isTemporarilyUnoptimizable_(isTemporarilyUnoptimizable),
emitted_(false),
preliminaryObjectAction_(PreliminaryObjectAction::None)
{}
@ -96,12 +100,13 @@ IsCacheableNoProperty(JSContext* cx, JSObject* obj, JSObject* holder, Shape* sha
enum NativeGetPropCacheability {
CanAttachNone,
CanAttachReadSlot,
CanAttachCallGetter,
};
static NativeGetPropCacheability
CanAttachNativeGetProp(JSContext* cx, HandleObject obj, HandleId id,
MutableHandleNativeObject holder, MutableHandleShape shape,
jsbytecode* pc, bool skipArrayLen = false)
jsbytecode* pc, ICStubEngine engine, bool* isTemporarilyUnoptimizable)
{
MOZ_ASSERT(JSID_IS_STRING(id) || JSID_IS_SYMBOL(id));
@ -126,6 +131,12 @@ CanAttachNativeGetProp(JSContext* cx, HandleObject obj, HandleId id,
return CanAttachReadSlot;
}
if (IsCacheableGetPropCallScripted(obj, holder, shape, isTemporarilyUnoptimizable)) {
// See bug 1226816.
if (engine == ICStubEngine::Baseline)
return CanAttachCallGetter;
}
return CanAttachNone;
}
@ -229,6 +240,28 @@ EmitReadSlotResult(CacheIRWriter& writer, JSObject* obj, JSObject* holder,
}
}
static void
EmitCallGetterResult(CacheIRWriter& writer, JSObject* obj, JSObject* holder,
Shape* shape, ObjOperandId objId)
{
Maybe<ObjOperandId> expandoId;
TestMatchingReceiver(writer, obj, shape, objId, &expandoId);
if (obj != holder) {
GeneratePrototypeGuards(writer, obj, holder, objId);
// Guard on the holder's shape.
ObjOperandId holderId = writer.loadObject(holder);
writer.guardShape(holderId, holder->as<NativeObject>().lastProperty());
}
MOZ_ASSERT(IsCacheableGetPropCallScripted(obj, holder, shape));
JSFunction* target = &shape->getterValue().toObject().as<JSFunction>();
MOZ_ASSERT(target->hasJITCode());
writer.callScriptedGetterResult(objId, target);
}
bool
GetPropIRGenerator::tryAttachNative(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId)
{
@ -238,7 +271,8 @@ GetPropIRGenerator::tryAttachNative(CacheIRWriter& writer, HandleObject obj, Obj
RootedNativeObject holder(cx_);
RootedId id(cx_, NameToId(name_));
NativeGetPropCacheability type = CanAttachNativeGetProp(cx_, obj, id, &holder, &shape, pc_);
NativeGetPropCacheability type = CanAttachNativeGetProp(cx_, obj, id, &holder, &shape, pc_,
engine_, isTemporarilyUnoptimizable_);
if (type == CanAttachNone)
return true;
@ -258,6 +292,9 @@ GetPropIRGenerator::tryAttachNative(CacheIRWriter& writer, HandleObject obj, Obj
}
EmitReadSlotResult(writer, obj, holder, shape, objId);
break;
case CanAttachCallGetter:
EmitCallGetterResult(writer, obj, holder, shape, objId);
break;
default:
MOZ_CRASH("Bad NativeGetPropCacheability");
}


@ -91,6 +91,8 @@ class ObjOperandId : public OperandId
_(GuardAndLoadUnboxedExpando) \
_(LoadObject) \
_(LoadProto) \
\
/* The *Result ops load a value into the cache's result register. */ \
_(LoadFixedSlotResult) \
_(LoadDynamicSlotResult) \
_(LoadUnboxedPropertyResult) \
@ -98,6 +100,7 @@ class ObjOperandId : public OperandId
_(LoadInt32ArrayLengthResult) \
_(LoadUnboxedArrayLengthResult) \
_(LoadArgumentsObjectLengthResult) \
_(CallScriptedGetterResult) \
_(LoadUndefinedResult)
enum class CacheOp {
@ -333,6 +336,10 @@ class MOZ_RAII CacheIRWriter
void loadArgumentsObjectLengthResult(ObjOperandId obj) {
writeOpWithOperandId(CacheOp::LoadArgumentsObjectLengthResult, obj);
}
void callScriptedGetterResult(ObjOperandId obj, JSFunction* getter) {
writeOpWithOperandId(CacheOp::CallScriptedGetterResult, obj);
addStubWord(uintptr_t(getter), StubField::GCType::JSObject);
}
};
class CacheIRStubInfo;
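
callScriptedGetterResult illustrates how CacheIR keeps GC pointers out of the op stream: only the operand id goes into the code bytes, while the getter is appended to the stub data words together with its GC type, so stubs with identical ops can share JitCode. A toy sketch of that split (simplified writer, not the real CacheIRWriter):

```cpp
// Sketch of the op-stream / stub-data split: the hashable code bytes hold
// only ops and operand ids, while per-stub GC pointers live in stub fields.
#include <cstdint>
#include <utility>
#include <vector>

enum class CacheOp : uint8_t { CallScriptedGetterResult };
enum class GCType : uint8_t { NoGCThing, JSObject };

struct ToyWriter {
    std::vector<uint8_t> code;                             // shared, hashable
    std::vector<std::pair<uintptr_t, GCType>> stubFields;  // per-stub data

    void callScriptedGetterResult(uint8_t objId, uintptr_t getter) {
        code.push_back(static_cast<uint8_t>(CacheOp::CallScriptedGetterResult));
        code.push_back(objId);
        stubFields.emplace_back(getter, GCType::JSObject); // baked into stub data
    }
};

int main() {
    ToyWriter writer;
    writer.callScriptedGetterResult(/* objId = */ 0, /* getter = */ 0x1000);
    return (writer.code.size() == 2 && writer.stubFields.size() == 1) ? 0 : 1;
}
```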
@ -405,6 +412,8 @@ class MOZ_RAII GetPropIRGenerator
HandleValue val_;
HandlePropertyName name_;
MutableHandleValue res_;
ICStubEngine engine_;
bool* isTemporarilyUnoptimizable_;
bool emitted_;
enum class PreliminaryObjectAction { None, Unlink, NotePreliminary };
@ -427,8 +436,9 @@ class MOZ_RAII GetPropIRGenerator
GetPropIRGenerator& operator=(const GetPropIRGenerator&) = delete;
public:
GetPropIRGenerator(JSContext* cx, jsbytecode* pc, HandleValue val, HandlePropertyName name,
MutableHandleValue res);
GetPropIRGenerator(JSContext* cx, jsbytecode* pc, ICStubEngine engine,
bool* isTemporarilyUnoptimizable,
HandleValue val, HandlePropertyName name, MutableHandleValue res);
bool emitted() const { return emitted_; }
@ -442,7 +452,7 @@ class MOZ_RAII GetPropIRGenerator
}
};
enum class CacheKind
enum class CacheKind : uint8_t
{
GetProp
};


@ -582,8 +582,9 @@ IsCacheableGetPropCallNative(JSObject* obj, JSObject* holder, Shape* shape)
return !IsWindow(obj);
}
static bool
IsCacheableGetPropCallScripted(JSObject* obj, JSObject* holder, Shape* shape)
bool
jit::IsCacheableGetPropCallScripted(JSObject* obj, JSObject* holder, Shape* shape,
bool* isTemporarilyUnoptimizable)
{
if (!shape || !IsCacheableProtoChainForIonOrCacheIR(obj, holder))
return false;
@ -594,12 +595,18 @@ IsCacheableGetPropCallScripted(JSObject* obj, JSObject* holder, Shape* shape)
if (!shape->getterValue().toObject().is<JSFunction>())
return false;
JSFunction& getter = shape->getterValue().toObject().as<JSFunction>();
if (!getter.hasJITCode())
// See IsCacheableGetPropCallNative.
if (IsWindow(obj))
return false;
// See IsCacheableGetPropCallNative.
return !IsWindow(obj);
JSFunction& getter = shape->getterValue().toObject().as<JSFunction>();
if (!getter.hasJITCode()) {
if (isTemporarilyUnoptimizable)
*isTemporarilyUnoptimizable = true;
return false;
}
return true;
}
static bool


@ -842,6 +842,9 @@ IONCACHE_KIND_LIST(CACHE_CASTS)
bool IsCacheableProtoChainForIonOrCacheIR(JSObject* obj, JSObject* holder);
bool IsCacheableGetPropReadSlotForIonOrCacheIR(JSObject* obj, JSObject* holder, Shape* shape);
bool IsCacheableGetPropCallScripted(JSObject* obj, JSObject* holder, Shape* shape,
bool* isTemporarilyUnoptimizable = nullptr);
} // namespace jit
} // namespace js


@ -391,17 +391,23 @@ class JitZone
}
};
enum class CacheKind;
enum class CacheKind : uint8_t;
class CacheIRStubInfo;
enum class ICStubEngine : uint8_t {
Baseline = 0,
IonMonkey
};
struct CacheIRStubKey : public DefaultHasher<CacheIRStubKey> {
struct Lookup {
CacheKind kind;
ICStubEngine engine;
const uint8_t* code;
uint32_t length;
Lookup(CacheKind kind, const uint8_t* code, uint32_t length)
: kind(kind), code(code), length(length)
Lookup(CacheKind kind, ICStubEngine engine, const uint8_t* code, uint32_t length)
: kind(kind), engine(engine), code(code), length(length)
{}
};


@ -1175,7 +1175,7 @@ MarkJitStubFrame(JSTracer* trc, const JitFrameIterator& frame)
JitStubFrameLayout* layout = (JitStubFrameLayout*)frame.fp();
if (ICStub* stub = layout->maybeStubPtr()) {
MOZ_ASSERT(ICStub::CanMakeCalls(stub->kind()));
MOZ_ASSERT(stub->makesGCCalls());
stub->trace(trc);
}
}


@ -158,6 +158,59 @@ ICStubIterator::unlink(JSContext* cx)
unlinked_ = true;
}
/* static */ bool
ICStub::NonCacheIRStubMakesGCCalls(Kind kind)
{
MOZ_ASSERT(IsValidKind(kind));
MOZ_ASSERT(!IsCacheIRKind(kind));
switch (kind) {
case Call_Fallback:
case Call_Scripted:
case Call_AnyScripted:
case Call_Native:
case Call_ClassHook:
case Call_ScriptedApplyArray:
case Call_ScriptedApplyArguments:
case Call_ScriptedFunCall:
case Call_StringSplit:
case WarmUpCounter_Fallback:
case GetElem_NativeSlotName:
case GetElem_NativeSlotSymbol:
case GetElem_NativePrototypeSlotName:
case GetElem_NativePrototypeSlotSymbol:
case GetElem_NativePrototypeCallNativeName:
case GetElem_NativePrototypeCallNativeSymbol:
case GetElem_NativePrototypeCallScriptedName:
case GetElem_NativePrototypeCallScriptedSymbol:
case GetElem_UnboxedPropertyName:
case GetProp_CallNative:
case GetProp_CallNativeGlobal:
case GetProp_CallDOMProxyNative:
case GetProp_CallDOMProxyWithGenerationNative:
case GetProp_DOMProxyShadowed:
case GetProp_Generic:
case SetProp_CallScripted:
case SetProp_CallNative:
case RetSub_Fallback:
// These two fallback stubs don't actually make non-tail calls,
// but the fallback code for the bailout path needs to pop the stub frame
// pushed during the bailout.
case GetProp_Fallback:
case SetProp_Fallback:
return true;
default:
return false;
}
}
bool
ICStub::makesGCCalls() const
{
if (isCacheIR_Monitored())
return toCacheIR_Monitored()->stubInfo()->makesGCCalls();
return NonCacheIRStubMakesGCCalls(kind());
}
void
ICStub::markCode(JSTracer* trc, const char* name)
@ -434,14 +487,6 @@ ICStub::trace(JSTracer* trc)
TraceEdge(trc, &propStub->name(), "baseline-getproplistbaseshadowed-stub-name");
break;
}
case ICStub::GetProp_CallScripted: {
ICGetProp_CallScripted* callStub = toGetProp_CallScripted();
callStub->receiverGuard().trace(trc);
TraceEdge(trc, &callStub->holder(), "baseline-getpropcallscripted-stub-holder");
TraceEdge(trc, &callStub->holderShape(), "baseline-getpropcallscripted-stub-holdershape");
TraceEdge(trc, &callStub->getter(), "baseline-getpropcallscripted-stub-getter");
break;
}
case ICStub::GetProp_CallNative: {
ICGetProp_CallNative* callStub = toGetProp_CallNative();
callStub->receiverGuard().trace(trc);
@ -570,7 +615,7 @@ ICFallbackStub::unlinkStub(Zone* zone, ICStub* prev, ICStub* stub)
stub->trace(zone->barrierTracer());
}
if (ICStub::CanMakeCalls(stub->kind()) && stub->isMonitored()) {
if (stub->makesGCCalls() && stub->isMonitored()) {
// This stub can make calls so we can return to it if it's on the stack.
// We just have to reset its firstMonitorStub_ field to avoid a stale
// pointer when purgeOptimizedStubs destroys all optimized monitor
@ -584,7 +629,7 @@ ICFallbackStub::unlinkStub(Zone* zone, ICStub* prev, ICStub* stub)
// stub can make calls, a pointer to it may be stored in a stub frame on the
// stack, so we can't touch the stubCode_ or GC will crash when marking this
// pointer.
if (!ICStub::CanMakeCalls(stub->kind()))
if (!stub->makesGCCalls())
stub->stubCode_ = (uint8_t*)0xbad;
#endif
}
@ -720,7 +765,7 @@ ICStubCompiler::getStubCode()
// after this point.
postGenerateStubCode(masm, newStubCode);
MOZ_ASSERT(entersStubFrame_ == ICStub::CanMakeCalls(kind));
MOZ_ASSERT(entersStubFrame_ == ICStub::NonCacheIRStubMakesGCCalls(kind));
MOZ_ASSERT(!inStubFrame_);
#ifdef JS_ION_PERF
@ -2345,8 +2390,7 @@ UpdateExistingGetPropCallStubs(ICFallbackStub* fallbackStub,
HandleObject receiver,
HandleFunction getter)
{
MOZ_ASSERT(kind == ICStub::GetProp_CallScripted ||
kind == ICStub::GetProp_CallNative ||
MOZ_ASSERT(kind == ICStub::GetProp_CallNative ||
kind == ICStub::GetProp_CallNativeGlobal);
MOZ_ASSERT(fallbackStub->isGetName_Fallback() ||
fallbackStub->isGetProp_Fallback());
@ -2430,33 +2474,6 @@ TryAttachNativeGetAccessorPropStub(JSContext* cx, SharedStubInfo* info,
isTemporarilyUnoptimizable,
isDOMProxy);
// Try handling scripted getters.
if (cacheableCall && isScripted && !isDOMProxy &&
info->engine() == ICStubCompiler::Engine::Baseline)
{
RootedFunction callee(cx, &shape->getterObject()->as<JSFunction>());
MOZ_ASSERT(callee->hasScript());
if (UpdateExistingGetPropCallStubs(stub, ICStub::GetProp_CallScripted,
holder.as<NativeObject>(), obj, callee)) {
*attached = true;
return true;
}
JitSpew(JitSpew_BaselineIC, " Generating GetProp(NativeObj/ScriptedGetter %s:%" PRIuSIZE ") stub",
callee->nonLazyScript()->filename(), callee->nonLazyScript()->lineno());
ICGetProp_CallScripted::Compiler compiler(cx, monitorStub, obj, holder, callee,
info->pcOffset());
ICStub* newStub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
if (!newStub)
return false;
stub->addNewStub(newStub);
*attached = true;
return true;
}
// If it's a shadowed listbase proxy property, attach stub to call Proxy::get instead.
if (isDOMProxy && DOMProxyIsShadowing(domProxyShadowsResult)) {
MOZ_ASSERT(obj == holder);
@ -2677,11 +2694,12 @@ DoGetPropFallback(JSContext* cx, void* payload, ICGetProp_Fallback* stub_,
if (!attached && !JitOptions.disableCacheIR) {
mozilla::Maybe<CacheIRWriter> writer;
GetPropIRGenerator gen(cx, pc, val, name, res);
GetPropIRGenerator gen(cx, pc, engine, &isTemporarilyUnoptimizable, val, name, res);
if (!gen.tryAttachStub(writer))
return false;
if (gen.emitted()) {
ICStub* newStub = AttachBaselineCacheIRStub(cx, writer.ref(), CacheKind::GetProp, stub);
ICStub* newStub = AttachBaselineCacheIRStub(cx, writer.ref(), CacheKind::GetProp,
engine, info.outerScript(cx), stub);
if (newStub) {
JitSpew(JitSpew_BaselineIC, " Attached CacheIR stub");
attached = true;
@ -2693,7 +2711,7 @@ DoGetPropFallback(JSContext* cx, void* payload, ICGetProp_Fallback* stub_,
}
}
if (!attached && !stub.invalid() &&
if (!attached && !stub.invalid() && !isTemporarilyUnoptimizable &&
!TryAttachNativeGetAccessorPropStub(cx, &info, stub, name, val, res, &attached,
&isTemporarilyUnoptimizable))
{
@ -2962,98 +2980,6 @@ GetProtoShapes(JSObject* obj, size_t protoChainDepth, MutableHandle<ShapeVector>
return true;
}
bool
ICGetProp_CallScripted::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
Label failureLeaveStubFrame;
AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
Register scratch = regs.takeAnyExcluding(ICTailCallReg);
// Guard input is an object.
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
// Unbox and shape guard.
Register objReg = masm.extractObject(R0, ExtractTemp0);
GuardReceiverObject(masm, ReceiverGuard(receiver_), objReg, scratch,
ICGetProp_CallScripted::offsetOfReceiverGuard(), &failure);
if (receiver_ != holder_) {
Register holderReg = regs.takeAny();
masm.loadPtr(Address(ICStubReg, ICGetProp_CallScripted::offsetOfHolder()), holderReg);
masm.loadPtr(Address(ICStubReg, ICGetProp_CallScripted::offsetOfHolderShape()), scratch);
masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratch, &failure);
regs.add(holderReg);
}
// Push a stub frame so that we can perform a non-tail call.
enterStubFrame(masm, scratch);
// Load callee function and code. To ensure that |code| doesn't end up being
// ArgumentsRectifierReg, if it's available we assign it to |callee| instead.
Register callee;
if (regs.has(ArgumentsRectifierReg)) {
callee = ArgumentsRectifierReg;
regs.take(callee);
} else {
callee = regs.takeAny();
}
Register code = regs.takeAny();
masm.loadPtr(Address(ICStubReg, ICGetProp_CallScripted::offsetOfGetter()), callee);
masm.branchIfFunctionHasNoScript(callee, &failureLeaveStubFrame);
masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
masm.loadBaselineOrIonRaw(code, code, &failureLeaveStubFrame);
// Align the stack such that the JitFrameLayout is aligned on
// JitStackAlignment.
masm.alignJitStackBasedOnNArgs(0);
// Getter is called with 0 arguments, just |obj| as thisv.
// Note that we use Push, not push, so that callJit will align the stack
// properly on ARM.
masm.Push(R0);
EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
masm.Push(Imm32(0)); // ActualArgc is 0
masm.Push(callee);
masm.Push(scratch);
// Handle arguments underflow.
Label noUnderflow;
masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
masm.branch32(Assembler::Equal, scratch, Imm32(0), &noUnderflow);
{
// Call the arguments rectifier.
MOZ_ASSERT(ArgumentsRectifierReg != code);
JitCode* argumentsRectifier =
cx->runtime()->jitRuntime()->getArgumentsRectifier();
masm.movePtr(ImmGCPtr(argumentsRectifier), code);
masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
masm.movePtr(ImmWord(0), ArgumentsRectifierReg);
}
masm.bind(&noUnderflow);
masm.callJit(code);
leaveStubFrame(masm, true);
// Enter type monitor IC to type-check result.
EmitEnterTypeMonitorIC(masm);
// Leave stub frame and go to next stub.
masm.bind(&failureLeaveStubFrame);
inStubFrame_ = true;
leaveStubFrame(masm, false);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// VM function to help call native getters.
//
@ -3631,23 +3557,12 @@ ICGetPropCallGetter::ICGetPropCallGetter(Kind kind, JitCode* stubCode, ICStub* f
getter_(getter),
pcOffset_(pcOffset)
{
MOZ_ASSERT(kind == ICStub::GetProp_CallScripted ||
kind == ICStub::GetProp_CallNative ||
MOZ_ASSERT(kind == ICStub::GetProp_CallNative ||
kind == ICStub::GetProp_CallNativeGlobal ||
kind == ICStub::GetProp_CallDOMProxyNative ||
kind == ICStub::GetProp_CallDOMProxyWithGenerationNative);
}
/* static */ ICGetProp_CallScripted*
ICGetProp_CallScripted::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
ICGetProp_CallScripted& other)
{
return New<ICGetProp_CallScripted>(cx, space, other.jitCode(), firstMonitorStub,
other.receiverGuard(),
other.holder_, other.holderShape_,
other.getter_, other.pcOffset_);
}
/* static */ ICGetProp_CallNative*
ICGetProp_CallNative::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
ICGetProp_CallNative& other)
@ -4259,7 +4174,7 @@ DoNewObject(JSContext* cx, void* payload, ICNewObject_Fallback* stub, MutableHan
return false;
ICStubSpace* space =
ICStubCompiler::StubSpaceForKind(ICStub::NewObject_WithTemplate, script,
ICStubCompiler::StubSpaceForStub(/* makesGCCalls = */ false, script,
ICStubCompiler::Engine::Baseline);
ICStub* templateStub = ICStub::New<ICNewObject_WithTemplate>(cx, space, code);
if (!templateStub)


@ -504,9 +504,12 @@ class ICStub
LIMIT
};
static inline bool IsValidKind(Kind k) {
static bool IsValidKind(Kind k) {
return (k > INVALID) && (k < LIMIT);
}
static bool IsCacheIRKind(Kind k) {
return k == CacheIR_Monitored;
}
static const char* KindString(Kind k) {
switch(k) {
@ -710,48 +713,8 @@ class ICStub
return offsetof(ICStub, extra_);
}
static bool CanMakeCalls(ICStub::Kind kind) {
MOZ_ASSERT(IsValidKind(kind));
switch (kind) {
case Call_Fallback:
case Call_Scripted:
case Call_AnyScripted:
case Call_Native:
case Call_ClassHook:
case Call_ScriptedApplyArray:
case Call_ScriptedApplyArguments:
case Call_ScriptedFunCall:
case Call_StringSplit:
case WarmUpCounter_Fallback:
case GetElem_NativeSlotName:
case GetElem_NativeSlotSymbol:
case GetElem_NativePrototypeSlotName:
case GetElem_NativePrototypeSlotSymbol:
case GetElem_NativePrototypeCallNativeName:
case GetElem_NativePrototypeCallNativeSymbol:
case GetElem_NativePrototypeCallScriptedName:
case GetElem_NativePrototypeCallScriptedSymbol:
case GetElem_UnboxedPropertyName:
case GetProp_CallScripted:
case GetProp_CallNative:
case GetProp_CallNativeGlobal:
case GetProp_CallDOMProxyNative:
case GetProp_CallDOMProxyWithGenerationNative:
case GetProp_DOMProxyShadowed:
case GetProp_Generic:
case SetProp_CallScripted:
case SetProp_CallNative:
case RetSub_Fallback:
// These two fallback stubs don't actually make non-tail calls,
// but the fallback code for the bailout path needs to pop the stub frame
// pushed during the bailout.
case GetProp_Fallback:
case SetProp_Fallback:
return true;
default:
return false;
}
}
static bool NonCacheIRStubMakesGCCalls(Kind kind);
bool makesGCCalls() const;
// Optimized stubs get purged on GC. But some stubs can be active on the
// stack during GC - specifically the ones that can make calls. To ensure
@ -759,7 +722,7 @@ class ICStub
// in the fallback stub space.
bool allocatedInFallbackSpace() const {
MOZ_ASSERT(next());
return CanMakeCalls(kind());
return makesGCCalls();
}
};
@ -902,14 +865,18 @@ class ICMonitoredStub : public ICStub
class ICCacheIR_Monitored : public ICMonitoredStub
{
CacheIRStubInfo* stubInfo_;
const CacheIRStubInfo* stubInfo_;
public:
ICCacheIR_Monitored(JitCode* stubCode, ICStub* firstMonitorStub, CacheIRStubInfo* stubInfo)
ICCacheIR_Monitored(JitCode* stubCode, ICStub* firstMonitorStub,
const CacheIRStubInfo* stubInfo)
: ICMonitoredStub(ICStub::CacheIR_Monitored, stubCode, firstMonitorStub),
stubInfo_(stubInfo)
{}
static ICCacheIR_Monitored* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
ICCacheIR_Monitored& other);
void notePreliminaryObject() {
extra_ = 1;
}
@ -994,10 +961,7 @@ class ICStubCompiler
js::gc::AutoSuppressGC suppressGC;
public:
enum class Engine {
Baseline = 0,
IonMonkey
};
using Engine = ICStubEngine;
protected:
JSContext* cx;
@ -1107,17 +1071,16 @@ class ICStubCompiler
public:
virtual ICStub* getStub(ICStubSpace* space) = 0;
static ICStubSpace* StubSpaceForKind(ICStub::Kind kind, JSScript* outerScript, Engine engine) {
if (ICStub::CanMakeCalls(kind)) {
static ICStubSpace* StubSpaceForStub(bool makesGCCalls, JSScript* outerScript, Engine engine) {
if (makesGCCalls) {
if (engine == ICStubCompiler::Engine::Baseline)
return outerScript->baselineScript()->fallbackStubSpace();
return outerScript->ionScript()->fallbackStubSpace();
}
return outerScript->zone()->jitZone()->optimizedStubSpace();
}
ICStubSpace* getStubSpace(JSScript* outerScript) {
return StubSpaceForKind(kind, outerScript, engine_);
return StubSpaceForStub(ICStub::NonCacheIRStubMakesGCCalls(kind), outerScript, engine_);
}
};
@ -2666,54 +2629,12 @@ class ICGetPropCallGetter : public ICMonitoredStub
pcOffset_(pcOffset),
outerClass_(outerClass)
{
MOZ_ASSERT(kind == ICStub::GetProp_CallScripted ||
kind == ICStub::GetProp_CallNative ||
MOZ_ASSERT(kind == ICStub::GetProp_CallNative ||
kind == ICStub::GetProp_CallNativeGlobal);
}
};
};
// Stub for calling a scripted getter on a native object when the getter is kept on the
// proto-chain.
class ICGetProp_CallScripted : public ICGetPropCallGetter
{
friend class ICStubSpace;
protected:
ICGetProp_CallScripted(JitCode* stubCode, ICStub* firstMonitorStub,
ReceiverGuard receiverGuard,
JSObject* holder, Shape* holderShape,
JSFunction* getter, uint32_t pcOffset)
: ICGetPropCallGetter(GetProp_CallScripted, stubCode, firstMonitorStub,
receiverGuard, holder, holderShape, getter, pcOffset)
{}
public:
static ICGetProp_CallScripted* Clone(JSContext* cx, ICStubSpace* space,
ICStub* firstMonitorStub, ICGetProp_CallScripted& other);
class Compiler : public ICGetPropCallGetter::Compiler {
protected:
MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
public:
Compiler(JSContext* cx, ICStub* firstMonitorStub, HandleObject obj,
HandleObject holder, HandleFunction getter, uint32_t pcOffset)
: ICGetPropCallGetter::Compiler(cx, ICStub::GetProp_CallScripted, Engine::Baseline,
firstMonitorStub, obj, holder,
getter, pcOffset, /* outerClass = */ nullptr)
{}
ICStub* getStub(ICStubSpace* space) {
ReceiverGuard guard(receiver_);
Shape* holderShape = holder_->as<NativeObject>().lastProperty();
return newStub<ICGetProp_CallScripted>(space, getStubCode(), firstMonitorStub_,
guard, holder_, holderShape, getter_,
pcOffset_);
}
};
};
// Stub for calling a native getter on a native object.
class ICGetProp_CallNative : public ICGetPropCallGetter
{


@ -36,7 +36,6 @@ namespace jit {
\
_(GetProp_Fallback) \
_(GetProp_StringLength) \
_(GetProp_CallScripted) \
_(GetProp_CallNative) \
_(GetProp_CallNativeGlobal) \
_(GetProp_CallDOMProxyNative) \