Bug 1326067 part 2 - Convert Baseline setslot IC stub to CacheIR. r=h4writer

Jan de Mooij 2017-01-16 18:40:20 +01:00
parent 75dc1a06d1
commit d6ea6bd9d7
18 changed files with 588 additions and 255 deletions

View File

@ -18,6 +18,8 @@ using namespace js::jit;
using mozilla::Maybe;
class AutoStubFrame;
// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
{
@ -32,6 +34,11 @@ class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
MOZ_MUST_USE bool callVM(MacroAssembler& masm, const VMFunction& fun);
MOZ_MUST_USE bool callTypeUpdateIC(AutoStubFrame& stubFrame, Register obj, ValueOperand val,
Register scratch, LiveGeneralRegisterSet saveRegs);
MOZ_MUST_USE bool emitStoreSlotShared(bool isFixed);
public:
friend class AutoStubFrame;
@ -65,6 +72,8 @@ class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
#undef DEFINE_SHARED_OP
enum class CallCanGC { CanGC, CanNotGC };
// Instructions that have to perform a callVM require a stub frame. Use
// AutoStubFrame before allocating any registers, then call its enter() and
// leave() methods to enter/leave the stub frame.
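A condensed sketch of that usage pattern, drawn from emitStoreSlotShared/callTypeUpdateIC later in this patch (abridged, not a complete emitter):

    AutoStubFrame stubFrame(*this);                  // construct before register allocation
    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    // ... emit guards and the fast path ...
    stubFrame.enter(masm, scratch);                  // push the stub frame
    if (!callVM(masm, DoTypeUpdateFallbackInfo))
        return false;
    stubFrame.leave(masm);                           // pop it before returning from the IC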
@ -93,7 +102,7 @@ class MOZ_RAII AutoStubFrame
tail.emplace(compiler.allocator, compiler.masm, ICTailCallReg);
}
void enter(MacroAssembler& masm, Register scratch) {
void enter(MacroAssembler& masm, Register scratch, CallCanGC canGC = CallCanGC::CanGC) {
if (compiler.engine_ == ICStubEngine::Baseline) {
EmitBaselineEnterStubFrame(masm, scratch);
#ifdef DEBUG
@ -105,7 +114,8 @@ class MOZ_RAII AutoStubFrame
MOZ_ASSERT(!compiler.inStubFrame_);
compiler.inStubFrame_ = true;
compiler.makesGCCalls_ = true;
if (canGC == CallCanGC::CanGC)
compiler.makesGCCalls_ = true;
}
void leave(MacroAssembler& masm, bool calledIntoIon = false) {
MOZ_ASSERT(compiler.inStubFrame_);
@ -649,6 +659,112 @@ BaselineCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult()
return true;
}
bool
BaselineCacheIRCompiler::callTypeUpdateIC(AutoStubFrame& stubFrame, Register obj, ValueOperand val,
Register scratch, LiveGeneralRegisterSet saveRegs)
{
// R0 contains the value that needs to be typechecked.
MOZ_ASSERT(val == R0);
MOZ_ASSERT(scratch == R1.scratchReg());
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
static const bool CallClobbersTailReg = false;
#else
static const bool CallClobbersTailReg = true;
#endif
// Call the first type update stub.
if (CallClobbersTailReg)
masm.push(ICTailCallReg);
masm.push(ICStubReg);
masm.loadPtr(Address(ICStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
ICStubReg);
masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
masm.pop(ICStubReg);
if (CallClobbersTailReg)
masm.pop(ICTailCallReg);
// The update IC will store 0 or 1 in |scratch| (R1.scratchReg()), reflecting
// whether the value in R0 type-checked properly.
Label done;
masm.branch32(Assembler::Equal, scratch, Imm32(1), &done);
stubFrame.enter(masm, scratch, CallCanGC::CanNotGC);
masm.PushRegsInMask(saveRegs);
masm.Push(val);
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
masm.Push(ICStubReg);
// Load previous frame pointer, push BaselineFrame*.
masm.loadPtr(Address(BaselineFrameReg, 0), scratch);
masm.pushBaselineFramePtr(scratch, scratch);
if (!callVM(masm, DoTypeUpdateFallbackInfo))
return false;
masm.PopRegsInMask(saveRegs);
stubFrame.leave(masm);
masm.bind(&done);
return true;
}
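A minimal standalone model of the handshake callTypeUpdateIC implements; the names (ToyUpdateChain, typeUpdateFallback, storeWithTypeCheck) are invented, and it only illustrates the 0/1 protocol between the update-stub chain and the VM fallback, not the real IC machinery:

    // Toy model: the first update stub reports 1 when the stored value already
    // matches the property's known types, 0 when the VM fallback must record it.
    #include <vector>

    enum class ValueKind { Int32, Double, Object, String };

    struct ToyUpdateChain {
        std::vector<ValueKind> knownTypes;
        int call(ValueKind v) const {                  // models the call through
            for (ValueKind k : knownTypes) {           // offsetOfFirstUpdateStub()
                if (k == v)
                    return 1;
            }
            return 0;
        }
    };

    // Models DoTypeUpdateFallback: record the type so later stores stay on the fast path.
    void typeUpdateFallback(ToyUpdateChain& chain, ValueKind v) {
        chain.knownTypes.push_back(v);
    }

    void storeWithTypeCheck(ToyUpdateChain& chain, ValueKind rhs) {
        if (chain.call(rhs) != 1)                      // branch32(Equal, scratch, Imm32(1), &done)
            typeUpdateFallback(chain, rhs);
        // ... perform the actual slot store ...
    }

    int main() {
        ToyUpdateChain chain;
        storeWithTypeCheck(chain, ValueKind::Int32);   // slow path: records Int32
        storeWithTypeCheck(chain, ValueKind::Int32);   // fast path: already known
    }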
bool
BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed)
{
ObjOperandId objId = reader.objOperandId();
Address offsetAddr = stubAddress(reader.stubOffset());
// Allocate the fixed registers first. These need to be fixed for
// callTypeUpdateIC.
AutoStubFrame stubFrame(*this);
AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
Register obj = allocator.useRegister(masm, objId);
LiveGeneralRegisterSet saveRegs;
saveRegs.add(obj);
saveRegs.add(val);
if (!callTypeUpdateIC(stubFrame, obj, val, scratch, saveRegs))
return false;
masm.load32(offsetAddr, scratch);
if (isFixed) {
BaseIndex slot(obj, scratch, TimesOne);
EmitPreBarrier(masm, slot, MIRType::Value);
masm.storeValue(val, slot);
} else {
// To avoid running out of registers on x86, use ICStubReg as scratch.
// We don't need it anymore.
Register slots = ICStubReg;
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), slots);
BaseIndex slot(slots, scratch, TimesOne);
EmitPreBarrier(masm, slot, MIRType::Value);
masm.storeValue(val, slot);
}
if (cx_->gc.nursery.exists())
BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch, LiveGeneralRegisterSet(), cx_);
return true;
}
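Note that the slot offset is not baked into the generated code: emitStoreSlotShared reads it from stub memory via stubAddress(reader.stubOffset()). A rough, illustrative picture of the layout (field names invented; see stubDataStart() later in this file for the real offset computation):

    struct ToyCacheIRStub {
        void* jitCode;          // shared machine code (ICStub::offsetOfStubCode())
        void* next;             // next stub in the IC chain
        const void* stubInfo;   // CacheIRStubInfo describing the IR and its fields
    };
    // Per-stub field values (shape/group pointers, raw slot offsets, ...) live
    // immediately after the header, i.e. at stubDataOffset, which is why
    // stubDataStart() returns reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset().
    // Keeping these values out of the code lets stubs that share the same IR share JitCode.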
bool
BaselineCacheIRCompiler::emitStoreFixedSlot()
{
return emitStoreSlotShared(true);
}
bool
BaselineCacheIRCompiler::emitStoreDynamicSlot()
{
return emitStoreSlotShared(false);
}
bool
BaselineCacheIRCompiler::emitTypeMonitorResult()
{
@ -748,6 +864,7 @@ BaselineCacheIRCompiler::init(CacheKind kind)
allocator.initInputLocation(0, R0);
break;
case CacheKind::GetElem:
case CacheKind::SetProp:
MOZ_ASSERT(numInputs == 2);
allocator.initInputLocation(0, R0);
allocator.initInputLocation(1, R1);
@ -786,9 +903,22 @@ jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
// unlimited number of stubs.
MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs);
MOZ_ASSERT(kind == CacheKind::GetProp || kind == CacheKind::GetElem ||
kind == CacheKind::GetName, "sizeof needs to change for SetProp!");
uint32_t stubDataOffset = sizeof(ICCacheIR_Monitored);
enum class CacheIRStubKind { Monitored, Updated };
uint32_t stubDataOffset;
CacheIRStubKind stubKind;
switch (kind) {
case CacheKind::GetProp:
case CacheKind::GetElem:
case CacheKind::GetName:
stubDataOffset = sizeof(ICCacheIR_Monitored);
stubKind = CacheIRStubKind::Monitored;
break;
case CacheKind::SetProp:
stubDataOffset = sizeof(ICCacheIR_Updated);
stubKind = CacheIRStubKind::Updated;
break;
}
JitCompartment* jitCompartment = cx->compartment()->jitCompartment();
@ -822,21 +952,34 @@ jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
MOZ_ASSERT(code);
MOZ_ASSERT(stubInfo);
MOZ_ASSERT(stub->isMonitoredFallback());
MOZ_ASSERT(stubInfo->stubDataSize() == writer.stubDataSize());
// Ensure we don't attach duplicate stubs. This can happen if a stub failed
// for some reason and the IR generator doesn't check for exactly the same
// conditions.
for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
if (!iter->isCacheIR_Monitored())
continue;
ICCacheIR_Monitored* otherStub = iter->toCacheIR_Monitored();
if (otherStub->stubInfo() != stubInfo)
continue;
if (!writer.stubDataEquals(otherStub->stubDataStart()))
continue;
switch (stubKind) {
case CacheIRStubKind::Monitored: {
if (!iter->isCacheIR_Monitored())
continue;
auto otherStub = iter->toCacheIR_Monitored();
if (otherStub->stubInfo() != stubInfo)
continue;
if (!writer.stubDataEquals(otherStub->stubDataStart()))
continue;
break;
}
case CacheIRStubKind::Updated: {
if (!iter->isCacheIR_Updated())
continue;
auto otherStub = iter->toCacheIR_Updated();
if (otherStub->stubInfo() != stubInfo)
continue;
if (!writer.stubDataEquals(otherStub->stubDataStart()))
continue;
break;
}
}
// We found a stub that's exactly the same as the stub we're about to
// attach. Just return nullptr, the caller should do nothing in this
@ -854,12 +997,28 @@ jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
if (!newStubMem)
return nullptr;
ICStub* monitorStub = stub->toMonitoredFallbackStub()->fallbackMonitorStub()->firstMonitorStub();
auto newStub = new(newStubMem) ICCacheIR_Monitored(code, monitorStub, stubInfo);
switch (stubKind) {
case CacheIRStubKind::Monitored: {
ICStub* monitorStub =
stub->toMonitoredFallbackStub()->fallbackMonitorStub()->firstMonitorStub();
auto newStub = new(newStubMem) ICCacheIR_Monitored(code, monitorStub, stubInfo);
writer.copyStubData(newStub->stubDataStart());
stub->addNewStub(newStub);
return newStub;
}
case CacheIRStubKind::Updated: {
auto newStub = new(newStubMem) ICCacheIR_Updated(code, stubInfo);
if (!newStub->initUpdatingChain(cx, stubSpace)) {
cx->recoverFromOutOfMemory();
return nullptr;
}
writer.copyStubData(newStub->stubDataStart());
stub->addNewStub(newStub);
return newStub;
}
}
writer.copyStubData(newStub->stubDataStart());
stub->addNewStub(newStub);
return newStub;
MOZ_CRASH("Invalid kind");
}
uint8_t*
@ -868,6 +1027,12 @@ ICCacheIR_Monitored::stubDataStart()
return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
}
uint8_t*
ICCacheIR_Updated::stubDataStart()
{
return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
}
/* static */ ICCacheIR_Monitored*
ICCacheIR_Monitored::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
ICCacheIR_Monitored& other)

View File

@ -280,14 +280,18 @@ DoTypeUpdateFallback(JSContext* cx, BaselineFrame* frame, ICUpdatedStub* stub, H
RootedObject obj(cx, &objval.toObject());
RootedId id(cx);
switch(stub->kind()) {
switch (stub->kind()) {
case ICStub::CacheIR_Updated:
id = stub->toCacheIR_Updated()->updateStubId();
MOZ_ASSERT(id != JSID_EMPTY);
AddTypePropertyId(cx, obj, id, value);
break;
case ICStub::SetElem_DenseOrUnboxedArray:
case ICStub::SetElem_DenseOrUnboxedArrayAdd: {
id = JSID_VOID;
AddTypePropertyId(cx, obj, id, value);
break;
}
case ICStub::SetProp_Native:
case ICStub::SetProp_NativeAdd:
case ICStub::SetProp_Unboxed: {
MOZ_ASSERT(obj->isNative() || obj->is<UnboxedPlainObject>());
@ -737,23 +741,6 @@ LastPropertyForSetProp(JSObject* obj)
return nullptr;
}
static bool
IsCacheableSetPropWriteSlot(JSObject* obj, Shape* oldShape, Shape* propertyShape)
{
// Object shape must not have changed during the property set.
if (LastPropertyForSetProp(obj) != oldShape)
return false;
if (!propertyShape->hasSlot() ||
!propertyShape->hasDefaultSetter() ||
!propertyShape->writable())
{
return false;
}
return true;
}
static bool
IsCacheableSetPropAddSlot(JSContext* cx, JSObject* obj, Shape* oldShape,
jsid id, Shape* propertyShape, size_t* protoChainDepth)
@ -1531,7 +1518,7 @@ ICSetElem_DenseOrUnboxedArray::Compiler::generateStubCode(MacroAssembler& masm)
saveRegs.add(R0);
saveRegs.addUnchecked(obj);
saveRegs.add(ICStubReg);
emitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs);
BaselineEmitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs, cx);
masm.Pop(R1);
}
@ -1739,7 +1726,7 @@ ICSetElemDenseOrUnboxedArrayAddCompiler::generateStubCode(MacroAssembler& masm)
saveRegs.add(R0);
saveRegs.addUnchecked(obj);
saveRegs.add(ICStubReg);
emitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs);
BaselineEmitPostWriteBarrierSlot(masm, obj, R1, scratchReg, saveRegs, cx);
masm.Pop(R1);
}
@ -2636,41 +2623,6 @@ TryAttachSetValuePropStub(JSContext* cx, HandleScript script, jsbytecode* pc, IC
return true;
}
if (IsCacheableSetPropWriteSlot(obj, oldShape, shape)) {
// For some property writes, such as the initial overwrite of global
// properties, TI will not mark the property as having been
// overwritten. Don't attach a stub in this case, so that we don't
// execute another write to the property without TI seeing that write.
EnsureTrackPropertyTypes(cx, obj, id);
if (!PropertyHasBeenMarkedNonConstant(obj, id)) {
*attached = true;
return true;
}
bool isFixedSlot;
uint32_t offset;
GetFixedOrDynamicSlotOffset(shape, &isFixedSlot, &offset);
JitSpew(JitSpew_BaselineIC, " Generating SetProp(NativeObject.PROP) stub");
MOZ_ASSERT(LastPropertyForSetProp(obj) == oldShape,
"Should this really be a SetPropWriteSlot?");
ICSetProp_Native::Compiler compiler(cx, obj, isFixedSlot, offset);
ICSetProp_Native* newStub = compiler.getStub(compiler.getStubSpace(script));
if (!newStub)
return false;
if (!newStub->addUpdateStubForValue(cx, script, obj, id, rhs))
return false;
if (IsPreliminaryObject(obj))
newStub->notePreliminaryObject();
else
StripPreliminaryObjectStubs(cx, stub);
stub->addNewStub(newStub);
*attached = true;
return true;
}
return true;
}
@ -2935,6 +2887,30 @@ DoSetPropFallback(JSContext* cx, BaselineFrame* frame, ICSetProp_Fallback* stub_
return false;
}
if (!attached &&
stub->numOptimizedStubs() < ICSetProp_Fallback::MAX_OPTIMIZED_STUBS &&
!JitOptions.disableCacheIR)
{
RootedValue idVal(cx, StringValue(name));
SetPropIRGenerator gen(cx, pc, CacheKind::SetProp, &isTemporarilyUnoptimizable,
lhs, idVal, rhs);
if (gen.tryAttachStub()) {
ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
ICStubEngine::Baseline, frame->script(), stub);
if (newStub) {
JitSpew(JitSpew_BaselineIC, " Attached CacheIR stub");
attached = true;
newStub->toCacheIR_Updated()->updateStubId() = gen.updateStubId();
if (gen.shouldNotePreliminaryObjectStub())
newStub->toCacheIR_Updated()->notePreliminaryObject();
else if (gen.shouldUnlinkPreliminaryObjectStubs())
StripPreliminaryObjectStubs(cx, stub);
}
}
}
if (op == JSOP_INITPROP ||
op == JSOP_INITLOCKEDPROP ||
op == JSOP_INITHIDDENPROP)
@ -3107,77 +3083,6 @@ GuardGroupAndShapeMaybeUnboxedExpando(MacroAssembler& masm, JSObject* obj,
}
}
bool
ICSetProp_Native::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
// Guard input is an object.
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
Register objReg = masm.extractObject(R0, ExtractTemp0);
AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
Register scratch = regs.takeAny();
GuardGroupAndShapeMaybeUnboxedExpando(masm, obj_, objReg, scratch,
ICSetProp_Native::offsetOfGroup(),
ICSetProp_Native::offsetOfShape(),
&failure);
// Stow both R0 and R1 (object and value).
EmitStowICValues(masm, 2);
// Type update stub expects the value to check in R0.
masm.moveValue(R1, R0);
// Call the type-update stub.
if (!callTypeUpdateIC(masm, sizeof(Value)))
return false;
// Unstow R0 and R1 (object and key)
EmitUnstowICValues(masm, 2);
regs.add(R0);
regs.takeUnchecked(objReg);
Register holderReg;
if (obj_->is<UnboxedPlainObject>()) {
// We are loading off the expando object, so use that for the holder.
holderReg = regs.takeAny();
masm.loadPtr(Address(objReg, UnboxedPlainObject::offsetOfExpando()), holderReg);
if (!isFixedSlot_)
masm.loadPtr(Address(holderReg, NativeObject::offsetOfSlots()), holderReg);
} else if (isFixedSlot_) {
holderReg = objReg;
} else {
holderReg = regs.takeAny();
masm.loadPtr(Address(objReg, NativeObject::offsetOfSlots()), holderReg);
}
// Perform the store.
masm.load32(Address(ICStubReg, ICSetProp_Native::offsetOfOffset()), scratch);
EmitPreBarrier(masm, BaseIndex(holderReg, scratch, TimesOne), MIRType::Value);
masm.storeValue(R1, BaseIndex(holderReg, scratch, TimesOne));
if (holderReg != objReg)
regs.add(holderReg);
if (cx->runtime()->gc.nursery.exists()) {
Register scr = regs.takeAny();
LiveGeneralRegisterSet saveRegs;
saveRegs.add(R1);
emitPostWriteBarrierSlot(masm, objReg, R1, scr, saveRegs);
regs.add(scr);
}
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
ICUpdatedStub*
ICSetPropNativeAddCompiler::getStub(ICStubSpace* space)
{
@ -3323,7 +3228,7 @@ ICSetPropNativeAddCompiler::generateStubCode(MacroAssembler& masm)
Register scr = regs.takeAny();
LiveGeneralRegisterSet saveRegs;
saveRegs.add(R1);
emitPostWriteBarrierSlot(masm, objReg, R1, scr, saveRegs);
BaselineEmitPostWriteBarrierSlot(masm, objReg, R1, scr, saveRegs, cx);
}
EmitReturnFromIC(masm);
@ -3380,7 +3285,7 @@ ICSetProp_Unboxed::Compiler::generateStubCode(MacroAssembler& masm)
saveRegs.add(R1);
saveRegs.addUnchecked(object);
saveRegs.add(ICStubReg);
emitPostWriteBarrierSlot(masm, object, R1, scratch, saveRegs);
BaselineEmitPostWriteBarrierSlot(masm, object, R1, scratch, saveRegs, cx);
}
// Compute the address being written to.
@ -3447,7 +3352,7 @@ ICSetProp_TypedObject::Compiler::generateStubCode(MacroAssembler& masm)
saveRegs.add(R1);
saveRegs.addUnchecked(object);
saveRegs.add(ICStubReg);
emitPostWriteBarrierSlot(masm, object, R1, scratch, saveRegs);
BaselineEmitPostWriteBarrierSlot(masm, object, R1, scratch, saveRegs, cx);
}
// Save the rhs on the stack so we can get a second scratch register.
@ -6682,28 +6587,6 @@ ICInstanceOf_Function::ICInstanceOf_Function(JitCode* stubCode, Shape* shape,
slot_(slot)
{ }
ICSetProp_Native::ICSetProp_Native(JitCode* stubCode, ObjectGroup* group, Shape* shape,
uint32_t offset)
: ICUpdatedStub(SetProp_Native, stubCode),
group_(group),
shape_(shape),
offset_(offset)
{ }
ICSetProp_Native*
ICSetProp_Native::Compiler::getStub(ICStubSpace* space)
{
RootedObjectGroup group(cx, JSObject::getGroup(cx, obj_));
if (!group)
return nullptr;
RootedShape shape(cx, LastPropertyForSetProp(obj_));
ICSetProp_Native* stub = newStub<ICSetProp_Native>(space, getStubCode(), group, shape, offset_);
if (!stub || !stub->initUpdatingChain(cx, space))
return nullptr;
return stub;
}
ICSetProp_NativeAdd::ICSetProp_NativeAdd(JitCode* stubCode, ObjectGroup* group,
size_t protoChainDepth,
Shape* newShape,

View File

@ -1111,69 +1111,6 @@ class ICSetProp_Fallback : public ICFallbackStub
};
};
// Optimized SETPROP/SETGNAME/SETNAME stub.
class ICSetProp_Native : public ICUpdatedStub
{
friend class ICStubSpace;
protected: // Protected to silence Clang warning.
GCPtrObjectGroup group_;
GCPtrShape shape_;
uint32_t offset_;
ICSetProp_Native(JitCode* stubCode, ObjectGroup* group, Shape* shape, uint32_t offset);
public:
GCPtrObjectGroup& group() {
return group_;
}
GCPtrShape& shape() {
return shape_;
}
void notePreliminaryObject() {
extra_ = 1;
}
bool hasPreliminaryObject() const {
return extra_;
}
static size_t offsetOfGroup() {
return offsetof(ICSetProp_Native, group_);
}
static size_t offsetOfShape() {
return offsetof(ICSetProp_Native, shape_);
}
static size_t offsetOfOffset() {
return offsetof(ICSetProp_Native, offset_);
}
class Compiler : public ICStubCompiler {
RootedObject obj_;
bool isFixedSlot_;
uint32_t offset_;
protected:
virtual int32_t getKey() const {
return static_cast<int32_t>(engine_) |
(static_cast<int32_t>(kind) << 1) |
(static_cast<int32_t>(isFixedSlot_) << 17) |
(static_cast<int32_t>(obj_->is<UnboxedPlainObject>()) << 18);
}
MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
public:
Compiler(JSContext* cx, HandleObject obj, bool isFixedSlot, uint32_t offset)
: ICStubCompiler(cx, ICStub::SetProp_Native, Engine::Baseline),
obj_(cx, obj),
isFixedSlot_(isFixedSlot),
offset_(offset)
{}
ICSetProp_Native* getStub(ICStubSpace* space);
};
};
template <size_t ProtoChainDepth> class ICSetProp_NativeAddImpl;
class ICSetProp_NativeAdd : public ICUpdatedStub

View File

@ -69,7 +69,6 @@ namespace jit {
_(GetIntrinsic_Constant) \
\
_(SetProp_Fallback) \
_(SetProp_Native) \
_(SetProp_NativeAdd) \
_(SetProp_Unboxed) \
_(SetProp_TypedObject) \

View File

@ -171,6 +171,49 @@ GetCacheIRReceiverForUnboxedProperty(ICCacheIR_Monitored* stub, ReceiverGuard* r
return reader.matchOp(CacheOp::LoadUnboxedPropertyResult, objId);
}
static bool
GetCacheIRReceiverForNativeSetSlot(ICCacheIR_Updated* stub, ReceiverGuard* receiver)
{
// We match either:
//
// GuardIsObject 0
// GuardGroup 0
// GuardShape 0
// StoreFixedSlot 0 or StoreDynamicSlot 0
//
// or
//
// GuardIsObject 0
// GuardGroup 0
// 1: GuardAndLoadUnboxedExpando 0
// GuardShape 1
// StoreFixedSlot 1 or StoreDynamicSlot 1
*receiver = ReceiverGuard();
CacheIRReader reader(stub->stubInfo());
ObjOperandId objId = ObjOperandId(0);
if (!reader.matchOp(CacheOp::GuardIsObject, objId))
return false;
if (!reader.matchOp(CacheOp::GuardGroup, objId))
return false;
ObjectGroup* group = stub->stubInfo()->getStubField<ObjectGroup*>(stub, reader.stubOffset());
if (reader.matchOp(CacheOp::GuardAndLoadUnboxedExpando, objId))
objId = reader.objOperandId();
if (!reader.matchOp(CacheOp::GuardShape, objId))
return false;
Shape* shape = stub->stubInfo()->getStubField<Shape*>(stub, reader.stubOffset());
if (!reader.matchOpEither(CacheOp::StoreFixedSlot, CacheOp::StoreDynamicSlot))
return false;
*receiver = ReceiverGuard(group, shape);
return true;
}
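A self-contained sketch of the write-then-match style used here, with an invented one-byte encoding (the real CacheIRWriter/CacheIRReader also encode stub-field indices, which is how the group and shape are recovered above):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum class CacheOp : uint8_t { GuardIsObject, GuardGroup, GuardShape,
                                   StoreFixedSlot, StoreDynamicSlot };

    struct ToyWriter {
        std::vector<uint8_t> buf;
        void writeOp(CacheOp op, uint8_t operandId) {
            buf.push_back(uint8_t(op));
            buf.push_back(operandId);
        }
    };

    struct ToyReader {
        const std::vector<uint8_t>& buf;
        size_t pos;
        bool matchOp(CacheOp op, uint8_t operandId) {
            if (pos + 2 > buf.size() || buf[pos] != uint8_t(op) || buf[pos + 1] != operandId)
                return false;
            pos += 2;
            return true;
        }
    };

    int main() {
        ToyWriter w;                                   // what SetPropIRGenerator emits
        w.writeOp(CacheOp::GuardIsObject, 0);
        w.writeOp(CacheOp::GuardGroup, 0);
        w.writeOp(CacheOp::GuardShape, 0);
        w.writeOp(CacheOp::StoreFixedSlot, 0);

        ToyReader r{w.buf, 0};                         // what BaselineInspector replays
        bool nativeSetSlot = r.matchOp(CacheOp::GuardIsObject, 0) &&
                             r.matchOp(CacheOp::GuardGroup, 0) &&
                             r.matchOp(CacheOp::GuardShape, 0) &&
                             (r.matchOp(CacheOp::StoreFixedSlot, 0) ||
                              r.matchOp(CacheOp::StoreDynamicSlot, 0));
        printf("matched native set-slot pattern: %d\n", nativeSetSlot);
    }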
bool
BaselineInspector::maybeInfoForPropertyOp(jsbytecode* pc, ReceiverVector& receivers,
ObjectGroupVector& convertUnboxedGroups)
@ -199,9 +242,11 @@ BaselineInspector::maybeInfoForPropertyOp(jsbytecode* pc, ReceiverVector& receiv
receivers.clear();
return true;
}
} else if (stub->isSetProp_Native()) {
receiver = ReceiverGuard(stub->toSetProp_Native()->group(),
stub->toSetProp_Native()->shape());
} else if (stub->isCacheIR_Updated()) {
if (!GetCacheIRReceiverForNativeSetSlot(stub->toCacheIR_Updated(), &receiver)) {
receivers.clear();
return true;
}
} else if (stub->isSetProp_Unboxed()) {
receiver = ReceiverGuard(stub->toSetProp_Unboxed()->group(), nullptr);
} else {

View File

@ -1523,3 +1523,141 @@ IRGenerator::maybeGuardInt32Index(const Value& index, ValOperandId indexId,
return false;
}
SetPropIRGenerator::SetPropIRGenerator(JSContext* cx, jsbytecode* pc, CacheKind cacheKind,
bool* isTemporarilyUnoptimizable, HandleValue lhsVal,
HandleValue idVal, HandleValue rhsVal)
: IRGenerator(cx, pc, cacheKind),
lhsVal_(lhsVal),
idVal_(idVal),
rhsVal_(rhsVal),
isTemporarilyUnoptimizable_(isTemporarilyUnoptimizable),
preliminaryObjectAction_(PreliminaryObjectAction::None),
updateStubId_(cx, JSID_EMPTY),
needUpdateStub_(false)
{}
bool
SetPropIRGenerator::tryAttachStub()
{
AutoAssertNoPendingException aanpe(cx_);
ValOperandId lhsValId(writer.setInputOperandId(0));
ValOperandId rhsValId(writer.setInputOperandId(1));
RootedId id(cx_);
bool nameOrSymbol;
if (!ValueToNameOrSymbolId(cx_, idVal_, &id, &nameOrSymbol)) {
cx_->clearPendingException();
return false;
}
if (lhsVal_.isObject()) {
RootedObject obj(cx_, &lhsVal_.toObject());
if (obj->watched())
return false;
ObjOperandId objId = writer.guardIsObject(lhsValId);
if (nameOrSymbol) {
if (tryAttachNativeSetSlot(obj, objId, id, rhsValId))
return true;
if (tryAttachUnboxedExpandoSetSlot(obj, objId, id, rhsValId))
return true;
}
return false;
}
return false;
}
static void
EmitStoreSlotAndReturn(CacheIRWriter& writer, ObjOperandId objId, NativeObject* nobj, Shape* shape,
ValOperandId rhsId)
{
if (nobj->isFixedSlot(shape->slot())) {
size_t offset = NativeObject::getFixedSlotOffset(shape->slot());
writer.storeFixedSlot(objId, offset, rhsId);
} else {
size_t offset = nobj->dynamicSlotIndex(shape->slot()) * sizeof(Value);
writer.storeDynamicSlot(objId, offset, rhsId);
}
writer.returnFromIC();
}
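A small illustrative model of the fixed/dynamic distinction this helper encodes; the layout and sizes here are invented, and only the offset arithmetic loosely mirrors the getFixedSlotOffset/dynamicSlotIndex split:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct ToyValue { uint64_t bits; };               // stand-in for js::Value

    struct ToyNativeObject {
        void* shapeOrGroup[2];                        // illustrative header words
        ToyValue* slots;                              // dynamic slots, heap allocated
        ToyValue fixedSlots[4];                       // fixed slots, inline in the object

        // StoreFixedSlot gets an object-relative byte offset:
        static size_t fixedSlotOffset(size_t slot) {
            return offsetof(ToyNativeObject, fixedSlots) + slot * sizeof(ToyValue);
        }
        // StoreDynamicSlot gets a byte offset relative to the slots pointer:
        static size_t dynamicSlotOffset(size_t dynamicIndex) {
            return dynamicIndex * sizeof(ToyValue);
        }
    };

    int main() {
        printf("fixed slot 2   -> %zu bytes from the object\n", ToyNativeObject::fixedSlotOffset(2));
        printf("dynamic slot 3 -> %zu bytes from slots_\n", ToyNativeObject::dynamicSlotOffset(3));
    }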
static Shape*
LookupShapeForSetSlot(NativeObject* obj, jsid id)
{
Shape* shape = obj->lookupPure(id);
if (shape && shape->hasSlot() && shape->hasDefaultSetter() && shape->writable())
return shape;
return nullptr;
}
bool
SetPropIRGenerator::tryAttachNativeSetSlot(HandleObject obj, ObjOperandId objId, HandleId id,
ValOperandId rhsId)
{
if (!obj->isNative())
return false;
RootedShape propShape(cx_, LookupShapeForSetSlot(&obj->as<NativeObject>(), id));
if (!propShape)
return false;
RootedObjectGroup group(cx_, JSObject::getGroup(cx_, obj));
if (!group) {
cx_->recoverFromOutOfMemory();
return false;
}
// For some property writes, such as the initial overwrite of global
// properties, TI will not mark the property as having been
// overwritten. Don't attach a stub in this case, so that we don't
// execute another write to the property without TI seeing that write.
EnsureTrackPropertyTypes(cx_, obj, id);
if (!PropertyHasBeenMarkedNonConstant(obj, id)) {
*isTemporarilyUnoptimizable_ = true;
return false;
}
// For Baseline, we have to guard on both the shape and group, because the
// type update IC applies to a single group. When we port the Ion IC, we can
// do a bit better and avoid the group guard if we don't have to guard on
// the property types.
NativeObject* nobj = &obj->as<NativeObject>();
writer.guardGroup(objId, nobj->group());
writer.guardShape(objId, nobj->lastProperty());
if (IsPreliminaryObject(obj))
preliminaryObjectAction_ = PreliminaryObjectAction::NotePreliminary;
else
preliminaryObjectAction_ = PreliminaryObjectAction::Unlink;
setUpdateStubInfo(id);
EmitStoreSlotAndReturn(writer, objId, nobj, propShape, rhsId);
return true;
}
bool
SetPropIRGenerator::tryAttachUnboxedExpandoSetSlot(HandleObject obj, ObjOperandId objId,
HandleId id, ValOperandId rhsId)
{
if (!obj->is<UnboxedPlainObject>())
return false;
UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando();
if (!expando)
return false;
Shape* propShape = LookupShapeForSetSlot(expando, id);
if (!propShape)
return false;
writer.guardGroup(objId, obj->group());
ObjOperandId expandoId = writer.guardAndLoadUnboxedExpando(objId);
writer.guardShape(expandoId, expando->lastProperty());
setUpdateStubInfo(id);
EmitStoreSlotAndReturn(writer, expandoId, expando, propShape, rhsId);
return true;
}

View File

@ -134,6 +134,7 @@ enum class CacheKind : uint8_t
GetProp,
GetElem,
GetName,
SetProp,
};
#define CACHE_IR_OPS(_) \
@ -168,6 +169,9 @@ enum class CacheKind : uint8_t
_(LoadDOMExpandoValueIgnoreGeneration)\
_(GuardDOMExpandoMissingOrGuardShape) \
\
_(StoreFixedSlot) \
_(StoreDynamicSlot) \
\
/* The *Result ops load a value into the cache's result register. */ \
_(LoadFixedSlotResult) \
_(LoadDynamicSlotResult) \
@ -545,6 +549,17 @@ class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter
return res;
}
void storeFixedSlot(ObjOperandId obj, size_t offset, ValOperandId rhs) {
writeOpWithOperandId(CacheOp::StoreFixedSlot, obj);
addStubField(offset, StubField::Type::RawWord);
writeOperandId(rhs);
}
void storeDynamicSlot(ObjOperandId obj, size_t offset, ValOperandId rhs) {
writeOpWithOperandId(CacheOp::StoreDynamicSlot, obj);
addStubField(offset, StubField::Type::RawWord);
writeOperandId(rhs);
}
void loadUndefinedResult() {
writeOp(CacheOp::LoadUndefinedResult);
}
@ -822,6 +837,51 @@ class MOZ_RAII GetNameIRGenerator : public IRGenerator
bool tryAttachStub();
};
// SetPropIRGenerator generates CacheIR for a SetProp IC.
class MOZ_RAII SetPropIRGenerator : public IRGenerator
{
HandleValue lhsVal_;
HandleValue idVal_;
HandleValue rhsVal_;
bool* isTemporarilyUnoptimizable_;
enum class PreliminaryObjectAction { None, Unlink, NotePreliminary };
PreliminaryObjectAction preliminaryObjectAction_;
// If Baseline needs an update stub, this contains information to create it.
RootedId updateStubId_;
bool needUpdateStub_;
void setUpdateStubInfo(jsid id) {
MOZ_ASSERT(!needUpdateStub_);
needUpdateStub_ = true;
updateStubId_ = id;
}
bool tryAttachNativeSetSlot(HandleObject obj, ObjOperandId objId, HandleId id,
ValOperandId rhsId);
bool tryAttachUnboxedExpandoSetSlot(HandleObject obj, ObjOperandId objId, HandleId id,
ValOperandId rhsId);
public:
SetPropIRGenerator(JSContext* cx, jsbytecode* pc, CacheKind cacheKind,
bool* isTemporarilyUnoptimizable, HandleValue lhsVal, HandleValue idVal,
HandleValue rhsVal);
bool tryAttachStub();
bool shouldUnlinkPreliminaryObjectStubs() const {
return preliminaryObjectAction_ == PreliminaryObjectAction::Unlink;
}
bool shouldNotePreliminaryObjectStub() const {
return preliminaryObjectAction_ == PreliminaryObjectAction::NotePreliminary;
}
jsid updateStubId() const {
MOZ_ASSERT(needUpdateStub_);
return updateStubId_;
}
};
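For reference, the Baseline fallback drives this generator roughly as follows (abridged from DoSetPropFallback in this patch):

    SetPropIRGenerator gen(cx, pc, CacheKind::SetProp, &isTemporarilyUnoptimizable,
                           lhs, idVal, rhs);
    if (gen.tryAttachStub()) {
        ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
                                                    ICStubEngine::Baseline, frame->script(), stub);
        if (newStub) {
            newStub->toCacheIR_Updated()->updateStubId() = gen.updateStubId();
            if (gen.shouldNotePreliminaryObjectStub())
                newStub->toCacheIR_Updated()->notePreliminaryObject();
            else if (gen.shouldUnlinkPreliminaryObjectStubs())
                StripPreliminaryObjectStubs(cx, stub);
        }
    }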
} // namespace jit
} // namespace js

View File

@ -61,6 +61,42 @@ CacheRegisterAllocator::useValueRegister(MacroAssembler& masm, ValOperandId op)
MOZ_CRASH();
}
ValueOperand
CacheRegisterAllocator::useFixedValueRegister(MacroAssembler& masm, ValOperandId valId,
ValueOperand reg)
{
allocateFixedValueRegister(masm, reg);
OperandLocation& loc = operandLocations_[valId.id()];
switch (loc.kind()) {
case OperandLocation::ValueReg:
masm.moveValue(loc.valueReg(), reg);
MOZ_ASSERT(!currentOpRegs_.aliases(loc.valueReg()), "Register shouldn't be in use");
availableRegs_.add(loc.valueReg());
break;
case OperandLocation::ValueStack:
popValue(masm, &loc, reg);
break;
case OperandLocation::Constant:
masm.moveValue(loc.constant(), reg);
break;
case OperandLocation::PayloadReg:
masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
MOZ_ASSERT(!currentOpRegs_.has(loc.payloadReg()), "Register shouldn't be in use");
availableRegs_.add(loc.payloadReg());
break;
case OperandLocation::PayloadStack:
popPayload(masm, &loc, reg.scratchReg());
masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
break;
case OperandLocation::Uninitialized:
MOZ_CRASH();
}
loc.setValueReg(reg);
return reg;
}
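This entry point exists because some emitters must hand fixed registers to helper code; for example, emitStoreSlotShared in this patch pins the value in R0 and the scratch in R1.scratchReg() before calling callTypeUpdateIC:

    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);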
Register
CacheRegisterAllocator::useRegister(MacroAssembler& masm, TypedOperandId typedId)
{

View File

@ -339,6 +339,7 @@ class MOZ_RAII CacheRegisterAllocator
// Returns the register for the given operand. If the operand is currently
// not in a register, it will load it into one.
ValueOperand useValueRegister(MacroAssembler& masm, ValOperandId val);
ValueOperand useFixedValueRegister(MacroAssembler& masm, ValOperandId valId, ValueOperand reg);
Register useRegister(MacroAssembler& masm, TypedOperandId typedId);
// Allocates an output register for the given operand.

View File

@ -257,6 +257,7 @@ CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool)
return;
}
case CacheKind::GetName:
case CacheKind::SetProp:
MOZ_CRASH("Baseline-specific for now");
}
MOZ_CRASH();

View File

@ -820,6 +820,18 @@ IonCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult()
MOZ_CRASH("Baseline-specific op");
}
bool
IonCacheIRCompiler::emitStoreFixedSlot()
{
MOZ_CRASH("Baseline-specific op");
}
bool
IonCacheIRCompiler::emitStoreDynamicSlot()
{
MOZ_CRASH("Baseline-specific op");
}
bool
IonCacheIRCompiler::emitLoadTypedObjectResult()
{

View File

@ -42,6 +42,7 @@ IonIC::scratchRegisterForEntryJump()
return output.hasValue() ? output.valueReg().scratchReg() : output.typedReg().gpr();
}
case CacheKind::GetName:
case CacheKind::SetProp:
MOZ_CRASH("Baseline-specific for now");
}

View File

@ -488,7 +488,7 @@ class MacroAssembler : public MacroAssemblerSpecific
CodeOffset call(Register reg) PER_SHARED_ARCH;
CodeOffset call(Label* label) PER_SHARED_ARCH;
void call(const Address& addr) DEFINED_ON(x86_shared);
void call(const Address& addr) DEFINED_ON(x86_shared, arm, arm64);
void call(ImmWord imm) PER_SHARED_ARCH;
// Call a target native function, which is neither traceable nor movable.
void call(ImmPtr imm) PER_SHARED_ARCH;
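The wider DEFINED_ON list (and the ARM/ARM64 implementations below) back the indirect call that callTypeUpdateIC now emits on every platform:

    // From BaselineCacheIRCompiler::callTypeUpdateIC in this patch:
    masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));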

View File

@ -193,9 +193,14 @@ ICStub::NonCacheIRStubMakesGCCalls(Kind kind)
bool
ICStub::makesGCCalls() const
{
if (isCacheIR_Monitored())
switch (kind()) {
case CacheIR_Monitored:
return toCacheIR_Monitored()->stubInfo()->makesGCCalls();
return NonCacheIRStubMakesGCCalls(kind());
case CacheIR_Updated:
return toCacheIR_Updated()->stubInfo()->makesGCCalls();
default:
return NonCacheIRStubMakesGCCalls(kind());
}
}
void
@ -351,12 +356,6 @@ ICStub::trace(JSTracer* trc)
TraceEdge(trc, &constantStub->value(), "baseline-getintrinsic-constant-value");
break;
}
case ICStub::SetProp_Native: {
ICSetProp_Native* propStub = toSetProp_Native();
TraceEdge(trc, &propStub->shape(), "baseline-setpropnative-stub-shape");
TraceEdge(trc, &propStub->group(), "baseline-setpropnative-stub-group");
break;
}
case ICStub::SetProp_NativeAdd: {
ICSetProp_NativeAdd* propStub = toSetProp_NativeAdd();
TraceEdge(trc, &propStub->group(), "baseline-setpropnativeadd-stub-group");
@ -425,6 +424,12 @@ ICStub::trace(JSTracer* trc)
case ICStub::CacheIR_Monitored:
TraceCacheIRStub(trc, this, toCacheIR_Monitored()->stubInfo());
break;
case ICStub::CacheIR_Updated: {
ICCacheIR_Updated* stub = toCacheIR_Updated();
TraceEdge(trc, &stub->updateStubId(), "baseline-updated-id");
TraceCacheIRStub(trc, this, stub->stubInfo());
break;
}
default:
break;
}
@ -731,8 +736,9 @@ ICStubCompiler::PushStubPayload(MacroAssembler& masm, Register scratch)
}
void
ICStubCompiler::emitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
Register scratch, LiveGeneralRegisterSet saveRegs)
BaselineEmitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
Register scratch, LiveGeneralRegisterSet saveRegs,
JSRuntime* rt)
{
Label skipBarrier;
masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);
@ -745,7 +751,7 @@ ICStubCompiler::emitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, Val
saveRegs.set() = GeneralRegisterSet::Intersect(saveRegs.set(), GeneralRegisterSet::Volatile());
masm.PushRegsInMask(saveRegs);
masm.setupUnalignedABICall(scratch);
masm.movePtr(ImmPtr(cx->runtime()), scratch);
masm.movePtr(ImmPtr(rt), scratch);
masm.passABIArg(scratch);
masm.passABIArg(obj);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));
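A minimal standalone model of the generational barrier this helper emits, with invented names (ToyObject, ToyRuntime); the real code performs the nursery checks with branches in the emitted machine code and calls PostWriteBarrier only on the slow path:

    #include <unordered_set>

    struct ToyObject {
        bool inNursery;
        ToyObject* slot;
    };

    struct ToyRuntime {
        std::unordered_set<ToyObject*> storeBuffer;   // remembered set for minor GC
    };

    // Models the slow path reached through callWithABI(PostWriteBarrier).
    void postWriteBarrier(ToyRuntime& rt, ToyObject* obj) {
        rt.storeBuffer.insert(obj);
    }

    void storeSlotWithBarrier(ToyRuntime& rt, ToyObject* obj, ToyObject* value) {
        obj->slot = value;
        if (obj->inNursery)                  // branchPtrInNurseryChunk: nursery objects need no barrier
            return;
        if (!value || !value->inNursery)     // only tenured -> nursery edges matter
            return;
        postWriteBarrier(rt, obj);
    }

    int main() {
        ToyRuntime rt;
        ToyObject tenured{false, nullptr};
        ToyObject young{true, nullptr};
        storeSlotWithBarrier(rt, &tenured, &young);   // records |tenured| in the store buffer
        return rt.storeBuffer.count(&tenured) == 1 ? 0 : 1;
    }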
@ -1975,7 +1981,7 @@ StripPreliminaryObjectStubs(JSContext* cx, ICFallbackStub* stub)
for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++) {
if (iter->isCacheIR_Monitored() && iter->toCacheIR_Monitored()->hasPreliminaryObject())
iter.unlink(cx);
else if (iter->isSetProp_Native() && iter->toSetProp_Native()->hasPreliminaryObject())
else if (iter->isCacheIR_Updated() && iter->toCacheIR_Updated()->hasPreliminaryObject())
iter.unlink(cx);
}
}

View File

@ -508,7 +508,7 @@ class ICStub
return (k > INVALID) && (k < LIMIT);
}
static bool IsCacheIRKind(Kind k) {
return k == CacheIR_Monitored;
return k == CacheIR_Monitored || k == CacheIR_Updated;
}
static const char* KindString(Kind k) {
@ -956,6 +956,36 @@ class ICUpdatedStub : public ICStub
}
};
class ICCacheIR_Updated : public ICUpdatedStub
{
const CacheIRStubInfo* stubInfo_;
GCPtrId updateStubId_;
public:
ICCacheIR_Updated(JitCode* stubCode, const CacheIRStubInfo* stubInfo)
: ICUpdatedStub(ICStub::CacheIR_Updated, stubCode),
stubInfo_(stubInfo),
updateStubId_(JSID_EMPTY)
{}
GCPtrId& updateStubId() {
return updateStubId_;
}
void notePreliminaryObject() {
extra_ = 1;
}
bool hasPreliminaryObject() const {
return extra_;
}
const CacheIRStubInfo* stubInfo() const {
return stubInfo_;
}
uint8_t* stubDataStart();
};
// Base class for stubcode compilers.
class ICStubCompiler
{
@ -1062,9 +1092,6 @@ class ICStubCompiler
}
protected:
void emitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
Register scratch, LiveGeneralRegisterSet saveRegs);
template <typename T, typename... Args>
T* newStub(Args&&... args) {
return ICStub::New<T>(cx, mozilla::Forward<Args>(args)...);
@ -1086,6 +1113,10 @@ class ICStubCompiler
}
};
void BaselineEmitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
Register scratch, LiveGeneralRegisterSet saveRegs,
JSRuntime* rt);
class SharedStubInfo
{
BaselineFrame* maybeFrame_;

View File

@ -38,6 +38,7 @@ namespace jit {
_(GetProp_Generic) \
\
_(CacheIR_Monitored) \
_(CacheIR_Updated) \
\
} // namespace jit

View File

@ -5021,6 +5021,13 @@ MacroAssembler::call(wasm::SymbolicAddress imm)
call(CallReg);
}
void
MacroAssembler::call(const Address& addr)
{
loadPtr(addr, CallReg);
call(CallReg);
}
void
MacroAssembler::call(JitCode* c)
{

View File

@ -528,6 +528,16 @@ MacroAssembler::call(wasm::SymbolicAddress imm)
call(scratch);
}
void
MacroAssembler::call(const Address& addr)
{
vixl::UseScratchRegisterScope temps(this);
const Register scratch = temps.AcquireX().asUnsized();
syncStackPtr();
loadPtr(addr, scratch);
call(scratch);
}
void
MacroAssembler::call(JitCode* c)
{