Backed out changeset efb8b9a6a1a4 (bug 1322093)

Carsten "Tomcat" Book 2016-12-23 16:06:46 +01:00
parent e048e377c4
commit 0114cbabae
20 changed files with 148 additions and 1507 deletions

View File

@@ -254,10 +254,8 @@ Zone::discardJitCode(FreeOp* fop, bool discardBaselineCode)
*
* Defer freeing any allocated blocks until after the next minor GC.
*/
if (discardBaselineCode) {
if (discardBaselineCode)
jitZone()->optimizedStubSpace()->freeAllAfterMinorGC(fop->runtime());
jitZone()->purgeIonCacheIRStubInfo();
}
/*
* Free all control flow graphs that are cached on BaselineScripts.

View File

@@ -181,8 +181,7 @@ BaselineCacheIRCompiler::compile()
// Done emitting the main IC code. Now emit the failure paths.
for (size_t i = 0; i < failurePaths.length(); i++) {
if (!emitFailurePath(i))
return nullptr;
emitFailurePath(i);
EmitStubGuardFailure(masm);
}
@@ -576,8 +575,39 @@ BaselineCacheIRCompiler::emitLoadTypedObjectResult()
masm.load32(fieldOffset, scratch2);
masm.addPtr(scratch2, scratch1);
Address fieldAddr(scratch1, 0);
emitLoadTypedObjectResultShared(fieldAddr, scratch2, layout, typeDescr, output);
if (SimpleTypeDescrKeyIsScalar(typeDescr)) {
Scalar::Type type = ScalarTypeFromSimpleTypeDescrKey(typeDescr);
masm.loadFromTypedArray(type, Address(scratch1, 0), output.valueReg(),
/* allowDouble = */ true, scratch2, nullptr);
} else {
ReferenceTypeDescr::Type type = ReferenceTypeFromSimpleTypeDescrKey(typeDescr);
switch (type) {
case ReferenceTypeDescr::TYPE_ANY:
masm.loadValue(Address(scratch1, 0), output.valueReg());
break;
case ReferenceTypeDescr::TYPE_OBJECT: {
Label notNull, done;
masm.loadPtr(Address(scratch1, 0), scratch1);
masm.branchTestPtr(Assembler::NonZero, scratch1, scratch1, &notNull);
masm.moveValue(NullValue(), output.valueReg());
masm.jump(&done);
masm.bind(&notNull);
masm.tagValue(JSVAL_TYPE_OBJECT, scratch1, output.valueReg());
masm.bind(&done);
break;
}
case ReferenceTypeDescr::TYPE_STRING:
masm.loadPtr(Address(scratch1, 0), scratch1);
masm.tagValue(JSVAL_TYPE_STRING, scratch1, output.valueReg());
break;
default:
MOZ_CRASH("Invalid ReferenceTypeDescr");
}
}
return true;
}
@@ -688,9 +718,6 @@ BaselineCacheIRCompiler::init(CacheKind kind)
if (!allocator.init(ICStubCompiler::availableGeneralRegs(numInputs)))
return false;
// Baseline ICs monitor values when needed, so returning doubles is fine.
allowDoubleResult_.emplace(true);
if (numInputs >= 1) {
allocator.initInputLocation(0, R0);
if (numInputs >= 2)
@@ -794,6 +821,52 @@ jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
return newStub;
}
void
jit::TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo)
{
uint32_t field = 0;
size_t offset = 0;
while (true) {
StubField::Type fieldType = stubInfo->fieldType(field);
switch (fieldType) {
case StubField::Type::RawWord:
case StubField::Type::RawInt64:
break;
case StubField::Type::Shape:
TraceNullableEdge(trc, &stubInfo->getStubField<Shape*>(stub, offset),
"baseline-cacheir-shape");
break;
case StubField::Type::ObjectGroup:
TraceNullableEdge(trc, &stubInfo->getStubField<ObjectGroup*>(stub, offset),
"baseline-cacheir-group");
break;
case StubField::Type::JSObject:
TraceNullableEdge(trc, &stubInfo->getStubField<JSObject*>(stub, offset),
"baseline-cacheir-object");
break;
case StubField::Type::Symbol:
TraceNullableEdge(trc, &stubInfo->getStubField<JS::Symbol*>(stub, offset),
"baseline-cacheir-symbol");
break;
case StubField::Type::String:
TraceNullableEdge(trc, &stubInfo->getStubField<JSString*>(stub, offset),
"baseline-cacheir-string");
break;
case StubField::Type::Id:
TraceEdge(trc, &stubInfo->getStubField<jsid>(stub, offset), "baseline-cacheir-id");
break;
case StubField::Type::Value:
TraceEdge(trc, &stubInfo->getStubField<JS::Value>(stub, offset),
"baseline-cacheir-value");
break;
case StubField::Type::Limit:
return; // Done.
}
field++;
offset += StubField::sizeInBytes(fieldType);
}
}
uint8_t*
ICCacheIR_Monitored::stubDataStart()
{

View File

@@ -17,6 +17,8 @@ namespace jit {
class ICFallbackStub;
class ICStub;
void TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo);
ICStub* AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
CacheKind kind, ICStubEngine engine, JSScript* outerScript,
ICFallbackStub* stub);

View File

@@ -63,7 +63,7 @@ class DebugModeOSRVolatileStub
{ }
bool invalid() const {
if (engine_ == ICStubCompiler::Engine::IonSharedIC)
if (engine_ == ICStubCompiler::Engine::IonMonkey)
return stub_->invalid();
MOZ_ASSERT(!frame_->isHandlingException());
ICEntry& entry = frame_->script()->baselineScript()->icEntryFromPCOffset(pcOffset_);

View File

@@ -210,7 +210,7 @@ CanAttachNativeGetProp(JSContext* cx, HandleObject obj, HandleId id,
if (IsCacheableGetPropCallScripted(obj, holder, shape, isTemporarilyUnoptimizable)) {
// See bug 1226816.
if (engine != ICStubEngine::IonSharedIC)
if (engine == ICStubEngine::Baseline)
return CanAttachCallGetter;
}
@@ -1070,16 +1070,6 @@ GetPropIRGenerator::tryAttachTypedElement(HandleObject obj, ObjOperandId objId,
return false;
}
if (idVal_.toNumber() < 0 || floor(idVal_.toNumber()) != idVal_.toNumber())
return false;
// Ensure the index is in-bounds so the element type gets monitored.
if (obj->is<TypedArrayObject>() &&
idVal_.toNumber() >= double(obj->as<TypedArrayObject>().length()))
{
return false;
}
// Don't attach typed object stubs if the underlying storage could be
// detached, as the stub will always bail out.
if (IsPrimitiveArrayTypedObject(obj) && cx_->compartment()->detachedTypedObjects)
@@ -1093,13 +1083,7 @@ GetPropIRGenerator::tryAttachTypedElement(HandleObject obj, ObjOperandId objId,
Int32OperandId int32IndexId = writer.guardIsInt32(indexId);
writer.loadTypedElementResult(objId, int32IndexId, layout, TypedThingElementType(obj));
// Reading from Uint32Array may produce an int32 now but a double value
// later, so ensure we monitor the result.
if (TypedThingElementType(obj) == Scalar::Type::Uint32)
writer.typeMonitorResult();
else
writer.returnFromIC();
writer.returnFromIC();
return true;
}

View File

@@ -43,14 +43,6 @@ namespace jit {
// share both the IR and JitCode between CacheIR stubs. This HashMap owns the
// stubInfo (it uses UniquePtr), so once there are no references left to the
// shared stub code, we can also free the CacheIRStubInfo.
//
// Ion stubs
// ---------
// Unlike Baseline stubs, Ion stubs do not share stub code, and data stored in
// the IonICStub is baked into JIT code. This is one of the reasons Ion stubs
// are faster than Baseline stubs. Also note that Ion ICs contain more state
// (see IonGetPropertyIC for example) and use dynamic input/output registers,
// so sharing stub code for Ion would be much more difficult.
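A minimal standalone sketch of the sharing scheme described above, using standard C++ stand-ins rather than the real SpiderMonkey types (StubInfo, JitCodeHandle, and BaselineStubCodeMap below are illustrative names, not from this patch):

    #include <cstdint>
    #include <memory>
    #include <string>
    #include <unordered_map>

    struct StubInfo { std::uint32_t stubDataOffset; };  // stand-in for CacheIRStubInfo
    struct JitCodeHandle { std::uintptr_t raw; };       // stand-in for JitCode*

    struct SharedStubEntry {
        JitCodeHandle code;                  // machine code shared by all stubs
        std::unique_ptr<StubInfo> stubInfo;  // owned by the table (UniquePtr in-tree)
    };

    // Keyed by the raw CacheIR bytes: identical IR means shared code + info,
    // and erasing the entry frees the stub info along with it.
    using BaselineStubCodeMap = std::unordered_map<std::string, SharedStubEntry>;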
// An OperandId represents either a cache input or a value returned by a
// CacheIR instruction. Most code should use the ValOperandId and ObjOperandId
@@ -387,13 +379,6 @@ class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter
return buffer_.length();
}
// This should not be used when compiling Baseline code, as Baseline code
// shouldn't bake in stub values.
StubField readStubFieldForIon(size_t i, StubField::Type type) const {
MOZ_ASSERT(stubFields_[i].type() == type);
return stubFields_[i];
}
ObjOperandId guardIsObject(ValOperandId val) {
writeOpWithOperandId(CacheOp::GuardIsObject, val);
return ObjOperandId(val.id());

View File

@@ -6,8 +6,6 @@
#include "jit/CacheIRCompiler.h"
#include "jit/IonIC.h"
#include "jit/MacroAssembler-inl.h"
using namespace js;
@@ -38,20 +36,9 @@ CacheRegisterAllocator::useValueRegister(MacroAssembler& masm, ValOperandId op)
return reg;
}
case OperandLocation::PayloadReg: {
ValueOperand reg = allocateValueRegister(masm);
masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
loc.setValueReg(reg);
return reg;
}
case OperandLocation::PayloadStack: {
ValueOperand reg = allocateValueRegister(masm);
popPayload(masm, &loc, reg.scratchReg());
masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
loc.setValueReg(reg);
return reg;
}
// The operand should never be unboxed.
case OperandLocation::PayloadStack:
case OperandLocation::PayloadReg:
case OperandLocation::Uninitialized:
break;
}
@@ -222,17 +209,9 @@ CacheRegisterAllocator::allocateRegister(MacroAssembler& masm)
}
}
if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
Register reg = availableRegsAfterSpill_.takeAny();
masm.push(reg);
stackPushed_ += sizeof(uintptr_t);
masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
availableRegs_.add(reg);
}
// At this point, there must be a free register.
// At this point, there must be a free register. (Ion ICs don't have as
// many registers available, so once we support Ion code generation, we may
// have to spill some unrelated registers.)
MOZ_RELEASE_ASSERT(!availableRegs_.empty());
Register reg = availableRegs_.takeAny();
@@ -318,46 +297,6 @@ CacheRegisterAllocator::init(const AllocatableGeneralRegisterSet& available)
return true;
}
void
CacheRegisterAllocator::initAvailableRegsAfterSpill()
{
// Registers not in availableRegs_ and not used by input operands are
// available after being spilled.
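// (De Morgan: Intersect(Not(A), Not(B)) == Not(Union(A, B)), so this is
// "every register that is neither already free nor holding an IC input".)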
availableRegsAfterSpill_.set() =
GeneralRegisterSet::Intersect(GeneralRegisterSet::Not(availableRegs_.set()),
GeneralRegisterSet::Not(inputRegisterSet()));
}
GeneralRegisterSet
CacheRegisterAllocator::inputRegisterSet() const
{
MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());
AllocatableGeneralRegisterSet result;
for (size_t i = 0; i < writer_.numInputOperands(); i++) {
const OperandLocation& loc = operandLocations_[i];
MOZ_ASSERT(loc == origInputLocations_[i]);
switch (loc.kind()) {
case OperandLocation::PayloadReg:
result.add(loc.payloadReg());
continue;
case OperandLocation::ValueReg:
result.add(loc.valueReg());
continue;
case OperandLocation::PayloadStack:
case OperandLocation::ValueStack:
case OperandLocation::Constant:
continue;
case OperandLocation::Uninitialized:
break;
}
MOZ_CRASH("Invalid kind");
}
return result.set();
}
JSValueType
CacheRegisterAllocator::knownType(ValOperandId val) const
{
@@ -511,7 +450,7 @@ OperandLocation::aliasesReg(const OperandLocation& other) const
}
void
CacheRegisterAllocator::restoreInputState(MacroAssembler& masm, bool shouldDiscardStack)
CacheRegisterAllocator::restoreInputState(MacroAssembler& masm)
{
size_t numInputOperands = origInputLocations_.length();
MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);
@@ -522,8 +461,6 @@ CacheRegisterAllocator::restoreInputState(MacroAssembler& masm, bool shouldDisca
if (dest == cur)
continue;
auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });
// We have a cycle if a destination register will be used later
// as a source register. If that happens, just push the current value
// on the stack and later get it from there.
@@ -590,21 +527,7 @@ CacheRegisterAllocator::restoreInputState(MacroAssembler& masm, bool shouldDisca
MOZ_CRASH("Invalid kind");
}
for (const SpilledRegister& spill : spilledRegs_) {
MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));
if (spill.stackPushed == stackPushed_) {
masm.pop(spill.reg);
stackPushed_ -= sizeof(uintptr_t);
} else {
MOZ_ASSERT(spill.stackPushed < stackPushed_);
masm.loadPtr(Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
spill.reg);
}
}
if (shouldDiscardStack)
discardStack(masm);
discardStack(masm);
}
size_t
@@ -640,25 +563,25 @@ CacheIRStubInfo::copyStubData(ICStub* src, ICStub* dest) const
*reinterpret_cast<uint64_t*>(srcBytes + offset);
break;
case StubField::Type::Shape:
getStubField<ICStub, Shape*>(dest, offset).init(getStubField<ICStub, Shape*>(src, offset));
getStubField<Shape*>(dest, offset).init(getStubField<Shape*>(src, offset));
break;
case StubField::Type::JSObject:
getStubField<ICStub, JSObject*>(dest, offset).init(getStubField<ICStub, JSObject*>(src, offset));
getStubField<JSObject*>(dest, offset).init(getStubField<JSObject*>(src, offset));
break;
case StubField::Type::ObjectGroup:
getStubField<ICStub, ObjectGroup*>(dest, offset).init(getStubField<ICStub, ObjectGroup*>(src, offset));
getStubField<ObjectGroup*>(dest, offset).init(getStubField<ObjectGroup*>(src, offset));
break;
case StubField::Type::Symbol:
getStubField<ICStub, JS::Symbol*>(dest, offset).init(getStubField<ICStub, JS::Symbol*>(src, offset));
getStubField<JS::Symbol*>(dest, offset).init(getStubField<JS::Symbol*>(src, offset));
break;
case StubField::Type::String:
getStubField<ICStub, JSString*>(dest, offset).init(getStubField<ICStub, JSString*>(src, offset));
getStubField<JSString*>(dest, offset).init(getStubField<JSString*>(src, offset));
break;
case StubField::Type::Id:
getStubField<ICStub, jsid>(dest, offset).init(getStubField<ICStub, jsid>(src, offset));
getStubField<jsid>(dest, offset).init(getStubField<jsid>(src, offset));
break;
case StubField::Type::Value:
getStubField<ICStub, Value>(dest, offset).init(getStubField<ICStub, Value>(src, offset));
getStubField<Value>(dest, offset).init(getStubField<Value>(src, offset));
break;
case StubField::Type::Limit:
return; // Done.
@@ -675,9 +598,9 @@ AsGCPtr(uintptr_t* ptr)
return reinterpret_cast<GCPtr<T>*>(ptr);
}
template<class Stub, class T>
template<class T>
GCPtr<T>&
CacheIRStubInfo::getStubField(Stub* stub, uint32_t offset) const
CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const
{
uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
MOZ_ASSERT(uintptr_t(stubData) % sizeof(uintptr_t) == 0);
@@ -685,13 +608,13 @@ CacheIRStubInfo::getStubField(Stub* stub, uint32_t offset) const
return *AsGCPtr<T>((uintptr_t*)(stubData + offset));
}
template GCPtr<Shape*>& CacheIRStubInfo::getStubField<ICStub>(ICStub* stub, uint32_t offset) const;
template GCPtr<ObjectGroup*>& CacheIRStubInfo::getStubField<ICStub>(ICStub* stub, uint32_t offset) const;
template GCPtr<JSObject*>& CacheIRStubInfo::getStubField<ICStub>(ICStub* stub, uint32_t offset) const;
template GCPtr<JSString*>& CacheIRStubInfo::getStubField<ICStub>(ICStub* stub, uint32_t offset) const;
template GCPtr<JS::Symbol*>& CacheIRStubInfo::getStubField<ICStub>(ICStub* stub, uint32_t offset) const;
template GCPtr<JS::Value>& CacheIRStubInfo::getStubField<ICStub>(ICStub* stub, uint32_t offset) const;
template GCPtr<jsid>& CacheIRStubInfo::getStubField<ICStub>(ICStub* stub, uint32_t offset) const;
template GCPtr<Shape*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
template GCPtr<ObjectGroup*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
template GCPtr<JSObject*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
template GCPtr<JSString*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
template GCPtr<JS::Symbol*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
template GCPtr<JS::Value>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
template GCPtr<jsid>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
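For readers unfamiliar with the block above: the member template is defined in the .cpp and explicitly instantiated once per supported type, so the header only needs a declaration. A self-contained sketch of the same pattern (toy StubData type, not SpiderMonkey code):

    #include <cstdint>

    struct StubData {
        alignas(alignof(std::uintptr_t)) unsigned char bytes[64];

        template <class T>
        T& field(std::uint32_t offset) {  // definition would live in the .cpp
            return *reinterpret_cast<T*>(bytes + offset);
        }
    };

    // One explicit instantiation per supported field type, mirroring the
    // getStubField instantiations above:
    template int&   StubData::field<int>(std::uint32_t);
    template void*& StubData::field<void*>(std::uint32_t);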
template <typename T, typename V>
static void
@@ -743,59 +666,6 @@ CacheIRWriter::copyStubData(uint8_t* dest) const
}
}
template <typename T>
void
jit::TraceCacheIRStub(JSTracer* trc, T* stub, const CacheIRStubInfo* stubInfo)
{
uint32_t field = 0;
size_t offset = 0;
while (true) {
StubField::Type fieldType = stubInfo->fieldType(field);
switch (fieldType) {
case StubField::Type::RawWord:
case StubField::Type::RawInt64:
break;
case StubField::Type::Shape:
TraceNullableEdge(trc, &stubInfo->getStubField<T, Shape*>(stub, offset),
"cacheir-shape");
break;
case StubField::Type::ObjectGroup:
TraceNullableEdge(trc, &stubInfo->getStubField<T, ObjectGroup*>(stub, offset),
"cacheir-group");
break;
case StubField::Type::JSObject:
TraceNullableEdge(trc, &stubInfo->getStubField<T, JSObject*>(stub, offset),
"cacheir-object");
break;
case StubField::Type::Symbol:
TraceNullableEdge(trc, &stubInfo->getStubField<T, JS::Symbol*>(stub, offset),
"cacheir-symbol");
break;
case StubField::Type::String:
TraceNullableEdge(trc, &stubInfo->getStubField<T, JSString*>(stub, offset),
"cacheir-string");
break;
case StubField::Type::Id:
TraceEdge(trc, &stubInfo->getStubField<T, jsid>(stub, offset), "cacheir-id");
break;
case StubField::Type::Value:
TraceEdge(trc, &stubInfo->getStubField<T, JS::Value>(stub, offset),
"cacheir-value");
break;
case StubField::Type::Limit:
return; // Done.
}
field++;
offset += StubField::sizeInBytes(fieldType);
}
}
template
void jit::TraceCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo);
template
void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub, const CacheIRStubInfo* stubInfo);
bool
CacheIRWriter::stubDataEquals(const uint8_t* stubData) const
{
@@ -927,14 +797,6 @@ FailurePath::canShareFailurePath(const FailurePath& other) const
if (stackPushed_ != other.stackPushed_)
return false;
if (spilledRegs_.length() != other.spilledRegs_.length())
return false;
for (size_t i = 0; i < spilledRegs_.length(); i++) {
if (spilledRegs_[i] != other.spilledRegs_[i])
return false;
}
MOZ_ASSERT(inputs_.length() == other.inputs_.length());
for (size_t i = 0; i < inputs_.length(); i++) {
@@ -952,8 +814,6 @@ CacheIRCompiler::addFailurePath(FailurePath** failure)
if (!newFailure.appendInput(allocator.operandLocation(i)))
return false;
}
if (!newFailure.setSpilledRegs(allocator.spilledRegs()))
return false;
newFailure.setStackPushed(allocator.stackPushed());
// Reuse the previous failure path if the current one is the same, to
@@ -970,7 +830,7 @@ CacheIRCompiler::addFailurePath(FailurePath** failure)
return true;
}
bool
void
CacheIRCompiler::emitFailurePath(size_t index)
{
FailurePath& failure = failurePaths[index];
@@ -980,12 +840,8 @@ CacheIRCompiler::emitFailurePath(size_t index)
for (size_t i = 0; i < writer_.numInputOperands(); i++)
allocator.setOperandLocation(i, failure.input(i));
if (!allocator.setSpilledRegs(failure.spilledRegs()))
return false;
masm.bind(failure.label());
allocator.restoreInputState(masm);
return true;
}
bool
@@ -1597,7 +1453,7 @@ CacheIRCompiler::emitLoadTypedElementResult()
// Load the value.
BaseIndex source(scratch, index, ScaleFromElemWidth(Scalar::byteSize(type)));
if (output.hasValue()) {
masm.loadFromTypedArray(type, source, output.valueReg(), *allowDoubleResult_, scratch,
masm.loadFromTypedArray(type, source, output.valueReg(), false, scratch,
failure->label());
} else {
bool needGpr = (type == Scalar::Int8 || type == Scalar::Uint8 ||
@@ -1613,44 +1469,3 @@ CacheIRCompiler::emitLoadTypedElementResult()
}
return true;
}
void
CacheIRCompiler::emitLoadTypedObjectResultShared(const Address& fieldAddr, Register scratch,
TypedThingLayout layout, uint32_t typeDescr,
const AutoOutputRegister& output)
{
MOZ_ASSERT(output.hasValue());
if (SimpleTypeDescrKeyIsScalar(typeDescr)) {
Scalar::Type type = ScalarTypeFromSimpleTypeDescrKey(typeDescr);
masm.loadFromTypedArray(type, fieldAddr, output.valueReg(),
/* allowDouble = */ true, scratch, nullptr);
} else {
ReferenceTypeDescr::Type type = ReferenceTypeFromSimpleTypeDescrKey(typeDescr);
switch (type) {
case ReferenceTypeDescr::TYPE_ANY:
masm.loadValue(fieldAddr, output.valueReg());
break;
case ReferenceTypeDescr::TYPE_OBJECT: {
Label notNull, done;
masm.loadPtr(fieldAddr, scratch);
masm.branchTestPtr(Assembler::NonZero, scratch, scratch, &notNull);
masm.moveValue(NullValue(), output.valueReg());
masm.jump(&done);
masm.bind(&notNull);
masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
masm.bind(&done);
break;
}
case ReferenceTypeDescr::TYPE_STRING:
masm.loadPtr(fieldAddr, scratch);
masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
break;
default:
MOZ_CRASH("Invalid ReferenceTypeDescr");
}
}
}

View File

@@ -135,30 +135,6 @@ class OperandLocation
data_.constant = v;
}
bool isInRegister() const { return kind_ == PayloadReg || kind_ == ValueReg; }
bool isOnStack() const { return kind_ == PayloadStack || kind_ == ValueStack; }
size_t stackPushed() const {
if (kind_ == PayloadStack)
return data_.payloadStack.stackPushed;
MOZ_ASSERT(kind_ == ValueStack);
return data_.valueStackPushed;
}
size_t stackSizeInBytes() const {
if (kind_ == PayloadStack)
return sizeof(uintptr_t);
MOZ_ASSERT(kind_ == ValueStack);
return sizeof(js::Value);
}
void adjustStackPushed(int32_t diff) {
if (kind_ == PayloadStack) {
data_.payloadStack.stackPushed += diff;
return;
}
MOZ_ASSERT(kind_ == ValueStack);
data_.valueStackPushed += diff;
}
bool aliasesReg(Register reg) const {
if (kind_ == PayloadReg)
return payloadReg() == reg;
@@ -180,22 +156,6 @@
bool operator!=(const OperandLocation& other) const { return !operator==(other); }
};
struct SpilledRegister
{
Register reg;
uint32_t stackPushed;
SpilledRegister(Register reg, uint32_t stackPushed)
: reg(reg), stackPushed(stackPushed)
{}
bool operator==(const SpilledRegister& other) const {
return reg == other.reg && stackPushed == other.stackPushed;
}
bool operator!=(const SpilledRegister& other) const { return !(*this == other); }
};
using SpilledRegisterVector = Vector<SpilledRegister, 2, SystemAllocPolicy>;
// Class to track and allocate registers while emitting IC code.
class MOZ_RAII CacheRegisterAllocator
{
@@ -215,13 +175,6 @@ class MOZ_RAII CacheRegisterAllocator
// Registers that are currently unused and available.
AllocatableGeneralRegisterSet availableRegs_;
// Registers that are available, but before use they must be saved and
// then restored when returning from the stub.
AllocatableGeneralRegisterSet availableRegsAfterSpill_;
// Registers we took from availableRegsAfterSpill_ and spilled to the stack.
SpilledRegisterVector spilledRegs_;
// The number of bytes pushed on the native stack.
uint32_t stackPushed_;
@@ -254,8 +207,6 @@
MOZ_MUST_USE bool init(const AllocatableGeneralRegisterSet& available);
void initAvailableRegsAfterSpill();
OperandLocation operandLocation(size_t i) const {
return operandLocations_[i];
}
@@ -282,13 +233,6 @@
void initInputLocation(size_t i, const TypedOrValueRegister& reg);
void initInputLocation(size_t i, const ConstantOrRegister& value);
const SpilledRegisterVector& spilledRegs() const { return spilledRegs_; }
MOZ_MUST_USE bool setSpilledRegs(const SpilledRegisterVector& regs) {
spilledRegs_.clear();
return spilledRegs_.appendAll(regs);
}
void nextOp() {
currentOpRegs_.clear();
currentInstruction_++;
@@ -344,14 +288,7 @@
// Emits code to restore registers and stack to the state at the start of
// the stub.
void restoreInputState(MacroAssembler& masm, bool discardStack = true);
// Returns the set of registers storing the IC input operands.
GeneralRegisterSet inputRegisterSet() const;
void saveIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs,
Register scratch, IonScript* ionScript);
void restoreIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs);
void restoreInputState(MacroAssembler& masm);
};
// RAII class to allocate a scratch register and release it when we're done
@@ -361,9 +298,6 @@ class MOZ_RAII AutoScratchRegister
CacheRegisterAllocator& alloc_;
Register reg_;
AutoScratchRegister(const AutoScratchRegister&) = delete;
void operator=(const AutoScratchRegister&) = delete;
public:
AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm,
Register reg = InvalidReg)
@@ -380,8 +314,6 @@
~AutoScratchRegister() {
alloc_.releaseRegister(reg_);
}
Register get() const { return reg_; }
operator Register() const { return reg_; }
};
@@ -422,7 +354,6 @@ class MOZ_RAII AutoScratchRegisterExcluding
class FailurePath
{
Vector<OperandLocation, 4, SystemAllocPolicy> inputs_;
SpilledRegisterVector spilledRegs_;
NonAssertingLabel label_;
uint32_t stackPushed_;
@@ -431,7 +362,6 @@ class FailurePath
FailurePath(FailurePath&& other)
: inputs_(Move(other.inputs_)),
spilledRegs_(Move(other.spilledRegs_)),
label_(other.label_),
stackPushed_(other.stackPushed_)
{}
@@ -441,27 +371,18 @@
void setStackPushed(uint32_t i) { stackPushed_ = i; }
uint32_t stackPushed() const { return stackPushed_; }
MOZ_MUST_USE bool appendInput(const OperandLocation& loc) {
bool appendInput(const OperandLocation& loc) {
return inputs_.append(loc);
}
OperandLocation input(size_t i) const {
return inputs_[i];
}
const SpilledRegisterVector& spilledRegs() const { return spilledRegs_; }
MOZ_MUST_USE bool setSpilledRegs(const SpilledRegisterVector& regs) {
MOZ_ASSERT(spilledRegs_.empty());
return spilledRegs_.appendAll(regs);
}
// If canShareFailurePath(other) returns true, the same machine code will
// be emitted for two failure paths, so we can share them.
bool canShareFailurePath(const FailurePath& other) const;
};
class AutoOutputRegister;
// Base class for BaselineCacheIRCompiler and IonCacheIRCompiler.
class MOZ_RAII CacheIRCompiler
{
@@ -481,9 +402,6 @@ class MOZ_RAII CacheIRCompiler
Maybe<TypedOrValueRegister> outputUnchecked_;
Mode mode_;
// Whether this IC may read double values from uint32 arrays.
Maybe<bool> allowDoubleResult_;
CacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, Mode mode)
: cx_(cx),
reader(writer),
@@ -495,11 +413,8 @@
}
MOZ_MUST_USE bool addFailurePath(FailurePath** failure);
MOZ_MUST_USE bool emitFailurePath(size_t i);
void emitLoadTypedObjectResultShared(const Address& fieldAddr, Register scratch,
TypedThingLayout layout, uint32_t typeDescr,
const AutoOutputRegister& output);
void emitFailurePath(size_t index);
#define DEFINE_SHARED_OP(op) MOZ_MUST_USE bool emit##op();
CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
@@ -612,20 +527,12 @@ class CacheIRStubInfo
static CacheIRStubInfo* New(CacheKind kind, ICStubEngine engine, bool canMakeCalls,
uint32_t stubDataOffset, const CacheIRWriter& writer);
template <class Stub, class T>
js::GCPtr<T>& getStubField(Stub* stub, uint32_t field) const;
template <class T>
js::GCPtr<T>& getStubField(ICStub* stub, uint32_t field) const {
return getStubField<ICStub, T>(stub, field);
}
js::GCPtr<T>& getStubField(ICStub* stub, uint32_t field) const;
void copyStubData(ICStub* src, ICStub* dest) const;
};
template <typename T>
void TraceCacheIRStub(JSTracer* trc, T* stub, const CacheIRStubInfo* stubInfo);
} // namespace jit
} // namespace js

View File

@@ -9549,22 +9549,22 @@ CodeGenerator::linkSharedStubs(JSContext* cx)
switch (sharedStubs_[i].kind) {
case ICStub::Kind::BinaryArith_Fallback: {
ICBinaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC);
ICBinaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
stub = stubCompiler.getStub(&stubSpace_);
break;
}
case ICStub::Kind::UnaryArith_Fallback: {
ICUnaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC);
ICUnaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
stub = stubCompiler.getStub(&stubSpace_);
break;
}
case ICStub::Kind::Compare_Fallback: {
ICCompare_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC);
ICCompare_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
stub = stubCompiler.getStub(&stubSpace_);
break;
}
case ICStub::Kind::GetProp_Fallback: {
ICGetProp_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC);
ICGetProp_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
stub = stubCompiler.getStub(&stubSpace_);
break;
}
@@ -9575,12 +9575,12 @@ CodeGenerator::linkSharedStubs(JSContext* cx)
if (!group)
return false;
ICNewArray_Fallback::Compiler stubCompiler(cx, group, ICStubCompiler::Engine::IonSharedIC);
ICNewArray_Fallback::Compiler stubCompiler(cx, group, ICStubCompiler::Engine::IonMonkey);
stub = stubCompiler.getStub(&stubSpace_);
break;
}
case ICStub::Kind::NewObject_Fallback: {
ICNewObject_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC);
ICNewObject_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
stub = stubCompiler.getStub(&stubSpace_);
break;
}
@@ -10178,8 +10178,7 @@ CodeGenerator::addGetPropertyCache(LInstruction* ins, LiveRegisterSet liveRegs,
if (idString->isAtom() && !idString->asAtom().isIndex(&dummy))
kind = CacheKind::GetProp;
}
IonGetPropertyIC cache(kind, liveRegs, objReg, id, output, maybeTemp, monitoredResult,
allowDoubleResult);
IonGetPropertyIC cache(kind, liveRegs, objReg, id, output, maybeTemp, monitoredResult);
addIC(ins, allocateIC(cache));
return;
}

View File

@@ -1,989 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/CacheIRCompiler.h"
#include "jit/IonCaches.h"
#include "jit/IonIC.h"
#include "jit/Linker.h"
#include "jit/SharedICHelpers.h"
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace js::jit;
namespace js {
namespace jit {
// IonCacheIRCompiler compiles CacheIR to IonIC native code.
class MOZ_RAII IonCacheIRCompiler : public CacheIRCompiler
{
public:
friend class AutoSaveLiveRegisters;
IonCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, IonIC* ic, IonScript* ionScript)
: CacheIRCompiler(cx, writer, Mode::Ion),
writer_(writer),
ic_(ic),
ionScript_(ionScript),
nextStubField_(0),
#ifdef DEBUG
calledPrepareVMCall_(false),
#endif
savedLiveRegs_(false)
{
MOZ_ASSERT(ic_);
MOZ_ASSERT(ionScript_);
}
MOZ_MUST_USE bool init();
JitCode* compile(IonICStub* stub);
private:
const CacheIRWriter& writer_;
IonIC* ic_;
IonScript* ionScript_;
CodeOffsetJump rejoinOffset_;
Vector<CodeOffset, 4, SystemAllocPolicy> nextCodeOffsets_;
Maybe<LiveRegisterSet> liveRegs_;
Maybe<CodeOffset> stubJitCodeOffset_;
uint32_t nextStubField_;
#ifdef DEBUG
bool calledPrepareVMCall_;
#endif
bool savedLiveRegs_;
uintptr_t readStubWord(uint32_t offset, StubField::Type type) {
MOZ_ASSERT((offset % sizeof(uintptr_t)) == 0);
return writer_.readStubFieldForIon(nextStubField_++, type).asWord();
}
uint64_t readStubInt64(uint32_t offset, StubField::Type type) {
MOZ_ASSERT((offset % sizeof(uintptr_t)) == 0);
return writer_.readStubFieldForIon(nextStubField_++, type).asInt64();
}
int32_t int32StubField(uint32_t offset) {
return readStubWord(offset, StubField::Type::RawWord);
}
Shape* shapeStubField(uint32_t offset) {
return (Shape*)readStubWord(offset, StubField::Type::Shape);
}
JSObject* objectStubField(uint32_t offset) {
return (JSObject*)readStubWord(offset, StubField::Type::JSObject);
}
JSString* stringStubField(uint32_t offset) {
return (JSString*)readStubWord(offset, StubField::Type::String);
}
JS::Symbol* symbolStubField(uint32_t offset) {
return (JS::Symbol*)readStubWord(offset, StubField::Type::Symbol);
}
ObjectGroup* groupStubField(uint32_t offset) {
return (ObjectGroup*)readStubWord(offset, StubField::Type::ObjectGroup);
}
jsid idStubField(uint32_t offset) {
return mozilla::BitwiseCast<jsid>(readStubWord(offset, StubField::Type::Id));
}
template <typename T>
T rawWordStubField(uint32_t offset) {
static_assert(sizeof(T) == sizeof(uintptr_t), "T must have word size");
return (T)readStubWord(offset, StubField::Type::RawWord);
}
template <typename T>
T rawInt64StubField(uint32_t offset) {
static_assert(sizeof(T) == sizeof(int64_t), "T must have int64 size");
return (T)readStubInt64(offset, StubField::Type::RawInt64);
}
void prepareVMCall(MacroAssembler& masm);
MOZ_MUST_USE bool callVM(MacroAssembler& masm, const VMFunction& fun);
void pushStubCodePointer() {
stubJitCodeOffset_.emplace(masm.PushWithPatch(ImmPtr((void*)-1)));
}
#define DEFINE_OP(op) MOZ_MUST_USE bool emit##op();
CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP
};
// AutoSaveLiveRegisters must be used when we make a call that can GC. The
// constructor ensures all live registers are stored on the stack (where the GC
// expects them) and the destructor restores these registers.
class MOZ_RAII AutoSaveLiveRegisters
{
IonCacheIRCompiler& compiler_;
AutoSaveLiveRegisters(const AutoSaveLiveRegisters&) = delete;
void operator=(const AutoSaveLiveRegisters&) = delete;
public:
explicit AutoSaveLiveRegisters(IonCacheIRCompiler& compiler)
: compiler_(compiler)
{
MOZ_ASSERT(compiler_.liveRegs_.isSome());
compiler_.allocator.saveIonLiveRegisters(compiler_.masm,
compiler_.liveRegs_.ref(),
compiler_.ic_->scratchRegisterForEntryJump(),
compiler_.ionScript_);
compiler_.savedLiveRegs_ = true;
}
~AutoSaveLiveRegisters() {
MOZ_ASSERT(compiler_.stubJitCodeOffset_.isSome(), "Must have pushed JitCode* pointer");
compiler_.allocator.restoreIonLiveRegisters(compiler_.masm, compiler_.liveRegs_.ref());
MOZ_ASSERT(compiler_.masm.framePushed() == compiler_.ionScript_->frameSize());
}
};
} // namespace jit
} // namespace js
#define DEFINE_SHARED_OP(op) \
bool IonCacheIRCompiler::emit##op() { return CacheIRCompiler::emit##op(); }
CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
#undef DEFINE_SHARED_OP
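The DEFINE_SHARED_OP block above is the list-macro (X-macro) idiom: CACHE_IR_SHARED_OPS expands its argument once per shared op, generating one forwarding method each. A toy, self-contained version of the idiom (op and type names here are illustrative, not from this patch):

    // Toy op list standing in for CACHE_IR_SHARED_OPS.
    #define TOY_SHARED_OPS(_) _(GuardIsObject) _(LoadFixedSlotResult)

    struct SharedCompiler {
        bool emitGuardIsObject() { return true; }
        bool emitLoadFixedSlotResult() { return true; }
    };

    struct IonCompilerToy : SharedCompiler {
        // Expands to one bool emitXxx() forwarder per op in the list.
    #define FORWARD_OP(op) bool emit##op() { return SharedCompiler::emit##op(); }
        TOY_SHARED_OPS(FORWARD_OP)
    #undef FORWARD_OP
    };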
void
CacheRegisterAllocator::saveIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs,
Register scratch, IonScript* ionScript)
{
MOZ_ASSERT(!liveRegs.has(scratch));
// We have to push all registers in liveRegs on the stack. It's possible we
// stored other values in our live registers and stored operands on the
// stack (where our live registers should go), so this requires some careful
// work. Try to keep it simple by taking one small step at a time.
// Step 1. Discard any dead operands so we can reuse their registers.
freeDeadOperandRegisters();
// Step 2. Figure out the size of our live regs.
size_t sizeOfLiveRegsInBytes =
liveRegs.gprs().size() * sizeof(intptr_t) +
liveRegs.fpus().getPushSizeInBytes();
MOZ_ASSERT(sizeOfLiveRegsInBytes > 0);
// Step 3. Ensure all non-input operands are on the stack.
size_t numInputs = writer_.numInputOperands();
for (size_t i = numInputs; i < operandLocations_.length(); i++) {
OperandLocation& loc = operandLocations_[i];
if (loc.isInRegister())
spillOperandToStack(masm, &loc);
}
// Step 4. Restore the register state, but don't discard the stack as
// non-input operands are stored there.
restoreInputState(masm, /* shouldDiscardStack = */ false);
// We just restored the input state, so no input operands should be stored
// on the stack.
#ifdef DEBUG
for (size_t i = 0; i < numInputs; i++) {
const OperandLocation& loc = operandLocations_[i];
MOZ_ASSERT(!loc.isOnStack());
}
#endif
// Step 5. At this point our register state is correct. Stack values,
// however, may cover the space where we have to store the live registers.
// Move them out of the way.
bool hasOperandOnStack = false;
for (size_t i = numInputs; i < operandLocations_.length(); i++) {
OperandLocation& loc = operandLocations_[i];
if (!loc.isOnStack())
continue;
hasOperandOnStack = true;
size_t operandSize = loc.stackSizeInBytes();
size_t operandStackPushed = loc.stackPushed();
MOZ_ASSERT(operandSize > 0);
MOZ_ASSERT(stackPushed_ >= operandStackPushed);
MOZ_ASSERT(operandStackPushed >= operandSize);
// If this operand doesn't cover the live register space, there's
// nothing to do.
if (operandStackPushed - operandSize >= sizeOfLiveRegsInBytes) {
MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
continue;
}
// Reserve stack space for the live registers if needed.
if (sizeOfLiveRegsInBytes > stackPushed_) {
size_t extraBytes = sizeOfLiveRegsInBytes - stackPushed_;
MOZ_ASSERT((extraBytes % sizeof(uintptr_t)) == 0);
masm.subFromStackPtr(Imm32(extraBytes));
stackPushed_ += extraBytes;
}
// Push the operand below the live register space.
if (loc.kind() == OperandLocation::PayloadStack) {
masm.push(Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
stackPushed_ += operandSize;
loc.setPayloadStack(stackPushed_, loc.payloadType());
continue;
}
MOZ_ASSERT(loc.kind() == OperandLocation::ValueStack);
masm.pushValue(Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
stackPushed_ += operandSize;
loc.setValueStack(stackPushed_);
}
// Step 6. If we have any operands on the stack, adjust their stackPushed
// values to not include sizeOfLiveRegsInBytes (this simplifies code down
// the line). Then push/store the live registers.
if (hasOperandOnStack) {
MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
stackPushed_ -= sizeOfLiveRegsInBytes;
for (size_t i = numInputs; i < operandLocations_.length(); i++) {
OperandLocation& loc = operandLocations_[i];
if (loc.isOnStack())
loc.adjustStackPushed(-int32_t(sizeOfLiveRegsInBytes));
}
size_t stackBottom = stackPushed_ + sizeOfLiveRegsInBytes;
masm.storeRegsInMask(liveRegs, Address(masm.getStackPointer(), stackBottom), scratch);
masm.setFramePushed(masm.framePushed() + sizeOfLiveRegsInBytes);
} else {
// If no operands are on the stack, discard the unused stack space.
if (stackPushed_ > 0) {
masm.addToStackPtr(Imm32(stackPushed_));
stackPushed_ = 0;
}
masm.PushRegsInMask(liveRegs);
}
MOZ_ASSERT(masm.framePushed() == ionScript->frameSize() + sizeOfLiveRegsInBytes);
// Step 7. All live registers and non-input operands are stored on the stack
// now, so at this point all registers except for the input registers are
// available.
availableRegs_.set() = GeneralRegisterSet::Not(inputRegisterSet());
availableRegsAfterSpill_.set() = GeneralRegisterSet();
}
void
CacheRegisterAllocator::restoreIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs)
{
masm.PopRegsInMask(liveRegs);
availableRegs_.set() = GeneralRegisterSet();
availableRegsAfterSpill_.set() = GeneralRegisterSet::All();
}
void
IonCacheIRCompiler::prepareVMCall(MacroAssembler& masm)
{
uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS,
IonICCallFrameLayout::Size());
pushStubCodePointer();
masm.Push(Imm32(descriptor));
masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));
#ifdef DEBUG
calledPrepareVMCall_ = true;
#endif
}
bool
IonCacheIRCompiler::callVM(MacroAssembler& masm, const VMFunction& fun)
{
MOZ_ASSERT(calledPrepareVMCall_);
JitCode* code = cx_->jitRuntime()->getVMWrapper(fun);
if (!code)
return false;
uint32_t frameSize = fun.explicitStackSlots() * sizeof(void*);
uint32_t descriptor = MakeFrameDescriptor(frameSize, JitFrame_IonICCall,
ExitFrameLayout::Size());
masm.Push(Imm32(descriptor));
masm.callJit(code);
// Remove the rest of the frame left on the stack. We remove the return
// address, which is implicitly popped when returning.
int framePop = sizeof(ExitFrameLayout) - sizeof(void*);
// Pop arguments from framePushed.
masm.implicitPop(frameSize + framePop);
masm.freeStack(IonICCallFrameLayout::Size());
return true;
}
bool
IonCacheIRCompiler::init()
{
size_t numInputs = writer_.numInputOperands();
AllocatableGeneralRegisterSet available;
if (ic_->kind() == CacheKind::GetProp || ic_->kind() == CacheKind::GetElem) {
IonGetPropertyIC* ic = ic_->asGetPropertyIC();
TypedOrValueRegister output = ic->output();
if (output.hasValue())
available.add(output.valueReg());
else if (!output.typedReg().isFloat())
available.add(output.typedReg().gpr());
if (ic->maybeTemp() != InvalidReg)
available.add(ic->maybeTemp());
liveRegs_.emplace(ic->liveRegs());
outputUnchecked_.emplace(output);
allowDoubleResult_.emplace(ic->allowDoubleResult());
if (!allocator.init(available))
return false;
MOZ_ASSERT(numInputs == 1 || numInputs == 2);
allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);
if (numInputs > 1)
allocator.initInputLocation(1, ic->id());
} else {
MOZ_CRASH("Invalid cache");
}
allocator.initAvailableRegsAfterSpill();
return true;
}
JitCode*
IonCacheIRCompiler::compile(IonICStub* stub)
{
masm.setFramePushed(ionScript_->frameSize());
if (cx_->spsProfiler.enabled())
masm.enableProfilingInstrumentation();
do {
switch (reader.readOp()) {
#define DEFINE_OP(op) \
case CacheOp::op: \
if (!emit##op()) \
return nullptr; \
break;
CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP
default:
MOZ_CRASH("Invalid op");
}
allocator.nextOp();
} while (reader.more());
MOZ_ASSERT(nextStubField_ == writer_.numStubFields());
masm.assumeUnreachable("Should have returned from IC");
// Done emitting the main IC code. Now emit the failure paths.
for (size_t i = 0; i < failurePaths.length(); i++) {
if (!emitFailurePath(i))
return nullptr;
Register scratch = ic_->scratchRegisterForEntryJump();
CodeOffset offset = masm.movWithPatch(ImmWord(-1), scratch);
masm.jump(Address(scratch, 0));
if (!nextCodeOffsets_.append(offset))
return nullptr;
}
Linker linker(masm);
AutoFlushICache afc("getStubCode");
Rooted<JitCode*> newStubCode(cx_, linker.newCode<NoGC>(cx_, ION_CODE));
if (!newStubCode) {
cx_->recoverFromOutOfMemory();
return nullptr;
}
rejoinOffset_.fixup(&masm);
CodeLocationJump rejoinJump(newStubCode, rejoinOffset_);
PatchJump(rejoinJump, ic_->rejoinLabel());
for (CodeOffset offset : nextCodeOffsets_) {
Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, offset),
ImmPtr(stub->nextCodeRawPtr()),
ImmPtr((void*)-1));
}
if (stubJitCodeOffset_) {
Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
ImmPtr(newStubCode.get()),
ImmPtr((void*)-1));
}
// All barriers are emitted off by default; enable them if needed.
if (cx_->zone()->needsIncrementalBarrier())
newStubCode->togglePreBarriers(true, DontReprotect);
return newStubCode;
}
bool
IonCacheIRCompiler::emitGuardShape()
{
Register obj = allocator.useRegister(masm, reader.objOperandId());
Shape* shape = shapeStubField(reader.stubOffset());
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
masm.branchTestObjShape(Assembler::NotEqual, obj, shape, failure->label());
return true;
}
bool
IonCacheIRCompiler::emitGuardGroup()
{
Register obj = allocator.useRegister(masm, reader.objOperandId());
ObjectGroup* group = groupStubField(reader.stubOffset());
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
masm.branchTestObjGroup(Assembler::NotEqual, obj, group, failure->label());
return true;
}
bool
IonCacheIRCompiler::emitGuardProto()
{
Register obj = allocator.useRegister(masm, reader.objOperandId());
JSObject* proto = objectStubField(reader.stubOffset());
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
masm.loadObjProto(obj, scratch);
masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(proto), failure->label());
return true;
}
bool
IonCacheIRCompiler::emitGuardSpecificObject()
{
Register obj = allocator.useRegister(masm, reader.objOperandId());
JSObject* expected = objectStubField(reader.stubOffset());
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
masm.branchPtr(Assembler::NotEqual, obj, ImmGCPtr(expected), failure->label());
return true;
}
bool
IonCacheIRCompiler::emitGuardSpecificAtom()
{
Register str = allocator.useRegister(masm, reader.stringOperandId());
AutoScratchRegister scratch(allocator, masm);
JSAtom* atom = &stringStubField(reader.stubOffset())->asAtom();
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
Label done;
masm.branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);
// The pointers are not equal, so if the input string is also an atom it
// must be a different string.
masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), failure->label());
// Check the length.
masm.branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
Imm32(atom->length()), failure->label());
// We have a non-atomized string with the same length. Call a helper
// function to do the comparison.
LiveRegisterSet volatileRegs(RegisterSet::Volatile());
masm.PushRegsInMask(volatileRegs);
masm.setupUnalignedABICall(scratch);
masm.movePtr(ImmGCPtr(atom), scratch);
masm.passABIArg(scratch);
masm.passABIArg(str);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, EqualStringsHelper));
masm.mov(ReturnReg, scratch);
LiveRegisterSet ignore;
ignore.add(scratch);
masm.PopRegsInMaskIgnore(volatileRegs, ignore);
masm.branchIfFalseBool(scratch, failure->label());
masm.bind(&done);
return true;
}
bool
IonCacheIRCompiler::emitGuardSpecificSymbol()
{
Register sym = allocator.useRegister(masm, reader.symbolOperandId());
JS::Symbol* expected = symbolStubField(reader.stubOffset());
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
masm.branchPtr(Assembler::NotEqual, sym, ImmGCPtr(expected), failure->label());
return true;
}
bool
IonCacheIRCompiler::emitLoadFixedSlotResult()
{
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
masm.loadTypedOrValue(Address(obj, offset), output);
return true;
}
bool
IonCacheIRCompiler::emitLoadDynamicSlotResult()
{
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
masm.loadTypedOrValue(Address(scratch, offset), output);
return true;
}
bool
IonCacheIRCompiler::emitCallScriptedGetterResult()
{
AutoSaveLiveRegisters save(*this);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
uint32_t framePushedBefore = masm.framePushed();
// Construct IonICCallFrameLayout.
uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS,
IonICCallFrameLayout::Size());
pushStubCodePointer();
masm.Push(Imm32(descriptor));
masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));
// The JitFrameLayout pushed below will be aligned to JitStackAlignment,
// so we just have to make sure the stack is aligned after we push the
// |this| + argument Values.
uint32_t argSize = (target->nargs() + 1) * sizeof(Value);
uint32_t padding = ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
MOZ_ASSERT(padding < JitStackAlignment);
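// Worked example (editorial; assumes JitStackAlignment == 16 and
// sizeof(Value) == 8): with framePushed() == 48 and nargs == 2,
// argSize == (2 + 1) * 8 == 24, so ComputeByteAlignment(48 + 24, 16)
// == (16 - 72 % 16) % 16 == 8 bytes of padding.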
masm.reserveStack(padding);
for (size_t i = 0; i < target->nargs(); i++)
masm.Push(UndefinedValue());
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
masm.movePtr(ImmGCPtr(target), scratch);
descriptor = MakeFrameDescriptor(argSize + padding, JitFrame_IonICCall,
JitFrameLayout::Size());
masm.Push(Imm32(0)); // argc
masm.Push(scratch);
masm.Push(Imm32(descriptor));
// Check stack alignment. Add sizeof(uintptr_t) for the return address.
MOZ_ASSERT(((masm.framePushed() + sizeof(uintptr_t)) % JitStackAlignment) == 0);
// The getter has JIT code now and we will only discard the getter's JIT
// code when discarding all JIT code in the Zone, so we can assume it'll
// still have JIT code.
MOZ_ASSERT(target->hasJITCode());
masm.loadPtr(Address(scratch, JSFunction::offsetOfNativeOrScript()), scratch);
masm.loadBaselineOrIonRaw(scratch, scratch, nullptr);
masm.callJit(scratch);
masm.storeCallResultValue(output);
masm.freeStack(masm.framePushed() - framePushedBefore);
return true;
}
bool
IonCacheIRCompiler::emitCallNativeGetterResult()
{
AutoSaveLiveRegisters save(*this);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
MOZ_ASSERT(target->isNative());
AutoScratchRegister argJSContext(allocator, masm);
AutoScratchRegister argUintN(allocator, masm);
AutoScratchRegister argVp(allocator, masm);
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
// Native functions have the signature:
// bool (*)(JSContext*, unsigned, Value* vp)
// Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
// are the function arguments.
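// (Editorial sketch of a native matching that signature; MyGetter is an
// illustrative name, not from this patch:
//   static bool MyGetter(JSContext* cx, unsigned argc, JS::Value* vp) {
//       JS::CallArgs args = JS::CallArgsFromVp(argc, vp);  // wraps vp[0..]
//       args.rval().setInt32(42);                          // writes vp[0]
//       return true;
//   })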
// Construct vp array:
// Push object value for |this|
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
// Push callee/outparam.
masm.Push(ObjectValue(*target));
// Preload arguments into registers.
masm.loadJSContext(argJSContext);
masm.move32(Imm32(0), argUintN);
masm.moveStackPtrTo(argVp.get());
// Push marking data for later use.
masm.Push(argUintN);
pushStubCodePointer();
if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save))
return false;
masm.enterFakeExitFrame(IonOOLNativeExitFrameLayoutToken);
// Construct and execute call.
masm.setupUnalignedABICall(scratch);
masm.passABIArg(argJSContext);
masm.passABIArg(argUintN);
masm.passABIArg(argVp);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->native()));
// Test for failure.
masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
// Load the outparam vp[0] into output register(s).
Address outparam(masm.getStackPointer(), IonOOLNativeExitFrameLayout::offsetOfResult());
masm.loadValue(outparam, output.valueReg());
masm.adjustStack(IonOOLNativeExitFrameLayout::Size(0));
return true;
}
bool
IonCacheIRCompiler::emitCallProxyGetResult()
{
AutoSaveLiveRegisters save(*this);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
jsid id = idStubField(reader.stubOffset());
// ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
// MutableHandleValue vp)
AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
AutoScratchRegister argProxy(allocator, masm);
AutoScratchRegister argId(allocator, masm);
AutoScratchRegister argVp(allocator, masm);
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
// Push stubCode for marking.
pushStubCodePointer();
// Push args on stack first so we can take pointers to make handles.
masm.Push(UndefinedValue());
masm.moveStackPtrTo(argVp.get());
masm.Push(id, scratch);
masm.moveStackPtrTo(argId.get());
// Push the proxy. Also used as receiver.
masm.Push(obj);
masm.moveStackPtrTo(argProxy.get());
masm.loadJSContext(argJSContext);
if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save))
return false;
masm.enterFakeExitFrame(IonOOLProxyExitFrameLayoutToken);
// Make the call.
masm.setupUnalignedABICall(scratch);
masm.passABIArg(argJSContext);
masm.passABIArg(argProxy);
masm.passABIArg(argId);
masm.passABIArg(argVp);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ProxyGetProperty));
// Test for failure.
masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
// Load the outparam vp[0] into output register(s).
Address outparam(masm.getStackPointer(), IonOOLProxyExitFrameLayout::offsetOfResult());
masm.loadValue(outparam, output.valueReg());
// masm.leaveExitFrame & pop locals
masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
return true;
}
typedef bool (*ProxyGetPropertyByValueFn)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
static const VMFunction ProxyGetPropertyByValueInfo =
FunctionInfo<ProxyGetPropertyByValueFn>(ProxyGetPropertyByValue, "ProxyGetPropertyByValue");
bool
IonCacheIRCompiler::emitCallProxyGetByValueResult()
{
AutoSaveLiveRegisters save(*this);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
allocator.discardStack(masm);
prepareVMCall(masm);
masm.Push(idVal);
masm.Push(obj);
if (!callVM(masm, ProxyGetPropertyByValueInfo))
return false;
masm.storeCallResultValue(output);
return true;
}
bool
IonCacheIRCompiler::emitLoadUnboxedPropertyResult()
{
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
JSValueType fieldType = reader.valueType();
int32_t fieldOffset = int32StubField(reader.stubOffset());
masm.loadUnboxedProperty(Address(obj, fieldOffset), fieldType, output);
return true;
}
bool
IonCacheIRCompiler::emitGuardFrameHasNoArgumentsObject()
{
MOZ_CRASH("Baseline-specific op");
}
bool
IonCacheIRCompiler::emitLoadFrameCalleeResult()
{
MOZ_CRASH("Baseline-specific op");
}
bool
IonCacheIRCompiler::emitLoadFrameNumActualArgsResult()
{
MOZ_CRASH("Baseline-specific op");
}
bool
IonCacheIRCompiler::emitLoadFrameArgumentResult()
{
MOZ_CRASH("Baseline-specific op");
}
bool
IonCacheIRCompiler::emitLoadTypedObjectResult()
{
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
TypedThingLayout layout = reader.typedThingLayout();
uint32_t typeDescr = reader.typeDescrKey();
uint32_t fieldOffset = int32StubField(reader.stubOffset());
// Get the object's data pointer.
LoadTypedThingData(masm, layout, obj, scratch1);
Address fieldAddr(scratch1, fieldOffset);
emitLoadTypedObjectResultShared(fieldAddr, scratch2, layout, typeDescr, output);
return true;
}
bool
IonCacheIRCompiler::emitTypeMonitorResult()
{
return emitReturnFromIC();
}
bool
IonCacheIRCompiler::emitReturnFromIC()
{
if (!savedLiveRegs_)
allocator.restoreInputState(masm);
RepatchLabel rejoin;
rejoinOffset_ = masm.jumpWithPatch(&rejoin);
masm.bind(&rejoin);
return true;
}
bool
IonCacheIRCompiler::emitLoadObject()
{
Register reg = allocator.defineRegister(masm, reader.objOperandId());
JSObject* obj = objectStubField(reader.stubOffset());
masm.movePtr(ImmGCPtr(obj), reg);
return true;
}
bool
IonCacheIRCompiler::emitGuardDOMExpandoObject()
{
ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
Shape* shape = shapeStubField(reader.stubOffset());
AutoScratchRegister objScratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
Label done;
masm.branchTestUndefined(Assembler::Equal, val, &done);
masm.unboxObject(val, objScratch);
masm.branchTestObjShape(Assembler::NotEqual, objScratch, shape, failure->label());
masm.bind(&done);
return true;
}
bool
IonCacheIRCompiler::emitGuardDOMExpandoGeneration()
{
Register obj = allocator.useRegister(masm, reader.objOperandId());
ExpandoAndGeneration* expandoAndGeneration =
rawWordStubField<ExpandoAndGeneration*>(reader.stubOffset());
uint64_t generation = rawInt64StubField<uint64_t>(reader.stubOffset());
AutoScratchRegister scratch(allocator, masm);
ValueOperand output = allocator.defineValueRegister(masm, reader.valOperandId());
FailurePath* failure;
if (!addFailurePath(&failure))
return false;
masm.loadPtr(Address(obj, ProxyObject::offsetOfValues()), scratch);
Address expandoAddr(scratch, ProxyObject::offsetOfExtraSlotInValues(GetDOMProxyExpandoSlot()));
// Guard that the ExpandoAndGeneration* matches the proxy's ExpandoAndGeneration.
masm.loadValue(expandoAddr, output);
masm.branchTestValue(Assembler::NotEqual, output, PrivateValue(expandoAndGeneration),
failure->label());
// Guard that expandoAndGeneration->generation matches the expected generation.
masm.movePtr(ImmPtr(expandoAndGeneration), output.scratchReg());
masm.branch64(Assembler::NotEqual,
Address(output.scratchReg(), ExpandoAndGeneration::offsetOfGeneration()),
Imm64(generation),
failure->label());
// Load expandoAndGeneration->expando into the output Value register.
masm.loadValue(Address(output.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), output);
return true;
}
bool
IonIC::attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
                         HandleScript outerScript)
{
    // We shouldn't GC or report OOM (or any other exception) here.
    AutoAssertNoPendingException aanpe(cx);
    JS::AutoCheckCannotGC nogc;

    if (writer.failed())
        return false;

    JitContext jctx(cx, nullptr);
    IonCacheIRCompiler compiler(cx, writer, this, outerScript->ionScript());
    if (!compiler.init())
        return false;

    JitZone* jitZone = cx->zone()->jitZone();
    uint32_t stubDataOffset = sizeof(IonICStub);

    // Try to reuse a previously-allocated CacheIRStubInfo.
    CacheIRStubKey::Lookup lookup(kind, ICStubEngine::IonIC,
                                  writer.codeStart(), writer.codeLength());
    CacheIRStubInfo* stubInfo = jitZone->getIonCacheIRStubInfo(lookup);
    if (!stubInfo) {
        // Allocate the shared CacheIRStubInfo. Note that the
        // putIonCacheIRStubInfo call below will transfer ownership to
        // the stub info HashSet, so we don't have to worry about freeing
        // it here.
        // For Ion ICs, we don't track/use the makesGCCalls flag, so just pass true.
        bool makesGCCalls = true;
        stubInfo = CacheIRStubInfo::New(kind, ICStubEngine::IonIC, makesGCCalls,
                                        stubDataOffset, writer);
        if (!stubInfo)
            return false;

        CacheIRStubKey key(stubInfo);
        if (!jitZone->putIonCacheIRStubInfo(lookup, key))
            return false;
    }

    MOZ_ASSERT(stubInfo);

    // Ensure we don't attach duplicate stubs. This can happen if a stub failed
    // for some reason and the IR generator doesn't check for exactly the same
    // conditions.
    for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
        if (stub->stubInfo() != stubInfo)
            continue;
        if (!writer.stubDataEquals(stub->stubDataStart()))
            continue;
        return true;
    }

    size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();

    // Allocate the IonICStub in the optimized stub space. Ion stubs and
    // CacheIRStubInfo instances for Ion stubs can be purged on GC. That's okay
    // because the stub code is rooted separately when we make a VM call, and
    // stub code should never access the IonICStub after making a VM call. The
    // IonICStub::poison method poisons the stub to catch bugs in this area.
    ICStubSpace* stubSpace = cx->zone()->jitZone()->optimizedStubSpace();
    void* newStubMem = stubSpace->alloc(bytesNeeded);
    if (!newStubMem)
        return false;

    IonICStub* newStub = new(newStubMem) IonICStub(fallbackLabel_.raw(), stubInfo);

    JitCode* code = compiler.compile(newStub);
    if (!code)
        return false;

    writer.copyStubData(newStub->stubDataStart());
    attachStub(newStub, code);
    return true;
}
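// The duplicate check above boils down to: two stubs are the same IC when
// they share a CacheIRStubInfo (identical CacheIR code) and their stub data
// (the concrete values bound to the stub fields) compare equal. As a
// hypothetical helper, for illustration only:
//
//   static bool IsDuplicateStub(IonICStub* stub, CacheIRStubInfo* info,
//                               const CacheIRWriter& writer)
//   {
//       return stub->stubInfo() == info &&
//              writer.stubDataEquals(stub->stubDataStart());
//   }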

View File

@ -390,8 +390,8 @@ IonCache::trace(JSTracer* trc)
TraceManuallyBarrieredEdge(trc, &script_, "IonCache::script_");
}
void*
jit::GetReturnAddressToIonCode(JSContext* cx)
static void*
GetReturnAddressToIonCode(JSContext* cx)
{
    JitFrameIterator iter(cx);
    MOZ_ASSERT(iter.type() == JitFrame_Exit,

View File

@ -849,8 +849,6 @@ bool IsCacheableGetPropCallNative(JSObject* obj, JSObject* holder, Shape* shape)
bool ValueToNameOrSymbolId(JSContext* cx, HandleValue idval, MutableHandleId id,
                           bool* nameOrSymbol);
void* GetReturnAddressToIonCode(JSContext* cx);
} // namespace jit
} // namespace js

View File

@ -74,41 +74,6 @@ IonIC::trace(JSTracer* trc)
{
    if (script_)
        TraceManuallyBarrieredEdge(trc, &script_, "IonIC::script_");

    uint8_t* nextCodeRaw = codeRaw_;
    for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
        JitCode* code = JitCode::FromExecutable(nextCodeRaw);
        TraceManuallyBarrieredEdge(trc, &code, "ion-ic-code");
        TraceCacheIRStub(trc, stub, stub->stubInfo());
        nextCodeRaw = stub->nextCodeRaw();
    }

    MOZ_ASSERT(nextCodeRaw == fallbackLabel_.raw());
}
void
IonGetPropertyIC::maybeDisable(Zone* zone, bool attached)
{
    if (attached) {
        failedUpdates_ = 0;
        return;
    }

    if (!canAttachStub() && kind() == CacheKind::GetProp) {
        // Don't disable the cache (and discard stubs) if we have a GETPROP and
        // attached the maximum number of stubs. This can happen when JS code
        // uses an AST-like data structure and accesses a field of a "base
        // class", like node.nodeType. This should be temporary until we handle
        // this case better, see bug 1107515.
        return;
    }

    if (++failedUpdates_ > MAX_FAILED_UPDATES) {
        JitSpew(JitSpew_IonIC, "Disable inline cache");
        disable(zone);
    }
}
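// The heuristic above, as a decision table (sketch):
//
//   attach succeeded                  -> reset failedUpdates_ to 0
//   stub list full and kind GetProp   -> leave the cache alone (bug 1107515)
//   otherwise                         -> ++failedUpdates_ > MAX_FAILED_UPDATES
//                                        disables the IC for good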
/* static */ bool
@ -123,22 +88,6 @@ IonGetPropertyIC::update(JSContext* cx, HandleScript outerScript, IonGetProperty
    adi.disable();

    bool attached = false;
    if (!JitOptions.disableCacheIR && !ic->disabled()) {
        if (ic->canAttachStub()) {
            jsbytecode* pc = ic->idempotent() ? nullptr : ic->pc();
            RootedValue objVal(cx, ObjectValue(*obj));
            bool isTemporarilyUnoptimizable;
            GetPropIRGenerator gen(cx, pc, ICStubEngine::IonIC, ic->kind(),
                                   &isTemporarilyUnoptimizable,
                                   objVal, idVal);
            if (ic->idempotent() ? gen.tryAttachIdempotentStub() : gen.tryAttachStub()) {
                attached = ic->attachCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
                                                 outerScript);
            }
        }
        ic->maybeDisable(cx->zone(), attached);
    }

    if (!attached && ic->idempotent()) {
        // Invalidate the cache if the property was not found, or was found on
        // a non-native object. This ensures:
@ -173,29 +122,3 @@ IonGetPropertyIC::update(JSContext* cx, HandleScript outerScript, IonGetProperty
    return true;
}
uint8_t*
IonICStub::stubDataStart()
{
    return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
}

void
IonIC::attachStub(IonICStub* newStub, JitCode* code)
{
    MOZ_ASSERT(canAttachStub());
    MOZ_ASSERT(newStub);
    MOZ_ASSERT(code);

    if (firstStub_) {
        IonICStub* last = firstStub_;
        while (IonICStub* next = last->next())
            last = next;
        last->setNext(newStub, code);
    } else {
        firstStub_ = newStub;
        codeRaw_ = code->raw();
    }

    numStubs_++;
}
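// attachStub keeps the stubs in a singly-linked, append-only list; an empty
// list means codeRaw_ still points at the fallback path. A hypothetical
// helper showing the same traversal pattern used above:
//
//   static size_t CountStubs(IonICStub* firstStub)
//   {
//       size_t n = 0;
//       for (IonICStub* stub = firstStub; stub; stub = stub->next())
//           n++;
//       return n;
//   }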

View File

@ -38,8 +38,6 @@ class IonICStub
    CacheIRStubInfo* stubInfo() const { return stubInfo_; }
    IonICStub* next() const { return next_; }
    uint8_t* stubDataStart();

    void setNext(IonICStub* next, JitCode* nextCode) {
        MOZ_ASSERT(!next_);
        MOZ_ASSERT(next && nextCode);
@ -96,8 +94,6 @@ class IonIC
        disabled_(false)
    {}

    void attachStub(IonICStub* newStub, JitCode* code);

  public:
    void setScriptedLocation(JSScript* script, jsbytecode* pc) {
        MOZ_ASSERT(!script_ && !pc_);
@ -146,9 +142,6 @@ class IonIC
    Register scratchRegisterForEntryJump();

    void trace(JSTracer* trc);

    bool attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
                           HandleScript outerScript);
};
class IonGetPropertyIC : public IonIC
@ -164,12 +157,11 @@ class IonGetPropertyIC : public IonIC
    uint16_t failedUpdates_;

    bool monitoredResult_ : 1;
    bool allowDoubleResult_ : 1;

  public:
    IonGetPropertyIC(CacheKind kind, LiveRegisterSet liveRegs, Register object,
                     const ConstantOrRegister& id, TypedOrValueRegister output, Register maybeTemp,
                     bool monitoredResult, bool allowDoubleResult)
                     ConstantOrRegister id, TypedOrValueRegister output, Register maybeTemp,
                     bool monitoredResult)
      : IonIC(kind),
        liveRegs_(liveRegs),
        object_(object),
@ -177,8 +169,7 @@ class IonGetPropertyIC : public IonIC
        output_(output),
        maybeTemp_(maybeTemp),
        failedUpdates_(0),
        monitoredResult_(monitoredResult),
        allowDoubleResult_(allowDoubleResult)
        monitoredResult_(monitoredResult)
    { }

    bool monitoredResult() const { return monitoredResult_; }
@ -187,9 +178,6 @@ class IonGetPropertyIC : public IonIC
    TypedOrValueRegister output() const { return output_; }
    Register maybeTemp() const { return maybeTemp_; }
    LiveRegisterSet liveRegs() const { return liveRegs_; }
    bool allowDoubleResult() const { return allowDoubleResult_; }

    void maybeDisable(Zone* zone, bool attached);

    static MOZ_MUST_USE bool update(JSContext* cx, HandleScript outerScript, IonGetPropertyIC* ic,
                                    HandleObject obj, HandleValue idVal, MutableHandleValue res);

View File

@ -381,18 +381,28 @@ class JitRuntime
    }
};
class JitZone
{
    // Allocated space for optimized baseline stubs.
    OptimizedICStubSpace optimizedStubSpace_;

    // Allocated space for cached cfg.
    CFGSpace cfgSpace_;

  public:
    OptimizedICStubSpace* optimizedStubSpace() {
        return &optimizedStubSpace_;
    }
    CFGSpace* cfgSpace() {
        return &cfgSpace_;
    }
};
enum class CacheKind : uint8_t;
class CacheIRStubInfo;
enum class ICStubEngine : uint8_t {
    // Baseline IC, see SharedIC.h and BaselineIC.h.
    Baseline = 0,

    // Ion IC that reuses Baseline IC code, see SharedIC.h.
    IonSharedIC,

    // Ion IC, see IonIC.h.
    IonIC
    IonMonkey
};
struct CacheIRStubKey : public DefaultHasher<CacheIRStubKey> {
@ -420,45 +430,6 @@ struct CacheIRStubKey : public DefaultHasher<CacheIRStubKey> {
}
};
class JitZone
{
    // Allocated space for optimized baseline stubs.
    OptimizedICStubSpace optimizedStubSpace_;

    // Allocated space for cached cfg.
    CFGSpace cfgSpace_;

    // Set of CacheIRStubInfo instances used by Ion stubs in this Zone.
    using IonCacheIRStubInfoSet = HashSet<CacheIRStubKey, CacheIRStubKey, SystemAllocPolicy>;
    IonCacheIRStubInfoSet ionCacheIRStubInfoSet_;

  public:
    OptimizedICStubSpace* optimizedStubSpace() {
        return &optimizedStubSpace_;
    }
    CFGSpace* cfgSpace() {
        return &cfgSpace_;
    }

    CacheIRStubInfo* getIonCacheIRStubInfo(const CacheIRStubKey::Lookup& key) {
        if (!ionCacheIRStubInfoSet_.initialized())
            return nullptr;
        IonCacheIRStubInfoSet::Ptr p = ionCacheIRStubInfoSet_.lookup(key);
        return p ? p->stubInfo.get() : nullptr;
    }

    MOZ_MUST_USE bool putIonCacheIRStubInfo(const CacheIRStubKey::Lookup& lookup,
                                            CacheIRStubKey& key)
    {
        if (!ionCacheIRStubInfoSet_.initialized() && !ionCacheIRStubInfoSet_.init())
            return false;
        IonCacheIRStubInfoSet::AddPtr p = ionCacheIRStubInfoSet_.lookupForAdd(lookup);
        MOZ_ASSERT(!p);
        return ionCacheIRStubInfoSet_.add(p, Move(key));
    }

    void purgeIonCacheIRStubInfo() {
        ionCacheIRStubInfoSet_.finish();
    }
};
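// Usage sketch for the two accessors above, mirroring the shape of the code
// in IonIC::attachCacheIRStub (error handling elided; the p->stubInfo.get()
// above suggests CacheIRStubKey owns its stubInfo through a smart pointer,
// which is assumed here):
//
//   CacheIRStubInfo* info = jitZone->getIonCacheIRStubInfo(lookup);
//   if (!info) {
//       info = CacheIRStubInfo::New(kind, ICStubEngine::IonIC, makesGCCalls,
//                                   stubDataOffset, writer);
//       CacheIRStubKey key(info);
//       if (!jitZone->putIonCacheIRStubInfo(lookup, key))
//           return false;   // OOM; key's owning pointer frees the info.
//   }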
class JitCompartment
{
    friend class JitActivation;

View File

@ -2387,12 +2387,6 @@ MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr, AfterICSaveLive& a
    return buildOOLFakeExitFrame(fakeReturnAddr);
}

bool
MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr, AutoSaveLiveRegisters& save)
{
    return buildOOLFakeExitFrame(fakeReturnAddr);
}

void
MacroAssembler::icRestoreLive(LiveRegisterSet& liveRegs, AfterICSaveLive& aic)
{
View File

@ -191,8 +191,6 @@ namespace jit {
// Defined in JitFrames.h
enum ExitFrameTokenValues;

class AutoSaveLiveRegisters;

// The public entrypoint for emitting assembly. Note that a MacroAssembler can
// use cx->lifoAlloc, so take care not to interleave masm use with other
// lifoAlloc use if one will be destroyed before the other.
@ -2118,8 +2116,6 @@ class MacroAssembler : public MacroAssemblerSpecific
    MOZ_MUST_USE bool icBuildOOLFakeExitFrame(void* fakeReturnAddr, AfterICSaveLive& aic);
    void icRestoreLive(LiveRegisterSet& liveRegs, AfterICSaveLive& aic);

    MOZ_MUST_USE bool icBuildOOLFakeExitFrame(void* fakeReturnAddr, AutoSaveLiveRegisters& save);

    // Align the stack pointer based on the number of arguments which are pushed
    // on the stack, such that the JitFrameLayout would be correctly aligned on
    // the JitStackAlignment.

View File

@ -462,7 +462,7 @@ ICStub::trace(JSTracer* trc)
        break;
      }
      case ICStub::CacheIR_Monitored:
        TraceCacheIRStub(trc, this, toCacheIR_Monitored()->stubInfo());
        TraceBaselineCacheIRStub(trc, this, toCacheIR_Monitored()->stubInfo());
        break;
      default:
        break;
@ -749,7 +749,7 @@ ICStubCompiler::leaveStubFrame(MacroAssembler& masm, bool calledIntoIon)
void
ICStubCompiler::pushStubPayload(MacroAssembler& masm, Register scratch)
{
    if (engine_ == Engine::IonSharedIC) {
    if (engine_ == Engine::IonMonkey) {
        masm.push(Imm32(0));
        return;
    }

View File

@ -1097,9 +1097,7 @@ class SharedStubInfo
    SharedStubInfo(JSContext* cx, void* payload, ICEntry* entry);

    ICStubCompiler::Engine engine() const {
        return maybeFrame_
               ? ICStubCompiler::Engine::Baseline
               : ICStubCompiler::Engine::IonSharedIC;
        return maybeFrame_ ? ICStubCompiler::Engine::Baseline : ICStubCompiler::Engine::IonMonkey;
    }

    HandleScript script() const {

View File

@ -236,7 +236,6 @@ UNIFIED_SOURCES += [
    'jit/Ion.cpp',
    'jit/IonAnalysis.cpp',
    'jit/IonBuilder.cpp',
    'jit/IonCacheIRCompiler.cpp',
    'jit/IonCaches.cpp',
    'jit/IonControlFlow.cpp',
    'jit/IonIC.cpp',