Bug 1007027 - Replace MPhi::slot by a flag based on ResumePoint indexes. r=h4writer

This commit is contained in:
Nicolas B. Pierron 2014-05-15 22:57:18 -07:00
parent d833d52486
commit aac381731f
9 changed files with 163 additions and 107 deletions

View File

@ -0,0 +1,5 @@
// |jit-test| error: ReferenceError
(function(x) {
x = i ? 4 : 2
y
})()

View File

@ -404,13 +404,31 @@ class CompileInfo
return executionMode_ == ParallelExecution;
}
bool canOptimizeOutSlot(uint32_t i) const {
if (script()->strict())
// Returns true if a slot can be observed outside the current frame while
// the frame is active on the stack. This implies that these definitions
// would have to be executed and that they cannot be removed even if they
// are unused.
bool isObservableSlot(uint32_t slot) const {
    // Non-function scripts have no |this| or argument slots, so nothing
    // below applies.
    if (!funMaybeLazy())
        return false;
    // The |this| value must always be observable.
    if (slot == thisSlot())
        return true;
    // If the function may need an arguments object, then make sure to
    // preserve the scope chain, because it may be needed to construct the
    // arguments object during bailout. If we've already created an
    // arguments object (or got one via OSR), preserve that as well.
    if (hasArguments() && (slot == scopeChainSlot() || slot == argsObjSlot()))
        return true;
    // Function.arguments can be used to access all arguments in non-strict
    // scripts, so we can't optimize out any arguments.
    if (!script()->strict() && firstArgSlot() <= slot && slot - firstArgSlot() < nargs())
        return true;
    return false;
}
private:

View File

@ -107,6 +107,12 @@ jit::EliminateDeadResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
if (ins->isImplicitlyUsed())
continue;
// If the instruction is captured by one of the resume points, then
// it might be observed indirectly while the frame is live on the
// stack, so it has to be computed.
if (ins->isObserved())
continue;
// Check if this instruction's result is only used within the
// current block, and keep track of its last use in a definition
// (not resume point). This requires the instructions in the block
@ -143,14 +149,6 @@ jit::EliminateDeadResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
continue;
}
// The operand is an uneliminable slot. This currently
// includes argument slots in non-strict scripts (due to being
// observable via Function.arguments).
if (!block->info().canOptimizeOutSlot(uses->index())) {
uses++;
continue;
}
// Store an optimized out magic value in place of all dead
// resume point operands. Making any such substitution can in
// general alter the interpreter's behavior, even though the
@ -232,30 +230,7 @@ IsPhiObservable(MPhi *phi, Observability observe)
break;
}
uint32_t slot = phi->slot();
CompileInfo &info = phi->block()->info();
JSFunction *fun = info.funMaybeLazy();
// If the Phi is of the |this| value, it must always be observable.
if (fun && slot == info.thisSlot())
return true;
// If the function may need an arguments object, then make sure to
// preserve the scope chain, because it may be needed to construct the
// arguments object during bailout. If we've already created an arguments
// object (or got one via OSR), preserve that as well.
if (fun && info.hasArguments() &&
(slot == info.scopeChainSlot() || slot == info.argsObjSlot()))
{
return true;
}
// The Phi is an uneliminable slot. Currently this includes argument slots
// in non-strict scripts (due to being observable via Function.arguments).
if (fun && !info.canOptimizeOutSlot(slot))
return true;
return false;
return phi->isObserved();
}
// Handles cases like:

View File

@ -444,14 +444,21 @@ IonBuilder::analyzeNewLoopTypes(MBasicBlock *entry, jsbytecode *start, jsbytecod
for (size_t i = 0; i < loopHeaders_.length(); i++) {
if (loopHeaders_[i].pc == start) {
MBasicBlock *oldEntry = loopHeaders_[i].header;
for (MPhiIterator oldPhi = oldEntry->phisBegin();
oldPhi != oldEntry->phisEnd();
oldPhi++)
{
MPhi *newPhi = entry->getSlot(oldPhi->slot())->toPhi();
MResumePoint *oldEntryRp = oldEntry->entryResumePoint();
size_t stackDepth = oldEntryRp->numOperands();
for (size_t slot = 0; slot < stackDepth; slot++) {
MDefinition *oldDef = oldEntryRp->getOperand(slot);
if (!oldDef->isPhi()) {
MOZ_ASSERT(oldDef->block()->id() < oldEntry->id());
MOZ_ASSERT(oldDef == entry->getSlot(slot));
continue;
}
MPhi *oldPhi = oldDef->toPhi();
MPhi *newPhi = entry->getSlot(slot)->toPhi();
if (!newPhi->addBackedgeType(oldPhi->type(), oldPhi->resultTypeSet()))
return false;
}
// Update the most recent header for this loop encountered, in case
// new types flow to the phis and the loop is processed at least
// three times.
@ -1150,26 +1157,24 @@ IonBuilder::maybeAddOsrTypeBarriers()
static const size_t OSR_PHI_POSITION = 1;
JS_ASSERT(preheader->getPredecessor(OSR_PHI_POSITION) == osrBlock);
MPhiIterator headerPhi = header->phisBegin();
while (headerPhi != header->phisEnd() && headerPhi->slot() < info().startArgSlot())
headerPhi++;
for (uint32_t i = info().startArgSlot(); i < osrBlock->stackDepth(); i++, headerPhi++) {
MResumePoint *headerRp = header->entryResumePoint();
size_t stackDepth = headerRp->numOperands();
MOZ_ASSERT(stackDepth == osrBlock->stackDepth());
for (uint32_t slot = info().startArgSlot(); slot < stackDepth; slot++) {
// Aliased slots are never accessed, since they need to go through
// the callobject. The typebarriers are added there and can be
// discarded here.
if (info().isSlotAliasedAtOsr(i))
if (info().isSlotAliasedAtOsr(slot))
continue;
MInstruction *def = osrBlock->getSlot(i)->toInstruction();
JS_ASSERT(headerPhi->slot() == i);
MPhi *preheaderPhi = preheader->getSlot(i)->toPhi();
MInstruction *def = osrBlock->getSlot(slot)->toInstruction();
MPhi *preheaderPhi = preheader->getSlot(slot)->toPhi();
MPhi *headerPhi = headerRp->getOperand(slot)->toPhi();
MIRType type = headerPhi->type();
types::TemporaryTypeSet *typeSet = headerPhi->resultTypeSet();
if (!addOsrValueTypeBarrier(i, &def, type, typeSet))
if (!addOsrValueTypeBarrier(slot, &def, type, typeSet))
return false;
preheaderPhi->replaceOperand(OSR_PHI_POSITION, def);
@ -4104,7 +4109,7 @@ IonBuilder::patchInlinedReturns(CallInfo &callInfo, MIRGraphReturns &returns, MB
return patchInlinedReturn(callInfo, returns[0], bottom);
// Accumulate multiple returns with a phi.
MPhi *phi = MPhi::New(alloc(), bottom->stackDepth());
MPhi *phi = MPhi::New(alloc());
if (!phi->reserveLength(returns.length()))
return nullptr;
@ -4533,7 +4538,7 @@ IonBuilder::inlineCalls(CallInfo &callInfo, ObjectVector &targets,
returnBlock->inheritSlots(dispatchBlock);
callInfo.popFormals(returnBlock);
MPhi *retPhi = MPhi::New(alloc(), returnBlock->stackDepth());
MPhi *retPhi = MPhi::New(alloc());
returnBlock->addPhi(retPhi);
returnBlock->push(retPhi);

View File

@ -2342,7 +2342,8 @@ MResumePoint::inherit(MBasicBlock *block)
}
}
void MResumePoint::dump(FILE *fp) const
void
MResumePoint::dump(FILE *fp) const
{
fprintf(fp, "resumepoint mode=");
@ -2377,6 +2378,12 @@ MResumePoint::dump() const
dump(stderr);
}
// An operand of a resume point is observable when the frame slot it
// captures can be seen from outside the frame (see
// CompileInfo::isObservableSlot); the operand index is used directly as
// the frame slot number.
bool
MResumePoint::isObservableOperand(size_t index) const
{
    return block()->info().isObservableSlot(index);
}
MDefinition *
MToInt32::foldsTo(TempAllocator &alloc, bool useValueNumbers)
{

View File

@ -67,6 +67,7 @@ MIRType MIRTypeFromValue(const js::Value &vp)
_(Movable) /* Allow LICM and GVN to move this instruction */ \
_(Lowered) /* (Debug only) has a virtual register */ \
_(Guard) /* Not removable if uses == 0 */ \
_(Observed) /* Cannot be optimized out */ \
\
/* Keep the flagged instruction in resume points and do not substitute this
* instruction by an UndefinedValue. This might be used by call inlining
@ -4687,7 +4688,6 @@ class MPhi MOZ_FINAL : public MDefinition, public InlineForwardListNode<MPhi>
{
js::Vector<MUse, 2, IonAllocPolicy> inputs_;
uint32_t slot_;
bool hasBackedgeType_;
bool triedToSpecialize_;
bool isIterator_;
@ -4707,9 +4707,8 @@ class MPhi MOZ_FINAL : public MDefinition, public InlineForwardListNode<MPhi>
public:
INSTRUCTION_HEADER(Phi)
MPhi(TempAllocator &alloc, uint32_t slot, MIRType resultType)
MPhi(TempAllocator &alloc, MIRType resultType)
: inputs_(alloc),
slot_(slot),
hasBackedgeType_(false),
triedToSpecialize_(false),
isIterator_(false),
@ -4723,8 +4722,8 @@ class MPhi MOZ_FINAL : public MDefinition, public InlineForwardListNode<MPhi>
setResultType(resultType);
}
static MPhi *New(TempAllocator &alloc, uint32_t slot, MIRType resultType = MIRType_Value) {
return new(alloc) MPhi(alloc, slot, resultType);
static MPhi *New(TempAllocator &alloc, MIRType resultType = MIRType_Value) {
return new(alloc) MPhi(alloc, resultType);
}
void setOperand(size_t index, MDefinition *operand) {
@ -4745,9 +4744,6 @@ class MPhi MOZ_FINAL : public MDefinition, public InlineForwardListNode<MPhi>
size_t numOperands() const {
return inputs_.length();
}
uint32_t slot() const {
return slot_;
}
bool hasBackedgeType() const {
return hasBackedgeType_;
}
@ -9692,8 +9688,12 @@ class MResumePoint MOZ_FINAL : public MNode, public InlineForwardListNode<MResum
// Overwrites an operand without updating its Uses. The new producer gets
// a use registered here, and is flagged as Observed if this operand's
// slot is visible outside the frame.
void setOperand(size_t index, MDefinition *operand) {
    JS_ASSERT(index < stackDepth_);
    // Note: We do not remove the isObserved flag, as this would imply that
    // we check the list of uses of the removed MDefinition.
    operands_[index].set(operand, this, index);
    operand->addUse(&operands_[index]);
    if (!operand->isObserved() && isObservableOperand(index))
        operand->setObserved();
}
void clearOperand(size_t index) {
@ -9715,8 +9715,12 @@ class MResumePoint MOZ_FINAL : public MNode, public InlineForwardListNode<MResum
// Number of operands captured by this resume point, which matches the
// stack depth at capture time.
size_t numOperands() const {
    return stackDepth_;
}
bool isObservableOperand(size_t index) const;
// Returns the producer of the operand at |index|. Asserts that any
// observable operand has its Observed flag set (the flag is maintained
// by setOperand).
MDefinition *getOperand(size_t index) const {
    JS_ASSERT(index < stackDepth_);
    MOZ_ASSERT_IF(isObservableOperand(index), operands_[index].producer()->isObserved());
    return operands_[index].producer();
}
jsbytecode *pc() const {

View File

@ -273,15 +273,17 @@ MBasicBlock::NewAsmJS(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred, Kin
if (!phis)
return nullptr;
// Note: Phis are inserted in the same order as the slots.
for (size_t i = 0; i < nphis; i++) {
MDefinition *predSlot = pred->getSlot(i);
JS_ASSERT(predSlot->type() != MIRType_Value);
MPhi *phi = new(phis + i) MPhi(alloc, i, predSlot->type());
MPhi *phi = new(phis + i) MPhi(alloc, predSlot->type());
JS_ALWAYS_TRUE(phi->reserveLength(2));
phi->addInput(predSlot);
// Append the Phis to the block.
block->addPhi(phi);
block->setSlot(i, phi);
}
@ -390,7 +392,7 @@ MBasicBlock::inherit(TempAllocator &alloc, BytecodeAnalysis *analysis, MBasicBlo
if (kind_ == PENDING_LOOP_HEADER) {
size_t i = 0;
for (i = 0; i < info().firstStackSlot(); i++) {
MPhi *phi = MPhi::New(alloc, i);
MPhi *phi = MPhi::New(alloc);
if (!phi->addInputSlow(pred->getSlot(i)))
return false;
addPhi(phi);
@ -411,7 +413,7 @@ MBasicBlock::inherit(TempAllocator &alloc, BytecodeAnalysis *analysis, MBasicBlo
}
for (; i < stackDepth(); i++) {
MPhi *phi = MPhi::New(alloc, i);
MPhi *phi = MPhi::New(alloc);
if (!phi->addInputSlow(pred->getSlot(i)))
return false;
addPhi(phi);
@ -910,9 +912,9 @@ MBasicBlock::addPredecessorPopN(TempAllocator &alloc, MBasicBlock *pred, uint32_
// Otherwise, create a new phi node.
MPhi *phi;
if (mine->type() == other->type())
phi = MPhi::New(alloc, i, mine->type());
phi = MPhi::New(alloc, mine->type());
else
phi = MPhi::New(alloc, i);
phi = MPhi::New(alloc);
addPhi(phi);
// Prime the phi for each predecessor, so input(x) comes from
@ -992,34 +994,8 @@ MBasicBlock::setBackedge(MBasicBlock *pred)
bool hadTypeChange = false;
// Add exit definitions to each corresponding phi at the entry.
for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++) {
MPhi *entryDef = *phi;
MDefinition *exitDef = pred->slots_[entryDef->slot()];
// Assert that we already placed phis for each slot.
JS_ASSERT(entryDef->block() == this);
if (entryDef == exitDef) {
// If the exit def is the same as the entry def, make a redundant
// phi. Since loop headers have exactly two incoming edges, we
// know that that's just the first input.
//
// Note that we eliminate later rather than now, to avoid any
// weirdness around pending continue edges which might still hold
// onto phis.
exitDef = entryDef->getOperand(0);
}
bool typeChange = false;
if (!entryDef->addInputSlow(exitDef, &typeChange))
return AbortReason_Alloc;
hadTypeChange |= typeChange;
JS_ASSERT(entryDef->slot() < pred->stackDepth());
setSlot(entryDef->slot(), entryDef);
}
if (!inheritPhisFromBackedge(pred, &hadTypeChange))
return AbortReason_Alloc;
if (hadTypeChange) {
for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++)
@ -1048,9 +1024,12 @@ MBasicBlock::setBackedgeAsmJS(MBasicBlock *pred)
JS_ASSERT(kind_ == PENDING_LOOP_HEADER);
// Add exit definitions to each corresponding phi at the entry.
for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++) {
// Note: Phis are inserted in the same order as the slots. (see
// MBasicBlock::NewAsmJS)
size_t slot = 0;
for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++, slot++) {
MPhi *entryDef = *phi;
MDefinition *exitDef = pred->getSlot(entryDef->slot());
MDefinition *exitDef = pred->getSlot(slot);
// Assert that we already placed phis for each slot.
JS_ASSERT(entryDef->block() == this);
@ -1073,8 +1052,8 @@ MBasicBlock::setBackedgeAsmJS(MBasicBlock *pred)
// MBasicBlock::NewAsmJS calls reserveLength(2) for loop header phis.
entryDef->addInput(exitDef);
JS_ASSERT(entryDef->slot() < pred->stackDepth());
setSlot(entryDef->slot(), entryDef);
MOZ_ASSERT(slot < pred->stackDepth());
setSlot(slot, entryDef);
}
// We are now a loop header proper
@ -1187,13 +1166,23 @@ MBasicBlock::removePredecessor(MBasicBlock *pred)
void
MBasicBlock::inheritPhis(MBasicBlock *header)
{
for (MPhiIterator iter = header->phisBegin(); iter != header->phisEnd(); iter++) {
MPhi *phi = *iter;
JS_ASSERT(phi->numOperands() == 2);
MResumePoint *headerRp = header->entryResumePoint();
size_t stackDepth = headerRp->numOperands();
for (size_t slot = 0; slot < stackDepth; slot++) {
MDefinition *exitDef = getSlot(slot);
MDefinition *loopDef = headerRp->getOperand(slot);
if (!loopDef->isPhi()) {
MOZ_ASSERT(loopDef->block()->id() < header->id());
MOZ_ASSERT(loopDef == exitDef);
continue;
}
// Phis are allocated by NewPendingLoopHeader.
MPhi *phi = loopDef->toPhi();
MOZ_ASSERT(phi->numOperands() == 2);
// The entry definition is always the leftmost input to the phi.
MDefinition *entryDef = phi->getOperand(0);
MDefinition *exitDef = getSlot(phi->slot());
if (entryDef != exitDef)
continue;
@ -1201,10 +1190,60 @@ MBasicBlock::inheritPhis(MBasicBlock *header)
// If the entryDef is the same as exitDef, then we must propagate the
// phi down to this successor. This chance was missed as part of
// setBackedge() because exits are not captured in resume points.
setSlot(phi->slot(), phi);
setSlot(slot, phi);
}
}
// Propagates the backedge's slot values into the operands of the loop
// header phis. Returns false on allocation failure; ORs into
// *hadTypeChange whether adding an input changed the type of any phi.
bool
MBasicBlock::inheritPhisFromBackedge(MBasicBlock *backedge, bool *hadTypeChange)
{
    // We must be a pending loop header
    MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);
    size_t stackDepth = entryResumePoint()->numOperands();
    for (size_t slot = 0; slot < stackDepth; slot++) {
        // Get the value stack-slot of the back edge.
        MDefinition *exitDef = backedge->getSlot(slot);
        // Get the value of the loop header.
        MDefinition *loopDef = entryResumePoint()->getOperand(slot);
        if (!loopDef->isPhi()) {
            // If we are finishing a pending loop header, then we need to
            // ensure that all operands are phis. This is usually the case,
            // except for objects/arrays built with generators, in which case
            // we share the same allocations across all blocks.
            MOZ_ASSERT(loopDef->block()->id() < id());
            MOZ_ASSERT(loopDef == exitDef);
            continue;
        }
        // Phis are allocated by NewPendingLoopHeader.
        MPhi *entryDef = loopDef->toPhi();
        MOZ_ASSERT(entryDef->block() == this);
        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }
        bool typeChange = false;
        if (!entryDef->addInputSlow(exitDef, &typeChange))
            return false;
        *hadTypeChange |= typeChange;
        setSlot(slot, entryDef);
    }
    return true;
}
bool
MBasicBlock::specializePhis()
{

View File

@ -207,6 +207,9 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
// Propagates phis placed in a loop header down to this successor block.
void inheritPhis(MBasicBlock *header);
// Propagates backedge slots into phis operands of the loop header.
bool inheritPhisFromBackedge(MBasicBlock *backedge, bool *hadTypeChange);
// Compute the types for phis in this block according to their inputs.
bool specializePhis();

View File

@ -82,7 +82,7 @@ LRecoverInfo::OperandIter::canOptimizeOutIfUnused()
if ((ins->isUnused() || ins->type() == MIRType_MagicOptimizedOut) &&
(*it_)->isResumePoint())
{
return (*it_)->block()->info().canOptimizeOutSlot(op_);
return !(*it_)->toResumePoint()->isObservableOperand(op_);
}
return true;