X86: Avoid implicit iterator conversions, NFC

Avoid implicit conversions from MachineInstrBundleIterator to
MachineInstr*, mainly by preferring MachineInstr& over MachineInstr* and
using range-based for loops.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@275149 91177308-0d34-0410-b5e6-96231b3b80d8
Duncan P. N. Exon Smith 2016-07-12 03:18:50 +00:00
parent 5453d5cd36
commit a17edeb525
10 changed files with 219 additions and 219 deletions
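To illustrate the pattern the commit message describes, here is a minimal standalone sketch. It is not LLVM code: Instr and InstrList are hypothetical stand-ins for MachineInstr and a basic block's instruction list, used only to show why dereferencing an iterator explicitly (&*I, or MachineInstr &MI = *I) or using a range-based for loop is clearer than relying on an implicit iterator-to-pointer conversion.

#include <iostream>
#include <list>

// Hypothetical stand-ins for MachineInstr and a basic block's instruction
// list; the real LLVM classes are richer, but the iterator-vs-pointer
// distinction is the same.
struct Instr {
  int Opcode;
};
using InstrList = std::list<Instr>;

// Old style: helpers take a raw pointer, which callers often produced through
// an implicit iterator-to-pointer conversion (the kind this commit removes).
static bool isCallOld(const Instr *MI) { return MI->Opcode == 1; }

// New style: helpers take a reference, so any conversion from an iterator has
// to be spelled out as *I or &*I at the call site.
static bool isCallNew(const Instr &MI) { return MI.Opcode == 1; }

int main() {
  InstrList Block = {{0}, {1}, {2}};

  // Dereference the iterator once and work with a reference from then on
  // (MachineInstr &MI = *I; in the real patch).
  for (InstrList::iterator I = Block.begin(); I != Block.end(); ++I) {
    Instr &MI = *I;
    if (isCallNew(MI))
      std::cout << "call with opcode " << MI.Opcode << '\n';
  }

  // Range-based for loop, the other idiom the commit prefers.
  for (Instr &MI : Block)
    if (isCallNew(MI))
      std::cout << "call with opcode " << MI.Opcode << '\n';

  // When a pointer really is needed (e.g. to store in a side table), &*I
  // makes the iterator-to-pointer step visible instead of implicit.
  Instr *First = &*Block.begin();
  return isCallOld(First) ? 1 : 0;
}

In LLVM itself the change is purely mechanical (NFC): each place that relied on the implicit MachineInstrBundleIterator-to-MachineInstr* conversion is rewritten with &*I, a MachineInstr & local, or a range-based loop, as the hunks below show.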


@ -352,7 +352,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
// pointer is used directly.
if (!I->isCopy() || !I->getOperand(0).isReg())
return;
Context.SPCopy = I++;
Context.SPCopy = &*I++;
unsigned StackPtr = Context.SPCopy->getOperand(0).getReg();
@ -406,7 +406,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
// If the same stack slot is being filled twice, something's fishy.
if (Context.MovVector[StackDisp] != nullptr)
return;
Context.MovVector[StackDisp] = I;
Context.MovVector[StackDisp] = &*I;
for (const MachineOperand &MO : I->uses()) {
if (!MO.isReg())
@ -424,7 +424,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
if (I == MBB.end() || !I->isCall())
return;
Context.Call = I;
Context.Call = &*I;
if ((++I)->getOpcode() != FrameDestroyOpcode)
return;
@ -567,20 +567,20 @@ MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
if (!MRI->hasOneNonDBGUse(Reg))
return nullptr;
MachineBasicBlock::iterator DefMI = MRI->getVRegDef(Reg);
MachineInstr &DefMI = *MRI->getVRegDef(Reg);
// Make sure the def is a MOV from memory.
// If the def is in another block, give up.
if ((DefMI->getOpcode() != X86::MOV32rm &&
DefMI->getOpcode() != X86::MOV64rm) ||
DefMI->getParent() != FrameSetup->getParent())
if ((DefMI.getOpcode() != X86::MOV32rm &&
DefMI.getOpcode() != X86::MOV64rm) ||
DefMI.getParent() != FrameSetup->getParent())
return nullptr;
// Make sure we don't have any instructions between DefMI and the
// push that make folding the load illegal.
for (auto I = DefMI; I != FrameSetup; ++I)
for (MachineBasicBlock::iterator I = DefMI; I != FrameSetup; ++I)
if (I->isLoadFoldBarrier())
return nullptr;
return DefMI;
return &DefMI;
}


@ -134,8 +134,8 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
.addReg(JumpTarget.getReg(), RegState::Kill);
}
MachineInstr *NewMI = std::prev(MBBI);
NewMI->copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI);
MachineInstr &NewMI = *std::prev(MBBI);
NewMI.copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI);
// Delete the pseudo instruction TCRETURN.
MBB.erase(MBBI);


@ -187,10 +187,10 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
FixupLEAPass::RegUsageState
FixupLEAPass::usesRegister(MachineOperand &p, MachineBasicBlock::iterator I) {
RegUsageState RegUsage = RU_NotUsed;
MachineInstr *MI = I;
MachineInstr &MI = *I;
for (unsigned int i = 0; i < MI->getNumOperands(); ++i) {
MachineOperand &opnd = MI->getOperand(i);
for (unsigned int i = 0; i < MI.getNumOperands(); ++i) {
MachineOperand &opnd = MI.getOperand(i);
if (opnd.isReg() && opnd.getReg() == p.getReg()) {
if (opnd.isDef())
return RU_Write;
@ -239,7 +239,7 @@ FixupLEAPass::searchBackwards(MachineOperand &p, MachineBasicBlock::iterator &I,
MF->getSubtarget().getInstrItineraryData(), *CurInst);
Found = getPreviousInstr(CurInst, MFI);
}
return nullptr;
return MachineBasicBlock::iterator();
}
static inline bool isLEA(const int opcode) {
@ -250,28 +250,28 @@ static inline bool isLEA(const int opcode) {
/// isLEASimpleIncOrDec - Does this LEA have one of these forms:
/// lea %reg, 1(%reg)
/// lea %reg, -1(%reg)
static inline bool isLEASimpleIncOrDec(MachineInstr *LEA) {
unsigned SrcReg = LEA->getOperand(1 + X86::AddrBaseReg).getReg();
unsigned DstReg = LEA->getOperand(0).getReg();
static inline bool isLEASimpleIncOrDec(MachineInstr &LEA) {
unsigned SrcReg = LEA.getOperand(1 + X86::AddrBaseReg).getReg();
unsigned DstReg = LEA.getOperand(0).getReg();
unsigned AddrDispOp = 1 + X86::AddrDisp;
return SrcReg == DstReg &&
LEA->getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
LEA->getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
LEA->getOperand(AddrDispOp).isImm() &&
(LEA->getOperand(AddrDispOp).getImm() == 1 ||
LEA->getOperand(AddrDispOp).getImm() == -1);
LEA.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
LEA.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
LEA.getOperand(AddrDispOp).isImm() &&
(LEA.getOperand(AddrDispOp).getImm() == 1 ||
LEA.getOperand(AddrDispOp).getImm() == -1);
}
bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) const {
MachineInstr *MI = I;
int Opcode = MI->getOpcode();
MachineInstr &MI = *I;
int Opcode = MI.getOpcode();
if (!isLEA(Opcode))
return false;
if (isLEASimpleIncOrDec(MI) && TII->isSafeToClobberEFLAGS(*MFI, I)) {
int NewOpcode;
bool isINC = MI->getOperand(4).getImm() == 1;
bool isINC = MI.getOperand(4).getImm() == 1;
switch (Opcode) {
case X86::LEA16r:
NewOpcode = isINC ? X86::INC16r : X86::DEC16r;
@ -286,9 +286,9 @@ bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
}
MachineInstr *NewMI =
BuildMI(*MFI, I, MI->getDebugLoc(), TII->get(NewOpcode))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1));
BuildMI(*MFI, I, MI.getDebugLoc(), TII->get(NewOpcode))
.addOperand(MI.getOperand(0))
.addOperand(MI.getOperand(1));
MFI->erase(I);
I = static_cast<MachineBasicBlock::iterator>(NewMI);
return true;
@ -299,16 +299,16 @@ bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
void FixupLEAPass::processInstruction(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
// Process a load, store, or LEA instruction.
MachineInstr *MI = I;
const MCInstrDesc &Desc = MI->getDesc();
MachineInstr &MI = *I;
const MCInstrDesc &Desc = MI.getDesc();
int AddrOffset = X86II::getMemoryOperandNo(Desc.TSFlags);
if (AddrOffset >= 0) {
AddrOffset += X86II::getOperandBias(Desc);
MachineOperand &p = MI->getOperand(AddrOffset + X86::AddrBaseReg);
MachineOperand &p = MI.getOperand(AddrOffset + X86::AddrBaseReg);
if (p.isReg() && p.getReg() != X86::ESP) {
seekLEAFixup(p, I, MFI);
}
MachineOperand &q = MI->getOperand(AddrOffset + X86::AddrIndexReg);
MachineOperand &q = MI.getOperand(AddrOffset + X86::AddrIndexReg);
if (q.isReg() && q.getReg() != X86::ESP) {
seekLEAFixup(q, I, MFI);
}
@ -319,7 +319,7 @@ void FixupLEAPass::seekLEAFixup(MachineOperand &p,
MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
MachineBasicBlock::iterator MBI = searchBackwards(p, I, MFI);
if (MBI) {
if (MBI != MachineBasicBlock::iterator()) {
MachineInstr *NewMI = postRAConvertToLEA(MFI, MBI);
if (NewMI) {
++NumLEAs;
@ -336,19 +336,19 @@ void FixupLEAPass::seekLEAFixup(MachineOperand &p,
void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
MachineInstr *MI = I;
const int opcode = MI->getOpcode();
MachineInstr &MI = *I;
const int opcode = MI.getOpcode();
if (!isLEA(opcode))
return;
if (MI->getOperand(5).getReg() != 0 || !MI->getOperand(4).isImm() ||
if (MI.getOperand(5).getReg() != 0 || !MI.getOperand(4).isImm() ||
!TII->isSafeToClobberEFLAGS(*MFI, I))
return;
const unsigned DstR = MI->getOperand(0).getReg();
const unsigned SrcR1 = MI->getOperand(1).getReg();
const unsigned SrcR2 = MI->getOperand(3).getReg();
const unsigned DstR = MI.getOperand(0).getReg();
const unsigned SrcR1 = MI.getOperand(1).getReg();
const unsigned SrcR2 = MI.getOperand(3).getReg();
if ((SrcR1 == 0 || SrcR1 != DstR) && (SrcR2 == 0 || SrcR2 != DstR))
return;
if (MI->getOperand(2).getImm() > 1)
if (MI.getOperand(2).getImm() > 1)
return;
int addrr_opcode, addri_opcode;
switch (opcode) {
@ -371,12 +371,12 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
DEBUG(dbgs() << "FixLEA: Replaced by: ";);
MachineInstr *NewMI = nullptr;
const MachineOperand &Dst = MI->getOperand(0);
const MachineOperand &Dst = MI.getOperand(0);
// Make ADD instruction for two registers writing to LEA's destination
if (SrcR1 != 0 && SrcR2 != 0) {
const MachineOperand &Src1 = MI->getOperand(SrcR1 == DstR ? 1 : 3);
const MachineOperand &Src2 = MI->getOperand(SrcR1 == DstR ? 3 : 1);
NewMI = BuildMI(*MF, MI->getDebugLoc(), TII->get(addrr_opcode))
const MachineOperand &Src1 = MI.getOperand(SrcR1 == DstR ? 1 : 3);
const MachineOperand &Src2 = MI.getOperand(SrcR1 == DstR ? 3 : 1);
NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addrr_opcode))
.addOperand(Dst)
.addOperand(Src1)
.addOperand(Src2);
@ -384,12 +384,12 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
DEBUG(NewMI->dump(););
}
// Make ADD instruction for immediate
if (MI->getOperand(4).getImm() != 0) {
const MachineOperand &SrcR = MI->getOperand(SrcR1 == DstR ? 1 : 3);
NewMI = BuildMI(*MF, MI->getDebugLoc(), TII->get(addri_opcode))
if (MI.getOperand(4).getImm() != 0) {
const MachineOperand &SrcR = MI.getOperand(SrcR1 == DstR ? 1 : 3);
NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addri_opcode))
.addOperand(Dst)
.addOperand(SrcR)
.addImm(MI->getOperand(4).getImm());
.addImm(MI.getOperand(4).getImm());
MFI->insert(I, NewMI);
DEBUG(NewMI->dump(););
}


@ -227,7 +227,8 @@ namespace {
++NumFXCH;
}
void duplicateToTop(unsigned RegNo, unsigned AsReg, MachineInstr *I) {
void duplicateToTop(unsigned RegNo, unsigned AsReg,
MachineBasicBlock::iterator I) {
DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
unsigned STReg = getSTReg(RegNo);
pushReg(AsReg); // New register on top of stack
@ -272,9 +273,9 @@ namespace {
void handleSpecialFP(MachineBasicBlock::iterator &I);
// Check if a COPY instruction is using FP registers.
static bool isFPCopy(MachineInstr *MI) {
unsigned DstReg = MI->getOperand(0).getReg();
unsigned SrcReg = MI->getOperand(1).getReg();
static bool isFPCopy(MachineInstr &MI) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
return X86::RFP80RegClass.contains(DstReg) ||
X86::RFP80RegClass.contains(SrcReg);
@ -373,21 +374,21 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
setupBlockStack();
for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
MachineInstr *MI = I;
uint64_t Flags = MI->getDesc().TSFlags;
MachineInstr &MI = *I;
uint64_t Flags = MI.getDesc().TSFlags;
unsigned FPInstClass = Flags & X86II::FPTypeMask;
if (MI->isInlineAsm())
if (MI.isInlineAsm())
FPInstClass = X86II::SpecialFP;
if (MI->isCopy() && isFPCopy(MI))
if (MI.isCopy() && isFPCopy(MI))
FPInstClass = X86II::SpecialFP;
if (MI->isImplicitDef() &&
X86::RFP80RegClass.contains(MI->getOperand(0).getReg()))
if (MI.isImplicitDef() &&
X86::RFP80RegClass.contains(MI.getOperand(0).getReg()))
FPInstClass = X86II::SpecialFP;
if (MI->isCall())
if (MI.isCall())
FPInstClass = X86II::SpecialFP;
if (FPInstClass == X86II::NotFP)
@ -395,16 +396,16 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
MachineInstr *PrevMI = nullptr;
if (I != BB.begin())
PrevMI = std::prev(I);
PrevMI = &*std::prev(I);
++NumFP; // Keep track of # of pseudo instrs
DEBUG(dbgs() << "\nFPInst:\t" << *MI);
DEBUG(dbgs() << "\nFPInst:\t" << MI);
// Get dead variables list now because the MI pointer may be deleted as part
// of processing!
SmallVector<unsigned, 8> DeadRegs;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isDead())
DeadRegs.push_back(MO.getReg());
}
@ -433,20 +434,22 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
}
// Print out all of the instructions expanded to if -debug
DEBUG(
MachineBasicBlock::iterator PrevI(PrevMI);
DEBUG({
MachineBasicBlock::iterator PrevI = PrevMI;
if (I == PrevI) {
dbgs() << "Just deleted pseudo instruction\n";
} else {
MachineBasicBlock::iterator Start = I;
// Rewind to first instruction newly inserted.
while (Start != BB.begin() && std::prev(Start) != PrevI) --Start;
while (Start != BB.begin() && std::prev(Start) != PrevI)
--Start;
dbgs() << "Inserted instructions:\n\t";
Start->print(dbgs());
while (++Start != std::next(I)) {}
while (++Start != std::next(I)) {
}
}
dumpStack();
);
});
(void)PrevMI;
Changed = true;
@ -785,8 +788,8 @@ static const TableEntry PopTable[] = {
/// instruction if it was modified in place.
///
void FPS::popStackAfter(MachineBasicBlock::iterator &I) {
MachineInstr* MI = I;
const DebugLoc &dl = MI->getDebugLoc();
MachineInstr &MI = *I;
const DebugLoc &dl = MI.getDebugLoc();
ASSERT_SORTED(PopTable);
if (StackTop == 0)
report_fatal_error("Cannot pop empty stack!");
@ -952,22 +955,22 @@ void FPS::handleCall(MachineBasicBlock::iterator &I) {
/// If RET has an FP register use operand, pass the first one in ST(0) and
/// the second one in ST(1).
void FPS::handleReturn(MachineBasicBlock::iterator &I) {
MachineInstr *MI = I;
MachineInstr &MI = *I;
// Find the register operands.
unsigned FirstFPRegOp = ~0U, SecondFPRegOp = ~0U;
unsigned LiveMask = 0;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI->getOperand(i);
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI.getOperand(i);
if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
continue;
// FP Register uses must be kills unless there are two uses of the same
// register, in which case only one will be a kill.
assert(Op.isUse() &&
(Op.isKill() || // Marked kill.
getFPReg(Op) == FirstFPRegOp || // Second instance.
MI->killsRegister(Op.getReg())) && // Later use is marked kill.
(Op.isKill() || // Marked kill.
getFPReg(Op) == FirstFPRegOp || // Second instance.
MI.killsRegister(Op.getReg())) && // Later use is marked kill.
"Ret only defs operands, and values aren't live beyond it");
if (FirstFPRegOp == ~0U)
@ -979,7 +982,7 @@ void FPS::handleReturn(MachineBasicBlock::iterator &I) {
LiveMask |= (1 << getFPReg(Op));
// Remove the operand so that later passes don't see it.
MI->RemoveOperand(i);
MI.RemoveOperand(i);
--i;
--e;
}
@ -1039,12 +1042,12 @@ void FPS::handleReturn(MachineBasicBlock::iterator &I) {
/// handleZeroArgFP - ST(0) = fld0 ST(0) = flds <mem>
///
void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
MachineInstr *MI = I;
unsigned DestReg = getFPReg(MI->getOperand(0));
MachineInstr &MI = *I;
unsigned DestReg = getFPReg(MI.getOperand(0));
// Change from the pseudo instruction to the concrete instruction.
MI->RemoveOperand(0); // Remove the explicit ST(0) operand
MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
MI.RemoveOperand(0); // Remove the explicit ST(0) operand
MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
// Result gets pushed on the stack.
pushReg(DestReg);
@ -1053,14 +1056,14 @@ void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
/// handleOneArgFP - fst <mem>, ST(0)
///
void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
MachineInstr *MI = I;
unsigned NumOps = MI->getDesc().getNumOperands();
MachineInstr &MI = *I;
unsigned NumOps = MI.getDesc().getNumOperands();
assert((NumOps == X86::AddrNumOperands + 1 || NumOps == 1) &&
"Can only handle fst* & ftst instructions!");
// Is this the last use of the source register?
unsigned Reg = getFPReg(MI->getOperand(NumOps-1));
bool KillsSrc = MI->killsRegister(X86::FP0+Reg);
unsigned Reg = getFPReg(MI.getOperand(NumOps - 1));
bool KillsSrc = MI.killsRegister(X86::FP0 + Reg);
// FISTP64m is strange because there isn't a non-popping version.
// If we have one _and_ we don't want to pop the operand, duplicate the value
@ -1068,34 +1071,31 @@ void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
// always ok.
// Ditto FISTTP16m, FISTTP32m, FISTTP64m, ST_FpP80m.
//
if (!KillsSrc &&
(MI->getOpcode() == X86::IST_Fp64m32 ||
MI->getOpcode() == X86::ISTT_Fp16m32 ||
MI->getOpcode() == X86::ISTT_Fp32m32 ||
MI->getOpcode() == X86::ISTT_Fp64m32 ||
MI->getOpcode() == X86::IST_Fp64m64 ||
MI->getOpcode() == X86::ISTT_Fp16m64 ||
MI->getOpcode() == X86::ISTT_Fp32m64 ||
MI->getOpcode() == X86::ISTT_Fp64m64 ||
MI->getOpcode() == X86::IST_Fp64m80 ||
MI->getOpcode() == X86::ISTT_Fp16m80 ||
MI->getOpcode() == X86::ISTT_Fp32m80 ||
MI->getOpcode() == X86::ISTT_Fp64m80 ||
MI->getOpcode() == X86::ST_FpP80m)) {
if (!KillsSrc && (MI.getOpcode() == X86::IST_Fp64m32 ||
MI.getOpcode() == X86::ISTT_Fp16m32 ||
MI.getOpcode() == X86::ISTT_Fp32m32 ||
MI.getOpcode() == X86::ISTT_Fp64m32 ||
MI.getOpcode() == X86::IST_Fp64m64 ||
MI.getOpcode() == X86::ISTT_Fp16m64 ||
MI.getOpcode() == X86::ISTT_Fp32m64 ||
MI.getOpcode() == X86::ISTT_Fp64m64 ||
MI.getOpcode() == X86::IST_Fp64m80 ||
MI.getOpcode() == X86::ISTT_Fp16m80 ||
MI.getOpcode() == X86::ISTT_Fp32m80 ||
MI.getOpcode() == X86::ISTT_Fp64m80 ||
MI.getOpcode() == X86::ST_FpP80m)) {
duplicateToTop(Reg, ScratchFPReg, I);
} else {
moveToTop(Reg, I); // Move to the top of the stack...
}
// Convert from the pseudo instruction to the concrete instruction.
MI->RemoveOperand(NumOps-1); // Remove explicit ST(0) operand
MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
MI.RemoveOperand(NumOps - 1); // Remove explicit ST(0) operand
MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
if (MI->getOpcode() == X86::IST_FP64m ||
MI->getOpcode() == X86::ISTT_FP16m ||
MI->getOpcode() == X86::ISTT_FP32m ||
MI->getOpcode() == X86::ISTT_FP64m ||
MI->getOpcode() == X86::ST_FP80m) {
if (MI.getOpcode() == X86::IST_FP64m || MI.getOpcode() == X86::ISTT_FP16m ||
MI.getOpcode() == X86::ISTT_FP32m || MI.getOpcode() == X86::ISTT_FP64m ||
MI.getOpcode() == X86::ST_FP80m) {
if (StackTop == 0)
report_fatal_error("Stack empty??");
--StackTop;
@ -1114,15 +1114,15 @@ void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
/// R1 = fadd R2, [mem]
///
void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
MachineInstr *MI = I;
MachineInstr &MI = *I;
#ifndef NDEBUG
unsigned NumOps = MI->getDesc().getNumOperands();
unsigned NumOps = MI.getDesc().getNumOperands();
assert(NumOps >= 2 && "FPRW instructions must have 2 ops!!");
#endif
// Is this the last use of the source register?
unsigned Reg = getFPReg(MI->getOperand(1));
bool KillsSrc = MI->killsRegister(X86::FP0+Reg);
unsigned Reg = getFPReg(MI.getOperand(1));
bool KillsSrc = MI.killsRegister(X86::FP0 + Reg);
if (KillsSrc) {
// If this is the last use of the source register, just make sure it's on
@ -1131,17 +1131,17 @@ void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
if (StackTop == 0)
report_fatal_error("Stack cannot be empty!");
--StackTop;
pushReg(getFPReg(MI->getOperand(0)));
pushReg(getFPReg(MI.getOperand(0)));
} else {
// If this is not the last use of the source register, _copy_ it to the top
// of the stack.
duplicateToTop(Reg, getFPReg(MI->getOperand(0)), I);
duplicateToTop(Reg, getFPReg(MI.getOperand(0)), I);
}
// Change from the pseudo instruction to the concrete instruction.
MI->RemoveOperand(1); // Drop the source operand.
MI->RemoveOperand(0); // Drop the destination operand.
MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
MI.RemoveOperand(1); // Drop the source operand.
MI.RemoveOperand(0); // Drop the destination operand.
MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
}
@ -1225,16 +1225,16 @@ static const TableEntry ReverseSTiTable[] = {
void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
MachineInstr *MI = I;
MachineInstr &MI = *I;
unsigned NumOperands = MI->getDesc().getNumOperands();
unsigned NumOperands = MI.getDesc().getNumOperands();
assert(NumOperands == 3 && "Illegal TwoArgFP instruction!");
unsigned Dest = getFPReg(MI->getOperand(0));
unsigned Op0 = getFPReg(MI->getOperand(NumOperands-2));
unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
DebugLoc dl = MI->getDebugLoc();
unsigned Dest = getFPReg(MI.getOperand(0));
unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2));
unsigned Op1 = getFPReg(MI.getOperand(NumOperands - 1));
bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0);
bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
DebugLoc dl = MI.getDebugLoc();
unsigned TOS = getStackEntry(0);
@ -1291,14 +1291,14 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
InstTable = ReverseSTiTable;
}
int Opcode = Lookup(InstTable, MI->getOpcode());
int Opcode = Lookup(InstTable, MI.getOpcode());
assert(Opcode != -1 && "Unknown TwoArgFP pseudo instruction!");
// NotTOS - The register which is not on the top of stack...
unsigned NotTOS = (TOS == Op0) ? Op1 : Op0;
// Replace the old instruction with a new instruction
MBB->remove(I++);
MBB->remove(&*I++);
I = BuildMI(*MBB, I, dl, TII->get(Opcode)).addReg(getSTReg(NotTOS));
// If both operands are killed, pop one off of the stack in addition to
@ -1314,7 +1314,7 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
assert(UpdatedSlot < StackTop && Dest < 7);
Stack[UpdatedSlot] = Dest;
RegMap[Dest] = UpdatedSlot;
MBB->getParent()->DeleteMachineInstr(MI); // Remove the old instruction
MBB->getParent()->DeleteMachineInstr(&MI); // Remove the old instruction
}
/// handleCompareFP - Handle FUCOM and FUCOMI instructions, which have two FP
@ -1323,23 +1323,23 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
MachineInstr *MI = I;
MachineInstr &MI = *I;
unsigned NumOperands = MI->getDesc().getNumOperands();
unsigned NumOperands = MI.getDesc().getNumOperands();
assert(NumOperands == 2 && "Illegal FUCOM* instruction!");
unsigned Op0 = getFPReg(MI->getOperand(NumOperands-2));
unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2));
unsigned Op1 = getFPReg(MI.getOperand(NumOperands - 1));
bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0);
bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
// Make sure the first operand is on the top of stack, the other one can be
// anywhere.
moveToTop(Op0, I);
// Change from the pseudo instruction to the concrete instruction.
MI->getOperand(0).setReg(getSTReg(Op1));
MI->RemoveOperand(1);
MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
MI.getOperand(0).setReg(getSTReg(Op1));
MI.RemoveOperand(1);
MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
// If any of the operands are killed by this instruction, free them.
if (KillsOp0) freeStackSlotAfter(I, Op0);
@ -1351,21 +1351,21 @@ void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
/// instructions require that the first operand is at the top of the stack, but
/// otherwise don't modify the stack at all.
void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
MachineInstr *MI = I;
MachineInstr &MI = *I;
unsigned Op0 = getFPReg(MI->getOperand(0));
unsigned Op1 = getFPReg(MI->getOperand(2));
bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
unsigned Op0 = getFPReg(MI.getOperand(0));
unsigned Op1 = getFPReg(MI.getOperand(2));
bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
// The first operand *must* be on the top of the stack.
moveToTop(Op0, I);
// Change the second operand to the stack register that the operand is in.
// Change from the pseudo instruction to the concrete instruction.
MI->RemoveOperand(0);
MI->RemoveOperand(1);
MI->getOperand(0).setReg(getSTReg(Op1));
MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
MI.RemoveOperand(0);
MI.RemoveOperand(1);
MI.getOperand(0).setReg(getSTReg(Op1));
MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
// If we kill the second operand, make sure to pop it from the stack.
if (Op0 != Op1 && KillsOp1) {
@ -1380,25 +1380,25 @@ void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
/// instructions.
///
void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
MachineInstr *MI = Inst;
MachineInstr &MI = *Inst;
if (MI->isCall()) {
if (MI.isCall()) {
handleCall(Inst);
return;
}
if (MI->isReturn()) {
if (MI.isReturn()) {
handleReturn(Inst);
return;
}
switch (MI->getOpcode()) {
switch (MI.getOpcode()) {
default: llvm_unreachable("Unknown SpecialFP instruction!");
case TargetOpcode::COPY: {
// We handle three kinds of copies: FP <- FP, FP <- ST, and ST <- FP.
const MachineOperand &MO1 = MI->getOperand(1);
const MachineOperand &MO0 = MI->getOperand(0);
bool KillsSrc = MI->killsRegister(MO1.getReg());
const MachineOperand &MO1 = MI.getOperand(1);
const MachineOperand &MO0 = MI.getOperand(0);
bool KillsSrc = MI.killsRegister(MO1.getReg());
// FP <- FP copy.
unsigned DstFP = getFPReg(MO0);
@ -1420,9 +1420,9 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
case TargetOpcode::IMPLICIT_DEF: {
// All FP registers must be explicitly defined, so load a 0 instead.
unsigned Reg = MI->getOperand(0).getReg() - X86::FP0;
unsigned Reg = MI.getOperand(0).getReg() - X86::FP0;
DEBUG(dbgs() << "Emitting LD_F0 for implicit FP" << Reg << '\n');
BuildMI(*MBB, Inst, MI->getDebugLoc(), TII->get(X86::LD_F0));
BuildMI(*MBB, Inst, MI.getDebugLoc(), TII->get(X86::LD_F0));
pushReg(Reg);
break;
}
@ -1466,14 +1466,14 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
SmallSet<unsigned, 1> FRegIdx;
unsigned RCID;
for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
i != e && MI->getOperand(i).isImm(); i += 1 + NumOps) {
unsigned Flags = MI->getOperand(i).getImm();
for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI.getNumOperands();
i != e && MI.getOperand(i).isImm(); i += 1 + NumOps) {
unsigned Flags = MI.getOperand(i).getImm();
NumOps = InlineAsm::getNumOperandRegisters(Flags);
if (NumOps != 1)
continue;
const MachineOperand &MO = MI->getOperand(i + 1);
const MachineOperand &MO = MI.getOperand(i + 1);
if (!MO.isReg())
continue;
unsigned STReg = MO.getReg() - X86::FP0;
@ -1506,24 +1506,24 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
}
if (STUses && !isMask_32(STUses))
MI->emitError("fixed input regs must be last on the x87 stack");
MI.emitError("fixed input regs must be last on the x87 stack");
unsigned NumSTUses = countTrailingOnes(STUses);
// Defs must be contiguous from the stack top. ST0-STn.
if (STDefs && !isMask_32(STDefs)) {
MI->emitError("output regs must be last on the x87 stack");
MI.emitError("output regs must be last on the x87 stack");
STDefs = NextPowerOf2(STDefs) - 1;
}
unsigned NumSTDefs = countTrailingOnes(STDefs);
// So must the clobbered stack slots. ST0-STm, m >= n.
if (STClobbers && !isMask_32(STDefs | STClobbers))
MI->emitError("clobbers must be last on the x87 stack");
MI.emitError("clobbers must be last on the x87 stack");
// Popped inputs are the ones that are also clobbered or defined.
unsigned STPopped = STUses & (STDefs | STClobbers);
if (STPopped && !isMask_32(STPopped))
MI->emitError("implicitly popped regs must be last on the x87 stack");
MI.emitError("implicitly popped regs must be last on the x87 stack");
unsigned NumSTPopped = countTrailingOnes(STPopped);
DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
@ -1532,9 +1532,9 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
#ifndef NDEBUG
// If any input operand uses constraint "f", all output register
// constraints must be early-clobber defs.
for (unsigned I = 0, E = MI->getNumOperands(); I < E; ++I)
for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I)
if (FRegIdx.count(I)) {
assert((1 << getFPReg(MI->getOperand(I)) & STDefs) == 0 &&
assert((1 << getFPReg(MI.getOperand(I)) & STDefs) == 0 &&
"Operands with constraint \"f\" cannot overlap with defs");
}
#endif
@ -1542,8 +1542,8 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
// Collect all FP registers (register operands with constraints "t", "u",
// and "f") to kill afer the instruction.
unsigned FPKills = ((1u << NumFPRegs) - 1) & ~0xff;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI->getOperand(i);
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI.getOperand(i);
if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
continue;
unsigned FPReg = getFPReg(Op);
@ -1568,8 +1568,8 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
DEBUG({dbgs() << "Before asm: "; dumpStack();});
// With the stack layout fixed, rewrite the FP registers.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI->getOperand(i);
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI.getOperand(i);
if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
continue;


@ -699,7 +699,7 @@ MachineInstr *X86FrameLowering::emitStackProbeInline(
// Possible TODO: physreg liveness for InProlog case.
return ContinueMBBI;
return &*ContinueMBBI;
}
MachineInstr *X86FrameLowering::emitStackProbeCall(
@ -763,7 +763,7 @@ MachineInstr *X86FrameLowering::emitStackProbeCall(
ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
}
return MBBI;
return &*MBBI;
}
MachineInstr *X86FrameLowering::emitStackProbeInlineStub(
@ -775,7 +775,7 @@ MachineInstr *X86FrameLowering::emitStackProbeInlineStub(
BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
.addExternalSymbol("__chkstk_stub");
return MBBI;
return &*MBBI;
}
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
@ -1406,8 +1406,8 @@ bool X86FrameLowering::canUseLEAForSPInEpilogue(
return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}
static bool isFuncletReturnInstr(MachineInstr *MI) {
switch (MI->getOpcode()) {
static bool isFuncletReturnInstr(MachineInstr &MI) {
switch (MI.getOpcode()) {
case X86::CATCHRET:
case X86::CLEANUPRET:
return true;
@ -1492,7 +1492,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWinCFI =
IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
bool IsFunclet = isFuncletReturnInstr(MBBI);
bool IsFunclet = isFuncletReturnInstr(*MBBI);
MachineBasicBlock *TargetMBB = nullptr;
// Get the number of bytes to allocate from the FrameInfo.
@ -1956,7 +1956,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
if (CSI.empty())
return false;
if (isFuncletReturnInstr(MI) && STI.isOSWindows()) {
if (isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
// Don't restore CSRs in 32-bit EH funclets. Matches
// spillCalleeSavedRegisters.
if (STI.is32Bit())


@ -2525,8 +2525,8 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MBB.insert(I, MI);
}
MachineInstr *NewMI = std::prev(I);
NewMI->substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
MachineInstr &NewMI = *std::prev(I);
NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
@ -4057,7 +4057,7 @@ bool X86InstrInfo::AnalyzeBranchImpl(
FBB = TBB;
TBB = I->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(BranchCode));
CondBranches.push_back(I);
CondBranches.push_back(&*I);
continue;
}
@ -4110,7 +4110,7 @@ bool X86InstrInfo::AnalyzeBranchImpl(
// Update the MachineOperand.
Cond[0].setImm(BranchCode);
CondBranches.push_back(I);
CondBranches.push_back(&*I);
}
return false;
@ -5123,7 +5123,8 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
ShouldUpdateCC = true; // Update CC later on.
// This is not a def of SrcReg, but still a def of EFLAGS. Keep going
// with the new def.
MI = Def = J;
Def = J;
MI = &*Def;
break;
}
@ -7467,9 +7468,9 @@ namespace {
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
if (TLSBaseAddrReg)
I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
else
I = SetRegister(I, &TLSBaseAddrReg);
I = SetRegister(*I, &TLSBaseAddrReg);
Changed = true;
break;
default:
@ -7488,29 +7489,29 @@ namespace {
// Replace the TLS_base_addr instruction I with a copy from
// TLSBaseAddrReg, returning the new instruction.
MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
unsigned TLSBaseAddrReg) {
MachineFunction *MF = I->getParent()->getParent();
MachineFunction *MF = I.getParent()->getParent();
const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
const bool is64Bit = STI.is64Bit();
const X86InstrInfo *TII = STI.getInstrInfo();
// Insert a Copy from TLSBaseAddrReg to RAX/EAX.
MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
TII->get(TargetOpcode::COPY),
is64Bit ? X86::RAX : X86::EAX)
.addReg(TLSBaseAddrReg);
MachineInstr *Copy =
BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
.addReg(TLSBaseAddrReg);
// Erase the TLS_base_addr instruction.
I->eraseFromParent();
I.eraseFromParent();
return Copy;
}
// Create a virtual register in *TLSBaseAddrReg, and populate it by
// inserting a copy instruction after I. Returns the new instruction.
MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
MachineFunction *MF = I->getParent()->getParent();
MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
MachineFunction *MF = I.getParent()->getParent();
const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
const bool is64Bit = STI.is64Bit();
const X86InstrInfo *TII = STI.getInstrInfo();
@ -7522,11 +7523,11 @@ namespace {
: &X86::GR32RegClass);
// Insert a copy from RAX/EAX to TLSBaseAddrReg.
MachineInstr *Next = I->getNextNode();
MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
TII->get(TargetOpcode::COPY),
*TLSBaseAddrReg)
.addReg(is64Bit ? X86::RAX : X86::EAX);
MachineInstr *Next = I.getNextNode();
MachineInstr *Copy =
BuildMI(*I.getParent(), Next, I.getDebugLoc(),
TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
.addReg(is64Bit ? X86::RAX : X86::EAX);
return Copy;
}


@ -1025,7 +1025,7 @@ PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) {
const MachineBasicBlock *MBB = MBBI->getParent();
while (MBBI == MBB->begin()) {
if (MBB == &MBB->getParent()->front())
return nullptr;
return MachineBasicBlock::const_iterator();
MBB = MBB->getPrevNode();
MBBI = MBB->end();
}
@ -1305,7 +1305,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case X86::SEH_Epilogue: {
MachineBasicBlock::const_iterator MBBI(MI);
// Check if preceded by a call and emit nop if so.
for (MBBI = PrevCrossBBInst(MBBI); MBBI; MBBI = PrevCrossBBInst(MBBI)) {
for (MBBI = PrevCrossBBInst(MBBI);
MBBI != MachineBasicBlock::const_iterator();
MBBI = PrevCrossBBInst(MBBI)) {
// Conservatively assume that pseudo instructions don't emit code and keep
// looking for a call. We may emit an unnecessary nop in some cases.
if (!MBBI->isPseudo()) {


@ -503,9 +503,10 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
InstrPos[DefMI] = InstrPos[&MI] - 1;
// Make sure the instructions' position numbers are sane.
assert(((InstrPos[DefMI] == 1 && DefMI == MBB->begin()) ||
assert(((InstrPos[DefMI] == 1 &&
MachineBasicBlock::iterator(DefMI) == MBB->begin()) ||
InstrPos[DefMI] >
InstrPos[std::prev(MachineBasicBlock::iterator(DefMI))]) &&
InstrPos[&*std::prev(MachineBasicBlock::iterator(DefMI))]) &&
"Instruction positioning is broken");
}


@ -187,19 +187,17 @@ bool PadShortFunc::cyclesUntilReturn(MachineBasicBlock *MBB,
unsigned int CyclesToEnd = 0;
for (MachineBasicBlock::iterator MBBI = MBB->begin();
MBBI != MBB->end(); ++MBBI) {
MachineInstr *MI = MBBI;
for (MachineInstr &MI : *MBB) {
// Mark basic blocks with a return instruction. Calls to other
// functions do not count because the called function will be padded,
// if necessary.
if (MI->isReturn() && !MI->isCall()) {
if (MI.isReturn() && !MI.isCall()) {
VisitedBBs[MBB] = VisitedBBInfo(true, CyclesToEnd);
Cycles += CyclesToEnd;
return true;
}
CyclesToEnd += TII->getInstrLatency(STI->getInstrItineraryData(), *MI);
CyclesToEnd += TII->getInstrLatency(STI->getInstrItineraryData(), MI);
}
VisitedBBs[MBB] = VisitedBBInfo(false, CyclesToEnd);


@ -127,9 +127,9 @@ static bool clobbersAllYmmRegs(const MachineOperand &MO) {
return true;
}
static bool hasYmmReg(MachineInstr *MI) {
for (const MachineOperand &MO : MI->operands()) {
if (MI->isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
static bool hasYmmReg(MachineInstr &MI) {
for (const MachineOperand &MO : MI.operands()) {
if (MI.isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
return true;
if (!MO.isReg())
continue;
@ -142,9 +142,9 @@ static bool hasYmmReg(MachineInstr *MI) {
}
/// Check if any YMM register will be clobbered by this instruction.
static bool callClobbersAnyYmmReg(MachineInstr *MI) {
assert(MI->isCall() && "Can only be called on call instructions.");
for (const MachineOperand &MO : MI->operands()) {
static bool callClobbersAnyYmmReg(MachineInstr &MI) {
assert(MI.isCall() && "Can only be called on call instructions.");
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isRegMask())
continue;
for (unsigned reg = X86::YMM0; reg <= X86::YMM15; ++reg) {
@ -181,16 +181,14 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
BlockExitState CurState = PASS_THROUGH;
BlockStates[MBB.getNumber()].FirstUnguardedCall = MBB.end();
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
MachineInstr *MI = I;
for (MachineInstr &MI : MBB) {
// No need for vzeroupper before iret in interrupt handler function,
// epilogue will restore YMM registers if needed.
bool IsReturnFromX86INTR = IsX86INTR && MI->isReturn();
bool IsControlFlow = MI->isCall() || MI->isReturn();
bool IsReturnFromX86INTR = IsX86INTR && MI.isReturn();
bool IsControlFlow = MI.isCall() || MI.isReturn();
// An existing VZERO* instruction resets the state.
if (MI->getOpcode() == X86::VZEROALL ||
MI->getOpcode() == X86::VZEROUPPER) {
if (MI.getOpcode() == X86::VZEROALL || MI.getOpcode() == X86::VZEROUPPER) {
CurState = EXITS_CLEAN;
continue;
}
@ -216,7 +214,7 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// standard calling convention is not used (RegMask is not used to mark
// register clobbered and register usage (def/imp-def/use) is well-defined
// and explicitly specified.
if (MI->isCall() && !callClobbersAnyYmmReg(MI))
if (MI.isCall() && !callClobbersAnyYmmReg(MI))
continue;
// The VZEROUPPER instruction resets the upper 128 bits of all AVX
@ -230,7 +228,7 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// After the inserted VZEROUPPER the state becomes clean again, but
// other YMM may appear before other subsequent calls or even before
// the end of the BB.
insertVZeroUpper(I, MBB);
insertVZeroUpper(MI, MBB);
CurState = EXITS_CLEAN;
} else if (CurState == PASS_THROUGH) {
// If this block is currently in pass-through state and we encounter a
@ -238,7 +236,7 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// block has successors that exit dirty. Record the location of the call,
// and set the state to EXITS_CLEAN, but do not insert the vzeroupper yet.
// It will be inserted later if necessary.
BlockStates[MBB.getNumber()].FirstUnguardedCall = I;
BlockStates[MBB.getNumber()].FirstUnguardedCall = MI;
CurState = EXITS_CLEAN;
}
}