Mirror of https://github.com/RPCS3/llvm-mirror.git
Remove non-DebugLoc versions of BuildMI from X86.
There were some that might even matter in X86FastISel.

llvm-svn: 64437
parent 0336a2cfd0
commit 560b03bbcd
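The change repeated throughout the diff below is mechanical: each BuildMI call gains a DebugLoc argument right after the insertion point, and each caller either threads through a DebugLoc it already has (DL in X86FastISel), pulls one from a nearby MachineInstr with getDebugLoc() (the FP stackifier), or falls back to DebugLoc::getUnknownLoc(). The following is a minimal sketch of that call-site change, not code from this patch; the helper name EmitWithLoc and the header paths are assumptions for the 2009-era tree.

// Illustrative sketch only, assuming 2009-era LLVM headers and APIs;
// EmitWithLoc is a made-up helper name, not part of this commit.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

static void EmitWithLoc(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator IP,
                        const TargetInstrInfo &TII,
                        unsigned Opc, unsigned ResultReg) {
  // Old overload (removed by this commit): no DebugLoc, so the emitted
  // instruction carried no source-location information.
  //   BuildMI(MBB, IP, TII.get(Opc), ResultReg);

  // New form: take a DebugLoc from a nearby instruction when one exists,
  // otherwise use the explicit "unknown" location, and pass it to BuildMI.
  DebugLoc DL = (IP != MBB.end()) ? IP->getDebugLoc()
                                  : DebugLoc::getUnknownLoc();
  BuildMI(MBB, IP, DL, TII.get(Opc), ResultReg);
}
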
@@ -231,7 +231,7 @@ bool X86FastISel::X86FastEmitLoad(MVT VT, const X86AddressMode &AM,
}

ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
return true;
}

@@ -259,7 +259,7 @@ X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
break;
}

- addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Val);
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
return true;
}

@@ -285,7 +285,8 @@ bool X86FastISel::X86FastEmitStore(MVT VT, Value *Val,
}

if (Opc) {
- addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addImm(CI->getSExtValue());
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
+ .addImm(CI->getSExtValue());
return true;
}
}
@@ -463,7 +464,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM, bool isCall) {
StubAM.Base.Reg = AM.Base.Reg;
StubAM.GV = AM.GV;
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), StubAM);
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), StubAM);

// Now construct the final address. Note that the Disp, Scale,
// and Index values may already be set here.
@@ -568,7 +569,7 @@ bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, MVT VT) {
// CMPri, otherwise use CMPrr.
if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
- BuildMI(MBB, TII.get(CompareImmOpc)).addReg(Op0Reg)
+ BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
.addImm(Op1C->getSExtValue());
return true;
}
@@ -579,7 +580,7 @@ bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, MVT VT) {

unsigned Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false;
- BuildMI(MBB, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);
+ BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);

return true;
}
@@ -601,9 +602,10 @@ bool X86FastISel::X86SelectCmp(Instruction *I) {

unsigned EReg = createResultReg(&X86::GR8RegClass);
unsigned NPReg = createResultReg(&X86::GR8RegClass);
- BuildMI(MBB, TII.get(X86::SETEr), EReg);
- BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
- BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
+ BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
+ BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
+ BuildMI(MBB, DL,
+ TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -613,9 +615,9 @@ bool X86FastISel::X86SelectCmp(Instruction *I) {

unsigned NEReg = createResultReg(&X86::GR8RegClass);
unsigned PReg = createResultReg(&X86::GR8RegClass);
- BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
- BuildMI(MBB, TII.get(X86::SETPr), PReg);
- BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
+ BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
+ BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
+ BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -654,7 +656,7 @@ bool X86FastISel::X86SelectCmp(Instruction *I) {
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;

- BuildMI(MBB, TII.get(SetCCOpc), ResultReg);
+ BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -737,12 +739,12 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;

- BuildMI(MBB, TII.get(BranchOpc)).addMBB(TrueMBB);
+ BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB);

if (Predicate == CmpInst::FCMP_UNE) {
// X86 requires a second branch to handle UNE (and OEQ,
// which is mapped to UNE above).
- BuildMI(MBB, TII.get(X86::JP)).addMBB(TrueMBB);
+ BuildMI(MBB, DL, TII.get(X86::JP)).addMBB(TrueMBB);
}

FastEmitBranch(FalseMBB);
@@ -816,7 +818,7 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
unsigned OpCode = SetMI->getOpcode();

if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
- BuildMI(MBB, TII.get((OpCode == X86::SETOr) ?
+ BuildMI(MBB, DL, TII.get((OpCode == X86::SETOr) ?
X86::JO : X86::JB)).addMBB(TrueMBB);
FastEmitBranch(FalseMBB);
MBB->addSuccessor(TrueMBB);
@@ -833,8 +835,8 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
unsigned OpReg = getRegForValue(BI->getCondition());
if (OpReg == 0) return false;

- BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
- BuildMI(MBB, TII.get(X86::JNE)).addMBB(TrueMBB);
+ BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
+ BuildMI(MBB, DL, TII.get(X86::JNE)).addMBB(TrueMBB);
FastEmitBranch(FalseMBB);
MBB->addSuccessor(TrueMBB);
return true;
@@ -893,7 +895,7 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
// Fold immediate in shl(x,3).
if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, TII.get(OpImm),
+ BuildMI(MBB, DL, TII.get(OpImm),
ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
UpdateValueMap(I, ResultReg);
return true;
@@ -907,11 +909,11 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
// of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
// we're doing here.
if (CReg != X86::CL)
- BuildMI(MBB, TII.get(TargetInstrInfo::EXTRACT_SUBREG), X86::CL)
+ BuildMI(MBB, DL, TII.get(TargetInstrInfo::EXTRACT_SUBREG), X86::CL)
.addReg(CReg).addImm(X86::SUBREG_8BIT);

unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, TII.get(OpReg), ResultReg).addReg(Op0Reg);
+ BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -943,9 +945,9 @@ bool X86FastISel::X86SelectSelect(Instruction *I) {
unsigned Op2Reg = getRegForValue(I->getOperand(2));
if (Op2Reg == 0) return false;

- BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
+ BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
+ BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -958,7 +960,7 @@ bool X86FastISel::X86SelectFPExt(Instruction *I) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
- BuildMI(MBB, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
+ BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -975,7 +977,7 @@ bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
- BuildMI(MBB, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
+ BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1008,7 +1010,7 @@ bool X86FastISel::X86SelectTrunc(Instruction *I) {
const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
? X86::GR16_RegisterClass : X86::GR32_RegisterClass;
unsigned CopyReg = createResultReg(CopyRC);
- BuildMI(MBB, TII.get(CopyOpc), CopyReg).addReg(InputReg);
+ BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);

// Then issue an extract_subreg.
unsigned ResultReg = FastEmitInst_extractsubreg(DstVT.getSimpleVT(),
@@ -1083,11 +1085,11 @@ bool X86FastISel::X86VisitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
return false;

unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(MBB, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
+ BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
UpdateValueMap(&I, ResultReg);

ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
- BuildMI(MBB, TII.get((Intrinsic == Intrinsic::sadd_with_overflow) ?
+ BuildMI(MBB, DL, TII.get((Intrinsic == Intrinsic::sadd_with_overflow) ?
X86::SETOr : X86::SETBr), ResultReg);
return true;
}
@@ -1204,7 +1206,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {

// Issue CALLSEQ_START
unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
- BuildMI(MBB, TII.get(AdjStackDown)).addImm(NumBytes);
+ BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);

// Process argument: walk the register/memloc assignments, inserting
// copies / loads.
@@ -1291,8 +1293,8 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
? (Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r)
: (Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32);
MachineInstrBuilder MIB = CalleeOp
- ? BuildMI(MBB, TII.get(CallOpc)).addReg(CalleeOp)
- : BuildMI(MBB, TII.get(CallOpc)).addGlobalAddress(GV);
+ ? BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp)
+ : BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV);

// Add an implicit use GOT pointer in EBX.
if (!Subtarget->is64Bit() &&
@@ -1306,7 +1308,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {

// Issue CALLSEQ_END
unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
- BuildMI(MBB, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
+ BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);

// Now handle call return value (if any).
if (RetVT.getSimpleVT() != MVT::isVoid) {
@@ -1344,18 +1346,19 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned MemSize = ResVT.getSizeInBits()/8;
int FI = MFI.CreateStackObject(MemSize, MemSize);
- addFrameReference(BuildMI(MBB, TII.get(Opc)), FI).addReg(ResultReg);
+ addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
DstRC = ResVT == MVT::f32
? X86::FR32RegisterClass : X86::FR64RegisterClass;
Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
ResultReg = createResultReg(DstRC);
- addFrameReference(BuildMI(MBB, TII.get(Opc), ResultReg), FI);
+ addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
}

if (AndToI1) {
// Mask out all but lowest bit for some call which produces an i1.
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
- BuildMI(MBB, TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
+ BuildMI(MBB, DL,
+ TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
ResultReg = AndResult;
}

@@ -1461,7 +1464,7 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
else
Opc = X86::LEA64r;
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
return ResultReg;
}
return 0;
@@ -1484,7 +1487,7 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
// Create the load from the constant pool.
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
unsigned ResultReg = createResultReg(RC);
- addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset,
+ addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), MCPOffset,
PICBase);

return ResultReg;
@@ -1507,7 +1510,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
return ResultReg;
}

@@ -115,6 +115,8 @@ namespace {

bool isAtTop(unsigned RegNo) const { return getSlot(RegNo) == StackTop-1; }
void moveToTop(unsigned RegNo, MachineBasicBlock::iterator I) {
+ MachineInstr *MI = I;
+ DebugLoc dl = MI->getDebugLoc();
if (isAtTop(RegNo)) return;

unsigned STReg = getSTReg(RegNo);
@@ -128,15 +130,16 @@ namespace {
std::swap(Stack[RegMap[RegOnTop]], Stack[StackTop-1]);

// Emit an fxch to update the runtime processors version of the state.
- BuildMI(*MBB, I, TII->get(X86::XCH_F)).addReg(STReg);
+ BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(STReg);
NumFXCH++;
}

void duplicateToTop(unsigned RegNo, unsigned AsReg, MachineInstr *I) {
+ DebugLoc dl = I->getDebugLoc();
unsigned STReg = getSTReg(RegNo);
pushReg(AsReg); // New register on top of stack

- BuildMI(*MBB, I, TII->get(X86::LD_Frr)).addReg(STReg);
+ BuildMI(*MBB, I, dl, TII->get(X86::LD_Frr)).addReg(STReg);
}

// popStackAfter - Pop the current value off of the top of the FP stack
@@ -549,6 +552,8 @@ static const TableEntry PopTable[] = {
/// instruction if it was modified in place.
///
void FPS::popStackAfter(MachineBasicBlock::iterator &I) {
+ MachineInstr* MI = I;
+ DebugLoc dl = MI->getDebugLoc();
ASSERT_SORTED(PopTable);
assert(StackTop > 0 && "Cannot pop empty stack!");
RegMap[Stack[--StackTop]] = ~0; // Update state
@@ -560,7 +565,7 @@ void FPS::popStackAfter(MachineBasicBlock::iterator &I) {
if (Opcode == X86::UCOM_FPPr)
I->RemoveOperand(0);
} else { // Insert an explicit pop
- I = BuildMI(*MBB, ++I, TII->get(X86::ST_FPrr)).addReg(X86::ST0);
+ I = BuildMI(*MBB, ++I, dl, TII->get(X86::ST_FPrr)).addReg(X86::ST0);
}
}

@@ -584,7 +589,9 @@ void FPS::freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned FPRegNo) {
RegMap[TopReg] = OldSlot;
RegMap[FPRegNo] = ~0;
Stack[--StackTop] = ~0;
- I = BuildMI(*MBB, ++I, TII->get(X86::ST_FPrr)).addReg(STReg);
+ MachineInstr *MI = I;
+ DebugLoc dl = MI->getDebugLoc();
+ I = BuildMI(*MBB, ++I, dl, TII->get(X86::ST_FPrr)).addReg(STReg);
}


@@ -788,6 +795,7 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
+ DebugLoc dl = MI->getDebugLoc();

unsigned TOS = getStackEntry(0);

@@ -853,7 +861,7 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {

// Replace the old instruction with a new instruction
MBB->remove(I++);
- I = BuildMI(*MBB, I, TII->get(Opcode)).addReg(getSTReg(NotTOS));
+ I = BuildMI(*MBB, I, dl, TII->get(Opcode)).addReg(getSTReg(NotTOS));

// If both operands are killed, pop one off of the stack in addition to
// overwriting the other one.
@@ -935,6 +943,7 @@ void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
///
void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
MachineInstr *MI = I;
+ DebugLoc dl = MI->getDebugLoc();
switch (MI->getOpcode()) {
default: assert(0 && "Unknown SpecialFP instruction!");
case X86::FpGET_ST0_32:// Appears immediately after a call returning FP type!
@@ -991,7 +1000,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
case X86::FpSET_ST1_80:
// StackTop can be 1 if a FpSET_ST0_* was before this. Exchange them.
if (StackTop == 1) {
- BuildMI(*MBB, I, TII->get(X86::XCH_F)).addReg(X86::ST1);
+ BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(X86::ST1);
NumFXCH++;
StackTop = 0;
break;
@@ -128,7 +128,7 @@ bool FPRegKiller::runOnMachineFunction(MachineFunction &MF) {
}
// Finally, if we found any FP code, emit the FP_REG_KILL instruction.
if (ContainsFPCode) {
- BuildMI(*MBB, MBBI->getFirstTerminator(),
+ BuildMI(*MBB, MBBI->getFirstTerminator(), DebugLoc::getUnknownLoc(),
MF.getTarget().getInstrInfo()->get(X86::FP_REG_KILL));
++NumFPKill;
Changed = true;
@@ -720,7 +720,8 @@ void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
MachineFrameInfo *MFI) {
const TargetInstrInfo *TII = TM.getInstrInfo();
if (Subtarget->isTargetCygMing())
- BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
+ BuildMI(BB, DebugLoc::getUnknownLoc(),
+ TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}

void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
@@ -1630,6 +1630,8 @@ unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond) const {
+ // FIXME this should probably have a DebugLoc operand
+ DebugLoc dl = DebugLoc::getUnknownLoc();
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
@@ -1638,7 +1640,7 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
if (Cond.empty()) {
// Unconditional branch?
assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
+ BuildMI(&MBB, dl, get(X86::JMP)).addMBB(TBB);
return 1;
}

@@ -1648,27 +1650,27 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
switch (CC) {
case X86::COND_NP_OR_E:
// Synthesize NP_OR_E with two branches.
- BuildMI(&MBB, get(X86::JNP)).addMBB(TBB);
+ BuildMI(&MBB, dl, get(X86::JNP)).addMBB(TBB);
++Count;
- BuildMI(&MBB, get(X86::JE)).addMBB(TBB);
+ BuildMI(&MBB, dl, get(X86::JE)).addMBB(TBB);
++Count;
break;
case X86::COND_NE_OR_P:
// Synthesize NE_OR_P with two branches.
- BuildMI(&MBB, get(X86::JNE)).addMBB(TBB);
+ BuildMI(&MBB, dl, get(X86::JNE)).addMBB(TBB);
++Count;
- BuildMI(&MBB, get(X86::JP)).addMBB(TBB);
+ BuildMI(&MBB, dl, get(X86::JP)).addMBB(TBB);
++Count;
break;
default: {
unsigned Opc = GetCondBranchFromCond(CC);
- BuildMI(&MBB, get(Opc)).addMBB(TBB);
+ BuildMI(&MBB, dl, get(Opc)).addMBB(TBB);
++Count;
}
}
if (FBB) {
// Two-way Conditional branch. Insert the second branch.
- BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
+ BuildMI(&MBB, dl, get(X86::JMP)).addMBB(FBB);
++Count;
}
return Count;
@@ -1944,7 +1946,7 @@ bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
unsigned Reg = CSI[i-1].getReg();
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
- BuildMI(MBB, MI, get(Opc))
+ BuildMI(MBB, MI, DL, get(Opc))
.addReg(Reg, /*isDef=*/false, /*isImp=*/false, /*isKill=*/true);
}
return true;
@@ -542,11 +542,14 @@ void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
(Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
(Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
uint64_t Chunk = (1LL << 31) - 1;
+ // We could pass in a DebugLoc, but this is only called from prolog/epilog.
+ DebugLoc DL = DebugLoc::getUnknownLoc();

while (Offset) {
uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
MachineInstr *MI =
- BuildMI(MBB, MBBI, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(ThisVal);
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr).addImm(ThisVal);
// The EFLAGS implicit def is dead.
MI->getOperand(3).setIsDead();
Offset -= ThisVal;