[BOLT] Introduce MCPlus layer

Summary:
Refactor architecture-specific code out of llvm into llvm-bolt.

Introduce MCPlusBuilder, a class that takes over MCInstrAnalysis
responsibilities, i.e. creating, analyzing, and modifying instructions.
To access the builder, use BC->MIB, i.e. substitute MIA with MIB.
MIB is an acronym for MCInstBuilder, which is what MCPlusBuilder used
to be called. The name stuck, and I find it better than MPB.

Instructions are still MCInst, and a bunch of BOLT-specific code still
lives in LLVM, but the stuff under Target/* is significantly reduced.

(cherry picked from FBD7300101)
Maksim Panchenko 2018-03-09 09:45:13 -08:00
parent 8c16594f2e
commit 48ae32a33b
43 changed files with 6254 additions and 574 deletions
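At call sites the change is mechanical: every query or rewrite that used to go through BC.MIA now goes through BC.MIB, while the instructions themselves remain plain MCInst. A minimal before/after sketch (names taken from the diffs below):

// Before: the MCInstrAnalysis subclass carried the BOLT extensions.
if (BC.MIA->isCall(Inst))
  BC.MIA->addAnnotation(BC.Ctx.get(), Inst, "Offset", Offset);

// After: MCPlusBuilder owns creation, analysis, and annotation of MCInst.
if (BC.MIB->isCall(Inst))
  BC.MIB->addAnnotation(BC.Ctx.get(), Inst, "Offset", Offset);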


@ -66,7 +66,7 @@ bool BinaryBasicBlock::validateSuccessorInvariants() {
// Note: for now we assume that successors do not reference labels from
// any overlapping jump tables. We only look at the entries for the jump
// table that is referenced at the last instruction.
const auto Range = JT->getEntriesForAddress(BC.MIA->getJumpTable(*Inst));
const auto Range = JT->getEntriesForAddress(BC.MIB->getJumpTable(*Inst));
const std::vector<const MCSymbol *> Entries(&JT->Entries[Range.first],
&JT->Entries[Range.second]);
std::set<const MCSymbol *> UniqueSyms(Entries.begin(), Entries.end());
@ -108,7 +108,7 @@ bool BinaryBasicBlock::validateSuccessorInvariants() {
break;
case 1: {
const bool HasCondBlock = CondBranch &&
Function->getBasicBlockForLabel(BC.MIA->getTargetSymbol(*CondBranch));
Function->getBasicBlockForLabel(BC.MIB->getTargetSymbol(*CondBranch));
Valid = !CondBranch || !HasCondBlock;
break;
}
@ -128,7 +128,7 @@ bool BinaryBasicBlock::validateSuccessorInvariants() {
<< getName() << "\n";
if (JT) {
errs() << "Jump Table instruction addr = 0x"
<< Twine::utohexstr(BC.MIA->getJumpTable(*Inst)) << "\n";
<< Twine::utohexstr(BC.MIB->getJumpTable(*Inst)) << "\n";
JT->print(errs());
}
getFunction()->dump();
@ -188,7 +188,7 @@ int32_t BinaryBasicBlock::getCFIStateAtInstr(const MCInst *Instr) const {
InstrSeen = (&*RII == Instr);
continue;
}
if (Function->getBinaryContext().MIA->isCFI(*RII)) {
if (Function->getBinaryContext().MIB->isCFI(*RII)) {
LastCFI = &*RII;
break;
}
@ -322,8 +322,8 @@ bool BinaryBasicBlock::analyzeBranch(const MCSymbol *&TBB,
const MCSymbol *&FBB,
MCInst *&CondBranch,
MCInst *&UncondBranch) {
auto &MIA = Function->getBinaryContext().MIA;
return MIA->analyzeBranch(Instructions.begin(),
auto &MIB = Function->getBinaryContext().MIB;
return MIB->analyzeBranch(Instructions.begin(),
Instructions.end(),
TBB,
FBB,
@ -343,7 +343,7 @@ MCInst *BinaryBasicBlock::getTerminatorBefore(MCInst *Pos) {
++Itr;
continue;
}
if (BC.MIA->isTerminator(*Itr))
if (BC.MIB->isTerminator(*Itr))
FirstTerminator = &*Itr;
++Itr;
}
@ -356,7 +356,7 @@ bool BinaryBasicBlock::hasTerminatorAfter(MCInst *Pos) {
while (Itr != rend()) {
if (&*Itr == Pos)
return false;
if (BC.MIA->isTerminator(*Itr))
if (BC.MIB->isTerminator(*Itr))
return true;
++Itr;
}
@ -376,14 +376,14 @@ void BinaryBasicBlock::addBranchInstruction(const BinaryBasicBlock *Successor) {
assert(isSuccessor(Successor));
auto &BC = Function->getBinaryContext();
MCInst NewInst;
BC.MIA->createUncondBranch(NewInst, Successor->getLabel(), BC.Ctx.get());
BC.MIB->createUncondBranch(NewInst, Successor->getLabel(), BC.Ctx.get());
Instructions.emplace_back(std::move(NewInst));
}
void BinaryBasicBlock::addTailCallInstruction(const MCSymbol *Target) {
auto &BC = Function->getBinaryContext();
MCInst NewInst;
BC.MIA->createTailCall(NewInst, Target, BC.Ctx.get());
BC.MIB->createTailCall(NewInst, Target, BC.Ctx.get());
Instructions.emplace_back(std::move(NewInst));
}
@ -391,7 +391,7 @@ uint32_t BinaryBasicBlock::getNumCalls() const {
uint32_t N{0};
auto &BC = Function->getBinaryContext();
for (auto &Instr : Instructions) {
if (BC.MIA->isCall(Instr))
if (BC.MIB->isCall(Instr))
++N;
}
return N;
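BinaryBasicBlock::analyzeBranch above is now a thin forwarder to MCPlusBuilder::analyzeBranch. A sketch of how a pass typically consumes its out-parameters (a hypothetical caller, not part of this diff):

const MCSymbol *TBB = nullptr;   // conventionally the taken target
const MCSymbol *FBB = nullptr;   // conventionally the fall-through target
MCInst *CondBranch = nullptr;
MCInst *UncondBranch = nullptr;
if (BB->analyzeBranch(TBB, FBB, CondBranch, UncondBranch)) {
  // CondBranch/UncondBranch point into the block's instruction list;
  // either may remain null, e.g. a pure fall-through block sets neither.
}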


@ -658,12 +658,12 @@ void BinaryContext::printInstruction(raw_ostream &OS,
bool PrintMCInst,
bool PrintMemData,
bool PrintRelocations) const {
if (MIA->isEHLabel(Instruction)) {
OS << " EH_LABEL: " << *MIA->getTargetSymbol(Instruction) << '\n';
if (MIB->isEHLabel(Instruction)) {
OS << " EH_LABEL: " << *MIB->getTargetSymbol(Instruction) << '\n';
return;
}
OS << format(" %08" PRIx64 ": ", Offset);
if (MIA->isCFI(Instruction)) {
if (MIB->isCFI(Instruction)) {
uint32_t Offset = Instruction.getOperand(0).getImm();
OS << "\t!CFI\t$" << Offset << "\t; ";
if (Function)
@ -672,31 +672,31 @@ void BinaryContext::printInstruction(raw_ostream &OS,
return;
}
InstPrinter->printInst(&Instruction, OS, "", *STI);
if (MIA->isCall(Instruction)) {
if (MIA->isTailCall(Instruction))
if (MIB->isCall(Instruction)) {
if (MIB->isTailCall(Instruction))
OS << " # TAILCALL ";
if (MIA->isInvoke(Instruction)) {
if (MIB->isInvoke(Instruction)) {
const MCSymbol *LP;
uint64_t Action;
std::tie(LP, Action) = MIA->getEHInfo(Instruction);
std::tie(LP, Action) = MIB->getEHInfo(Instruction);
OS << " # handler: ";
if (LP)
OS << *LP;
else
OS << '0';
OS << "; action: " << Action;
auto GnuArgsSize = MIA->getGnuArgsSize(Instruction);
auto GnuArgsSize = MIB->getGnuArgsSize(Instruction);
if (GnuArgsSize >= 0)
OS << "; GNU_args_size = " << GnuArgsSize;
}
}
if (MIA->isIndirectBranch(Instruction)) {
if (auto JTAddress = MIA->getJumpTable(Instruction)) {
if (MIB->isIndirectBranch(Instruction)) {
if (auto JTAddress = MIB->getJumpTable(Instruction)) {
OS << " # JUMPTABLE @0x" << Twine::utohexstr(JTAddress);
}
}
MIA->forEachAnnotation(
MIB->forEachAnnotation(
Instruction,
[&OS](const MCAnnotation *Annotation) {
OS << " # " << Annotation->getName() << ": ";
@ -726,7 +726,7 @@ void BinaryContext::printInstruction(raw_ostream &OS,
if ((opts::PrintMemData || PrintMemData) && Function) {
const auto *MD = Function->getMemData();
const auto MemDataOffset =
MIA->tryGetAnnotationAs<uint64_t>(Instruction, "MemDataOffset");
MIB->tryGetAnnotationAs<uint64_t>(Instruction, "MemDataOffset");
if (MD && MemDataOffset) {
bool DidPrint = false;
for (auto &MI : MD->getMemInfoRange(MemDataOffset.get())) {


@ -17,6 +17,7 @@
#include "BinaryData.h"
#include "BinarySection.h"
#include "DebugData.h"
#include "MCPlusBuilder.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/Triple.h"
#include "llvm/DebugInfo/DWARF/DWARFCompileUnit.h"
@ -222,6 +223,8 @@ public:
std::unique_ptr<const MCInstrAnalysis> MIA;
std::unique_ptr<const MCPlusBuilder> MIB;
std::unique_ptr<const MCRegisterInfo> MRI;
std::unique_ptr<MCDisassembler> DisAsm;
@ -270,6 +273,7 @@ public:
std::unique_ptr<const MCSubtargetInfo> STI,
std::unique_ptr<MCInstPrinter> InstPrinter,
std::unique_ptr<const MCInstrAnalysis> MIA,
std::unique_ptr<const MCPlusBuilder> MIB,
std::unique_ptr<const MCRegisterInfo> MRI,
std::unique_ptr<MCDisassembler> DisAsm,
DataReader &DR) :
@ -285,6 +289,7 @@ public:
STI(std::move(STI)),
InstPrinter(std::move(InstPrinter)),
MIA(std::move(MIA)),
MIB(std::move(MIB)),
MRI(std::move(MRI)),
DisAsm(std::move(DisAsm)),
DR(DR) {
@ -612,7 +617,7 @@ public:
SmallString<256> Code;
SmallVector<MCFixup, 4> Fixups;
raw_svector_ostream VecOS(Code);
if (MIA->isCFI(*Beg) || MIA->isEHLabel(*Beg)) {
if (MIB->isCFI(*Beg) || MIB->isEHLabel(*Beg)) {
++Beg;
continue;
}
@ -637,8 +642,8 @@ public:
/// Return true if instruction \p Inst requires an offset for further
/// processing (e.g. assigning a profile).
bool keepOffsetForInstruction(const MCInst &Inst) const {
if (MIA->isCall(Inst) || MIA->isBranch(Inst) || MIA->isReturn(Inst) ||
MIA->isPrefix(Inst) || MIA->isIndirectBranch(Inst)) {
if (MIB->isCall(Inst) || MIB->isBranch(Inst) || MIB->isReturn(Inst) ||
MIB->isPrefix(Inst) || MIB->isIndirectBranch(Inst)) {
return true;
}
return false;
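keepOffsetForInstruction pairs with the "Offset" annotation attached during disassembly; the intended pattern, mirroring BinaryFunction::disassemble further down:

// Keep the input offset only for instructions that profile matching
// will need to look up later.
if (BC.keepOffsetForInstruction(Instruction))
  BC.MIB->addAnnotation(BC.Ctx.get(), Instruction, "Offset", Offset);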


@ -13,6 +13,7 @@
#include "BinaryBasicBlock.h"
#include "BinaryFunction.h"
#include "DataReader.h"
#include "MCPlusBuilder.h"
#include "llvm/ADT/edit_distance.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
@ -668,7 +669,7 @@ IndirectBranchType BinaryFunction::processIndirectBranch(MCInst &Instruction,
}
}
auto Type = BC.MIA->analyzeIndirectBranch(Instruction,
auto Type = BC.MIB->analyzeIndirectBranch(Instruction,
Begin,
End,
PtrSize,
@ -686,7 +687,7 @@ IndirectBranchType BinaryFunction::processIndirectBranch(MCInst &Instruction,
IndexRegNum = 0;
if (BC.TheTriple->getArch() == llvm::Triple::aarch64) {
const auto *Sym = BC.MIA->getTargetSymbol(*PCRelBaseInstr, 1);
const auto *Sym = BC.MIB->getTargetSymbol(*PCRelBaseInstr, 1);
assert (Sym && "Symbol extraction failed");
if (auto *BD = BC.getBinaryDataByName(Sym->getName())) {
PCRelAddr = BD->getAddress();
@ -710,7 +711,7 @@ IndirectBranchType BinaryFunction::processIndirectBranch(MCInst &Instruction,
// function (for example, if the indirect jump lives in the last basic
// block of the function, it will create a reference to the next function).
// This replaces a symbol reference with an immediate.
BC.MIA->replaceMemOperandDisp(*PCRelBaseInstr,
BC.MIB->replaceMemOperandDisp(*PCRelBaseInstr,
MCOperand::createImm(PCRelAddr - InstrAddr));
// FIXME: Disable full jump table processing for AArch64 until we have a
// proper way of determining the jump table limits.
@ -722,7 +723,7 @@ IndirectBranchType BinaryFunction::processIndirectBranch(MCInst &Instruction,
if (DispExpr) {
const MCSymbol *TargetSym;
uint64_t TargetOffset;
std::tie(TargetSym, TargetOffset) = BC.MIA->getTargetSymbolInfo(DispExpr);
std::tie(TargetSym, TargetOffset) = BC.MIB->getTargetSymbolInfo(DispExpr);
auto *BD = BC.getBinaryDataByName(TargetSym->getName());
assert(BD && "global symbol needs a value");
ArrayStart = BD->getAddress() + TargetOffset;
@ -773,9 +774,9 @@ IndirectBranchType BinaryFunction::processIndirectBranch(MCInst &Instruction,
LI = Result.first;
}
BC.MIA->replaceMemOperandDisp(const_cast<MCInst &>(*MemLocInstr),
BC.MIB->replaceMemOperandDisp(const_cast<MCInst &>(*MemLocInstr),
LI->second, BC.Ctx.get());
BC.MIA->setJumpTable(BC.Ctx.get(), Instruction, ArrayStart, IndexRegNum);
BC.MIB->setJumpTable(BC.Ctx.get(), Instruction, ArrayStart, IndexRegNum);
JTSites.emplace_back(Offset, ArrayStart);
@ -870,9 +871,9 @@ IndirectBranchType BinaryFunction::processIndirectBranch(MCInst &Instruction,
<< " in function " << *this << " with "
<< JTOffsetCandidates.size() << " entries.\n");
JumpTables.emplace(ArrayStart, JT.release());
BC.MIA->replaceMemOperandDisp(const_cast<MCInst &>(*MemLocInstr),
BC.MIB->replaceMemOperandDisp(const_cast<MCInst &>(*MemLocInstr),
JTStartLabel, BC.Ctx.get());
BC.MIA->setJumpTable(BC.Ctx.get(), Instruction, ArrayStart, IndexRegNum);
BC.MIB->setJumpTable(BC.Ctx.get(), Instruction, ArrayStart, IndexRegNum);
JTSites.emplace_back(Offset, ArrayStart);
@ -919,7 +920,7 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
"function size does not match raw data size");
auto &Ctx = BC.Ctx;
auto &MIA = BC.MIA;
auto &MIB = BC.MIB;
DWARFUnitLineTable ULT = getDWARFUnitLineTable();
@ -935,7 +936,7 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
uint64_t TargetAddress{0};
uint64_t TargetOffset{0};
MCSymbol *TargetSymbol{nullptr};
if (!MIA->evaluateMemOperandTarget(Instruction, TargetAddress, Address,
if (!MIB->evaluateMemOperandTarget(Instruction, TargetAddress, Address,
Size)) {
errs() << "BOLT-ERROR: PC-relative operand can't be evaluated:\n";
BC.InstPrinter->printInst(&Instruction, errs(), "", *BC.STI);
@ -985,7 +986,7 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
// without its supporting relocation.
if (!TargetSymbol && Section && Section->isText() &&
(BC.TheTriple->getArch() != llvm::Triple::aarch64 ||
!BC.MIA->isADRP(Instruction))) {
!BC.MIB->isADRP(Instruction))) {
if (containsAddress(TargetAddress, /*UseMaxSize=*/
BC.TheTriple->getArch() == llvm::Triple::aarch64)) {
if (TargetAddress != getAddress()) {
@ -1022,8 +1023,8 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
auto *Offset = MCConstantExpr::create(TargetOffset, *BC.Ctx);
Expr = MCBinaryExpr::createAdd(Expr, Offset, *BC.Ctx);
}
MIA->replaceMemOperandDisp(
Instruction, MCOperand::createExpr(BC.MIA->getTargetExprFor(
MIB->replaceMemOperandDisp(
Instruction, MCOperand::createExpr(BC.MIB->getTargetExprFor(
Instruction,
Expr,
*BC.Ctx, 0)));
@ -1080,7 +1081,7 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
}
// Cannot process functions with AVX-512 instructions.
if (MIA->hasEVEXEncoding(Instruction)) {
if (MIB->hasEVEXEncoding(Instruction)) {
if (opts::Verbosity >= 1) {
errs() << "BOLT-WARNING: function " << *this << " uses instruction"
" encoded with EVEX (AVX-512) at offset 0x"
@ -1113,7 +1114,7 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
<< " for instruction at offset 0x"
<< Twine::utohexstr(Offset) << '\n');
int64_t Value = Relocation.Value;
const auto Result = BC.MIA->replaceImmWithSymbol(Instruction,
const auto Result = BC.MIB->replaceImmWithSymbol(Instruction,
Relocation.Symbol,
Relocation.Addend,
Ctx.get(),
@ -1135,11 +1136,11 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
}
// Convert instruction to a shorter version that could be relaxed if needed.
MIA->shortenInstruction(Instruction);
MIB->shortenInstruction(Instruction);
if (MIA->isBranch(Instruction) || MIA->isCall(Instruction)) {
if (MIB->isBranch(Instruction) || MIB->isCall(Instruction)) {
uint64_t TargetAddress = 0;
if (MIA->evaluateBranch(Instruction,
if (MIB->evaluateBranch(Instruction,
AbsoluteInstrAddr,
Size,
TargetAddress)) {
@ -1148,8 +1149,8 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
//
// If the target *is* the function address it could be either a branch
// or a recursive call.
bool IsCall = MIA->isCall(Instruction);
const bool IsCondBranch = MIA->isConditionalBranch(Instruction);
bool IsCall = MIB->isCall(Instruction);
const bool IsCondBranch = MIB->isConditionalBranch(Instruction);
MCSymbol *TargetSymbol = nullptr;
if (IsCall && containsAddress(TargetAddress)) {
@ -1177,7 +1178,7 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
<< Twine::utohexstr(AbsoluteInstrAddr)
<< " in function " << *this
<< " : replacing with nop.\n");
BC.MIA->createNoop(Instruction);
BC.MIB->createNoop(Instruction);
if (IsCondBranch) {
// Register branch offset for profile validation.
IgnoredBranches.emplace_back(Offset, Offset + Size);
@ -1193,14 +1194,14 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
<< ". Code size will be increased.\n";
}
assert(!MIA->isTailCall(Instruction) &&
assert(!MIB->isTailCall(Instruction) &&
"synthetic tail call instruction found");
// This is a call regardless of the opcode.
// Assign proper opcode for tail calls, so that they could be
// treated as calls.
if (!IsCall) {
if (!MIA->convertJmpToTailCall(Instruction, BC.Ctx.get())) {
if (!MIB->convertJmpToTailCall(Instruction, BC.Ctx.get())) {
assert(IsCondBranch && "unknown tail call instruction");
if (opts::Verbosity >= 2) {
errs() << "BOLT-WARNING: conditional tail call detected in "
@ -1261,16 +1262,16 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
// Add taken branch info.
TakenBranches.emplace_back(Offset, TargetAddress - getAddress());
}
BC.MIA->replaceBranchTarget(Instruction, TargetSymbol, &*Ctx);
BC.MIB->replaceBranchTarget(Instruction, TargetSymbol, &*Ctx);
// Mark CTC.
if (IsCondBranch && IsCall) {
MIA->setConditionalTailCall(Instruction, TargetAddress);
MIB->setConditionalTailCall(Instruction, TargetAddress);
}
} else {
// Could not evaluate branch. Should be an indirect call or an
// indirect branch. Bail out on the latter case.
if (MIA->isIndirectBranch(Instruction)) {
if (MIB->isIndirectBranch(Instruction)) {
auto Result = processIndirectBranch(Instruction, Size, Offset);
switch (Result) {
default:
@ -1278,7 +1279,7 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
case IndirectBranchType::POSSIBLE_TAIL_CALL:
{
auto Result =
MIA->convertJmpToTailCall(Instruction, BC.Ctx.get());
MIB->convertJmpToTailCall(Instruction, BC.Ctx.get());
(void)Result;
assert(Result);
}
@ -1295,7 +1296,7 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
};
}
// Indirect call. We only need to fix it if the operand is RIP-relative
if (IsSimple && MIA->hasPCRelOperand(Instruction)) {
if (IsSimple && MIB->hasPCRelOperand(Instruction)) {
if (!handlePCRelOperand(Instruction, AbsoluteInstrAddr, Size)) {
errs() << "BOLT-ERROR: cannot handle PC-relative operand at 0x"
<< Twine::utohexstr(AbsoluteInstrAddr)
@ -1307,7 +1308,7 @@ void BinaryFunction::disassemble(ArrayRef<uint8_t> FunctionData) {
}
}
} else {
if (MIA->hasPCRelOperand(Instruction) && !UsedReloc) {
if (MIB->hasPCRelOperand(Instruction) && !UsedReloc) {
if (!handlePCRelOperand(Instruction, AbsoluteInstrAddr, Size)) {
errs() << "BOLT-ERROR: cannot handle PC-relative operand at 0x"
<< Twine::utohexstr(AbsoluteInstrAddr)
@ -1327,11 +1328,11 @@ add_instruction:
// Record offset of the instruction for profile matching.
if (BC.keepOffsetForInstruction(Instruction)) {
MIA->addAnnotation(Ctx.get(), Instruction, "Offset", Offset);
MIB->addAnnotation(Ctx.get(), Instruction, "Offset", Offset);
}
if (MemData && !emptyRange(MemData->getMemInfoRange(Offset))) {
MIA->addAnnotation(Ctx.get(), Instruction, "MemDataOffset", Offset);
MIB->addAnnotation(Ctx.get(), Instruction, "MemDataOffset", Offset);
}
addInstruction(Offset, std::move(Instruction));
@ -1408,19 +1409,19 @@ void BinaryFunction::postProcessJumpTables() {
bool BinaryFunction::postProcessIndirectBranches() {
for (auto *BB : layout()) {
for (auto &Instr : *BB) {
if (!BC.MIA->isIndirectBranch(Instr))
if (!BC.MIB->isIndirectBranch(Instr))
continue;
// If there's an indirect branch in a single-block function -
// it must be a tail call.
if (layout_size() == 1) {
BC.MIA->convertJmpToTailCall(Instr, BC.Ctx.get());
BC.MIB->convertJmpToTailCall(Instr, BC.Ctx.get());
return true;
}
// Validate the tail call or jump table assumptions.
if (BC.MIA->isTailCall(Instr) || BC.MIA->getJumpTable(Instr)) {
if (BC.MIA->getMemoryOperandNo(Instr) != -1) {
if (BC.MIB->isTailCall(Instr) || BC.MIB->getJumpTable(Instr)) {
if (BC.MIB->getMemoryOperandNo(Instr) != -1) {
// We have validated memory contents addressed by the jump
// instruction already.
continue;
@ -1441,7 +1442,7 @@ bool BinaryFunction::postProcessIndirectBranches() {
if (PrevInstr == BB->rend()) {
if (opts::Verbosity >= 2) {
outs() << "BOLT-INFO: rejected potential "
<< (BC.MIA->isTailCall(Instr) ? "indirect tail call"
<< (BC.MIB->isTailCall(Instr) ? "indirect tail call"
: "jump table")
<< " in function " << *this
<< " because the jump-on register was not defined in "
@ -1452,9 +1453,9 @@ bool BinaryFunction::postProcessIndirectBranches() {
return false;
}
// In case of PIC jump table we need to do more checks.
if (BC.MIA->isMoveMem2Reg(*PrevInstr))
if (BC.MIB->isMoveMem2Reg(*PrevInstr))
continue;
assert(BC.MIA->isADD64rr(*PrevInstr) && "add instruction expected");
assert(BC.MIB->isADD64rr(*PrevInstr) && "add instruction expected");
auto R2 = PrevInstr->getOperand(2).getReg();
// Make sure both regs are set in the same basic block prior to ADD.
bool IsR1Set = false;
@ -1478,7 +1479,7 @@ bool BinaryFunction::postProcessIndirectBranches() {
// what it is and conservatively reject the function's CFG.
bool IsEpilogue = false;
for (const auto &Instr : *BB) {
if (BC.MIA->isLeave(Instr) || BC.MIA->isPop(Instr)) {
if (BC.MIB->isLeave(Instr) || BC.MIB->isPop(Instr)) {
IsEpilogue = true;
break;
}
@ -1493,7 +1494,7 @@ bool BinaryFunction::postProcessIndirectBranches() {
}
return false;
}
BC.MIA->convertJmpToTailCall(Instr, BC.Ctx.get());
BC.MIB->convertJmpToTailCall(Instr, BC.Ctx.get());
}
}
return true;
@ -1510,12 +1511,12 @@ void BinaryFunction::recomputeLandingPads() {
for (auto *BB : BasicBlocks) {
std::unordered_set<const BinaryBasicBlock *> BBLandingPads;
for (auto &Instr : *BB) {
if (!BC.MIA->isInvoke(Instr))
if (!BC.MIB->isInvoke(Instr))
continue;
const MCSymbol *LPLabel;
uint64_t Action;
std::tie(LPLabel, Action) = BC.MIA->getEHInfo(Instr);
std::tie(LPLabel, Action) = BC.MIB->getEHInfo(Instr);
if (!LPLabel)
continue;
@ -1532,7 +1533,7 @@ void BinaryFunction::recomputeLandingPads() {
bool BinaryFunction::buildCFG() {
NamedRegionTimer T("buildcfg", "Build CFG", TimerGroupName, TimerGroupDesc,
opts::TimeBuild);
auto &MIA = BC.MIA;
auto &MIB = BC.MIB;
if (!isSimple()) {
assert(!BC.HasRelocations &&
@ -1581,8 +1582,8 @@ bool BinaryFunction::buildCFG() {
auto updateOffset = [&](uint64_t Offset) {
assert(PrevBB && PrevBB != InsertBB && "invalid previous block");
auto *PrevInstr = PrevBB->getLastNonPseudoInstr();
if (PrevInstr && !MIA->hasAnnotation(*PrevInstr, "Offset"))
MIA->addAnnotation(BC.Ctx.get(), *PrevInstr, "Offset", Offset);
if (PrevInstr && !MIB->hasAnnotation(*PrevInstr, "Offset"))
MIB->addAnnotation(BC.Ctx.get(), *PrevInstr, "Offset", Offset);
};
for (auto I = Instructions.begin(), E = Instructions.end(); I != E; ++I) {
@ -1603,7 +1604,7 @@ bool BinaryFunction::buildCFG() {
// Ignore nops. We use nops to derive alignment of the next basic block.
// It will not always work, as some blocks are naturally aligned, but
// it's just part of heuristic for block alignment.
if (MIA->isNoop(Instr) && !PreserveNops) {
if (MIB->isNoop(Instr) && !PreserveNops) {
IsLastInstrNop = true;
continue;
}
@ -1614,9 +1615,9 @@ bool BinaryFunction::buildCFG() {
assert(PrevBB && "no previous basic block for a fall through");
auto *PrevInstr = PrevBB->getLastNonPseudoInstr();
assert(PrevInstr && "no previous instruction for a fall through");
if (MIA->isUnconditionalBranch(Instr) &&
!MIA->isUnconditionalBranch(*PrevInstr) &&
!MIA->getConditionalTailCall(*PrevInstr)) {
if (MIB->isUnconditionalBranch(Instr) &&
!MIB->isUnconditionalBranch(*PrevInstr) &&
!MIB->getConditionalTailCall(*PrevInstr)) {
// Temporarily restore inserter basic block.
InsertBB = PrevBB;
} else {
@ -1631,8 +1632,8 @@ bool BinaryFunction::buildCFG() {
addCFIPlaceholders(0, InsertBB);
}
const auto IsBlockEnd = MIA->isTerminator(Instr);
IsLastInstrNop = MIA->isNoop(Instr);
const auto IsBlockEnd = MIB->isTerminator(Instr);
IsLastInstrNop = MIB->isNoop(Instr);
LastInstrOffset = Offset;
InsertBB->addInstruction(std::move(Instr));
@ -1702,10 +1703,10 @@ bool BinaryFunction::buildCFG() {
//
// Conditional tail call is a special case since we don't add a taken
// branch successor for it.
IsPrevFT = !MIA->isTerminator(*LastInstr) ||
MIA->getConditionalTailCall(*LastInstr);
IsPrevFT = !MIB->isTerminator(*LastInstr) ||
MIB->getConditionalTailCall(*LastInstr);
} else if (BB->succ_size() == 1) {
IsPrevFT = MIA->isConditionalBranch(*LastInstr);
IsPrevFT = MIB->isConditionalBranch(*LastInstr);
} else {
IsPrevFT = false;
}
@ -1784,7 +1785,7 @@ void BinaryFunction::postProcessCFG() {
// Remove "Offset" annotations.
for (auto *BB : layout())
for (auto &Inst : *BB)
BC.MIA->removeAnnotation(Inst, "Offset");
BC.MIB->removeAnnotation(Inst, "Offset");
assert((!isSimple() || validateCFG())
&& "Invalid CFG detected after post-processing CFG");
@ -1868,7 +1869,7 @@ void BinaryFunction::removeConditionalTailCalls() {
if (!CTCInstr)
continue;
auto TargetAddressOrNone = BC.MIA->getConditionalTailCall(*CTCInstr);
auto TargetAddressOrNone = BC.MIB->getConditionalTailCall(*CTCInstr);
if (!TargetAddressOrNone)
continue;
@ -1879,24 +1880,24 @@ void BinaryFunction::removeConditionalTailCalls() {
uint64_t CTCMispredCount = BinaryBasicBlock::COUNT_NO_PROFILE;
if (hasValidProfile()) {
CTCTakenCount =
BC.MIA->getAnnotationWithDefault<uint64_t>(*CTCInstr, "CTCTakenCount");
BC.MIB->getAnnotationWithDefault<uint64_t>(*CTCInstr, "CTCTakenCount");
CTCMispredCount =
BC.MIA->getAnnotationWithDefault<uint64_t>(*CTCInstr,
BC.MIB->getAnnotationWithDefault<uint64_t>(*CTCInstr,
"CTCMispredCount");
}
// Assert that the tail call does not throw.
const MCSymbol *LP;
uint64_t Action;
std::tie(LP, Action) = BC.MIA->getEHInfo(*CTCInstr);
std::tie(LP, Action) = BC.MIB->getEHInfo(*CTCInstr);
assert(!LP && "found tail call with associated landing pad");
// Create a basic block with an unconditional tail call instruction using
// the same destination.
const auto *CTCTargetLabel = BC.MIA->getTargetSymbol(*CTCInstr);
const auto *CTCTargetLabel = BC.MIB->getTargetSymbol(*CTCInstr);
assert(CTCTargetLabel && "symbol expected for conditional tail call");
MCInst TailCallInstr;
BC.MIA->createTailCall(TailCallInstr, CTCTargetLabel, BC.Ctx.get());
BC.MIB->createTailCall(TailCallInstr, CTCTargetLabel, BC.Ctx.get());
auto TailCallBB = createBasicBlock(BinaryBasicBlock::INVALID_OFFSET,
BC.Ctx->createTempSymbol("TC", true));
TailCallBB->addInstruction(TailCallInstr);
@ -1908,9 +1909,9 @@ void BinaryFunction::removeConditionalTailCalls() {
// Add execution count for the block.
TailCallBB->setExecutionCount(CTCTakenCount);
BC.MIA->convertTailCallToJmp(*CTCInstr);
BC.MIB->convertTailCallToJmp(*CTCInstr);
BC.MIA->replaceBranchTarget(*CTCInstr, TailCallBB->getLabel(),
BC.MIB->replaceBranchTarget(*CTCInstr, TailCallBB->getLabel(),
BC.Ctx.get());
// Add basic block to the list that will be added to the end.
@ -1920,7 +1921,7 @@ void BinaryFunction::removeConditionalTailCalls() {
BB.swapConditionalSuccessors();
// This branch is no longer a conditional tail call.
BC.MIA->unsetConditionalTailCall(*CTCInstr);
BC.MIB->unsetConditionalTailCall(*CTCInstr);
}
insertBasicBlocks(std::prev(end()),
@ -2079,7 +2080,7 @@ bool BinaryFunction::fixCFIState() {
int32_t OldState = BB->getCFIState();
// Remember state at function entry point (our reference state).
auto InsertIt = FDEStartBB->begin();
while (InsertIt != FDEStartBB->end() && BC.MIA->isCFI(*InsertIt))
while (InsertIt != FDEStartBB->end() && BC.MIB->isCFI(*InsertIt))
++InsertIt;
addCFIPseudo(FDEStartBB, InsertIt, FrameInstructions.size());
FrameInstructions.emplace_back(
@ -2106,7 +2107,7 @@ bool BinaryFunction::fixCFIState() {
}
}
auto Pos = BB->begin();
while (Pos != BB->end() && BC.MIA->isCFI(*Pos)) {
while (Pos != BB->end() && BC.MIB->isCFI(*Pos)) {
auto CFI = getCFIFor(*Pos);
if (CFI->getOperation() == MCCFIInstruction::OpRememberState)
++StackOffset;
@ -2171,14 +2172,14 @@ void BinaryFunction::emitBody(MCStreamer &Streamer, bool EmitColdPart) {
for (auto I = BB->begin(), E = BB->end(); I != E; ++I) {
auto &Instr = *I;
// Handle pseudo instructions.
if (BC.MIA->isEHLabel(Instr)) {
const auto *Label = BC.MIA->getTargetSymbol(Instr);
if (BC.MIB->isEHLabel(Instr)) {
const auto *Label = BC.MIB->getTargetSymbol(Instr);
assert(Instr.getNumOperands() == 1 && Label &&
"bad EH_LABEL instruction");
Streamer.EmitLabel(const_cast<MCSymbol *>(Label));
continue;
}
if (BC.MIA->isCFI(Instr)) {
if (BC.MIB->isCFI(Instr)) {
Streamer.EmitCFIInstruction(*getCFIFor(Instr));
continue;
}
@ -2187,8 +2188,8 @@ void BinaryFunction::emitBody(MCStreamer &Streamer, bool EmitColdPart) {
}
// Emit GNU_args_size CFIs as necessary.
if (usesGnuArgsSize() && BC.MIA->isInvoke(Instr)) {
auto NewGnuArgsSize = BC.MIA->getGnuArgsSize(Instr);
if (usesGnuArgsSize() && BC.MIB->isInvoke(Instr)) {
auto NewGnuArgsSize = BC.MIB->getGnuArgsSize(Instr);
assert(NewGnuArgsSize >= 0 && "expected non-negative GNU_args_size");
if (NewGnuArgsSize != CurrentGnuArgsSize) {
CurrentGnuArgsSize = NewGnuArgsSize;
@ -2267,7 +2268,7 @@ void BinaryFunction::setTrapOnEntry() {
for (const auto EntryOffset : EntryOffsets) {
MCInst TrapInstr;
BC.MIA->createTrap(TrapInstr);
BC.MIB->createTrap(TrapInstr);
addInstruction(EntryOffset, std::move(TrapInstr));
}
@ -2420,11 +2421,11 @@ void BinaryFunction::duplicateConstantIslands() {
++OpNum;
continue;
}
const auto *Symbol = BC.MIA->getTargetSymbol(Inst, OpNum);
const auto *Symbol = BC.MIB->getTargetSymbol(Inst, OpNum);
auto ISym = ColdIslandSymbols.find(Symbol);
if (ISym == ColdIslandSymbols.end())
continue;
Operand = MCOperand::createExpr(BC.MIA->getTargetExprFor(
Operand = MCOperand::createExpr(BC.MIB->getTargetExprFor(
Inst,
MCSymbolRefExpr::create(ISym->second, MCSymbolRefExpr::VK_None,
*BC.Ctx),
@ -2524,7 +2525,7 @@ void BinaryFunction::dumpGraph(raw_ostream& OS) const {
UncondBranch);
const auto *LastInstr = BB->getLastNonPseudoInstr();
const bool IsJumpTable = LastInstr && BC.MIA->getJumpTable(*LastInstr);
const bool IsJumpTable = LastInstr && BC.MIB->getJumpTable(*LastInstr);
auto BI = BB->branch_info_begin();
for (auto *Succ : BB->successors()) {
@ -2662,7 +2663,7 @@ bool BinaryFunction::validateCFG() const {
}
void BinaryFunction::fixBranches() {
auto &MIA = BC.MIA;
auto &MIB = BC.MIB;
auto *Ctx = BC.Ctx.get();
for (unsigned I = 0, E = BasicBlocksLayout.size(); I != E; ++I) {
@ -2698,19 +2699,19 @@ void BinaryFunction::fixBranches() {
const auto *TSuccessor = BB->getConditionalSuccessor(true);
const auto *FSuccessor = BB->getConditionalSuccessor(false);
if (NextBB && NextBB == TSuccessor &&
!BC.MIA->hasAnnotation(*CondBranch, "DoNotChangeTarget")) {
!BC.MIB->hasAnnotation(*CondBranch, "DoNotChangeTarget")) {
std::swap(TSuccessor, FSuccessor);
MIA->reverseBranchCondition(*CondBranch, TSuccessor->getLabel(), Ctx);
MIB->reverseBranchCondition(*CondBranch, TSuccessor->getLabel(), Ctx);
BB->swapConditionalSuccessors();
} else {
MIA->replaceBranchTarget(*CondBranch, TSuccessor->getLabel(), Ctx);
MIB->replaceBranchTarget(*CondBranch, TSuccessor->getLabel(), Ctx);
}
if (TSuccessor == FSuccessor) {
BB->removeDuplicateConditionalSuccessor(CondBranch);
}
if (!NextBB ||
((NextBB != TSuccessor ||
BC.MIA->hasAnnotation(*CondBranch, "DoNotChangeTarget")) &&
BC.MIB->hasAnnotation(*CondBranch, "DoNotChangeTarget")) &&
NextBB != FSuccessor)) {
BB->addBranchInstruction(FSuccessor);
}
@ -2737,7 +2738,7 @@ void BinaryFunction::propagateGnuArgsSizeInfo() {
for (auto BB : BasicBlocks) {
for (auto II = BB->begin(); II != BB->end(); ) {
auto &Instr = *II;
if (BC.MIA->isCFI(Instr)) {
if (BC.MIB->isCFI(Instr)) {
auto CFI = getCFIFor(Instr);
if (CFI->getOperation() == MCCFIInstruction::OpGnuArgsSize) {
CurrentGnuArgsSize = CFI->getOffset();
@ -2747,9 +2748,9 @@ void BinaryFunction::propagateGnuArgsSizeInfo() {
II = BB->erasePseudoInstruction(II);
continue;
}
} else if (BC.MIA->isInvoke(Instr)) {
} else if (BC.MIB->isInvoke(Instr)) {
// Add the value of GNU_args_size as an extra operand to invokes.
BC.MIA->addGnuArgsSize(Instr, CurrentGnuArgsSize);
BC.MIB->addGnuArgsSize(Instr, CurrentGnuArgsSize);
}
++II;
}
@ -2763,7 +2764,7 @@ void BinaryFunction::postProcessBranches() {
auto LastInstrRI = BB->getLastNonPseudo();
if (BB->succ_size() == 1) {
if (LastInstrRI != BB->rend() &&
BC.MIA->isConditionalBranch(*LastInstrRI)) {
BC.MIB->isConditionalBranch(*LastInstrRI)) {
// __builtin_unreachable() could create a conditional branch that
// falls-through into the next function - hence the block will have only
// one valid successor. Such behaviour is undefined and thus we remove
@ -2784,12 +2785,12 @@ void BinaryFunction::postProcessBranches() {
<< BB->getName() << " in function " << *this << '\n');
continue;
}
if (!BC.MIA->isTerminator(*LastInstrRI) &&
!BC.MIA->isCall(*LastInstrRI)) {
if (!BC.MIB->isTerminator(*LastInstrRI) &&
!BC.MIB->isCall(*LastInstrRI)) {
DEBUG(dbgs() << "BOLT-DEBUG: adding return to basic block "
<< BB->getName() << " in function " << *this << '\n');
MCInst ReturnInstr;
BC.MIA->createReturn(ReturnInstr);
BC.MIB->createReturn(ReturnInstr);
BB->addInstruction(ReturnInstr);
}
}
@ -2867,7 +2868,7 @@ BinaryFunction::BasicBlockOrderType BinaryFunction::dfs() const {
MCInst *UncondBranch = nullptr;
if (BB->analyzeBranch(TBB, FBB, CondBranch, UncondBranch) &&
CondBranch && BB->succ_size() == 2) {
if (BC.MIA->getCanonicalBranchOpcode(CondBranch->getOpcode()) ==
if (BC.MIB->getCanonicalBranchOpcode(CondBranch->getOpcode()) ==
CondBranch->getOpcode()) {
Stack.push(BB->getConditionalSuccessor(true));
Stack.push(BB->getConditionalSuccessor(false));
@ -3005,7 +3006,7 @@ bool BinaryFunction::isIdenticalWith(const BinaryFunction &OtherBF,
// is ignored for CFG purposes.
auto *TrailingInstr = (I != E ? &(*I)
: (OtherI != OtherE ? &(*OtherI) : 0));
if (TrailingInstr && !BC.MIA->isUnconditionalBranch(*TrailingInstr)) {
if (TrailingInstr && !BC.MIB->isUnconditionalBranch(*TrailingInstr)) {
return false;
}
@ -3097,7 +3098,7 @@ std::size_t BinaryFunction::hash(bool Recompute, bool UseDFS) const {
// Ignore unconditional jumps since we check CFG consistency by processing
// basic blocks in order and do not rely on branches to be in-sync with
// CFG. Note that we still use condition code of conditional jumps.
if (BC.MIA->isUnconditionalBranch(Inst))
if (BC.MIB->isUnconditionalBranch(Inst))
continue;
if (Opcode == 0) {
@ -3206,9 +3207,9 @@ bool BinaryFunction::replaceJumpTableEntryIn(BinaryBasicBlock *BB,
BinaryBasicBlock *OldDest,
BinaryBasicBlock *NewDest) {
auto *Instr = BB->getLastNonPseudoInstr();
if (!Instr || !BC.MIA->isIndirectBranch(*Instr))
if (!Instr || !BC.MIB->isIndirectBranch(*Instr))
return false;
auto JTAddress = BC.MIA->getJumpTable(*Instr);
auto JTAddress = BC.MIB->getJumpTable(*Instr);
assert(JTAddress && "Invalid jump table address");
auto *JT = getJumpTableContainingAddress(JTAddress);
assert(JT && "No jump table structure for this indirect branch");
@ -3585,7 +3586,7 @@ MCInst *BinaryFunction::getInstructionAtOffset(uint64_t Offset) {
for (auto &Inst : *BB) {
constexpr auto InvalidOffset = std::numeric_limits<uint64_t>::max();
if (Offset == BC.MIA->getAnnotationWithDefault<uint64_t>(Inst, "Offset",
if (Offset == BC.MIB->getAnnotationWithDefault<uint64_t>(Inst, "Offset",
InvalidOffset))
return &Inst;
}
@ -3773,23 +3774,23 @@ DynoStats BinaryFunction::getDynoStats() const {
// Count the number of calls by iterating through all instructions.
for (const auto &Instr : *BB) {
if (BC.MIA->isStore(Instr)) {
if (BC.MIB->isStore(Instr)) {
Stats[DynoStats::STORES] += BBExecutionCount;
}
if (BC.MIA->isLoad(Instr)) {
if (BC.MIB->isLoad(Instr)) {
Stats[DynoStats::LOADS] += BBExecutionCount;
}
if (!BC.MIA->isCall(Instr))
if (!BC.MIB->isCall(Instr))
continue;
uint64_t CallFreq = BBExecutionCount;
if (BC.MIA->getConditionalTailCall(Instr)) {
if (BC.MIB->getConditionalTailCall(Instr)) {
CallFreq =
BC.MIA->getAnnotationWithDefault<uint64_t>(Instr, "CTCTakenCount");
BC.MIB->getAnnotationWithDefault<uint64_t>(Instr, "CTCTakenCount");
}
Stats[DynoStats::FUNCTION_CALLS] += CallFreq;
if (BC.MIA->isIndirectCall(Instr)) {
if (BC.MIB->isIndirectCall(Instr)) {
Stats[DynoStats::INDIRECT_CALLS] += CallFreq;
} else if (const auto *CallSymbol = BC.MIA->getTargetSymbol(Instr)) {
} else if (const auto *CallSymbol = BC.MIB->getTargetSymbol(Instr)) {
const auto *BF = BC.getFunctionForSymbol(CallSymbol);
if (BF && BF->isPLTFunction()) {
Stats[DynoStats::PLT_CALLS] += CallFreq;
@ -3806,7 +3807,7 @@ DynoStats BinaryFunction::getDynoStats() const {
// Jump tables.
const auto *LastInstr = BB->getLastNonPseudoInstr();
if (BC.MIA->getJumpTable(*LastInstr)) {
if (BC.MIB->getJumpTable(*LastInstr)) {
Stats[DynoStats::JUMP_TABLE_BRANCHES] += BBExecutionCount;
DEBUG(
static uint64_t MostFrequentJT;
@ -3840,7 +3841,7 @@ DynoStats BinaryFunction::getDynoStats() const {
}
// CTCs
if (BC.MIA->getConditionalTailCall(*CondBranch)) {
if (BC.MIB->getConditionalTailCall(*CondBranch)) {
if (BB->branch_info_begin() != BB->branch_info_end())
Stats[DynoStats::UNCOND_BRANCHES] += BB->branch_info_begin()->Count;
continue;


@ -32,7 +32,6 @@
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Object/ObjectFile.h"
@ -433,8 +432,8 @@ private:
// NB: there's no need to compare jump table indirect jump instructions
// separately as jump tables are handled by comparing corresponding
// symbols.
const auto EHInfoA = BC.MIA->getEHInfo(InstA);
const auto EHInfoB = BC.MIA->getEHInfo(InstB);
const auto EHInfoA = BC.MIB->getEHInfo(InstA);
const auto EHInfoB = BC.MIB->getEHInfo(InstB);
// Action indices should match.
if (EHInfoA.second != EHInfoB.second)
@ -928,8 +927,8 @@ public:
/// that is in \p BB. Return nullptr if none exists
BinaryBasicBlock *getLandingPadBBFor(const BinaryBasicBlock &BB,
const MCInst &InvokeInst) {
assert(BC.MIA->isInvoke(InvokeInst) && "must be invoke instruction");
MCLandingPad LP = BC.MIA->getEHInfo(InvokeInst);
assert(BC.MIB->isInvoke(InvokeInst) && "must be invoke instruction");
MCLandingPad LP = BC.MIB->getEHInfo(InvokeInst);
if (LP.first) {
auto *LBB = BB.getLandingPad(LP.first);
assert (LBB && "Landing pad should be defined");
@ -1259,12 +1258,12 @@ public:
}
const JumpTable *getJumpTable(const MCInst &Inst) const {
const auto Address = BC.MIA->getJumpTable(Inst);
const auto Address = BC.MIB->getJumpTable(Inst);
return getJumpTableContainingAddress(Address);
}
JumpTable *getJumpTable(const MCInst &Inst) {
const auto Address = BC.MIA->getJumpTable(Inst);
const auto Address = BC.MIB->getJumpTable(Inst);
return getJumpTableContainingAddress(Address);
}
@ -1498,7 +1497,7 @@ public:
}
--I;
while (I != Instructions.begin() && BC.MIA->isNoop(I->second)) {
while (I != Instructions.begin() && BC.MIB->isNoop(I->second)) {
Offset = I->first;
--I;
}
@ -1522,13 +1521,13 @@ public:
BinaryBasicBlock::iterator Pos,
uint32_t Offset) {
MCInst CFIPseudo;
BC.MIA->createCFI(CFIPseudo, Offset);
BC.MIB->createCFI(CFIPseudo, Offset);
return BB->insertPseudoInstr(Pos, CFIPseudo);
}
/// Retrieve the MCCFIInstruction object associated with a CFI pseudo.
MCCFIInstruction* getCFIFor(const MCInst &Instr) {
if (!BC.MIA->isCFI(Instr))
if (!BC.MIB->isCFI(Instr))
return nullptr;
uint32_t Offset = Instr.getOperand(0).getImm();
assert(Offset < FrameInstructions.size() && "Invalid CFI offset");
@ -1536,7 +1535,7 @@ public:
}
const MCCFIInstruction* getCFIFor(const MCInst &Instr) const {
if (!BC.MIA->isCFI(Instr))
if (!BC.MIB->isCFI(Instr))
return nullptr;
uint32_t Offset = Instr.getOperand(0).getImm();
assert(Offset < FrameInstructions.size() && "Invalid CFI offset");
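CFI handling stays index-based: a CFI pseudo's only operand is an index into FrameInstructions, so the pseudo itself is trivially copyable. A sketch of minting one (the MCCFIInstruction factory is standard LLVM; the rest follows the fixCFIState pattern above):

// Append the real CFI to the function's table, then plant a pseudo
// that refers to it by index.
MCInst CFIPseudo;
BC.MIB->createCFI(CFIPseudo, FrameInstructions.size());
FrameInstructions.emplace_back(
    MCCFIInstruction::createRememberState(/*Label=*/nullptr));
BB->insertPseudoInstr(BB->begin(), CFIPseudo);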


@ -112,7 +112,7 @@ bool BinaryFunction::recordTrace(
auto *PrevBB = BasicBlocksLayout[FromBB->getIndex() - 1];
if (PrevBB->getSuccessor(FromBB->getLabel())) {
const auto *Instr = PrevBB->getLastNonPseudoInstr();
if (Instr && BC.MIA->isCall(*Instr)) {
if (Instr && BC.MIB->isCall(*Instr)) {
FromBB = PrevBB;
} else {
DEBUG(dbgs() << "invalid incoming LBR (no call): " << FirstLBR << '\n');
@ -156,7 +156,7 @@ bool BinaryFunction::recordTrace(
const auto *Instr = BB->getLastNonPseudoInstr();
uint64_t Offset{0};
if (Instr) {
Offset = BC.MIA->getAnnotationWithDefault<uint64_t>(*Instr, "Offset");
Offset = BC.MIB->getAnnotationWithDefault<uint64_t>(*Instr, "Offset");
} else {
Offset = BB->getOffset();
}
@ -186,7 +186,7 @@ bool BinaryFunction::recordBranch(uint64_t From, uint64_t To,
if (!opts::CompatMode)
return true;
auto *Instr = getInstructionAtOffset(0);
if (Instr && BC.MIA->isCall(*Instr))
if (Instr && BC.MIB->isCall(*Instr))
return true;
return false;
}
@ -211,10 +211,10 @@ bool BinaryFunction::recordBranch(uint64_t From, uint64_t To,
const auto *LastInstr = ToBB->getLastNonPseudoInstr();
if (LastInstr) {
const auto LastInstrOffset =
BC.MIA->getAnnotationWithDefault<uint64_t>(*LastInstr, "Offset");
BC.MIB->getAnnotationWithDefault<uint64_t>(*LastInstr, "Offset");
// With old .fdata we are getting FT branches for "jcc,jmp" sequences.
if (To == LastInstrOffset && BC.MIA->isUnconditionalBranch(*LastInstr)) {
if (To == LastInstrOffset && BC.MIB->isUnconditionalBranch(*LastInstr)) {
return true;
}
@ -249,8 +249,8 @@ bool BinaryFunction::recordBranch(uint64_t From, uint64_t To,
if (!FromBB->getSuccessor(ToBB->getLabel())) {
// Check if this is a recursive call or a return from a recursive call.
if (ToBB->isEntryPoint() && (BC.MIA->isCall(*FromInstruction) ||
BC.MIA->isIndirectBranch(*FromInstruction))) {
if (ToBB->isEntryPoint() && (BC.MIB->isCall(*FromInstruction) ||
BC.MIB->isIndirectBranch(*FromInstruction))) {
// Execution count is already accounted for.
return true;
}
@ -371,7 +371,7 @@ void BinaryFunction::postProcessProfile() {
const auto *LastInstr = BB->getLastNonPseudoInstr();
if (!LastInstr)
continue;
const auto JTAddress = BC.MIA->getJumpTable(*LastInstr);
const auto JTAddress = BC.MIB->getJumpTable(*LastInstr);
if (!JTAddress)
continue;
auto *JT = getJumpTableContainingAddress(JTAddress);
@ -492,27 +492,27 @@ void BinaryFunction::convertBranchData() {
auto *Instr = getInstructionAtOffset(BI.From.Offset);
if (!Instr ||
(!BC.MIA->isCall(*Instr) && !BC.MIA->isIndirectBranch(*Instr)))
(!BC.MIB->isCall(*Instr) && !BC.MIB->isIndirectBranch(*Instr)))
continue;
auto setOrUpdateAnnotation = [&](StringRef Name, uint64_t Count) {
if (opts::Verbosity >= 1 && BC.MIA->hasAnnotation(*Instr, Name)) {
if (opts::Verbosity >= 1 && BC.MIB->hasAnnotation(*Instr, Name)) {
errs() << "BOLT-WARNING: duplicate " << Name << " info for offset 0x"
<< Twine::utohexstr(BI.From.Offset)
<< " in function " << *this << '\n';
}
auto &Value = BC.MIA->getOrCreateAnnotationAs<uint64_t>(BC.Ctx.get(),
auto &Value = BC.MIB->getOrCreateAnnotationAs<uint64_t>(BC.Ctx.get(),
*Instr, Name);
Value += Count;
};
if (BC.MIA->isIndirectCall(*Instr) || BC.MIA->isIndirectBranch(*Instr)) {
if (BC.MIB->isIndirectCall(*Instr) || BC.MIB->isIndirectBranch(*Instr)) {
IndirectCallSiteProfile &CSP =
BC.MIA->getOrCreateAnnotationAs<IndirectCallSiteProfile>(BC.Ctx.get(),
BC.MIB->getOrCreateAnnotationAs<IndirectCallSiteProfile>(BC.Ctx.get(),
*Instr, "CallProfile");
CSP.emplace_back(BI.To.IsSymbol, BI.To.Name, BI.Branches,
BI.Mispreds);
} else if (BC.MIA->getConditionalTailCall(*Instr)) {
} else if (BC.MIB->getConditionalTailCall(*Instr)) {
setOrUpdateAnnotation("CTCTakenCount", BI.Branches);
setOrUpdateAnnotation("CTCMispredCount", BI.Mispreds);
} else {
@ -659,9 +659,9 @@ void BinaryFunction::inferFallThroughCounts() {
// Get taken count of conditional tail call if the block ends with one.
uint64_t CTCTakenCount = 0;
const auto CTCInstr = BB->getLastNonPseudoInstr();
if (CTCInstr && BC.MIA->getConditionalTailCall(*CTCInstr)) {
if (CTCInstr && BC.MIB->getConditionalTailCall(*CTCInstr)) {
CTCTakenCount =
BC.MIA->getAnnotationWithDefault<uint64_t>(*CTCInstr, "CTCTakenCount");
BC.MIB->getAnnotationWithDefault<uint64_t>(*CTCInstr, "CTCTakenCount");
}
// Calculate frequency of throws from this node according to LBR data
@ -696,8 +696,8 @@ void BinaryFunction::inferFallThroughCounts() {
// Skip if the last instruction is an unconditional jump.
const auto *LastInstr = BB->getLastNonPseudoInstr();
if (LastInstr &&
(BC.MIA->isUnconditionalBranch(*LastInstr) ||
BC.MIA->isIndirectBranch(*LastInstr)))
(BC.MIB->isUnconditionalBranch(*LastInstr) ||
BC.MIB->isIndirectBranch(*LastInstr)))
continue;
// If there is an FT it will be the last successor.
auto &SuccBI = *BB->branch_info_rbegin();
@ -832,12 +832,12 @@ float BinaryFunction::evaluateProfileData(const FuncBranchData &BranchData) {
// by regular branch instructions and we need isBranch() here.
auto *Instr = getInstructionAtOffset(BI.From.Offset);
// If it's a prefix - skip it.
if (Instr && BC.MIA->isPrefix(*Instr))
if (Instr && BC.MIB->isPrefix(*Instr))
Instr = getInstructionAtOffset(BI.From.Offset + 1);
if (Instr &&
(BC.MIA->isCall(*Instr) ||
BC.MIA->isBranch(*Instr) ||
BC.MIA->isReturn(*Instr))) {
(BC.MIB->isCall(*Instr) ||
BC.MIB->isBranch(*Instr) ||
BC.MIB->isReturn(*Instr))) {
IsValid = true;
}
}


@ -1,5 +1,6 @@
add_subdirectory(merge-fdata)
add_subdirectory(Passes)
add_subdirectory(Target)
# Get the current git revision for BOLT.
function(get_version ofn)
@ -47,6 +48,8 @@ add_public_gen_version_target(GenBoltRevision)
set(LLVM_LINK_COMPONENTS
${LLVM_TARGETS_TO_BUILD}
BOLTPasses
BOLTTargetAArch64
BOLTTargetX86
CodeGen
Core
DebugInfoDWARF
@ -75,6 +78,7 @@ add_llvm_tool(llvm-bolt
DWARFRewriter.cpp
Exceptions.cpp
JumpTable.cpp
MCPlusBuilder.cpp
ProfileReader.cpp
ProfileWriter.cpp
Relocation.cpp


@ -147,11 +147,11 @@ extractFunctionCalls(const std::vector<BinaryFunction *> &BinaryFunctions) {
for (auto BB : SrcFunction->layout()) {
// Find call instructions and extract target symbols from each one
for (auto &Inst : *BB) {
if (!BC.MIA->isCall(Inst))
if (!BC.MIB->isCall(Inst))
continue;
// Call info
const MCSymbol* DstSym = BC.MIA->getTargetSymbol(Inst);
const MCSymbol* DstSym = BC.MIB->getTargetSymbol(Inst);
auto Count = BB->getKnownExecutionCount();
// Ignore calls w/o information
if (DstSym == nullptr || Count == 0)


@ -236,13 +236,13 @@ void BinaryFunction::parseLSDA(ArrayRef<uint8_t> LSDASectionData,
assert(II != IE && "exception range not pointing to an instruction");
do {
auto &Instruction = II->second;
if (BC.MIA->isCall(Instruction) &&
!BC.MIA->getConditionalTailCall(Instruction)) {
assert(!BC.MIA->isInvoke(Instruction) &&
if (BC.MIB->isCall(Instruction) &&
!BC.MIB->getConditionalTailCall(Instruction)) {
assert(!BC.MIB->isInvoke(Instruction) &&
"overlapping exception ranges detected");
// Add extra operands to a call instruction making it an invoke from
// now on.
BC.MIA->addEHInfo(Instruction,
BC.MIB->addEHInfo(Instruction,
MCLandingPad(LPSymbol, ActionEntry),
BC.Ctx.get());
}
@ -408,11 +408,11 @@ void BinaryFunction::updateEHRanges() {
for (auto II = BB->begin(); II != BB->end(); ++II) {
auto Instr = *II;
if (!BC.MIA->isCall(Instr))
if (!BC.MIB->isCall(Instr))
continue;
// Instruction can throw an exception that should be handled.
const bool Throws = BC.MIA->isInvoke(Instr);
const bool Throws = BC.MIB->isInvoke(Instr);
// Ignore the call if it's a continuation of a no-throw gap.
if (!Throws && !StartRange)
@ -421,7 +421,7 @@ void BinaryFunction::updateEHRanges() {
// Extract exception handling information from the instruction.
const MCSymbol *LP = nullptr;
uint64_t Action = 0;
std::tie(LP, Action) = BC.MIA->getEHInfo(Instr);
std::tie(LP, Action) = BC.MIB->getEHInfo(Instr);
// No action if the exception handler has not changed.
if (Throws &&
@ -433,7 +433,7 @@ void BinaryFunction::updateEHRanges() {
// Same symbol is used for the beginning and the end of the range.
const MCSymbol *EHSymbol = BC.Ctx->createTempSymbol("EH", true);
MCInst EHLabel;
BC.MIA->createEHLabel(EHLabel, EHSymbol, BC.Ctx.get());
BC.MIB->createEHLabel(EHLabel, EHSymbol, BC.Ctx.get());
II = std::next(BB->insertPseudoInstr(II, EHLabel));
// At this point we could be in one of the following states:

bolt/MCPlusBuilder.cpp (new file, 384 lines)

@ -0,0 +1,384 @@
//===- MCPlusBuilder.cpp - main interface for MCPlus-level instructions ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Create/analyze/modify instructions at MC+ level.
//
//===----------------------------------------------------------------------===//
#include "MCPlusBuilder.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/Debug.h"
#include <cstdint>
#include <queue>
#define DEBUG_TYPE "mcplus"
using namespace llvm;
using namespace bolt;
bool MCPlusBuilder::evaluateBranch(const MCInst &Inst, uint64_t Addr,
uint64_t Size, uint64_t &Target) const {
return Analysis->evaluateBranch(Inst, Addr, Size, Target);
}
namespace {
const MCLandingPad *findLandingPad(const MCInst &Inst) {
for (unsigned I = Inst.getNumOperands(); I > 0; --I) {
const auto &Op = Inst.getOperand(I - 1);
if (Op.isLandingPad()) {
return Op.getLandingPad();
}
}
return nullptr;
}
}
bool MCPlusBuilder::hasEHInfo(const MCInst &Inst) const {
return findLandingPad(Inst) != nullptr;
}
MCLandingPad MCPlusBuilder::getEHInfo(const MCInst &Inst) const {
const MCSymbol *LPSym = nullptr;
uint64_t Action = 0;
if (isCall(Inst)) {
if (auto LP = findLandingPad(Inst)) {
std::tie(LPSym, Action) = *LP;
}
}
return std::make_pair(LPSym, Action);
}
// Add handler and action info for call instruction.
void MCPlusBuilder::addEHInfo(MCInst &Inst,
const MCLandingPad &LP,
MCContext *Ctx) const {
if (isCall(Inst)) {
assert(!hasEHInfo(Inst));
Inst.addOperand(
MCOperand::createLandingPad(new (*Ctx) MCLandingPad(LP)));
}
}
int64_t MCPlusBuilder::getGnuArgsSize(const MCInst &Inst) const {
for (unsigned I = Inst.getNumOperands(); I > 0; --I) {
const auto &Op = Inst.getOperand(I - 1);
if (Op.isGnuArgsSize()) {
return Op.getGnuArgsSize();
}
}
return -1LL;
}
void MCPlusBuilder::addGnuArgsSize(MCInst &Inst, int64_t GnuArgsSize) const {
assert(GnuArgsSize >= 0 && "cannot set GNU_args_size to negative value");
assert(getGnuArgsSize(Inst) == -1LL && "GNU_args_size already set");
assert(isInvoke(Inst) && "GNU_args_size can only be set for invoke");
Inst.addOperand(MCOperand::createGnuArgsSize(GnuArgsSize));
}
uint64_t MCPlusBuilder::getJumpTable(const MCInst &Inst) const {
for (unsigned I = Inst.getNumOperands(); I > 0; --I) {
const auto &Op = Inst.getOperand(I - 1);
if (Op.isJumpTable()) {
return Op.getJumpTable();
}
}
return 0;
}
bool MCPlusBuilder::setJumpTable(MCContext *Ctx, MCInst &Inst, uint64_t Value,
uint16_t IndexReg) const {
if (!isIndirectBranch(Inst))
return false;
assert(getJumpTable(Inst) == 0 && "jump table already set");
Inst.addOperand(MCOperand::createJumpTable(Value));
addAnnotation<>(Ctx, Inst, "JTIndexReg", IndexReg);
return true;
}
Optional<uint64_t>
MCPlusBuilder::getConditionalTailCall(const MCInst &Inst) const {
for (unsigned I = Inst.getNumOperands(); I > 0; --I) {
const auto &Op = Inst.getOperand(I - 1);
if (Op.isConditionalTailCall()) {
return Op.getConditionalTailCall();
}
}
return NoneType();
}
bool
MCPlusBuilder::setConditionalTailCall(MCInst &Inst, uint64_t Dest) const {
if (!isConditionalBranch(Inst))
return false;
for (unsigned I = Inst.getNumOperands(); I > 0; --I) {
auto &Op = Inst.getOperand(I - 1);
if (Op.isConditionalTailCall()) {
Op.setConditionalTailCall(Dest);
return true;
}
}
Inst.addOperand(MCOperand::createConditionalTailCall(Dest));
return true;
}
bool MCPlusBuilder::unsetConditionalTailCall(MCInst &Inst) const {
for (auto OpI = Inst.begin(), OpE = Inst.end(); OpI != OpE; ++OpI) {
if (OpI->isConditionalTailCall()) {
Inst.erase(OpI);
return true;
}
}
return false;
}
namespace {
unsigned findAnnotationIndex(const MCInst &Inst, StringRef Name) {
for (unsigned I = Inst.getNumOperands(); I > 0; --I) {
const auto& Op = Inst.getOperand(I - 1);
if (Op.isAnnotation() && Op.getAnnotation()->getName() == Name) {
return I - 1;
}
}
return Inst.getNumOperands();
}
}
bool MCPlusBuilder::hasAnnotation(const MCInst &Inst, StringRef Name) const {
return findAnnotationIndex(Inst, Name) < Inst.getNumOperands();
}
bool MCPlusBuilder::removeAnnotation(MCInst &Inst, StringRef Name) const {
const auto Idx = findAnnotationIndex(Inst, Name);
if (Idx < Inst.getNumOperands()) {
auto *Annotation = Inst.getOperand(Idx).getAnnotation();
auto Itr = AnnotationPool.find(Annotation);
if (Itr != AnnotationPool.end()) {
AnnotationPool.erase(Itr);
Annotation->~MCAnnotation();
}
Inst.erase(Inst.begin() + Idx);
return true;
}
return false;
}
void MCPlusBuilder::removeAllAnnotations(MCInst &Inst) const {
for (auto Idx = Inst.getNumOperands(); Idx > 0; --Idx) {
auto &Op = Inst.getOperand(Idx - 1);
if (Op.isAnnotation()) {
auto *Annotation = Op.getAnnotation();
auto Itr = AnnotationPool.find(Annotation);
if (Itr != AnnotationPool.end()) {
AnnotationPool.erase(Itr);
Annotation->~MCAnnotation();
}
Inst.erase(Inst.begin() + Idx - 1);
}
}
}
bool MCPlusBuilder::renameAnnotation(MCInst &Inst,
StringRef Before,
StringRef After) const {
const auto Idx = findAnnotationIndex(Inst, Before);
if (Idx >= Inst.getNumOperands()) {
return false;
}
auto *Annotation = Inst.getOperand(Idx).getAnnotation();
auto PooledName = AnnotationNames.intern(After);
AnnotationNameRefs.insert(PooledName);
Annotation->setName(*PooledName);
return true;
}
const MCAnnotation *
MCPlusBuilder::getAnnotation(const MCInst &Inst, StringRef Name) const {
const auto Idx = findAnnotationIndex(Inst, Name);
assert(Idx < Inst.getNumOperands());
return Inst.getOperand(Idx).getAnnotation();
}
void MCPlusBuilder::getClobberedRegs(const MCInst &Inst,
BitVector &Regs) const {
if (isPrefix(Inst) || isCFI(Inst))
return;
const auto &InstInfo = Info->get(Inst.getOpcode());
const auto *ImplicitDefs = InstInfo.getImplicitDefs();
for (unsigned I = 0, E = InstInfo.getNumImplicitDefs(); I != E; ++I) {
Regs |= getAliases(ImplicitDefs[I], /*OnlySmaller=*/false);
}
for (unsigned I = 0, E = InstInfo.getNumDefs(); I != E; ++I) {
const auto &Operand = Inst.getOperand(I);
assert(Operand.isReg());
Regs |= getAliases(Operand.getReg(), /*OnlySmaller=*/false);
}
}
void MCPlusBuilder::getTouchedRegs(const MCInst &Inst,
BitVector &Regs) const {
if (isPrefix(Inst) || isCFI(Inst))
return;
const auto &InstInfo = Info->get(Inst.getOpcode());
const auto *ImplicitDefs = InstInfo.getImplicitDefs();
for (unsigned I = 0, E = InstInfo.getNumImplicitDefs(); I != E; ++I) {
Regs |= getAliases(ImplicitDefs[I], /*OnlySmaller=*/false);
}
const auto *ImplicitUses = InstInfo.getImplicitUses();
for (unsigned I = 0, E = InstInfo.getNumImplicitUses(); I != E; ++I) {
Regs |= getAliases(ImplicitUses[I], /*OnlySmaller=*/false);
}
for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
if (!Inst.getOperand(I).isReg())
continue;
Regs |= getAliases(Inst.getOperand(I).getReg(), /*OnlySmaller=*/false);
}
}
void MCPlusBuilder::getWrittenRegs(const MCInst &Inst,
BitVector &Regs) const {
if (isPrefix(Inst) || isCFI(Inst))
return;
const auto &InstInfo = Info->get(Inst.getOpcode());
const auto *ImplicitDefs = InstInfo.getImplicitDefs();
for (unsigned I = 0, E = InstInfo.getNumImplicitDefs(); I != E; ++I) {
Regs |= getAliases(ImplicitDefs[I], /*OnlySmaller=*/true);
}
for (unsigned I = 0, E = InstInfo.getNumDefs(); I != E; ++I) {
const auto &Operand = Inst.getOperand(I);
assert(Operand.isReg());
Regs |= getAliases(Operand.getReg(), /*OnlySmaller=*/true);
}
}
void MCPlusBuilder::getUsedRegs(const MCInst &Inst, BitVector &Regs) const {
if (isPrefix(Inst) || isCFI(Inst))
return;
const auto &InstInfo = Info->get(Inst.getOpcode());
const auto *ImplicitUses = InstInfo.getImplicitUses();
for (unsigned I = 0, E = InstInfo.getNumImplicitUses(); I != E; ++I) {
Regs |= getAliases(ImplicitUses[I], /*OnlySmaller=*/true);
}
for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
if (!Inst.getOperand(I).isReg())
continue;
Regs |= getAliases(Inst.getOperand(I).getReg(), /*OnlySmaller=*/true);
}
}
const BitVector &
MCPlusBuilder::getAliases(MCPhysReg Reg,
bool OnlySmaller) const {
// AliasMap caches a mapping of registers to the set of registers that
// alias (are sub or superregs of itself, including itself).
static std::vector<BitVector> AliasMap;
static std::vector<MCPhysReg> SuperReg;
if (AliasMap.size() > 0) {
if (OnlySmaller)
return AliasMap[Reg];
return AliasMap[SuperReg[Reg]];
}
// Build alias map
for (MCPhysReg I = 0, E = RegInfo->getNumRegs(); I != E; ++I) {
BitVector BV(RegInfo->getNumRegs(), false);
BV.set(I);
AliasMap.emplace_back(std::move(BV));
SuperReg.emplace_back(I);
}
std::queue<MCPhysReg> Worklist;
// Propagate alias info upwards
for (MCPhysReg I = 0, E = RegInfo->getNumRegs(); I != E; ++I) {
Worklist.push(I);
}
while (!Worklist.empty()) {
MCPhysReg I = Worklist.front();
Worklist.pop();
for (MCSubRegIterator SI(I, RegInfo); SI.isValid(); ++SI) {
AliasMap[I] |= AliasMap[*SI];
}
for (MCSuperRegIterator SI(I, RegInfo); SI.isValid(); ++SI) {
Worklist.push(*SI);
}
}
// Propagate parent reg downwards
for (MCPhysReg I = 0, E = RegInfo->getNumRegs(); I != E; ++I) {
Worklist.push(I);
}
while (!Worklist.empty()) {
MCPhysReg I = Worklist.front();
Worklist.pop();
for (MCSubRegIterator SI(I, RegInfo); SI.isValid(); ++SI) {
SuperReg[*SI] = SuperReg[I];
Worklist.push(*SI);
}
}
DEBUG({
dbgs() << "Dumping reg alias table:\n";
for (MCPhysReg I = 0, E = RegInfo->getNumRegs(); I != E; ++I) {
dbgs() << "Reg " << I << ": ";
const BitVector &BV = AliasMap[SuperReg[I]];
int Idx = BV.find_first();
while (Idx != -1) {
dbgs() << Idx << " ";
Idx = BV.find_next(Idx);
}
dbgs() << "\n";
}
});
if (OnlySmaller)
return AliasMap[Reg];
return AliasMap[SuperReg[Reg]];
}
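// Usage sketch (illustrative caller; X86::EAX stands in for any MCPhysReg):
//   MIB.getAliases(X86::EAX)        -> EAX plus all sub- and super-registers
//                                      (RAX, AX, AH, AL, ...)
//   MIB.getAliases(X86::EAX, /*OnlySmaller=*/true)
//                                   -> only EAX and its sub-registers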
uint8_t
MCPlusBuilder::getRegSize(MCPhysReg Reg) const {
// SizeMap caches a mapping of registers to their sizes
static std::vector<uint8_t> SizeMap;
if (SizeMap.size() > 0) {
return SizeMap[Reg];
}
SizeMap = std::vector<uint8_t>(RegInfo->getNumRegs());
// Build size map
for (auto I = RegInfo->regclass_begin(), E = RegInfo->regclass_end(); I != E;
++I) {
for (MCPhysReg Reg : *I) {
SizeMap[Reg] = I->getSize();
}
}
return SizeMap[Reg];
}
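Taken together, the MC+ extensions ride on special operands appended to an ordinary MCInst, plus a side pool of named annotations. A short sketch of the round trip, assuming a target MCPlusBuilder instance MIB, an MCContext *Ctx, a call instruction Inst, and a placeholder landing-pad symbol LPSymbol:

// Turn a call into an invoke and read the EH info back.
MIB.addEHInfo(Inst, MCLandingPad(LPSymbol, /*Action=*/1), Ctx);
const MCSymbol *LP;
uint64_t Action;
std::tie(LP, Action) = MIB.getEHInfo(Inst);  // LP == LPSymbol, Action == 1

// Attach, query, and remove a named annotation.
MIB.addAnnotation(Ctx, Inst, "Offset", uint64_t(0x40));
if (MIB.hasAnnotation(Inst, "Offset"))
  MIB.removeAnnotation(Inst, "Offset");      // destroys the pooled annotation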

bolt/MCPlusBuilder.h (new file, 1347 lines)

File diff suppressed because it is too large.


@ -18,24 +18,24 @@ namespace {
bool getStackAdjustmentSize(const BinaryContext &BC, const MCInst &Inst,
int64_t &Adjustment) {
return BC.MIA->evaluateSimple(Inst, Adjustment,
std::make_pair(BC.MIA->getStackPointer(), 0LL),
return BC.MIB->evaluateSimple(Inst, Adjustment,
std::make_pair(BC.MIB->getStackPointer(), 0LL),
std::make_pair(0, 0LL));
}
bool isIndifferentToSP(const MCInst &Inst, const BinaryContext &BC) {
if (BC.MIA->isCFI(Inst))
if (BC.MIB->isCFI(Inst))
return true;
const auto II = BC.MII->get(Inst.getOpcode());
if (BC.MIA->isTerminator(Inst) ||
II.hasImplicitDefOfPhysReg(BC.MIA->getStackPointer(), BC.MRI.get()) ||
II.hasImplicitUseOfPhysReg(BC.MIA->getStackPointer()))
if (BC.MIB->isTerminator(Inst) ||
II.hasImplicitDefOfPhysReg(BC.MIB->getStackPointer(), BC.MRI.get()) ||
II.hasImplicitUseOfPhysReg(BC.MIB->getStackPointer()))
return false;
for (int I = 0, E = Inst.getNumOperands(); I != E; ++I) {
const auto &Operand = Inst.getOperand(I);
if (Operand.isReg() && Operand.getReg() == BC.MIA->getStackPointer()) {
if (Operand.isReg() && Operand.getReg() == BC.MIB->getStackPointer()) {
return false;
}
}
@ -68,8 +68,8 @@ void AllocCombinerPass::combineAdjustments(BinaryContext &BC,
continue; // Skip updating Prev
int64_t Adjustment{0LL};
if (!Prev || !BC.MIA->isStackAdjustment(Inst) ||
!BC.MIA->isStackAdjustment(*Prev) ||
if (!Prev || !BC.MIB->isStackAdjustment(Inst) ||
!BC.MIB->isStackAdjustment(*Prev) ||
!getStackAdjustmentSize(BC, *Prev, Adjustment)) {
Prev = &Inst;
continue;
@ -82,10 +82,10 @@ void AllocCombinerPass::combineAdjustments(BinaryContext &BC,
dbgs() << "Adjustment: " << Adjustment << "\n";
});
if (BC.MIA->isSUB(Inst))
if (BC.MIB->isSUB(Inst))
Adjustment = -Adjustment;
BC.MIA->addToImm(Inst, Adjustment, BC.Ctx.get());
BC.MIB->addToImm(Inst, Adjustment, BC.Ctx.get());
DEBUG({
dbgs() << "After adjustment:\n";

View File

@ -171,12 +171,12 @@ BinaryFunctionCallGraph buildCallGraph(BinaryContext &BC,
// If there is no profiling data the count will be COUNT_NO_PROFILE.
auto getCallInfo = [&](const BinaryBasicBlock *BB, const MCInst &Inst) {
std::vector<std::pair<const MCSymbol *, uint64_t>> Counts;
const auto *DstSym = BC.MIA->getTargetSymbol(Inst);
const auto *DstSym = BC.MIB->getTargetSymbol(Inst);
// If this is an indirect call use perf data directly.
if (!DstSym && BC.MIA->hasAnnotation(Inst, "CallProfile")) {
if (!DstSym && BC.MIB->hasAnnotation(Inst, "CallProfile")) {
const auto &ICSP =
BC.MIA->getAnnotationAs<IndirectCallSiteProfile>(Inst, "CallProfile");
BC.MIB->getAnnotationAs<IndirectCallSiteProfile>(Inst, "CallProfile");
for (const auto &CSI : ICSP) {
if (!CSI.IsFunction)
continue;
@ -241,7 +241,7 @@ BinaryFunctionCallGraph buildCallGraph(BinaryContext &BC,
for (auto &Inst : *BB) {
// Find call instructions and extract target symbols from each one.
if (BC.MIA->isCall(Inst)) {
if (BC.MIB->isCall(Inst)) {
const auto CallInfo = getCallInfo(BB, Inst);
if (!CallInfo.empty()) {

View File

@ -250,9 +250,9 @@ void OptimizeBodylessFunctions::analyze(
const auto *FirstInstr = BF.front().getFirstNonPseudoInstr();
if (!FirstInstr)
return;
if (!BC.MIA->isTailCall(*FirstInstr))
if (!BC.MIB->isTailCall(*FirstInstr))
return;
const auto *TargetSymbol = BC.MIA->getTargetSymbol(*FirstInstr);
const auto *TargetSymbol = BC.MIB->getTargetSymbol(*FirstInstr);
if (!TargetSymbol)
return;
const auto *Function = BC.getFunctionForSymbol(TargetSymbol);
@ -266,9 +266,9 @@ void OptimizeBodylessFunctions::optimizeCalls(BinaryFunction &BF,
BinaryContext &BC) {
for (auto *BB : BF.layout()) {
for (auto &Inst : *BB) {
if (!BC.MIA->isCall(Inst))
if (!BC.MIB->isCall(Inst))
continue;
const auto *OriginalTarget = BC.MIA->getTargetSymbol(Inst);
const auto *OriginalTarget = BC.MIB->getTargetSymbol(Inst);
if (!OriginalTarget)
continue;
const auto *Target = OriginalTarget;
@ -288,7 +288,7 @@ void OptimizeBodylessFunctions::optimizeCalls(BinaryFunction &BF,
<< ": replacing call to " << OriginalTarget->getName()
<< " by call to " << Target->getName()
<< " while folding " << CallSites << " call sites\n");
BC.MIA->replaceBranchTarget(Inst, Target, BC.Ctx.get());
BC.MIB->replaceBranchTarget(Inst, Target, BC.Ctx.get());
NumOptimizedCallSites += CallSites;
if (BB->hasProfile()) {
@ -528,7 +528,7 @@ void ReorderBasicBlocks::splitFunction(BinaryFunction &BF) const {
// that the block never throws, it is safe to move the block to
// decrease the size of the function.
for (auto &Instr : *BB) {
if (BF.getBinaryContext().MIA->isInvoke(Instr)) {
if (BF.getBinaryContext().MIB->isInvoke(Instr)) {
BB->setCanOutline(false);
break;
}
@ -624,7 +624,7 @@ void StripAnnotations::runOnFunctions(
for (auto &BB : Function) {
for (auto &Inst : BB) {
BC.MIA->removeAllAnnotations(Inst);
BC.MIB->removeAllAnnotations(Inst);
}
}
}
@ -677,11 +677,11 @@ uint64_t fixDoubleJumps(BinaryContext &BC,
assert((CondBranch || (!CondBranch && Pred->succ_size() == 1)) &&
"Predecessor block has inconsistent number of successors");
if (CondBranch &&
BC.MIA->getTargetSymbol(*CondBranch) == BB.getLabel()) {
BC.MIA->replaceBranchTarget(*CondBranch, Succ->getLabel(), Ctx);
BC.MIB->getTargetSymbol(*CondBranch) == BB.getLabel()) {
BC.MIB->replaceBranchTarget(*CondBranch, Succ->getLabel(), Ctx);
} else if (UncondBranch &&
BC.MIA->getTargetSymbol(*UncondBranch) == BB.getLabel()) {
BC.MIA->replaceBranchTarget(*UncondBranch, Succ->getLabel(), Ctx);
BC.MIB->getTargetSymbol(*UncondBranch) == BB.getLabel()) {
BC.MIB->replaceBranchTarget(*UncondBranch, Succ->getLabel(), Ctx);
} else if (!UncondBranch) {
assert(Function.getBasicBlockAfter(Pred, false) != Succ &&
"Don't add an explicit jump to a fallthrough block.");
@ -691,8 +691,8 @@ uint64_t fixDoubleJumps(BinaryContext &BC,
// Succ will be null in the tail call case. In this case we
// need to explicitly add a tail call instruction.
auto *Branch = Pred->getLastNonPseudoInstr();
if (Branch && BC.MIA->isUnconditionalBranch(*Branch)) {
assert(BC.MIA->getTargetSymbol(*Branch) == BB.getLabel());
if (Branch && BC.MIB->isUnconditionalBranch(*Branch)) {
assert(BC.MIB->getTargetSymbol(*Branch) == BB.getLabel());
Pred->removeSuccessor(&BB);
Pred->eraseInstruction(Branch);
Pred->addTailCallInstruction(SuccSym);
@ -714,16 +714,16 @@ uint64_t fixDoubleJumps(BinaryContext &BC,
continue;
auto *Inst = BB.getFirstNonPseudoInstr();
const bool IsTailCall = BC.MIA->isTailCall(*Inst);
const bool IsTailCall = BC.MIB->isTailCall(*Inst);
if (!BC.MIA->isUnconditionalBranch(*Inst) && !IsTailCall)
if (!BC.MIB->isUnconditionalBranch(*Inst) && !IsTailCall)
continue;
// If we operate after SCTC, make sure it's not a conditional tail call.
if (IsTailCall && BC.MIA->isConditionalBranch(*Inst))
if (IsTailCall && BC.MIB->isConditionalBranch(*Inst))
continue;
const auto *SuccSym = BC.MIA->getTargetSymbol(*Inst);
const auto *SuccSym = BC.MIB->getTargetSymbol(*Inst);
auto *Succ = BB.getSuccessor();
if (((!Succ || &BB == Succ) && !IsTailCall) || (IsTailCall && !SuccSym))
@ -799,7 +799,7 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
BF.updateLayoutIndices();
BF.markUnreachable();
auto &MIA = BC.MIA;
auto &MIB = BC.MIB;
uint64_t NumLocalCTCCandidates = 0;
uint64_t NumLocalCTCs = 0;
uint64_t LocalCTCTakenCount = 0;
@ -820,10 +820,10 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
continue;
auto *Instr = BB->getFirstNonPseudoInstr();
if (!MIA->isTailCall(*Instr) || BC.MIA->isConditionalBranch(*Instr))
if (!MIB->isTailCall(*Instr) || BC.MIB->isConditionalBranch(*Instr))
continue;
auto *CalleeSymbol = MIA->getTargetSymbol(*Instr);
auto *CalleeSymbol = MIB->getTargetSymbol(*Instr);
if (!CalleeSymbol)
continue;
@ -876,7 +876,7 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
bool BranchForStats;
if (CondSucc != BB) {
// Patch the new target address into the conditional branch.
MIA->reverseBranchCondition(*CondBranch, CalleeSymbol, BC.Ctx.get());
MIB->reverseBranchCondition(*CondBranch, CalleeSymbol, BC.Ctx.get());
// Since we reversed the condition on the branch we need to change
// the target for the unconditional branch or add an unconditional
// branch to the old target. This has to be done manually since
@ -885,7 +885,7 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
BranchForStats = false;
} else {
// Change destination of the conditional branch.
MIA->replaceBranchTarget(*CondBranch, CalleeSymbol, BC.Ctx.get());
MIB->replaceBranchTarget(*CondBranch, CalleeSymbol, BC.Ctx.get());
BranchForStats = true;
}
const auto Count = PredBB->getBranchInfo(BranchForStats).Count;
@ -893,11 +893,11 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
Count == BinaryBasicBlock::COUNT_NO_PROFILE ? 0 : Count;
// Annotate it, so "isCall" returns true for this jcc
MIA->setConditionalTailCall(*CondBranch);
MIB->setConditionalTailCall(*CondBranch);
// Add info about the conditional tail call frequency, otherwise this
// info will be lost when we delete the associated BranchInfo entry
BC.MIA->removeAnnotation(*CondBranch, "CTCTakenCount");
BC.MIA->addAnnotation(BC.Ctx.get(), *CondBranch, "CTCTakenCount",
BC.MIB->removeAnnotation(*CondBranch, "CTCTakenCount");
BC.MIB->addAnnotation(BC.Ctx.get(), *CondBranch, "CTCTakenCount",
CTCTakenFreq);
// Remove the unused successor which may be eliminated later
@ -948,12 +948,12 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
if (HasFallthrough)
PredBB->eraseInstruction(UncondBranch);
else
MIA->replaceBranchTarget(*UncondBranch,
MIB->replaceBranchTarget(*UncondBranch,
CondSucc->getLabel(),
BC.Ctx.get());
} else if (!HasFallthrough) {
MCInst Branch;
MIA->createUncondBranch(Branch, CondSucc->getLabel(), BC.Ctx.get());
MIB->createUncondBranch(Branch, CondSucc->getLabel(), BC.Ctx.get());
PredBB->addInstruction(Branch);
}
}
@ -1018,7 +1018,7 @@ uint64_t Peepholes::shortenInstructions(BinaryContext &BC,
if (opts::Verbosity > 1) {
DebugInst = Inst;
}
if (BC.MIA->shortenInstruction(Inst)) {
if (BC.MIB->shortenInstruction(Inst)) {
if (opts::Verbosity > 1) {
outs() << "BOLT-INFO: peephole, shortening:\n"
<< "BOLT-INFO: ";
@ -1037,9 +1037,9 @@ void Peepholes::addTailcallTraps(BinaryContext &BC,
BinaryFunction &Function) {
for (auto &BB : Function) {
auto *Inst = BB.getLastNonPseudoInstr();
if (Inst && BC.MIA->isTailCall(*Inst) && BC.MIA->isIndirectBranch(*Inst)) {
if (Inst && BC.MIB->isTailCall(*Inst) && BC.MIB->isIndirectBranch(*Inst)) {
MCInst Trap;
if (BC.MIA->createTrap(Trap)) {
if (BC.MIB->createTrap(Trap)) {
BB.addInstruction(Trap);
++TailCallTraps;
}
@ -1112,7 +1112,7 @@ void Peepholes::runOnFunctions(BinaryContext &BC,
bool SimplifyRODataLoads::simplifyRODataLoads(
BinaryContext &BC, BinaryFunction &BF) {
auto &MIA = BC.MIA;
auto &MIB = BC.MIB;
uint64_t NumLocalLoadsSimplified = 0;
uint64_t NumDynamicLocalLoadsSimplified = 0;
@ -1131,9 +1131,9 @@ bool SimplifyRODataLoads::simplifyRODataLoads(
// Try to statically evaluate the target memory address.
uint64_t TargetAddress;
if (MIA->hasPCRelOperand(Inst)) {
if (MIB->hasPCRelOperand(Inst)) {
// Try to find the symbol that corresponds to the PC-relative operand.
auto DispOpI = MIA->getMemOperandDisp(Inst);
auto DispOpI = MIB->getMemOperandDisp(Inst);
assert(DispOpI != Inst.end() && "expected PC-relative displacement");
assert(DispOpI->isExpr() &&
"found PC-relative with non-symbolic displacement");
@ -1143,7 +1143,7 @@ bool SimplifyRODataLoads::simplifyRODataLoads(
uint64_t DisplOffset;
std::tie(DisplSymbol, DisplOffset) =
BC.MIA->getTargetSymbolInfo(DispOpI->getExpr());
BC.MIB->getTargetSymbolInfo(DispOpI->getExpr());
if (!DisplSymbol)
continue;
@ -1154,7 +1154,7 @@ bool SimplifyRODataLoads::simplifyRODataLoads(
if (!BD)
continue;
TargetAddress = BD->getAddress() + DisplOffset;
} else if (!MIA->evaluateMemOperandTarget(Inst, TargetAddress)) {
} else if (!MIB->evaluateMemOperandTarget(Inst, TargetAddress)) {
continue;
}
@ -1174,7 +1174,7 @@ bool SimplifyRODataLoads::simplifyRODataLoads(
if (BB->hasProfile())
NumDynamicLocalLoadsFound += BB->getExecutionCount();
if (MIA->replaceMemOperandWithImm(Inst, ConstantData, Offset)) {
if (MIB->replaceMemOperandWithImm(Inst, ConstantData, Offset)) {
++NumLocalLoadsSimplified;
if (BB->hasProfile())
NumDynamicLocalLoadsSimplified += BB->getExecutionCount();
@ -1547,7 +1547,7 @@ void InstructionLowering::runOnFunctions(
for (auto &BFI : BFs) {
for (auto &BB : BFI.second) {
for (auto &Instruction : BB) {
BC.MIA->lowerTailCall(Instruction);
BC.MIB->lowerTailCall(Instruction);
}
}
}
@ -1563,8 +1563,8 @@ void StripRepRet::runOnFunctions(
for (auto &BB : BFI.second) {
auto LastInstRIter = BB.getLastNonPseudo();
if (LastInstRIter == BB.rend() ||
!BC.MIA->isReturn(*LastInstRIter) ||
!BC.MIA->deleteREPPrefix(*LastInstRIter))
!BC.MIB->isReturn(*LastInstRIter) ||
!BC.MIB->deleteREPPrefix(*LastInstRIter))
continue;
NumPrefixesRemoved += BB.getKnownExecutionCount();

View File

@ -19,8 +19,8 @@ void doForAllPreds(const BinaryContext &BC, const BinaryBasicBlock &BB,
return;
for (auto Thrower : BB.throwers()) {
for (auto &Inst : *Thrower) {
if (!BC.MIA->isInvoke(Inst) ||
BC.MIA->getEHInfo(Inst).first != BB.getLabel())
if (!BC.MIB->isInvoke(Inst) ||
BC.MIB->getEHInfo(Inst).first != BB.getLabel())
continue;
Task(ProgramPoint(&Inst));
}

View File

@ -233,7 +233,7 @@ protected:
}
StateTy &getOrCreateStateAt(MCInst &Point) {
return BC.MIA->getOrCreateAnnotationAs<StateTy>(
return BC.MIB->getOrCreateAnnotationAs<StateTy>(
BC.Ctx.get(), Point, derived().getAnnotationName(), StatePrinterTy(BC));
}
@ -275,7 +275,7 @@ public:
/// Track the state at the end (start) of each MCInst in this function if
/// the direction of the dataflow is forward (backward).
ErrorOr<const StateTy &> getStateAt(const MCInst &Point) const {
return BC.MIA->tryGetAnnotationAs<StateTy>(
return BC.MIB->tryGetAnnotationAs<StateTy>(
Point, const_derived().getAnnotationName());
}
@ -304,7 +304,7 @@ public:
void cleanAnnotations() {
for (auto &BB : Func) {
for (auto &Inst : BB) {
BC.MIA->removeAnnotation(Inst, derived().getAnnotationName());
BC.MIB->removeAnnotation(Inst, derived().getAnnotationName());
}
}
}
@ -358,7 +358,7 @@ public:
StateTy StateAtEntry = getOrCreateStateAt(*BB);
if (BB->isLandingPad()) {
doForAllSuccsOrPreds(*BB, [&](ProgramPoint P) {
if (P.isInst() && BC.MIA->isInvoke(*P.getInst()))
if (P.isInst() && BC.MIB->isInvoke(*P.getInst()))
derived().doConfluenceWithLP(StateAtEntry, *getStateAt(P),
*P.getInst());
else
@ -388,7 +388,7 @@ public:
auto doNext = [&] (MCInst &Inst, const BinaryBasicBlock &BB) {
StateTy CurState = derived().computeNext(Inst, *PrevState);
if (Backward && BC.MIA->isInvoke(Inst)) {
if (Backward && BC.MIB->isInvoke(Inst)) {
auto *LBB = Func.getLandingPadBBFor(BB, Inst);
if (LBB) {
auto First = LBB->begin();

View File

@ -142,7 +142,7 @@ private:
BitVector computeNext(const MCInst &Point, const BitVector &Cur) {
BitVector Next = Cur;
// Gen
if (!this->BC.MIA->isCFI(Point)) {
if (!this->BC.MIB->isCFI(Point)) {
Next.set(this->ExprToIdx[&Point]);
}
return Next;

View File

@ -107,7 +107,7 @@ class FrameAccessAnalysis {
MCPhysReg Reg{0};
int64_t StackOffset{0};
bool IsIndexed{false};
if (!BC.MIA->isStackAccess(Inst, FIE.IsLoad, FIE.IsStore, FIE.IsStoreFromReg,
if (!BC.MIB->isStackAccess(Inst, FIE.IsLoad, FIE.IsStore, FIE.IsStoreFromReg,
Reg, SrcImm, FIE.StackPtrReg, StackOffset, FIE.Size,
FIE.IsSimple, IsIndexed)) {
return true;
@ -126,11 +126,11 @@ class FrameAccessAnalysis {
if (FIE.IsLoad || FIE.IsStoreFromReg)
FIE.RegOrImm = Reg;
if (FIE.StackPtrReg == BC.MIA->getStackPointer() && SPOffset != SPT.EMPTY &&
if (FIE.StackPtrReg == BC.MIB->getStackPointer() && SPOffset != SPT.EMPTY &&
SPOffset != SPT.SUPERPOSITION) {
DEBUG(dbgs() << "Adding access via SP while CFA reg is another one\n");
FIE.StackOffset = SPOffset + StackOffset;
} else if (FIE.StackPtrReg == BC.MIA->getFramePointer() &&
} else if (FIE.StackPtrReg == BC.MIB->getFramePointer() &&
FPOffset != SPT.EMPTY && FPOffset != SPT.SUPERPOSITION) {
DEBUG(dbgs() << "Adding access via FP while CFA reg is another one\n");
FIE.StackOffset = FPOffset + StackOffset;
@ -171,7 +171,7 @@ public:
Prev = &Inst;
// Use CFI information to keep track of which register is being used to
// access the frame
if (BC.MIA->isCFI(Inst)) {
if (BC.MIB->isCFI(Inst)) {
const auto *CFI = BF.getCFIFor(Inst);
switch (CFI->getOperation()) {
case MCCFIInstruction::OpDefCfa:
@ -206,7 +206,7 @@ public:
return true;
}
if (BC.MIA->escapesVariable(Inst, SPT.HasFramePointer)) {
if (BC.MIB->escapesVariable(Inst, SPT.HasFramePointer)) {
DEBUG(dbgs() << "Leaked stack address, giving up on this function.\n");
DEBUG(dbgs() << "Blame insn: ");
DEBUG(Inst.dump());
@ -228,10 +228,10 @@ void FrameAnalysis::addArgAccessesFor(MCInst &Inst, ArgAccesses &&AA) {
}
if (AA.AssumeEverything) {
// Index 0 in ArgAccessesVector represents an "assumeeverything" entry
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, "ArgAccessEntry", 0U);
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, "ArgAccessEntry", 0U);
return;
}
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, "ArgAccessEntry",
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, "ArgAccessEntry",
(unsigned)ArgAccessesVector.size());
ArgAccessesVector.emplace_back(std::move(AA));
}
@ -250,13 +250,13 @@ void FrameAnalysis::addArgInStackAccessFor(MCInst &Inst,
}
void FrameAnalysis::addFIEFor(MCInst &Inst, const FrameIndexEntry &FIE) {
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, "FrameAccessEntry",
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, "FrameAccessEntry",
(unsigned)FIEVector.size());
FIEVector.emplace_back(FIE);
}
ErrorOr<ArgAccesses &> FrameAnalysis::getArgAccessesFor(const MCInst &Inst) {
if (auto Idx = BC.MIA->tryGetAnnotationAs<unsigned>(Inst, "ArgAccessEntry")) {
if (auto Idx = BC.MIB->tryGetAnnotationAs<unsigned>(Inst, "ArgAccessEntry")) {
assert(ArgAccessesVector.size() > *Idx && "Out of bounds");
return ArgAccessesVector[*Idx];
}
@ -265,7 +265,7 @@ ErrorOr<ArgAccesses &> FrameAnalysis::getArgAccessesFor(const MCInst &Inst) {
ErrorOr<const ArgAccesses &>
FrameAnalysis::getArgAccessesFor(const MCInst &Inst) const {
if (auto Idx = BC.MIA->tryGetAnnotationAs<unsigned>(Inst, "ArgAccessEntry")) {
if (auto Idx = BC.MIB->tryGetAnnotationAs<unsigned>(Inst, "ArgAccessEntry")) {
assert(ArgAccessesVector.size() > *Idx && "Out of bounds");
return ArgAccessesVector[*Idx];
}
@ -275,7 +275,7 @@ FrameAnalysis::getArgAccessesFor(const MCInst &Inst) const {
ErrorOr<const FrameIndexEntry &>
FrameAnalysis::getFIEFor(const MCInst &Inst) const {
if (auto Idx =
BC.MIA->tryGetAnnotationAs<unsigned>(Inst, "FrameAccessEntry")) {
BC.MIB->tryGetAnnotationAs<unsigned>(Inst, "FrameAccessEntry")) {
assert(FIEVector.size() > *Idx && "Out of bounds");
return FIEVector[*Idx];
}
@ -309,11 +309,11 @@ void FrameAnalysis::traverseCG(BinaryFunctionCallGraph &CG) {
bool FrameAnalysis::updateArgsTouchedFor(const BinaryFunction &BF, MCInst &Inst,
int CurOffset) {
if (!BC.MIA->isCall(Inst))
if (!BC.MIB->isCall(Inst))
return false;
std::set<int64_t> Res;
const auto *TargetSymbol = BC.MIA->getTargetSymbol(Inst);
const auto *TargetSymbol = BC.MIB->getTargetSymbol(Inst);
// If indirect call, we conservatively assume it accesses all stack positions
if (TargetSymbol == nullptr) {
addArgAccessesFor(Inst, ArgAccesses(/*AssumeEverything=*/true));
@ -339,7 +339,7 @@ bool FrameAnalysis::updateArgsTouchedFor(const BinaryFunction &BF, MCInst &Inst,
auto Iter = ArgsTouchedMap.find(Function);
bool Changed = false;
if (BC.MIA->isTailCall(Inst) && Iter != ArgsTouchedMap.end()) {
if (BC.MIB->isTailCall(Inst) && Iter != ArgsTouchedMap.end()) {
// Ignore checking CurOffset because we can't always reliably determine the
// offset, especially after an epilogue, where tail calls happen. It should be
// -8.
@ -442,7 +442,7 @@ bool FrameAnalysis::computeArgsAccessed(BinaryFunction &BF) {
for (auto &BB : BF) {
for (auto &Inst : BB) {
if (BC.MIA->requiresAlignedAddress(Inst)) {
if (BC.MIB->requiresAlignedAddress(Inst)) {
FunctionsRequireAlignment.insert(&BF);
return true;
}
@ -488,8 +488,8 @@ void FrameAnalysis::cleanAnnotations() {
for (auto &I : BFs) {
for (auto &BB : I.second) {
for (auto &Inst : BB) {
BC.MIA->removeAnnotation(Inst, "ArgAccessEntry");
BC.MIA->removeAnnotation(Inst, "FrameAccessEntry");
BC.MIB->removeAnnotation(Inst, "ArgAccessEntry");
BC.MIB->removeAnnotation(Inst, "FrameAccessEntry");
}
}
}

View File

@ -104,7 +104,7 @@ void FrameOptimizerPass::removeUnnecessaryLoads(const RegAnalysis &RA,
if (FIEX->StackOffset != FIEY->StackOffset || FIEX->Size != FIEY->Size)
continue;
// TODO: Change push/pops to stack adjustment instruction
if (BC.MIA->isPop(Inst))
if (BC.MIB->isPop(Inst))
continue;
++NumRedundantLoads;
@ -116,14 +116,14 @@ void FrameOptimizerPass::removeUnnecessaryLoads(const RegAnalysis &RA,
DEBUG(dbgs() << "@BB: " << BB.getName() << "\n");
// Replace load
if (FIEY->IsStoreFromReg) {
if (!BC.MIA->replaceMemOperandWithReg(Inst, FIEY->RegOrImm)) {
if (!BC.MIB->replaceMemOperandWithReg(Inst, FIEY->RegOrImm)) {
DEBUG(dbgs() << "FAILED to change operand to a reg\n");
break;
}
++NumLoadsChangedToReg;
BC.MIA->removeAnnotation(Inst, "FrameAccessEntry");
BC.MIB->removeAnnotation(Inst, "FrameAccessEntry");
DEBUG(dbgs() << "Changed operand to a reg\n");
if (BC.MIA->isRedundantMove(Inst)) {
if (BC.MIB->isRedundantMove(Inst)) {
++NumLoadsDeleted;
DEBUG(dbgs() << "Created a redundant move\n");
// Delete it!
@ -133,11 +133,11 @@ void FrameOptimizerPass::removeUnnecessaryLoads(const RegAnalysis &RA,
char Buf[8] = {0, 0, 0, 0, 0, 0, 0, 0};
support::ulittle64_t::ref(Buf + 0) = FIEY->RegOrImm;
DEBUG(dbgs() << "Changing operand to an imm... ");
if (!BC.MIA->replaceMemOperandWithImm(Inst, StringRef(Buf, 8), 0)) {
if (!BC.MIB->replaceMemOperandWithImm(Inst, StringRef(Buf, 8), 0)) {
DEBUG(dbgs() << "FAILED\n");
} else {
++NumLoadsChangedToImm;
BC.MIA->removeAnnotation(Inst, "FrameAccessEntry");
BC.MIB->removeAnnotation(Inst, "FrameAccessEntry");
DEBUG(dbgs() << "Ok\n");
}
}
@ -197,7 +197,7 @@ void FrameOptimizerPass::removeUnusedStores(const FrameAnalysis &FA,
continue;
}
// TODO: Change push/pops to stack adjustment instruction
if (BC.MIA->isPush(Inst))
if (BC.MIB->isPush(Inst))
continue;
++NumRedundantStores;

View File

@ -169,7 +169,7 @@ IndirectCallPromotion::getCallTargets(
if (!opts::ICPJumpTablesByTarget && JT->Type == JumpTable::JTT_PIC)
return Targets;
const Location From(BF.getSymbol());
const auto Range = JT->getEntriesForAddress(BC.MIA->getJumpTable(Inst));
const auto Range = JT->getEntriesForAddress(BC.MIB->getJumpTable(Inst));
assert(JT->Counts.empty() || JT->Counts.size() >= Range.second);
JumpTable::JumpInfo DefaultJI;
const auto *JI = JT->Counts.empty() ? &DefaultJI : &JT->Counts[Range.first];
@ -233,7 +233,7 @@ IndirectCallPromotion::getCallTargets(
return Targets;
}
auto ICSP =
BC.MIA->tryGetAnnotationAs<IndirectCallSiteProfile>(Inst, "CallProfile");
BC.MIB->tryGetAnnotationAs<IndirectCallSiteProfile>(Inst, "CallProfile");
if (ICSP) {
for (const auto &CSP : ICSP.get()) {
Callsite Site(BF, CSP);
@ -308,7 +308,7 @@ IndirectCallPromotion::maybeGetHotJumpTableTargets(
int64_t DispValue;
const MCExpr *DispExpr;
MutableArrayRef<MCInst> Insts(&BB->front(), &CallInst);
const auto Type = BC.MIA->analyzeIndirectBranch(
const auto Type = BC.MIB->analyzeIndirectBranch(
CallInst, Insts.begin(), Insts.end(), BC.AsmInfo->getCodePointerSize(),
MemLocInstr, BaseReg, IndexReg, DispValue, DispExpr, PCRelBaseOut);
@ -341,7 +341,7 @@ IndirectCallPromotion::maybeGetHotJumpTableTargets(
++TotalIndexBasedCandidates;
// Try to get value profiling data for the method load instruction.
auto DataOffset = BC.MIA->tryGetAnnotationAs<uint64_t>(*MemLocInstr,
auto DataOffset = BC.MIB->tryGetAnnotationAs<uint64_t>(*MemLocInstr,
"MemDataOffset");
if (!DataOffset) {
@ -451,7 +451,7 @@ IndirectCallPromotion::maybeGetHotJumpTableTargets(
}
});
BC.MIA->getOrCreateAnnotationAs<uint16_t>(BC.Ctx.get(),
BC.MIB->getOrCreateAnnotationAs<uint16_t>(BC.Ctx.get(),
CallInst,
"JTIndexReg") = IndexReg;
@ -561,7 +561,7 @@ IndirectCallPromotion::maybeGetVtableAddrs(
return MethodInfoType();
MutableArrayRef<MCInst> Insts(&BB->front(), &Inst + 1);
if (!BC.MIA->analyzeVirtualMethodCall(Insts.begin(),
if (!BC.MIB->analyzeVirtualMethodCall(Insts.begin(),
Insts.end(),
MethodFetchInsns,
VtableReg,
@ -588,7 +588,7 @@ IndirectCallPromotion::maybeGetVtableAddrs(
);
// Try to get value profiling data for the method load instruction.
auto DataOffset = BC.MIA->tryGetAnnotationAs<uint64_t>(*MethodFetchInsns.back(),
auto DataOffset = BC.MIB->tryGetAnnotationAs<uint64_t>(*MethodFetchInsns.back(),
"MemDataOffset");
if (!DataOffset) {
@ -667,12 +667,12 @@ IndirectCallPromotion::rewriteCall(
BinaryFunction &Function,
BinaryBasicBlock *IndCallBlock,
const MCInst &CallInst,
MCInstrAnalysis::ICPdata &&ICPcode,
MCPlusBuilder::ICPdata &&ICPcode,
const std::vector<MCInst *> &MethodFetchInsns
) const {
// Create new basic blocks with correct code in each one first.
std::vector<std::unique_ptr<BinaryBasicBlock>> NewBBs;
const bool IsTailCallOrJT = (BC.MIA->isTailCall(CallInst) ||
const bool IsTailCallOrJT = (BC.MIB->isTailCall(CallInst) ||
Function.getJumpTable(CallInst));
// Move instructions from the tail of the original call block
@ -709,8 +709,8 @@ IndirectCallPromotion::rewriteCall(
assert(Sym);
auto TBB = Function.createBasicBlock(0, Sym);
for (auto &Inst : Insts) { // sanitize new instructions.
if (BC.MIA->isCall(Inst))
BC.MIA->removeAnnotation(Inst, "CallProfile");
if (BC.MIB->isCall(Inst))
BC.MIB->removeAnnotation(Inst, "CallProfile");
}
TBB->addInstructions(Insts.begin(), Insts.end());
NewBBs.emplace_back(std::move(TBB));
@ -942,7 +942,7 @@ IndirectCallPromotion::canPromoteCallsite(const BinaryBasicBlock *BB,
if (opts::ICPTopCallsites > 0) {
auto &BC = BB->getFunction()->getBinaryContext();
if (BC.MIA->hasAnnotation(Inst, "DoICP")) {
if (BC.MIB->hasAnnotation(Inst, "DoICP")) {
computeStats(TrialN);
return TrialN;
}
@ -1060,7 +1060,7 @@ IndirectCallPromotion::printCallsiteInfo(const BinaryBasicBlock *BB,
const size_t N,
uint64_t NumCalls) const {
auto &BC = BB->getFunction()->getBinaryContext();
const bool IsTailCall = BC.MIA->isTailCall(Inst);
const bool IsTailCall = BC.MIB->isTailCall(Inst);
const bool IsJumpTable = BB->getFunction()->getJumpTable(Inst);
const auto InstIdx = &Inst - &(*BB->begin());
@ -1132,7 +1132,7 @@ void IndirectCallPromotion::runOnFunctions(
bool PrintBB = false;
for (auto &Inst : BB) {
if (auto Mem =
BC.MIA->tryGetAnnotationAs<uint64_t>(Inst, "MemDataOffset")) {
BC.MIB->tryGetAnnotationAs<uint64_t>(Inst, "MemDataOffset")) {
for (auto &MI : MemData->getMemInfoRange(Mem.get())) {
if (MI.Addr.IsSymbol) {
PrintBB = true;
@ -1185,9 +1185,9 @@ void IndirectCallPromotion::runOnFunctions(
for (auto &Inst : BB) {
const bool IsJumpTable = Function.getJumpTable(Inst);
const bool HasIndirectCallProfile =
BC.MIA->hasAnnotation(Inst, "CallProfile");
const bool IsDirectCall = (BC.MIA->isCall(Inst) &&
BC.MIA->getTargetSymbol(Inst, 0));
BC.MIB->hasAnnotation(Inst, "CallProfile");
const bool IsDirectCall = (BC.MIB->isCall(Inst) &&
BC.MIB->getTargetSymbol(Inst, 0));
if (!IsDirectCall &&
((HasIndirectCallProfile && !IsJumpTable && OptimizeCalls) ||
@ -1225,7 +1225,7 @@ void IndirectCallPromotion::runOnFunctions(
// Mark sites to optimize with "DoICP" annotation.
for (size_t I = 0; I < Num; ++I) {
auto *Inst = IndirectCalls[I].second;
BC.MIA->addAnnotation(BC.Ctx.get(), *Inst, "DoICP", true);
BC.MIB->addAnnotation(BC.Ctx.get(), *Inst, "DoICP", true);
}
}
@ -1263,12 +1263,12 @@ void IndirectCallPromotion::runOnFunctions(
for (unsigned Idx = 0; Idx < BB->size(); ++Idx) {
auto &Inst = BB->getInstructionAtIndex(Idx);
const auto InstIdx = &Inst - &(*BB->begin());
const bool IsTailCall = BC.MIA->isTailCall(Inst);
const bool IsTailCall = BC.MIB->isTailCall(Inst);
const bool HasIndirectCallProfile =
BC.MIA->hasAnnotation(Inst, "CallProfile");
BC.MIB->hasAnnotation(Inst, "CallProfile");
const bool IsJumpTable = Function.getJumpTable(Inst);
if (BC.MIA->isCall(Inst)) {
if (BC.MIB->isCall(Inst)) {
TotalCalls += BB->getKnownExecutionCount();
}
@ -1277,10 +1277,10 @@ void IndirectCallPromotion::runOnFunctions(
continue;
// Ignore direct calls.
if (BC.MIA->isCall(Inst) && BC.MIA->getTargetSymbol(Inst, 0))
if (BC.MIB->isCall(Inst) && BC.MIB->getTargetSymbol(Inst, 0))
continue;
assert((BC.MIA->isCall(Inst) || BC.MIA->isIndirectBranch(Inst))
assert((BC.MIB->isCall(Inst) || BC.MIB->isIndirectBranch(Inst))
&& "expected a call or an indirect jump instruction");
if (IsJumpTable)
@ -1304,7 +1304,7 @@ void IndirectCallPromotion::runOnFunctions(
// promoting because we will clobber FLAGS.
if (IsJumpTable) {
auto State = Info.getLivenessAnalysis().getStateBefore(Inst);
if (!State || (State && (*State)[BC.MIA->getFlagsReg()])) {
if (!State || (State && (*State)[BC.MIB->getFlagsReg()])) {
if (opts::Verbosity >= 1) {
outs() << "BOLT-INFO: ICP failed in " << Function << " @ "
<< InstIdx << " in " << BB->getName()
@ -1372,11 +1372,11 @@ void IndirectCallPromotion::runOnFunctions(
// Generate new promoted call code for this callsite.
auto ICPcode =
(IsJumpTable && !opts::ICPJumpTablesByTarget)
? BC.MIA->jumpTablePromotion(Inst,
? BC.MIB->jumpTablePromotion(Inst,
SymTargets,
MethodInfo.second,
BC.Ctx.get())
: BC.MIA->indirectCallPromotion(
: BC.MIB->indirectCallPromotion(
Inst, SymTargets, MethodInfo.first, MethodInfo.second,
opts::ICPOldCodeSequence, BC.Ctx.get());

View File

@ -218,7 +218,7 @@ class IndirectCallPromotion : public BinaryFunctionPass {
BinaryFunction &Function,
BinaryBasicBlock *IndCallBlock,
const MCInst &CallInst,
MCInstrAnalysis::ICPdata &&ICPcode,
MCPlusBuilder::ICPdata &&ICPcode,
const std::vector<MCInst *> &MethodFetchInsns) const;
BinaryBasicBlock *fixCFG(BinaryContext &BC,

View File

@ -54,8 +54,8 @@ void InlineSmallFunctions::findInliningCandidates(
if (BB.size() > 0 &&
BB.getNumNonPseudos() <= kMaxInstructions &&
BB.lp_empty() &&
BC.MIA->isReturn(LastInstruction) &&
!BC.MIA->isTailCall(LastInstruction)) {
BC.MIB->isReturn(LastInstruction) &&
!BC.MIB->isTailCall(LastInstruction)) {
InliningCandidates.insert(&Function);
}
}
@ -90,7 +90,7 @@ void InlineSmallFunctions::findInliningCandidatesAggressive(
bool FoundCFI = false;
for (const auto BB : Function.layout()) {
for (const auto &Inst : *BB) {
if (BC.MIA->isEHLabel(Inst) || BC.MIA->isCFI(Inst)) {
if (BC.MIB->isEHLabel(Inst) || BC.MIB->isCFI(Inst)) {
FoundCFI = true;
break;
}
@ -122,8 +122,8 @@ void InlineSmallFunctions::inlineCall(
BinaryBasicBlock &BB,
MCInst *CallInst,
const BinaryBasicBlock &InlinedFunctionBB) {
assert(BC.MIA->isCall(*CallInst) && "Can only inline a call.");
assert(BC.MIA->isReturn(*InlinedFunctionBB.rbegin()) &&
assert(BC.MIB->isCall(*CallInst) && "Can only inline a call.");
assert(BC.MIB->isReturn(*InlinedFunctionBB.rbegin()) &&
"Inlined function should end with a return.");
std::vector<MCInst> InlinedInstance;
@ -133,16 +133,16 @@ void InlineSmallFunctions::inlineCall(
// Move stack like 'call' would if needed.
if (ShouldAdjustStack) {
MCInst StackInc;
BC.MIA->createStackPointerIncrement(StackInc);
BC.MIB->createStackPointerIncrement(StackInc);
InlinedInstance.push_back(StackInc);
}
for (auto Instruction : InlinedFunctionBB) {
if (BC.MIA->isReturn(Instruction)) {
if (BC.MIB->isReturn(Instruction)) {
break;
}
if (!BC.MIA->isEHLabel(Instruction) &&
!BC.MIA->isCFI(Instruction)) {
if (!BC.MIB->isEHLabel(Instruction) &&
!BC.MIB->isCFI(Instruction)) {
InlinedInstance.push_back(Instruction);
}
}
@ -150,7 +150,7 @@ void InlineSmallFunctions::inlineCall(
// Move stack pointer like 'ret' would.
if (ShouldAdjustStack) {
MCInst StackDec;
BC.MIA->createStackPointerDecrement(StackDec);
BC.MIB->createStackPointerDecrement(StackDec);
InlinedInstance.push_back(StackDec);
}
@ -166,7 +166,7 @@ InlineSmallFunctions::inlineCall(
const BinaryFunction &InlinedFunction) {
// Get the instruction to be replaced with inlined code.
MCInst &CallInst = CallerBB->getInstructionAtIndex(CallInstIndex);
assert(BC.MIA->isCall(CallInst) && "Can only inline a call.");
assert(BC.MIB->isCall(CallInst) && "Can only inline a call.");
// Point in the function after the inlined code.
BinaryBasicBlock *AfterInlinedBB = nullptr;
@ -174,7 +174,7 @@ InlineSmallFunctions::inlineCall(
// In case of a tail call we should not remove any ret instructions from the
// inlined instance.
bool IsTailCall = BC.MIA->isTailCall(CallInst);
bool IsTailCall = BC.MIB->isTailCall(CallInst);
// The first block of the function to be inlined can be merged with the caller
// basic block. This cannot happen if there are jumps to the first block.
@ -223,30 +223,30 @@ InlineSmallFunctions::inlineCall(
// Copy instructions into the inlined instance.
for (auto Instruction : *InlinedFunctionBB) {
if (!IsTailCall &&
BC.MIA->isReturn(Instruction) &&
!BC.MIA->isTailCall(Instruction)) {
BC.MIB->isReturn(Instruction) &&
!BC.MIB->isTailCall(Instruction)) {
// Skip returns when the caller does a normal call as opposed to a tail
// call.
IsExitingBlock = true;
continue;
}
if (!IsTailCall &&
BC.MIA->isTailCall(Instruction)) {
BC.MIB->isTailCall(Instruction)) {
// Convert tail calls to normal calls when the caller does a normal
// call.
if (!BC.MIA->convertTailCallToCall(Instruction))
if (!BC.MIB->convertTailCallToCall(Instruction))
assert(false && "unexpected tail call opcode found");
IsExitingBlock = true;
}
if (BC.MIA->isBranch(Instruction) &&
!BC.MIA->isIndirectBranch(Instruction)) {
if (BC.MIB->isBranch(Instruction) &&
!BC.MIB->isIndirectBranch(Instruction)) {
// Convert the branch targets in the branch instructions that will be
// added to the inlined instance.
const MCSymbol *OldTargetLabel = nullptr;
const MCSymbol *OldFTLabel = nullptr;
MCInst *CondBranch = nullptr;
MCInst *UncondBranch = nullptr;
const bool Result = BC.MIA->analyzeBranch(&Instruction,
const bool Result = BC.MIB->analyzeBranch(&Instruction,
&Instruction + 1,
OldTargetLabel,
OldFTLabel, CondBranch,
@ -263,12 +263,12 @@ InlineSmallFunctions::inlineCall(
}
}
assert(NewTargetLabel);
BC.MIA->replaceBranchTarget(Instruction, NewTargetLabel, BC.Ctx.get());
BC.MIB->replaceBranchTarget(Instruction, NewTargetLabel, BC.Ctx.get());
}
// TODO: Currently we simply ignore CFI instructions but we need to
// address them for correctness.
if (!BC.MIA->isEHLabel(Instruction) &&
!BC.MIA->isCFI(Instruction)) {
if (!BC.MIB->isEHLabel(Instruction) &&
!BC.MIB->isCFI(Instruction)) {
InlinedInstanceBB->addInstruction(std::move(Instruction));
}
}
@ -433,7 +433,7 @@ bool InlineSmallFunctions::inlineCallsInFunction(
for (auto BB : Blocks) {
for (auto InstIt = BB->begin(), End = BB->end(); InstIt != End; ++InstIt) {
auto &Inst = *InstIt;
if (BC.MIA->isCall(Inst)) {
if (BC.MIB->isCall(Inst)) {
TotalDynamicCalls += BB->getExecutionCount();
}
}
@ -447,11 +447,11 @@ bool InlineSmallFunctions::inlineCallsInFunction(
for (auto InstIt = BB->begin(), End = BB->end(); InstIt != End; ) {
auto &Inst = *InstIt;
if (BC.MIA->isCall(Inst) &&
!BC.MIA->isTailCall(Inst) &&
if (BC.MIB->isCall(Inst) &&
!BC.MIB->isTailCall(Inst) &&
Inst.getNumPrimeOperands() == 1 &&
Inst.getOperand(0).isExpr()) {
const auto *TargetSymbol = BC.MIA->getTargetSymbol(Inst);
const auto *TargetSymbol = BC.MIB->getTargetSymbol(Inst);
assert(TargetSymbol && "target symbol expected for direct call");
const auto *TargetFunction = BC.getFunctionForSymbol(TargetSymbol);
if (TargetFunction) {
@ -499,7 +499,7 @@ bool InlineSmallFunctions::inlineCallsInFunctionAggressive(
for (auto BB : Blocks) {
for (auto InstIt = BB->begin(), End = BB->end(); InstIt != End; ++InstIt) {
auto &Inst = *InstIt;
if (BC.MIA->isCall(Inst)) {
if (BC.MIB->isCall(Inst)) {
TotalDynamicCalls += BB->getExecutionCount();
}
}
@ -514,11 +514,11 @@ bool InlineSmallFunctions::inlineCallsInFunctionAggressive(
unsigned InstIndex = 0;
for (auto InstIt = BB->begin(); InstIt != BB->end(); ) {
auto &Inst = *InstIt;
if (BC.MIA->isCall(Inst) &&
if (BC.MIB->isCall(Inst) &&
Inst.getNumPrimeOperands() == 1 &&
Inst.getOperand(0).isExpr()) {
assert(!BC.MIA->isInvoke(Inst));
const auto *TargetSymbol = BC.MIA->getTargetSymbol(Inst);
assert(!BC.MIB->isInvoke(Inst));
const auto *TargetSymbol = BC.MIB->getTargetSymbol(Inst);
assert(TargetSymbol && "target symbol expected for direct call");
const auto *TargetFunction = BC.getFunctionForSymbol(TargetSymbol);
if (TargetFunction) {

View File

@ -60,11 +60,11 @@ void JTFootprintReduction::checkOpportunities(BinaryContext &BC,
uint64_t Scale;
// Try a standard indirect jump matcher
auto IndJmpMatcher = BC.MIA->matchIndJmp(
BC.MIA->matchAnyOperand(), BC.MIA->matchImm(Scale),
BC.MIA->matchReg(), BC.MIA->matchAnyOperand());
auto IndJmpMatcher = BC.MIB->matchIndJmp(
BC.MIB->matchAnyOperand(), BC.MIB->matchImm(Scale),
BC.MIB->matchReg(), BC.MIB->matchAnyOperand());
if (!opts::JTFootprintOnlyPIC &&
IndJmpMatcher->match(*BC.MRI, *BC.MIA,
IndJmpMatcher->match(*BC.MRI, *BC.MIB,
MutableArrayRef<MCInst>(&*BB.begin(), &Inst + 1),
-1) &&
Scale == 8) {
@ -86,19 +86,19 @@ void JTFootprintReduction::checkOpportunities(BinaryContext &BC,
MCPhysReg BaseReg1;
MCPhysReg BaseReg2;
uint64_t Offset;
auto PICIndJmpMatcher = BC.MIA->matchIndJmp(BC.MIA->matchAdd(
BC.MIA->matchReg(BaseReg1),
BC.MIA->matchLoad(BC.MIA->matchReg(BaseReg2), BC.MIA->matchImm(Scale),
BC.MIA->matchReg(), BC.MIA->matchImm(Offset))));
auto PICBaseAddrMatcher = BC.MIA->matchIndJmp(
BC.MIA->matchAdd(BC.MIA->matchLoadAddr(BC.MIA->matchSymbol()),
BC.MIA->matchAnyOperand()));
auto PICIndJmpMatcher = BC.MIB->matchIndJmp(BC.MIB->matchAdd(
BC.MIB->matchReg(BaseReg1),
BC.MIB->matchLoad(BC.MIB->matchReg(BaseReg2), BC.MIB->matchImm(Scale),
BC.MIB->matchReg(), BC.MIB->matchImm(Offset))));
auto PICBaseAddrMatcher = BC.MIB->matchIndJmp(
BC.MIB->matchAdd(BC.MIB->matchLoadAddr(BC.MIB->matchSymbol()),
BC.MIB->matchAnyOperand()));
if (!PICIndJmpMatcher->match(
*BC.MRI, *BC.MIA,
*BC.MRI, *BC.MIB,
MutableArrayRef<MCInst>(&*BB.begin(), &Inst + 1), -1) ||
Scale != 4 || BaseReg1 != BaseReg2 || Offset != 0 ||
!PICBaseAddrMatcher->match(
*BC.MRI, *BC.MIA,
*BC.MRI, *BC.MIB,
MutableArrayRef<MCInst>(&*BB.begin(), &Inst + 1), -1)) {
BlacklistedJTs.insert(JumpTable);
++IndJmpsDenied;
@ -133,10 +133,10 @@ bool JTFootprintReduction::tryOptimizeNonPIC(
uint64_t Scale;
MCPhysReg Index;
MCOperand Offset;
auto IndJmpMatcher = BC.MIA->matchIndJmp(
BC.MIA->matchAnyOperand(Base), BC.MIA->matchImm(Scale),
BC.MIA->matchReg(Index), BC.MIA->matchAnyOperand(Offset));
if (!IndJmpMatcher->match(*BC.MRI, *BC.MIA,
auto IndJmpMatcher = BC.MIB->matchIndJmp(
BC.MIB->matchAnyOperand(Base), BC.MIB->matchImm(Scale),
BC.MIB->matchReg(Index), BC.MIB->matchAnyOperand(Offset));
if (!IndJmpMatcher->match(*BC.MRI, *BC.MIB,
MutableArrayRef<MCInst>(&*BB.begin(), &Inst + 1),
-1)) {
return false;
@ -145,7 +145,7 @@ bool JTFootprintReduction::tryOptimizeNonPIC(
assert(Scale == 8 && "Wrong scale");
Scale = 4;
IndJmpMatcher->annotate(*BC.MIA, *BC.Ctx.get(), "DeleteMe");
IndJmpMatcher->annotate(*BC.MIB, *BC.Ctx.get(), "DeleteMe");
auto &LA = Info.getLivenessAnalysis();
MCPhysReg Reg = LA.scavengeRegAfter(&Inst);
@ -153,9 +153,9 @@ bool JTFootprintReduction::tryOptimizeNonPIC(
auto RegOp = MCOperand::createReg(Reg);
SmallVector<MCInst, 4> NewFrag;
BC.MIA->createIJmp32Frag(NewFrag, Base, MCOperand::createImm(Scale),
BC.MIB->createIJmp32Frag(NewFrag, Base, MCOperand::createImm(Scale),
MCOperand::createReg(Index), Offset, RegOp);
BC.MIA->setJumpTable(BC.Ctx.get(), NewFrag.back(), JTAddr, Index);
BC.MIB->setJumpTable(BC.Ctx.get(), NewFrag.back(), JTAddr, Index);
JumpTable->OutputEntrySize = 4;
@ -171,11 +171,11 @@ bool JTFootprintReduction::tryOptimizePIC(
MCPhysReg Index;
MCOperand Offset;
MCOperand JumpTableRef;
auto PICIndJmpMatcher = BC.MIA->matchIndJmp(BC.MIA->matchAdd(
BC.MIA->matchLoadAddr(BC.MIA->matchAnyOperand(JumpTableRef)),
BC.MIA->matchLoad(BC.MIA->matchReg(BaseReg), BC.MIA->matchImm(Scale),
BC.MIA->matchReg(Index), BC.MIA->matchAnyOperand())));
if (!PICIndJmpMatcher->match(*BC.MRI, *BC.MIA,
auto PICIndJmpMatcher = BC.MIB->matchIndJmp(BC.MIB->matchAdd(
BC.MIB->matchLoadAddr(BC.MIB->matchAnyOperand(JumpTableRef)),
BC.MIB->matchLoad(BC.MIB->matchReg(BaseReg), BC.MIB->matchImm(Scale),
BC.MIB->matchReg(Index), BC.MIB->matchAnyOperand())));
if (!PICIndJmpMatcher->match(*BC.MRI, *BC.MIB,
MutableArrayRef<MCInst>(&*BB.begin(), &Inst + 1),
-1)) {
return false;
@ -183,15 +183,15 @@ bool JTFootprintReduction::tryOptimizePIC(
assert(Scale == 4 && "Wrong scale");
PICIndJmpMatcher->annotate(*BC.MIA, *BC.Ctx.get(), "DeleteMe");
PICIndJmpMatcher->annotate(*BC.MIB, *BC.Ctx.get(), "DeleteMe");
auto RegOp = MCOperand::createReg(BaseReg);
SmallVector<MCInst, 4> NewFrag;
BC.MIA->createIJmp32Frag(NewFrag, MCOperand::createReg(0),
BC.MIB->createIJmp32Frag(NewFrag, MCOperand::createReg(0),
MCOperand::createImm(Scale),
MCOperand::createReg(Index), JumpTableRef, RegOp);
BC.MIA->setJumpTable(BC.Ctx.get(), NewFrag.back(), JTAddr, Index);
BC.MIB->setJumpTable(BC.Ctx.get(), NewFrag.back(), JTAddr, Index);
JumpTable->OutputEntrySize = 4;
// DePICify
@ -209,7 +209,7 @@ void JTFootprintReduction::optimizeFunction(BinaryContext &BC,
continue;
MCInst &IndJmp = *BB.getLastNonPseudo();
uint64_t JTAddr = BC.MIA->getJumpTable(IndJmp);
uint64_t JTAddr = BC.MIB->getJumpTable(IndJmp);
if (!JTAddr)
continue;
@ -232,7 +232,7 @@ void JTFootprintReduction::optimizeFunction(BinaryContext &BC,
for (auto &BB : Function) {
for (auto I = BB.rbegin(), E = BB.rend(); I != E; ++I) {
if (BC.MIA->hasAnnotation(*I, "DeleteMe"))
if (BC.MIB->hasAnnotation(*I, "DeleteMe"))
BB.eraseInstruction(&*I);
}
}

View File

@ -44,7 +44,7 @@ public:
bool isAlive(ProgramPoint PP, MCPhysReg Reg) const {
BitVector BV = (*this->getStateAt(PP));
const BitVector &RegAliases = BC.MIA->getAliases(Reg);
const BitVector &RegAliases = BC.MIB->getAliases(Reg);
BV &= RegAliases;
return BV.any();
}
@ -61,10 +61,10 @@ public:
BitVector BV = *this->getStateAt(P);
BV.flip();
BitVector GPRegs(NumRegs, false);
this->BC.MIA->getGPRegs(GPRegs, /*IncludeAlias=*/false);
this->BC.MIB->getGPRegs(GPRegs, /*IncludeAlias=*/false);
// Ignore the register used for frame pointer even if it is not alive (it
// may be used by CFI which is not represented in our dataflow).
auto FP = BC.MIA->getAliases(BC.MIA->getFramePointer());
auto FP = BC.MIB->getAliases(BC.MIB->getFramePointer());
FP.flip();
BV &= GPRegs;
BV &= FP;
@ -85,11 +85,11 @@ protected:
if (BB.succ_size() == 0) {
BitVector State(NumRegs, false);
if (opts::AssumeABI) {
BC.MIA->getDefaultLiveOut(State);
BC.MIA->getCalleeSavedRegs(State);
BC.MIB->getDefaultLiveOut(State);
BC.MIB->getCalleeSavedRegs(State);
} else {
State.set();
State.reset(BC.MIA->getFlagsReg());
State.reset(BC.MIB->getFlagsReg());
}
return State;
}
@ -106,11 +106,11 @@ protected:
BitVector computeNext(const MCInst &Point, const BitVector &Cur) {
BitVector Next = Cur;
bool IsCall = this->BC.MIA->isCall(Point);
bool IsCall = this->BC.MIB->isCall(Point);
// Kill
auto Written = BitVector(NumRegs, false);
if (!IsCall) {
this->BC.MIA->getWrittenRegs(Point, Written);
this->BC.MIB->getWrittenRegs(Point, Written);
} else {
RA.getInstClobberList(Point, Written);
// When clobber list is conservative, it is clobbering all/most registers,
@ -119,12 +119,12 @@ protected:
// because we don't really know what's going on.
if (RA.isConservative(Written)) {
Written.reset();
BC.MIA->getDefaultLiveOut(Written);
BC.MIB->getDefaultLiveOut(Written);
// If ABI is respected, everything except CSRs should be dead after a
// call
if (opts::AssumeABI) {
auto CSR = BitVector(NumRegs, false);
BC.MIA->getCalleeSavedRegs(CSR);
BC.MIB->getCalleeSavedRegs(CSR);
CSR.flip();
Written |= CSR;
}
@ -133,36 +133,36 @@ protected:
Written.flip();
Next &= Written;
// Gen
if (!this->BC.MIA->isCFI(Point)) {
if (BC.MIA->isCleanRegXOR(Point))
if (!this->BC.MIB->isCFI(Point)) {
if (BC.MIB->isCleanRegXOR(Point))
return Next;
auto Used = BitVector(NumRegs, false);
if (IsCall) {
RA.getInstUsedRegsList(Point, Used, /*GetClobbers*/true);
if (RA.isConservative(Used)) {
Used = BC.MIA->getRegsUsedAsParams();
BC.MIA->getDefaultLiveOut(Used);
Used = BC.MIB->getRegsUsedAsParams();
BC.MIB->getDefaultLiveOut(Used);
}
}
const auto InstInfo = BC.MII->get(Point.getOpcode());
for (unsigned I = 0, E = Point.getNumOperands(); I != E; ++I) {
if (!Point.getOperand(I).isReg() || I < InstInfo.getNumDefs())
continue;
Used |= BC.MIA->getAliases(Point.getOperand(I).getReg(),
Used |= BC.MIB->getAliases(Point.getOperand(I).getReg(),
/*OnlySmaller=*/false);
}
for (auto
I = InstInfo.getImplicitUses(),
E = InstInfo.getImplicitUses() + InstInfo.getNumImplicitUses();
I != E; ++I) {
Used |= BC.MIA->getAliases(*I, false);
Used |= BC.MIB->getAliases(*I, false);
}
if (IsCall &&
(!BC.MIA->isTailCall(Point) || !BC.MIA->isConditionalBranch(Point))) {
(!BC.MIB->isTailCall(Point) || !BC.MIB->isConditionalBranch(Point))) {
// Never gen FLAGS from a non-conditional call... this is overly
// conservative
Used.reset(BC.MIA->getFlagsReg());
Used.reset(BC.MIB->getFlagsReg());
}
Next |= Used;
}

View File

@ -34,7 +34,7 @@ createNewStub(const BinaryContext &BC, BinaryFunction &Func,
auto *StubSym = BC.Ctx->createTempSymbol("Stub", true);
auto StubBB = Func.createBasicBlock(0, StubSym);
std::vector<MCInst> Seq;
BC.MIA->createLongJmp(Seq, TgtSym, BC.Ctx.get());
BC.MIB->createLongJmp(Seq, TgtSym, BC.Ctx.get());
StubBB->addInstructions(Seq.begin(), Seq.end());
StubBB->setExecutionCount(0);
return std::make_pair(std::move(StubBB), StubSym);
@ -43,7 +43,7 @@ createNewStub(const BinaryContext &BC, BinaryFunction &Func,
void shrinkStubToShortJmp(const BinaryContext &BC, BinaryBasicBlock &StubBB,
const MCSymbol *Tgt) {
std::vector<MCInst> Seq;
BC.MIA->createShortJmp(Seq, Tgt, BC.Ctx.get());
BC.MIB->createShortJmp(Seq, Tgt, BC.Ctx.get());
StubBB.clear();
StubBB.addInstructions(Seq.begin(), Seq.end());
}
@ -51,9 +51,9 @@ void shrinkStubToShortJmp(const BinaryContext &BC, BinaryBasicBlock &StubBB,
void shrinkStubToSingleInst(const BinaryContext &BC, BinaryBasicBlock &StubBB,
const MCSymbol *Tgt, bool TgtIsFunc) {
MCInst Inst;
BC.MIA->createUncondBranch(Inst, Tgt, BC.Ctx.get());
BC.MIB->createUncondBranch(Inst, Tgt, BC.Ctx.get());
if (TgtIsFunc)
BC.MIA->convertJmpToTailCall(Inst, BC.Ctx.get());
BC.MIB->convertJmpToTailCall(Inst, BC.Ctx.get());
StubBB.clear();
StubBB.addInstruction(Inst);
}
@ -77,7 +77,7 @@ LongJmpPass::replaceTargetWithStub(const BinaryContext &BC,
BinaryFunction &Func, BinaryBasicBlock &BB,
MCInst &Inst) {
std::unique_ptr<BinaryBasicBlock> NewBB;
auto TgtSym = BC.MIA->getTargetSymbol(Inst);
auto TgtSym = BC.MIB->getTargetSymbol(Inst);
assert (TgtSym && "getTargetSymbol failed");
BinaryBasicBlock::BinaryBranchInfo BI{0, 0};
@ -87,14 +87,14 @@ LongJmpPass::replaceTargetWithStub(const BinaryContext &BC,
if (TgtBB && TgtBB->isCold() == BB.isCold()) {
// Assume we have only half the available space, to account for the increase
// in function size due to extra blocks being inserted (conservative estimate).
auto BitsAvail = BC.MIA->getPCRelEncodingSize(Inst) - 2;
auto BitsAvail = BC.MIB->getPCRelEncodingSize(Inst) - 2;
uint64_t Mask = ~((1ULL << BitsAvail) - 1);
if (!(Func.getMaxSize() & Mask))
return nullptr;
// This is a special case for fixBranches, which is usually free to swap
// targets when a block has two successors. The other successor may not
// fit in this instruction either.
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, "DoNotChangeTarget", true);
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, "DoNotChangeTarget", true);
}
BinaryBasicBlock *StubBB =
@ -128,7 +128,7 @@ LongJmpPass::replaceTargetWithStub(const BinaryContext &BC,
StubBB->setEntryPoint(true);
}
}
BC.MIA->replaceBranchTarget(Inst, StubSymbol, BC.Ctx.get());
BC.MIB->replaceBranchTarget(Inst, StubSymbol, BC.Ctx.get());
++StubRefCount[StubBB];
StubBits[StubBB] = BC.AsmInfo->getCodePointerSize() * 8;
@ -145,8 +145,8 @@ LongJmpPass::replaceTargetWithStub(const BinaryContext &BC,
namespace {
bool shouldInsertStub(const BinaryContext &BC, const MCInst &Inst) {
return (BC.MIA->isBranch(Inst) || BC.MIA->isCall(Inst)) &&
!BC.MIA->isIndirectBranch(Inst) && !BC.MIA->isIndirectCall(Inst);
return (BC.MIB->isBranch(Inst) || BC.MIB->isCall(Inst)) &&
!BC.MIB->isIndirectBranch(Inst) && !BC.MIB->isIndirectCall(Inst);
}
}
@ -166,8 +166,8 @@ void LongJmpPass::insertStubs(const BinaryContext &BC, BinaryFunction &Func) {
// Insert stubs close to the patched BB if call, but far away from the
// hot path if a branch, since this branch target is the cold region.
BinaryBasicBlock *InsertionPoint = &BB;
if (!BC.MIA->isCall(Inst) && Frontier && !BB.isCold()) {
auto BitsAvail = BC.MIA->getPCRelEncodingSize(Inst) - 2;
if (!BC.MIB->isCall(Inst) && Frontier && !BB.isCold()) {
auto BitsAvail = BC.MIB->getPCRelEncodingSize(Inst) - 2;
uint64_t Mask = ~((1ULL << BitsAvail) - 1);
if (!(Func.getMaxSize() & Mask))
InsertionPoint = Frontier;
@ -314,7 +314,7 @@ void LongJmpPass::removeStubRef(const BinaryContext &BC,
BinaryBasicBlock *StubBB,
const MCSymbol *Target,
BinaryBasicBlock *TgtBB) {
BC.MIA->replaceBranchTarget(Inst, Target, BC.Ctx.get());
BC.MIB->replaceBranchTarget(Inst, Target, BC.Ctx.get());
--StubRefCount[StubBB];
assert(StubRefCount[StubBB] >= 0 && "Ref count is lost");
@ -336,7 +336,7 @@ void LongJmpPass::removeStubRef(const BinaryContext &BC,
bool LongJmpPass::usesStub(const BinaryContext &BC, const BinaryFunction &Func,
const MCInst &Inst) const {
auto TgtSym = BC.MIA->getTargetSymbol(Inst);
auto TgtSym = BC.MIB->getTargetSymbol(Inst);
auto *TgtBB = Func.getBasicBlockForLabel(TgtSym);
auto Iter = Stubs.find(&Func);
if (Iter != Stubs.end())
@ -383,33 +383,33 @@ bool LongJmpPass::removeOrShrinkStubs(const BinaryContext &BC,
// Compute the DoNotChangeTarget annotation for cases where fixBranches
// cannot swap targets.
if (BC.MIA->isConditionalBranch(Inst) && BB.succ_size() == 2) {
if (BC.MIB->isConditionalBranch(Inst) && BB.succ_size() == 2) {
auto *SuccBB = BB.getConditionalSuccessor(false);
bool IsStub = false;
auto Iter = Stubs.find(&Func);
if (Iter != Stubs.end())
IsStub = Iter->second.count(SuccBB);
auto *RealTargetSym =
IsStub ? BC.MIA->getTargetSymbol(*SuccBB->begin()) : nullptr;
IsStub ? BC.MIB->getTargetSymbol(*SuccBB->begin()) : nullptr;
if (IsStub)
SuccBB = Func.getBasicBlockForLabel(RealTargetSym);
uint64_t Offset = getSymbolAddress(BC, RealTargetSym, SuccBB);
auto BitsAvail = BC.MIA->getPCRelEncodingSize(Inst) - 1;
auto BitsAvail = BC.MIB->getPCRelEncodingSize(Inst) - 1;
uint64_t Mask = ~((1ULL << BitsAvail) - 1);
if ((Offset & Mask) &&
!BC.MIA->hasAnnotation(Inst, "DoNotChangeTarget")) {
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, "DoNotChangeTarget", true);
!BC.MIB->hasAnnotation(Inst, "DoNotChangeTarget")) {
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, "DoNotChangeTarget", true);
} else if ((!(Offset & Mask)) &&
BC.MIA->hasAnnotation(Inst, "DoNotChangeTarget")) {
BC.MIA->removeAnnotation(Inst, "DoNotChangeTarget");
BC.MIB->hasAnnotation(Inst, "DoNotChangeTarget")) {
BC.MIB->removeAnnotation(Inst, "DoNotChangeTarget");
}
}
auto StubSym = BC.MIA->getTargetSymbol(Inst);
auto StubSym = BC.MIB->getTargetSymbol(Inst);
auto *StubBB = Func.getBasicBlockForLabel(StubSym);
auto *RealTargetSym = BC.MIA->getTargetSymbol(*StubBB->begin());
auto *RealTargetSym = BC.MIB->getTargetSymbol(*StubBB->begin());
auto *TgtBB = Func.getBasicBlockForLabel(RealTargetSym);
auto BitsAvail = BC.MIA->getPCRelEncodingSize(Inst) - 1;
auto BitsAvail = BC.MIB->getPCRelEncodingSize(Inst) - 1;
uint64_t Mask = ~((1ULL << BitsAvail) - 1);
uint64_t Offset = getSymbolAddress(BC, RealTargetSym, TgtBB);
if (DotAddress > Offset)
@ -425,8 +425,8 @@ bool LongJmpPass::removeOrShrinkStubs(const BinaryContext &BC,
}
}
auto RangeShortJmp = BC.MIA->getShortJmpEncodingSize();
auto RangeSingleInstr = BC.MIA->getUncondBranchEncodingSize();
auto RangeShortJmp = BC.MIB->getShortJmpEncodingSize();
auto RangeSingleInstr = BC.MIB->getUncondBranchEncodingSize();
uint64_t ShortJmpMask = ~((1ULL << RangeShortJmp) - 1);
uint64_t SingleInstrMask = ~((1ULL << (RangeSingleInstr - 1)) - 1);
// Shrink stubs from 64 to 32 or 28 bit whenever possible
@ -440,7 +440,7 @@ bool LongJmpPass::removeOrShrinkStubs(const BinaryContext &BC,
continue;
// Attempt to tighten to a short jmp
auto *RealTargetSym = BC.MIA->getTargetSymbol(*BB.begin());
auto *RealTargetSym = BC.MIB->getTargetSymbol(*BB.begin());
auto *TgtBB = Func.getBasicBlockForLabel(RealTargetSym);
uint64_t DotAddress = BBAddresses[&BB];
uint64_t TgtAddress = getSymbolAddress(BC, RealTargetSym, TgtBB);

View File

@ -65,15 +65,15 @@ void PLTCall::runOnFunctions(
continue;
for (auto &Instr : *BB) {
if (!BC.MIA->isCall(Instr))
if (!BC.MIB->isCall(Instr))
continue;
const auto *CallSymbol = BC.MIA->getTargetSymbol(Instr);
const auto *CallSymbol = BC.MIB->getTargetSymbol(Instr);
if (!CallSymbol)
continue;
const auto *CalleeBF = BC.getFunctionForSymbol(CallSymbol);
if (!CalleeBF || !CalleeBF->isPLTFunction())
continue;
BC.MIA->convertCallToIndirectCall(Instr,
BC.MIB->convertCallToIndirectCall(Instr,
CalleeBF->getPLTSymbol(),
BC.Ctx.get());
++NumCallsOptimized;

View File

@ -45,7 +45,7 @@ public:
if (Def) {
RA.getInstClobberList(**I, BV);
} else {
this->BC.MIA->getTouchedRegs(**I, BV);
this->BC.MIB->getTouchedRegs(**I, BV);
}
if (BV[Reg])
return true;
@ -102,7 +102,7 @@ protected:
if (Def)
RA.getInstClobberList(*Y, YClobbers);
else
this->BC.MIA->getTouchedRegs(*Y, YClobbers);
this->BC.MIB->getTouchedRegs(*Y, YClobbers);
// X kills Y if it clobbers Y completely -- this is a conservative approach.
// In practice, we may produce use-def links that may not exist.
XClobbers &= YClobbers;
@ -119,7 +119,7 @@ protected:
}
}
// Gen
if (!this->BC.MIA->isCFI(Point)) {
if (!this->BC.MIB->isCFI(Point)) {
Next.set(this->ExprToIdx[&Point]);
}
return Next;

View File

@ -79,7 +79,7 @@ protected:
BitVector computeNext(const MCInst &Point, const BitVector &Cur) {
BitVector Next = Cur;
// Gen
if (!this->BC.MIA->isCFI(Point)) {
if (!this->BC.MIB->isCFI(Point)) {
Next.set(this->ExprToIdx[&Point]);
}
return Next;

View File

@ -93,7 +93,7 @@ void RegAnalysis::beConservative(BitVector &Result) const {
Result.set();
} else {
BitVector BV(BC.MRI->getNumRegs(), false);
BC.MIA->getCalleeSavedRegs(BV);
BC.MIB->getCalleeSavedRegs(BV);
BV.flip();
Result |= BV;
}
@ -104,7 +104,7 @@ bool RegAnalysis::isConservative(BitVector &Vec) const {
return Vec.all();
} else {
BitVector BV(BC.MRI->getNumRegs(), false);
BC.MIA->getCalleeSavedRegs(BV);
BC.MIB->getCalleeSavedRegs(BV);
BV |= Vec;
return BV.all();
}
@ -112,15 +112,15 @@ bool RegAnalysis::isConservative(BitVector &Vec) const {
void RegAnalysis::getInstUsedRegsList(const MCInst &Inst, BitVector &RegSet,
bool GetClobbers) const {
if (!BC.MIA->isCall(Inst)) {
if (!BC.MIB->isCall(Inst)) {
if (GetClobbers)
BC.MIA->getClobberedRegs(Inst, RegSet);
BC.MIB->getClobberedRegs(Inst, RegSet);
else
BC.MIA->getUsedRegs(Inst, RegSet);
BC.MIB->getUsedRegs(Inst, RegSet);
return;
}
const auto *TargetSymbol = BC.MIA->getTargetSymbol(Inst);
const auto *TargetSymbol = BC.MIB->getTargetSymbol(Inst);
// If indirect call, we know nothing
if (TargetSymbol == nullptr) {
beConservative(RegSet);

View File

@ -38,8 +38,8 @@ namespace bolt {
void RegReAssign::swap(BinaryContext &BC, BinaryFunction &Function, MCPhysReg A,
MCPhysReg B) {
const BitVector &AliasA = BC.MIA->getAliases(A, false);
const BitVector &AliasB = BC.MIA->getAliases(B, false);
const BitVector &AliasA = BC.MIB->getAliases(A, false);
const BitVector &AliasB = BC.MIB->getAliases(B, false);
// Regular instructions
for (auto &BB : Function) {
@ -51,14 +51,14 @@ void RegReAssign::swap(BinaryContext &BC, BinaryFunction &Function, MCPhysReg A,
auto Reg = Operand.getReg();
if (AliasA.test(Reg)) {
Operand.setReg(BC.MIA->getAliasSized(B, BC.MIA->getRegSize(Reg)));
Operand.setReg(BC.MIB->getAliasSized(B, BC.MIB->getRegSize(Reg)));
--StaticBytesSaved;
DynBytesSaved -= BB.getKnownExecutionCount();
continue;
}
if (!AliasB.test(Reg))
continue;
Operand.setReg(BC.MIA->getAliasSized(A, BC.MIA->getRegSize(Reg)));
Operand.setReg(BC.MIB->getAliasSized(A, BC.MIB->getRegSize(Reg)));
++StaticBytesSaved;
DynBytesSaved += BB.getKnownExecutionCount();
}
@ -69,7 +69,7 @@ void RegReAssign::swap(BinaryContext &BC, BinaryFunction &Function, MCPhysReg A,
DenseSet<const MCCFIInstruction *> Changed;
for (auto &BB : Function) {
for (auto &Inst : BB) {
if (!BC.MIA->isCFI(Inst))
if (!BC.MIB->isCFI(Inst))
continue;
auto *CFI = Function.getCFIFor(Inst);
if (Changed.count(CFI))
@ -82,10 +82,10 @@ void RegReAssign::swap(BinaryContext &BC, BinaryFunction &Function, MCPhysReg A,
const MCPhysReg Reg2 = BC.MRI->getLLVMRegNum(CFIReg2, /*isEH=*/false);
if (AliasA.test(Reg2)) {
CFI->setRegister2(BC.MRI->getDwarfRegNum(
BC.MIA->getAliasSized(B, BC.MIA->getRegSize(Reg2)), false));
BC.MIB->getAliasSized(B, BC.MIB->getRegSize(Reg2)), false));
} else if (AliasB.test(Reg2)) {
CFI->setRegister2(BC.MRI->getDwarfRegNum(
BC.MIA->getAliasSized(A, BC.MIA->getRegSize(Reg2)), false));
BC.MIB->getAliasSized(A, BC.MIB->getRegSize(Reg2)), false));
}
}
// Fall-through
@ -102,10 +102,10 @@ void RegReAssign::swap(BinaryContext &BC, BinaryFunction &Function, MCPhysReg A,
const MCPhysReg Reg = BC.MRI->getLLVMRegNum(CFIReg, /*isEH=*/false);
if (AliasA.test(Reg)) {
CFI->setRegister(BC.MRI->getDwarfRegNum(
BC.MIA->getAliasSized(B, BC.MIA->getRegSize(Reg)), false));
BC.MIB->getAliasSized(B, BC.MIB->getRegSize(Reg)), false));
} else if (AliasB.test(Reg)) {
CFI->setRegister(BC.MRI->getDwarfRegNum(
BC.MIA->getAliasSized(A, BC.MIA->getRegSize(Reg)), false));
BC.MIB->getAliasSized(A, BC.MIB->getRegSize(Reg)), false));
}
break;
}
@ -122,14 +122,14 @@ void RegReAssign::rankRegisters(BinaryContext &BC, BinaryFunction &Function) {
for (auto &BB : Function) {
for (auto &Inst : BB) {
const bool CannotUseREX = BC.MIA->cannotUseREX(Inst);
const bool CannotUseREX = BC.MIB->cannotUseREX(Inst);
const auto &Desc = BC.MII->get(Inst.getOpcode());
// Disallow substitutions involving regs in implicit-use lists
const auto *ImplicitUses = Desc.getImplicitUses();
while (ImplicitUses && *ImplicitUses) {
const size_t RegEC =
BC.MIA->getAliases(*ImplicitUses, false).find_first();
BC.MIB->getAliases(*ImplicitUses, false).find_first();
RegScore[RegEC] =
std::numeric_limits<decltype(RegScore)::value_type>::min();
++ImplicitUses;
@ -139,7 +139,7 @@ void RegReAssign::rankRegisters(BinaryContext &BC, BinaryFunction &Function) {
const auto *ImplicitDefs = Desc.getImplicitDefs();
while (ImplicitDefs && *ImplicitDefs) {
const size_t RegEC =
BC.MIA->getAliases(*ImplicitDefs, false).find_first();
BC.MIB->getAliases(*ImplicitDefs, false).find_first();
RegScore[RegEC] =
std::numeric_limits<decltype(RegScore)::value_type>::min();
++ImplicitDefs;
@ -154,7 +154,7 @@ void RegReAssign::rankRegisters(BinaryContext &BC, BinaryFunction &Function) {
continue;
auto Reg = Operand.getReg();
size_t RegEC = BC.MIA->getAliases(Reg, false).find_first();
size_t RegEC = BC.MIB->getAliases(Reg, false).find_first();
if (RegEC == 0)
continue;
@ -166,7 +166,7 @@ void RegReAssign::rankRegisters(BinaryContext &BC, BinaryFunction &Function) {
}
// Unsupported substitution, cannot swap BH with R* regs, bail
if (BC.MIA->isUpper8BitReg(Reg) && ClassicCSR.test(Reg)) {
if (BC.MIB->isUpper8BitReg(Reg) && ClassicCSR.test(Reg)) {
RegScore[RegEC] =
std::numeric_limits<decltype(RegScore)::value_type>::min();
continue;
@ -238,9 +238,9 @@ void RegReAssign::aggressivePassOverFunction(BinaryContext &BC,
ProgramPoint::getFirstPointAt(BB));
}
// Mark frame pointer alive because of CFI
AliveAtStart |= BC.MIA->getAliases(BC.MIA->getFramePointer(), false);
AliveAtStart |= BC.MIB->getAliases(BC.MIB->getFramePointer(), false);
// Never touch return registers
BC.MIA->getDefaultLiveOut(AliveAtStart);
BC.MIB->getDefaultLiveOut(AliveAtStart);
// Try swapping more profitable options first
auto Begin = RankedRegs.begin();
@ -266,7 +266,7 @@ void RegReAssign::aggressivePassOverFunction(BinaryContext &BC,
}
BitVector AnyAliasAlive = AliveAtStart;
AnyAliasAlive &= BC.MIA->getAliases(ClassicReg);
AnyAliasAlive &= BC.MIB->getAliases(ClassicReg);
if (AnyAliasAlive.any()) {
DEBUG(dbgs() << " Bailed on " << BC.MRI->getName(ClassicReg) << " with "
<< BC.MRI->getName(ExtReg)
@ -275,7 +275,7 @@ void RegReAssign::aggressivePassOverFunction(BinaryContext &BC,
continue;
}
AnyAliasAlive = AliveAtStart;
AnyAliasAlive &= BC.MIA->getAliases(ExtReg);
AnyAliasAlive &= BC.MIB->getAliases(ExtReg);
if (AnyAliasAlive.any()) {
DEBUG(dbgs() << " Bailed on " << BC.MRI->getName(ClassicReg) << " with "
<< BC.MRI->getName(ExtReg)
@ -342,7 +342,7 @@ void RegReAssign::setupAggressivePass(BinaryContext &BC,
RA.reset(new RegAnalysis(BC, BFs, *CG));
GPRegs = BitVector(BC.MRI->getNumRegs(), false);
BC.MIA->getGPRegs(GPRegs);
BC.MIB->getGPRegs(GPRegs);
}
void RegReAssign::setupConservativePass(
@ -353,14 +353,14 @@ void RegReAssign::setupConservativePass(
ClassicCSR = BitVector(BC.MRI->getNumRegs(), false);
ExtendedCSR = BitVector(BC.MRI->getNumRegs(), false);
// Never consider the frame pointer
BC.MIA->getClassicGPRegs(ClassicRegs);
BC.MIB->getClassicGPRegs(ClassicRegs);
ClassicRegs.flip();
ClassicRegs |= BC.MIA->getAliases(BC.MIA->getFramePointer(), false);
ClassicRegs |= BC.MIB->getAliases(BC.MIB->getFramePointer(), false);
ClassicRegs.flip();
BC.MIA->getCalleeSavedRegs(CalleeSaved);
BC.MIB->getCalleeSavedRegs(CalleeSaved);
ClassicCSR |= ClassicRegs;
ClassicCSR &= CalleeSaved;
BC.MIA->getClassicGPRegs(ClassicRegs);
BC.MIB->getClassicGPRegs(ClassicRegs);
ExtendedCSR |= ClassicRegs;
ExtendedCSR.flip();
ExtendedCSR &= CalleeSaved;
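
Spelled out, the BitVector algebra in setupConservativePass computes two
register classes: ClassicCSR is the callee-saved registers that are also
classic GPRs (with the frame pointer's aliases masked out of ClassicRegs by
the flip/|=/flip sequence, which is just ClassicRegs &= ~FPAliases written
with the available BitVector operations), and ExtendedCSR is the callee-saved
registers outside the classic set (the code re-fetches the full classic set
via getClassicGPRegs before computing it). An equivalent, more direct
formulation (sketch):

  // ClassicCSR  = ClassicRegs & CalleeSaved
  llvm::BitVector ClassicCSR = ClassicRegs;
  ClassicCSR &= CalleeSaved;

  // ExtendedCSR = ~ClassicRegs & CalleeSaved
  llvm::BitVector ExtendedCSR = ClassicRegs;
  ExtendedCSR.flip();
  ExtendedCSR &= CalleeSaved;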


@ -101,7 +101,7 @@ void CalleeSavedAnalysis::analyzeSaves() {
CalleeSaved.set(FIE->RegOrImm);
SaveFIEByReg[FIE->RegOrImm] = &*FIE;
SavingCost[FIE->RegOrImm] += InsnToBB[&Inst]->getKnownExecutionCount();
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, getSaveTag(), FIE->RegOrImm);
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, getSaveTag(), FIE->RegOrImm);
OffsetsByReg[FIE->RegOrImm] = FIE->StackOffset;
DEBUG(dbgs() << "Logging new candidate for Callee-Saved Reg: "
<< FIE->RegOrImm << "\n");
@ -152,7 +152,7 @@ void CalleeSavedAnalysis::analyzeRestores() {
<< "\n");
if (LoadFIEByReg[FIE->RegOrImm] == nullptr)
LoadFIEByReg[FIE->RegOrImm] = &*FIE;
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, getRestoreTag(),
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, getRestoreTag(),
FIE->RegOrImm);
HasRestores.set(FIE->RegOrImm);
}
@ -186,8 +186,8 @@ std::vector<MCInst *> CalleeSavedAnalysis::getRestoresByReg(uint16_t Reg) {
CalleeSavedAnalysis::~CalleeSavedAnalysis() {
for (auto &BB : BF) {
for (auto &Inst : BB) {
BC.MIA->removeAnnotation(Inst, getSaveTag());
BC.MIA->removeAnnotation(Inst, getRestoreTag());
BC.MIB->removeAnnotation(Inst, getSaveTag());
BC.MIB->removeAnnotation(Inst, getRestoreTag());
}
}
}
@ -229,7 +229,7 @@ bool StackLayoutModifier::blacklistAllInConflictWith(int64_t Offset,
void StackLayoutModifier::checkFramePointerInitialization(MCInst &Point) {
auto &SPT = Info.getStackPointerTracking();
if (!BC.MII->get(Point.getOpcode())
.hasDefOfPhysReg(Point, BC.MIA->getFramePointer(), *BC.MRI))
.hasDefOfPhysReg(Point, BC.MIB->getFramePointer(), *BC.MRI))
return;
int SPVal, FPVal;
@ -237,18 +237,18 @@ void StackLayoutModifier::checkFramePointerInitialization(MCInst &Point) {
std::pair<MCPhysReg, int64_t> FP;
if (FPVal != SPT.EMPTY && FPVal != SPT.SUPERPOSITION)
FP = std::make_pair(BC.MIA->getFramePointer(), FPVal);
FP = std::make_pair(BC.MIB->getFramePointer(), FPVal);
else
FP = std::make_pair(0, 0);
std::pair<MCPhysReg, int64_t> SP;
if (SPVal != SPT.EMPTY && SPVal != SPT.SUPERPOSITION)
SP = std::make_pair(BC.MIA->getStackPointer(), SPVal);
SP = std::make_pair(BC.MIB->getStackPointer(), SPVal);
else
SP = std::make_pair(0, 0);
int64_t Output;
if (!BC.MIA->evaluateSimple(Point, Output, SP, FP))
if (!BC.MIB->evaluateSimple(Point, Output, SP, FP))
return;
// Not your regular frame pointer initialization... bail
@ -259,7 +259,7 @@ void StackLayoutModifier::checkFramePointerInitialization(MCInst &Point) {
void StackLayoutModifier::checkStackPointerRestore(MCInst &Point) {
auto &SPT = Info.getStackPointerTracking();
if (!BC.MII->get(Point.getOpcode())
.hasDefOfPhysReg(Point, BC.MIA->getStackPointer(), *BC.MRI))
.hasDefOfPhysReg(Point, BC.MIB->getStackPointer(), *BC.MRI))
return;
// Check if the definition of SP comes from FP -- in this case, this
// value may need to be updated depending on our stack layout changes
@ -270,7 +270,7 @@ void StackLayoutModifier::checkStackPointerRestore(MCInst &Point) {
auto &Operand = Point.getOperand(I);
if (!Operand.isReg())
continue;
if (Operand.getReg() == BC.MIA->getFramePointer()) {
if (Operand.getReg() == BC.MIB->getFramePointer()) {
UsesFP = true;
break;
}
@ -284,18 +284,18 @@ void StackLayoutModifier::checkStackPointerRestore(MCInst &Point) {
std::pair<MCPhysReg, int64_t> FP;
if (FPVal != SPT.EMPTY && FPVal != SPT.SUPERPOSITION)
FP = std::make_pair(BC.MIA->getFramePointer(), FPVal);
FP = std::make_pair(BC.MIB->getFramePointer(), FPVal);
else
FP = std::make_pair(0, 0);
std::pair<MCPhysReg, int64_t> SP;
if (SPVal != SPT.EMPTY && SPVal != SPT.SUPERPOSITION)
SP = std::make_pair(BC.MIA->getStackPointer(), SPVal);
SP = std::make_pair(BC.MIB->getStackPointer(), SPVal);
else
SP = std::make_pair(0, 0);
int64_t Output;
if (!BC.MIA->evaluateSimple(Point, Output, SP, FP))
if (!BC.MIB->evaluateSimple(Point, Output, SP, FP))
return;
// If the value is the same as FP's, no need to adjust it
@ -310,7 +310,7 @@ void StackLayoutModifier::checkStackPointerRestore(MCInst &Point) {
// We are restoring SP to an old value based on FP. Mark it as a stack
// access to be fixed later.
BC.MIA->addAnnotation(BC.Ctx.get(), Point, getSlotTagName(), Output);
BC.MIB->addAnnotation(BC.Ctx.get(), Point, getSlotTagName(), Output);
}
void StackLayoutModifier::classifyStackAccesses() {
@ -353,7 +353,7 @@ void StackLayoutModifier::classifyStackAccesses() {
// We are free to go. Add it as an available stack slot that we know
// how to move.
AvailableRegions[FIEX->StackOffset] = FIEX->Size;
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, getSlotTagName(),
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, getSlotTagName(),
FIEX->StackOffset);
RegionToRegMap[FIEX->StackOffset].insert(FIEX->RegOrImm);
RegToRegionMap[FIEX->RegOrImm].insert(FIEX->StackOffset);
@ -370,8 +370,8 @@ void StackLayoutModifier::classifyCFIs() {
auto recordAccess = [&](MCInst *Inst, int64_t Offset) {
const uint16_t Reg = BC.MRI->getLLVMRegNum(CfaReg, /*isEH=*/false);
if (Reg == BC.MIA->getStackPointer() || Reg == BC.MIA->getFramePointer()) {
BC.MIA->addAnnotation(BC.Ctx.get(), *Inst, getSlotTagName(), Offset);
if (Reg == BC.MIB->getStackPointer() || Reg == BC.MIB->getFramePointer()) {
BC.MIB->addAnnotation(BC.Ctx.get(), *Inst, getSlotTagName(), Offset);
DEBUG(dbgs() << "Recording CFI " << Offset << "\n");
} else {
IsSimple = false;
@ -381,7 +381,7 @@ void StackLayoutModifier::classifyCFIs() {
for (auto &BB : BF.layout()) {
for (auto &Inst : *BB) {
if (!BC.MIA->isCFI(Inst))
if (!BC.MIB->isCFI(Inst))
continue;
auto *CFI = BF.getCFIFor(Inst);
switch (CFI->getOperation()) {
@ -398,12 +398,12 @@ void StackLayoutModifier::classifyCFIs() {
break;
case MCCFIInstruction::OpOffset:
recordAccess(&Inst, CFI->getOffset());
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, getOffsetCFIRegTagName(),
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, getOffsetCFIRegTagName(),
BC.MRI->getLLVMRegNum(CFI->getRegister(),
/*isEH=*/false));
break;
case MCCFIInstruction::OpSameValue:
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, getOffsetCFIRegTagName(),
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, getOffsetCFIRegTagName(),
BC.MRI->getLLVMRegNum(CFI->getRegister(),
/*isEH=*/false));
break;
@ -431,13 +431,13 @@ void StackLayoutModifier::classifyCFIs() {
void StackLayoutModifier::scheduleChange(
MCInst &Inst, StackLayoutModifier::WorklistItem Item) {
auto &WList = BC.MIA->getOrCreateAnnotationAs<std::vector<WorklistItem>>(
auto &WList = BC.MIB->getOrCreateAnnotationAs<std::vector<WorklistItem>>(
BC.Ctx.get(), Inst, getTodoTagName());
WList.push_back(Item);
}
bool StackLayoutModifier::canCollapseRegion(MCInst *DeletedPush) {
if (!IsSimple || !BC.MIA->isPush(*DeletedPush))
if (!IsSimple || !BC.MIB->isPush(*DeletedPush))
return false;
auto FIE = FA.getFIEFor(*DeletedPush);
@ -482,10 +482,10 @@ bool StackLayoutModifier::collapseRegion(MCInst *Alloc, int64_t RegionAddr,
for (auto &BB : BF) {
for (auto &Inst : BB) {
if (!BC.MIA->hasAnnotation(Inst, getSlotTagName()))
if (!BC.MIB->hasAnnotation(Inst, getSlotTagName()))
continue;
auto Slot =
BC.MIA->getAnnotationAs<decltype(FrameIndexEntry::StackOffset)>(
BC.MIB->getAnnotationAs<decltype(FrameIndexEntry::StackOffset)>(
Inst, getSlotTagName());
if (!AvailableRegions.count(Slot))
continue;
@ -493,7 +493,7 @@ bool StackLayoutModifier::collapseRegion(MCInst *Alloc, int64_t RegionAddr,
if (!(*SAA.getStateBefore(Inst))[SAA.ExprToIdx[Alloc]])
continue;
if (BC.MIA->isCFI(Inst)) {
if (BC.MIB->isCFI(Inst)) {
if (Slot > RegionAddr)
continue;
scheduleChange(Inst, WorklistItem(WorklistItem::AdjustCFI, RegionSz));
@ -510,18 +510,18 @@ bool StackLayoutModifier::collapseRegion(MCInst *Alloc, int64_t RegionAddr,
}
if (Slot == RegionAddr) {
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, "AccessesDeletedPos", 0U);
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, "AccessesDeletedPos", 0U);
continue;
}
if (BC.MIA->isPush(Inst) || BC.MIA->isPop(Inst)) {
if (BC.MIB->isPush(Inst) || BC.MIB->isPop(Inst)) {
continue;
}
if (FIE->StackPtrReg == BC.MIA->getStackPointer() && Slot < RegionAddr)
if (FIE->StackPtrReg == BC.MIB->getStackPointer() && Slot < RegionAddr)
continue;
if (FIE->StackPtrReg == BC.MIA->getFramePointer() && Slot > RegionAddr)
if (FIE->StackPtrReg == BC.MIB->getFramePointer() && Slot > RegionAddr)
continue;
scheduleChange(
@ -536,9 +536,9 @@ bool StackLayoutModifier::collapseRegion(MCInst *Alloc, int64_t RegionAddr,
void StackLayoutModifier::setOffsetForCollapsedAccesses(int64_t NewOffset) {
for (auto &BB : BF) {
for (auto &Inst : BB) {
if (!BC.MIA->hasAnnotation(Inst, "AccessesDeletedPos"))
if (!BC.MIB->hasAnnotation(Inst, "AccessesDeletedPos"))
continue;
BC.MIA->removeAnnotation(Inst, "AccessesDeletedPos");
BC.MIB->removeAnnotation(Inst, "AccessesDeletedPos");
scheduleChange(
Inst, WorklistItem(WorklistItem::AdjustLoadStoreOffset, NewOffset));
}
@ -583,10 +583,10 @@ bool StackLayoutModifier::insertRegion(ProgramPoint P, int64_t RegionSz) {
for (auto &BB : BF) {
for (auto &Inst : BB) {
if (!BC.MIA->hasAnnotation(Inst, getSlotTagName()))
if (!BC.MIB->hasAnnotation(Inst, getSlotTagName()))
continue;
auto Slot =
BC.MIA->getAnnotationAs<decltype(FrameIndexEntry::StackOffset)>(
BC.MIB->getAnnotationAs<decltype(FrameIndexEntry::StackOffset)>(
Inst, getSlotTagName());
if (!AvailableRegions.count(Slot))
continue;
@ -594,7 +594,7 @@ bool StackLayoutModifier::insertRegion(ProgramPoint P, int64_t RegionSz) {
if (!(DA.doesADominateB(P, Inst)))
continue;
if (BC.MIA->isCFI(Inst)) {
if (BC.MIB->isCFI(Inst)) {
if (Slot >= RegionAddr)
continue;
scheduleChange(Inst, WorklistItem(WorklistItem::AdjustCFI, -RegionSz));
@ -609,11 +609,11 @@ bool StackLayoutModifier::insertRegion(ProgramPoint P, int64_t RegionSz) {
continue;
}
if (FIE->StackPtrReg == BC.MIA->getStackPointer() && Slot < RegionAddr)
if (FIE->StackPtrReg == BC.MIB->getStackPointer() && Slot < RegionAddr)
continue;
if (FIE->StackPtrReg == BC.MIA->getFramePointer() && Slot >= RegionAddr)
if (FIE->StackPtrReg == BC.MIB->getFramePointer() && Slot >= RegionAddr)
continue;
if (BC.MIA->isPush(Inst) || BC.MIA->isPop(Inst))
if (BC.MIB->isPush(Inst) || BC.MIB->isPop(Inst))
continue;
scheduleChange(
Inst, WorklistItem(WorklistItem::AdjustLoadStoreOffset, -RegionSz));
@ -629,13 +629,13 @@ void StackLayoutModifier::performChanges() {
for (auto &BB : BF) {
for (auto I = BB.rbegin(), E = BB.rend(); I != E; ++I) {
auto &Inst = *I;
if (BC.MIA->hasAnnotation(Inst, "AccessesDeletedPos")) {
assert(BC.MIA->isPop(Inst) || BC.MIA->isPush(Inst));
BC.MIA->removeAnnotation(Inst, "AccessesDeletedPos");
if (BC.MIB->hasAnnotation(Inst, "AccessesDeletedPos")) {
assert(BC.MIB->isPop(Inst) || BC.MIB->isPush(Inst));
BC.MIB->removeAnnotation(Inst, "AccessesDeletedPos");
}
if (!BC.MIA->hasAnnotation(Inst, getTodoTagName()))
if (!BC.MIB->hasAnnotation(Inst, getTodoTagName()))
continue;
auto &WList = BC.MIA->getAnnotationAs<std::vector<WorklistItem>>(
auto &WList = BC.MIB->getAnnotationAs<std::vector<WorklistItem>>(
Inst, getTodoTagName());
int64_t Adjustment = 0;
WorklistItem::ActionType AdjustmentType = WorklistItem::None;
@ -653,7 +653,7 @@ void StackLayoutModifier::performChanges() {
if (!Adjustment)
continue;
if (AdjustmentType != WorklistItem::AdjustLoadStoreOffset) {
assert(BC.MIA->isCFI(Inst));
assert(BC.MIB->isCFI(Inst));
uint32_t CFINum = Inst.getOperand(0).getImm();
if (ModifiedCFIIndices.count(CFINum))
continue;
@ -675,23 +675,23 @@ void StackLayoutModifier::performChanges() {
bool IsStoreFromReg{false};
uint8_t Size{0};
bool Success{false};
Success = BC.MIA->isStackAccess(Inst, IsLoad, IsStore, IsStoreFromReg,
Success = BC.MIB->isStackAccess(Inst, IsLoad, IsStore, IsStoreFromReg,
Reg, SrcImm, StackPtrReg, StackOffset,
Size, IsSimple, IsIndexed);
if (!Success) {
// SP update based on FP value
Success = BC.MIA->addToImm(Inst, Adjustment, &*BC.Ctx);
Success = BC.MIB->addToImm(Inst, Adjustment, &*BC.Ctx);
assert(Success);
continue;
}
assert(Success && IsSimple && !IsIndexed && (!IsStore || IsStoreFromReg));
if (StackPtrReg != BC.MIA->getFramePointer())
if (StackPtrReg != BC.MIB->getFramePointer())
Adjustment = -Adjustment;
if (IsLoad)
Success = BC.MIA->createRestoreFromStack(
Success = BC.MIB->createRestoreFromStack(
Inst, StackPtrReg, StackOffset + Adjustment, Reg, Size);
else if (IsStore)
Success = BC.MIA->createSaveToStack(
Success = BC.MIB->createSaveToStack(
Inst, StackPtrReg, StackOffset + Adjustment, Reg, Size);
DEBUG({
dbgs() << "Adjusted instruction: ";
@ -720,13 +720,13 @@ void ShrinkWrapping::classifyCSRUses() {
BitVector(DA.NumInstrs, false));
const BitVector &FPAliases =
BC.MIA->getAliases(BC.MIA->getFramePointer());
BC.MIB->getAliases(BC.MIB->getFramePointer());
for (auto &BB : BF) {
for (auto &Inst : BB) {
if (BC.MIA->isCFI(Inst))
if (BC.MIB->isCFI(Inst))
continue;
auto BV = BitVector(BC.MRI->getNumRegs(), false);
BC.MIA->getTouchedRegs(Inst, BV);
BC.MIB->getTouchedRegs(Inst, BV);
BV &= CSA.CalleeSaved;
for (int I = BV.find_first(); I != -1; I = BV.find_next(I)) {
if (I == 0)
@ -734,7 +734,7 @@ void ShrinkWrapping::classifyCSRUses() {
if (CSA.getSavedReg(Inst) != I && CSA.getRestoredReg(Inst) != I)
UsesByReg[I].set(DA.ExprToIdx[&Inst]);
}
if (!SPT.HasFramePointer || !BC.MIA->isCall(Inst))
if (!SPT.HasFramePointer || !BC.MIB->isCall(Inst))
continue;
BV = CSA.CalleeSaved;
BV &= FPAliases;
@ -746,7 +746,7 @@ void ShrinkWrapping::classifyCSRUses() {
}
void ShrinkWrapping::pruneUnwantedCSRs() {
BitVector ParamRegs = BC.MIA->getRegsUsedAsParams();
BitVector ParamRegs = BC.MIB->getRegsUsedAsParams();
for (unsigned I = 0, E = BC.MRI->getNumRegs(); I != E; ++I) {
if (!CSA.CalleeSaved[I])
continue;
@ -936,7 +936,7 @@ void ShrinkWrapping::splitFrontierCritEdges(
// and not BBs).
if (NewBB->empty()) {
MCInst NewInst;
BC.MIA->createNoop(NewInst);
BC.MIB->createNoop(NewInst);
NewBB->addInstruction(std::move(NewInst));
scheduleChange(&*NewBB->begin(), WorklistItem(WorklistItem::Erase, 0));
}
@ -963,7 +963,7 @@ ShrinkWrapping::doRestorePlacement(MCInst *BestPosSave, unsigned CSR,
Frontier = DA.getDominanceFrontierFor(*BestPosSave);
for (auto &PP : Frontier) {
bool HasCritEdges{false};
if (PP.isInst() && BC.MIA->isTerminator(*PP.getInst()) &&
if (PP.isInst() && BC.MIB->isTerminator(*PP.getInst()) &&
doesInstUsesCSR(*PP.getInst(), CSR)) {
CannotPlace = true;
}
@ -973,7 +973,7 @@ ShrinkWrapping::doRestorePlacement(MCInst *BestPosSave, unsigned CSR,
auto &Dests = CritEdgesTo.back();
// Check for invoke instructions at the dominance frontier, which indicate
// that the landing pad is not dominated.
if (PP.isInst() && BC.MIA->isInvoke(*PP.getInst())) {
if (PP.isInst() && BC.MIB->isInvoke(*PP.getInst())) {
DEBUG(dbgs() << "Bailing on restore placement to avoid LP splitting\n");
Frontier.clear();
return Frontier;
@ -1100,7 +1100,7 @@ void ShrinkWrapping::scheduleOldSaveRestoresRemoval(unsigned CSR,
std::vector<MCInst *> CFIs;
for (auto I = BB->rbegin(), E = BB->rend(); I != E; ++I) {
auto &Inst = *I;
if (BC.MIA->isCFI(Inst)) {
if (BC.MIB->isCFI(Inst)) {
// Delete all offset CFIs related to this CSR
if (SLM.getOffsetCFIReg(Inst) == CSR) {
HasDeletedOffsetCFIs[CSR] = true;
@ -1154,11 +1154,11 @@ void ShrinkWrapping::scheduleOldSaveRestoresRemoval(unsigned CSR,
}
bool ShrinkWrapping::doesInstUsesCSR(const MCInst &Inst, uint16_t CSR) {
if (BC.MIA->isCFI(Inst) || CSA.getSavedReg(Inst) == CSR ||
if (BC.MIB->isCFI(Inst) || CSA.getSavedReg(Inst) == CSR ||
CSA.getRestoredReg(Inst) == CSR)
return false;
BitVector BV = BitVector(BC.MRI->getNumRegs(), false);
BC.MIA->getTouchedRegs(Inst, BV);
BC.MIB->getTouchedRegs(Inst, BV);
return BV[CSR];
}
@ -1274,11 +1274,11 @@ void ShrinkWrapping::moveSaveRestores() {
}
for (auto I = BB.rbegin(), E = BB.rend(); I != E; ++I) {
auto &Inst = *I;
auto TodoList = BC.MIA->tryGetAnnotationAs<std::vector<WorklistItem>>(
auto TodoList = BC.MIB->tryGetAnnotationAs<std::vector<WorklistItem>>(
Inst, getAnnotationName());
if (!TodoList)
continue;
bool isCFI = BC.MIA->isCFI(Inst);
bool isCFI = BC.MIB->isCFI(Inst);
for (auto &Item : *TodoList) {
if (Item.Action == WorklistItem::InsertPushOrPop)
Item.Action = WorklistItem::InsertLoadOrStore;
@ -1388,13 +1388,13 @@ protected:
std::pair<int, int> &Res) {
for (const auto &Item : TodoItems) {
if (Item.Action == ShrinkWrapping::WorklistItem::Erase &&
BC.MIA->isPush(Point)) {
Res.first += BC.MIA->getPushSize(Point);
BC.MIB->isPush(Point)) {
Res.first += BC.MIB->getPushSize(Point);
continue;
}
if (Item.Action == ShrinkWrapping::WorklistItem::Erase &&
BC.MIA->isPop(Point)) {
Res.first -= BC.MIA->getPopSize(Point);
BC.MIB->isPop(Point)) {
Res.first -= BC.MIB->getPopSize(Point);
continue;
}
if (Item.Action == ShrinkWrapping::WorklistItem::InsertPushOrPop &&
@ -1419,7 +1419,7 @@ protected:
Res.first == StackPointerTracking::EMPTY)
return Res;
auto TodoItems =
BC.MIA->tryGetAnnotationAs<std::vector<ShrinkWrapping::WorklistItem>>(
BC.MIB->tryGetAnnotationAs<std::vector<ShrinkWrapping::WorklistItem>>(
Point, ShrinkWrapping::getAnnotationName());
if (TodoItems)
compNextAux(Point, *TodoItems, Res);
@ -1469,7 +1469,7 @@ void ShrinkWrapping::insertUpdatedCFI(unsigned CSR, int SPValPush,
bool IsSimple{false};
bool IsStoreFromReg{false};
uint8_t Size{0};
if (!BC.MIA->isStackAccess(*InstIter, IsLoad, IsStore, IsStoreFromReg,
if (!BC.MIB->isStackAccess(*InstIter, IsLoad, IsStore, IsStoreFromReg,
Reg, SrcImm, StackPtrReg, StackOffset,
Size, IsSimple, IsIndexed))
continue;
@ -1535,11 +1535,11 @@ void ShrinkWrapping::insertUpdatedCFI(unsigned CSR, int SPValPush,
void ShrinkWrapping::rebuildCFIForSP() {
for (auto &BB : BF) {
for (auto &Inst : BB) {
if (!BC.MIA->isCFI(Inst))
if (!BC.MIB->isCFI(Inst))
continue;
auto *CFI = BF.getCFIFor(Inst);
if (CFI->getOperation() == MCCFIInstruction::OpDefCfaOffset)
BC.MIA->addAnnotation(BC.Ctx.get(), Inst, "DeleteMe", 0U);
BC.MIB->addAnnotation(BC.Ctx.get(), Inst, "DeleteMe", 0U);
}
}
@ -1580,7 +1580,7 @@ void ShrinkWrapping::rebuildCFIForSP() {
for (auto &BB : BF)
for (auto I = BB.rbegin(), E = BB.rend(); I != E; ++I)
if (BC.MIA->hasAnnotation(*I, "DeleteMe"))
if (BC.MIB->hasAnnotation(*I, "DeleteMe"))
BB.eraseInstruction(&*I);
}
@ -1591,14 +1591,14 @@ MCInst ShrinkWrapping::createStackAccess(int SPVal, int FPVal,
if (SPVal != StackPointerTracking::SUPERPOSITION &&
SPVal != StackPointerTracking::EMPTY) {
if (FIE.IsLoad) {
if (!BC.MIA->createRestoreFromStack(NewInst, BC.MIA->getStackPointer(),
if (!BC.MIB->createRestoreFromStack(NewInst, BC.MIB->getStackPointer(),
FIE.StackOffset - SPVal, FIE.RegOrImm,
FIE.Size)) {
errs() << "createRestoreFromStack: not supported on this platform\n";
abort();
}
} else {
if (!BC.MIA->createSaveToStack(NewInst, BC.MIA->getStackPointer(),
if (!BC.MIB->createSaveToStack(NewInst, BC.MIB->getStackPointer(),
FIE.StackOffset - SPVal, FIE.RegOrImm,
FIE.Size)) {
errs() << "createSaveToStack: not supported on this platform\n";
@ -1606,21 +1606,21 @@ MCInst ShrinkWrapping::createStackAccess(int SPVal, int FPVal,
}
}
if (CreatePushOrPop)
BC.MIA->changeToPushOrPop(NewInst);
BC.MIB->changeToPushOrPop(NewInst);
return NewInst;
}
assert(FPVal != StackPointerTracking::SUPERPOSITION &&
FPVal != StackPointerTracking::EMPTY);
if (FIE.IsLoad) {
if (!BC.MIA->createRestoreFromStack(NewInst, BC.MIA->getFramePointer(),
if (!BC.MIB->createRestoreFromStack(NewInst, BC.MIB->getFramePointer(),
FIE.StackOffset - FPVal, FIE.RegOrImm,
FIE.Size)) {
errs() << "createRestoreFromStack: not supported on this platform\n";
abort();
}
} else {
if (!BC.MIA->createSaveToStack(NewInst, BC.MIA->getFramePointer(),
if (!BC.MIB->createSaveToStack(NewInst, BC.MIB->getFramePointer(),
FIE.StackOffset - FPVal, FIE.RegOrImm,
FIE.Size)) {
errs() << "createSaveToStack: not supported on this platform\n";
@ -1802,7 +1802,7 @@ bool ShrinkWrapping::processInsertions() {
// Process insertions before some inst.
for (auto I = BB.begin(); I != BB.end(); ++I) {
auto &Inst = *I;
auto TodoList = BC.MIA->tryGetAnnotationAs<std::vector<WorklistItem>>(
auto TodoList = BC.MIB->tryGetAnnotationAs<std::vector<WorklistItem>>(
Inst, getAnnotationName());
if (!TodoList)
continue;
@ -1835,7 +1835,7 @@ void ShrinkWrapping::processDeletions() {
for (auto &BB : BF) {
for (auto I = BB.rbegin(), E = BB.rend(); I != E; ++I) {
auto &Inst = *I;
auto TodoList = BC.MIA->tryGetAnnotationAs<std::vector<WorklistItem>>(
auto TodoList = BC.MIB->tryGetAnnotationAs<std::vector<WorklistItem>>(
Inst, getAnnotationName());
if (!TodoList)
continue;
@ -1847,13 +1847,13 @@ void ShrinkWrapping::processDeletions() {
if (Item.Action == WorklistItem::ChangeToAdjustment) {
// Is flag reg alive across this func?
bool DontClobberFlags = LA.isAlive(&Inst, BC.MIA->getFlagsReg());
if (auto Sz = BC.MIA->getPushSize(Inst)) {
BC.MIA->createStackPointerIncrement(Inst, Sz, DontClobberFlags);
bool DontClobberFlags = LA.isAlive(&Inst, BC.MIB->getFlagsReg());
if (auto Sz = BC.MIB->getPushSize(Inst)) {
BC.MIB->createStackPointerIncrement(Inst, Sz, DontClobberFlags);
continue;
}
if (auto Sz = BC.MIA->getPopSize(Inst)) {
BC.MIA->createStackPointerDecrement(Inst, Sz, DontClobberFlags);
if (auto Sz = BC.MIB->getPopSize(Inst)) {
BC.MIB->createStackPointerDecrement(Inst, Sz, DontClobberFlags);
continue;
}
}


@ -74,7 +74,7 @@ public:
/// Retrieves the value of the callee-saved register that is saved by this
/// instruction or 0 if this is not a CSR save instruction.
uint16_t getSavedReg(const MCInst &Inst) {
auto Val = BC.MIA->tryGetAnnotationAs<decltype(FrameIndexEntry::RegOrImm)>(
auto Val = BC.MIB->tryGetAnnotationAs<decltype(FrameIndexEntry::RegOrImm)>(
Inst, getSaveTag());
if (Val)
return *Val;
@ -84,7 +84,7 @@ public:
/// Retrieves the value of the callee-saved register that is restored by this
/// instruction or 0 if this is not a CSR restore instruction.
uint16_t getRestoredReg(const MCInst &Inst) {
auto Val = BC.MIA->tryGetAnnotationAs<decltype(FrameIndexEntry::RegOrImm)>(
auto Val = BC.MIB->tryGetAnnotationAs<decltype(FrameIndexEntry::RegOrImm)>(
Inst, getRestoreTag());
if (Val)
return *Val;
@ -191,9 +191,9 @@ public:
~StackLayoutModifier() {
for (auto &BB : BF) {
for (auto &Inst : BB) {
BC.MIA->removeAnnotation(Inst, getTodoTagName());
BC.MIA->removeAnnotation(Inst, getSlotTagName());
BC.MIA->removeAnnotation(Inst, getOffsetCFIRegTagName());
BC.MIB->removeAnnotation(Inst, getTodoTagName());
BC.MIB->removeAnnotation(Inst, getSlotTagName());
BC.MIB->removeAnnotation(Inst, getOffsetCFIRegTagName());
}
}
}
@ -202,7 +202,7 @@ public:
/// instruction or 0 if this is not a CSR restore instruction.
uint16_t getOffsetCFIReg(const MCInst &Inst) {
auto Val =
BC.MIA->tryGetAnnotationAs<uint16_t>(Inst, getOffsetCFIRegTagName());
BC.MIB->tryGetAnnotationAs<uint16_t>(Inst, getOffsetCFIRegTagName());
if (Val)
return *Val;
return 0;
@ -326,7 +326,7 @@ private:
template <typename ...T>
void scheduleChange(ProgramPoint PP, T&& ...Item) {
if (PP.isInst()) {
auto &WList = BC.MIA->getOrCreateAnnotationAs<std::vector<WorklistItem>>(
auto &WList = BC.MIB->getOrCreateAnnotationAs<std::vector<WorklistItem>>(
BC.Ctx.get(), *PP.getInst(), getAnnotationName());
WList.emplace_back(std::forward<T>(Item)...);
return;
@ -343,7 +343,7 @@ private:
assert (BB->succ_size() == 1);
BB = *BB->succ_begin();
}
auto &WList = BC.MIA->getOrCreateAnnotationAs<std::vector<WorklistItem>>(
auto &WList = BC.MIB->getOrCreateAnnotationAs<std::vector<WorklistItem>>(
BC.Ctx.get(), *BB->begin(), getAnnotationName());
WList.emplace_back(std::forward<T>(Item)...);
}
@ -470,7 +470,7 @@ public:
~ShrinkWrapping() {
for (auto &BB : BF) {
for (auto &Inst : BB) {
BC.MIA->removeAnnotation(Inst, getAnnotationName());
BC.MIB->removeAnnotation(Inst, getAnnotationName());
}
}
}


@ -24,11 +24,11 @@ void StackAllocationAnalysis::preflight() {
for (auto &BB : this->Func) {
for (auto &Inst : BB) {
MCPhysReg From, To;
if (!BC.MIA->isPush(Inst) && (!BC.MIA->isRegToRegMove(Inst, From, To) ||
To != BC.MIA->getStackPointer() ||
From != BC.MIA->getFramePointer()) &&
if (!BC.MIB->isPush(Inst) && (!BC.MIB->isRegToRegMove(Inst, From, To) ||
To != BC.MIB->getStackPointer() ||
From != BC.MIB->getFramePointer()) &&
!BC.MII->get(Inst.getOpcode())
.hasDefOfPhysReg(Inst, BC.MIA->getStackPointer(), *BC.MRI))
.hasDefOfPhysReg(Inst, BC.MIB->getStackPointer(), *BC.MRI))
continue;
this->Expressions.push_back(&Inst);
this->ExprToIdx[&Inst] = this->NumInstrs++;
@ -94,13 +94,13 @@ void StackAllocationAnalysis::doConfluenceWithLP(BitVector &StateOut,
BitVector StackAllocationAnalysis::computeNext(const MCInst &Point,
const BitVector &Cur) {
const auto &MIA = BC.MIA;
const auto &MIB = BC.MIB;
BitVector Next = Cur;
if (int Sz = MIA->getPopSize(Point)) {
if (int Sz = MIB->getPopSize(Point)) {
Next = doKill(Point, Next, Sz);
return Next;
}
if (MIA->isPush(Point)) {
if (MIB->isPush(Point)) {
Next.set(this->ExprToIdx[&Point]);
return Next;
}
@ -108,9 +108,9 @@ BitVector StackAllocationAnalysis::computeNext(const MCInst &Point,
MCPhysReg From, To;
int64_t SPOffset, FPOffset;
std::tie(SPOffset, FPOffset) = *SPT.getStateBefore(Point);
if (MIA->isRegToRegMove(Point, From, To) && To == MIA->getStackPointer() &&
From == MIA->getFramePointer()) {
if (MIA->isLeave(Point))
if (MIB->isRegToRegMove(Point, From, To) && To == MIB->getStackPointer() &&
From == MIB->getFramePointer()) {
if (MIB->isLeave(Point))
FPOffset += 8;
if (SPOffset < FPOffset) {
Next = doKill(Point, Next, FPOffset - SPOffset);
@ -122,19 +122,19 @@ BitVector StackAllocationAnalysis::computeNext(const MCInst &Point,
}
}
if (BC.MII->get(Point.getOpcode())
.hasDefOfPhysReg(Point, MIA->getStackPointer(), *BC.MRI)) {
.hasDefOfPhysReg(Point, MIB->getStackPointer(), *BC.MRI)) {
std::pair<MCPhysReg, int64_t> SP;
if (SPOffset != SPT.EMPTY && SPOffset != SPT.SUPERPOSITION)
SP = std::make_pair(MIA->getStackPointer(), SPOffset);
SP = std::make_pair(MIB->getStackPointer(), SPOffset);
else
SP = std::make_pair(0, 0);
std::pair<MCPhysReg, int64_t> FP;
if (FPOffset != SPT.EMPTY && FPOffset != SPT.SUPERPOSITION)
FP = std::make_pair(MIA->getFramePointer(), FPOffset);
FP = std::make_pair(MIB->getFramePointer(), FPOffset);
else
FP = std::make_pair(0, 0);
int64_t Output;
if (!MIA->evaluateSimple(Point, Output, SP, FP))
if (!MIB->evaluateSimple(Point, Output, SP, FP))
return Next;
if (SPOffset < Output) {


@ -82,16 +82,16 @@ protected:
}
int computeNextSP(const MCInst &Point, int SPVal, int FPVal) {
const auto &MIA = this->BC.MIA;
const auto &MIB = this->BC.MIB;
if (int Sz = MIA->getPushSize(Point)) {
if (int Sz = MIB->getPushSize(Point)) {
if (SPVal == EMPTY || SPVal == SUPERPOSITION)
return SPVal;
return SPVal - Sz;
}
if (int Sz = MIA->getPopSize(Point)) {
if (int Sz = MIB->getPopSize(Point)) {
if (SPVal == EMPTY || SPVal == SUPERPOSITION)
return SPVal;
@ -99,31 +99,31 @@ protected:
}
MCPhysReg From, To;
if (MIA->isRegToRegMove(Point, From, To) && To == MIA->getStackPointer() &&
From == MIA->getFramePointer()) {
if (MIB->isRegToRegMove(Point, From, To) && To == MIB->getStackPointer() &&
From == MIB->getFramePointer()) {
if (FPVal == EMPTY || FPVal == SUPERPOSITION)
return FPVal;
if (MIA->isLeave(Point))
if (MIB->isLeave(Point))
return FPVal + 8;
else
return FPVal;
}
if (this->BC.MII->get(Point.getOpcode())
.hasDefOfPhysReg(Point, MIA->getStackPointer(), *this->BC.MRI)) {
.hasDefOfPhysReg(Point, MIB->getStackPointer(), *this->BC.MRI)) {
std::pair<MCPhysReg, int64_t> SP;
if (SPVal != EMPTY && SPVal != SUPERPOSITION)
SP = std::make_pair(MIA->getStackPointer(), SPVal);
SP = std::make_pair(MIB->getStackPointer(), SPVal);
else
SP = std::make_pair(0, 0);
std::pair<MCPhysReg, int64_t> FP;
if (FPVal != EMPTY && FPVal != SUPERPOSITION)
FP = std::make_pair(MIA->getFramePointer(), FPVal);
FP = std::make_pair(MIB->getFramePointer(), FPVal);
else
FP = std::make_pair(0, 0);
int64_t Output;
if (!MIA->evaluateSimple(Point, Output, SP, FP)) {
if (!MIB->evaluateSimple(Point, Output, SP, FP)) {
if (SPVal == EMPTY && FPVal == EMPTY)
return SPVal;
return SUPERPOSITION;
@ -136,36 +136,36 @@ protected:
}
int computeNextFP(const MCInst &Point, int SPVal, int FPVal) {
const auto &MIA = this->BC.MIA;
const auto &MIB = this->BC.MIB;
MCPhysReg From, To;
if (MIA->isRegToRegMove(Point, From, To) && To == MIA->getFramePointer() &&
From == MIA->getStackPointer()) {
if (MIB->isRegToRegMove(Point, From, To) && To == MIB->getFramePointer() &&
From == MIB->getStackPointer()) {
HasFramePointer = true;
return SPVal;
}
if (this->BC.MII->get(Point.getOpcode())
.hasDefOfPhysReg(Point, MIA->getFramePointer(), *this->BC.MRI)) {
.hasDefOfPhysReg(Point, MIB->getFramePointer(), *this->BC.MRI)) {
std::pair<MCPhysReg, int64_t> FP;
if (FPVal != EMPTY && FPVal != SUPERPOSITION)
FP = std::make_pair(MIA->getFramePointer(), FPVal);
FP = std::make_pair(MIB->getFramePointer(), FPVal);
else
FP = std::make_pair(0, 0);
std::pair<MCPhysReg, int64_t> SP;
if (SPVal != EMPTY && SPVal != SUPERPOSITION)
SP = std::make_pair(MIA->getStackPointer(), SPVal);
SP = std::make_pair(MIB->getStackPointer(), SPVal);
else
SP = std::make_pair(0, 0);
int64_t Output;
if (!MIA->evaluateSimple(Point, Output, SP, FP)) {
if (!MIB->evaluateSimple(Point, Output, SP, FP)) {
if (SPVal == EMPTY && FPVal == EMPTY)
return FPVal;
return SUPERPOSITION;
}
if (!HasFramePointer) {
if (MIA->escapesVariable(Point, false)) {
if (MIB->escapesVariable(Point, false)) {
HasFramePointer = true;
}
}
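
Both computeNextSP and computeNextFP propagate offsets through a small
lattice: concrete values plus EMPTY (no information has reached this point
yet) and SUPERPOSITION (conflicting information has). A hedged sketch of the
matching join that the dataflow framework would apply at control-flow merges
(the confluence code itself is not part of this diff):

  // Join two tracked offsets: EMPTY is the identity element, and any
  // disagreement or already-unknown input collapses to SUPERPOSITION.
  int joinOffsets(int A, int B, int Empty, int Superposition) {
    if (A == Empty)
      return B;
    if (B == Empty)
      return A;
    if (A == Superposition || B == Superposition || A != B)
      return Superposition;
    return A; // both agree on the same concrete offset
  }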


@ -48,14 +48,14 @@ void StokeInfo::checkInstr(const BinaryContext &BC, const BinaryFunction &BF,
continue;
}
// skip functions with exception handling for now
if (BC.MIA->isEHLabel(It) || BC.MIA->isInvoke(It) || BC.MIA->hasEHInfo(It)) {
if (BC.MIB->isEHLabel(It) || BC.MIB->isInvoke(It) || BC.MIB->hasEHInfo(It)) {
FuncInfo.Omitted = true;
return;
}
// check if this function contains a call instruction
if (BC.MIA->isCall(It)) {
if (BC.MIB->isCall(It)) {
FuncInfo.HasCall = true;
const auto *TargetSymbol = BC.MIA->getTargetSymbol(It);
const auto *TargetSymbol = BC.MIB->getTargetSymbol(It);
// if it is an indirect call, skip
if (TargetSymbol == nullptr) {
FuncInfo.Omitted = true;
@ -64,12 +64,12 @@ void StokeInfo::checkInstr(const BinaryContext &BC, const BinaryFunction &BF,
}
// check if this function modifies the stack or heap
// TODO: more accurate analysis
auto IsPush = BC.MIA->isPush(It);
auto IsRipAddr = BC.MIA->hasPCRelOperand(It);
auto IsPush = BC.MIB->isPush(It);
auto IsRipAddr = BC.MIB->hasPCRelOperand(It);
if (IsPush) {
FuncInfo.StackOut = true;
}
if (BC.MIA->isStore(It) && !IsPush && !IsRipAddr) {
if (BC.MIB->isStore(It) && !IsPush && !IsRipAddr) {
FuncInfo.HeapOut = true;
}
if (IsRipAddr) {
@ -165,8 +165,8 @@ void StokeInfo::runOnFunctions(
DefaultDefInMask.resize(NumRegs, false);
DefaultLiveOutMask.resize(NumRegs, false);
BC.MIA->getDefaultDefIn(DefaultDefInMask);
BC.MIA->getDefaultLiveOut(DefaultLiveOutMask);
BC.MIB->getDefaultDefIn(DefaultDefInMask);
BC.MIB->getDefaultLiveOut(DefaultLiveOutMask);
getRegNameFromBitVec(BC, DefaultDefInMask);
getRegNameFromBitVec(BC, DefaultLiveOutMask);
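
getRegNameFromBitVec (not shown in this hunk) resolves each set bit in a
register mask to a register name. The underlying iteration pattern, as a
sketch assuming an MCRegisterInfo instance (printRegsFromMask is a
hypothetical name, not BOLT's):

  #include "llvm/ADT/BitVector.h"
  #include "llvm/MC/MCRegisterInfo.h"
  #include "llvm/Support/raw_ostream.h"

  // Walk every set bit in the mask and print the register it denotes.
  void printRegsFromMask(const llvm::BitVector &Mask,
                         const llvm::MCRegisterInfo &MRI) {
    for (int I = Mask.find_first(); I != -1; I = Mask.find_next(I))
      llvm::outs() << MRI.getName(I) << " ";
    llvm::outs() << "\n";
  }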


@ -111,7 +111,7 @@ ProfileReader::parseFunctionProfile(BinaryFunction &BF,
++MismatchedCalls;
continue;
}
if (!BC.MIA->isCall(*Instr) && !BC.MIA->isIndirectBranch(*Instr)) {
if (!BC.MIB->isCall(*Instr) && !BC.MIB->isIndirectBranch(*Instr)) {
if (opts::Verbosity >= 2)
errs() << "BOLT-WARNING: expected call at offset " << YamlCSI.Offset
<< " in block " << BB.getName() << '\n';
@ -120,22 +120,22 @@ ProfileReader::parseFunctionProfile(BinaryFunction &BF,
}
auto setAnnotation = [&](StringRef Name, uint64_t Count) {
if (BC.MIA->hasAnnotation(*Instr, Name)) {
if (BC.MIB->hasAnnotation(*Instr, Name)) {
if (opts::Verbosity >= 1)
errs() << "BOLT-WARNING: ignoring duplicate " << Name
<< " info for offset 0x" << Twine::utohexstr(YamlCSI.Offset)
<< " in function " << BF << '\n';
return;
}
BC.MIA->addAnnotation(BC.Ctx.get(), *Instr, Name, Count);
BC.MIB->addAnnotation(BC.Ctx.get(), *Instr, Name, Count);
};
if (BC.MIA->isIndirectCall(*Instr) || BC.MIA->isIndirectBranch(*Instr)) {
if (BC.MIB->isIndirectCall(*Instr) || BC.MIB->isIndirectBranch(*Instr)) {
IndirectCallSiteProfile &CSP =
BC.MIA->getOrCreateAnnotationAs<IndirectCallSiteProfile>(BC.Ctx.get(),
BC.MIB->getOrCreateAnnotationAs<IndirectCallSiteProfile>(BC.Ctx.get(),
*Instr, "CallProfile");
CSP.emplace_back(IsFunction, Name, YamlCSI.Count, YamlCSI.Mispreds);
} else if (BC.MIA->getConditionalTailCall(*Instr)) {
} else if (BC.MIB->getConditionalTailCall(*Instr)) {
setAnnotation("CTCTakenCount", YamlCSI.Count);
setAnnotation("CTCMispredCount", YamlCSI.Mispreds);
} else {


@ -59,18 +59,18 @@ convert(const BinaryFunction &BF, yaml::bolt::BinaryFunctionProfile &YamlBF) {
YamlBB.ExecCount = BB->getKnownExecutionCount();
for (const auto &Instr : *BB) {
if (!BC.MIA->isCall(Instr) && !BC.MIA->isIndirectBranch(Instr))
if (!BC.MIB->isCall(Instr) && !BC.MIB->isIndirectBranch(Instr))
continue;
yaml::bolt::CallSiteInfo CSI;
auto Offset = BC.MIA->tryGetAnnotationAs<uint64_t>(Instr, "Offset");
auto Offset = BC.MIB->tryGetAnnotationAs<uint64_t>(Instr, "Offset");
if (!Offset || Offset.get() < BB->getInputOffset())
continue;
CSI.Offset = Offset.get() - BB->getInputOffset();
if (BC.MIA->isIndirectCall(Instr) || BC.MIA->isIndirectBranch(Instr)) {
if (BC.MIB->isIndirectCall(Instr) || BC.MIB->isIndirectBranch(Instr)) {
auto ICSP =
BC.MIA->tryGetAnnotationAs<IndirectCallSiteProfile>(Instr,
BC.MIB->tryGetAnnotationAs<IndirectCallSiteProfile>(Instr,
"CallProfile");
if (!ICSP)
continue;
@ -92,25 +92,25 @@ convert(const BinaryFunction &BF, yaml::bolt::BinaryFunctionProfile &YamlBF) {
YamlBB.CallSites.push_back(CSI);
}
} else { // direct call or a tail call
const auto *CalleeSymbol = BC.MIA->getTargetSymbol(Instr);
const auto *CalleeSymbol = BC.MIB->getTargetSymbol(Instr);
const auto Callee = BC.getFunctionForSymbol(CalleeSymbol);
if (Callee) {
CSI.DestId = Callee->getFunctionNumber();
CSI.EntryDiscriminator = Callee->getEntryForSymbol(CalleeSymbol);
}
if (BC.MIA->getConditionalTailCall(Instr)) {
if (BC.MIB->getConditionalTailCall(Instr)) {
auto CTCCount =
BC.MIA->tryGetAnnotationAs<uint64_t>(Instr, "CTCTakenCount");
BC.MIB->tryGetAnnotationAs<uint64_t>(Instr, "CTCTakenCount");
if (CTCCount) {
CSI.Count = *CTCCount;
auto CTCMispreds =
BC.MIA->tryGetAnnotationAs<uint64_t>(Instr, "CTCMispredCount");
BC.MIB->tryGetAnnotationAs<uint64_t>(Instr, "CTCMispredCount");
if (CTCMispreds)
CSI.Mispreds = *CTCMispreds;
}
} else {
auto Count = BC.MIA->tryGetAnnotationAs<uint64_t>(Instr, "Count");
auto Count = BC.MIB->tryGetAnnotationAs<uint64_t>(Instr, "Count");
if (Count)
CSI.Count = *Count;
}
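
The guarded reads above all follow the same optional-style protocol:
tryGetAnnotationAs returns a wrapper that converts to false when the
annotation is absent and dereferences to the value otherwise. In miniature
(sketch mirroring the usage in this diff; the wrapper type is left to BOLT):

  // Read an integer annotation if present, falling back to a default.
  uint64_t Count = 0;
  if (auto Val = BC.MIB->tryGetAnnotationAs<uint64_t>(Instr, "Count"))
    Count = *Val;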


@ -18,6 +18,7 @@
#include "DataAggregator.h"
#include "DataReader.h"
#include "Exceptions.h"
#include "MCPlusBuilder.h"
#include "ProfileReader.h"
#include "ProfileWriter.h"
#include "RewriteInstance.h"
@ -442,6 +443,28 @@ size_t padFunction(const BinaryFunction &Function) {
} // namespace opts
extern MCPlusBuilder *createX86MCPlusBuilder(const MCInstrAnalysis *,
const MCInstrInfo *,
const MCRegisterInfo *);
extern MCPlusBuilder *createAArch64MCPlusBuilder(const MCInstrAnalysis *,
const MCInstrInfo *,
const MCRegisterInfo *);
namespace {
MCPlusBuilder *createMCPlusBuilder(const Triple::ArchType Arch,
const MCInstrAnalysis *Analysis, const MCInstrInfo *Info,
const MCRegisterInfo *RegInfo) {
if (Arch == Triple::x86_64) {
return createX86MCPlusBuilder(Analysis, Info, RegInfo);
} else if (Arch == Triple::aarch64) {
return createAArch64MCPlusBuilder(Analysis, Info, RegInfo);
} else {
llvm_unreachable("architecture unsupport by MCPlusBuilder");
}
}
}
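
This dispatch is the single point where a new target wires in its
MCPlusBuilder. Extending it follows the same shape; for illustration only
(createRISCVMCPlusBuilder is hypothetical and does not exist in this commit):

  // Hypothetical extension of the dispatch for an additional architecture.
  if (Arch == Triple::riscv64)
    return createRISCVMCPlusBuilder(Analysis, Info, RegInfo);
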
constexpr const char *RewriteInstance::SectionsToOverwrite[];
const std::string RewriteInstance::OrgSecPrefix = ".bolt.org";
@ -645,13 +668,22 @@ createBinaryContext(ELFObjectFileBase *File, DataReader &DR,
}
std::unique_ptr<const MCInstrAnalysis> MIA(
TheTarget->createMCInstrAnalysis(MII.get(), MRI.get()));
TheTarget->createMCInstrAnalysis(MII.get()));
if (!MIA) {
errs() << "BOLT-ERROR: failed to create instruction analysis for target"
<< TripleName << "\n";
return nullptr;
}
std::unique_ptr<const MCPlusBuilder> MIB(
createMCPlusBuilder(Arch, MIA.get(), MII.get(), MRI.get()));
if (!MIB) {
errs() << "BOLT-ERROR: failed to create instruction builder for target"
<< TripleName << "\n";
return nullptr;
}
int AsmPrinterVariant = AsmInfo->getAssemblerDialect();
std::unique_ptr<MCInstPrinter> InstructionPrinter(
TheTarget->createMCInstPrinter(Triple(TripleName), AsmPrinterVariant,
@ -684,6 +716,7 @@ createBinaryContext(ELFObjectFileBase *File, DataReader &DR,
std::move(STI),
std::move(InstructionPrinter),
std::move(MIA),
std::move(MIB),
std::move(MRI),
std::move(DisAsm),
DR);
@ -1465,11 +1498,11 @@ void RewriteInstance::disassemblePLT() {
exit(1);
}
if (!BC->MIA->isIndirectBranch(Instruction))
if (!BC->MIB->isIndirectBranch(Instruction))
continue;
uint64_t TargetAddress;
if (!BC->MIA->evaluateMemOperandTarget(Instruction,
if (!BC->MIB->evaluateMemOperandTarget(Instruction,
TargetAddress,
InstrAddr,
InstrSize)) {

File diff suppressed because it is too large


@ -0,0 +1,11 @@
add_llvm_library(LLVMBOLTTargetAArch64
AArch64MCPlusBuilder.cpp
DEPENDS
intrinsics_gen
AArch64CommonTableGen
)
include_directories(${LLVM_MAIN_SRC_DIR}/lib/Target/AArch64 ${LLVM_BINARY_DIR}/lib/Target/AArch64)
include_directories(${LLVM_MAIN_SRC_DIR}/tools/llvm-bolt)


@ -0,0 +1,2 @@
add_subdirectory(AArch64)
add_subdirectory(X86)


@ -0,0 +1,11 @@
add_llvm_library(LLVMBOLTTargetX86
X86MCPlusBuilder.cpp
DEPENDS
intrinsics_gen
X86CommonTableGen
)
include_directories(${LLVM_MAIN_SRC_DIR}/lib/Target/X86 ${LLVM_BINARY_DIR}/lib/Target/X86)
include_directories(${LLVM_MAIN_SRC_DIR}/tools/llvm-bolt)

File diff suppressed because it is too large