[BOLT][NFC] Do not pass BinaryContext alongside BinaryFunction

Summary:
BinaryContext is available via BinaryFunction::getBinaryContext(),
hence there's no reason to pass both as arguments to a function.

In a similar fashion, BinaryBasicBlock has access to BinaryFunction
via getFunction(). Eliminate unneeded arguments.

(cherry picked from FBD31921680)
This commit is contained in:
Maksim Panchenko 2021-10-26 00:06:34 -07:00
parent 0559dab546
commit 60b0999723
38 changed files with 335 additions and 402 deletions

View File

@ -32,7 +32,7 @@ public:
/// Pass entry point
void runOnFunctions(BinaryContext &BC) override;
void runOnFunction(BinaryContext &BC, BinaryFunction &BF);
void runOnFunction(BinaryFunction &BF);
};
} // namespace bolt

View File

@ -23,9 +23,7 @@ class AllocCombinerPass : public BinaryFunctionPass {
uint64_t NumCombined{0};
DenseSet<const BinaryFunction *> FuncsChanged;
void combineAdjustments(BinaryContext &BC, BinaryFunction &BF);
void coalesceEmptySpace(BinaryContext &BC, BinaryFunction &BF,
DataflowInfoManager &Info, FrameAnalysis &FA);
void combineAdjustments(BinaryFunction &BF);
public:
explicit AllocCombinerPass(const cl::opt<bool> &PrintPass)

View File

@ -265,8 +265,9 @@ class SimplifyConditionalTailCalls : public BinaryFunctionPass {
const BinaryBasicBlock *BB,
const bool DirectionFlag);
uint64_t fixTailCalls(BinaryContext &BC, BinaryFunction &BF);
public:
uint64_t fixTailCalls(BinaryFunction &BF);
public:
explicit SimplifyConditionalTailCalls(const cl::opt<bool> &PrintPass)
: BinaryFunctionPass(PrintPass) { }
@ -293,12 +294,13 @@ class Peepholes : public BinaryFunctionPass {
/// Add trap instructions immediately after indirect tail calls to prevent
the processor from decoding instructions immediately following the
/// tailcall.
void addTailcallTraps(BinaryContext &BC, BinaryFunction &Function);
void addTailcallTraps(BinaryFunction &Function);
/// Remove useless duplicate successors. When the conditional
/// successor is the same as the unconditional successor, we can
/// remove the conditional successor and branch instruction.
void removeUselessCondBranches(BinaryContext &BC, BinaryFunction &Function);
void removeUselessCondBranches(BinaryFunction &Function);
public:
explicit Peepholes(const cl::opt<bool> &PrintPass)
: BinaryFunctionPass(PrintPass) { }
@ -327,7 +329,7 @@ class SimplifyRODataLoads : public BinaryFunctionPass {
uint64_t NumDynamicLoadsFound{0};
std::unordered_set<const BinaryFunction *> Modified;
bool simplifyRODataLoads(BinaryContext &BC, BinaryFunction &BF);
bool simplifyRODataLoads(BinaryFunction &BF);
public:
explicit SimplifyRODataLoads(const cl::opt<bool> &PrintPass)

View File

@ -93,7 +93,7 @@ public:
/// Convenience function to operate on all predecessors of a BB, as viewed
/// by a dataflow analysis. This includes throw sites if it is a landing pad.
void doForAllPreds(const BinaryContext &BC, const BinaryBasicBlock &BB,
void doForAllPreds(const BinaryBasicBlock &BB,
std::function<void(ProgramPoint)> Task);
/// Operates on all successors of a basic block.
@ -264,15 +264,15 @@ public:
void doForAllSuccsOrPreds(const BinaryBasicBlock &BB,
std::function<void(ProgramPoint)> Task) {
if (!Backward)
return doForAllPreds(BC, BB, Task);
return doForAllPreds(BB, Task);
return doForAllSuccs(BB, Task);
}
/// We need the current binary context and the function that will be processed
/// in this dataflow analysis.
DataflowAnalysis(const BinaryContext &BC, BinaryFunction &BF,
DataflowAnalysis(BinaryFunction &BF,
MCPlusBuilder::AllocatorIdTy AllocatorId = 0)
: BC(BC), Func(BF), AllocatorId(AllocatorId) {}
: BC(BF.getBinaryContext()), Func(BF), AllocatorId(AllocatorId) {}
virtual ~DataflowAnalysis() {
cleanAnnotations();
@ -550,10 +550,10 @@ public:
return count(*Expressions[PointIdx], Expr);
}
InstrsDataflowAnalysis(const BinaryContext &BC, BinaryFunction &BF,
InstrsDataflowAnalysis(BinaryFunction &BF,
MCPlusBuilder::AllocatorIdTy AllocId = 0)
: DataflowAnalysis<Derived, BitVector, Backward, StatePrinterTy>(
BC, BF, AllocId) {}
BF, AllocId) {}
virtual ~InstrsDataflowAnalysis() {}
};

View File

@ -51,10 +51,11 @@ class DataflowInfoManager {
MCPlusBuilder::AllocatorIdTy AllocatorId;
public:
DataflowInfoManager(const BinaryContext &BC, BinaryFunction &BF,
const RegAnalysis *RA, const FrameAnalysis *FA,
DataflowInfoManager(BinaryFunction &BF, const RegAnalysis *RA,
const FrameAnalysis *FA,
MCPlusBuilder::AllocatorIdTy AllocId = 0)
: RA(RA), FA(FA), BC(BC), BF(BF), AllocatorId(AllocId){};
: RA(RA), FA(FA), BC(BF.getBinaryContext()), BF(BF),
AllocatorId(AllocId){};
/// Helper function to fetch the parent BB associated with a program point
/// If PP is a BB itself, then return itself (cast to a BinaryBasicBlock)

View File

@ -34,9 +34,8 @@ class DominatorAnalysis
Backward>;
public:
DominatorAnalysis(const BinaryContext &BC, BinaryFunction &BF,
MCPlusBuilder::AllocatorIdTy AllocId)
: InstrsDataflowAnalysis<DominatorAnalysis<Backward>, Backward>(BC, BF,
DominatorAnalysis(BinaryFunction &BF, MCPlusBuilder::AllocatorIdTy AllocId)
: InstrsDataflowAnalysis<DominatorAnalysis<Backward>, Backward>(BF,
AllocId) {
}
virtual ~DominatorAnalysis() {}

View File

@ -209,7 +209,7 @@ public:
/// Get or create an SPT object and run the analysis
StackPointerTracking &getSPT(BinaryFunction &BF) {
if (!SPTMap.count(&BF)) {
SPTMap.emplace(&BF, std::make_unique<StackPointerTracking>(BC, BF));
SPTMap.emplace(&BF, std::make_unique<StackPointerTracking>(BF));
auto Iter = SPTMap.find(&BF);
assert(Iter != SPTMap.end() && "item should exist");
Iter->second->run();

View File

@ -92,12 +92,10 @@ class FrameOptimizerPass : public BinaryFunctionPass {
/// immediate loads. Delete redundant register moves.
void removeUnnecessaryLoads(const RegAnalysis &RA,
const FrameAnalysis &FA,
const BinaryContext &BC,
BinaryFunction &BF);
/// Use information from stack frame usage to delete unused stores.
void removeUnusedStores(const FrameAnalysis &FA,
const BinaryContext &BC,
BinaryFunction &BF);
/// Perform shrinkwrapping step

View File

@ -183,56 +183,38 @@ class IndirectCallPromotion : public BinaryFunctionPass {
std::vector<Callsite> getCallTargets(BinaryBasicBlock &BB,
const MCInst &Inst) const;
size_t canPromoteCallsite(const BinaryBasicBlock *BB,
const MCInst &Inst,
size_t canPromoteCallsite(const BinaryBasicBlock &BB, const MCInst &Inst,
const std::vector<Callsite> &Targets,
uint64_t NumCalls);
void printCallsiteInfo(const BinaryBasicBlock *BB,
const MCInst &Inst,
const std::vector<Callsite> &Targets,
const size_t N,
void printCallsiteInfo(const BinaryBasicBlock &BB, const MCInst &Inst,
const std::vector<Callsite> &Targets, const size_t N,
uint64_t NumCalls) const;
JumpTableInfoType
maybeGetHotJumpTableTargets(BinaryContext &BC,
BinaryFunction &Function,
BinaryBasicBlock *BB,
MCInst &Inst,
MCInst *&TargetFetchInst,
const JumpTable *JT) const;
JumpTableInfoType maybeGetHotJumpTableTargets(BinaryBasicBlock &BB,
MCInst &Inst,
MCInst *&TargetFetchInst,
const JumpTable *JT) const;
SymTargetsType findCallTargetSymbols(BinaryContext &BC,
std::vector<Callsite> &Targets,
size_t &N,
BinaryFunction &Function,
BinaryBasicBlock *BB,
SymTargetsType findCallTargetSymbols(std::vector<Callsite> &Targets,
size_t &N, BinaryBasicBlock &BB,
MCInst &Inst,
MCInst *&TargetFetchInst) const;
MethodInfoType maybeGetVtableSyms(BinaryContext &BC,
BinaryFunction &Function,
BinaryBasicBlock *BB,
MCInst &Inst,
MethodInfoType maybeGetVtableSyms(BinaryBasicBlock &BB, MCInst &Inst,
const SymTargetsType &SymTargets) const;
std::vector<std::unique_ptr<BinaryBasicBlock>>
rewriteCall(BinaryContext &BC,
BinaryFunction &Function,
BinaryBasicBlock *IndCallBlock,
const MCInst &CallInst,
rewriteCall(BinaryBasicBlock &IndCallBlock, const MCInst &CallInst,
MCPlusBuilder::BlocksVectorTy &&ICPcode,
const std::vector<MCInst *> &MethodFetchInsns) const;
BinaryBasicBlock *fixCFG(BinaryContext &BC,
BinaryFunction &Function,
BinaryBasicBlock *IndCallBlock,
const bool IsTailCall,
const bool IsJumpTable,
BinaryBasicBlock *fixCFG(BinaryBasicBlock &IndCallBlock,
const bool IsTailCall, const bool IsJumpTable,
BasicBlocksVector &&NewBBs,
const std::vector<Callsite> &Targets) const;
public:
public:
explicit IndirectCallPromotion(const cl::opt<bool> &PrintPass)
: BinaryFunctionPass(PrintPass) { }

View File

@ -34,8 +34,7 @@ public:
const char *getName() const override { return "instrumentation"; }
private:
void instrumentFunction(BinaryContext &BC, BinaryFunction &Function,
void instrumentFunction(BinaryFunction &Function,
MCPlusBuilder::AllocatorIdTy = 0);
/// Retrieve the string table index for the name of \p Function. We encode
@ -90,9 +89,9 @@ private:
FunctionDescription *FuncDesc,
uint32_t FromNodeID, uint32_t ToNodeID = 0);
void instrumentLeafNode(BinaryContext &BC, BinaryBasicBlock &BB,
BinaryBasicBlock::iterator Iter, bool IsLeaf,
FunctionDescription &FuncDesc, uint32_t Node);
void instrumentLeafNode(BinaryBasicBlock &BB, BinaryBasicBlock::iterator Iter,
bool IsLeaf, FunctionDescription &FuncDesc,
uint32_t Node);
void instrumentIndirectTarget(BinaryBasicBlock &BB,
BinaryBasicBlock::iterator &Iter,

View File

@ -39,8 +39,7 @@ class JTFootprintReduction : public BinaryFunctionPass {
/// Check if \p Function presents jump tables where all jump locations can
/// be safely changed to use a different code sequence. If this is true, we
/// will be able to emit the whole table with a smaller entry size.
void checkOpportunities(BinaryContext &BC, BinaryFunction &Function,
DataflowInfoManager &Info);
void checkOpportunities(BinaryFunction &Function, DataflowInfoManager &Info);
/// The Non-PIC jump table optimization consists of reducing the jump table
/// entry size from 8 to 4 bytes. For that, we need to change the jump code
@ -60,8 +59,7 @@ class JTFootprintReduction : public BinaryFunctionPass {
DataflowInfoManager &Info);
/// Run a pass for \p Function
void optimizeFunction(BinaryContext &BC, BinaryFunction &Function,
DataflowInfoManager &Info);
void optimizeFunction(BinaryFunction &Function, DataflowInfoManager &Info);
public:
explicit JTFootprintReduction(const cl::opt<bool> &PrintPass)

View File

@ -35,9 +35,10 @@ class LivenessAnalysis
RegStatePrinter>;
public:
LivenessAnalysis(const RegAnalysis &RA, const BinaryContext &BC,
BinaryFunction &BF, MCPlusBuilder::AllocatorIdTy AllocId)
: Parent(BC, BF, AllocId), RA(RA), NumRegs(BC.MRI->getNumRegs()) {}
LivenessAnalysis(const RegAnalysis &RA, BinaryFunction &BF,
MCPlusBuilder::AllocatorIdTy AllocId)
: Parent(BF, AllocId), RA(RA),
NumRegs(BF.getBinaryContext().MRI->getNumRegs()) {}
virtual ~LivenessAnalysis();
bool isAlive(ProgramPoint PP, MCPhysReg Reg) const {

View File

@ -34,10 +34,10 @@ class ReachingDefOrUse
friend class DataflowAnalysis<ReachingDefOrUse<Def>, BitVector, !Def>;
public:
ReachingDefOrUse(const RegAnalysis &RA, const BinaryContext &BC,
BinaryFunction &BF, Optional<MCPhysReg> TrackingReg = None,
ReachingDefOrUse(const RegAnalysis &RA, BinaryFunction &BF,
Optional<MCPhysReg> TrackingReg = None,
MCPlusBuilder::AllocatorIdTy AllocId = 0)
: InstrsDataflowAnalysis<ReachingDefOrUse<Def>, !Def>(BC, BF, AllocId),
: InstrsDataflowAnalysis<ReachingDefOrUse<Def>, !Def>(BF, AllocId),
RA(RA), TrackingReg(TrackingReg) {}
virtual ~ReachingDefOrUse() {}

View File

@ -28,9 +28,8 @@ class ReachingInsns
friend class DataflowAnalysis<ReachingInsns<Backward>, BitVector, Backward>;
public:
ReachingInsns(const BinaryContext &BC, BinaryFunction &BF,
MCPlusBuilder::AllocatorIdTy AllocId = 0)
: InstrsDataflowAnalysis<ReachingInsns, Backward>(BC, BF, AllocId) {}
ReachingInsns(BinaryFunction &BF, MCPlusBuilder::AllocatorIdTy AllocId = 0)
: InstrsDataflowAnalysis<ReachingInsns, Backward>(BF, AllocId) {}
virtual ~ReachingInsns() {}
bool isInLoop(const BinaryBasicBlock &BB) {

View File

@ -34,12 +34,10 @@ class RegReAssign : public BinaryFunctionPass {
int64_t StaticBytesSaved{0};
int64_t DynBytesSaved{0};
void swap(BinaryContext &BC, BinaryFunction &Function, MCPhysReg A,
MCPhysReg B);
void rankRegisters(BinaryContext &BC, BinaryFunction &Function);
void aggressivePassOverFunction(BinaryContext &BC, BinaryFunction &Function);
bool conservativePassOverFunction(BinaryContext &BC,
BinaryFunction &Function);
void swap(BinaryFunction &Function, MCPhysReg A, MCPhysReg B);
void rankRegisters(BinaryFunction &Function);
void aggressivePassOverFunction(BinaryFunction &Function);
bool conservativePassOverFunction(BinaryFunction &Function);
void setupAggressivePass(BinaryContext &BC,
std::map<uint64_t, BinaryFunction> &BFs);
void setupConservativePass(BinaryContext &BC,

View File

@ -61,11 +61,11 @@ public:
std::vector<const FrameIndexEntry*> SaveFIEByReg;
std::vector<const FrameIndexEntry*> LoadFIEByReg;
CalleeSavedAnalysis(const FrameAnalysis &FA, const BinaryContext &BC,
BinaryFunction &BF, DataflowInfoManager &Info,
CalleeSavedAnalysis(const FrameAnalysis &FA, BinaryFunction &BF,
DataflowInfoManager &Info,
MCPlusBuilder::AllocatorIdTy AllocId)
: FA(FA), BC(BC), BF(BF), Info(Info), AllocatorId(AllocId),
CalleeSaved(BC.MRI->getNumRegs(), false),
: FA(FA), BC(BF.getBinaryContext()), BF(BF), Info(Info),
AllocatorId(AllocId), CalleeSaved(BC.MRI->getNumRegs(), false),
OffsetsByReg(BC.MRI->getNumRegs(), 0LL),
HasRestores(BC.MRI->getNumRegs(), false),
SavingCost(BC.MRI->getNumRegs(), 0ULL),
@ -221,10 +221,11 @@ private:
}
public:
StackLayoutModifier(const FrameAnalysis &FA, const BinaryContext &BC,
BinaryFunction &BF, DataflowInfoManager &Info,
StackLayoutModifier(const FrameAnalysis &FA, BinaryFunction &BF,
DataflowInfoManager &Info,
MCPlusBuilder::AllocatorIdTy AllocId)
: FA(FA), BC(BC), BF(BF), Info(Info), AllocatorId(AllocId) {}
: FA(FA), BC(BF.getBinaryContext()), BF(BF), Info(Info),
AllocatorId(AllocId) {}
~StackLayoutModifier() {
for (BinaryBasicBlock &BB : BF) {
@ -516,11 +517,12 @@ private:
void processDeletions();
public:
ShrinkWrapping(const FrameAnalysis &FA, const BinaryContext &BC,
BinaryFunction &BF, DataflowInfoManager &Info,
ShrinkWrapping(const FrameAnalysis &FA, BinaryFunction &BF,
DataflowInfoManager &Info,
MCPlusBuilder::AllocatorIdTy AllocId)
: FA(FA), BC(BC), BF(BF), Info(Info), AllocatorId(AllocId),
SLM(FA, BC, BF, Info, AllocId), CSA(FA, BC, BF, Info, AllocId) {}
: FA(FA), BC(BF.getBinaryContext()), BF(BF), Info(Info),
AllocatorId(AllocId), SLM(FA, BF, Info, AllocId),
CSA(FA, BF, Info, AllocId) {}
~ShrinkWrapping() {
for (BinaryBasicBlock &BB : BF) {

View File

@ -32,10 +32,9 @@ class StackAllocationAnalysis
StackPointerTracking &SPT;
public:
StackAllocationAnalysis(const BinaryContext &BC, BinaryFunction &BF,
StackPointerTracking &SPT,
StackAllocationAnalysis(BinaryFunction &BF, StackPointerTracking &SPT,
MCPlusBuilder::AllocatorIdTy AllocId)
: InstrsDataflowAnalysis<StackAllocationAnalysis, false>(BC, BF, AllocId),
: InstrsDataflowAnalysis<StackAllocationAnalysis, false>(BF, AllocId),
SPT(SPT) {}
virtual ~StackAllocationAnalysis() {}

View File

@ -30,7 +30,7 @@ class StackAvailableExpressions
public:
StackAvailableExpressions(const RegAnalysis &RA, const FrameAnalysis &FA,
const BinaryContext &BC, BinaryFunction &BF);
BinaryFunction &BF);
virtual ~StackAvailableExpressions() {}
void run() {

View File

@ -181,9 +181,9 @@ protected:
}
public:
StackPointerTrackingBase(const BinaryContext &BC, BinaryFunction &BF,
StackPointerTrackingBase(BinaryFunction &BF,
MCPlusBuilder::AllocatorIdTy AllocatorId = 0)
: DataflowAnalysis<Derived, std::pair<int, int>>(BC, BF, AllocatorId) {}
: DataflowAnalysis<Derived, std::pair<int, int>>(BF, AllocatorId) {}
virtual ~StackPointerTrackingBase() {}
@ -198,7 +198,7 @@ class StackPointerTracking
friend class DataflowAnalysis<StackPointerTracking, std::pair<int, int>>;
public:
StackPointerTracking(const BinaryContext &BC, BinaryFunction &BF,
StackPointerTracking(BinaryFunction &BF,
MCPlusBuilder::AllocatorIdTy AllocatorId = 0);
virtual ~StackPointerTracking() {}

View File

@ -29,10 +29,9 @@ class StackReachingUses
friend class DataflowAnalysis<StackReachingUses, BitVector, true>;
public:
StackReachingUses(const FrameAnalysis &FA, const BinaryContext &BC,
BinaryFunction &BF,
StackReachingUses(const FrameAnalysis &FA, BinaryFunction &BF,
MCPlusBuilder::AllocatorIdTy AllocId = 0)
: InstrsDataflowAnalysis(BC, BF, AllocId), FA(FA) {}
: InstrsDataflowAnalysis(BF, AllocId), FA(FA) {}
virtual ~StackReachingUses() {}
/// Return true if the stack position written by the store in \p StoreFIE was

View File

@ -130,13 +130,11 @@ public:
return "stoke-get-stat";
}
void checkInstr(const BinaryContext &BC, const BinaryFunction &BF,
StokeFuncInfo &FuncInfo);
void checkInstr(const BinaryFunction &BF, StokeFuncInfo &FuncInfo);
/// Get all required information for the stoke optimization
bool checkFunction(const BinaryContext &BC, BinaryFunction &BF,
DataflowInfoManager &DInfo, RegAnalysis &RA,
StokeFuncInfo &FuncInfo);
bool checkFunction(BinaryFunction &BF, DataflowInfoManager &DInfo,
RegAnalysis &RA, StokeFuncInfo &FuncInfo);
void runOnFunctions(BinaryContext &BC) override;
};

View File

@ -26,7 +26,8 @@ static cl::opt<bool>
namespace llvm {
namespace bolt {
void ADRRelaxationPass::runOnFunction(BinaryContext &BC, BinaryFunction &BF) {
void ADRRelaxationPass::runOnFunction(BinaryFunction &BF) {
BinaryContext &BC = BF.getBinaryContext();
for (BinaryBasicBlock *BB : BF.layout()) {
for (auto It = BB->begin(); It != BB->end(); ++It) {
MCInst &Inst = *It;
@ -62,7 +63,7 @@ void ADRRelaxationPass::runOnFunctions(BinaryContext &BC) {
return;
ParallelUtilities::WorkFuncTy WorkFun = [&](BinaryFunction &BF) {
runOnFunction(BC, BF);
runOnFunction(BF);
};
ParallelUtilities::runOnEachFunction(

View File

@ -66,8 +66,8 @@ void runForAllWeCare(std::map<uint64_t, BinaryFunction> &BFs,
} // end anonymous namespace
void AllocCombinerPass::combineAdjustments(BinaryContext &BC,
BinaryFunction &BF) {
void AllocCombinerPass::combineAdjustments(BinaryFunction &BF) {
BinaryContext &BC = BF.getBinaryContext();
for (BinaryBasicBlock &BB : BF) {
MCInst *Prev = nullptr;
for (auto I = BB.rbegin(), E = BB.rend(); I != E; ++I) {
@ -112,9 +112,9 @@ void AllocCombinerPass::runOnFunctions(BinaryContext &BC) {
if (opts::FrameOptimization == FOP_NONE)
return;
runForAllWeCare(
BC.getBinaryFunctions(),
[&](BinaryFunction &Function) { combineAdjustments(BC, Function); });
runForAllWeCare(BC.getBinaryFunctions(), [&](BinaryFunction &Function) {
combineAdjustments(Function);
});
outs() << "BOLT-INFO: Allocation combiner: " << NumCombined
<< " empty spaces coalesced.\n";

View File

@ -588,11 +588,11 @@ namespace {
// B0: ...
// jmp B2 (or jcc B2)
//
uint64_t fixDoubleJumps(BinaryContext &BC,
BinaryFunction &Function,
bool MarkInvalid) {
uint64_t fixDoubleJumps(BinaryFunction &Function, bool MarkInvalid) {
uint64_t NumDoubleJumps = 0;
MCContext *Ctx = Function.getBinaryContext().Ctx.get();
MCPlusBuilder *MIB = Function.getBinaryContext().MIB.get();
for (BinaryBasicBlock &BB : Function) {
auto checkAndPatch = [&](BinaryBasicBlock *Pred,
BinaryBasicBlock *Succ,
@ -616,15 +616,13 @@ uint64_t fixDoubleJumps(BinaryContext &BC,
// We must patch up any existing branch instructions to match up
// with the new successor.
MCContext *Ctx = BC.Ctx.get();
assert((CondBranch || (!CondBranch && Pred->succ_size() == 1)) &&
"Predecessor block has inconsistent number of successors");
if (CondBranch &&
BC.MIB->getTargetSymbol(*CondBranch) == BB.getLabel()) {
BC.MIB->replaceBranchTarget(*CondBranch, Succ->getLabel(), Ctx);
if (CondBranch && MIB->getTargetSymbol(*CondBranch) == BB.getLabel()) {
MIB->replaceBranchTarget(*CondBranch, Succ->getLabel(), Ctx);
} else if (UncondBranch &&
BC.MIB->getTargetSymbol(*UncondBranch) == BB.getLabel()) {
BC.MIB->replaceBranchTarget(*UncondBranch, Succ->getLabel(), Ctx);
MIB->getTargetSymbol(*UncondBranch) == BB.getLabel()) {
MIB->replaceBranchTarget(*UncondBranch, Succ->getLabel(), Ctx);
} else if (!UncondBranch) {
assert(Function.getBasicBlockAfter(Pred, false) != Succ &&
"Don't add an explicit jump to a fallthrough block.");
@ -634,8 +632,8 @@ uint64_t fixDoubleJumps(BinaryContext &BC,
// Succ will be null in the tail call case. In this case we
// need to explicitly add a tail call instruction.
MCInst *Branch = Pred->getLastNonPseudoInstr();
if (Branch && BC.MIB->isUnconditionalBranch(*Branch)) {
assert(BC.MIB->getTargetSymbol(*Branch) == BB.getLabel());
if (Branch && MIB->isUnconditionalBranch(*Branch)) {
assert(MIB->getTargetSymbol(*Branch) == BB.getLabel());
Pred->removeSuccessor(&BB);
Pred->eraseInstruction(Pred->findInstruction(Branch));
Pred->addTailCallInstruction(SuccSym);
@ -657,16 +655,16 @@ uint64_t fixDoubleJumps(BinaryContext &BC,
continue;
MCInst *Inst = BB.getFirstNonPseudoInstr();
const bool IsTailCall = BC.MIB->isTailCall(*Inst);
const bool IsTailCall = MIB->isTailCall(*Inst);
if (!BC.MIB->isUnconditionalBranch(*Inst) && !IsTailCall)
if (!MIB->isUnconditionalBranch(*Inst) && !IsTailCall)
continue;
// If we operate after SCTC make sure it's not a conditional tail call.
if (IsTailCall && BC.MIB->isConditionalBranch(*Inst))
if (IsTailCall && MIB->isConditionalBranch(*Inst))
continue;
const MCSymbol *SuccSym = BC.MIB->getTargetSymbol(*Inst);
const MCSymbol *SuccSym = MIB->getTargetSymbol(*Inst);
BinaryBasicBlock *Succ = BB.getSuccessor();
if (((!Succ || &BB == Succ) && !IsTailCall) || (IsTailCall && !SuccSym))
@ -692,7 +690,6 @@ uint64_t fixDoubleJumps(BinaryContext &BC,
return NumDoubleJumps;
}
}
bool SimplifyConditionalTailCalls::shouldRewriteBranch(
@ -736,13 +733,13 @@ bool SimplifyConditionalTailCalls::shouldRewriteBranch(
return Result == DirectionFlag;
}
uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
BinaryFunction &BF) {
uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryFunction &BF) {
// Need updated indices to correctly detect branch' direction.
BF.updateLayoutIndices();
BF.markUnreachableBlocks();
auto &MIB = BC.MIB;
MCPlusBuilder *MIB = BF.getBinaryContext().MIB.get();
MCContext *Ctx = BF.getBinaryContext().Ctx.get();
uint64_t NumLocalCTCCandidates = 0;
uint64_t NumLocalCTCs = 0;
uint64_t LocalCTCTakenCount = 0;
@ -763,7 +760,7 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
continue;
MCInst *Instr = BB->getFirstNonPseudoInstr();
if (!MIB->isTailCall(*Instr) || BC.MIB->isConditionalBranch(*Instr))
if (!MIB->isTailCall(*Instr) || MIB->isConditionalBranch(*Instr))
continue;
const MCSymbol *CalleeSymbol = MIB->getTargetSymbol(*Instr);
@ -819,7 +816,7 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
uint64_t Count = 0;
if (CondSucc != BB) {
// Patch the new target address into the conditional branch.
MIB->reverseBranchCondition(*CondBranch, CalleeSymbol, BC.Ctx.get());
MIB->reverseBranchCondition(*CondBranch, CalleeSymbol, Ctx);
// Since we reversed the condition on the branch we need to change
// the target for the unconditional branch or add an unconditional
// branch to the old target. This has to be done manually since
@ -828,7 +825,7 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
Count = PredBB->getFallthroughBranchInfo().Count;
} else {
// Change destination of the conditional branch.
MIB->replaceBranchTarget(*CondBranch, CalleeSymbol, BC.Ctx.get());
MIB->replaceBranchTarget(*CondBranch, CalleeSymbol, Ctx);
Count = PredBB->getTakenBranchInfo().Count;
}
const uint64_t CTCTakenFreq =
@ -838,8 +835,8 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
MIB->setConditionalTailCall(*CondBranch);
// Add info about the conditional tail call frequency, otherwise this
// info will be lost when we delete the associated BranchInfo entry
auto &CTCAnnotation = BC.MIB->getOrCreateAnnotationAs<uint64_t>(
*CondBranch, "CTCTakenCount");
auto &CTCAnnotation =
MIB->getOrCreateAnnotationAs<uint64_t>(*CondBranch, "CTCTakenCount");
CTCAnnotation = CTCTakenFreq;
// Remove the unused successor which may be eliminated later
@ -890,18 +887,16 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryContext &BC,
if (HasFallthrough)
PredBB->eraseInstruction(PredBB->findInstruction(UncondBranch));
else
MIB->replaceBranchTarget(*UncondBranch,
CondSucc->getLabel(),
BC.Ctx.get());
MIB->replaceBranchTarget(*UncondBranch, CondSucc->getLabel(), Ctx);
} else if (!HasFallthrough) {
MCInst Branch;
MIB->createUncondBranch(Branch, CondSucc->getLabel(), BC.Ctx.get());
MIB->createUncondBranch(Branch, CondSucc->getLabel(), Ctx);
PredBB->addInstruction(Branch);
}
}
if (NumLocalCTCs > 0) {
NumDoubleJumps += fixDoubleJumps(BC, BF, true);
NumDoubleJumps += fixDoubleJumps(BF, true);
// Clean-up unreachable tail-call blocks.
const std::pair<unsigned, uint64_t> Stats = BF.eraseInvalidBBs();
DeletedBlocks += Stats.first;
@ -935,7 +930,7 @@ void SimplifyConditionalTailCalls::runOnFunctions(BinaryContext &BC) {
if (!shouldOptimize(Function))
continue;
if (fixTailCalls(BC, Function)) {
if (fixTailCalls(Function)) {
Modified.insert(&Function);
Function.setHasCanonicalCFG(false);
}
@ -977,13 +972,13 @@ uint64_t Peepholes::shortenInstructions(BinaryContext &BC,
return Count;
}
void Peepholes::addTailcallTraps(BinaryContext &BC,
BinaryFunction &Function) {
void Peepholes::addTailcallTraps(BinaryFunction &Function) {
MCPlusBuilder *MIB = Function.getBinaryContext().MIB.get();
for (BinaryBasicBlock &BB : Function) {
MCInst *Inst = BB.getLastNonPseudoInstr();
if (Inst && BC.MIB->isTailCall(*Inst) && BC.MIB->isIndirectBranch(*Inst)) {
if (Inst && MIB->isTailCall(*Inst) && MIB->isIndirectBranch(*Inst)) {
MCInst Trap;
if (BC.MIB->createTrap(Trap)) {
if (MIB->createTrap(Trap)) {
BB.addInstruction(Trap);
++TailCallTraps;
}
@ -991,8 +986,7 @@ void Peepholes::addTailcallTraps(BinaryContext &BC,
}
}
void Peepholes::removeUselessCondBranches(BinaryContext &BC,
BinaryFunction &Function) {
void Peepholes::removeUselessCondBranches(BinaryFunction &Function) {
for (BinaryBasicBlock &BB : Function) {
if (BB.succ_size() != 2)
continue;
@ -1035,11 +1029,11 @@ void Peepholes::runOnFunctions(BinaryContext &BC) {
if (Opts & opts::PEEP_SHORTEN)
NumShortened += shortenInstructions(BC, Function);
if (Opts & opts::PEEP_DOUBLE_JUMPS)
NumDoubleJumps += fixDoubleJumps(BC, Function, false);
NumDoubleJumps += fixDoubleJumps(Function, false);
if (Opts & opts::PEEP_TAILCALL_TRAPS)
addTailcallTraps(BC, Function);
addTailcallTraps(Function);
if (Opts & opts::PEEP_USELESS_BRANCHES)
removeUselessCondBranches(BC, Function);
removeUselessCondBranches(Function);
assert(Function.validateCFG());
}
}
@ -1053,9 +1047,9 @@ void Peepholes::runOnFunctions(BinaryContext &BC) {
<< " useless conditional branches removed.\n";
}
bool SimplifyRODataLoads::simplifyRODataLoads(
BinaryContext &BC, BinaryFunction &BF) {
auto &MIB = BC.MIB;
bool SimplifyRODataLoads::simplifyRODataLoads(BinaryFunction &BF) {
BinaryContext &BC = BF.getBinaryContext();
MCPlusBuilder *MIB = BC.MIB.get();
uint64_t NumLocalLoadsSimplified = 0;
uint64_t NumDynamicLocalLoadsSimplified = 0;
@ -1086,7 +1080,7 @@ bool SimplifyRODataLoads::simplifyRODataLoads(
uint64_t DisplOffset;
std::tie(DisplSymbol, DisplOffset) =
BC.MIB->getTargetSymbolInfo(DispOpI->getExpr());
MIB->getTargetSymbolInfo(DispOpI->getExpr());
if (!DisplSymbol)
continue;
@ -1138,7 +1132,7 @@ bool SimplifyRODataLoads::simplifyRODataLoads(
void SimplifyRODataLoads::runOnFunctions(BinaryContext &BC) {
for (auto &It : BC.getBinaryFunctions()) {
BinaryFunction &Function = It.second;
if (shouldOptimize(Function) && simplifyRODataLoads(BC, Function)) {
if (shouldOptimize(Function) && simplifyRODataLoads(Function)) {
Modified.insert(&Function);
}
}

View File

@ -42,8 +42,9 @@ raw_ostream &operator<<(raw_ostream &OS, const BitVector &State) {
namespace bolt {
void doForAllPreds(const BinaryContext &BC, const BinaryBasicBlock &BB,
void doForAllPreds(const BinaryBasicBlock &BB,
std::function<void(ProgramPoint)> Task) {
MCPlusBuilder *MIB = BB.getFunction()->getBinaryContext().MIB.get();
for (BinaryBasicBlock *Pred : BB.predecessors()) {
if (Pred->isValid())
Task(ProgramPoint::getLastPointAt(*Pred));
@ -52,9 +53,9 @@ void doForAllPreds(const BinaryContext &BC, const BinaryBasicBlock &BB,
return;
for (BinaryBasicBlock *Thrower : BB.throwers()) {
for (MCInst &Inst : *Thrower) {
if (!BC.MIB->isInvoke(Inst))
if (!MIB->isInvoke(Inst))
continue;
const Optional<MCPlus::MCLandingPad> EHInfo = BC.MIB->getEHInfo(Inst);
const Optional<MCPlus::MCLandingPad> EHInfo = MIB->getEHInfo(Inst);
if (!EHInfo || EHInfo->first != BB.getLabel())
continue;
Task(ProgramPoint(&Inst));

View File

@ -17,7 +17,7 @@ ReachingDefOrUse</*Def=*/true> &DataflowInfoManager::getReachingDefs() {
if (RD)
return *RD;
assert(RA && "RegAnalysis required");
RD.reset(new ReachingDefOrUse<true>(*RA, BC, BF, None, AllocatorId));
RD.reset(new ReachingDefOrUse<true>(*RA, BF, None, AllocatorId));
RD->run();
return *RD;
}
@ -30,7 +30,7 @@ ReachingDefOrUse</*Def=*/false> &DataflowInfoManager::getReachingUses() {
if (RU)
return *RU;
assert(RA && "RegAnalysis required");
RU.reset(new ReachingDefOrUse<false>(*RA, BC, BF, None, AllocatorId));
RU.reset(new ReachingDefOrUse<false>(*RA, BF, None, AllocatorId));
RU->run();
return *RU;
}
@ -43,7 +43,7 @@ LivenessAnalysis &DataflowInfoManager::getLivenessAnalysis() {
if (LA)
return *LA;
assert(RA && "RegAnalysis required");
LA.reset(new LivenessAnalysis(*RA, BC, BF, AllocatorId));
LA.reset(new LivenessAnalysis(*RA, BF, AllocatorId));
LA->run();
return *LA;
}
@ -56,7 +56,7 @@ StackReachingUses &DataflowInfoManager::getStackReachingUses() {
if (SRU)
return *SRU;
assert(FA && "FrameAnalysis required");
SRU.reset(new StackReachingUses(*FA, BC, BF, AllocatorId));
SRU.reset(new StackReachingUses(*FA, BF, AllocatorId));
SRU->run();
return *SRU;
}
@ -68,7 +68,7 @@ void DataflowInfoManager::invalidateStackReachingUses() {
DominatorAnalysis<false> &DataflowInfoManager::getDominatorAnalysis() {
if (DA)
return *DA;
DA.reset(new DominatorAnalysis<false>(BC, BF, AllocatorId));
DA.reset(new DominatorAnalysis<false>(BF, AllocatorId));
DA->run();
return *DA;
}
@ -80,7 +80,7 @@ void DataflowInfoManager::invalidateDominatorAnalysis() {
DominatorAnalysis<true> &DataflowInfoManager::getPostDominatorAnalysis() {
if (PDA)
return *PDA;
PDA.reset(new DominatorAnalysis<true>(BC, BF, AllocatorId));
PDA.reset(new DominatorAnalysis<true>(BF, AllocatorId));
PDA->run();
return *PDA;
}
@ -92,7 +92,7 @@ void DataflowInfoManager::invalidatePostDominatorAnalysis() {
StackPointerTracking &DataflowInfoManager::getStackPointerTracking() {
if (SPT)
return *SPT;
SPT.reset(new StackPointerTracking(BC, BF, AllocatorId));
SPT.reset(new StackPointerTracking(BF, AllocatorId));
SPT->run();
return *SPT;
}
@ -105,7 +105,7 @@ void DataflowInfoManager::invalidateStackPointerTracking() {
ReachingInsns<false> &DataflowInfoManager::getReachingInsns() {
if (RI)
return *RI;
RI.reset(new ReachingInsns<false>(BC, BF, AllocatorId));
RI.reset(new ReachingInsns<false>(BF, AllocatorId));
RI->run();
return *RI;
}
@ -117,7 +117,7 @@ void DataflowInfoManager::invalidateReachingInsns() {
ReachingInsns<true> &DataflowInfoManager::getReachingInsnsBackwards() {
if (RIB)
return *RIB;
RIB.reset(new ReachingInsns<true>(BC, BF, AllocatorId));
RIB.reset(new ReachingInsns<true>(BF, AllocatorId));
RIB->run();
return *RIB;
}
@ -129,8 +129,8 @@ void DataflowInfoManager::invalidateReachingInsnsBackwards() {
StackAllocationAnalysis &DataflowInfoManager::getStackAllocationAnalysis() {
if (SAA)
return *SAA;
SAA.reset(new StackAllocationAnalysis(BC, BF, getStackPointerTracking(),
AllocatorId));
SAA.reset(
new StackAllocationAnalysis(BF, getStackPointerTracking(), AllocatorId));
SAA->run();
return *SAA;
}

View File

@ -165,9 +165,8 @@ class FrameAccessAnalysis {
}
public:
FrameAccessAnalysis(const BinaryContext &BC, BinaryFunction &BF,
StackPointerTracking &SPT)
: SPT(SPT), BC(BC), BF(BF) {}
FrameAccessAnalysis(BinaryFunction &BF, StackPointerTracking &SPT)
: SPT(SPT), BC(BF.getBinaryContext()), BF(BF) {}
void enterNewBB() { Prev = nullptr; }
const FrameIndexEntry &getFIE() const { return FIE; }
@ -406,7 +405,7 @@ bool FrameAnalysis::computeArgsAccessed(BinaryFunction &BF) {
<< "\n");
bool UpdatedArgsTouched = false;
bool NoInfo = false;
FrameAccessAnalysis FAA(BC, BF, getSPT(BF));
FrameAccessAnalysis FAA(BF, getSPT(BF));
for (BinaryBasicBlock *BB : BF.layout()) {
FAA.enterNewBB();
@ -465,7 +464,7 @@ bool FrameAnalysis::computeArgsAccessed(BinaryFunction &BF) {
}
bool FrameAnalysis::restoreFrameIndex(BinaryFunction &BF) {
FrameAccessAnalysis FAA(BC, BF, getSPT(BF));
FrameAccessAnalysis FAA(BF, getSPT(BF));
LLVM_DEBUG(dbgs() << "Restoring frame indices for \"" << BF.getPrintName()
<< "\"\n");
@ -630,7 +629,7 @@ void FrameAnalysis::preComputeSPT() {
[&](BinaryFunction &BF, MCPlusBuilder::AllocatorIdTy AllocId) {
std::unique_ptr<StackPointerTracking> &SPTPtr =
SPTMap.find(&BF)->second;
SPTPtr = std::make_unique<StackPointerTracking>(BC, BF, AllocId);
SPTPtr = std::make_unique<StackPointerTracking>(BF, AllocId);
SPTPtr->run();
};

View File

@ -56,15 +56,15 @@ namespace bolt {
void FrameOptimizerPass::removeUnnecessaryLoads(const RegAnalysis &RA,
const FrameAnalysis &FA,
const BinaryContext &BC,
BinaryFunction &BF) {
StackAvailableExpressions SAE(RA, FA, BC, BF);
StackAvailableExpressions SAE(RA, FA, BF);
SAE.run();
LLVM_DEBUG(dbgs() << "Performing unnecessary loads removal\n");
std::deque<std::pair<BinaryBasicBlock *, MCInst *>> ToErase;
bool Changed = false;
const auto ExprEnd = SAE.expr_end();
MCPlusBuilder *MIB = BF.getBinaryContext().MIB.get();
for (BinaryBasicBlock &BB : BF) {
LLVM_DEBUG(dbgs() <<"\tNow at BB " << BB.getName() << "\n");
const MCInst *Prev = nullptr;
@ -105,7 +105,7 @@ void FrameOptimizerPass::removeUnnecessaryLoads(const RegAnalysis &RA,
if (FIEX->StackOffset != FIEY->StackOffset || FIEX->Size != FIEY->Size)
continue;
// TODO: Change push/pops to stack adjustment instruction
if (BC.MIB->isPop(Inst))
if (MIB->isPop(Inst))
continue;
++NumRedundantLoads;
@ -117,14 +117,14 @@ void FrameOptimizerPass::removeUnnecessaryLoads(const RegAnalysis &RA,
LLVM_DEBUG(dbgs() << "@BB: " << BB.getName() << "\n");
// Replace load
if (FIEY->IsStoreFromReg) {
if (!BC.MIB->replaceMemOperandWithReg(Inst, FIEY->RegOrImm)) {
if (!MIB->replaceMemOperandWithReg(Inst, FIEY->RegOrImm)) {
LLVM_DEBUG(dbgs() << "FAILED to change operand to a reg\n");
break;
}
++NumLoadsChangedToReg;
BC.MIB->removeAnnotation(Inst, "FrameAccessEntry");
MIB->removeAnnotation(Inst, "FrameAccessEntry");
LLVM_DEBUG(dbgs() << "Changed operand to a reg\n");
if (BC.MIB->isRedundantMove(Inst)) {
if (MIB->isRedundantMove(Inst)) {
++NumLoadsDeleted;
LLVM_DEBUG(dbgs() << "Created a redundant move\n");
// Delete it!
@ -134,11 +134,11 @@ void FrameOptimizerPass::removeUnnecessaryLoads(const RegAnalysis &RA,
char Buf[8] = {0, 0, 0, 0, 0, 0, 0, 0};
support::ulittle64_t::ref(Buf + 0) = FIEY->RegOrImm;
LLVM_DEBUG(dbgs() << "Changing operand to an imm... ");
if (!BC.MIB->replaceMemOperandWithImm(Inst, StringRef(Buf, 8), 0)) {
if (!MIB->replaceMemOperandWithImm(Inst, StringRef(Buf, 8), 0)) {
LLVM_DEBUG(dbgs() << "FAILED\n");
} else {
++NumLoadsChangedToImm;
BC.MIB->removeAnnotation(Inst, "FrameAccessEntry");
MIB->removeAnnotation(Inst, "FrameAccessEntry");
LLVM_DEBUG(dbgs() << "Ok\n");
}
}
@ -160,9 +160,8 @@ void FrameOptimizerPass::removeUnnecessaryLoads(const RegAnalysis &RA,
}
void FrameOptimizerPass::removeUnusedStores(const FrameAnalysis &FA,
const BinaryContext &BC,
BinaryFunction &BF) {
StackReachingUses SRU(FA, BC, BF);
StackReachingUses SRU(FA, BF);
SRU.run();
LLVM_DEBUG(dbgs() << "Performing unused stores removal\n");
@ -198,7 +197,7 @@ void FrameOptimizerPass::removeUnusedStores(const FrameAnalysis &FA,
continue;
}
// TODO: Change push/pops to stack adjustment instruction
if (BC.MIB->isPush(Inst))
if (BF.getBinaryContext().MIB->isPush(Inst))
continue;
++NumRedundantStores;
@ -268,13 +267,13 @@ void FrameOptimizerPass::runOnFunctions(BinaryContext &BC) {
{
NamedRegionTimer T1("removeloads", "remove loads", "FOP", "FOP breakdown",
opts::TimeOpts);
removeUnnecessaryLoads(*RA, *FA, BC, I.second);
removeUnnecessaryLoads(*RA, *FA, I.second);
}
if (opts::RemoveStores) {
NamedRegionTimer T1("removestores", "remove stores", "FOP",
"FOP breakdown", opts::TimeOpts);
removeUnusedStores(*FA, BC, I.second);
removeUnusedStores(*FA, I.second);
}
// Don't even start shrink wrapping if no profiling info is available
if (I.second.getKnownExecutionCount() == 0)
@ -344,8 +343,8 @@ void FrameOptimizerPass::performShrinkWrapping(const RegAnalysis &RA,
ParallelUtilities::WorkFuncWithAllocTy WorkFunction =
[&](BinaryFunction &BF, MCPlusBuilder::AllocatorIdTy AllocatorId) {
DataflowInfoManager Info(BC, BF, &RA, &FA, AllocatorId);
ShrinkWrapping SW(FA, BC, BF, Info, AllocatorId);
DataflowInfoManager Info(BF, &RA, &FA, AllocatorId);
ShrinkWrapping SW(FA, BF, Info, AllocatorId);
if (SW.perform()) {
std::lock_guard<std::mutex> Lock(FuncsChangedMutex);

View File

@ -239,10 +239,8 @@ void IndirectCallPromotion::printDecision(
// Get list of targets for a given call sorted by most frequently
// called first.
std::vector<IndirectCallPromotion::Callsite>
IndirectCallPromotion::getCallTargets(
BinaryBasicBlock &BB,
const MCInst &Inst
) const {
IndirectCallPromotion::getCallTargets(BinaryBasicBlock &BB,
const MCInst &Inst) const {
BinaryFunction &BF = *BB.getFunction();
BinaryContext &BC = BF.getBinaryContext();
std::vector<Callsite> Targets;
@ -380,27 +378,25 @@ IndirectCallPromotion::getCallTargets(
}
IndirectCallPromotion::JumpTableInfoType
IndirectCallPromotion::maybeGetHotJumpTableTargets(
BinaryContext &BC,
BinaryFunction &Function,
BinaryBasicBlock *BB,
MCInst &CallInst,
MCInst *&TargetFetchInst,
const JumpTable *JT
) const {
JumpTableInfoType HotTargets;
IndirectCallPromotion::maybeGetHotJumpTableTargets(BinaryBasicBlock &BB,
MCInst &CallInst,
MCInst *&TargetFetchInst,
const JumpTable *JT) const {
assert(JT && "Can't get jump table addrs for non-jump tables.");
BinaryFunction &Function = *BB.getFunction();
BinaryContext &BC = Function.getBinaryContext();
if (!Function.hasMemoryProfile() || !opts::EliminateLoads)
return JumpTableInfoType();
JumpTableInfoType HotTargets;
MCInst *MemLocInstr;
MCInst *PCRelBaseOut;
unsigned BaseReg, IndexReg;
int64_t DispValue;
const MCExpr *DispExpr;
MutableArrayRef<MCInst> Insts(&BB->front(), &CallInst);
MutableArrayRef<MCInst> Insts(&BB.front(), &CallInst);
const IndirectBranchType Type = BC.MIB->analyzeIndirectBranch(
CallInst, Insts.begin(), Insts.end(), BC.AsmInfo->getCodePointerSize(),
MemLocInstr, BaseReg, IndexReg, DispValue, DispExpr, PCRelBaseOut);
@ -410,15 +406,15 @@ IndirectCallPromotion::maybeGetHotJumpTableTargets(
return JumpTableInfoType();
LLVM_DEBUG({
dbgs() << "BOLT-INFO: ICP attempting to find memory profiling data for "
<< "jump table in " << Function << " at @ "
<< (&CallInst - &BB->front()) << "\n"
<< "BOLT-INFO: ICP target fetch instructions:\n";
BC.printInstruction(dbgs(), *MemLocInstr, 0, &Function);
if (MemLocInstr != &CallInst) {
BC.printInstruction(dbgs(), CallInst, 0, &Function);
}
});
dbgs() << "BOLT-INFO: ICP attempting to find memory profiling data for "
<< "jump table in " << Function << " at @ "
<< (&CallInst - &BB.front()) << "\n"
<< "BOLT-INFO: ICP target fetch instructions:\n";
BC.printInstruction(dbgs(), *MemLocInstr, 0, &Function);
if (MemLocInstr != &CallInst) {
BC.printInstruction(dbgs(), CallInst, 0, &Function);
}
});
DEBUG_VERBOSE(1, {
dbgs() << "Jmp info: Type = " << (unsigned)Type << ", "
@ -542,21 +538,16 @@ IndirectCallPromotion::maybeGetHotJumpTableTargets(
}
IndirectCallPromotion::SymTargetsType
IndirectCallPromotion::findCallTargetSymbols(
BinaryContext &BC,
std::vector<Callsite> &Targets,
size_t &N,
BinaryFunction &Function,
BinaryBasicBlock *BB,
MCInst &CallInst,
MCInst *&TargetFetchInst
) const {
const JumpTable *JT = Function.getJumpTable(CallInst);
IndirectCallPromotion::findCallTargetSymbols(std::vector<Callsite> &Targets,
size_t &N, BinaryBasicBlock &BB,
MCInst &CallInst,
MCInst *&TargetFetchInst) const {
const JumpTable *JT = BB.getFunction()->getJumpTable(CallInst);
SymTargetsType SymTargets;
if (JT) {
JumpTableInfoType HotTargets = maybeGetHotJumpTableTargets(
BC, Function, BB, CallInst, TargetFetchInst, JT);
JumpTableInfoType HotTargets =
maybeGetHotJumpTableTargets(BB, CallInst, TargetFetchInst, JT);
if (!HotTargets.empty()) {
auto findTargetsIndex = [&](uint64_t JTIndex) {
@ -567,7 +558,7 @@ IndirectCallPromotion::findCallTargetSymbols(
}
LLVM_DEBUG(
dbgs() << "BOLT-ERROR: Unable to find target index for hot jump "
<< " table entry in " << Function << "\n");
<< " table entry in " << *BB.getFunction() << "\n");
llvm_unreachable("Hot indices must be referred to by at least one "
"callsite");
};
@ -634,8 +625,8 @@ IndirectCallPromotion::findCallTargetSymbols(
N = I;
if (N == 0 && opts::Verbosity >= 1) {
outs() << "BOLT-INFO: ICP failed in " << Function << " in "
<< BB->getName()
outs() << "BOLT-INFO: ICP failed in " << *BB.getFunction() << " in "
<< BB.getName()
<< ": failed to meet thresholds after memory profile data was "
"loaded.\n";
return SymTargets;
@ -664,14 +655,11 @@ IndirectCallPromotion::findCallTargetSymbols(
return SymTargets;
}
IndirectCallPromotion::MethodInfoType
IndirectCallPromotion::maybeGetVtableSyms(
BinaryContext &BC,
BinaryFunction &Function,
BinaryBasicBlock *BB,
MCInst &Inst,
const SymTargetsType &SymTargets
) const {
IndirectCallPromotion::MethodInfoType IndirectCallPromotion::maybeGetVtableSyms(
BinaryBasicBlock &BB, MCInst &Inst,
const SymTargetsType &SymTargets) const {
BinaryFunction &Function = *BB.getFunction();
BinaryContext &BC = Function.getBinaryContext();
std::vector<std::pair<MCSymbol *, uint64_t>> VtableSyms;
std::vector<MCInst *> MethodFetchInsns;
unsigned VtableReg, MethodReg;
@ -683,25 +671,24 @@ IndirectCallPromotion::maybeGetVtableSyms(
if (!Function.hasMemoryProfile() || !opts::EliminateLoads)
return MethodInfoType();
MutableArrayRef<MCInst> Insts(&BB->front(), &Inst + 1);
MutableArrayRef<MCInst> Insts(&BB.front(), &Inst + 1);
if (!BC.MIB->analyzeVirtualMethodCall(Insts.begin(),
Insts.end(),
MethodFetchInsns,
VtableReg,
MethodReg,
MethodOffset)) {
DEBUG_VERBOSE(1,
dbgs() << "BOLT-INFO: ICP unable to analyze method call in "
<< Function << " at @ " << (&Inst - &BB->front())
<< "\n");
DEBUG_VERBOSE(
1, dbgs() << "BOLT-INFO: ICP unable to analyze method call in "
<< Function << " at @ " << (&Inst - &BB.front()) << "\n");
return MethodInfoType();
}
++TotalMethodLoadEliminationCandidates;
DEBUG_VERBOSE(1, {
dbgs() << "BOLT-INFO: ICP found virtual method call in "
<< Function << " at @ " << (&Inst - &BB->front()) << "\n";
dbgs() << "BOLT-INFO: ICP found virtual method call in " << Function
<< " at @ " << (&Inst - &BB.front()) << "\n";
dbgs() << "BOLT-INFO: ICP method fetch instructions:\n";
for (MCInst *Inst : MethodFetchInsns) {
BC.printInstruction(dbgs(), *Inst, 0, &Function);
@ -789,17 +776,16 @@ IndirectCallPromotion::maybeGetVtableSyms(
std::vector<std::unique_ptr<BinaryBasicBlock>>
IndirectCallPromotion::rewriteCall(
BinaryContext &BC,
BinaryFunction &Function,
BinaryBasicBlock *IndCallBlock,
const MCInst &CallInst,
MCPlusBuilder::BlocksVectorTy &&ICPcode,
const std::vector<MCInst *> &MethodFetchInsns
) const {
BinaryBasicBlock &IndCallBlock, const MCInst &CallInst,
MCPlusBuilder::BlocksVectorTy &&ICPcode,
const std::vector<MCInst *> &MethodFetchInsns) const {
BinaryFunction &Function = *IndCallBlock.getFunction();
MCPlusBuilder *MIB = Function.getBinaryContext().MIB.get();
// Create new basic blocks with correct code in each one first.
std::vector<std::unique_ptr<BinaryBasicBlock>> NewBBs;
const bool IsTailCallOrJT = (BC.MIB->isTailCall(CallInst) ||
Function.getJumpTable(CallInst));
const bool IsTailCallOrJT =
(MIB->isTailCall(CallInst) || Function.getJumpTable(CallInst));
// Move instructions from the tail of the original call block
// to the merge block.
@ -809,29 +795,29 @@ IndirectCallPromotion::rewriteCall(
std::vector<MCInst> TailInsts;
const MCInst *TailInst = &CallInst;
if (IsTailCallOrJT) {
while (TailInst + 1 < &(*IndCallBlock->end()) &&
BC.MIB->isPseudo(*(TailInst + 1))) {
while (TailInst + 1 < &(*IndCallBlock.end()) &&
MIB->isPseudo(*(TailInst + 1))) {
TailInsts.push_back(*++TailInst);
}
}
std::vector<MCInst> MovedInst = IndCallBlock->splitInstructions(&CallInst);
std::vector<MCInst> MovedInst = IndCallBlock.splitInstructions(&CallInst);
// Link new BBs to the original input offset of the BB where the indirect
// call site is, so we can map samples recorded in new BBs back to the
// original BB seen in the input binary (if using BAT)
const uint32_t OrigOffset = IndCallBlock->getInputOffset();
const uint32_t OrigOffset = IndCallBlock.getInputOffset();
IndCallBlock->eraseInstructions(MethodFetchInsns.begin(),
MethodFetchInsns.end());
if (IndCallBlock->empty() ||
IndCallBlock.eraseInstructions(MethodFetchInsns.begin(),
MethodFetchInsns.end());
if (IndCallBlock.empty() ||
(!MethodFetchInsns.empty() && MethodFetchInsns.back() == &CallInst)) {
IndCallBlock->addInstructions(ICPcode.front().second.begin(),
ICPcode.front().second.end());
IndCallBlock.addInstructions(ICPcode.front().second.begin(),
ICPcode.front().second.end());
} else {
IndCallBlock->replaceInstruction(std::prev(IndCallBlock->end()),
ICPcode.front().second);
IndCallBlock.replaceInstruction(std::prev(IndCallBlock.end()),
ICPcode.front().second);
}
IndCallBlock->addInstructions(TailInsts.begin(), TailInsts.end());
IndCallBlock.addInstructions(TailInsts.begin(), TailInsts.end());
for (auto Itr = ICPcode.begin() + 1; Itr != ICPcode.end(); ++Itr) {
MCSymbol *&Sym = Itr->first;
@ -840,8 +826,8 @@ IndirectCallPromotion::rewriteCall(
std::unique_ptr<BinaryBasicBlock> TBB =
Function.createBasicBlock(OrigOffset, Sym);
for (MCInst &Inst : Insts) { // sanitize new instructions.
if (BC.MIB->isCall(Inst))
BC.MIB->removeAnnotation(Inst, "CallProfile");
if (MIB->isCall(Inst))
MIB->removeAnnotation(Inst, "CallProfile");
}
TBB->addInstructions(Insts.begin(), Insts.end());
NewBBs.emplace_back(std::move(TBB));
@ -856,21 +842,18 @@ IndirectCallPromotion::rewriteCall(
return NewBBs;
}
BinaryBasicBlock *IndirectCallPromotion::fixCFG(
BinaryContext &BC,
BinaryFunction &Function,
BinaryBasicBlock *IndCallBlock,
const bool IsTailCall,
const bool IsJumpTable,
IndirectCallPromotion::BasicBlocksVector &&NewBBs,
const std::vector<Callsite> &Targets
) const {
BinaryBasicBlock *
IndirectCallPromotion::fixCFG(BinaryBasicBlock &IndCallBlock,
const bool IsTailCall, const bool IsJumpTable,
IndirectCallPromotion::BasicBlocksVector &&NewBBs,
const std::vector<Callsite> &Targets) const {
BinaryFunction &Function = *IndCallBlock.getFunction();
using BinaryBranchInfo = BinaryBasicBlock::BinaryBranchInfo;
BinaryBasicBlock *MergeBlock = nullptr;
// Scale indirect call counts to the execution count of the original
// basic block containing the indirect call.
uint64_t TotalCount = IndCallBlock->getKnownExecutionCount();
uint64_t TotalCount = IndCallBlock.getKnownExecutionCount();
uint64_t TotalIndirectBranches = 0;
for (const Callsite &Target : Targets) {
TotalIndirectBranches += Target.Branches;
@ -895,7 +878,7 @@ BinaryBasicBlock *IndirectCallPromotion::fixCFG(
if (IsJumpTable) {
BinaryBasicBlock *NewIndCallBlock = NewBBs.back().get();
IndCallBlock->moveAllSuccessorsTo(NewIndCallBlock);
IndCallBlock.moveAllSuccessorsTo(NewIndCallBlock);
std::vector<MCSymbol*> SymTargets;
for (const Callsite &Target : Targets) {
@ -908,7 +891,7 @@ BinaryBasicBlock *IndirectCallPromotion::fixCFG(
"There must be a target symbol associated with each new BB.");
for (uint64_t I = 0; I < NewBBs.size(); ++I) {
BinaryBasicBlock *SourceBB = I ? NewBBs[I - 1].get() : IndCallBlock;
BinaryBasicBlock *SourceBB = I ? NewBBs[I - 1].get() : &IndCallBlock;
SourceBB->setExecutionCount(TotalCount);
BinaryBasicBlock *TargetBB =
@ -933,7 +916,7 @@ BinaryBasicBlock *IndirectCallPromotion::fixCFG(
}
} else {
assert(NewBBs.size() >= 2);
assert(NewBBs.size() % 2 == 1 || IndCallBlock->succ_empty());
assert(NewBBs.size() % 2 == 1 || IndCallBlock.succ_empty());
assert(NewBBs.size() % 2 == 1 || IsTailCall);
auto ScaledBI = ScaledBBI.begin();
@ -945,17 +928,17 @@ BinaryBasicBlock *IndirectCallPromotion::fixCFG(
if (!IsTailCall) {
MergeBlock = NewBBs.back().get();
IndCallBlock->moveAllSuccessorsTo(MergeBlock);
IndCallBlock.moveAllSuccessorsTo(MergeBlock);
}
// Fix up successors and execution counts.
updateCurrentBranchInfo();
IndCallBlock->addSuccessor(NewBBs[1].get(), TotalCount);
IndCallBlock->addSuccessor(NewBBs[0].get(), ScaledBBI[0]);
IndCallBlock.addSuccessor(NewBBs[1].get(), TotalCount);
IndCallBlock.addSuccessor(NewBBs[0].get(), ScaledBBI[0]);
const size_t Adj = IsTailCall ? 1 : 2;
for (size_t I = 0; I < NewBBs.size() - Adj; ++I) {
assert(TotalCount <= IndCallBlock->getExecutionCount() ||
assert(TotalCount <= IndCallBlock.getExecutionCount() ||
TotalCount <= uint64_t(TotalIndirectBranches));
uint64_t ExecCount = ScaledBBI[(I + 1) / 2].Count;
if (I % 2 == 0) {
@ -988,21 +971,19 @@ BinaryBasicBlock *IndirectCallPromotion::fixCFG(
NewBBs.back()->setExecutionCount(TotalCount);
// Update BB and BB layout.
Function.insertBasicBlocks(IndCallBlock, std::move(NewBBs));
Function.insertBasicBlocks(&IndCallBlock, std::move(NewBBs));
assert(Function.validateCFG());
return MergeBlock;
}
size_t
IndirectCallPromotion::canPromoteCallsite(const BinaryBasicBlock *BB,
const MCInst &Inst,
const std::vector<Callsite> &Targets,
uint64_t NumCalls) {
if (BB->getKnownExecutionCount() < opts::ExecutionCountThreshold)
size_t IndirectCallPromotion::canPromoteCallsite(
const BinaryBasicBlock &BB, const MCInst &Inst,
const std::vector<Callsite> &Targets, uint64_t NumCalls) {
if (BB.getKnownExecutionCount() < opts::ExecutionCountThreshold)
return 0;
const bool IsJumpTable = BB->getFunction()->getJumpTable(Inst);
const bool IsJumpTable = BB.getFunction()->getJumpTable(Inst);
auto computeStats = [&](size_t N) {
for (size_t I = 0; I < N; ++I) {
@ -1016,10 +997,9 @@ IndirectCallPromotion::canPromoteCallsite(const BinaryBasicBlock *BB,
// If we have no targets (or no calls), skip this callsite.
if (Targets.empty() || !NumCalls) {
if (opts::Verbosity >= 1) {
const auto InstIdx = &Inst - &(*BB->begin());
outs() << "BOLT-INFO: ICP failed in " << *BB->getFunction() << " @ "
<< InstIdx << " in " << BB->getName()
<< ", calls = " << NumCalls
const auto InstIdx = &Inst - &(*BB.begin());
outs() << "BOLT-INFO: ICP failed in " << *BB.getFunction() << " @ "
<< InstIdx << " in " << BB.getName() << ", calls = " << NumCalls
<< ", targets empty or NumCalls == 0.\n";
}
return 0;
@ -1035,7 +1015,7 @@ IndirectCallPromotion::canPromoteCallsite(const BinaryBasicBlock *BB,
const size_t TrialN = TopN ? std::min(TopN, Targets.size()) : Targets.size();
if (opts::ICPTopCallsites > 0) {
BinaryContext &BC = BB->getFunction()->getBinaryContext();
BinaryContext &BC = BB.getFunction()->getBinaryContext();
if (!BC.MIB->hasAnnotation(Inst, "DoICP"))
return 0;
}
@ -1058,18 +1038,18 @@ IndirectCallPromotion::canPromoteCallsite(const BinaryBasicBlock *BB,
computeStats(N);
// Compute the misprediction frequency of the top N call targets. If this
// frequency is greater than the threshold, we should try ICP on this callsite.
// frequency is greater than the threshold, we should try ICP on this
// callsite.
const double TopNFrequency = (100.0 * TotalMispredictsTopN) / NumCalls;
if (TopNFrequency == 0 ||
TopNFrequency < opts::IndirectCallPromotionMispredictThreshold) {
if (opts::Verbosity >= 1) {
const auto InstIdx = &Inst - &(*BB->begin());
outs() << "BOLT-INFO: ICP failed in " << *BB->getFunction() << " @ "
<< InstIdx << " in " << BB->getName() << ", calls = "
<< NumCalls << ", top N mis. frequency "
<< format("%.1f", TopNFrequency) << "% < "
<< opts::IndirectCallPromotionMispredictThreshold << "%\n";
const auto InstIdx = &Inst - &(*BB.begin());
outs() << "BOLT-INFO: ICP failed in " << *BB.getFunction() << " @ "
<< InstIdx << " in " << BB.getName() << ", calls = " << NumCalls
<< ", top N mis. frequency " << format("%.1f", TopNFrequency)
<< "% < " << opts::IndirectCallPromotionMispredictThreshold
<< "%\n";
}
return 0;
}
@ -1113,10 +1093,10 @@ IndirectCallPromotion::canPromoteCallsite(const BinaryBasicBlock *BB,
if (TopNMispredictFrequency <
opts::IndirectCallPromotionMispredictThreshold) {
if (opts::Verbosity >= 1) {
const auto InstIdx = &Inst - &(*BB->begin());
outs() << "BOLT-INFO: ICP failed in " << *BB->getFunction() << " @ "
<< InstIdx << " in " << BB->getName() << ", calls = "
<< NumCalls << ", top N mispredict frequency "
const auto InstIdx = &Inst - &(*BB.begin());
outs() << "BOLT-INFO: ICP failed in " << *BB.getFunction() << " @ "
<< InstIdx << " in " << BB.getName()
<< ", calls = " << NumCalls << ", top N mispredict frequency "
<< format("%.1f", TopNMispredictFrequency) << "% < "
<< opts::IndirectCallPromotionMispredictThreshold << "%\n";
}
@ -1128,7 +1108,7 @@ IndirectCallPromotion::canPromoteCallsite(const BinaryBasicBlock *BB,
// Filter functions that can have ICP applied (for debugging)
if (!opts::ICPFuncsList.empty()) {
for (std::string &Name : opts::ICPFuncsList) {
if (BB->getFunction()->hasName(Name))
if (BB.getFunction()->hasName(Name))
return N;
}
return 0;
@ -1137,20 +1117,17 @@ IndirectCallPromotion::canPromoteCallsite(const BinaryBasicBlock *BB,
return N;
}
void
IndirectCallPromotion::printCallsiteInfo(const BinaryBasicBlock *BB,
const MCInst &Inst,
const std::vector<Callsite> &Targets,
const size_t N,
uint64_t NumCalls) const {
BinaryContext &BC = BB->getFunction()->getBinaryContext();
void IndirectCallPromotion::printCallsiteInfo(
const BinaryBasicBlock &BB, const MCInst &Inst,
const std::vector<Callsite> &Targets, const size_t N,
uint64_t NumCalls) const {
BinaryContext &BC = BB.getFunction()->getBinaryContext();
const bool IsTailCall = BC.MIB->isTailCall(Inst);
const bool IsJumpTable = BB->getFunction()->getJumpTable(Inst);
const auto InstIdx = &Inst - &(*BB->begin());
const bool IsJumpTable = BB.getFunction()->getJumpTable(Inst);
const auto InstIdx = &Inst - &(*BB.begin());
outs() << "BOLT-INFO: ICP candidate branch info: "
<< *BB->getFunction() << " @ " << InstIdx
<< " in " << BB->getName()
outs() << "BOLT-INFO: ICP candidate branch info: " << *BB.getFunction()
<< " @ " << InstIdx << " in " << BB.getName()
<< " -> calls = " << NumCalls
<< (IsTailCall ? " (tail)" : (IsJumpTable ? " (jump table)" : ""))
<< "\n";
@ -1299,7 +1276,7 @@ void IndirectCallPromotion::runOnFunctions(BinaryContext &BC) {
if (BBs.empty())
continue;
DataflowInfoManager Info(BC, Function, RA.get(), nullptr);
DataflowInfoManager Info(Function, RA.get(), nullptr);
while (!BBs.empty()) {
BinaryBasicBlock *BB = BBs.back();
BBs.pop_back();
@ -1366,7 +1343,7 @@ void IndirectCallPromotion::runOnFunctions(BinaryContext &BC) {
// Should this callsite be optimized? Return the number of targets
// to use when promoting this call. A value of zero means to skip
// this callsite.
size_t N = canPromoteCallsite(BB, Inst, Targets, NumCalls);
size_t N = canPromoteCallsite(*BB, Inst, Targets, NumCalls);
// If it is a jump table and it failed to meet our initial threshold,
// proceed to findCallTargetSymbols -- it may reevaluate N if
@ -1375,13 +1352,13 @@ void IndirectCallPromotion::runOnFunctions(BinaryContext &BC) {
continue;
if (opts::Verbosity >= 1) {
printCallsiteInfo(BB, Inst, Targets, N, NumCalls);
printCallsiteInfo(*BB, Inst, Targets, N, NumCalls);
}
// Find MCSymbols or absolute addresses for each call target.
MCInst *TargetFetchInst = nullptr;
const SymTargetsType SymTargets = findCallTargetSymbols(
BC, Targets, N, Function, BB, Inst, TargetFetchInst);
const SymTargetsType SymTargets =
findCallTargetSymbols(Targets, N, *BB, Inst, TargetFetchInst);
// findCallTargetSymbols may have changed N if mem profile is available
// for jump tables
@ -1407,11 +1384,7 @@ void IndirectCallPromotion::runOnFunctions(BinaryContext &BC) {
MethodInfoType MethodInfo;
if (!IsJumpTable) {
MethodInfo = maybeGetVtableSyms(BC,
Function,
BB,
Inst,
SymTargets);
MethodInfo = maybeGetVtableSyms(*BB, Inst, SymTargets);
TotalMethodLoadsEliminated += MethodInfo.first.empty() ? 0 : 1;
LLVM_DEBUG(dbgs()
<< "BOLT-INFO: ICP "
@ -1457,13 +1430,12 @@ void IndirectCallPromotion::runOnFunctions(BinaryContext &BC) {
});
// Rewrite the CFG with the newly generated ICP code.
std::vector<std::unique_ptr<BinaryBasicBlock>> NewBBs = rewriteCall(
BC, Function, BB, Inst, std::move(ICPcode), MethodInfo.second);
std::vector<std::unique_ptr<BinaryBasicBlock>> NewBBs =
rewriteCall(*BB, Inst, std::move(ICPcode), MethodInfo.second);
// Fix the CFG after inserting the new basic blocks.
BinaryBasicBlock *MergeBlock =
fixCFG(BC, Function, BB, IsTailCall, IsJumpTable, std::move(NewBBs),
Targets);
fixCFG(*BB, IsTailCall, IsJumpTable, std::move(NewBBs), Targets);
// Since the tail of the original block was split off and it may contain
// additional indirect calls, we must add the merge block to the set of

View File

@ -208,14 +208,14 @@ insertInstructions(std::vector<MCInst>& Instrs,
}
void Instrumentation::instrumentLeafNode(BinaryContext &BC,
BinaryBasicBlock &BB,
void Instrumentation::instrumentLeafNode(BinaryBasicBlock &BB,
BinaryBasicBlock::iterator Iter,
bool IsLeaf,
FunctionDescription &FuncDesc,
uint32_t Node) {
createLeafNodeDescription(FuncDesc, Node);
std::vector<MCInst> CounterInstrs = createInstrumentationSnippet(BC, IsLeaf);
std::vector<MCInst> CounterInstrs = createInstrumentationSnippet(
BB.getFunction()->getBinaryContext(), IsLeaf);
insertInstructions(CounterInstrs, BB, Iter);
}
@ -297,12 +297,12 @@ bool Instrumentation::instrumentOneTarget(
return true;
}
void Instrumentation::instrumentFunction(BinaryContext &BC,
BinaryFunction &Function,
void Instrumentation::instrumentFunction(BinaryFunction &Function,
MCPlusBuilder::AllocatorIdTy AllocId) {
if (Function.hasUnknownControlFlow())
return;
BinaryContext &BC = Function.getBinaryContext();
if (BC.isMachO() && Function.hasName("___GLOBAL_init_65535/1"))
return;
@ -503,7 +503,7 @@ void Instrumentation::instrumentFunction(BinaryContext &BC,
for (auto BBI = Function.begin(), BBE = Function.end(); BBI != BBE; ++BBI) {
BinaryBasicBlock &BB = *BBI;
if (STOutSet[&BB].size() == 0)
instrumentLeafNode(BC, BB, BB.begin(), IsLeafFunction, *FuncDesc,
instrumentLeafNode(BB, BB.begin(), IsLeafFunction, *FuncDesc,
BBToID[&BB]);
}
}
@ -551,7 +551,7 @@ void Instrumentation::runOnFunctions(BinaryContext &BC) {
ParallelUtilities::WorkFuncWithAllocTy WorkFun =
[&](BinaryFunction &BF, MCPlusBuilder::AllocatorIdTy AllocatorId) {
instrumentFunction(BC, BF, AllocatorId);
instrumentFunction(BF, AllocatorId);
};
ParallelUtilities::runOnEachFunctionWithUniqueAllocId(

View File

@ -39,9 +39,9 @@ JTFootprintOnlyPIC("jt-footprint-optimize-for-icache",
namespace llvm {
namespace bolt {
void JTFootprintReduction::checkOpportunities(BinaryContext &BC,
BinaryFunction &Function,
void JTFootprintReduction::checkOpportunities(BinaryFunction &Function,
DataflowInfoManager &Info) {
BinaryContext &BC = Function.getBinaryContext();
std::map<JumpTable *, uint64_t> AllJTs;
for (BinaryBasicBlock &BB : Function) {
@ -210,9 +210,9 @@ bool JTFootprintReduction::tryOptimizePIC(
return true;
}
void JTFootprintReduction::optimizeFunction(BinaryContext &BC,
BinaryFunction &Function,
void JTFootprintReduction::optimizeFunction(BinaryFunction &Function,
DataflowInfoManager &Info) {
BinaryContext &BC = Function.getBinaryContext();
for (BinaryBasicBlock &BB : Function) {
if (!BB.getNumNonPseudos())
continue;
@ -269,10 +269,10 @@ void JTFootprintReduction::runOnFunctions(BinaryContext &BC) {
if (Function.getKnownExecutionCount() == 0)
continue;
DataflowInfoManager Info(BC, Function, RA.get(), nullptr);
DataflowInfoManager Info(Function, RA.get(), nullptr);
BlacklistedJTs.clear();
checkOpportunities(BC, Function, Info);
optimizeFunction(BC, Function, Info);
checkOpportunities(Function, Info);
optimizeFunction(Function, Info);
}
if (TotalJTs == TotalJTsDenied) {

View File

@ -381,7 +381,7 @@ createLoopNestLevelMap(BinaryFunction &BF) {
/// equal if we show that A dominates B, B post-dominates A and they are in the
/// same loop and same loop nesting level.
void equalizeBBCounts(BinaryFunction &BF) {
auto Info = DataflowInfoManager(BF.getBinaryContext(), BF, nullptr, nullptr);
auto Info = DataflowInfoManager(BF, nullptr, nullptr);
DominatorAnalysis<false> &DA = Info.getDominatorAnalysis();
DominatorAnalysis<true> &PDA = Info.getPostDominatorAnalysis();
auto &InsnToBB = Info.getInsnToBBMap();

View File

@ -37,8 +37,8 @@ AggressiveReAssign("use-aggr-reg-reassign",
namespace llvm {
namespace bolt {
void RegReAssign::swap(BinaryContext &BC, BinaryFunction &Function, MCPhysReg A,
MCPhysReg B) {
void RegReAssign::swap(BinaryFunction &Function, MCPhysReg A, MCPhysReg B) {
BinaryContext &BC = Function.getBinaryContext();
const BitVector &AliasA = BC.MIB->getAliases(A, false);
const BitVector &AliasB = BC.MIB->getAliases(B, false);
@ -138,7 +138,8 @@ void RegReAssign::swap(BinaryContext &BC, BinaryFunction &Function, MCPhysReg A,
}
}
void RegReAssign::rankRegisters(BinaryContext &BC, BinaryFunction &Function) {
void RegReAssign::rankRegisters(BinaryFunction &Function) {
BinaryContext &BC = Function.getBinaryContext();
std::fill(RegScore.begin(), RegScore.end(), 0);
std::fill(RankedRegs.begin(), RankedRegs.end(), 0);
@ -215,9 +216,9 @@ void RegReAssign::rankRegisters(BinaryContext &BC, BinaryFunction &Function) {
});
}
void RegReAssign::aggressivePassOverFunction(BinaryContext &BC,
BinaryFunction &Function) {
rankRegisters(BC, Function);
void RegReAssign::aggressivePassOverFunction(BinaryFunction &Function) {
BinaryContext &BC = Function.getBinaryContext();
rankRegisters(Function);
// Bail early if our registers are all black listed, before running expensive
// analysis passes
@ -251,7 +252,7 @@ void RegReAssign::aggressivePassOverFunction(BinaryContext &BC,
return;
// -- expensive pass -- determine all regs alive during func start
DataflowInfoManager Info(BC, Function, RA.get(), nullptr);
DataflowInfoManager Info(Function, RA.get(), nullptr);
BitVector AliveAtStart = *Info.getLivenessAnalysis().getStateAt(
ProgramPoint::getFirstPointAt(*Function.begin()));
for (BinaryBasicBlock &BB : Function) {
@ -309,7 +310,7 @@ void RegReAssign::aggressivePassOverFunction(BinaryContext &BC,
// Opportunity detected. Swap.
LLVM_DEBUG(dbgs() << "\n ** Swapping " << BC.MRI->getName(ClassicReg)
<< " with " << BC.MRI->getName(ExtReg) << "\n\n");
swap(BC, Function, ClassicReg, ExtReg);
swap(Function, ClassicReg, ExtReg);
FuncsChanged.insert(&Function);
++Begin;
if (Begin == End)
@ -318,9 +319,9 @@ void RegReAssign::aggressivePassOverFunction(BinaryContext &BC,
}
}
bool RegReAssign::conservativePassOverFunction(BinaryContext &BC,
BinaryFunction &Function) {
rankRegisters(BC, Function);
bool RegReAssign::conservativePassOverFunction(BinaryFunction &Function) {
BinaryContext &BC = Function.getBinaryContext();
rankRegisters(Function);
// Try swapping R12, R13, R14 or R15 with RBX (we work with all callee-saved
// regs except RBP)
@ -352,7 +353,7 @@ bool RegReAssign::conservativePassOverFunction(BinaryContext &BC,
LLVM_DEBUG(dbgs() << "\n ** Swapping " << BC.MRI->getName(RBX) << " with "
<< BC.MRI->getName(Candidate) << "\n\n");
swap(BC, Function, RBX, Candidate);
swap(Function, RBX, Candidate);
FuncsChanged.insert(&Function);
return true;
}
@ -418,9 +419,8 @@ void RegReAssign::runOnFunctions(BinaryContext &BC) {
LLVM_DEBUG(dbgs() << "====================================\n");
LLVM_DEBUG(dbgs() << " - " << Function.getPrintName() << "\n");
if (!conservativePassOverFunction(BC, Function) &&
opts::AggressiveReAssign) {
aggressivePassOverFunction(BC, Function);
if (!conservativePassOverFunction(Function) && opts::AggressiveReAssign) {
aggressivePassOverFunction(Function);
LLVM_DEBUG({
if (FuncsChanged.count(&Function)) {
dbgs() << "Aggressive pass successful on " << Function.getPrintName()

View File

@@ -1519,11 +1519,11 @@ protected:
}
public:
PredictiveStackPointerTracking(const BinaryContext &BC, BinaryFunction &BF,
PredictiveStackPointerTracking(BinaryFunction &BF,
decltype(ShrinkWrapping::Todo) &TodoMap,
DataflowInfoManager &Info,
MCPlusBuilder::AllocatorIdTy AllocatorId = 0)
: StackPointerTrackingBase<PredictiveStackPointerTracking>(BC, BF,
: StackPointerTrackingBase<PredictiveStackPointerTracking>(BF,
AllocatorId),
TodoMap(TodoMap), Info(Info) {}
@@ -1879,7 +1879,7 @@ BBIterTy ShrinkWrapping::processInsertionsList(
}
bool ShrinkWrapping::processInsertions() {
PredictiveStackPointerTracking PSPT(BC, BF, Todo, Info, AllocatorId);
PredictiveStackPointerTracking PSPT(BF, Todo, Info, AllocatorId);
PSPT.run();
bool Changes = false;

View File

@@ -19,9 +19,8 @@ namespace bolt {
StackAvailableExpressions::StackAvailableExpressions(const RegAnalysis &RA,
const FrameAnalysis &FA,
const BinaryContext &BC,
BinaryFunction &BF)
: InstrsDataflowAnalysis(BC, BF), RA(RA), FA(FA) {}
: InstrsDataflowAnalysis(BF), RA(RA), FA(FA) {}
void StackAvailableExpressions::preflight() {
LLVM_DEBUG(dbgs() << "Starting StackAvailableExpressions on \""

View File

@@ -14,9 +14,8 @@ namespace llvm {
namespace bolt {
StackPointerTracking::StackPointerTracking(
const BinaryContext &BC, BinaryFunction &BF,
MCPlusBuilder::AllocatorIdTy AllocatorId)
: StackPointerTrackingBase<StackPointerTracking>(BC, BF, AllocatorId) {}
BinaryFunction &BF, MCPlusBuilder::AllocatorIdTy AllocatorId)
: StackPointerTrackingBase<StackPointerTracking>(BF, AllocatorId) {}
} // end namespace bolt
} // end namespace llvm

View File

@@ -46,26 +46,25 @@ void getRegNameFromBitVec(const BinaryContext &BC, const BitVector &RegV,
LLVM_DEBUG(dbgs() << "\n");
}
void StokeInfo::checkInstr(const BinaryContext &BC, const BinaryFunction &BF,
StokeFuncInfo &FuncInfo) {
void StokeInfo::checkInstr(const BinaryFunction &BF, StokeFuncInfo &FuncInfo) {
MCPlusBuilder *MIB = BF.getBinaryContext().MIB.get();
BitVector RegV(NumRegs, false);
for (BinaryBasicBlock *BB : BF.layout()) {
if (BB->empty()) {
continue;
}
for (MCInst &It : *BB) {
if (BC.MIB->isPseudo(It))
if (MIB->isPseudo(It))
continue;
// skip function with exception handling yet
if (BC.MIB->isEHLabel(It) || BC.MIB->isInvoke(It)) {
if (MIB->isEHLabel(It) || MIB->isInvoke(It)) {
FuncInfo.Omitted = true;
return;
}
// check if this function contains call instruction
if (BC.MIB->isCall(It)) {
if (MIB->isCall(It)) {
FuncInfo.HasCall = true;
const MCSymbol *TargetSymbol = BC.MIB->getTargetSymbol(It);
const MCSymbol *TargetSymbol = MIB->getTargetSymbol(It);
// if it is an indirect call, skip
if (TargetSymbol == nullptr) {
FuncInfo.Omitted = true;
@@ -74,12 +73,12 @@ void StokeInfo::checkInstr(const BinaryContext &BC, const BinaryFunction &BF,
}
// check if this function modify stack or heap
// TODO: more accurate analysis
bool IsPush = BC.MIB->isPush(It);
bool IsRipAddr = BC.MIB->hasPCRelOperand(It);
bool IsPush = MIB->isPush(It);
bool IsRipAddr = MIB->hasPCRelOperand(It);
if (IsPush) {
FuncInfo.StackOut = true;
}
if (BC.MIB->isStore(It) && !IsPush && !IsRipAddr) {
if (MIB->isStore(It) && !IsPush && !IsRipAddr) {
FuncInfo.HeapOut = true;
}
if (IsRipAddr) {
@@ -90,9 +89,8 @@ void StokeInfo::checkInstr(const BinaryContext &BC, const BinaryFunction &BF,
} // end of for (auto *BB : ...)
}
bool StokeInfo::checkFunction(const BinaryContext &BC, BinaryFunction &BF,
DataflowInfoManager &DInfo, RegAnalysis &RA,
StokeFuncInfo &FuncInfo) {
bool StokeInfo::checkFunction(BinaryFunction &BF, DataflowInfoManager &DInfo,
RegAnalysis &RA, StokeFuncInfo &FuncInfo) {
std::string Name = BF.getSymbol()->getName().str();
@@ -122,7 +120,7 @@ bool StokeInfo::checkFunction(const BinaryContext &BC, BinaryFunction &BF,
FuncInfo.TotalSize = BF.estimateSize();
FuncInfo.Score = BF.getFunctionScore();
checkInstr(BC, BF, FuncInfo);
checkInstr(BF, FuncInfo);
// register analysis
BinaryBasicBlock &EntryBB = BF.front();
@@ -137,12 +135,12 @@ bool StokeInfo::checkFunction(const BinaryContext &BC, BinaryFunction &BF,
BitVector LiveInBV =
*(DInfo.getLivenessAnalysis().getStateAt(FirstNonPseudo));
LiveInBV &= DefaultDefInMask;
getRegNameFromBitVec(BC, LiveInBV, &FuncInfo.DefIn);
getRegNameFromBitVec(BF.getBinaryContext(), LiveInBV, &FuncInfo.DefIn);
LLVM_DEBUG(dbgs() << "\t [LiveOut]\n\t ");
BitVector LiveOutBV = RA.getFunctionClobberList(&BF);
LiveOutBV &= DefaultLiveOutMask;
getRegNameFromBitVec(BC, LiveOutBV, &FuncInfo.LiveOut);
getRegNameFromBitVec(BF.getBinaryContext(), LiveOutBV, &FuncInfo.LiveOut);
outs() << " STOKE-INFO: end function \n";
return true;
@@ -183,9 +181,9 @@ void StokeInfo::runOnFunctions(BinaryContext &BC) {
// analyze all functions
FuncInfo.printCsvHeader(Outfile);
for (auto &BF : BC.getBinaryFunctions()) {
DataflowInfoManager DInfo(BC, BF.second, &RA/*RA.get()*/, nullptr);
DataflowInfoManager DInfo(BF.second, &RA, nullptr);
FuncInfo.reset();
if (checkFunction(BC, BF.second, DInfo, RA, FuncInfo)) {
if (checkFunction(BF.second, DInfo, RA, FuncInfo)) {
FuncInfo.printData(Outfile);
}
}

View File

@@ -79,10 +79,8 @@ protected:
}
public:
StackPointerTrackingForInternalCalls(const BinaryContext &BC,
BinaryFunction &BF)
: StackPointerTrackingBase<StackPointerTrackingForInternalCalls>(BC, BF) {
}
StackPointerTrackingForInternalCalls(BinaryFunction &BF)
: StackPointerTrackingBase<StackPointerTrackingForInternalCalls>(BF) {}
void run() {
StackPointerTrackingBase<StackPointerTrackingForInternalCalls>::run();
@@ -123,16 +121,16 @@ bool ValidateInternalCalls::fixCFGForPIC(BinaryFunction &Function) const {
bool ValidateInternalCalls::fixCFGForIC(BinaryFunction &Function) const {
const BinaryContext &BC = Function.getBinaryContext();
// Track SP value
StackPointerTrackingForInternalCalls SPTIC(BC, Function);
StackPointerTrackingForInternalCalls SPTIC(Function);
SPTIC.run();
// Track instructions reaching a given point of the CFG to answer
// "There is a path from entry to point A that contains instruction B"
ReachingInsns<false> RI(BC, Function);
ReachingInsns<false> RI(Function);
RI.run();
// We use the InsnToBB map that DataflowInfoManager provides us
DataflowInfoManager Info(BC, Function, nullptr, nullptr);
DataflowInfoManager Info(Function, nullptr, nullptr);
bool Updated = false;
@@ -244,7 +242,7 @@ bool ValidateInternalCalls::analyzeFunction(BinaryFunction &Function) const {
}
// Now track how the return address is used by tracking uses of Reg
ReachingDefOrUse</*Def=*/false> RU =
ReachingDefOrUse<false>(RA, BC, Function, Reg);
ReachingDefOrUse<false>(RA, Function, Reg);
RU.run();
int64_t Offset = static_cast<int64_t>(Target->getInputOffset());