Wrap OptSize and MinSize attributes for easier and consistent access (NFCI)

Create wrapper methods in the Function class for the OptimizeForSize and MinSize
attributes. We want to hide the logic of "or'ing" them together when optimizing
just for size (-Os).

Currently, we are not consistent about this and rely on the front-end to always set
OptimizeForSize (-Os) whenever MinSize (-Oz) is on. Hence the 18 FIXME comments in this
patch: they mark call sites whose behavior should change, and those changes should land
as follow-on patches with regression tests.

This patch is NFC-intended: it just replaces existing direct accesses to the attributes
with the equivalent wrapper calls.
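
For example, a typical call site simplifies like this (a minimal sketch; the
variable name is illustrative):

  // Before: every pass had to remember to check both attributes.
  bool ForCodeSize = F->hasFnAttribute(Attribute::OptimizeForSize) ||
                     F->hasFnAttribute(Attribute::MinSize);

  // After: the wrapper encapsulates the rule that -Oz implies -Os.
  bool ForCodeSize = F->optForSize();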

Differential Revision: http://reviews.llvm.org/D11734



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@243994 91177308-0d34-0410-b5e6-96231b3b80d8
Sanjay Patel, 2015-08-04 15:49:57 +00:00
commit f360983642 (parent 2cbb48f2e3)
27 changed files with 53 additions and 50 deletions


@@ -395,6 +395,16 @@ public:
     addAttribute(n, Attribute::ReadOnly);
   }
 
+  /// Optimize this function for minimum size (-Oz).
+  bool optForMinSize() const {
+    return hasFnAttribute(Attribute::MinSize);
+  };
+
+  /// Optimize this function for size (-Os) or minimum size (-Oz).
+  bool optForSize() const {
+    return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize();
+  }
+
   /// copyAttributesFrom - copy all additional attributes (those not needed to
   /// create a Function) from the Function Src to this one.
   void copyAttributesFrom(const GlobalValue *Src) override;


@@ -606,6 +606,7 @@ static bool ProfitableToMerge(MachineBasicBlock *MBB1,
   // instructions that would be deleted in the merge.
   MachineFunction *MF = MBB1->getParent();
   if (EffectiveTailLen >= 2 &&
+      // FIXME: Use Function::optForSize().
       MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
       (I1 == MBB1->begin() || I2 == MBB2->begin()))
     return true;


@@ -214,6 +214,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
   TLI = TM->getSubtargetImpl(F)->getTargetLowering();
   TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+  // FIXME: Use Function::optForSize().
   OptSize = F.hasFnAttribute(Attribute::OptimizeForSize);
 
   /// This optimization identifies DIV instructions that can be


@@ -1064,6 +1064,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
   // exclusively on the loop info here so that we can align backedges in
   // unnatural CFGs and backedges that were introduced purely because of the
   // loop rotations done during this layout pass.
+  // FIXME: Use Function::optForSize().
   if (F.getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
     return;
   if (FunctionChain.begin() == FunctionChain.end())


@@ -427,6 +427,7 @@ bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
   Traces = &getAnalysis<MachineTraceMetrics>();
   MinInstr = 0;
+  // FIXME: Use Function::optForSize().
   OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
 
   DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');


@@ -79,6 +79,7 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
   Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
 
   // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
+  // FIXME: Use Function::optForSize().
   if (!Fn->hasFnAttribute(Attribute::OptimizeForSize))
     Alignment = std::max(Alignment,
                          STI->getTargetLowering()->getPrefFunctionAlignment());


@@ -428,9 +428,7 @@ namespace {
     DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
         : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
           OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {
-      auto *F = DAG.getMachineFunction().getFunction();
-      ForCodeSize = F->hasFnAttribute(Attribute::OptimizeForSize) ||
-                    F->hasFnAttribute(Attribute::MinSize);
+      ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize();
     }
 
     /// Runs the dag combiner on all nodes in the work list


@@ -4152,15 +4152,11 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
 }
 
 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
-  const Function *F = MF.getFunction();
-  bool HasMinSize = F->hasFnAttribute(Attribute::MinSize);
-  bool HasOptSize = F->hasFnAttribute(Attribute::OptimizeForSize);
-
   // On Darwin, -Os means optimize for size without hurting performance, so
   // only really optimize for size when -Oz (MinSize) is used.
   if (MF.getTarget().getTargetTriple().isOSDarwin())
-    return HasMinSize;
-  return HasOptSize || HasMinSize;
+    return MF.getFunction()->optForMinSize();
+  return MF.getFunction()->optForSize();
 }
 
 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,


@@ -3968,6 +3968,7 @@ static SDValue ExpandPowI(SDLoc DL, SDValue LHS, SDValue RHS,
       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
 
     const Function *F = DAG.getMachineFunction().getFunction();
+    // FIXME: Use Function::optForSize().
     if (!F->hasFnAttribute(Attribute::OptimizeForSize) ||
         // If optimizing for size, don't insert too many multiplies. This
         // inserts up to 5 multiplies.


@@ -563,6 +563,7 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
   // compensate for the duplication.
   unsigned MaxDuplicateCount;
   if (TailDuplicateSize.getNumOccurrences() == 0 &&
+      // FIXME: Use Function::optForSize().
       MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
     MaxDuplicateCount = 1;
   else


@@ -899,7 +899,7 @@ bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
   Loops = getAnalysisIfAvailable<MachineLoopInfo>();
   Traces = &getAnalysis<MachineTraceMetrics>();
   MinInstr = nullptr;
-  MinSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+  MinSize = MF.getFunction()->optForMinSize();
 
   bool Changed = false;
   CmpConv.runOnMachineFunction(MF);


@@ -53,9 +53,7 @@ public:
   }
 
   bool runOnMachineFunction(MachineFunction &MF) override {
-    ForCodeSize =
-        MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
-        MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+    ForCodeSize = MF.getFunction()->optForSize();
     Subtarget = &MF.getSubtarget<AArch64Subtarget>();
     return SelectionDAGISel::runOnMachineFunction(MF);
   }


@@ -8422,10 +8422,8 @@ static SDValue performSTORECombine(SDNode *N,
   if (!Subtarget->isCyclone())
     return SDValue();
 
-  // Don't split at Oz.
-  MachineFunction &MF = DAG.getMachineFunction();
-  bool IsMinSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
-  if (IsMinSize)
+  // Don't split at -Oz.
+  if (DAG.getMachineFunction().getFunction()->optForMinSize())
     return SDValue();
 
   SDValue StVal = S->getValue();


@@ -1652,9 +1652,7 @@ isProfitableToIfCvt(MachineBasicBlock &MBB,
   // If we are optimizing for size, see if the branch in the predecessor can be
   // lowered to cbn?z by the constant island lowering pass, and return false if
   // so. This results in a shorter instruction sequence.
-  const Function *F = MBB.getParent()->getFunction();
-  if (F->hasFnAttribute(Attribute::OptimizeForSize) ||
-      F->hasFnAttribute(Attribute::MinSize)) {
+  if (MBB.getParent()->getFunction()->optForSize()) {
     MachineBasicBlock *Pred = *MBB.pred_begin();
     if (!Pred->empty()) {
       MachineInstr *LastMI = &*Pred->rbegin();
@@ -1989,7 +1987,7 @@ bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
                                       unsigned NumBytes) {
   // This optimisation potentially adds lots of load and store
   // micro-operations, it's only really a great benefit to code-size.
-  if (!MF.getFunction()->hasFnAttribute(Attribute::MinSize))
+  if (!MF.getFunction()->optForMinSize())
     return false;
 
   // If only one register is pushed/popped, LLVM can use an LDR/STR
@@ -3652,6 +3650,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
   // instructions).
   if (Latency > 0 && Subtarget.isThumb2()) {
     const MachineFunction *MF = DefMI->getParent()->getParent();
+    // FIXME: Use Function::optForSize().
     if (MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
       --Latency;
   }


@@ -1826,7 +1826,6 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // FIXME: handle tail calls differently.
   unsigned CallOpc;
-  bool HasMinSizeAttr = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
   if (Subtarget->isThumb()) {
     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
       CallOpc = ARMISD::CALL_NOLINK;
@@ -1836,8 +1835,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
     if (!isDirect && !Subtarget->hasV5TOps())
       CallOpc = ARMISD::CALL_NOLINK;
     else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
-             // Emit regular call when code size is the priority
-             !HasMinSizeAttr)
+             // Emit regular call when code size is the priority
+             !MF.getFunction()->optForMinSize())
       // "mov lr, pc; b _foo" to avoid confusing the RSP
       CallOpc = ARMISD::CALL_NOLINK;
     else


@@ -294,8 +294,7 @@ bool ARMSubtarget::useMovt(const MachineFunction &MF) const {
   // immediates as it is inherently position independent, and may be out of
   // range otherwise.
   return !NoMovt && hasV6T2Ops() &&
-         (isTargetWindows() ||
-          !MF.getFunction()->hasFnAttribute(Attribute::MinSize));
+         (isTargetWindows() || !MF.getFunction()->optForMinSize());
 }
 
 bool ARMSubtarget::useFastISel() const {


@@ -633,10 +633,9 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
   if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
     return false;
 
-  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
-      STI->avoidMOVsShifterOperand())
+  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
     // Don't issue movs with shifter operand for some CPUs unless we
-    // are optimizing / minimizing for size.
+    // are optimizing for size.
     return false;
 
   unsigned Reg0 = MI->getOperand(0).getReg();
@@ -750,10 +749,9 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
   if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
     return false;
 
-  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
-      STI->avoidMOVsShifterOperand())
+  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
     // Don't issue movs with shifter operand for some CPUs unless we
-    // are optimizing / minimizing for size.
+    // are optimizing for size.
     return false;
 
   unsigned Limit = ~0U;
@@ -1012,9 +1010,9 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
   TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());
 
-  // Optimizing / minimizing size?
-  OptimizeSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
-  MinimizeSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+  // Optimizing / minimizing size? Minimizing size implies optimizing for size.
+  OptimizeSize = MF.getFunction()->optForSize();
+  MinimizeSize = MF.getFunction()->optForMinSize();
 
   BlockInfo.clear();
   BlockInfo.resize(MF.getNumBlockIDs());


@@ -1219,6 +1219,7 @@ MachineInstr *HexagonFrameLowering::getAlignaInstr(MachineFunction &MF) const {
 }
 
+// FIXME: Use Function::optForSize().
 inline static bool isOptSize(const MachineFunction &MF) {
   AttributeSet AF = MF.getFunction()->getAttributes();
   return AF.hasAttribute(AttributeSet::FunctionIndex,
@@ -1226,8 +1227,7 @@ inline static bool isOptSize(const MachineFunction &MF) {
 }
 
 inline static bool isMinSize(const MachineFunction &MF) {
-  AttributeSet AF = MF.getFunction()->getAttributes();
-  return AF.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+  return MF.getFunction()->optForMinSize();
 }


@@ -170,11 +170,7 @@ bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
     return true;
 
   // Don't do this when not optimizing for size.
-  bool OptForSize =
-      MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
-      MF.getFunction()->hasFnAttribute(Attribute::MinSize);
-
-  if (!OptForSize)
+  if (!MF.getFunction()->optForSize())
     return false;
 
   unsigned StackAlign = TFL->getStackAlignment();


@@ -462,6 +462,7 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
 
 void X86DAGToDAGISel::PreprocessISelDAG() {
   // OptForSize is used in pattern predicates that isel is matching.
+  // FIXME: Use Function::optForSize().
   OptForSize = MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
 
   for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),


@@ -5189,6 +5189,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
   // it may be detrimental to overall size. There needs to be a way to detect
   // that condition to know if this is truly a size win.
   const Function *F = DAG.getMachineFunction().getFunction();
+  // FIXME: Use Function::optForSize().
   bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
 
   // Handle broadcasting a single constant scalar from the constant pool
@@ -11118,8 +11119,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
     // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
     // combine either bitwise AND or insert of float 0.0 to set these bits.
 
-    const Function *F = DAG.getMachineFunction().getFunction();
-    bool MinSize = F->hasFnAttribute(Attribute::MinSize);
+    bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
     if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
       // If this is an insertion of 32-bits into the low 32-bits of
       // a vector, we prefer to generate a blend with immediate rather
@@ -13195,8 +13195,7 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
   // if we're optimizing for size, however, as that'll allow better folding
   // of memory operations.
   if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
-      !DAG.getMachineFunction().getFunction()->hasFnAttribute(
-          Attribute::MinSize) &&
+      !DAG.getMachineFunction().getFunction()->optForMinSize() &&
       !Subtarget->isAtom()) {
     unsigned ExtendOp =
         isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@@ -23962,6 +23961,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
   // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
   MachineFunction &MF = DAG.getMachineFunction();
+  // FIXME: Use Function::optForSize().
   bool OptForSize =
       MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);


@@ -4875,8 +4875,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
   // For CPUs that favor the register form of a call or push,
   // do not fold loads into calls or pushes, unless optimizing for size
   // aggressively.
-  if (isCallRegIndirect &&
-      !MF.getFunction()->hasFnAttribute(Attribute::MinSize) &&
+  if (isCallRegIndirect && !MF.getFunction()->optForMinSize() &&
       (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r ||
        MI->getOpcode() == X86::PUSH16r || MI->getOpcode() == X86::PUSH32r ||
        MI->getOpcode() == X86::PUSH64r))
@@ -5242,6 +5241,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
   // Unless optimizing for size, don't fold to avoid partial
   // register update stalls
+  // FIXME: Use Function::optForSize().
   if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
       hasPartialRegUpdate(MI->getOpcode()))
     return nullptr;
@@ -5351,6 +5351,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
   // Unless optimizing for size, don't fold to avoid partial
   // register update stalls
+  // FIXME: Use Function::optForSize().
   if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
       hasPartialRegUpdate(MI->getOpcode()))
     return nullptr;


@@ -93,8 +93,7 @@ FunctionPass *llvm::createX86PadShortFunctions() {
 /// runOnMachineFunction - Loop over all of the basic blocks, inserting
 /// NOOP instructions before early exits.
 bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
-  if (MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
-      MF.getFunction()->hasFnAttribute(Attribute::MinSize)) {
+  if (MF.getFunction()->optForSize()) {
     return false;
   }


@@ -265,6 +265,7 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
   // would decrease the threshold.
   Function *Caller = CS.getCaller();
   bool OptSize = Caller && !Caller->isDeclaration() &&
+                 // FIXME: Use Function::optForSize().
                  Caller->hasFnAttribute(Attribute::OptimizeForSize);
   if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
       OptSizeThreshold < thres)


@@ -208,6 +208,7 @@ namespace {
                           : UP.DynamicCostSavingsDiscount;
 
       if (!UserThreshold &&
+          // FIXME: Use Function::optForSize().
           L->getHeader()->getParent()->hasFnAttribute(
               Attribute::OptimizeForSize)) {
         Threshold = UP.OptSizeThreshold;


@@ -600,6 +600,7 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val,
   }
 
   // Do not do non-trivial unswitch while optimizing for size.
+  // FIXME: Use Function::optForSize().
   if (OptimizeForSize || F->hasFnAttribute(Attribute::OptimizeForSize))
     return false;


@@ -1616,6 +1616,7 @@ struct LoopVectorize : public FunctionPass {
     // Check the function attributes to find out if this function should be
     // optimized for size.
     bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
+                      // FIXME: Use Function::optForSize().
                       F->hasFnAttribute(Attribute::OptimizeForSize);
 
     // Compute the weighted frequency of this loop being executed and see if it