Wrap the OptSize and MinSize attributes for easier and consistent access (NFCI)

Create wrapper methods in the Function class for the OptimizeForSize and MinSize
attributes. We want to hide the logic of "or'ing" them together when optimizing
just for size (-Os).

Currently, we are not consistent about this and rely on the front-end to always set
OptimizeForSize (-Os) whenever MinSize (-Oz) is on. Thus, there are 18 FIXME comments
added here marking call sites whose conversion should land as follow-on patches with
regression tests.

This patch is NFC-intended: it just replaces existing direct accesses of the attributes
by the equivalent wrapper call.

Differential Revision: http://reviews.llvm.org/D11734



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@243994 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Sanjay Patel 2015-08-04 15:49:57 +00:00
parent 2cbb48f2e3
commit f360983642
27 changed files with 53 additions and 50 deletions

View File

@ -395,6 +395,16 @@ public:
addAttribute(n, Attribute::ReadOnly); addAttribute(n, Attribute::ReadOnly);
} }
/// Optimize this function for minimum size (-Oz).
bool optForMinSize() const {
return hasFnAttribute(Attribute::MinSize);
};
/// Optimize this function for size (-Os) or minimum size (-Oz).
bool optForSize() const {
return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize();
}
/// copyAttributesFrom - copy all additional attributes (those not needed to /// copyAttributesFrom - copy all additional attributes (those not needed to
/// create a Function) from the Function Src to this one. /// create a Function) from the Function Src to this one.
void copyAttributesFrom(const GlobalValue *Src) override; void copyAttributesFrom(const GlobalValue *Src) override;

View File

@ -606,6 +606,7 @@ static bool ProfitableToMerge(MachineBasicBlock *MBB1,
// instructions that would be deleted in the merge. // instructions that would be deleted in the merge.
MachineFunction *MF = MBB1->getParent(); MachineFunction *MF = MBB1->getParent();
if (EffectiveTailLen >= 2 && if (EffectiveTailLen >= 2 &&
// FIXME: Use Function::optForSize().
MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize) && MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
(I1 == MBB1->begin() || I2 == MBB2->begin())) (I1 == MBB1->begin() || I2 == MBB2->begin()))
return true; return true;

View File

@ -214,6 +214,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
TLI = TM->getSubtargetImpl(F)->getTargetLowering(); TLI = TM->getSubtargetImpl(F)->getTargetLowering();
TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
// FIXME: Use Function::optForSize().
OptSize = F.hasFnAttribute(Attribute::OptimizeForSize); OptSize = F.hasFnAttribute(Attribute::OptimizeForSize);
/// This optimization identifies DIV instructions that can be /// This optimization identifies DIV instructions that can be

View File

@ -1064,6 +1064,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// exclusively on the loop info here so that we can align backedges in // exclusively on the loop info here so that we can align backedges in
// unnatural CFGs and backedges that were introduced purely because of the // unnatural CFGs and backedges that were introduced purely because of the
// loop rotations done during this layout pass. // loop rotations done during this layout pass.
// FIXME: Use Function::optForSize().
if (F.getFunction()->hasFnAttribute(Attribute::OptimizeForSize)) if (F.getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
return; return;
if (FunctionChain.begin() == FunctionChain.end()) if (FunctionChain.begin() == FunctionChain.end())

View File

@ -427,6 +427,7 @@ bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
Traces = &getAnalysis<MachineTraceMetrics>(); Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = 0; MinInstr = 0;
// FIXME: Use Function::optForSize().
OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize); OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n'); DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');

View File

@ -79,6 +79,7 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
Alignment = STI->getTargetLowering()->getMinFunctionAlignment(); Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn. // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
// FIXME: Use Function::optForSize().
if (!Fn->hasFnAttribute(Attribute::OptimizeForSize)) if (!Fn->hasFnAttribute(Attribute::OptimizeForSize))
Alignment = std::max(Alignment, Alignment = std::max(Alignment,
STI->getTargetLowering()->getPrefFunctionAlignment()); STI->getTargetLowering()->getPrefFunctionAlignment());

View File

@ -428,9 +428,7 @@ namespace {
DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL) DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
: DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes), : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) { OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {
auto *F = DAG.getMachineFunction().getFunction(); ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize();
ForCodeSize = F->hasFnAttribute(Attribute::OptimizeForSize) ||
F->hasFnAttribute(Attribute::MinSize);
} }
/// Runs the dag combiner on all nodes in the work list /// Runs the dag combiner on all nodes in the work list

View File

@ -4152,15 +4152,11 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
} }
static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
const Function *F = MF.getFunction();
bool HasMinSize = F->hasFnAttribute(Attribute::MinSize);
bool HasOptSize = F->hasFnAttribute(Attribute::OptimizeForSize);
// On Darwin, -Os means optimize for size without hurting performance, so // On Darwin, -Os means optimize for size without hurting performance, so
// only really optimize for size when -Oz (MinSize) is used. // only really optimize for size when -Oz (MinSize) is used.
if (MF.getTarget().getTargetTriple().isOSDarwin()) if (MF.getTarget().getTargetTriple().isOSDarwin())
return HasMinSize; return MF.getFunction()->optForMinSize();
return HasOptSize || HasMinSize; return MF.getFunction()->optForSize();
} }
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl, static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,

View File

@ -3968,6 +3968,7 @@ static SDValue ExpandPowI(SDLoc DL, SDValue LHS, SDValue RHS,
return DAG.getConstantFP(1.0, DL, LHS.getValueType()); return DAG.getConstantFP(1.0, DL, LHS.getValueType());
const Function *F = DAG.getMachineFunction().getFunction(); const Function *F = DAG.getMachineFunction().getFunction();
// FIXME: Use Function::optForSize().
if (!F->hasFnAttribute(Attribute::OptimizeForSize) || if (!F->hasFnAttribute(Attribute::OptimizeForSize) ||
// If optimizing for size, don't insert too many multiplies. This // If optimizing for size, don't insert too many multiplies. This
// inserts up to 5 multiplies. // inserts up to 5 multiplies.

View File

@ -563,6 +563,7 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
// compensate for the duplication. // compensate for the duplication.
unsigned MaxDuplicateCount; unsigned MaxDuplicateCount;
if (TailDuplicateSize.getNumOccurrences() == 0 && if (TailDuplicateSize.getNumOccurrences() == 0 &&
// FIXME: Use Function::optForSize().
MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize)) MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
MaxDuplicateCount = 1; MaxDuplicateCount = 1;
else else

View File

@ -899,7 +899,7 @@ bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
Loops = getAnalysisIfAvailable<MachineLoopInfo>(); Loops = getAnalysisIfAvailable<MachineLoopInfo>();
Traces = &getAnalysis<MachineTraceMetrics>(); Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr; MinInstr = nullptr;
MinSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize); MinSize = MF.getFunction()->optForMinSize();
bool Changed = false; bool Changed = false;
CmpConv.runOnMachineFunction(MF); CmpConv.runOnMachineFunction(MF);

View File

@ -53,9 +53,7 @@ public:
} }
bool runOnMachineFunction(MachineFunction &MF) override { bool runOnMachineFunction(MachineFunction &MF) override {
ForCodeSize = ForCodeSize = MF.getFunction()->optForSize();
MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
MF.getFunction()->hasFnAttribute(Attribute::MinSize);
Subtarget = &MF.getSubtarget<AArch64Subtarget>(); Subtarget = &MF.getSubtarget<AArch64Subtarget>();
return SelectionDAGISel::runOnMachineFunction(MF); return SelectionDAGISel::runOnMachineFunction(MF);
} }

View File

@ -8422,10 +8422,8 @@ static SDValue performSTORECombine(SDNode *N,
if (!Subtarget->isCyclone()) if (!Subtarget->isCyclone())
return SDValue(); return SDValue();
// Don't split at Oz. // Don't split at -Oz.
MachineFunction &MF = DAG.getMachineFunction(); if (DAG.getMachineFunction().getFunction()->optForMinSize())
bool IsMinSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
if (IsMinSize)
return SDValue(); return SDValue();
SDValue StVal = S->getValue(); SDValue StVal = S->getValue();

View File

@ -1652,9 +1652,7 @@ isProfitableToIfCvt(MachineBasicBlock &MBB,
// If we are optimizing for size, see if the branch in the predecessor can be // If we are optimizing for size, see if the branch in the predecessor can be
// lowered to cbn?z by the constant island lowering pass, and return false if // lowered to cbn?z by the constant island lowering pass, and return false if
// so. This results in a shorter instruction sequence. // so. This results in a shorter instruction sequence.
const Function *F = MBB.getParent()->getFunction(); if (MBB.getParent()->getFunction()->optForSize()) {
if (F->hasFnAttribute(Attribute::OptimizeForSize) ||
F->hasFnAttribute(Attribute::MinSize)) {
MachineBasicBlock *Pred = *MBB.pred_begin(); MachineBasicBlock *Pred = *MBB.pred_begin();
if (!Pred->empty()) { if (!Pred->empty()) {
MachineInstr *LastMI = &*Pred->rbegin(); MachineInstr *LastMI = &*Pred->rbegin();
@ -1989,7 +1987,7 @@ bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
unsigned NumBytes) { unsigned NumBytes) {
// This optimisation potentially adds lots of load and store // This optimisation potentially adds lots of load and store
// micro-operations, it's only really a great benefit to code-size. // micro-operations, it's only really a great benefit to code-size.
if (!MF.getFunction()->hasFnAttribute(Attribute::MinSize)) if (!MF.getFunction()->optForMinSize())
return false; return false;
// If only one register is pushed/popped, LLVM can use an LDR/STR // If only one register is pushed/popped, LLVM can use an LDR/STR
@ -3652,6 +3650,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// instructions). // instructions).
if (Latency > 0 && Subtarget.isThumb2()) { if (Latency > 0 && Subtarget.isThumb2()) {
const MachineFunction *MF = DefMI->getParent()->getParent(); const MachineFunction *MF = DefMI->getParent()->getParent();
// FIXME: Use Function::optForSize().
if (MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize)) if (MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
--Latency; --Latency;
} }

View File

@ -1826,7 +1826,6 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// FIXME: handle tail calls differently. // FIXME: handle tail calls differently.
unsigned CallOpc; unsigned CallOpc;
bool HasMinSizeAttr = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
if (Subtarget->isThumb()) { if (Subtarget->isThumb()) {
if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK; CallOpc = ARMISD::CALL_NOLINK;
@ -1836,8 +1835,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (!isDirect && !Subtarget->hasV5TOps()) if (!isDirect && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK; CallOpc = ARMISD::CALL_NOLINK;
else if (doesNotRet && isDirect && Subtarget->hasRAS() && else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
// Emit regular call when code size is the priority // Emit regular call when code size is the priority
!HasMinSizeAttr) !MF.getFunction()->optForMinSize())
// "mov lr, pc; b _foo" to avoid confusing the RSP // "mov lr, pc; b _foo" to avoid confusing the RSP
CallOpc = ARMISD::CALL_NOLINK; CallOpc = ARMISD::CALL_NOLINK;
else else

View File

@ -294,8 +294,7 @@ bool ARMSubtarget::useMovt(const MachineFunction &MF) const {
// immediates as it is inherently position independent, and may be out of // immediates as it is inherently position independent, and may be out of
// range otherwise. // range otherwise.
return !NoMovt && hasV6T2Ops() && return !NoMovt && hasV6T2Ops() &&
(isTargetWindows() || (isTargetWindows() || !MF.getFunction()->optForMinSize());
!MF.getFunction()->hasFnAttribute(Attribute::MinSize));
} }
bool ARMSubtarget::useFastISel() const { bool ARMSubtarget::useFastISel() const {

View File

@ -633,10 +633,9 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr)) if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
return false; return false;
if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs && if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
STI->avoidMOVsShifterOperand())
// Don't issue movs with shifter operand for some CPUs unless we // Don't issue movs with shifter operand for some CPUs unless we
// are optimizing / minimizing for size. // are optimizing for size.
return false; return false;
unsigned Reg0 = MI->getOperand(0).getReg(); unsigned Reg0 = MI->getOperand(0).getReg();
@ -750,10 +749,9 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit)) if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
return false; return false;
if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs && if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
STI->avoidMOVsShifterOperand())
// Don't issue movs with shifter operand for some CPUs unless we // Don't issue movs with shifter operand for some CPUs unless we
// are optimizing / minimizing for size. // are optimizing for size.
return false; return false;
unsigned Limit = ~0U; unsigned Limit = ~0U;
@ -1012,9 +1010,9 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo()); TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());
// Optimizing / minimizing size? // Optimizing / minimizing size? Minimizing size implies optimizing for size.
OptimizeSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize); OptimizeSize = MF.getFunction()->optForSize();
MinimizeSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize); MinimizeSize = MF.getFunction()->optForMinSize();
BlockInfo.clear(); BlockInfo.clear();
BlockInfo.resize(MF.getNumBlockIDs()); BlockInfo.resize(MF.getNumBlockIDs());

View File

@ -1219,6 +1219,7 @@ MachineInstr *HexagonFrameLowering::getAlignaInstr(MachineFunction &MF) const {
} }
// FIXME: Use Function::optForSize().
inline static bool isOptSize(const MachineFunction &MF) { inline static bool isOptSize(const MachineFunction &MF) {
AttributeSet AF = MF.getFunction()->getAttributes(); AttributeSet AF = MF.getFunction()->getAttributes();
return AF.hasAttribute(AttributeSet::FunctionIndex, return AF.hasAttribute(AttributeSet::FunctionIndex,
@ -1226,8 +1227,7 @@ inline static bool isOptSize(const MachineFunction &MF) {
} }
inline static bool isMinSize(const MachineFunction &MF) { inline static bool isMinSize(const MachineFunction &MF) {
AttributeSet AF = MF.getFunction()->getAttributes(); return MF.getFunction()->optForMinSize();
return AF.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
} }

View File

@ -170,11 +170,7 @@ bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
return true; return true;
// Don't do this when not optimizing for size. // Don't do this when not optimizing for size.
bool OptForSize = if (!MF.getFunction()->optForSize())
MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
MF.getFunction()->hasFnAttribute(Attribute::MinSize);
if (!OptForSize)
return false; return false;
unsigned StackAlign = TFL->getStackAlignment(); unsigned StackAlign = TFL->getStackAlignment();

View File

@ -462,6 +462,7 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
void X86DAGToDAGISel::PreprocessISelDAG() { void X86DAGToDAGISel::PreprocessISelDAG() {
// OptForSize is used in pattern predicates that isel is matching. // OptForSize is used in pattern predicates that isel is matching.
// FIXME: Use Function::optForSize().
OptForSize = MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize); OptForSize = MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(), for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),

View File

@ -5189,6 +5189,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
// it may be detrimental to overall size. There needs to be a way to detect // it may be detrimental to overall size. There needs to be a way to detect
// that condition to know if this is truly a size win. // that condition to know if this is truly a size win.
const Function *F = DAG.getMachineFunction().getFunction(); const Function *F = DAG.getMachineFunction().getFunction();
// FIXME: Use Function::optForSize().
bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize); bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
// Handle broadcasting a single constant scalar from the constant pool // Handle broadcasting a single constant scalar from the constant pool
@ -11118,8 +11119,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits. // combine either bitwise AND or insert of float 0.0 to set these bits.
const Function *F = DAG.getMachineFunction().getFunction(); bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
bool MinSize = F->hasFnAttribute(Attribute::MinSize);
if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) { if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
// If this is an insertion of 32-bits into the low 32-bits of // If this is an insertion of 32-bits into the low 32-bits of
// a vector, we prefer to generate a blend with immediate rather // a vector, we prefer to generate a blend with immediate rather
@ -13195,8 +13195,7 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
// if we're optimizing for size, however, as that'll allow better folding // if we're optimizing for size, however, as that'll allow better folding
// of memory operations. // of memory operations.
if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 && if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
!DAG.getMachineFunction().getFunction()->hasFnAttribute( !DAG.getMachineFunction().getFunction()->optForMinSize() &&
Attribute::MinSize) &&
!Subtarget->isAtom()) { !Subtarget->isAtom()) {
unsigned ExtendOp = unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND; isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@ -23962,6 +23961,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
// FIXME: Use Function::optForSize().
bool OptForSize = bool OptForSize =
MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize); MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);

View File

@ -4875,8 +4875,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// For CPUs that favor the register form of a call or push, // For CPUs that favor the register form of a call or push,
// do not fold loads into calls or pushes, unless optimizing for size // do not fold loads into calls or pushes, unless optimizing for size
// aggressively. // aggressively.
if (isCallRegIndirect && if (isCallRegIndirect && !MF.getFunction()->optForMinSize() &&
!MF.getFunction()->hasFnAttribute(Attribute::MinSize) &&
(MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r || (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r ||
MI->getOpcode() == X86::PUSH16r || MI->getOpcode() == X86::PUSH32r || MI->getOpcode() == X86::PUSH16r || MI->getOpcode() == X86::PUSH32r ||
MI->getOpcode() == X86::PUSH64r)) MI->getOpcode() == X86::PUSH64r))
@ -5242,6 +5241,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Unless optimizing for size, don't fold to avoid partial // Unless optimizing for size, don't fold to avoid partial
// register update stalls // register update stalls
// FIXME: Use Function::optForSize().
if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) && if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode())) hasPartialRegUpdate(MI->getOpcode()))
return nullptr; return nullptr;
@ -5351,6 +5351,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Unless optimizing for size, don't fold to avoid partial // Unless optimizing for size, don't fold to avoid partial
// register update stalls // register update stalls
// FIXME: Use Function::optForSize().
if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) && if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode())) hasPartialRegUpdate(MI->getOpcode()))
return nullptr; return nullptr;

View File

@ -93,8 +93,7 @@ FunctionPass *llvm::createX86PadShortFunctions() {
/// runOnMachineFunction - Loop over all of the basic blocks, inserting /// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// NOOP instructions before early exits. /// NOOP instructions before early exits.
bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) { bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
if (MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) || if (MF.getFunction()->optForSize()) {
MF.getFunction()->hasFnAttribute(Attribute::MinSize)) {
return false; return false;
} }

View File

@ -265,6 +265,7 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
// would decrease the threshold. // would decrease the threshold.
Function *Caller = CS.getCaller(); Function *Caller = CS.getCaller();
bool OptSize = Caller && !Caller->isDeclaration() && bool OptSize = Caller && !Caller->isDeclaration() &&
// FIXME: Use Function::optForSize().
Caller->hasFnAttribute(Attribute::OptimizeForSize); Caller->hasFnAttribute(Attribute::OptimizeForSize);
if (!(InlineLimit.getNumOccurrences() > 0) && OptSize && if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
OptSizeThreshold < thres) OptSizeThreshold < thres)

View File

@ -208,6 +208,7 @@ namespace {
: UP.DynamicCostSavingsDiscount; : UP.DynamicCostSavingsDiscount;
if (!UserThreshold && if (!UserThreshold &&
// FIXME: Use Function::optForSize().
L->getHeader()->getParent()->hasFnAttribute( L->getHeader()->getParent()->hasFnAttribute(
Attribute::OptimizeForSize)) { Attribute::OptimizeForSize)) {
Threshold = UP.OptSizeThreshold; Threshold = UP.OptSizeThreshold;

View File

@ -600,6 +600,7 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val,
} }
// Do not do non-trivial unswitch while optimizing for size. // Do not do non-trivial unswitch while optimizing for size.
// FIXME: Use Function::optForSize().
if (OptimizeForSize || F->hasFnAttribute(Attribute::OptimizeForSize)) if (OptimizeForSize || F->hasFnAttribute(Attribute::OptimizeForSize))
return false; return false;

View File

@ -1616,6 +1616,7 @@ struct LoopVectorize : public FunctionPass {
// Check the function attributes to find out if this function should be // Check the function attributes to find out if this function should be
// optimized for size. // optimized for size.
bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled && bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
// FIXME: Use Function::optForSize().
F->hasFnAttribute(Attribute::OptimizeForSize); F->hasFnAttribute(Attribute::OptimizeForSize);
// Compute the weighted frequency of this loop being executed and see if it // Compute the weighted frequency of this loop being executed and see if it