Use range algorithms instead of unpacking begin/end

No functionality change is intended.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@278417 91177308-0d34-0410-b5e6-96231b3b80d8
David Majnemer 2016-08-11 21:15:00 +00:00
parent 4110644365
commit dc9c737666
46 changed files with 137 additions and 153 deletions
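For reference (not part of the diff content below): the range helpers this commit switches to (any_of, all_of, none_of, and is_contained from llvm/ADT/STLExtras.h) are thin wrappers that call begin()/end() on the caller's behalf. A minimal sketch under that assumption follows; the "sketch" namespace and the main() driver are illustrative and are not LLVM's actual implementation.

#include <algorithm>
#include <iterator>
#include <vector>

// Illustrative approximations of the STLExtras.h range wrappers; the real
// LLVM versions differ in template machinery but behave the same way.
namespace sketch {
template <typename R, typename UnaryPredicate>
bool any_of(R &&Range, UnaryPredicate P) {
  return std::any_of(std::begin(Range), std::end(Range), P);
}
template <typename R, typename UnaryPredicate>
bool all_of(R &&Range, UnaryPredicate P) {
  return std::all_of(std::begin(Range), std::end(Range), P);
}
template <typename R, typename UnaryPredicate>
bool none_of(R &&Range, UnaryPredicate P) {
  return std::none_of(std::begin(Range), std::end(Range), P);
}
template <typename R, typename E>
bool is_contained(R &&Range, const E &Element) {
  return std::find(std::begin(Range), std::end(Range), Element) !=
         std::end(Range);
}
} // namespace sketch

// Before: std::any_of(Vals.begin(), Vals.end(), Pred)
// After:  any_of(Vals, Pred)
int main() {
  std::vector<int> Vals = {1, 2, 3};
  return sketch::any_of(Vals, [](int X) { return X > 2; }) ? 0 : 1;
}

At the call sites changed below, the container is named once and the predicate reads directly against the whole range, which is the pattern applied throughout the hunks.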

View File

@ -13,6 +13,7 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include <array>
#include <vector>
namespace llvm {
@ -78,6 +79,11 @@ namespace llvm {
/*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
: Data(Vec.data()), Length(Vec.size()) {}
/// Construct an ArrayRef from a std::array
template <size_t N>
/*implicit*/ LLVM_CONSTEXPR ArrayRef(const std::array<T, N> &Arr)
: Data(Arr.data()), Length(N) {}
/// Construct an ArrayRef from a C array.
template <size_t N>
/*implicit*/ LLVM_CONSTEXPR ArrayRef(const T (&Arr)[N])
@ -257,6 +263,11 @@ namespace llvm {
/*implicit*/ MutableArrayRef(std::vector<T> &Vec)
: ArrayRef<T>(Vec) {}
/// Construct a MutableArrayRef from a std::array
template <size_t N>
/*implicit*/ LLVM_CONSTEXPR MutableArrayRef(std::array<T, N> &Arr)
: ArrayRef<T>(Arr) {}
/// Construct a MutableArrayRef from a C array.
template <size_t N>
/*implicit*/ LLVM_CONSTEXPR MutableArrayRef(T (&Arr)[N])
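Aside, not part of this file's diff: the constructors added above let a std::array bind to ArrayRef and MutableArrayRef directly. A small usage sketch with made-up names, assuming the header above:

#include <array>
#include "llvm/ADT/ArrayRef.h"

// Hypothetical helper, only here to show the implicit conversion.
static void takeView(llvm::ArrayRef<int> R) { (void)R; }

void example() {
  std::array<int, 4> A = {{1, 2, 3, 4}};
  llvm::ArrayRef<int> View(A);        // Data = A.data(), Length = 4
  llvm::MutableArrayRef<int> Mut(A);  // writable view over the same storage
  takeView(A);                        // the constructor is implicit, so this converts
  takeView(View);
  Mut[0] = 42;                        // writes through to A[0]
}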

View File

@ -21,6 +21,7 @@
#define LLVM_SUPPORT_COMMANDLINE_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@ -278,7 +279,7 @@ public:
return getNumOccurrencesFlag() == cl::ConsumeAfter;
}
bool isInAllSubCommands() const {
return std::any_of(Subs.begin(), Subs.end(), [](const SubCommand *SC) {
return any_of(Subs, [](const SubCommand *SC) {
return SC == &*AllSubCommands;
});
}

View File

@ -45,8 +45,7 @@ static void completeEphemeralValues(SmallVector<const Value *, 16> &WorkSet,
continue;
// If all uses of this value are ephemeral, then so is this value.
if (!std::all_of(V->user_begin(), V->user_end(),
[&](const User *U) { return EphValues.count(U); }))
if (!all_of(V->users(), [&](const User *U) { return EphValues.count(U); }))
continue;
EphValues.insert(V);

View File

@ -857,22 +857,22 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
if (CS.doesNotAccessMemory())
return MRI_NoModRef;
ModRefInfo ConservativeResult = CS.onlyReadsMemory() ? MRI_Ref : MRI_ModRef;
// Iterate through all the arguments to the called function. If any argument
// is based on GV, return the conservative result.
for (auto &A : CS.args()) {
SmallVector<Value*, 4> Objects;
GetUnderlyingObjects(A, Objects, DL);
// All objects must be identified.
if (!std::all_of(Objects.begin(), Objects.end(), isIdentifiedObject) &&
if (!all_of(Objects, isIdentifiedObject) &&
// Try ::alias to see if all objects are known not to alias GV.
!std::all_of(Objects.begin(), Objects.end(), [&](Value *V) {
!all_of(Objects, [&](Value *V) {
return this->alias(MemoryLocation(V), MemoryLocation(GV)) == NoAlias;
}))
}))
return ConservativeResult;
if (std::find(Objects.begin(), Objects.end(), GV) != Objects.end())
if (is_contained(Objects, GV))
return ConservativeResult;
}

View File

@ -2104,8 +2104,8 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
GetUnderlyingObjects(RHS, RHSUObjs, DL);
// Is the set of underlying objects all noalias calls?
auto IsNAC = [](SmallVectorImpl<Value *> &Objects) {
return std::all_of(Objects.begin(), Objects.end(), isNoAliasCall);
auto IsNAC = [](ArrayRef<Value *> Objects) {
return all_of(Objects, isNoAliasCall);
};
// Is the set of underlying objects all things which must be disjoint from
@ -2114,8 +2114,8 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
// live with the compared-to allocation). For globals, we exclude symbols
// that might be resolved lazily to symbols in another dynamically-loaded
// library (and, thus, could be malloc'ed by the implementation).
auto IsAllocDisjoint = [](SmallVectorImpl<Value *> &Objects) {
return std::all_of(Objects.begin(), Objects.end(), [](Value *V) {
auto IsAllocDisjoint = [](ArrayRef<Value *> Objects) {
return all_of(Objects, [](Value *V) {
if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))

View File

@ -1207,8 +1207,7 @@ LazyCallGraph::RefSCC::removeInternalRefEdge(Node &SourceN, Node &TargetN) {
if (!Result.empty())
assert(!IsLeaf && "This SCC cannot be a leaf as we have split out new "
"SCCs by removing this edge.");
if (!std::any_of(G->LeafRefSCCs.begin(), G->LeafRefSCCs.end(),
[&](RefSCC *C) { return C == this; }))
if (none_of(G->LeafRefSCCs, [&](RefSCC *C) { return C == this; }))
assert(!IsLeaf && "This SCC cannot be a leaf as it already had child "
"SCCs before we removed this edge.");
#endif

View File

@ -177,9 +177,8 @@ bool Loop::isRecursivelyLCSSAForm(DominatorTree &DT) const {
if (!isLCSSAForm(DT))
return false;
return std::all_of(begin(), end(), [&](const Loop *L) {
return L->isRecursivelyLCSSAForm(DT);
});
return all_of(*this,
[&](const Loop *L) { return L->isRecursivelyLCSSAForm(DT); });
}
bool Loop::isLoopSimplifyForm() const {
@ -366,8 +365,7 @@ Loop::getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const {
// In case of multiple edges from current block to exit block, collect
// only one edge in ExitBlocks. Use switchExitBlocks to keep track of
// duplicate edges.
if (std::find(SwitchExitBlocks.begin(), SwitchExitBlocks.end(), Successor)
== SwitchExitBlocks.end()) {
if (!is_contained(SwitchExitBlocks, Successor)) {
SwitchExitBlocks.push_back(Successor);
ExitBlocks.push_back(Successor);
}
@ -536,8 +534,7 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
assert(Subloop && "subloop is not an ancestor of the original loop");
}
// Get the current nearest parent of the Subloop exits, initially Unloop.
NearLoop =
SubloopParents.insert(std::make_pair(Subloop, &Unloop)).first->second;
NearLoop = SubloopParents.insert({Subloop, &Unloop}).first->second;
}
succ_iterator I = succ_begin(BB), E = succ_end(BB);

View File

@ -232,13 +232,13 @@ bool llvm::moduleCanBeRenamedForThinLTO(const Module &M) {
SmallPtrSet<GlobalValue *, 8> Used;
collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ false);
bool LocalIsUsed =
llvm::any_of(Used, [](GlobalValue *V) { return V->hasLocalLinkage(); });
any_of(Used, [](GlobalValue *V) { return V->hasLocalLinkage(); });
if (!LocalIsUsed)
return true;
// Walk all the instructions in the module and find if one is inline ASM
auto HasInlineAsm = llvm::any_of(M, [](const Function &F) {
return llvm::any_of(instructions(F), [](const Instruction &I) {
auto HasInlineAsm = any_of(M, [](const Function &F) {
return any_of(instructions(F), [](const Instruction &I) {
const CallInst *CallI = dyn_cast<CallInst>(&I);
if (!CallI)
return false;

View File

@ -549,9 +549,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(V)) break;
bool AnyIndexNotLoopInvariant =
std::any_of(GepIndices.begin(), GepIndices.end(),
[L](Value *Op) { return !L->isLoopInvariant(Op); });
bool AnyIndexNotLoopInvariant = any_of(
GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });
if (AnyIndexNotLoopInvariant)
break;

View File

@ -406,7 +406,7 @@ static bool isEphemeralValueOf(Instruction *I, const Value *E) {
// The instruction defining an assumption's condition itself is always
// considered ephemeral to that assumption (even if it has other
// non-ephemeral users). See r246696's test case for an example.
if (std::find(I->op_begin(), I->op_end(), E) != I->op_end())
if (is_contained(I->operands(), E))
return true;
while (!WorkSet.empty()) {
@ -415,8 +415,7 @@ static bool isEphemeralValueOf(Instruction *I, const Value *E) {
continue;
// If all uses of this value are ephemeral, then so is this value.
if (std::all_of(V->user_begin(), V->user_end(),
[&](const User *U) { return EphValues.count(U); })) {
if (all_of(V->users(), [&](const User *U) { return EphValues.count(U); })) {
if (V == E)
return true;

View File

@ -817,7 +817,7 @@ lltok::Kind LLLexer::LexIdentifier() {
int len = CurPtr-TokStart-3;
uint32_t bits = len * 4;
StringRef HexStr(TokStart + 3, len);
if (!std::all_of(HexStr.begin(), HexStr.end(), isxdigit)) {
if (!all_of(HexStr, isxdigit)) {
// Bad token, return it as an error.
CurPtr = TokStart+3;
return lltok::Error;

View File

@ -128,7 +128,7 @@ public:
void addValues(ArrayRef<DebugLocEntry::Value> Vals) {
Values.append(Vals.begin(), Vals.end());
sortUniqueValues();
assert(std::all_of(Values.begin(), Values.end(), [](DebugLocEntry::Value V){
assert(all_of(Values, [](DebugLocEntry::Value V) {
return V.isBitPiece();
}) && "value must be a piece");
}

View File

@ -449,8 +449,8 @@ DwarfDebug::constructDwarfCompileUnit(const DICompileUnit *DIUnit) {
DIUnit->getSplitDebugFilename());
}
CUMap.insert(std::make_pair(DIUnit, &NewCU));
CUDieMap.insert(std::make_pair(&Die, &NewCU));
CUMap.insert({DIUnit, &NewCU});
CUDieMap.insert({&Die, &NewCU});
return NewCU;
}
@ -844,8 +844,7 @@ DwarfDebug::buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc,
// If this piece overlaps with any open ranges, truncate them.
const DIExpression *DIExpr = Begin->getDebugExpression();
auto Last = std::remove_if(OpenRanges.begin(), OpenRanges.end(),
[&](DebugLocEntry::Value R) {
auto Last = remove_if(OpenRanges, [&](DebugLocEntry::Value R) {
return piecesOverlap(DIExpr, R.getExpression());
});
OpenRanges.erase(Last, OpenRanges.end());
@ -1437,7 +1436,7 @@ void DebugLocEntry::finalize(const AsmPrinter &AP,
const DebugLocEntry::Value &Value = Values[0];
if (Value.isBitPiece()) {
// Emit all pieces that belong to the same variable and range.
assert(std::all_of(Values.begin(), Values.end(), [](DebugLocEntry::Value P) {
assert(all_of(Values, [](DebugLocEntry::Value P) {
return P.isBitPiece();
}) && "all values are expected to be pieces");
assert(std::is_sorted(Values.begin(), Values.end()) &&
@ -1889,8 +1888,7 @@ void DwarfDebug::addDwarfTypeUnitType(DwarfCompileUnit &CU,
getDwoLineTable(CU));
DwarfTypeUnit &NewTU = *OwnedUnit;
DIE &UnitDie = NewTU.getUnitDie();
TypeUnitsUnderConstruction.push_back(
std::make_pair(std::move(OwnedUnit), CTy));
TypeUnitsUnderConstruction.emplace_back(std::move(OwnedUnit), CTy);
NewTU.addUInt(UnitDie, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
CU.getLanguage());

View File

@ -134,7 +134,7 @@ public:
Expr.append(V.Expr.begin(), V.Expr.end());
FrameIndex.append(V.FrameIndex.begin(), V.FrameIndex.end());
assert(std::all_of(Expr.begin(), Expr.end(), [](const DIExpression *E) {
assert(all_of(Expr, [](const DIExpression *E) {
return E && E->isBitPiece();
}) && "conflicting locations for variable");
}

View File

@ -2181,8 +2181,8 @@ void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
// If there are no uses, including partial uses, the def is dead.
if (std::none_of(UsedRegs.begin(), UsedRegs.end(),
[&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
if (none_of(UsedRegs,
[&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
MO.setIsDead();
}

View File

@ -11611,10 +11611,9 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
// Check if this store interferes with any of the loads that we found.
// If we find a load that aliases with this store, stop the sequence.
if (std::any_of(AliasLoadNodes.begin(), AliasLoadNodes.end(),
[&](LSBaseSDNode* Ldn) {
return isAlias(Ldn, StoreNodes[i].MemNode);
}))
if (any_of(AliasLoadNodes, [&](LSBaseSDNode *Ldn) {
return isAlias(Ldn, StoreNodes[i].MemNode);
}))
break;
// Mark this node as useful.

View File

@ -3394,8 +3394,8 @@ SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
// All operands must be vector types with the same number of elements as
// the result type and must be either UNDEF or a build vector of constant
// or UNDEF scalars.
if (!std::all_of(Ops.begin(), Ops.end(), IsConstantBuildVectorOrUndef) ||
!std::all_of(Ops.begin(), Ops.end(), IsScalarOrSameVectorSize))
if (!all_of(Ops, IsConstantBuildVectorOrUndef) ||
!all_of(Ops, IsScalarOrSameVectorSize))
return SDValue();
// If we are comparing vectors, then the result needs to be a i1 boolean

View File

@ -2730,7 +2730,7 @@ void SelectionDAGBuilder::visitFCmp(const User &I) {
// Check if the condition of the select has one use or two users that are both
// selects with the same condition.
static bool hasOnlySelectUsers(const Value *Cond) {
return std::all_of(Cond->user_begin(), Cond->user_end(), [](const Value *V) {
return all_of(Cond->users(), [](const Value *V) {
return isa<SelectInst>(V);
});
}

View File

@ -721,10 +721,11 @@ AttributeSet AttributeSet::get(LLVMContext &C,
const std::pair<unsigned, Attribute> &RHS) {
return LHS.first < RHS.first;
}) && "Misordered Attributes list!");
assert(std::none_of(Attrs.begin(), Attrs.end(),
[](const std::pair<unsigned, Attribute> &Pair) {
return Pair.second.hasAttribute(Attribute::None);
}) && "Pointless attribute!");
assert(none_of(Attrs,
[](const std::pair<unsigned, Attribute> &Pair) {
return Pair.second.hasAttribute(Attribute::None);
}) &&
"Pointless attribute!");
// Create a vector of (unsigned, AttributeSetNode*) pairs from the attributes
// list.
@ -738,8 +739,7 @@ AttributeSet AttributeSet::get(LLVMContext &C,
++I;
}
AttrPairVec.push_back(std::make_pair(Index,
AttributeSetNode::get(C, AttrVec)));
AttrPairVec.emplace_back(Index, AttributeSetNode::get(C, AttrVec));
}
return getImpl(C, AttrPairVec);
@ -791,13 +791,12 @@ AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
default:
Attr = Attribute::get(C, Kind);
}
Attrs.push_back(std::make_pair(Index, Attr));
Attrs.emplace_back(Index, Attr);
}
// Add target-dependent (string) attributes.
for (const auto &TDA : B.td_attrs())
Attrs.push_back(
std::make_pair(Index, Attribute::get(C, TDA.first, TDA.second)));
Attrs.emplace_back(Index, Attribute::get(C, TDA.first, TDA.second));
return get(C, Attrs);
}
@ -806,7 +805,7 @@ AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
ArrayRef<Attribute::AttrKind> Kinds) {
SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
for (Attribute::AttrKind K : Kinds)
Attrs.push_back(std::make_pair(Index, Attribute::get(C, K)));
Attrs.emplace_back(Index, Attribute::get(C, K));
return get(C, Attrs);
}
@ -814,7 +813,7 @@ AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
ArrayRef<StringRef> Kinds) {
SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
for (StringRef K : Kinds)
Attrs.push_back(std::make_pair(Index, Attribute::get(C, K)));
Attrs.emplace_back(Index, Attribute::get(C, K));
return get(C, Attrs);
}

View File

@ -1557,7 +1557,7 @@ MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
if (!T)
return &N;
if (!llvm::any_of(T->operands(), isOldLoopArgument))
if (none_of(T->operands(), isOldLoopArgument))
return &N;
SmallVector<Metadata *, 8> Ops;

View File

@ -4304,8 +4304,8 @@ void Verifier::verifyCompileUnits() {
if (CUs)
Listed.insert(CUs->op_begin(), CUs->op_end());
Assert(
std::all_of(CUVisited.begin(), CUVisited.end(),
[&Listed](const Metadata *CU) { return Listed.count(CU); }),
all_of(CUVisited,
[&Listed](const Metadata *CU) { return Listed.count(CU); }),
"All DICompileUnits must be listed in llvm.dbg.cu");
CUVisited.clear();
}

View File

@ -208,8 +208,7 @@ bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
return true;
for (const auto &Func : PD) {
const InstrProfRecord &IPR = Func.second;
if (std::any_of(IPR.Counts.begin(), IPR.Counts.end(),
[](uint64_t Count) { return Count > 0; }))
if (any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; }))
return true;
}
return false;

View File

@ -6328,7 +6328,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
EVT SubVT = SubV1.getValueType();
// We expect these to have been canonicalized to -1.
assert(std::all_of(ShuffleMask.begin(), ShuffleMask.end(), [&](int i) {
assert(all_of(ShuffleMask, [&](int i) {
return i < (int)VT.getVectorNumElements();
}) && "Unexpected shuffle index into UNDEF operand!");

View File

@ -245,7 +245,7 @@ bool HexagonDCE::rewrite(NodeAddr<InstrNode*> IA, SetVector<NodeId> &Remove) {
if (&DA.Addr->getOp() != &Op)
continue;
Defs = DFG.getRelatedRefs(IA, DA);
if (!std::all_of(Defs.begin(), Defs.end(), IsDead))
if (!all_of(Defs, IsDead))
return false;
break;
}

View File

@ -1509,7 +1509,7 @@ void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA,
bool PrecUp = RAI.covers(QR, RR);
// Skip all defs that are aliased to any of the defs that we have already
// seen. If we encounter a covering def, stop the stack traversal early.
if (std::any_of(Defs.begin(), Defs.end(), AliasQR)) {
if (any_of(Defs, AliasQR)) {
if (PrecUp)
break;
continue;

View File

@ -400,7 +400,7 @@ void Liveness::computePhiInfo() {
for (auto I = Uses.begin(), E = Uses.end(); I != E; ) {
auto UA = DFG.addr<UseNode*>(*I);
NodeList RDs = getAllReachingDefs(UI->first, UA);
if (std::any_of(RDs.begin(), RDs.end(), HasDef))
if (any_of(RDs, HasDef))
++I;
else
I = Uses.erase(I);

View File

@ -134,8 +134,7 @@ class PPCBoolRetToInt : public FunctionPass {
};
const auto &Users = P->users();
const auto &Operands = P->operands();
if (!std::all_of(Users.begin(), Users.end(), IsValidUser) ||
!std::all_of(Operands.begin(), Operands.end(), IsValidOperand))
if (!all_of(Users, IsValidUser) || !all_of(Operands, IsValidOperand))
ToRemove.push_back(P);
}
@ -153,8 +152,7 @@ class PPCBoolRetToInt : public FunctionPass {
// Condition 4 and 5
const auto &Users = P->users();
const auto &Operands = P->operands();
if (!std::all_of(Users.begin(), Users.end(), IsPromotable) ||
!std::all_of(Operands.begin(), Operands.end(), IsPromotable))
if (!all_of(Users, IsPromotable) || !all_of(Operands, IsPromotable))
ToRemove.push_back(P);
}
}
@ -199,7 +197,7 @@ class PPCBoolRetToInt : public FunctionPass {
auto Defs = findAllDefs(U);
// If the values are all Constants or Arguments, don't bother
if (!std::any_of(Defs.begin(), Defs.end(), isa<Instruction, Value *>))
if (none_of(Defs, isa<Instruction, Value *>))
return false;
// Presently, we only know how to handle PHINode, Constant, Arguments and

View File

@ -4060,8 +4060,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
return false;
// Functions containing by val parameters are not supported.
if (std::any_of(Ins.begin(), Ins.end(),
[](const ISD::InputArg& IA) { return IA.Flags.isByVal(); }))
if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
return false;
// No TCO/SCO on indirect call because Caller has to restore its TOC

View File

@ -4821,7 +4821,7 @@ static bool getTargetShuffleMaskIndices(SDValue MaskNode,
// We can always decode if the buildvector is all zero constants,
// but can't use isBuildVectorAllZeros as it might contain UNDEFs.
if (llvm::all_of(MaskNode->ops(), X86::isZeroNode)) {
if (all_of(MaskNode->ops(), X86::isZeroNode)) {
RawMask.append(VT.getSizeInBits() / MaskEltSizeInBits, 0);
return true;
}
@ -5087,7 +5087,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
// Check if we're getting a shuffle mask with zero'd elements.
if (!AllowSentinelZero)
if (llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
return false;
// If we have a fake unary shuffle, the shuffle mask is spread across two
@ -5197,11 +5197,10 @@ static bool resolveTargetShuffleInputs(SDValue Op, SDValue &Op0, SDValue &Op1,
return false;
int NumElts = Mask.size();
bool Op0InUse = std::any_of(Mask.begin(), Mask.end(), [NumElts](int Idx) {
bool Op0InUse = any_of(Mask, [NumElts](int Idx) {
return 0 <= Idx && Idx < NumElts;
});
bool Op1InUse = std::any_of(Mask.begin(), Mask.end(),
[NumElts](int Idx) { return NumElts <= Idx; });
bool Op1InUse = any_of(Mask, [NumElts](int Idx) { return NumElts <= Idx; });
Op0 = Op0InUse ? Ops[0] : SDValue();
Op1 = Op1InUse ? Ops[1] : SDValue();
@ -10352,8 +10351,8 @@ static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
// with a pack.
SDValue V = V1;
int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
for (int i = 0; i < 16; ++i)
if (Mask[i] >= 0)
(i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
@ -10364,10 +10363,8 @@ static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
// Check if any of the odd lanes in the v16i8 are used. If not, we can mask
// them out and avoid using UNPCK{L,H} to extract the elements of V as
// i16s.
if (std::none_of(std::begin(LoBlendMask), std::end(LoBlendMask),
[](int M) { return M >= 0 && M % 2 == 1; }) &&
std::none_of(std::begin(HiBlendMask), std::end(HiBlendMask),
[](int M) { return M >= 0 && M % 2 == 1; })) {
if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
// Use a mask to drop the high bytes.
VLoHalf = DAG.getBitcast(MVT::v8i16, V);
VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
@ -14338,9 +14335,8 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
Opcode == X86ISD::CMPP);
};
if (IsPackableComparison(In) ||
(In.getOpcode() == ISD::CONCAT_VECTORS &&
std::all_of(In->op_begin(), In->op_end(), IsPackableComparison))) {
if (IsPackableComparison(In) || (In.getOpcode() == ISD::CONCAT_VECTORS &&
all_of(In->ops(), IsPackableComparison))) {
if (SDValue V = truncateVectorCompareWithPACKSS(VT, In, DL, DAG, Subtarget))
return V;
}
@ -25282,7 +25278,7 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
return false;
bool MaskContainsZeros =
llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
any_of(Mask, [](int M) { return M == SM_SentinelZero; });
// If we have a single input shuffle with different shuffle patterns in the
// 128-bit lanes, use the variable mask to VPERMILPS.
@ -25578,11 +25574,11 @@ static bool combineX86ShufflesRecursively(ArrayRef<SDValue> SrcOps,
}
// Handle the all undef/zero cases early.
if (llvm::all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) {
if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) {
DCI.CombineTo(Root.getNode(), DAG.getUNDEF(Root.getValueType()));
return true;
}
if (llvm::all_of(Mask, [](int Idx) { return Idx < 0; })) {
if (all_of(Mask, [](int Idx) { return Idx < 0; })) {
// TODO - should we handle the mixed zero/undef case as well? Just returning
// a zero mask will lose information on undef elements possibly reducing
// future combine possibilities.
@ -25596,8 +25592,7 @@ static bool combineX86ShufflesRecursively(ArrayRef<SDValue> SrcOps,
for (int i = 0, e = Ops.size(); i < e; ++i) {
int lo = UsedOps.size() * MaskWidth;
int hi = lo + MaskWidth;
if (std::any_of(Mask.begin(), Mask.end(),
[lo, hi](int i) { return (lo <= i) && (i < hi); })) {
if (any_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
UsedOps.push_back(Ops[i]);
continue;
}
@ -30124,7 +30119,7 @@ static SDValue combineVectorCompareTruncation(SDNode *N, SDLoc &DL,
MVT InSVT = InVT.getScalarType();
assert(DAG.getTargetLoweringInfo().getBooleanContents(InVT) ==
llvm::TargetLoweringBase::ZeroOrNegativeOneBooleanContent &&
TargetLoweringBase::ZeroOrNegativeOneBooleanContent &&
"Expected comparison result to be zero/all bits");
// Check we have a truncation suited for PACKSS.

View File

@ -1737,7 +1737,7 @@ static bool isPointerValueDeadOnEntryToFunction(
for (auto *L : Loads) {
auto *LTy = L->getType();
if (!std::any_of(Stores.begin(), Stores.end(), [&](StoreInst *S) {
if (none_of(Stores, [&](const StoreInst *S) {
auto *STy = S->getValueOperand()->getType();
// The load is only dominated by the store if DomTree says so
// and the number of bits loaded in L is less than or equal to

View File

@ -59,14 +59,14 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
// eliminate the markers.
SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
ValuesToInspect.push_back(std::make_pair(V, false));
ValuesToInspect.emplace_back(V, false);
while (!ValuesToInspect.empty()) {
auto ValuePair = ValuesToInspect.pop_back_val();
const bool IsOffset = ValuePair.second;
for (auto &U : ValuePair.first->uses()) {
Instruction *I = cast<Instruction>(U.getUser());
auto *I = cast<Instruction>(U.getUser());
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (auto *LI = dyn_cast<LoadInst>(I)) {
// Ignore non-volatile loads, they are always ok.
if (!LI->isSimple()) return false;
continue;
@ -74,14 +74,13 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
// If uses of the bitcast are ok, we are ok.
ValuesToInspect.push_back(std::make_pair(I, IsOffset));
ValuesToInspect.emplace_back(I, IsOffset);
continue;
}
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
// If the GEP has all zero indices, it doesn't offset the pointer. If it
// doesn't, it does.
ValuesToInspect.push_back(
std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
continue;
}
@ -477,7 +476,7 @@ static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
!DL.isNonIntegralPointerType(Ty)) {
if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
if (all_of(LI.users(), [&LI](User *U) {
auto *SI = dyn_cast<StoreInst>(U);
return SI && SI->getPointerOperand() != &LI;
})) {

View File

@ -843,8 +843,7 @@ private:
for (auto *Phi : UsePhis) {
auto In = Phi->incoming_values();
if (std::all_of(In.begin(), In.end(),
[&](Use &U){return U == NewMemAcc;})) {
if (all_of(In, [&](Use &U) { return U == NewMemAcc; })) {
Phi->replaceAllUsesWith(NewMemAcc);
MSSA->removeMemoryAccess(Phi);
}

View File

@ -599,8 +599,8 @@ struct LoopInterchange : public FunctionPass {
} // end of namespace
bool LoopInterchangeLegality::areAllUsesReductions(Instruction *Ins, Loop *L) {
return !std::any_of(Ins->user_begin(), Ins->user_end(), [=](User *U) -> bool {
PHINode *UserIns = dyn_cast<PHINode>(U);
return none_of(Ins->users(), [=](User *U) -> bool {
auto *UserIns = dyn_cast<PHINode>(U);
RecurrenceDescriptor RD;
return !UserIns || !RecurrenceDescriptor::isReductionPHI(UserIns, L, RD);
});

View File

@ -113,10 +113,9 @@ bool doesStoreDominatesAllLatches(BasicBlock *StoreBlock, Loop *L,
DominatorTree *DT) {
SmallVector<BasicBlock *, 8> Latches;
L->getLoopLatches(Latches);
return std::all_of(Latches.begin(), Latches.end(),
[&](const BasicBlock *Latch) {
return DT->dominates(StoreBlock, Latch);
});
return all_of(Latches, [&](const BasicBlock *Latch) {
return DT->dominates(StoreBlock, Latch);
});
}
/// \brief Return true if the load is not executed on all paths in the loop.

View File

@ -521,16 +521,14 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P) {
if (Args.erase(C))
NeedLift = true;
else if (MayAlias) {
NeedLift = std::any_of(MemLocs.begin(), MemLocs.end(),
[C, &AA](const MemoryLocation &ML) {
return AA.getModRefInfo(C, ML);
});
NeedLift = any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
return AA.getModRefInfo(C, ML);
});
if (!NeedLift)
NeedLift = std::any_of(CallSites.begin(), CallSites.end(),
[C, &AA](const ImmutableCallSite &CS) {
return AA.getModRefInfo(C, CS);
});
NeedLift = any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
return AA.getModRefInfo(C, CS);
});
}
if (!NeedLift)

View File

@ -2268,8 +2268,7 @@ static bool shouldRewriteStatepointsIn(Function &F) {
void RewriteStatepointsForGC::stripNonValidAttributes(Module &M) {
#ifndef NDEBUG
assert(std::any_of(M.begin(), M.end(), shouldRewriteStatepointsIn) &&
"precondition!");
assert(any_of(M, shouldRewriteStatepointsIn) && "precondition!");
#endif
for (Function &F : M)

View File

@ -1534,8 +1534,7 @@ static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) {
Constant *Const = nullptr;
if (V->getType()->isStructTy()) {
std::vector<LatticeVal> IVs = Solver.getStructLatticeValueFor(V);
if (std::any_of(IVs.begin(), IVs.end(),
[](LatticeVal &LV) { return LV.isOverdefined(); }))
if (any_of(IVs, [](const LatticeVal &LV) { return LV.isOverdefined(); }))
return false;
std::vector<Constant *> ConstVals;
StructType *ST = dyn_cast<StructType>(V->getType());

View File

@ -432,19 +432,18 @@ class AllocaSlices::partition_iterator
// cannot change the max split slice end because we just checked that
// the prior partition ended prior to that max.
P.SplitTails.erase(
std::remove_if(
P.SplitTails.begin(), P.SplitTails.end(),
[&](Slice *S) { return S->endOffset() <= P.EndOffset; }),
remove_if(P.SplitTails,
[&](Slice *S) { return S->endOffset() <= P.EndOffset; }),
P.SplitTails.end());
assert(std::any_of(P.SplitTails.begin(), P.SplitTails.end(),
[&](Slice *S) {
return S->endOffset() == MaxSplitSliceEndOffset;
}) &&
assert(any_of(P.SplitTails,
[&](Slice *S) {
return S->endOffset() == MaxSplitSliceEndOffset;
}) &&
"Could not find the current max split slice offset!");
assert(std::all_of(P.SplitTails.begin(), P.SplitTails.end(),
[&](Slice *S) {
return S->endOffset() <= MaxSplitSliceEndOffset;
}) &&
assert(all_of(P.SplitTails,
[&](Slice *S) {
return S->endOffset() <= MaxSplitSliceEndOffset;
}) &&
"Max split slice end offset is not actually the max!");
}
}

View File

@ -229,7 +229,7 @@ blockDominatesAnExit(BasicBlock *BB,
DominatorTree &DT,
const SmallVectorImpl<BasicBlock *> &ExitBlocks) {
DomTreeNode *DomNode = DT.getNode(BB);
return llvm::any_of(ExitBlocks, [&](BasicBlock * EB) {
return any_of(ExitBlocks, [&](BasicBlock *EB) {
return DT.dominates(DomNode, DT.getNode(EB));
});
}

View File

@ -272,8 +272,9 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
// now we just recompute LCSSA for the outer loop, but it should be possible
// to fix it in-place.
bool NeedToFixLCSSA = PreserveLCSSA && CompletelyUnroll &&
std::any_of(ExitBlocks.begin(), ExitBlocks.end(),
[&](BasicBlock *BB) { return isa<PHINode>(BB->begin()); });
any_of(ExitBlocks, [](const BasicBlock *BB) {
return isa<PHINode>(BB->begin());
});
// We assume a run-time trip count if the compiler cannot
// figure out the loop trip count and the unroll-runtime

View File

@ -920,7 +920,7 @@ SmallVector<Instruction *, 8> llvm::findDefsUsedOutsideOfLoop(Loop *L) {
// be adapted into a pointer.
for (auto &Inst : *Block) {
auto Users = Inst.users();
if (std::any_of(Users.begin(), Users.end(), [&](User *U) {
if (any_of(Users, [&](User *U) {
auto *Use = cast<Instruction>(U);
return !L->contains(Use->getParent());
}))

View File

@ -1822,7 +1822,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout &DL) {
return false;
// Can't fold blocks that contain noduplicate or convergent calls.
if (llvm::any_of(*BB, [](const Instruction &I) {
if (any_of(*BB, [](const Instruction &I) {
const CallInst *CI = dyn_cast<CallInst>(&I);
return CI && (CI->cannotDuplicate() || CI->isConvergent());
}))

View File

@ -83,7 +83,7 @@ static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) {
}
static bool callHasFloatingPointArgument(const CallInst *CI) {
return std::any_of(CI->op_begin(), CI->op_end(), [](const Use &OI) {
return any_of(CI->operands(), [](const Use &OI) {
return OI->getType()->isFloatingPointTy();
});
}

View File

@ -671,7 +671,7 @@ void MDNodeMapper::UniquedGraph::propagateChanges() {
if (D.HasChanged)
continue;
if (!llvm::any_of(N->operands(), [&](const Metadata *Op) {
if (none_of(N->operands(), [&](const Metadata *Op) {
auto Where = Info.find(Op);
return Where != Info.end() && Where->second.HasChanged;
}))

View File

@ -520,7 +520,7 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
unsigned ChainIdx, ChainLen;
for (ChainIdx = 0, ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
Instruction *I = Chain[ChainIdx];
if (!any_of(VectorizableChainInstrs,
if (none_of(VectorizableChainInstrs,
[I](std::pair<Instruction *, unsigned> CI) {
return I == CI.first;
}))

View File

@ -108,24 +108,24 @@ bool EEVT::TypeSet::FillWithPossibleTypes(TreePattern &TP,
/// hasIntegerTypes - Return true if this TypeSet contains iAny or an
/// integer value type.
bool EEVT::TypeSet::hasIntegerTypes() const {
return std::any_of(TypeVec.begin(), TypeVec.end(), isInteger);
return any_of(TypeVec, isInteger);
}
/// hasFloatingPointTypes - Return true if this TypeSet contains an fAny or
/// a floating point value type.
bool EEVT::TypeSet::hasFloatingPointTypes() const {
return std::any_of(TypeVec.begin(), TypeVec.end(), isFloatingPoint);
return any_of(TypeVec, isFloatingPoint);
}
/// hasScalarTypes - Return true if this TypeSet contains a scalar value type.
bool EEVT::TypeSet::hasScalarTypes() const {
return std::any_of(TypeVec.begin(), TypeVec.end(), isScalar);
return any_of(TypeVec, isScalar);
}
/// hasVectorTypes - Return true if this TypeSet contains a vAny or a vector
/// value type.
bool EEVT::TypeSet::hasVectorTypes() const {
return std::any_of(TypeVec.begin(), TypeVec.end(), isVector);
return any_of(TypeVec, isVector);
}
@ -3602,10 +3602,9 @@ static void CombineChildVariants(TreePatternNode *Orig,
// (and GPRC:$a, GPRC:$b) -> (and GPRC:$b, GPRC:$a)
// which are the same pattern. Ignore the dups.
if (R->canPatternMatch(ErrString, CDP) &&
std::none_of(OutVariants.begin(), OutVariants.end(),
[&](TreePatternNode *Variant) {
return R->isIsomorphicTo(Variant, DepVars);
}))
none_of(OutVariants, [&](TreePatternNode *Variant) {
return R->isIsomorphicTo(Variant, DepVars);
}))
OutVariants.push_back(R.release());
// Increment indices to the next permutation by incrementing the