[Analysis] Fix some Clang-tidy modernize and Include What You Use warnings; other minor fixes (NFC).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@311212 91177308-0d34-0410-b5e6-96231b3b80d8
Eugene Zelenko 2017-08-18 23:51:26 +00:00
parent 3c29a5e3d5
commit 89688ce180
11 changed files with 334 additions and 250 deletions
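For readers unfamiliar with the clang-tidy checks named in the commit message, here is a small self-contained sketch (the Cache classes are invented for illustration and do not appear in this commit) of the three mechanical rewrites applied throughout the files below: typedef replaced by using aliases, constructor member initialization moved to in-class default initializers, and special members spelled with = default / = delete in the public interface instead of the old private-and-undefined idiom.

#include <map>
#include <string>

// Pre-C++11 style that the modernize checks flag.
struct CacheOld {
  typedef std::map<std::string, unsigned> MapTy; // modernize-use-using
  unsigned Generation;
  CacheOld() : Generation(0) {}                  // modernize-use-default-member-init
private:
  CacheOld(const CacheOld &);                    // old idiom: private, never defined
  void operator=(const CacheOld &);
};

// The same class after applying the checks, mirroring the edits in this commit.
struct CacheNew {
  using MapTy = std::map<std::string, unsigned>;
  unsigned Generation = 0;                        // in-class default member initializer
  CacheNew() = default;
  CacheNew(const CacheNew &) = delete;            // deleted copy operations are public
  CacheNew &operator=(const CacheNew &) = delete;
};

int main() { return CacheNew().Generation; }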

View File

@ -6,6 +6,7 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a collection of visitors which walk the (instruction)
/// uses of a pointer. These visitors all provide the same essential behavior
@ -16,23 +17,36 @@
/// global variable, or function argument.
///
/// FIXME: Provide a variant which doesn't track offsets and is cheaper.
///
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_PTRUSEVISITOR_H
#define LLVM_ANALYSIS_PTRUSEVISITOR_H
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Compiler.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
#include <type_traits>
namespace llvm {
namespace detail {
/// \brief Implementation of non-dependent functionality for \c PtrUseVisitor.
///
/// See \c PtrUseVisitor for the public interface and detailed comments about
@ -115,7 +129,8 @@ protected:
/// This is used to maintain a worklist of to-visit uses. This is used to
/// make the visit be iterative rather than recursive.
struct UseToVisit {
typedef PointerIntPair<Use *, 1, bool> UseAndIsOffsetKnownPair;
using UseAndIsOffsetKnownPair = PointerIntPair<Use *, 1, bool>;
UseAndIsOffsetKnownPair UseAndIsOffsetKnown;
APInt Offset;
};
@ -128,7 +143,6 @@ protected:
/// @}
/// \name Per-visit state
/// This state is reset for each instruction visited.
/// @{
@ -145,7 +159,6 @@ protected:
/// @}
/// Note that the constructor is protected because this class must be a base
/// class, we can't create instances directly of this class.
PtrUseVisitorBase(const DataLayout &DL) : DL(DL) {}
@ -162,6 +175,7 @@ protected:
/// offsets and looking through GEPs.
bool adjustOffsetForGEP(GetElementPtrInst &GEPI);
};
} // end namespace detail
/// \brief A base class for visitors over the uses of a pointer value.
@ -193,7 +207,8 @@ template <typename DerivedT>
class PtrUseVisitor : protected InstVisitor<DerivedT>,
public detail::PtrUseVisitorBase {
friend class InstVisitor<DerivedT>;
typedef InstVisitor<DerivedT> Base;
using Base = InstVisitor<DerivedT>;
public:
PtrUseVisitor(const DataLayout &DL) : PtrUseVisitorBase(DL) {
@ -283,6 +298,6 @@ protected:
}
};
}
} // end namespace llvm
#endif
#endif // LLVM_ANALYSIS_PTRUSEVISITOR_H
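To make the interface above concrete, a rough usage sketch (not part of this commit; the EscapeFinder class and its policy are invented for illustration) of the CRTP pattern PtrUseVisitor expects: derive with yourself as the template parameter, override the InstVisitor handlers you care about, and drive the walk with visitPtr.

#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

namespace {
// Flags an alloca whose address is stored somewhere (i.e. the pointer escapes).
class EscapeFinder : public PtrUseVisitor<EscapeFinder> {
public:
  explicit EscapeFinder(const DataLayout &DL) : PtrUseVisitor<EscapeFinder>(DL) {}

  // InstVisitor-style handlers; anything not handled here falls back to the
  // defaults provided by PtrUseVisitor, which enqueue users and track offsets.
  void visitLoadInst(LoadInst &) {} // reading through the pointer is fine

  void visitStoreInst(StoreInst &SI) {
    // U and PI are protected state inherited from PtrUseVisitorBase: the use
    // currently being visited and the accumulated PtrInfo result.
    if (SI.getValueOperand() == *U)
      PI.setEscaped(&SI);
  }
};
} // end anonymous namespace

// Typical driver for the visitor above.
static bool pointerEscapes(AllocaInst &AI, const DataLayout &DL) {
  EscapeFinder V(DL);
  auto Info = V.visitPtr(AI); // PtrInfo summarizing the whole use walk
  return Info.isEscaped() || Info.isAborted();
}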

View File

@ -21,11 +21,21 @@
#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_H
#define LLVM_ANALYSIS_SCALAREVOLUTION_H
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
@ -33,30 +43,33 @@
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <utility>
namespace llvm {
class APInt;
class AssumptionCache;
class BasicBlock;
class Constant;
class ConstantInt;
class DominatorTree;
class Type;
class ScalarEvolution;
class DataLayout;
class TargetLibraryInfo;
class DominatorTree;
class GEPOperator;
class Instruction;
class LLVMContext;
class Operator;
class SCEV;
class raw_ostream;
class ScalarEvolution;
class SCEVAddRecExpr;
class SCEVConstant;
class SCEVExpander;
class SCEVPredicate;
class SCEVUnknown;
class Function;
template <> struct FoldingSetTrait<SCEV>;
template <> struct FoldingSetTrait<SCEVPredicate>;
class StructType;
class TargetLibraryInfo;
class Type;
class Value;
/// This class represents an analyzed expression in the program. These are
/// opaque objects that the client is not allowed to do much with directly.
@ -74,11 +87,7 @@ class SCEV : public FoldingSetNode {
protected:
/// This field is initialized to zero and may be used in subclasses to store
/// miscellaneous information.
unsigned short SubclassData;
private:
SCEV(const SCEV &) = delete;
void operator=(const SCEV &) = delete;
unsigned short SubclassData = 0;
public:
/// NoWrapFlags are bitfield indices into SubclassData.
@ -108,24 +117,22 @@ public:
};
explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy)
: FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}
: FastID(ID), SCEVType(SCEVTy) {}
SCEV(const SCEV &) = delete;
SCEV &operator=(const SCEV &) = delete;
unsigned getSCEVType() const { return SCEVType; }
/// Return the LLVM type of this SCEV expression.
///
Type *getType() const;
/// Return true if the expression is a constant zero.
///
bool isZero() const;
/// Return true if the expression is a constant one.
///
bool isOne() const;
/// Return true if the expression is a constant all-ones value.
///
bool isAllOnesValue() const;
/// Return true if the specified scev is negated, but not a constant.
@ -136,7 +143,6 @@ public:
void print(raw_ostream &OS) const;
/// This method is used for debugging.
///
void dump() const;
};
@ -144,10 +150,12 @@ public:
// temporary FoldingSetNodeID values.
template <> struct FoldingSetTrait<SCEV> : DefaultFoldingSetTrait<SCEV> {
static void Profile(const SCEV &X, FoldingSetNodeID &ID) { ID = X.FastID; }
static bool Equals(const SCEV &X, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID) {
return ID == X.FastID;
}
static unsigned ComputeHash(const SCEV &X, FoldingSetNodeID &TempID) {
return X.FastID.ComputeHash();
}
@ -221,7 +229,6 @@ inline raw_ostream &operator<<(raw_ostream &OS, const SCEVPredicate &P) {
// temporary FoldingSetNodeID values.
template <>
struct FoldingSetTrait<SCEVPredicate> : DefaultFoldingSetTrait<SCEVPredicate> {
static void Profile(const SCEVPredicate &X, FoldingSetNodeID &ID) {
ID = X.FastID;
}
@ -230,6 +237,7 @@ struct FoldingSetTrait<SCEVPredicate> : DefaultFoldingSetTrait<SCEVPredicate> {
unsigned IDHash, FoldingSetNodeID &TempID) {
return ID == X.FastID;
}
static unsigned ComputeHash(const SCEVPredicate &X,
FoldingSetNodeID &TempID) {
return X.FastID.ComputeHash();
@ -351,6 +359,7 @@ public:
/// Returns the set assumed no overflow flags.
IncrementWrapFlags getFlags() const { return Flags; }
/// Implementation of the SCEVPredicate interface
const SCEV *getExpr() const override;
bool implies(const SCEVPredicate *N) const override;
@ -371,11 +380,12 @@ public:
/// ScalarEvolution::Preds folding set. This is why the \c add function is sound.
class SCEVUnionPredicate final : public SCEVPredicate {
private:
typedef DenseMap<const SCEV *, SmallVector<const SCEVPredicate *, 4>>
PredicateMap;
using PredicateMap =
DenseMap<const SCEV *, SmallVector<const SCEVPredicate *, 4>>;
/// Vector with references to all predicates in this union.
SmallVector<const SCEVPredicate *, 16> Preds;
/// Maps SCEVs to predicates for quick look-ups.
PredicateMap SCEVToPreds;
@ -422,13 +432,16 @@ template <> struct DenseMapInfo<ExitLimitQuery> {
static inline ExitLimitQuery getEmptyKey() {
return ExitLimitQuery(nullptr, nullptr, true);
}
static inline ExitLimitQuery getTombstoneKey() {
return ExitLimitQuery(nullptr, nullptr, false);
}
static unsigned getHashValue(ExitLimitQuery Val) {
return hash_combine(hash_combine(Val.L, Val.ExitingBlock),
Val.AllowPredicates);
}
static bool isEqual(ExitLimitQuery LHS, ExitLimitQuery RHS) {
return LHS.L == RHS.L && LHS.ExitingBlock == RHS.ExitingBlock &&
LHS.AllowPredicates == RHS.AllowPredicates;
@ -474,6 +487,7 @@ private:
/// Value is deleted.
class SCEVCallbackVH final : public CallbackVH {
ScalarEvolution *SE;
void deleted() override;
void allUsesReplacedWith(Value *New) override;
@ -486,44 +500,37 @@ private:
friend class SCEVUnknown;
/// The function we are analyzing.
///
Function &F;
/// Does the module have any calls to the llvm.experimental.guard intrinsic
/// at all? If this is false, we avoid doing work that will only help if
/// there are guards present in the IR.
///
bool HasGuards;
/// The target library information for the target we are targeting.
///
TargetLibraryInfo &TLI;
/// The tracker for @llvm.assume intrinsics in this function.
AssumptionCache &AC;
/// The dominator tree.
///
DominatorTree &DT;
/// The loop information for the function we are currently analyzing.
///
LoopInfo &LI;
/// This SCEV is used to represent unknown trip counts and things.
std::unique_ptr<SCEVCouldNotCompute> CouldNotCompute;
/// The typedef for HasRecMap.
///
typedef DenseMap<const SCEV *, bool> HasRecMapType;
/// The type for HasRecMap.
using HasRecMapType = DenseMap<const SCEV *, bool>;
/// This is a cache to record whether a SCEV contains any scAddRecExpr.
HasRecMapType HasRecMap;
/// The typedef for ExprValueMap.
///
typedef std::pair<Value *, ConstantInt *> ValueOffsetPair;
typedef DenseMap<const SCEV *, SetVector<ValueOffsetPair>> ExprValueMapType;
/// The type for ExprValueMap.
using ValueOffsetPair = std::pair<Value *, ConstantInt *>;
using ExprValueMapType = DenseMap<const SCEV *, SetVector<ValueOffsetPair>>;
/// ExprValueMap -- This map records the original values from which
/// the SCEV expr is generated.
@ -547,13 +554,11 @@ private:
/// to V - Offset.
ExprValueMapType ExprValueMap;
/// The typedef for ValueExprMap.
///
typedef DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *>>
ValueExprMapType;
/// The type for ValueExprMap.
using ValueExprMapType =
DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *>>;
/// This is a cache of the values we have analyzed so far.
///
ValueExprMapType ValueExprMap;
/// Mark predicate values currently being processed by isImpliedCond.
@ -561,11 +566,11 @@ private:
/// Set to true by isLoopBackedgeGuardedByCond when we're walking the set of
/// conditions dominating the backedge of a loop.
bool WalkingBEDominatingConds;
bool WalkingBEDominatingConds = false;
/// Set to true by isKnownPredicateViaSplitting when we're trying to prove a
/// predicate by splitting it into a set of independent predicates.
bool ProvingSplitPredicate;
bool ProvingSplitPredicate = false;
/// Memoized values for the GetMinTrailingZeros
DenseMap<const SCEV *, uint32_t> MinTrailingZerosCache;
@ -580,7 +585,9 @@ private:
struct ExitLimit {
const SCEV *ExactNotTaken; // The exit is not taken exactly this many times
const SCEV *MaxNotTaken; // The exit is not taken at most this many times
bool MaxOrZero; // Not taken either exactly MaxNotTaken or zero times
// Not taken either exactly MaxNotTaken or zero times
bool MaxOrZero = false;
/// A set of predicate guards for this ExitLimit. The result is only valid
/// if all of the predicates in \c Predicates evaluate to 'true' at
@ -624,15 +631,16 @@ private:
PoisoningVH<BasicBlock> ExitingBlock;
const SCEV *ExactNotTaken;
std::unique_ptr<SCEVUnionPredicate> Predicate;
bool hasAlwaysTruePredicate() const {
return !Predicate || Predicate->isAlwaysTrue();
}
explicit ExitNotTakenInfo(PoisoningVH<BasicBlock> ExitingBlock,
const SCEV *ExactNotTaken,
std::unique_ptr<SCEVUnionPredicate> Predicate)
: ExitingBlock(ExitingBlock), ExactNotTaken(ExactNotTaken),
Predicate(std::move(Predicate)) {}
bool hasAlwaysTruePredicate() const {
return !Predicate || Predicate->isAlwaysTrue();
}
};
/// Information about the backedge-taken count of a loop. This currently
@ -662,12 +670,11 @@ private:
/// @}
public:
BackedgeTakenInfo() : MaxAndComplete(nullptr, 0), MaxOrZero(false) {}
BackedgeTakenInfo() : MaxAndComplete(nullptr, 0) {}
BackedgeTakenInfo(BackedgeTakenInfo &&) = default;
BackedgeTakenInfo &operator=(BackedgeTakenInfo &&) = default;
typedef std::pair<BasicBlock *, ExitLimit> EdgeExitInfo;
using EdgeExitInfo = std::pair<BasicBlock *, ExitLimit>;
/// Initialize BackedgeTakenInfo from a list of exact exit counts.
BackedgeTakenInfo(SmallVectorImpl<EdgeExitInfo> &&ExitCounts, bool Complete,
@ -857,7 +864,6 @@ private:
/// Implementation code for getSCEVAtScope; called at most once for each
/// SCEV+Loop pair.
///
const SCEV *computeSCEVAtScope(const SCEV *S, const Loop *L);
/// This looks up computed SCEV values for all instructions that depend on
@ -936,7 +942,8 @@ private:
const ExitLimit &EL);
};
typedef ExitLimitCache ExitLimitCacheTy;
using ExitLimitCacheTy = ExitLimitCache;
ExitLimit computeExitLimitFromCondCached(ExitLimitCacheTy &Cache,
const Loop *L, Value *ExitCond,
BasicBlock *TBB, BasicBlock *FBB,
@ -1099,7 +1106,6 @@ private:
/// Test if the given expression is known to satisfy the condition described
/// by Pred and the known constant ranges of LHS and RHS.
///
bool isKnownPredicateViaConstantRanges(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS);
@ -1145,7 +1151,6 @@ private:
/// equivalent to proving no signed (resp. unsigned) wrap in
/// {`Start`,+,`Step`} if `ExtendOpTy` is `SCEVSignExtendExpr`
/// (resp. `SCEVZeroExtendExpr`).
///
template <typename ExtendOpTy>
bool proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step,
const Loop *L);
@ -1188,8 +1193,8 @@ private:
public:
ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC,
DominatorTree &DT, LoopInfo &LI);
~ScalarEvolution();
ScalarEvolution(ScalarEvolution &&Arg);
~ScalarEvolution();
LLVMContext &getContext() const { return F.getContext(); }
@ -1213,7 +1218,6 @@ public:
/// Return true if the SCEV is a scAddRecExpr or it contains
/// scAddRecExpr. The result will be cached in HasRecMap.
///
bool containsAddRecurrence(const SCEV *S);
/// Return the Value set from which the SCEV expr is generated.
@ -1305,20 +1309,16 @@ public:
const SCEV *getOne(Type *Ty) { return getConstant(Ty, 1); }
/// Return an expression for sizeof AllocTy that is type IntTy
///
const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
/// Return an expression for offsetof on the given field with type IntTy
///
const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
/// Return the SCEV object corresponding to -V.
///
const SCEV *getNegativeSCEV(const SCEV *V,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
/// Return the SCEV object corresponding to ~V.
///
const SCEV *getNotSCEV(const SCEV *V);
/// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
@ -1446,7 +1446,6 @@ public:
/// Note that it is not valid to call this method on a loop without a
/// loop-invariant backedge-taken count (see
/// hasLoopInvariantBackedgeTakenCount).
///
const SCEV *getBackedgeTakenCount(const Loop *L);
/// Similar to getBackedgeTakenCount, except it will add a set of
@ -1527,28 +1526,22 @@ public:
}
/// Test if the given expression is known to be negative.
///
bool isKnownNegative(const SCEV *S);
/// Test if the given expression is known to be positive.
///
bool isKnownPositive(const SCEV *S);
/// Test if the given expression is known to be non-negative.
///
bool isKnownNonNegative(const SCEV *S);
/// Test if the given expression is known to be non-positive.
///
bool isKnownNonPositive(const SCEV *S);
/// Test if the given expression is known to be non-zero.
///
bool isKnownNonZero(const SCEV *S);
/// Test if the given expression is known to satisfy the condition described
/// by Pred, LHS, and RHS.
///
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
const SCEV *RHS);
@ -1578,7 +1571,6 @@ public:
/// iff any changes were made. If the operands are provably equal or
/// unequal, LHS and RHS are set to the same value and Pred is set to either
/// ICMP_EQ or ICMP_NE.
///
bool SimplifyICmpOperands(ICmpInst::Predicate &Pred, const SCEV *&LHS,
const SCEV *&RHS, unsigned Depth = 0);
@ -1784,17 +1776,18 @@ private:
/// The head of a linked list of all SCEVUnknown values that have been
/// allocated. This is used by releaseMemory to locate them all and call
/// their destructors.
SCEVUnknown *FirstUnknown;
SCEVUnknown *FirstUnknown = nullptr;
};
/// Analysis pass that exposes the \c ScalarEvolution for a function.
class ScalarEvolutionAnalysis
: public AnalysisInfoMixin<ScalarEvolutionAnalysis> {
friend AnalysisInfoMixin<ScalarEvolutionAnalysis>;
static AnalysisKey Key;
public:
typedef ScalarEvolution Result;
using Result = ScalarEvolution;
ScalarEvolution run(Function &F, FunctionAnalysisManager &AM);
};
@ -1806,6 +1799,7 @@ class ScalarEvolutionPrinterPass
public:
explicit ScalarEvolutionPrinterPass(raw_ostream &OS) : OS(OS) {}
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
@ -1843,6 +1837,7 @@ public:
class PredicatedScalarEvolution {
public:
PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L);
const SCEVUnionPredicate &getUnionPredicate() const;
/// Returns the SCEV expression of V, in the context of the current SCEV
@ -1887,7 +1882,7 @@ private:
/// Holds a SCEV and the version number of the SCEV predicate used to
/// perform the rewrite of the expression.
typedef std::pair<unsigned, const SCEV *> RewriteEntry;
using RewriteEntry = std::pair<unsigned, const SCEV *>;
/// Maps a SCEV to the rewrite result of that SCEV at a certain version
/// number. If this number doesn't match the current Generation, we will
@ -1913,11 +1908,12 @@ private:
/// expression we mark it with the version of the predicate. We use this to
/// figure out if the predicate has changed from the last rewrite of the
/// SCEV. If so, we need to perform a new rewrite.
unsigned Generation;
unsigned Generation = 0;
/// The backedge taken count.
const SCEV *BackedgeCount;
const SCEV *BackedgeCount = nullptr;
};
}
#endif
} // end namespace llvm
#endif // LLVM_ANALYSIS_SCALAREVOLUTION_H
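As a companion to the interface above, a rough client-side sketch (not from this commit; the helper function and its policy are invented for illustration) of the typical ScalarEvolution workflow: map an IR value to a SCEV, classify it, and query the loop's backedge-taken count.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"

using namespace llvm;

// Returns true if V is an affine induction of L and L's trip count is computable.
static bool isSimpleCountedInduction(ScalarEvolution &SE, const Loop *L, Value *V) {
  const SCEV *S = SE.getSCEV(V);                // the analyzed expression for V
  const auto *AR = dyn_cast<SCEVAddRecExpr>(S); // a {start,+,step}<L> recurrence?
  if (!AR || AR->getLoop() != L || !AR->isAffine())
    return false;

  // Backedge-taken count; SCEVCouldNotCompute means SCEV gave up on this loop.
  const SCEV *BTC = SE.getBackedgeTakenCount(L);
  return !isa<SCEVCouldNotCompute>(BTC) && SE.isKnownNonNegative(BTC);
}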

View File

@ -14,15 +14,27 @@
#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
namespace llvm {
class ConstantInt;
class ConstantRange;
class DominatorTree;
class APInt;
class Constant;
class ConstantRange;
class Loop;
class Type;
enum SCEVTypes {
// These should be ordered in terms of increasing complexity to make the
@ -37,8 +49,10 @@ namespace llvm {
friend class ScalarEvolution;
ConstantInt *V;
SCEVConstant(const FoldingSetNodeIDRef ID, ConstantInt *v) :
SCEV(ID, scConstant), V(v) {}
public:
ConstantInt *getValue() const { return V; }
const APInt &getAPInt() const { return getValue()->getValue(); }
@ -117,7 +131,6 @@ namespace llvm {
}
};
/// This node is a base class providing common functionality for
/// n'ary operators.
class SCEVNAryExpr : public SCEV {
@ -135,13 +148,15 @@ namespace llvm {
public:
size_t getNumOperands() const { return NumOperands; }
const SCEV *getOperand(unsigned i) const {
assert(i < NumOperands && "Operand index out of range!");
return Operands[i];
}
typedef const SCEV *const *op_iterator;
typedef iterator_range<op_iterator> op_range;
using op_iterator = const SCEV *const *;
using op_range = iterator_range<op_iterator>;
op_iterator op_begin() const { return Operands; }
op_iterator op_end() const { return Operands + NumOperands; }
op_range operands() const {
@ -198,15 +213,13 @@ namespace llvm {
}
};
/// This node represents an addition of some number of SCEVs.
class SCEVAddExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
SCEVAddExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N)
: SCEVCommutativeExpr(ID, scAddExpr, O, N) {
}
: SCEVCommutativeExpr(ID, scAddExpr, O, N) {}
public:
Type *getType() const {
@ -222,15 +235,13 @@ namespace llvm {
}
};
/// This node represents multiplication of some number of SCEVs.
class SCEVMulExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
SCEVMulExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N)
: SCEVCommutativeExpr(ID, scMulExpr, O, N) {
}
: SCEVCommutativeExpr(ID, scMulExpr, O, N) {}
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
@ -239,13 +250,13 @@ namespace llvm {
}
};
/// This class represents a binary unsigned division operation.
class SCEVUDivExpr : public SCEV {
friend class ScalarEvolution;
const SCEV *LHS;
const SCEV *RHS;
SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
: SCEV(ID, scUDivExpr), LHS(lhs), RHS(rhs) {}
@ -268,7 +279,6 @@ namespace llvm {
}
};
/// This node represents a polynomial recurrence on the trip count
/// of the specified loop. This is the primary focus of the
/// ScalarEvolution framework; all the other SCEV subclasses are
@ -368,7 +378,6 @@ namespace llvm {
}
};
/// This class represents an unsigned maximum selection.
class SCEVUMaxExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
@ -393,10 +402,6 @@ namespace llvm {
class SCEVUnknown final : public SCEV, private CallbackVH {
friend class ScalarEvolution;
// Implement CallbackVH.
void deleted() override;
void allUsesReplacedWith(Value *New) override;
/// The parent ScalarEvolution value. This is used to update the
/// parent's maps when the value associated with a SCEVUnknown is
/// deleted or RAUW'd.
@ -410,6 +415,10 @@ namespace llvm {
ScalarEvolution *se, SCEVUnknown *next) :
SCEV(ID, scUnknown), CallbackVH(V), SE(se), Next(next) {}
// Implement CallbackVH.
void deleted() override;
void allUsesReplacedWith(Value *New) override;
public:
Value *getValue() const { return getValPtr(); }
@ -490,6 +499,7 @@ namespace llvm {
if (Visited.insert(S).second && Visitor.follow(S))
Worklist.push_back(S);
}
public:
SCEVTraversal(SV& V): Visitor(V) {}
@ -682,7 +692,7 @@ namespace llvm {
}
};
typedef DenseMap<const Value*, Value*> ValueToValueMap;
using ValueToValueMap = DenseMap<const Value *, Value *>;
/// The SCEVParameterRewriter takes a scalar evolution expression and updates
/// the SCEVUnknown components following the Map (Value -> Value).
@ -714,22 +724,22 @@ namespace llvm {
bool InterpretConsts;
};
typedef DenseMap<const Loop*, const SCEV*> LoopToScevMapT;
using LoopToScevMapT = DenseMap<const Loop *, const SCEV *>;
/// The SCEVLoopAddRecRewriter takes a scalar evolution expression and applies
/// the Map (Loop -> SCEV) to all AddRecExprs.
class SCEVLoopAddRecRewriter
: public SCEVRewriteVisitor<SCEVLoopAddRecRewriter> {
public:
SCEVLoopAddRecRewriter(ScalarEvolution &SE, LoopToScevMapT &M)
: SCEVRewriteVisitor(SE), Map(M) {}
static const SCEV *rewrite(const SCEV *Scev, LoopToScevMapT &Map,
ScalarEvolution &SE) {
SCEVLoopAddRecRewriter Rewriter(SE, Map);
return Rewriter.visit(Scev);
}
SCEVLoopAddRecRewriter(ScalarEvolution &SE, LoopToScevMapT &M)
: SCEVRewriteVisitor(SE), Map(M) {}
const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
@ -748,6 +758,7 @@ namespace llvm {
private:
LoopToScevMapT &Map;
};
}
#endif
} // end namespace llvm
#endif // LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
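The FindSCEVSize and SCEVCollect* helpers later in this commit all use the traversal protocol declared above; here is a rough standalone sketch (not part of this commit) of the same follow()/isDone() idiom, driven through visitAll.

#include "llvm/Analysis/ScalarEvolutionExpressions.h"

using namespace llvm;

namespace {
// Counts the SCEVUnknown leaves in an expression tree; each sub-expression is
// visited at most once because SCEVTraversal keeps a Visited set internally.
struct CountUnknowns {
  unsigned Count = 0;

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S))
      ++Count;
    return true; // keep descending into the operands of S
  }

  bool isDone() const { return false; } // never cut the walk short
};
} // end anonymous namespace

static unsigned countUnknowns(const SCEV *Expr) {
  CountUnknowns C;
  visitAll(Expr, C);
  return C.Count;
}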

View File

@ -6,22 +6,27 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the interface for a metadata-based scoped no-alias analysis.
///
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_SCOPEDNOALIASAA_H
#define LLVM_ANALYSIS_SCOPEDNOALIASAA_H
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <memory>
namespace llvm {
class Function;
class MDNode;
class MemoryLocation;
/// A simple AA result which uses scoped-noalias metadata to answer queries.
class ScopedNoAliasAAResult : public AAResultBase<ScopedNoAliasAAResult> {
friend AAResultBase<ScopedNoAliasAAResult>;
@ -46,10 +51,11 @@ private:
/// Analysis pass providing a never-invalidated alias analysis result.
class ScopedNoAliasAA : public AnalysisInfoMixin<ScopedNoAliasAA> {
friend AnalysisInfoMixin<ScopedNoAliasAA>;
static AnalysisKey Key;
public:
typedef ScopedNoAliasAAResult Result;
using Result = ScopedNoAliasAAResult;
ScopedNoAliasAAResult run(Function &F, FunctionAnalysisManager &AM);
};
@ -77,6 +83,7 @@ public:
// scoped noalias analysis.
//
ImmutablePass *createScopedNoAliasAAWrapperPass();
}
#endif
} // end namespace llvm
#endif // LLVM_ANALYSIS_SCOPEDNOALIASAA_H

View File

@ -17,22 +17,22 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/BasicBlock.h"
#include <set>
#include <utility>
#include <vector>
namespace llvm {
class Value;
class Constant;
class Argument;
class BasicBlock;
class Constant;
class Function;
class Instruction;
class PHINode;
class TerminatorInst;
class BasicBlock;
class Function;
class SparseSolver;
class raw_ostream;
class SparseSolver;
class TerminatorInst;
class Value;
template <typename T> class SmallVectorImpl;
/// AbstractLatticeFunction - This class is implemented by the dataflow instance
@ -44,7 +44,7 @@ template <typename T> class SmallVectorImpl;
///
class AbstractLatticeFunction {
public:
typedef void *LatticeVal;
using LatticeVal = void *;
private:
LatticeVal UndefVal, OverdefinedVal, UntrackedVal;
@ -56,6 +56,7 @@ public:
OverdefinedVal = overdefinedVal;
UntrackedVal = untrackedVal;
}
virtual ~AbstractLatticeFunction();
LatticeVal getUndefVal() const { return UndefVal; }
@ -109,9 +110,8 @@ public:
/// SparseSolver - This class is a general purpose solver for Sparse Conditional
/// Propagation with a programmable lattice function.
///
class SparseSolver {
typedef AbstractLatticeFunction::LatticeVal LatticeVal;
using LatticeVal = AbstractLatticeFunction::LatticeVal;
/// LatticeFunc - This is the object that knows the lattice and how to
/// compute transfer functions.
@ -126,19 +126,17 @@ class SparseSolver {
/// KnownFeasibleEdges - Entries in this set are edges which have already had
/// PHI nodes retriggered.
typedef std::pair<BasicBlock*,BasicBlock*> Edge;
using Edge = std::pair<BasicBlock *, BasicBlock *>;
std::set<Edge> KnownFeasibleEdges;
SparseSolver(const SparseSolver&) = delete;
void operator=(const SparseSolver&) = delete;
public:
explicit SparseSolver(AbstractLatticeFunction *Lattice)
: LatticeFunc(Lattice) {}
SparseSolver(const SparseSolver &) = delete;
SparseSolver &operator=(const SparseSolver &) = delete;
~SparseSolver() { delete LatticeFunc; }
/// Solve - Solve for constants and executable blocks.
///
void Solve(Function &F);
void Print(Function &F, raw_ostream &OS) const;
@ -156,7 +154,6 @@ public:
/// map yet. This function is necessary because not all values should start
/// out in the underdefined state... Arguments should be overdefined, and
/// constants should be marked as constants.
///
LatticeVal getOrInitValueState(Value *V);
/// isEdgeFeasible - Return true if the control flow edge from the 'From'

View File

@ -6,22 +6,28 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the interface for a metadata-based TBAA. See the source file for
/// details on the algorithm.
///
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
#define LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <memory>
namespace llvm {
class Function;
class MDNode;
class MemoryLocation;
/// A simple AA result that uses TBAA metadata to answer queries.
class TypeBasedAAResult : public AAResultBase<TypeBasedAAResult> {
friend AAResultBase<TypeBasedAAResult>;
@ -50,10 +56,11 @@ private:
/// Analysis pass providing a never-invalidated alias analysis result.
class TypeBasedAA : public AnalysisInfoMixin<TypeBasedAA> {
friend AnalysisInfoMixin<TypeBasedAA>;
static AnalysisKey Key;
public:
typedef TypeBasedAAResult Result;
using Result = TypeBasedAAResult;
TypeBasedAAResult run(Function &F, FunctionAnalysisManager &AM);
};
@ -81,6 +88,7 @@ public:
// type-based alias analysis.
//
ImmutablePass *createTypeBasedAAWrapperPass();
}
#endif
} // end namespace llvm
#endif // LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
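Both ScopedNoAliasAA above and TypeBasedAA here follow the same AnalysisInfoMixin pattern; a rough sketch (not part of this commit; the pass is invented for illustration) of how a new-pass-manager function pass would request the Result type declared above:

#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

namespace {
struct TBAAConsumerPass : PassInfoMixin<TBAAConsumerPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM) {
    // getResult<TypeBasedAA> hands back TypeBasedAA::Result, i.e. the
    // TypeBasedAAResult declared above, computing it on first use.
    TypeBasedAAResult &TBAA = FAM.getResult<TypeBasedAA>(F);
    (void)TBAA; // a real pass would combine it into an AAResults aggregation
    return PreservedAnalyses::all();
  }
};
} // end anonymous namespace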

View File

@ -6,12 +6,16 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Implementation of the pointer use visitors.
///
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include <algorithm>
using namespace llvm;

View File

@ -59,12 +59,22 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
@ -72,28 +82,55 @@
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "scalar-evolution"
@ -736,7 +773,6 @@ static int CompareSCEVComplexity(
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
LoopInfo *LI, DominatorTree &DT) {
if (Ops.size() < 2) return; // Noop
@ -782,14 +818,16 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
struct FindSCEVSize {
int Size;
FindSCEVSize() : Size(0) {}
int Size = 0;
FindSCEVSize() = default;
bool follow(const SCEV *S) {
++Size;
// Keep looking at all operands of S.
return true;
}
bool isDone() const {
return false;
}
@ -1029,7 +1067,7 @@ private:
const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};
}
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
@ -1154,7 +1192,6 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
/// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
ScalarEvolution &SE) const {
const SCEV *Result = getStart();
@ -1340,7 +1377,8 @@ struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
}
} // end anonymous namespace
// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
@ -1470,7 +1508,6 @@ static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
//
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
const SCEV *Step,
@ -1481,7 +1518,6 @@ bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
// time here. It is correct (but more expensive) to continue with a
// non-constant `Start` and do a general SCEV subtraction to compute
// `PreStart` below.
//
const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
if (!StartC)
return false;
@ -1983,7 +2019,6 @@ ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
@ -2054,7 +2089,6 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
SmallVectorImpl<const SCEV *> &NewOps,
@ -2129,7 +2163,8 @@ StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
const SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags) {
using namespace std::placeholders;
typedef OverflowingBinaryOperator OBO;
using OBO = OverflowingBinaryOperator;
bool CanAnalyze =
Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
@ -2684,6 +2719,7 @@ static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
FoundConstant |= isa<SCEVConstant>(S);
return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
}
bool isDone() const {
return FoundConstant;
}
@ -3718,7 +3754,6 @@ const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
}
/// Return a SCEV corresponding to -V = -1*V
///
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
SCEV::NoWrapFlags Flags) {
if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
@ -3961,8 +3996,12 @@ void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
}
namespace {
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
: SCEVRewriteVisitor(SE), L(L) {}
static const SCEV *rewrite(const SCEV *S, const Loop *L,
ScalarEvolution &SE) {
SCEVInitRewriter Rewriter(L, SE);
@ -3970,9 +4009,6 @@ public:
return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
}
SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
: SCEVRewriteVisitor(SE), L(L), Valid(true) {}
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
if (!SE.isLoopInvariant(Expr, L))
Valid = false;
@ -3991,11 +4027,14 @@ public:
private:
const Loop *L;
bool Valid;
bool Valid = true;
};
class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
: SCEVRewriteVisitor(SE), L(L) {}
static const SCEV *rewrite(const SCEV *S, const Loop *L,
ScalarEvolution &SE) {
SCEVShiftRewriter Rewriter(L, SE);
@ -4003,9 +4042,6 @@ public:
return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
}
SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
: SCEVRewriteVisitor(SE), L(L), Valid(true) {}
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
// Only allow AddRecExprs for this loop.
if (!SE.isLoopInvariant(Expr, L))
@ -4019,12 +4055,14 @@ public:
Valid = false;
return Expr;
}
bool isValid() { return Valid; }
private:
const Loop *L;
bool Valid;
bool Valid = true;
};
} // end anonymous namespace
SCEV::NoWrapFlags
@ -4032,7 +4070,8 @@ ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
if (!AR->isAffine())
return SCEV::FlagAnyWrap;
typedef OverflowingBinaryOperator OBO;
using OBO = OverflowingBinaryOperator;
SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
if (!AR->hasNoSignedWrap()) {
@ -4059,6 +4098,7 @@ ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
}
namespace {
/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
@ -4066,16 +4106,16 @@ struct BinaryOp {
unsigned Opcode;
Value *LHS;
Value *RHS;
bool IsNSW;
bool IsNUW;
bool IsNSW = false;
bool IsNUW = false;
/// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
/// constant expression.
Operator *Op;
Operator *Op = nullptr;
explicit BinaryOp(Operator *Op)
: Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
IsNSW(false), IsNUW(false), Op(Op) {
Op(Op) {
if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
IsNSW = OBO->hasNoSignedWrap();
IsNUW = OBO->hasNoUnsignedWrap();
@ -4084,11 +4124,10 @@ struct BinaryOp {
explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
bool IsNUW = false)
: Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW),
Op(nullptr) {}
: Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};
}
} // end anonymous namespace
/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
@ -4149,7 +4188,7 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
if (auto *F = CI->getCalledFunction())
switch (F->getIntrinsicID()) {
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow: {
case Intrinsic::uadd_with_overflow:
if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
return BinaryOp(Instruction::Add, CI->getArgOperand(0),
CI->getArgOperand(1));
@ -4165,10 +4204,8 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
return BinaryOp(Instruction::Add, CI->getArgOperand(0),
CI->getArgOperand(1), /* IsNSW = */ false,
/* IsNUW*/ true);
}
case Intrinsic::ssub_with_overflow:
case Intrinsic::usub_with_overflow: {
case Intrinsic::usub_with_overflow:
if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
CI->getArgOperand(1));
@ -4182,7 +4219,6 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
CI->getArgOperand(1), /* IsNSW = */ false,
/* IsNUW = */ true);
}
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow:
return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
@ -4212,7 +4248,6 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
bool &Signed, ScalarEvolution &SE) {
// The case where Op == SymbolicPHI (that is, with no type conversions on
// the way) is handled by the regular add recurrence creating logic and
// would have already been triggered in createAddRecForPHI. Reaching it here
@ -4243,7 +4278,7 @@ static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
const SCEV *X = Trunc->getOperand();
if (X != SymbolicPHI)
return nullptr;
Signed = SExt ? true : false;
Signed = SExt != nullptr;
return Trunc->getType();
}
@ -4309,7 +4344,6 @@ static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
// which correspond to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
//
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
SmallVector<const SCEVPredicate *, 3> Predicates;
@ -4319,7 +4353,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
auto *PN = cast<PHINode>(SymbolicPHI->getValue());
const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
assert (L && "Expecting an integer loop header phi");
assert(L && "Expecting an integer loop header phi");
// The loop may have multiple entrances or multiple exits; we can analyze
// this phi as an addrec if it has a unique entry value and a unique
@ -4380,7 +4414,6 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
// varying inside the loop.
if (!isLoopInvariant(Accum, L))
return None;
// *** Part2: Create the predicates
@ -4447,7 +4480,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
// Create the Equal Predicates P2,P3:
auto AppendPredicate = [&](const SCEV *Expr) -> void {
assert (isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
const SCEV *ExtendedExpr =
Signed ? getSignExtendExpr(TruncatedExpr, Expr->getType())
@ -4478,7 +4511,6 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
auto *PN = cast<PHINode>(SymbolicPHI->getValue());
const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
if (!L)
@ -5614,7 +5646,7 @@ bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
typedef ScalarEvolution::LoopProperties LoopProperties;
using LoopProperties = ScalarEvolution::LoopProperties;
auto Itr = LoopPropertiesCache.find(L);
if (Itr == LoopPropertiesCache.end()) {
@ -5901,7 +5933,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
}
break;
case Instruction::AShr:
case Instruction::AShr: {
// AShr X, C, where C is a constant.
ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
if (!CI)
@ -5953,6 +5985,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
}
break;
}
}
}
switch (U->getOpcode()) {
@ -6017,8 +6050,6 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
return getUnknown(V);
}
//===----------------------------------------------------------------------===//
// Iteration Count Computation Code
//
@ -6413,7 +6444,7 @@ bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
}
ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
: ExactNotTaken(E), MaxNotTaken(E), MaxOrZero(false) {
: ExactNotTaken(E), MaxNotTaken(E) {
assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
isa<SCEVConstant>(MaxNotTaken)) &&
"No point in having a non-constant max backedge taken count!");
@ -6458,7 +6489,8 @@ ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
&&ExitCounts,
bool Complete, const SCEV *MaxCount, bool MaxOrZero)
: MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) {
typedef ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo EdgeExitInfo;
using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
ExitNotTaken.reserve(ExitCounts.size());
std::transform(
ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
@ -6490,7 +6522,7 @@ ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
SmallVector<BasicBlock *, 8> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
typedef ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo EdgeExitInfo;
using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
SmallVector<EdgeExitInfo, 4> ExitCounts;
bool CouldComputeBECount = true;
@ -6570,7 +6602,6 @@ ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitImpl(const Loop *L, BasicBlock *ExitingBlock,
bool AllowPredicates) {
// Okay, we've chosen an exiting block. See what condition causes us to exit
// at this block and remember the exit block and whether all other targets
// lead to the loop header.
@ -6833,7 +6864,6 @@ ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
BasicBlock *FBB,
bool ControlsExit,
bool AllowPredicates) {
// If the condition was exit on true, convert the condition to exit on false
ICmpInst::Predicate Cond;
if (!L->contains(FBB))
@ -6968,7 +6998,6 @@ ScalarEvolution::computeLoadConstantCompareExitLimit(
Constant *RHS,
const Loop *L,
ICmpInst::Predicate predicate) {
if (LI->isVolatile()) return getCouldNotCompute();
// Check to see if the loaded pointer is a getelementptr of a global.
@ -7887,7 +7916,6 @@ static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
/// Find the roots of the quadratic equation for the given quadratic chrec
/// {L,+,M,+,N}. This returns either the two roots (which might be the same) or
/// two SCEVCouldNotCompute objects.
///
static Optional<std::pair<const SCEVConstant *,const SCEVConstant *>>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
@ -8128,7 +8156,6 @@ ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
/// expressions are equal, however for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
///
static bool HasSameValue(const SCEV *A, const SCEV *B) {
// Quick check to see if they are the same SCEV.
if (A == B) return true;
@ -8575,7 +8602,6 @@ bool ScalarEvolution::isKnownPredicateViaConstantRanges(
bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
const SCEV *LHS,
const SCEV *RHS) {
// Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
// Return Y via OutY.
auto MatchBinaryAddToConst =
@ -8741,7 +8767,6 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
DTN != HeaderDTN; DTN = DTN->getIDom()) {
assert(DTN && "should reach the loop header before reaching the root!");
BasicBlock *BB = DTN->getBlock();
@ -9164,7 +9189,6 @@ bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
getNotSCEV(FoundLHS));
}
/// If Expr computes ~A, return A else return nullptr
static const SCEV *MatchNotExpr(const SCEV *Expr) {
const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
@ -9180,7 +9204,6 @@ static const SCEV *MatchNotExpr(const SCEV *Expr) {
return AddRHS->getOperand(1);
}
/// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
template<typename MaxExprType>
static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
@ -9191,7 +9214,6 @@ static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
}
/// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
template<typename MaxExprType>
static bool IsMinConsistingOf(ScalarEvolution &SE,
@ -9207,7 +9229,6 @@ static bool IsMinConsistingOf(ScalarEvolution &SE,
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS) {
// If both sides are affine addrecs for the same loop, with equal
// steps, and we know the recurrences don't wrap, then we only
// need to check the predicate on the starting values.
@ -9343,7 +9364,9 @@ bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
} else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
Value *LL, *LR;
// FIXME: Once we have SDiv implemented, we can get rid of this matching.
using namespace llvm::PatternMatch;
if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
// Rules for division.
// We are going to perform some comparisons with Denominator and its
@ -9636,7 +9659,6 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
!loopHasNoSideEffects(L))
return getCouldNotCompute();
} else if (!Stride->isOne() &&
doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
// Avoid proven overflow cases: this will ensure that the backedge taken
@ -9922,6 +9944,7 @@ static inline bool containsUndefs(const SCEV *S) {
}
namespace {
// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
ScalarEvolution &SE;
@ -9935,6 +9958,7 @@ struct SCEVCollectStrides {
Strides.push_back(AR->getStepRecurrence(SE));
return true;
}
bool isDone() const { return false; }
};
@ -9942,8 +9966,7 @@ struct SCEVCollectStrides {
struct SCEVCollectTerms {
SmallVectorImpl<const SCEV *> &Terms;
SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
: Terms(T) {}
SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}
bool follow(const SCEV *S) {
if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
@ -9958,6 +9981,7 @@ struct SCEVCollectTerms {
// Keep looking.
return true;
}
bool isDone() const { return false; }
};
@ -9966,7 +9990,7 @@ struct SCEVHasAddRec {
bool &ContainsAddRec;
SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
ContainsAddRec = false;
ContainsAddRec = false;
}
bool follow(const SCEV *S) {
@ -9980,6 +10004,7 @@ struct SCEVHasAddRec {
// Keep looking.
return true;
}
bool isDone() const { return false; }
};
@ -10033,9 +10058,11 @@ struct SCEVCollectAddRecMultiplies {
// Keep looking.
return true;
}
bool isDone() const { return false; }
};
}
} // end anonymous namespace
/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters in
/// two places:
@ -10114,7 +10141,6 @@ static bool findArrayDimensionsRec(ScalarEvolution &SE,
return true;
}
// Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
for (const SCEV *T : Terms)
@ -10229,7 +10255,6 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
void ScalarEvolution::computeAccessFunctions(
const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
SmallVectorImpl<const SCEV *> &Sizes) {
// Early exit in case this SCEV is not an affine multivariate function.
if (Sizes.empty())
return;
@ -10333,7 +10358,6 @@ void ScalarEvolution::computeAccessFunctions(
/// DelinearizationPass that walks through all loads and stores of a function
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
SmallVectorImpl<const SCEV *> &Subscripts,
SmallVectorImpl<const SCEV *> &Sizes,
@ -10422,11 +10446,8 @@ ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
AssumptionCache &AC, DominatorTree &DT,
LoopInfo &LI)
: F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
CouldNotCompute(new SCEVCouldNotCompute()),
WalkingBEDominatingConds(false), ProvingSplitPredicate(false),
ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64),
FirstUnknown(nullptr) {
CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
LoopDispositions(64), BlockDispositions(64) {
// To use guards for proving predicates, we need to scan every instruction in
// relevant basic blocks, and not just terminators. Doing this is a waste of
// time if the IR does not actually contain any calls to
@ -10447,7 +10468,6 @@ ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
ValueExprMap(std::move(Arg.ValueExprMap)),
PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
WalkingBEDominatingConds(false), ProvingSplitPredicate(false),
MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
PredicatedBackedgeTakenCounts(
@ -10914,9 +10934,12 @@ void ScalarEvolution::verify() const {
// Map's SCEV expressions from one ScalarEvolution "universe" to another.
struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
const SCEV *visitConstant(const SCEVConstant *Constant) {
return SE.getConstant(Constant->getAPInt());
}
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
return SE.getUnknown(Expr->getValue());
}
@ -10924,7 +10947,6 @@ void ScalarEvolution::verify() const {
const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
return SE.getCouldNotCompute();
}
SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
};
SCEVMapper SCM(SE2);
@ -11013,6 +11035,7 @@ INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
char ScalarEvolutionWrapperPass::ID = 0;
ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
@ -11088,6 +11111,11 @@ namespace {
class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
SCEVUnionPredicate *Pred)
: SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}
/// Rewrites \p S in the context of a loop L and the SCEV predication
/// infrastructure.
///
@ -11103,11 +11131,6 @@ public:
return Rewriter.visit(S);
}
SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
SCEVUnionPredicate *Pred)
: SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
if (Pred) {
auto ExprPreds = Pred->getPredicatesForExpr(Expr);
@ -11191,6 +11214,7 @@ private:
SCEVUnionPredicate *Pred;
const Loop *L;
};
} // end anonymous namespace
const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
@ -11201,7 +11225,6 @@ const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
const SCEV *S, const Loop *L,
SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
@ -11357,7 +11380,7 @@ void SCEVUnionPredicate::add(const SCEVPredicate *N) {
PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
Loop &L)
: SE(SE), L(L), Generation(0), BackedgeCount(nullptr) {}
: SE(SE), L(L) {}
const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
const SCEV *Expr = SE.getSCEV(V);
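The rewriter classes touched throughout this file (SCEVInitRewriter, SCEVShiftRewriter, SCEVMapper, SCEVPredicateRewriter) all share one shape; a rough sketch (not from this commit; the zero-substitution policy is invented for illustration) of that SCEVRewriteVisitor pattern:

#include "llvm/Analysis/ScalarEvolutionExpressions.h"

using namespace llvm;

namespace {
// Replaces every SCEVUnknown leaf with a zero of the same type, leaving the
// rest of the expression to the structural rewriting inherited from
// SCEVRewriteVisitor.
class ZeroUnknownsRewriter : public SCEVRewriteVisitor<ZeroUnknownsRewriter> {
public:
  ZeroUnknownsRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

  static const SCEV *rewrite(const SCEV *S, ScalarEvolution &SE) {
    ZeroUnknownsRewriter Rewriter(SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    return SE.getZero(Expr->getType());
  }
};
} // end anonymous namespace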

View File

@ -34,11 +34,12 @@
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
@ -50,14 +51,15 @@ static cl::opt<bool> EnableScopedNoAlias("enable-scoped-noalias",
cl::init(true));
namespace {
/// This is a simple wrapper around an MDNode which provides a higher-level
/// interface by hiding the details of how alias analysis information is encoded
/// in its operands.
class AliasScopeNode {
const MDNode *Node;
const MDNode *Node = nullptr;
public:
AliasScopeNode() : Node(nullptr) {}
AliasScopeNode() = default;
explicit AliasScopeNode(const MDNode *N) : Node(N) {}
/// Get the MDNode for this AliasScopeNode.
@ -70,7 +72,8 @@ public:
return dyn_cast_or_null<MDNode>(Node->getOperand(1));
}
};
} // end of anonymous namespace
} // end anonymous namespace
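
The AliasScopeNode change above is the companion pattern: once the pointer member carries an in-class "= nullptr" initializer, the hand-written default constructor no longer does any work and can be spelled "= default", as clang-tidy's modernize-use-equals-default and modernize-use-default-member-init checks suggest. A minimal sketch with a hypothetical wrapper type:

// Hypothetical wrapper mirroring the AliasScopeNode edit; not LLVM code.
class NodeWrapper {
  const int *Node = nullptr; // in-class initializer replaces Node(nullptr)

public:
  NodeWrapper() = default;                      // was: NodeWrapper() : Node(nullptr) {}
  explicit NodeWrapper(const int *N) : Node(N) {}
  const int *getNode() const { return Node; }
};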
AliasResult ScopedNoAliasAAResult::alias(const MemoryLocation &LocA,
const MemoryLocation &LocB) {
@ -181,6 +184,7 @@ ScopedNoAliasAAResult ScopedNoAliasAA::run(Function &F,
}
char ScopedNoAliasAAWrapperPass::ID = 0;
INITIALIZE_PASS(ScopedNoAliasAAWrapperPass, "scoped-noalias",
"Scoped NoAlias Alias Analysis", false, true)

View File

@ -13,11 +13,21 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/SparsePropagation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "sparseprop"
@ -26,7 +36,7 @@ using namespace llvm;
// AbstractLatticeFunction Implementation
//===----------------------------------------------------------------------===//
AbstractLatticeFunction::~AbstractLatticeFunction() {}
AbstractLatticeFunction::~AbstractLatticeFunction() = default;
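
The destructor change above replaces an empty out-of-line body "{}" with "= default". The definition stays out of line, which is useful for polymorphic classes that want a single home translation unit, while the triviality becomes explicit to the compiler and to modernize-use-equals-default. A minimal sketch with a hypothetical base class:

// Hypothetical base class; only the destructor spelling is the point here.
struct LatticeFunctionBase {
  virtual ~LatticeFunctionBase(); // declared in a header
  virtual int transfer(int In) { return In; }
};

LatticeFunctionBase::~LatticeFunctionBase() = default; // was: ...() {}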
/// PrintValue - Render the specified lattice value to the specified stream.
void AbstractLatticeFunction::PrintValue(LatticeVal V, raw_ostream &OS) {
@ -49,7 +59,6 @@ void AbstractLatticeFunction::PrintValue(LatticeVal V, raw_ostream &OS) {
/// map yet. This function is necessary because not all values should start
/// out in the underdefined state... Arguments should be overdefined, and
/// constants should be marked as constants.
///
SparseSolver::LatticeVal SparseSolver::getOrInitValueState(Value *V) {
DenseMap<Value*, LatticeVal>::iterator I = ValueState.find(V);
if (I != ValueState.end()) return I->second; // Common case, in the map
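
The comment above describes the initialization policy rather than showing it, so here is a minimal sketch of that policy under a hypothetical lattice enum (SparseSolver's real LatticeVal and its transfer functions are not reproduced): arguments start overdefined, constants start as constant values, and everything else starts in the optimistic initial state until propagation proves otherwise.

#include "llvm/IR/Argument.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"

// Hypothetical three-point lattice, not SparseSolver's LatticeVal.
enum class SketchLatticeVal { Undefined, ConstantVal, Overdefined };

static SketchLatticeVal initialState(const llvm::Value *V) {
  if (llvm::isa<llvm::Argument>(V))
    return SketchLatticeVal::Overdefined; // nothing is known about arguments
  if (llvm::isa<llvm::Constant>(V))
    return SketchLatticeVal::ConstantVal; // a constant is its own lattice value
  return SketchLatticeVal::Undefined;     // instructions start optimistic
}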
@ -109,13 +118,11 @@ void SparseSolver::markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
// because they have potentially new operands.
for (BasicBlock::iterator I = Dest->begin(); isa<PHINode>(I); ++I)
visitPHINode(*cast<PHINode>(I));
} else {
MarkBlockExecutable(Dest);
}
}
/// getFeasibleSuccessors - Return a vector of booleans to indicate which
/// successors are reachable from a given terminator instruction.
void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI,
@ -199,7 +206,6 @@ void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI,
Succs[Case.getSuccessorIndex()] = true;
}
/// isEdgeFeasible - Return true if the control flow edge from the 'From'
/// basic block to the 'To' basic block is currently feasible...
bool SparseSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To,
@ -273,7 +279,6 @@ void SparseSolver::visitPHINode(PHINode &PN) {
UpdateState(PN, PNIV);
}
void SparseSolver::visitInst(Instruction &I) {
// PHIs are handled by the propagation logic, they are never passed into the
// transfer functions.
@ -344,4 +349,3 @@ void SparseSolver::Print(Function &F, raw_ostream &OS) const {
OS << "\n";
}
}

View File

@ -123,10 +123,20 @@
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
using namespace llvm;
// A handy option for disabling TBAA functionality. The same effect can also be
@ -135,15 +145,16 @@ using namespace llvm;
static cl::opt<bool> EnableTBAA("enable-tbaa", cl::init(true));
namespace {
/// This is a simple wrapper around an MDNode which provides a higher-level
/// interface by hiding the details of how alias analysis information is encoded
/// in its operands.
template<typename MDNodeTy>
class TBAANodeImpl {
MDNodeTy *Node;
MDNodeTy *Node = nullptr;
public:
TBAANodeImpl() : Node(nullptr) {}
TBAANodeImpl() = default;
explicit TBAANodeImpl(MDNodeTy *N) : Node(N) {}
/// getNode - Get the MDNode for this TBAANode.
@ -176,8 +187,8 @@ public:
/// \name Specializations of \c TBAANodeImpl for const and non const qualified
/// \c MDNode.
/// @{
typedef TBAANodeImpl<const MDNode> TBAANode;
typedef TBAANodeImpl<MDNode> MutableTBAANode;
using TBAANode = TBAANodeImpl<const MDNode>;
using MutableTBAANode = TBAANodeImpl<MDNode>;
/// @}
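
The typedef-to-using rewrites above come from clang-tidy's modernize-use-using check. An alias declaration is equivalent to the typedef, reads left-to-right, and, unlike typedef, can also be templated. A minimal sketch with hypothetical names:

// Hypothetical types; only the alias spellings are the point here.
template <typename T> struct NodeImpl { T *Node = nullptr; };

typedef NodeImpl<const int> OldStyleAlias;  // pre-C++11 spelling
using NewStyleAlias = NodeImpl<const int>;  // equivalent alias declaration

template <typename T> using ImplOf = NodeImpl<T>; // alias template: typedef cannot express this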
/// This is a simple wrapper around an MDNode which provides a
@ -197,12 +208,15 @@ public:
MDNodeTy *getBaseType() const {
return dyn_cast_or_null<MDNode>(Node->getOperand(0));
}
MDNodeTy *getAccessType() const {
return dyn_cast_or_null<MDNode>(Node->getOperand(1));
}
uint64_t getOffset() const {
return mdconst::extract<ConstantInt>(Node->getOperand(2))->getZExtValue();
}
/// Test if this TBAAStructTagNode represents a type for objects
/// which are not modified (by any means) in the context where this
/// AliasAnalysis is relevant.
@ -219,8 +233,8 @@ public:
/// \name Specializations of \c TBAAStructTagNodeImpl for const and non const
/// qualified \c MDNodes.
/// @{
typedef TBAAStructTagNodeImpl<const MDNode> TBAAStructTagNode;
typedef TBAAStructTagNodeImpl<MDNode> MutableTBAAStructTagNode;
using TBAAStructTagNode = TBAAStructTagNodeImpl<const MDNode>;
using MutableTBAAStructTagNode = TBAAStructTagNodeImpl<MDNode>;
/// @}
/// This is a simple wrapper around an MDNode which provides a
@ -228,10 +242,10 @@ typedef TBAAStructTagNodeImpl<MDNode> MutableTBAAStructTagNode;
/// information is encoded in its operands.
class TBAAStructTypeNode {
/// This node should be created with createTBAAStructTypeNode.
const MDNode *Node;
const MDNode *Node = nullptr;
public:
TBAAStructTypeNode() : Node(nullptr) {}
TBAAStructTypeNode() = default;
explicit TBAAStructTypeNode(const MDNode *N) : Node(N) {}
/// Get the MDNode for this TBAAStructTypeNode.
@ -283,7 +297,8 @@ public:
return TBAAStructTypeNode(P);
}
};
}
} // end anonymous namespace
/// Check the first operand of the tbaa tag node, if it is a MDNode, we treat
/// it as struct-path aware TBAA format, otherwise, we treat it as scalar TBAA