[ModRefInfo] Make enum ModRefInfo an enum class [NFC].

Summary:
Make enum ModRefInfo an enum class. Changes to ModRefInfo values should
be done using inline wrappers.
This should prevent future bit-wise operations from being added, which can be more error-prone.
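
As a minimal, self-contained sketch of the idiom this patch applies, distilled from the wrappers added to AliasAnalysis.h below (simplified, not the exact header):

#include <cstdio>

// Scoped enum: no implicit conversion to int, so a raw bit-test such as
// `MRI & Mod` at a call site becomes a compile error.
enum class ModRefInfo {
  NoModRef = 0,
  Ref = 1,
  Mod = 2,
  // Valid: before the closing brace the enumerators still have the
  // underlying type (int), so `Ref | Mod` is plain integer arithmetic.
  ModRef = Ref | Mod,
};

// Bit manipulation is funneled through named inline wrappers instead.
inline bool isModSet(ModRefInfo MRI) {
  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod);
}
inline ModRefInfo setRef(ModRefInfo MRI) {
  return ModRefInfo(static_cast<int>(MRI) |
                    static_cast<int>(ModRefInfo::Ref));
}

int main() {
  ModRefInfo MRI = setRef(ModRefInfo::Mod); // Mod | Ref == ModRef
  std::printf("%d\n", isModSet(MRI));       // prints 1
}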

Reviewers: sanjoy, dberlin, hfinkel, george.burgess.iv

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D40933

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@320107 91177308-0d34-0410-b5e6-96231b3b80d8
Alina Sbirlea 2017-12-07 22:41:34 +00:00
parent 491343d2fa
commit c94e896e67
14 changed files with 183 additions and 171 deletions


@@ -98,55 +98,57 @@ enum AliasResult {
/// they form a two bit matrix and bit-tests for 'mod' or 'ref'
/// work with any of the possible values.
enum ModRefInfo {
enum class ModRefInfo {
/// The access neither references nor modifies the value stored in memory.
MRI_NoModRef = 0,
NoModRef = 0,
/// The access may reference the value stored in memory.
MRI_Ref = 1,
Ref = 1,
/// The access may modify the value stored in memory.
MRI_Mod = 2,
Mod = 2,
/// The access may reference and may modify the value stored in memory.
MRI_ModRef = MRI_Ref | MRI_Mod,
ModRef = Ref | Mod,
};
LLVM_NODISCARD inline bool isNoModRef(const ModRefInfo MRI) {
return MRI == MRI_NoModRef;
return MRI == ModRefInfo::NoModRef;
}
LLVM_NODISCARD inline bool isModOrRefSet(const ModRefInfo MRI) {
return MRI & MRI_ModRef;
return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::ModRef);
}
LLVM_NODISCARD inline bool isModAndRefSet(const ModRefInfo MRI) {
return (MRI & MRI_ModRef) == MRI_ModRef;
return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::ModRef)) ==
static_cast<int>(ModRefInfo::ModRef);
}
LLVM_NODISCARD inline bool isModSet(const ModRefInfo MRI) {
return MRI & MRI_Mod;
return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod);
}
LLVM_NODISCARD inline bool isRefSet(const ModRefInfo MRI) {
return MRI & MRI_Ref;
return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref);
}
LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
return ModRefInfo(MRI | MRI_Ref);
}
LLVM_NODISCARD inline ModRefInfo setMod(const ModRefInfo MRI) {
return ModRefInfo(MRI | MRI_Mod);
return ModRefInfo(static_cast<int>(MRI) | static_cast<int>(ModRefInfo::Mod));
}
LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
return ModRefInfo(static_cast<int>(MRI) | static_cast<int>(ModRefInfo::Ref));
}
LLVM_NODISCARD inline ModRefInfo setModAndRef(const ModRefInfo MRI) {
return ModRefInfo(MRI | MRI_ModRef);
return ModRefInfo(static_cast<int>(MRI) |
static_cast<int>(ModRefInfo::ModRef));
}
LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) {
return ModRefInfo(MRI & MRI_Ref);
return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref));
}
LLVM_NODISCARD inline ModRefInfo clearRef(const ModRefInfo MRI) {
return ModRefInfo(MRI & MRI_Mod);
return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod));
}
LLVM_NODISCARD inline ModRefInfo unionModRef(const ModRefInfo MRI1,
const ModRefInfo MRI2) {
return ModRefInfo(MRI1 | MRI2);
return ModRefInfo(static_cast<int>(MRI1) | static_cast<int>(MRI2));
}
LLVM_NODISCARD inline ModRefInfo intersectModRef(const ModRefInfo MRI1,
const ModRefInfo MRI2) {
return ModRefInfo(MRI1 & MRI2);
return ModRefInfo(static_cast<int>(MRI1) & static_cast<int>(MRI2));
}
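
For call sites, the mechanical translation under the new API looks roughly like this (an illustrative sketch, not lines from the patch; MRI, R1, and R2 are placeholder values, and the helpers are the ones declared above):

void exampleCallSite(ModRefInfo MRI, ModRefInfo R1, ModRefInfo R2) {
  // Old: if (MRI & MRI_Mod) ...               (raw bit-test; now a compile error)
  if (isModSet(MRI)) { /* ... */ }
  // Old: ModRefInfo R = ModRefInfo(R1 & R2);  (raw intersection)
  ModRefInfo R = intersectModRef(R1, R2);
  // Old: R = ModRefInfo(R | MRI_Ref);         (raw union with a constant)
  R = setRef(R);
  (void)R; // silence unused-variable warnings in this sketch
}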
/// The locations at which a function might access memory.
@@ -176,27 +178,31 @@ enum FunctionModRefBehavior {
/// This property corresponds to the GCC 'const' attribute.
/// This property corresponds to the LLVM IR 'readnone' attribute.
/// This property corresponds to the IntrNoMem LLVM intrinsic flag.
FMRB_DoesNotAccessMemory = FMRL_Nowhere | MRI_NoModRef,
FMRB_DoesNotAccessMemory =
FMRL_Nowhere | static_cast<int>(ModRefInfo::NoModRef),
/// The only memory references in this function (if it has any) are
/// non-volatile loads from objects pointed to by its pointer-typed
/// arguments, with arbitrary offsets.
///
/// This property corresponds to the IntrReadArgMem LLVM intrinsic flag.
FMRB_OnlyReadsArgumentPointees = FMRL_ArgumentPointees | MRI_Ref,
FMRB_OnlyReadsArgumentPointees =
FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::Ref),
/// The only memory references in this function (if it has any) are
/// non-volatile loads and stores from objects pointed to by its
/// pointer-typed arguments, with arbitrary offsets.
///
/// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag.
FMRB_OnlyAccessesArgumentPointees = FMRL_ArgumentPointees | MRI_ModRef,
FMRB_OnlyAccessesArgumentPointees =
FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::ModRef),
/// The only memory references in this function (if it has any) are
/// references of memory that is otherwise inaccessible via LLVM IR.
///
/// This property corresponds to the LLVM IR inaccessiblememonly attribute.
FMRB_OnlyAccessesInaccessibleMem = FMRL_InaccessibleMem | MRI_ModRef,
FMRB_OnlyAccessesInaccessibleMem =
FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::ModRef),
/// The function may perform non-volatile loads and stores of objects
/// pointed to by its pointer-typed arguments, with arbitrary offsets, and
@@ -206,7 +212,8 @@ enum FunctionModRefBehavior {
/// This property corresponds to the LLVM IR
/// inaccessiblemem_or_argmemonly attribute.
FMRB_OnlyAccessesInaccessibleOrArgMem = FMRL_InaccessibleMem |
FMRL_ArgumentPointees | MRI_ModRef,
FMRL_ArgumentPointees |
static_cast<int>(ModRefInfo::ModRef),
/// This function does not perform any non-local stores or volatile loads,
/// but may read from any memory location.
@@ -214,18 +221,19 @@ enum FunctionModRefBehavior {
/// This property corresponds to the GCC 'pure' attribute.
/// This property corresponds to the LLVM IR 'readonly' attribute.
/// This property corresponds to the IntrReadMem LLVM intrinsic flag.
FMRB_OnlyReadsMemory = FMRL_Anywhere | MRI_Ref,
FMRB_OnlyReadsMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Ref),
// This function does not read from memory anywhere, but may write to any
// memory location.
//
// This property corresponds to the LLVM IR 'writeonly' attribute.
// This property corresponds to the IntrWriteMem LLVM intrinsic flag.
FMRB_DoesNotReadMemory = FMRL_Anywhere | MRI_Mod,
FMRB_DoesNotReadMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Mod),
/// This indicates that the function could not be classified into one of the
/// behaviors above.
FMRB_UnknownModRefBehavior = FMRL_Anywhere | MRI_ModRef
FMRB_UnknownModRefBehavior =
FMRL_Anywhere | static_cast<int>(ModRefInfo::ModRef)
};
// Wrapper method strips bits significant only in FunctionModRefBehavior,
@@ -234,7 +242,7 @@ enum FunctionModRefBehavior {
// entry with all bits set to 1.
LLVM_NODISCARD inline ModRefInfo
createModRefInfo(const FunctionModRefBehavior FMRB) {
return ModRefInfo(FMRB & MRI_ModRef);
return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::ModRef));
}
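
A worked example of what this wrapper strips, as a self-contained snippet; the FMRL_* values here are assumptions mirroring this header's encoding, in which the location bits sit above the two mod/ref bits:

enum class ModRefInfo { NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod };
enum FunctionModRefLocation {
  FMRL_Nowhere = 0,
  FMRL_ArgumentPointees = 4, // assumed values: bits 2 and up are locations
  FMRL_InaccessibleMem = 8,
  FMRL_Anywhere = 32 | FMRL_InaccessibleMem | FMRL_ArgumentPointees,
};
enum FunctionModRefBehavior {
  FMRB_OnlyReadsMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Ref),
};
// Masking with ModRef (binary 11) drops every location bit and keeps Ref.
static_assert((FMRB_OnlyReadsMemory & static_cast<int>(ModRefInfo::ModRef)) ==
                  static_cast<int>(ModRefInfo::Ref),
              "createModRefInfo(FMRB_OnlyReadsMemory) == ModRefInfo::Ref");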
class AAResults {
@@ -593,7 +601,7 @@ public:
case Instruction::CatchRet:
return getModRefInfo((const CatchReturnInst *)I, Loc);
default:
return MRI_NoModRef;
return ModRefInfo::NoModRef;
}
}
@@ -894,7 +902,7 @@ public:
}
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
return MRI_ModRef;
return ModRefInfo::ModRef;
}
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
@@ -906,11 +914,11 @@ public:
}
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
return MRI_ModRef;
return ModRefInfo::ModRef;
}
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
return MRI_ModRef;
return ModRefInfo::ModRef;
}
};


@@ -119,7 +119,7 @@ bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
}
ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
ModRefInfo Result = MRI_ModRef;
ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
Result = intersectModRef(Result, AA->getArgModRefInfo(CS, ArgIdx));
@@ -138,8 +138,8 @@ ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
// Check if the two calls modify the same memory
return getModRefInfo(CS, Call);
} else if (I->isFenceLike()) {
// If this is a fence, just return MRI_ModRef.
return MRI_ModRef;
// If this is a fence, just return ModRef.
return ModRefInfo::ModRef;
} else {
// Otherwise, check if the call modifies or references the
// location this memory access defines. The best we can say
@@ -150,12 +150,12 @@ ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
if (isModOrRefSet(MR))
return setModAndRef(MR);
}
return MRI_NoModRef;
return ModRefInfo::NoModRef;
}
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) {
ModRefInfo Result = MRI_ModRef;
ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
Result = intersectModRef(Result, AA->getModRefInfo(CS, Loc));
@@ -170,7 +170,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
auto MRB = getModRefBehavior(CS);
if (MRB == FMRB_DoesNotAccessMemory ||
MRB == FMRB_OnlyAccessesInaccessibleMem)
return MRI_NoModRef;
return ModRefInfo::NoModRef;
if (onlyReadsMemory(MRB))
Result = clearMod(Result);
@@ -179,7 +179,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) {
bool DoesAlias = false;
ModRefInfo AllArgsMask = MRI_NoModRef;
ModRefInfo AllArgsMask = ModRefInfo::NoModRef;
if (doesAccessArgPointees(MRB)) {
for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
const Value *Arg = *AI;
@@ -195,9 +195,9 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
}
}
}
// Return MRI_NoModRef if no alias found with any argument.
// Return NoModRef if no alias found with any argument.
if (!DoesAlias)
return MRI_NoModRef;
return ModRefInfo::NoModRef;
// Logical & between other AA analyses and argument analysis.
Result = intersectModRef(Result, AllArgsMask);
}
@@ -212,7 +212,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) {
ModRefInfo Result = MRI_ModRef;
ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
Result = intersectModRef(Result, AA->getModRefInfo(CS1, CS2));
@@ -228,15 +228,15 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
// If CS1 or CS2 are readnone, they don't interact.
auto CS1B = getModRefBehavior(CS1);
if (CS1B == FMRB_DoesNotAccessMemory)
return MRI_NoModRef;
return ModRefInfo::NoModRef;
auto CS2B = getModRefBehavior(CS2);
if (CS2B == FMRB_DoesNotAccessMemory)
return MRI_NoModRef;
return ModRefInfo::NoModRef;
// If they both only read from memory, there is no dependence.
if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
// If CS1 only reads memory, the only dependence on CS2 can be
// from CS1 reading memory written by CS2.
@@ -249,7 +249,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
// information from CS1's references to the memory referenced by
// CS2's arguments.
if (onlyAccessesArgPointees(CS2B)) {
ModRefInfo R = MRI_NoModRef;
ModRefInfo R = ModRefInfo::NoModRef;
if (doesAccessArgPointees(CS2B)) {
for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
const Value *Arg = *I;
@@ -263,11 +263,11 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
// - If CS2 modifies location, dependence exists if CS1 reads or writes.
// - If CS2 only reads location, dependence exists if CS1 writes.
ModRefInfo ArgModRefCS2 = getArgModRefInfo(CS2, CS2ArgIdx);
ModRefInfo ArgMask = MRI_NoModRef;
ModRefInfo ArgMask = ModRefInfo::NoModRef;
if (isModSet(ArgModRefCS2))
ArgMask = MRI_ModRef;
ArgMask = ModRefInfo::ModRef;
else if (isRefSet(ArgModRefCS2))
ArgMask = MRI_Mod;
ArgMask = ModRefInfo::Mod;
// ModRefCS1 indicates what CS1 might do to CS2ArgLoc, and we use
// above ArgMask to update dependence info.
@@ -285,7 +285,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
// If CS1 only accesses memory through arguments, check if CS2 references
// any of the memory referenced by CS1's arguments. If not, return NoModRef.
if (onlyAccessesArgPointees(CS1B)) {
ModRefInfo R = MRI_NoModRef;
ModRefInfo R = ModRefInfo::NoModRef;
if (doesAccessArgPointees(CS1B)) {
for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
const Value *Arg = *I;
@@ -349,45 +349,45 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
const MemoryLocation &Loc) {
// Be conservative in the face of atomic.
if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
return MRI_ModRef;
return ModRefInfo::ModRef;
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
// Otherwise, a load just reads.
return MRI_Ref;
return ModRefInfo::Ref;
}
ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
const MemoryLocation &Loc) {
// Be conservative in the face of atomic.
if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
return MRI_ModRef;
return ModRefInfo::ModRef;
if (Loc.Ptr) {
// If the store address cannot alias the pointer in question, then the
// specified memory cannot be modified by the store.
if (!alias(MemoryLocation::get(S), Loc))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this store.
if (pointsToConstantMemory(Loc))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
}
// Otherwise, a store just writes.
return MRI_Mod;
return ModRefInfo::Mod;
}
ModRefInfo AAResults::getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) {
// If we know that the location is a constant memory location, the fence
// cannot modify this location.
if (Loc.Ptr && pointsToConstantMemory(Loc))
return MRI_Ref;
return MRI_ModRef;
return ModRefInfo::Ref;
return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
@@ -396,16 +396,16 @@ ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
// If the va_arg address cannot alias the pointer in question, then the
// specified memory cannot be accessed by the va_arg.
if (!alias(MemoryLocation::get(V), Loc))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this va_arg.
if (pointsToConstantMemory(Loc))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
}
// Otherwise, a va_arg reads and writes.
return MRI_ModRef;
return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
@@ -414,11 +414,11 @@ ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
// If the pointer is a pointer to constant memory,
// then it could not have been modified by this catchpad.
if (pointsToConstantMemory(Loc))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
}
// Otherwise, a catchpad reads and writes.
return MRI_ModRef;
return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
@@ -427,37 +427,37 @@ ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
// If the pointer is a pointer to constant memory,
// then it could not have been modified by this catchpad.
if (pointsToConstantMemory(Loc))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
}
// Otherwise, a catchret reads and writes.
return MRI_ModRef;
return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
const MemoryLocation &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
return MRI_ModRef;
return ModRefInfo::ModRef;
// If the cmpxchg address does not alias the location, it does not access it.
if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
return MRI_ModRef;
return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
const MemoryLocation &Loc) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
if (isStrongerThanMonotonic(RMW->getOrdering()))
return MRI_ModRef;
return ModRefInfo::ModRef;
// If the atomicrmw address does not alias the location, it does not access it.
if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
return MRI_ModRef;
return ModRefInfo::ModRef;
}
/// \brief Return information about whether a particular call site modifies
@@ -473,26 +473,26 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
DominatorTree *DT,
OrderedBasicBlock *OBB) {
if (!DT)
return MRI_ModRef;
return ModRefInfo::ModRef;
const Value *Object =
GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
return MRI_ModRef;
return ModRefInfo::ModRef;
ImmutableCallSite CS(I);
if (!CS.getInstruction() || CS.getInstruction() == Object)
return MRI_ModRef;
return ModRefInfo::ModRef;
if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
/* StoreCaptures */ true, I, DT,
/* include Object */ true,
/* OrderedBasicBlock */ OBB))
return MRI_ModRef;
return ModRefInfo::ModRef;
unsigned ArgNo = 0;
ModRefInfo R = MRI_NoModRef;
ModRefInfo R = ModRefInfo::NoModRef;
for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
CI != CE; ++CI, ++ArgNo) {
// Only look at the no-capture or byval pointer arguments. If this
@@ -512,10 +512,10 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
if (CS.doesNotAccessMemory(ArgNo))
continue;
if (CS.onlyReadsMemory(ArgNo)) {
R = MRI_Ref;
R = ModRefInfo::Ref;
continue;
}
return MRI_ModRef;
return ModRefInfo::ModRef;
}
return R;
}
@@ -525,7 +525,7 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
///
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
const MemoryLocation &Loc) {
return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
return canInstructionRangeModRef(BB.front(), BB.back(), Loc, ModRefInfo::Mod);
}
/// canInstructionRangeModRef - Return true if it is possible for the


@@ -244,20 +244,20 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
if (ElTy->isSized()) Size = DL.getTypeStoreSize(ElTy);
switch (AA.getModRefInfo(C, Pointer, Size)) {
case MRI_NoModRef:
case ModRefInfo::NoModRef:
PrintModRefResults("NoModRef", PrintNoModRef, I, Pointer,
F.getParent());
++NoModRefCount;
break;
case MRI_Mod:
case ModRefInfo::Mod:
PrintModRefResults("Just Mod", PrintMod, I, Pointer, F.getParent());
++ModCount;
break;
case MRI_Ref:
case ModRefInfo::Ref:
PrintModRefResults("Just Ref", PrintRef, I, Pointer, F.getParent());
++RefCount;
break;
case MRI_ModRef:
case ModRefInfo::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, I, Pointer,
F.getParent());
++ModRefCount;
@@ -272,19 +272,19 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
if (D == C)
continue;
switch (AA.getModRefInfo(*C, *D)) {
case MRI_NoModRef:
case ModRefInfo::NoModRef:
PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent());
++NoModRefCount;
break;
case MRI_Mod:
case ModRefInfo::Mod:
PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent());
++ModCount;
break;
case MRI_Ref:
case ModRefInfo::Ref:
PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent());
++RefCount;
break;
case MRI_ModRef:
case ModRefInfo::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent());
++ModRefCount;
break;


@@ -687,13 +687,13 @@ ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
unsigned ArgIdx) {
// Checking for known builtin intrinsics and target library functions.
if (isWriteOnlyParam(CS, ArgIdx, TLI))
return MRI_Mod;
return ModRefInfo::Mod;
if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
return MRI_Ref;
return ModRefInfo::Ref;
if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}
@@ -770,7 +770,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
if (isa<AllocaInst>(Object))
if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
if (CI->isTailCall())
return MRI_NoModRef;
return ModRefInfo::NoModRef;
// If the pointer is to a locally allocated object that does not escape,
// then the call can not mod/ref the pointer unless the call takes the pointer
@@ -780,7 +780,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// Optimistically assume that call doesn't touch Object and check this
// assumption in the following loop.
ModRefInfo Result = MRI_NoModRef;
ModRefInfo Result = ModRefInfo::NoModRef;
unsigned OperandNo = 0;
for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
@@ -818,7 +818,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
continue;
}
// This operand aliases 'Object' and call reads and writes into it.
Result = MRI_ModRef;
Result = ModRefInfo::ModRef;
break;
}
@@ -838,7 +838,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// Be conservative if the accessed pointer may alias the allocation -
// fallback to the generic handling below.
if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
return MRI_NoModRef;
return ModRefInfo::NoModRef;
}
// The semantics of memcpy intrinsics forbid overlap between their respective
@@ -851,14 +851,14 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
Loc)) == MustAlias)
// Loc is exactly the memcpy source thus disjoint from memcpy dest.
return MRI_Ref;
return ModRefInfo::Ref;
if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
Loc)) == MustAlias)
// The converse case.
return MRI_Mod;
return ModRefInfo::Mod;
// It's also possible for Loc to alias both src and dest, or neither.
ModRefInfo rv = MRI_NoModRef;
ModRefInfo rv = ModRefInfo::NoModRef;
if (SrcAA != NoAlias)
rv = setRef(rv);
if (DestAA != NoAlias)
@@ -870,7 +870,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// proper control dependencies will be maintained, it never aliases any
// particular memory location.
if (isIntrinsicCall(CS, Intrinsic::assume))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
// Like assumes, guard intrinsics are also marked as arbitrarily writing so
// that proper control dependencies are maintained but they never mods any
@@ -880,7 +880,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// heap state at the point the guard is issued needs to be consistent in case
// the guard invokes the "deopt" continuation.
if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
return MRI_Ref;
return ModRefInfo::Ref;
// Like assumes, invariant.start intrinsics were also marked as arbitrarily
// writing so that proper control dependencies are maintained but they never
@@ -906,7 +906,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// rules of invariant.start) and print 40, while the first program always
// prints 50.
if (isIntrinsicCall(CS, Intrinsic::invariant_start))
return MRI_Ref;
return ModRefInfo::Ref;
// The AAResultBase base class has some smarts, lets use them.
return AAResultBase::getModRefInfo(CS, Loc);
@@ -919,7 +919,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
// particular memory location.
if (isIntrinsicCall(CS1, Intrinsic::assume) ||
isIntrinsicCall(CS2, Intrinsic::assume))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
// Like assumes, guard intrinsics are also marked as arbitrarily writing so
// that proper control dependencies are maintained but they never mod any
@@ -933,12 +933,14 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
// possibilities for guard intrinsics.
if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
return isModSet(createModRefInfo(getModRefBehavior(CS2))) ? MRI_Ref
: MRI_NoModRef;
return isModSet(createModRefInfo(getModRefBehavior(CS2)))
? ModRefInfo::Ref
: ModRefInfo::NoModRef;
if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
return isModSet(createModRefInfo(getModRefBehavior(CS1))) ? MRI_Mod
: MRI_NoModRef;
return isModSet(createModRefInfo(getModRefBehavior(CS1)))
? ModRefInfo::Mod
: ModRefInfo::NoModRef;
// The AAResultBase base class has some smarts, lets use them.
return AAResultBase::getModRefInfo(CS1, CS2);


@@ -88,9 +88,9 @@ class GlobalsAAResult::FunctionInfo {
enum { MayReadAnyGlobal = 4 };
/// Checks to document the invariants of the bit packing here.
static_assert((MayReadAnyGlobal & MRI_ModRef) == 0,
static_assert((MayReadAnyGlobal & static_cast<int>(ModRefInfo::ModRef)) == 0,
"ModRef and the MayReadAnyGlobal flag bits overlap.");
static_assert(((MayReadAnyGlobal | MRI_ModRef) >>
static_assert(((MayReadAnyGlobal | static_cast<int>(ModRefInfo::ModRef)) >>
AlignedMapPointerTraits::NumLowBitsAvailable) == 0,
"Insufficient low bits to store our flag and ModRef info.");
@@ -127,12 +127,12 @@ public:
/// Returns the \c ModRefInfo info for this function.
ModRefInfo getModRefInfo() const {
return ModRefInfo(Info.getInt() & MRI_ModRef);
return ModRefInfo(Info.getInt() & static_cast<int>(ModRefInfo::ModRef));
}
/// Adds new \c ModRefInfo for this function to its state.
void addModRefInfo(ModRefInfo NewMRI) {
Info.setInt(Info.getInt() | NewMRI);
Info.setInt(Info.getInt() | static_cast<int>(NewMRI));
}
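
The static_asserts earlier in this file guard the packing these accessors rely on: the two mod/ref bits plus the MayReadAnyGlobal flag share the integer half of a PointerIntPair. A rough self-contained mock of that layout (a plain unsigned stands in for the real PointerIntPair):

enum class ModRefInfo { NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod };
enum { MayReadAnyGlobal = 4 }; // bit 2, above the two mod/ref bits

static_assert((MayReadAnyGlobal & static_cast<int>(ModRefInfo::ModRef)) == 0,
              "flag bit must not overlap the mod/ref bits");

struct FunctionInfoBits {
  unsigned Int = 0; // stands in for PointerIntPair::getInt()/setInt()
  void addModRefInfo(ModRefInfo NewMRI) {
    Int |= static_cast<int>(NewMRI); // ORing saturates towards ModRef
  }
  ModRefInfo getModRefInfo() const {
    // Mask off MayReadAnyGlobal to recover a valid ModRefInfo.
    return ModRefInfo(Int & static_cast<int>(ModRefInfo::ModRef));
  }
};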
/// Returns whether this function may read any global variable, and we don't
@@ -145,7 +145,8 @@ public:
/// Returns the \c ModRefInfo info for this function w.r.t. a particular
/// global, which may be more precise than the general information above.
ModRefInfo getModRefInfoForGlobal(const GlobalValue &GV) const {
ModRefInfo GlobalMRI = mayReadAnyGlobal() ? MRI_Ref : MRI_NoModRef;
ModRefInfo GlobalMRI =
mayReadAnyGlobal() ? ModRefInfo::Ref : ModRefInfo::NoModRef;
if (AlignedMap *P = Info.getPointer()) {
auto I = P->Map.find(&GV);
if (I != P->Map.end())
@@ -155,7 +156,7 @@ public:
}
/// Add mod/ref info from another function into ours, saturating towards
/// MRI_ModRef.
/// ModRef.
void addFunctionInfo(const FunctionInfo &FI) {
addModRefInfo(FI.getModRefInfo());
@@ -298,7 +299,7 @@ void GlobalsAAResult::AnalyzeGlobals(Module &M) {
Handles.emplace_front(*this, Reader);
Handles.front().I = Handles.begin();
}
FunctionInfos[Reader].addModRefInfoForGlobal(GV, MRI_Ref);
FunctionInfos[Reader].addModRefInfoForGlobal(GV, ModRefInfo::Ref);
}
if (!GV.isConstant()) // No need to keep track of writers to constants
@@ -307,7 +308,7 @@ void GlobalsAAResult::AnalyzeGlobals(Module &M) {
Handles.emplace_front(*this, Writer);
Handles.front().I = Handles.begin();
}
FunctionInfos[Writer].addModRefInfoForGlobal(GV, MRI_Mod);
FunctionInfos[Writer].addModRefInfoForGlobal(GV, ModRefInfo::Mod);
}
++NumNonAddrTakenGlobalVars;
@@ -503,13 +504,13 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
if (F->doesNotAccessMemory()) {
// Can't do better than that!
} else if (F->onlyReadsMemory()) {
FI.addModRefInfo(MRI_Ref);
FI.addModRefInfo(ModRefInfo::Ref);
if (!F->isIntrinsic() && !F->onlyAccessesArgMemory())
// This function might call back into the module and read a global -
// consider every global as possibly being read by this function.
FI.setMayReadAnyGlobal();
} else {
FI.addModRefInfo(MRI_ModRef);
FI.addModRefInfo(ModRefInfo::ModRef);
// Can't say anything useful unless it's an intrinsic - they don't
// read or write global variables of the kind considered here.
KnowNothing = !F->isIntrinsic();
@@ -564,7 +565,7 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
if (isAllocationFn(&I, &TLI) || isFreeCall(&I, &TLI)) {
// FIXME: It is completely unclear why this is necessary and not
// handled by the above graph code.
FI.addModRefInfo(MRI_ModRef);
FI.addModRefInfo(ModRefInfo::ModRef);
} else if (Function *Callee = CS.getCalledFunction()) {
// The callgraph doesn't include intrinsic calls.
if (Callee->isIntrinsic()) {
@@ -579,9 +580,9 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
// All non-call instructions we use the primary predicates for whether
// thay read or write memory.
if (I.mayReadFromMemory())
FI.addModRefInfo(MRI_Ref);
FI.addModRefInfo(ModRefInfo::Ref);
if (I.mayWriteToMemory())
FI.addModRefInfo(MRI_Mod);
FI.addModRefInfo(ModRefInfo::Mod);
}
}
@@ -868,8 +869,9 @@ AliasResult GlobalsAAResult::alias(const MemoryLocation &LocA,
ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
const GlobalValue *GV) {
if (CS.doesNotAccessMemory())
return MRI_NoModRef;
ModRefInfo ConservativeResult = CS.onlyReadsMemory() ? MRI_Ref : MRI_ModRef;
return ModRefInfo::NoModRef;
ModRefInfo ConservativeResult =
CS.onlyReadsMemory() ? ModRefInfo::Ref : ModRefInfo::ModRef;
// Iterate through all the arguments to the called function. If any argument
// is based on GV, return the conservative result.
@@ -890,12 +892,12 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
}
// We identified all objects in the argument list, and none of them were GV.
return MRI_NoModRef;
return ModRefInfo::NoModRef;
}
ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) {
ModRefInfo Known = MRI_ModRef;
ModRefInfo Known = ModRefInfo::ModRef;
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
@@ -909,7 +911,7 @@ ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS,
getModRefInfoForArgument(CS, GV));
if (!isModOrRefSet(Known))
return MRI_NoModRef; // No need to query other mod/ref analyses
return ModRefInfo::NoModRef; // No need to query other mod/ref analyses
return intersectModRef(Known, AAResultBase::getModRefInfo(CS, Loc));
}


@@ -119,38 +119,38 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
if (LI->isUnordered()) {
Loc = MemoryLocation::get(LI);
return MRI_Ref;
return ModRefInfo::Ref;
}
if (LI->getOrdering() == AtomicOrdering::Monotonic) {
Loc = MemoryLocation::get(LI);
return MRI_ModRef;
return ModRefInfo::ModRef;
}
Loc = MemoryLocation();
return MRI_ModRef;
return ModRefInfo::ModRef;
}
if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (SI->isUnordered()) {
Loc = MemoryLocation::get(SI);
return MRI_Mod;
return ModRefInfo::Mod;
}
if (SI->getOrdering() == AtomicOrdering::Monotonic) {
Loc = MemoryLocation::get(SI);
return MRI_ModRef;
return ModRefInfo::ModRef;
}
Loc = MemoryLocation();
return MRI_ModRef;
return ModRefInfo::ModRef;
}
if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
Loc = MemoryLocation::get(V);
return MRI_ModRef;
return ModRefInfo::ModRef;
}
if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
// calls to free() deallocate the entire structure
Loc = MemoryLocation(CI->getArgOperand(0));
return MRI_Mod;
return ModRefInfo::Mod;
}
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
@@ -166,7 +166,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
return MRI_Mod;
return ModRefInfo::Mod;
case Intrinsic::invariant_end:
II->getAAMetadata(AAInfo);
Loc = MemoryLocation(
@@ -174,7 +174,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
return MRI_Mod;
return ModRefInfo::Mod;
default:
break;
}
@@ -182,10 +182,10 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
// Otherwise, just do the coarse-grained thing that always works.
if (Inst->mayWriteToMemory())
return MRI_ModRef;
return ModRefInfo::ModRef;
if (Inst->mayReadFromMemory())
return MRI_Ref;
return MRI_NoModRef;
return ModRefInfo::Ref;
return ModRefInfo::NoModRef;
}
/// Private helper for finding the local dependencies of a call site.
@@ -689,12 +689,12 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
if (isModAndRefSet(MR))
MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB);
switch (MR) {
case MRI_NoModRef:
case ModRefInfo::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;
case MRI_Mod:
case ModRefInfo::Mod:
return MemDepResult::getClobber(Inst);
case MRI_Ref:
case ModRefInfo::Ref:
// If the call is known to never store to the pointer, and if this is a
// load query, we can safely ignore it (scan past it).
if (isLoad)


@@ -123,7 +123,7 @@ ModRefInfo ObjCARCAAResult::getModRefInfo(ImmutableCallSite CS,
// These functions don't access any memory visible to the compiler.
// Note that this doesn't include objc_retainBlock, because it updates
// pointers when it copies block data.
return MRI_NoModRef;
return ModRefInfo::NoModRef;
default:
break;
}


@@ -102,12 +102,12 @@ ModRefInfo ScopedNoAliasAAResult::getModRefInfo(ImmutableCallSite CS,
if (!mayAliasInScopes(Loc.AATags.Scope, CS.getInstruction()->getMetadata(
LLVMContext::MD_noalias)))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
if (!mayAliasInScopes(
CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
Loc.AATags.NoAlias))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS, Loc);
}
@@ -120,12 +120,12 @@ ModRefInfo ScopedNoAliasAAResult::getModRefInfo(ImmutableCallSite CS1,
if (!mayAliasInScopes(
CS1.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
CS2.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
if (!mayAliasInScopes(
CS2.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
CS1.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS1, CS2);
}


@@ -371,7 +371,7 @@ ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS,
if (const MDNode *M =
CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
if (!Aliases(L, M))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS, Loc);
}
@@ -386,7 +386,7 @@ ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS1,
if (const MDNode *M2 =
CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
if (!Aliases(M1, M2))
return MRI_NoModRef;
return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS1, CS2);
}


@@ -2009,12 +2009,12 @@ CleanupAndExit:
SmallPtrSet<Instruction*, 2> Ignore1;
Ignore1.insert(SI);
if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
StoreSize, *AA, Ignore1)) {
// Check if the load is the offending instruction.
Ignore1.insert(LI);
if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
StoreSize, *AA, Ignore1)) {
if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop,
BECount, StoreSize, *AA, Ignore1)) {
// Still bad. Nothing we can do.
goto CleanupAndExit;
}
@@ -2056,8 +2056,8 @@ CleanupAndExit:
SmallPtrSet<Instruction*, 2> Ignore2;
Ignore2.insert(SI);
if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
*AA, Ignore2))
if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
StoreSize, *AA, Ignore2))
goto CleanupAndExit;
// Check the stride.


@@ -719,7 +719,7 @@ static bool isSafeToPromoteArgument(Argument *Arg, bool isByValOrInAlloca,
BasicBlock *BB = Load->getParent();
MemoryLocation Loc = MemoryLocation::get(Load);
if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, MRI_Mod))
if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, ModRefInfo::Mod))
return false; // Pointer is invalidated!
// Now check every path from the entry block to the load for transparency.


@@ -887,8 +887,8 @@ bool LoopIdiomRecognize::processLoopStridedStore(
// base pointer and checking the region.
Value *BasePtr =
Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize,
*AA, Stores)) {
if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
StoreSize, *AA, Stores)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
@@ -997,7 +997,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
SmallPtrSet<Instruction *, 1> Stores;
Stores.insert(SI);
if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
StoreSize, *AA, Stores)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
@@ -1017,8 +1017,8 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
Value *LoadBasePtr = Expander.expandCodeFor(
LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
*AA, Stores)) {
if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
StoreSize, *AA, Stores)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);


@@ -195,7 +195,7 @@ bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction &Start,
make_range(Start.getIterator(), End.getIterator()))
if (Inst.mayThrow())
return true;
return AA->canInstructionRangeModRef(Start, End, Loc, MRI_ModRef);
return AA->canInstructionRangeModRef(Start, End, Loc, ModRefInfo::ModRef);
}
///


@@ -191,18 +191,18 @@ TEST_F(AliasAnalysisTest, getModRefInfo) {
auto &AA = getAAResults(*F);
// Check basic results
EXPECT_EQ(AA.getModRefInfo(Store1, MemoryLocation()), MRI_Mod);
EXPECT_EQ(AA.getModRefInfo(Store1, None), MRI_Mod);
EXPECT_EQ(AA.getModRefInfo(Load1, MemoryLocation()), MRI_Ref);
EXPECT_EQ(AA.getModRefInfo(Load1, None), MRI_Ref);
EXPECT_EQ(AA.getModRefInfo(Add1, MemoryLocation()), MRI_NoModRef);
EXPECT_EQ(AA.getModRefInfo(Add1, None), MRI_NoModRef);
EXPECT_EQ(AA.getModRefInfo(VAArg1, MemoryLocation()), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(VAArg1, None), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(CmpXChg1, MemoryLocation()), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(CmpXChg1, None), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(AtomicRMW, MemoryLocation()), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(AtomicRMW, None), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(Store1, MemoryLocation()), ModRefInfo::Mod);
EXPECT_EQ(AA.getModRefInfo(Store1, None), ModRefInfo::Mod);
EXPECT_EQ(AA.getModRefInfo(Load1, MemoryLocation()), ModRefInfo::Ref);
EXPECT_EQ(AA.getModRefInfo(Load1, None), ModRefInfo::Ref);
EXPECT_EQ(AA.getModRefInfo(Add1, MemoryLocation()), ModRefInfo::NoModRef);
EXPECT_EQ(AA.getModRefInfo(Add1, None), ModRefInfo::NoModRef);
EXPECT_EQ(AA.getModRefInfo(VAArg1, MemoryLocation()), ModRefInfo::ModRef);
EXPECT_EQ(AA.getModRefInfo(VAArg1, None), ModRefInfo::ModRef);
EXPECT_EQ(AA.getModRefInfo(CmpXChg1, MemoryLocation()), ModRefInfo::ModRef);
EXPECT_EQ(AA.getModRefInfo(CmpXChg1, None), ModRefInfo::ModRef);
EXPECT_EQ(AA.getModRefInfo(AtomicRMW, MemoryLocation()), ModRefInfo::ModRef);
EXPECT_EQ(AA.getModRefInfo(AtomicRMW, None), ModRefInfo::ModRef);
}
class AAPassInfraTest : public testing::Test {