[Alignment][NFC] Use Align with CreateAlignedStore

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet, bollu

Subscribers: arsenm, jvesely, nhaehnle, hiraditya, kerbowa, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D73274
This commit is contained in:
Guillaume Chatelet 2020-01-23 16:18:34 +01:00
parent dfec702290
commit 59f95222d4
28 changed files with 131 additions and 113 deletions

View File

@ -1449,7 +1449,8 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
llvm::IRBuilder<> b(llvm::BasicBlock::Create(CGM.getLLVMContext(), "entry",
Init));
b.CreateAlignedStore(CGM.getNSConcreteGlobalBlock(),
b.CreateStructGEP(literal, 0), CGM.getPointerAlign().getQuantity());
b.CreateStructGEP(literal, 0),
CGM.getPointerAlign().getAsAlign());
b.CreateRetVoid();
// We can't use the normal LLVM global initialisation array, because we
// need to specify that this runs early in library initialisation.

View File

@ -113,7 +113,7 @@ public:
using CGBuilderBaseTy::CreateAlignedStore;
llvm::StoreInst *CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr,
CharUnits Align, bool IsVolatile = false) {
return CreateAlignedStore(Val, Addr, Align.getQuantity(), IsVolatile);
return CreateAlignedStore(Val, Addr, Align.getAsAlign(), IsVolatile);
}
// FIXME: these "default-aligned" APIs should be removed,

View File

@ -3930,7 +3930,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto *V =
Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
Builder.CreateAlignedStore(
V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy));
V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
}
return std::tie(ElemPtr, TmpSize, TmpPtr);
};

View File

@ -111,7 +111,7 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I) {
llvm::Value *P = Builder.CreateStructGEP(AllocaTy, Alloca, I - 1);
llvm::Value *Arg = Args[I].getRValue(*this).getScalarVal();
Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlignment(Arg->getType()));
Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlign(Arg->getType()));
}
BufferPtr = Builder.CreatePointerCast(Alloca, llvm::Type::getInt8PtrTy(Ctx));
}

View File

@ -1647,8 +1647,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
for (const auto &lateInit : EarlyInitList) {
auto *global = TheModule.getGlobalVariable(lateInit.first);
if (global) {
b.CreateAlignedStore(global,
b.CreateStructGEP(lateInit.second.first, lateInit.second.second), CGM.getPointerAlign().getQuantity());
b.CreateAlignedStore(
global,
b.CreateStructGEP(lateInit.second.first, lateInit.second.second),
CGM.getPointerAlign().getAsAlign());
}
}
b.CreateRetVoid();

View File

@ -10042,9 +10042,9 @@ llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
auto IP = CGF.Builder.saveIP();
auto *BB = llvm::BasicBlock::Create(C, "entry", F);
Builder.SetInsertPoint(BB);
unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy);
const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
BlockPtr->setAlignment(llvm::MaybeAlign(BlockAlign));
BlockPtr->setAlignment(BlockAlign);
Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
llvm::SmallVector<llvm::Value *, 2> Args;

View File

@ -501,13 +501,17 @@ public:
}
/// Returns the minimum ABI-required alignment for the specified type.
/// FIXME: Deprecate this function once migration to Align is over.
unsigned getABITypeAlignment(Type *Ty) const;
/// Returns the minimum ABI-required alignment for the specified type.
Align getABITypeAlign(Type *Ty) const;
/// Helper function to return `Alignment` if it's set or the result of
/// `getABITypeAlignment(Ty)`, in any case the result is a valid alignment.
inline Align getValueOrABITypeAlignment(MaybeAlign Alignment,
Type *Ty) const {
return Alignment ? *Alignment : Align(getABITypeAlignment(Ty));
return Alignment ? *Alignment : getABITypeAlign(Ty);
}
/// Returns the minimum ABI-required alignment for an integer type of
@ -518,8 +522,15 @@ public:
/// type.
///
/// This is always at least as good as the ABI alignment.
/// FIXME: Deprecate this function once migration to Align is over.
unsigned getPrefTypeAlignment(Type *Ty) const;
/// Returns the preferred stack/global alignment for the specified
/// type.
///
/// This is always at least as good as the ABI alignment.
Align getPrefTypeAlign(Type *Ty) const;
/// Returns an integer type with size at least as big as that of a
/// pointer in the given address space.
IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;

View File

@ -70,11 +70,16 @@ private:
public:
GlobalObject(const GlobalObject &) = delete;
/// FIXME: Remove this function once transition to Align is over.
unsigned getAlignment() const {
MaybeAlign Align = getAlign();
return Align ? Align->value() : 0;
}
MaybeAlign getAlign() const {
unsigned Data = getGlobalValueSubClassData();
unsigned AlignmentData = Data & AlignmentMask;
MaybeAlign Align = decodeMaybeAlign(AlignmentData);
return Align ? Align->value() : 0;
return decodeMaybeAlign(AlignmentData);
}
/// FIXME: Remove this setter once the migration to MaybeAlign is over.

View File

@ -1836,15 +1836,19 @@ public:
Align, isVolatile, Name);
}
StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
bool isVolatile = false) {
StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
SI->setAlignment(MaybeAlign(Align));
return SI;
/// FIXME: Remove this function once transition to Align is over.
/// Use the version that takes MaybeAlign instead of this one.
LLVM_ATTRIBUTE_DEPRECATED(
StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
bool isVolatile = false),
"Use the version that takes MaybeAlign instead") {
return CreateAlignedStore(Val, Ptr, MaybeAlign(Align), isVolatile);
}
StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
bool isVolatile = false) {
return CreateAlignedStore(Val, Ptr, Align ? Align->value() : 0, isVolatile);
StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
SI->setAlignment(Align);
return SI;
}
FenceInst *CreateFence(AtomicOrdering Ordering,
SyncScope::ID SSID = SyncScope::System,

View File

@ -6885,8 +6885,8 @@ static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
Addr = Builder.CreateGEP(
SplitStoreType, Addr,
ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
Builder.CreateAlignedStore(
V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment());
Builder.CreateAlignedStore(V, Addr,
Upper ? SI.getAlign() / 2 : SI.getAlign());
};
CreateSplitStore(LValue, false);

View File

@ -269,7 +269,7 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) {
Value *Alignment = CI->getArgOperand(2);
Value *Mask = CI->getArgOperand(3);
unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
VectorType *VecType = cast<VectorType>(Src->getType());
Type *EltTy = VecType->getElementType();
@ -288,7 +288,8 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) {
}
// Adjust alignment for the scalar instruction.
AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
const Align AdjustedAlignVal =
commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
// Bitcast %addr from i8* to EltTy*
Type *NewPtrType =
EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
@ -301,7 +302,7 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) {
continue;
Value *OneElt = Builder.CreateExtractElement(Src, Idx);
Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal);
}
CI->eraseFromParent();
return;
@ -343,7 +344,7 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) {
Value *OneElt = Builder.CreateExtractElement(Src, Idx);
Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal);
// Create "else" block, fill it in the next iteration
BasicBlock *NewIfBlock =
@ -530,7 +531,7 @@ static void scalarizeMaskedScatter(CallInst *CI, bool &ModifiedDT) {
Builder.SetInsertPoint(InsertPt);
Builder.SetCurrentDebugLocation(CI->getDebugLoc());
unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
MaybeAlign AlignVal(cast<ConstantInt>(Alignment)->getZExtValue());
unsigned VectorWidth = Src->getType()->getVectorNumElements();
// Shorten the way if the mask is a vector of constants.
@ -737,7 +738,7 @@ static void scalarizeMaskedCompressStore(CallInst *CI, bool &ModifiedDT) {
Value *OneElt =
Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
Builder.CreateAlignedStore(OneElt, NewPtr, 1);
Builder.CreateAlignedStore(OneElt, NewPtr, Align(1));
++MemIndex;
}
CI->eraseFromParent();
@ -778,7 +779,7 @@ static void scalarizeMaskedCompressStore(CallInst *CI, bool &ModifiedDT) {
Builder.SetInsertPoint(InsertPt);
Value *OneElt = Builder.CreateExtractElement(Src, Idx);
Builder.CreateAlignedStore(OneElt, Ptr, 1);
Builder.CreateAlignedStore(OneElt, Ptr, Align(1));
// Move the pointer if there are more blocks to come.
Value *NewPtr;

View File

@ -1707,7 +1707,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *Extract =
Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1);
StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1));
SI->setMetadata(M->getMDKindID("nontemporal"), Node);
// Remove intrinsic.
@ -1731,8 +1731,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
PointerType::getUnqual(Arg1->getType()),
"cast");
VectorType *VTy = cast<VectorType>(Arg1->getType());
StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC,
VTy->getBitWidth() / 8);
StoreInst *SI =
Builder.CreateAlignedStore(Arg1, BC, Align(VTy->getBitWidth() / 8));
SI->setMetadata(M->getMDKindID("nontemporal"), Node);
// Remove intrinsic.
@ -1750,7 +1750,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *BC = Builder.CreateBitCast(Arg0,
PointerType::getUnqual(Elt->getType()),
"cast");
Builder.CreateAlignedStore(Elt, BC, 1);
Builder.CreateAlignedStore(Elt, BC, Align(1));
// Remove intrinsic.
CI->eraseFromParent();
@ -1766,7 +1766,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Arg0 = Builder.CreateBitCast(Arg0,
PointerType::getUnqual(Arg1->getType()),
"cast");
Builder.CreateAlignedStore(Arg1, Arg0, 1);
Builder.CreateAlignedStore(Arg1, Arg0, Align(1));
// Remove intrinsic.
CI->eraseFromParent();
@ -3437,7 +3437,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Cast the pointer to the right type.
Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3),
llvm::PointerType::getUnqual(Data->getType()));
Builder.CreateAlignedStore(Data, Ptr, 1);
Builder.CreateAlignedStore(Data, Ptr, Align(1));
// Replace the original call result with the first result of the new call.
Value *CF = Builder.CreateExtractValue(NewCall, 0);
@ -3659,7 +3659,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Cast the pointer to the right type.
Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0),
llvm::PointerType::getUnqual(Data->getType()));
Builder.CreateAlignedStore(Data, Ptr, 1);
Builder.CreateAlignedStore(Data, Ptr, Align(1));
// Replace the original call result with the first result of the new call.
Value *TSC = Builder.CreateExtractValue(NewCall, 0);

View File

@ -752,8 +752,13 @@ Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
abi_or_pref, Ty);
}
/// TODO: Remove this function once the transition to Align is over.
unsigned DataLayout::getABITypeAlignment(Type *Ty) const {
return getAlignment(Ty, true).value();
return getABITypeAlign(Ty).value();
}
Align DataLayout::getABITypeAlign(Type *Ty) const {
return getAlignment(Ty, true);
}
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
@ -762,8 +767,13 @@ Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, nullptr);
}
/// TODO: Remove this function once the transition to Align is over.
unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const {
return getAlignment(Ty, false).value();
return getPrefTypeAlign(Ty).value();
}
Align DataLayout::getPrefTypeAlign(Type *Ty) const {
return getAlignment(Ty, false);
}
IntegerType *DataLayout::getIntPtrType(LLVMContext &C,

View File

@ -453,9 +453,8 @@ bool AMDGPURewriteOutArguments::runOnFunction(Function &F) {
PointerType *ArgType = cast<PointerType>(Arg.getType());
auto *EltTy = ArgType->getElementType();
unsigned Align = Arg.getParamAlignment();
if (Align == 0)
Align = DL->getABITypeAlignment(EltTy);
const auto Align =
DL->getValueOrABITypeAlignment(Arg.getParamAlign(), EltTy);
Value *Val = B.CreateExtractValue(StubCall, RetIdx++);
Type *PtrTy = Val->getType()->getPointerTo(ArgType->getAddressSpace());

View File

@ -793,8 +793,7 @@ bool X86InterleavedAccessGroup::lowerIntoOptimizedSequence() {
// 4. Generate a store instruction for wide-vec.
StoreInst *SI = cast<StoreInst>(Inst);
Builder.CreateAlignedStore(WideVec, SI->getPointerOperand(),
SI->getAlignment());
Builder.CreateAlignedStore(WideVec, SI->getPointerOperand(), SI->getAlign());
return true;
}

View File

@ -1269,7 +1269,7 @@ void LowerTypeTestsModule::moveInitializerToModuleConstructor(
IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator());
GV->setConstant(false);
IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlignment());
IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlign());
GV->setInitializer(Constant::getNullValue(GV->getValueType()));
}

View File

@ -489,7 +489,7 @@ static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value
StoreInst *NewStore = IC.Builder.CreateAlignedStore(
V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
SI.getAlignment(), SI.isVolatile());
SI.getAlign(), SI.isVolatile());
NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
@ -1200,9 +1200,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
if (SL->hasPadding())
return false;
auto Align = SI.getAlignment();
if (!Align)
Align = DL.getABITypeAlignment(ST);
const auto Align = DL.getValueOrABITypeAlignment(SI.getAlign(), ST);
SmallString<16> EltName = V->getName();
EltName += ".elt";
@ -1220,7 +1218,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
AddrName);
auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
AAMDNodes AAMD;
SI.getAAMetadata(AAMD);
@ -1248,9 +1246,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
const DataLayout &DL = IC.getDataLayout();
auto EltSize = DL.getTypeAllocSize(AT->getElementType());
auto Align = SI.getAlignment();
if (!Align)
Align = DL.getABITypeAlignment(T);
const auto Align = DL.getValueOrABITypeAlignment(SI.getAlign(), T);
SmallString<16> EltName = V->getName();
EltName += ".elt";
@ -1270,7 +1266,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
AddrName);
auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
auto EltAlign = MinAlign(Align, Offset);
auto EltAlign = commonAlignment(Align, Offset);
Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
AAMDNodes AAMD;
SI.getAAMetadata(AAMD);

View File

@ -2836,7 +2836,8 @@ void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
IRB.CreateAlignedStore(
Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()), 1);
Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()),
Align(1));
i += StoreSizeInBytes;
}

View File

@ -424,7 +424,7 @@ struct DFSanFunction {
Value *combineOperandShadows(Instruction *Inst);
Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
Instruction *Pos);
void storeShadow(Value *Addr, uint64_t Size, uint64_t Align, Value *Shadow,
void storeShadow(Value *Addr, uint64_t Size, Align Alignment, Value *Shadow,
Instruction *Pos);
};
@ -1328,7 +1328,7 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
DFSF.setShadow(&LI, Shadow);
}
void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, Align Alignment,
Value *Shadow, Instruction *Pos) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
const auto i = AllocaShadowMap.find(AI);
@ -1339,7 +1339,7 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
}
uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
const Align ShadowAlign(Alignment.value() * (DFS.ShadowWidth / 8));
IRBuilder<> IRB(Pos);
Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
if (Shadow == DFS.ZeroShadow) {
@ -1386,21 +1386,17 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) {
if (Size == 0)
return;
uint64_t Align;
if (ClPreserveAlignment) {
Align = SI.getAlignment();
if (Align == 0)
Align = DL.getABITypeAlignment(SI.getValueOperand()->getType());
} else {
Align = 1;
}
const Align Alignement =
ClPreserveAlignment ? DL.getValueOrABITypeAlignment(
SI.getAlign(), SI.getValueOperand()->getType())
: Align(1);
Value* Shadow = DFSF.getShadow(SI.getValueOperand());
if (ClCombinePointerLabelsOnStore) {
Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
}
DFSF.storeShadow(SI.getPointerOperand(), Size, Align, Shadow, &SI);
DFSF.storeShadow(SI.getPointerOperand(), Size, Alignement, Shadow, &SI);
}
void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) {

View File

@ -1104,7 +1104,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
for (unsigned i = 0; i < Size / IntptrSize; ++i) {
Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
: IntptrOriginPtr;
IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment.value());
IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
Ofs += IntptrSize / kOriginSize;
CurrentAlignment = IntptrAlignment;
}
@ -1113,7 +1113,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
Value *GEP =
i ? IRB.CreateConstGEP1_32(MS.OriginTy, OriginPtr, i) : OriginPtr;
IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment.value());
IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
CurrentAlignment = kMinOriginAlignment;
}
}
@ -1170,8 +1170,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);
StoreInst *NewSI =
IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment.value());
StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
LLVM_DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
(void)NewSI;
@ -2455,7 +2454,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Have to assume to worst case.
std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
Addr, IRB, Shadow->getType(), Align::None(), /*isStore*/ true);
IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1));
if (ClCheckAccessAddress)
insertShadowCheck(Addr, &I);
@ -3329,7 +3328,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Size = DL.getTypeAllocSize(A->getType());
if (ArgOffset + Size > kParamTLSSize) break;
Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
kShadowTLSAlignment.value());
kShadowTLSAlignment);
Constant *Cst = dyn_cast<Constant>(ArgShadow);
if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
}
@ -3355,8 +3354,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRBBefore(&I);
// Until we have full dynamic coverage, make sure the retval shadow is 0.
Value *Base = getShadowPtrForRetval(&I, IRBBefore);
IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base,
kShadowTLSAlignment.value());
IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
BasicBlock::iterator NextInsn;
if (CS.isCall()) {
NextInsn = ++I.getIterator();
@ -3407,10 +3405,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (CheckReturnValue) {
insertShadowCheck(RetVal, &I);
Value *Shadow = getCleanShadow(RetVal);
IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment.value());
IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
} else {
Value *Shadow = getShadow(RetVal);
IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment.value());
IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
if (MS.TrackOrigins)
IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
}
@ -3868,7 +3866,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
if (!ShadowBase)
continue;
Value *Shadow = MSV.getShadow(A);
IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment.value());
IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
if (MS.TrackOrigins) {
Value *Origin = MSV.getOrigin(A);
unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
@ -4038,8 +4036,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
VAArgOffset = alignTo(VAArgOffset, 8);
if (!Base)
continue;
IRB.CreateAlignedStore(MSV.getShadow(A), Base,
kShadowTLSAlignment.value());
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
@ -4210,8 +4207,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
continue;
if (!Base)
continue;
IRB.CreateAlignedStore(MSV.getShadow(A), Base,
kShadowTLSAlignment.value());
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
Constant *OverflowSize =
ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
@ -4471,8 +4467,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
Base = getShadowPtrForVAArgument(A->getType(), IRB,
VAArgOffset - VAArgBase, ArgSize);
if (Base)
IRB.CreateAlignedStore(MSV.getShadow(A), Base,
kShadowTLSAlignment.value());
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
VAArgOffset += ArgSize;
VAArgOffset = alignTo(VAArgOffset, 8);

View File

@ -523,8 +523,8 @@ public:
StoreInst *createColumnStore(Value *ColumnValue, Value *ColumnPtr,
Type *EltType, IRBuilder<> Builder) {
unsigned Align = DL.getABITypeAlignment(EltType);
return Builder.CreateAlignedStore(ColumnValue, ColumnPtr, Align);
return Builder.CreateAlignedStore(ColumnValue, ColumnPtr,
DL.getABITypeAlign(EltType));
}

View File

@ -2612,7 +2612,7 @@ private:
NewAI.getAlign(), "load");
V = insertVector(IRB, Old, V, BeginIndex, "vec");
}
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign());
if (AATags)
Store->setAAMetadata(AATags);
Pass.DeadInsts.insert(&SI);
@ -2633,7 +2633,7 @@ private:
V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert");
}
V = convertValue(DL, IRB, V, NewAllocaTy);
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign());
Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
LLVMContext::MD_access_group});
if (AATags)
@ -2695,8 +2695,8 @@ private:
}
V = convertValue(DL, IRB, V, NewAllocaTy);
NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
SI.isVolatile());
NewSI =
IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), SI.isVolatile());
} else {
unsigned AS = SI.getPointerAddressSpace();
Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS));
@ -2863,8 +2863,8 @@ private:
V = convertValue(DL, IRB, V, AllocaTy);
}
StoreInst *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
II.isVolatile());
StoreInst *New =
IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), II.isVolatile());
if (AATags)
New->setAAMetadata(AATags);
LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
@ -3403,7 +3403,7 @@ private:
Value *InBoundsGEP =
IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
StoreInst *Store =
IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment.value());
IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment);
if (AATags)
Store->setAAMetadata(AATags);
LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
@ -3918,7 +3918,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
getAdjustedPtr(IRB, DL, StoreBasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
PartPtrTy, StoreBasePtr->getName() + "."),
getAdjustedAlignment(SI, PartOffset, DL).value(),
getAdjustedAlignment(SI, PartOffset, DL),
/*IsVolatile*/ false);
PStore->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
LLVMContext::MD_access_group});
@ -4015,7 +4015,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
getAdjustedPtr(IRB, DL, StoreBasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
StorePartPtrTy, StoreBasePtr->getName() + "."),
getAdjustedAlignment(SI, PartOffset, DL).value(),
getAdjustedAlignment(SI, PartOffset, DL),
/*IsVolatile*/ false);
// Now build a new slice for the alloca.

View File

@ -829,7 +829,7 @@ bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
Stores.resize(NumElems);
for (unsigned I = 0; I < NumElems; ++I) {
unsigned Align = Layout.getElemAlign(I);
Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], MaybeAlign(Align));
}
transferMetadataAndIRFlags(&SI, Stores);
return true;

View File

@ -129,20 +129,19 @@ public:
private:
unsigned getPointerAddressSpace(Value *I);
unsigned getAlignment(LoadInst *LI) const {
unsigned Align = LI->getAlignment();
if (Align != 0)
return Align;
/// TODO: Remove this function once transition to Align is over.
unsigned getAlignment(LoadInst *LI) const { return getAlign(LI).value(); }
return DL.getABITypeAlignment(LI->getType());
Align getAlign(LoadInst *LI) const {
return DL.getValueOrABITypeAlignment(LI->getAlign(), LI->getType());
}
unsigned getAlignment(StoreInst *SI) const {
unsigned Align = SI->getAlignment();
if (Align != 0)
return Align;
/// TODO: Remove this function once transition to Align is over.
unsigned getAlignment(StoreInst *SI) const { return getAlign(SI).value(); }
return DL.getABITypeAlignment(SI->getValueOperand()->getType());
Align getAlign(StoreInst *SI) const {
return DL.getValueOrABITypeAlignment(SI->getAlign(),
SI->getValueOperand()->getType());
}
static const unsigned MaxDepth = 3;
@ -961,7 +960,7 @@ bool Vectorizer::vectorizeStoreChain(
unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
unsigned VF = VecRegSize / Sz;
unsigned ChainSize = Chain.size();
unsigned Alignment = getAlignment(S0);
Align Alignment = getAlign(S0);
if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
InstructionsProcessed->insert(Chain.begin(), Chain.end());
@ -1019,7 +1018,7 @@ bool Vectorizer::vectorizeStoreChain(
InstructionsProcessed->insert(Chain.begin(), Chain.end());
// If the store is going to be misaligned, don't vectorize it.
if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
auto Chains = splitOddVectorElts(Chain, Sz);
return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
@ -1030,10 +1029,10 @@ bool Vectorizer::vectorizeStoreChain(
StackAdjustedAlignment,
DL, S0, nullptr, &DT);
if (NewAlign != 0)
Alignment = NewAlign;
Alignment = Align(NewAlign);
}
if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment, AS)) {
if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment.value(), AS)) {
auto Chains = splitOddVectorElts(Chain, Sz);
return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
vectorizeStoreChain(Chains.second, InstructionsProcessed);

View File

@ -2346,8 +2346,8 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
}
else
NewStoreInstr = Builder.CreateAlignedStore(IVec, AddrParts[Part],
Group->getAlignment());
NewStoreInstr =
Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
Group->addMetadata(NewStoreInstr);
}
@ -2452,8 +2452,7 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
BlockInMaskParts[Part]);
else
NewSI =
Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment.value());
NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
}
addMetadata(NewSI, SI);
}

View File

@ -343,7 +343,7 @@ void BlockGenerator::generateArrayStore(ScopStmt &Stmt, StoreInst *Store,
RuntimeDebugBuilder::createCPUPrinter(Builder, "Store to ", NewPointer,
": ", ValueOperand, "\n");
Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlignment());
Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlign());
});
}

View File

@ -176,7 +176,7 @@ ParallelLoopGeneratorKMP::createSubFn(Value *StrideNotUsed,
extractValuesFromStruct(Data, StructData->getAllocatedType(), UserContext,
Map);
const int Alignment = (is64BitArch()) ? 8 : 4;
const auto Alignment = llvm::Align(is64BitArch() ? 8 : 4);
Value *ID =
Builder.CreateAlignedLoad(IDPtr, Alignment, "polly.par.global_tid");

View File

@ -221,7 +221,7 @@ void RuntimeDebugBuilder::createGPUPrinterT(PollyIRBuilder &Builder,
Ty = Val->getType();
Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(Ptr, Ty->getPointerTo(5));
Builder.CreateAlignedStore(Val, Ptr, 4);
Builder.CreateAlignedStore(Val, Ptr, Align(4));
if (Ty->isFloatingPointTy())
str += "%f";