From 59f95222d4c5e997342b0514984823a99a16d44b Mon Sep 17 00:00:00 2001 From: Guillaume Chatelet Date: Thu, 23 Jan 2020 16:18:34 +0100 Subject: [PATCH] [Alignment][NFC] Use Align with CreateAlignedStore Summary: This patch is part of a series to introduce an Alignment type. See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html See this patch for the introduction of the type: https://reviews.llvm.org/D64790 Reviewers: courbet, bollu Subscribers: arsenm, jvesely, nhaehnle, hiraditya, kerbowa, cfe-commits, llvm-commits Tags: #clang, #llvm Differential Revision: https://reviews.llvm.org/D73274 --- clang/lib/CodeGen/CGBlocks.cpp | 3 +- clang/lib/CodeGen/CGBuilder.h | 2 +- clang/lib/CodeGen/CGBuiltin.cpp | 2 +- clang/lib/CodeGen/CGGPUBuiltin.cpp | 2 +- clang/lib/CodeGen/CGObjCGNU.cpp | 6 ++-- clang/lib/CodeGen/TargetInfo.cpp | 4 +-- llvm/include/llvm/IR/DataLayout.h | 13 ++++++++- llvm/include/llvm/IR/GlobalObject.h | 9 ++++-- llvm/include/llvm/IR/IRBuilder.h | 16 ++++++---- llvm/lib/CodeGen/CodeGenPrepare.cpp | 4 +-- llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp | 15 +++++----- llvm/lib/IR/AutoUpgrade.cpp | 14 ++++----- llvm/lib/IR/DataLayout.cpp | 14 +++++++-- .../AMDGPU/AMDGPURewriteOutArguments.cpp | 5 ++-- llvm/lib/Target/X86/X86InterleavedAccess.cpp | 3 +- llvm/lib/Transforms/IPO/LowerTypeTests.cpp | 2 +- .../InstCombineLoadStoreAlloca.cpp | 14 ++++----- .../Instrumentation/AddressSanitizer.cpp | 3 +- .../Instrumentation/DataFlowSanitizer.cpp | 20 +++++-------- .../Instrumentation/MemorySanitizer.cpp | 29 ++++++++----------- .../Scalar/LowerMatrixIntrinsics.cpp | 4 +-- llvm/lib/Transforms/Scalar/SROA.cpp | 18 ++++++------ llvm/lib/Transforms/Scalar/Scalarizer.cpp | 2 +- .../Vectorize/LoadStoreVectorizer.cpp | 27 +++++++++-------- .../Transforms/Vectorize/LoopVectorize.cpp | 7 ++--- polly/lib/CodeGen/BlockGenerators.cpp | 2 +- polly/lib/CodeGen/LoopGeneratorsKMP.cpp | 2 +- polly/lib/CodeGen/RuntimeDebugBuilder.cpp | 2 +- 28 files changed, 131 insertions(+), 113 deletions(-) diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp index 11f54d1f7fb2..33fad77eb4da 100644 --- a/clang/lib/CodeGen/CGBlocks.cpp +++ b/clang/lib/CodeGen/CGBlocks.cpp @@ -1449,7 +1449,8 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM, llvm::IRBuilder<> b(llvm::BasicBlock::Create(CGM.getLLVMContext(), "entry", Init)); b.CreateAlignedStore(CGM.getNSConcreteGlobalBlock(), - b.CreateStructGEP(literal, 0), CGM.getPointerAlign().getQuantity()); + b.CreateStructGEP(literal, 0), + CGM.getPointerAlign().getAsAlign()); b.CreateRetVoid(); // We can't use the normal LLVM global initialisation array, because we // need to specify that this runs early in library initialisation.
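(The same mechanical migration repeats in every file below. For readers following the series, here is a minimal sketch of the before/after API shape; emitStore is a hypothetical helper invented for illustration, not code from this patch:

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/Support/Alignment.h"

  using namespace llvm;

  void emitStore(IRBuilder<> &Builder, Value *Val, Value *Ptr,
                 const DataLayout &DL) {
    // Before: alignment traveled as a bare unsigned, with 0 meaning "unknown":
    //   Builder.CreateAlignedStore(Val, Ptr,
    //                              DL.getPrefTypeAlignment(Val->getType()));
    // After: DataLayout gains Align-returning twins (getABITypeAlign,
    // getPrefTypeAlign) and CreateAlignedStore takes a (Maybe)Align, so known
    // and unknown alignments are distinct types rather than a magic zero.
    Align A = DL.getPrefTypeAlign(Val->getType());
    Builder.CreateAlignedStore(Val, Ptr, A);
  }

The unsigned CreateAlignedStore overload is kept but marked LLVM_ATTRIBUTE_DEPRECATED, so in-tree callers can migrate incrementally.)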
diff --git a/clang/lib/CodeGen/CGBuilder.h b/clang/lib/CodeGen/CGBuilder.h index 107c9275431c..049e1d4b7552 100644 --- a/clang/lib/CodeGen/CGBuilder.h +++ b/clang/lib/CodeGen/CGBuilder.h @@ -113,7 +113,7 @@ public: using CGBuilderBaseTy::CreateAlignedStore; llvm::StoreInst *CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile = false) { - return CreateAlignedStore(Val, Addr, Align.getQuantity(), IsVolatile); + return CreateAlignedStore(Val, Addr, Align.getAsAlign(), IsVolatile); } // FIXME: these "default-aligned" APIs should be removed, diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 86a3f1e0d237..04511e892adf 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -3930,7 +3930,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, auto *V = Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy); Builder.CreateAlignedStore( - V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy)); + V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy)); } return std::tie(ElemPtr, TmpSize, TmpPtr); }; diff --git a/clang/lib/CodeGen/CGGPUBuiltin.cpp b/clang/lib/CodeGen/CGGPUBuiltin.cpp index bccce7dd7ff4..f860623e2bc3 100644 --- a/clang/lib/CodeGen/CGGPUBuiltin.cpp +++ b/clang/lib/CodeGen/CGGPUBuiltin.cpp @@ -111,7 +111,7 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E, for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I) { llvm::Value *P = Builder.CreateStructGEP(AllocaTy, Alloca, I - 1); llvm::Value *Arg = Args[I].getRValue(*this).getScalarVal(); - Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlignment(Arg->getType())); + Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlign(Arg->getType())); } BufferPtr = Builder.CreatePointerCast(Alloca, llvm::Type::getInt8PtrTy(Ctx)); } diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp index a27b6d4ed637..4c4015255c97 100644 --- a/clang/lib/CodeGen/CGObjCGNU.cpp +++ b/clang/lib/CodeGen/CGObjCGNU.cpp @@ -1647,8 +1647,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep { for (const auto &lateInit : EarlyInitList) { auto *global = TheModule.getGlobalVariable(lateInit.first); if (global) { - b.CreateAlignedStore(global, - b.CreateStructGEP(lateInit.second.first, lateInit.second.second), CGM.getPointerAlign().getQuantity()); + b.CreateAlignedStore( + global, + b.CreateStructGEP(lateInit.second.first, lateInit.second.second), + CGM.getPointerAlign().getAsAlign()); } } b.CreateRetVoid(); diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp index 682ef18da73b..c803785435ff 100644 --- a/clang/lib/CodeGen/TargetInfo.cpp +++ b/clang/lib/CodeGen/TargetInfo.cpp @@ -10042,9 +10042,9 @@ llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel( auto IP = CGF.Builder.saveIP(); auto *BB = llvm::BasicBlock::Create(C, "entry", F); Builder.SetInsertPoint(BB); - unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy); + const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy); auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr); - BlockPtr->setAlignment(llvm::MaybeAlign(BlockAlign)); + BlockPtr->setAlignment(BlockAlign); Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign); auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0)); llvm::SmallVector<llvm::Value *, 2> Args; diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h index 85093dd218f8..98bdf30f5a46 100644 ---
a/llvm/include/llvm/IR/DataLayout.h +++ b/llvm/include/llvm/IR/DataLayout.h @@ -501,13 +501,17 @@ public: } /// Returns the minimum ABI-required alignment for the specified type. + /// FIXME: Deprecate this function once migration to Align is over. unsigned getABITypeAlignment(Type *Ty) const; + /// Returns the minimum ABI-required alignment for the specified type. + Align getABITypeAlign(Type *Ty) const; + /// Helper function to return `Alignment` if it's set or the result of /// `getABITypeAlignment(Ty)`, in any case the result is a valid alignment. inline Align getValueOrABITypeAlignment(MaybeAlign Alignment, Type *Ty) const { - return Alignment ? *Alignment : Align(getABITypeAlignment(Ty)); + return Alignment ? *Alignment : getABITypeAlign(Ty); } /// Returns the minimum ABI-required alignment for an integer type of @@ -518,8 +522,15 @@ public: /// type. /// /// This is always at least as good as the ABI alignment. + /// FIXME: Deprecate this function once migration to Align is over. unsigned getPrefTypeAlignment(Type *Ty) const; + /// Returns the preferred stack/global alignment for the specified + /// type. + /// + /// This is always at least as good as the ABI alignment. + Align getPrefTypeAlign(Type *Ty) const; + /// Returns an integer type with size at least as big as that of a /// pointer in the given address space. IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const; diff --git a/llvm/include/llvm/IR/GlobalObject.h b/llvm/include/llvm/IR/GlobalObject.h index ce81eb9f0719..25e073d42e4c 100644 --- a/llvm/include/llvm/IR/GlobalObject.h +++ b/llvm/include/llvm/IR/GlobalObject.h @@ -70,11 +70,16 @@ private: public: GlobalObject(const GlobalObject &) = delete; + /// FIXME: Remove this function once transition to Align is over. unsigned getAlignment() const { + MaybeAlign Align = getAlign(); + return Align ? Align->value() : 0; + } + + MaybeAlign getAlign() const { unsigned Data = getGlobalValueSubClassData(); unsigned AlignmentData = Data & AlignmentMask; - MaybeAlign Align = decodeMaybeAlign(AlignmentData); - return Align ? Align->value() : 0; + return decodeMaybeAlign(AlignmentData); } /// FIXME: Remove this setter once the migration to MaybeAlign is over. diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h index 7499f68c9d9a..d3ff9d22c868 100644 --- a/llvm/include/llvm/IR/IRBuilder.h +++ b/llvm/include/llvm/IR/IRBuilder.h @@ -1836,15 +1836,19 @@ public: Align, isVolatile, Name); } - StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align, - bool isVolatile = false) { - StoreInst *SI = CreateStore(Val, Ptr, isVolatile); - SI->setAlignment(MaybeAlign(Align)); - return SI; + /// FIXME: Remove this function once transition to Align is over. + /// Use the version that takes MaybeAlign instead of this one. + LLVM_ATTRIBUTE_DEPRECATED( + StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align, + bool isVolatile = false), + "Use the version that takes MaybeAlign instead") { + return CreateAlignedStore(Val, Ptr, MaybeAlign(Align), isVolatile); } StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile = false) { - return CreateAlignedStore(Val, Ptr, Align ? 
Align->value() : 0, isVolatile); + StoreInst *SI = CreateStore(Val, Ptr, isVolatile); + SI->setAlignment(Align); + return SI; } FenceInst *CreateFence(AtomicOrdering Ordering, SyncScope::ID SSID = SyncScope::System, diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index a44d965029dc..34731d3836bd 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -6885,8 +6885,8 @@ static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, Addr = Builder.CreateGEP( SplitStoreType, Addr, ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); - Builder.CreateAlignedStore( - V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment()); + Builder.CreateAlignedStore(V, Addr, + Upper ? SI.getAlign() / 2 : SI.getAlign()); }; CreateSplitStore(LValue, false); diff --git a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp index 3fedb8210b83..12a71182b602 100644 --- a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp +++ b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp @@ -269,7 +269,7 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) { Value *Alignment = CI->getArgOperand(2); Value *Mask = CI->getArgOperand(3); - unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); + const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue(); VectorType *VecType = cast<VectorType>(Src->getType()); Type *EltTy = VecType->getElementType(); @@ -288,7 +288,8 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) { } // Adjust alignment for the scalar instruction. - AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8); + const Align AdjustedAlignVal = + commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8); // Bitcast %addr from i8* to EltTy* Type *NewPtrType = EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace()); @@ -301,7 +302,7 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) { continue; Value *OneElt = Builder.CreateExtractElement(Src, Idx); Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx); - Builder.CreateAlignedStore(OneElt, Gep, AlignVal); + Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal); } CI->eraseFromParent(); return; @@ -343,7 +344,7 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) { Value *OneElt = Builder.CreateExtractElement(Src, Idx); Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx); - Builder.CreateAlignedStore(OneElt, Gep, AlignVal); + Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal); // Create "else" block, fill it in the next iteration BasicBlock *NewIfBlock = @@ -530,7 +531,7 @@ static void scalarizeMaskedScatter(CallInst *CI, bool &ModifiedDT) { Builder.SetInsertPoint(InsertPt); Builder.SetCurrentDebugLocation(CI->getDebugLoc()); - unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue(); + MaybeAlign AlignVal(cast<ConstantInt>(Alignment)->getZExtValue()); unsigned VectorWidth = Src->getType()->getVectorNumElements(); // Shorten the way if the mask is a vector of constants.
@@ -737,7 +738,7 @@ static void scalarizeMaskedCompressStore(CallInst *CI, bool &ModifiedDT) { Value *OneElt = Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx)); Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex); - Builder.CreateAlignedStore(OneElt, NewPtr, 1); + Builder.CreateAlignedStore(OneElt, NewPtr, Align(1)); ++MemIndex; } CI->eraseFromParent(); @@ -778,7 +779,7 @@ static void scalarizeMaskedCompressStore(CallInst *CI, bool &ModifiedDT) { Builder.SetInsertPoint(InsertPt); Value *OneElt = Builder.CreateExtractElement(Src, Idx); - Builder.CreateAlignedStore(OneElt, Ptr, 1); + Builder.CreateAlignedStore(OneElt, Ptr, Align(1)); // Move the pointer if there are more blocks to come. Value *NewPtr; diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp index 60d0155b3796..115500cff1de 100644 --- a/llvm/lib/IR/AutoUpgrade.cpp +++ b/llvm/lib/IR/AutoUpgrade.cpp @@ -1707,7 +1707,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { Value *Extract = Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement"); - StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1); + StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1)); SI->setMetadata(M->getMDKindID("nontemporal"), Node); // Remove intrinsic. @@ -1731,8 +1731,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { PointerType::getUnqual(Arg1->getType()), "cast"); VectorType *VTy = cast<VectorType>(Arg1->getType()); - StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC, - VTy->getBitWidth() / 8); + StoreInst *SI = + Builder.CreateAlignedStore(Arg1, BC, Align(VTy->getBitWidth() / 8)); SI->setMetadata(M->getMDKindID("nontemporal"), Node); // Remove intrinsic. @@ -1750,7 +1750,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { Value *BC = Builder.CreateBitCast(Arg0, PointerType::getUnqual(Elt->getType()), "cast"); - Builder.CreateAlignedStore(Elt, BC, 1); + Builder.CreateAlignedStore(Elt, BC, Align(1)); // Remove intrinsic. CI->eraseFromParent(); @@ -1766,7 +1766,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { Arg0 = Builder.CreateBitCast(Arg0, PointerType::getUnqual(Arg1->getType()), "cast"); - Builder.CreateAlignedStore(Arg1, Arg0, 1); + Builder.CreateAlignedStore(Arg1, Arg0, Align(1)); // Remove intrinsic. CI->eraseFromParent(); @@ -3437,7 +3437,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { // Cast the pointer to the right type. Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3), llvm::PointerType::getUnqual(Data->getType())); - Builder.CreateAlignedStore(Data, Ptr, 1); + Builder.CreateAlignedStore(Data, Ptr, Align(1)); // Replace the original call result with the first result of the new call. Value *CF = Builder.CreateExtractValue(NewCall, 0); @@ -3659,7 +3659,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { // Cast the pointer to the right type. Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0), llvm::PointerType::getUnqual(Data->getType())); - Builder.CreateAlignedStore(Data, Ptr, 1); + Builder.CreateAlignedStore(Data, Ptr, Align(1)); // Replace the original call result with the first result of the new call.
Value *TSC = Builder.CreateExtractValue(NewCall, 0); diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp index 94e0740663cc..3fdd94b5a790 100644 --- a/llvm/lib/IR/DataLayout.cpp +++ b/llvm/lib/IR/DataLayout.cpp @@ -752,8 +752,13 @@ Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const { abi_or_pref, Ty); } +/// TODO: Remove this function once the transition to Align is over. unsigned DataLayout::getABITypeAlignment(Type *Ty) const { - return getAlignment(Ty, true).value(); + return getABITypeAlign(Ty).value(); +} + +Align DataLayout::getABITypeAlign(Type *Ty) const { + return getAlignment(Ty, true); } /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for @@ -762,8 +767,13 @@ Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const { return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, nullptr); } +/// TODO: Remove this function once the transition to Align is over. unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const { - return getAlignment(Ty, false).value(); + return getPrefTypeAlign(Ty).value(); +} + +Align DataLayout::getPrefTypeAlign(Type *Ty) const { + return getAlignment(Ty, false); } IntegerType *DataLayout::getIntPtrType(LLVMContext &C, diff --git a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp index 9a1e2fc42ed5..cf641e3948ac 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp @@ -453,9 +453,8 @@ bool AMDGPURewriteOutArguments::runOnFunction(Function &F) { PointerType *ArgType = cast<PointerType>(Arg.getType()); auto *EltTy = ArgType->getElementType(); - unsigned Align = Arg.getParamAlignment(); - if (Align == 0) - Align = DL->getABITypeAlignment(EltTy); + const auto Align = + DL->getValueOrABITypeAlignment(Arg.getParamAlign(), EltTy); Value *Val = B.CreateExtractValue(StubCall, RetIdx++); Type *PtrTy = Val->getType()->getPointerTo(ArgType->getAddressSpace()); diff --git a/llvm/lib/Target/X86/X86InterleavedAccess.cpp b/llvm/lib/Target/X86/X86InterleavedAccess.cpp index f4a487148e9a..36ee9d4ad382 100644 --- a/llvm/lib/Target/X86/X86InterleavedAccess.cpp +++ b/llvm/lib/Target/X86/X86InterleavedAccess.cpp @@ -793,8 +793,7 @@ bool X86InterleavedAccessGroup::lowerIntoOptimizedSequence() { // 4. Generate a store instruction for wide-vec.
StoreInst *SI = cast<StoreInst>(Inst); - Builder.CreateAlignedStore(WideVec, SI->getPointerOperand(), - SI->getAlignment()); + Builder.CreateAlignedStore(WideVec, SI->getPointerOperand(), SI->getAlign()); return true; } diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp index fa664966faf7..e6747a68e672 100644 --- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp +++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp @@ -1269,7 +1269,7 @@ void LowerTypeTestsModule::moveInitializerToModuleConstructor( IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator()); GV->setConstant(false); - IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlignment()); + IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlign()); GV->setInitializer(Constant::getNullValue(GV->getValueType())); } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp index f223cf5ab2af..62a75a325858 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -489,7 +489,7 @@ static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value StoreInst *NewStore = IC.Builder.CreateAlignedStore( V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)), - SI.getAlignment(), SI.isVolatile()); + SI.getAlign(), SI.isVolatile()); NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID()); for (const auto &MDPair : MD) { unsigned ID = MDPair.first; @@ -1200,9 +1200,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) { if (SL->hasPadding()) return false; - auto Align = SI.getAlignment(); - if (!Align) - Align = DL.getABITypeAlignment(ST); + const auto Align = DL.getValueOrABITypeAlignment(SI.getAlign(), ST); SmallString<16> EltName = V->getName(); EltName += ".elt"; @@ -1220,7 +1218,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) { auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), AddrName); auto *Val = IC.Builder.CreateExtractValue(V, i, EltName); - auto EltAlign = MinAlign(Align, SL->getElementOffset(i)); + auto EltAlign = commonAlignment(Align, SL->getElementOffset(i)); llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign); AAMDNodes AAMD; SI.getAAMetadata(AAMD); @@ -1248,9 +1246,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) { const DataLayout &DL = IC.getDataLayout(); auto EltSize = DL.getTypeAllocSize(AT->getElementType()); - auto Align = SI.getAlignment(); - if (!Align) - Align = DL.getABITypeAlignment(T); + const auto Align = DL.getValueOrABITypeAlignment(SI.getAlign(), T); SmallString<16> EltName = V->getName(); EltName += ".elt"; @@ -1270,7 +1266,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) { auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices), AddrName); auto *Val = IC.Builder.CreateExtractValue(V, i, EltName); - auto EltAlign = MinAlign(Align, Offset); + auto EltAlign = commonAlignment(Align, Offset); Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign); AAMDNodes AAMD; SI.getAAMetadata(AAMD); diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 79c119489a65..190d4e22ae20 100644 --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -2836,7 +2836,8 @@ void
FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask, Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)); Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val); IRB.CreateAlignedStore( - Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()), 1); + Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()), + Align(1)); i += StoreSizeInBytes; } diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp index da9cc69676df..8053474d40e6 100644 --- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp @@ -424,7 +424,7 @@ struct DFSanFunction { Value *combineOperandShadows(Instruction *Inst); Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align, Instruction *Pos); - void storeShadow(Value *Addr, uint64_t Size, uint64_t Align, Value *Shadow, + void storeShadow(Value *Addr, uint64_t Size, Align Alignment, Value *Shadow, Instruction *Pos); }; @@ -1328,7 +1328,7 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) { DFSF.setShadow(&LI, Shadow); } -void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align, +void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, Align Alignment, Value *Shadow, Instruction *Pos) { if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) { const auto i = AllocaShadowMap.find(AI); @@ -1339,7 +1339,7 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align, } } - uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8; + const Align ShadowAlign(Alignment.value() * (DFS.ShadowWidth / 8)); IRBuilder<> IRB(Pos); Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos); if (Shadow == DFS.ZeroShadow) { @@ -1386,21 +1386,17 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) { if (Size == 0) return; - uint64_t Align; - if (ClPreserveAlignment) { - Align = SI.getAlignment(); - if (Align == 0) - Align = DL.getABITypeAlignment(SI.getValueOperand()->getType()); - } else { - Align = 1; - } + const Align Alignment = + ClPreserveAlignment ? DL.getValueOrABITypeAlignment( SI.getAlign(), SI.getValueOperand()->getType()) : Align(1); Value* Shadow = DFSF.getShadow(SI.getValueOperand()); if (ClCombinePointerLabelsOnStore) { Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand()); Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI); } - DFSF.storeShadow(SI.getPointerOperand(), Size, Alignment, Shadow, &SI); } void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) { diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp index ab4c663fb099..0a5cf72a38cb 100644 --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -1104,7 +1104,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { for (unsigned i = 0; i < Size / IntptrSize; ++i) { Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i) : IntptrOriginPtr; - IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment.value()); + IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment); Ofs += IntptrSize / kOriginSize; CurrentAlignment = IntptrAlignment; } @@ -1113,7 +1113,7 @@ for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) { Value *GEP = i ?
IRB.CreateConstGEP1_32(MS.OriginTy, OriginPtr, i) : OriginPtr; - IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment.value()); + IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment); CurrentAlignment = kMinOriginAlignment; } } @@ -1170,8 +1170,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true); - StoreInst *NewSI = - IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment.value()); + StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment); LLVM_DEBUG(dbgs() << " STORE: " << *NewSI << "\n"); (void)NewSI; @@ -2455,7 +2454,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // Have to assume to worst case. std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr( Addr, IRB, Shadow->getType(), Align::None(), /*isStore*/ true); - IRB.CreateAlignedStore(Shadow, ShadowPtr, 1); + IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1)); if (ClCheckAccessAddress) insertShadowCheck(Addr, &I); @@ -3329,7 +3328,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { Size = DL.getTypeAllocSize(A->getType()); if (ArgOffset + Size > kParamTLSSize) break; Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase, - kShadowTLSAlignment.value()); + kShadowTLSAlignment); Constant *Cst = dyn_cast<Constant>(ArgShadow); if (Cst && Cst->isNullValue()) ArgIsInitialized = true; } @@ -3355,8 +3354,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { IRBuilder<> IRBBefore(&I); // Until we have full dynamic coverage, make sure the retval shadow is 0. Value *Base = getShadowPtrForRetval(&I, IRBBefore); - IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, - kShadowTLSAlignment.value()); + IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment); BasicBlock::iterator NextInsn; if (CS.isCall()) { NextInsn = ++I.getIterator(); @@ -3407,10 +3405,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { if (CheckReturnValue) { insertShadowCheck(RetVal, &I); Value *Shadow = getCleanShadow(RetVal); - IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment.value()); + IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); } else { Value *Shadow = getShadow(RetVal); - IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment.value()); + IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); if (MS.TrackOrigins) IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB)); } @@ -3868,7 +3866,7 @@ struct VarArgAMD64Helper : public VarArgHelper { if (!ShadowBase) continue; Value *Shadow = MSV.getShadow(A); - IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment.value()); + IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment); if (MS.TrackOrigins) { Value *Origin = MSV.getOrigin(A); unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType()); @@ -4038,8 +4036,7 @@ struct VarArgMIPS64Helper : public VarArgHelper { VAArgOffset = alignTo(VAArgOffset, 8); if (!Base) continue; - IRB.CreateAlignedStore(MSV.getShadow(A), Base, - kShadowTLSAlignment.value()); + IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment); } Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset); @@ -4210,8 +4207,7 @@ struct VarArgAArch64Helper : public VarArgHelper { continue; if (!Base) continue; - IRB.CreateAlignedStore(MSV.getShadow(A), Base, - kShadowTLSAlignment.value()); + IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment); } Constant *OverflowSize = ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
@@ -4471,8 +4467,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper { Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset - VAArgBase, ArgSize); if (Base) - IRB.CreateAlignedStore(MSV.getShadow(A), Base, - kShadowTLSAlignment.value()); + IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment); } VAArgOffset += ArgSize; VAArgOffset = alignTo(VAArgOffset, 8); diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp index 3f997aa4726e..ca6a6ae00758 100644 --- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp +++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp @@ -523,8 +523,8 @@ public: StoreInst *createColumnStore(Value *ColumnValue, Value *ColumnPtr, Type *EltType, IRBuilder<> Builder) { - unsigned Align = DL.getABITypeAlignment(EltType); - return Builder.CreateAlignedStore(ColumnValue, ColumnPtr, Align); + return Builder.CreateAlignedStore(ColumnValue, ColumnPtr, + DL.getABITypeAlign(EltType)); } diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp index 23cec1ea628a..6c9b62da5e79 100644 --- a/llvm/lib/Transforms/Scalar/SROA.cpp +++ b/llvm/lib/Transforms/Scalar/SROA.cpp @@ -2612,7 +2612,7 @@ private: NewAI.getAlign(), "load"); V = insertVector(IRB, Old, V, BeginIndex, "vec"); } - StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); + StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign()); if (AATags) Store->setAAMetadata(AATags); Pass.DeadInsts.insert(&SI); @@ -2633,7 +2633,7 @@ private: V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert"); } V = convertValue(DL, IRB, V, NewAllocaTy); - StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); + StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign()); Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, LLVMContext::MD_access_group}); if (AATags) @@ -2695,8 +2695,8 @@ private: } V = convertValue(DL, IRB, V, NewAllocaTy); - NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), - SI.isVolatile()); + NewSI = + IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), SI.isVolatile()); } else { unsigned AS = SI.getPointerAddressSpace(); Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS)); @@ -2863,8 +2863,8 @@ private: V = convertValue(DL, IRB, V, AllocaTy); } - StoreInst *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), - II.isVolatile()); + StoreInst *New = + IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlign(), II.isVolatile()); if (AATags) New->setAAMetadata(AATags); LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); @@ -3403,7 +3403,7 @@ private: Value *InBoundsGEP = IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); StoreInst *Store = - IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment.value()); + IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment); if (AATags) Store->setAAMetadata(AATags); LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); @@ -3918,7 +3918,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { getAdjustedPtr(IRB, DL, StoreBasePtr, APInt(DL.getIndexSizeInBits(AS), PartOffset), PartPtrTy, StoreBasePtr->getName() + "."), - getAdjustedAlignment(SI, PartOffset, DL).value(), + getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); PStore->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, LLVMContext::MD_access_group}); @@ -4015,7 +4015,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst 
&AI, AllocaSlices &AS) { getAdjustedPtr(IRB, DL, StoreBasePtr, APInt(DL.getIndexSizeInBits(AS), PartOffset), StorePartPtrTy, StoreBasePtr->getName() + "."), - getAdjustedAlignment(SI, PartOffset, DL).value(), + getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); // Now build a new slice for the alloca. diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp index b6e7cf32d3f3..dc64287550d3 100644 --- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp +++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp @@ -829,7 +829,7 @@ bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) { Stores.resize(NumElems); for (unsigned I = 0; I < NumElems; ++I) { unsigned Align = Layout.getElemAlign(I); - Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align); + Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], MaybeAlign(Align)); } transferMetadataAndIRFlags(&SI, Stores); return true; diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp index 9c8fa3f456c1..3b22f3082c33 100644 --- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@ -129,20 +129,19 @@ public: private: unsigned getPointerAddressSpace(Value *I); - unsigned getAlignment(LoadInst *LI) const { - unsigned Align = LI->getAlignment(); - if (Align != 0) - return Align; + /// TODO: Remove this function once transition to Align is over. + unsigned getAlignment(LoadInst *LI) const { return getAlign(LI).value(); } - return DL.getABITypeAlignment(LI->getType()); + Align getAlign(LoadInst *LI) const { + return DL.getValueOrABITypeAlignment(LI->getAlign(), LI->getType()); } - unsigned getAlignment(StoreInst *SI) const { - unsigned Align = SI->getAlignment(); - if (Align != 0) - return Align; + /// TODO: Remove this function once transition to Align is over. + unsigned getAlignment(StoreInst *SI) const { return getAlign(SI).value(); } - return DL.getABITypeAlignment(SI->getValueOperand()->getType()); + Align getAlign(StoreInst *SI) const { + return DL.getValueOrABITypeAlignment(SI->getAlign(), + SI->getValueOperand()->getType()); } static const unsigned MaxDepth = 3; @@ -961,7 +960,7 @@ bool Vectorizer::vectorizeStoreChain( unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS); unsigned VF = VecRegSize / Sz; unsigned ChainSize = Chain.size(); - unsigned Alignment = getAlignment(S0); + Align Alignment = getAlign(S0); if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) { InstructionsProcessed->insert(Chain.begin(), Chain.end()); @@ -1019,7 +1018,7 @@ bool Vectorizer::vectorizeStoreChain( InstructionsProcessed->insert(Chain.begin(), Chain.end()); // If the store is going to be misaligned, don't vectorize it. 
- if (accessIsMisaligned(SzInBytes, AS, Alignment)) { + if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) { if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) { auto Chains = splitOddVectorElts(Chain, Sz); return vectorizeStoreChain(Chains.first, InstructionsProcessed) | @@ -1030,10 +1029,10 @@ bool Vectorizer::vectorizeStoreChain( StackAdjustedAlignment, DL, S0, nullptr, &DT); if (NewAlign != 0) - Alignment = NewAlign; + Alignment = Align(NewAlign); } - if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment, AS)) { + if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment.value(), AS)) { auto Chains = splitOddVectorElts(Chain, Sz); return vectorizeStoreChain(Chains.first, InstructionsProcessed) | vectorizeStoreChain(Chains.second, InstructionsProcessed); diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 0b7c5dea9948..2c9b9bdb4c08 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -2346,8 +2346,8 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr, IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); } else - NewStoreInstr = Builder.CreateAlignedStore(IVec, AddrParts[Part], - Group->getAlignment()); + NewStoreInstr = + Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); Group->addMetadata(NewStoreInstr); } @@ -2452,8 +2452,7 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, BlockInMaskParts[Part]); else - NewSI = - Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment.value()); + NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); } addMetadata(NewSI, SI); } diff --git a/polly/lib/CodeGen/BlockGenerators.cpp b/polly/lib/CodeGen/BlockGenerators.cpp index ca8d80f608cf..ca495ee9338d 100644 --- a/polly/lib/CodeGen/BlockGenerators.cpp +++ b/polly/lib/CodeGen/BlockGenerators.cpp @@ -343,7 +343,7 @@ void BlockGenerator::generateArrayStore(ScopStmt &Stmt, StoreInst *Store, RuntimeDebugBuilder::createCPUPrinter(Builder, "Store to ", NewPointer, ": ", ValueOperand, "\n"); - Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlignment()); + Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlign()); }); } diff --git a/polly/lib/CodeGen/LoopGeneratorsKMP.cpp b/polly/lib/CodeGen/LoopGeneratorsKMP.cpp index 895967dd5bf1..c52f947a7246 100644 --- a/polly/lib/CodeGen/LoopGeneratorsKMP.cpp +++ b/polly/lib/CodeGen/LoopGeneratorsKMP.cpp @@ -176,7 +176,7 @@ ParallelLoopGeneratorKMP::createSubFn(Value *StrideNotUsed, extractValuesFromStruct(Data, StructData->getAllocatedType(), UserContext, Map); - const int Alignment = (is64BitArch()) ? 8 : 4; + const auto Alignment = llvm::Align(is64BitArch() ? 8 : 4); Value *ID = Builder.CreateAlignedLoad(IDPtr, Alignment, "polly.par.global_tid"); diff --git a/polly/lib/CodeGen/RuntimeDebugBuilder.cpp b/polly/lib/CodeGen/RuntimeDebugBuilder.cpp index e49af27c4cea..2a349fe0f2b9 100644 --- a/polly/lib/CodeGen/RuntimeDebugBuilder.cpp +++ b/polly/lib/CodeGen/RuntimeDebugBuilder.cpp @@ -221,7 +221,7 @@ void RuntimeDebugBuilder::createGPUPrinterT(PollyIRBuilder &Builder, Ty = Val->getType(); Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(Ptr, Ty->getPointerTo(5)); - Builder.CreateAlignedStore(Val, Ptr, 4); + Builder.CreateAlignedStore(Val, Ptr, Align(4)); if (Ty->isFloatingPointTy()) str += "%f";