[opaque pointer types] Pass value type to LoadInst creation.

This cleans up all LoadInst creation in LLVM to explicitly pass the
value type rather than deriving it from the pointer's element-type.

Differential Revision: https://reviews.llvm.org/D57172

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@352911 91177308-0d34-0410-b5e6-96231b3b80d8
James Y Knight 2019-02-01 20:44:24 +00:00
parent e84538e816
commit 6c00b3f35f
78 changed files with 460 additions and 364 deletions
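
The shape of the change is the same across all 78 files: callers now spell out the loaded value's type instead of letting LoadInst derive it from the pointer operand's pointee type. Below is a minimal, self-contained sketch of the new call forms. It is illustrative only and not part of the patch; it assumes an LLVM tree from around this revision (alignment still passed as an unsigned, typed pointers still in use), and the module/function names are made up for the demo.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("loadinst-demo", Ctx);
  IRBuilder<> B(Ctx);

  // Build: i32 @load_i32(i32* %p)
  Type *I32Ty = B.getInt32Ty();
  FunctionType *FTy =
      FunctionType::get(I32Ty, {PointerType::getUnqual(I32Ty)}, false);
  Function *F =
      Function::Create(FTy, Function::ExternalLinkage, "load_i32", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  B.SetInsertPoint(BB);
  Value *Ptr = &*F->arg_begin();

  // Before this change the value type was implied by the pointer:
  //   B.CreateLoad(Ptr);   // result type read from Ptr's pointee type
  // After it, the value type is passed explicitly:
  LoadInst *L = B.CreateLoad(I32Ty, Ptr, "val");
  // Likewise for aligned loads (and for direct `new LoadInst(Ty, Ptr, ...)`):
  LoadInst *AL = B.CreateAlignedLoad(I32Ty, Ptr, /*Align=*/4, "val.aligned");
  (void)AL;

  B.CreateRet(L);
  M.print(outs(), nullptr);
  return 0;
}

With opaque pointer types the pointee type goes away entirely, so the explicitly supplied value type becomes the only source of the load's result type; that is why every creation site in the hunks below gains a leading type argument.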

View File

@ -4404,7 +4404,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
unsigned Align;
if (Error Err = parseAlignmentValue(Record[OpNum], Align))
return Err;
I = new LoadInst(Op, "", Record[OpNum+1], Align, Ordering, SSID);
I = new LoadInst(Ty, Op, "", Record[OpNum + 1], Align, Ordering, SSID);
InstructionList.push_back(I);
break;

View File

@ -381,7 +381,7 @@ LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
Addr->getType()->getPointerAddressSpace());
Value *NewAddr = Builder.CreateBitCast(Addr, PT);
auto *NewLI = Builder.CreateLoad(NewAddr);
auto *NewLI = Builder.CreateLoad(NewTy, NewAddr);
NewLI->setAlignment(LI->getAlignment());
NewLI->setVolatile(LI->isVolatile());
NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
@ -1769,8 +1769,8 @@ bool AtomicExpand::expandAtomicOpToLibcall(
// from call}
Type *FinalResultTy = I->getType();
Value *V = UndefValue::get(FinalResultTy);
Value *ExpectedOut =
Builder.CreateAlignedLoad(AllocaCASExpected, AllocaAlignment);
Value *ExpectedOut = Builder.CreateAlignedLoad(
CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
V = Builder.CreateInsertValue(V, ExpectedOut, 0);
V = Builder.CreateInsertValue(V, Result, 1);
@ -1780,7 +1780,8 @@ bool AtomicExpand::expandAtomicOpToLibcall(
if (UseSizedLibcall)
V = Builder.CreateBitOrPointerCast(Result, I->getType());
else {
V = Builder.CreateAlignedLoad(AllocaResult, AllocaAlignment);
V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
AllocaAlignment);
Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
}
I->replaceAllUsesWith(V);

View File

@ -213,7 +213,7 @@ bool LowerIntrinsics::DoLowering(Function &F, GCStrategy &S) {
}
case Intrinsic::gcread: {
// Replace a read barrier with a simple load.
Value *Ld = new LoadInst(CI->getArgOperand(1), "", CI);
Value *Ld = new LoadInst(CI->getType(), CI->getArgOperand(1), "", CI);
Ld->takeName(CI);
CI->replaceAllUsesWith(Ld);
CI->eraseFromParent();

View File

@ -1218,7 +1218,7 @@ bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
"interleaved.wide.ptrcast");
// Create the wide load and update the MemorySSA.
auto LI = Builder.CreateAlignedLoad(CI, InsertionPoint->getAlignment(),
auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlignment(),
"interleaved.wide.load");
auto MSSAU = MemorySSAUpdater(&MSSA);
MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(

View File

@ -44,7 +44,7 @@ static bool lowerLoadRelative(Function &F) {
Value *OffsetPtr =
B.CreateGEP(Int8Ty, CI->getArgOperand(0), CI->getArgOperand(1));
Value *OffsetPtrI32 = B.CreateBitCast(OffsetPtr, Int32PtrTy);
Value *OffsetI32 = B.CreateAlignedLoad(OffsetPtrI32, 4);
Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtrI32, 4);
Value *ResultPtr = B.CreateGEP(Int8Ty, CI->getArgOperand(0), OffsetI32);

View File

@ -371,7 +371,7 @@ Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
if (!StackGuardVar)
StackGuardVar =
F.getParent()->getOrInsertGlobal("__stack_chk_guard", StackPtrTy);
return IRB.CreateLoad(StackGuardVar, "StackGuard");
return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard");
}
void SafeStack::findInsts(Function &F,
@ -452,7 +452,8 @@ SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
++NumUnsafeStackRestorePoints;
IRB.SetInsertPoint(I->getNextNode());
Value *CurrentTop = DynamicTop ? IRB.CreateLoad(DynamicTop) : StaticTop;
Value *CurrentTop =
DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop) : StaticTop;
IRB.CreateStore(CurrentTop, UnsafeStackPtr);
}
@ -461,7 +462,7 @@ SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI,
AllocaInst *StackGuardSlot, Value *StackGuard) {
Value *V = IRB.CreateLoad(StackGuardSlot);
Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot);
Value *Cmp = IRB.CreateICmpNE(StackGuard, V);
auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(true);
@ -659,7 +660,8 @@ void SafeStack::moveDynamicAllocasToUnsafeStack(
uint64_t TySize = DL.getTypeAllocSize(Ty);
Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));
Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(UnsafeStackPtr), IntPtrTy);
Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(StackPtrTy, UnsafeStackPtr),
IntPtrTy);
SP = IRB.CreateSub(SP, Size);
// Align the SP value to satisfy the AllocaInst, type and stack alignments.
@ -697,7 +699,7 @@ void SafeStack::moveDynamicAllocasToUnsafeStack(
if (II->getIntrinsicID() == Intrinsic::stacksave) {
IRBuilder<> IRB(II);
Instruction *LI = IRB.CreateLoad(UnsafeStackPtr);
Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
LI->takeName(II);
II->replaceAllUsesWith(LI);
II->eraseFromParent();
@ -792,7 +794,7 @@ bool SafeStack::run() {
// Load the current stack pointer (we'll also use it as a base pointer).
// FIXME: use a dedicated register for it ?
Instruction *BasePointer =
IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr");
IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
assert(BasePointer->getType() == StackPtrTy);
AllocaInst *StackGuardSlot = nullptr;

View File

@ -143,7 +143,7 @@ static void scalarizeMaskedLoad(CallInst *CI) {
// Short-cut if the mask is all-true.
if (isa<Constant>(Mask) && cast<Constant>(Mask)->isAllOnesValue()) {
Value *NewI = Builder.CreateAlignedLoad(Ptr, AlignVal);
Value *NewI = Builder.CreateAlignedLoad(VecType, Ptr, AlignVal);
CI->replaceAllUsesWith(NewI);
CI->eraseFromParent();
return;
@ -166,7 +166,7 @@ static void scalarizeMaskedLoad(CallInst *CI) {
continue;
Value *Gep =
Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
VResult =
Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
}
@ -198,7 +198,7 @@ static void scalarizeMaskedLoad(CallInst *CI) {
Value *Gep =
Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
Value *NewVResult = Builder.CreateInsertElement(VResult, Load,
Builder.getInt32(Idx));
@ -366,6 +366,7 @@ static void scalarizeMaskedGather(CallInst *CI) {
Value *Src0 = CI->getArgOperand(3);
VectorType *VecType = cast<VectorType>(CI->getType());
Type *EltTy = VecType->getElementType();
IRBuilder<> Builder(CI->getContext());
Instruction *InsertPt = CI;
@ -387,7 +388,7 @@ static void scalarizeMaskedGather(CallInst *CI) {
Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
"Ptr" + Twine(Idx));
LoadInst *Load =
Builder.CreateAlignedLoad(Ptr, AlignVal, "Load" + Twine(Idx));
Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
VResult = Builder.CreateInsertElement(
VResult, Load, Builder.getInt32(Idx), "Res" + Twine(Idx));
}
@ -418,7 +419,7 @@ static void scalarizeMaskedGather(CallInst *CI) {
Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
"Ptr" + Twine(Idx));
LoadInst *Load =
Builder.CreateAlignedLoad(Ptr, AlignVal, "Load" + Twine(Idx));
Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
Value *NewVResult = Builder.CreateInsertElement(VResult, Load,
Builder.getInt32(Idx),
"Res" + Twine(Idx));

View File

@ -312,7 +312,8 @@ bool ShadowStackGCLowering::runOnFunction(Function &F) {
AtEntry.SetInsertPoint(IP->getParent(), IP);
// Initialize the map pointer and load the current head of the shadow stack.
Instruction *CurrentHead = AtEntry.CreateLoad(Head, "gc_currhead");
Instruction *CurrentHead =
AtEntry.CreateLoad(StackEntryTy->getPointerTo(), Head, "gc_currhead");
Instruction *EntryMapPtr = CreateGEP(Context, AtEntry, ConcreteStackEntryTy,
StackEntry, 0, 1, "gc_frame.map");
AtEntry.CreateStore(FrameMap, EntryMapPtr);
@ -353,7 +354,8 @@ bool ShadowStackGCLowering::runOnFunction(Function &F) {
Instruction *EntryNextPtr2 =
CreateGEP(Context, *AtExit, ConcreteStackEntryTy, StackEntry, 0, 0,
"gc_frame.next");
Value *SavedHead = AtExit->CreateLoad(EntryNextPtr2, "gc_savedhead");
Value *SavedHead = AtExit->CreateLoad(StackEntryTy->getPointerTo(),
EntryNextPtr2, "gc_savedhead");
AtExit->CreateStore(SavedHead, Head);
}

View File

@ -189,14 +189,16 @@ Value *SjLjEHPrepare::setupFunctionContext(Function &F,
Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 2, "__data");
// The exception values come back in context->__data[0].
Type *Int32Ty = Type::getInt32Ty(F.getContext());
Value *ExceptionAddr = Builder.CreateConstGEP2_32(doubleUnderDataTy, FCData,
0, 0, "exception_gep");
Value *ExnVal = Builder.CreateLoad(ExceptionAddr, true, "exn_val");
Value *ExnVal = Builder.CreateLoad(Int32Ty, ExceptionAddr, true, "exn_val");
ExnVal = Builder.CreateIntToPtr(ExnVal, Builder.getInt8PtrTy());
Value *SelectorAddr = Builder.CreateConstGEP2_32(doubleUnderDataTy, FCData,
0, 1, "exn_selector_gep");
Value *SelVal = Builder.CreateLoad(SelectorAddr, true, "exn_selector_val");
Value *SelVal =
Builder.CreateLoad(Int32Ty, SelectorAddr, true, "exn_selector_val");
substituteLPadValues(LPI, ExnVal, SelVal);
}

View File

@ -322,7 +322,7 @@ static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
IRBuilder<> &B,
bool *SupportsSelectionDAGSP = nullptr) {
if (Value *Guard = TLI->getIRStackGuard(B))
return B.CreateLoad(Guard, true, "StackGuard");
return B.CreateLoad(B.getInt8PtrTy(), Guard, true, "StackGuard");
// Use SelectionDAG SSP handling, since there isn't an IR guard.
//
@ -417,7 +417,7 @@ bool StackProtector::InsertStackProtectors() {
// Generate the function-based epilogue instrumentation.
// The target provides a guard check function, generate a call to it.
IRBuilder<> B(RI);
LoadInst *Guard = B.CreateLoad(AI, true, "Guard");
LoadInst *Guard = B.CreateLoad(B.getInt8PtrTy(), AI, true, "Guard");
CallInst *Call = B.CreateCall(GuardCheck, {Guard});
Call->setAttributes(GuardCheck->getAttributes());
Call->setCallingConv(GuardCheck->getCallingConv());
@ -472,7 +472,7 @@ bool StackProtector::InsertStackProtectors() {
// Generate the stack protector instructions in the old basic block.
IRBuilder<> B(BB);
Value *Guard = getStackGuard(TLI, M, B);
LoadInst *LI2 = B.CreateLoad(AI, true);
LoadInst *LI2 = B.CreateLoad(B.getInt8PtrTy(), AI, true);
Value *Cmp = B.CreateICmpEQ(Guard, LI2);
auto SuccessProb =
BranchProbabilityInfo::getBranchProbStackProtector(true);

View File

@ -344,7 +344,8 @@ void WasmEHPrepare::prepareEHPad(BasicBlock *BB, bool NeedLSDA,
PersCI->setDoesNotThrow();
// Pseudocode: int selector = __wasm.landingpad_context.selector;
Instruction *Selector = IRB.CreateLoad(SelectorField, "selector");
Instruction *Selector =
IRB.CreateLoad(IRB.getInt32Ty(), SelectorField, "selector");
// Replace the return value from wasm.get.ehselector() with the selector value
// loaded from __wasm_lpad_context.selector.

View File

@ -1079,7 +1079,8 @@ AllocaInst *WinEHPrepare::insertPHILoads(PHINode *PN, Function &F) {
SpillSlot = new AllocaInst(PN->getType(), DL->getAllocaAddrSpace(), nullptr,
Twine(PN->getName(), ".wineh.spillslot"),
&F.getEntryBlock().front());
Value *V = new LoadInst(SpillSlot, Twine(PN->getName(), ".wineh.reload"),
Value *V = new LoadInst(PN->getType(), SpillSlot,
Twine(PN->getName(), ".wineh.reload"),
&*PHIBlock->getFirstInsertionPt());
PN->replaceAllUsesWith(V);
return SpillSlot;
@ -1221,13 +1222,15 @@ void WinEHPrepare::replaceUseWithLoad(Value *V, Use &U, AllocaInst *&SpillSlot,
Value *&Load = Loads[IncomingBlock];
// Insert the load into the predecessor block
if (!Load)
Load = new LoadInst(SpillSlot, Twine(V->getName(), ".wineh.reload"),
Load = new LoadInst(V->getType(), SpillSlot,
Twine(V->getName(), ".wineh.reload"),
/*Volatile=*/false, IncomingBlock->getTerminator());
U.set(Load);
} else {
// Reload right before the old use.
auto *Load = new LoadInst(SpillSlot, Twine(V->getName(), ".wineh.reload"),
auto *Load = new LoadInst(V->getType(), SpillSlot,
Twine(V->getName(), ".wineh.reload"),
/*Volatile=*/false, UsingInst);
U.set(Load);
}

View File

@ -235,14 +235,13 @@ void makeStub(Function &F, Value &ImplPointer) {
assert(F.isDeclaration() && "Can't turn a definition into a stub.");
assert(F.getParent() && "Function isn't in a module.");
Module &M = *F.getParent();
FunctionType *FTy = F.getFunctionType();
BasicBlock *EntryBlock = BasicBlock::Create(M.getContext(), "entry", &F);
IRBuilder<> Builder(EntryBlock);
LoadInst *ImplAddr = Builder.CreateLoad(&ImplPointer);
LoadInst *ImplAddr = Builder.CreateLoad(F.getType(), &ImplPointer);
std::vector<Value*> CallArgs;
for (auto &A : F.args())
CallArgs.push_back(&A);
CallInst *Call = Builder.CreateCall(FTy, ImplAddr, CallArgs);
CallInst *Call = Builder.CreateCall(F.getFunctionType(), ImplAddr, CallArgs);
Call->setTailCall();
Call->setAttributes(F.getAttributes());
if (F.getReturnType()->isVoidTy())

View File

@ -53,7 +53,8 @@ Value *RandomIRBuilder::newSource(BasicBlock &BB, ArrayRef<Instruction *> Insts,
IP = ++I->getIterator();
assert(IP != BB.end() && "guaranteed by the findPointer");
}
auto *NewLoad = new LoadInst(Ptr, "L", &*IP);
auto *NewLoad = new LoadInst(
cast<PointerType>(Ptr->getType())->getElementType(), Ptr, "L", &*IP);
// Only sample this load if it really matches the descriptor
if (Pred.matches(Srcs, NewLoad))

View File

@ -1162,16 +1162,16 @@ static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
Value *Ptr, Value *Passthru, Value *Mask,
bool Aligned) {
Type *ValTy = Passthru->getType();
// Cast the pointer to the right type.
Ptr = Builder.CreateBitCast(Ptr,
llvm::PointerType::getUnqual(Passthru->getType()));
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
unsigned Align =
Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;
// If the mask is all ones just emit a regular store.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
return Builder.CreateAlignedLoad(Ptr, Align);
return Builder.CreateAlignedLoad(ValTy, Ptr, Align);
// Convert the mask from an integer type to a vector of i1.
unsigned NumElts = Passthru->getType()->getVectorNumElements();
@ -2199,7 +2199,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Type *VT = VectorType::get(EltTy, NumSrcElts);
Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
PointerType::getUnqual(VT));
Value *Load = Builder.CreateAlignedLoad(Op, 1);
Value *Load = Builder.CreateAlignedLoad(VT, Op, 1);
if (NumSrcElts == 2)
Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
{ 0, 1, 0, 1 });
@ -2945,7 +2945,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Convert the type of the pointer to a pointer to the stored type.
Value *BC =
Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast");
LoadInst *LI = Builder.CreateAlignedLoad(BC, VTy->getBitWidth() / 8);
LoadInst *LI = Builder.CreateAlignedLoad(VTy, BC, VTy->getBitWidth() / 8);
LI->setMetadata(M->getMDKindID("nontemporal"), Node);
Rep = LI;
} else if (IsX86 &&

View File

@ -493,7 +493,8 @@ void AArch64PromoteConstant::insertDefinitions(Function &F,
for (const auto &IPI : InsertPts) {
// Create the load of the global variable.
IRBuilder<> Builder(IPI.first);
LoadInst *LoadedCst = Builder.CreateLoad(&PromotedGV);
LoadInst *LoadedCst =
Builder.CreateLoad(PromotedGV.getValueType(), &PromotedGV);
LLVM_DEBUG(dbgs() << "**********\n");
LLVM_DEBUG(dbgs() << "New def: ");
LLVM_DEBUG(LoadedCst->print(dbgs()));

View File

@ -806,7 +806,7 @@ bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
Type *I32Ty = Builder.getInt32Ty();
Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
Value *BitCast= Builder.CreateBitCast(I.getPointerOperand(), PT);
LoadInst *WidenLoad = Builder.CreateLoad(BitCast);
LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, BitCast);
WidenLoad->copyMetadata(I);
// If we have range metadata, we need to convert the type, and not make

View File

@ -1357,12 +1357,12 @@ bool AMDGPULibCalls::fold_sincos(CallInst *CI, IRBuilder<> &B,
if (!isSin) { // CI->cos, UI->sin
B.SetInsertPoint(&*ItOld);
UI->replaceAllUsesWith(&*Call);
Instruction *Reload = B.CreateLoad(Alloc);
Instruction *Reload = B.CreateLoad(Alloc->getAllocatedType(), Alloc);
CI->replaceAllUsesWith(Reload);
UI->eraseFromParent();
CI->eraseFromParent();
} else { // CI->sin, UI->cos
Instruction *Reload = B.CreateLoad(Alloc);
Instruction *Reload = B.CreateLoad(Alloc->getAllocatedType(), Alloc);
UI->replaceAllUsesWith(Reload);
CI->replaceAllUsesWith(Call);
UI->eraseFromParent();

View File

@ -132,6 +132,7 @@ bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
KernArgBaseAlign);
Value *ArgPtr;
Type *AdjustedArgTy;
if (DoShiftOpt) { // FIXME: Handle aggregate types
// Since we don't have sub-dword scalar loads, avoid doing an extload by
// loading earlier than the argument address, and extracting the relevant
@ -144,25 +145,25 @@ bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
KernArgSegment,
AlignDownOffset,
Arg.getName() + ".kernarg.offset.align.down");
ArgPtr = Builder.CreateBitCast(ArgPtr,
Builder.getInt32Ty()->getPointerTo(AS),
ArgPtr->getName() + ".cast");
AdjustedArgTy = Builder.getInt32Ty();
} else {
ArgPtr = Builder.CreateConstInBoundsGEP1_64(
KernArgSegment,
EltOffset,
Arg.getName() + ".kernarg.offset");
ArgPtr = Builder.CreateBitCast(ArgPtr, ArgTy->getPointerTo(AS),
ArgPtr->getName() + ".cast");
AdjustedArgTy = ArgTy;
}
if (IsV3 && Size >= 32) {
V4Ty = VectorType::get(VT->getVectorElementType(), 4);
// Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
ArgPtr = Builder.CreateBitCast(ArgPtr, V4Ty->getPointerTo(AS));
AdjustedArgTy = V4Ty;
}
LoadInst *Load = Builder.CreateAlignedLoad(ArgPtr, AdjustedAlign);
ArgPtr = Builder.CreateBitCast(ArgPtr, AdjustedArgTy->getPointerTo(AS),
ArgPtr->getName() + ".cast");
LoadInst *Load =
Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));
MDBuilder MDB(Ctx);

View File

@ -245,10 +245,10 @@ AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
// 32-bit and extract sequence is already present, and it is probably easier
// to CSE this. The loads should be mergable later anyway.
Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);
LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, 4);
Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);
LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, 4);
MDNode *MD = MDNode::get(Mod->getContext(), None);
LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
@ -426,7 +426,7 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
Value *VecValue = Builder.CreateLoad(BitCast);
Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
Inst->replaceAllUsesWith(ExtractElement);
Inst->eraseFromParent();
@ -441,7 +441,7 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
Value *Ptr = SI->getPointerOperand();
Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
Value *VecValue = Builder.CreateLoad(BitCast);
Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
Value *NewVecValue = Builder.CreateInsertElement(VecValue,
SI->getValueOperand(),
Index);

View File

@ -688,12 +688,12 @@ bool ARMParallelDSP::MatchSMLAD(Function &F) {
}
static LoadInst *CreateLoadIns(IRBuilder<NoFolder> &IRB, LoadInst &BaseLoad,
const Type *LoadTy) {
Type *LoadTy) {
const unsigned AddrSpace = BaseLoad.getPointerAddressSpace();
Value *VecPtr = IRB.CreateBitCast(BaseLoad.getPointerOperand(),
LoadTy->getPointerTo(AddrSpace));
return IRB.CreateAlignedLoad(VecPtr, BaseLoad.getAlignment());
return IRB.CreateAlignedLoad(LoadTy, VecPtr, BaseLoad.getAlignment());
}
Instruction *ARMParallelDSP::CreateSMLADCall(LoadInst *VecLd0, LoadInst *VecLd1,
@ -709,7 +709,7 @@ Instruction *ARMParallelDSP::CreateSMLADCall(LoadInst *VecLd0, LoadInst *VecLd1,
++BasicBlock::iterator(InsertAfter));
// Replace the reduction chain with an intrinsic call
const Type *Ty = IntegerType::get(M->getContext(), 32);
Type *Ty = IntegerType::get(M->getContext(), 32);
LoadInst *NewLd0 = CreateLoadIns(Builder, VecLd0[0], Ty);
LoadInst *NewLd1 = CreateLoadIns(Builder, VecLd1[0], Ty);
Value* Args[] = { NewLd0, NewLd1, Acc };

View File

@ -169,7 +169,8 @@ void NVPTXLowerArgs::handleByValParam(Argument *Arg) {
Value *ArgInParam = new AddrSpaceCastInst(
Arg, PointerType::get(StructType, ADDRESS_SPACE_PARAM), Arg->getName(),
FirstInst);
LoadInst *LI = new LoadInst(ArgInParam, Arg->getName(), FirstInst);
LoadInst *LI =
new LoadInst(StructType, ArgInParam, Arg->getName(), FirstInst);
new StoreInst(LI, AllocA, FirstInst);
}

View File

@ -445,7 +445,8 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) {
// Post-invoke
// %__THREW__.val = __THREW__; __THREW__ = 0;
Value *Threw = IRB.CreateLoad(ThrewGV, ThrewGV->getName() + ".val");
Value *Threw =
IRB.CreateLoad(IRB.getInt32Ty(), ThrewGV, ThrewGV->getName() + ".val");
IRB.CreateStore(IRB.getInt32(0), ThrewGV);
return Threw;
}
@ -548,8 +549,8 @@ void WebAssemblyLowerEmscriptenEHSjLj::wrapTestSetjmp(
BasicBlock *ElseBB1 = BasicBlock::Create(C, "if.else1", F);
BasicBlock *EndBB1 = BasicBlock::Create(C, "if.end", F);
Value *ThrewCmp = IRB.CreateICmpNE(Threw, IRB.getInt32(0));
Value *ThrewValue =
IRB.CreateLoad(ThrewValueGV, ThrewValueGV->getName() + ".val");
Value *ThrewValue = IRB.CreateLoad(IRB.getInt32Ty(), ThrewValueGV,
ThrewValueGV->getName() + ".val");
Value *ThrewValueCmp = IRB.CreateICmpNE(ThrewValue, IRB.getInt32(0));
Value *Cmp1 = IRB.CreateAnd(ThrewCmp, ThrewValueCmp, "cmp1");
IRB.CreateCondBr(Cmp1, ThenBB1, ElseBB1);
@ -561,8 +562,8 @@ void WebAssemblyLowerEmscriptenEHSjLj::wrapTestSetjmp(
BasicBlock *EndBB2 = BasicBlock::Create(C, "if.end2", F);
Value *ThrewInt = IRB.CreateIntToPtr(Threw, Type::getInt32PtrTy(C),
Threw->getName() + ".i32p");
Value *LoadedThrew =
IRB.CreateLoad(ThrewInt, ThrewInt->getName() + ".loaded");
Value *LoadedThrew = IRB.CreateLoad(IRB.getInt32Ty(), ThrewInt,
ThrewInt->getName() + ".loaded");
Value *ThenLabel = IRB.CreateCall(
TestSetjmpF, {LoadedThrew, SetjmpTable, SetjmpTableSize}, "label");
Value *Cmp2 = IRB.CreateICmpEQ(ThenLabel, IRB.getInt32(0));

View File

@ -25362,7 +25362,6 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// We must restrict the ordering to avoid generating loads with Release or
// ReleaseAcquire orderings.
auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
auto Ptr = AI->getPointerOperand();
// Before the load we need a fence. Here is an example lifted from
// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
@ -25397,8 +25396,9 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
Builder.CreateCall(MFence, {});
// Finally we can emit the atomic load.
LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
AI->getType()->getPrimitiveSizeInBits());
LoadInst *Loaded =
Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
AI->getType()->getPrimitiveSizeInBits());
Loaded->setAtomic(Order, SSID);
AI->replaceAllUsesWith(Loaded);
AI->eraseFromParent();

View File

@ -193,7 +193,7 @@ void X86InterleavedAccessGroup::decompose(
// Decompose the load instruction.
LoadInst *LI = cast<LoadInst>(VecInst);
Type *VecBasePtrTy = SubVecTy->getPointerTo(LI->getPointerAddressSpace());
Type *VecBaseTy, *VecBasePtrTy;
Value *VecBasePtr;
unsigned int NumLoads = NumSubVectors;
// In the case of stride 3 with a vector of 32 elements load the information
@ -201,18 +201,21 @@ void X86InterleavedAccessGroup::decompose(
// [0,1...,VF/2-1,VF/2+VF,VF/2+VF+1,...,2VF-1]
unsigned VecLength = DL.getTypeSizeInBits(VecWidth);
if (VecLength == 768 || VecLength == 1536) {
Type *VecTran =
VectorType::get(Type::getInt8Ty(LI->getContext()), 16)->getPointerTo();
VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecTran);
NumLoads = NumSubVectors * (VecLength / 384);
} else
VecBaseTy = VectorType::get(Type::getInt8Ty(LI->getContext()), 16);
VecBasePtrTy = VecBaseTy->getPointerTo(LI->getPointerAddressSpace());
VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
NumLoads = NumSubVectors * (VecLength / 384);
} else {
VecBaseTy = SubVecTy;
VecBasePtrTy = VecBaseTy->getPointerTo(LI->getPointerAddressSpace());
VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
}
// Generate N loads of T type.
for (unsigned i = 0; i < NumLoads; i++) {
// TODO: Support inbounds GEP.
Value *NewBasePtr = Builder.CreateGEP(VecBasePtr, Builder.getInt32(i));
Instruction *NewLoad =
Builder.CreateAlignedLoad(NewBasePtr, LI->getAlignment());
Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlignment());
DecomposedVectors.push_back(NewLoad);
}
}

View File

@ -432,7 +432,7 @@ void WinEHStatePass::linkExceptionRegistration(IRBuilder<> &Builder,
// Next = [fs:00]
Constant *FSZero =
Constant::getNullValue(LinkTy->getPointerTo()->getPointerTo(257));
Value *Next = Builder.CreateLoad(FSZero);
Value *Next = Builder.CreateLoad(LinkTy->getPointerTo(), FSZero);
Builder.CreateStore(Next, Builder.CreateStructGEP(LinkTy, Link, 0));
// [fs:00] = Link
Builder.CreateStore(Link, FSZero);
@ -447,8 +447,8 @@ void WinEHStatePass::unlinkExceptionRegistration(IRBuilder<> &Builder) {
}
Type *LinkTy = getEHLinkRegistrationType();
// [fs:00] = Link->Next
Value *Next =
Builder.CreateLoad(Builder.CreateStructGEP(LinkTy, Link, 0));
Value *Next = Builder.CreateLoad(LinkTy->getPointerTo(),
Builder.CreateStructGEP(LinkTy, Link, 0));
Constant *FSZero =
Constant::getNullValue(LinkTy->getPointerTo()->getPointerTo(257));
Builder.CreateStore(Next, FSZero);
@ -783,7 +783,7 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
if (InCleanup) {
Value *StateField =
Builder.CreateStructGEP(nullptr, RegNode, StateFieldIndex);
State = Builder.CreateLoad(StateField);
State = Builder.CreateLoad(Builder.getInt32Ty(), StateField);
} else {
State = Builder.getInt32(getStateForCallSite(BlockColors, FuncInfo, CS));
}

View File

@ -49,7 +49,7 @@ static void lowerSubFn(IRBuilder<> &Builder, CoroSubFnInst *SubFn) {
Builder.SetInsertPoint(SubFn);
auto *FramePtr = Builder.CreateBitCast(FrameRaw, FramePtrTy);
auto *Gep = Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, Index);
auto *Load = Builder.CreateLoad(Gep);
auto *Load = Builder.CreateLoad(FrameTy->getElementType(Index), Gep);
SubFn->replaceAllUsesWith(Load);
}

View File

@ -97,7 +97,7 @@ void Lowerer::lowerCoroDone(IntrinsicInst *II) {
Builder.SetInsertPoint(II);
auto *BCI = Builder.CreateBitCast(Operand, FramePtrTy);
auto *Gep = Builder.CreateConstInBoundsGEP1_32(FrameTy, BCI, 0);
auto *Load = Builder.CreateLoad(Gep);
auto *Load = Builder.CreateLoad(FrameTy, Gep);
auto *Cond = Builder.CreateICmpEQ(Load, NullPtr);
II->replaceAllUsesWith(Cond);

View File

@ -471,10 +471,10 @@ static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
auto *CB = Shape.CoroBegin;
IRBuilder<> Builder(CB->getNextNode());
PointerType *FramePtrTy = Shape.FrameTy->getPointerTo();
StructType *FrameTy = Shape.FrameTy;
PointerType *FramePtrTy = FrameTy->getPointerTo();
auto *FramePtr =
cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
Type *FrameTy = FramePtrTy->getElementType();
Value *CurrentValue = nullptr;
BasicBlock *CurrentBlock = nullptr;
@ -501,7 +501,7 @@ static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
Twine(".reload.addr"));
return isa<AllocaInst>(CurrentValue)
? G
: Builder.CreateLoad(G,
: Builder.CreateLoad(FrameTy->getElementType(Index), G,
CurrentValue->getName() + Twine(".reload"));
};

View File

@ -93,7 +93,7 @@ static BasicBlock *createResumeEntryBlock(Function &F, coro::Shape &Shape) {
auto *FrameTy = Shape.FrameTy;
auto *GepIndex = Builder.CreateConstInBoundsGEP2_32(
FrameTy, FramePtr, 0, coro::Shape::IndexField, "index.addr");
auto *Index = Builder.CreateLoad(GepIndex, "index");
auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
auto *Switch =
Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
Shape.ResumeSwitch = Switch;
@ -229,7 +229,8 @@ static void handleFinalSuspend(IRBuilder<> &Builder, Value *FramePtr,
Builder.SetInsertPoint(OldSwitchBB->getTerminator());
auto *GepIndex = Builder.CreateConstInBoundsGEP2_32(Shape.FrameTy, FramePtr,
0, 0, "ResumeFn.addr");
auto *Load = Builder.CreateLoad(GepIndex);
auto *Load = Builder.CreateLoad(
Shape.FrameTy->getElementType(coro::Shape::ResumeField), GepIndex);
auto *NullPtr =
ConstantPointerNull::get(cast<PointerType>(Load->getType()));
auto *Cond = Builder.CreateICmpEQ(Load, NullPtr);

View File

@ -263,7 +263,8 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
Value *Idx = GetElementPtrInst::Create(
STy, *AI, Idxs, (*AI)->getName() + "." + Twine(i), Call);
// TODO: Tell AA about the new values?
Args.push_back(new LoadInst(Idx, Idx->getName() + ".val", Call));
Args.push_back(new LoadInst(STy->getElementType(i), Idx,
Idx->getName() + ".val", Call));
ArgAttrVec.push_back(AttributeSet());
}
} else if (!I->use_empty()) {
@ -299,7 +300,8 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
}
// Since we're replacing a load make sure we take the alignment
// of the previous load.
LoadInst *newLoad = new LoadInst(V, V->getName() + ".val", Call);
LoadInst *newLoad =
new LoadInst(OrigLoad->getType(), V, V->getName() + ".val", Call);
newLoad->setAlignment(OrigLoad->getAlignment());
// Transfer the AA info too.
AAMDNodes AAInfo;

View File

@ -905,9 +905,10 @@ OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
// Replace the cmp X, 0 with a use of the bool value.
// Sink the load to where the compare was, if atomic rules allow us to.
Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
Value *LV = new LoadInst(InitBool->getValueType(), InitBool,
InitBool->getName() + ".val", false, 0,
LI->getOrdering(), LI->getSyncScopeID(),
LI->isUnordered() ? (Instruction*)ICI : LI);
LI->isUnordered() ? (Instruction *)ICI : LI);
InitBoolUsed = true;
switch (ICI->getPredicate()) {
default: llvm_unreachable("Unknown ICmp Predicate!");
@ -1040,7 +1041,8 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
}
// Insert a load from the global, and use it instead of the malloc.
Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
Value *NL =
new LoadInst(GV->getValueType(), GV, GV->getName() + ".val", InsertPt);
U->replaceUsesOfWith(Alloc, NL);
}
}
@ -1163,10 +1165,10 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
// This is a scalarized version of the load from the global. Just create
// a new Load of the scalarized global.
Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
InsertedScalarizedValues,
PHIsToRewrite),
LI->getName()+".f"+Twine(FieldNo), LI);
Value *V = GetHeapSROAValue(LI->getOperand(0), FieldNo,
InsertedScalarizedValues, PHIsToRewrite);
Result = new LoadInst(V->getType()->getPointerElementType(), V,
LI->getName() + ".f" + Twine(FieldNo), LI);
} else {
PHINode *PN = cast<PHINode>(V);
// PN's type is pointer to struct. Make a new PHI of pointer to struct
@ -1356,7 +1358,9 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
// Within the NullPtrBlock, we need to emit a comparison and branch for each
// pointer, because some may be null while others are not.
for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
Value *GVVal =
new LoadInst(cast<GlobalVariable>(FieldGlobals[i])->getValueType(),
FieldGlobals[i], "tmp", NullPtrBlock);
Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
Constant::getNullValue(GVVal->getType()));
BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
@ -1700,7 +1704,8 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
assert(LI->getOperand(0) == GV && "Not a copy!");
// Insert a new load, to preserve the saved value.
StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
StoreVal = new LoadInst(NewGV->getValueType(), NewGV,
LI->getName() + ".b", false, 0,
LI->getOrdering(), LI->getSyncScopeID(), LI);
} else {
assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
@ -1716,8 +1721,9 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
} else {
// Change the load into a load of bool then a select.
LoadInst *LI = cast<LoadInst>(UI);
LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
LI->getOrdering(), LI->getSyncScopeID(), LI);
LoadInst *NLI =
new LoadInst(NewGV->getValueType(), NewGV, LI->getName() + ".b",
false, 0, LI->getOrdering(), LI->getSyncScopeID(), LI);
Instruction *NSI;
if (IsOneZero)
NSI = new ZExtInst(NLI, LI->getType(), "", LI);

View File

@ -618,7 +618,7 @@ Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
}
Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
Value *Byte = B.CreateLoad(ByteAddr);
Value *Byte = B.CreateLoad(Int8Ty, ByteAddr);
Value *ByteAndMask =
B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));

View File

@ -1183,7 +1183,7 @@ void DevirtModule::applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
Value *Addr =
B.CreateGEP(Int8Ty, B.CreateBitCast(Call.VTable, Int8PtrTy), Byte);
if (RetType->getBitWidth() == 1) {
Value *Bits = B.CreateLoad(Addr);
Value *Bits = B.CreateLoad(Int8Ty, Addr);
Value *BitsAndBit = B.CreateAnd(Bits, Bit);
auto IsBitSet = B.CreateICmpNE(BitsAndBit, ConstantInt::get(Int8Ty, 0));
Call.replaceAndErase("virtual-const-prop-1-bit", FnName, RemarksEnabled,

View File

@ -171,7 +171,7 @@ Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
LoadInst *L = Builder.CreateLoad(Src);
LoadInst *L = Builder.CreateLoad(IntType, Src);
// Alignment from the mem intrinsic will be better, so use it.
L->setAlignment(CopySrcAlign);
if (CopyMD)
@ -1182,7 +1182,8 @@ static Value *simplifyMaskedLoad(const IntrinsicInst &II,
if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
Value *LoadPtr = II.getArgOperand(0);
unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
return Builder.CreateAlignedLoad(LoadPtr, Alignment, "unmaskedload");
return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
"unmaskedload");
}
return nullptr;
@ -1499,7 +1500,7 @@ static Value *simplifyNeonVld1(const IntrinsicInst &II,
auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
PointerType::get(II.getType(), 0));
return Builder.CreateAlignedLoad(BCastInst, Alignment);
return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
}
// Returns true iff the 2 intrinsics have the same operands, limiting the
@ -2300,7 +2301,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
&DT) >= 16) {
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
return new LoadInst(II->getType(), Ptr);
}
break;
case Intrinsic::ppc_vsx_lxvw4x:
@ -2308,7 +2309,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Turn PPC VSX loads into normal loads.
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr, Twine(""), false, 1);
return new LoadInst(II->getType(), Ptr, Twine(""), false, 1);
}
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
@ -2336,7 +2337,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
II->getType()->getVectorNumElements());
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(VTy));
Value *Load = Builder.CreateLoad(Ptr);
Value *Load = Builder.CreateLoad(VTy, Ptr);
return new FPExtInst(Load, II->getType());
}
break;
@ -2346,7 +2347,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
&DT) >= 32) {
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
return new LoadInst(II->getType(), Ptr);
}
break;
case Intrinsic::ppc_qpx_qvstfs:

View File

@ -298,7 +298,7 @@ void PointerReplacer::replace(Instruction *I) {
if (auto *LT = dyn_cast<LoadInst>(I)) {
auto *V = getReplacement(LT->getPointerOperand());
assert(V && "Operand not replaced");
auto *NewI = new LoadInst(V);
auto *NewI = new LoadInst(I->getType(), V);
NewI->takeName(LT);
IC.InsertNewInstWith(NewI, *LT);
IC.replaceInstUsesWith(*LT, NewI);
@ -465,7 +465,7 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewT
NewPtr = IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));
LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
NewTy, NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
MDBuilder MDB(NewLoad->getContext());
for (const auto &MDPair : MD) {
@ -724,7 +724,8 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
Name + ".elt");
auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
EltAlign, Name + ".unpack");
// Propagate AA metadata. It'll still be valid on the narrowed load.
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
@ -774,8 +775,8 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
};
auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
Name + ".elt");
auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
Name + ".unpack");
auto *L = IC.Builder.CreateAlignedLoad(
AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
L->setAAMetadata(AAMD);
@ -1065,10 +1066,12 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
unsigned Align = LI.getAlignment();
if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
SI->getOperand(1)->getName()+".val");
LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
SI->getOperand(2)->getName()+".val");
LoadInst *V1 =
Builder.CreateLoad(LI.getType(), SI->getOperand(1),
SI->getOperand(1)->getName() + ".val");
LoadInst *V2 =
Builder.CreateLoad(LI.getType(), SI->getOperand(2),
SI->getOperand(2)->getName() + ".val");
assert(LI.isUnordered() && "implied by above");
V1->setAlignment(Align);
V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());

View File

@ -595,7 +595,8 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
Value *InVal = FirstLI->getOperand(0);
NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
LoadInst *NewLI = new LoadInst(NewPN, "", isVolatile, LoadAlignment);
LoadInst *NewLI =
new LoadInst(FirstLI->getType(), NewPN, "", isVolatile, LoadAlignment);
unsigned KnownIDs[] = {
LLVMContext::MD_tbaa,

View File

@ -2686,7 +2686,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
Builder.SetInsertPoint(L);
Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
L->getPointerOperand(), Indices);
Instruction *NL = Builder.CreateLoad(GEP);
Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
// Whatever aliasing information we had for the orignal load must also
// hold for the smaller load, so propagate the annotations.
AAMDNodes Nodes;

View File

@ -949,8 +949,9 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
DynamicAreaOffset);
}
IRB.CreateCall(AsanAllocasUnpoisonFunc,
{IRB.CreateLoad(DynamicAllocaLayout), DynamicAreaPtr});
IRB.CreateCall(
AsanAllocasUnpoisonFunc,
{IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
}
// Unpoison dynamic allocas redzones.
@ -1552,7 +1553,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Value *ShadowPtr = memToShadow(AddrLong, IRB);
Value *CmpVal = Constant::getNullValue(ShadowTy);
Value *ShadowValue =
IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
size_t Granularity = 1ULL << Mapping.Scale;
@ -2444,7 +2445,7 @@ void AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
} else {
Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
kAsanShadowMemoryDynamicAddress, IntptrTy);
LocalDynamicShadow = IRB.CreateLoad(GlobalDynamicAddress);
LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
}
}
@ -2948,9 +2949,9 @@ void FunctionStackPoisoner::processStaticAllocas() {
// void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize);
Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
Value *UseAfterReturnIsEnabled =
IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUseAfterReturn),
Constant::getNullValue(IRB.getInt32Ty()));
Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
Constant::getNullValue(IRB.getInt32Ty()));
Instruction *Term =
SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
IRBuilder<> IRBIf(Term);
@ -3084,7 +3085,7 @@ void FunctionStackPoisoner::processStaticAllocas() {
FakeStack,
ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
Value *SavedFlagPtr = IRBPoison.CreateLoad(
IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
IRBPoison.CreateStore(
Constant::getNullValue(IRBPoison.getInt8Ty()),
IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));

View File

@ -1033,7 +1033,8 @@ Value *DFSanFunction::getShadow(Value *V) {
DFS.ArgTLS ? &*F->getEntryBlock().begin()
: cast<Instruction>(ArgTLSPtr)->getNextNode();
IRBuilder<> IRB(ArgTLSPos);
Shadow = IRB.CreateLoad(getArgTLS(A->getArgNo(), ArgTLSPos));
Shadow =
IRB.CreateLoad(DFS.ShadowTy, getArgTLS(A->getArgNo(), ArgTLSPos));
break;
}
case DataFlowSanitizer::IA_Args: {
@ -1183,7 +1184,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
const auto i = AllocaShadowMap.find(AI);
if (i != AllocaShadowMap.end()) {
IRBuilder<> IRB(Pos);
return IRB.CreateLoad(i->second);
return IRB.CreateLoad(DFS.ShadowTy, i->second);
}
}
@ -1208,7 +1209,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
case 0:
return DFS.ZeroShadow;
case 1: {
LoadInst *LI = new LoadInst(ShadowAddr, "", Pos);
LoadInst *LI = new LoadInst(DFS.ShadowTy, ShadowAddr, "", Pos);
LI->setAlignment(ShadowAlign);
return LI;
}
@ -1216,8 +1217,9 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
IRBuilder<> IRB(Pos);
Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
ConstantInt::get(DFS.IntptrTy, 1));
return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
return combineShadows(
IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr, ShadowAlign),
IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr1, ShadowAlign), Pos);
}
}
if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) {
@ -1236,7 +1238,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
IRBuilder<> IRB(Pos);
Value *WideAddr =
IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
Value *WideShadow = IRB.CreateAlignedLoad(WideAddr, ShadowAlign);
Value *WideShadow =
IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth);
Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth);
@ -1269,7 +1272,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
IRBuilder<> NextIRB(NextBB);
WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
ConstantInt::get(DFS.IntptrTy, 1));
Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
Value *NextWideShadow = NextIRB.CreateAlignedLoad(NextIRB.getInt64Ty(),
WideAddr, ShadowAlign);
ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
LastBr->setSuccessor(0, NextBB);
LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
@ -1646,7 +1650,8 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
}
if (!FT->getReturnType()->isVoidTy()) {
LoadInst *LabelLoad = IRB.CreateLoad(DFSF.LabelReturnAlloca);
LoadInst *LabelLoad =
IRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.LabelReturnAlloca);
DFSF.setShadow(CustomCI, LabelLoad);
}
@ -1684,7 +1689,7 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
IRBuilder<> NextIRB(Next);
LoadInst *LI = NextIRB.CreateLoad(DFSF.getRetvalTLS());
LoadInst *LI = NextIRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.getRetvalTLS());
DFSF.SkipInsts.insert(LI);
DFSF.setShadow(CS.getInstruction(), LI);
DFSF.NonZeroChecks.push_back(LI);

View File

@ -790,7 +790,7 @@ bool EfficiencySanitizer::insertCounterUpdate(Instruction *I,
ConstantExpr::getGetElementPtr(
ArrayType::get(IRB.getInt64Ty(), getStructCounterSize(StructTy)),
CounterArray, Indices);
Value *Load = IRB.CreateLoad(Counter);
Value *Load = IRB.CreateLoad(IRB.getInt64Ty(), Counter);
IRB.CreateStore(IRB.CreateAdd(Load, ConstantInt::get(IRB.getInt64Ty(), 1)),
Counter);
return true;
@ -875,7 +875,8 @@ bool EfficiencySanitizer::instrumentFastpathWorkingSet(
// memory access, if they are not already set.
Value *ValueMask = ConstantInt::get(ShadowTy, 0x81); // 10000001B
Value *OldValue = IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
Value *OldValue =
IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
// The AND and CMP will be turned into a TEST instruction by the compiler.
Value *Cmp = IRB.CreateICmpNE(IRB.CreateAnd(OldValue, ValueMask), ValueMask);
Instruction *CmpTerm = SplitBlockAndInsertIfThen(Cmp, I, false);

View File

@ -817,7 +817,7 @@ bool GCOVProfiler::emitProfileArcs() {
// Skip phis, landingpads.
IRBuilder<> Builder(&*BB.getFirstInsertionPt());
Value *Count = Builder.CreateLoad(Phi);
Value *Count = Builder.CreateLoad(Builder.getInt64Ty(), Phi);
Count = Builder.CreateAdd(Count, Builder.getInt64(1));
Builder.CreateStore(Count, Phi);
@ -828,7 +828,7 @@ bool GCOVProfiler::emitProfileArcs() {
const unsigned Edge = It->second;
Value *Counter =
Builder.CreateConstInBoundsGEP2_64(Counters, 0, Edge);
Value *Count = Builder.CreateLoad(Counter);
Value *Count = Builder.CreateLoad(Builder.getInt64Ty(), Counter);
Count = Builder.CreateAdd(Count, Builder.getInt64(1));
Builder.CreateStore(Count, Counter);
}
@ -1089,17 +1089,20 @@ Function *GCOVProfiler::insertCounterWriteout(
auto *StartFileCallArgsPtr = Builder.CreateStructGEP(FileInfoPtr, 0);
auto *StartFileCall = Builder.CreateCall(
StartFile,
{Builder.CreateLoad(Builder.CreateStructGEP(StartFileCallArgsPtr, 0)),
Builder.CreateLoad(Builder.CreateStructGEP(StartFileCallArgsPtr, 1)),
Builder.CreateLoad(Builder.CreateStructGEP(StartFileCallArgsPtr, 2))});
{Builder.CreateLoad(StartFileCallArgsTy->getElementType(0),
Builder.CreateStructGEP(StartFileCallArgsPtr, 0)),
Builder.CreateLoad(StartFileCallArgsTy->getElementType(1),
Builder.CreateStructGEP(StartFileCallArgsPtr, 1)),
Builder.CreateLoad(StartFileCallArgsTy->getElementType(2),
Builder.CreateStructGEP(StartFileCallArgsPtr, 2))});
if (auto AK = TLI->getExtAttrForI32Param(false))
StartFileCall->addParamAttr(2, AK);
auto *NumCounters =
Builder.CreateLoad(Builder.CreateStructGEP(FileInfoPtr, 1));
auto *EmitFunctionCallArgsArray =
Builder.CreateLoad(Builder.CreateStructGEP(FileInfoPtr, 2));
auto *EmitArcsCallArgsArray =
Builder.CreateLoad(Builder.CreateStructGEP(FileInfoPtr, 3));
auto *NumCounters = Builder.CreateLoad(
FileInfoTy->getElementType(1), Builder.CreateStructGEP(FileInfoPtr, 1));
auto *EmitFunctionCallArgsArray = Builder.CreateLoad(
FileInfoTy->getElementType(2), Builder.CreateStructGEP(FileInfoPtr, 2));
auto *EmitArcsCallArgsArray = Builder.CreateLoad(
FileInfoTy->getElementType(3), Builder.CreateStructGEP(FileInfoPtr, 3));
auto *EnterCounterLoopCond =
Builder.CreateICmpSLT(Builder.getInt32(0), NumCounters);
Builder.CreateCondBr(EnterCounterLoopCond, CounterLoopHeader, FileLoopLatch);
@ -1111,11 +1114,16 @@ Function *GCOVProfiler::insertCounterWriteout(
Builder.CreateInBoundsGEP(EmitFunctionCallArgsArray, {JV});
auto *EmitFunctionCall = Builder.CreateCall(
EmitFunction,
{Builder.CreateLoad(Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 0)),
Builder.CreateLoad(Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 1)),
Builder.CreateLoad(Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 2)),
Builder.CreateLoad(Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 3)),
{Builder.CreateLoad(EmitFunctionCallArgsTy->getElementType(0),
Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 0)),
Builder.CreateLoad(EmitFunctionCallArgsTy->getElementType(1),
Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 1)),
Builder.CreateLoad(EmitFunctionCallArgsTy->getElementType(2),
Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 2)),
Builder.CreateLoad(EmitFunctionCallArgsTy->getElementType(3),
Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 3)),
Builder.CreateLoad(
EmitFunctionCallArgsTy->getElementType(4),
Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 4))});
if (auto AK = TLI->getExtAttrForI32Param(false)) {
EmitFunctionCall->addParamAttr(0, AK);
@ -1127,8 +1135,10 @@ Function *GCOVProfiler::insertCounterWriteout(
Builder.CreateInBoundsGEP(EmitArcsCallArgsArray, {JV});
auto *EmitArcsCall = Builder.CreateCall(
EmitArcs,
{Builder.CreateLoad(Builder.CreateStructGEP(EmitArcsCallArgsPtr, 0)),
Builder.CreateLoad(Builder.CreateStructGEP(EmitArcsCallArgsPtr, 1))});
{Builder.CreateLoad(EmitArcsCallArgsTy->getElementType(0),
Builder.CreateStructGEP(EmitArcsCallArgsPtr, 0)),
Builder.CreateLoad(EmitArcsCallArgsTy->getElementType(1),
Builder.CreateStructGEP(EmitArcsCallArgsPtr, 1))});
if (auto AK = TLI->getExtAttrForI32Param(false))
EmitArcsCall->addParamAttr(0, AK);
auto *NextJV = Builder.CreateAdd(JV, Builder.getInt32(1));

View File

@ -428,7 +428,7 @@ Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
Value *GlobalDynamicAddress =
IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
return IRB.CreateLoad(GlobalDynamicAddress);
return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
}
}
@ -557,7 +557,7 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
IRB.getInt8Ty());
Value *AddrLong = untagPointer(IRB, PtrLong);
Value *Shadow = memToShadow(AddrLong, IRB);
Value *MemTag = IRB.CreateLoad(Shadow);
Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
@ -841,7 +841,7 @@ Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
assert(SlotPtr);
Instruction *ThreadLong = IRB.CreateLoad(SlotPtr);
Instruction *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
Function *F = IRB.GetInsertBlock()->getParent();
if (F->getFnAttribute("hwasan-abi").getValueAsString() == "interceptor") {
@ -855,7 +855,7 @@ Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
// FIXME: This should call a new runtime function with a custom calling
// convention to avoid needing to spill all arguments here.
IRB.CreateCall(HwasanThreadEnterFunc);
LoadInst *ReloadThreadLong = IRB.CreateLoad(SlotPtr);
LoadInst *ReloadThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
IRB.SetInsertPoint(&*Br->getSuccessor(0)->begin());
PHINode *ThreadLongPhi = IRB.CreatePHI(IntptrTy, 2);

View File

@ -195,6 +195,7 @@ public:
// block.
Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
Value *Addr = cast<StoreInst>(Store)->getPointerOperand();
Type *Ty = LiveInValue->getType();
IRBuilder<> Builder(InsertPos);
if (AtomicCounterUpdatePromoted)
// automic update currently can only be promoted across the current
@ -202,7 +203,7 @@ public:
Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, LiveInValue,
AtomicOrdering::SequentiallyConsistent);
else {
LoadInst *OldVal = Builder.CreateLoad(Addr, "pgocount.promoted");
LoadInst *OldVal = Builder.CreateLoad(Ty, Addr, "pgocount.promoted");
auto *NewVal = Builder.CreateAdd(OldVal, LiveInValue);
auto *NewStore = Builder.CreateStore(NewVal, Addr);
@ -603,7 +604,8 @@ void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) {
Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, Inc->getStep(),
AtomicOrdering::Monotonic);
} else {
Value *Load = Builder.CreateLoad(Addr, "pgocount");
Value *IncStep = Inc->getStep();
Value *Load = Builder.CreateLoad(IncStep->getType(), Addr, "pgocount");
auto *Count = Builder.CreateAdd(Load, Inc->getStep());
auto *Store = Builder.CreateStore(Count, Addr);
if (isCounterPromotionEnabled())
@ -950,7 +952,7 @@ bool InstrProfiling::emitRuntimeHook() {
User->setComdat(M->getOrInsertComdat(User->getName()));
IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", User));
auto *Load = IRB.CreateLoad(Var);
auto *Load = IRB.CreateLoad(Int32Ty, Var);
IRB.CreateRet(Load);
// Mark the user variable as used so that it isn't stripped out.

View File

@ -1401,7 +1401,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRB.CreateAnd(OriginLong, ConstantInt::get(MS.IntptrTy, ~Mask));
}
OriginPtr =
IRB.CreateIntToPtr(OriginLong, PointerType::get(IRB.getInt32Ty(), 0));
IRB.CreateIntToPtr(OriginLong, PointerType::get(MS.OriginTy, 0));
}
return std::make_pair(ShadowPtr, OriginPtr);
}
@ -1618,8 +1618,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// ParamTLS overflow.
*ShadowPtr = getCleanShadow(V);
} else {
*ShadowPtr =
EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
*ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
kShadowTLSAlignment);
}
}
LLVM_DEBUG(dbgs()
@ -1627,7 +1627,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (MS.TrackOrigins && !Overflow) {
Value *OriginPtr =
getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
} else {
setOrigin(A, getCleanOrigin());
}
@ -1758,7 +1758,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (PropagateShadow) {
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_msld"));
setShadow(&I,
IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
} else {
setShadow(&I, getCleanShadow(&I));
}
@ -1772,7 +1773,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (MS.TrackOrigins) {
if (PropagateShadow) {
unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
setOrigin(&I, IRB.CreateAlignedLoad(OriginPtr, OriginAlignment));
setOrigin(
&I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr, OriginAlignment));
} else {
setOrigin(&I, getCleanOrigin());
}
@ -2452,7 +2454,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
unsigned Alignment = 1;
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_msld"));
setShadow(&I,
IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
} else {
setShadow(&I, getCleanShadow(&I));
}
@ -2462,7 +2465,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (MS.TrackOrigins) {
if (PropagateShadow)
setOrigin(&I, IRB.CreateLoad(OriginPtr));
setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
else
setOrigin(&I, getCleanOrigin());
}
@ -2845,9 +2848,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (ClCheckAccessAddress)
insertShadowCheck(Addr, &I);
Value *Shadow = IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_ldmxcsr");
Value *Origin =
MS.TrackOrigins ? IRB.CreateLoad(OriginPtr) : getCleanOrigin();
Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
: getCleanOrigin();
insertShadowCheck(Shadow, Origin, &I);
}
@ -2921,7 +2924,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Origin = IRB.CreateSelect(
IRB.CreateICmpNE(Acc, Constant::getNullValue(Acc->getType())),
getOrigin(PassThru), IRB.CreateLoad(OriginPtr));
getOrigin(PassThru), IRB.CreateLoad(MS.OriginTy, OriginPtr));
setOrigin(&I, Origin);
} else {
@ -3284,12 +3287,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
"Could not find insertion point for retval shadow load");
}
IRBuilder<> IRBAfter(&*NextInsn);
Value *RetvalShadow =
IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
kShadowTLSAlignment, "_msret");
Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
getShadowTy(&I), getShadowPtrForRetval(&I, IRBAfter),
kShadowTLSAlignment, "_msret");
setShadow(&I, RetvalShadow);
if (MS.TrackOrigins)
setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
setOrigin(&I, IRBAfter.CreateLoad(MS.OriginTy,
getOriginPtrForRetval(IRBAfter)));
}
bool isAMustTailRetVal(Value *RetVal) {
@ -3837,7 +3841,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
// If there is a va_start in this function, make a backup copy of
// va_arg_tls somewhere in the function entry block.
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
VAArgOverflowSize =
IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
Value *CopySize =
IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
VAArgOverflowSize);
@ -3856,11 +3861,13 @@ struct VarArgAMD64Helper : public VarArgHelper {
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, 16)),
PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
PointerType::get(RegSaveAreaPtrTy, 0));
Value *RegSaveAreaPtr =
IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
unsigned Alignment = 16;
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
@ -3871,11 +3878,13 @@ struct VarArgAMD64Helper : public VarArgHelper {
if (MS.TrackOrigins)
IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
Alignment, AMD64FpEndOffset);
Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, 8)),
PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
PointerType::get(OverflowArgAreaPtrTy, 0));
Value *OverflowArgAreaPtr =
IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
@ -3977,7 +3986,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
assert(!VAArgSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
VAArgSize);
@ -3994,10 +4003,12 @@ struct VarArgMIPS64Helper : public VarArgHelper {
CallInst *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
PointerType::get(RegSaveAreaPtrTy, 0));
Value *RegSaveAreaPtr =
IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
unsigned Alignment = 8;
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
@ -4147,7 +4158,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, offset)),
Type::getInt64PtrTy(*MS.C));
return IRB.CreateLoad(SaveAreaPtrPtr);
return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
}
// Retrieve a va_list field of 'int' size.
@ -4157,7 +4168,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, offset)),
Type::getInt32PtrTy(*MS.C));
Value *SaveArea32 = IRB.CreateLoad(SaveAreaPtr);
Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
}
@ -4168,7 +4179,8 @@ struct VarArgAArch64Helper : public VarArgHelper {
// If there is a va_start in this function, make a backup copy of
// va_arg_tls somewhere in the function entry block.
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
VAArgOverflowSize =
IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
Value *CopySize =
IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
VAArgOverflowSize);
@ -4411,7 +4423,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
assert(!VAArgSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
VAArgSize);
@ -4428,10 +4440,12 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
CallInst *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
PointerType::get(RegSaveAreaPtrTy, 0));
Value *RegSaveAreaPtr =
IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
unsigned Alignment = 8;
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =

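As a compact restatement of the va_list rewrites above, the sketch below (a standalone helper with made-up parameter names; the byte offset 16 follows the AMD64 va_list layout assumed by the code) loads a pointer-typed field through an inttoptr-derived address and spells out the loaded type:

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Hypothetical helper: read the i64* register-save-area pointer stored at
// byte offset 16 of a va_list tag. The loaded value's type (i64*) is passed
// to CreateLoad explicitly rather than inferred from the pointer operand.
static Value *loadRegSaveAreaPtr(IRBuilder<> &IRB, Value *VAListTag,
                                 Type *IntptrTy) {
  Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(IRB.getContext());
  Value *FieldAddr = IRB.CreateIntToPtr(
      IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, IntptrTy),
                    ConstantInt::get(IntptrTy, 16)),
      PointerType::get(RegSaveAreaPtrTy, 0));
  return IRB.CreateLoad(RegSaveAreaPtrTy, FieldAddr);
}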

@ -822,7 +822,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
auto CounterPtr = IRB.CreateGEP(
Function8bitCounterArray,
{ConstantInt::get(IntptrTy, 0), ConstantInt::get(IntptrTy, Idx)});
auto Load = IRB.CreateLoad(CounterPtr);
auto Load = IRB.CreateLoad(Int8Ty, CounterPtr);
auto Inc = IRB.CreateAdd(Load, ConstantInt::get(Int8Ty, 1));
auto Store = IRB.CreateStore(Inc, CounterPtr);
SetNoSanitizeMetadata(Load);
@ -835,7 +835,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
auto FrameAddrPtr =
IRB.CreateCall(GetFrameAddr, {Constant::getNullValue(Int32Ty)});
auto FrameAddrInt = IRB.CreatePtrToInt(FrameAddrPtr, IntptrTy);
auto LowestStack = IRB.CreateLoad(SanCovLowestStack);
auto LowestStack = IRB.CreateLoad(IntptrTy, SanCovLowestStack);
auto IsStackLower = IRB.CreateICmpULT(FrameAddrInt, LowestStack);
auto ThenTerm = SplitBlockAndInsertIfThen(IsStackLower, &*IP, false);
IRBuilder<> ThenIRB(ThenTerm);


@ -1234,10 +1234,10 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
BasicBlock *UnavailablePred = PredLoad.first;
Value *LoadPtr = PredLoad.second;
auto *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre",
LI->isVolatile(), LI->getAlignment(),
LI->getOrdering(), LI->getSyncScopeID(),
UnavailablePred->getTerminator());
auto *NewLoad =
new LoadInst(LI->getType(), LoadPtr, LI->getName() + ".pre",
LI->isVolatile(), LI->getAlignment(), LI->getOrdering(),
LI->getSyncScopeID(), UnavailablePred->getTerminator());
NewLoad->setDebugLoc(LI->getDebugLoc());
// Transfer the old load's AA tags to the new load.

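The raw LoadInst constructors gain the same leading type parameter. A minimal sketch (illustrative names; assumes a simple, non-atomic load) of re-issuing an existing load at the end of a predecessor block, as PerformLoadPRE does above:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: clone a load at the end of Pred, carrying over its
// value type, volatility and alignment explicitly.
static LoadInst *cloneLoadAtEndOf(LoadInst *LI, Value *NewPtr,
                                  BasicBlock *Pred) {
  return new LoadInst(LI->getType(), NewPtr, LI->getName() + ".pre",
                      LI->isVolatile(), LI->getAlignment(),
                      Pred->getTerminator());
}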

@ -1445,11 +1445,11 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LoadI) {
if (UnavailablePred) {
assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
"Can't handle critical edge here!");
LoadInst *NewVal =
new LoadInst(LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
LoadI->getName() + ".pr", false, LoadI->getAlignment(),
LoadI->getOrdering(), LoadI->getSyncScopeID(),
UnavailablePred->getTerminator());
LoadInst *NewVal = new LoadInst(
LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
LoadI->getName() + ".pr", false, LoadI->getAlignment(),
LoadI->getOrdering(), LoadI->getSyncScopeID(),
UnavailablePred->getTerminator());
NewVal->setDebugLoc(LoadI->getDebugLoc());
if (AATags)
NewVal->setAAMetadata(AATags);


@ -1947,7 +1947,8 @@ bool llvm::promoteLoopAccessesToScalars(
// Set up the preheader to have a definition of the value. It is the live-out
// value from the preheader that uses in the loop will use.
LoadInst *PreheaderLoad = new LoadInst(
SomePtr, SomePtr->getName() + ".promoted", Preheader->getTerminator());
SomePtr->getType()->getPointerElementType(), SomePtr,
SomePtr->getName() + ".promoted", Preheader->getTerminator());
if (SawUnorderedAtomic)
PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
PreheaderLoad->setAlignment(Alignment);


@ -427,9 +427,9 @@ public:
auto *PH = L->getLoopPreheader();
Value *InitialPtr = SEE.expandCodeFor(PtrSCEV->getStart(), Ptr->getType(),
PH->getTerminator());
Value *Initial =
new LoadInst(InitialPtr, "load_initial", /* isVolatile */ false,
Cand.Load->getAlignment(), PH->getTerminator());
Value *Initial = new LoadInst(
Cand.Load->getType(), InitialPtr, "load_initial",
/* isVolatile */ false, Cand.Load->getAlignment(), PH->getTerminator());
PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
&L->getHeader()->front());


@ -26,7 +26,7 @@ static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
Value *Cmp = CXI->getCompareOperand();
Value *Val = CXI->getNewValOperand();
LoadInst *Orig = Builder.CreateLoad(Ptr);
LoadInst *Orig = Builder.CreateLoad(Val->getType(), Ptr);
Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
Value *Res = Builder.CreateSelect(Equal, Val, Orig);
Builder.CreateStore(Res, Ptr);
@ -44,7 +44,7 @@ static bool LowerAtomicRMWInst(AtomicRMWInst *RMWI) {
Value *Ptr = RMWI->getPointerOperand();
Value *Val = RMWI->getValOperand();
LoadInst *Orig = Builder.CreateLoad(Ptr);
LoadInst *Orig = Builder.CreateLoad(Val->getType(), Ptr);
Value *Res = nullptr;
switch (RMWI->getOperation()) {


@ -1636,7 +1636,7 @@ makeStatepointExplicit(DominatorTree &DT, CallSite CS,
// for sanity checking.
static void
insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
DenseMap<Value *, Value *> &AllocaMap,
DenseMap<Value *, AllocaInst *> &AllocaMap,
DenseSet<Value *> &VisitedLiveValues) {
for (User *U : GCRelocs) {
GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
@ -1671,7 +1671,7 @@ insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
// "insertRelocationStores" but works for rematerialized values.
static void insertRematerializationStores(
const RematerializedValueMapTy &RematerializedValues,
DenseMap<Value *, Value *> &AllocaMap,
DenseMap<Value *, AllocaInst *> &AllocaMap,
DenseSet<Value *> &VisitedLiveValues) {
for (auto RematerializedValuePair: RematerializedValues) {
Instruction *RematerializedValue = RematerializedValuePair.first;
@ -1704,7 +1704,7 @@ static void relocationViaAlloca(
#endif
// TODO-PERF: change data structures, reserve
DenseMap<Value *, Value *> AllocaMap;
DenseMap<Value *, AllocaInst *> AllocaMap;
SmallVector<AllocaInst *, 200> PromotableAllocas;
// Used later to check that we have enough allocas to store all values
std::size_t NumRematerializedValues = 0;
@ -1774,7 +1774,7 @@ static void relocationViaAlloca(
SmallVector<AllocaInst *, 64> ToClobber;
for (auto Pair : AllocaMap) {
Value *Def = Pair.first;
AllocaInst *Alloca = cast<AllocaInst>(Pair.second);
AllocaInst *Alloca = Pair.second;
// This value was relocated
if (VisitedLiveValues.count(Def)) {
@ -1806,7 +1806,7 @@ static void relocationViaAlloca(
// Update use with load allocas and add store for gc_relocated.
for (auto Pair : AllocaMap) {
Value *Def = Pair.first;
Value *Alloca = Pair.second;
AllocaInst *Alloca = Pair.second;
// We pre-record the uses of allocas so that we don't have to worry about
// later updates that change the user information.
@ -1834,13 +1834,15 @@ static void relocationViaAlloca(
PHINode *Phi = cast<PHINode>(Use);
for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
if (Def == Phi->getIncomingValue(i)) {
LoadInst *Load = new LoadInst(
Alloca, "", Phi->getIncomingBlock(i)->getTerminator());
LoadInst *Load =
new LoadInst(Alloca->getAllocatedType(), Alloca, "",
Phi->getIncomingBlock(i)->getTerminator());
Phi->setIncomingValue(i, Load);
}
}
} else {
LoadInst *Load = new LoadInst(Alloca, "", Use);
LoadInst *Load =
new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use);
Use->replaceUsesOfWith(Def, Load);
}
}


@ -1231,15 +1231,14 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
static void speculatePHINodeLoads(PHINode &PN) {
LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
Type *LoadTy = SomeLoad->getType();
IRBuilderTy PHIBuilder(&PN);
PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
PN.getName() + ".sroa.speculated");
// Get the AA tags and alignment to use from one of the loads. It doesn't
// matter which one we get and if any differ.
LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
AAMDNodes AATags;
SomeLoad->getAAMetadata(AATags);
unsigned Align = SomeLoad->getAlignment();
@ -1270,7 +1269,8 @@ static void speculatePHINodeLoads(PHINode &PN) {
IRBuilderTy PredBuilder(TI);
LoadInst *Load = PredBuilder.CreateLoad(
InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
LoadTy, InVal,
(PN.getName() + ".sroa.speculate.load." + Pred->getName()));
++NumLoadsSpeculated;
Load->setAlignment(Align);
if (AATags)
@ -1330,10 +1330,10 @@ static void speculateSelectInstLoads(SelectInst &SI) {
assert(LI->isSimple() && "We only speculate simple loads");
IRB.SetInsertPoint(LI);
LoadInst *TL =
IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
LoadInst *FL =
IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
LoadInst *TL = IRB.CreateLoad(LI->getType(), TV,
LI->getName() + ".sroa.speculate.load.true");
LoadInst *FL = IRB.CreateLoad(LI->getType(), FV,
LI->getName() + ".sroa.speculate.load.false");
NumLoadsSpeculated += 2;
// Transfer alignment and AA info if present.
@ -2410,14 +2410,16 @@ private:
unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
}
Value *rewriteIntegerLoad(LoadInst &LI) {
assert(IntTy && "We cannot insert an integer to the alloca");
assert(!LI.isVolatile());
Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
V = convertValue(DL, IRB, V, IntTy);
assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
@ -2461,7 +2463,8 @@ private:
(canConvertValue(DL, NewAllocaTy, TargetTy) ||
(IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
TargetTy->isIntegerTy()))) {
LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(),
LI.isVolatile(), LI.getName());
if (AATags)
NewLI->setAAMetadata(AATags);
@ -2497,9 +2500,9 @@ private:
}
} else {
Type *LTy = TargetTy->getPointerTo(AS);
LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
getSliceAlign(TargetTy),
LI.isVolatile(), LI.getName());
LoadInst *NewLI = IRB.CreateAlignedLoad(
TargetTy, getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(TargetTy),
LI.isVolatile(), LI.getName());
if (AATags)
NewLI->setAAMetadata(AATags);
if (LI.isVolatile())
@ -2525,8 +2528,8 @@ private:
// basis for the new value. This allows us to replace the uses of LI with
// the computed value, and then replace the placeholder with LI, leaving
// LI only used for this computation.
Value *Placeholder =
new LoadInst(UndefValue::get(LI.getType()->getPointerTo(AS)));
Value *Placeholder = new LoadInst(
LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)));
V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
"insert");
LI.replaceAllUsesWith(V);
@ -2557,7 +2560,8 @@ private:
V = convertValue(DL, IRB, V, SliceTy);
// Mix in the existing elements.
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
V = insertVector(IRB, Old, V, BeginIndex, "vec");
}
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
@ -2573,8 +2577,8 @@ private:
assert(IntTy && "We cannot extract an integer from the alloca");
assert(!SI.isVolatile());
if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
Value *Old =
IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
@ -2766,8 +2770,8 @@ private:
if (NumElements > 1)
Splat = getVectorSplat(Splat, NumElements);
Value *Old =
IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
} else if (IntTy) {
// If this is a memset on an alloca where we can widen stores, insert the
@ -2779,8 +2783,8 @@ private:
if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
EndOffset != NewAllocaBeginOffset)) {
Value *Old =
IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
V = insertInteger(DL, IRB, Old, V, Offset, "insert");
@ -2940,18 +2944,18 @@ private:
// Reset the other pointer type to match the register type we're going to
// use, but using the address space of the original other pointer.
Type *OtherTy;
if (VecTy && !IsWholeAlloca) {
if (NumElements == 1)
OtherPtrTy = VecTy->getElementType();
OtherTy = VecTy->getElementType();
else
OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS);
OtherTy = VectorType::get(VecTy->getElementType(), NumElements);
} else if (IntTy && !IsWholeAlloca) {
OtherPtrTy = SubIntTy->getPointerTo(OtherAS);
OtherTy = SubIntTy;
} else {
OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS);
OtherTy = NewAllocaTy;
}
OtherPtrTy = OtherTy->getPointerTo(OtherAS);
Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
OtherPtr->getName() + ".");
@ -2965,28 +2969,30 @@ private:
Value *Src;
if (VecTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
} else if (IntTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "load");
Src = convertValue(DL, IRB, Src, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
} else {
LoadInst *Load = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
"copyload");
LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign,
II.isVolatile(), "copyload");
if (AATags)
Load->setAAMetadata(AATags);
Src = Load;
}
if (VecTy && !IsWholeAlloca && IsDest) {
Value *Old =
IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
} else if (IntTy && !IsWholeAlloca && IsDest) {
Value *Old =
IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
@ -3293,7 +3299,7 @@ private:
// Load the single value and insert it using the indices.
Value *GEP =
IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep");
LoadInst *Load = IRB.CreateAlignedLoad(GEP, Align, Name + ".load");
LoadInst *Load = IRB.CreateAlignedLoad(Ty, GEP, Align, Name + ".load");
if (AATags)
Load->setAAMetadata(AATags);
Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
@ -3787,6 +3793,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
auto AS = LI->getPointerAddressSpace();
auto *PartPtrTy = PartTy->getPointerTo(AS);
LoadInst *PLoad = IRB.CreateAlignedLoad(
PartTy,
getAdjustedPtr(IRB, DL, BasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
PartPtrTy, BasePtr->getName() + "."),
@ -3928,6 +3935,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
IRB.SetInsertPoint(LI);
auto AS = LI->getPointerAddressSpace();
PLoad = IRB.CreateAlignedLoad(
PartTy,
getAdjustedPtr(IRB, DL, LoadBasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
LoadPartPtrTy, LoadBasePtr->getName() + "."),


@ -743,7 +743,8 @@ bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
Res.resize(NumElems);
for (unsigned I = 0; I < NumElems; ++I)
Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I),
Res[I] = Builder.CreateAlignedLoad(Layout.VecTy->getElementType(), Ptr[I],
Layout.getElemAlign(I),
LI.getName() + ".i" + Twine(I));
gather(&LI, Res);
return true;


@ -844,7 +844,8 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
Instruction *TI = newFunction->begin()->getTerminator();
GetElementPtrInst *GEP = GetElementPtrInst::Create(
StructTy, &*AI, Idx, "gep_" + inputs[i]->getName(), TI);
RewriteVal = new LoadInst(GEP, "loadgep_" + inputs[i]->getName(), TI);
RewriteVal = new LoadInst(StructTy->getElementType(i), GEP,
"loadgep_" + inputs[i]->getName(), TI);
} else
RewriteVal = &*AI++;
@ -1054,7 +1055,8 @@ CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
} else {
Output = ReloadOutputs[i];
}
LoadInst *load = new LoadInst(Output, outputs[i]->getName()+".reload");
LoadInst *load = new LoadInst(outputs[i]->getType(), Output,
outputs[i]->getName() + ".reload");
Reloads.push_back(load);
codeReplacer->getInstList().push_back(load);
std::vector<User *> Users(outputs[i]->user_begin(), outputs[i]->user_end());


@ -72,7 +72,8 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Value *&V = Loads[PN->getIncomingBlock(i)];
if (!V) {
// Insert the load into the predecessor block
V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
V = new LoadInst(I.getType(), Slot, I.getName() + ".reload",
VolatileLoads,
PN->getIncomingBlock(i)->getTerminator());
}
PN->setIncomingValue(i, V);
@ -80,7 +81,8 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
} else {
// If this is a normal instruction, just insert a load.
Value *V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads, U);
Value *V = new LoadInst(I.getType(), Slot, I.getName() + ".reload",
VolatileLoads, U);
U->replaceUsesOfWith(&I, V);
}
}
@ -141,7 +143,8 @@ AllocaInst *llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
for (; isa<PHINode>(InsertPt) || InsertPt->isEHPad(); ++InsertPt)
/* empty */; // Don't insert before PHI nodes or landingpad instrs.
Value *V = new LoadInst(Slot, P->getName() + ".reload", &*InsertPt);
Value *V =
new LoadInst(P->getType(), Slot, P->getName() + ".reload", &*InsertPt);
P->replaceAllUsesWith(V);
// Delete PHI.


@ -72,7 +72,7 @@ void llvm::createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
// Loop Body
Value *SrcGEP =
LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
Value *Load = LoopBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
Value *Load = LoopBuilder.CreateLoad(LoopOpType, SrcGEP, SrcIsVolatile);
Value *DstGEP =
LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
LoopBuilder.CreateStore(Load, DstGEP, DstIsVolatile);
@ -114,7 +114,7 @@ void llvm::createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
: RBuilder.CreateBitCast(SrcAddr, SrcPtrType);
Value *SrcGEP = RBuilder.CreateInBoundsGEP(
OpTy, CastedSrc, ConstantInt::get(TypeOfCopyLen, GepIndex));
Value *Load = RBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
Value *Load = RBuilder.CreateLoad(OpTy, SrcGEP, SrcIsVolatile);
// Cast destination to operand type and store.
PointerType *DstPtrType = PointerType::get(OpTy, DstAS);
@ -181,7 +181,7 @@ void llvm::createMemCpyLoopUnknownSize(Instruction *InsertBefore,
LoopIndex->addIncoming(ConstantInt::get(CopyLenType, 0U), PreLoopBB);
Value *SrcGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
Value *Load = LoopBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
Value *Load = LoopBuilder.CreateLoad(LoopOpType, SrcGEP, SrcIsVolatile);
Value *DstGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
LoopBuilder.CreateStore(Load, DstGEP, DstIsVolatile);
@ -234,7 +234,7 @@ void llvm::createMemCpyLoopUnknownSize(Instruction *InsertBefore,
Value *FullOffset = ResBuilder.CreateAdd(RuntimeBytesCopied, ResidualIndex);
Value *SrcGEP =
ResBuilder.CreateInBoundsGEP(Int8Type, SrcAsInt8, FullOffset);
Value *Load = ResBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
Value *Load = ResBuilder.CreateLoad(Int8Type, SrcGEP, SrcIsVolatile);
Value *DstGEP =
ResBuilder.CreateInBoundsGEP(Int8Type, DstAsInt8, FullOffset);
ResBuilder.CreateStore(Load, DstGEP, DstIsVolatile);
@ -292,6 +292,8 @@ static void createMemMoveLoop(Instruction *InsertBefore,
BasicBlock *OrigBB = InsertBefore->getParent();
Function *F = OrigBB->getParent();
Type *EltTy = cast<PointerType>(SrcAddr->getType())->getElementType();
// Create a comparison of src and dst, based on which we jump to either
// the forward-copy part of the function (if src >= dst) or the backwards-copy
// part (if src < dst).
@ -330,7 +332,7 @@ static void createMemMoveLoop(Instruction *InsertBefore,
Value *IndexPtr = LoopBuilder.CreateSub(
LoopPhi, ConstantInt::get(TypeOfCopyLen, 1), "index_ptr");
Value *Element = LoopBuilder.CreateLoad(
LoopBuilder.CreateInBoundsGEP(SrcAddr, IndexPtr), "element");
EltTy, LoopBuilder.CreateInBoundsGEP(SrcAddr, IndexPtr), "element");
LoopBuilder.CreateStore(Element,
LoopBuilder.CreateInBoundsGEP(DstAddr, IndexPtr));
LoopBuilder.CreateCondBr(
@ -347,7 +349,7 @@ static void createMemMoveLoop(Instruction *InsertBefore,
IRBuilder<> FwdLoopBuilder(FwdLoopBB);
PHINode *FwdCopyPhi = FwdLoopBuilder.CreatePHI(TypeOfCopyLen, 0, "index_ptr");
Value *FwdElement = FwdLoopBuilder.CreateLoad(
FwdLoopBuilder.CreateInBoundsGEP(SrcAddr, FwdCopyPhi), "element");
EltTy, FwdLoopBuilder.CreateInBoundsGEP(SrcAddr, FwdCopyPhi), "element");
FwdLoopBuilder.CreateStore(
FwdElement, FwdLoopBuilder.CreateInBoundsGEP(DstAddr, FwdCopyPhi));
Value *FwdIndexPtr = FwdLoopBuilder.CreateAdd(


@ -5090,7 +5090,9 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) {
Value *GEPIndices[] = {Builder.getInt32(0), Index};
Value *GEP = Builder.CreateInBoundsGEP(Array->getValueType(), Array,
GEPIndices, "switch.gep");
return Builder.CreateLoad(GEP, "switch.load");
return Builder.CreateLoad(
cast<ArrayType>(Array->getValueType())->getElementType(), GEP,
"switch.load");
}
}
llvm_unreachable("Unknown lookup table kind!");

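Restated as a self-contained helper (hypothetical name and signature), the lookup-table load above now takes its element type from the global's array type rather than from the GEP's result pointer:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Hypothetical helper: index into a constant array global and load one entry,
// deriving the loaded type from the array's element type.
static Value *loadTableEntry(IRBuilder<> &Builder, GlobalVariable *Array,
                             Value *Index) {
  Value *GEPIndices[] = {Builder.getInt32(0), Index};
  Value *GEP = Builder.CreateInBoundsGEP(Array->getValueType(), Array,
                                         GEPIndices, "switch.gep");
  Type *EltTy = cast<ArrayType>(Array->getValueType())->getElementType();
  return Builder.CreateLoad(EltTy, GEP, "switch.load");
}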

@ -333,11 +333,12 @@ Value *LibCallSimplifier::optimizeStrCmp(CallInst *CI, IRBuilder<> &B) {
return ConstantInt::get(CI->getType(), Str1.compare(Str2));
if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x
return B.CreateNeg(
B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"), CI->getType()));
return B.CreateNeg(B.CreateZExt(
B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload"), CI->getType()));
if (HasStr2 && Str2.empty()) // strcmp(x,"") -> *x
return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());
return B.CreateZExt(B.CreateLoad(B.getInt8Ty(), Str1P, "strcmpload"),
CI->getType());
// strcmp(P, "x") -> memcmp(P, "x", 2)
uint64_t Len1 = GetStringLength(Str1P);
@ -397,11 +398,12 @@ Value *LibCallSimplifier::optimizeStrNCmp(CallInst *CI, IRBuilder<> &B) {
}
if (HasStr1 && Str1.empty()) // strncmp("", x, n) -> -*x
return B.CreateNeg(
B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"), CI->getType()));
return B.CreateNeg(B.CreateZExt(
B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload"), CI->getType()));
if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x
return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());
return B.CreateZExt(B.CreateLoad(B.getInt8Ty(), Str1P, "strcmpload"),
CI->getType());
uint64_t Len1 = GetStringLength(Str1P);
uint64_t Len2 = GetStringLength(Str2P);
@ -590,7 +592,8 @@ Value *LibCallSimplifier::optimizeStringLength(CallInst *CI, IRBuilder<> &B,
// strlen(x) != 0 --> *x != 0
// strlen(x) == 0 --> *x == 0
if (isOnlyUsedInZeroEqualityComparison(CI))
return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType());
return B.CreateZExt(B.CreateLoad(B.getIntNTy(CharSize), Src, "strlenfirst"),
CI->getType());
return nullptr;
}
@ -844,10 +847,12 @@ Value *LibCallSimplifier::optimizeMemCmp(CallInst *CI, IRBuilder<> &B) {
// memcmp(S1,S2,1) -> *(unsigned char*)LHS - *(unsigned char*)RHS
if (Len == 1) {
Value *LHSV = B.CreateZExt(B.CreateLoad(castToCStr(LHS, B), "lhsc"),
CI->getType(), "lhsv");
Value *RHSV = B.CreateZExt(B.CreateLoad(castToCStr(RHS, B), "rhsc"),
CI->getType(), "rhsv");
Value *LHSV =
B.CreateZExt(B.CreateLoad(B.getInt8Ty(), castToCStr(LHS, B), "lhsc"),
CI->getType(), "lhsv");
Value *RHSV =
B.CreateZExt(B.CreateLoad(B.getInt8Ty(), castToCStr(RHS, B), "rhsc"),
CI->getType(), "rhsv");
return B.CreateSub(LHSV, RHSV, "chardiff");
}
@ -877,12 +882,12 @@ Value *LibCallSimplifier::optimizeMemCmp(CallInst *CI, IRBuilder<> &B) {
if (!LHSV) {
Type *LHSPtrTy =
IntType->getPointerTo(LHS->getType()->getPointerAddressSpace());
LHSV = B.CreateLoad(B.CreateBitCast(LHS, LHSPtrTy), "lhsv");
LHSV = B.CreateLoad(IntType, B.CreateBitCast(LHS, LHSPtrTy), "lhsv");
}
if (!RHSV) {
Type *RHSPtrTy =
IntType->getPointerTo(RHS->getType()->getPointerAddressSpace());
RHSV = B.CreateLoad(B.CreateBitCast(RHS, RHSPtrTy), "rhsv");
RHSV = B.CreateLoad(IntType, B.CreateBitCast(RHS, RHSPtrTy), "rhsv");
}
return B.CreateZExt(B.CreateICmpNE(LHSV, RHSV), CI->getType(), "memcmp");
}
@ -2286,7 +2291,8 @@ Value *LibCallSimplifier::optimizeFWrite(CallInst *CI, IRBuilder<> &B) {
// If this is writing one byte, turn it into fputc.
// This optimisation is only valid if the return value is unused.
if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
Value *Char = B.CreateLoad(castToCStr(CI->getArgOperand(0), B), "char");
Value *Char = B.CreateLoad(B.getInt8Ty(),
castToCStr(CI->getArgOperand(0), B), "char");
Value *NewCI = emitFPutC(Char, CI->getArgOperand(3), B, TLI);
return NewCI ? ConstantInt::get(CI->getType(), 1) : nullptr;
}


@ -386,12 +386,12 @@ Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy,
// memdep queries will find the new load. We can't easily remove the old
// load completely because it is already in the value numbering table.
IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
Type *DestPTy = IntegerType::get(LoadTy->getContext(), NewLoadSize * 8);
DestPTy =
PointerType::get(DestPTy, PtrVal->getType()->getPointerAddressSpace());
Type *DestTy = IntegerType::get(LoadTy->getContext(), NewLoadSize * 8);
Type *DestPTy =
PointerType::get(DestTy, PtrVal->getType()->getPointerAddressSpace());
Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
LoadInst *NewLoad = Builder.CreateLoad(PtrVal);
LoadInst *NewLoad = Builder.CreateLoad(DestTy, PtrVal);
NewLoad->takeName(SrcVal);
NewLoad->setAlignment(SrcVal->getAlignment());


@ -1181,7 +1181,7 @@ bool Vectorizer::vectorizeLoadChain(
Value *Bitcast =
Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
LoadInst *LI = Builder.CreateAlignedLoad(Bitcast, Alignment);
LoadInst *LI = Builder.CreateAlignedLoad(VecTy, Bitcast, Alignment);
propagateMetadata(LI, Chain);
if (VecLoadTy) {

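A sketch of the widened-load pattern used by the vectorizers above (an illustrative helper; it assumes the scalar loads were already proven contiguous and the alignment adequate): the pointer is bitcast to a vector pointer and the vector type is then handed to CreateAlignedLoad directly.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Hypothetical helper: replace a scalar load L0 with a <NumElts x Ty> load of
// the same address, passing the vector type to CreateAlignedLoad explicitly.
static LoadInst *emitWideLoad(IRBuilder<> &Builder, LoadInst *L0,
                              unsigned NumElts, unsigned Alignment) {
  VectorType *VecTy = VectorType::get(L0->getType(), NumElts);
  unsigned AS = L0->getPointerAddressSpace();
  Value *Bitcast =
      Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
  return Builder.CreateAlignedLoad(VecTy, Bitcast, Alignment, "wide.load");
}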

@ -2100,8 +2100,8 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
GroupMask, UndefVec, "wide.masked.vec");
}
else
NewLoad = Builder.CreateAlignedLoad(NewPtrs[Part],
Group->getAlignment(), "wide.vec");
NewLoad = Builder.CreateAlignedLoad(VecTy, NewPtrs[Part],
Group->getAlignment(), "wide.vec");
Group->addMetadata(NewLoad);
NewLoads.push_back(NewLoad);
}
@ -2312,7 +2312,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
UndefValue::get(DataTy),
"wide.masked.load");
else
NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
NewLI =
Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
// Add metadata to the load, but setVectorValue to the reverse shuffle.
addMetadata(NewLI, LI);


@ -3136,7 +3136,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
Builder.SetInsertPoint(LI);
PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment());
LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlignment());
Value *NewV = propagateMetadata(V, E->Scalars);
if (!E->ReorderIndices.empty()) {
OrdersType Mask;
@ -3341,7 +3341,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));
unsigned Alignment = LI->getAlignment();
LI = Builder.CreateLoad(VecPtr);
LI = Builder.CreateLoad(VecTy, VecPtr);
if (!Alignment) {
Alignment = DL->getABITypeAlignment(ScalarLoadTy);
}


@ -306,20 +306,18 @@ define amdgpu_kernel void @kern_v3i32(<3 x i32> %arg0) {
; HSA-LABEL: @kern_v3i32(
; HSA-NEXT: [[KERN_V3I32_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(16) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
; HSA-NEXT: [[ARG0_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_V3I32_KERNARG_SEGMENT]], i64 0
; HSA-NEXT: [[ARG0_KERNARG_OFFSET_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG0_KERNARG_OFFSET]] to <3 x i32> addrspace(4)*
; HSA-NEXT: [[TMP1:%.*]] = bitcast <3 x i32> addrspace(4)* [[ARG0_KERNARG_OFFSET_CAST]] to <4 x i32> addrspace(4)*
; HSA-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32> addrspace(4)* [[TMP1]], align 16, !invariant.load !0
; HSA-NEXT: [[ARG0_LOAD:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
; HSA-NEXT: [[ARG0_KERNARG_OFFSET_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG0_KERNARG_OFFSET]] to <4 x i32> addrspace(4)*
; HSA-NEXT: [[TMP:%.*]] = load <4 x i32>, <4 x i32> addrspace(4)* [[ARG0_KERNARG_OFFSET_CAST]], align 16, !invariant.load !0
; HSA-NEXT: [[ARG0_LOAD:%.*]] = shufflevector <4 x i32> [[TMP]], <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
; HSA-NEXT: store <3 x i32> [[ARG0_LOAD]], <3 x i32> addrspace(1)* undef, align 4
; HSA-NEXT: ret void
;
; MESA-LABEL: @kern_v3i32(
; MESA-NEXT: [[KERN_V3I32_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(52) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
; MESA-NEXT: [[ARG0_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_V3I32_KERNARG_SEGMENT]], i64 36
; MESA-NEXT: [[ARG0_KERNARG_OFFSET_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG0_KERNARG_OFFSET]] to <3 x i32> addrspace(4)*
; MESA-NEXT: [[TMP1:%.*]] = bitcast <3 x i32> addrspace(4)* [[ARG0_KERNARG_OFFSET_CAST]] to <4 x i32> addrspace(4)*
; MESA-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32> addrspace(4)* [[TMP1]], align 4, !invariant.load !0
; MESA-NEXT: [[ARG0_LOAD:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
; MESA-NEXT: [[ARG0_KERNARG_OFFSET_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG0_KERNARG_OFFSET]] to <4 x i32> addrspace(4)*
; MESA-NEXT: [[TMP:%.*]] = load <4 x i32>, <4 x i32> addrspace(4)* [[ARG0_KERNARG_OFFSET_CAST]], align 4, !invariant.load !0
; MESA-NEXT: [[ARG0_LOAD:%.*]] = shufflevector <4 x i32> [[TMP]], <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
; MESA-NEXT: store <3 x i32> [[ARG0_LOAD]], <3 x i32> addrspace(1)* undef, align 4
; MESA-NEXT: ret void
;
@ -397,10 +395,9 @@ define amdgpu_kernel void @kern_i32_v3i32(i32 %arg0, <3 x i32> %arg1) {
; HSA-NEXT: [[ARG0_KERNARG_OFFSET_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG0_KERNARG_OFFSET]] to i32 addrspace(4)*
; HSA-NEXT: [[ARG0_LOAD:%.*]] = load i32, i32 addrspace(4)* [[ARG0_KERNARG_OFFSET_CAST]], align 16, !invariant.load !0
; HSA-NEXT: [[ARG1_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_I32_V3I32_KERNARG_SEGMENT]], i64 16
; HSA-NEXT: [[ARG1_KERNARG_OFFSET_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG1_KERNARG_OFFSET]] to <3 x i32> addrspace(4)*
; HSA-NEXT: [[TMP1:%.*]] = bitcast <3 x i32> addrspace(4)* [[ARG1_KERNARG_OFFSET_CAST]] to <4 x i32> addrspace(4)*
; HSA-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32> addrspace(4)* [[TMP1]], align 16, !invariant.load !0
; HSA-NEXT: [[ARG1_LOAD:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
; HSA-NEXT: [[ARG1_KERNARG_OFFSET_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG1_KERNARG_OFFSET]] to <4 x i32> addrspace(4)*
; HSA-NEXT: [[TMP:%.*]] = load <4 x i32>, <4 x i32> addrspace(4)* [[ARG1_KERNARG_OFFSET_CAST]], align 16, !invariant.load !0
; HSA-NEXT: [[ARG1_LOAD:%.*]] = shufflevector <4 x i32> [[TMP]], <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
; HSA-NEXT: store i32 [[ARG0_LOAD]], i32 addrspace(1)* undef
; HSA-NEXT: store <3 x i32> [[ARG1_LOAD]], <3 x i32> addrspace(1)* undef, align 4
; HSA-NEXT: ret void
@ -411,10 +408,9 @@ define amdgpu_kernel void @kern_i32_v3i32(i32 %arg0, <3 x i32> %arg1) {
; MESA-NEXT: [[ARG0_KERNARG_OFFSET_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG0_KERNARG_OFFSET]] to i32 addrspace(4)*
; MESA-NEXT: [[ARG0_LOAD:%.*]] = load i32, i32 addrspace(4)* [[ARG0_KERNARG_OFFSET_CAST]], align 4, !invariant.load !0
; MESA-NEXT: [[ARG1_KERNARG_OFFSET:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_I32_V3I32_KERNARG_SEGMENT]], i64 52
; MESA-NEXT: [[ARG1_KERNARG_OFFSET_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG1_KERNARG_OFFSET]] to <3 x i32> addrspace(4)*
; MESA-NEXT: [[TMP1:%.*]] = bitcast <3 x i32> addrspace(4)* [[ARG1_KERNARG_OFFSET_CAST]] to <4 x i32> addrspace(4)*
; MESA-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32> addrspace(4)* [[TMP1]], align 4, !invariant.load !0
; MESA-NEXT: [[ARG1_LOAD:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
; MESA-NEXT: [[ARG1_KERNARG_OFFSET_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG1_KERNARG_OFFSET]] to <4 x i32> addrspace(4)*
; MESA-NEXT: [[TMP:%.*]] = load <4 x i32>, <4 x i32> addrspace(4)* [[ARG1_KERNARG_OFFSET_CAST]], align 4, !invariant.load !0
; MESA-NEXT: [[ARG1_LOAD:%.*]] = shufflevector <4 x i32> [[TMP]], <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
; MESA-NEXT: store i32 [[ARG0_LOAD]], i32 addrspace(1)* undef
; MESA-NEXT: store <3 x i32> [[ARG1_LOAD]], <3 x i32> addrspace(1)* undef, align 4
; MESA-NEXT: ret void


@ -879,7 +879,8 @@ CleanupAndPrepareModules(BugDriver &BD, std::unique_ptr<Module> Test,
BasicBlock::Create(F->getContext(), "lookupfp", FuncWrapper);
// Check to see if we already looked up the value.
Value *CachedVal = new LoadInst(Cache, "fpcache", EntryBB);
Value *CachedVal =
new LoadInst(F->getType(), Cache, "fpcache", EntryBB);
Value *IsNull = new ICmpInst(*EntryBB, ICmpInst::ICMP_EQ, CachedVal,
NullPtr, "isNull");
BranchInst::Create(LookupBB, DoCallBB, IsNull, EntryBB);


@ -174,7 +174,7 @@ TEST_F(AliasAnalysisTest, getModRefInfo) {
auto *Addr = ConstantPointerNull::get(PtrType);
auto *Store1 = new StoreInst(Value, Addr, BB);
auto *Load1 = new LoadInst(Addr, "load", BB);
auto *Load1 = new LoadInst(IntType, Addr, "load", BB);
auto *Add1 = BinaryOperator::CreateAdd(Value, Value, "add", BB);
auto *VAArg1 = new VAArgInst(Addr, PtrType, "vaarg", BB);
auto *CmpXChg1 = new AtomicCmpXchgInst(


@ -92,7 +92,7 @@ TEST_F(MemorySSATest, CreateALoad) {
MemorySSAUpdater Updater(&MSSA);
// Add the load
B.SetInsertPoint(Merge);
LoadInst *LoadInst = B.CreateLoad(PointerArg);
LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
// MemoryPHI should already exist.
MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
@ -138,7 +138,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
// Add the load
B.SetInsertPoint(Merge, Merge->begin());
LoadInst *FirstLoad = B.CreateLoad(PointerArg);
LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
// MemoryPHI should not already exist.
MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
@ -162,7 +162,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
// We don't touch existing loads, so we need to create a new one to get a phi
// Add the second load
B.SetInsertPoint(Merge, Merge->begin());
LoadInst *SecondLoad = B.CreateLoad(PointerArg);
LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
// MemoryPHI should not already exist.
MP = MSSA.getMemoryAccess(Merge);
@ -228,7 +228,7 @@ TEST_F(MemorySSATest, CreateALoadUpdater) {
// Add the load
B.SetInsertPoint(Merge, Merge->begin());
LoadInst *LoadInst = B.CreateLoad(PointerArg);
LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
// MemoryPHI should not already exist.
MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
@ -262,7 +262,7 @@ TEST_F(MemorySSATest, SinkLoad) {
// Load in left block
B.SetInsertPoint(Left, Left->begin());
LoadInst *LoadInst1 = B.CreateLoad(PointerArg);
LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg);
// Store in merge block
B.SetInsertPoint(Merge, Merge->begin());
B.CreateStore(B.getInt8(16), PointerArg);
@ -310,7 +310,7 @@ TEST_F(MemorySSATest, MoveAStore) {
BranchInst::Create(Merge, Left);
BranchInst::Create(Merge, Right);
B.SetInsertPoint(Merge);
B.CreateLoad(PointerArg);
B.CreateLoad(B.getInt8Ty(), PointerArg);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAUpdater Updater(&MSSA);
@ -346,7 +346,7 @@ TEST_F(MemorySSATest, MoveAStoreUpdater) {
BranchInst::Create(Merge, Left);
BranchInst::Create(Merge, Right);
B.SetInsertPoint(Merge);
auto *MergeLoad = B.CreateLoad(PointerArg);
auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAUpdater Updater(&MSSA);
@ -392,7 +392,7 @@ TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
BranchInst::Create(Merge, Left);
BranchInst::Create(Merge, Right);
B.SetInsertPoint(Merge);
auto *MergeLoad = B.CreateLoad(PointerArg);
auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAUpdater Updater(&MSSA);
@ -436,7 +436,7 @@ TEST_F(MemorySSATest, MoveAStoreAllAround) {
BranchInst::Create(Merge, Left);
BranchInst::Create(Merge, Right);
B.SetInsertPoint(Merge);
auto *MergeLoad = B.CreateLoad(PointerArg);
auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAUpdater Updater(&MSSA);
@ -490,7 +490,7 @@ TEST_F(MemorySSATest, RemoveAPhi) {
BranchInst::Create(Merge, Left);
BranchInst::Create(Merge, Right);
B.SetInsertPoint(Merge);
LoadInst *LoadInst = B.CreateLoad(PointerArg);
LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
@ -535,7 +535,7 @@ TEST_F(MemorySSATest, RemoveMemoryAccess) {
BranchInst::Create(Merge, Left);
BranchInst::Create(Merge, Right);
B.SetInsertPoint(Merge);
LoadInst *LoadInst = B.CreateLoad(PointerArg);
LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
@ -631,7 +631,7 @@ TEST_F(MemorySSATest, TestStoreAndLoad) {
Type *Int8 = Type::getInt8Ty(C);
Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
Instruction *LI = B.CreateLoad(Alloca);
Instruction *LI = B.CreateLoad(Int8, Alloca);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
@ -715,12 +715,12 @@ TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
B.SetInsertPoint(IfThen);
Instruction *FirstStore = B.CreateStore(Zero, AllocA);
B.CreateStore(Zero, AllocB);
Instruction *ALoad0 = B.CreateLoad(AllocA, "");
Instruction *ALoad0 = B.CreateLoad(Int8, AllocA, "");
Instruction *BStore = B.CreateStore(Zero, AllocB);
// Due to use optimization/etc. we make a store to A, which is removed after
// we build MSSA. This helps keep the test case simple-ish.
Instruction *KillStore = B.CreateStore(Zero, AllocA);
Instruction *ALoad = B.CreateLoad(AllocA, "");
Instruction *ALoad = B.CreateLoad(Int8, AllocA, "");
B.CreateBr(IfEnd);
B.SetInsertPoint(IfEnd);
@ -771,7 +771,7 @@ TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
Value *AllocA = B.CreateAlloca(Int8, One, "");
Instruction *Store = B.CreateStore(One, AllocA);
Instruction *Load = B.CreateLoad(AllocA);
Instruction *Load = B.CreateLoad(Int8, AllocA);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
@ -800,7 +800,7 @@ TEST_F(MemorySSATest, WalkerReopt) {
Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
Instruction *LIA = B.CreateLoad(AllocaA);
Instruction *LIA = B.CreateLoad(Int8, AllocaA);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
@ -834,11 +834,11 @@ TEST_F(MemorySSATest, MoveAboveMemoryDef) {
StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
LoadInst *LoadB = B.CreateLoad(B_);
LoadInst *LoadB = B.CreateLoad(Int8, B_);
StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
LoadInst *LoadC = B.CreateLoad(C);
LoadInst *LoadC = B.CreateLoad(Int8, C);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
@ -902,7 +902,7 @@ TEST_F(MemorySSATest, Irreducible) {
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAUpdater Updater(&MSSA);
// Create the load memory access
LoadInst *LoadInst = B.CreateLoad(FirstArg);
LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), FirstArg);
MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
Updater.insertUse(LoadAccess);
@ -1010,15 +1010,15 @@ TEST_F(MemorySSATest, TestLoadMustAlias) {
B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
// Check load from LOE
LoadInst *LA1 = B.CreateLoad(AllocaA, "");
LoadInst *LA1 = B.CreateLoad(Int8, AllocaA, "");
// Check load alias cached for second load
LoadInst *LA2 = B.CreateLoad(AllocaA, "");
LoadInst *LA2 = B.CreateLoad(Int8, AllocaA, "");
B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
// Check load from store/def
LoadInst *LA3 = B.CreateLoad(AllocaA, "");
LoadInst *LA3 = B.CreateLoad(Int8, AllocaA, "");
// Check load alias cached for second load
LoadInst *LA4 = B.CreateLoad(AllocaA, "");
LoadInst *LA4 = B.CreateLoad(Int8, AllocaA, "");
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
@ -1103,13 +1103,13 @@ TEST_F(MemorySSATest, TestLoadMayAlias) {
Argument *PointerA = &*ArgIt;
Argument *PointerB = &*(++ArgIt);
B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
LoadInst *LA1 = B.CreateLoad(PointerA, "");
LoadInst *LA1 = B.CreateLoad(Int8, PointerA, "");
B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
LoadInst *LB1 = B.CreateLoad(PointerB, "");
LoadInst *LB1 = B.CreateLoad(Int8, PointerB, "");
B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
LoadInst *LA2 = B.CreateLoad(PointerA, "");
LoadInst *LA2 = B.CreateLoad(Int8, PointerA, "");
B.CreateStore(ConstantInt::get(Int8, 0), PointerB);
LoadInst *LB2 = B.CreateLoad(PointerB, "");
LoadInst *LB2 = B.CreateLoad(Int8, PointerB, "");
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;


@ -44,13 +44,13 @@ TEST(OrderedInstructionsTest, DominanceTest) {
BasicBlock *BBX = BasicBlock::Create(Ctx, "bbx", F);
B.SetInsertPoint(BBX);
Argument *PointerArg = &*F->arg_begin();
LoadInst *LoadInstX = B.CreateLoad(PointerArg);
LoadInst *LoadInstY = B.CreateLoad(PointerArg);
LoadInst *LoadInstX = B.CreateLoad(B.getInt8Ty(), PointerArg);
LoadInst *LoadInstY = B.CreateLoad(B.getInt8Ty(), PointerArg);
// Create BBY with 1 load.
BasicBlock *BBY = BasicBlock::Create(Ctx, "bby", F);
B.SetInsertPoint(BBY);
LoadInst *LoadInstZ = B.CreateLoad(PointerArg);
LoadInst *LoadInstZ = B.CreateLoad(B.getInt8Ty(), PointerArg);
B.CreateRet(LoadInstZ);
std::unique_ptr<DominatorTree> DT(new DominatorTree(*F));
OrderedInstructions OI(&*DT);


@ -37,10 +37,10 @@ TEST(PhiValuesTest, SimplePhi) {
BranchInst::Create(Then, If);
BranchInst::Create(Then, Else);
Value *Val1 = new LoadInst(UndefValue::get(I32PtrTy), "val1", Entry);
Value *Val2 = new LoadInst(UndefValue::get(I32PtrTy), "val2", Entry);
Value *Val3 = new LoadInst(UndefValue::get(I32PtrTy), "val3", Entry);
Value *Val4 = new LoadInst(UndefValue::get(I32PtrTy), "val4", Entry);
Value *Val1 = new LoadInst(I32Ty, UndefValue::get(I32PtrTy), "val1", Entry);
Value *Val2 = new LoadInst(I32Ty, UndefValue::get(I32PtrTy), "val2", Entry);
Value *Val3 = new LoadInst(I32Ty, UndefValue::get(I32PtrTy), "val3", Entry);
Value *Val4 = new LoadInst(I32Ty, UndefValue::get(I32PtrTy), "val4", Entry);
PHINode *Phi1 = PHINode::Create(I32Ty, 2, "phi1", Then);
Phi1->addIncoming(Val1, If);
@ -110,10 +110,10 @@ TEST(PhiValuesTest, DependentPhi) {
BranchInst::Create(End, If2);
BranchInst::Create(End, Else2);
Value *Val1 = new LoadInst(UndefValue::get(I32PtrTy), "val1", Entry);
Value *Val2 = new LoadInst(UndefValue::get(I32PtrTy), "val2", Entry);
Value *Val3 = new LoadInst(UndefValue::get(I32PtrTy), "val3", Entry);
Value *Val4 = new LoadInst(UndefValue::get(I32PtrTy), "val4", Entry);
Value *Val1 = new LoadInst(I32Ty, UndefValue::get(I32PtrTy), "val1", Entry);
Value *Val2 = new LoadInst(I32Ty, UndefValue::get(I32PtrTy), "val2", Entry);
Value *Val3 = new LoadInst(I32Ty, UndefValue::get(I32PtrTy), "val3", Entry);
Value *Val4 = new LoadInst(I32Ty, UndefValue::get(I32PtrTy), "val4", Entry);
PHINode *Phi1 = PHINode::Create(I32Ty, 2, "phi1", Then);
Phi1->addIncoming(Val1, If1);


@ -407,9 +407,11 @@ TEST_F(ScalarEvolutionsTest, CompareValueComplexity) {
const int ValueDepth = 10;
for (int i = 0; i < ValueDepth; i++) {
X = new LoadInst(new IntToPtrInst(X, IntPtrPtrTy, "", EntryBB), "",
X = new LoadInst(IntPtrTy, new IntToPtrInst(X, IntPtrPtrTy, "", EntryBB),
"",
/*isVolatile*/ false, EntryBB);
Y = new LoadInst(new IntToPtrInst(Y, IntPtrPtrTy, "", EntryBB), "",
Y = new LoadInst(IntPtrTy, new IntToPtrInst(Y, IntPtrPtrTy, "", EntryBB),
"",
/*isVolatile*/ false, EntryBB);
}


@ -381,7 +381,7 @@ TEST_F(SparsePropagationTest, FunctionDefined) {
BasicBlock *Else = BasicBlock::Create(Context, "else", F);
F->arg_begin()->setName("cond");
Builder.SetInsertPoint(If);
LoadInst *Cond = Builder.CreateLoad(F->arg_begin());
LoadInst *Cond = Builder.CreateLoad(Type::getInt1Ty(Context), F->arg_begin());
Builder.CreateCondBr(Cond, Then, Else);
Builder.SetInsertPoint(Then);
Builder.CreateRet(Builder.getInt64(1));
@ -421,7 +421,7 @@ TEST_F(SparsePropagationTest, FunctionOverDefined) {
BasicBlock *Else = BasicBlock::Create(Context, "else", F);
F->arg_begin()->setName("cond");
Builder.SetInsertPoint(If);
LoadInst *Cond = Builder.CreateLoad(F->arg_begin());
LoadInst *Cond = Builder.CreateLoad(Type::getInt1Ty(Context), F->arg_begin());
Builder.CreateCondBr(Cond, Then, Else);
Builder.SetInsertPoint(Then);
Builder.CreateRet(Builder.getInt64(0));


@ -211,10 +211,10 @@ TEST_F(MCJITMultipleModuleTest, two_module_global_variables_case) {
GVB = insertGlobalInt32(B.get(), "GVB", initialNum);
FA = startFunction(A.get(),
FunctionType::get(Builder.getInt32Ty(), {}, false), "FA");
endFunctionWithRet(FA, Builder.CreateLoad(GVA));
endFunctionWithRet(FA, Builder.CreateLoad(Builder.getInt32Ty(), GVA));
FB = startFunction(B.get(),
FunctionType::get(Builder.getInt32Ty(), {}, false), "FB");
endFunctionWithRet(FB, Builder.CreateLoad(GVB));
endFunctionWithRet(FB, Builder.CreateLoad(Builder.getInt32Ty(), GVB));
GVC = insertGlobalInt32(B.get(), "GVC", initialNum);
GVC->setLinkage(GlobalValue::InternalLinkage);


@ -101,7 +101,7 @@ TEST_F(MCJITTest, return_global) {
Function *ReturnGlobal =
startFunction(M.get(), FunctionType::get(Builder.getInt32Ty(), {}, false),
"ReturnGlobal");
Value *ReadGlobal = Builder.CreateLoad(GV);
Value *ReadGlobal = Builder.CreateLoad(Builder.getInt32Ty(), GV);
endFunctionWithRet(ReturnGlobal, ReadGlobal);
createJIT(std::move(M));


@ -54,7 +54,7 @@ TEST_F(IRBuilderTest, Intrinsics) {
CallInst *Call;
IntrinsicInst *II;
V = Builder.CreateLoad(GV);
V = Builder.CreateLoad(GV->getValueType(), GV);
I = cast<Instruction>(Builder.CreateFAdd(V, V));
I->setHasNoInfs(true);
I->setHasNoNaNs(false);
@ -207,7 +207,7 @@ TEST_F(IRBuilderTest, FastMathFlags) {
Value *F, *FC;
Instruction *FDiv, *FAdd, *FCmp, *FCall;
F = Builder.CreateLoad(GV);
F = Builder.CreateLoad(GV->getValueType(), GV);
F = Builder.CreateFAdd(F, F);
EXPECT_FALSE(Builder.getFastMathFlags().any());
@ -394,7 +394,7 @@ TEST_F(IRBuilderTest, WrapFlags) {
// Test instructions.
GlobalVariable *G = new GlobalVariable(*M, Builder.getInt32Ty(), true,
GlobalValue::ExternalLinkage, nullptr);
Value *V = Builder.CreateLoad(G);
Value *V = Builder.CreateLoad(G->getValueType(), G);
EXPECT_TRUE(
cast<BinaryOperator>(Builder.CreateNSWAdd(V, V))->hasNoSignedWrap());
EXPECT_TRUE(
@ -461,7 +461,7 @@ TEST_F(IRBuilderTest, RAIIHelpersTest) {
EXPECT_FALSE(Builder.getFastMathFlags().allowReciprocal());
EXPECT_EQ(FPMathA, Builder.getDefaultFPMathTag());
Value *F = Builder.CreateLoad(GV);
Value *F = Builder.CreateLoad(GV->getValueType(), GV);
{
IRBuilder<>::InsertPointGuard Guard(Builder);


@ -397,7 +397,7 @@ TEST_F(PatternMatchTest, LoadStoreOps) {
// store i32 42, i32* %0
Value *Alloca = IRB.CreateAlloca(IRB.getInt32Ty());
Value *LoadInst = IRB.CreateLoad(Alloca);
Value *LoadInst = IRB.CreateLoad(IRB.getInt32Ty(), Alloca);
Value *FourtyTwo = IRB.getInt32(42);
Value *StoreInst = IRB.CreateStore(FourtyTwo, Alloca);
Value *MatchLoad, *MatchStoreVal, *MatchStorePointer;


@ -83,7 +83,7 @@ TEST_F(LinkModuleTest, BlockAddress) {
GEPIndices.push_back(&*F->arg_begin());
Value *GEP = Builder.CreateGEP(AT, GV, GEPIndices, "switch.gep");
Value *Load = Builder.CreateLoad(GEP, "switch.load");
Value *Load = Builder.CreateLoad(AT->getElementType(), GEP, "switch.load");
Builder.CreateRet(Load);


@ -908,7 +908,8 @@ TEST_F(LoopPassManagerTest, LoopChildInsertion) {
ASSERT_THAT(BBI, F.end());
auto CreateCondBr = [&](BasicBlock *TrueBB, BasicBlock *FalseBB,
const char *Name, BasicBlock *BB) {
auto *Cond = new LoadInst(&Ptr, Name, /*isVolatile*/ true, BB);
auto *Cond = new LoadInst(Type::getInt1Ty(Context), &Ptr, Name,
/*isVolatile*/ true, BB);
BranchInst::Create(TrueBB, FalseBB, Cond, BB);
};
@ -1110,7 +1111,8 @@ TEST_F(LoopPassManagerTest, LoopPeerInsertion) {
ASSERT_THAT(BBI, F.end());
auto CreateCondBr = [&](BasicBlock *TrueBB, BasicBlock *FalseBB,
const char *Name, BasicBlock *BB) {
auto *Cond = new LoadInst(&Ptr, Name, /*isVolatile*/ true, BB);
auto *Cond = new LoadInst(Type::getInt1Ty(Context), &Ptr, Name,
/*isVolatile*/ true, BB);
BranchInst::Create(TrueBB, FalseBB, Cond, BB);
};
@ -1503,8 +1505,9 @@ TEST_F(LoopPassManagerTest, LoopDeletion) {
auto *NewLoop03BB =
BasicBlock::Create(Context, "loop.0.3", &F, &Loop0LatchBB);
BranchInst::Create(NewLoop03BB, NewLoop03PHBB);
auto *Cond = new LoadInst(&Ptr, "cond.0.3", /*isVolatile*/ true,
NewLoop03BB);
auto *Cond =
new LoadInst(Type::getInt1Ty(Context), &Ptr, "cond.0.3",
/*isVolatile*/ true, NewLoop03BB);
BranchInst::Create(&Loop0LatchBB, NewLoop03BB, Cond, NewLoop03BB);
Loop02PHBB.getTerminator()->replaceUsesOfWith(&Loop0LatchBB,
NewLoop03PHBB);


@ -31,7 +31,7 @@ struct TestFunction {
BB = BasicBlock::Create(Ctx, "", F);
B.SetInsertPoint(BB);
Argument *PointerArg = &*F->arg_begin();
LoadInst *LoadInst = B.CreateLoad(PointerArg);
LoadInst *LoadInst = B.CreateLoad(T, PointerArg);
C = B.getInt8(addVal);
I = cast<Instruction>(B.CreateAdd(LoadInst, C));
B.CreateRet(I);