[globalisel] Rename G_GEP to G_PTR_ADD

Summary:
G_GEP is rather poorly named. It's a simple pointer+scalar addition and
doesn't support any of the complexities of getelementptr. I therefore
propose that we rename it. There's already a G_PTR_MASK, so let's follow that
convention and go with G_PTR_ADD.
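
To illustrate what the opcode does (a sketch, not part of this patch): after the rename, the byte-offset form of a single variable getelementptr index is built roughly as below. The helper name and parameters are invented for the example; the builder calls mirror what the IRTranslator emits.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

// Hypothetical helper: scale an index by the element size and add the byte
// offset to the base pointer. The final operation is what used to be emitted
// as G_GEP and is now emitted as G_PTR_ADD.
static Register emitScaledPtrAdd(MachineIRBuilder &MIB, Register Base,
                                 Register Idx, uint64_t EltSizeInBytes,
                                 LLT PtrTy, LLT OffsetTy) {
  auto Scale = MIB.buildConstant(OffsetTy, EltSizeInBytes); // G_CONSTANT
  auto ByteOff = MIB.buildMul(OffsetTy, Idx, Scale);        // G_MUL
  return MIB.buildPtrAdd(PtrTy, Base, ByteOff).getReg(0);   // G_PTR_ADD
}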

Reviewers: volkan, aditya_nandakumar, bogner, rovka, arsenm

Subscribers: sdardis, jvesely, wdng, nhaehnle, hiraditya, jrtc27, atanasyan, arphaman, Petar.Avramovic, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D69734
Daniel Sanders 2019-11-01 13:18:00 -07:00
parent de56a89072
commit e74c5b9661
125 changed files with 7224 additions and 7222 deletions


@ -134,7 +134,7 @@ public:
///
/// For example (pre-indexed):
///
/// $addr = G_GEP $base, $offset
/// $addr = G_PTR_ADD $base, $offset
/// [...]
/// $val = G_LOAD $addr
/// [...]
@ -150,7 +150,7 @@ public:
///
/// G_STORE $val, $base
/// [...]
/// $addr = G_GEP $base, $offset
/// $addr = G_PTR_ADD $base, $offset
/// [...]
/// $whatever = COPY $addr
///


@ -487,7 +487,7 @@ protected:
bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
const MachineRegisterInfo &MRI) const;
/// Return true if the specified operand is a G_GEP with a G_CONSTANT on the
/// Return true if the specified operand is a G_PTR_ADD with a G_CONSTANT on the
/// right-hand side. GlobalISel's separation of pointer and integer types
/// means that we don't need to worry about G_OR with equivalent semantics.
bool isBaseWithConstantOffset(const MachineOperand &Root,


@ -1178,7 +1178,7 @@ private:
/// {65, NarrowScalar} // bit sizes [65, +inf[
/// });
/// It may be that only 64-bit pointers are supported on your target:
/// setPointerAction(G_GEP, 0, LLT:pointer(1),
/// setPointerAction(G_PTR_ADD, 0, LLT:pointer(1),
/// {{1, Unsupported}, // bit sizes [ 1, 63[
/// {64, Legal}, // bit sizes [64, 65[
/// {65, Unsupported}, // bit sizes [65, +inf[
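
For a concrete picture of how a target declares legality for the renamed opcode with the current rule-builder API (rather than the setPointerAction form documented above), this is essentially the AArch64 rule updated later in this commit, shown as a sketch inside a target's legalizer-info constructor:

// Inside a target's LegalizerInfo constructor (illustrative; mirrors the
// AArch64 change further down in this commit).
const LLT p0 = LLT::pointer(0, 64);
const LLT s64 = LLT::scalar(64);

getActionDefinitionsBuilder(G_PTR_ADD)
    .legalFor({{p0, s64}})      // p0 base/result with an s64 offset is legal
    .clampScalar(1, s64, s64);  // widen or narrow other offset sizes to s64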


@ -404,9 +404,9 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV);
/// Build and insert \p Res = G_GEP \p Op0, \p Op1
/// Build and insert \p Res = G_PTR_ADD \p Op0, \p Op1
///
/// G_GEP adds \p Op1 bytes to the pointer specified by \p Op0,
/// G_PTR_ADD adds \p Op1 bytes to the pointer specified by \p Op0,
/// storing the resulting pointer in \p Res.
///
/// \pre setBasicBlock or setMI must have been called.
@ -415,28 +415,28 @@ public:
/// \pre \p Op1 must be a generic virtual register with scalar type.
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildGEP(const DstOp &Res, const SrcOp &Op0,
const SrcOp &Op1);
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
const SrcOp &Op1);
/// Materialize and insert \p Res = G_GEP \p Op0, (G_CONSTANT \p Value)
/// Materialize and insert \p Res = G_PTR_ADD \p Op0, (G_CONSTANT \p Value)
///
/// G_GEP adds \p Value bytes to the pointer specified by \p Op0,
/// G_PTR_ADD adds \p Value bytes to the pointer specified by \p Op0,
/// storing the resulting pointer in \p Res. If \p Value is zero then no
/// G_GEP or G_CONSTANT will be created and \pre Op0 will be assigned to
/// G_PTR_ADD or G_CONSTANT will be created and \pre Op0 will be assigned to
/// \p Res.
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p Op0 must be a generic virtual register with pointer type.
/// \pre \p ValueTy must be a scalar type.
/// \pre \p Res must be 0. This is to detect confusion between
/// materializeGEP() and buildGEP().
/// materializePtrAdd() and buildPtrAdd().
/// \post \p Res will either be a new generic virtual register of the same
/// type as \p Op0 or \p Op0 itself.
///
/// \return a MachineInstrBuilder for the newly created instruction.
Optional<MachineInstrBuilder> materializeGEP(Register &Res, Register Op0,
const LLT &ValueTy,
uint64_t Value);
Optional<MachineInstrBuilder> materializePtrAdd(Register &Res, Register Op0,
const LLT &ValueTy,
uint64_t Value);
/// Build and insert \p Res = G_PTR_MASK \p Op0, \p NumBits
///
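
A usage sketch of the two renamed entry points; the function names below are invented for the example, and the behaviour follows the doc comments above.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

// buildPtrAdd: the offset must already be a scalar vreg; a G_PTR_ADD is
// always emitted, even for a zero offset.
static Register fieldAddr(MachineIRBuilder &MIB, Register Base, LLT PtrTy,
                          uint64_t ByteOffset) {
  auto Off = MIB.buildConstant(LLT::scalar(64), ByteOffset);
  return MIB.buildPtrAdd(PtrTy, Base, Off).getReg(0);
}

// materializePtrAdd: for a zero offset no G_PTR_ADD or G_CONSTANT is created;
// Addr is simply assigned Base and the returned Optional is None.
static Register fieldAddrFolded(MachineIRBuilder &MIB, Register Base,
                                uint64_t ByteOffset) {
  Register Addr; // must be left invalid on entry; see the \pre above
  MIB.materializePtrAdd(Addr, Base, LLT::scalar(64), ByteOffset);
  return Addr;
}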


@ -524,7 +524,7 @@ HANDLE_TARGET_OPCODE(G_FMINIMUM)
HANDLE_TARGET_OPCODE(G_FMAXIMUM)
/// Generic pointer offset
HANDLE_TARGET_OPCODE(G_GEP)
HANDLE_TARGET_OPCODE(G_PTR_ADD)
/// Clear the specified number of low bits in a pointer. This rounds the value
/// *down* to the given alignment.


@ -330,7 +330,7 @@ def G_SELECT : GenericInstruction {
}
// Generic pointer offset.
def G_GEP : GenericInstruction {
def G_PTR_ADD : GenericInstruction {
let OutOperandList = (outs type0:$dst);
let InOperandList = (ins type0:$src1, type1:$src2);
let hasSideEffects = 0;


@ -52,7 +52,7 @@ bool CSEConfigFull::shouldCSEOpc(unsigned Opc) {
case TargetOpcode::G_ANYEXT:
case TargetOpcode::G_UNMERGE_VALUES:
case TargetOpcode::G_TRUNC:
case TargetOpcode::G_GEP:
case TargetOpcode::G_PTR_ADD:
return true;
}
return false;


@ -571,7 +571,7 @@ bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
for (auto &Use : MRI.use_instructions(Base)) {
if (Use.getOpcode() != TargetOpcode::G_GEP)
if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
continue;
Offset = Use.getOperand(2).getReg();
@ -597,8 +597,8 @@ bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
// forming an indexed one.
bool MemOpDominatesAddrUses = true;
for (auto &GEPUse : MRI.use_instructions(Use.getOperand(0).getReg())) {
if (!dominates(MI, GEPUse)) {
for (auto &PtrAddUse : MRI.use_instructions(Use.getOperand(0).getReg())) {
if (!dominates(MI, PtrAddUse)) {
MemOpDominatesAddrUses = false;
break;
}
@ -631,7 +631,7 @@ bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
#endif
Addr = MI.getOperand(1).getReg();
MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_GEP, Addr, MRI);
MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
if (!AddrDef || MRI.hasOneUse(Addr))
return false;
@ -667,8 +667,8 @@ bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
}
}
// FIXME: check whether all uses of the base pointer are constant GEPs. That
// might allow us to end base's liveness here by adjusting the constant.
// FIXME: check whether all uses of the base pointer are constant PtrAdds.
// That might allow us to end base's liveness here by adjusting the constant.
for (auto &UseMI : MRI.use_instructions(Addr)) {
if (!dominates(MI, UseMI)) {
@ -1016,7 +1016,7 @@ bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val
if (DstOff != 0) {
auto Offset =
MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
Ptr = MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
}
MIB.buildStore(Value, Ptr, *StoreMMO);
@ -1121,13 +1121,13 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
if (CurrOffset != 0) {
Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
.getReg(0);
LoadPtr = MIB.buildGEP(PtrTy, Src, Offset).getReg(0);
LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
}
auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
// Create the store.
Register StorePtr =
CurrOffset == 0 ? Dst : MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
MIB.buildStore(LdVal, StorePtr, *StoreMMO);
CurrOffset += CopyTy.getSizeInBytes();
Size -= CopyTy.getSizeInBytes();
@ -1218,7 +1218,7 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
if (CurrOffset != 0) {
auto Offset =
MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
LoadPtr = MIB.buildGEP(PtrTy, Src, Offset).getReg(0);
LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
}
LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
CurrOffset += CopyTy.getSizeInBytes();
@ -1235,7 +1235,7 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
if (CurrOffset != 0) {
auto Offset =
MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
StorePtr = MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
}
MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
CurrOffset += CopyTy.getSizeInBytes();


@ -179,8 +179,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
Known.Zero = KnownZeroOut;
break;
}
case TargetOpcode::G_GEP: {
// G_GEP is like G_ADD. FIXME: Is this true for all targets?
case TargetOpcode::G_PTR_ADD: {
// G_PTR_ADD is like G_ADD. FIXME: Is this true for all targets?
LLT Ty = MRI.getType(MI.getOperand(1).getReg());
if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
break;
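
The comment above says G_PTR_ADD participates in known-bits reasoning just like an integer add. A minimal sketch of that rule, assuming KnownBits::computeForAddSub is available with this signature (the real code also bails out for non-integral address spaces, as shown in the hunk):

#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Combine the known bits of the base pointer and the offset exactly as an
// integer addition would.
static KnownBits knownBitsOfPtrAdd(const KnownBits &Base,
                                   const KnownBits &Offset) {
  return KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/false, Base,
                                     Offset);
}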


@ -885,7 +885,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
for (unsigned i = 0; i < Regs.size(); ++i) {
Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
unsigned BaseAlign = getMemOpAlignment(LI);
@ -926,7 +926,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
for (unsigned i = 0; i < Vals.size(); ++i) {
Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
unsigned BaseAlign = getMemOpAlignment(SI);
@ -1080,8 +1080,8 @@ bool IRTranslator::translateGetElementPtr(const User &U,
if (Offset != 0) {
LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
BaseReg =
MIRBuilder.buildGEP(PtrTy, BaseReg, OffsetMIB.getReg(0)).getReg(0);
BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
.getReg(0);
Offset = 0;
}
@ -1100,14 +1100,14 @@ bool IRTranslator::translateGetElementPtr(const User &U,
} else
GepOffsetReg = IdxReg;
BaseReg = MIRBuilder.buildGEP(PtrTy, BaseReg, GepOffsetReg).getReg(0);
BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
}
}
if (Offset != 0) {
auto OffsetMIB =
MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
return true;
}


@ -60,7 +60,7 @@ bool InstructionSelector::isBaseWithConstantOffset(
return false;
MachineInstr *RootI = MRI.getVRegDef(Root.getReg());
if (RootI->getOpcode() != TargetOpcode::G_GEP)
if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
return false;
MachineOperand &RHS = RootI->getOperand(2);


@ -1748,8 +1748,8 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
Observer.changedInstr(MI);
return Legalized;
case TargetOpcode::G_GEP:
assert(TypeIdx == 1 && "unable to legalize pointer of GEP");
case TargetOpcode::G_PTR_ADD:
assert(TypeIdx == 1 && "unable to legalize pointer of G_PTR_ADD");
Observer.changingInstr(MI);
widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
Observer.changedInstr(MI);
@ -2083,8 +2083,9 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
auto OffsetCst =
MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
Register GEPReg = MRI.createGenericVirtualRegister(PtrTy);
auto SmallPtr = MIRBuilder.buildGEP(GEPReg, PtrReg, OffsetCst.getReg(0));
Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
auto SmallPtr =
MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst.getReg(0));
auto SmallLoad = MIRBuilder.buildLoad(SmallLdReg, SmallPtr.getReg(0),
*SmallMMO);
@ -2151,12 +2152,13 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
auto ShiftAmt = MIRBuilder.buildConstant(ExtendTy, LargeSplitSize);
auto SmallVal = MIRBuilder.buildLShr(ExtendTy, ExtVal, ShiftAmt);
// Generate the GEP and truncating stores.
// Generate the PtrAdd and truncating stores.
LLT PtrTy = MRI.getType(PtrReg);
auto OffsetCst =
MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
Register GEPReg = MRI.createGenericVirtualRegister(PtrTy);
auto SmallPtr = MIRBuilder.buildGEP(GEPReg, PtrReg, OffsetCst.getReg(0));
Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
auto SmallPtr =
MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst.getReg(0));
MachineFunction &MF = MIRBuilder.getMF();
MachineMemOperand *LargeMMO =
@ -2908,7 +2910,7 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
unsigned ByteOffset = Offset / 8;
Register NewAddrReg;
MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
MIRBuilder.materializePtrAdd(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
MachineMemOperand *NewMMO =
MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
@ -4176,7 +4178,7 @@ LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) {
// Subtract the final alloc from the SP. We use G_PTRTOINT here so we don't
// have to generate an extra instruction to negate the alloc and then use
// G_GEP to add the negative offset.
// G_PTR_ADD to add the negative offset.
auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPTmp, AllocSize);
if (Align) {
APInt AlignMask(IntPtrTy.getSizeInBits(), Align, true);
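
The lowerDynStackAlloc comment above records a design choice; here is a sketch of the two options with hypothetical names (the in-tree lowering takes the integer-domain path):

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

// SPTmp holds the current stack pointer (p0), AllocSize the dynamic size.
static Register subtractAllocFromSP(MachineIRBuilder &MIRBuilder,
                                    Register SPTmp, Register AllocSize,
                                    LLT PtrTy, LLT IntPtrTy) {
  // Integer-domain version: G_PTRTOINT + G_SUB + G_INTTOPTR.
  auto SPInt = MIRBuilder.buildPtrToInt(IntPtrTy, SPTmp);
  auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPInt, AllocSize);
  return MIRBuilder.buildIntToPtr(PtrTy, Alloc).getReg(0);

  // The pointer-domain version would need an extra instruction just to
  // negate the size before it can be fed to G_PTR_ADD:
  //   auto Zero = MIRBuilder.buildConstant(IntPtrTy, 0);
  //   auto Neg  = MIRBuilder.buildSub(IntPtrTy, Zero, AllocSize);
  //   return MIRBuilder.buildPtrAdd(PtrTy, SPTmp, Neg).getReg(0);
}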


@ -219,19 +219,19 @@ void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
assert((Res == Op0) && "type mismatch");
}
MachineInstrBuilder MachineIRBuilder::buildGEP(const DstOp &Res,
const SrcOp &Op0,
const SrcOp &Op1) {
MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
const SrcOp &Op0,
const SrcOp &Op1) {
assert(Res.getLLTTy(*getMRI()).isPointer() &&
Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type");
return buildInstr(TargetOpcode::G_GEP, {Res}, {Op0, Op1});
return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}
Optional<MachineInstrBuilder>
MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
const LLT &ValueTy, uint64_t Value) {
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
const LLT &ValueTy, uint64_t Value) {
assert(Res == 0 && "Res is a result argument");
assert(ValueTy.isScalar() && "invalid offset type");
@ -242,7 +242,7 @@ MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
auto Cst = buildConstant(ValueTy, Value);
return buildGEP(Res, Op0, Cst.getReg(0));
return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
MachineInstrBuilder MachineIRBuilder::buildPtrMask(const DstOp &Res,


@ -1100,7 +1100,7 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
break;
}
case TargetOpcode::G_GEP: {
case TargetOpcode::G_PTR_ADD: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());


@ -160,7 +160,7 @@ struct OutgoingArgHandler : public CallLowering::ValueHandler {
MIRBuilder.buildConstant(OffsetReg, Offset);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MF, Offset);
return AddrReg;


@ -462,7 +462,7 @@ static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
}
} else if (OpSize == 64) {
switch (GenericOpc) {
case TargetOpcode::G_GEP:
case TargetOpcode::G_PTR_ADD:
return AArch64::ADDXrr;
case TargetOpcode::G_SHL:
return AArch64::LSLVXr;
@ -1765,7 +1765,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
auto *PtrMI = MRI.getVRegDef(PtrReg);
// Try to fold a GEP into our unsigned immediate addressing mode.
if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
if (PtrMI->getOpcode() == TargetOpcode::G_PTR_ADD) {
if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
int64_t Imm = *COff;
const unsigned Size = MemSizeInBits / 8;
@ -1883,7 +1883,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
case TargetOpcode::G_GEP: {
case TargetOpcode::G_PTR_ADD: {
MachineIRBuilder MIRBuilder(I);
emitADD(I.getOperand(0).getReg(), I.getOperand(1), I.getOperand(2),
MIRBuilder);
@ -4189,15 +4189,15 @@ AArch64InstructionSelector::selectAddrModeShiftedExtendXReg(
//
// val = G_CONSTANT LegalShiftVal
// shift = G_SHL off_reg val
// ptr = G_GEP base_reg shift
// ptr = G_PTR_ADD base_reg shift
// x = G_LOAD ptr
//
// And fold it into this addressing mode:
//
// ldr x, [base_reg, off_reg, lsl #LegalShiftVal]
// Check if we can find the G_GEP.
MachineInstr *Gep = getOpcodeDef(TargetOpcode::G_GEP, Root.getReg(), MRI);
// Check if we can find the G_PTR_ADD.
MachineInstr *Gep = getOpcodeDef(TargetOpcode::G_PTR_ADD, Root.getReg(), MRI);
if (!Gep || !isWorthFoldingIntoExtendedReg(*Gep, MRI))
return None;
@ -4275,7 +4275,7 @@ AArch64InstructionSelector::selectAddrModeShiftedExtendXReg(
///
/// Where x2 is the base register, and x3 is an offset register.
///
/// When possible (or profitable) to fold a G_GEP into the address calculation,
/// When possible (or profitable) to fold a G_PTR_ADD into the address calculation,
/// this will do so. Otherwise, it will return None.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeRegisterOffset(
@ -4284,7 +4284,7 @@ AArch64InstructionSelector::selectAddrModeRegisterOffset(
// We need a GEP.
MachineInstr *Gep = MRI.getVRegDef(Root.getReg());
if (!Gep || Gep->getOpcode() != TargetOpcode::G_GEP)
if (!Gep || Gep->getOpcode() != TargetOpcode::G_PTR_ADD)
return None;
// If this is used more than once, let's not bother folding.
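
The register-offset fold described in the comments of this hunk boils down to: find a single-use G_PTR_ADD feeding the memory access and hand its operands to the [base, offset] addressing mode. A rough, hypothetical sketch (the in-tree selectors return ComplexRendererFns and also check profitability):

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

static bool matchRegisterOffset(const MachineOperand &Root,
                                const MachineRegisterInfo &MRI,
                                Register &Base, Register &Offset) {
  // The address must come from a G_PTR_ADD...
  MachineInstr *PtrAdd =
      getOpcodeDef(TargetOpcode::G_PTR_ADD, Root.getReg(), MRI);
  // ...and folding is only worthwhile if its result has a single use.
  if (!PtrAdd || !MRI.hasOneUse(PtrAdd->getOperand(0).getReg()))
    return false;
  Base = PtrAdd->getOperand(1).getReg();
  Offset = PtrAdd->getOperand(2).getReg();
  return true;
}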


@ -104,7 +104,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
.moreElementsToNextPow2(0)
.minScalarSameAs(1, 0);
getActionDefinitionsBuilder(G_GEP)
getActionDefinitionsBuilder(G_PTR_ADD)
.legalFor({{p0, s64}})
.clampScalar(1, s64, s64);
@ -743,7 +743,7 @@ bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
// Realign the list to the actual required alignment.
auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1);
auto ListTmp = MIRBuilder.buildGEP(PtrTy, List, AlignMinus1.getReg(0));
auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));
DstPtr = MRI.createGenericVirtualRegister(PtrTy);
MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
@ -758,7 +758,7 @@ bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
auto Size = MIRBuilder.buildConstant(IntPtrTy, alignTo(ValSize, PtrSize));
auto NewList = MIRBuilder.buildGEP(PtrTy, DstPtr, Size.getReg(0));
auto NewList = MIRBuilder.buildPtrAdd(PtrTy, DstPtr, Size.getReg(0));
MIRBuilder.buildStore(
NewList, ListPtr,


@ -529,7 +529,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Arithmetic ops.
case TargetOpcode::G_ADD:
case TargetOpcode::G_SUB:
case TargetOpcode::G_GEP:
case TargetOpcode::G_PTR_ADD:
case TargetOpcode::G_MUL:
case TargetOpcode::G_SDIV:
case TargetOpcode::G_UDIV:


@ -356,7 +356,7 @@ Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &B,
Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
B.buildConstant(OffsetReg, Offset);
B.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);
B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
return DstReg;
}


@ -538,7 +538,7 @@ bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
return true;
}
bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
return selectG_ADD_SUB(I);
}
@ -1478,7 +1478,7 @@ void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
assert(PtrMI);
if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
return;
GEPInfo GEPInfo(*PtrMI);
@ -1710,8 +1710,8 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
return selectG_MERGE_VALUES(I);
case TargetOpcode::G_UNMERGE_VALUES:
return selectG_UNMERGE_VALUES(I);
case TargetOpcode::G_GEP:
return selectG_GEP(I);
case TargetOpcode::G_PTR_ADD:
return selectG_PTR_ADD(I);
case TargetOpcode::G_IMPLICIT_DEF:
return selectG_IMPLICIT_DEF(I);
case TargetOpcode::G_INSERT:
@ -1961,7 +1961,7 @@ AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
return Default;
const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
if (!OpDef || OpDef->getOpcode() != AMDGPU::G_GEP)
if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
return Default;
Optional<int64_t> Offset =


@ -87,7 +87,7 @@ private:
bool selectG_EXTRACT(MachineInstr &I) const;
bool selectG_MERGE_VALUES(MachineInstr &I) const;
bool selectG_UNMERGE_VALUES(MachineInstr &I) const;
bool selectG_GEP(MachineInstr &I) const;
bool selectG_PTR_ADD(MachineInstr &I) const;
bool selectG_IMPLICIT_DEF(MachineInstr &I) const;
bool selectG_INSERT(MachineInstr &I) const;
bool selectG_INTRINSIC(MachineInstr &I) const;


@ -478,7 +478,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.scalarize(0);
}
getActionDefinitionsBuilder(G_GEP)
getActionDefinitionsBuilder(G_PTR_ADD)
.legalForCartesianProduct(AddrSpaces64, {S64})
.legalForCartesianProduct(AddrSpaces32, {S32})
.scalarize(0);
@ -1202,7 +1202,7 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
Register LoadResult = MRI.createGenericVirtualRegister(S32);
Register LoadAddr;
B.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
B.materializePtrAdd(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
B.buildLoad(LoadResult, LoadAddr, *MMO);
return LoadResult;
}
@ -2130,7 +2130,7 @@ bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
if (!loadInputValue(KernargPtrReg, B, Arg))
return false;
B.buildGEP(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
B.buildPtrAdd(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
MI.eraseFromParent();
return true;
}


@ -2268,7 +2268,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
LLVM_FALLTHROUGH;
}
case AMDGPU::G_GEP:
case AMDGPU::G_PTR_ADD:
case AMDGPU::G_ADD:
case AMDGPU::G_SUB:
case AMDGPU::G_MUL:


@ -106,7 +106,7 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
MIRBuilder.buildConstant(OffsetReg, Offset);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;


@ -1061,7 +1061,7 @@ bool ARMInstructionSelector::select(MachineInstr &I) {
case G_SHL: {
return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
}
case G_GEP:
case G_PTR_ADD:
I.setDesc(TII.get(Opcodes.ADDrr));
MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
break;


@ -162,7 +162,7 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
.legalFor({s32, p0})
.minScalar(0, s32);
getActionDefinitionsBuilder(G_GEP)
getActionDefinitionsBuilder(G_PTR_ADD)
.legalFor({{p0, s32}})
.minScalar(1, s32);


@ -249,7 +249,7 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case G_SEXT:
case G_ZEXT:
case G_ANYEXT:
case G_GEP:
case G_PTR_ADD:
case G_INTTOPTR:
case G_PTRTOINT:
case G_CTLZ:


@ -299,7 +299,7 @@ Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
MIRBuilder.buildConstant(OffsetReg, Offset);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
MachinePointerInfo MPO =
MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);


@ -302,7 +302,7 @@ bool MipsInstructionSelector::select(MachineInstr &I) {
I.eraseFromParent();
return true;
}
case G_GEP: {
case G_PTR_ADD: {
MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
.add(I.getOperand(0))
.add(I.getOperand(1))
@ -409,15 +409,15 @@ bool MipsInstructionSelector::select(MachineInstr &I) {
MachineOperand BaseAddr = I.getOperand(1);
int64_t SignedOffset = 0;
// Try to fold load/store + G_GEP + G_CONSTANT
// Try to fold load/store + G_PTR_ADD + G_CONSTANT
// %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
// %Addr:(p0) = G_GEP %BaseAddr, %SignedOffset
// %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
// %LoadResult/%StoreSrc = load/store %Addr(p0)
// into:
// %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate
MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
if (Addr->getOpcode() == G_GEP) {
if (Addr->getOpcode() == G_PTR_ADD) {
MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
if (Offset->getOpcode() == G_CONSTANT) {
APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
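
The fold spelled out in the comment above amounts to the following check; a hedged sketch with hypothetical names (the in-tree selector then rewrites the memory instruction to use %BaseAddr plus the immediate):

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

static bool matchBasePlusImm16(Register Addr, const MachineRegisterInfo &MRI,
                               Register &Base, int64_t &Imm) {
  // The address must be a G_PTR_ADD...
  MachineInstr *PtrAdd = MRI.getVRegDef(Addr);
  if (!PtrAdd || PtrAdd->getOpcode() != TargetOpcode::G_PTR_ADD)
    return false;
  // ...whose offset is a G_CONSTANT...
  MachineInstr *Cst = MRI.getVRegDef(PtrAdd->getOperand(2).getReg());
  if (!Cst || Cst->getOpcode() != TargetOpcode::G_CONSTANT)
    return false;
  // ...that fits the 16-bit signed immediate of the Mips load/store.
  int64_t Off = Cst->getOperand(1).getCImm()->getSExtValue();
  if (!isInt<16>(Off))
    return false;
  Base = PtrAdd->getOperand(1).getReg();
  Imm = Off;
  return true;
}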


@ -166,7 +166,7 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
.legalFor({s32})
.clampScalar(0, s32, s32);
getActionDefinitionsBuilder({G_GEP, G_INTTOPTR})
getActionDefinitionsBuilder({G_PTR_ADD, G_INTTOPTR})
.legalFor({{p0, s32}});
getActionDefinitionsBuilder(G_PTRTOINT)


@ -440,7 +440,7 @@ MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case G_UMULH:
case G_ZEXTLOAD:
case G_SEXTLOAD:
case G_GEP:
case G_PTR_ADD:
case G_INTTOPTR:
case G_PTRTOINT:
case G_AND:
@ -638,7 +638,7 @@ void MipsRegisterBankInfo::setRegBank(MachineInstr &MI,
MRI.setRegBank(Dest, getRegBank(Mips::GPRBRegBankID));
break;
}
case TargetOpcode::G_GEP: {
case TargetOpcode::G_PTR_ADD: {
assert(MRI.getType(Dest).isPointer() && "Unexpected operand type.");
MRI.setRegBank(Dest, getRegBank(Mips::GPRBRegBankID));
break;


@ -115,7 +115,7 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
MIRBuilder.buildConstant(OffsetReg, Offset);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;


@ -340,7 +340,7 @@ bool X86InstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_STORE:
case TargetOpcode::G_LOAD:
return selectLoadStoreOp(I, MRI, MF);
case TargetOpcode::G_GEP:
case TargetOpcode::G_PTR_ADD:
case TargetOpcode::G_FRAME_INDEX:
return selectFrameIndexOrGep(I, MRI, MF);
case TargetOpcode::G_GLOBAL_VALUE:
@ -476,7 +476,7 @@ static void X86SelectAddress(const MachineInstr &I,
assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
"unsupported type.");
if (I.getOpcode() == TargetOpcode::G_GEP) {
if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
int64_t Imm = *COff;
if (isInt<32>(Imm)) { // Check for displacement overflow.
@ -560,7 +560,7 @@ bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
MachineFunction &MF) const {
unsigned Opc = I.getOpcode();
assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
"unexpected instruction");
const Register DefReg = I.getOperand(0).getReg();


@ -77,7 +77,7 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
setLegalizeScalarToDifferentSizeStrategy(MemOp, 0,
narrowToSmallerAndWidenToSmallest);
setLegalizeScalarToDifferentSizeStrategy(
G_GEP, 1, widenToLargerTypesUnsupportedOtherwise);
G_PTR_ADD, 1, widenToLargerTypesUnsupportedOtherwise);
setLegalizeScalarToDifferentSizeStrategy(
G_CONSTANT, 0, widenToLargerTypesAndNarrowToLargest);
@ -140,8 +140,8 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
setAction({G_FRAME_INDEX, p0}, Legal);
setAction({G_GLOBAL_VALUE, p0}, Legal);
setAction({G_GEP, p0}, Legal);
setAction({G_GEP, 1, s32}, Legal);
setAction({G_PTR_ADD, p0}, Legal);
setAction({G_PTR_ADD, 1, s32}, Legal);
if (!Subtarget.is64Bit()) {
getActionDefinitionsBuilder(G_PTRTOINT)
@ -223,7 +223,7 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
setAction({MemOp, s64}, Legal);
// Pointer-handling
setAction({G_GEP, 1, s64}, Legal);
setAction({G_PTR_ADD, 1, s64}, Legal);
getActionDefinitionsBuilder(G_PTRTOINT)
.legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
.maxScalar(0, s64)


@ -13,13 +13,13 @@ define i32 @cse_gep([4 x i32]* %ptr, i32 %idx) {
; O0: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
; O0: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; O0: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[SEXT]]
; O0: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64)
; O0: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
; O0: [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
; O0: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.gep1)
; O0: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[C]], [[SEXT]]
; O0: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL1]](s64)
; O0: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64)
; O0: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; O0: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[GEP1]], [[C1]](s64)
; O0: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[GEP1]], [[C1]](s64)
; O0: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.gep2)
; O0: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD1]], [[LOAD1]]
; O0: $w0 = COPY [[ADD]](s32)
@ -32,11 +32,11 @@ define i32 @cse_gep([4 x i32]* %ptr, i32 %idx) {
; O3: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
; O3: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; O3: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[SEXT]]
; O3: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64)
; O3: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
; O3: [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
; O3: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.gep1)
; O3: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; O3: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[GEP]], [[C1]](s64)
; O3: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[GEP]], [[C1]](s64)
; O3: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.gep2)
; O3: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD1]], [[LOAD1]]
; O3: $w0 = COPY [[ADD]](s32)


@ -781,9 +781,9 @@ define void @jt_multiple_jump_tables(%1* %arg, i32 %arg1, i32* %arg2) {
; CHECK: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[C55]](s64), %bb.1, [[C56]](s64), %bb.2, [[C57]](s64), %bb.3, [[C58]](s64), %bb.4, [[C59]](s64), %bb.5, [[C60]](s64), %bb.6, [[C61]](s64), %bb.7, [[C62]](s64), %bb.8, [[C63]](s64), %bb.9, [[C64]](s64), %bb.10, [[C65]](s64), %bb.11, [[C66]](s64), %bb.12, [[C67]](s64), %bb.13, [[C68]](s64), %bb.14, [[C69]](s64), %bb.15, [[C70]](s64), %bb.16, [[C71]](s64), %bb.17, [[C72]](s64), %bb.18, [[C73]](s64), %bb.19, [[C74]](s64), %bb.20, [[C75]](s64), %bb.21, [[C76]](s64), %bb.22, [[C77]](s64), %bb.23, [[C78]](s64), %bb.24, [[C79]](s64), %bb.25, [[C80]](s64), %bb.26, [[C81]](s64), %bb.27, [[C82]](s64), %bb.28, [[C83]](s64), %bb.29, [[C84]](s64), %bb.30, [[C85]](s64), %bb.31, [[C86]](s64), %bb.32, [[C87]](s64), %bb.33, [[C88]](s64), %bb.34, [[C89]](s64), %bb.35, [[C90]](s64), %bb.36, [[C91]](s64), %bb.37, [[C92]](s64), %bb.38, [[C93]](s64), %bb.39, [[C94]](s64), %bb.40, [[C95]](s64), %bb.41, [[C96]](s64), %bb.42, [[C97]](s64), %bb.43, [[C98]](s64), %bb.44, [[C99]](s64), %bb.45, [[C100]](s64), %bb.46, [[C101]](s64), %bb.47, [[C102]](s64), %bb.48, [[C103]](s64), %bb.49, [[C104]](s64), %bb.50, [[C105]](s64), %bb.51, [[C106]](s64), %bb.52, [[C107]](s64), %bb.53, [[C108]](s64), %bb.54, [[C109]](s64), %bb.55
; CHECK: [[C110:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C110]], [[PHI]]
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[GV]], [[MUL]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[MUL]](s64)
; CHECK: [[C111:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[GEP]], [[C111]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[GEP]], [[C111]](s64)
; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.tmp59)
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK: $x0 = COPY [[COPY]](p0)


@ -605,10 +605,10 @@ define i8* @test_constant_null() {
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[VAL1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: [[VAL2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.addr + 4)
; CHECK: G_STORE [[VAL1]](s8), [[ADDR]](p0) :: (store 1 into %ir.addr, align 4)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST1]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: G_STORE [[VAL2]](s32), [[GEP2]](p0) :: (store 4 into %ir.addr + 4)
define void @test_struct_memops({ i8, i32 }* %addr) {
%val = load { i8, i32 }, { i8, i32 }* %addr
@ -732,7 +732,7 @@ define float @test_fneg_fmf(float %arg1) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SADDO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
@ -748,7 +748,7 @@ define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UADDO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
@ -764,7 +764,7 @@ define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SSUBO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.subr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.subr + 4, align 4)
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
@ -780,7 +780,7 @@ define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_USUBO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.subr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.subr + 4, align 4)
declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)
define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
@ -796,7 +796,7 @@ define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SMULO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
@ -812,7 +812,7 @@ define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UMULO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
define void @test_umul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
@ -825,13 +825,13 @@ define void @test_umul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: %0:_(p0) = COPY $x0
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load 1 from %ir.addr + 4, align 4)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP %0, [[CST2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 8)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 12)
; CHECK: $w0 = COPY [[LD3]](s32)
%struct.nested = type {i8, { i8, i32 }, i32}
@ -846,16 +846,16 @@ define i32 @test_extractvalue(%struct.nested* %addr) {
; CHECK: %1:_(p0) = COPY $x1
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load 1 from %ir.addr + 4, align 4)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP %0, [[CST2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 8)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 12)
; CHECK: G_STORE [[LD2]](s8), %1(p0) :: (store 1 into %ir.addr2, align 4)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP %1, [[CST1]](s64)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %1, [[CST1]](s64)
; CHECK: G_STORE [[LD3]](s32), [[GEP4]](p0) :: (store 4 into %ir.addr2 + 4)
define void @test_extractvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
%struct = load %struct.nested, %struct.nested* %addr
@ -880,20 +880,20 @@ define void @test_trivial_extract_ptr([1 x i8*] %s, i8 %val) {
; CHECK: %1:_(s32) = COPY $w1
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load 1 from %ir.addr + 4, align 4)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP %0, [[CST2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 8)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 12)
; CHECK: G_STORE [[LD1]](s8), %0(p0) :: (store 1 into %ir.addr, align 4)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: G_STORE [[LD2]](s8), [[GEP4]](p0) :: (store 1 into %ir.addr + 4, align 4)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP %0, [[CST2]](s64)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
; CHECK: G_STORE %1(s32), [[GEP5]](p0) :: (store 4 into %ir.addr + 8)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: G_STORE [[LD4]](s32), [[GEP6]](p0) :: (store 4 into %ir.addr + 12)
define void @test_insertvalue(%struct.nested* %addr, i32 %val) {
%struct = load %struct.nested, %struct.nested* %addr
@ -925,23 +925,23 @@ define [1 x i8*] @test_trivial_insert_ptr([1 x i8*] %s, i8* %val) {
; CHECK: %1:_(p0) = COPY $x1
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %1(p0) :: (load 1 from %ir.addr2, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP %1, [[CST1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %1, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.addr2 + 4)
; CHECK: [[LD3:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[GEP2]](p0) :: (load 1 from %ir.addr + 4, align 4)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD5:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 8)
; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP %0, [[CST4]](s64)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST4]](s64)
; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.addr + 12)
; CHECK: G_STORE [[LD3]](s8), %0(p0) :: (store 1 into %ir.addr, align 4)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: G_STORE [[LD1]](s8), [[GEP5]](p0) :: (store 1 into %ir.addr + 4, align 4)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: G_STORE [[LD2]](s32), [[GEP6]](p0) :: (store 4 into %ir.addr + 8)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_GEP %0, [[CST4]](s64)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST4]](s64)
; CHECK: G_STORE [[LD6]](s32), [[GEP7]](p0) :: (store 4 into %ir.addr + 12)
define void @test_insertvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
%smallstruct = load {i8, i32}, {i8, i32}* %addr2
@ -1799,19 +1799,19 @@ define void @test_phi_diamond({ i8, i16, i32 }* %a.ptr, { i8, i16, i32 }* %b.ptr
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD [[ARG1]](p0) :: (load 1 from %ir.a.ptr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[ARG1]], [[CST1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG1]], [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p0) :: (load 2 from %ir.a.ptr + 2)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[ARG1]], [[CST2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG1]], [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.a.ptr + 4)
; CHECK: G_BR %bb.4
; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[ARG2]](p0) :: (load 1 from %ir.b.ptr, align 4)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[ARG2]], [[CST3]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG2]], [[CST3]](s64)
; CHECK: [[LD5:%[0-9]+]]:_(s16) = G_LOAD [[GEP3]](p0) :: (load 2 from %ir.b.ptr + 2)
; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[ARG2]], [[CST4]](s64)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG2]], [[CST4]](s64)
; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.b.ptr + 4)
; CHECK: [[PN1:%[0-9]+]]:_(s8) = G_PHI [[LD1]](s8), %bb.2, [[LD4]](s8), %bb.3
@ -1819,10 +1819,10 @@ define void @test_phi_diamond({ i8, i16, i32 }* %a.ptr, { i8, i16, i32 }* %b.ptr
; CHECK: [[PN3:%[0-9]+]]:_(s32) = G_PHI [[LD3]](s32), %bb.2, [[LD6]](s32), %bb.3
; CHECK: G_STORE [[PN1]](s8), [[ARG4]](p0) :: (store 1 into %ir.dst, align 4)
; CHECK: [[CST5:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[ARG4]], [[CST5]](s64)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG4]], [[CST5]](s64)
; CHECK: G_STORE [[PN2]](s16), [[GEP5]](p0) :: (store 2 into %ir.dst + 2)
; CHECK: [[CST6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[ARG4]], [[CST6]](s64)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG4]], [[CST6]](s64)
; CHECK: G_STORE [[PN3]](s32), [[GEP6]](p0) :: (store 4 into %ir.dst + 4)
; CHECK: RET_ReallyLR
@ -1858,22 +1858,22 @@ define void @test_nested_aggregate_const(%agg.nested *%ptr) {
; CHECK: [[CST6:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
; CHECK: G_STORE [[CST1]](s32), [[BASE]](p0) :: (store 4 into %ir.ptr, align 8)
; CHECK: [[CST7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST7]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST7]](s64)
; CHECK: G_STORE [[CST1]](s32), [[GEP1]](p0) :: (store 4 into %ir.ptr + 4)
; CHECK: [[CST8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST8]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST8]](s64)
; CHECK: G_STORE [[CST2]](s16), [[GEP2]](p0) :: (store 2 into %ir.ptr + 8, align 8)
; CHECK: [[CST9:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST9]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST9]](s64)
; CHECK: G_STORE [[CST3]](s8), [[GEP3]](p0) :: (store 1 into %ir.ptr + 10, align 2)
; CHECK: [[CST10:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST10]](s64)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST10]](s64)
; CHECK: G_STORE [[CST4]](s64), [[GEP4]](p0) :: (store 8 into %ir.ptr + 16)
; CHECK: [[CST11:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST11]](s64)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST11]](s64)
; CHECK: G_STORE [[CST5]](s64), [[GEP5]](p0) :: (store 8 into %ir.ptr + 24)
; CHECK: [[CST12:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST12]](s64)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST12]](s64)
; CHECK: G_STORE [[CST6]](s32), [[GEP6]](p0) :: (store 4 into %ir.ptr + 32, align 8)
store %agg.nested { i32 1, i32 1, %agg.inner { i16 2, i8 3, %agg.inner.inner {i64 5, i64 8} }, i32 13}, %agg.nested *%ptr
ret void


@ -4,16 +4,16 @@
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.ptr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load 8 from %ir.ptr + 8)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[CST2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST2]](s64)
; CHECK: G_STORE [[LO]](s64), [[GEP2]](p0) :: (store 8 into stack, align 1)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[CST3:%[0-9]+]]:_(s64) = COPY [[CST]]
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[CST3]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST3]](s64)
; CHECK: G_STORE [[HI]](s64), [[GEP3]](p0) :: (store 8 into stack + 8, align 1)
define void @test_split_struct([2 x i64]* %ptr) {
%struct = load [2 x i64], [2 x i64]* %ptr


@ -22,11 +22,11 @@ define signext i8 @test_stack_slots([8 x i64], i8 signext %lhs, i8 signext %rhs)
; CHECK: [[C12:%[0-9]+]]:_(s8) = G_CONSTANT i8 12
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[C42_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[C42_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C42_OFFS]](s64)
; CHECK: [[C42_LOC:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[C42_OFFS]](s64)
; CHECK: G_STORE [[C42]](s8), [[C42_LOC]](p0) :: (store 1 into stack)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[C12_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK: [[C12_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C12_OFFS]](s64)
; CHECK: [[C12_LOC:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[C12_OFFS]](s64)
; CHECK: G_STORE [[C12]](s8), [[C12_LOC]](p0) :: (store 1 into stack + 1)
; CHECK: BL @test_stack_slots
define void @test_call_stack() {
@ -57,17 +57,17 @@ define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
; CHECK-LABEL: name: test_split_struct
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.ptr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP %0, [[CST]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.ptr + 8)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF]](s64)
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[OFF]](s64)
; CHECK: G_STORE [[LD1]](s64), [[ADDR]](p0) :: (store 8 into stack, align 1)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF:%[0-9]+]]:_(s64) = COPY [[CST]]
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF]]
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[OFF]]
; CHECK: G_STORE [[LD2]](s64), [[ADDR]](p0) :: (store 8 into stack + 8, align 1)
define void @test_split_struct([2 x i64]* %ptr) {
%struct = load [2 x i64], [2 x i64]* %ptr


@ -67,10 +67,10 @@ define void @test_multiple_args(i64 %in) {
; CHECK: G_STORE [[DBL]](s64), [[ADDR]](p0) :: (store 8 into %ir.addr)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: G_STORE [[I64]](s64), [[GEP1]](p0) :: (store 8 into %ir.addr + 8)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST2]](s64)
; CHECK: G_STORE [[I8]](s8), [[GEP2]](p0) :: (store 1 into %ir.addr + 16, align 8)
; CHECK: RET_ReallyLR
define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr) {
@ -84,10 +84,10 @@ define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr)
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.addr + 8)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 16, align 8)
; CHECK: $d0 = COPY [[LD1]](s64)
@ -103,13 +103,13 @@ define {double, i64, i32} @test_struct_return({double, i64, i32}* %addr) {
; CHECK: %0:_(p0) = COPY $x0
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.addr)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.addr + 8)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP %0, [[CST2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s64) = G_LOAD [[GEP2]](p0) :: (load 8 from %ir.addr + 16)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP3]](p0) :: (load 8 from %ir.addr + 24)
; CHECK: $x0 = COPY [[LD1]](s64)
@ -198,15 +198,15 @@ define void @test_stack_slots([8 x i64], i64 %lhs, i64 %rhs, i64* %addr) {
; CHECK: ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[C42_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[C42_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C42_OFFS]](s64)
; CHECK: [[C42_LOC:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[C42_OFFS]](s64)
; CHECK: G_STORE [[C42]](s64), [[C42_LOC]](p0) :: (store 8 into stack, align 1)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[C12_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[C12_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C12_OFFS]](s64)
; CHECK: [[C12_LOC:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[C12_OFFS]](s64)
; CHECK: G_STORE [[C12]](s64), [[C12_LOC]](p0) :: (store 8 into stack + 8, align 1)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[PTR_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[PTR_OFFS]](s64)
; CHECK: [[PTR_LOC:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[PTR_OFFS]](s64)
; CHECK: G_STORE [[PTR]](p0), [[PTR_LOC]](p0) :: (store 8 into stack + 16, align 1)
; CHECK: BL @test_stack_slots
; CHECK: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
@ -249,16 +249,16 @@ define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.ptr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load 8 from %ir.ptr + 8)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[CST2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST2]](s64)
; CHECK: G_STORE [[LO]](s64), [[GEP2]](p0) :: (store 8 into stack, align 1)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[CST3:%[0-9]+]]:_(s64) = COPY [[CST]]
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[CST3]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST3]](s64)
; CHECK: G_STORE [[HI]](s64), [[GEP3]](p0) :: (store 8 into stack + 8, align 1)
define void @test_split_struct([2 x i64]* %ptr) {
%struct = load [2 x i64], [2 x i64]* %ptr


@ -4,7 +4,7 @@ define i8* @test_simple_load_pre(i8* %ptr) {
; CHECK-LABEL: name: test_simple_load_pre
; CHECK: [[BASE:%.*]]:_(p0) = COPY $x0
; CHECK: [[OFFSET:%.*]]:_(s64) = G_CONSTANT i64 42
; CHECK-NOT: G_GEP
; CHECK-NOT: G_PTR_ADD
; CHECK: {{%.*}}:_(s8), [[NEXT:%.*]]:_(p0) = G_INDEXED_LOAD [[BASE]], [[OFFSET]](s64), 1
; CHECK: $x0 = COPY [[NEXT]](p0)
@ -17,7 +17,7 @@ define void @test_load_multiple_dominated(i8* %ptr, i1 %tst, i1 %tst2) {
; CHECK-LABEL: name: test_load_multiple_dominated
; CHECK: [[BASE:%.*]]:_(p0) = COPY $x0
; CHECK: [[OFFSET:%.*]]:_(s64) = G_CONSTANT i64 42
; CHECK-NOT: G_GEP
; CHECK-NOT: G_PTR_ADD
; CHECK: {{%.*}}:_(s8), [[NEXT:%.*]]:_(p0) = G_INDEXED_LOAD [[BASE]], [[OFFSET]](s64), 1
; CHECK: $x0 = COPY [[NEXT]](p0)
%next = getelementptr i8, i8* %ptr, i32 42
@ -44,7 +44,7 @@ define i8* @test_simple_store_pre(i8* %ptr) {
; CHECK: [[BASE:%.*]]:_(p0) = COPY $x0
; CHECK: [[VAL:%.*]]:_(s8) = G_CONSTANT i8 0
; CHECK: [[OFFSET:%.*]]:_(s64) = G_CONSTANT i64 42
; CHECK-NOT: G_GEP
; CHECK-NOT: G_PTR_ADD
; CHECK: [[NEXT:%.*]]:_(p0) = G_INDEXED_STORE [[VAL]](s8), [[BASE]], [[OFFSET]](s64), 1
; CHECK: $x0 = COPY [[NEXT]](p0)
@ -57,7 +57,7 @@ define i8* @test_simple_store_pre(i8* %ptr) {
; would produce the value too late but only by one instruction.
define i64** @test_store_pre_val_loop(i64** %ptr) {
; CHECK-LABEL: name: test_store_pre_val_loop
; CHECK: G_GEP
; CHECK: G_PTR_ADD
; CHECK: G_STORE %
%next = getelementptr i64*, i64** %ptr, i32 42
@ -69,7 +69,7 @@ define i64** @test_store_pre_val_loop(i64** %ptr) {
; Potentially pre-indexed address is used between GEP computing it and load.
define i8* @test_load_pre_before(i8* %ptr) {
; CHECK-LABEL: name: test_load_pre_before
; CHECK: G_GEP
; CHECK: G_PTR_ADD
; CHECK: BL @bar
; CHECK: G_LOAD %
@ -83,7 +83,7 @@ define i8* @test_load_pre_before(i8* %ptr) {
; bad as the original GEP.
define i8* @test_alloca_load_pre() {
; CHECK-LABEL: name: test_alloca_load_pre
; CHECK: G_GEP
; CHECK: G_PTR_ADD
; CHECK: G_LOAD %
%ptr = alloca i8, i32 128
@ -95,7 +95,7 @@ define i8* @test_alloca_load_pre() {
; Load does not dominate use of its address. No indexing.
define i8* @test_pre_nodom(i8* %in, i1 %tst) {
; CHECK-LABEL: name: test_pre_nodom
; CHECK: G_GEP
; CHECK: G_PTR_ADD
; CHECK: G_LOAD %
%next = getelementptr i8, i8* %in, i32 16
@ -115,7 +115,7 @@ define i8* @test_simple_load_post(i8* %ptr) {
; CHECK-LABEL: name: test_simple_load_post
; CHECK: [[BASE:%.*]]:_(p0) = COPY $x0
; CHECK: [[OFFSET:%.*]]:_(s64) = G_CONSTANT i64 42
; CHECK-NOT: G_GEP
; CHECK-NOT: G_PTR_ADD
; CHECK: {{%.*}}:_(s8), [[NEXT:%.*]]:_(p0) = G_INDEXED_LOAD [[BASE]], [[OFFSET]](s64), 0
; CHECK: $x0 = COPY [[NEXT]](p0)
@ -154,7 +154,7 @@ define i8* @test_load_post_keep_looking(i8* %ptr) {
; Base is frame index. Using indexing would need copy anyway.
define i8* @test_load_post_alloca() {
; CHECK-LABEL: name: test_load_post_alloca
; CHECK: G_GEP
; CHECK: G_PTR_ADD
; CHECK: G_LOAD %
%ptr = alloca i8, i32 128
@ -168,7 +168,7 @@ define i8* @test_load_post_gep_offset_after(i8* %ptr) {
; CHECK-LABEL: name: test_load_post_gep_offset_after
; CHECK: G_LOAD %
; CHECK: BL @get_offset
; CHECK: G_GEP
; CHECK: G_PTR_ADD
load volatile i8, i8* %ptr
%offset = call i64 @get_offset()


@ -99,24 +99,24 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C2]](s64)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load 16 from %ir.1 + 48, align 4)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C2]](s64)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store 16 into %ir.0 + 48, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C3]](s64)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP6]](p0) :: (load 8 from %ir.1 + 64, align 4)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C3]](s64)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD4]](s64), [[GEP7]](p0) :: (store 8 into %ir.0 + 64, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
@ -146,24 +146,24 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C2]](s64)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load 16 from %ir.1 + 48, align 4)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C2]](s64)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store 16 into %ir.0 + 48, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C3]](s64)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP6]](p0) :: (load 8 from %ir.1 + 64, align 4)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C3]](s64)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD4]](s64), [[GEP7]](p0) :: (store 8 into %ir.0 + 64, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
@ -220,44 +220,44 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C2]](s64)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load 16 from %ir.1 + 48, align 4)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C2]](s64)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store 16 into %ir.0 + 48, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C3]](s64)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[GEP6]](p0) :: (load 16 from %ir.1 + 64, align 4)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C3]](s64)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD4]](s128), [[GEP7]](p0) :: (store 16 into %ir.0 + 64, align 4)
; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
; CHECK: [[GEP8:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C4]](s64)
; CHECK: [[GEP8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
; CHECK: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[GEP8]](p0) :: (load 16 from %ir.1 + 80, align 4)
; CHECK: [[GEP9:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C4]](s64)
; CHECK: [[GEP9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK: G_STORE [[LOAD5]](s128), [[GEP9]](p0) :: (store 16 into %ir.0 + 80, align 4)
; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
; CHECK: [[GEP10:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C5]](s64)
; CHECK: [[GEP10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
; CHECK: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[GEP10]](p0) :: (load 16 from %ir.1 + 96, align 4)
; CHECK: [[GEP11:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C5]](s64)
; CHECK: [[GEP11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK: G_STORE [[LOAD6]](s128), [[GEP11]](p0) :: (store 16 into %ir.0 + 96, align 4)
; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
; CHECK: [[GEP12:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C6]](s64)
; CHECK: [[GEP12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C6]](s64)
; CHECK: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[GEP12]](p0) :: (load 16 from %ir.1 + 112, align 4)
; CHECK: [[GEP13:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C6]](s64)
; CHECK: [[GEP13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
; CHECK: G_STORE [[LOAD7]](s128), [[GEP13]](p0) :: (store 16 into %ir.0 + 112, align 4)
; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
; CHECK: [[GEP14:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C7]](s64)
; CHECK: [[GEP14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
; CHECK: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[GEP14]](p0) :: (load 16 from %ir.1 + 127, align 4)
; CHECK: [[GEP15:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C7]](s64)
; CHECK: [[GEP15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
; CHECK: G_STORE [[LOAD8]](s128), [[GEP15]](p0) :: (store 16 into %ir.0 + 127, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0


@ -78,17 +78,17 @@ body: |
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP1]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP2]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C3]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
@ -134,23 +134,23 @@ body: |
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP1]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C2]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.1 + 48)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C3]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C4]](s64)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP4]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C5]](s64)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK: G_STORE [[LOAD3]](s32), [[GEP5]](p0) :: (store 4 into %ir.0 + 48)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0


@ -83,7 +83,7 @@ body: |
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
; CHECK: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store 8 into %ir.dst, align 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[MUL]](s64), [[GEP]](p0) :: (store 8 into %ir.dst + 8, align 1)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
@ -108,7 +108,7 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
; CHECK: G_STORE [[C]](s64), [[COPY]](p0) :: (store 8 into %ir.dst, align 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[C]](s64), [[GEP]](p0) :: (store 8 into %ir.dst + 8, align 1)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
@ -132,11 +132,11 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
; CHECK: G_STORE [[C]](s64), [[COPY]](p0) :: (store 8 into %ir.dst, align 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[C]](s64), [[GEP]](p0) :: (store 8 into %ir.dst + 8, align 1)
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C2]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[TRUNC]](s16), [[GEP1]](p0) :: (store 2 into %ir.dst + 16, align 1)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0


@ -45,9 +45,9 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0


@ -68,13 +68,13 @@ continue:
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFFSET:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[SLOT:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFFSET]](s64)
; CHECK: [[SLOT:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[OFFSET]](s64)
; CHECK: [[ANSWER_EXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ANSWER]]
; CHECK: G_STORE [[ANSWER_EXT]](s64), [[SLOT]]
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFFSET:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[SLOT:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFFSET]](s64)
; CHECK: [[SLOT:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[OFFSET]](s64)
; CHECK: G_STORE [[ONE]](s32), [[SLOT]]
; CHECK: BL @printf


@ -26,7 +26,7 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2 from %ir.ptr, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p0) :: (load 1 from %ir.ptr + 2, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C2]](s32)
@ -34,7 +34,7 @@ body: |
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C3]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store 2 into %ir.ptr2, align 4)
; CHECK: G_STORE [[LSHR]](s32), [[GEP1]](p0) :: (store 1 into %ir.ptr2 + 2, align 4)
; CHECK: $w0 = COPY [[C]](s32)


@ -54,7 +54,7 @@ body: |
; CHECK: bb.2.bb3:
; CHECK: successors: %bb.3(0x40000000), %bb.1(0x40000000)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[INTTOPTR]], [[C1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[INTTOPTR]], [[C1]](s64)
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[PHI]](p0) :: (load 2 from %ir.lsr.iv)
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
@ -64,7 +64,7 @@ body: |
; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ZEXT1]](s32), [[COPY]]
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[PHI]], [[C1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PHI]], [[C1]](s64)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ICMP1]](s32)
; CHECK: G_BRCOND [[TRUNC1]](s1), %bb.3
; CHECK: G_BR %bb.1
@ -84,12 +84,12 @@ body: |
bb.3.bb3:
%4:_(s64) = G_CONSTANT i64 4
%5:_(p0) = G_GEP %2, %4(s64)
%5:_(p0) = G_PTR_ADD %2, %4(s64)
%6:_(s16) = G_LOAD %0(p0) :: (load 2 from %ir.lsr.iv)
%8:_(s1) = G_ICMP intpred(eq), %6(s16), %7
%9:_(s16) = G_LOAD %5(p0) :: (load 2 from %ir.tmp5)
%10:_(s1) = G_ICMP intpred(eq), %9(s16), %7
%11:_(p0) = G_GEP %0, %4(s64)
%11:_(p0) = G_PTR_ADD %0, %4(s64)
G_BRCOND %8(s1), %bb.4
G_BR %bb.2


@ -11,12 +11,12 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]](s64)
; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[ASHR]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[ASHR]](s64)
; CHECK: $x0 = COPY [[GEP]](p0)
%0:_(p0) = COPY $x0
%1:_(s64) = COPY $x1
%2:_(s8) = G_TRUNC %1(s64)
%3:_(p0) = G_GEP %0, %2(s8)
%3:_(p0) = G_PTR_ADD %0, %2(s8)
$x0 = COPY %3(p0)
...


@ -15,16 +15,16 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[LOAD]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C]](s64)
; CHECK: G_STORE [[GEP]](p0), [[COPY]](p0) :: (store 8)
; CHECK: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[LOAD1]], [[C]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD1]], [[C]](s64)
; CHECK: G_STORE [[GEP1]](p0), [[COPY]](p0) :: (store 8)
; CHECK: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[LOAD2]], [[C1]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD2]], [[C1]](s64)
; CHECK: [[PTR_MASK:%[0-9]+]]:_(p0) = G_PTR_MASK [[GEP2]], 4
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[PTR_MASK]], [[C]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_MASK]], [[C]](s64)
; CHECK: G_STORE [[GEP3]](p0), [[COPY]](p0) :: (store 8)
%0:_(p0) = COPY $x0


@ -410,7 +410,7 @@
# DEBUG-NEXT: G_FMAXIMUM (opcode {{[0-9]+}}): 1 type index
# DEBUG: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_GEP (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: G_PTR_ADD (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: .. the first uncovered type index: 2, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_PTR_MASK (opcode {{[0-9]+}}): 1 type index, 0 imm indices


@ -47,7 +47,7 @@ body: |
; CHECK: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
$x0 = COPY %4(s64)
RET_ReallyLR implicit $x0
@ -72,7 +72,7 @@ body: |
; CHECK: RET_ReallyLR implicit $d0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
$d0 = COPY %4(s64)
RET_ReallyLR implicit $d0
@ -87,7 +87,7 @@ machineFunctionInfo: {}
body: |
bb.0:
liveins: $x0, $x1
; This shouldn't be folded, since we reuse the result of the G_GEP outside
; This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
; the G_LOAD
; CHECK-LABEL: name: more_than_one_use
; CHECK: liveins: $x0, $x1
@ -101,7 +101,7 @@ body: |
; CHECK: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
%5:gpr(s64) = G_PTRTOINT %2
%6:gpr(s64) = G_ADD %5, %4
@ -130,7 +130,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$x2 = COPY %5(s64)
RET_ReallyLR implicit $x2
@ -157,7 +157,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
@ -184,7 +184,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$x2 = COPY %5(s64)
RET_ReallyLR implicit $x2
@ -211,7 +211,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
@ -238,7 +238,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$x2 = COPY %5(s64)
RET_ReallyLR implicit $x2
@ -265,7 +265,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
@ -297,7 +297,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 7
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
@ -329,7 +329,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 16
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
@ -361,7 +361,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
@ -398,7 +398,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
@ -433,7 +433,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%7:gpr(s64) = G_ADD %5, %6
@ -468,7 +468,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%7:gpr(s64) = G_ADD %5, %6
@ -504,7 +504,7 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
@ -532,7 +532,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s32) = G_LOAD %2(p0) :: (load 4 from %ir.addr)
$w2 = COPY %4(s32)
RET_ReallyLR implicit $w2
@ -556,7 +556,7 @@ body: |
; CHECK: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(s32) = G_LOAD %2(p0) :: (load 4 from %ir.addr)
$s2 = COPY %4(s32)
RET_ReallyLR implicit $h2
@ -580,7 +580,7 @@ body: |
; CHECK: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(s16) = G_LOAD %2(p0) :: (load 2 from %ir.addr)
$h2 = COPY %4(s16)
RET_ReallyLR implicit $h2
@ -604,7 +604,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s32) = G_LOAD %2(p0) :: (load 1 from %ir.addr)
$w2 = COPY %4(s32)
RET_ReallyLR implicit $w2
@ -628,7 +628,7 @@ body: |
; CHECK: RET_ReallyLR implicit $q0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(<2 x s64>) = G_LOAD %2(p0) :: (load 16 from %ir.addr)
$q0 = COPY %4(<2 x s64>)
RET_ReallyLR implicit $q0


@ -214,7 +214,7 @@ body: |
; CHECK: $x0 = COPY [[LDRXui]]
%0(p0) = COPY $x0
%1(s64) = G_CONSTANT i64 128
%2(p0) = G_GEP %0, %1
%2(p0) = G_PTR_ADD %0, %1
%3(s64) = G_LOAD %2 :: (load 8 from %ir.addr)
$x0 = COPY %3
...
@ -240,7 +240,7 @@ body: |
; CHECK: $w0 = COPY [[LDRWui]]
%0(p0) = COPY $x0
%1(s64) = G_CONSTANT i64 512
%2(p0) = G_GEP %0, %1
%2(p0) = G_PTR_ADD %0, %1
%3(s32) = G_LOAD %2 :: (load 4 from %ir.addr)
$w0 = COPY %3
...
@ -267,7 +267,7 @@ body: |
; CHECK: $w0 = COPY [[COPY1]]
%0(p0) = COPY $x0
%1(s64) = G_CONSTANT i64 64
%2(p0) = G_GEP %0, %1
%2(p0) = G_PTR_ADD %0, %1
%3(s16) = G_LOAD %2 :: (load 2 from %ir.addr)
%4:gpr(s32) = G_ANYEXT %3
$w0 = COPY %4
@ -295,7 +295,7 @@ body: |
; CHECK: $w0 = COPY [[COPY1]]
%0(p0) = COPY $x0
%1(s64) = G_CONSTANT i64 1
%2(p0) = G_GEP %0, %1
%2(p0) = G_PTR_ADD %0, %1
%3(s8) = G_LOAD %2 :: (load 1 from %ir.addr)
%4:gpr(s32) = G_ANYEXT %3
$w0 = COPY %4
@ -410,7 +410,7 @@ body: |
; CHECK: $d0 = COPY [[LDRDui]]
%0(p0) = COPY $x0
%1(s64) = G_CONSTANT i64 8
%2(p0) = G_GEP %0, %1
%2(p0) = G_PTR_ADD %0, %1
%3(s64) = G_LOAD %2 :: (load 8 from %ir.addr)
$d0 = COPY %3
...
@ -436,7 +436,7 @@ body: |
; CHECK: $s0 = COPY [[LDRSui]]
%0(p0) = COPY $x0
%1(s64) = G_CONSTANT i64 16
%2(p0) = G_GEP %0, %1
%2(p0) = G_PTR_ADD %0, %1
%3(s32) = G_LOAD %2 :: (load 4 from %ir.addr)
$s0 = COPY %3
...
@ -462,7 +462,7 @@ body: |
; CHECK: $h0 = COPY [[LDRHui]]
%0(p0) = COPY $x0
%1(s64) = G_CONSTANT i64 64
%2(p0) = G_GEP %0, %1
%2(p0) = G_PTR_ADD %0, %1
%3(s16) = G_LOAD %2 :: (load 2 from %ir.addr)
$h0 = COPY %3
...
@ -488,7 +488,7 @@ body: |
; CHECK: $b0 = COPY [[LDRBui]]
%0(p0) = COPY $x0
%1(s64) = G_CONSTANT i64 32
%2(p0) = G_GEP %0, %1
%2(p0) = G_PTR_ADD %0, %1
%3(s8) = G_LOAD %2 :: (load 1 from %ir.addr)
$b0 = COPY %3
...


@ -222,7 +222,7 @@ body: |
%0(p0) = COPY $x0
%1(s64) = COPY $x1
%2(s64) = G_CONSTANT i64 128
%3(p0) = G_GEP %0, %2
%3(p0) = G_PTR_ADD %0, %2
G_STORE %1, %3 :: (store 8 into %ir.addr)
...
@ -248,7 +248,7 @@ body: |
%0(p0) = COPY $x0
%1(s32) = COPY $w1
%2(s64) = G_CONSTANT i64 512
%3(p0) = G_GEP %0, %2
%3(p0) = G_PTR_ADD %0, %2
G_STORE %1, %3 :: (store 4 into %ir.addr)
...
@ -275,7 +275,7 @@ body: |
%4:gpr(s32) = COPY $w1
%1(s16) = G_TRUNC %4
%2(s64) = G_CONSTANT i64 64
%3(p0) = G_GEP %0, %2
%3(p0) = G_PTR_ADD %0, %2
G_STORE %1, %3 :: (store 2 into %ir.addr)
...
@ -302,7 +302,7 @@ body: |
%4:gpr(s32) = COPY $w1
%1(s8) = G_TRUNC %4
%2(s64) = G_CONSTANT i64 1
%3(p0) = G_GEP %0, %2
%3(p0) = G_PTR_ADD %0, %2
G_STORE %1, %3 :: (store 1 into %ir.addr)
...
@ -374,7 +374,7 @@ body: |
%0(p0) = COPY $x0
%1(s64) = COPY $d1
%2(s64) = G_CONSTANT i64 8
%3(p0) = G_GEP %0, %2
%3(p0) = G_PTR_ADD %0, %2
G_STORE %1, %3 :: (store 8 into %ir.addr)
...
@ -400,7 +400,7 @@ body: |
%0(p0) = COPY $x0
%1(s32) = COPY $s1
%2(s64) = G_CONSTANT i64 8
%3(p0) = G_GEP %0, %2
%3(p0) = G_PTR_ADD %0, %2
G_STORE %1, %3 :: (store 4 into %ir.addr)
...
---


@ -69,7 +69,7 @@ body: |
liveins: $x0
%0(p0) = COPY $x0
%1(s64) = G_CONSTANT i64 42
%2(p0) = G_GEP %0, %1(s64)
%2(p0) = G_PTR_ADD %0, %1(s64)
$x0 = COPY %2(p0)
...
@ -92,7 +92,7 @@ body: |
liveins: $x0, $x1
%0(p0) = COPY $x0
%1(s64) = COPY $x1
%2(p0) = G_GEP %0, %1(s64)
%2(p0) = G_PTR_ADD %0, %1(s64)
$x0 = COPY %2(p0)
...
@ -116,7 +116,7 @@ body: |
liveins: $x0, $x1
%0(p0) = COPY $x0
%1(s64) = G_CONSTANT i64 10000
%2(p0) = G_GEP %0, %1(s64)
%2(p0) = G_PTR_ADD %0, %1(s64)
$x0 = COPY %2(p0)
...


@ -29,7 +29,7 @@ body: |
; CHECK: STRXroX [[COPY2]], [[COPY]], [[COPY1]], 0, 0 :: (store 8 into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%ptr:gpr(p0) = G_GEP %0, %1
%ptr:gpr(p0) = G_PTR_ADD %0, %1
%3:gpr(s64) = COPY $x2
G_STORE %3, %ptr :: (store 8 into %ir.addr)
...
@ -51,7 +51,7 @@ body: |
; CHECK: STRDroX [[COPY2]], [[COPY]], [[COPY1]], 0, 0 :: (store 8 into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%ptr:gpr(p0) = G_GEP %0, %1
%ptr:gpr(p0) = G_PTR_ADD %0, %1
%3:fpr(s64) = COPY $d2
G_STORE %3, %ptr :: (store 8 into %ir.addr)
...
@ -73,7 +73,7 @@ body: |
; CHECK: STRWroX [[COPY2]], [[COPY]], [[COPY1]], 0, 0 :: (store 4 into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%ptr:gpr(p0) = G_GEP %0, %1
%ptr:gpr(p0) = G_PTR_ADD %0, %1
%3:gpr(s32) = COPY $w2
G_STORE %3, %ptr :: (store 4 into %ir.addr)
...
@ -95,7 +95,7 @@ body: |
; CHECK: STRSroX [[COPY2]], [[COPY]], [[COPY1]], 0, 0 :: (store 4 into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%ptr:gpr(p0) = G_GEP %0, %1
%ptr:gpr(p0) = G_PTR_ADD %0, %1
%3:fpr(s32) = COPY $s2
G_STORE %3, %ptr :: (store 4 into %ir.addr)
...
@ -117,7 +117,7 @@ body: |
; CHECK: STRHroX [[COPY2]], [[COPY]], [[COPY1]], 0, 0 :: (store 2 into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%ptr:gpr(p0) = G_GEP %0, %1
%ptr:gpr(p0) = G_PTR_ADD %0, %1
%3:fpr(s16) = COPY $h0
G_STORE %3, %ptr :: (store 2 into %ir.addr)
...
@ -139,7 +139,7 @@ body: |
; CHECK: STRQroX [[COPY2]], [[COPY]], [[COPY1]], 0, 0 :: (store 16 into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%ptr:gpr(p0) = G_GEP %0, %1
%ptr:gpr(p0) = G_PTR_ADD %0, %1
%2:fpr(<2 x s64>) = COPY $q2
G_STORE %2, %ptr :: (store 16 into %ir.addr)
...
@ -163,6 +163,6 @@ body: |
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%ptr:gpr(p0) = G_GEP %3, %2
%ptr:gpr(p0) = G_PTR_ADD %3, %2
%4:gpr(s64) = COPY $x2
G_STORE %4, %ptr :: (store 8 into %ir.addr)


@ -10,7 +10,7 @@ define i8* @translate_element_size1(i64 %arg) {
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[INTTOPTR]], [[COPY]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[INTTOPTR]], [[COPY]](s64)
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
; CHECK: $x0 = COPY [[COPY1]](p0)
; CHECK: RET_ReallyLR implicit $x0
@ -25,7 +25,7 @@ define %type* @first_offset_const(%type* %addr) {
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: $x0 = COPY [[GEP]](p0)
; CHECK: RET_ReallyLR implicit $x0
%res = getelementptr %type, %type* %addr, i32 1
@ -54,7 +54,7 @@ define %type* @first_offset_variable(%type* %addr, i64 %idx) {
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[COPY1]]
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
; CHECK: $x0 = COPY [[COPY2]](p0)
; CHECK: RET_ReallyLR implicit $x0
@ -72,7 +72,7 @@ define %type* @first_offset_ext(%type* %addr, i32 %idx) {
; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[SEXT]]
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
; CHECK: $x0 = COPY [[COPY2]](p0)
; CHECK: RET_ReallyLR implicit $x0
@ -89,10 +89,10 @@ define i32* @const_then_var(%type1* %addr, i64 %idx) {
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 272
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C1]], [[COPY1]]
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[GEP]], [[MUL]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[GEP]], [[MUL]](s64)
; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP1]](p0)
; CHECK: $x0 = COPY [[COPY2]](p0)
; CHECK: RET_ReallyLR implicit $x0
@ -109,9 +109,9 @@ define i32* @var_then_const(%type1* %addr, i64 %idx) {
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[COPY1]]
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[GEP]], [[C1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[GEP]], [[C1]](s64)
; CHECK: $x0 = COPY [[GEP1]](p0)
; CHECK: RET_ReallyLR implicit $x0
%res = getelementptr %type1, %type1* %addr, i64 %idx, i32 2, i32 2


@ -862,7 +862,7 @@ define {i8, i32} @struct_i8_i32_func_void() #0 {
; CHECK: bb.1 (%ir-block.0):
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load 1 from `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p1) :: (load 4 from `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
%val = load { i8, i32 }, { i8, i32 } addrspace(1)* undef
ret { i8, i32 } %val
@ -879,7 +879,7 @@ define void @void_func_sret_struct_i8_i32({ i8, i32 } addrspace(5)* sret %arg0)
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (volatile load 1 from `i8 addrspace(1)* undef`, addrspace 1)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p1) :: (volatile load 4 from `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_GEP [[COPY]], [[C]](s32)
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
; CHECK: G_STORE [[LOAD]](s8), [[COPY]](p5) :: (store 1 into %ir.gep01, addrspace 5)
; CHECK: G_STORE [[LOAD1]](s32), [[GEP]](p5) :: (store 4 into %ir.gep1, addrspace 5)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
@ -924,7 +924,7 @@ define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `{ <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr, addrspace 1)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[LOAD]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p1) :: (load 4 from %ir.ptr + 128, align 128, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
%ptr = load volatile { <32 x i32>, i32 } addrspace(1)*, { <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef
@ -943,7 +943,7 @@ define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `{ i32, <32 x i32> } addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p1) :: (load 4 from %ir.ptr, align 128, addrspace 1)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[LOAD]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[GEP]](p1) :: (load 128 from %ir.ptr + 128, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<32 x s32>)
%ptr = load volatile { i32, <32 x i32> } addrspace(1)*, { i32, <32 x i32> } addrspace(1)* addrspace(4)* undef


@ -106,7 +106,7 @@ body: |
%2:vgpr(s32) = COPY $vgpr3
%3:vgpr(<2 x s32>) = G_BUILD_VECTOR %1, %2
%4:vgpr(s64) = G_CONSTANT i64 4
%5:vgpr(p0) = G_GEP %0, %4
%5:vgpr(p0) = G_PTR_ADD %0, %4
%6:vgpr(s32) = G_AMDGPU_ATOMIC_CMPXCHG %5, %3 :: (load store seq_cst 4, addrspace 0)
$vgpr0 = COPY %6
@ -214,7 +214,7 @@ body: |
%2:vgpr(s64) = COPY $vgpr4_vgpr5
%3:vgpr(<2 x s64>) = G_BUILD_VECTOR %1, %2
%4:vgpr(s64) = G_CONSTANT i64 4
%5:vgpr(p0) = G_GEP %0, %4
%5:vgpr(p0) = G_PTR_ADD %0, %4
%6:vgpr(s64) = G_AMDGPU_ATOMIC_CMPXCHG %5, %3 :: (load store seq_cst 8, addrspace 0)
$vgpr0_vgpr1 = COPY %6
@ -289,7 +289,7 @@ body: |
%2:vgpr(s32) = COPY $vgpr3
%3:vgpr(<2 x s32>) = G_BUILD_VECTOR %1, %2
%4:vgpr(s64) = G_CONSTANT i64 -4
%5:vgpr(p0) = G_GEP %0, %4
%5:vgpr(p0) = G_PTR_ADD %0, %4
%6:vgpr(s32) = G_AMDGPU_ATOMIC_CMPXCHG %5, %3 :: (load store seq_cst 4, addrspace 0)
$vgpr0 = COPY %6


@ -84,7 +84,7 @@ body: |
%1:vgpr(s32) = COPY $vgpr1
%2:vgpr(s32) = COPY $vgpr2
%3:vgpr(s32) = G_CONSTANT i32 4
%4:vgpr(p3) = G_GEP %0, %3
%4:vgpr(p3) = G_PTR_ADD %0, %3
%5:vgpr(s32) = G_ATOMIC_CMPXCHG %4, %1, %2 :: (load store seq_cst 4, addrspace 3)
$vgpr0 = COPY %5


@ -108,7 +108,7 @@ body: |
%0:vgpr(p3) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
%2:vgpr(s32) = G_CONSTANT i32 4
%3:vgpr(p3) = G_GEP %0, %2
%3:vgpr(p3) = G_PTR_ADD %0, %2
%4:vgpr(s32) = G_ATOMICRMW_FADD %3(p3), %1 :: (load store seq_cst 4, addrspace 3)
$vgpr0 = COPY %4


@ -76,7 +76,7 @@ body: |
%0:vgpr(p3) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
%2:vgpr(s32) = G_CONSTANT i32 4
%3:vgpr(p3) = G_GEP %0, %2
%3:vgpr(p3) = G_PTR_ADD %0, %2
%4:vgpr(s32) = G_ATOMICRMW_XCHG %3(p3), %1 :: (load store seq_cst 4, addrspace 3)
$vgpr0 = COPY %4


@ -261,7 +261,7 @@ body: |
; GFX9: $vgpr0 = COPY [[FLAT_LOAD_DWORD]]
%0:vgpr(p0) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -2048
%2:vgpr(p0) = G_GEP %0, %1
%2:vgpr(p0) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load seq_cst 4, align 4, addrspace 0)
$vgpr0 = COPY %3
@ -300,7 +300,7 @@ body: |
; GFX9: $vgpr0 = COPY [[FLAT_LOAD_DWORD]]
%0:vgpr(p0) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 4095
%2:vgpr(p0) = G_GEP %0, %1
%2:vgpr(p0) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load seq_cst 4, align 4, addrspace 0)
$vgpr0 = COPY %3


@ -274,7 +274,7 @@ body: |
; GFX6: liveins: $vgpr0_vgpr1
; GFX6: [[COPY:%[0-9]+]]:vgpr(p1) = COPY $vgpr0_vgpr1
; GFX6: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 -2048
; GFX6: [[GEP:%[0-9]+]]:vgpr(p1) = G_GEP [[COPY]], [[C]](s64)
; GFX6: [[GEP:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; GFX6: [[LOAD:%[0-9]+]]:vgpr_32(s32) = G_LOAD [[GEP]](p1) :: (load seq_cst 4, addrspace 1)
; GFX6: $vgpr0 = COPY [[LOAD]](s32)
; GFX7-LABEL: name: load_atomic_global_s32_seq_cst_gep_m2048
@ -309,7 +309,7 @@ body: |
; GFX9: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -2048
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load seq_cst 4, align 4, addrspace 1)
$vgpr0 = COPY %3
@ -330,7 +330,7 @@ body: |
; GFX6: liveins: $vgpr0_vgpr1
; GFX6: [[COPY:%[0-9]+]]:vgpr(p1) = COPY $vgpr0_vgpr1
; GFX6: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 4095
; GFX6: [[GEP:%[0-9]+]]:vgpr(p1) = G_GEP [[COPY]], [[C]](s64)
; GFX6: [[GEP:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; GFX6: [[LOAD:%[0-9]+]]:vgpr_32(s32) = G_LOAD [[GEP]](p1) :: (load seq_cst 4, addrspace 1)
; GFX6: $vgpr0 = COPY [[LOAD]](s32)
; GFX7-LABEL: name: load_atomic_global_s32_seq_cst_gep_4095
@ -355,7 +355,7 @@ body: |
; GFX9: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 4095
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load seq_cst 4, align 4, addrspace 1)
$vgpr0 = COPY %3


@ -307,7 +307,7 @@ body: |
; GFX9: $vgpr0 = COPY [[DS_READ_B32_gfx9_]]
%0:vgpr(p3) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 65535
%2:vgpr(p3) = G_GEP %0, %1
%2:vgpr(p3) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load seq_cst 4, align 4, addrspace 3)
$vgpr0 = COPY %3


@ -721,7 +721,7 @@ body: |
; GFX10: $sgpr0 = COPY [[S_LOAD_DWORD_IMM]]
%0:sgpr(p4) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_CONSTANT i64 1020
%2:sgpr(p4) = G_GEP %0, %1
%2:sgpr(p4) = G_PTR_ADD %0, %1
%3:sgpr(s32) = G_LOAD %2 :: (load 4, align 4, addrspace 4)
$sgpr0 = COPY %3
@ -762,7 +762,7 @@ body: |
; GFX10: $sgpr0 = COPY [[S_LOAD_DWORD_IMM]]
%0:sgpr(p4) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_CONSTANT i64 1024
%2:sgpr(p4) = G_GEP %0, %1
%2:sgpr(p4) = G_PTR_ADD %0, %1
%3:sgpr(s32) = G_LOAD %2 :: (load 4, align 4, addrspace 4)
$sgpr0 = COPY %3
@ -803,7 +803,7 @@ body: |
; GFX10: $sgpr0 = COPY [[S_LOAD_DWORD_IMM]]
%0:sgpr(p4) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_CONSTANT i64 1048575
%2:sgpr(p4) = G_GEP %0, %1
%2:sgpr(p4) = G_PTR_ADD %0, %1
%3:sgpr(s32) = G_LOAD %2 :: (load 4, align 4, addrspace 4)
$sgpr0 = COPY %3
@ -846,7 +846,7 @@ body: |
; GFX10: $sgpr0 = COPY [[S_LOAD_DWORD_SGPR]]
%0:sgpr(p4) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_CONSTANT i64 1048576
%2:sgpr(p4) = G_GEP %0, %1
%2:sgpr(p4) = G_PTR_ADD %0, %1
%3:sgpr(s32) = G_LOAD %2 :: (load 4, align 4, addrspace 4)
$sgpr0 = COPY %3
@ -889,7 +889,7 @@ body: |
; GFX10: $sgpr0 = COPY [[S_LOAD_DWORD_SGPR]]
%0:sgpr(p4) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_CONSTANT i64 1073741823
%2:sgpr(p4) = G_GEP %0, %1
%2:sgpr(p4) = G_PTR_ADD %0, %1
%3:sgpr(s32) = G_LOAD %2 :: (load 4, align 4, addrspace 4)
$sgpr0 = COPY %3


@ -779,7 +779,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 2047
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -849,7 +849,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 2048
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -929,7 +929,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -2047
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -1009,7 +1009,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -2048
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -1079,7 +1079,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 4095
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -1159,7 +1159,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 4096
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -1239,7 +1239,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -4095
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -1319,7 +1319,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -4096
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -1399,7 +1399,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 8191
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -1479,7 +1479,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 8192
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -1559,7 +1559,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -8191
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3
@ -1639,7 +1639,7 @@ body: |
; GFX10: $vgpr0 = COPY [[FLAT_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -8192
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 0)
$vgpr0 = COPY %3


@ -779,7 +779,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 2047
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -849,7 +849,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 2048
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -909,7 +909,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -2047
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -969,7 +969,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -2048
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -1039,7 +1039,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 4095
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -1119,7 +1119,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 4096
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -1189,7 +1189,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -4095
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -1259,7 +1259,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -4096
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -1339,7 +1339,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 8191
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -1419,7 +1419,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 8192
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -1499,7 +1499,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -8191
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3
@ -1579,7 +1579,7 @@ body: |
; GFX10: $vgpr0 = COPY [[GLOBAL_LOAD_UBYTE]]
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_CONSTANT i64 -8192
%2:vgpr(p1) = G_GEP %0, %1
%2:vgpr(p1) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 1)
$vgpr0 = COPY %3


@ -672,7 +672,7 @@ body: |
; GFX9: $vgpr0 = COPY [[DS_READ_U8_gfx9_]]
%0:vgpr(p3) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 65535
%2:vgpr(p3) = G_GEP %0, %1
%2:vgpr(p3) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 3)
$vgpr0 = COPY %3
@ -717,7 +717,7 @@ body: |
%2:vgpr(s32) = G_AND %0, %1
%3:vgpr(p3) = G_INTTOPTR %2
%4:vgpr(s32) = G_CONSTANT i32 65535
%5:vgpr(p3) = G_GEP %3, %4
%5:vgpr(p3) = G_PTR_ADD %3, %4
%6:vgpr(s32) = G_LOAD %5 :: (load 1, align 1, addrspace 3)
$vgpr0 = COPY %6
@ -767,7 +767,7 @@ body: |
; GFX9: $vgpr0 = COPY [[DS_READ_U8_gfx9_]]
%0:vgpr(p3) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 65536
%2:vgpr(p3) = G_GEP %0, %1
%2:vgpr(p3) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 3)
$vgpr0 = COPY %3
@ -817,7 +817,7 @@ body: |
; GFX9: $vgpr0 = COPY [[DS_READ_U8_gfx9_]]
%0:vgpr(p3) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 -1
%2:vgpr(p3) = G_GEP %0, %1
%2:vgpr(p3) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 3)
$vgpr0 = COPY %3


@ -217,7 +217,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 2047
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -257,7 +257,7 @@ body: |
%2:vgpr(s32) = G_AND %0, %1
%3:vgpr(p5) = G_INTTOPTR %2
%4:vgpr(s32) = G_CONSTANT i32 2047
%5:vgpr(p5) = G_GEP %3, %4
%5:vgpr(p5) = G_PTR_ADD %3, %4
%6:vgpr(s32) = G_LOAD %5 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %6
@ -292,7 +292,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 2048
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -329,7 +329,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 -2047
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -366,7 +366,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 -2048
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -401,7 +401,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 4095
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -438,7 +438,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 4096
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -475,7 +475,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 -4095
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -512,7 +512,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 -4096
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -549,7 +549,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 8191
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -586,7 +586,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 8192
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -623,7 +623,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 -8191
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -660,7 +660,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = COPY $vgpr0
%1:vgpr(s32) = G_CONSTANT i32 -8192
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -827,7 +827,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = G_FRAME_INDEX %stack.0
%1:vgpr(s32) = G_CONSTANT i32 4095
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3
@ -863,7 +863,7 @@ body: |
; GFX9: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
%0:vgpr(p5) = G_FRAME_INDEX %stack.0
%1:vgpr(s32) = G_CONSTANT i32 4096
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_LOAD %2 :: (load 1, align 1, addrspace 5)
$vgpr0 = COPY %3

View File

@ -102,47 +102,47 @@ body: |
%0:sgpr(p4) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_CONSTANT i64 4
%2:sgpr(p4) = G_GEP %0, %1
%2:sgpr(p4) = G_PTR_ADD %0, %1
%3:sgpr(s32) = G_LOAD %2 :: (load 4 from %ir.const0, addrspace 4)
$sgpr0 = COPY %3
%4:sgpr(s64) = G_CONSTANT i64 1020
%5:sgpr(p4) = G_GEP %0, %4
%5:sgpr(p4) = G_PTR_ADD %0, %4
%6:sgpr(s32) = G_LOAD %5 :: (load 4 from %ir.const0, addrspace 4)
$sgpr0 = COPY %6
%7:sgpr(s64) = G_CONSTANT i64 1024
%8:sgpr(p4) = G_GEP %0, %7
%8:sgpr(p4) = G_PTR_ADD %0, %7
%9:sgpr(s32) = G_LOAD %8 :: (load 4 from %ir.const0, addrspace 4)
$sgpr0 = COPY %9
%10:sgpr(s64) = G_CONSTANT i64 1048572
%11:sgpr(p4) = G_GEP %0, %10
%11:sgpr(p4) = G_PTR_ADD %0, %10
%12:sgpr(s32) = G_LOAD %11 :: (load 4 from %ir.const0, addrspace 4)
$sgpr0 = COPY %12
%13:sgpr(s64) = G_CONSTANT i64 1048576
%14:sgpr(p4) = G_GEP %0, %13
%14:sgpr(p4) = G_PTR_ADD %0, %13
%15:sgpr(s32) = G_LOAD %14 :: (load 4 from %ir.const0, addrspace 4)
$sgpr0 = COPY %15
%16:sgpr(s64) = G_CONSTANT i64 17179869180
%17:sgpr(p4) = G_GEP %0, %16
%17:sgpr(p4) = G_PTR_ADD %0, %16
%18:sgpr(s32) = G_LOAD %17 :: (load 4 from %ir.const0, addrspace 4)
$sgpr0 = COPY %18
%19:sgpr(s64) = G_CONSTANT i64 17179869184
%20:sgpr(p4) = G_GEP %0, %19
%20:sgpr(p4) = G_PTR_ADD %0, %19
%21:sgpr(s32) = G_LOAD %20 :: (load 4 from %ir.const0, addrspace 4)
$sgpr0 = COPY %21
%22:sgpr(s64) = G_CONSTANT i64 4294967292
%23:sgpr(p4) = G_GEP %0, %22
%23:sgpr(p4) = G_PTR_ADD %0, %22
%24:sgpr(s32) = G_LOAD %23 :: (load 4 from %ir.const0, addrspace 4)
$sgpr0 = COPY %24
%25:sgpr(s64) = G_CONSTANT i64 4294967296
%26:sgpr(p4) = G_GEP %0, %25
%26:sgpr(p4) = G_PTR_ADD %0, %25
%27:sgpr(s32) = G_LOAD %26 :: (load 4 from %ir.const0, addrspace 4)
$sgpr0 = COPY %27
@ -206,7 +206,7 @@ body: |
liveins: $sgpr0_sgpr1, $vgpr2_vgpr3
%0:sgpr(p4) = G_CONSTANT i64 44
%1:sgpr(s64) = G_CONSTANT i64 64
%2:sgpr(p4) = G_GEP %0, %1
%2:sgpr(p4) = G_PTR_ADD %0, %1
%3:sgpr(s32) = G_LOAD %2 :: (dereferenceable invariant load 4, align 4, addrspace 4)
S_ENDPGM 0, implicit %3
...

View File

@ -73,7 +73,7 @@ body: |
; GFX10-WAVE32: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(p0) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = COPY $sgpr2_sgpr3
%2:sgpr(p0) = G_GEP %0, %1
%2:sgpr(p0) = G_PTR_ADD %0, %1
S_ENDPGM 0, implicit %2
...
@ -146,7 +146,7 @@ body: |
; GFX10-WAVE32: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:vgpr(p0) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = COPY $vgpr2_vgpr3
%2:vgpr(p0) = G_GEP %0, %1
%2:vgpr(p0) = G_PTR_ADD %0, %1
S_ENDPGM 0, implicit %2
...
@ -219,7 +219,7 @@ body: |
; GFX10-WAVE32: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(p0) = COPY $sgpr0_sgpr1
%1:vgpr(s64) = COPY $vgpr0_vgpr1
%2:vgpr(p0) = G_GEP %0, %1
%2:vgpr(p0) = G_PTR_ADD %0, %1
S_ENDPGM 0, implicit %2
...
@ -262,7 +262,7 @@ body: |
; GFX10-WAVE32: S_ENDPGM 0, implicit [[S_ADD_U32_]]
%0:sgpr(p3) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:sgpr(p3) = G_GEP %0, %1
%2:sgpr(p3) = G_PTR_ADD %0, %1
S_ENDPGM 0, implicit %2
...
@ -305,7 +305,7 @@ body: |
; GFX10-WAVE32: S_ENDPGM 0, implicit [[V_ADD_U32_e64_]]
%0:vgpr(p3) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
%2:vgpr(p3) = G_GEP %0, %1
%2:vgpr(p3) = G_PTR_ADD %0, %1
S_ENDPGM 0, implicit %2
...
@ -348,7 +348,7 @@ body: |
; GFX10-WAVE32: S_ENDPGM 0, implicit [[V_ADD_U32_e64_]]
%0:sgpr(p3) = COPY $sgpr0
%1:vgpr(s32) = COPY $vgpr0
%2:vgpr(p3) = G_GEP %0, %1
%2:vgpr(p3) = G_PTR_ADD %0, %1
S_ENDPGM 0, implicit %2
...
@ -391,7 +391,7 @@ body: |
; GFX10-WAVE32: S_ENDPGM 0, implicit [[S_ADD_U32_]]
%0:sgpr(p6) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:sgpr(p6) = G_GEP %0, %1
%2:sgpr(p6) = G_PTR_ADD %0, %1
S_ENDPGM 0, implicit %2
...
@ -434,7 +434,7 @@ body: |
; GFX10-WAVE32: S_ENDPGM 0, implicit [[S_ADD_U32_]]
%0:sgpr(p2) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:sgpr(p2) = G_GEP %0, %1
%2:sgpr(p2) = G_PTR_ADD %0, %1
S_ENDPGM 0, implicit %2
...

View File

@ -812,7 +812,7 @@ body: |
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s32) = COPY $vgpr2
%2:vgpr(s64) = G_CONSTANT i64 2047
%3:vgpr(p1) = G_GEP %0, %2
%3:vgpr(p1) = G_PTR_ADD %0, %2
G_STORE %1, %3 :: (store 4, align 4, addrspace 0)
...

View File

@ -802,7 +802,7 @@ body: |
%0:vgpr(p1) = COPY $vgpr0_vgpr1
%1:vgpr(s32) = COPY $vgpr2
%2:vgpr(s64) = G_CONSTANT i64 2047
%3:vgpr(p1) = G_GEP %0, %2
%3:vgpr(p1) = G_PTR_ADD %0, %2
G_STORE %1, %3 :: (store 4, align 4, addrspace 1)
...

View File

@ -215,7 +215,7 @@ body: |
; GFX9: BUFFER_STORE_BYTE_OFFEN [[V_MOV_B32_e32_]], %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 4095, 0, 0, 0, 0, 0, implicit $exec :: (store 1, addrspace 5)
%0:vgpr(p5) = G_FRAME_INDEX %stack.0
%1:vgpr(s32) = G_CONSTANT i32 4095
%2:vgpr(p5) = G_GEP %0, %1
%2:vgpr(p5) = G_PTR_ADD %0, %1
%3:vgpr(s32) = G_CONSTANT i32 0
G_STORE %3, %2 :: (store 1, align 1, addrspace 5)

View File

@ -7,10 +7,10 @@ define amdgpu_kernel void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) noun
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
; HSA-VI: G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
@ -26,10 +26,10 @@ define amdgpu_kernel void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zero
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
; HSA-VI: G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
@ -45,10 +45,10 @@ define amdgpu_kernel void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 sign
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
; HSA-VI: G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
@ -64,10 +64,10 @@ define amdgpu_kernel void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) no
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `i16 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
; HSA-VI: G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
@ -83,10 +83,10 @@ define amdgpu_kernel void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 ze
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `i16 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
; HSA-VI: G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
@ -102,10 +102,10 @@ define amdgpu_kernel void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 si
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `i16 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s16)
; HSA-VI: G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
@ -121,10 +121,10 @@ define amdgpu_kernel void @i32_arg(i32 addrspace(1)* nocapture %out, i32 %in) no
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `i32 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -139,10 +139,10 @@ define amdgpu_kernel void @f32_arg(float addrspace(1)* nocapture %out, float %in
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `float addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `float addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -157,10 +157,10 @@ define amdgpu_kernel void @v2i8_arg(<2 x i8> addrspace(1)* %out, <2 x i8> %in) {
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `<2 x i8> addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<2 x s8>), [[LOAD]](p1) :: (store 2 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -175,10 +175,10 @@ define amdgpu_kernel void @v2i16_arg(<2 x i16> addrspace(1)* %out, <2 x i16> %in
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `<2 x i16> addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<2 x s16>), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -193,10 +193,10 @@ define amdgpu_kernel void @v2i32_arg(<2 x i32> addrspace(1)* nocapture %out, <2
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<2 x i32> addrspace(4)* undef`, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<2 x s32>), [[LOAD]](p1) :: (store 8 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -211,10 +211,10 @@ define amdgpu_kernel void @v2f32_arg(<2 x float> addrspace(1)* nocapture %out, <
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<2 x float> addrspace(4)* undef`, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<2 x s32>), [[LOAD]](p1) :: (store 8 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -229,10 +229,10 @@ define amdgpu_kernel void @v3i8_arg(<3 x i8> addrspace(1)* nocapture %out, <3 x
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 3 from `<3 x i8> addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<3 x s8>), [[LOAD]](p1) :: (store 3 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -247,10 +247,10 @@ define amdgpu_kernel void @v3i16_arg(<3 x i16> addrspace(1)* nocapture %out, <3
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 6 from `<3 x i16> addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<3 x s16>), [[LOAD]](p1) :: (store 6 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -265,10 +265,10 @@ define amdgpu_kernel void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 12 from `<3 x i32> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<3 x s32>), [[LOAD]](p1) :: (store 12 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -283,10 +283,10 @@ define amdgpu_kernel void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 12 from `<3 x float> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<3 x s32>), [[LOAD]](p1) :: (store 12 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -301,10 +301,10 @@ define amdgpu_kernel void @v4i8_arg(<4 x i8> addrspace(1)* %out, <4 x i8> %in) {
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `<4 x i8> addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<4 x s8>), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -319,10 +319,10 @@ define amdgpu_kernel void @v4i16_arg(<4 x i16> addrspace(1)* %out, <4 x i16> %in
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<4 x i16> addrspace(4)* undef`, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<4 x s16>), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -337,10 +337,10 @@ define amdgpu_kernel void @v4i32_arg(<4 x i32> addrspace(1)* nocapture %out, <4
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<4 x i32> addrspace(4)* undef`, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<4 x s32>), [[LOAD]](p1) :: (store 16 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -355,10 +355,10 @@ define amdgpu_kernel void @v4f32_arg(<4 x float> addrspace(1)* nocapture %out, <
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<4 x float> addrspace(4)* undef`, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<4 x s32>), [[LOAD]](p1) :: (store 16 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -373,10 +373,10 @@ define amdgpu_kernel void @v8i8_arg(<8 x i8> addrspace(1)* %out, <8 x i8> %in) {
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<8 x i8> addrspace(4)* undef`, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<8 x s8>), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -391,10 +391,10 @@ define amdgpu_kernel void @v8i16_arg(<8 x i16> addrspace(1)* %out, <8 x i16> %in
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<8 x i16> addrspace(4)* undef`, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<8 x s16>), [[LOAD]](p1) :: (store 16 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -409,10 +409,10 @@ define amdgpu_kernel void @v8i32_arg(<8 x i32> addrspace(1)* nocapture %out, <8
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32 from `<8 x i32> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<8 x s32>), [[LOAD]](p1) :: (store 32 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -427,10 +427,10 @@ define amdgpu_kernel void @v8f32_arg(<8 x float> addrspace(1)* nocapture %out, <
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32 from `<8 x float> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<8 x s32>), [[LOAD]](p1) :: (store 32 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -445,10 +445,10 @@ define amdgpu_kernel void @v16i8_arg(<16 x i8> addrspace(1)* %out, <16 x i8> %in
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<16 x i8> addrspace(4)* undef`, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<16 x s8>), [[LOAD]](p1) :: (store 16 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -463,10 +463,10 @@ define amdgpu_kernel void @v16i16_arg(<16 x i16> addrspace(1)* %out, <16 x i16>
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32 from `<16 x i16> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<16 x s16>), [[LOAD]](p1) :: (store 32 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -481,10 +481,10 @@ define amdgpu_kernel void @v16i32_arg(<16 x i32> addrspace(1)* nocapture %out, <
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 64 from `<16 x i32> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<16 x s32>), [[LOAD]](p1) :: (store 64 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -499,10 +499,10 @@ define amdgpu_kernel void @v16f32_arg(<16 x float> addrspace(1)* nocapture %out,
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 64 from `<16 x float> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](<16 x s32>), [[LOAD]](p1) :: (store 64 into %ir.out, align 4, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -517,10 +517,10 @@ define amdgpu_kernel void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwi
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -534,10 +534,10 @@ define amdgpu_kernel void @f64_kernel_arg(double addrspace(1)* %out, double %in
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `double addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `double addrspace(4)* undef`, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -552,10 +552,10 @@ define amdgpu_kernel void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i1 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: G_STORE [[LOAD1]](s1), [[LOAD]](p1) :: (store 1 into %ir.out, addrspace 1)
; HSA-VI: S_ENDPGM 0
@ -569,10 +569,10 @@ define amdgpu_kernel void @i1_arg_zext_i32(i32 addrspace(1)* %out, i1 %x) nounwi
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s1)
; HSA-VI: G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
@ -588,10 +588,10 @@ define amdgpu_kernel void @i1_arg_zext_i64(i64 addrspace(1)* %out, i1 %x) nounwi
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD1]](s1)
; HSA-VI: G_STORE [[ZEXT]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
@ -607,10 +607,10 @@ define amdgpu_kernel void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwi
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s1)
; HSA-VI: G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
@ -626,10 +626,10 @@ define amdgpu_kernel void @i1_arg_sext_i64(i64 addrspace(1)* %out, i1 %x) nounwi
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD1]](s1)
; HSA-VI: G_STORE [[SEXT]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
@ -673,15 +673,15 @@ define amdgpu_kernel void @struct_argument_alignment({i32, i64} %arg0, i8, {i32,
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 16 from `{ i32, i64 } addrspace(4)* undef`, addrspace 4)
; HSA-VI: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD]](s128), 0
; HSA-VI: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD]](s128), 64
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; HSA-VI: [[GEP2:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C2]](s64)
; HSA-VI: [[GEP2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
; HSA-VI: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p4) :: (dereferenceable invariant load 16 from `{ i32, i64 } addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD2]](s128), 0
; HSA-VI: [[EXTRACT3:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD2]](s128), 64
@ -705,15 +705,15 @@ define amdgpu_kernel void @packed_struct_argument_alignment(<{i32, i64}> %arg0,
; HSA-VI: liveins: $sgpr4_sgpr5
; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; HSA-VI: [[LOAD:%[0-9]+]]:_(s96) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 12 from `<{ i32, i64 }> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD]](s96), 0
; HSA-VI: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD]](s96), 32
; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 4, addrspace 4)
; HSA-VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 13
; HSA-VI: [[GEP2:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C2]](s64)
; HSA-VI: [[GEP2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
; HSA-VI: [[LOAD2:%[0-9]+]]:_(s96) = G_LOAD [[GEP2]](p4) :: (dereferenceable invariant load 12 from `<{ i32, i64 }> addrspace(4)* undef`, align 1, addrspace 4)
; HSA-VI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD2]](s96), 0
; HSA-VI: [[EXTRACT3:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD2]](s96), 32

View File

@ -1246,7 +1246,7 @@ define void @void_func_struct_i8_i32({ i8, i32 } %arg0) #0 {
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: G_STORE [[TRUNC]](s8), [[DEF]](p1) :: (store 1 into `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
; CHECK: G_STORE [[COPY1]](s32), [[GEP]](p1) :: (store 4 into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
; CHECK: S_SETPC_B64_return [[COPY3]]
@ -1264,11 +1264,11 @@ define void @void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval %arg0
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p5) :: (load 1 from %ir.arg0, align 4, addrspace 5)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_GEP [[LOAD]], [[C]](s32)
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_PTR_ADD [[LOAD]], [[C]](s32)
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p5) :: (load 4 from %ir.arg0 + 4, addrspace 5)
; CHECK: G_STORE [[LOAD1]](s8), [[DEF]](p1) :: (store 1 into `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C1]](s64)
; CHECK: [[GEP1:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
; CHECK: G_STORE [[LOAD2]](s32), [[GEP1]](p1) :: (store 4 into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]]
@ -1291,17 +1291,17 @@ define void @void_func_byval_struct_i8_i32_x2({ i8, i32 } addrspace(5)* byval %a
; CHECK: [[DEF1:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
; CHECK: [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p5) :: (volatile load 1 from %ir.arg0, align 4, addrspace 5)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_GEP [[LOAD]], [[C]](s32)
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_PTR_ADD [[LOAD]], [[C]](s32)
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p5) :: (volatile load 4 from %ir.arg0 + 4, addrspace 5)
; CHECK: [[LOAD4:%[0-9]+]]:_(s8) = G_LOAD [[LOAD1]](p5) :: (volatile load 1 from %ir.arg1, align 4, addrspace 5)
; CHECK: [[GEP1:%[0-9]+]]:_(p5) = G_GEP [[LOAD1]], [[C]](s32)
; CHECK: [[GEP1:%[0-9]+]]:_(p5) = G_PTR_ADD [[LOAD1]], [[C]](s32)
; CHECK: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p5) :: (volatile load 4 from %ir.arg1 + 4, addrspace 5)
; CHECK: G_STORE [[LOAD2]](s8), [[DEF]](p1) :: (volatile store 1 into `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP2:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C1]](s64)
; CHECK: [[GEP2:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
; CHECK: G_STORE [[LOAD3]](s32), [[GEP2]](p1) :: (volatile store 4 into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
; CHECK: G_STORE [[LOAD4]](s8), [[DEF]](p1) :: (volatile store 1 into `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
; CHECK: [[GEP3:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C1]](s64)
; CHECK: [[GEP3:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
; CHECK: G_STORE [[LOAD5]](s32), [[GEP3]](p1) :: (volatile store 4 into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
; CHECK: G_STORE [[COPY]](s32), [[DEF1]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]

View File

@ -177,7 +177,7 @@ body: |
; VI: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
; VI: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 68
; VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY2]], [[C2]](s64)
; VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY2]], [[C2]](s64)
; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 4 from `i8 addrspace(4)* undef` + 68, addrspace 4)
; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY1]](p5), [[C]]
; VI: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p5)
@ -260,7 +260,7 @@ body: |
; VI: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
; VI: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY2]], [[C2]](s64)
; VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY2]], [[C2]](s64)
; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 4 from `i8 addrspace(4)* undef` + 64, align 64, addrspace 4)
; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY1]](p3), [[C]]
; VI: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p3)
@ -466,14 +466,14 @@ body: |
; VI: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
; VI: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; VI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY2]], [[C2]](s64)
; VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY2]], [[C2]](s64)
; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 4 from `i8 addrspace(4)* undef` + 64, align 64, addrspace 4)
; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](p3), [[C]]
; VI: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3)
; VI: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32)
; VI: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[ICMP]](s1), [[MV]], [[C1]]
; VI: [[COPY3:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; VI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY3]], [[C2]](s64)
; VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY3]], [[C2]](s64)
; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `i8 addrspace(4)* undef` + 64, align 64, addrspace 4)
; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](p3), [[C]]
; VI: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3)

View File

@ -13,13 +13,13 @@ body: |
; CI: [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[COPY]](p6), [[C]](p6)
; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 1, addrspace 6)
; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CI: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[MV]], [[C1]](s64)
; CI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C1]](s64)
; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p4) :: (load 1, addrspace 6)
; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CI: [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[MV]], [[C2]](s64)
; CI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C2]](s64)
; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p4) :: (load 1, addrspace 6)
; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; CI: [[GEP2:%[0-9]+]]:_(p4) = G_GEP [[MV]], [[C3]](s64)
; CI: [[GEP2:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C3]](s64)
; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p4) :: (load 1, addrspace 6)
; CI: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -943,7 +943,7 @@ body: |
; CHECK: bb.1:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[GEP:%[0-9]+]]:_(p3) = G_GEP [[COPY]], [[C1]](s32)
; CHECK: [[GEP:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
; CHECK: G_BR %bb.2
; CHECK: bb.2:
; CHECK: [[PHI:%[0-9]+]]:_(p3) = G_PHI [[COPY]](p3), %bb.0, [[GEP]](p3), %bb.1
@ -964,7 +964,7 @@ body: |
successors: %bb.2
%4:_(s32) = G_CONSTANT i32 8
%5:_(p3) = G_GEP %0, %4
%5:_(p3) = G_PTR_ADD %0, %4
G_BR %bb.2
bb.2:
@ -991,7 +991,7 @@ body: |
; CHECK: bb.1:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_GEP [[COPY]], [[C1]](s32)
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
; CHECK: G_BR %bb.2
; CHECK: bb.2:
; CHECK: [[PHI:%[0-9]+]]:_(p5) = G_PHI [[COPY]](p5), %bb.0, [[GEP]](p5), %bb.1
@ -1012,7 +1012,7 @@ body: |
successors: %bb.2
%4:_(s32) = G_CONSTANT i32 8
%5:_(p5) = G_GEP %0, %4
%5:_(p5) = G_PTR_ADD %0, %4
G_BR %bb.2
bb.2:
@ -1039,7 +1039,7 @@ body: |
; CHECK: bb.1:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_BR %bb.2
; CHECK: bb.2:
; CHECK: [[PHI:%[0-9]+]]:_(p0) = G_PHI [[COPY]](p0), %bb.0, [[GEP]](p0), %bb.1
@ -1060,7 +1060,7 @@ body: |
successors: %bb.2
%4:_(s64) = G_CONSTANT i64 8
%5:_(p0) = G_GEP %0, %4
%5:_(p0) = G_PTR_ADD %0, %4
G_BR %bb.2
bb.2:
@ -1087,7 +1087,7 @@ body: |
; CHECK: bb.1:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_BR %bb.2
; CHECK: bb.2:
; CHECK: [[PHI:%[0-9]+]]:_(p1) = G_PHI [[COPY]](p1), %bb.0, [[GEP]](p1), %bb.1
@ -1108,7 +1108,7 @@ body: |
successors: %bb.2
%4:_(s64) = G_CONSTANT i64 8
%5:_(p1) = G_GEP %0, %4
%5:_(p1) = G_PTR_ADD %0, %4
G_BR %bb.2
bb.2:
@ -1135,7 +1135,7 @@ body: |
; CHECK: bb.1:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_BR %bb.2
; CHECK: bb.2:
; CHECK: [[PHI:%[0-9]+]]:_(p4) = G_PHI [[COPY]](p4), %bb.0, [[GEP]](p4), %bb.1
@ -1156,7 +1156,7 @@ body: |
successors: %bb.2
%4:_(s64) = G_CONSTANT i64 8
%5:_(p4) = G_GEP %0, %4
%5:_(p4) = G_PTR_ADD %0, %4
G_BR %bb.2
bb.2:

View File

@ -10,11 +10,11 @@ body: |
; CHECK-LABEL: name: test_gep_global_i64_idx
; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[COPY1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[COPY1]](s64)
; CHECK: $vgpr0_vgpr1 = COPY [[GEP]](p1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(p1) = G_GEP %0, %1
%2:_(p1) = G_PTR_ADD %0, %1
$vgpr0_vgpr1 = COPY %2
...
@ -28,11 +28,11 @@ body: |
; CHECK-LABEL: name: test_gep_flat_i64_idx
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[COPY1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[COPY1]](s64)
; CHECK: $vgpr0_vgpr1 = COPY [[GEP]](p0)
%0:_(p0) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(p0) = G_GEP %0, %1
%2:_(p0) = G_PTR_ADD %0, %1
$vgpr0_vgpr1 = COPY %2
...
@ -46,11 +46,11 @@ body: |
; CHECK-LABEL: name: test_gep_constant_i64_idx
; CHECK: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; CHECK: [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[COPY1]](s64)
; CHECK: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[COPY1]](s64)
; CHECK: $vgpr0_vgpr1 = COPY [[GEP]](p4)
%0:_(p4) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(p4) = G_GEP %0, %1
%2:_(p4) = G_PTR_ADD %0, %1
$vgpr0_vgpr1 = COPY %2
...
@ -64,11 +64,11 @@ body: |
; CHECK-LABEL: name: test_gep_local_i32_idx
; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[GEP:%[0-9]+]]:_(p3) = G_GEP [[COPY]], [[COPY1]](s32)
; CHECK: [[GEP:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[COPY1]](s32)
; CHECK: $vgpr0 = COPY [[GEP]](p3)
%0:_(p3) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(p3) = G_GEP %0, %1
%2:_(p3) = G_PTR_ADD %0, %1
$vgpr0 = COPY %2
...
@ -82,11 +82,11 @@ body: |
; CHECK-LABEL: name: test_gep_private_i32_idx
; CHECK: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_GEP [[COPY]], [[COPY1]](s32)
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[COPY1]](s32)
; CHECK: $vgpr0 = COPY [[GEP]](p5)
%0:_(p5) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(p5) = G_GEP %0, %1
%2:_(p5) = G_PTR_ADD %0, %1
$vgpr0 = COPY %2
...
@ -100,11 +100,11 @@ body: |
; CHECK-LABEL: name: test_gep_constant32_i32_idx
; CHECK: [[COPY:%[0-9]+]]:_(p6) = COPY $sgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
; CHECK: [[GEP:%[0-9]+]]:_(p6) = G_GEP [[COPY]], [[COPY1]](s32)
; CHECK: [[GEP:%[0-9]+]]:_(p6) = G_PTR_ADD [[COPY]], [[COPY1]](s32)
; CHECK: $sgpr0 = COPY [[GEP]](p6)
%0:_(p6) = COPY $sgpr0
%1:_(s32) = COPY $sgpr1
%2:_(p6) = G_GEP %0, %1
%2:_(p6) = G_PTR_ADD %0, %1
$sgpr0 = COPY %2
...
@ -118,11 +118,11 @@ body: |
; CHECK-LABEL: name: test_gep_region_i32_idx
; CHECK: [[COPY:%[0-9]+]]:_(p2) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[GEP:%[0-9]+]]:_(p2) = G_GEP [[COPY]], [[COPY1]](s32)
; CHECK: [[GEP:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY]], [[COPY1]](s32)
; CHECK: $vgpr0 = COPY [[GEP]](p2)
%0:_(p2) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(p2) = G_GEP %0, %1
%2:_(p2) = G_PTR_ADD %0, %1
$vgpr0 = COPY %2
...

View File

@ -389,15 +389,15 @@ body: |
; SI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
; SI: G_STORE [[ANYEXT1]](s32), [[COPY]](p1) :: (store 1, align 4, addrspace 1)
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; SI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64)
; SI: [[GEP:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; SI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
; SI: G_STORE [[ANYEXT2]](s32), [[GEP]](p1) :: (store 1, addrspace 1)
; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
; SI: [[GEP1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; SI: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
; SI: G_STORE [[ANYEXT3]](s32), [[GEP1]](p1) :: (store 1, align 2, addrspace 1)
; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; SI: [[GEP2:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C2]](s64)
; SI: [[GEP2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; SI: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
; SI: G_STORE [[ANYEXT4]](s32), [[GEP2]](p1) :: (store 1, addrspace 1)
; VI-LABEL: name: test_store_global_v3s8_align4
@ -411,15 +411,15 @@ body: |
; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
; VI: G_STORE [[ANYEXT1]](s32), [[COPY]](p1) :: (store 1, align 4, addrspace 1)
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; VI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64)
; VI: [[GEP:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; VI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
; VI: G_STORE [[ANYEXT2]](s32), [[GEP]](p1) :: (store 1, addrspace 1)
; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
; VI: [[GEP1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; VI: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
; VI: G_STORE [[ANYEXT3]](s32), [[GEP1]](p1) :: (store 1, align 2, addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; VI: [[GEP2:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C2]](s64)
; VI: [[GEP2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
; VI: G_STORE [[ANYEXT4]](s32), [[GEP2]](p1) :: (store 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1


@ -87,7 +87,7 @@ body: |
; CHECK: [[PTR:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
; CHECK: [[LOAD0:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR]](p1) :: (load 16 from %ir.global.not.uniform.v8i32, align 32, addrspace 1)
; CHECK: [[OFFSET16:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p1) = G_GEP [[PTR]], [[OFFSET16]](s64)
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET16]](s64)
; CHECK: [[LOAD16:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[GEP16]](p1) :: (load 16 from %ir.global.not.uniform.v8i32 + 16, align 32, addrspace 1)
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD0]](<4 x s32>), [[LOAD16]](<4 x s32>)
; CHECK: [[IDX0:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
@ -121,7 +121,7 @@ body: |
; CHECK: [[PTR:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
; CHECK: [[LOAD0:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR]](p1) :: (load 16 from %ir.global.not.uniform.v4i64, align 32, addrspace 1)
; CHECK: [[OFFSET16:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p1) = G_GEP [[PTR]], [[OFFSET16]](s64)
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET16]](s64)
; CHECK: [[LOAD16:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP16]](p1) :: (load 16 from %ir.global.not.uniform.v4i64 + 16, align 32, addrspace 1)
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s64>) = G_CONCAT_VECTORS [[LOAD0]](<2 x s64>), [[LOAD16]](<2 x s64>)
; CHECK: [[IDX0:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
@ -149,13 +149,13 @@ body: |
; CHECK: [[PTR:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
; CHECK: [[LOAD0:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR]](p1) :: (load 16 from %ir.global.not.uniform.v16i32, align 64, addrspace 1)
; CHECK: [[OFFSET16:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p1) = G_GEP [[PTR]], [[OFFSET16]](s64)
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET16]](s64)
; CHECK: [[LOAD16:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[GEP16]](p1) :: (load 16 from %ir.global.not.uniform.v16i32 + 16, align 64, addrspace 1)
; CHECK: [[OFFSET32:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
; CHECK: [[GEP32:%[0-9]+]]:vgpr(p1) = G_GEP [[PTR]], [[OFFSET32]](s64)
; CHECK: [[GEP32:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET32]](s64)
; CHECK: [[LOAD32:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[GEP32]](p1) :: (load 16 from %ir.global.not.uniform.v16i32 + 32, align 64, addrspace 1)
; CHECK: [[OFFSET48:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
; CHECK: [[GEP48:%[0-9]+]]:vgpr(p1) = G_GEP [[PTR]], [[OFFSET48]](s64)
; CHECK: [[GEP48:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET48]](s64)
; CHECK: [[LOAD48:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[GEP48]](p1) :: (load 16 from %ir.global.not.uniform.v16i32 + 48, align 64, addrspace 1)
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD0]](<4 x s32>), [[LOAD16]](<4 x s32>), [[LOAD32]](<4 x s32>), [[LOAD48]](<4 x s32>)
; CHECK: [[IDX0:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
@ -205,13 +205,13 @@ body: |
; CHECK: [[PTR:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
; CHECK: [[LOAD0:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR]](p1) :: (load 16 from %ir.global.not.uniform.v8i64, align 64, addrspace 1)
; CHECK: [[OFFSET16:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p1) = G_GEP [[PTR]], [[OFFSET16]](s64)
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET16]](s64)
; CHECK: [[LOAD16:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP16]](p1) :: (load 16 from %ir.global.not.uniform.v8i64 + 16, align 64, addrspace 1)
; CHECK: [[OFFSET32:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
; CHECK: [[GEP32:%[0-9]+]]:vgpr(p1) = G_GEP [[PTR]], [[OFFSET32]](s64)
; CHECK: [[GEP32:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET32]](s64)
; CHECK: [[LOAD32:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP32]](p1) :: (load 16 from %ir.global.not.uniform.v8i64 + 32, align 64, addrspace 1)
; CHECK: [[OFFSET48:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
; CHECK: [[GEP48:%[0-9]+]]:vgpr(p1) = G_GEP [[PTR]], [[OFFSET48]](s64)
; CHECK: [[GEP48:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET48]](s64)
; CHECK: [[LOAD48:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP48]](p1) :: (load 16 from %ir.global.not.uniform.v8i64 + 48, align 64, addrspace 1)
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[LOAD0]](<2 x s64>), [[LOAD16]](<2 x s64>), [[LOAD32]](<2 x s64>), [[LOAD48]](<2 x s64>)
; CHECK: [[IDX0:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
@ -298,7 +298,7 @@ body: |
; CHECK: [[PTR:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; CHECK: [[LOAD0:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR]](p4) :: (load 16 from %ir.constant.not.uniform.v8i32, align 32, addrspace 4)
; CHECK: [[OFFSET16:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p4) = G_GEP [[PTR]], [[OFFSET16]](s64)
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PTR]], [[OFFSET16]](s64)
; CHECK: [[LOAD16:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[GEP16]](p4) :: (load 16 from %ir.constant.not.uniform.v8i32 + 16, align 32, addrspace 4)
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD0]](<4 x s32>), [[LOAD16]](<4 x s32>)
; CHECK: [[IDX0:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
@ -333,7 +333,7 @@ body: |
; CHECK: [[PTR:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; CHECK: [[LOAD0:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR]](p4) :: (load 16 from %ir.constant.not.uniform.v4i64, align 32, addrspace 4)
; CHECK: [[OFFSET16:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p4) = G_GEP [[PTR]], [[OFFSET16]](s64)
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PTR]], [[OFFSET16]](s64)
; CHECK: [[LOAD16:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP16]](p4) :: (load 16 from %ir.constant.not.uniform.v4i64 + 16, align 32, addrspace 4)
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s64>) = G_CONCAT_VECTORS [[LOAD0]](<2 x s64>), [[LOAD16]](<2 x s64>)
; CHECK: [[IDX0:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
@ -360,13 +360,13 @@ body: |
; CHECK: [[PTR:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; CHECK: [[LOAD0:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32, align 64, addrspace 4)
; CHECK: [[OFFSET16:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p4) = G_GEP [[PTR]], [[OFFSET16]](s64)
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PTR]], [[OFFSET16]](s64)
; CHECK: [[LOAD16:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[GEP16]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32 + 16, align 64, addrspace 4)
; CHECK: [[OFFSET32:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
; CHECK: [[GEP32:%[0-9]+]]:vgpr(p4) = G_GEP [[PTR]], [[OFFSET32]](s64)
; CHECK: [[GEP32:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PTR]], [[OFFSET32]](s64)
; CHECK: [[LOAD32:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[GEP32]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32 + 32, align 64, addrspace 4)
; CHECK: [[OFFSET48:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
; CHECK: [[GEP48:%[0-9]+]]:vgpr(p4) = G_GEP [[PTR]], [[OFFSET48]](s64)
; CHECK: [[GEP48:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PTR]], [[OFFSET48]](s64)
; CHECK: [[LOAD48:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[GEP48]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32 + 48, align 64, addrspace 4)
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD0]](<4 x s32>), [[LOAD16]](<4 x s32>), [[LOAD32]](<4 x s32>), [[LOAD48]](<4 x s32>)
; CHECK: [[IDX0:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
@ -417,13 +417,13 @@ body: |
; CHECK: [[PTR:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
; CHECK: [[LOAD0:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64, align 64, addrspace 4)
; CHECK: [[OFFSET16:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p4) = G_GEP [[PTR]], [[OFFSET16]](s64)
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PTR]], [[OFFSET16]](s64)
; CHECK: [[LOAD16:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP16]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64 + 16, align 64, addrspace 4)
; CHECK: [[OFFSET32:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
; CHECK: [[GEP32:%[0-9]+]]:vgpr(p4) = G_GEP [[PTR]], [[OFFSET32]](s64)
; CHECK: [[GEP32:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PTR]], [[OFFSET32]](s64)
; CHECK: [[LOAD32:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP32]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64 + 32, align 64, addrspace 4)
; CHECK: [[OFFSET48:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
; CHECK: [[GEP48:%[0-9]+]]:vgpr(p4) = G_GEP [[PTR]], [[OFFSET48]](s64)
; CHECK: [[GEP48:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PTR]], [[OFFSET48]](s64)
; CHECK: [[LOAD48:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP48]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64 + 48, align 64, addrspace 4)
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[LOAD0]](<2 x s64>), [[LOAD16]](<2 x s64>), [[LOAD32]](<2 x s64>), [[LOAD48]](<2 x s64>)
; CHECK: [[IDX0:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0


@ -13,10 +13,10 @@ body: |
; CHECK-LABEL: name: gep_p1_s_k
; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 1
; CHECK: [[GEP:%[0-9]+]]:sgpr(p1) = G_GEP [[COPY]], [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 1
%2:_(p1) = G_GEP %0, %1
%2:_(p1) = G_PTR_ADD %0, %1
...
---
@ -30,10 +30,10 @@ body: |
; CHECK-LABEL: name: gep_p1_s_s
; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
; CHECK: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr2_sgpr3
; CHECK: [[GEP:%[0-9]+]]:sgpr(p1) = G_GEP [[COPY]], [[COPY1]](s64)
; CHECK: [[GEP:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[COPY]], [[COPY1]](s64)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(s64) = COPY $sgpr2_sgpr3
%2:_(p1) = G_GEP %0, %1
%2:_(p1) = G_PTR_ADD %0, %1
...
---
@ -48,10 +48,10 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:vgpr(p1) = COPY $vgpr0_vgpr1
; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 1
; CHECK: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY [[C]](s64)
; CHECK: [[GEP:%[0-9]+]]:vgpr(p1) = G_GEP [[COPY]], [[COPY1]](s64)
; CHECK: [[GEP:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[COPY1]](s64)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = G_CONSTANT i64 1
%2:_(p1) = G_GEP %0, %1
%2:_(p1) = G_PTR_ADD %0, %1
...
---
@ -66,10 +66,10 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:vgpr(p1) = COPY $vgpr0_vgpr1
; CHECK: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
; CHECK: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY1]](s64)
; CHECK: [[GEP:%[0-9]+]]:vgpr(p1) = G_GEP [[COPY]], [[COPY2]](s64)
; CHECK: [[GEP:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[COPY2]](s64)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $sgpr0_sgpr1
%2:_(p1) = G_GEP %0, %1
%2:_(p1) = G_PTR_ADD %0, %1
...
---
@ -83,8 +83,8 @@ body: |
; CHECK-LABEL: name: gep_p1_v_v
; CHECK: [[COPY:%[0-9]+]]:vgpr(p1) = COPY $vgpr0_vgpr1
; CHECK: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3
; CHECK: [[GEP:%[0-9]+]]:vgpr(p1) = G_GEP [[COPY]], [[COPY1]](s64)
; CHECK: [[GEP:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[COPY1]](s64)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(p1) = G_GEP %0, %1
%2:_(p1) = G_PTR_ADD %0, %1
...


@ -1045,7 +1045,7 @@ body: |
%1(s32) = COPY $r1
; CHECK: %[[OFF:[0-9]+]]:gpr = COPY $r1
%2(p0) = G_GEP %0, %1(s32)
%2(p0) = G_PTR_ADD %0, %1(s32)
; CHECK: %[[GEP:[0-9]+]]:gpr = ADDrr %[[PTR]], %[[OFF]], 14, $noreg, $noreg
$r0 = COPY %2(p0)


@ -557,7 +557,7 @@ define void @test_load_store_struct({i32, i32} *%addr) {
; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY $r0
; CHECK-DAG: [[VAL1:%[0-9]+]]:_(s32) = G_LOAD [[ADDR1]](p0) :: (load 4 from %ir.addr)
; CHECK-DAG: [[OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK-DAG: [[ADDR2:%[0-9]+]]:_(p0) = G_GEP [[ADDR1]], [[OFFSET]](s32)
; CHECK-DAG: [[ADDR2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR1]], [[OFFSET]](s32)
; CHECK-DAG: [[VAL2:%[0-9]+]]:_(s32) = G_LOAD [[ADDR2]](p0) :: (load 4 from %ir.addr + 4)
; CHECK-DAG: G_STORE [[VAL1]](s32), [[ADDR1]](p0) :: (store 4 into %ir.addr)
; CHECK-DAG: [[ADDR3:%[0-9]+]]:_(p0) = COPY [[ADDR2]]


@ -108,7 +108,7 @@ body: |
; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY $r0
; CHECK-NEXT: [[V1:%[0-9]+]]:_(s32) = G_LOAD [[ADDR1]](p0) :: (load 4, align 1)
; CHECK-NEXT: [[OFF:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK-NEXT: [[ADDR2:%[0-9]+]]:_(p0) = G_GEP [[ADDR1]], [[OFF]]
; CHECK-NEXT: [[ADDR2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR1]], [[OFF]]
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY [[ADDR2]]
; CHECK-NEXT: [[V2:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4, align 1)
; CHECK-NEXT: G_STORE [[V1]](s32), [[ADDR1]](p0) :: (store 4, align 1)
@ -145,7 +145,7 @@ body: |
; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY $r0
; CHECK-NEXT: [[V1:%[0-9]+]]:_(s32) = G_LOAD [[ADDR1]](p0) :: (load 4, align 1)
; CHECK-NEXT: [[OFF:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK-NEXT: [[ADDR2:%[0-9]+]]:_(p0) = G_GEP [[ADDR1]], [[OFF]]
; CHECK-NEXT: [[ADDR2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR1]], [[OFF]]
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY [[ADDR2]]
; CHECK-NEXT: [[V2:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4, align 1)
; CHECK-NEXT: G_STORE [[V1]](s32), [[ADDR1]](p0) :: (store 4, align 1)
@ -184,8 +184,8 @@ body: |
%0(p0) = COPY $r0
%1(s32) = COPY $r1
; CHECK: {{%[0-9]+}}:_(p0) = G_GEP {{%[0-9]+}}, {{%[0-9]+}}(s32)
%2(p0) = G_GEP %0, %1(s32)
; CHECK: {{%[0-9]+}}:_(p0) = G_PTR_ADD {{%[0-9]+}}, {{%[0-9]+}}(s32)
%2(p0) = G_PTR_ADD %0, %1(s32)
$r0 = COPY %2(p0)
BX_RET 14, $noreg, implicit $r0
@ -209,10 +209,10 @@ body: |
%0(p0) = COPY $r0
%1(s16) = G_LOAD %0(p0) :: (load 2)
; CHECK-NOT: G_GEP {{%[0-9]+}}, {{%[0-9]+}}(s16)
; CHECK: {{%[0-9]+}}:_(p0) = G_GEP {{%[0-9]+}}, {{%[0-9]+}}(s32)
; CHECK-NOT: G_GEP {{%[0-9]+}}, {{%[0-9]+}}(s16)
%2(p0) = G_GEP %0, %1(s16)
; CHECK-NOT: G_PTR_ADD {{%[0-9]+}}, {{%[0-9]+}}(s16)
; CHECK: {{%[0-9]+}}:_(p0) = G_PTR_ADD {{%[0-9]+}}, {{%[0-9]+}}(s32)
; CHECK-NOT: G_PTR_ADD {{%[0-9]+}}, {{%[0-9]+}}(s16)
%2(p0) = G_PTR_ADD %0, %1(s16)
$r0 = COPY %2(p0)
BX_RET 14, $noreg, implicit $r0


@ -94,12 +94,12 @@ body: |
; SOFT-ABI-DAG: $r{{[2-3]}} = COPY [[X1]]
; SOFT-ABI: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
; SOFT-ABI: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; SOFT-ABI: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
; SOFT-ABI: [[FI1:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP1]], [[OFF1]](s32)
; FIXME: This ought to be align 8 but ARM's call lowering hardcodes it to 1
; SOFT-ABI: G_STORE [[Y0]](s32), [[FI1]](p0){{.*}}store 4 into stack, align 1)
; SOFT-ABI: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; SOFT-ABI: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[FI1]], [[OFF2]](s32)
; SOFT-ABI: [[FI2:%[0-9]+]]:_(p0) = G_PTR_ADD [[FI1]], [[OFF2]](s32)
; SOFT-ABI: G_STORE [[Y1]](s32), [[FI2]](p0){{.*}}store 4 into stack + 4, align 1)
; SOFT-ABI: BL &fma, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
; SOFT-ABI-DAG: [[R0:%[0-9]+]]:_(s32) = COPY $r0


@ -36,11 +36,11 @@ define arm_aapcscc i32* @test_call_simple_stack_params(i32 *%a, i32 %b) {
; CHECK-DAG: $r3 = COPY [[AVREG]]
; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP1]], [[OFF1]](s32)
; CHECK: G_STORE [[BVREG]](s32), [[FI1]](p0){{.*}}store 4
; CHECK: [[SP2:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[SP2]], [[OFF2]](s32)
; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP2]], [[OFF2]](s32)
; CHECK: G_STORE [[AVREG]](p0), [[FI2]](p0){{.*}}store 4
; ARM: BL @simple_stack_params_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
; THUMB: tBL 14, $noreg, @simple_stack_params_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
@ -75,27 +75,27 @@ define arm_aapcscc signext i16 @test_call_ext_params(i8 %a, i16 %b, i1 %c) {
; CHECK: $r3 = COPY [[ZEXTB]]
; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP1]], [[OFF1]](s32)
; CHECK: [[SEXTA2:%[0-9]+]]:_(s32) = G_SEXT [[AVREG]]
; CHECK: G_STORE [[SEXTA2]](s32), [[FI1]](p0){{.*}}store 4
; CHECK: [[SP2:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[SP2]], [[OFF2]](s32)
; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP2]], [[OFF2]](s32)
; CHECK: [[ZEXTA2:%[0-9]+]]:_(s32) = G_ZEXT [[AVREG]]
; CHECK: G_STORE [[ZEXTA2]](s32), [[FI2]](p0){{.*}}store 4
; CHECK: [[SP3:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[FI3:%[0-9]+]]:_(p0) = G_GEP [[SP3]], [[OFF3]](s32)
; CHECK: [[FI3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP3]], [[OFF3]](s32)
; CHECK: [[SEXTB2:%[0-9]+]]:_(s32) = G_SEXT [[BVREG]]
; CHECK: G_STORE [[SEXTB2]](s32), [[FI3]](p0){{.*}}store 4
; CHECK: [[SP4:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[FI4:%[0-9]+]]:_(p0) = G_GEP [[SP4]], [[OFF4]](s32)
; CHECK: [[FI4:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP4]], [[OFF4]](s32)
; CHECK: [[ZEXTB2:%[0-9]+]]:_(s32) = G_ZEXT [[BVREG]]
; CHECK: G_STORE [[ZEXTB2]](s32), [[FI4]](p0){{.*}}store 4
; CHECK: [[SP5:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[FI5:%[0-9]+]]:_(p0) = G_GEP [[SP5]], [[OFF5]](s32)
; CHECK: [[FI5:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP5]], [[OFF5]](s32)
; CHECK: [[ZEXTC:%[0-9]+]]:_(s32) = G_ZEXT [[CVREG]]
; CHECK: G_STORE [[ZEXTC]](s32), [[FI5]](p0){{.*}}store 4
; ARM: BL @ext_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
@ -151,11 +151,11 @@ define arm_aapcscc double @test_call_aapcs_fp_params(double %a, float %b) {
; BIG-DAG: $r3 = COPY [[A1]]
; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP1]], [[OFF1]](s32)
; CHECK: G_STORE [[BVREG]](s32), [[FI1]](p0){{.*}}store 4
; CHECK: [[SP2:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[SP2]], [[OFF2]](s32)
; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP2]], [[OFF2]](s32)
; CHECK: G_STORE [[AVREG]](s64), [[FI2]](p0){{.*}}store 8
; ARM: BL @aapcscc_fp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
; THUMB: tBL 14, $noreg, @aapcscc_fp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
@ -273,13 +273,13 @@ define arm_aapcscc void @test_large_int_arrays([20 x i32] %arr) {
; CHECK: $r3 = COPY [[R3]]
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF_FIRST_ELEMENT:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[FIRST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_FIRST_ELEMENT]](s32)
; CHECK: [[FIRST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[OFF_FIRST_ELEMENT]](s32)
; CHECK: G_STORE [[FIRST_STACK_ELEMENT]](s32), [[FIRST_STACK_ARG_ADDR]]{{.*}}store 4
; Match the second-to-last offset, so we can get the correct SP for the last element
; CHECK: G_CONSTANT i32 56
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF_LAST_ELEMENT:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
; CHECK: [[LAST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_LAST_ELEMENT]](s32)
; CHECK: [[LAST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[OFF_LAST_ELEMENT]](s32)
; CHECK: G_STORE [[LAST_STACK_ELEMENT]](s32), [[LAST_STACK_ARG_ADDR]]{{.*}}store 4
; ARM: BL @large_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3
; THUMB: tBL 14, $noreg, @large_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3
@ -321,7 +321,7 @@ define arm_aapcscc [2 x float] @test_fp_arrays_aapcs([3 x double] %arr) {
; BIG: $r3 = COPY [[ARR1_0]](s32)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[ARR2_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ARR2_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[ARR2_OFFSET]](s32)
; CHECK: [[ARR2_ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[ARR2_OFFSET]](s32)
; CHECK: G_STORE [[ARR2]](s64), [[ARR2_ADDR]](p0){{.*}}store 8
; ARM: BL @fp_arrays_aapcs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
; THUMB: tBL 14, $noreg, @fp_arrays_aapcs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
@ -370,19 +370,19 @@ define arm_aapcs_vfpcc [4 x float] @test_fp_arrays_aapcs_vfp([3 x double] %x, [3
; CHECK: $s8 = COPY [[Y2]](s32)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[Z0_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[Z0_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[Z0_OFFSET]](s32)
; CHECK: [[Z0_ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[Z0_OFFSET]](s32)
; CHECK: G_STORE [[Z0]](s64), [[Z0_ADDR]](p0){{.*}}store 8
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[Z1_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[Z1_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[Z1_OFFSET]](s32)
; CHECK: [[Z1_ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[Z1_OFFSET]](s32)
; CHECK: G_STORE [[Z1]](s64), [[Z1_ADDR]](p0){{.*}}store 8
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[Z2_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[Z2_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[Z2_OFFSET]](s32)
; CHECK: [[Z2_ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[Z2_OFFSET]](s32)
; CHECK: G_STORE [[Z2]](s64), [[Z2_ADDR]](p0){{.*}}store 8
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[Z3_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[Z3_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[Z3_OFFSET]](s32)
; CHECK: [[Z3_ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[Z3_OFFSET]](s32)
; CHECK: G_STORE [[Z3]](s64), [[Z3_ADDR]](p0){{.*}}store 8
; ARM: BL @fp_arrays_aapcs_vfp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit $d2, implicit $s6, implicit $s7, implicit $s8, implicit-def $s0, implicit-def $s1, implicit-def $s2, implicit-def $s3
; THUMB: tBL 14, $noreg, @fp_arrays_aapcs_vfp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit $d2, implicit $s6, implicit $s7, implicit $s8, implicit-def $s0, implicit-def $s1, implicit-def $s2, implicit-def $s3
@ -427,13 +427,13 @@ define arm_aapcscc [2 x i32*] @test_tough_arrays([6 x [4 x i32]] %arr) {
; CHECK: $r3 = COPY [[R3]]
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF_FIRST_ELEMENT:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[FIRST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_FIRST_ELEMENT]](s32)
; CHECK: [[FIRST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[OFF_FIRST_ELEMENT]](s32)
; CHECK: G_STORE [[FIRST_STACK_ELEMENT]](s32), [[FIRST_STACK_ARG_ADDR]]{{.*}}store 4
; Match the second-to-last offset, so we can get the correct SP for the last element
; CHECK: G_CONSTANT i32 72
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF_LAST_ELEMENT:%[0-9]+]]:_(s32) = G_CONSTANT i32 76
; CHECK: [[LAST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_LAST_ELEMENT]](s32)
; CHECK: [[LAST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[OFF_LAST_ELEMENT]](s32)
; CHECK: G_STORE [[LAST_STACK_ELEMENT]](s32), [[LAST_STACK_ARG_ADDR]]{{.*}}store 4
; ARM: BL @tough_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
; THUMB: tBL 14, $noreg, @tough_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1


@ -493,7 +493,7 @@ body: |
%2(p0) = COPY $sp
%3(s32) = G_CONSTANT i32 8
%4(p0) = G_GEP %2, %3(s32)
%4(p0) = G_PTR_ADD %2, %3(s32)
G_STORE %1(s32), %4(p0) :: (store 4)
BX_RET 14, $noreg
@ -520,7 +520,7 @@ body: |
%0(p0) = COPY $r0
%1(s32) = COPY $r1
%2(p0) = G_GEP %0, %1(s32)
%2(p0) = G_PTR_ADD %0, %1(s32)
$r0 = COPY %2(p0)
BX_RET 14, $noreg, implicit $r0
...


@ -14,11 +14,11 @@ define arm_aapcscc i32 @test_call_to_varargs_with_ints(i32 *%a, i32 %b) {
; CHECK-DAG: $r3 = COPY [[AVREG]]
; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP1]], [[OFF1]](s32)
; CHECK: G_STORE [[BVREG]](s32), [[FI1]](p0){{.*}}store 4
; CHECK: [[SP2:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[SP2]], [[OFF2]](s32)
; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP2]], [[OFF2]](s32)
; CHECK: G_STORE [[AVREG]](p0), [[FI2]](p0){{.*}}store 4
; ARM: BL @int_varargs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
; THUMB: tBL 14, $noreg, @int_varargs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
@ -45,7 +45,7 @@ define arm_aapcs_vfpcc float @test_call_to_varargs_with_floats(float %a, double
; CHECK-DAG: $r3 = COPY [[B2]]
; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP1]], [[OFF1]](s32)
; CHECK: G_STORE [[BVREG]](s64), [[FI1]](p0){{.*}}store 8
; ARM: BL @float_varargs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r2, implicit $r3, implicit-def $r0
; THUMB: tBL 14, $noreg, @float_varargs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r2, implicit $r3, implicit-def $r0
@ -71,7 +71,7 @@ define arm_aapcs_vfpcc float @test_indirect_call_to_varargs(float (float, double
; CHECK-DAG: $r3 = COPY [[B2]]
; CHECK: [[SP1:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_GEP [[SP1]], [[OFF1]](s32)
; CHECK: [[FI1:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP1]], [[OFF1]](s32)
; CHECK: G_STORE [[BVREG]](s64), [[FI1]](p0){{.*}}store 8
; ARM: BLX [[FPTRVREG]](p0), csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r2, implicit $r3, implicit-def $r0
; THUMB: tBLXr 14, $noreg, [[FPTRVREG]](p0), csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r2, implicit $r3, implicit-def $r0


@ -135,7 +135,7 @@ body: |
%1(s32) = COPY $r1
; CHECK: [[OFF:%[0-9]+]]:rgpr = COPY $r1
%2(p0) = G_GEP %0, %1(s32)
%2(p0) = G_PTR_ADD %0, %1(s32)
; CHECK: [[GEP:%[0-9]+]]:gprnopc = t2ADDrr [[PTR]], [[OFF]], 14, $noreg, $noreg
$r0 = COPY %2(p0)


@ -30,7 +30,7 @@ body: |
; MIPS32: RetRA implicit $v0
%0:gprb(p0) = COPY $a0
%1:gprb(s32) = G_CONSTANT i32 32767
%2:gprb(p0) = G_GEP %0, %1(s32)
%2:gprb(p0) = G_PTR_ADD %0, %1(s32)
%4:gprb(s32) = G_ZEXTLOAD %2(p0) :: (load 1)
$v0 = COPY %4(s32)
RetRA implicit $v0
@ -55,7 +55,7 @@ body: |
%2:gprb(s32) = COPY $a0
%1:gprb(p0) = COPY $a1
%3:gprb(s32) = G_CONSTANT i32 -32768
%4:gprb(p0) = G_GEP %1, %3(s32)
%4:gprb(p0) = G_PTR_ADD %1, %3(s32)
%5:gprb(s32) = COPY %2(s32)
G_STORE %5(s32), %4(p0) :: (store 1)
RetRA
@ -82,7 +82,7 @@ body: |
%2:gprb(s32) = COPY $a0
%1:gprb(p0) = COPY $a1
%3:gprb(s32) = G_CONSTANT i32 32768
%4:gprb(p0) = G_GEP %1, %3(s32)
%4:gprb(p0) = G_PTR_ADD %1, %3(s32)
%5:gprb(s32) = COPY %2(s32)
G_STORE %5(s32), %4(p0) :: (store 1)
RetRA
@ -109,7 +109,7 @@ body: |
; MIPS32: RetRA implicit $v0
%0:gprb(p0) = COPY $a0
%1:gprb(s32) = G_CONSTANT i32 -32769
%2:gprb(p0) = G_GEP %0, %1(s32)
%2:gprb(p0) = G_PTR_ADD %0, %1(s32)
%4:gprb(s32) = G_SEXTLOAD %2(p0) :: (load 1)
$v0 = COPY %4(s32)
RetRA implicit $v0
@ -133,7 +133,7 @@ body: |
; MIPS32: RetRA implicit $f0
%0:gprb(p0) = COPY $a0
%1:gprb(s32) = G_CONSTANT i32 40
%2:gprb(p0) = G_GEP %0, %1(s32)
%2:gprb(p0) = G_PTR_ADD %0, %1(s32)
%3:fprb(s32) = G_LOAD %2(p0) :: (load 4)
$f0 = COPY %3(s32)
RetRA implicit $f0
@ -158,7 +158,7 @@ body: |
%0:fprb(s64) = COPY $d6
%1:gprb(p0) = COPY $a2
%2:gprb(s32) = G_CONSTANT i32 -80
%3:gprb(p0) = G_GEP %1, %2(s32)
%3:gprb(p0) = G_PTR_ADD %1, %2(s32)
G_STORE %0(s64), %3(p0) :: (store 8)
RetRA
@ -181,7 +181,7 @@ body: |
; MIPS32: RetRA implicit $v0
%0:gprb(p0) = COPY $a0
%1:gprb(s32) = G_CONSTANT i32 -20
%2:gprb(p0) = G_GEP %0, %1(s32)
%2:gprb(p0) = G_PTR_ADD %0, %1(s32)
%4:gprb(s32) = G_LOAD %2(p0) :: (load 2)
$v0 = COPY %4(s32)
RetRA implicit $v0
@ -206,7 +206,7 @@ body: |
%0:gprb(s32) = COPY $a0
%1:gprb(p0) = COPY $a1
%2:gprb(s32) = G_CONSTANT i32 40
%3:gprb(p0) = G_GEP %1, %2(s32)
%3:gprb(p0) = G_PTR_ADD %1, %2(s32)
G_STORE %0(s32), %3(p0) :: (store 4)
RetRA


@ -51,7 +51,7 @@ body: |
$a3 = COPY %3(s32)
%7:gprb(p0) = COPY $sp
%8:gprb(s32) = G_CONSTANT i32 16
%9:gprb(p0) = G_GEP %7, %8(s32)
%9:gprb(p0) = G_PTR_ADD %7, %8(s32)
G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 4)
JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
%6:gprb(s32) = COPY $v0


@ -112,7 +112,7 @@ body: |
SW %19, %9(p0), 0 :: (store 4)
%11:gprb(p0) = G_LOAD %9(p0) :: (load 4 from %ir.aq)
%12:gprb(s32) = G_CONSTANT i32 4
%13:gprb(p0) = G_GEP %11, %12(s32)
%13:gprb(p0) = G_PTR_ADD %11, %12(s32)
G_STORE %13(p0), %9(p0) :: (store 4 into %ir.aq)
%14:gprb(p0) = G_LOAD %11(p0) :: (load 4 from %ir.2)
G_STORE %14(p0), %10(p0) :: (store 4 into %ir.s)

Some files were not shown because too many files have changed in this diff.