AMDGPU: Fix emitting invalid workitem intrinsics for HSA
The AMDGPUPromoteAlloca pass was emitting the read.local.size calls, which under HSA were incorrectly selected to loads from the offset Mesa uses off of the kernarg pointer. Error on the intrinsics which aren't supported by HSA, and start emitting the correct IR to read the workgroup size out of the dispatch pointer instead.

Also initialize the pass so it can be tested with opt, and start moving towards not depending on the subtarget as an argument.

llvm-svn: 259297
This commit is contained in:
parent bcaaea3448
commit 2699008644
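In brief: the pass previously emitted llvm.r600.read.local.size.* unconditionally, and under HSA those calls were selected to kernarg loads at the offsets Mesa uses, returning garbage. A minimal sketch of the two IR sequences the pass now chooses between (value names are illustrative; the HSA sequence mirrors the HSAOPT checks added to the tests below):

  ; non-HSA targets (unchanged): query the local size via intrinsic
  %size.y = call i32 @llvm.r600.read.local.size.y(), !range !0

  ; amdhsa: read workgroup_size_y out of the kernel dispatch packet
  %dispatch = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
  %ptr = bitcast i8 addrspace(2)* %dispatch to i32 addrspace(2)*
  %xy.gep = getelementptr inbounds i32, i32 addrspace(2)* %ptr, i64 1
  %xy = load i32, i32 addrspace(2)* %xy.gep, align 4
  %size.y.hsa = lshr i32 %xy, 16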
lib/Target/AMDGPU/AMDGPU.h
@@ -70,7 +70,10 @@ void initializeSILoadStoreOptimizerPass(PassRegistry &);
 extern char &SILoadStoreOptimizerID;
 
 // Passes common to R600 and SI
-FunctionPass *createAMDGPUPromoteAlloca(const AMDGPUSubtarget &ST);
+FunctionPass *createAMDGPUPromoteAlloca(const TargetMachine *TM = nullptr);
+void initializeAMDGPUPromoteAllocaPass(PassRegistry&);
+extern char &AMDGPUPromoteAllocaID;
+
 Pass *createAMDGPUStructurizeCFGPass();
 FunctionPass *createAMDGPUISelDag(TargetMachine &tm);
 ModulePass *createAMDGPUAlwaysInlinePass();
lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -17,6 +17,7 @@
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/MDBuilder.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 
@@ -26,20 +27,42 @@ using namespace llvm;
 
 namespace {
 
+// FIXME: This can create globals so should be a module pass.
 class AMDGPUPromoteAlloca : public FunctionPass,
-                       public InstVisitor<AMDGPUPromoteAlloca> {
-
-  static char ID;
+                            public InstVisitor<AMDGPUPromoteAlloca> {
+private:
+  const TargetMachine *TM;
   Module *Mod;
-  const AMDGPUSubtarget &ST;
+  MDNode *MaxWorkGroupSizeRange;
+
+  // FIXME: This should be per-kernel.
   int LocalMemAvailable;
+
+  bool IsAMDGCN;
+  bool IsAMDHSA;
+
+  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
+  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
 
 public:
-  AMDGPUPromoteAlloca(const AMDGPUSubtarget &st) : FunctionPass(ID), ST(st),
-                                                   LocalMemAvailable(0) { }
+  static char ID;
+
+  AMDGPUPromoteAlloca(const TargetMachine *TM_ = nullptr) :
+    FunctionPass(ID),
+    TM(TM_),
+    Mod(nullptr),
+    MaxWorkGroupSizeRange(nullptr),
+    LocalMemAvailable(0),
+    IsAMDGCN(false),
+    IsAMDHSA(false) { }
+
+  bool doInitialization(Module &M) override;
   bool runOnFunction(Function &F) override;
-  const char *getPassName() const override { return "AMDGPU Promote Alloca"; }
+
+  const char *getPassName() const override {
+    return "AMDGPU Promote Alloca";
+  }
+
   void visitAlloca(AllocaInst &I);
 };
@@ -47,15 +70,40 @@ public:
 
 char AMDGPUPromoteAlloca::ID = 0;
 
+INITIALIZE_TM_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
+                   "AMDGPU promote alloca to vector or LDS", false, false)
+
+char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
+
 
 bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
+  if (!TM)
+    return false;
+
   Mod = &M;
+
+  // The maximum workitem id.
+  //
+  // FIXME: Should get as subtarget property. Usually runtime enforced max is
+  // 256.
+  MDBuilder MDB(Mod->getContext());
+  MaxWorkGroupSizeRange = MDB.createRange(APInt(32, 0), APInt(32, 2048));
+
+  const Triple &TT = TM->getTargetTriple();
+
+  IsAMDGCN = TT.getArch() == Triple::amdgcn;
+  IsAMDHSA = TT.getOS() == Triple::AMDHSA;
+
   return false;
 }
 
 bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
+  if (!TM)
+    return false;
+
+  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
+
   FunctionType *FTy = F.getFunctionType();
 
   LocalMemAvailable = ST.getLocalMemorySize();
 
@@ -100,6 +148,119 @@ bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
   return false;
 }
 
+std::pair<Value *, Value *>
+AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
+  if (!IsAMDHSA) {
+    Function *LocalSizeYFn
+      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
+    Function *LocalSizeZFn
+      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
+
+    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
+    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
+
+    LocalSizeY->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
+    LocalSizeZ->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
+
+    return std::make_pair(LocalSizeY, LocalSizeZ);
+  }
+
+  // We must read the size out of the dispatch pointer.
+  assert(IsAMDGCN);
+
+  // We are indexing into this struct, and want to extract the workgroup_size_*
+  // fields.
+  //
+  // typedef struct hsa_kernel_dispatch_packet_s {
+  //   uint16_t header;
+  //   uint16_t setup;
+  //   uint16_t workgroup_size_x;
+  //   uint16_t workgroup_size_y;
+  //   uint16_t workgroup_size_z;
+  //   uint16_t reserved0;
+  //   uint32_t grid_size_x;
+  //   uint32_t grid_size_y;
+  //   uint32_t grid_size_z;
+  //
+  //   uint32_t private_segment_size;
+  //   uint32_t group_segment_size;
+  //   uint64_t kernel_object;
+  //
+  // #ifdef HSA_LARGE_MODEL
+  //   void *kernarg_address;
+  // #elif defined HSA_LITTLE_ENDIAN
+  //   void *kernarg_address;
+  //   uint32_t reserved1;
+  // #else
+  //   uint32_t reserved1;
+  //   void *kernarg_address;
+  // #endif
+  //   uint64_t reserved2;
+  //   hsa_signal_t completion_signal; // uint64_t wrapper
+  // } hsa_kernel_dispatch_packet_t
+  //
+  Function *DispatchPtrFn
+    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
+
+  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
+  DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NoAlias);
+  DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
+
+  // Size of the dispatch packet struct.
+  DispatchPtr->addDereferenceableAttr(AttributeSet::ReturnIndex, 64);
+
+  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
+  Value *CastDispatchPtr = Builder.CreateBitCast(
+    DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
+
+  // We could do a single 64-bit load here, but it's likely that the basic
+  // 32-bit and extract sequence is already present, and it is probably easier
+  // to CSE this. The loads should be mergable later anyway.
+  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
+  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);
+
+  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
+  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);
+
+  MDNode *MD = llvm::MDNode::get(Mod->getContext(), None);
+  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
+  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
+  LoadZU->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
+
+  // Extract y component. Upper half of LoadZU should be zero already.
+  Value *Y = Builder.CreateLShr(LoadXY, 16);
+
+  return std::make_pair(Y, LoadZU);
+}
+
+Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
+  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;
+
+  switch (N) {
+  case 0:
+    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
+      : Intrinsic::r600_read_tidig_x;
+    break;
+  case 1:
+    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
+      : Intrinsic::r600_read_tidig_y;
+    break;
+
+  case 2:
+    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
+      : Intrinsic::r600_read_tidig_z;
+    break;
+  default:
+    llvm_unreachable("invalid dimension");
+  }
+
+  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
+  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
+  CI->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
+
+  return CI;
+}
+
 static VectorType *arrayTypeToVecType(Type *ArrayTy) {
   return VectorType::get(ArrayTy->getArrayElementType(),
                          ArrayTy->getArrayNumElements());
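Note: the GEPs above use i32 indices 1 and 2 because, in the packet layout quoted in the comment, bytes 4-7 pack workgroup_size_x (low 16 bits) with workgroup_size_y (high 16 bits), and bytes 8-11 pack workgroup_size_z with reserved0, which is required to be zero. A worked example, assuming a hypothetical 64x8x2 workgroup:

  load of i32 index 1  ->  0x00080040   ; x = 0x0040 = 64, y = 0x0008 = 8
  lshr ..., 16         ->  0x00000008   ; y component extracted
  load of i32 index 2  ->  0x00000002   ; z = 2; reserved0 = 0, so usable directly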
@@ -317,27 +478,12 @@ void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
     *Mod, GVTy, false, GlobalValue::ExternalLinkage, 0, I.getName(), 0,
     GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
 
-  FunctionType *FTy = FunctionType::get(
-      Type::getInt32Ty(Mod->getContext()), false);
-  AttributeSet AttrSet;
-  AttrSet.addAttribute(Mod->getContext(), 0, Attribute::ReadNone);
+  Value *TCntY, *TCntZ;
 
-  Value *ReadLocalSizeY = Mod->getOrInsertFunction(
-      "llvm.r600.read.local.size.y", FTy, AttrSet);
-  Value *ReadLocalSizeZ = Mod->getOrInsertFunction(
-      "llvm.r600.read.local.size.z", FTy, AttrSet);
-  Value *ReadTIDIGX = Mod->getOrInsertFunction(
-      "llvm.r600.read.tidig.x", FTy, AttrSet);
-  Value *ReadTIDIGY = Mod->getOrInsertFunction(
-      "llvm.r600.read.tidig.y", FTy, AttrSet);
-  Value *ReadTIDIGZ = Mod->getOrInsertFunction(
-      "llvm.r600.read.tidig.z", FTy, AttrSet);
-
-  Value *TCntY = Builder.CreateCall(ReadLocalSizeY, {});
-  Value *TCntZ = Builder.CreateCall(ReadLocalSizeZ, {});
-  Value *TIdX = Builder.CreateCall(ReadTIDIGX, {});
-  Value *TIdY = Builder.CreateCall(ReadTIDIGY, {});
-  Value *TIdZ = Builder.CreateCall(ReadTIDIGZ, {});
+  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
+  Value *TIdX = getWorkitemID(Builder, 0);
+  Value *TIdY = getWorkitemID(Builder, 1);
+  Value *TIdZ = getWorkitemID(Builder, 2);
 
   Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ);
   Tmp0 = Builder.CreateMul(Tmp0, TIdX);
@@ -427,6 +573,6 @@ void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
   }
 }
 
-FunctionPass *llvm::createAMDGPUPromoteAlloca(const AMDGPUSubtarget &ST) {
-  return new AMDGPUPromoteAlloca(ST);
+FunctionPass *llvm::createAMDGPUPromoteAlloca(const TargetMachine *TM) {
+  return new AMDGPUPromoteAlloca(TM);
 }
lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -52,6 +52,7 @@ extern "C" void LLVMInitializeAMDGPUTarget() {
   initializeSILoadStoreOptimizerPass(*PR);
   initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
   initializeAMDGPUAnnotateUniformValuesPass(*PR);
+  initializeAMDGPUPromoteAllocaPass(*PR);
   initializeSIAnnotateControlFlowPass(*PR);
 }
 
@@ -226,9 +227,10 @@ void AMDGPUPassConfig::addIRPasses() {
 }
 
 void AMDGPUPassConfig::addCodeGenPrepare() {
-  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
+  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
+  const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
   if (ST.isPromoteAllocaEnabled()) {
-    addPass(createAMDGPUPromoteAlloca(ST));
+    addPass(createAMDGPUPromoteAlloca(&TM));
     addPass(createSROAPass());
   }
   TargetPassConfig::addCodeGenPrepare();
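Note: with the pass now registered (INITIALIZE_TM_PASS plus the initializeAMDGPUPromoteAllocaPass call above), it can be run standalone, which is what the new opt RUN lines in the tests below rely on, e.g. (file name illustrative):

  opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -amdgpu-promote-alloca < private-memory.ll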
lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1304,6 +1304,13 @@ SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
                          DAG.getValueType(VT));
 }
 
+static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, EVT VT) {
+  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
+                                      "non-hsa intrinsic with hsa target");
+  DAG.getContext()->diagnose(BadIntrin);
+  return DAG.getUNDEF(VT);
+}
+
 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
   MachineFunction &MF = DAG.getMachineFunction();
@@ -1349,30 +1356,57 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                            DAG.getConstantFP(Min, DL, VT));
   }
   case Intrinsic::r600_read_ngroups_x:
+    if (Subtarget->isAmdHsaOS())
+      return emitNonHSAIntrinsicError(DAG, VT);
+
     return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                           SI::KernelInputOffsets::NGROUPS_X, false);
   case Intrinsic::r600_read_ngroups_y:
+    if (Subtarget->isAmdHsaOS())
+      return emitNonHSAIntrinsicError(DAG, VT);
+
     return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                           SI::KernelInputOffsets::NGROUPS_Y, false);
   case Intrinsic::r600_read_ngroups_z:
+    if (Subtarget->isAmdHsaOS())
+      return emitNonHSAIntrinsicError(DAG, VT);
+
     return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                           SI::KernelInputOffsets::NGROUPS_Z, false);
   case Intrinsic::r600_read_global_size_x:
+    if (Subtarget->isAmdHsaOS())
+      return emitNonHSAIntrinsicError(DAG, VT);
+
     return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                           SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
   case Intrinsic::r600_read_global_size_y:
+    if (Subtarget->isAmdHsaOS())
+      return emitNonHSAIntrinsicError(DAG, VT);
+
     return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                           SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
   case Intrinsic::r600_read_global_size_z:
+    if (Subtarget->isAmdHsaOS())
+      return emitNonHSAIntrinsicError(DAG, VT);
+
     return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                           SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
   case Intrinsic::r600_read_local_size_x:
+    if (Subtarget->isAmdHsaOS())
+      return emitNonHSAIntrinsicError(DAG, VT);
+
     return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                   SI::KernelInputOffsets::LOCAL_SIZE_X);
   case Intrinsic::r600_read_local_size_y:
+    if (Subtarget->isAmdHsaOS())
+      return emitNonHSAIntrinsicError(DAG, VT);
+
     return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                   SI::KernelInputOffsets::LOCAL_SIZE_Y);
   case Intrinsic::r600_read_local_size_z:
+    if (Subtarget->isAmdHsaOS())
+      return emitNonHSAIntrinsicError(DAG, VT);
+
     return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                   SI::KernelInputOffsets::LOCAL_SIZE_Z);
   case Intrinsic::amdgcn_read_workdim:
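Note: with these guards, using one of the Mesa-style r600 intrinsics under an HSA triple now reports a diagnostic and folds to undef instead of silently loading from a bogus kernarg offset. A minimal hypothetical module that would now trigger the error:

  target triple = "amdgcn--amdhsa"

  declare i32 @llvm.r600.read.local.size.x() nounwind readnone

  define void @kernel(i32 addrspace(1)* %out) {
    ; diagnosed as "non-hsa intrinsic with hsa target"
    %v = call i32 @llvm.r600.read.local.size.x()
    store i32 %v, i32 addrspace(1)* %out
    ret void
  }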
test/CodeGen/AMDGPU/private-memory-r600.ll (new file, 298 lines)
@@ -0,0 +1,298 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
; RUN: opt -S -mtriple=r600-unknown-unknown -mcpu=redwood -amdgpu-promote-alloca < %s | FileCheck -check-prefix=OPT %s

declare i32 @llvm.r600.read.tidig.x() nounwind readnone

; FUNC-LABEL: {{^}}mova_same_clause:

; R600: LDS_WRITE
; R600: LDS_WRITE
; R600: LDS_READ
; R600: LDS_READ

; OPT: call i32 @llvm.r600.read.local.size.y(), !range !0
; OPT: call i32 @llvm.r600.read.local.size.z(), !range !0
; OPT: call i32 @llvm.r600.read.tidig.x(), !range !0
; OPT: call i32 @llvm.r600.read.tidig.y(), !range !0
; OPT: call i32 @llvm.r600.read.tidig.z(), !range !0

define void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
entry:
  %stack = alloca [5 x i32], align 4
  %0 = load i32, i32 addrspace(1)* %in, align 4
  %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %0
  store i32 4, i32* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
  store i32 5, i32* %arrayidx3, align 4
  %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
  %2 = load i32, i32* %arrayidx10, align 4
  store i32 %2, i32 addrspace(1)* %out, align 4
  %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
  %3 = load i32, i32* %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  store i32 %3, i32 addrspace(1)* %arrayidx13
  ret void
}

; This test checks that the stack offset is calculated correctly for structs.
; All register loads/stores should be optimized away, so there shouldn't be
; any MOVA instructions.
;
; XXX: This generated code has unnecessary MOVs, we should be able to optimize
; this.

; FUNC-LABEL: {{^}}multiple_structs:
; R600-NOT: MOVA_INT
%struct.point = type { i32, i32 }

define void @multiple_structs(i32 addrspace(1)* %out) {
entry:
  %a = alloca %struct.point
  %b = alloca %struct.point
  %a.x.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 0
  %a.y.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 1
  %b.x.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 0
  %b.y.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 1
  store i32 0, i32* %a.x.ptr
  store i32 1, i32* %a.y.ptr
  store i32 2, i32* %b.x.ptr
  store i32 3, i32* %b.y.ptr
  %a.indirect.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 0
  %b.indirect.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 0
  %a.indirect = load i32, i32* %a.indirect.ptr
  %b.indirect = load i32, i32* %b.indirect.ptr
  %0 = add i32 %a.indirect, %b.indirect
  store i32 %0, i32 addrspace(1)* %out
  ret void
}

; Test direct access of a private array inside a loop. The private array
; loads and stores should be lowered to copies, so there shouldn't be any
; MOVA instructions.

; FUNC-LABEL: {{^}}direct_loop:
; R600-NOT: MOVA_INT

define void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
  %prv_array_const = alloca [2 x i32]
  %prv_array = alloca [2 x i32]
  %a = load i32, i32 addrspace(1)* %in
  %b_src_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
  %b = load i32, i32 addrspace(1)* %b_src_ptr
  %a_dst_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 0
  store i32 %a, i32* %a_dst_ptr
  %b_dst_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 1
  store i32 %b, i32* %b_dst_ptr
  br label %for.body

for.body:
  %inc = phi i32 [0, %entry], [%count, %for.body]
  %x_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 0
  %x = load i32, i32* %x_ptr
  %y_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array, i32 0, i32 0
  %y = load i32, i32* %y_ptr
  %xy = add i32 %x, %y
  store i32 %xy, i32* %y_ptr
  %count = add i32 %inc, 1
  %done = icmp eq i32 %count, 4095
  br i1 %done, label %for.end, label %for.body

for.end:
  %value_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array, i32 0, i32 0
  %value = load i32, i32* %value_ptr
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}short_array:

; R600: MOVA_INT
define void @short_array(i32 addrspace(1)* %out, i32 %index) {
entry:
  %0 = alloca [2 x i16]
  %1 = getelementptr [2 x i16], [2 x i16]* %0, i32 0, i32 0
  %2 = getelementptr [2 x i16], [2 x i16]* %0, i32 0, i32 1
  store i16 0, i16* %1
  store i16 1, i16* %2
  %3 = getelementptr [2 x i16], [2 x i16]* %0, i32 0, i32 %index
  %4 = load i16, i16* %3
  %5 = sext i16 %4 to i32
  store i32 %5, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}char_array:

; R600: MOVA_INT
define void @char_array(i32 addrspace(1)* %out, i32 %index) {
entry:
  %0 = alloca [2 x i8]
  %1 = getelementptr [2 x i8], [2 x i8]* %0, i32 0, i32 0
  %2 = getelementptr [2 x i8], [2 x i8]* %0, i32 0, i32 1
  store i8 0, i8* %1
  store i8 1, i8* %2
  %3 = getelementptr [2 x i8], [2 x i8]* %0, i32 0, i32 %index
  %4 = load i8, i8* %3
  %5 = sext i8 %4 to i32
  store i32 %5, i32 addrspace(1)* %out
  ret void

}

; Make sure we don't overwrite workitem information with private memory

; FUNC-LABEL: {{^}}work_item_info:
; R600-NOT: MOV T0.X
; Additional check in case the move ends up in the last slot
; R600-NOT: MOV * TO.X
define void @work_item_info(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = alloca [2 x i32]
  %1 = getelementptr [2 x i32], [2 x i32]* %0, i32 0, i32 0
  %2 = getelementptr [2 x i32], [2 x i32]* %0, i32 0, i32 1
  store i32 0, i32* %1
  store i32 1, i32* %2
  %3 = getelementptr [2 x i32], [2 x i32]* %0, i32 0, i32 %in
  %4 = load i32, i32* %3
  %5 = call i32 @llvm.r600.read.tidig.x()
  %6 = add i32 %4, %5
  store i32 %6, i32 addrspace(1)* %out
  ret void
}

; Test that two stack objects are not stored in the same register
; The second stack object should be in T3.X
; FUNC-LABEL: {{^}}no_overlap:
; R600_CHECK: MOV
; R600_CHECK: [[CHAN:[XYZW]]]+
; R600-NOT: [[CHAN]]+
define void @no_overlap(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = alloca [3 x i8], align 1
  %1 = alloca [2 x i8], align 1
  %2 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 0
  %3 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 1
  %4 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 2
  %5 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 0
  %6 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 1
  store i8 0, i8* %2
  store i8 1, i8* %3
  store i8 2, i8* %4
  store i8 1, i8* %5
  store i8 0, i8* %6
  %7 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 %in
  %8 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 %in
  %9 = load i8, i8* %7
  %10 = load i8, i8* %8
  %11 = add i8 %9, %10
  %12 = sext i8 %11 to i32
  store i32 %12, i32 addrspace(1)* %out
  ret void
}

define void @char_array_array(i32 addrspace(1)* %out, i32 %index) {
entry:
  %alloca = alloca [2 x [2 x i8]]
  %gep0 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 0
  %gep1 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 1
  store i8 0, i8* %gep0
  store i8 1, i8* %gep1
  %gep2 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 %index
  %load = load i8, i8* %gep2
  %sext = sext i8 %load to i32
  store i32 %sext, i32 addrspace(1)* %out
  ret void
}

define void @i32_array_array(i32 addrspace(1)* %out, i32 %index) {
entry:
  %alloca = alloca [2 x [2 x i32]]
  %gep0 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
  %gep1 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 1
  store i32 0, i32* %gep0
  store i32 1, i32* %gep1
  %gep2 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 %index
  %load = load i32, i32* %gep2
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

define void @i64_array_array(i64 addrspace(1)* %out, i32 %index) {
entry:
  %alloca = alloca [2 x [2 x i64]]
  %gep0 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 0
  %gep1 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 1
  store i64 0, i64* %gep0
  store i64 1, i64* %gep1
  %gep2 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 %index
  %load = load i64, i64* %gep2
  store i64 %load, i64 addrspace(1)* %out
  ret void
}

%struct.pair32 = type { i32, i32 }

define void @struct_array_array(i32 addrspace(1)* %out, i32 %index) {
entry:
  %alloca = alloca [2 x [2 x %struct.pair32]]
  %gep0 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 0, i32 1
  %gep1 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 1, i32 1
  store i32 0, i32* %gep0
  store i32 1, i32* %gep1
  %gep2 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 %index, i32 0
  %load = load i32, i32* %gep2
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

define void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) {
entry:
  %alloca = alloca [2 x %struct.pair32]
  %gep0 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 0, i32 1
  %gep1 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 1, i32 0
  store i32 0, i32* %gep0
  store i32 1, i32* %gep1
  %gep2 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 %index, i32 0
  %load = load i32, i32* %gep2
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

define void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
  %tmp = alloca [2 x i32]
  %tmp1 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
  %tmp2 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 1
  store i32 0, i32* %tmp1
  store i32 1, i32* %tmp2
  %cmp = icmp eq i32 %in, 0
  %sel = select i1 %cmp, i32* %tmp1, i32* %tmp2
  %load = load i32, i32* %sel
  store i32 %load, i32 addrspace(1)* %out
  ret void
}

; AMDGPUPromoteAlloca does not know how to handle ptrtoint. When it
; finds one, it should stop trying to promote.

; FUNC-LABEL: ptrtoint:
; SI-NOT: ds_write
; SI: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
; SI: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ;
define void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %alloca = alloca [16 x i32]
  %tmp0 = getelementptr [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
  store i32 5, i32* %tmp0
  %tmp1 = ptrtoint [16 x i32]* %alloca to i32
  %tmp2 = add i32 %tmp1, 5
  %tmp3 = inttoptr i32 %tmp2 to i32*
  %tmp4 = getelementptr i32, i32* %tmp3, i32 %b
  %tmp5 = load i32, i32* %tmp4
  store i32 %tmp5, i32 addrspace(1)* %out
  ret void
}

; OPT: !0 = !{i32 0, i32 2048}
test/CodeGen/AMDGPU/private-memory.ll
@@ -1,4 +1,3 @@
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
 ; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -march=amdgcn -mcpu=SI < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
 ; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE
 ; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -march=amdgcn -mcpu=SI < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
@@ -6,7 +5,10 @@
 ; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
 ; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
 
-declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=HSAOPT %s
+; RUN: opt -S -mtriple=amdgcn-unknown-unknown -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=NOHSAOPT %s
+
+declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 
 ; FUNC-LABEL: {{^}}mova_same_clause:
 
@@ -19,6 +21,10 @@ declare i32 @llvm.r600.read.tidig.x() nounwind readnone
 ; HSA-PROMOTE: workgroup_group_segment_byte_size = 5120
 ; HSA-PROMOTE: .end_amd_kernel_code_t
 
+; FIXME: These should be merged
+; HSA-PROMOTE: s_load_dword s{{[0-9]+}}, s[4:5], 0x1
+; HSA-PROMOTE: s_load_dword s{{[0-9]+}}, s[4:5], 0x2
+
 ; SI-PROMOTE: ds_write_b32
 ; SI-PROMOTE: ds_write_b32
 ; SI-PROMOTE: ds_read_b32
@@ -32,6 +38,25 @@ declare i32 @llvm.r600.read.tidig.x() nounwind readnone
 
 ; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ; encoding: [0x00,0x10,0x70,0xe0
 ; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ; encoding: [0x00,0x10,0x70,0xe0
+
+
+; HSAOPT: [[DISPATCH_PTR:%[0-9]+]] = call noalias nonnull dereferenceable(64) i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
+; HSAOPT: [[CAST_DISPATCH_PTR:%[0-9]+]] = bitcast i8 addrspace(2)* [[DISPATCH_PTR]] to i32 addrspace(2)*
+; HSAOPT: [[GEP0:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(2)* [[CAST_DISPATCH_PTR]], i64 1
+; HSAOPT: [[LDXY:%[0-9]+]] = load i32, i32 addrspace(2)* [[GEP0]], align 4, !invariant.load !0
+; HSAOPT: [[GEP1:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(2)* [[CAST_DISPATCH_PTR]], i64 2
+; HSAOPT: [[LDZU:%[0-9]+]] = load i32, i32 addrspace(2)* [[GEP1]], align 4, !range !1, !invariant.load !0
+; HSAOPT: [[EXTRACTY:%[0-9]+]] = lshr i32 [[LDXY]], 16
+
+; HSAOPT: call i32 @llvm.amdgcn.workitem.id.x(), !range !1
+; HSAOPT: call i32 @llvm.amdgcn.workitem.id.y(), !range !1
+; HSAOPT: call i32 @llvm.amdgcn.workitem.id.z(), !range !1
+
+; NOHSAOPT: call i32 @llvm.r600.read.local.size.y(), !range !0
+; NOHSAOPT: call i32 @llvm.r600.read.local.size.z(), !range !0
+; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.x(), !range !0
+; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.y(), !range !0
+; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.z(), !range !0
 define void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
 entry:
   %stack = alloca [5 x i32], align 4
@@ -185,7 +210,7 @@ entry:
   store i32 1, i32* %2
   %3 = getelementptr [2 x i32], [2 x i32]* %0, i32 0, i32 %in
   %4 = load i32, i32* %3
-  %5 = call i32 @llvm.r600.read.tidig.x()
+  %5 = call i32 @llvm.amdgcn.workitem.id.x()
   %6 = add i32 %4, %5
   store i32 %6, i32 addrspace(1)* %out
   ret void
@@ -323,3 +348,8 @@ define void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) {
   store i32 %tmp5, i32 addrspace(1)* %out
   ret void
 }
+
+; HSAOPT: !0 = !{}
+; HSAOPT: !1 = !{i32 0, i32 2048}
+
+; NOHSAOPT: !0 = !{i32 0, i32 2048}