//===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
|
|
#include "AMDGPUSubtarget.h"
|
|
#include "Utils/AMDGPUBaseInfo.h"
|
|
#include "llvm/ADT/STLExtras.h"
|
|
#include "llvm/Analysis/LoopInfo.h"
|
|
#include "llvm/Analysis/TargetTransformInfo.h"
|
|
#include "llvm/Analysis/ValueTracking.h"
|
|
#include "llvm/CodeGen/ISDOpcodes.h"
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
|
#include "llvm/IR/Argument.h"
|
|
#include "llvm/IR/Attributes.h"
|
|
#include "llvm/IR/BasicBlock.h"
|
|
#include "llvm/IR/CallingConv.h"
|
|
#include "llvm/IR/DataLayout.h"
|
|
#include "llvm/IR/DerivedTypes.h"
|
|
#include "llvm/IR/Function.h"
|
|
#include "llvm/IR/Instruction.h"
|
|
#include "llvm/IR/Instructions.h"
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
#include "llvm/IR/Module.h"
|
|
#include "llvm/IR/PatternMatch.h"
|
|
#include "llvm/IR/Type.h"
|
|
#include "llvm/IR/Value.h"
|
|
#include "llvm/MC/SubtargetFeature.h"
|
|
#include "llvm/Support/Casting.h"
|
|
#include "llvm/Support/CommandLine.h"
|
|
#include "llvm/Support/Debug.h"
|
|
#include "llvm/Support/ErrorHandling.h"
|
|
#include "llvm/Support/MachineValueType.h"
|
|
#include "llvm/Support/raw_ostream.h"
|
|
#include "llvm/Target/TargetMachine.h"
|
|
#include <algorithm>
|
|
#include <cassert>
|
|
#include <limits>
|
|
#include <utility>
|
|
|
|
using namespace llvm;
|
|
|
|
#define DEBUG_TYPE "AMDGPUtti"
|
|
|
|
static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2500), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);

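// Conservatively determine whether the branch condition \p Cond is computed
// from a PHI node that belongs to loop \p L itself (and not merely to one of
// its subloops). The recursive walk over operands is capped at a depth of 10.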
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
                        return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}

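// Adjust the generic unrolling preferences for AMDGPU: start from a doubled
// base threshold and raise it further for loops whose GEPs address private
// (scratch) or local (LDS) memory with loop-varying indices, or whose "if"
// conditions depend on a PHI local to the loop.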
void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = std::numeric_limits<unsigned>::max();
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
                     return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          if (L->isLoopExiting(Br->getSuccessor(0)) ||
              L->isLoopExiting(Br->getSuccessor(1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                              << " for loop:\n"
                              << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == AMDGPUAS::PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == AMDGPUAS::LOCAL_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unroll for local memory if we have seen addressing not to
        // a variable; most likely we will be unable to combine it.
        // Do not unroll too deep inner loops for local memory to give a chance
        // to unroll an outer loop for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
                         return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
                        << " for loop:\n"
                        << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}

unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.
  return 256;
}

unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

unsigned GCNTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

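// Cap the vectorization factor for loads and stores: stores are limited to
// 128 bits in total, and loads wider than 128 bits are split unless the
// element type is at least 32 bits.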
unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                         unsigned ChainSizeInBytes,
                                         VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
    // TODO: Support element-size less than 32bit?
    return 128 / LoadSize;

  return VF;
}

unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                          unsigned ChainSizeInBytes,
                                          VectorType *VecTy) const {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize;

  return VF;
}

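// Widest total access width, in bits, that the load/store vectorizer may form
// for a given address space.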
unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
    return 512;
  }

  if (AddrSpace == AMDGPUAS::FLAT_ADDRESS ||
      AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 128;

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  llvm_unreachable("unhandled address space");
}

bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                            unsigned Alignment,
                                            unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
           ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                             unsigned Alignment,
                                             unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                              unsigned Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

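// Describe the target DS/atomic intrinsics to the generic passes: record the
// pointer operand, the atomic ordering and volatility, and mark them as both
// reading and writing memory.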
bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
    auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
    if (!Ordering || !Volatile)
      return false; // Invalid.

    unsigned OrderingVal = Ordering->getZExtValue();
    if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
      return false;

    Info.PtrVal = Inst->getArgOperand(0);
    Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
    Info.ReadMem = true;
    Info.WriteMem = true;
    Info.IsVolatile = !Volatile->isNullValue();
    return true;
  }
  default:
    return false;
  }
}

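// Arithmetic cost model: legalize the type, then scale a per-opcode rate
// (full, quarter, or 64-bit rate) by the number of legalized parts and the
// number of vector elements.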
int GCNTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, but do have legal
  // vector types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
      // TODO: This is more complicated, unsafe flags etc.
      if ((SLT == MVT::f32 && !ST->hasFP32Denormals()) ||
          (SLT == MVT::f16 && ST->has16BitInsts())) {
        return LT.first * getQuarterRateInstrCost() * NElts;
      }
    }

    if (SLT == MVT::f16 && ST->has16BitInsts()) {
      // 2 x v_cvt_f32_f16
      // f32 rcp
      // f32 fmul
      // v_cvt_f16_f32
      // f16 div_fixup
      int Cost = 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost();
      return LT.first * Cost * NElts;
    }

    if (SLT == MVT::f32 || SLT == MVT::f16) {
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();

      if (!ST->hasFP32Denormals()) {
        // FP mode switches.
        Cost += 2 * getFullRateInstrCost();
      }

      return LT.first * NElts * Cost;
    }
    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

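// Reductions over 16-bit elements can use packed-math (VOP3P) instructions
// when a pairwise reduction is not requested; otherwise fall back to the
// generic cost model.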
int GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                           bool IsPairwise) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes cost on targets that have packed math instructions (which support
  // 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, IsPairwise);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getFullRateInstrCost();
}

int GCNTTIImpl::getMinMaxReductionCost(Type *Ty, Type *CondTy,
                                       bool IsPairwise,
                                       bool IsUnsigned) {
  EVT OrigTy = TLI->getValueType(DL, Ty);

  // Computes cost on targets that have packed math instructions (which support
  // 16-bit types only).
  if (IsPairwise ||
      !ST->hasVOP3PInsts() ||
      OrigTy.getScalarSizeInBits() != 16)
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return LT.first * getHalfRateInstrCost();
}

int GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

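// Returns true if argument \p A is known to be passed in a scalar register
// (SGPR) and is therefore uniform across the wavefront.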
static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
    // Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private and flat address spaces are divergent, because
  // threads can execute the load instruction with the same inputs and get
  // different results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
           Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}

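// These intrinsics produce a value that is identical in every lane of the
// wavefront (e.g. readfirstlane/readlane, or the wavefront-wide mask returned
// by amdgcn_icmp/fcmp), so they are always uniform.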
bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
    case Intrinsic::amdgcn_icmp:
    case Intrinsic::amdgcn_fcmp:
      return true;
    }
  }
  return false;
}

unsigned GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                    Type *SubTp) {
  if (ST->hasVOP3PInsts()) {
    VectorType *VT = cast<VectorType>(Tp);
    if (VT->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access the low half or high
      // half of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

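// Inlining is only considered compatible when the callee's subtarget features
// (ignoring those in InlineFeatureIgnoreList) form a subset of the caller's.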
bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
    TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
    TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return ((RealCallerBits & RealCalleeBits) == RealCalleeBits);
}

void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}

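// R600 (pre-GCN) implementations of the same TTI hooks follow.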
unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
  return getHardwareNumberOfRegisters(Vec);
}

unsigned R600TTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}

unsigned R600TTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS)
    return 128;
  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS)
    return 64;
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
    return 32;

  if ((AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
       AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
       (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
        AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool R600TTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                             unsigned Alignment,
                                             unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  return (AddrSpace != AMDGPUAS::PRIVATE_ADDRESS);
}

bool R600TTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                              unsigned Alignment,
                                              unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned R600TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}

unsigned R600TTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                    unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP) {
  CommonTTI.getUnrollingPreferences(L, SE, UP);
}