GlobalISel: cache pointer sizes in LLT

Otherwise everything that needs to work out what size they are has to keep a
DataLayout handy, which is a bit silly and very annoying.

llvm-svn: 281597
This commit is contained in:
Tim Northover 2016-09-15 09:20:34 +00:00
parent 904baf9864
commit 6a9b1a6161
8 changed files with 78 additions and 64 deletions

View File

@@ -56,8 +56,8 @@ public:
}
/// Get a low-level pointer in the given address space (defaulting to 0).
static LLT pointer(unsigned AddressSpace) {
return LLT{Pointer, 1, AddressSpace};
static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) {
return LLT{Pointer, AddressSpace, SizeInBits};
}
/// Get a low-level vector of some number of elements and element width.
@@ -79,16 +79,16 @@ public:
return LLT{Unsized, 0, 0};
}
explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeOrAddrSpace)
: SizeOrAddrSpace(SizeOrAddrSpace), NumElements(NumElements), Kind(Kind) {
assert((Kind != Vector || NumElements > 1) &&
explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeInBits)
: SizeInBits(SizeInBits), ElementsOrAddrSpace(NumElements), Kind(Kind) {
assert((Kind != Vector || ElementsOrAddrSpace > 1) &&
"invalid number of vector elements");
}
explicit LLT() : SizeOrAddrSpace(0), NumElements(0), Kind(Invalid) {}
explicit LLT() : SizeInBits(0), ElementsOrAddrSpace(0), Kind(Invalid) {}
/// Construct a low-level type based on an LLVM type.
explicit LLT(Type &Ty, const DataLayout *DL = nullptr);
explicit LLT(Type &Ty, const DataLayout &DL);
bool isValid() const { return Kind != Invalid; }
@@ -98,35 +98,39 @@ public:
bool isVector() const { return Kind == Vector; }
bool isSized() const { return Kind == Scalar || Kind == Vector; }
bool isSized() const {
return Kind == Scalar || Kind == Vector || Kind == Pointer;
}
/// Returns the number of elements in a vector LLT. Must only be called on
/// vector types.
uint16_t getNumElements() const {
assert(isVector() && "cannot get number of elements on scalar/aggregate");
return NumElements;
return ElementsOrAddrSpace;
}
/// Returns the total size of the type. Must only be called on sized types.
unsigned getSizeInBits() const {
assert(isSized() && "attempt to get size of unsized type");
return SizeOrAddrSpace * NumElements;
if (isPointer() || isScalar())
return SizeInBits;
return SizeInBits * ElementsOrAddrSpace;
}
unsigned getScalarSizeInBits() const {
assert(isSized() && "cannot get size of this type");
return SizeOrAddrSpace;
return SizeInBits;
}
unsigned getAddressSpace() const {
assert(isPointer() && "cannot get address space of non-pointer type");
return SizeOrAddrSpace;
return ElementsOrAddrSpace;
}
/// Returns the vector's element type. Only valid for vector types.
LLT getElementType() const {
assert(isVector() && "cannot get element type of scalar/aggregate");
return scalar(SizeOrAddrSpace);
return scalar(SizeInBits);
}
/// Get a low-level type with half the size of the original, by halving the
@@ -135,7 +139,7 @@ public:
LLT halfScalarSize() const {
assert(isSized() && getScalarSizeInBits() > 1 &&
getScalarSizeInBits() % 2 == 0 && "cannot half size of this type");
return LLT{Kind, NumElements, SizeOrAddrSpace / 2};
return LLT{Kind, ElementsOrAddrSpace, SizeInBits / 2};
}
/// Get a low-level type with twice the size of the original, by doubling the
@@ -143,7 +147,7 @@ public:
/// `<2 x s32>` will become `<2 x s64>`.
LLT doubleScalarSize() const {
assert(isSized() && "cannot change size of this type");
return LLT{Kind, NumElements, SizeOrAddrSpace * 2};
return LLT{Kind, ElementsOrAddrSpace, SizeInBits * 2};
}
/// Get a low-level type with half the size of the original, by halving the
@@ -151,11 +155,13 @@ public:
/// a vector type with an even number of elements. For example `<4 x s32>`
/// will become `<2 x s32>`, `<2 x s32>` will become `s32`.
LLT halfElements() const {
assert(isVector() && NumElements % 2 == 0 && "cannot half odd vector");
if (NumElements == 2)
return scalar(SizeOrAddrSpace);
assert(isVector() && ElementsOrAddrSpace % 2 == 0 &&
"cannot half odd vector");
if (ElementsOrAddrSpace == 2)
return scalar(SizeInBits);
return LLT{Vector, static_cast<uint16_t>(NumElements / 2), SizeOrAddrSpace};
return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace / 2),
SizeInBits};
}
/// Get a low-level type with twice the size of the original, by doubling the
@@ -163,22 +169,23 @@ public:
/// a vector type. For example `<2 x s32>` will become `<4 x s32>`. Doubling
/// the number of elements in sN produces <2 x sN>.
LLT doubleElements() const {
return LLT{Vector, static_cast<uint16_t>(NumElements * 2), SizeOrAddrSpace};
return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace * 2),
SizeInBits};
}
void print(raw_ostream &OS) const;
bool operator==(const LLT &RHS) const {
return Kind == RHS.Kind && SizeOrAddrSpace == RHS.SizeOrAddrSpace &&
NumElements == RHS.NumElements;
return Kind == RHS.Kind && SizeInBits == RHS.SizeInBits &&
ElementsOrAddrSpace == RHS.ElementsOrAddrSpace;
}
bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
friend struct DenseMapInfo<LLT>;
private:
unsigned SizeOrAddrSpace;
uint16_t NumElements;
unsigned SizeInBits;
uint16_t ElementsOrAddrSpace;
TypeKind Kind;
};
@@ -195,8 +202,8 @@ template<> struct DenseMapInfo<LLT> {
return LLT{LLT::Invalid, 0, -2u};
}
static inline unsigned getHashValue(const LLT &Ty) {
uint64_t Val = ((uint64_t)Ty.SizeOrAddrSpace << 32) |
((uint64_t)Ty.NumElements << 16) | (uint64_t)Ty.Kind;
uint64_t Val = ((uint64_t)Ty.SizeInBits << 32) |
((uint64_t)Ty.ElementsOrAddrSpace << 16) | (uint64_t)Ty.Kind;
return DenseMapInfo<uint64_t>::getHashValue(Val);
}
static bool isEqual(const LLT &LHS, const LLT &RHS) {

View File

@@ -56,7 +56,7 @@ unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
// we need to concat together to produce the value.
assert(Val.getType()->isSized() &&
"Don't know how to create an empty vreg");
unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), DL});
unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
ValReg = VReg;
if (auto CV = dyn_cast<Constant>(&Val)) {
@@ -176,7 +176,7 @@ bool IRTranslator::translateLoad(const User &U) {
MachineFunction &MF = MIRBuilder.getMF();
unsigned Res = getOrCreateVReg(LI);
unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
LLT VTy{*LI.getType(), DL}, PTy{*LI.getPointerOperand()->getType()};
LLT VTy{*LI.getType(), *DL}, PTy{*LI.getPointerOperand()->getType(), *DL};
MIRBuilder.buildLoad(
Res, Addr,
@@ -197,8 +197,8 @@ bool IRTranslator::translateStore(const User &U) {
MachineFunction &MF = MIRBuilder.getMF();
unsigned Val = getOrCreateVReg(*SI.getValueOperand());
unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
LLT VTy{*SI.getValueOperand()->getType(), DL},
PTy{*SI.getPointerOperand()->getType()};
LLT VTy{*SI.getValueOperand()->getType(), *DL},
PTy{*SI.getPointerOperand()->getType(), *DL};
MIRBuilder.buildStore(
Val, Addr,
@@ -270,7 +270,7 @@ bool IRTranslator::translateSelect(const User &U) {
}
bool IRTranslator::translateBitCast(const User &U) {
if (LLT{*U.getOperand(0)->getType()} == LLT{*U.getType()}) {
if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
unsigned &Reg = ValToVReg[&U];
if (Reg)
MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
@@ -295,7 +295,7 @@ bool IRTranslator::translateGetElementPtr(const User &U) {
Value &Op0 = *U.getOperand(0);
unsigned BaseReg = getOrCreateVReg(Op0);
LLT PtrTy(*Op0.getType());
LLT PtrTy{*Op0.getType(), *DL};
unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
LLT OffsetTy = LLT::scalar(PtrSize);
@@ -372,7 +372,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
case Intrinsic::smul_with_overflow: Op = TargetOpcode::G_SMULO; break;
}
LLT Ty{*CI.getOperand(0)->getType()};
LLT Ty{*CI.getOperand(0)->getType(), *DL};
LLT s1 = LLT::scalar(1);
unsigned Width = Ty.getSizeInBits();
unsigned Res = MRI->createGenericVirtualRegister(Ty);

View File

@@ -18,32 +18,31 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
LLT::LLT(Type &Ty, const DataLayout *DL) {
LLT::LLT(Type &Ty, const DataLayout &DL) {
if (auto VTy = dyn_cast<VectorType>(&Ty)) {
SizeOrAddrSpace = VTy->getElementType()->getPrimitiveSizeInBits();
NumElements = VTy->getNumElements();
Kind = NumElements == 1 ? Scalar : Vector;
SizeInBits = VTy->getElementType()->getPrimitiveSizeInBits();
ElementsOrAddrSpace = VTy->getNumElements();
Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
} else if (auto PTy = dyn_cast<PointerType>(&Ty)) {
Kind = Pointer;
SizeOrAddrSpace = PTy->getAddressSpace();
NumElements = 1;
SizeInBits = DL.getTypeSizeInBits(&Ty);
ElementsOrAddrSpace = PTy->getAddressSpace();
} else if (Ty.isSized()) {
// Aggregates are no different from real scalars as far as GlobalISel is
// concerned.
Kind = Scalar;
SizeOrAddrSpace =
DL ? DL->getTypeSizeInBits(&Ty) : Ty.getPrimitiveSizeInBits();
NumElements = 1;
assert(SizeOrAddrSpace != 0 && "invalid zero-sized type");
SizeInBits = DL.getTypeSizeInBits(&Ty);
ElementsOrAddrSpace = 1;
assert(SizeInBits != 0 && "invalid zero-sized type");
} else {
Kind = Unsized;
SizeOrAddrSpace = NumElements = 0;
SizeInBits = ElementsOrAddrSpace = 0;
}
}
void LLT::print(raw_ostream &OS) const {
if (isVector())
OS << "<" << NumElements << " x s" << SizeOrAddrSpace << ">";
OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">";
else if (isPointer())
OS << "p" << getAddressSpace();
else if (isSized())

View File

@@ -1048,7 +1048,9 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
lex();
return false;
} else if (Token.is(MIToken::PointerType)) {
Ty = LLT::pointer(APSInt(Token.range().drop_front()).getZExtValue());
const DataLayout &DL = MF.getFunction()->getParent()->getDataLayout();
unsigned AS = APSInt(Token.range().drop_front()).getZExtValue();
Ty = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
lex();
return false;
}

View File

@@ -232,9 +232,9 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
case TargetOpcode::G_FRAME_INDEX: {
// allocas and G_FRAME_INDEX are only supported in addrspace(0).
if (Ty != LLT::pointer(0)) {
if (Ty != LLT::pointer(0, 64)) {
DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
<< ", expected: " << LLT::pointer(0) << '\n');
<< ", expected: " << LLT::pointer(0, 64) << '\n');
return false;
}
@@ -251,9 +251,9 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
LLT MemTy = Ty;
LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
if (PtrTy != LLT::pointer(0)) {
if (PtrTy != LLT::pointer(0, 64)) {
DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
<< ", expected: " << LLT::pointer(0) << '\n');
<< ", expected: " << LLT::pointer(0, 64) << '\n');
return false;
}

View File

@@ -26,7 +26,7 @@ using namespace llvm;
AArch64MachineLegalizer::AArch64MachineLegalizer() {
using namespace TargetOpcode;
const LLT p0 = LLT::pointer(0);
const LLT p0 = LLT::pointer(0, 64);
const LLT s1 = LLT::scalar(1);
const LLT s8 = LLT::scalar(8);
const LLT s16 = LLT::scalar(16);

View File

@@ -109,18 +109,19 @@ TEST(MachineLegalizerTest, VectorRISC) {
TEST(MachineLegalizerTest, MultipleTypes) {
using namespace TargetOpcode;
MachineLegalizer L;
LLT p0 = LLT::pointer(0, 64);
LLT s32 = LLT::scalar(32);
LLT s64 = LLT::scalar(64);
// Typical RISCy set of operations based on AArch64.
L.setAction({G_PTRTOINT, 0, LLT::scalar(64)}, Legal);
L.setAction({G_PTRTOINT, 1, LLT::pointer(0)}, Legal);
L.setAction({G_PTRTOINT, 0, s64}, Legal);
L.setAction({G_PTRTOINT, 1, p0}, Legal);
L.setAction({G_PTRTOINT, 0, LLT::scalar(32)}, WidenScalar);
L.setAction({G_PTRTOINT, 0, s32}, WidenScalar);
L.computeTables();
// Check we infer the correct types and actually do what we're told.
ASSERT_EQ(L.getAction({G_PTRTOINT, 0, LLT::scalar(64)}),
std::make_pair(Legal, LLT::scalar(64)));
ASSERT_EQ(L.getAction({G_PTRTOINT, 1, LLT::pointer(0)}),
std::make_pair(Legal, LLT::pointer(0)));
ASSERT_EQ(L.getAction({G_PTRTOINT, 0, s64}), std::make_pair(Legal, s64));
ASSERT_EQ(L.getAction({G_PTRTOINT, 1, p0}), std::make_pair(Legal, p0));
}
}

View File

@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
@@ -31,6 +32,7 @@ namespace {
TEST(LowLevelTypeTest, Scalar) {
LLVMContext C;
DataLayout DL("");
for (unsigned S : {1U, 17U, 32U, 64U, 0xfffffU}) {
const LLT Ty = LLT::scalar(S);
@@ -67,12 +69,13 @@ TEST(LowLevelTypeTest, Scalar) {
// Test Type->LLT conversion.
Type *IRTy = IntegerType::get(C, S);
EXPECT_EQ(Ty, LLT(*IRTy));
EXPECT_EQ(Ty, LLT(*IRTy, DL));
}
}
TEST(LowLevelTypeTest, Vector) {
LLVMContext C;
DataLayout DL("");
for (unsigned S : {1U, 17U, 32U, 64U, 0xfffU}) {
for (uint16_t Elts : {2U, 3U, 4U, 32U, 0xffU}) {
@@ -160,22 +163,23 @@ TEST(LowLevelTypeTest, Vector) {
// Test Type->LLT conversion.
Type *IRSTy = IntegerType::get(C, S);
Type *IRTy = VectorType::get(IRSTy, Elts);
EXPECT_EQ(VTy, LLT(*IRTy));
EXPECT_EQ(VTy, LLT(*IRTy, DL));
}
}
}
TEST(LowLevelTypeTest, Pointer) {
LLVMContext C;
DataLayout DL("");
for (unsigned AS : {0U, 1U, 127U, 0xffffU}) {
const LLT Ty = LLT::pointer(AS);
const LLT Ty = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
// Test kind.
ASSERT_TRUE(Ty.isValid());
ASSERT_TRUE(Ty.isPointer());
ASSERT_TRUE(Ty.isSized());
ASSERT_FALSE(Ty.isSized());
ASSERT_FALSE(Ty.isScalar());
ASSERT_FALSE(Ty.isVector());
@@ -188,7 +192,7 @@ TEST(LowLevelTypeTest, Pointer) {
// Test Type->LLT conversion.
Type *IRTy = PointerType::get(IntegerType::get(C, 8), AS);
EXPECT_EQ(Ty, LLT(*IRTy));
EXPECT_EQ(Ty, LLT(*IRTy, DL));
}
}
@@ -204,6 +208,7 @@ TEST(LowLevelTypeTest, Invalid) {
TEST(LowLevelTypeTest, Unsized) {
LLVMContext C;
DataLayout DL("");
const LLT Ty = LLT::unsized();
@@ -214,6 +219,6 @@ TEST(LowLevelTypeTest, Unsized) {
ASSERT_FALSE(Ty.isVector());
Type *IRTy = Type::getLabelTy(C);
EXPECT_EQ(Ty, LLT(*IRTy));
EXPECT_EQ(Ty, LLT(*IRTy, DL));
}
}