Recommit: [globalisel] Change LLT constructor string into an LLT-based object that knows how to generate it.

Summary:
This will allow future patches to inspect the details of the LLT. The implementation is now split between
the Support and CodeGen libraries to allow TableGen to use this class without introducing layering concerns.

Thanks to Ahmed Bougacha for finding a reasonable way to avoid the layering issue and for providing this version of the patch, which does not have that problem.

The problem with the previous commit appears to have been that TableGen was including CodeGen/LowLevelType.h instead of Support/LowLevelTypeImpl.h.

Reviewers: t.p.northover, qcolombet, rovka, aditya_nandakumar, ab, javed.absar

Subscribers: arsenm, nhaehnle, mgorny, dberris, llvm-commits, kristof.beyls

Differential Revision: https://reviews.llvm.org/D30046

llvm-svn: 297241
Commit: ffb113ee36 (parent c6f932dd97)
Author: Daniel Sanders
Date: 2017-03-07 23:20:35 +00:00
12 changed files with 330 additions and 267 deletions
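To make the summary concrete: the patch replaces the constructor string that TableGen used to carry for each type with an object wrapping an LLT, so the emitter can inspect the type and still print the constructor call when generating C++. The helper below is a trimmed-down, hypothetical analogue of the LLTCodeGen class added in the GlobalISelEmitter.cpp hunk further down; the class and method names here are illustrative only, not part of the commit.

// Minimal sketch (not part of the commit): keep the LLT itself and render
// the constructor call only when emitting C++ for the generated matcher.
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/raw_ostream.h"

namespace {
// Hypothetical stand-in for the LLTCodeGen helper introduced by this patch.
class TypeEmitter {
  llvm::LLT Ty;

public:
  explicit TypeEmitter(llvm::LLT Ty) : Ty(Ty) {}

  // The stored type stays available for inspection by later patches...
  bool isVector() const { return Ty.isVector(); }
  unsigned scalarSizeInBits() const { return Ty.getScalarSizeInBits(); }

  // ...and can still be turned back into source text on demand.
  void emitConstructorCall(llvm::raw_ostream &OS) const {
    if (Ty.isScalar())
      OS << "LLT::scalar(" << Ty.getSizeInBits() << ")";
    else if (Ty.isVector())
      OS << "LLT::vector(" << Ty.getNumElements() << ", "
         << Ty.getScalarSizeInBits() << ")";
  }
};
} // end anonymous namespace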

File: include/llvm/CodeGen/LowLevelType.h

@ -1,4 +1,4 @@
//== llvm/CodeGen/GlobalISel/LowLevelType.h -------------------- -*- C++ -*-==//
//== llvm/CodeGen/LowLevelType.h ------------------------------- -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@ -10,197 +10,23 @@
/// Implement a low-level type suitable for MachineInstr level instruction
/// selection.
///
/// For a type attached to a MachineInstr, we only care about 2 details: total
/// size and the number of vector lanes (if any). Accordingly, there are 4
/// possible valid type-kinds:
///
/// * `sN` for scalars and aggregates
/// * `<N x sM>` for vectors, which must have at least 2 elements.
/// * `pN` for pointers
///
/// Other information required for correct selection is expected to be carried
/// by the opcode, or non-type flags. For example the distinction between G_ADD
/// and G_FADD for int/float or fast-math flags.
/// This provides the CodeGen aspects of LowLevelType, such as Type conversion.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_GLOBALISEL_LOWLEVELTYPE_H
#define LLVM_CODEGEN_GLOBALISEL_LOWLEVELTYPE_H
#ifndef LLVM_CODEGEN_LOWLEVELTYPE_H
#define LLVM_CODEGEN_LOWLEVELTYPE_H
#include <cassert>
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/LowLevelTypeImpl.h"
namespace llvm {
class DataLayout;
class LLVMContext;
class Type;
class raw_ostream;
class LLT {
public:
enum TypeKind : uint16_t {
Invalid,
Scalar,
Pointer,
Vector,
};
/// Get a low-level scalar or aggregate "bag of bits".
static LLT scalar(unsigned SizeInBits) {
assert(SizeInBits > 0 && "invalid scalar size");
return LLT{Scalar, 1, SizeInBits};
}
/// Get a low-level pointer in the given address space (defaulting to 0).
static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) {
return LLT{Pointer, AddressSpace, SizeInBits};
}
/// Get a low-level vector of some number of elements and element width.
/// \p NumElements must be at least 2.
static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) {
assert(NumElements > 1 && "invalid number of vector elements");
return LLT{Vector, NumElements, ScalarSizeInBits};
}
/// Get a low-level vector of some number of elements and element type.
static LLT vector(uint16_t NumElements, LLT ScalarTy) {
assert(NumElements > 1 && "invalid number of vector elements");
assert(ScalarTy.isScalar() && "invalid vector element type");
return LLT{Vector, NumElements, ScalarTy.getSizeInBits()};
}
explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeInBits)
: SizeInBits(SizeInBits), ElementsOrAddrSpace(NumElements), Kind(Kind) {
assert((Kind != Vector || ElementsOrAddrSpace > 1) &&
"invalid number of vector elements");
}
explicit LLT() : SizeInBits(0), ElementsOrAddrSpace(0), Kind(Invalid) {}
/// Construct a low-level type based on an LLVM type.
explicit LLT(Type &Ty, const DataLayout &DL);
explicit LLT(MVT VT);
bool isValid() const { return Kind != Invalid; }
bool isScalar() const { return Kind == Scalar; }
bool isPointer() const { return Kind == Pointer; }
bool isVector() const { return Kind == Vector; }
/// Returns the number of elements in a vector LLT. Must only be called on
/// vector types.
uint16_t getNumElements() const {
assert(isVector() && "cannot get number of elements on scalar/aggregate");
return ElementsOrAddrSpace;
}
/// Returns the total size of the type. Must only be called on sized types.
unsigned getSizeInBits() const {
if (isPointer() || isScalar())
return SizeInBits;
return SizeInBits * ElementsOrAddrSpace;
}
unsigned getScalarSizeInBits() const {
return SizeInBits;
}
unsigned getAddressSpace() const {
assert(isPointer() && "cannot get address space of non-pointer type");
return ElementsOrAddrSpace;
}
/// Returns the vector's element type. Only valid for vector types.
LLT getElementType() const {
assert(isVector() && "cannot get element type of scalar/aggregate");
return scalar(SizeInBits);
}
/// Get a low-level type with half the size of the original, by halving the
/// size of the scalar type involved. For example `s32` will become `s16`,
/// `<2 x s32>` will become `<2 x s16>`.
LLT halfScalarSize() const {
assert(!isPointer() && getScalarSizeInBits() > 1 &&
getScalarSizeInBits() % 2 == 0 && "cannot half size of this type");
return LLT{Kind, ElementsOrAddrSpace, SizeInBits / 2};
}
/// Get a low-level type with twice the size of the original, by doubling the
/// size of the scalar type involved. For example `s32` will become `s64`,
/// `<2 x s32>` will become `<2 x s64>`.
LLT doubleScalarSize() const {
assert(!isPointer() && "cannot change size of this type");
return LLT{Kind, ElementsOrAddrSpace, SizeInBits * 2};
}
/// Get a low-level type with half the size of the original, by halving the
/// number of vector elements of the scalar type involved. The source must be
/// a vector type with an even number of elements. For example `<4 x s32>`
/// will become `<2 x s32>`, `<2 x s32>` will become `s32`.
LLT halfElements() const {
assert(isVector() && ElementsOrAddrSpace % 2 == 0 &&
"cannot half odd vector");
if (ElementsOrAddrSpace == 2)
return scalar(SizeInBits);
return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace / 2),
SizeInBits};
}
/// Get a low-level type with twice the size of the original, by doubling the
/// number of vector elements of the scalar type involved. The source must be
/// a vector type. For example `<2 x s32>` will become `<4 x s32>`. Doubling
/// the number of elements in sN produces <2 x sN>.
LLT doubleElements() const {
assert(!isPointer() && "cannot double elements in pointer");
return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace * 2),
SizeInBits};
}
void print(raw_ostream &OS) const;
bool operator==(const LLT &RHS) const {
return Kind == RHS.Kind && SizeInBits == RHS.SizeInBits &&
ElementsOrAddrSpace == RHS.ElementsOrAddrSpace;
}
bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
friend struct DenseMapInfo<LLT>;
private:
unsigned SizeInBits;
uint16_t ElementsOrAddrSpace;
TypeKind Kind;
};
inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
Ty.print(OS);
return OS;
}
template<> struct DenseMapInfo<LLT> {
static inline LLT getEmptyKey() {
return LLT{LLT::Invalid, 0, -1u};
}
static inline LLT getTombstoneKey() {
return LLT{LLT::Invalid, 0, -2u};
}
static inline unsigned getHashValue(const LLT &Ty) {
uint64_t Val = ((uint64_t)Ty.SizeInBits << 32) |
((uint64_t)Ty.ElementsOrAddrSpace << 16) | (uint64_t)Ty.Kind;
return DenseMapInfo<uint64_t>::getHashValue(Val);
}
static bool isEqual(const LLT &LHS, const LLT &RHS) {
return LHS == RHS;
}
};
/// Construct a low-level type based on an LLVM type.
LLT getLLTForType(Type &Ty, const DataLayout &DL);
}
#endif
#endif // LLVM_CODEGEN_LOWLEVELTYPE_H
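With the class itself moved to Support, the CodeGen header above is reduced to the IR-facing conversion. A minimal usage sketch, assuming the header is reachable as llvm/CodeGen/LowLevelType.h after this patch; the helper function name below is illustrative, not from the commit.

// Sketch: mapping an IR type to an LLT through the CodeGen-side helper.
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

// Illustrative helper: an i32* in address space 0 becomes a pointer LLT
// whose size in bits is taken from the DataLayout.
static llvm::LLT lltForPtrToI32(llvm::LLVMContext &Ctx,
                                const llvm::DataLayout &DL) {
  llvm::Type *PtrTy =
      llvm::PointerType::get(llvm::IntegerType::get(Ctx, 32), /*AddrSpace=*/0);
  return llvm::getLLTForType(*PtrTy, DL);
}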

File: include/llvm/Support/LowLevelTypeImpl.h (new file)

@ -0,0 +1,202 @@
//== llvm/Support/LowLevelTypeImpl.h --------------------------- -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// Implement a low-level type suitable for MachineInstr level instruction
/// selection.
///
/// For a type attached to a MachineInstr, we only care about 2 details: total
/// size and the number of vector lanes (if any). Accordingly, there are 4
/// possible valid type-kinds:
///
/// * `sN` for scalars and aggregates
/// * `<N x sM>` for vectors, which must have at least 2 elements.
/// * `pN` for pointers
///
/// Other information required for correct selection is expected to be carried
/// by the opcode, or non-type flags. For example the distinction between G_ADD
/// and G_FADD for int/float or fast-math flags.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
#define LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
#include <cassert>
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
namespace llvm {
class DataLayout;
class Type;
class raw_ostream;
class LLT {
public:
enum TypeKind : uint16_t {
Invalid,
Scalar,
Pointer,
Vector,
};
/// Get a low-level scalar or aggregate "bag of bits".
static LLT scalar(unsigned SizeInBits) {
assert(SizeInBits > 0 && "invalid scalar size");
return LLT{Scalar, 1, SizeInBits};
}
/// Get a low-level pointer in the given address space (defaulting to 0).
static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) {
return LLT{Pointer, AddressSpace, SizeInBits};
}
/// Get a low-level vector of some number of elements and element width.
/// \p NumElements must be at least 2.
static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) {
assert(NumElements > 1 && "invalid number of vector elements");
return LLT{Vector, NumElements, ScalarSizeInBits};
}
/// Get a low-level vector of some number of elements and element type.
static LLT vector(uint16_t NumElements, LLT ScalarTy) {
assert(NumElements > 1 && "invalid number of vector elements");
assert(ScalarTy.isScalar() && "invalid vector element type");
return LLT{Vector, NumElements, ScalarTy.getSizeInBits()};
}
explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeInBits)
: SizeInBits(SizeInBits), ElementsOrAddrSpace(NumElements), Kind(Kind) {
assert((Kind != Vector || ElementsOrAddrSpace > 1) &&
"invalid number of vector elements");
}
explicit LLT() : SizeInBits(0), ElementsOrAddrSpace(0), Kind(Invalid) {}
explicit LLT(MVT VT);
bool isValid() const { return Kind != Invalid; }
bool isScalar() const { return Kind == Scalar; }
bool isPointer() const { return Kind == Pointer; }
bool isVector() const { return Kind == Vector; }
/// Returns the number of elements in a vector LLT. Must only be called on
/// vector types.
uint16_t getNumElements() const {
assert(isVector() && "cannot get number of elements on scalar/aggregate");
return ElementsOrAddrSpace;
}
/// Returns the total size of the type. Must only be called on sized types.
unsigned getSizeInBits() const {
if (isPointer() || isScalar())
return SizeInBits;
return SizeInBits * ElementsOrAddrSpace;
}
unsigned getScalarSizeInBits() const {
return SizeInBits;
}
unsigned getAddressSpace() const {
assert(isPointer() && "cannot get address space of non-pointer type");
return ElementsOrAddrSpace;
}
/// Returns the vector's element type. Only valid for vector types.
LLT getElementType() const {
assert(isVector() && "cannot get element type of scalar/aggregate");
return scalar(SizeInBits);
}
/// Get a low-level type with half the size of the original, by halving the
/// size of the scalar type involved. For example `s32` will become `s16`,
/// `<2 x s32>` will become `<2 x s16>`.
LLT halfScalarSize() const {
assert(!isPointer() && getScalarSizeInBits() > 1 &&
getScalarSizeInBits() % 2 == 0 && "cannot half size of this type");
return LLT{Kind, ElementsOrAddrSpace, SizeInBits / 2};
}
/// Get a low-level type with twice the size of the original, by doubling the
/// size of the scalar type involved. For example `s32` will become `s64`,
/// `<2 x s32>` will become `<2 x s64>`.
LLT doubleScalarSize() const {
assert(!isPointer() && "cannot change size of this type");
return LLT{Kind, ElementsOrAddrSpace, SizeInBits * 2};
}
/// Get a low-level type with half the size of the original, by halving the
/// number of vector elements of the scalar type involved. The source must be
/// a vector type with an even number of elements. For example `<4 x s32>`
/// will become `<2 x s32>`, `<2 x s32>` will become `s32`.
LLT halfElements() const {
assert(isVector() && ElementsOrAddrSpace % 2 == 0 &&
"cannot half odd vector");
if (ElementsOrAddrSpace == 2)
return scalar(SizeInBits);
return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace / 2),
SizeInBits};
}
/// Get a low-level type with twice the size of the original, by doubling the
/// number of vector elements of the scalar type involved. The source must be
/// a vector type. For example `<2 x s32>` will become `<4 x s32>`. Doubling
/// the number of elements in sN produces <2 x sN>.
LLT doubleElements() const {
assert(!isPointer() && "cannot double elements in pointer");
return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace * 2),
SizeInBits};
}
void print(raw_ostream &OS) const;
bool operator==(const LLT &RHS) const {
return Kind == RHS.Kind && SizeInBits == RHS.SizeInBits &&
ElementsOrAddrSpace == RHS.ElementsOrAddrSpace;
}
bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
friend struct DenseMapInfo<LLT>;
private:
unsigned SizeInBits;
uint16_t ElementsOrAddrSpace;
TypeKind Kind;
};
inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
Ty.print(OS);
return OS;
}
template<> struct DenseMapInfo<LLT> {
static inline LLT getEmptyKey() {
return LLT{LLT::Invalid, 0, -1u};
}
static inline LLT getTombstoneKey() {
return LLT{LLT::Invalid, 0, -2u};
}
static inline unsigned getHashValue(const LLT &Ty) {
uint64_t Val = ((uint64_t)Ty.SizeInBits << 32) |
((uint64_t)Ty.ElementsOrAddrSpace << 16) | (uint64_t)Ty.Kind;
return DenseMapInfo<uint64_t>::getHashValue(Val);
}
static bool isEqual(const LLT &LHS, const LLT &RHS) {
return LHS == RHS;
}
};
}
#endif // LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
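The header above is self-contained, so the three valid kinds from its comment block can be exercised directly. A small sketch, not from the commit; the printed forms follow LLT::print as implemented in the Support library source further down.

// Sketch: constructing the three LLT kinds and a couple of the size/element
// transforms. operator<< forwards to LLT::print.
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLT S32 = LLT::scalar(32);        // prints as "s32"
  LLT V4S32 = LLT::vector(4, S32);  // prints as "<4 x s32>"
  LLT P0 = LLT::pointer(0, 64);     // prints as "p0" (64-bit, addrspace 0)

  outs() << S32 << " " << V4S32 << " " << P0 << "\n";

  outs() << S32.doubleScalarSize() << "\n"; // s64
  outs() << V4S32.halfElements() << "\n";   // <2 x s32>
  outs() << V4S32.getElementType() << "\n"; // s32
  return 0;
}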

File: include/llvm/module.modulemap

@ -284,12 +284,12 @@ module LLVM_Utils {
header "Support/ConvertUTF.h"
export *
}
}
module LLVM_CodeGen_MachineValueType {
requires cplusplus
header "CodeGen/MachineValueType.h"
export *
module LLVM_CodeGen_MachineValueType {
requires cplusplus
header "CodeGen/MachineValueType.h"
export *
}
}
// This is used for a $src == $build compilation. Otherwise we use

File: lib/CodeGen/GlobalISel/IRTranslator.cpp

@ -82,7 +82,8 @@ unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
// we need to concat together to produce the value.
assert(Val.getType()->isSized() &&
"Don't know how to create an empty vreg");
unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
unsigned VReg =
MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL));
ValReg = VReg;
if (auto CV = dyn_cast<Constant>(&Val)) {
@ -245,7 +246,7 @@ bool IRTranslator::translateSwitch(const User &U,
const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
const BasicBlock *OrigBB = SwInst.getParent();
LLT LLTi1 = LLT(*Type::getInt1Ty(U.getContext()), *DL);
LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
for (auto &CaseIt : SwInst.cases()) {
const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
@ -301,7 +302,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
unsigned Res = getOrCreateVReg(LI);
unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
LLT VTy{*LI.getType(), *DL}, PTy{*LI.getPointerOperand()->getType(), *DL};
MIRBuilder.buildLoad(
Res, Addr,
*MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
@ -319,8 +320,6 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
unsigned Val = getOrCreateVReg(*SI.getValueOperand());
unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
LLT VTy{*SI.getValueOperand()->getType(), *DL},
PTy{*SI.getPointerOperand()->getType(), *DL};
MIRBuilder.buildStore(
Val, Addr,
@ -397,7 +396,8 @@ bool IRTranslator::translateSelect(const User &U,
bool IRTranslator::translateBitCast(const User &U,
MachineIRBuilder &MIRBuilder) {
// If we're bitcasting to the source type, we can reuse the source vreg.
if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
getLLTForType(*U.getType(), *DL)) {
// Get the source vreg now, to avoid invalidating ValToVReg.
unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
unsigned &Reg = ValToVReg[&U];
@ -428,7 +428,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
Value &Op0 = *U.getOperand(0);
unsigned BaseReg = getOrCreateVReg(Op0);
LLT PtrTy{*Op0.getType(), *DL};
LLT PtrTy = getLLTForType(*Op0.getType(), *DL);
unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
LLT OffsetTy = LLT::scalar(PtrSize);
@ -494,7 +494,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
bool IRTranslator::translateMemfunc(const CallInst &CI,
MachineIRBuilder &MIRBuilder,
unsigned ID) {
LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
Type *DstTy = CI.getArgOperand(0)->getType();
if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
@ -551,7 +551,7 @@ void IRTranslator::getStackGuard(unsigned DstReg,
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder) {
LLT Ty{*CI.getOperand(0)->getType(), *DL};
LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL);
LLT s1 = LLT::scalar(1);
unsigned Width = Ty.getSizeInBits();
unsigned Res = MRI->createGenericVirtualRegister(Ty);
@ -694,7 +694,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
getStackGuard(getOrCreateVReg(CI), MIRBuilder);
return true;
case Intrinsic::stackprotector: {
LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
getStackGuard(GuardVal, MIRBuilder);
@ -844,7 +844,7 @@ bool IRTranslator::translateLandingPad(const User &U,
SmallVector<LLT, 2> Tys;
for (Type *Ty : cast<StructType>(LP.getType())->elements())
Tys.push_back(LLT{*Ty, *DL});
Tys.push_back(getLLTForType(*Ty, *DL));
assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
// Mark exception register as live in.
@ -907,7 +907,7 @@ bool IRTranslator::translateAlloca(const User &U,
MIRBuilder.buildConstant(TySize, -DL->getTypeAllocSize(Ty));
MIRBuilder.buildMul(AllocSize, NumElts, TySize);
LLT PtrTy = LLT{*AI.getType(), *DL};
LLT PtrTy = getLLTForType(*AI.getType(), *DL);
auto &TLI = *MF->getSubtarget().getTargetLowering();
unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

File: lib/CodeGen/LowLevelType.cpp

@ -1,4 +1,4 @@
//===-- llvm/CodeGen/GlobalISel/LowLevelType.cpp --------------------------===//
//===-- llvm/CodeGen/LowLevelType.cpp -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@ -18,54 +18,21 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
LLT::LLT(Type &Ty, const DataLayout &DL) {
LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
if (auto VTy = dyn_cast<VectorType>(&Ty)) {
SizeInBits = VTy->getElementType()->getPrimitiveSizeInBits();
ElementsOrAddrSpace = VTy->getNumElements();
Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
auto NumElements = VTy->getNumElements();
auto ScalarSizeInBits = VTy->getElementType()->getPrimitiveSizeInBits();
if (NumElements == 1)
return LLT::scalar(ScalarSizeInBits);
return LLT::vector(NumElements, ScalarSizeInBits);
} else if (auto PTy = dyn_cast<PointerType>(&Ty)) {
Kind = Pointer;
SizeInBits = DL.getTypeSizeInBits(&Ty);
ElementsOrAddrSpace = PTy->getAddressSpace();
return LLT::pointer(PTy->getAddressSpace(), DL.getTypeSizeInBits(&Ty));
} else if (Ty.isSized()) {
// Aggregates are no different from real scalars as far as GlobalISel is
// concerned.
Kind = Scalar;
SizeInBits = DL.getTypeSizeInBits(&Ty);
ElementsOrAddrSpace = 1;
auto SizeInBits = DL.getTypeSizeInBits(&Ty);
assert(SizeInBits != 0 && "invalid zero-sized type");
} else {
Kind = Invalid;
SizeInBits = ElementsOrAddrSpace = 0;
return LLT::scalar(SizeInBits);
}
}
LLT::LLT(MVT VT) {
if (VT.isVector()) {
SizeInBits = VT.getVectorElementType().getSizeInBits();
ElementsOrAddrSpace = VT.getVectorNumElements();
Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
} else if (VT.isValid()) {
// Aggregates are no different from real scalars as far as GlobalISel is
// concerned.
Kind = Scalar;
SizeInBits = VT.getSizeInBits();
ElementsOrAddrSpace = 1;
assert(SizeInBits != 0 && "invalid zero-sized type");
} else {
Kind = Invalid;
SizeInBits = ElementsOrAddrSpace = 0;
}
}
void LLT::print(raw_ostream &OS) const {
if (isVector())
OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">";
else if (isPointer())
OS << "p" << getAddressSpace();
else if (isValid()) {
assert(isScalar() && "unexpected type");
OS << "s" << getScalarSizeInBits();
} else
llvm_unreachable("trying to print an invalid type");
return LLT();
}

File: lib/Support/CMakeLists.txt

@ -71,6 +71,7 @@ add_llvm_library(LLVMSupport
LineIterator.cpp
Locale.cpp
LockFileManager.cpp
LowLevelType.cpp
ManagedStatic.cpp
MathExtras.cpp
MemoryBuffer.cpp

File: lib/Support/LowLevelType.cpp (new file)

@ -0,0 +1,47 @@
//===-- llvm/Support/LowLevelType.cpp -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This file implements the more header-heavy bits of the LLT class to
/// avoid polluting users' namespaces.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
LLT::LLT(MVT VT) {
if (VT.isVector()) {
SizeInBits = VT.getVectorElementType().getSizeInBits();
ElementsOrAddrSpace = VT.getVectorNumElements();
Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
} else if (VT.isValid()) {
// Aggregates are no different from real scalars as far as GlobalISel is
// concerned.
Kind = Scalar;
SizeInBits = VT.getSizeInBits();
ElementsOrAddrSpace = 1;
assert(SizeInBits != 0 && "invalid zero-sized type");
} else {
Kind = Invalid;
SizeInBits = ElementsOrAddrSpace = 0;
}
}
void LLT::print(raw_ostream &OS) const {
if (isVector())
OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">";
else if (isPointer())
OS << "p" << getAddressSpace();
else if (isValid()) {
assert(isScalar() && "unexpected type");
OS << "s" << getScalarSizeInBits();
} else
llvm_unreachable("trying to print an invalid type");
}
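A short sketch of the MVT path implemented above (not part of the commit): vector MVTs keep their element count and element width, while plain integer and floating-point MVTs become scalars of the same total width.

// Sketch: MVT -> LLT conversion via the constructor defined above.
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLT FromV4I32 = LLT(MVT::v4i32); // <4 x s32>
  LLT FromF64 = LLT(MVT::f64);     // s64
  outs() << FromV4I32 << " " << FromF64 << "\n";
  return 0;
}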

File: lib/Target/AArch64/AArch64CallLowering.cpp

@ -196,8 +196,8 @@ void AArch64CallLowering::splitToValueTypes(
// FIXME: set split flags if they're actually used (e.g. i128 on AAPCS).
Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
SplitArgs.push_back(
ArgInfo{MRI.createGenericVirtualRegister(LLT{*SplitTy, DL}), SplitTy,
OrigArg.Flags, OrigArg.IsFixed});
ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)),
SplitTy, OrigArg.Flags, OrigArg.IsFixed});
}
for (unsigned i = 0; i < Offsets.size(); ++i)

File: lib/Target/AMDGPU/AMDGPUCallLowering.cpp

@ -50,7 +50,7 @@ unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
const Function &F = *MF.getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
LLT PtrType(*PtrTy, DL);
LLT PtrType = getLLTForType(*PtrTy, DL);
unsigned DstReg = MRI.createGenericVirtualRegister(PtrType);
unsigned KernArgSegmentPtr =
TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);

File: lib/Target/X86/X86CallLowering.cpp

@ -58,8 +58,9 @@ void X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
Type *PartTy = PartVT.getTypeForEVT(Context);
for (unsigned i = 0; i < NumParts; ++i) {
ArgInfo Info = ArgInfo{MRI.createGenericVirtualRegister(LLT{*PartTy, DL}),
PartTy, OrigArg.Flags};
ArgInfo Info =
ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*PartTy, DL)),
PartTy, OrigArg.Flags};
SplitArgs.push_back(Info);
PerformArgSplit(Info.Reg, PartVT.getSizeInBits() * i);
}

File: unittests/CodeGen/LowLevelTypeTest.cpp

@ -68,7 +68,7 @@ TEST(LowLevelTypeTest, Scalar) {
// Test Type->LLT conversion.
Type *IRTy = IntegerType::get(C, S);
EXPECT_EQ(Ty, LLT(*IRTy, DL));
EXPECT_EQ(Ty, getLLTForType(*IRTy, DL));
}
}
@ -160,7 +160,7 @@ TEST(LowLevelTypeTest, Vector) {
// Test Type->LLT conversion.
Type *IRSTy = IntegerType::get(C, S);
Type *IRTy = VectorType::get(IRSTy, Elts);
EXPECT_EQ(VTy, LLT(*IRTy, DL));
EXPECT_EQ(VTy, getLLTForType(*IRTy, DL));
}
}
}
@ -188,7 +188,7 @@ TEST(LowLevelTypeTest, Pointer) {
// Test Type->LLT conversion.
Type *IRTy = PointerType::get(IntegerType::get(C, 8), AS);
EXPECT_EQ(Ty, LLT(*IRTy, DL));
EXPECT_EQ(Ty, getLLTForType(*IRTy, DL));
}
}
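The updated tests above check the Type-to-LLT round trip through the new free function. Below is a condensed, hypothetical version of that check; the DataLayout string and test name are illustrative, not taken from the test file.

// Sketch of the Type -> LLT round trip exercised by the updated unit tests.
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "gtest/gtest.h"

using namespace llvm;

TEST(LowLevelTypeSketch, ScalarRoundTrip) {
  LLVMContext C;
  DataLayout DL("");
  Type *I32Ty = IntegerType::get(C, 32);
  EXPECT_EQ(LLT::scalar(32), getLLTForType(*I32Ty, DL));
}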

File: utils/TableGen/GlobalISelEmitter.cpp

@ -36,6 +36,7 @@
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
@ -58,22 +59,38 @@ static cl::opt<bool> WarnOnSkippedPatterns(
//===- Helper functions ---------------------------------------------------===//
/// This class stands in for LLT wherever we want to tablegen-erate an
/// equivalent at compiler run-time.
class LLTCodeGen {
private:
LLT Ty;
public:
LLTCodeGen(const LLT &Ty) : Ty(Ty) {}
void emitCxxConstructorCall(raw_ostream &OS) const {
if (Ty.isScalar()) {
OS << "LLT::scalar(" << Ty.getSizeInBits() << ")";
return;
}
if (Ty.isVector()) {
OS << "LLT::vector(" << Ty.getNumElements() << ", " << Ty.getSizeInBits()
<< ")";
return;
}
llvm_unreachable("Unhandled LLT");
}
};
/// Convert an MVT to an equivalent LLT if possible, or the invalid LLT() for
/// MVTs that don't map cleanly to an LLT (e.g., iPTR, *any, ...).
static Optional<std::string> MVTToLLT(MVT::SimpleValueType SVT) {
std::string TyStr;
raw_string_ostream OS(TyStr);
static Optional<LLTCodeGen> MVTToLLT(MVT::SimpleValueType SVT) {
MVT VT(SVT);
if (VT.isVector() && VT.getVectorNumElements() != 1) {
OS << "LLT::vector(" << VT.getVectorNumElements() << ", "
<< VT.getScalarSizeInBits() << ")";
} else if (VT.isInteger() || VT.isFloatingPoint()) {
OS << "LLT::scalar(" << VT.getSizeInBits() << ")";
} else {
return None;
}
OS.flush();
return TyStr;
if (VT.isVector() && VT.getVectorNumElements() != 1)
return LLTCodeGen(LLT::vector(VT.getVectorNumElements(), VT.getScalarSizeInBits()));
if (VT.isInteger() || VT.isFloatingPoint())
return LLTCodeGen(LLT::scalar(VT.getSizeInBits()));
return None;
}
static bool isTrivialOperatorNode(const TreePatternNode *N) {
@ -167,10 +184,10 @@ public:
/// Generates code to check that an operand is a particular LLT.
class LLTOperandMatcher : public OperandPredicateMatcher {
protected:
std::string Ty;
LLTCodeGen Ty;
public:
LLTOperandMatcher(std::string Ty)
LLTOperandMatcher(const LLTCodeGen &Ty)
: OperandPredicateMatcher(OPM_LLT), Ty(Ty) {}
static bool classof(const OperandPredicateMatcher *P) {
@ -179,7 +196,9 @@ public:
void emitCxxPredicateExpr(raw_ostream &OS,
StringRef OperandExpr) const override {
OS << "MRI.getType(" << OperandExpr << ".getReg()) == (" << Ty << ")";
OS << "MRI.getType(" << OperandExpr << ".getReg()) == (";
Ty.emitCxxConstructorCall(OS);
OS << ")";
}
};
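At compiler run-time, the code that LLTOperandMatcher emits boils down to comparing a register's LLT against a freshly constructed one. Below is a standalone sketch of that check outside generated code; the function name and operand index are illustrative, not part of the commit.

// Sketch: the shape of the predicate printed by
// LLTOperandMatcher::emitCxxPredicateExpr, e.g.
//   MRI.getType(I.getOperand(0).getReg()) == (LLT::scalar(32))
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

static bool operandIsS32(const MachineInstr &I,
                         const MachineRegisterInfo &MRI) {
  return MRI.getType(I.getOperand(0).getReg()) == LLT::scalar(32);
}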