Revert r296474 - [globalisel] Change LLT constructor string into an LLT subclass that knows how to generate it.

There's a circular dependency that's only revealed when LLVM_ENABLE_MODULES=1.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@296478 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Daniel Sanders, 2017-02-28 15:00:27 +00:00
Commit: 1e598cbf73 (parent: bfdb3f2a5a)
10 changed files with 246 additions and 290 deletions

@@ -1,4 +1,4 @@
//== llvm/CodeGen/LowLevelType.h ------------------------------- -*- C++ -*-==//
//== llvm/CodeGen/GlobalISel/LowLevelType.h -------------------- -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -10,23 +10,197 @@
/// Implement a low-level type suitable for MachineInstr level instruction
/// selection.
///
/// This provides the CodeGen aspects of LowLevelType, such as Type conversion.
/// For a type attached to a MachineInstr, we only care about 2 details: total
/// size and the number of vector lanes (if any). Accordingly, there are 3
/// possible valid type-kinds:
///
/// * `sN` for scalars and aggregates
/// * `<N x sM>` for vectors, which must have at least 2 elements.
/// * `pN` for pointers
///
/// Other information required for correct selection is expected to be carried
/// by the opcode, or non-type flags. For example the distinction between G_ADD
/// and G_FADD for int/float or fast-math flags.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LOWLEVELTYPE_H
#define LLVM_CODEGEN_LOWLEVELTYPE_H
#ifndef LLVM_CODEGEN_GLOBALISEL_LOWLEVELTYPE_H
#define LLVM_CODEGEN_GLOBALISEL_LOWLEVELTYPE_H
#include "llvm/Support/LowLevelTypeImpl.h"
#include <cassert>
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
namespace llvm {
class DataLayout;
class LLVMContext;
class Type;
class raw_ostream;
/// Construct a low-level type based on an LLVM type.
LLT getLLTForType(Type &Ty, const DataLayout &DL);
class LLT {
public:
enum TypeKind : uint16_t {
Invalid,
Scalar,
Pointer,
Vector,
};
/// Get a low-level scalar or aggregate "bag of bits".
static LLT scalar(unsigned SizeInBits) {
assert(SizeInBits > 0 && "invalid scalar size");
return LLT{Scalar, 1, SizeInBits};
}
/// Get a low-level pointer in the given address space (defaulting to 0).
static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) {
return LLT{Pointer, AddressSpace, SizeInBits};
}
/// Get a low-level vector of some number of elements and element width.
/// \p NumElements must be at least 2.
static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) {
assert(NumElements > 1 && "invalid number of vector elements");
return LLT{Vector, NumElements, ScalarSizeInBits};
}
/// Get a low-level vector of some number of elements and element type.
static LLT vector(uint16_t NumElements, LLT ScalarTy) {
assert(NumElements > 1 && "invalid number of vector elements");
assert(ScalarTy.isScalar() && "invalid vector element type");
return LLT{Vector, NumElements, ScalarTy.getSizeInBits()};
}
explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeInBits)
: SizeInBits(SizeInBits), ElementsOrAddrSpace(NumElements), Kind(Kind) {
assert((Kind != Vector || ElementsOrAddrSpace > 1) &&
"invalid number of vector elements");
}
explicit LLT() : SizeInBits(0), ElementsOrAddrSpace(0), Kind(Invalid) {}
/// Construct a low-level type based on an LLVM type.
explicit LLT(Type &Ty, const DataLayout &DL);
explicit LLT(MVT VT);
bool isValid() const { return Kind != Invalid; }
bool isScalar() const { return Kind == Scalar; }
bool isPointer() const { return Kind == Pointer; }
bool isVector() const { return Kind == Vector; }
/// Returns the number of elements in a vector LLT. Must only be called on
/// vector types.
uint16_t getNumElements() const {
assert(isVector() && "cannot get number of elements on scalar/aggregate");
return ElementsOrAddrSpace;
}
/// Returns the total size of the type. Must only be called on sized types.
unsigned getSizeInBits() const {
if (isPointer() || isScalar())
return SizeInBits;
return SizeInBits * ElementsOrAddrSpace;
}
unsigned getScalarSizeInBits() const {
return SizeInBits;
}
unsigned getAddressSpace() const {
assert(isPointer() && "cannot get address space of non-pointer type");
return ElementsOrAddrSpace;
}
/// Returns the vector's element type. Only valid for vector types.
LLT getElementType() const {
assert(isVector() && "cannot get element type of scalar/aggregate");
return scalar(SizeInBits);
}
/// Get a low-level type with half the size of the original, by halving the
/// size of the scalar type involved. For example `s32` will become `s16`,
/// `<2 x s32>` will become `<2 x s16>`.
LLT halfScalarSize() const {
assert(!isPointer() && getScalarSizeInBits() > 1 &&
getScalarSizeInBits() % 2 == 0 && "cannot half size of this type");
return LLT{Kind, ElementsOrAddrSpace, SizeInBits / 2};
}
/// Get a low-level type with twice the size of the original, by doubling the
/// size of the scalar type involved. For example `s32` will become `s64`,
/// `<2 x s32>` will become `<2 x s64>`.
LLT doubleScalarSize() const {
assert(!isPointer() && "cannot change size of this type");
return LLT{Kind, ElementsOrAddrSpace, SizeInBits * 2};
}
/// Get a low-level type with half the size of the original, by halving the
/// number of vector elements of the scalar type involved. The source must be
/// a vector type with an even number of elements. For example `<4 x s32>`
/// will become `<2 x s32>`, `<2 x s32>` will become `s32`.
LLT halfElements() const {
assert(isVector() && ElementsOrAddrSpace % 2 == 0 &&
"cannot half odd vector");
if (ElementsOrAddrSpace == 2)
return scalar(SizeInBits);
return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace / 2),
SizeInBits};
}
/// Get a low-level type with twice the size of the original, by doubling the
/// number of vector elements of the scalar type involved. The source must be
/// a vector type. For example `<2 x s32>` will become `<4 x s32>`. Doubling
/// the number of elements in sN produces <2 x sN>.
LLT doubleElements() const {
assert(!isPointer() && "cannot double elements in pointer");
return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace * 2),
SizeInBits};
}
void print(raw_ostream &OS) const;
bool operator==(const LLT &RHS) const {
return Kind == RHS.Kind && SizeInBits == RHS.SizeInBits &&
ElementsOrAddrSpace == RHS.ElementsOrAddrSpace;
}
bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
friend struct DenseMapInfo<LLT>;
private:
unsigned SizeInBits;
uint16_t ElementsOrAddrSpace;
TypeKind Kind;
};
inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
Ty.print(OS);
return OS;
}
template<> struct DenseMapInfo<LLT> {
static inline LLT getEmptyKey() {
return LLT{LLT::Invalid, 0, -1u};
}
static inline LLT getTombstoneKey() {
return LLT{LLT::Invalid, 0, -2u};
}
static inline unsigned getHashValue(const LLT &Ty) {
uint64_t Val = ((uint64_t)Ty.SizeInBits << 32) |
((uint64_t)Ty.ElementsOrAddrSpace << 16) | (uint64_t)Ty.Kind;
return DenseMapInfo<uint64_t>::getHashValue(Val);
}
static bool isEqual(const LLT &LHS, const LLT &RHS) {
return LHS == RHS;
}
};
}
#endif // LLVM_CODEGEN_LOWLEVELTYPE_H
#endif
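
To make the restored interface concrete, here is a small standalone sketch, not part of this commit, that builds one value of each valid kind with the factory functions declared above and prints it via the operator<< from this header. The function name is purely illustrative.

#include "llvm/CodeGen/GlobalISel/LowLevelType.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Build one LLT of each valid kind; the comments show the form LLT::print()
// emits for each value.
static void printExampleTypes() {
  LLT S32 = LLT::scalar(32);       // "s32": 32-bit scalar/aggregate bag of bits
  LLT V4S32 = LLT::vector(4, 32);  // "<4 x s32>": four 32-bit lanes
  LLT P0 = LLT::pointer(0, 64);    // "p0": pointer in address space 0, 64 bits
  errs() << S32 << " " << V4S32 << " " << P0 << "\n";
}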

@@ -1,202 +0,0 @@
//== llvm/Support/LowLevelTypeImpl.h --------------------------- -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// Implement a low-level type suitable for MachineInstr level instruction
/// selection.
///
/// For a type attached to a MachineInstr, we only care about 2 details: total
/// size and the number of vector lanes (if any). Accordingly, there are 4
/// possible valid type-kinds:
///
/// * `sN` for scalars and aggregates
/// * `<N x sM>` for vectors, which must have at least 2 elements.
/// * `pN` for pointers
///
/// Other information required for correct selection is expected to be carried
/// by the opcode, or non-type flags. For example the distinction between G_ADD
/// and G_FADD for int/float or fast-math flags.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
#define LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
#include <cassert>
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
namespace llvm {
class DataLayout;
class Type;
class raw_ostream;
class LLT {
public:
enum TypeKind : uint16_t {
Invalid,
Scalar,
Pointer,
Vector,
};
/// Get a low-level scalar or aggregate "bag of bits".
static LLT scalar(unsigned SizeInBits) {
assert(SizeInBits > 0 && "invalid scalar size");
return LLT{Scalar, 1, SizeInBits};
}
/// Get a low-level pointer in the given address space (defaulting to 0).
static LLT pointer(uint16_t AddressSpace, unsigned SizeInBits) {
return LLT{Pointer, AddressSpace, SizeInBits};
}
/// Get a low-level vector of some number of elements and element width.
/// \p NumElements must be at least 2.
static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) {
assert(NumElements > 1 && "invalid number of vector elements");
return LLT{Vector, NumElements, ScalarSizeInBits};
}
/// Get a low-level vector of some number of elements and element type.
static LLT vector(uint16_t NumElements, LLT ScalarTy) {
assert(NumElements > 1 && "invalid number of vector elements");
assert(ScalarTy.isScalar() && "invalid vector element type");
return LLT{Vector, NumElements, ScalarTy.getSizeInBits()};
}
explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned SizeInBits)
: SizeInBits(SizeInBits), ElementsOrAddrSpace(NumElements), Kind(Kind) {
assert((Kind != Vector || ElementsOrAddrSpace > 1) &&
"invalid number of vector elements");
}
explicit LLT() : SizeInBits(0), ElementsOrAddrSpace(0), Kind(Invalid) {}
explicit LLT(MVT VT);
bool isValid() const { return Kind != Invalid; }
bool isScalar() const { return Kind == Scalar; }
bool isPointer() const { return Kind == Pointer; }
bool isVector() const { return Kind == Vector; }
/// Returns the number of elements in a vector LLT. Must only be called on
/// vector types.
uint16_t getNumElements() const {
assert(isVector() && "cannot get number of elements on scalar/aggregate");
return ElementsOrAddrSpace;
}
/// Returns the total size of the type. Must only be called on sized types.
unsigned getSizeInBits() const {
if (isPointer() || isScalar())
return SizeInBits;
return SizeInBits * ElementsOrAddrSpace;
}
unsigned getScalarSizeInBits() const {
return SizeInBits;
}
unsigned getAddressSpace() const {
assert(isPointer() && "cannot get address space of non-pointer type");
return ElementsOrAddrSpace;
}
/// Returns the vector's element type. Only valid for vector types.
LLT getElementType() const {
assert(isVector() && "cannot get element type of scalar/aggregate");
return scalar(SizeInBits);
}
/// Get a low-level type with half the size of the original, by halving the
/// size of the scalar type involved. For example `s32` will become `s16`,
/// `<2 x s32>` will become `<2 x s16>`.
LLT halfScalarSize() const {
assert(!isPointer() && getScalarSizeInBits() > 1 &&
getScalarSizeInBits() % 2 == 0 && "cannot half size of this type");
return LLT{Kind, ElementsOrAddrSpace, SizeInBits / 2};
}
/// Get a low-level type with twice the size of the original, by doubling the
/// size of the scalar type involved. For example `s32` will become `s64`,
/// `<2 x s32>` will become `<2 x s64>`.
LLT doubleScalarSize() const {
assert(!isPointer() && "cannot change size of this type");
return LLT{Kind, ElementsOrAddrSpace, SizeInBits * 2};
}
/// Get a low-level type with half the size of the original, by halving the
/// number of vector elements of the scalar type involved. The source must be
/// a vector type with an even number of elements. For example `<4 x s32>`
/// will become `<2 x s32>`, `<2 x s32>` will become `s32`.
LLT halfElements() const {
assert(isVector() && ElementsOrAddrSpace % 2 == 0 &&
"cannot half odd vector");
if (ElementsOrAddrSpace == 2)
return scalar(SizeInBits);
return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace / 2),
SizeInBits};
}
/// Get a low-level type with twice the size of the original, by doubling the
/// number of vector elements of the scalar type involved. The source must be
/// a vector type. For example `<2 x s32>` will become `<4 x s32>`. Doubling
/// the number of elements in sN produces <2 x sN>.
LLT doubleElements() const {
assert(!isPointer() && "cannot double elements in pointer");
return LLT{Vector, static_cast<uint16_t>(ElementsOrAddrSpace * 2),
SizeInBits};
}
void print(raw_ostream &OS) const;
bool operator==(const LLT &RHS) const {
return Kind == RHS.Kind && SizeInBits == RHS.SizeInBits &&
ElementsOrAddrSpace == RHS.ElementsOrAddrSpace;
}
bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
friend struct DenseMapInfo<LLT>;
private:
unsigned SizeInBits;
uint16_t ElementsOrAddrSpace;
TypeKind Kind;
};
inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
Ty.print(OS);
return OS;
}
template<> struct DenseMapInfo<LLT> {
static inline LLT getEmptyKey() {
return LLT{LLT::Invalid, 0, -1u};
}
static inline LLT getTombstoneKey() {
return LLT{LLT::Invalid, 0, -2u};
}
static inline unsigned getHashValue(const LLT &Ty) {
uint64_t Val = ((uint64_t)Ty.SizeInBits << 32) |
((uint64_t)Ty.ElementsOrAddrSpace << 16) | (uint64_t)Ty.Kind;
return DenseMapInfo<uint64_t>::getHashValue(Val);
}
static bool isEqual(const LLT &LHS, const LLT &RHS) {
return LHS == RHS;
}
};
}
#endif // LLVM_SUPPORT_LOWLEVELTYPEIMPL_H

@@ -82,8 +82,7 @@ unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
// we need to concat together to produce the value.
assert(Val.getType()->isSized() &&
"Don't know how to create an empty vreg");
unsigned VReg =
MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL));
unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
ValReg = VReg;
if (auto CV = dyn_cast<Constant>(&Val)) {
@@ -234,7 +233,7 @@ bool IRTranslator::translateSwitch(const User &U,
const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
const BasicBlock *OrigBB = SwInst.getParent();
LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
LLT LLTi1 = LLT(*Type::getInt1Ty(U.getContext()), *DL);
for (auto &CaseIt : SwInst.cases()) {
const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
@@ -290,7 +289,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
unsigned Res = getOrCreateVReg(LI);
unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
LLT VTy{*LI.getType(), *DL}, PTy{*LI.getPointerOperand()->getType(), *DL};
MIRBuilder.buildLoad(
Res, Addr,
*MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
@@ -308,6 +307,8 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
unsigned Val = getOrCreateVReg(*SI.getValueOperand());
unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
LLT VTy{*SI.getValueOperand()->getType(), *DL},
PTy{*SI.getPointerOperand()->getType(), *DL};
MIRBuilder.buildStore(
Val, Addr,
@@ -383,8 +384,7 @@ bool IRTranslator::translateSelect(const User &U,
bool IRTranslator::translateBitCast(const User &U,
MachineIRBuilder &MIRBuilder) {
if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
getLLTForType(*U.getType(), *DL)) {
if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
unsigned &Reg = ValToVReg[&U];
if (Reg)
MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
@@ -411,7 +411,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
Value &Op0 = *U.getOperand(0);
unsigned BaseReg = getOrCreateVReg(Op0);
LLT PtrTy = getLLTForType(*Op0.getType(), *DL);
LLT PtrTy{*Op0.getType(), *DL};
unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
LLT OffsetTy = LLT::scalar(PtrSize);
@@ -477,7 +477,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
bool IRTranslator::translateMemfunc(const CallInst &CI,
MachineIRBuilder &MIRBuilder,
unsigned ID) {
LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
Type *DstTy = CI.getArgOperand(0)->getType();
if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
@@ -534,7 +534,7 @@ void IRTranslator::getStackGuard(unsigned DstReg,
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder) {
LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL);
LLT Ty{*CI.getOperand(0)->getType(), *DL};
LLT s1 = LLT::scalar(1);
unsigned Width = Ty.getSizeInBits();
unsigned Res = MRI->createGenericVirtualRegister(Ty);
@@ -677,7 +677,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
getStackGuard(getOrCreateVReg(CI), MIRBuilder);
return true;
case Intrinsic::stackprotector: {
LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
getStackGuard(GuardVal, MIRBuilder);
@@ -820,7 +820,7 @@ bool IRTranslator::translateLandingPad(const User &U,
SmallVector<LLT, 2> Tys;
for (Type *Ty : cast<StructType>(LP.getType())->elements())
Tys.push_back(getLLTForType(*Ty, *DL));
Tys.push_back(LLT{*Ty, *DL});
assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
// Mark exception register as live in.
@@ -885,7 +885,7 @@ bool IRTranslator::translateAlloca(const User &U,
MIRBuilder.buildConstant(TySize, -DL->getTypeAllocSize(Ty));
MIRBuilder.buildMul(AllocSize, NumElts, TySize);
LLT PtrTy = getLLTForType(*AI.getType(), *DL);
LLT PtrTy = LLT{*AI.getType(), *DL};
auto &TLI = *MF->getSubtarget().getTargetLowering();
unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
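
As an aside, the IRTranslator changes above all follow one call-site pattern: build the LLT inline from an IR value's type and create a matching generic vreg. A hedged sketch of that pattern, with getVRegForValue being a hypothetical helper name rather than anything added by this commit:

#include "llvm/CodeGen/GlobalISel/LowLevelType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// With the LLT(Type&, DataLayout&) constructor restored, call sites no longer
// need to go through a free function such as getLLTForType().
static unsigned getVRegForValue(const Value &V, MachineRegisterInfo &MRI,
                                const DataLayout &DL) {
  LLT Ty{*V.getType(), DL};  // e.g. i32 -> s32, i8* -> p0, <4 x i32> -> <4 x s32>
  return MRI.createGenericVirtualRegister(Ty);
}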

@@ -1,4 +1,4 @@
//===-- llvm/CodeGen/LowLevelType.cpp -------------------------------------===//
//===-- llvm/CodeGen/GlobalISel/LowLevelType.cpp --------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,21 +18,54 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
LLT::LLT(Type &Ty, const DataLayout &DL) {
if (auto VTy = dyn_cast<VectorType>(&Ty)) {
auto NumElements = VTy->getNumElements();
auto ScalarSizeInBits = VTy->getElementType()->getPrimitiveSizeInBits();
if (NumElements == 1)
return LLT::scalar(ScalarSizeInBits);
return LLT::vector(NumElements, ScalarSizeInBits);
SizeInBits = VTy->getElementType()->getPrimitiveSizeInBits();
ElementsOrAddrSpace = VTy->getNumElements();
Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
} else if (auto PTy = dyn_cast<PointerType>(&Ty)) {
return LLT::pointer(PTy->getAddressSpace(), DL.getTypeSizeInBits(&Ty));
Kind = Pointer;
SizeInBits = DL.getTypeSizeInBits(&Ty);
ElementsOrAddrSpace = PTy->getAddressSpace();
} else if (Ty.isSized()) {
// Aggregates are no different from real scalars as far as GlobalISel is
// concerned.
auto SizeInBits = DL.getTypeSizeInBits(&Ty);
Kind = Scalar;
SizeInBits = DL.getTypeSizeInBits(&Ty);
ElementsOrAddrSpace = 1;
assert(SizeInBits != 0 && "invalid zero-sized type");
return LLT::scalar(SizeInBits);
} else {
Kind = Invalid;
SizeInBits = ElementsOrAddrSpace = 0;
}
return LLT();
}
LLT::LLT(MVT VT) {
if (VT.isVector()) {
SizeInBits = VT.getVectorElementType().getSizeInBits();
ElementsOrAddrSpace = VT.getVectorNumElements();
Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
} else if (VT.isValid()) {
// Aggregates are no different from real scalars as far as GlobalISel is
// concerned.
Kind = Scalar;
SizeInBits = VT.getSizeInBits();
ElementsOrAddrSpace = 1;
assert(SizeInBits != 0 && "invalid zero-sized type");
} else {
Kind = Invalid;
SizeInBits = ElementsOrAddrSpace = 0;
}
}
void LLT::print(raw_ostream &OS) const {
if (isVector())
OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">";
else if (isPointer())
OS << "p" << getAddressSpace();
else if (isValid()) {
assert(isScalar() && "unexpected type");
OS << "s" << getScalarSizeInBits();
} else
llvm_unreachable("trying to print an invalid type");
}
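
For orientation, a short sketch, illustrative rather than part of the commit, of the mapping the two restored constructors produce; the exact pointer size comes from the DataLayout, and the helper name is an assumption.

#include "llvm/CodeGen/GlobalISel/LowLevelType.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Each constructed LLT is annotated with the textual form LLT::print() gives it.
static void constructorExamples(LLVMContext &Ctx, const DataLayout &DL) {
  LLT S64(*Type::getInt64Ty(Ctx), DL);                        // s64
  LLT V8S16(*VectorType::get(Type::getInt16Ty(Ctx), 8), DL);  // <8 x s16>
  LLT P1(*Type::getInt8PtrTy(Ctx, 1), DL);                    // p1, sized by DL
  LLT FromMVT(MVT::v4i32);                                    // <4 x s32>
  (void)S64; (void)V8S16; (void)P1; (void)FromMVT;
}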

@@ -68,7 +68,6 @@ add_llvm_library(LLVMSupport
LineIterator.cpp
Locale.cpp
LockFileManager.cpp
LowLevelType.cpp
ManagedStatic.cpp
MathExtras.cpp
MemoryBuffer.cpp

@@ -1,47 +0,0 @@
//===-- llvm/Support/LowLevelType.cpp -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This file implements the more header-heavy bits of the LLT class to
/// avoid polluting users' namespaces.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
LLT::LLT(MVT VT) {
if (VT.isVector()) {
SizeInBits = VT.getVectorElementType().getSizeInBits();
ElementsOrAddrSpace = VT.getVectorNumElements();
Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
} else if (VT.isValid()) {
// Aggregates are no different from real scalars as far as GlobalISel is
// concerned.
Kind = Scalar;
SizeInBits = VT.getSizeInBits();
ElementsOrAddrSpace = 1;
assert(SizeInBits != 0 && "invalid zero-sized type");
} else {
Kind = Invalid;
SizeInBits = ElementsOrAddrSpace = 0;
}
}
void LLT::print(raw_ostream &OS) const {
if (isVector())
OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">";
else if (isPointer())
OS << "p" << getAddressSpace();
else if (isValid()) {
assert(isScalar() && "unexpected type");
OS << "s" << getScalarSizeInBits();
} else
llvm_unreachable("trying to print an invalid type");
}

@@ -192,8 +192,8 @@ void AArch64CallLowering::splitToValueTypes(
// FIXME: set split flags if they're actually used (e.g. i128 on AAPCS).
Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
SplitArgs.push_back(
ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)),
SplitTy, OrigArg.Flags, OrigArg.IsFixed});
ArgInfo{MRI.createGenericVirtualRegister(LLT{*SplitTy, DL}), SplitTy,
OrigArg.Flags, OrigArg.IsFixed});
}
SmallVector<uint64_t, 4> BitOffsets;

@@ -50,7 +50,7 @@ unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
const Function &F = *MF.getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
LLT PtrType = getLLTForType(*PtrTy, DL);
LLT PtrType(*PtrTy, DL);
unsigned DstReg = MRI.createGenericVirtualRegister(PtrType);
unsigned KernArgSegmentPtr =
TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);

@@ -58,9 +58,8 @@ void X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
Type *PartTy = PartVT.getTypeForEVT(Context);
for (unsigned i = 0; i < NumParts; ++i) {
ArgInfo Info =
ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*PartTy, DL)),
PartTy, OrigArg.Flags};
ArgInfo Info = ArgInfo{MRI.createGenericVirtualRegister(LLT{*PartTy, DL}),
PartTy, OrigArg.Flags};
SplitArgs.push_back(Info);
BitOffsets.push_back(PartVT.getSizeInBits() * i);
SplitRegs.push_back(Info.Reg);

@@ -68,7 +68,7 @@ TEST(LowLevelTypeTest, Scalar) {
// Test Type->LLT conversion.
Type *IRTy = IntegerType::get(C, S);
EXPECT_EQ(Ty, getLLTForType(*IRTy, DL));
EXPECT_EQ(Ty, LLT(*IRTy, DL));
}
}
@@ -160,7 +160,7 @@ TEST(LowLevelTypeTest, Vector) {
// Test Type->LLT conversion.
Type *IRSTy = IntegerType::get(C, S);
Type *IRTy = VectorType::get(IRSTy, Elts);
EXPECT_EQ(VTy, getLLTForType(*IRTy, DL));
EXPECT_EQ(VTy, LLT(*IRTy, DL));
}
}
}
@@ -188,7 +188,7 @@ TEST(LowLevelTypeTest, Pointer) {
// Test Type->LLT conversion.
Type *IRTy = PointerType::get(IntegerType::get(C, 8), AS);
EXPECT_EQ(Ty, getLLTForType(*IRTy, DL));
EXPECT_EQ(Ty, LLT(*IRTy, DL));
}
}
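
Finally, a minimal gtest-style sketch of the round-trip property these updated unit tests exercise: an LLT built from an IR type compares equal to the directly constructed one. The test name and the empty DataLayout string are assumptions for illustration, not code from this commit.

#include "llvm/CodeGen/GlobalISel/LowLevelType.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "gtest/gtest.h"
using namespace llvm;

TEST(LowLevelTypeSketch, TypeToLLTRoundTrip) {
  LLVMContext C;
  DataLayout DL("");  // the default layout is enough for integer/vector sizes
  EXPECT_EQ(LLT::scalar(32), LLT(*IntegerType::get(C, 32), DL));
  EXPECT_EQ(LLT::vector(4, 32),
            LLT(*VectorType::get(IntegerType::get(C, 32), 4), DL));
}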