Move size and alignment information of regclass to TargetRegisterInfo

1. TargetRegisterClass::getSize() is split into two functions:
   - TargetRegisterInfo::getRegSizeInBits(const TargetRegisterClass &RC) const;
   - TargetRegisterInfo::getSpillSize(const TargetRegisterClass &RC) const;
2. TargetRegisterClass::getAlignment() is replaced by:
   - TargetRegisterInfo::getSpillAlignment(const TargetRegisterClass &RC) const;

This will allow making those values depend on subtarget features in the
future.
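
For illustration, the caller-side migration applied across the tree looks like the
sketch below. It is not part of the patch itself; it assumes a MachineFunction MF and
a physical register Reg are in scope, and the local names RegBits/SpillBytes/SpillAlign
are purely illustrative.

   // Before: the register class itself answered these queries, in bytes:
   //   unsigned Bytes = RC->getSize();
   //   unsigned Align = RC->getAlignment();
   //
   // After: the same information is obtained through TargetRegisterInfo.
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
   const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);

   unsigned RegBits    = TRI->getRegSizeInBits(*RC);   // register width, in bits
   unsigned SpillBytes = TRI->getSpillSize(*RC);        // spill slot size, in bytes
   unsigned SpillAlign = TRI->getSpillAlignment(*RC);   // spill slot alignment, in bytes

   // Comparisons previously written against byte counts now compare bits,
   // e.g. RC->getSize() == 4 becomes TRI->getRegSizeInBits(*RC) == 32.
   int FI = MF.getFrameInfo().CreateSpillStackObject(SpillBytes, SpillAlign);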

Differential Revision: https://reviews.llvm.org/D31783


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@301221 91177308-0d34-0410-b5e6-96231b3b80d8
Krzysztof Parzyszek 2017-04-24 18:55:33 +00:00
parent 2b8b0a5695
commit 36d7c2b2e5
47 changed files with 304 additions and 240 deletions


@ -93,13 +93,6 @@ public:
return MC->contains(Reg1, Reg2);
}
/// Return the size of the register in bytes, which is also the size
/// of a stack slot allocated to hold a spilled copy of this register.
unsigned getSize() const { return SpillSize; }
/// Return the minimum required alignment for a register of this class.
unsigned getAlignment() const { return SpillAlignment; }
/// Return the cost of copying a value between two registers in this class.
/// A negative number means the register class is very expensive
/// to copy e.g. status flag register classes.
@ -327,6 +320,23 @@ public:
return Index | (1u << 31);
}
/// Return the size in bits of a register from class RC.
unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
return RC.SpillSize * 8;
}
/// Return the size in bytes of the stack slot allocated to hold a spilled
/// copy of a register from class RC.
unsigned getSpillSize(const TargetRegisterClass &RC) const {
return RC.SpillSize;
}
/// Return the minimum required alignment for a spill slot for a register
/// of this class.
unsigned getSpillAlignment(const TargetRegisterClass &RC) const {
return RC.SpillAlignment;
}
/// Returns the Register Class of a physical register of the given type,
/// picking the most sub register class of the right type that contains this
/// physreg.


@ -117,8 +117,9 @@ bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
// Otherwise, attempt to find a covering set of sub-register numbers.
// For example, Q0 on ARM is a composition of D0+D1.
unsigned CurPos = 0;
// The size of the register in bits, assuming 8 bits per byte.
unsigned RegSize = TRI.getMinimalPhysRegClass(MachineReg)->getSize() * 8;
// The size of the register in bits.
const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
unsigned RegSize = TRI.getRegSizeInBits(*RC);
// Keep track of the bits in the register we already emitted, so we
// can avoid emitting redundant aliasing subregs.
SmallBitVector Coverage(RegSize, false);


@ -145,6 +145,8 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
}
}
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
// Now that selection is complete, there are no more generic vregs. Verify
// that the size of the now-constrained vreg is unchanged and that it has a
// register class.
@ -165,7 +167,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
continue;
if (VRegToType.second.isValid() &&
VRegToType.second.getSizeInBits() > (RC->getSize() * 8)) {
VRegToType.second.getSizeInBits() > TRI.getRegSizeInBits(*RC)) {
reportGISelFailure(MF, TPC, MORE, "gisel-select",
"VReg has explicit size different from class size",
*MI);


@ -48,7 +48,7 @@ bool RegisterBank::verify(const TargetRegisterInfo &TRI) const {
// Verify that the Size of the register bank is big enough to cover
// all the register classes it covers.
assert((getSize() >= SubRC.getSize() * 8) &&
assert(getSize() >= TRI.getRegSizeInBits(SubRC) &&
"Size is not big enough for all the subclasses!");
assert(covers(SubRC) && "Not all subclasses are covered");
}


@ -421,7 +421,7 @@ unsigned RegisterBankInfo::getSizeInBits(unsigned Reg,
RC = MRI.getRegClass(Reg);
}
assert(RC && "Unable to deduce the register class");
return RC->getSize() * 8;
return TRI.getRegSizeInBits(*RC);
}
//------------------------------------------------------------------------------


@ -373,22 +373,22 @@ static void assignCalleeSavedSpillSlots(MachineFunction &F,
FixedSlot->Reg != Reg)
++FixedSlot;
unsigned Size = RegInfo->getSpillSize(*RC);
if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
// Nope, just spill it anywhere convenient.
unsigned Align = RC->getAlignment();
unsigned Align = RegInfo->getSpillAlignment(*RC);
unsigned StackAlign = TFI->getStackAlignment();
// We may not be able to satisfy the desired alignment specification of
// the TargetRegisterClass if the stack alignment is smaller. Use the
// min.
Align = std::min(Align, StackAlign);
FrameIdx = MFI.CreateStackObject(RC->getSize(), Align, true);
FrameIdx = MFI.CreateStackObject(Size, Align, true);
if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
} else {
// Spill it to the stack where we must.
FrameIdx =
MFI.CreateFixedSpillStackObject(RC->getSize(), FixedSlot->Offset);
FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
}
CS.setFrameIdx(FrameIdx);


@ -212,8 +212,9 @@ int RAFast::getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC) {
return SS; // Already has space allocated?
// Allocate a new stack object for this spill location...
int FrameIdx = MF->getFrameInfo().CreateSpillStackObject(RC->getSize(),
RC->getAlignment());
unsigned Size = TRI->getSpillSize(*RC);
unsigned Align = TRI->getSpillAlignment(*RC);
int FrameIdx = MF->getFrameInfo().CreateSpillStackObject(Size, Align);
// Assign the slot.
StackSlotForVirtReg[VirtReg] = FrameIdx;


@ -395,8 +395,8 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
// Find an available scavenging slot with size and alignment matching
// the requirements of the class RC.
const MachineFrameInfo &MFI = MF.getFrameInfo();
unsigned NeedSize = RC->getSize();
unsigned NeedAlign = RC->getAlignment();
unsigned NeedSize = TRI->getSpillSize(*RC);
unsigned NeedAlign = TRI->getSpillAlignment(*RC);
unsigned SI = Scavenged.size(), Diff = std::numeric_limits<unsigned>::max();
int FIB = MFI.getObjectIndexBegin(), FIE = MFI.getObjectIndexEnd();


@ -161,7 +161,8 @@ StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
if (SubRegIdx)
Offset = TRI->getSubRegIdxOffset(SubRegIdx);
Locs.emplace_back(Location::Register, RC->getSize(), DwarfRegNum, Offset);
Locs.emplace_back(Location::Register, TRI->getSpillSize(*RC),
DwarfRegNum, Offset);
return ++MOI;
}
@ -245,7 +246,7 @@ void StackMaps::print(raw_ostream &OS) {
StackMaps::LiveOutReg
StackMaps::createLiveOutReg(unsigned Reg, const TargetRegisterInfo *TRI) const {
unsigned DwarfRegNum = getDwarfRegNum(Reg, TRI);
unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
unsigned Size = TRI->getSpillSize(*TRI->getMinimalPhysRegClass(Reg));
return LiveOutReg(Reg, DwarfRegNum, Size);
}


@ -345,12 +345,12 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
unsigned SubIdx, unsigned &Size,
unsigned &Offset,
const MachineFunction &MF) const {
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!SubIdx) {
Size = RC->getSize();
Size = TRI->getSpillSize(*RC);
Offset = 0;
return true;
}
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
// Convert bit size to byte size to be consistent with
// MCRegisterClass::getSize().
@ -364,10 +364,10 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
Size = BitSize /= 8;
Offset = (unsigned)BitOffset / 8;
assert(RC->getSize() >= (Offset + Size) && "bad subregister range");
assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
if (!MF.getDataLayout().isLittleEndian()) {
Offset = RC->getSize() - (Offset + Size);
Offset = TRI->getSpillSize(*RC) - (Offset + Size);
}
return true;
}


@ -1299,7 +1299,7 @@ TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
// We want the largest possible spill size.
if (SuperRC->getSize() <= BestRC->getSize())
if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
continue;
if (!isLegalRC(SuperRC))
continue;


@ -265,7 +265,7 @@ getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
const TargetRegisterClass *BestRC = nullptr;
unsigned *BestPreA = &PreA;
unsigned *BestPreB = &PreB;
if (RCA->getSize() < RCB->getSize()) {
if (getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB)) {
std::swap(RCA, RCB);
std::swap(SubA, SubB);
std::swap(BestPreA, BestPreB);
@ -273,7 +273,7 @@ getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
// Also terminate the search once we have found a register class as small as
// RCA.
unsigned MinSize = RCA->getSize();
unsigned MinSize = getRegSizeInBits(*RCA);
for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
@ -281,7 +281,7 @@ getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
// Check if a common super-register class exists for this index pair.
const TargetRegisterClass *RC =
firstCommonClass(IA.getMask(), IB.getMask(), this);
if (!RC || RC->getSize() < MinSize)
if (!RC || getRegSizeInBits(*RC) < MinSize)
continue;
// The indexes must compose identically: PreA+SubA == PreB+SubB.
@ -290,7 +290,7 @@ getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
continue;
// Is RC a better candidate than BestRC?
if (BestRC && RC->getSize() >= BestRC->getSize())
if (BestRC && getRegSizeInBits(*RC) >= getRegSizeInBits(*BestRC))
continue;
// Yes, RC is the smallest super-register seen so far.
@ -299,7 +299,7 @@ getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
*BestPreB = IB.getSubReg();
// Bail early if we reached MinSize. We won't find a better candidate.
if (BestRC->getSize() == MinSize)
if (getRegSizeInBits(*BestRC) == MinSize)
return BestRC;
}
}


@ -73,8 +73,9 @@ void VirtRegMap::grow() {
}
unsigned VirtRegMap::createSpillSlot(const TargetRegisterClass *RC) {
int SS = MF->getFrameInfo().CreateSpillStackObject(RC->getSize(),
RC->getAlignment());
unsigned Size = TRI->getSpillSize(*RC);
unsigned Align = TRI->getSpillAlignment(*RC);
int SS = MF->getFrameInfo().CreateSpillStackObject(Size, Align);
++NumSpillSlots;
return SS;
}


@ -1203,8 +1203,11 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
// If we didn't find an extra callee-saved register to spill, create
// an emergency spill slot.
if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) {
const TargetRegisterClass *RC = &AArch64::GPR64RegClass;
int FI = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), false);
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass &RC = AArch64::GPR64RegClass;
unsigned Size = TRI->getSpillSize(RC);
unsigned Align = TRI->getSpillAlignment(RC);
int FI = MFI.CreateStackObject(Size, Align, false);
RS->addScavengingFrameIndex(FI);
DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
<< " as the emergency spill slot.\n");


@ -2320,7 +2320,7 @@ void AArch64InstrInfo::storeRegToStackSlot(
PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
unsigned Opc = 0;
bool Offset = true;
switch (RC->getSize()) {
switch (TRI->getSpillSize(*RC)) {
case 1:
if (AArch64::FPR8RegClass.hasSubClassEq(RC))
Opc = AArch64::STRBui;
@ -2424,7 +2424,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
unsigned Opc = 0;
bool Offset = true;
switch (RC->getSize()) {
switch (TRI->getSpillSize(*RC)) {
case 1:
if (AArch64::FPR8RegClass.hasSubClassEq(RC))
Opc = AArch64::LDRBui;
@ -2649,7 +2649,8 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
};
if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
assert(getRegClass(DstReg)->getSize() == getRegClass(SrcReg)->getSize() &&
assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) ==
TRI.getRegSizeInBits(*getRegClass(SrcReg)) &&
"Mismatched register size in non subreg COPY");
if (IsSpill)
storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
@ -2735,7 +2736,8 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
}
if (FillRC) {
assert(getRegClass(SrcReg)->getSize() == FillRC->getSize() &&
assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
TRI.getRegSizeInBits(*FillRC) &&
"Mismatched regclass size on folded subreg COPY");
loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI);
MachineInstr &LoadMI = *--InsertPt;


@ -83,8 +83,8 @@ unsigned GCNRegPressure::getRegKind(unsigned Reg,
const auto RC = MRI.getRegClass(Reg);
auto STI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
return STI->isSGPRClass(RC) ?
(RC->getSize() == 4 ? SGPR32 : SGPR_TUPLE) :
(RC->getSize() == 4 ? VGPR32 : VGPR_TUPLE);
(STI->getRegSizeInBits(*RC) == 32 ? SGPR32 : SGPR_TUPLE) :
(STI->getRegSizeInBits(*RC) == 32 ? VGPR32 : VGPR_TUPLE);
}
void GCNRegPressure::inc(unsigned Reg,


@ -469,7 +469,7 @@ void SIFrameLowering::processFunctionBeforeFrameFinalized(
// this also ensures we shouldn't need a register for the offset when
// emergency scavenging.
int ScavengeFI = MFI.CreateFixedObject(
AMDGPU::SGPR_32RegClass.getSize(), 0, false);
TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
RS->addScavengingFrameIndex(ScavengeFI);
}
}


@ -1644,7 +1644,7 @@ computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
const TargetRegisterClass *SuperRC,
unsigned VecReg,
int Offset) {
int NumElts = SuperRC->getSize() / 4;
int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
// Skip out of bounds offsets, or else we would end up using an undefined
// register.
@ -1793,17 +1793,18 @@ static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
return LoopBB;
}
static unsigned getMOVRELDPseudo(const TargetRegisterClass *VecRC) {
switch (VecRC->getSize()) {
case 4:
static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
const TargetRegisterClass *VecRC) {
switch (TRI.getRegSizeInBits(*VecRC)) {
case 32: // 4 bytes
return AMDGPU::V_MOVRELD_B32_V1;
case 8:
case 64: // 8 bytes
return AMDGPU::V_MOVRELD_B32_V2;
case 16:
case 128: // 16 bytes
return AMDGPU::V_MOVRELD_B32_V4;
case 32:
case 256: // 32 bytes
return AMDGPU::V_MOVRELD_B32_V8;
case 64:
case 512: // 64 bytes
return AMDGPU::V_MOVRELD_B32_V16;
default:
llvm_unreachable("unsupported size for MOVRELD pseudos");
@ -1863,7 +1864,7 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
} else {
const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC));
const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
BuildMI(MBB, I, DL, MovRelDesc)
.addReg(Dst, RegState::Define)
@ -1907,7 +1908,7 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
.addReg(PhiReg, RegState::Implicit)
.addReg(AMDGPU::M0, RegState::Implicit);
} else {
const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC));
const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
.addReg(Dst, RegState::Define)


@ -428,8 +428,8 @@ RegInterval BlockWaitcntBrackets::getRegInterval(const MachineInstr *MI,
const MachineInstr &MIA = *MI;
const TargetRegisterClass *RC = TII->getOpRegClass(MIA, OpNo);
unsigned Size = RC->getSize();
Result.second = Result.first + (Size / 4);
unsigned Size = TRI->getRegSizeInBits(*RC);
Result.second = Result.first + (Size / 32);
return Result;
}


@ -216,8 +216,8 @@ Counters SIInsertWaits::getHwCounts(MachineInstr &MI) {
// XXX - What if this is a write into a super register?
const TargetRegisterClass *RC = TII->getOpRegClass(MI, 0);
unsigned Size = RC->getSize();
Result.Named.LGKM = Size > 4 ? 2 : 1;
unsigned Size = TRI->getRegSizeInBits(*RC);
Result.Named.LGKM = Size > 32 ? 2 : 1;
} else {
// s_dcache_inv etc. do not have a destination register. Assume we
// want a wait on these.
@ -289,12 +289,12 @@ bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
RegInterval SIInsertWaits::getRegInterval(const TargetRegisterClass *RC,
const MachineOperand &Reg) const {
unsigned Size = RC->getSize();
assert(Size >= 4);
unsigned Size = TRI->getRegSizeInBits(*RC);
assert(Size >= 32);
RegInterval Result;
Result.first = TRI->getEncodingValue(Reg.getReg());
Result.second = Result.first + Size / 4;
Result.second = Result.first + Size / 32;
return Result;
}


@ -250,11 +250,11 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
unsigned EltSize;
if (LdSt.mayLoad())
EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;
EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
else {
assert(LdSt.mayStore());
int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
EltSize = getOpRegClass(LdSt, Data0Idx)->getSize();
EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
}
if (isStride64(Opc))
@ -350,7 +350,7 @@ bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
FirstLdSt.getParent()->getParent()->getRegInfo();
const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());
return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
return (NumLoads * (RI.getRegSizeInBits(*DstRC) / 8)) <= LoadClusterThreshold;
}
static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
@ -438,7 +438,7 @@ void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned EltSize = 4;
unsigned Opcode = AMDGPU::V_MOV_B32_e32;
if (RI.isSGPRClass(RC)) {
if (RC->getSize() > 4) {
if (RI.getRegSizeInBits(*RC) > 32) {
Opcode = AMDGPU::S_MOV_B64;
EltSize = 8;
} else {
@ -498,11 +498,11 @@ int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
if (DstRC->getSize() == 4) {
if (RI.getRegSizeInBits(*DstRC) == 32) {
return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
} else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
} else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
return AMDGPU::S_MOV_B64;
} else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
} else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
return AMDGPU::V_MOV_B64_PSEUDO;
}
return AMDGPU::COPY;
@ -562,17 +562,18 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineMemOperand *MMO
= MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
Size, Align);
unsigned SpillSize = TRI->getSpillSize(*RC);
if (RI.isSGPRClass(RC)) {
MFI->setHasSpilledSGPRs();
// We are only allowed to create one new instruction when spilling
// registers, so we need to use pseudo instruction for spilling SGPRs.
const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(RC->getSize()));
const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));
// The SGPR spill/restore instructions only work on number sgprs, so we need
// to make sure we are using the correct register class.
if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) {
if (TargetRegisterInfo::isVirtualRegister(SrcReg) && SpillSize == 4) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
}
@ -607,7 +608,7 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
unsigned Opcode = getVGPRSpillSaveOpcode(SpillSize);
MFI->setHasSpilledVGPRs();
BuildMI(MBB, MI, DL, get(Opcode))
.addReg(SrcReg, getKillRegState(isKill)) // data
@ -665,6 +666,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
DebugLoc DL = MBB.findDebugLoc(MI);
unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
unsigned Size = FrameInfo.getObjectSize(FrameIndex);
unsigned SpillSize = TRI->getSpillSize(*RC);
MachinePointerInfo PtrInfo
= MachinePointerInfo::getFixedStack(*MF, FrameIndex);
@ -675,8 +677,8 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
if (RI.isSGPRClass(RC)) {
// FIXME: Maybe this should not include a memoperand because it will be
// lowered to non-memory instructions.
const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(RC->getSize()));
if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) {
const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
if (TargetRegisterInfo::isVirtualRegister(DestReg) && SpillSize == 4) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
}
@ -706,7 +708,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
unsigned Opcode = getVGPRSpillRestoreOpcode(SpillSize);
BuildMI(MBB, MI, DL, get(Opcode), DestReg)
.addFrameIndex(FrameIndex) // vaddr
.addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
@ -1445,9 +1447,9 @@ void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
unsigned DstSize = DstRC->getSize();
unsigned DstSize = RI.getRegSizeInBits(*DstRC);
if (DstSize == 4) {
if (DstSize == 32) {
unsigned SelOp = Pred == SCC_TRUE ?
AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32;
@ -1461,7 +1463,7 @@ void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
return;
}
if (DstSize == 8 && Pred == SCC_TRUE) {
if (DstSize == 64 && Pred == SCC_TRUE) {
MachineInstr *Select =
BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
.addReg(FalseReg)
@ -1488,7 +1490,7 @@ void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
const int16_t *SubIndices = Sub0_15;
int NElts = DstSize / 4;
int NElts = DstSize / 32;
// 64-bit select is only available for SALU.
if (Pred == SCC_TRUE) {
@ -2747,7 +2749,7 @@ unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
unsigned DstReg = MRI.createVirtualRegister(SRC);
unsigned SubRegs = VRC->getSize() / 4;
unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
SmallVector<unsigned, 8> SRegs;
for (unsigned i = 0; i < SubRegs; ++i) {


@ -626,13 +626,13 @@ public:
return 4;
}
return RI.getRegClass(OpInfo.RegClass)->getSize();
return RI.getRegSizeInBits(*RI.getRegClass(OpInfo.RegClass)) / 8;
}
/// \brief This form should usually be preferred since it handles operands
/// with unknown register classes.
unsigned getOpSize(const MachineInstr &MI, unsigned OpNo) const {
return getOpRegClass(MI, OpNo)->getSize();
return RI.getRegSizeInBits(*getOpRegClass(MI, OpNo)) / 8;
}
/// \returns true if it is legal for the operand at index \p OpNo


@ -615,7 +615,8 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
if (SpillToSMEM && isSGPRClass(RC)) {
// XXX - if private_element_size is larger than 4 it might be useful to be
// able to spill wider vmem spills.
std::tie(EltSize, ScalarStoreOp) = getSpillEltSize(RC->getSize(), true);
std::tie(EltSize, ScalarStoreOp) =
getSpillEltSize(getRegSizeInBits(*RC) / 8, true);
}
ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
@ -775,7 +776,8 @@ bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
if (SpillToSMEM && isSGPRClass(RC)) {
// XXX - if private_element_size is larger than 4 it might be useful to be
// able to spill wider vmem spills.
std::tie(EltSize, ScalarLoadOp) = getSpillEltSize(RC->getSize(), false);
std::tie(EltSize, ScalarLoadOp) =
getSpillEltSize(getRegSizeInBits(*RC) / 8, false);
}
ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
@ -1038,20 +1040,21 @@ const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
switch (RC->getSize()) {
case 0: return false;
case 1: return false;
case 4:
return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
case 8:
return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
case 12:
return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
case 16:
return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
unsigned Size = getRegSizeInBits(*RC);
if (Size < 32)
return false;
switch (Size) {
case 32:
return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
case 64:
return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
case 96:
return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
case 128:
return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
case 256:
return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
case 512:
return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
default:
llvm_unreachable("Invalid register class size");
@ -1060,18 +1063,18 @@ bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
const TargetRegisterClass *SRC) const {
switch (SRC->getSize()) {
case 4:
return &AMDGPU::VGPR_32RegClass;
case 8:
return &AMDGPU::VReg_64RegClass;
case 12:
return &AMDGPU::VReg_96RegClass;
case 16:
return &AMDGPU::VReg_128RegClass;
switch (getRegSizeInBits(*SRC)) {
case 32:
return &AMDGPU::VReg_256RegClass;
return &AMDGPU::VGPR_32RegClass;
case 64:
return &AMDGPU::VReg_64RegClass;
case 96:
return &AMDGPU::VReg_96RegClass;
case 128:
return &AMDGPU::VReg_128RegClass;
case 256:
return &AMDGPU::VReg_256RegClass;
case 512:
return &AMDGPU::VReg_512RegClass;
default:
llvm_unreachable("Invalid register class size");
@ -1080,16 +1083,16 @@ const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
const TargetRegisterClass *VRC) const {
switch (VRC->getSize()) {
case 4:
return &AMDGPU::SGPR_32RegClass;
case 8:
return &AMDGPU::SReg_64RegClass;
case 16:
return &AMDGPU::SReg_128RegClass;
switch (getRegSizeInBits(*VRC)) {
case 32:
return &AMDGPU::SReg_256RegClass;
return &AMDGPU::SGPR_32RegClass;
case 64:
return &AMDGPU::SReg_64RegClass;
case 128:
return &AMDGPU::SReg_128RegClass;
case 256:
return &AMDGPU::SReg_256RegClass;
case 512:
return &AMDGPU::SReg_512RegClass;
default:
llvm_unreachable("Invalid register class size");
@ -1354,15 +1357,15 @@ bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
const TargetRegisterClass *DstRC,
unsigned DstSubReg,
const TargetRegisterClass *NewRC) const {
unsigned SrcSize = SrcRC->getSize();
unsigned DstSize = DstRC->getSize();
unsigned NewSize = NewRC->getSize();
unsigned SrcSize = getRegSizeInBits(*SrcRC);
unsigned DstSize = getRegSizeInBits(*DstRC);
unsigned NewSize = getRegSizeInBits(*NewRC);
// Do not increase size of registers beyond dword, we would need to allocate
// adjacent registers and constrain regalloc more than needed.
// Always allow dword coalescing.
if (SrcSize <= 4 || DstSize <= 4)
if (SrcSize <= 32 || DstSize <= 32)
return true;
return NewSize <= DstSize || NewSize <= SrcSize;


@ -905,7 +905,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
MFI.getObjectSize(FI), Align);
switch (RC->getSize()) {
switch (TRI->getSpillSize(*RC)) {
case 4:
if (ARM::GPRRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(ARM::STRi12))
@ -1103,7 +1103,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
MFI.getObjectSize(FI), Align);
switch (RC->getSize()) {
switch (TRI->getSpillSize(*RC)) {
case 4:
if (ARM::GPRRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)


@ -806,7 +806,8 @@ bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
if (!DstSubReg)
return true;
// Small registers don't frequently cause a problem, so we can coalesce them.
if (NewRC->getSize() < 32 && DstRC->getSize() < 32 && SrcRC->getSize() < 32)
if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
getRegSizeInBits(*SrcRC) < 256)
return true;
auto NewRCWeight =


@ -1960,10 +1960,10 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
// note: Thumb1 functions spill to R12, not the stack. Reserve a slot
// closest to SP or frame pointer.
assert(RS && "Register scavenging not provided");
const TargetRegisterClass *RC = &ARM::GPRRegClass;
RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(),
RC->getAlignment(),
false));
const TargetRegisterClass &RC = ARM::GPRRegClass;
unsigned Size = TRI->getSpillSize(RC);
unsigned Align = TRI->getSpillAlignment(RC);
RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
}
}
}


@ -112,7 +112,8 @@ bool AVRAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
const AVRSubtarget &STI = MF->getSubtarget<AVRSubtarget>();
const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
unsigned BytesPerReg = TRI.getMinimalPhysRegClass(Reg)->getSize();
const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);
unsigned BytesPerReg = TRI.getRegSizeInBits(*RC) / 8;
assert(BytesPerReg <= 2 && "Only 8 and 16 bit regs are supported.");
unsigned RegIdx = ByteNumber / BytesPerReg;


@ -239,7 +239,7 @@ bool AVRFrameLowering::spillCalleeSavedRegisters(
unsigned Reg = CSI[i - 1].getReg();
bool IsNotLiveIn = !MBB.isLiveIn(Reg);
assert(TRI->getMinimalPhysRegClass(Reg)->getSize() == 1 &&
assert(TRI->getRegSizeInBits(*TRI->getMinimalPhysRegClass(Reg)) == 8 &&
"Invalid register size");
// Add the callee-saved register as live-in only if it is not already a
@ -277,7 +277,7 @@ bool AVRFrameLowering::restoreCalleeSavedRegisters(
for (const CalleeSavedInfo &CCSI : CSI) {
unsigned Reg = CCSI.getReg();
assert(TRI->getMinimalPhysRegClass(Reg)->getSize() == 1 &&
assert(TRI->getRegSizeInBits(*TRI->getMinimalPhysRegClass(Reg)) == 8 &&
"Invalid register size");
BuildMI(MBB, MI, DL, TII.get(AVR::POPRd), Reg);


@ -347,7 +347,7 @@ uint16_t BT::MachineEvaluator::getRegBitWidth(const RegisterRef &RR) const {
unsigned PhysS = (RR.Sub == 0) ? PhysR : TRI.getSubReg(PhysR, RR.Sub);
const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(PhysS);
uint16_t BW = RC->getSize()*8;
uint16_t BW = TRI.getRegSizeInBits(*RC);
return BW;
}


@ -286,9 +286,9 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
const MCRegisterInfo *RI = OutStreamer->getContext().getRegisterInfo();
const MachineFunction &MF = *MI.getParent()->getParent();
const auto &HST = MF.getSubtarget<HexagonSubtarget>();
unsigned VectorSize = HST.useHVXSglOps()
? Hexagon::VectorRegsRegClass.getSize()
: Hexagon::VectorRegs128BRegClass.getSize();
const auto &VecRC = HST.useHVXSglOps() ? Hexagon::VectorRegsRegClass
: Hexagon::VectorRegs128BRegClass;
unsigned VectorSize = HST.getRegisterInfo()->getSpillSize(VecRC);
switch (Inst.getOpcode()) {
default: return;


@ -407,7 +407,7 @@ bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR,
const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg);
if (RR.Sub == 0) {
Begin = 0;
Width = RC->getSize()*8;
Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC);
return true;
}
@ -417,7 +417,7 @@ bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR,
case Hexagon::DoubleRegsRegClassID:
case Hexagon::VecDblRegsRegClassID:
case Hexagon::VecDblRegs128BRegClassID:
Width = RC->getSize()*8 / 2;
Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 2;
if (RR.Sub == Hexagon::isub_hi || RR.Sub == Hexagon::vsub_hi)
Begin = Width;
break;
@ -1054,8 +1054,8 @@ namespace {
class RedundantInstrElimination : public Transformation {
public:
RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
MachineRegisterInfo &mri)
: Transformation(true), HII(hii), MRI(mri), BT(bt) {}
const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
: Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}
bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
@ -1070,6 +1070,7 @@ namespace {
bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);
const HexagonInstrInfo &HII;
const HexagonRegisterInfo &HRI;
MachineRegisterInfo &MRI;
BitTracker &BT;
};
@ -1262,7 +1263,7 @@ bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
assert(MI.getOperand(OpN).isReg());
BitTracker::RegisterRef RR = MI.getOperand(OpN);
const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
uint16_t Width = RC->getSize()*8;
uint16_t Width = HRI.getRegSizeInBits(*RC);
if (!GotBits)
T.set(Begin, Begin+Width);
@ -2651,7 +2652,7 @@ bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
Changed |= visitBlock(Entry, ImmG, AIG);
RegisterSet ARE; // Available registers for RIE.
RedundantInstrElimination RIE(BT, HII, MRI);
RedundantInstrElimination RIE(BT, HII, HRI, MRI);
bool Ried = visitBlock(Entry, RIE, ARE);
if (Ried) {
Changed = true;


@ -559,10 +559,10 @@ unsigned HexagonExpandCondsets::getCondTfrOpcode(const MachineOperand &SO,
}
unsigned PhysS = (RS.Sub == 0) ? PhysR : TRI->getSubReg(PhysR, RS.Sub);
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysS);
switch (RC->getSize()) {
case 4:
switch (TRI->getRegSizeInBits(*RC)) {
case 32:
return IfTrue ? A2_tfrt : A2_tfrf;
case 8:
case 64:
return IfTrue ? A2_tfrpt : A2_tfrpf;
}
llvm_unreachable("Invalid register operand");


@ -1425,7 +1425,7 @@ bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
if (!SRegs[S->Reg])
continue;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(S->Reg);
int FI = MFI.CreateFixedSpillStackObject(RC->getSize(), S->Offset);
int FI = MFI.CreateFixedSpillStackObject(TRI->getSpillSize(*RC), S->Offset);
MinOffset = std::min(MinOffset, S->Offset);
CSI.push_back(CalleeSavedInfo(S->Reg, FI));
SRegs[S->Reg] = false;
@ -1437,11 +1437,12 @@ bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
unsigned R = x;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(R);
int Off = MinOffset - RC->getSize();
unsigned Align = std::min(RC->getAlignment(), getStackAlignment());
unsigned Size = TRI->getSpillSize(*RC);
int Off = MinOffset - Size;
unsigned Align = std::min(TRI->getSpillAlignment(*RC), getStackAlignment());
assert(isPowerOf2_32(Align));
Off &= -Align;
int FI = MFI.CreateFixedSpillStackObject(RC->getSize(), Off);
int FI = MFI.CreateFixedSpillStackObject(Size, Off);
MinOffset = std::min(MinOffset, Off);
CSI.push_back(CalleeSavedInfo(R, FI));
SRegs[R] = false;
@ -1677,10 +1678,10 @@ bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
int FI = MI->getOperand(0).getIndex();
bool Is128B = HST.useHVXDblOps();
auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
: &Hexagon::VectorRegs128BRegClass;
unsigned Size = RC->getSize();
unsigned NeedAlign = RC->getAlignment();
const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
: Hexagon::VectorRegs128BRegClass;
unsigned Size = HRI.getSpillSize(RC);
unsigned NeedAlign = HRI.getSpillAlignment(RC);
unsigned HasAlign = MFI.getObjectAlignment(FI);
unsigned StoreOpc;
@ -1734,10 +1735,10 @@ bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B,
int FI = MI->getOperand(1).getIndex();
bool Is128B = HST.useHVXDblOps();
auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
: &Hexagon::VectorRegs128BRegClass;
unsigned Size = RC->getSize();
unsigned NeedAlign = RC->getAlignment();
const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
: Hexagon::VectorRegs128BRegClass;
unsigned Size = HRI.getSpillSize(RC);
unsigned NeedAlign = HRI.getSpillAlignment(RC);
unsigned HasAlign = MFI.getObjectAlignment(FI);
unsigned LoadOpc;
@ -1777,16 +1778,16 @@ bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B,
if (!MI->getOperand(0).isFI())
return false;
auto &HRI = *HST.getRegisterInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned SrcR = MI->getOperand(2).getReg();
bool IsKill = MI->getOperand(2).isKill();
int FI = MI->getOperand(0).getIndex();
bool Is128B = HST.useHVXDblOps();
auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
: &Hexagon::VectorRegs128BRegClass;
unsigned NeedAlign = RC->getAlignment();
const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
: Hexagon::VectorRegs128BRegClass;
unsigned NeedAlign = HRI.getSpillAlignment(RC);
unsigned HasAlign = MFI.getObjectAlignment(FI);
unsigned StoreOpc;
@ -1815,15 +1816,15 @@ bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B,
if (!MI->getOperand(1).isFI())
return false;
auto &HRI = *HST.getRegisterInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned DstR = MI->getOperand(0).getReg();
int FI = MI->getOperand(1).getIndex();
bool Is128B = HST.useHVXDblOps();
auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
: &Hexagon::VectorRegs128BRegClass;
unsigned NeedAlign = RC->getAlignment();
const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
: Hexagon::VectorRegs128BRegClass;
unsigned NeedAlign = HRI.getSpillAlignment(RC);
unsigned HasAlign = MFI.getObjectAlignment(FI);
unsigned LoadOpc;
@ -1932,7 +1933,7 @@ void HexagonFrameLowering::determineCalleeSaves(MachineFunction &MF,
if (!needToReserveScavengingSpillSlots(MF, HRI, RC))
continue;
unsigned Num = RC == &Hexagon::IntRegsRegClass ? NumberScavengerSlots : 1;
unsigned S = RC->getSize(), A = RC->getAlignment();
unsigned S = HRI.getSpillSize(*RC), A = HRI.getSpillAlignment(*RC);
for (unsigned i = 0; i < Num; i++) {
int NewFI = MFI.CreateSpillStackObject(S, A);
RS->addScavengingFrameIndex(NewFI);

View File

@ -273,9 +273,9 @@ void MipsAsmPrinter::printSavedRegsBitmask() {
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
// size of stack area to which FP callee-saved regs are saved.
unsigned CPURegSize = Mips::GPR32RegClass.getSize();
unsigned FGR32RegSize = Mips::FGR32RegClass.getSize();
unsigned AFGR64RegSize = Mips::AFGR64RegClass.getSize();
unsigned CPURegSize = TRI->getRegSizeInBits(Mips::GPR32RegClass) / 8;
unsigned FGR32RegSize = TRI->getRegSizeInBits(Mips::FGR32RegClass) / 8;
unsigned AFGR64RegSize = TRI->getRegSizeInBits(Mips::AFGR64RegClass) / 8;
bool HasAFGR64Reg = false;
unsigned CSFPRegsSize = 0;


@ -119,7 +119,7 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
// Conservatively assume all callee-saved registers will be saved.
for (const MCPhysReg *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize();
unsigned Size = TRI.getSpillSize(*TRI.getMinimalPhysRegClass(*R));
Offset = alignTo(Offset + Size, Size);
}


@ -53,14 +53,15 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() {
}
void MipsFunctionInfo::createEhDataRegsFI() {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
for (int I = 0; I < 4; ++I) {
const TargetRegisterClass *RC =
const TargetRegisterClass &RC =
static_cast<const MipsTargetMachine &>(MF.getTarget()).getABI().IsN64()
? &Mips::GPR64RegClass
: &Mips::GPR32RegClass;
? Mips::GPR64RegClass
: Mips::GPR32RegClass;
EhDataRegFI[I] = MF.getFrameInfo().CreateStackObject(RC->getSize(),
RC->getAlignment(), false);
EhDataRegFI[I] = MF.getFrameInfo().CreateStackObject(TRI.getSpillSize(RC),
TRI.getSpillAlignment(RC), false);
}
}
@ -69,11 +70,12 @@ void MipsFunctionInfo::createISRRegFI() {
// The current implementation only supports Mips32r2+ not Mips64rX. Status
// is always 32 bits, ErrorPC is 32 or 64 bits dependent on architecture,
// however Mips32r2+ is the supported architecture.
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
const TargetRegisterClass &RC = Mips::GPR32RegClass;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
for (int I = 0; I < 2; ++I)
ISRDataRegFI[I] = MF.getFrameInfo().CreateStackObject(
RC->getSize(), RC->getAlignment(), false);
TRI.getSpillSize(RC), TRI.getSpillAlignment(RC), false);
}
bool MipsFunctionInfo::isEhDataRegFI(int FI) const {
@ -93,9 +95,10 @@ MachinePointerInfo MipsFunctionInfo::callPtrInfo(const GlobalValue *GV) {
}
int MipsFunctionInfo::getMoveF64ViaSpillFI(const TargetRegisterClass *RC) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
if (MoveF64ViaSpillFI == -1) {
MoveF64ViaSpillFI = MF.getFrameInfo().CreateStackObject(
RC->getSize(), RC->getAlignment(), false);
TRI.getSpillSize(*RC), TRI.getSpillAlignment(*RC), false);
}
return MoveF64ViaSpillFI;
}


@ -260,7 +260,8 @@ bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
// copy dst_hi, $vr1
unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
const TargetRegisterClass *DstRC = RegInfo.getMinimalPhysRegClass(Dst);
unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16;
const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
unsigned VR0 = MRI.createVirtualRegister(RC);
unsigned VR1 = MRI.createVirtualRegister(RC);
@ -858,6 +859,7 @@ void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
BitVector &SavedRegs,
RegScavenger *RS) const {
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
MipsABIInfo ABI = STI.getABI();
unsigned FP = ABI.GetFramePtr();
@ -883,10 +885,11 @@ void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
if (ExpandPseudo(MF).expand()) {
// The spill slot should be half the size of the accumulator. If target is
// mips64, it should be 64-bit, otherwise it should be 32-bit.
const TargetRegisterClass *RC = STI.hasMips64() ?
&Mips::GPR64RegClass : &Mips::GPR32RegClass;
int FI = MF.getFrameInfo().CreateStackObject(RC->getSize(),
RC->getAlignment(), false);
const TargetRegisterClass &RC = STI.hasMips64() ?
Mips::GPR64RegClass : Mips::GPR32RegClass;
int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
TRI->getSpillAlignment(RC),
false);
RS->addScavengingFrameIndex(FI);
}
@ -897,10 +900,11 @@ void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
if (isInt<16>(MaxSPOffset))
return;
const TargetRegisterClass *RC =
ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
int FI = MF.getFrameInfo().CreateStackObject(RC->getSize(),
RC->getAlignment(), false);
const TargetRegisterClass &RC =
ABI.ArePtrs64bit() ? Mips::GPR64RegClass : Mips::GPR32RegClass;
int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
TRI->getSpillAlignment(RC),
false);
RS->addScavengingFrameIndex(FI);
}


@ -567,8 +567,8 @@ MipsSEInstrInfo::compareOpndSize(unsigned Opc,
const MCInstrDesc &Desc = get(Opc);
assert(Desc.NumOperands == 2 && "Unary instruction expected.");
const MipsRegisterInfo *RI = &getRegisterInfo();
unsigned DstRegSize = getRegClass(Desc, 0, RI, MF)->getSize();
unsigned SrcRegSize = getRegClass(Desc, 1, RI, MF)->getSize();
unsigned DstRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 0, RI, MF));
unsigned SrcRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 1, RI, MF));
return std::make_pair(DstRegSize > SrcRegSize, DstRegSize < SrcRegSize);
}


@ -38,7 +38,7 @@ void NVPTXInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
const TargetRegisterClass *DestRC = MRI.getRegClass(DestReg);
const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
if (DestRC->getSize() != SrcRC->getSize())
if (RegInfo.getRegSizeInBits(*DestRC) != RegInfo.getRegSizeInBits(*SrcRC))
report_fatal_error("Copy one register into another with a different width");
unsigned Op;


@ -1898,12 +1898,13 @@ PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF,
MachineFrameInfo &MFI = MF.getFrameInfo();
if (MFI.hasVarSizedObjects() || spillsCR(MF) || spillsVRSAVE(MF) ||
hasNonRISpills(MF) || (hasSpills(MF) && !isInt<16>(StackSize))) {
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
const TargetRegisterClass *RC = Subtarget.isPPC64() ? G8RC : GPRC;
RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(),
RC->getAlignment(),
false));
const TargetRegisterClass &GPRC = PPC::GPRCRegClass;
const TargetRegisterClass &G8RC = PPC::G8RCRegClass;
const TargetRegisterClass &RC = Subtarget.isPPC64() ? G8RC : GPRC;
const TargetRegisterInfo &TRI = *Subtarget.getRegisterInfo();
unsigned Size = TRI.getSpillSize(RC);
unsigned Align = TRI.getSpillAlignment(RC);
RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
// Might we have over-aligned allocas?
bool HasAlVars = MFI.hasVarSizedObjects() &&
@ -1911,9 +1912,7 @@ PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF,
// These kinds of spills might need two registers.
if (spillsCR(MF) || spillsVRSAVE(MF) || HasAlVars)
RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(),
RC->getAlignment(),
false));
RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
}
}


@ -1115,10 +1115,9 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
return nullptr;
unsigned OpNum = Ops[0];
assert(Size ==
MF.getRegInfo()
.getRegClass(MI.getOperand(OpNum).getReg())
->getSize() &&
assert(Size * 8 ==
TRI->getRegSizeInBits(*MF.getRegInfo()
.getRegClass(MI.getOperand(OpNum).getReg())) &&
"Invalid size combination");
if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&


@ -2149,7 +2149,8 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
if (!LHSReg || !RHSReg)
return false;
unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
unsigned Opc = X86::getCMovFromCond(CC, TRI.getRegSizeInBits(*RC)/8);
unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
LHSReg, LHSIsKill);
updateValueMap(I, ResultReg);


@ -1924,14 +1924,15 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
continue;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
unsigned Size = TRI->getSpillSize(*RC);
unsigned Align = TRI->getSpillAlignment(*RC);
// ensure alignment
SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
SpillSlotOffset -= std::abs(SpillSlotOffset) % Align;
// spill into slot
SpillSlotOffset -= RC->getSize();
int SlotIndex =
MFI.CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
SpillSlotOffset -= Size;
int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
CSI[i - 1].setFrameIdx(SlotIndex);
MFI.ensureMaxAlignment(RC->getAlignment());
MFI.ensureMaxAlignment(Align);
}
return true;


@ -6284,9 +6284,11 @@ void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
ArrayRef<MachineOperand> Cond, unsigned TrueReg,
unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
assert(Cond.size() == 1 && "Invalid Cond array");
unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(),
MRI.getRegClass(DstReg)->getSize(),
TRI.getRegSizeInBits(RC) / 8,
false /*HasMemoryOperand*/);
BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
}
@ -6557,7 +6559,7 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
bool HasAVX512 = STI.hasAVX512();
bool HasVLX = STI.hasVLX();
switch (RC->getSize()) {
switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
default:
llvm_unreachable("Unknown spill size");
case 1:
@ -6717,9 +6719,9 @@ void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= RC->getSize() &&
assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
"Stack slot too small for store");
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
@ -6736,7 +6738,8 @@ void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = MMOBegin != MMOEnd &&
(*MMOBegin)->getAlignment() >= Alignment;
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
@ -6756,7 +6759,7 @@ void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
@ -6771,7 +6774,8 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = MMOBegin != MMOEnd &&
(*MMOBegin)->getAlignment() >= Alignment;
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
@ -7230,7 +7234,8 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
NewOpc = getSETFromCond(NewCC, HasMemoryOperand);
else {
unsigned DstReg = Instr.getOperand(0).getReg();
NewOpc = getCMovFromCond(NewCC, MRI->getRegClass(DstReg)->getSize(),
const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
NewOpc = getCMovFromCond(NewCC, TRI->getRegSizeInBits(*DstRC)/8,
HasMemoryOperand);
}
@ -7758,7 +7763,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
unsigned DstIdx = (Imm >> 4) & 3;
unsigned SrcIdx = (Imm >> 6) & 3;
unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if (Size <= RCSize && 4 <= Align) {
int PtrOffset = SrcIdx * 4;
unsigned NewImm = (DstIdx << 4) | ZMask;
@ -7780,7 +7787,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
// To fold the load, adjust the pointer to the upper and use (V)MOVLPS.
// TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
if (OpNum == 2) {
unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if (Size <= RCSize && 8 <= Align) {
unsigned NewOpCode =
(MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
@ -7869,7 +7878,10 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
return nullptr;
bool NarrowToMOV32rm = false;
if (Size) {
unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum,
&RI, MF);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if (Size < RCSize) {
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@ -8310,11 +8322,13 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
const MachineFunction &MF) {
unsigned Opc = LoadMI.getOpcode();
unsigned UserOpc = UserMI.getOpcode();
unsigned RegSize =
MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg())->getSize();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC =
MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
unsigned RegSize = TRI.getRegSizeInBits(*RC);
if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm) &&
RegSize > 4) {
RegSize > 32) {
// These instructions only load 32 bits, we can't fold them if the
// destination register is wider than 32 bits (4 bytes), and its user
// instruction isn't scalar (SS).
@ -8365,7 +8379,7 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
}
if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm) &&
RegSize > 8) {
RegSize > 64) {
// These instructions only load 64 bits, we can't fold them if the
// destination register is wider than 64 bits (8 bytes), and its user
// instruction isn't scalar (SD).
@ -8710,6 +8724,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
bool FoldedStore = I->second.second & TB_FOLDED_STORE;
const MCInstrDesc &MCID = get(Opc);
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
unsigned NumDefs = MCID.NumDefs;
std::vector<SDValue> AddrOps;
@ -8744,7 +8759,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
return false;
// FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
// memory access is slow above.
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = (*MMOs.first) &&
(*MMOs.first)->getAlignment() >= Alignment;
Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl,
@ -8789,7 +8804,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
return false;
// FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
// memory access is slow above.
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = (*MMOs.first) &&
(*MMOs.first)->getAlignment() >= Alignment;
SDNode *Store =


@ -137,25 +137,29 @@ X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
case X86::FR32RegClassID:
case X86::FR64RegClassID:
// If AVX-512 isn't supported we should only inflate to these classes.
if (!Subtarget.hasAVX512() && Super->getSize() == RC->getSize())
if (!Subtarget.hasAVX512() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::VR128RegClassID:
case X86::VR256RegClassID:
// If VLX isn't supported we should only inflate to these classes.
if (!Subtarget.hasVLX() && Super->getSize() == RC->getSize())
if (!Subtarget.hasVLX() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::VR128XRegClassID:
case X86::VR256XRegClassID:
// If VLX isn't supported we shouldn't inflate to these classes.
if (Subtarget.hasVLX() && Super->getSize() == RC->getSize())
if (Subtarget.hasVLX() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::FR32XRegClassID:
case X86::FR64XRegClassID:
// If AVX-512 isn't supported we shouldn't inflate to these classes.
if (Subtarget.hasAVX512() && Super->getSize() == RC->getSize())
if (Subtarget.hasAVX512() &&
getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::GR8RegClassID:
@ -168,7 +172,7 @@ X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
case X86::VR512RegClassID:
// Don't return a super-class that would shrink the spill size.
// That can happen with the vector and float classes.
if (Super->getSize() == RC->getSize())
if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
}
Super = *I++;


@ -575,18 +575,17 @@ processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS) const {
assert(RS && "requiresRegisterScavenging failed");
MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
const TargetRegisterClass &RC = XCore::GRRegsRegClass;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
// Reserve slots close to SP or frame pointer for Scavenging spills.
// When using SP for small frames, we don't need any scratch registers.
// When using SP for large frames, we may need 2 scratch registers.
// When using FP, for large or small frames, we may need 1 scratch register.
unsigned Size = TRI.getSpillSize(RC);
unsigned Align = TRI.getSpillAlignment(RC);
if (XFI->isLargeFrame(MF) || hasFP(MF))
RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(),
RC->getAlignment(),
false));
RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
if (XFI->isLargeFrame(MF) && !hasFP(MF))
RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(),
RC->getAlignment(),
false));
RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
}


@ -10,6 +10,7 @@
#include "XCoreMachineFunctionInfo.h"
#include "XCoreInstrInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
@ -35,13 +36,15 @@ int XCoreFunctionInfo::createLRSpillSlot(MachineFunction &MF) {
if (LRSpillSlotSet) {
return LRSpillSlot;
}
const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
const TargetRegisterClass &RC = XCore::GRRegsRegClass;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
MachineFrameInfo &MFI = MF.getFrameInfo();
if (! MF.getFunction()->isVarArg()) {
// A fixed offset of 0 allows us to save / restore LR using entsp / retsp.
LRSpillSlot = MFI.CreateFixedObject(RC->getSize(), 0, true);
LRSpillSlot = MFI.CreateFixedObject(TRI.getSpillSize(RC), 0, true);
} else {
LRSpillSlot = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true);
LRSpillSlot = MFI.CreateStackObject(TRI.getSpillSize(RC),
TRI.getSpillAlignment(RC), true);
}
LRSpillSlotSet = true;
return LRSpillSlot;
@ -51,9 +54,11 @@ int XCoreFunctionInfo::createFPSpillSlot(MachineFunction &MF) {
if (FPSpillSlotSet) {
return FPSpillSlot;
}
const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
const TargetRegisterClass &RC = XCore::GRRegsRegClass;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
MachineFrameInfo &MFI = MF.getFrameInfo();
FPSpillSlot = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true);
FPSpillSlot = MFI.CreateStackObject(TRI.getSpillSize(RC),
TRI.getSpillAlignment(RC), true);
FPSpillSlotSet = true;
return FPSpillSlot;
}
@ -62,10 +67,13 @@ const int* XCoreFunctionInfo::createEHSpillSlot(MachineFunction &MF) {
if (EHSpillSlotSet) {
return EHSpillSlot;
}
const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
const TargetRegisterClass &RC = XCore::GRRegsRegClass;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
MachineFrameInfo &MFI = MF.getFrameInfo();
EHSpillSlot[0] = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true);
EHSpillSlot[1] = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true);
unsigned Size = TRI.getSpillSize(RC);
unsigned Align = TRI.getSpillAlignment(RC);
EHSpillSlot[0] = MFI.CreateStackObject(Size, Align, true);
EHSpillSlot[1] = MFI.CreateStackObject(Size, Align, true);
EHSpillSlotSet = true;
return EHSpillSlot;
}