archived-llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
Lewis Revill f5bddce9ea [RISCV] Add pseudo instruction for calls with explicit register
This patch adds the PseudoCALLReg instruction which allows using an
explicit register operand as the destination for the return address.

GCC can successfully parse this form of the call instruction, which
would be used for calls to functions that do not use ra as the return
address register, such as the __riscv_save libcalls. This patch forms
the first part of an implementation of -msave-restore for RISC-V.

Differential Revision: https://reviews.llvm.org/D62685

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@364403 91177308-0d34-0410-b5e6-96231b3b80d8
2019-06-26 10:35:58 +00:00
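
(Not part of the commit: a minimal illustrative sketch of how a caller might
emit such a call. The variable names, the libcall name and the use of
RISCVII::MO_CALL below are assumptions based on how PseudoCALL is emitted
elsewhere, not code from this patch.)

  // Hypothetical: call __riscv_save_0 with t0 (x5) receiving the return
  // address instead of ra, i.e. the asm form "call t0, __riscv_save_0".
  BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoCALLReg), RISCV::X5)
      .addExternalSymbol("__riscv_save_0", RISCVII::MO_CALL);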

//===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISCV implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "RISCVInstrInfo.h"
#include "RISCV.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_INSTRINFO_CTOR_DTOR
#include "RISCVGenInstrInfo.inc"

using namespace llvm;

RISCVInstrInfo::RISCVInstrInfo()
    : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP) {}
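
// If the given instruction is a load from a stack slot with a zero immediate
// offset, return the register it defines and set FrameIndex to the slot's
// index; otherwise return 0.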
unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::LB:
  case RISCV::LBU:
  case RISCV::LH:
  case RISCV::LHU:
  case RISCV::LW:
  case RISCV::FLW:
  case RISCV::LWU:
  case RISCV::LD:
  case RISCV::FLD:
    break;
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}
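
// If the given instruction is a store to a stack slot with a zero immediate
// offset, return the register being stored and set FrameIndex to the slot's
// index; otherwise return 0.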
unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::SB:
  case RISCV::SH:
  case RISCV::SW:
  case RISCV::FSW:
  case RISCV::SD:
  case RISCV::FSD:
    break;
  }

  // Store operands are (value, base, offset), matching the order used by
  // storeRegToStackSlot below.
  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}
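
// Copy SrcReg to DstReg: ADDI DstReg, SrcReg, 0 for GPRs, and FSGNJ_S/FSGNJ_D
// (sign-injection with identical operands) for FPRs.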
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, unsigned DstReg,
                                 unsigned SrcReg, bool KillSrc) const {
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // FPR->FPR copies
  unsigned Opc;
  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_S;
  else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
    Opc = RISCV::FSGNJ_D;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addReg(SrcReg, getKillRegState(KillSrc));
}
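
// Spill SrcReg to stack slot FI using the store appropriate to the register
// class (SW/SD for GPRs depending on XLEN, FSW/FSD for FPRs).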
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC))
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
  else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FSW;
  else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FSD;
  else
    llvm_unreachable("Can't store this register to stack slot");

  BuildMI(MBB, I, DL, get(Opcode))
      .addReg(SrcReg, getKillRegState(IsKill))
      .addFrameIndex(FI)
      .addImm(0);
}
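
// Reload DstReg from stack slot FI using the load appropriate to the register
// class (LW/LD for GPRs depending on XLEN, FLW/FLD for FPRs).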
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          unsigned DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC))
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
  else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FLW;
  else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
    Opcode = RISCV::FLD;
  else
    llvm_unreachable("Can't load this register from stack slot");

  BuildMI(MBB, I, DL, get(Opcode), DstReg).addFrameIndex(FI).addImm(0);
}
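
// Materialize the 32-bit constant Val into DstReg as LUI+ADDI. Adding 0x800
// before taking the upper 20 bits compensates for ADDI sign-extending its
// 12-bit immediate.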
void RISCVInstrInfo::movImm32(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
                              const DebugLoc &DL, unsigned DstReg, uint64_t Val,
                              MachineInstr::MIFlag Flag) const {
  assert(isInt<32>(Val) && "Can only materialize 32-bit constants");

  // TODO: If the value can be materialized using only one instruction, only
  // insert a single instruction.
  uint64_t Hi20 = ((Val + 0x800) >> 12) & 0xfffff;
  uint64_t Lo12 = SignExtend64<12>(Val);
  BuildMI(MBB, MBBI, DL, get(RISCV::LUI), DstReg)
      .addImm(Hi20)
      .setMIFlag(Flag);
  BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
      .addReg(DstReg, RegState::Kill)
      .addImm(Lo12)
      .setMIFlag(Flag);
}

// The contents of values added to Cond are not examined outside of
// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
// push BranchOpcode, Reg1, Reg2.
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  assert(LastInst.getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
  Target = LastInst.getOperand(2).getMBB();
  Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
  Cond.push_back(LastInst.getOperand(0));
  Cond.push_back(LastInst.getOperand(1));
}

static unsigned getOppositeBranchOpcode(int Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Unrecognized conditional branch");
  case RISCV::BEQ:
    return RISCV::BNE;
  case RISCV::BNE:
    return RISCV::BEQ;
  case RISCV::BLT:
    return RISCV::BGE;
  case RISCV::BGE:
    return RISCV::BLT;
  case RISCV::BLTU:
    return RISCV::BGEU;
  case RISCV::BGEU:
    return RISCV::BLTU;
  }
}
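
// Analyze the branches at the end of MBB, filling in TBB, FBB and Cond as
// described for TargetInstrInfo::analyzeBranch. Returns true if the block
// ends in something this implementation cannot reason about (an indirect
// branch or more than two terminators).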
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after
  // FirstUncondOrIndirectBR.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(*std::prev(I), TBB, Cond);
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
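
// Remove up to two branches (an optional conditional branch followed by an
// unconditional branch) from the end of MBB. Returns the number of branches
// removed and, if BytesRemoved is non-null, accumulates their size in bytes.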
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  if (BytesRemoved)
    *BytesRemoved = 0;
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch. Record its size before erasing it, since the
  // instruction is no longer valid afterwards.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();

  I = MBB.end();
  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();
  return 2;
}

// Inserts a branch into the end of the given MachineBasicBlock, returning the
// number of instructions inserted.
unsigned RISCVInstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  if (BytesAdded)
    *BytesAdded = 0;

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISCV branch conditions have two components!");

  // Unconditional branch.
  if (Cond.empty()) {
    MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
    if (BytesAdded)
      *BytesAdded += getInstSizeInBytes(MI);
    return 1;
  }

  // Either a one or two-way conditional branch.
  unsigned Opc = Cond[0].getImm();
  MachineInstr &CondMI =
      *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(CondMI);

  // One-way conditional branch.
  if (!FBB)
    return 1;

  // Two-way conditional branch.
  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(MI);
  return 2;
}
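
// Expand an out-of-range unconditional branch from MBB to DestBB into
// LUI+PseudoBRIND through a scavenged scratch register. MBB must be a freshly
// inserted, empty block with a single predecessor.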
unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                              MachineBasicBlock &DestBB,
                                              const DebugLoc &DL,
                                              int64_t BrOffset,
                                              RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());

  if (TM.isPositionIndependent())
    report_fatal_error("Unable to insert indirect branch");

  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // FIXME: A virtual register must be used initially, as the register
  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
  // uses the same workaround).
  unsigned ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  auto II = MBB.end();

  MachineInstr &LuiMI = *BuildMI(MBB, II, DL, get(RISCV::LUI), ScratchReg)
                             .addMBB(&DestBB, RISCVII::MO_HI);
  BuildMI(MBB, II, DL, get(RISCV::PseudoBRIND))
      .addReg(ScratchReg, RegState::Kill)
      .addMBB(&DestBB, RISCVII::MO_LO);

  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
                                                LuiMI.getIterator(), false, 0);
  MRI.replaceRegWith(ScratchReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);
  return 8;
}
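
// Invert the condition produced by analyzeBranch by replacing the stored
// branch opcode with its opposite. Returns false to indicate success.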
bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  Cond[0].setImm(getOppositeBranchOpcode(Cond[0].getImm()));
  return false;
}
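
// Return the basic block targeted by the given direct branch instruction.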
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}
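
// Conditional branches use a 13-bit signed offset (+/-4 KiB); JAL and
// PseudoBR use a 21-bit signed offset (+/-1 MiB).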
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  // Ideally we could determine the supported branch offset from the
  // RISCVII::FormMask, but this can't be used for Pseudo instructions like
  // PseudoBR.
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  }
}
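
// Return the size of MI in bytes. Call, tail-call and address-loading pseudos
// (including the new PseudoCALLReg) expand to two 4-byte instructions, hence
// 8 bytes; inline asm length is estimated from its string.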
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();

  switch (Opcode) {
  default: { return get(Opcode).getSize(); }
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  case RISCV::PseudoCALLReg:
  case RISCV::PseudoCALL:
  case RISCV::PseudoTAIL:
  case RISCV::PseudoLLA:
  case RISCV::PseudoLA:
  case RISCV::PseudoLA_TLS_IE:
  case RISCV::PseudoLA_TLS_GD:
    return 8;
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }
  }
}
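
// ADDI, ORI and XORI with x0 as the source register just materialize a small
// immediate, so treat them as no more expensive than a register move.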
bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
    return (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0);
  }
  return MI.isAsCheapAsAMove();
}