Revert r155365, r155366, and r155367. All three of these have regression
test suite failures.

The failures occur at each stage, and only get worse, so I'm reverting
all of them.

Please resubmit these patches, one at a time, after verifying that the
regression test suite passes. Never submit a patch without running the
regression test suite.

llvm-svn: 155372
Chandler Carruth 2012-04-23 18:25:57 +00:00
parent 9f4844f7da
commit 9460759e4f
57 changed files with 2485 additions and 13892 deletions

File diff suppressed because it is too large.

View File

@ -28,8 +28,6 @@ add_llvm_target(HexagonCodeGen
HexagonSubtarget.cpp
HexagonTargetMachine.cpp
HexagonTargetObjectFile.cpp
HexagonVLIWPacketizer.cpp
HexagonNewValueJump.cpp
)
add_subdirectory(TargetInfo)

View File

@ -40,8 +40,6 @@ namespace llvm {
FunctionPass *createHexagonHardwareLoops();
FunctionPass *createHexagonPeephole();
FunctionPass *createHexagonFixupHwLoops();
FunctionPass *createHexagonNewValueJump();
FunctionPass *createHexagonPacketizer();
/* TODO: object output.
MCCodeEmitter *createHexagonMCCodeEmitter(const Target &,

View File

@ -28,8 +28,6 @@ def ArchV3 : SubtargetFeature<"v3", "HexagonArchVersion", "V3",
"Hexagon v3">;
def ArchV4 : SubtargetFeature<"v4", "HexagonArchVersion", "V4",
"Hexagon v4">;
def ArchV5 : SubtargetFeature<"v5", "HexagonArchVersion", "V5",
"Hexagon v5">;
//===----------------------------------------------------------------------===//
// Register File, Calling Conv, Instruction Descriptions
@ -54,8 +52,6 @@ class Proc<string Name, ProcessorItineraries Itin,
def : Proc<"hexagonv2", HexagonItineraries, [ArchV2]>;
def : Proc<"hexagonv3", HexagonItineraries, [ArchV2, ArchV3]>;
def : Proc<"hexagonv4", HexagonItinerariesV4, [ArchV2, ArchV3, ArchV4]>;
def : Proc<"hexagonv5", HexagonItinerariesV4, [ArchV2, ArchV3, ArchV4, ArchV5]>;
// Hexagon Uses the MC printer for assembler output, so make sure the TableGen
// AsmWriter bits get associated with the correct class.

View File

@ -13,11 +13,11 @@
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "asm-printer"
#include "Hexagon.h"
#include "HexagonAsmPrinter.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonMCInst.h"
#include "HexagonTargetMachine.h"
#include "HexagonSubtarget.h"
#include "InstPrinter/HexagonInstPrinter.h"
@ -54,7 +54,6 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include <map>
using namespace llvm;
@ -78,7 +77,8 @@ void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) {
default: llvm_unreachable ("<unknown operand type>");
default:
assert(0 && "<unknown operand type>");
case MachineOperand::MO_Register:
O << HexagonInstPrinter::getRegisterName(MO.getReg());
return;
@ -196,45 +196,10 @@ void HexagonAsmPrinter::printPredicateOperand(const MachineInstr *MI,
/// the current output stream.
///
void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) {
if (MI->isBundle()) {
std::vector<const MachineInstr*> BundleMIs;
MCInst MCI;
const MachineBasicBlock *MBB = MI->getParent();
MachineBasicBlock::const_instr_iterator MII = MI;
++MII;
unsigned int IgnoreCount = 0;
while (MII != MBB->end() && MII->isInsideBundle()) {
const MachineInstr *MInst = MII;
if (MInst->getOpcode() == TargetOpcode::DBG_VALUE ||
MInst->getOpcode() == TargetOpcode::IMPLICIT_DEF) {
IgnoreCount++;
++MII;
continue;
}
//BundleMIs.push_back(&*MII);
BundleMIs.push_back(MInst);
++MII;
}
unsigned Size = BundleMIs.size();
assert((Size+IgnoreCount) == MI->getBundleSize() && "Corrupt Bundle!");
for (unsigned Index = 0; Index < Size; Index++) {
HexagonMCInst MCI;
MCI.setStartPacket(Index == 0);
MCI.setEndPacket(Index == (Size-1));
HexagonLowerToMC(BundleMIs[Index], MCI, *this);
OutStreamer.EmitInstruction(MCI);
}
}
else {
HexagonMCInst MCI;
if (MI->getOpcode() == Hexagon::ENDLOOP0) {
MCI.setStartPacket(true);
MCI.setEndPacket(true);
}
HexagonLowerToMC(MI, MCI, *this);
OutStreamer.EmitInstruction(MCI);
}
HexagonLowerToMC(MI, MCI, *this);
OutStreamer.EmitInstruction(MCI);
return;
}
@ -277,17 +242,17 @@ void HexagonAsmPrinter::printJumpTable(const MachineInstr *MI, int OpNo,
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) &&
"Expecting jump table index");
"Expecting jump table index");
// Hexagon_TODO: Do we need name mangling?
O << *GetJTISymbol(MO.getIndex());
}
void HexagonAsmPrinter::printConstantPool(const MachineInstr *MI, int OpNo,
raw_ostream &O) {
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
assert( (MO.getType() == MachineOperand::MO_ConstantPoolIndex) &&
"Expecting constant pool index");
"Expecting constant pool index");
// Hexagon_TODO: Do we need name mangling?
O << *GetCPISymbol(MO.getIndex());

View File

@ -17,8 +17,8 @@
// Hexagon 32-bit C return-value convention.
def RetCC_Hexagon32 : CallingConv<[
CCIfType<[i32, f32], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
CCIfType<[i64, f64], CCAssignToReg<[D0, D1, D2]>>,
CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>,
// Alternatively, they are assigned to the stack in 4-byte aligned units.
CCAssignToStack<4, 4>
@ -27,8 +27,8 @@ def RetCC_Hexagon32 : CallingConv<[
// Hexagon 32-bit C Calling convention.
def CC_Hexagon32 : CallingConv<[
// All arguments get passed in integer registers if there is space.
CCIfType<[f32, i32, i16, i8], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
CCIfType<[f64, i64], CCAssignToReg<[D0, D1, D2]>>,
CCIfType<[i32, i16, i8], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>,
// Alternatively, they are assigned to the stack in 4-byte aligned units.
CCAssignToStack<4, 4>

View File

@ -7,9 +7,9 @@
//
//===----------------------------------------------------------------------===//
// The Hexagon processor has no instructions that load or store predicate
// registers directly. So, when these registers must be spilled a general
// purpose register must be found and the value copied to/from it from/to
// the predicate register. This code currently does not use the register
// registers directly. So, when these registers must be spilled a general
// purpose register must be found and the value copied to/from it from/to
// the predicate register. This code currently does not use the register
// scavenger mechanism available in the allocator. There are two registers
// reserved to allow spilling/restoring predicate registers. One is used to
// hold the predicate value. The other is used when stack frame offsets are
@ -84,7 +84,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
int SrcReg = MI->getOperand(2).getReg();
assert(Hexagon::PredRegsRegClass.contains(SrcReg) &&
"Not a predicate register");
if (!TII->isValidOffset(Hexagon::STriw_indexed, Offset)) {
if (!TII->isValidOffset(Hexagon::STriw, Offset)) {
if (!TII->isValidOffset(Hexagon::ADD_ri, Offset)) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::CONST32_Int_Real),
@ -95,7 +95,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
HEXAGON_RESERVED_REG_2).addReg(SrcReg);
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::STriw_indexed))
TII->get(Hexagon::STriw))
.addReg(HEXAGON_RESERVED_REG_1)
.addImm(0).addReg(HEXAGON_RESERVED_REG_2);
} else {
@ -103,7 +103,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
HEXAGON_RESERVED_REG_2).addReg(SrcReg);
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw_indexed))
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw))
.addReg(HEXAGON_RESERVED_REG_1)
.addImm(0)
.addReg(HEXAGON_RESERVED_REG_2);
@ -111,7 +111,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
} else {
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
HEXAGON_RESERVED_REG_2).addReg(SrcReg);
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw_indexed)).
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw)).
addReg(FP).addImm(Offset).addReg(HEXAGON_RESERVED_REG_2);
}
MII = MBB->erase(MI);

View File

@ -90,9 +90,7 @@ public:
SDNode *SelectMul(SDNode *N);
SDNode *SelectZeroExtend(SDNode *N);
SDNode *SelectIntrinsicWOChain(SDNode *N);
SDNode *SelectIntrinsicWChain(SDNode *N);
SDNode *SelectConstant(SDNode *N);
SDNode *SelectConstantFP(SDNode *N);
SDNode *SelectAdd(SDNode *N);
// Include the pieces autogenerated from the target description.
@ -320,8 +318,6 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) {
else if (LoadedVT == MVT::i32) Opcode = Hexagon::LDriw_indexed;
else if (LoadedVT == MVT::i16) Opcode = Hexagon::LDrih_indexed;
else if (LoadedVT == MVT::i8) Opcode = Hexagon::LDrib_indexed;
else if (LoadedVT == MVT::f32) Opcode = Hexagon::LDriw_indexed_f;
else if (LoadedVT == MVT::f64) Opcode = Hexagon::LDrid_indexed_f;
else assert (0 && "unknown memory type");
// Build indexed load.
@ -379,7 +375,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
};
ReplaceUses(Froms, Tos, 3);
return Result_2;
}
}
SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
@ -640,7 +636,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) {
// Figure out the opcode.
if (StoredVT == MVT::i64) Opcode = Hexagon::STrid;
else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed;
else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw;
else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih;
else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib;
else assert (0 && "unknown memory type");
@ -697,8 +693,6 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST,
else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed;
else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih_indexed;
else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib_indexed;
else if (StoredVT == MVT::f32) Opcode = Hexagon::STriw_indexed_f;
else if (StoredVT == MVT::f64) Opcode = Hexagon::STrid_indexed_f;
else assert (0 && "unknown memory type");
SDValue Ops[] = {SDValue(NewBase,0),
@ -729,7 +723,7 @@ SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) {
if (AM != ISD::UNINDEXED) {
return SelectIndexedStore(ST, dl);
}
return SelectBaseOffsetStore(ST, dl);
}
@ -758,7 +752,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
if (MulOp0.getOpcode() == ISD::SIGN_EXTEND) {
SDValue Sext0 = MulOp0.getOperand(0);
if (Sext0.getNode()->getValueType(0) != MVT::i32) {
return SelectCode(N);
SelectCode(N);
}
OP0 = Sext0;
@ -767,7 +761,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
if (LD->getMemoryVT() != MVT::i32 ||
LD->getExtensionType() != ISD::SEXTLOAD ||
LD->getAddressingMode() != ISD::UNINDEXED) {
return SelectCode(N);
SelectCode(N);
}
SDValue Chain = LD->getChain();
@ -1164,25 +1158,6 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
return SelectCode(N);
}
//
// Map floating point constant values.
//
SDNode *HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
APFloat APF = CN->getValueAPF();
if (N->getValueType(0) == MVT::f32) {
return CurDAG->getMachineNode(Hexagon::TFRI_f, dl, MVT::f32,
CurDAG->getTargetConstantFP(APF.convertToFloat(), MVT::f32));
}
else if (N->getValueType(0) == MVT::f64) {
return CurDAG->getMachineNode(Hexagon::CONST64_Float_Real, dl, MVT::f64,
CurDAG->getTargetConstantFP(APF.convertToDouble(), MVT::f64));
}
return SelectCode(N);
}
//
// Map predicate true (encoded as -1 in LLVM) to a XOR.
@ -1259,9 +1234,6 @@ SDNode *HexagonDAGToDAGISel::Select(SDNode *N) {
case ISD::Constant:
return SelectConstant(N);
case ISD::ConstantFP:
return SelectConstantFP(N);
case ISD::ADD:
return SelectAdd(N);

View File

@ -103,12 +103,12 @@ CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
return false;
}
if (LocVT == MVT::i32 || LocVT == MVT::f32) {
if (LocVT == MVT::i32) {
ofst = State.AllocateStack(4, 4);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
return false;
}
if (LocVT == MVT::i64 || LocVT == MVT::f64) {
if (LocVT == MVT::i64) {
ofst = State.AllocateStack(8, 8);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
return false;
@ -142,12 +142,12 @@ CC_Hexagon (unsigned ValNo, MVT ValVT,
LocInfo = CCValAssign::AExt;
}
if (LocVT == MVT::i32 || LocVT == MVT::f32) {
if (LocVT == MVT::i32) {
if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
return false;
}
if (LocVT == MVT::i64 || LocVT == MVT::f64) {
if (LocVT == MVT::i64) {
if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
return false;
}
@ -217,12 +217,12 @@ static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
LocInfo = CCValAssign::AExt;
}
if (LocVT == MVT::i32 || LocVT == MVT::f32) {
if (LocVT == MVT::i32) {
if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
return false;
}
if (LocVT == MVT::i64 || LocVT == MVT::f64) {
if (LocVT == MVT::i64) {
if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
return false;
}
@ -234,7 +234,7 @@ static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
if (LocVT == MVT::i32 || LocVT == MVT::f32) {
if (LocVT == MVT::i32) {
if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
@ -249,7 +249,7 @@ static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
if (LocVT == MVT::i64 || LocVT == MVT::f64) {
if (LocVT == MVT::i64) {
if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
@ -839,8 +839,7 @@ const {
// 1. int, long long, ptr args that get allocated in register.
// 2. Large struct that gets an register to put its address in.
EVT RegVT = VA.getLocVT();
if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
RegVT == MVT::i32 || RegVT == MVT::f32) {
if (RegVT == MVT::i8 || RegVT == MVT::i16 || RegVT == MVT::i32) {
unsigned VReg =
RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
@ -919,33 +918,14 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
SDValue
HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
SDValue CC = Op.getOperand(4);
SDValue TrueVal = Op.getOperand(2);
SDValue FalseVal = Op.getOperand(3);
DebugLoc dl = Op.getDebugLoc();
SDNode* OpNode = Op.getNode();
EVT SVT = OpNode->getValueType(0);
SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i1, LHS, RHS, CC);
return DAG.getNode(ISD::SELECT, dl, SVT, Cond, TrueVal, FalseVal);
}
SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
EVT ValTy = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
SDValue Res;
if (CP->isMachineConstantPoolEntry())
Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy,
CP->getAlignment());
else
Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy,
CP->getAlignment());
return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res);
SDValue Cond = DAG.getNode(ISD::SETCC, Op.getDebugLoc(), MVT::i1,
Op.getOperand(2), Op.getOperand(3),
Op.getOperand(4));
return DAG.getNode(ISD::SELECT, Op.getDebugLoc(), OpNode->getValueType(0),
Cond, Op.getOperand(0),
Op.getOperand(1));
}
SDValue
@ -1030,17 +1010,10 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
: TargetLowering(targetmachine, new HexagonTargetObjectFile()),
TM(targetmachine) {
const HexagonRegisterInfo* QRI = TM.getRegisterInfo();
// Set up the register classes.
addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
if (QRI->Subtarget.hasV5TOps()) {
addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
}
addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
computeRegisterProperties();
@ -1055,16 +1028,32 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
//
// Library calls for unsupported operations
//
setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
setOperationAction(ISD::SDIV, MVT::i32, Expand);
setLibcallName(RTLIB::SREM_I32, "__hexagon_umodsi3");
@ -1093,185 +1082,93 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
setOperationAction(ISD::FDIV, MVT::f64, Expand);
setOperationAction(ISD::FSQRT, MVT::f32, Expand);
setOperationAction(ISD::FSQRT, MVT::f64, Expand);
setOperationAction(ISD::FSIN, MVT::f32, Expand);
setOperationAction(ISD::FSIN, MVT::f64, Expand);
setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
if (QRI->Subtarget.hasV5TOps()) {
// Hexagon V5 Support.
setOperationAction(ISD::FADD, MVT::f32, Legal);
setOperationAction(ISD::FADD, MVT::f64, Legal);
setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);
setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);
setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
setOperationAction(ISD::FADD, MVT::f64, Expand);
setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);
setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
setOperationAction(ISD::FADD, MVT::f32, Expand);
setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);
setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
setOperationAction(ISD::FADD, MVT::f32, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
setOperationAction(ISD::FABS, MVT::f32, Legal);
setOperationAction(ISD::FABS, MVT::f64, Expand);
setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
setOperationAction(ISD::FNEG, MVT::f32, Legal);
setOperationAction(ISD::FNEG, MVT::f64, Expand);
} else {
setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
// Expand fp<->uint.
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
setOperationAction(ISD::FADD, MVT::f64, Expand);
setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
setOperationAction(ISD::FADD, MVT::f32, Expand);
setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
setOperationAction(ISD::FMUL, MVT::f64, Expand);
setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
setOperationAction(ISD::MUL, MVT::f32, Expand);
setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
setOperationAction(ISD::SUB, MVT::f64, Expand);
setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
setOperationAction(ISD::SUB, MVT::f32, Expand);
setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
setCondCodeAction(ISD::SETO, MVT::f64, Expand);
setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
setCondCodeAction(ISD::SETO, MVT::f32, Expand);
setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
setOperationAction(ISD::FABS, MVT::f32, Expand);
setOperationAction(ISD::FABS, MVT::f64, Expand);
setOperationAction(ISD::FNEG, MVT::f32, Expand);
setOperationAction(ISD::FNEG, MVT::f64, Expand);
}
setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
setOperationAction(ISD::SREM, MVT::i32, Expand);
setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
setOperationAction(ISD::FMUL, MVT::f64, Expand);
setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
setOperationAction(ISD::MUL, MVT::f32, Expand);
setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
setOperationAction(ISD::SUB, MVT::f64, Expand);
setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
setOperationAction(ISD::SUB, MVT::f32, Expand);
setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
setCondCodeAction(ISD::SETO, MVT::f64, Expand);
setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
setCondCodeAction(ISD::SETO, MVT::f32, Expand);
setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
@ -1311,33 +1208,20 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
setOperationAction(ISD::BSWAP, MVT::i64, Expand);
// Expand fp<->uint.
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
// Hexagon has no select or setcc: expand to SELECT_CC.
setOperationAction(ISD::SELECT, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::f64, Expand);
// Lower SELECT_CC to SETCC and SELECT.
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
if (QRI->Subtarget.hasV5TOps()) {
// We need to make the operation type of SELECT node to be Custom,
// such that we don't go into the infinite loop of
// select -> setcc -> select_cc -> select loop.
setOperationAction(ISD::SELECT, MVT::f32, Custom);
setOperationAction(ISD::SELECT, MVT::f64, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
} else {
// Hexagon has no select or setcc: expand to SELECT_CC.
setOperationAction(ISD::SELECT, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::f64, Expand);
// This is a workaround documented in DAGCombiner.cpp:2892 We don't
// support SELECT_CC on every type.
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
}
// This is a workaround documented in DAGCombiner.cpp:2892 We don't
// support SELECT_CC on every type.
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
setOperationAction(ISD::BRIND, MVT::Other, Expand);
@ -1423,22 +1307,22 @@ const char*
HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
default: return 0;
case HexagonISD::CONST32: return "HexagonISD::CONST32";
case HexagonISD::CONST32: return "HexagonISD::CONST32";
case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
case HexagonISD::BRICC: return "HexagonISD::BRICC";
case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
case HexagonISD::Hi: return "HexagonISD::Hi";
case HexagonISD::Lo: return "HexagonISD::Lo";
case HexagonISD::FTOI: return "HexagonISD::FTOI";
case HexagonISD::ITOF: return "HexagonISD::ITOF";
case HexagonISD::CALL: return "HexagonISD::CALL";
case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
case HexagonISD::BRICC: return "HexagonISD::BRICC";
case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
case HexagonISD::Hi: return "HexagonISD::Hi";
case HexagonISD::Lo: return "HexagonISD::Lo";
case HexagonISD::FTOI: return "HexagonISD::FTOI";
case HexagonISD::ITOF: return "HexagonISD::ITOF";
case HexagonISD::CALL: return "HexagonISD::CALL";
case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
}
}
@ -1463,10 +1347,9 @@ SDValue
HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
// Frame & Return address. Currently unimplemented.
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::GlobalTLSAddress:
llvm_unreachable("TLS not implemented for Hexagon.");
case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG);
@ -1476,10 +1359,9 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::BR_JT: return LowerBR_JT(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::SELECT: return Op;
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
}
}
@ -1522,10 +1404,8 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const
case MVT::i32:
case MVT::i16:
case MVT::i8:
case MVT::f32:
return std::make_pair(0U, &Hexagon::IntRegsRegClass);
case MVT::i64:
case MVT::f64:
return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
}
default:
@ -1536,14 +1416,6 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
const HexagonRegisterInfo* QRI = TM.getRegisterInfo();
return QRI->Subtarget.hasV5TOps();
}
/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,

View File

@ -27,7 +27,6 @@ namespace llvm {
CONST32,
CONST32_GP, // For marking data present in GP.
FCONST32,
SETCC,
ADJDYNALLOC,
ARGEXTEND,
@ -49,7 +48,6 @@ namespace llvm {
BR_JT, // Jump table.
BARRIER, // Memory barrier.
WrapperJT,
WrapperCP,
TC_RETURN
};
}
@ -130,7 +128,6 @@ namespace llvm {
MachineBasicBlock *BB) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
virtual EVT getSetCCResultType(EVT VT) const {
return MVT::i1;
}
@ -153,7 +150,6 @@ namespace llvm {
/// mode is legal for a load/store of any legal type.
/// TODO: Handle pre/postinc as well.
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can

View File

@ -13,26 +13,13 @@
// *** Must match HexagonBaseInfo.h ***
//===----------------------------------------------------------------------===//
class Type<bits<5> t> {
bits<5> Value = t;
}
def TypePSEUDO : Type<0>;
def TypeALU32 : Type<1>;
def TypeCR : Type<2>;
def TypeJR : Type<3>;
def TypeJ : Type<4>;
def TypeLD : Type<5>;
def TypeST : Type<6>;
def TypeSYSTEM : Type<7>;
def TypeXTYPE : Type<8>;
def TypeMARKER : Type<31>;
//===----------------------------------------------------------------------===//
// Intruction Class Declaration +
//===----------------------------------------------------------------------===//
class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr, InstrItinClass itin, Type type> : Instruction {
string cstr, InstrItinClass itin> : Instruction {
field bits<32> Inst;
let Namespace = "Hexagon";
@ -44,15 +31,11 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
let Constraints = cstr;
let Itinerary = itin;
// *** Must match HexagonBaseInfo.h ***
Type HexagonType = type;
let TSFlags{4-0} = HexagonType.Value;
bits<1> isHexagonSolo = 0;
let TSFlags{5} = isHexagonSolo;
// *** The code below must match HexagonBaseInfo.h ***
// Predicated instructions.
bits<1> isPredicated = 0;
let TSFlags{6} = isPredicated;
let TSFlags{1} = isPredicated;
// *** The code above must match HexagonBaseInfo.h ***
}
@ -64,25 +47,17 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
// LD Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> {
: InstHexagon<outs, ins, asmstr, pattern, "", LD> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
}
class LDInst2<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
let mayLoad = 1;
}
// LD Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, LD, TypeLD> {
: InstHexagon<outs, ins, asmstr, pattern, cstr, LD> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -93,24 +68,7 @@ class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
// ST Instruction Class in V4 can take SLOT0 & SLOT1.
// Definition of the instruction class CHANGED from V2/V3 to V4.
class STInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
}
class STInst2<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
let mayStore = 1;
}
// SYSTEM Instruction Class in V4 can take SLOT0 only
// In V2/V3 we used ST for this but in v4 ST can take SLOT0 or SLOT1.
class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", SYS, TypeSYSTEM> {
: InstHexagon<outs, ins, asmstr, pattern, "", ST> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
@ -121,7 +79,7 @@ class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class CHANGED from V2/V3 to V4.
class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, ST, TypeST> {
: InstHexagon<outs, ins, asmstr, pattern, cstr, ST> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -131,7 +89,7 @@ class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
// ALU32 Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", ALU32, TypeALU32> {
: InstHexagon<outs, ins, asmstr, pattern, "", ALU32> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -144,17 +102,7 @@ class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
class ALU64Type<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", ALU64, TypeXTYPE> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
bits<16> imm16;
bits<16> imm16_2;
}
class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, ALU64, TypeXTYPE> {
: InstHexagon<outs, ins, asmstr, pattern, "", ALU64> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -167,7 +115,7 @@ class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
class MInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", M, TypeXTYPE> {
: InstHexagon<outs, ins, asmstr, pattern, "", M> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -178,8 +126,8 @@ class MInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, M, TypeXTYPE> {
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, M> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -190,7 +138,9 @@ class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
class SInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", S, TypeXTYPE> {
//: InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, M)> {
: InstHexagon<outs, ins, asmstr, pattern, "", S> {
// : InstHexagon<outs, ins, asmstr, pattern, "", S> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -201,8 +151,8 @@ class SInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, S, TypeXTYPE> {
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, S> {
// : InstHexagon<outs, ins, asmstr, pattern, cstr, S> {
// : InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, S)> {
bits<5> rd;
@ -213,14 +163,14 @@ class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
// J Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class JType<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", J, TypeJ> {
: InstHexagon<outs, ins, asmstr, pattern, "", J> {
bits<16> imm16;
}
// JR Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class JRType<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", JR, TypeJR> {
: InstHexagon<outs, ins, asmstr, pattern, "", JR> {
bits<5> rs;
bits<5> pu; // Predicate register
}
@ -228,22 +178,15 @@ class JRType<dag outs, dag ins, string asmstr, list<dag> pattern>
// CR Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", CR, TypeCR> {
: InstHexagon<outs, ins, asmstr, pattern, "", CR> {
bits<5> rs;
bits<10> imm10;
}
class Marker<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", MARKER, TypeMARKER> {
let isCodeGenOnly = 1;
let isPseudo = 1;
}
class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO, TypePSEUDO> {
let isCodeGenOnly = 1;
let isPseudo = 1;
}
: InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO>;
//===----------------------------------------------------------------------===//
// Intruction Classes Definitions -
@ -279,11 +222,6 @@ class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern>
: ALU64Type<outs, ins, asmstr, pattern> {
}
class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern>
: ALU64Type<outs, ins, asmstr, pattern> {
let rt{0-4} = 0;
}
// J Type Instructions.
class JInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: JType<outs, ins, asmstr, pattern> {
@ -301,27 +239,12 @@ class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
let rt{0-4} = 0;
}
class STInst2PI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
: STInstPost<outs, ins, asmstr, pattern, cstr> {
let rt{0-4} = 0;
let mayStore = 1;
}
// Post increment LD Instruction.
class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
: LDInstPost<outs, ins, asmstr, pattern, cstr> {
let rt{0-4} = 0;
}
class LDInst2PI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
: LDInstPost<outs, ins, asmstr, pattern, cstr> {
let rt{0-4} = 0;
let mayLoad = 1;
}
//===----------------------------------------------------------------------===//
// V4 Instruction Format Definitions +
//===----------------------------------------------------------------------===//

View File

@ -11,25 +11,11 @@
//
//===----------------------------------------------------------------------===//
//----------------------------------------------------------------------------//
// Hexagon Intruction Flags +
//
// *** Must match BaseInfo.h ***
//----------------------------------------------------------------------------//
def TypeMEMOP : Type<9>;
def TypeNV : Type<10>;
def TypePREFIX : Type<30>;
//----------------------------------------------------------------------------//
// Intruction Classes Definitions +
//----------------------------------------------------------------------------//
//
// NV type instructions.
//
class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", NV_V4, TypeNV> {
: InstHexagon<outs, ins, asmstr, pattern, "", NV_V4> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
@ -38,7 +24,7 @@ class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of Post increment new value store.
class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4, TypeNV> {
: InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -53,15 +39,8 @@ class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern,
}
class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4, TypeMEMOP> {
: InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4> {
bits<5> rd;
bits<5> rs;
bits<6> imm6;
}
class Immext<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypePREFIX> {
let isCodeGenOnly = 1;
bits<26> imm26;
}

File diff suppressed because it is too large.

View File

@ -107,8 +107,6 @@ public:
unsigned createVR(MachineFunction* MF, MVT VT) const;
virtual bool isExtendable(const MachineInstr* MI) const;
virtual bool isExtended(const MachineInstr* MI) const;
virtual bool isPredicable(MachineInstr *MI) const;
virtual bool
PredicateInstruction(MachineInstr *MI,
@ -138,10 +136,6 @@ public:
isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumCycles,
const BranchProbability &Probability) const;
unsigned getInvertedPredicatedOpcode(const int Opcode) const;
unsigned getImmExtForm(const MachineInstr* MI) const;
unsigned getNormalBranchForm(const MachineInstr* MI) const;
virtual DFAPacketizer*
CreateTargetScheduleState(const TargetMachine *TM,
const ScheduleDAG *DAG) const;
@ -166,16 +160,10 @@ public:
bool isS8_Immediate(const int value) const;
bool isS6_Immediate(const int value) const;
bool isConditionalTransfer(const MachineInstr* MI) const;
bool isConditionalALU32(const MachineInstr* MI) const;
bool isConditionalLoad(const MachineInstr* MI) const;
bool isConditionalStore(const MachineInstr* MI) const;
bool isConditionalALU32 (const MachineInstr* MI) const;
bool isConditionalLoad (const MachineInstr* MI) const;
bool isDeallocRet(const MachineInstr *MI) const;
bool isNewValueJumpCandidate(const MachineInstr *MI) const;
bool isNewValueJump(const MachineInstr* MI) const;
bool isNewValueStore(const MachineInstr* MI) const;
bool isPostIncrement(const MachineInstr* MI) const;
bool isSaveCalleeSavedRegsCall(const MachineInstr* MI) const;
unsigned getInvertedPredicatedOpcode(const int Opc) const;
private:
int getMatchingCondBranchOpcode(int Opc, bool sense) const;

File diff suppressed because it is too large.

View File

@ -41,11 +41,10 @@ let isCall = 1, neverHasSideEffects = 1,
}
// Jump to address from register
// if(p?.new) jumpr:t r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
def JMPR_cdnPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
def JMPR_cPnewt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if ($src1.new) jumpr:t $src2",
[]>, Requires<[HasV3T]>;
}
@ -53,7 +52,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
// if (!p?.new) jumpr:t r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
def JMPR_cdnNotPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
def JMPR_cNotPnewt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if (!$src1.new) jumpr:t $src2",
[]>, Requires<[HasV3T]>;
}
@ -62,7 +61,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
// if(p?.new) jumpr:nt r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
def JMPR_cdnPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
def JMPR_cPnewNt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if ($src1.new) jumpr:nt $src2",
[]>, Requires<[HasV3T]>;
}
@ -70,7 +69,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
// if (!p?.new) jumpr:nt r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
def JMPR_cdnNotPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
def JMPR_cNotPnewNt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if (!$src1.new) jumpr:nt $src2",
[]>, Requires<[HasV3T]>;
}
@ -87,22 +86,20 @@ let AddedComplexity = 200 in
def MAXw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = max($src2, $src1)",
[(set (i64 DoubleRegs:$dst),
(i64 (select (i1 (setlt (i64 DoubleRegs:$src2),
(i64 DoubleRegs:$src1))),
(i64 DoubleRegs:$src1),
(i64 DoubleRegs:$src2))))]>,
[(set DoubleRegs:$dst, (select (i1 (setlt DoubleRegs:$src2,
DoubleRegs:$src1)),
DoubleRegs:$src1,
DoubleRegs:$src2))]>,
Requires<[HasV3T]>;
let AddedComplexity = 200 in
def MINw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = min($src2, $src1)",
[(set (i64 DoubleRegs:$dst),
(i64 (select (i1 (setgt (i64 DoubleRegs:$src2),
(i64 DoubleRegs:$src1))),
(i64 DoubleRegs:$src1),
(i64 DoubleRegs:$src2))))]>,
[(set DoubleRegs:$dst, (select (i1 (setgt DoubleRegs:$src2,
DoubleRegs:$src1)),
DoubleRegs:$src1,
DoubleRegs:$src2))]>,
Requires<[HasV3T]>;
//===----------------------------------------------------------------------===//
@ -112,25 +109,25 @@ Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (seteq (i32 IntRegs:$src1), 0)), bb:$offset),
// (JMP_RegEzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (seteq IntRegs:$src1, 0)), bb:$offset),
// (JMP_RegEzt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), 0)), bb:$offset),
// (JMP_RegNzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setne IntRegs:$src1, 0)), bb:$offset),
// (JMP_RegNzt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setle (i32 IntRegs:$src1), 0)), bb:$offset),
// (JMP_RegLezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setle IntRegs:$src1, 0)), bb:$offset),
// (JMP_RegLezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setge (i32 IntRegs:$src1), 0)), bb:$offset),
// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setge IntRegs:$src1, 0)), bb:$offset),
// (JMP_RegGezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setgt (i32 IntRegs:$src1), -1)), bb:$offset),
// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setgt IntRegs:$src1, -1)), bb:$offset),
// (JMP_RegGezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
// Map call instruction
def : Pat<(call (i32 IntRegs:$dst)),
(CALLRv3 (i32 IntRegs:$dst))>, Requires<[HasV3T]>;
def : Pat<(call IntRegs:$dst),
(CALLRv3 IntRegs:$dst)>, Requires<[HasV3T]>;
def : Pat<(call tglobaladdr:$dst),
(CALLv3 tglobaladdr:$dst)>, Requires<[HasV3T]>;
def : Pat<(call texternalsym:$dst),

File diff suppressed because it is too large.

View File

@ -1,626 +0,0 @@
def SDTHexagonFCONST32 : SDTypeProfile<1, 1, [
SDTCisVT<0, f32>,
SDTCisPtrTy<1>]>;
def HexagonFCONST32 : SDNode<"HexagonISD::FCONST32", SDTHexagonFCONST32>;
let isReMaterializable = 1, isMoveImm = 1 in
def FCONST32_nsdata : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
"$dst = CONST32(#$global)",
[(set (f32 IntRegs:$dst),
(HexagonFCONST32 tglobaladdr:$global))]>,
Requires<[HasV5T]>;
let isReMaterializable = 1, isMoveImm = 1 in
def CONST64_Float_Real : LDInst<(outs DoubleRegs:$dst), (ins f64imm:$src1),
"$dst = CONST64(#$src1)",
[(set DoubleRegs:$dst, fpimm:$src1)]>,
Requires<[HasV5T]>;
let isReMaterializable = 1, isMoveImm = 1 in
def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1),
"$dst = CONST32(#$src1)",
[(set IntRegs:$dst, fpimm:$src1)]>,
Requires<[HasV5T]>;
// Transfer immediate float.
// Only works with single precision fp value.
// For double precision, use CONST64_float_real, as 64bit transfer
// can only hold 40-bit values - 32 from const ext + 8 bit immediate.
let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in
def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32imm:$src1),
"$dst = ##$src1",
[(set IntRegs:$dst, fpimm:$src1)]>,
Requires<[HasV5T]>;
def TFRI_cPt_f : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, f32imm:$src2),
"if ($src1) $dst = ##$src2",
[]>,
Requires<[HasV5T]>;
let isPredicated = 1 in
def TFRI_cNotPt_f : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, f32imm:$src2),
"if (!$src1) $dst = ##$src2",
[]>,
Requires<[HasV5T]>;
// Convert single precision to double precision and vice-versa.
def CONVERT_sf2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
"$dst = convert_sf2df($src)",
[(set DoubleRegs:$dst, (fextend IntRegs:$src))]>,
Requires<[HasV5T]>;
def CONVERT_df2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_df2sf($src)",
[(set IntRegs:$dst, (fround DoubleRegs:$src))]>,
Requires<[HasV5T]>;
// Load.
def LDrid_f : LDInst<(outs DoubleRegs:$dst),
(ins MEMri:$addr),
"$dst = memd($addr)",
[(set DoubleRegs:$dst, (f64 (load ADDRriS11_3:$addr)))]>,
Requires<[HasV5T]>;
let AddedComplexity = 20 in
def LDrid_indexed_f : LDInst<(outs DoubleRegs:$dst),
(ins IntRegs:$src1, s11_3Imm:$offset),
"$dst = memd($src1+#$offset)",
[(set DoubleRegs:$dst, (f64 (load (add IntRegs:$src1,
s11_3ImmPred:$offset))))]>,
Requires<[HasV5T]>;
def LDriw_f : LDInst<(outs IntRegs:$dst),
(ins MEMri:$addr), "$dst = memw($addr)",
[(set IntRegs:$dst, (f32 (load ADDRriS11_2:$addr)))]>,
Requires<[HasV5T]>;
let AddedComplexity = 20 in
def LDriw_indexed_f : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_2Imm:$offset),
"$dst = memw($src1+#$offset)",
[(set IntRegs:$dst, (f32 (load (add IntRegs:$src1,
s11_2ImmPred:$offset))))]>,
Requires<[HasV5T]>;
// Store.
def STriw_f : STInst<(outs),
(ins MEMri:$addr, IntRegs:$src1),
"memw($addr) = $src1",
[(store (f32 IntRegs:$src1), ADDRriS11_2:$addr)]>,
Requires<[HasV5T]>;
let AddedComplexity = 10 in
def STriw_indexed_f : STInst<(outs),
(ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3),
"memw($src1+#$src2) = $src3",
[(store (f32 IntRegs:$src3),
(add IntRegs:$src1, s11_2ImmPred:$src2))]>,
Requires<[HasV5T]>;
def STrid_f : STInst<(outs),
(ins MEMri:$addr, DoubleRegs:$src1),
"memd($addr) = $src1",
[(store (f64 DoubleRegs:$src1), ADDRriS11_2:$addr)]>,
Requires<[HasV5T]>;
// Indexed store double word.
let AddedComplexity = 10 in
def STrid_indexed_f : STInst<(outs),
(ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3),
"memd($src1+#$src2) = $src3",
[(store (f64 DoubleRegs:$src3),
(add IntRegs:$src1, s11_3ImmPred:$src2))]>,
Requires<[HasV5T]>;
// Add
let isCommutable = 1 in
def fADD_rr : ALU64_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = sfadd($src1, $src2)",
[(set IntRegs:$dst, (fadd IntRegs:$src1, IntRegs:$src2))]>,
Requires<[HasV5T]>;
let isCommutable = 1 in
def fADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = dfadd($src1, $src2)",
[(set DoubleRegs:$dst, (fadd DoubleRegs:$src1,
DoubleRegs:$src2))]>,
Requires<[HasV5T]>;
def fSUB_rr : ALU64_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = sfsub($src1, $src2)",
[(set IntRegs:$dst, (fsub IntRegs:$src1, IntRegs:$src2))]>,
Requires<[HasV5T]>;
def fSUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = dfsub($src1, $src2)",
[(set DoubleRegs:$dst, (fsub DoubleRegs:$src1,
DoubleRegs:$src2))]>,
Requires<[HasV5T]>;
let isCommutable = 1 in
def fMUL_rr : ALU64_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = sfmpy($src1, $src2)",
[(set IntRegs:$dst, (fmul IntRegs:$src1, IntRegs:$src2))]>,
Requires<[HasV5T]>;
let isCommutable = 1 in
def fMUL64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = dfmpy($src1, $src2)",
[(set DoubleRegs:$dst, (fmul DoubleRegs:$src1,
DoubleRegs:$src2))]>,
Requires<[HasV5T]>;
// Compare.
let isCompare = 1 in {
multiclass FCMP64_rr<string OpcStr, PatFrag OpNode> {
def _rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
[(set PredRegs:$dst,
(OpNode (f64 DoubleRegs:$b), (f64 DoubleRegs:$c)))]>,
Requires<[HasV5T]>;
}
multiclass FCMP32_rr<string OpcStr, PatFrag OpNode> {
def _rr : ALU64_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
[(set PredRegs:$dst,
(OpNode (f32 IntRegs:$b), (f32 IntRegs:$c)))]>,
Requires<[HasV5T]>;
}
}
defm FCMPOEQ64 : FCMP64_rr<"dfcmp.eq", setoeq>;
defm FCMPUEQ64 : FCMP64_rr<"dfcmp.eq", setueq>;
defm FCMPOGT64 : FCMP64_rr<"dfcmp.gt", setogt>;
defm FCMPUGT64 : FCMP64_rr<"dfcmp.gt", setugt>;
defm FCMPOGE64 : FCMP64_rr<"dfcmp.ge", setoge>;
defm FCMPUGE64 : FCMP64_rr<"dfcmp.ge", setuge>;
defm FCMPOEQ32 : FCMP32_rr<"sfcmp.eq", setoeq>;
defm FCMPUEQ32 : FCMP32_rr<"sfcmp.eq", setueq>;
defm FCMPOGT32 : FCMP32_rr<"sfcmp.gt", setogt>;
defm FCMPUGT32 : FCMP32_rr<"sfcmp.gt", setugt>;
defm FCMPOGE32 : FCMP32_rr<"sfcmp.ge", setoge>;
defm FCMPUGE32 : FCMP32_rr<"sfcmp.ge", setuge>;
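// Note (added for clarity): only the eq/gt/ge compare forms are defined
// above, so the lt/le conditions (ordered and unordered) are lowered by
// swapping the operands of the gt/ge compares in the patterns below, e.g.
// (setolt a, b) selects FCMPOGT32_rr/FCMPOGT64_rr with (b, a).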
// olt.
def : Pat <(i1 (setolt (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
(i1 (FCMPOGT32_rr IntRegs:$src2, IntRegs:$src1))>,
Requires<[HasV5T]>;
def : Pat <(i1 (setolt (f32 IntRegs:$src1), (fpimm:$src2))),
(i1 (FCMPOGT32_rr (f32 (TFRI_f fpimm:$src2)), (f32 IntRegs:$src1)))>,
Requires<[HasV5T]>;
def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
(i1 (FCMPOGT64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
Requires<[HasV5T]>;
def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (fpimm:$src2))),
(i1 (FCMPOGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
(f64 DoubleRegs:$src1)))>,
Requires<[HasV5T]>;
// gt.
def : Pat <(i1 (setugt (f64 DoubleRegs:$src1), (fpimm:$src2))),
(i1 (FCMPUGT64_rr (f64 DoubleRegs:$src1),
(f64 (CONST64_Float_Real fpimm:$src2))))>,
Requires<[HasV5T]>;
def : Pat <(i1 (setugt (f32 IntRegs:$src1), (fpimm:$src2))),
(i1 (FCMPUGT32_rr (f32 IntRegs:$src1), (f32 (TFRI_f fpimm:$src2))))>,
Requires<[HasV5T]>;
// ult.
def : Pat <(i1 (setult (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
(i1 (FCMPUGT32_rr IntRegs:$src2, IntRegs:$src1))>,
Requires<[HasV5T]>;
def : Pat <(i1 (setult (f32 IntRegs:$src1), (fpimm:$src2))),
(i1 (FCMPUGT32_rr (f32 (TFRI_f fpimm:$src2)), (f32 IntRegs:$src1)))>,
Requires<[HasV5T]>;
def : Pat <(i1 (setult (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
(i1 (FCMPUGT64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
Requires<[HasV5T]>;
def : Pat <(i1 (setult (f64 DoubleRegs:$src1), (fpimm:$src2))),
(i1 (FCMPUGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
(f64 DoubleRegs:$src1)))>,
Requires<[HasV5T]>;
// le.
// rs <= rt -> rt >= rs.
def : Pat<(i1 (setole (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
(i1 (FCMPOGE32_rr IntRegs:$src2, IntRegs:$src1))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setole (f32 IntRegs:$src1), (fpimm:$src2))),
(i1 (FCMPOGE32_rr (f32 (TFRI_f fpimm:$src2)), IntRegs:$src1))>,
Requires<[HasV5T]>;
// Rss <= Rtt -> Rtt >= Rss.
def : Pat<(i1 (setole (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
(i1 (FCMPOGE64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setole (f64 DoubleRegs:$src1), (fpimm:$src2))),
(i1 (FCMPOGE64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
DoubleRegs:$src1))>,
Requires<[HasV5T]>;
// rs <= rt -> rt >= rs.
def : Pat<(i1 (setule (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
(i1 (FCMPUGE32_rr IntRegs:$src2, IntRegs:$src1))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setule (f32 IntRegs:$src1), (fpimm:$src2))),
(i1 (FCMPUGE32_rr (f32 (TFRI_f fpimm:$src2)), IntRegs:$src1))>,
Requires<[HasV5T]>;
// Rss <= Rtt -> Rtt >= Rss.
def : Pat<(i1 (setule (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
(i1 (FCMPUGE64_rr DoubleRegs:$src2, DoubleRegs:$src1))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setule (f64 DoubleRegs:$src1), (fpimm:$src2))),
(i1 (FCMPUGE64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
DoubleRegs:$src1))>,
Requires<[HasV5T]>;
// ne.
def : Pat<(i1 (setone (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
(i1 (NOT_p (FCMPOEQ32_rr IntRegs:$src1, IntRegs:$src2)))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setone (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
(i1 (NOT_p (FCMPOEQ64_rr DoubleRegs:$src1, DoubleRegs:$src2)))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setune (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
(i1 (NOT_p (FCMPUEQ32_rr IntRegs:$src1, IntRegs:$src2)))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setune (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
(i1 (NOT_p (FCMPUEQ64_rr DoubleRegs:$src1, DoubleRegs:$src2)))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setone (f32 IntRegs:$src1), (fpimm:$src2))),
(i1 (NOT_p (FCMPOEQ32_rr IntRegs:$src1, (f32 (TFRI_f fpimm:$src2)))))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setone (f64 DoubleRegs:$src1), (fpimm:$src2))),
(i1 (NOT_p (FCMPOEQ64_rr DoubleRegs:$src1,
(f64 (CONST64_Float_Real fpimm:$src2)))))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setune (f32 IntRegs:$src1), (fpimm:$src2))),
(i1 (NOT_p (FCMPUEQ32_rr IntRegs:$src1, (f32 (TFRI_f fpimm:$src2)))))>,
Requires<[HasV5T]>;
def : Pat<(i1 (setune (f64 DoubleRegs:$src1), (fpimm:$src2))),
(i1 (NOT_p (FCMPUEQ64_rr DoubleRegs:$src1,
(f64 (CONST64_Float_Real fpimm:$src2)))))>,
Requires<[HasV5T]>;
// Convert Integer to Floating Point.
def CONVERT_d2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_d2sf($src)",
[(set (f32 IntRegs:$dst), (sint_to_fp (i64 DoubleRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_ud2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_ud2sf($src)",
[(set (f32 IntRegs:$dst), (uint_to_fp (i64 DoubleRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_uw2sf : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
"$dst = convert_uw2sf($src)",
[(set (f32 IntRegs:$dst), (uint_to_fp (i32 IntRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_w2sf : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
"$dst = convert_w2sf($src)",
[(set (f32 IntRegs:$dst), (sint_to_fp (i32 IntRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_d2df : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_d2df($src)",
[(set (f64 DoubleRegs:$dst), (sint_to_fp (i64 DoubleRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_ud2df : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_ud2df($src)",
[(set (f64 DoubleRegs:$dst), (uint_to_fp (i64 DoubleRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_uw2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
"$dst = convert_uw2df($src)",
[(set (f64 DoubleRegs:$dst), (uint_to_fp (i32 IntRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_w2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
"$dst = convert_w2df($src)",
[(set (f64 DoubleRegs:$dst), (sint_to_fp (i32 IntRegs:$src)))]>,
Requires<[HasV5T]>;
// Convert Floating Point to Integer - default (:chop, i.e. truncate toward zero).
def CONVERT_df2uw : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_df2uw($src):chop",
[(set (i32 IntRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_df2w : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_df2w($src):chop",
[(set (i32 IntRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_sf2uw : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
"$dst = convert_sf2uw($src):chop",
[(set (i32 IntRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_sf2w : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
"$dst = convert_sf2w($src):chop",
[(set (i32 IntRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_df2d : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_df2d($src):chop",
[(set (i64 DoubleRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_df2ud : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_df2ud($src):chop",
[(set (i64 DoubleRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_sf2d : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
"$dst = convert_sf2d($src):chop",
[(set (i64 DoubleRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
Requires<[HasV5T]>;
def CONVERT_sf2ud : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
"$dst = convert_sf2ud($src):chop",
[(set (i64 DoubleRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
Requires<[HasV5T]>;
// Convert Floating Point to Integer: non-chopped (only selected when IEEE
// round-to-nearest is in effect; see IEEERndNearV5T).
let AddedComplexity = 20 in
def CONVERT_df2uw_nchop : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_df2uw($src)",
[(set (i32 IntRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
Requires<[HasV5T, IEEERndNearV5T]>;
let AddedComplexity = 20 in
def CONVERT_df2w_nchop : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_df2w($src)",
[(set (i32 IntRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
Requires<[HasV5T, IEEERndNearV5T]>;
let AddedComplexity = 20 in
def CONVERT_sf2uw_nchop : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
"$dst = convert_sf2uw($src)",
[(set (i32 IntRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
Requires<[HasV5T, IEEERndNearV5T]>;
let AddedComplexity = 20 in
def CONVERT_sf2w_nchop : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
"$dst = convert_sf2w($src)",
[(set (i32 IntRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
Requires<[HasV5T, IEEERndNearV5T]>;
let AddedComplexity = 20 in
def CONVERT_df2d_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_df2d($src)",
[(set (i64 DoubleRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>,
Requires<[HasV5T, IEEERndNearV5T]>;
let AddedComplexity = 20 in
def CONVERT_df2ud_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
"$dst = convert_df2ud($src)",
[(set (i64 DoubleRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>,
Requires<[HasV5T, IEEERndNearV5T]>;
let AddedComplexity = 20 in
def CONVERT_sf2d_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
"$dst = convert_sf2d($src)",
[(set (i64 DoubleRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>,
Requires<[HasV5T, IEEERndNearV5T]>;
let AddedComplexity = 20 in
def CONVERT_sf2ud_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src),
"$dst = convert_sf2ud($src)",
[(set (i64 DoubleRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>,
Requires<[HasV5T, IEEERndNearV5T]>;
// Bitcast is different than [fp|sint|uint]_to_[sint|uint|fp].
def : Pat <(i32 (bitconvert (f32 IntRegs:$src))),
(i32 (TFR IntRegs:$src))>,
Requires<[HasV5T]>;
def : Pat <(f32 (bitconvert (i32 IntRegs:$src))),
(f32 (TFR IntRegs:$src))>,
Requires<[HasV5T]>;
def : Pat <(i64 (bitconvert (f64 DoubleRegs:$src))),
(i64 (TFR64 DoubleRegs:$src))>,
Requires<[HasV5T]>;
def : Pat <(f64 (bitconvert (i64 DoubleRegs:$src))),
(f64 (TFR64 DoubleRegs:$src))>,
Requires<[HasV5T]>;
// Floating point fused multiply-add.
def FMADD_dp : ALU64_acc<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
"$dst += dfmpy($src2, $src3)",
[(set (f64 DoubleRegs:$dst),
(fma DoubleRegs:$src2, DoubleRegs:$src3, DoubleRegs:$src1))],
"$src1 = $dst">,
Requires<[HasV5T]>;
def FMADD_sp : ALU64_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
"$dst += sfmpy($src2, $src3)",
[(set (f32 IntRegs:$dst),
(fma IntRegs:$src2, IntRegs:$src3, IntRegs:$src1))],
"$src1 = $dst">,
Requires<[HasV5T]>;
// Floating point max/min.
let AddedComplexity = 100 in
def FMAX_dp : ALU64_rr<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, DoubleRegs:$src2),
"$dst = dfmax($src1, $src2)",
[(set DoubleRegs:$dst, (f64 (select (i1 (setolt DoubleRegs:$src2,
DoubleRegs:$src1)),
DoubleRegs:$src1,
DoubleRegs:$src2)))]>,
Requires<[HasV5T]>;
let AddedComplexity = 100 in
def FMAX_sp : ALU64_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = sfmax($src1, $src2)",
[(set IntRegs:$dst, (f32 (select (i1 (setolt IntRegs:$src2,
IntRegs:$src1)),
IntRegs:$src1,
IntRegs:$src2)))]>,
Requires<[HasV5T]>;
let AddedComplexity = 100 in
def FMIN_dp : ALU64_rr<(outs DoubleRegs:$dst),
(ins DoubleRegs:$src1, DoubleRegs:$src2),
"$dst = dfmin($src1, $src2)",
[(set DoubleRegs:$dst, (f64 (select (i1 (setogt DoubleRegs:$src2,
DoubleRegs:$src1)),
DoubleRegs:$src1,
DoubleRegs:$src2)))]>,
Requires<[HasV5T]>;
let AddedComplexity = 100 in
def FMIN_sp : ALU64_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2),
"$dst = sfmin($src1, $src2)",
[(set IntRegs:$dst, (f32 (select (i1 (setogt IntRegs:$src2,
IntRegs:$src1)),
IntRegs:$src1,
IntRegs:$src2)))]>,
Requires<[HasV5T]>;
// Pseudo instruction to encode a set of conditional transfers.
// This instruction is used instead of a mux and trades off code size
// for performance. We conduct this transformation optimistically in
// the hope that these instructions get promoted to dot-new transfers.
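// Illustrative sketch (not part of this patch): a selected f32 select such as
//     r0 = select(p0, r2, r3)
// is first emitted through TFR_condset_rr_f as the pair
//     if (p0) r0 = r2
//     if (!p0) r0 = r3
// which later passes may promote to dot-new forms, e.g. "if (p0.new) r0 = r2".
// Register numbers and exact mnemonics here are examples only.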
let AddedComplexity = 100, isPredicated = 1 in
def TFR_condset_rr_f : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
IntRegs:$src2,
IntRegs:$src3),
"Error; should not emit",
[(set IntRegs:$dst, (f32 (select PredRegs:$src1,
IntRegs:$src2,
IntRegs:$src3)))]>,
Requires<[HasV5T]>;
let AddedComplexity = 100, isPredicated = 1 in
def TFR_condset_rr64_f : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
DoubleRegs:$src2,
DoubleRegs:$src3),
"Error; should not emit",
[(set DoubleRegs:$dst, (f64 (select PredRegs:$src1,
DoubleRegs:$src2,
DoubleRegs:$src3)))]>,
Requires<[HasV5T]>;
let AddedComplexity = 100, isPredicated = 1 in
def TFR_condset_ri_f : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, f32imm:$src3),
"Error; should not emit",
[(set IntRegs:$dst,
(f32 (select PredRegs:$src1, IntRegs:$src2, fpimm:$src3)))]>,
Requires<[HasV5T]>;
let AddedComplexity = 100, isPredicated = 1 in
def TFR_condset_ir_f : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, f32imm:$src2, IntRegs:$src3),
"Error; should not emit",
[(set IntRegs:$dst,
(f32 (select PredRegs:$src1, fpimm:$src2, IntRegs:$src3)))]>,
Requires<[HasV5T]>;
let AddedComplexity = 100, isPredicated = 1 in
def TFR_condset_ii_f : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, f32imm:$src2, f32imm:$src3),
"Error; should not emit",
[(set IntRegs:$dst, (f32 (select PredRegs:$src1,
fpimm:$src2,
fpimm:$src3)))]>,
Requires<[HasV5T]>;
def : Pat <(select (i1 (setult (f32 IntRegs:$src1), (f32 IntRegs:$src2))),
(f32 IntRegs:$src3),
(f32 IntRegs:$src4)),
(TFR_condset_rr_f (FCMPUGT32_rr IntRegs:$src2, IntRegs:$src1), IntRegs:$src4,
IntRegs:$src3)>, Requires<[HasV5T]>;
def : Pat <(select (i1 (setult (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
(f64 DoubleRegs:$src3),
(f64 DoubleRegs:$src4)),
(TFR_condset_rr64_f (FCMPUGT64_rr DoubleRegs:$src2, DoubleRegs:$src1),
DoubleRegs:$src4, DoubleRegs:$src3)>, Requires<[HasV5T]>;
// Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i).
def : Pat <(select (not PredRegs:$src1), fpimm:$src2, fpimm:$src3),
(TFR_condset_ii_f PredRegs:$src1, fpimm:$src3, fpimm:$src2)>;
// Map from p0 = pnot(p0); r0 = select(p0, #i, r1)
// => r0 = TFR_condset_ri(p0, r1, #i)
def : Pat <(select (not PredRegs:$src1), fpimm:$src2, IntRegs:$src3),
(TFR_condset_ri_f PredRegs:$src1, IntRegs:$src3, fpimm:$src2)>;
// Map from p0 = pnot(p0); r0 = mux(p0, r1, #i)
// => r0 = TFR_condset_ir(p0, #i, r1)
def : Pat <(select (not PredRegs:$src1), IntRegs:$src2, fpimm:$src3),
(TFR_condset_ir_f PredRegs:$src1, fpimm:$src3, IntRegs:$src2)>;
def : Pat <(i32 (fp_to_sint (f64 DoubleRegs:$src1))),
(i32 (EXTRACT_SUBREG (i64 (CONVERT_df2d (f64 DoubleRegs:$src1))), subreg_loreg))>,
Requires<[HasV5T]>;
def : Pat <(fabs (f32 IntRegs:$src1)),
(CLRBIT_31 (f32 IntRegs:$src1), 31)>,
Requires<[HasV5T]>;
def : Pat <(fneg (f32 IntRegs:$src1)),
(TOGBIT_31 (f32 IntRegs:$src1), 31)>,
Requires<[HasV5T]>;
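// Note (added for clarity): the two patterns above rely on the IEEE-754
// layout of f32, where bit 31 is the sign bit; clearing it with CLRBIT_31
// implements fabs and toggling it with TOGBIT_31 implements fneg.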
/*
def : Pat <(fabs (f64 DoubleRegs:$src1)),
(CLRBIT_31 (f32 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), 31)>,
Requires<[HasV5T]>;
def : Pat <(fabs (f64 DoubleRegs:$src1)),
(CLRBIT_31 (f32 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), 31)>,
Requires<[HasV5T]>;
*/


@ -12,28 +12,18 @@
// Optimized with intrinsic accumulates
//
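// Sketch of the decomposition used below (added for clarity): with
// a = a_hi:a_lo and b = b_hi:b_lo as 32-bit halves,
//   a * b  =  a_lo*b_lo + ((a_hi*b_lo + a_lo*b_hi) << 32)   (mod 2^64)
// MPYU64 forms the full 64-bit a_lo*b_lo product, and the two M2_maci
// accumulates add the cross terms into its high word before COMBINE_rr
// reassembles the 64-bit result.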
def : Pat <(mul DoubleRegs:$src1, DoubleRegs:$src2),
(i64
(COMBINE_rr
(HEXAGON_M2_maci
(HEXAGON_M2_maci
(i32
(EXTRACT_SUBREG
(i64
(MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
subreg_loreg)),
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
subreg_loreg)))),
subreg_hireg)),
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))),
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)),
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg))),
(i32
(EXTRACT_SUBREG
(i64
(MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
subreg_loreg)))), subreg_loreg))))>;
(COMBINE_rr
(Hexagon_M2_maci
(Hexagon_M2_maci (EXTRACT_SUBREG (MPYU64 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
(EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg)),
subreg_hireg),
(EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
(EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)),
(EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg),
(EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)),
(EXTRACT_SUBREG (MPYU64 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
(EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg)),
subreg_loreg))>;

View File

@ -1,395 +0,0 @@
class sf_SInst_sf<string opc, Intrinsic IntID>
: SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set IntRegs:$dst, (IntID IntRegs:$src1))]>;
class si_SInst_sf<string opc, Intrinsic IntID>
: SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set IntRegs:$dst, (IntID IntRegs:$src1))]>;
class sf_SInst_si<string opc, Intrinsic IntID>
: SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set IntRegs:$dst, (IntID IntRegs:$src1))]>;
class sf_SInst_di<string opc, Intrinsic IntID>
: SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>;
class sf_SInst_df<string opc, Intrinsic IntID>
: SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>;
class si_SInst_df<string opc, Intrinsic IntID>
: SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>;
class df_SInst_sf<string opc, Intrinsic IntID>
: SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
class di_SInst_sf<string opc, Intrinsic IntID>
: SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
class df_SInst_si<string opc, Intrinsic IntID>
: SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
class df_SInst_df<string opc, Intrinsic IntID>
: SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>;
class di_SInst_df<string opc, Intrinsic IntID>
: SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>;
class df_SInst_di<string opc, Intrinsic IntID>
: SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
!strconcat("$dst = ", !strconcat(opc , "($src1)")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>;
class sf_MInst_sfsf<string opc, Intrinsic IntID>
: MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
[(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
class df_MInst_dfdf<string opc, Intrinsic IntID>
: MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
class qi_ALU64_dfdf<string opc, Intrinsic IntID>
: ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
[(set PredRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
class qi_ALU64_dfu5<string opc, Intrinsic IntID>
: ALU64_ri<(outs PredRegs:$dst), (ins DoubleRegs:$src1, u5Imm:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
[(set PredRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
class sf_MInst_sfsfsf_acc<string opc, Intrinsic IntID>
: MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
IntRegs:$dst2),
!strconcat("$dst += ", !strconcat(opc ,
"($src1, $src2)")),
[(set IntRegs:$dst, (IntID IntRegs:$src1,
IntRegs:$src2, IntRegs:$dst2))],
"$dst2 = $dst">;
class sf_MInst_sfsfsf_nac<string opc, Intrinsic IntID>
: MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
IntRegs:$dst2),
!strconcat("$dst -= ", !strconcat(opc ,
"($src1, $src2)")),
[(set IntRegs:$dst, (IntID IntRegs:$src1,
IntRegs:$src2, IntRegs:$dst2))],
"$dst2 = $dst">;
class sf_MInst_sfsfsfsi_sc<string opc, Intrinsic IntID>
: MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
IntRegs:$src2, IntRegs:$src3),
!strconcat("$dst += ", !strconcat(opc ,
"($src1, $src2, $src3):scale")),
[(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
IntRegs:$src2, IntRegs:$src3))],
"$dst2 = $dst">;
class sf_MInst_sfsfsf_acc_lib<string opc, Intrinsic IntID>
: MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
IntRegs:$dst2),
!strconcat("$dst += ", !strconcat(opc ,
"($src1, $src2):lib")),
[(set IntRegs:$dst, (IntID IntRegs:$src1,
IntRegs:$src2, IntRegs:$dst2))],
"$dst2 = $dst">;
class sf_MInst_sfsfsf_nac_lib<string opc, Intrinsic IntID>
: MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
IntRegs:$dst2),
!strconcat("$dst -= ", !strconcat(opc ,
"($src1, $src2):lib")),
[(set IntRegs:$dst, (IntID IntRegs:$src1,
IntRegs:$src2, IntRegs:$dst2))],
"$dst2 = $dst">;
class df_MInst_dfdfdf_acc<string opc, Intrinsic IntID>
: MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
DoubleRegs:$dst2),
!strconcat("$dst += ", !strconcat(opc ,
"($src1, $src2)")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
DoubleRegs:$src2, DoubleRegs:$dst2))],
"$dst2 = $dst">;
class df_MInst_dfdfdf_nac<string opc, Intrinsic IntID>
: MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
DoubleRegs:$dst2),
!strconcat("$dst -= ", !strconcat(opc ,
"($src1, $src2)")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
DoubleRegs:$src2, DoubleRegs:$dst2))],
"$dst2 = $dst">;
class df_MInst_dfdfdfsi_sc<string opc, Intrinsic IntID>
: MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
DoubleRegs:$src2, IntRegs:$src3),
!strconcat("$dst += ", !strconcat(opc ,
"($src1, $src2, $src3):scale")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
DoubleRegs:$src2, IntRegs:$src3))],
"$dst2 = $dst">;
class df_MInst_dfdfdf_acc_lib<string opc, Intrinsic IntID>
: MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
DoubleRegs:$dst2),
!strconcat("$dst += ", !strconcat(opc ,
"($src1, $src2):lib")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
DoubleRegs:$src2, DoubleRegs:$dst2))],
"$dst2 = $dst">;
class df_MInst_dfdfdf_nac_lib<string opc, Intrinsic IntID>
: MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
DoubleRegs:$dst2),
!strconcat("$dst -= ", !strconcat(opc ,
"($src1, $src2):lib")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
DoubleRegs:$src2, DoubleRegs:$dst2))],
"$dst2 = $dst">;
class qi_SInst_sfsf<string opc, Intrinsic IntID>
: SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
[(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
class qi_SInst_sfu5<string opc, Intrinsic IntID>
: MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
[(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
class sf_ALU64_u10_pos<string opc, Intrinsic IntID>
: ALU64_ri<(outs IntRegs:$dst), (ins u10Imm:$src1),
!strconcat("$dst = ", !strconcat(opc , "(#$src1):pos")),
[(set IntRegs:$dst, (IntID imm:$src1))]>;
class sf_ALU64_u10_neg<string opc, Intrinsic IntID>
: ALU64_ri<(outs IntRegs:$dst), (ins u10Imm:$src1),
!strconcat("$dst = ", !strconcat(opc , "(#$src1):neg")),
[(set IntRegs:$dst, (IntID imm:$src1))]>;
class df_ALU64_u10_pos<string opc, Intrinsic IntID>
: ALU64_ri<(outs DoubleRegs:$dst), (ins u10Imm:$src1),
!strconcat("$dst = ", !strconcat(opc , "(#$src1):pos")),
[(set DoubleRegs:$dst, (IntID imm:$src1))]>;
class df_ALU64_u10_neg<string opc, Intrinsic IntID>
: ALU64_ri<(outs DoubleRegs:$dst), (ins u10Imm:$src1),
!strconcat("$dst = ", !strconcat(opc , "(#$src1):neg")),
[(set DoubleRegs:$dst, (IntID imm:$src1))]>;
class di_MInst_diu6<string opc, Intrinsic IntID>
: MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
class di_MInst_diu4_rnd<string opc, Intrinsic IntID>
: MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd")),
[(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
class si_MInst_diu4_rnd_sat<string opc, Intrinsic IntID>
: MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd:sat")),
[(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
class si_SInst_diu4_sat<string opc, Intrinsic IntID>
: SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2),
!strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):sat")),
[(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
def HEXAGON_C4_fastcorner9:
qi_SInst_qiqi <"fastcorner9", int_hexagon_C4_fastcorner9>;
def HEXAGON_C4_fastcorner9_not:
qi_SInst_qiqi <"!fastcorner9", int_hexagon_C4_fastcorner9_not>;
def HEXAGON_M5_vrmpybuu:
di_MInst_didi <"vrmpybu", int_hexagon_M5_vrmpybuu>;
def HEXAGON_M5_vrmacbuu:
di_MInst_dididi_acc <"vrmpybu", int_hexagon_M5_vrmacbuu>;
def HEXAGON_M5_vrmpybsu:
di_MInst_didi <"vrmpybsu", int_hexagon_M5_vrmpybsu>;
def HEXAGON_M5_vrmacbsu:
di_MInst_dididi_acc <"vrmpybsu", int_hexagon_M5_vrmacbsu>;
def HEXAGON_M5_vmpybuu:
di_MInst_sisi <"vmpybu", int_hexagon_M5_vmpybuu>;
def HEXAGON_M5_vmpybsu:
di_MInst_sisi <"vmpybsu", int_hexagon_M5_vmpybsu>;
def HEXAGON_M5_vmacbuu:
di_MInst_disisi_acc <"vmpybu", int_hexagon_M5_vmacbuu>;
def HEXAGON_M5_vmacbsu:
di_MInst_disisi_acc <"vmpybsu", int_hexagon_M5_vmacbsu>;
def HEXAGON_M5_vdmpybsu:
di_MInst_didi_sat <"vdmpybsu", int_hexagon_M5_vdmpybsu>;
def HEXAGON_M5_vdmacbsu:
di_MInst_dididi_acc_sat <"vdmpybsu", int_hexagon_M5_vdmacbsu>;
def HEXAGON_A5_vaddhubs:
si_SInst_didi_sat <"vaddhub", int_hexagon_A5_vaddhubs>;
def HEXAGON_S5_popcountp:
si_SInst_di <"popcount", int_hexagon_S5_popcountp>;
def HEXAGON_S5_asrhub_rnd_sat_goodsyntax:
si_MInst_diu4_rnd_sat <"vasrhub", int_hexagon_S5_asrhub_rnd_sat_goodsyntax>;
def HEXAGON_S5_asrhub_sat:
si_SInst_diu4_sat <"vasrhub", int_hexagon_S5_asrhub_sat>;
def HEXAGON_S5_vasrhrnd_goodsyntax:
di_MInst_diu4_rnd <"vasrh", int_hexagon_S5_vasrhrnd_goodsyntax>;
def HEXAGON_S2_asr_i_p_rnd:
di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p_rnd>;
def HEXAGON_S2_asr_i_p_rnd_goodsyntax:
di_MInst_diu6 <"asrrnd", int_hexagon_S2_asr_i_p_rnd_goodsyntax>;
def HEXAGON_F2_sfadd:
sf_MInst_sfsf <"sfadd", int_hexagon_F2_sfadd>;
def HEXAGON_F2_sfsub:
sf_MInst_sfsf <"sfsub", int_hexagon_F2_sfsub>;
def HEXAGON_F2_sfmpy:
sf_MInst_sfsf <"sfmpy", int_hexagon_F2_sfmpy>;
def HEXAGON_F2_sffma:
sf_MInst_sfsfsf_acc <"sfmpy", int_hexagon_F2_sffma>;
def HEXAGON_F2_sffma_sc:
sf_MInst_sfsfsfsi_sc <"sfmpy", int_hexagon_F2_sffma_sc>;
def HEXAGON_F2_sffms:
sf_MInst_sfsfsf_nac <"sfmpy", int_hexagon_F2_sffms>;
def HEXAGON_F2_sffma_lib:
sf_MInst_sfsfsf_acc_lib <"sfmpy", int_hexagon_F2_sffma_lib>;
def HEXAGON_F2_sffms_lib:
sf_MInst_sfsfsf_nac_lib <"sfmpy", int_hexagon_F2_sffms_lib>;
def HEXAGON_F2_sfcmpeq:
qi_SInst_sfsf <"sfcmp.eq", int_hexagon_F2_sfcmpeq>;
def HEXAGON_F2_sfcmpgt:
qi_SInst_sfsf <"sfcmp.gt", int_hexagon_F2_sfcmpgt>;
def HEXAGON_F2_sfcmpge:
qi_SInst_sfsf <"sfcmp.ge", int_hexagon_F2_sfcmpge>;
def HEXAGON_F2_sfcmpuo:
qi_SInst_sfsf <"sfcmp.uo", int_hexagon_F2_sfcmpuo>;
def HEXAGON_F2_sfmax:
sf_MInst_sfsf <"sfmax", int_hexagon_F2_sfmax>;
def HEXAGON_F2_sfmin:
sf_MInst_sfsf <"sfmin", int_hexagon_F2_sfmin>;
def HEXAGON_F2_sfclass:
qi_SInst_sfu5 <"sfclass", int_hexagon_F2_sfclass>;
def HEXAGON_F2_sfimm_p:
sf_ALU64_u10_pos <"sfmake", int_hexagon_F2_sfimm_p>;
def HEXAGON_F2_sfimm_n:
sf_ALU64_u10_neg <"sfmake", int_hexagon_F2_sfimm_n>;
def HEXAGON_F2_sffixupn:
sf_MInst_sfsf <"sffixupn", int_hexagon_F2_sffixupn>;
def HEXAGON_F2_sffixupd:
sf_MInst_sfsf <"sffixupd", int_hexagon_F2_sffixupd>;
def HEXAGON_F2_sffixupr:
sf_SInst_sf <"sffixupr", int_hexagon_F2_sffixupr>;
def HEXAGON_F2_dfadd:
df_MInst_dfdf <"dfadd", int_hexagon_F2_dfadd>;
def HEXAGON_F2_dfsub:
df_MInst_dfdf <"dfsub", int_hexagon_F2_dfsub>;
def HEXAGON_F2_dfmpy:
df_MInst_dfdf <"dfmpy", int_hexagon_F2_dfmpy>;
def HEXAGON_F2_dffma:
df_MInst_dfdfdf_acc <"dfmpy", int_hexagon_F2_dffma>;
def HEXAGON_F2_dffms:
df_MInst_dfdfdf_nac <"dfmpy", int_hexagon_F2_dffms>;
def HEXAGON_F2_dffma_lib:
df_MInst_dfdfdf_acc_lib <"dfmpy", int_hexagon_F2_dffma_lib>;
def HEXAGON_F2_dffms_lib:
df_MInst_dfdfdf_nac_lib <"dfmpy", int_hexagon_F2_dffms_lib>;
def HEXAGON_F2_dffma_sc:
df_MInst_dfdfdfsi_sc <"dfmpy", int_hexagon_F2_dffma_sc>;
def HEXAGON_F2_dfmax:
df_MInst_dfdf <"dfmax", int_hexagon_F2_dfmax>;
def HEXAGON_F2_dfmin:
df_MInst_dfdf <"dfmin", int_hexagon_F2_dfmin>;
def HEXAGON_F2_dfcmpeq:
qi_ALU64_dfdf <"dfcmp.eq", int_hexagon_F2_dfcmpeq>;
def HEXAGON_F2_dfcmpgt:
qi_ALU64_dfdf <"dfcmp.gt", int_hexagon_F2_dfcmpgt>;
def HEXAGON_F2_dfcmpge:
qi_ALU64_dfdf <"dfcmp.ge", int_hexagon_F2_dfcmpge>;
def HEXAGON_F2_dfcmpuo:
qi_ALU64_dfdf <"dfcmp.uo", int_hexagon_F2_dfcmpuo>;
def HEXAGON_F2_dfclass:
qi_ALU64_dfu5 <"dfclass", int_hexagon_F2_dfclass>;
def HEXAGON_F2_dfimm_p:
df_ALU64_u10_pos <"dfmake", int_hexagon_F2_dfimm_p>;
def HEXAGON_F2_dfimm_n:
df_ALU64_u10_neg <"dfmake", int_hexagon_F2_dfimm_n>;
def HEXAGON_F2_dffixupn:
df_MInst_dfdf <"dffixupn", int_hexagon_F2_dffixupn>;
def HEXAGON_F2_dffixupd:
df_MInst_dfdf <"dffixupd", int_hexagon_F2_dffixupd>;
def HEXAGON_F2_dffixupr:
df_SInst_df <"dffixupr", int_hexagon_F2_dffixupr>;
def HEXAGON_F2_conv_sf2df:
df_SInst_sf <"convert_sf2df", int_hexagon_F2_conv_sf2df>;
def HEXAGON_F2_conv_df2sf:
sf_SInst_df <"convert_df2sf", int_hexagon_F2_conv_df2sf>;
def HEXAGON_F2_conv_uw2sf:
sf_SInst_si <"convert_uw2sf", int_hexagon_F2_conv_uw2sf>;
def HEXAGON_F2_conv_uw2df:
df_SInst_si <"convert_uw2df", int_hexagon_F2_conv_uw2df>;
def HEXAGON_F2_conv_w2sf:
sf_SInst_si <"convert_w2sf", int_hexagon_F2_conv_w2sf>;
def HEXAGON_F2_conv_w2df:
df_SInst_si <"convert_w2df", int_hexagon_F2_conv_w2df>;
def HEXAGON_F2_conv_ud2sf:
sf_SInst_di <"convert_ud2sf", int_hexagon_F2_conv_ud2sf>;
def HEXAGON_F2_conv_ud2df:
df_SInst_di <"convert_ud2df", int_hexagon_F2_conv_ud2df>;
def HEXAGON_F2_conv_d2sf:
sf_SInst_di <"convert_d2sf", int_hexagon_F2_conv_d2sf>;
def HEXAGON_F2_conv_d2df:
df_SInst_di <"convert_d2df", int_hexagon_F2_conv_d2df>;
def HEXAGON_F2_conv_sf2uw:
si_SInst_sf <"convert_sf2uw", int_hexagon_F2_conv_sf2uw>;
def HEXAGON_F2_conv_sf2w:
si_SInst_sf <"convert_sf2w", int_hexagon_F2_conv_sf2w>;
def HEXAGON_F2_conv_sf2ud:
di_SInst_sf <"convert_sf2ud", int_hexagon_F2_conv_sf2ud>;
def HEXAGON_F2_conv_sf2d:
di_SInst_sf <"convert_sf2d", int_hexagon_F2_conv_sf2d>;
def HEXAGON_F2_conv_df2uw:
si_SInst_df <"convert_df2uw", int_hexagon_F2_conv_df2uw>;
def HEXAGON_F2_conv_df2w:
si_SInst_df <"convert_df2w", int_hexagon_F2_conv_df2w>;
def HEXAGON_F2_conv_df2ud:
di_SInst_df <"convert_df2ud", int_hexagon_F2_conv_df2ud>;
def HEXAGON_F2_conv_df2d:
di_SInst_df <"convert_df2d", int_hexagon_F2_conv_df2d>;
def HEXAGON_F2_conv_sf2uw_chop:
si_SInst_sf <"convert_sf2uw", int_hexagon_F2_conv_sf2uw_chop>;
def HEXAGON_F2_conv_sf2w_chop:
si_SInst_sf <"convert_sf2w", int_hexagon_F2_conv_sf2w_chop>;
def HEXAGON_F2_conv_sf2ud_chop:
di_SInst_sf <"convert_sf2ud", int_hexagon_F2_conv_sf2ud_chop>;
def HEXAGON_F2_conv_sf2d_chop:
di_SInst_sf <"convert_sf2d", int_hexagon_F2_conv_sf2d_chop>;
def HEXAGON_F2_conv_df2uw_chop:
si_SInst_df <"convert_df2uw", int_hexagon_F2_conv_df2uw_chop>;
def HEXAGON_F2_conv_df2w_chop:
si_SInst_df <"convert_df2w", int_hexagon_F2_conv_df2w_chop>;
def HEXAGON_F2_conv_df2ud_chop:
di_SInst_df <"convert_df2ud", int_hexagon_F2_conv_df2ud_chop>;
def HEXAGON_F2_conv_df2d_chop:
di_SInst_df <"convert_df2d", int_hexagon_F2_conv_df2d_chop>;


@ -1,41 +0,0 @@
//===- HexagonMCInst.h - Hexagon sub-class of MCInst ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class extends MCInst to allow some VLIW annotation.
//
//===----------------------------------------------------------------------===//
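//
// Illustrative use (assumed, not taken verbatim from the printer): the asm
// printer tags packet boundaries before printing, e.g.
//   HexagonMCInst MCI;
//   MCI.setMI(MI);            // remember the originating MachineInstr
//   MCI.setStartPacket(true); // first instruction of the packet
//   MCI.setEndPacket(false);
// and the instruction printer queries isStartPacket()/isEndPacket() to
// decide where to emit the packet braces.
//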
#ifndef HEXAGONMCINST_H
#define HEXAGONMCINST_H
#include "llvm/MC/MCInst.h"
#include "llvm/CodeGen/MachineInstr.h"
namespace llvm {
class HexagonMCInst: public MCInst {
// Packet start and end markers
unsigned startPacket: 1, endPacket: 1;
const MachineInstr *MachineI;
public:
explicit HexagonMCInst(): MCInst(),
startPacket(0), endPacket(0), MachineI(0) {}
const MachineInstr* getMI() const { return MachineI; };
void setMI(const MachineInstr *MI) { MachineI = MI; };
bool isStartPacket() const { return (startPacket); };
bool isEndPacket() const { return (endPacket); };
void setStartPacket(bool yes) { startPacket = yes; };
void setEndPacket(bool yes) { endPacket = yes; };
};
}
#endif


@ -49,7 +49,7 @@ void llvm::HexagonLowerToMC(const MachineInstr* MI, MCInst& MCI,
switch (MO.getType()) {
default:
MI->dump();
llvm_unreachable("unknown operand type");
assert(0 && "unknown operand type");
case MachineOperand::MO_Register:
// Ignore all implicit register operands.
if (MO.isImplicit()) continue;


@ -1,649 +0,0 @@
//===----- HexagonNewValueJump.cpp - Hexagon Backend New Value Jump -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the NewValueJump pass in Hexagon.
// Ideally, we should merge this as a Peephole pass prior to register
// allocation, but because we have a spill in between the feeder and new value
// jump instructions, we are forced to run it after register allocation.
// Having said that, we should re-attempt to pull this earlier at some point
// in the future.
// The basic approach looks for the sequence of a predicated jump, the compare
// instruction that generates the predicate, and the feeder to the predicate.
// Once it finds all of them, it collapses the compare and jump instructions
// into a new value jump instruction.
//
//
//===----------------------------------------------------------------------===//
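// A minimal before/after sketch of the transformation (register numbers,
// label and exact opcodes are illustrative only):
//   Before:
//     r1 = memw(r2+#0)       // feeder
//     p0 = cmp.eq(r1, #0)    // compare producing the predicate
//     if (p0) jump .LBB0_2   // predicated jump
//   After (feeder and new-value jump end up in the same packet):
//     r1 = memw(r2+#0)
//     if (cmp.eq(r1.new, #0)) jump:t .LBB0_2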
#define DEBUG_TYPE "hexagon-nvj"
#include "llvm/PassSupport.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "Hexagon.h"
#include "HexagonTargetMachine.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonInstrInfo.h"
#include "HexagonMachineFunctionInfo.h"
#include <map>
#include "llvm/Support/CommandLine.h"
using namespace llvm;
STATISTIC(NumNVJGenerated, "Number of New Value Jump Instructions created");
cl::opt<int> DebugHexagonNewValueJump("debug-nvj", cl::Hidden, cl::desc(""));
static cl::opt<int>
DbgNVJCount("nvj-count", cl::init(-1), cl::Hidden, cl::desc(
"Maximum number of predicated jumps to be converted to New Value Jump"));
static cl::opt<bool> DisableNewValueJumps("disable-nvjump", cl::Hidden,
cl::ZeroOrMore, cl::init(false),
cl::desc("Disable New Value Jumps"));
namespace {
struct HexagonNewValueJump : public MachineFunctionPass {
const HexagonInstrInfo *QII;
const HexagonRegisterInfo *QRI;
public:
static char ID;
HexagonNewValueJump() : MachineFunctionPass(ID) { }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
const char *getPassName() const {
return "Hexagon NewValueJump";
}
virtual bool runOnMachineFunction(MachineFunction &Fn);
private:
};
} // end of anonymous namespace
char HexagonNewValueJump::ID = 0;
// We have identified that this II could be a feeder to NVJ;
// verify that it can be.
static bool canBeFeederToNewValueJump(const HexagonInstrInfo *QII,
const TargetRegisterInfo *TRI,
MachineBasicBlock::iterator II,
MachineBasicBlock::iterator end,
MachineBasicBlock::iterator skip,
MachineFunction &MF) {
// Predicated instruction can not be feeder to NVJ.
if (QII->isPredicated(II))
return false;
// Bail out if feederReg is a paired register (double regs in
// our case). One would think that we could check whether a given
// register cmpReg1 or cmpReg2 is a subregister of feederReg
// using -- if (QRI->isSubRegister(feederReg, cmpReg1)) -- logic
// before the callsite of this function.
// But we cannot, as the code comes in the following fashion:
// %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill>
// %R0<def> = KILL %R0, %D0<imp-use,kill>
// %P0<def> = CMPEQri %R0<kill>, 0
// Hence, we need to check if it's a KILL instruction.
if (II->getOpcode() == TargetOpcode::KILL)
return false;
// Make sure there is no 'def' or 'use' of any of the uses of the
// feeder insn between its definition (this MI) and the jump (jmpInst),
// skipping the compare (cmpInst).
// Here's the example.
// r21=memub(r22+r24<<#0)
// p0 = cmp.eq(r21, #0)
// r4=memub(r3+r21<<#0)
// if (p0.new) jump:t .LBB29_45
// Without this check, it will be converted into
// r4=memub(r3+r21<<#0)
// r21=memub(r22+r24<<#0)
// p0 = cmp.eq(r21, #0)
// if (p0.new) jump:t .LBB29_45
// and would result in WAR hazards if converted to a New Value Jump.
for (unsigned i = 0; i < II->getNumOperands(); ++i) {
if (II->getOperand(i).isReg() &&
(II->getOperand(i).isUse() || II->getOperand(i).isDef())) {
MachineBasicBlock::iterator localII = II;
++localII;
unsigned Reg = II->getOperand(i).getReg();
for (MachineBasicBlock::iterator localBegin = localII;
localBegin != end; ++localBegin) {
if (localBegin == skip ) continue;
// Check for Subregisters too.
if (localBegin->modifiesRegister(Reg, TRI) ||
localBegin->readsRegister(Reg, TRI))
return false;
}
}
}
return true;
}
// These are the common checks that need to be performed
// to determine if
// 1. compare instruction can be moved before jump.
// 2. feeder to the compare instruction can be moved before jump.
static bool commonChecksToProhibitNewValueJump(bool afterRA,
MachineBasicBlock::iterator MII) {
// If store in path, bail out.
if (MII->getDesc().mayStore())
return false;
// if call in path, bail out.
if (MII->getOpcode() == Hexagon::CALLv3)
return false;
// if NVJ is running prior to RA, do the following checks.
if (!afterRA) {
// The following Target Opcode instructions are spurious
// to new value jump. If they are in the path, bail out.
// KILL sets kill flag on the opcode. It also sets up a
// single register, out of pair.
// %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill>
// %R0<def> = KILL %R0, %D0<imp-use,kill>
// %P0<def> = CMPEQri %R0<kill>, 0
// PHI can be anything after RA.
// COPY can rematerialize things in between the feeder, compare and nvj.
if (MII->getOpcode() == TargetOpcode::KILL ||
MII->getOpcode() == TargetOpcode::PHI ||
MII->getOpcode() == TargetOpcode::COPY)
return false;
// The "use" and "def" of registers for the following pseudo Hexagon
// instructions are set up by individual passes in the backend. At this time,
// we don't know the scope of usage and definitions of these
// instructions.
if (MII->getOpcode() == Hexagon::TFR_condset_rr ||
MII->getOpcode() == Hexagon::TFR_condset_ii ||
MII->getOpcode() == Hexagon::TFR_condset_ri ||
MII->getOpcode() == Hexagon::TFR_condset_ir ||
MII->getOpcode() == Hexagon::LDriw_pred ||
MII->getOpcode() == Hexagon::STriw_pred)
return false;
}
return true;
}
static bool canCompareBeNewValueJump(const HexagonInstrInfo *QII,
const TargetRegisterInfo *TRI,
MachineBasicBlock::iterator II,
unsigned pReg,
bool secondReg,
bool optLocation,
MachineBasicBlock::iterator end,
MachineFunction &MF) {
MachineInstr *MI = II;
// If the second operand of the compare is an imm, make sure it's in the
// range specified by the arch.
if (!secondReg) {
int64_t v = MI->getOperand(2).getImm();
if (MI->getOpcode() == Hexagon::CMPGEri ||
(MI->getOpcode() == Hexagon::CMPGEUri && v > 0))
--v;
if (!(isUInt<5>(v) ||
((MI->getOpcode() == Hexagon::CMPEQri ||
MI->getOpcode() == Hexagon::CMPGTri ||
MI->getOpcode() == Hexagon::CMPGEri) &&
(v == -1))))
return false;
}
unsigned cmpReg1, cmpOp2;
cmpReg1 = MI->getOperand(1).getReg();
if (secondReg) {
cmpOp2 = MI->getOperand(2).getReg();
// Make sure that the second register does not come from a COPY.
// At the machine code level we don't need this, but if we decide
// to move new value jump prior to RA, we will need it.
MachineRegisterInfo &MRI = MF.getRegInfo();
if (secondReg && !TargetRegisterInfo::isPhysicalRegister(cmpOp2)) {
MachineInstr *def = MRI.getVRegDef(cmpOp2);
if (def->getOpcode() == TargetOpcode::COPY)
return false;
}
}
// Walk the instructions after the compare (predicate def) to the jump,
// and make sure the following conditions are satisfied.
++II ;
for (MachineBasicBlock::iterator localII = II; localII != end;
++localII) {
// Check 1.
// If "common" checks fail, bail out.
if (!commonChecksToProhibitNewValueJump(optLocation, localII))
return false;
// Check 2.
// If there is a def or use of predicate (result of compare), bail out.
if (localII->modifiesRegister(pReg, TRI) ||
localII->readsRegister(pReg, TRI))
return false;
// Check 3.
// If there is a def of any of the uses of the compare (operands of the compare),
// bail out.
// Eg.
// p0 = cmp.eq(r2, r0)
// r2 = r4
// if (p0.new) jump:t .LBB28_3
if (localII->modifiesRegister(cmpReg1, TRI) ||
(secondReg && localII->modifiesRegister(cmpOp2, TRI)))
return false;
}
return true;
}
// Given a compare operator, return a matching New Value Jump
// compare operator. Make sure that MI here is included in
// HexagonInstrInfo.cpp::isNewValueJumpCandidate
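// For example (illustrative): a CMPEQrr feeding a taken-hint jump maps to
// JMP_EQrrPt_nv_V4, which prints roughly as
//   if (cmp.eq(r1.new, r0)) jump:t <target>
// The *Ptneg variants are chosen when the compare immediate is negative
// (reg < 0 below).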
static unsigned getNewValueJumpOpcode(const MachineInstr *MI, int reg,
bool secondRegNewified) {
switch (MI->getOpcode()) {
case Hexagon::CMPEQrr:
return Hexagon::JMP_EQrrPt_nv_V4;
case Hexagon::CMPEQri: {
if (reg >= 0)
return Hexagon::JMP_EQriPt_nv_V4;
else
return Hexagon::JMP_EQriPtneg_nv_V4;
}
case Hexagon::CMPLTrr:
case Hexagon::CMPGTrr: {
if (secondRegNewified)
return Hexagon::JMP_GTrrdnPt_nv_V4;
else
return Hexagon::JMP_GTrrPt_nv_V4;
}
case Hexagon::CMPGEri: {
if (reg >= 1)
return Hexagon::JMP_GTriPt_nv_V4;
else
return Hexagon::JMP_GTriPtneg_nv_V4;
}
case Hexagon::CMPGTri: {
if (reg >= 0)
return Hexagon::JMP_GTriPt_nv_V4;
else
return Hexagon::JMP_GTriPtneg_nv_V4;
}
case Hexagon::CMPLTUrr:
case Hexagon::CMPGTUrr: {
if (secondRegNewified)
return Hexagon::JMP_GTUrrdnPt_nv_V4;
else
return Hexagon::JMP_GTUrrPt_nv_V4;
}
case Hexagon::CMPGTUri:
return Hexagon::JMP_GTUriPt_nv_V4;
case Hexagon::CMPGEUri: {
if (reg == 0)
return Hexagon::JMP_EQrrPt_nv_V4;
else
return Hexagon::JMP_GTUriPt_nv_V4;
}
default:
llvm_unreachable("Could not find matching New Value Jump instruction.");
}
// return *some value* to avoid compiler warning
return 0;
}
bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** Hexagon New Value Jump **********\n"
<< "********** Function: "
<< MF.getFunction()->getName() << "\n");
#if 0
// for now disable this, if we move NewValueJump before register
// allocation we need this information.
LiveVariables &LVs = getAnalysis<LiveVariables>();
#endif
QII = static_cast<const HexagonInstrInfo *>(MF.getTarget().getInstrInfo());
QRI =
static_cast<const HexagonRegisterInfo *>(MF.getTarget().getRegisterInfo());
if (!QRI->Subtarget.hasV4TOps() ||
DisableNewValueJumps) {
return false;
}
int nvjCount = DbgNVJCount;
int nvjGenerated = 0;
// Loop through all the bb's of the function
for (MachineFunction::iterator MBBb = MF.begin(), MBBe = MF.end();
MBBb != MBBe; ++MBBb) {
MachineBasicBlock* MBB = MBBb;
DEBUG(dbgs() << "** dumping bb ** "
<< MBB->getNumber() << "\n");
DEBUG(MBB->dump());
DEBUG(dbgs() << "\n" << "********** dumping instr bottom up **********\n");
bool foundJump = false;
bool foundCompare = false;
bool invertPredicate = false;
unsigned predReg = 0; // predicate reg of the jump.
unsigned cmpReg1 = 0;
int cmpOp2 = 0;
bool MO1IsKill = false;
bool MO2IsKill = false;
MachineBasicBlock::iterator jmpPos;
MachineBasicBlock::iterator cmpPos;
MachineInstr *cmpInstr = NULL, *jmpInstr = NULL;
MachineBasicBlock *jmpTarget = NULL;
bool afterRA = false;
bool isSecondOpReg = false;
bool isSecondOpNewified = false;
// Traverse the basic block - bottom up
for (MachineBasicBlock::iterator MII = MBB->end(), E = MBB->begin();
MII != E;) {
MachineInstr *MI = --MII;
if (MI->isDebugValue()) {
continue;
}
if ((nvjCount == 0) || (nvjCount > -1 && nvjCount <= nvjGenerated))
break;
DEBUG(dbgs() << "Instr: "; MI->dump(); dbgs() << "\n");
if (!foundJump &&
(MI->getOpcode() == Hexagon::JMP_c ||
MI->getOpcode() == Hexagon::JMP_cNot ||
MI->getOpcode() == Hexagon::JMP_cdnPt ||
MI->getOpcode() == Hexagon::JMP_cdnPnt ||
MI->getOpcode() == Hexagon::JMP_cdnNotPt ||
MI->getOpcode() == Hexagon::JMP_cdnNotPnt)) {
// This is where we will insert the compare and the
// instruction that feeds the compare.
jmpPos = MII;
jmpInstr = MI;
predReg = MI->getOperand(0).getReg();
afterRA = TargetRegisterInfo::isPhysicalRegister(predReg);
// If ifconverter had not messed up with the kill flags of the
// operands, the following check on the kill flag would suffice.
// if(!jmpInstr->getOperand(0).isKill()) break;
// This predicate register is live out of the BB;
// this would only work if we could actually use Live
// Variables analysis on phys regs - but LLVM does not
// provide LV analysis on phys regs.
//if(LVs.isLiveOut(predReg, *MBB)) break;
// Get all the successors of this block - which will always
// be 2. Check if the predicate register is live in to those
// successors. If yes, we cannot delete the predicate -
// I am doing this only because LLVM does not provide LiveOut
// information at the BB level.
bool predLive = false;
for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
SIE = MBB->succ_end(); SI != SIE; ++SI) {
MachineBasicBlock* succMBB = *SI;
if (succMBB->isLiveIn(predReg)) {
predLive = true;
}
}
if (predLive)
break;
jmpTarget = MI->getOperand(1).getMBB();
foundJump = true;
if (MI->getOpcode() == Hexagon::JMP_cNot ||
MI->getOpcode() == Hexagon::JMP_cdnNotPt ||
MI->getOpcode() == Hexagon::JMP_cdnNotPnt) {
invertPredicate = true;
}
continue;
}
// No new value jump if there is a barrier. A barrier has to be in its
// own packet. A barrier has zero operands. We conservatively bail out
// here if we see any instruction with zero operands.
if (foundJump && MI->getNumOperands() == 0)
break;
if (foundJump &&
!foundCompare &&
MI->getOperand(0).isReg() &&
MI->getOperand(0).getReg() == predReg) {
// Not all compares can be new value compare. Arch Spec: 7.6.1.1
if (QII->isNewValueJumpCandidate(MI)) {
assert((MI->getDesc().isCompare()) &&
"Only compare instruction can be collapsed into New Value Jump");
isSecondOpReg = MI->getOperand(2).isReg();
if (!canCompareBeNewValueJump(QII, QRI, MII, predReg, isSecondOpReg,
afterRA, jmpPos, MF))
break;
cmpInstr = MI;
cmpPos = MII;
foundCompare = true;
// We need cmpReg1 and cmpOp2(imm or reg) while building
// new value jump instruction.
cmpReg1 = MI->getOperand(1).getReg();
if (MI->getOperand(1).isKill())
MO1IsKill = true;
if (isSecondOpReg) {
cmpOp2 = MI->getOperand(2).getReg();
if (MI->getOperand(2).isKill())
MO2IsKill = true;
} else
cmpOp2 = MI->getOperand(2).getImm();
continue;
}
}
if (foundCompare && foundJump) {
// If "common" checks fail, bail out on this BB.
if (!commonChecksToProhibitNewValueJump(afterRA, MII))
break;
bool foundFeeder = false;
MachineBasicBlock::iterator feederPos = MII;
if (MI->getOperand(0).isReg() &&
MI->getOperand(0).isDef() &&
(MI->getOperand(0).getReg() == cmpReg1 ||
(isSecondOpReg &&
MI->getOperand(0).getReg() == (unsigned) cmpOp2))) {
unsigned feederReg = MI->getOperand(0).getReg();
// First try to see if we can get the feeder from the first operand
// of the compare. If we cannot, and if secondOpReg is true
// (the second operand of the compare is also a register), try that one.
// TODO: Try to come up with some heuristic to figure out which
// feeder would benefit.
if (feederReg == cmpReg1) {
if (!canBeFeederToNewValueJump(QII, QRI, MII, jmpPos, cmpPos, MF)) {
if (!isSecondOpReg)
break;
else
continue;
} else
foundFeeder = true;
}
if (!foundFeeder &&
isSecondOpReg &&
feederReg == (unsigned) cmpOp2)
if (!canBeFeederToNewValueJump(QII, QRI, MII, jmpPos, cmpPos, MF))
break;
if (isSecondOpReg) {
// In case of CMPLT, or CMPLTU, or EQ with the second register
// to newify, swap the operands.
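// (Illustrative: cmp.lt(r1, r2) is handled as cmp.gt(r2, r1), and for
// cmp.eq the operands are symmetric, so the feeder can still end up in
// the newified (second) position after the swap.)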
if (cmpInstr->getOpcode() == Hexagon::CMPLTrr ||
cmpInstr->getOpcode() == Hexagon::CMPLTUrr ||
(cmpInstr->getOpcode() == Hexagon::CMPEQrr &&
feederReg == (unsigned) cmpOp2)) {
unsigned tmp = cmpReg1;
bool tmpIsKill = MO1IsKill;
cmpReg1 = cmpOp2;
MO1IsKill = MO2IsKill;
cmpOp2 = tmp;
MO2IsKill = tmpIsKill;
}
// Now that we have swapped the operands, all we need to check is
// whether the second operand (after the swap) is the feeder.
// If it is, make a note.
if (feederReg == (unsigned)cmpOp2)
isSecondOpNewified = true;
}
// Now that we are moving the feeder closer to the jump,
// make sure we are respecting the kill values of
// the operands of the feeder.
bool updatedIsKill = false;
for (unsigned i = 0; i < MI->getNumOperands(); i++) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse()) {
unsigned feederReg = MO.getReg();
for (MachineBasicBlock::iterator localII = feederPos,
end = jmpPos; localII != end; localII++) {
MachineInstr *localMI = localII;
for (unsigned j = 0; j < localMI->getNumOperands(); j++) {
MachineOperand &localMO = localMI->getOperand(j);
if (localMO.isReg() && localMO.isUse() &&
localMO.isKill() && feederReg == localMO.getReg()) {
// We found a kill of a register that the feeder uses;
// transfer the kill flag onto the feeder's operand.
localMO.setIsKill(false);
MO.setIsKill();
updatedIsKill = true;
break;
}
}
if (updatedIsKill) break;
}
}
if (updatedIsKill) break;
}
MBB->splice(jmpPos, MI->getParent(), MI);
MBB->splice(jmpPos, MI->getParent(), cmpInstr);
DebugLoc dl = MI->getDebugLoc();
MachineInstr *NewMI;
assert((QII->isNewValueJumpCandidate(cmpInstr)) &&
"This compare is not a New Value Jump candidate.");
unsigned opc = getNewValueJumpOpcode(cmpInstr, cmpOp2,
isSecondOpNewified);
if (invertPredicate)
opc = QII->getInvertedPredicatedOpcode(opc);
// Manage the conversions from CMPGEUri to either CMPEQrr
// or CMPGTUri properly. See Arch spec for CMPGEUri instructions.
// This has to be after the getNewValueJumpOpcode function call as
// second operand of the compare could be modified in this logic.
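// Worked example (illustrative): cmp.geu(r1, #4) is equivalent to
// cmp.gtu(r1, #3), so the immediate is simply decremented; cmp.geu(r1, #0)
// is always true for unsigned values, which is why it degenerates into the
// register-equality form (comparing r1 against itself) when cmpOp2 == 0.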
if (cmpInstr->getOpcode() == Hexagon::CMPGEUri) {
if (cmpOp2 == 0) {
cmpOp2 = cmpReg1;
MO2IsKill = MO1IsKill;
isSecondOpReg = true;
} else
--cmpOp2;
}
// Manage the conversions from CMPGEri to CMPGTri properly.
// See Arch spec for CMPGEri instructions.
if (cmpInstr->getOpcode() == Hexagon::CMPGEri)
--cmpOp2;
if (isSecondOpReg) {
NewMI = BuildMI(*MBB, jmpPos, dl,
QII->get(opc))
.addReg(cmpReg1, getKillRegState(MO1IsKill))
.addReg(cmpOp2, getKillRegState(MO2IsKill))
.addMBB(jmpTarget);
}
else {
NewMI = BuildMI(*MBB, jmpPos, dl,
QII->get(opc))
.addReg(cmpReg1, getKillRegState(MO1IsKill))
.addImm(cmpOp2)
.addMBB(jmpTarget);
}
assert(NewMI && "New Value Jump Instruction Not created!");
if (cmpInstr->getOperand(0).isReg() &&
cmpInstr->getOperand(0).isKill())
cmpInstr->getOperand(0).setIsKill(false);
if (cmpInstr->getOperand(1).isReg() &&
cmpInstr->getOperand(1).isKill())
cmpInstr->getOperand(1).setIsKill(false);
cmpInstr->eraseFromParent();
jmpInstr->eraseFromParent();
++nvjGenerated;
++NumNVJGenerated;
break;
}
}
}
}
return true;
}
FunctionPass *llvm::createHexagonNewValueJump() {
return new HexagonNewValueJump();
}


@ -63,7 +63,6 @@ const uint16_t* HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction
return CalleeSavedRegsV2;
case HexagonSubtarget::V3:
case HexagonSubtarget::V4:
case HexagonSubtarget::V5:
return CalleeSavedRegsV3;
}
llvm_unreachable("Callee saved registers requested for unknown architecture "
@ -110,7 +109,6 @@ HexagonRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
return CalleeSavedRegClassesV2;
case HexagonSubtarget::V3:
case HexagonSubtarget::V4:
case HexagonSubtarget::V5:
return CalleeSavedRegClassesV3;
}
llvm_unreachable("Callee saved register classes requested for unknown "
@ -181,13 +179,11 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// r0 = add(r30, #10000)
// r0 = memw(r0)
if ( (MI.getOpcode() == Hexagon::LDriw) ||
(MI.getOpcode() == Hexagon::LDrid) ||
(MI.getOpcode() == Hexagon::LDrih) ||
(MI.getOpcode() == Hexagon::LDriuh) ||
(MI.getOpcode() == Hexagon::LDrib) ||
(MI.getOpcode() == Hexagon::LDriub) ||
(MI.getOpcode() == Hexagon::LDriw_f) ||
(MI.getOpcode() == Hexagon::LDrid_f)) {
(MI.getOpcode() == Hexagon::LDrid) ||
(MI.getOpcode() == Hexagon::LDrih) ||
(MI.getOpcode() == Hexagon::LDriuh) ||
(MI.getOpcode() == Hexagon::LDrib) ||
(MI.getOpcode() == Hexagon::LDriub) ) {
unsigned dstReg = (MI.getOpcode() == Hexagon::LDrid) ?
*getSubRegisters(MI.getOperand(0).getReg()) :
MI.getOperand(0).getReg();
@ -207,13 +203,10 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(i).ChangeToRegister(dstReg, false, false, true);
MI.getOperand(i+1).ChangeToImmediate(0);
} else if ((MI.getOpcode() == Hexagon::STriw_indexed) ||
(MI.getOpcode() == Hexagon::STriw) ||
} else if ((MI.getOpcode() == Hexagon::STriw) ||
(MI.getOpcode() == Hexagon::STrid) ||
(MI.getOpcode() == Hexagon::STrih) ||
(MI.getOpcode() == Hexagon::STrib) ||
(MI.getOpcode() == Hexagon::STrid_f) ||
(MI.getOpcode() == Hexagon::STriw_f)) {
(MI.getOpcode() == Hexagon::STrib)) {
// For stores, we need a reserved register. Change
// memw(r30 + #10000) = r0 to:
//


@ -131,9 +131,6 @@ let Namespace = "Hexagon" in {
def SA1 : Rc<2, "sa1">, DwarfRegNum<[69]>;
def LC1 : Rc<3, "lc1">, DwarfRegNum<[70]>;
def M0 : Rc<6, "m0">, DwarfRegNum<[71]>;
def M1 : Rc<7, "m1">, DwarfRegNum<[72]>;
def PC : Rc<9, "pc">, DwarfRegNum<[32]>; // is the Dwarf number correct?
def GP : Rc<11, "gp">, DwarfRegNum<[33]>; // is the Dwarf number correct?
}
@ -143,13 +140,15 @@ let Namespace = "Hexagon" in {
// FIXME: the register order should be defined in terms of the preferred
// allocation order...
//
def IntRegs : RegisterClass<"Hexagon", [i32,f32], 32,
def IntRegs : RegisterClass<"Hexagon", [i32], 32,
(add (sequence "R%u", 0, 9),
(sequence "R%u", 12, 28),
R10, R11, R29, R30, R31)> {
}
def DoubleRegs : RegisterClass<"Hexagon", [i64,f64], 64,
def DoubleRegs : RegisterClass<"Hexagon", [i64], 64,
(add (sequence "D%u", 0, 4),
(sequence "D%u", 6, 13), D5, D14, D15)> {
let SubRegClasses = [(IntRegs subreg_loreg, subreg_hireg)];
@ -163,7 +162,6 @@ def PredRegs : RegisterClass<"Hexagon", [i1], 32, (add (sequence "P%u", 0, 3))>
def CRRegs : RegisterClass<"Hexagon", [i32], 32,
(add (sequence "LC%u", 0, 1),
(sequence "SA%u", 0, 1),
(sequence "M%u", 0, 1), PC, GP)> {
(sequence "SA%u", 0, 1), PC, GP)> {
let Size = 32;
}


@ -13,6 +13,7 @@ def LSUNIT : FuncUnit;
def MUNIT : FuncUnit;
def SUNIT : FuncUnit;
// Itinerary classes
def ALU32 : InstrItinClass;
def ALU64 : InstrItinClass;
@ -23,25 +24,23 @@ def LD : InstrItinClass;
def M : InstrItinClass;
def ST : InstrItinClass;
def S : InstrItinClass;
def SYS : InstrItinClass;
def MARKER : InstrItinClass;
def PSEUDO : InstrItinClass;
def HexagonItineraries :
ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>,
InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>,
InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>,
InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>,
InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>,
InstrItinData<SYS , [InstrStage<1, [LSUNIT]>]>,
InstrItinData<MARKER , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
]>;
ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>,
InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>,
InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>,
InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>,
InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>,
InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
]>;
//===----------------------------------------------------------------------===//
// V4 Machine Info +


@ -23,6 +23,7 @@
// | SLOT3 | XTYPE ALU32 J CR |
// |===========|==================================================|
// Functional Units.
def SLOT0 : FuncUnit;
def SLOT1 : FuncUnit;
@ -33,26 +34,22 @@ def SLOT3 : FuncUnit;
def NV_V4 : InstrItinClass;
def MEM_V4 : InstrItinClass;
// ALU64/M/S Instruction classes of V2 are collectively known as XTYPE in V4.
def PREFIX : InstrItinClass;
def HexagonItinerariesV4 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3], [], [
InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<CR , [InstrStage<1, [SLOT3]>]>,
InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<JR , [InstrStage<1, [SLOT2]>]>,
InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>,
InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>,
InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<SYS , [InstrStage<1, [SLOT0]>]>,
InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>,
InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>,
InstrItinData<MARKER , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>
]>;
def HexagonItinerariesV4 : ProcessorItineraries<
[SLOT0, SLOT1, SLOT2, SLOT3], [], [
InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>,
InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>,
InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>,
InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>,
InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<JR , [InstrStage<1, [SLOT2]>]>,
InstrItinData<CR , [InstrStage<1, [SLOT3]>]>,
InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]>
]>;
//===----------------------------------------------------------------------===//
// Hexagon V4 Resource Definitions -


@ -14,7 +14,7 @@
// {p0 = cmp.eq(r0,r1)}
// {r3 = mux(p0,#1,#3)}
//
// This requires two packets. If we use .new predicated immediate transfers,
// then we can do this in a single packet, e.g.:
//
// {p0 = cmp.eq(r0,r1)
@ -81,124 +81,40 @@ bool HexagonSplitTFRCondSets::runOnMachineFunction(MachineFunction &Fn) {
for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
++MII) {
MachineInstr *MI = MII;
int Opc1, Opc2;
switch(MI->getOpcode()) {
case Hexagon::TFR_condset_rr:
case Hexagon::TFR_condset_rr_f:
case Hexagon::TFR_condset_rr64_f: {
int DestReg = MI->getOperand(0).getReg();
int SrcReg1 = MI->getOperand(2).getReg();
int SrcReg2 = MI->getOperand(3).getReg();
int Opc = MI->getOpcode();
if (Opc == Hexagon::TFR_condset_rr) {
if (MI->getOpcode() == Hexagon::TFR_condset_rr ||
MI->getOpcode() == Hexagon::TFR_condset_rr_f) {
Opc1 = Hexagon::TFR_cPt;
Opc2 = Hexagon::TFR_cNotPt;
}
else if (MI->getOpcode() == Hexagon::TFR_condset_rr64_f) {
Opc1 = Hexagon::TFR64_cPt;
Opc2 = Hexagon::TFR64_cNotPt;
}
int DestReg = MI->getOperand(0).getReg();
int SrcReg1 = MI->getOperand(2).getReg();
int SrcReg2 = MI->getOperand(3).getReg();
// Minor optimization: do not emit the predicated copy if the source
// and the destination is the same register.
if (DestReg != SrcReg1) {
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Opc1),
DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg1);
}
if (DestReg != SrcReg2) {
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Opc2),
DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg2);
}
MII = MBB->erase(MI);
--MII;
break;
// Minor optimization: do not emit the predicated copy if the source and
// the destination is the same register
if (DestReg != SrcReg1) {
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_cPt),
DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg1);
}
case Hexagon::TFR_condset_ri:
case Hexagon::TFR_condset_ri_f: {
int DestReg = MI->getOperand(0).getReg();
int SrcReg1 = MI->getOperand(2).getReg();
// Do not emit the predicated copy if the source and the destination
// is the same register.
if (DestReg != SrcReg1) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::TFR_cPt), DestReg).
addReg(MI->getOperand(1).getReg()).addReg(SrcReg1);
}
if (MI->getOpcode() == Hexagon::TFR_condset_ri ) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::TFRI_cNotPt), DestReg).
addReg(MI->getOperand(1).getReg()).
addImm(MI->getOperand(3).getImm());
} else if (MI->getOpcode() == Hexagon::TFR_condset_ri_f ) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::TFRI_cNotPt_f), DestReg).
addReg(MI->getOperand(1).getReg()).
addFPImm(MI->getOperand(3).getFPImm());
}
MII = MBB->erase(MI);
--MII;
break;
}
case Hexagon::TFR_condset_ir:
case Hexagon::TFR_condset_ir_f: {
int DestReg = MI->getOperand(0).getReg();
int SrcReg2 = MI->getOperand(3).getReg();
if (MI->getOpcode() == Hexagon::TFR_condset_ir ) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::TFRI_cPt), DestReg).
addReg(MI->getOperand(1).getReg()).
addImm(MI->getOperand(2).getImm());
} else if (MI->getOpcode() == Hexagon::TFR_condset_ir_f ) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::TFRI_cPt_f), DestReg).
addReg(MI->getOperand(1).getReg()).
addFPImm(MI->getOperand(2).getFPImm());
}
// Do not emit the predicated copy if the source and
// the destination is the same register.
if (DestReg != SrcReg2) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::TFR_cNotPt), DestReg).
addReg(MI->getOperand(1).getReg()).addReg(SrcReg2);
}
MII = MBB->erase(MI);
--MII;
break;
}
case Hexagon::TFR_condset_ii:
case Hexagon::TFR_condset_ii_f: {
int DestReg = MI->getOperand(0).getReg();
int SrcReg1 = MI->getOperand(1).getReg();
if (MI->getOpcode() == Hexagon::TFR_condset_ii ) {
int Immed1 = MI->getOperand(2).getImm();
int Immed2 = MI->getOperand(3).getImm();
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cPt),
DestReg).addReg(SrcReg1).addImm(Immed1);
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cNotPt),
DestReg).addReg(SrcReg1).addImm(Immed2);
} else if (MI->getOpcode() == Hexagon::TFR_condset_ii_f ) {
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::TFRI_cPt_f), DestReg).
addReg(SrcReg1).
addFPImm(MI->getOperand(2).getFPImm());
BuildMI(*MBB, MII, MI->getDebugLoc(),
TII->get(Hexagon::TFRI_cNotPt_f), DestReg).
addReg(SrcReg1).
addFPImm(MI->getOperand(3).getFPImm());
}
MII = MBB->erase(MI);
--MII;
break;
if (DestReg != SrcReg2) {
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_cNotPt),
DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg2);
}
MII = MBB->erase(MI);
--MII;
} else if (Opc == Hexagon::TFR_condset_ii) {
int DestReg = MI->getOperand(0).getReg();
int SrcReg1 = MI->getOperand(1).getReg();
int Immed1 = MI->getOperand(2).getImm();
int Immed2 = MI->getOperand(3).getImm();
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cPt),
DestReg).addReg(SrcReg1).addImm(Immed1);
BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cNotPt),
DestReg).addReg(SrcReg1).addImm(Immed2);
MII = MBB->erase(MI);
--MII;
}
}
}
return true;
}
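For context, each TFR_condset pseudo handled in the loop above is lowered into a pair of predicated transfers and then erased. A minimal sketch of the intended rewrite, assuming the operand layout used in the code (operand 0 = destination, 1 = predicate, 2 and 3 = the two sources); this is an illustration, not text from the patch:

    // rD = TFR_condset_rr(p, rS1, rS2)   is split into:
    //   if (p)  rD = rS1                 // emitted as Hexagon::TFR_cPt
    //   if (!p) rD = rS2                 // emitted as Hexagon::TFR_cNotPt
    // with the copy skipped when rD already equals the source, and the
    // original pseudo removed from the basic block.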


@ -13,7 +13,6 @@
#include "HexagonSubtarget.h"
#include "Hexagon.h"
#include "HexagonRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
@ -32,15 +31,9 @@ EnableMemOps(
cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed,
cl::desc("Generate V4 MEMOP in code generation for Hexagon target"));
static cl::opt<bool>
EnableIEEERndNear(
"enable-hexagon-ieee-rnd-near",
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("Generate non-chopped conversion from fp to int for Hexagon target."));
HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
HexagonGenSubtargetInfo(TT, CPU, FS),
HexagonArchVersion(V2),
HexagonArchVersion(V1),
CPUString(CPU.str()) {
ParseSubtargetFeatures(CPU, FS);
@ -52,8 +45,6 @@ HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
break;
case HexagonSubtarget::V4:
break;
case HexagonSubtarget::V5:
break;
default:
llvm_unreachable("Unknown Architecture Version.");
}
@ -68,10 +59,4 @@ HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
UseMemOps = true;
else
UseMemOps = false;
if (EnableIEEERndNear)
ModeIEEERndNear = true;
else
ModeIEEERndNear = false;
}


@ -22,18 +22,16 @@
#include "HexagonGenSubtargetInfo.inc"
#define Hexagon_SMALL_DATA_THRESHOLD 8
#define Hexagon_SLOTS 4
namespace llvm {
class HexagonSubtarget : public HexagonGenSubtargetInfo {
bool UseMemOps;
bool ModeIEEERndNear;
public:
enum HexagonArchEnum {
V1, V2, V3, V4, V5
V1, V2, V3, V4
};
HexagonArchEnum HexagonArchVersion;
@ -57,11 +55,7 @@ public:
bool hasV3TOps () const { return HexagonArchVersion >= V3; }
bool hasV3TOpsOnly () const { return HexagonArchVersion == V3; }
bool hasV4TOps () const { return HexagonArchVersion >= V4; }
bool hasV4TOpsOnly () const { return HexagonArchVersion == V4; }
bool useMemOps () const { return HexagonArchVersion >= V4 && UseMemOps; }
bool hasV5TOps () const { return HexagonArchVersion >= V5; }
bool hasV5TOpsOnly () const { return HexagonArchVersion == V5; }
bool modeIEEERndNear () const { return ModeIEEERndNear; }
bool isSubtargetV2() const { return HexagonArchVersion == V2;}
const std::string &getCPUString () const { return CPUString; }
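As orientation for the interface being removed above: hasV5TOps() and modeIEEERndNear() are the predicates a lowering routine would consult before emitting the V5 round-to-nearest FP-to-int conversions. A minimal sketch of a hypothetical caller (not part of this patch):

    #include "HexagonSubtarget.h"

    // Hypothetical helper: use the non-chopped (round-to-nearest) conversions
    // only on V5 parts with -enable-hexagon-ieee-rnd-near in effect.
    static bool useRndNearConversions(const HexagonSubtarget &ST) {
      return ST.hasV5TOps() && ST.modeIEEERndNear();
    }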


@ -55,9 +55,7 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT,
CodeModel::Model CM,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
DataLayout("e-p:32:32:32-"
"i64:64:64-i32:32:32-i16:16:16-i1:32:32-"
"f64:64:64-f32:32:32-a0:0-n32") ,
DataLayout("e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-a0:0") ,
Subtarget(TT, CPU, FS), InstrInfo(Subtarget), TLInfo(*this),
TSInfo(*this),
FrameLowering(Subtarget),
@ -134,16 +132,11 @@ bool HexagonPassConfig::addPreEmitPass() {
PM.add(createHexagonFixupHwLoops());
}
PM.add(createHexagonNewValueJump());
// Expand Spill code for predicate registers.
PM.add(createHexagonExpandPredSpillCode(getHexagonTargetMachine()));
// Split up TFRcondsets into conditional transfers.
PM.add(createHexagonSplitTFRCondSets(getHexagonTargetMachine()));
// Create Packets.
PM.add(createHexagonPacketizer());
return false;
}

File diff suppressed because it is too large


@ -15,7 +15,6 @@
#include "Hexagon.h"
#include "HexagonAsmPrinter.h"
#include "HexagonInstPrinter.h"
#include "HexagonMCInst.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
@ -38,50 +37,20 @@ StringRef HexagonInstPrinter::getRegName(unsigned RegNo) const {
void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
StringRef Annot) {
printInst((const HexagonMCInst*)(MI), O, Annot);
}
void HexagonInstPrinter::printInst(const HexagonMCInst *MI, raw_ostream &O,
StringRef Annot) {
const char packetPadding[] = " ";
const char startPacket = '{',
endPacket = '}';
// TODO: add outer HW loop when it's supported too.
if (MI->getOpcode() == Hexagon::ENDLOOP0) {
// Ending a hardware loop is different from ending a regular packet.
assert(MI->isEndPacket() && "Loop end must also end the packet");
MCInst Nop;
if (MI->isStartPacket()) {
// There must be a packet to end a loop.
// FIXME: when shuffling is always run, this shouldn't be needed.
HexagonMCInst Nop;
StringRef NoAnnot;
Nop.setOpcode (Hexagon::NOP);
Nop.setStartPacket (MI->isStartPacket());
printInst (&Nop, O, NoAnnot);
}
// Close the packet.
if (MI->isEndPacket())
O << packetPadding << endPacket;
printInstruction(MI, O);
}
else {
// Prefix the insn opening the packet.
if (MI->isStartPacket())
O << packetPadding << startPacket << '\n';
printInstruction(MI, O);
// Suffix the insn closing the packet.
if (MI->isEndPacket())
// Suffix the packet in a new line always, since the GNU assembler has
// issues with a closing brace on the same line as CONST{32,64}.
O << '\n' << packetPadding << endPacket;
O << packetPadding << startPacket << '\n';
Nop.setOpcode(Hexagon::NOP);
printInstruction(&Nop, O);
O << packetPadding << endPacket;
}
printInstruction(MI, O);
printAnnotation(O, Annot);
}
@ -96,22 +65,22 @@ void HexagonInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
} else if(MO.isImm()) {
printImmOperand(MI, OpNo, O);
} else {
llvm_unreachable("Unknown operand");
assert(false && "Unknown operand");
}
}
void HexagonInstPrinter::printImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
void HexagonInstPrinter::printImmOperand
(const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
O << MI->getOperand(OpNo).getImm();
}
void HexagonInstPrinter::printExtOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
raw_ostream &O) const {
O << MI->getOperand(OpNo).getImm();
}
void HexagonInstPrinter::printUnsignedImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
void HexagonInstPrinter::printUnsignedImmOperand
(const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
O << MI->getOperand(OpNo).getImm();
}
@ -120,13 +89,13 @@ void HexagonInstPrinter::printNegImmOperand(const MCInst *MI, unsigned OpNo,
O << -MI->getOperand(OpNo).getImm();
}
void HexagonInstPrinter::printNOneImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
void HexagonInstPrinter::printNOneImmOperand
(const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
O << -1;
}
void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
void HexagonInstPrinter::printMEMriOperand
(const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
const MCOperand& MO0 = MI->getOperand(OpNo);
const MCOperand& MO1 = MI->getOperand(OpNo + 1);
@ -134,8 +103,8 @@ void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned OpNo,
O << " + #" << MO1.getImm();
}
void HexagonInstPrinter::printFrameIndexOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
void HexagonInstPrinter::printFrameIndexOperand
(const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
const MCOperand& MO0 = MI->getOperand(OpNo);
const MCOperand& MO1 = MI->getOperand(OpNo + 1);


@ -14,7 +14,6 @@
#ifndef HEXAGONINSTPRINTER_H
#define HEXAGONINSTPRINTER_H
#include "HexagonMCInst.h"
#include "llvm/MC/MCInstPrinter.h"
namespace llvm {
@ -26,7 +25,6 @@ namespace llvm {
: MCInstPrinter(MAI, MII, MRI) {}
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
void printInst(const HexagonMCInst *MI, raw_ostream &O, StringRef Annot);
virtual StringRef getOpcodeName(unsigned Opcode) const;
void printInstruction(const MCInst *MI, raw_ostream &O);
StringRef getRegName(unsigned RegNo) const;


@ -23,41 +23,14 @@ namespace llvm {
/// instruction info tracks.
///
namespace HexagonII {
// *** The code below must match HexagonInstrFormat*.td *** //
// Insn types.
// *** Must match HexagonInstrFormat*.td ***
enum Type {
TypePSEUDO = 0,
TypeALU32 = 1,
TypeCR = 2,
TypeJR = 3,
TypeJ = 4,
TypeLD = 5,
TypeST = 6,
TypeSYSTEM = 7,
TypeXTYPE = 8,
TypeMEMOP = 9,
TypeNV = 10,
TypePREFIX = 30, // Such as extenders.
TypeMARKER = 31 // Such as end of a HW loop.
};
// MCInstrDesc TSFlags
// *** Must match HexagonInstrFormat*.td ***
enum {
// This 5-bit field describes the insn type.
TypePos = 0,
TypeMask = 0x1f,
// Solo instructions.
SoloPos = 5,
SoloMask = 0x1,
// Predicated instructions.
PredicatedPos = 6,
PredicatedPos = 1,
PredicatedMask = 0x1
};
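To make the two layouts in this hunk concrete: the removed encoding stores the instruction type in bits [0,4] of TSFlags with solo and predicated flags at bits 5 and 6, while the reverted-to encoding keeps only a predicated bit at position 1. A minimal, self-contained sketch (not from the patch) of decoding the removed layout:

    #include <cstdint>

    namespace HexagonII {
      enum { TypePos = 0, TypeMask = 0x1f,
             SoloPos = 5, SoloMask = 0x1,
             PredicatedPos = 6, PredicatedMask = 0x1 };
    }

    // Pull the instruction type and the predicated bit out of an
    // instruction description's TSFlags word.
    static unsigned getInsnType(uint64_t TSFlags) {
      return (TSFlags >> HexagonII::TypePos) & HexagonII::TypeMask;
    }
    static bool isPredicatedInsn(uint64_t TSFlags) {
      return (TSFlags >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
    }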


@ -1,26 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate conversion from double precision floating point
; to 32-bit int value in IEEE compliant mode in V5.
; CHECK: r{{[0-9]+}} = convert_df2w(r{{[0-9]+}}:{{[0-9]+}}):chop
define i32 @main() nounwind {
entry:
%retval = alloca i32, align 4
%i = alloca i32, align 4
%a = alloca double, align 8
%b = alloca double, align 8
%c = alloca double, align 8
store i32 0, i32* %retval
store double 1.540000e+01, double* %a, align 8
store double 9.100000e+00, double* %b, align 8
%0 = load double* %a, align 8
%1 = load double* %b, align 8
%add = fadd double %0, %1
store double %add, double* %c, align 8
%2 = load double* %c, align 8
%conv = fptosi double %2 to i32
store i32 %conv, i32* %i, align 4
%3 = load i32* %i, align 4
ret i32 %3
}


@ -1,27 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate conversion from double precision floating point
; to 64-bit integer value in IEEE compliant mode in V5.
; CHECK: r{{[0-9]+}} = convert_df2d(r{{[0-9]+}}:{{[0-9]+}}):chop
define i32 @main() nounwind {
entry:
%retval = alloca i32, align 4
%i = alloca i64, align 8
%a = alloca double, align 8
%b = alloca double, align 8
%c = alloca double, align 8
store i32 0, i32* %retval
store double 1.540000e+01, double* %a, align 8
store double 9.100000e+00, double* %b, align 8
%0 = load double* %a, align 8
%1 = load double* %b, align 8
%add = fadd double %0, %1
store double %add, double* %c, align 8
%2 = load double* %c, align 8
%conv = fptosi double %2 to i64
store i64 %conv, i64* %i, align 8
%3 = load i64* %i, align 8
%conv1 = trunc i64 %3 to i32
ret i32 %conv1
}


@ -1,26 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate conversion from single precision floating point
; to 32-bit int value in IEEE compliant mode in V5.
; CHECK: r{{[0-9]+}} = convert_sf2w(r{{[0-9]+}}):chop
define i32 @main() nounwind {
entry:
%retval = alloca i32, align 4
%i = alloca i32, align 4
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
store i32 0, i32* %retval
store float 0x402ECCCCC0000000, float* %a, align 4
store float 0x4022333340000000, float* %b, align 4
%0 = load float* %a, align 4
%1 = load float* %b, align 4
%add = fadd float %0, %1
store float %add, float* %c, align 4
%2 = load float* %c, align 4
%conv = fptosi float %2 to i32
store i32 %conv, i32* %i, align 4
%3 = load i32* %i, align 4
ret i32 %3
}


@ -1,27 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate conversion from single precision floating point
; to 64-bit int value in IEEE compliant mode in V5.
; CHECK: r{{[0-9]+}} = convert_sf2d(r{{[0-9]+}})
define i32 @main() nounwind {
entry:
%retval = alloca i32, align 4
%i = alloca i64, align 8
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
store i32 0, i32* %retval
store float 0x402ECCCCC0000000, float* %a, align 4
store float 0x4022333340000000, float* %b, align 4
%0 = load float* %a, align 4
%1 = load float* %b, align 4
%add = fadd float %0, %1
store float %add, float* %c, align 4
%2 = load float* %c, align 4
%conv = fptosi float %2 to i64
store i64 %conv, i64* %i, align 8
%3 = load i64* %i, align 8
%conv1 = trunc i64 %3 to i32
ret i32 %conv1
}


@ -1,19 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate double precision floating point add in V5.
; CHECK: r{{[0-9]+}} = dfadd(r{{[0-9]+}}, r{{[0-9]+}})
define i32 @main() nounwind {
entry:
%a = alloca double, align 8
%b = alloca double, align 8
%c = alloca double, align 8
store double 1.540000e+01, double* %a, align 8
store double 9.100000e+00, double* %b, align 8
%0 = load double* %a, align 8
%1 = load double* %b, align 8
%add = fadd double %0, %1
store double %add, double* %c, align 8
ret i32 0
}


@ -1,18 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate double precision floating point multiply in V5.
; CHECK: r{{[0-9]+}}:{{[0-9]+}} = dfmpy(r{{[0-9]+}}:{{[0-9]+}}, r{{[0-9]+}}:{{[0-9]+}})
define i32 @main() nounwind {
entry:
%a = alloca double, align 8
%b = alloca double, align 8
%c = alloca double, align 8
store double 1.540000e+01, double* %a, align 8
store double 9.100000e+00, double* %b, align 8
%0 = load double* %b, align 8
%1 = load double* %a, align 8
%mul = fmul double %0, %1
store double %mul, double* %c, align 8
ret i32 0
}


@ -1,26 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -enable-hexagon-ieee-rnd-near < %s | FileCheck %s
; Check that we generate conversion from double precision floating point
; to 32-bit int value in IEEE rounding to the nearest mode in V5.
; CHECK: r{{[0-9]+}} = convert_df2w(r{{[0-9]+}}:{{[0-9]+}})
define i32 @main() nounwind {
entry:
%retval = alloca i32, align 4
%i = alloca i32, align 4
%a = alloca double, align 8
%b = alloca double, align 8
%c = alloca double, align 8
store i32 0, i32* %retval
store double 1.540000e+01, double* %a, align 8
store double 9.100000e+00, double* %b, align 8
%0 = load double* %a, align 8
%1 = load double* %b, align 8
%add = fadd double %0, %1
store double %add, double* %c, align 8
%2 = load double* %c, align 8
%conv = fptosi double %2 to i32
store i32 %conv, i32* %i, align 4
%3 = load i32* %i, align 4
ret i32 %3
}


@ -1,18 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate double precision floating point subtract in V5.
; CHECK: r{{[0-9]+}} = dfsub(r{{[0-9]+}}, r{{[0-9]+}})
define i32 @main() nounwind {
entry:
%a = alloca double, align 8
%b = alloca double, align 8
%c = alloca double, align 8
store double 1.540000e+01, double* %a, align 8
store double 9.100000e+00, double* %b, align 8
%0 = load double* %b, align 8
%1 = load double* %a, align 8
%sub = fsub double %0, %1
store double %sub, double* %c, align 8
ret i32 0
}


@ -1,19 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; Check that we generate dual stores in one packet in V4
; CHECK: {
; CHECK-NEXT: memw(r{{[0-9]+}} + #{{[0-9]+}} = r{{[0-9]+}}
; CHECK-NEXT: memw(r{{[0-9]+}} + #{{[0-9]+}} = r{{[0-9]+}}
; CHECK-NEXT: }
@Reg = global i32 0, align 4
define i32 @main() nounwind {
entry:
%number= alloca i32, align 4
store i32 500000, i32* %number, align 4
%number1= alloca i32, align 4
store i32 100000, i32* %number1, align 4
ret i32 0
}


@ -1,18 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate sp floating point add in V5.
; CHECK: r{{[0-9]+}} = sfadd(r{{[0-9]+}}, r{{[0-9]+}})
define i32 @main() nounwind {
entry:
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
store float 0x402ECCCCC0000000, float* %a, align 4
store float 0x4022333340000000, float* %b, align 4
%0 = load float* %a, align 4
%1 = load float* %b, align 4
%add = fadd float %0, %1
store float %add, float* %c, align 4
ret i32 0
}


@ -1,37 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate floating point compare in V5
; CHECK: p{{[0-2]+}} = sfcmp.{{.}}
define i32 @foo(float %y) nounwind {
entry:
%retval = alloca i32, align 4
%y.addr = alloca float, align 4
store float %y, float* %y.addr, align 4
%0 = load float* %y.addr, align 4
%cmp = fcmp ogt float %0, 0x406AD7EFA0000000
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
store i32 1, i32* %retval
br label %return
if.else: ; preds = %entry
store i32 2, i32* %retval
br label %return
return: ; preds = %if.else, %if.then
%1 = load i32* %retval
ret i32 %1
}
define i32 @main() nounwind {
entry:
%retval = alloca i32, align 4
%a = alloca float, align 4
store i32 0, i32* %retval
store float 0x40012E0A00000000, float* %a, align 4
%0 = load float* %a, align 4
%call = call i32 @foo(float %0)
ret i32 %call
}


@ -1,26 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate conversion from double precision floating point
; to 32-bit int value in IEEE rounding to the nearest mode in V5.
; CHECK: r{{[0-9]+}} = convert_sf2w(r{{[0-9]+}})
define i32 @main() nounwind {
entry:
%retval = alloca i32, align 4
%i = alloca i32, align 4
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
store i32 0, i32* %retval
store float 0x402ECCCCC0000000, float* %a, align 4
store float 0x4022333340000000, float* %b, align 4
%0 = load float* %a, align 4
%1 = load float* %b, align 4
%add = fadd float %0, %1
store float %add, float* %c, align 4
%2 = load float* %c, align 4
%conv = fptosi float %2 to i32
store i32 %conv, i32* %i, align 4
%3 = load i32* %i, align 4
ret i32 %3
}


@ -1,19 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate single precision floating point multiply in V5.
; CHECK: r{{[0-9]+}} = sfmpy(r{{[0-9]+}}, r{{[0-9]+}})
define i32 @main() nounwind {
entry:
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
store float 0x402ECCCCC0000000, float* %a, align 4
store float 0x4022333340000000, float* %b, align 4
%0 = load float* %b, align 4
%1 = load float* %a, align 4
%mul = fmul float %0, %1
store float %mul, float* %c, align 4
ret i32 0
}


@ -1,18 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate sp floating point subtract in V5.
; CHECK: r{{[0-9]+}} = sfsub(r{{[0-9]+}}, r{{[0-9]+}})
define i32 @main() nounwind {
entry:
%a = alloca float, align 4
%b = alloca float, align 4
%c = alloca float, align 4
store float 0x402ECCCCC0000000, float* %a, align 4
store float 0x4022333340000000, float* %b, align 4
%0 = load float* %b, align 4
%1 = load float* %a, align 4
%sub = fsub float %0, %1
store float %sub, float* %c, align 4
ret i32 0
}


@ -1,16 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; Check that we generate fused logical and with shift instruction.
; CHECK: r{{[0-9]+}} = and(#15, lsr(r{{[0-9]+}}, #{{[0-9]+}})
define i32 @main(i16* %a, i16* %b) nounwind {
entry:
%0 = load i16* %a, align 2
%conv1 = sext i16 %0 to i32
%shr1 = ashr i32 %conv1, 3
%and1 = and i32 %shr1, 15
%conv2 = trunc i32 %and1 to i16
store i16 %conv2, i16* %b, align 2
ret i32 0
}


@ -1,14 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate integer multiply accumulate.
; CHECK: r{{[0-9]+}} += mpyi(r{{[0-9]+}}, r{{[0-9]+}})
define i32 @main(i32* %a, i32* %b) nounwind {
entry:
%0 = load i32* %a, align 4
%div = udiv i32 %0, 10000
%rem = urem i32 %div, 10
store i32 %rem, i32* %b, align 4
ret i32 0
}


@ -1,33 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; Check that we generate new value jump.
@i = global i32 0, align 4
@j = global i32 10, align 4
define i32 @foo(i32 %a) nounwind {
entry:
; CHECK: if (cmp.eq(r{{[0-9]+}}.new, #0)) jump{{.}}
%addr1 = alloca i32, align 4
%addr2 = alloca i32, align 4
%0 = load i32* @i, align 4
store i32 %0, i32* %addr1, align 4
call void @bar(i32 1, i32 2)
%1 = load i32* @j, align 4
%tobool = icmp ne i32 %1, 0
br i1 %tobool, label %if.then, label %if.else
if.then:
call void @baz(i32 1, i32 2)
br label %if.end
if.else:
call void @guy(i32 10, i32 20)
br label %if.end
if.end:
ret i32 0
}
declare void @guy(i32, i32)
declare void @bar(i32, i32)
declare void @baz(i32, i32)


@ -1,30 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; Check that we generate new value jump, both registers, with one
; of the registers as new.
@Reg = common global i8 0, align 1
define i32 @main() nounwind {
entry:
; CHECK: if (cmp.eq(r{{[0-9]+}}.new, r{{[0-9]+}})) jump{{.}}
%Reg2 = alloca i8, align 1
%0 = load i8* %Reg2, align 1
%conv0 = zext i8 %0 to i32
%1 = load i8* @Reg, align 1
%conv1 = zext i8 %1 to i32
%tobool = icmp sle i32 %conv0, %conv1
br i1 %tobool, label %if.then, label %if.else
if.then:
call void @bar(i32 1, i32 2)
br label %if.end
if.else:
call void @baz(i32 10, i32 20)
br label %if.end
if.end:
ret i32 0
}
declare void @bar(i32, i32)
declare void @baz(i32, i32)


@ -1,22 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; Check that we generate new value store packet in V4
@i = global i32 0, align 4
@j = global i32 10, align 4
@k = global i32 100, align 4
define i32 @main() nounwind {
entry:
; CHECK: memw(r{{[0-9]+}} + #{{[0-9]+}}) = r{{[0-9]+}}.new
%number1 = alloca i32, align 4
%number2 = alloca i32, align 4
%number3 = alloca i32, align 4
%0 = load i32 * @i, align 4
store i32 %0, i32* %number1, align 4
%1 = load i32 * @j, align 4
store i32 %1, i32* %number2, align 4
%2 = load i32 * @k, align 4
store i32 %2, i32* %number3, align 4
ret i32 %0
}


@ -1,15 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Optimize fabsf to clrbit in V5.
; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
define float @my_fabsf(float %x) nounwind {
entry:
%x.addr = alloca float, align 4
store float %x, float* %x.addr, align 4
%0 = load float* %x.addr, align 4
%call = call float @fabsf(float %0)
ret float %call
}
declare float @fabsf(float)


@ -1,24 +0,0 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Optimize fneg to togglebit in V5.
define float @bar(float %x) nounwind {
entry:
; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
%x.addr = alloca float, align 4
store float %x, float* %x.addr, align 4
%0 = load float* %x.addr, align 4
%sub = fsub float -0.000000e+00, %0
ret float %sub
}
define float @baz(float %x) nounwind {
entry:
; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
%x.addr = alloca float, align 4
store float %x, float* %x.addr, align 4
%0 = load float* %x.addr, align 4
%conv = fpext float %0 to double
%mul = fmul double %conv, -1.000000e+00
%conv1 = fptrunc double %mul to float
ret float %conv1
}