[MSP430] Generate EABI-compliant libcalls

Updates the MSP430 target to generate EABI-compatible libcall names.
As a byproduct, adjusts the hardware multiplier options available in
the MSP430 target, adds support for promotion of the ISD::MUL operation
for 8-bit integers, and correctly marks R11 as clobbered by call instructions.

Patch by Andrew Wygle.

Differential Revision: https://reviews.llvm.org/D32676

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@302820 91177308-0d34-0410-b5e6-96231b3b80d8
Vadzim Dambrouski 2017-05-11 19:56:14 +00:00
parent ded71be5b1
commit 29165da1cd
11 changed files with 967 additions and 40 deletions
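
For orientation, here is a minimal sketch of the user-visible change (mirroring the new libcalls.ll test below; names per EABI Table 9): a 16-bit multiply now lowers to the EABI helper __mspabi_mpyi rather than the old __mulhi3-family names.

; sketch: default lowering of an i16 multiply (no hardware multiplier)
define i16 @square(i16 %x) {
entry:
  %r = mul i16 %x, %x   ; expected codegen: call #__mspabi_mpyi
  ret i16 %r
}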

@@ -201,6 +201,10 @@ namespace CallingConv {
/// shaders)
AMDGPU_HS = 93,
+/// Calling convention used for special MSP430 rtlib functions
+/// which have an "optimized" convention using additional registers.
+MSP430_BUILTIN = 94,
/// The highest possible calling convention ID. Must be some 2^k - 1.
MaxID = 1023
};

@@ -4187,6 +4187,7 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
ReplacedNode(Node);
break;
}
+case ISD::MUL:
case ISD::SDIV:
case ISD::SREM:
case ISD::UDIV:
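
Marking ISD::MUL as Promote lets the legalizer widen an i8 multiply to the i16 form, which the MSP430 backend already handles, instead of expanding it inline. Conceptually, in IR terms (a sketch, not the literal SelectionDAG nodes):

; the low 8 bits of a product depend only on the low 8 bits of the
; operands, so widening preserves the result
%a16 = zext i8 %a to i16
%b16 = zext i8 %b to i16
%m16 = mul i16 %a16, %b16   ; on MSP430 this becomes a libcall, e.g. __mspabi_mpyi
%m8  = trunc i16 %m16 to i8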

@@ -40,21 +40,24 @@ using namespace llvm;
typedef enum {
NoHWMult,
-HWMultIntr,
-HWMultNoIntr
+HWMult16,
+HWMult32,
+HWMultF5
} HWMultUseMode;
static cl::opt<HWMultUseMode>
-HWMultMode("msp430-hwmult-mode", cl::Hidden,
+HWMultMode("mhwmult", cl::Hidden,
cl::desc("Hardware multiplier use mode"),
-cl::init(HWMultNoIntr),
+cl::init(NoHWMult),
cl::values(
-clEnumValN(NoHWMult, "no",
+clEnumValN(NoHWMult, "none",
"Do not use hardware multiplier"),
-clEnumValN(HWMultIntr, "interrupts",
-"Assume hardware multiplier can be used inside interrupts"),
-clEnumValN(HWMultNoIntr, "use",
-"Assume hardware multiplier cannot be used inside interrupts")));
+clEnumValN(HWMult16, "16bit",
+"Use 16-bit hardware multiplier"),
+clEnumValN(HWMult32, "32bit",
+"Use 32-bit hardware multiplier"),
+clEnumValN(HWMultF5, "f5series",
+"Use F5 series hardware multiplier")));
MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM,
const MSP430Subtarget &STI)
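
The option is renamed from msp430-hwmult-mode to mhwmult, matching msp430-gcc's spelling, and its values now select a multiplier family rather than interrupt behavior. Sample invocations, mirroring the RUN lines of the tests added below (-mhwmult=none is the default):

; RUN: llc -mhwmult=none     < %s | FileCheck %s  ; software: __mspabi_mpyi
; RUN: llc -mhwmult=16bit    < %s | FileCheck %s  ; __mspabi_mpyi_hw
; RUN: llc -mhwmult=32bit    < %s | FileCheck %s  ; __mspabi_mpyl_hw32 for i32
; RUN: llc -mhwmult=f5series < %s | FileCheck %s  ; __mspabi_mpyi_f5hw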
@@ -131,29 +134,29 @@ MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
// FIXME: Implement efficiently multiplication by a constant
-setOperationAction(ISD::MUL, MVT::i8, Expand);
-setOperationAction(ISD::MULHS, MVT::i8, Expand);
-setOperationAction(ISD::MULHU, MVT::i8, Expand);
-setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
-setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
-setOperationAction(ISD::MUL, MVT::i16, Expand);
+setOperationAction(ISD::MUL, MVT::i8, Promote);
+setOperationAction(ISD::MULHS, MVT::i8, Promote);
+setOperationAction(ISD::MULHU, MVT::i8, Promote);
+setOperationAction(ISD::SMUL_LOHI, MVT::i8, Promote);
+setOperationAction(ISD::UMUL_LOHI, MVT::i8, Promote);
+setOperationAction(ISD::MUL, MVT::i16, LibCall);
setOperationAction(ISD::MULHS, MVT::i16, Expand);
setOperationAction(ISD::MULHU, MVT::i16, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
-setOperationAction(ISD::UDIV, MVT::i8, Expand);
-setOperationAction(ISD::UDIVREM, MVT::i8, Expand);
-setOperationAction(ISD::UREM, MVT::i8, Expand);
-setOperationAction(ISD::SDIV, MVT::i8, Expand);
-setOperationAction(ISD::SDIVREM, MVT::i8, Expand);
-setOperationAction(ISD::SREM, MVT::i8, Expand);
-setOperationAction(ISD::UDIV, MVT::i16, Expand);
+setOperationAction(ISD::UDIV, MVT::i8, Promote);
+setOperationAction(ISD::UDIVREM, MVT::i8, Promote);
+setOperationAction(ISD::UREM, MVT::i8, Promote);
+setOperationAction(ISD::SDIV, MVT::i8, Promote);
+setOperationAction(ISD::SDIVREM, MVT::i8, Promote);
+setOperationAction(ISD::SREM, MVT::i8, Promote);
+setOperationAction(ISD::UDIV, MVT::i16, LibCall);
setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
-setOperationAction(ISD::UREM, MVT::i16, Expand);
-setOperationAction(ISD::SDIV, MVT::i16, Expand);
+setOperationAction(ISD::UREM, MVT::i16, LibCall);
+setOperationAction(ISD::SDIV, MVT::i16, LibCall);
setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
-setOperationAction(ISD::SREM, MVT::i16, Expand);
+setOperationAction(ISD::SREM, MVT::i16, LibCall);
// varargs support
setOperationAction(ISD::VASTART, MVT::Other, Custom);
@@ -162,15 +165,183 @@ MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::VACOPY, MVT::Other, Expand);
setOperationAction(ISD::JumpTable, MVT::i16, Custom);
-// Libcalls names.
-if (HWMultMode == HWMultIntr) {
-setLibcallName(RTLIB::MUL_I8, "__mulqi3hw");
-setLibcallName(RTLIB::MUL_I16, "__mulhi3hw");
-} else if (HWMultMode == HWMultNoIntr) {
-setLibcallName(RTLIB::MUL_I8, "__mulqi3hw_noint");
-setLibcallName(RTLIB::MUL_I16, "__mulhi3hw_noint");
-}
// EABI Libcalls - EABI Section 6.2
const struct {
const RTLIB::Libcall Op;
const char * const Name;
const ISD::CondCode Cond;
} LibraryCalls[] = {
// Floating point conversions - EABI Table 6
{ RTLIB::FPROUND_F64_F32, "__mspabi_cvtdf", ISD::SETCC_INVALID },
{ RTLIB::FPEXT_F32_F64, "__mspabi_cvtfd", ISD::SETCC_INVALID },
// The following is NOT implemented in libgcc
//{ RTLIB::FPTOSINT_F64_I16, "__mspabi_fixdi", ISD::SETCC_INVALID },
{ RTLIB::FPTOSINT_F64_I32, "__mspabi_fixdli", ISD::SETCC_INVALID },
{ RTLIB::FPTOSINT_F64_I64, "__mspabi_fixdlli", ISD::SETCC_INVALID },
// The following is NOT implemented in libgcc
//{ RTLIB::FPTOUINT_F64_I16, "__mspabi_fixdu", ISD::SETCC_INVALID },
{ RTLIB::FPTOUINT_F64_I32, "__mspabi_fixdul", ISD::SETCC_INVALID },
{ RTLIB::FPTOUINT_F64_I64, "__mspabi_fixdull", ISD::SETCC_INVALID },
// The following is NOT implemented in libgcc
//{ RTLIB::FPTOSINT_F32_I16, "__mspabi_fixfi", ISD::SETCC_INVALID },
{ RTLIB::FPTOSINT_F32_I32, "__mspabi_fixfli", ISD::SETCC_INVALID },
{ RTLIB::FPTOSINT_F32_I64, "__mspabi_fixflli", ISD::SETCC_INVALID },
// The following is NOT implemented in libgcc
//{ RTLIB::FPTOUINT_F32_I16, "__mspabi_fixfu", ISD::SETCC_INVALID },
{ RTLIB::FPTOUINT_F32_I32, "__mspabi_fixful", ISD::SETCC_INVALID },
{ RTLIB::FPTOUINT_F32_I64, "__mspabi_fixfull", ISD::SETCC_INVALID },
// TODO The following IS implemented in libgcc
//{ RTLIB::SINTTOFP_I16_F64, "__mspabi_fltid", ISD::SETCC_INVALID },
{ RTLIB::SINTTOFP_I32_F64, "__mspabi_fltlid", ISD::SETCC_INVALID },
// TODO The following IS implemented in libgcc but is not in the EABI
{ RTLIB::SINTTOFP_I64_F64, "__mspabi_fltllid", ISD::SETCC_INVALID },
// TODO The following IS implemented in libgcc
//{ RTLIB::UINTTOFP_I16_F64, "__mspabi_fltud", ISD::SETCC_INVALID },
{ RTLIB::UINTTOFP_I32_F64, "__mspabi_fltuld", ISD::SETCC_INVALID },
// The following IS implemented in libgcc but is not in the EABI
{ RTLIB::UINTTOFP_I64_F64, "__mspabi_fltulld", ISD::SETCC_INVALID },
// TODO The following IS implemented in libgcc
//{ RTLIB::SINTTOFP_I16_F32, "__mspabi_fltif", ISD::SETCC_INVALID },
{ RTLIB::SINTTOFP_I32_F32, "__mspabi_fltlif", ISD::SETCC_INVALID },
// TODO The following IS implemented in libgcc but is not in the EABI
{ RTLIB::SINTTOFP_I64_F32, "__mspabi_fltllif", ISD::SETCC_INVALID },
// TODO The following IS implemented in libgcc
//{ RTLIB::UINTTOFP_I16_F32, "__mspabi_fltuf", ISD::SETCC_INVALID },
{ RTLIB::UINTTOFP_I32_F32, "__mspabi_fltulf", ISD::SETCC_INVALID },
// The following IS implemented in libgcc but is not in the EABI
{ RTLIB::UINTTOFP_I64_F32, "__mspabi_fltullf", ISD::SETCC_INVALID },
// Floating point comparisons - EABI Table 7
{ RTLIB::OEQ_F64, "__mspabi_cmpd", ISD::SETEQ },
{ RTLIB::UNE_F64, "__mspabi_cmpd", ISD::SETNE },
{ RTLIB::OGE_F64, "__mspabi_cmpd", ISD::SETGE },
{ RTLIB::OLT_F64, "__mspabi_cmpd", ISD::SETLT },
{ RTLIB::OLE_F64, "__mspabi_cmpd", ISD::SETLE },
{ RTLIB::OGT_F64, "__mspabi_cmpd", ISD::SETGT },
{ RTLIB::OEQ_F32, "__mspabi_cmpf", ISD::SETEQ },
{ RTLIB::UNE_F32, "__mspabi_cmpf", ISD::SETNE },
{ RTLIB::OGE_F32, "__mspabi_cmpf", ISD::SETGE },
{ RTLIB::OLT_F32, "__mspabi_cmpf", ISD::SETLT },
{ RTLIB::OLE_F32, "__mspabi_cmpf", ISD::SETLE },
{ RTLIB::OGT_F32, "__mspabi_cmpf", ISD::SETGT },
// Floating point arithmetic - EABI Table 8
{ RTLIB::ADD_F64, "__mspabi_addd", ISD::SETCC_INVALID },
{ RTLIB::ADD_F32, "__mspabi_addf", ISD::SETCC_INVALID },
{ RTLIB::DIV_F64, "__mspabi_divd", ISD::SETCC_INVALID },
{ RTLIB::DIV_F32, "__mspabi_divf", ISD::SETCC_INVALID },
{ RTLIB::MUL_F64, "__mspabi_mpyd", ISD::SETCC_INVALID },
{ RTLIB::MUL_F32, "__mspabi_mpyf", ISD::SETCC_INVALID },
{ RTLIB::SUB_F64, "__mspabi_subd", ISD::SETCC_INVALID },
{ RTLIB::SUB_F32, "__mspabi_subf", ISD::SETCC_INVALID },
// The following are NOT implemented in libgcc
// { RTLIB::NEG_F64, "__mspabi_negd", ISD::SETCC_INVALID },
// { RTLIB::NEG_F32, "__mspabi_negf", ISD::SETCC_INVALID },
// TODO: SLL/SRA/SRL are in libgcc, RLL isn't
// Universal Integer Operations - EABI Table 9
{ RTLIB::SDIV_I16, "__mspabi_divi", ISD::SETCC_INVALID },
{ RTLIB::SDIV_I32, "__mspabi_divli", ISD::SETCC_INVALID },
{ RTLIB::SDIV_I64, "__mspabi_divlli", ISD::SETCC_INVALID },
{ RTLIB::UDIV_I16, "__mspabi_divu", ISD::SETCC_INVALID },
{ RTLIB::UDIV_I32, "__mspabi_divul", ISD::SETCC_INVALID },
{ RTLIB::UDIV_I64, "__mspabi_divull", ISD::SETCC_INVALID },
{ RTLIB::SREM_I16, "__mspabi_remi", ISD::SETCC_INVALID },
{ RTLIB::SREM_I32, "__mspabi_remli", ISD::SETCC_INVALID },
{ RTLIB::SREM_I64, "__mspabi_remlli", ISD::SETCC_INVALID },
{ RTLIB::UREM_I16, "__mspabi_remu", ISD::SETCC_INVALID },
{ RTLIB::UREM_I32, "__mspabi_remul", ISD::SETCC_INVALID },
{ RTLIB::UREM_I64, "__mspabi_remull", ISD::SETCC_INVALID },
};
for (const auto &LC : LibraryCalls) {
setLibcallName(LC.Op, LC.Name);
if (LC.Cond != ISD::SETCC_INVALID)
setCmpLibcallCC(LC.Op, LC.Cond);
}
if (HWMultMode == HWMult16) {
const struct {
const RTLIB::Libcall Op;
const char * const Name;
} LibraryCalls[] = {
// Integer Multiply - EABI Table 9
{ RTLIB::MUL_I16, "__mspabi_mpyi_hw" },
{ RTLIB::MUL_I32, "__mspabi_mpyl_hw" },
{ RTLIB::MUL_I64, "__mspabi_mpyll_hw" },
// TODO The __mspabi_mpysl*_hw functions ARE implemented in libgcc
// TODO The __mspabi_mpyul*_hw functions ARE implemented in libgcc
};
for (const auto &LC : LibraryCalls) {
setLibcallName(LC.Op, LC.Name);
}
} else if (HWMultMode == HWMult32) {
const struct {
const RTLIB::Libcall Op;
const char * const Name;
} LibraryCalls[] = {
// Integer Multiply - EABI Table 9
{ RTLIB::MUL_I16, "__mspabi_mpyi_hw" },
{ RTLIB::MUL_I32, "__mspabi_mpyl_hw32" },
{ RTLIB::MUL_I64, "__mspabi_mpyll_hw32" },
// TODO The __mspabi_mpysl*_hw32 functions ARE implemented in libgcc
// TODO The __mspabi_mpyul*_hw32 functions ARE implemented in libgcc
};
for (const auto &LC : LibraryCalls) {
setLibcallName(LC.Op, LC.Name);
}
} else if (HWMultMode == HWMultF5) {
const struct {
const RTLIB::Libcall Op;
const char * const Name;
} LibraryCalls[] = {
// Integer Multiply - EABI Table 9
{ RTLIB::MUL_I16, "__mspabi_mpyi_f5hw" },
{ RTLIB::MUL_I32, "__mspabi_mpyl_f5hw" },
{ RTLIB::MUL_I64, "__mspabi_mpyll_f5hw" },
// TODO The __mspabi_mpysl*_f5hw functions ARE implemented in libgcc
// TODO The __mspabi_mpyul*_f5hw functions ARE implemented in libgcc
};
for (const auto &LC : LibraryCalls) {
setLibcallName(LC.Op, LC.Name);
}
} else { // NoHWMult
const struct {
const RTLIB::Libcall Op;
const char * const Name;
} LibraryCalls[] = {
// Integer Multiply - EABI Table 9
{ RTLIB::MUL_I16, "__mspabi_mpyi" },
{ RTLIB::MUL_I32, "__mspabi_mpyl" },
{ RTLIB::MUL_I64, "__mspabi_mpyll" },
// The __mspabi_mpysl* functions are NOT implemented in libgcc
// The __mspabi_mpyul* functions are NOT implemented in libgcc
};
for (const auto &LC : LibraryCalls) {
setLibcallName(LC.Op, LC.Name);
}
setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::MSP430_BUILTIN);
}
// Several of the runtime library functions use a special calling conv
setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::MSP430_BUILTIN);
setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN);
// TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll
setMinFunctionAlignment(1);
setPrefFunctionAlignment(2);
}
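
A sketch of the special convention in action, grounded in the table above (RTLIB::UDIV_I64 maps to __mspabi_divull and gets CallingConv::MSP430_BUILTIN): both 64-bit operands are passed entirely in registers R8-R15, per the BuiltinRegList introduced below, rather than partly on the stack.

; sketch: 64-bit unsigned division uses the optimized builtin convention
define i64 @div64(i64 %a, i64 %b) {
entry:
  %q = udiv i64 %a, %b   ; expected codegen: call #__mspabi_divull,
                         ; with %a and %b split across R8-R15
  ret i64 %q
}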
@@ -281,10 +452,27 @@ template<typename ArgT>
static void AnalyzeArguments(CCState &State,
SmallVectorImpl<CCValAssign> &ArgLocs,
const SmallVectorImpl<ArgT> &Args) {
-static const MCPhysReg RegList[] = {
+static const MCPhysReg CRegList[] = {
MSP430::R12, MSP430::R13, MSP430::R14, MSP430::R15
};
-static const unsigned NbRegs = array_lengthof(RegList);
+static const unsigned CNbRegs = array_lengthof(CRegList);
+static const MCPhysReg BuiltinRegList[] = {
+MSP430::R8, MSP430::R9, MSP430::R10, MSP430::R11,
+MSP430::R12, MSP430::R13, MSP430::R14, MSP430::R15
+};
+static const unsigned BuiltinNbRegs = array_lengthof(BuiltinRegList);
+ArrayRef<MCPhysReg> RegList;
+unsigned NbRegs;
+bool Builtin = (State.getCallingConv() == CallingConv::MSP430_BUILTIN);
+if (Builtin) {
+RegList = BuiltinRegList;
+NbRegs = BuiltinNbRegs;
+} else {
+RegList = CRegList;
+NbRegs = CNbRegs;
+}
if (State.isVarArg()) {
AnalyzeVarArgs(State, Args);
@@ -294,6 +482,11 @@ static void AnalyzeArguments(CCState &State,
SmallVector<unsigned, 4> ArgsParts;
ParseFunctionArgs(Args, ArgsParts);
+if (Builtin) {
+assert(ArgsParts.size() == 2 &&
+"Builtin calling convention requires two arguments");
+}
unsigned RegsLeft = NbRegs;
bool UsedStack = false;
unsigned ValNo = 0;
@@ -323,6 +516,11 @@ static void AnalyzeArguments(CCState &State,
unsigned Parts = ArgsParts[i];
+if (Builtin) {
+assert(Parts == 4 &&
+"Builtin calling convention requires 64-bit arguments");
+}
if (!UsedStack && Parts == 2 && RegsLeft == 1) {
// Special case for 32-bit register split, see EABI section 3.3.3
unsigned Reg = State.AllocateReg(RegList);
@@ -400,6 +598,7 @@ MSP430TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
switch (CallConv) {
default:
llvm_unreachable("Unsupported calling convention");
+case CallingConv::MSP430_BUILTIN:
case CallingConv::Fast:
case CallingConv::C:
return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
@@ -598,7 +797,6 @@ MSP430TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
/// LowerCCCCallTo - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
// TODO: sret.
SDValue MSP430TargetLowering::LowerCCCCallTo(
SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,

@@ -210,7 +210,7 @@ let isCall = 1 in
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead. Uses for argument
// registers are added manually.
-let Defs = [R12, R13, R14, R15, SR],
+let Defs = [R11, R12, R13, R14, R15, SR],
Uses = [SP] in {
def CALLi : II16i<0x0,
(outs), (ins i16imm:$dst),

@@ -41,12 +41,12 @@ MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const Function* F = MF->getFunction();
static const MCPhysReg CalleeSavedRegs[] = {
MSP430::FP, MSP430::R5, MSP430::R6, MSP430::R7,
-MSP430::R8, MSP430::R9, MSP430::R10, MSP430::R11,
+MSP430::R8, MSP430::R9, MSP430::R10,
0
};
static const MCPhysReg CalleeSavedRegsFP[] = {
MSP430::R5, MSP430::R6, MSP430::R7,
-MSP430::R8, MSP430::R9, MSP430::R10, MSP430::R11,
+MSP430::R8, MSP430::R9, MSP430::R10,
0
};
static const MCPhysReg CalleeSavedRegsIntr[] = {

@@ -0,0 +1,43 @@
; RUN: llc -O0 -mhwmult=16bit < %s | FileCheck %s
target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
target triple = "msp430---elf"
@g_i32 = global i32 123, align 8
@g_i64 = global i64 456, align 8
@g_i16 = global i16 789, align 8
define i16 @mpyi() #0 {
entry:
; CHECK: mpyi:
; CHECK: call #__mspabi_mpyi_hw
%0 = load volatile i16, i16* @g_i16, align 8
%1 = mul i16 %0, %0
ret i16 %1
}
define i32 @mpyli() #0 {
entry:
; CHECK: mpyli:
; CHECK: call #__mspabi_mpyl_hw
%0 = load volatile i32, i32* @g_i32, align 8
%1 = mul i32 %0, %0
ret i32 %1
}
define i64 @mpylli() #0 {
entry:
; CHECK: mpylli:
; CHECK: call #__mspabi_mpyll_hw
%0 = load volatile i64, i64* @g_i64, align 8
%1 = mul i64 %0, %0
ret i64 %1
}
attributes #0 = { nounwind }

@@ -0,0 +1,43 @@
; RUN: llc -O0 -mhwmult=32bit < %s | FileCheck %s
target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
target triple = "msp430---elf"
@g_i32 = global i32 123, align 8
@g_i64 = global i64 456, align 8
@g_i16 = global i16 789, align 8
define i16 @mpyi() #0 {
entry:
; CHECK: mpyi:
; CHECK: call #__mspabi_mpyi_hw
%0 = load volatile i16, i16* @g_i16, align 8
%1 = mul i16 %0, %0
ret i16 %1
}
define i32 @mpyli() #0 {
entry:
; CHECK: mpyli:
; CHECK: call #__mspabi_mpyl_hw32
%0 = load volatile i32, i32* @g_i32, align 8
%1 = mul i32 %0, %0
ret i32 %1
}
define i64 @mpylli() #0 {
entry:
; CHECK: mpylli:
; CHECK: call #__mspabi_mpyll_hw32
%0 = load volatile i64, i64* @g_i64, align 8
%1 = mul i64 %0, %0
ret i64 %1
}
attributes #0 = { nounwind }

@@ -0,0 +1,43 @@
; RUN: llc -O0 -mhwmult=f5series < %s | FileCheck %s
target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
target triple = "msp430---elf"
@g_i32 = global i32 123, align 8
@g_i64 = global i64 456, align 8
@g_i16 = global i16 789, align 8
define i16 @mpyi() #0 {
entry:
; CHECK: mpyi:
; CHECK: call #__mspabi_mpyi_f5hw
%0 = load volatile i16, i16* @g_i16, align 8
%1 = mul i16 %0, %0
ret i16 %1
}
define i32 @mpyli() #0 {
entry:
; CHECK: mpyli:
; CHECK: call #__mspabi_mpyl_f5hw
%0 = load volatile i32, i32* @g_i32, align 8
%1 = mul i32 %0, %0
ret i32 %1
}
define i64 @mpylli() #0 {
entry:
; CHECK: mpylli:
; CHECK: call #__mspabi_mpyll_f5hw
%0 = load volatile i64, i64* @g_i64, align 8
%1 = mul i64 %0, %0
ret i64 %1
}
attributes #0 = { nounwind }

@@ -12,7 +12,7 @@ entry:
store i16 %i, i16* %i.addr, align 2
%0 = load i16, i16* %i.addr, align 2
; CHECK: mov.w #2, r13
-; CHECK: call #__mulhi3hw_noint
+; CHECK: call #__mspabi_mpyi
; CHECK: br .LJTI0_0(r12)
switch i16 %0, label %sw.default [
i16 0, label %sw.bb

@@ -0,0 +1,595 @@
; RUN: llc -O0 < %s | FileCheck %s
target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
target triple = "msp430---elf"
@g_double = global double 123.0, align 8
@g_float = global float 123.0, align 8
@g_i32 = global i32 123, align 8
@g_i64 = global i64 456, align 8
@g_i16 = global i16 789, align 8
define float @d2f() #0 {
entry:
; CHECK: d2f:
; CHECK: call #__mspabi_cvtdf
%0 = load volatile double, double* @g_double, align 8
%1 = fptrunc double %0 to float
ret float %1
}
define double @f2d() #0 {
entry:
; CHECK: f2d:
; CHECK: call #__mspabi_cvtfd
%0 = load volatile float, float* @g_float, align 8
%1 = fpext float %0 to double
ret double %1
}
define i32 @d2l() #0 {
entry:
; CHECK: d2l:
; CHECK: call #__mspabi_fixdli
%0 = load volatile double, double* @g_double, align 8
%1 = fptosi double %0 to i32
ret i32 %1
}
define i64 @d2ll() #0 {
entry:
; CHECK: d2ll:
; CHECK: call #__mspabi_fixdlli
%0 = load volatile double, double* @g_double, align 8
%1 = fptosi double %0 to i64
ret i64 %1
}
define i32 @d2ul() #0 {
entry:
; CHECK: d2ul:
; CHECK: call #__mspabi_fixdul
%0 = load volatile double, double* @g_double, align 8
%1 = fptoui double %0 to i32
ret i32 %1
}
define i64 @d2ull() #0 {
entry:
; CHECK: d2ull:
; CHECK: call #__mspabi_fixdull
%0 = load volatile double, double* @g_double, align 8
%1 = fptoui double %0 to i64
ret i64 %1
}
define i32 @f2l() #0 {
entry:
; CHECK: f2l:
; CHECK: call #__mspabi_fixfli
%0 = load volatile float, float* @g_float, align 8
%1 = fptosi float %0 to i32
ret i32 %1
}
define i64 @f2ll() #0 {
entry:
; CHECK: f2ll:
; CHECK: call #__mspabi_fixflli
%0 = load volatile float, float* @g_float, align 8
%1 = fptosi float %0 to i64
ret i64 %1
}
define i32 @f2ul() #0 {
entry:
; CHECK: f2ul:
; CHECK: call #__mspabi_fixful
%0 = load volatile float, float* @g_float, align 8
%1 = fptoui float %0 to i32
ret i32 %1
}
define i64 @f2ull() #0 {
entry:
; CHECK: f2ull:
; CHECK: call #__mspabi_fixfull
%0 = load volatile float, float* @g_float, align 8
%1 = fptoui float %0 to i64
ret i64 %1
}
define double @l2d() #0 {
entry:
; CHECK: l2d:
; CHECK: call #__mspabi_fltlid
%0 = load volatile i32, i32* @g_i32, align 8
%1 = sitofp i32 %0 to double
ret double %1
}
define double @ll2d() #0 {
entry:
; CHECK: ll2d:
; CHECK: call #__mspabi_fltllid
%0 = load volatile i64, i64* @g_i64, align 8
%1 = sitofp i64 %0 to double
ret double %1
}
define double @ul2d() #0 {
entry:
; CHECK: ul2d:
; CHECK: call #__mspabi_fltuld
%0 = load volatile i32, i32* @g_i32, align 8
%1 = uitofp i32 %0 to double
ret double %1
}
define double @ull2d() #0 {
entry:
; CHECK: ull2d:
; CHECK: call #__mspabi_fltulld
%0 = load volatile i64, i64* @g_i64, align 8
%1 = uitofp i64 %0 to double
ret double %1
}
define float @l2f() #0 {
entry:
; CHECK: l2f:
; CHECK: call #__mspabi_fltlif
%0 = load volatile i32, i32* @g_i32, align 8
%1 = sitofp i32 %0 to float
ret float %1
}
define float @ll2f() #0 {
entry:
; CHECK: ll2f:
; CHECK: call #__mspabi_fltllif
%0 = load volatile i64, i64* @g_i64, align 8
%1 = sitofp i64 %0 to float
ret float %1
}
define float @ul2f() #0 {
entry:
; CHECK: ul2f:
; CHECK: call #__mspabi_fltulf
%0 = load volatile i32, i32* @g_i32, align 8
%1 = uitofp i32 %0 to float
ret float %1
}
define float @ull2f() #0 {
entry:
; CHECK: ull2f:
; CHECK: call #__mspabi_fltullf
%0 = load volatile i64, i64* @g_i64, align 8
%1 = uitofp i64 %0 to float
ret float %1
}
define i1 @cmpd_oeq() #0 {
entry:
; CHECK: cmpd_oeq:
; CHECK: call #__mspabi_cmpd
%0 = load volatile double, double* @g_double, align 8
%1 = fcmp oeq double %0, 123.0
ret i1 %1
}
define i1 @cmpd_une() #0 {
entry:
; CHECK: cmpd_une:
; CHECK: call #__mspabi_cmpd
%0 = load volatile double, double* @g_double, align 8
%1 = fcmp une double %0, 123.0
ret i1 %1
}
define i1 @cmpd_oge() #0 {
entry:
; CHECK: cmpd_oge:
; CHECK: call #__mspabi_cmpd
%0 = load volatile double, double* @g_double, align 8
%1 = fcmp oge double %0, 123.0
ret i1 %1
}
define i1 @cmpd_olt() #0 {
entry:
; CHECK: cmpd_olt:
; CHECK: call #__mspabi_cmpd
%0 = load volatile double, double* @g_double, align 8
%1 = fcmp olt double %0, 123.0
ret i1 %1
}
define i1 @cmpd_ole() #0 {
entry:
; CHECK: cmpd_ole:
; CHECK: call #__mspabi_cmpd
%0 = load volatile double, double* @g_double, align 8
%1 = fcmp ole double %0, 123.0
ret i1 %1
}
define i1 @cmpd_ogt() #0 {
entry:
; CHECK: cmpd_ogt:
; CHECK: call #__mspabi_cmpd
%0 = load volatile double, double* @g_double, align 8
%1 = fcmp ogt double %0, 123.0
ret i1 %1
}
define i1 @cmpf_oeq() #0 {
entry:
; CHECK: cmpf_oeq:
; CHECK: call #__mspabi_cmpf
%0 = load volatile float, float* @g_float, align 8
%1 = fcmp oeq float %0, 123.0
ret i1 %1
}
define i1 @cmpf_une() #0 {
entry:
; CHECK: cmpf_une:
; CHECK: call #__mspabi_cmpf
%0 = load volatile float, float* @g_float, align 8
%1 = fcmp une float %0, 123.0
ret i1 %1
}
define i1 @cmpf_oge() #0 {
entry:
; CHECK: cmpf_oge:
; CHECK: call #__mspabi_cmpf
%0 = load volatile float, float* @g_float, align 8
%1 = fcmp oge float %0, 123.0
ret i1 %1
}
define i1 @cmpf_olt() #0 {
entry:
; CHECK: cmpf_olt:
; CHECK: call #__mspabi_cmpf
%0 = load volatile float, float* @g_float, align 8
%1 = fcmp olt float %0, 123.0
ret i1 %1
}
define i1 @cmpf_ole() #0 {
entry:
; CHECK: cmpf_ole:
; CHECK: call #__mspabi_cmpf
%0 = load volatile float, float* @g_float, align 8
%1 = fcmp ole float %0, 123.0
ret i1 %1
}
define i1 @cmpf_ogt() #0 {
entry:
; CHECK: cmpf_ogt:
; CHECK: call #__mspabi_cmpf
%0 = load volatile float, float* @g_float, align 8
%1 = fcmp ogt float %0, 123.0
ret i1 %1
}
define double @addd() #0 {
entry:
; CHECK: addd:
; CHECK: call #__mspabi_addd
%0 = load volatile double, double* @g_double, align 8
%1 = fadd double %0, 123.0
ret double %1
}
define float @addf() #0 {
entry:
; CHECK: addf:
; CHECK: call #__mspabi_addf
%0 = load volatile float, float* @g_float, align 8
%1 = fadd float %0, 123.0
ret float %1
}
define double @divd() #0 {
entry:
; CHECK: divd:
; CHECK: call #__mspabi_divd
%0 = load volatile double, double* @g_double, align 8
%1 = fdiv double %0, 123.0
ret double %1
}
define float @divf() #0 {
entry:
; CHECK: divf:
; CHECK: call #__mspabi_divf
%0 = load volatile float, float* @g_float, align 8
%1 = fdiv float %0, 123.0
ret float %1
}
define double @mpyd() #0 {
entry:
; CHECK: mpyd:
; CHECK: call #__mspabi_mpyd
%0 = load volatile double, double* @g_double, align 8
%1 = fmul double %0, 123.0
ret double %1
}
define float @mpyf() #0 {
entry:
; CHECK: mpyf:
; CHECK: call #__mspabi_mpyf
%0 = load volatile float, float* @g_float, align 8
%1 = fmul float %0, 123.0
ret float %1
}
define double @subd() #0 {
entry:
; CHECK: subd:
; CHECK: call #__mspabi_subd
%0 = load volatile double, double* @g_double, align 8
%1 = fsub double %0, %0
ret double %1
}
define float @subf() #0 {
entry:
; CHECK: subf:
; CHECK: call #__mspabi_subf
%0 = load volatile float, float* @g_float, align 8
%1 = fsub float %0, %0
ret float %1
}
define i16 @divi() #0 {
entry:
; CHECK: divi:
; CHECK: call #__mspabi_divi
%0 = load volatile i16, i16* @g_i16, align 8
%1 = sdiv i16 %0, %0
ret i16 %1
}
define i32 @divli() #0 {
entry:
; CHECK: divli:
; CHECK: call #__mspabi_divli
%0 = load volatile i32, i32* @g_i32, align 8
%1 = sdiv i32 %0, %0
ret i32 %1
}
define i64 @divlli() #0 {
entry:
; CHECK: divlli:
; CHECK: call #__mspabi_divlli
%0 = load volatile i64, i64* @g_i64, align 8
%1 = sdiv i64 %0, %0
ret i64 %1
}
define i16 @divu() #0 {
entry:
; CHECK: divu:
; CHECK: call #__mspabi_divu
%0 = load volatile i16, i16* @g_i16, align 8
%1 = udiv i16 %0, %0
ret i16 %1
}
define i32 @divul() #0 {
entry:
; CHECK: divul:
; CHECK: call #__mspabi_divul
%0 = load volatile i32, i32* @g_i32, align 8
%1 = udiv i32 %0, %0
ret i32 %1
}
define i64 @divull() #0 {
entry:
; CHECK: divull:
; CHECK: call #__mspabi_divull
%0 = load volatile i64, i64* @g_i64, align 8
%1 = udiv i64 %0, %0
ret i64 %1
}
define i16 @remi() #0 {
entry:
; CHECK: remi:
; CHECK: call #__mspabi_remi
%0 = load volatile i16, i16* @g_i16, align 8
%1 = srem i16 %0, %0
ret i16 %1
}
define i32 @remli() #0 {
entry:
; CHECK: remli:
; CHECK: call #__mspabi_remli
%0 = load volatile i32, i32* @g_i32, align 8
%1 = srem i32 %0, %0
ret i32 %1
}
define i64 @remlli() #0 {
entry:
; CHECK: remlli:
; CHECK: call #__mspabi_remlli
%0 = load volatile i64, i64* @g_i64, align 8
%1 = srem i64 %0, %0
ret i64 %1
}
define i16 @remu() #0 {
entry:
; CHECK: remu:
; CHECK: call #__mspabi_remu
%0 = load volatile i16, i16* @g_i16, align 8
%1 = urem i16 %0, %0
ret i16 %1
}
define i32 @remul() #0 {
entry:
; CHECK: remul:
; CHECK: call #__mspabi_remul
%0 = load volatile i32, i32* @g_i32, align 8
%1 = urem i32 %0, %0
ret i32 %1
}
define i64 @remull() #0 {
entry:
; CHECK: remull:
; CHECK: call #__mspabi_remull
%0 = load volatile i64, i64* @g_i64, align 8
%1 = urem i64 %0, %0
ret i64 %1
}
define i16 @mpyi() #0 {
entry:
; CHECK: mpyi:
; CHECK: call #__mspabi_mpyi
%0 = load volatile i16, i16* @g_i16, align 8
%1 = mul i16 %0, %0
ret i16 %1
}
define i32 @mpyli() #0 {
entry:
; CHECK: mpyli:
; CHECK: call #__mspabi_mpyl
%0 = load volatile i32, i32* @g_i32, align 8
%1 = mul i32 %0, %0
ret i32 %1
}
define i64 @mpylli() #0 {
entry:
; CHECK: mpylli:
; CHECK: call #__mspabi_mpyll
%0 = load volatile i64, i64* @g_i64, align 8
%1 = mul i64 %0, %0
ret i64 %1
}
attributes #0 = { nounwind }

@@ -8,7 +8,7 @@ target triple = "msp430-elf"
define signext i8 @foo(i8 signext %_si1, i8 signext %_si2) nounwind readnone {
entry:
; CHECK-LABEL: foo:
-; CHECK: call #__mulqi3
+; CHECK: call #__mspabi_mpyi
%mul = mul i8 %_si2, %_si1 ; <i8> [#uses=1]
ret i8 %mul
}