diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 13d606b064b..c139dd27e9b 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -145,8 +145,10 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
   }
 
   // TODO: when we have SSE, these could be more efficient, by using movd/movq.
-  setOperationAction(ISD::BIT_CONVERT      , MVT::f32  , Expand);
-  setOperationAction(ISD::BIT_CONVERT      , MVT::i32  , Expand);
+  if (!X86ScalarSSE) {
+    setOperationAction(ISD::BIT_CONVERT      , MVT::f32  , Expand);
+    setOperationAction(ISD::BIT_CONVERT      , MVT::i32  , Expand);
+  }
   if (Subtarget->is64Bit()) {
     setOperationAction(ISD::BIT_CONVERT    , MVT::f64  , Expand);
     setOperationAction(ISD::BIT_CONVERT    , MVT::i64  , Expand);
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 2de8081ea7c..72ae8e9662c 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -1683,6 +1683,11 @@ def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (ops VR128:$dst, i32mem:$src),
                       [(set VR128:$dst,
                         (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
 
+def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (ops FR32:$dst, GR32:$src),
+                      "movd {$src, $dst|$dst, $src}",
+                      [(set FR32:$dst, (bitconvert GR32:$src))]>;
+
+
 // SSE2 instructions with XS prefix
 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
                     "movq {$src, $dst|$dst, $src}",
@@ -1724,6 +1729,11 @@ def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (ops i32mem:$dst, VR128:$src),
                        [(store (i32 (vector_extract (v4i32 VR128:$src),
                                      (iPTR 0))), addr:$dst)]>;
 
+def MOVDSS2DIrr : PDI<0x7E, MRMDestReg, (ops GR32:$dst, FR32:$src),
+                      "movd {$src, $dst|$dst, $src}",
+                      [(set GR32:$dst, (bitconvert FR32:$src))]>;
+
+
 // Move to lower bits of a VR128, leaving upper bits alone.
 // Three operand (but two address) aliases.
 let isTwoAddress = 1 in {
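
Note (not part of the patch): ISD::BIT_CONVERT models a bit-for-bit
reinterpretation between same-sized types. Marking it Expand for f32/i32
lowers the node through a stack slot (store in one type, reload in the
other); with the new MOVDI2SSrr / MOVDSS2DIrr patterns, SSE targets can
instead select a single movd between a general-purpose register and an
XMM register, so the Expand fallback is now limited to !X86ScalarSSE.
A minimal C++ sketch of source-level code that produces such a bitconvert
(function names are illustrative only, not from the tree):

    #include <cstdint>
    #include <cstring>

    // Reinterpret the bits of a 32-bit integer as a float.  The memcpy is
    // the portable way to write this; the optimizer should reduce it to a
    // bitcast, which reaches the X86 backend as BIT_CONVERT (i32 -> f32)
    // and, with this patch, can be selected to a movd from GR32 to FR32.
    float int_bits_to_float(std::uint32_t x) {
      float f;
      std::memcpy(&f, &x, sizeof f);
      return f;
    }

    // The reverse direction (f32 -> i32) exercises the movd from an XMM
    // register back to a general-purpose register.
    std::uint32_t float_bits_to_int(float f) {
      std::uint32_t bits;
      std::memcpy(&bits, &f, sizeof bits);
      return bits;
    }

Without SSE the added guard keeps the old behaviour, so non-SSE targets
still expand these nodes through memory.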