llvm/test/CodeGen/X86/widen_load-0.ll
Nadav Rotem 2dd83eb1ab Improve the loading of load-anyext vectors by allowing the codegen to load
multiple scalars and insert them into a vector. Next, we shuffle the elements
into the correct places, as before.
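
Conceptually, the new lowering amounts to something like the hand-written IR
sketch below (illustrative only: the patch operates on SelectionDAG nodes, and
the function name and types here are made up):

; Instead of one wide vector load, load each scalar element, insert the
; scalars into an undef vector, then shuffle the lanes into their final
; places in the wider type.
define <4 x i16> @load_anyext_sketch(i16* %p) nounwind {
entry:
  %p1 = getelementptr i16* %p, i32 1                    ; address of element 1
  %e0 = load i16* %p, align 2                           ; load the scalars...
  %e1 = load i16* %p1, align 2
  %v0 = insertelement <2 x i16> undef, i16 %e0, i32 0   ; ...build a vector...
  %v1 = insertelement <2 x i16> %v0, i16 %e1, i32 1
  ; ...and shuffle the elements into the correct places.
  %w = shufflevector <2 x i16> %v1, <2 x i16> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  ret <4 x i16> %w
}
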
Also fix a small dagcombine bug in SimplifyBinOpWithSameOpcodeHands, where the
migration of bitcasts happened too late in the SelectionDAG process.
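
For context, SimplifyBinOpWithSameOpcodeHands rewrites a binary operator whose
two operands share the same opcode, e.g. performing a logic op in the source
type of a pair of bitcasts. A hand-written IR analogue of the bitcast case
(hypothetical example; the real transform runs on DAG nodes):

; The combine turns (and (bitcast A), (bitcast B)) into (bitcast (and A, B)),
; so the 'and' below can be done in the <4 x i16> type with a single bitcast
; of the result.
define <2 x i32> @binop_bitcast_sketch(<4 x i16> %a, <4 x i16> %b) nounwind {
entry:
  %x = bitcast <4 x i16> %a to <2 x i32>
  %y = bitcast <4 x i16> %b to <2 x i32>
  %r = and <2 x i32> %x, %y
  ret <2 x i32> %r
}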

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159991 91177308-0d34-0410-b5e6-96231b3b80d8
2012-07-10 13:25:08 +00:00

; RUN: llc < %s -o - -mtriple=x86_64-linux -mcpu=corei7 | FileCheck %s
; PR4891
; Both loads should happen before either store: the function swaps the
; contents of *%b and *%c, and the two pointers may alias, so neither load
; may be reordered past a store.
; CHECK: movl ({{.*}}), {{.*}}
; CHECK: movl ({{.*}}), {{.*}}
; CHECK: movl {{.*}}, ({{.*}})
; CHECK: movl {{.*}}, ({{.*}})
define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind {
entry:
  %0 = load <2 x i16>* %b, align 2                ; original value of *%b
  %1 = load i32* %c, align 4                      ; original value of *%c
  %tmp1 = bitcast i32 %1 to <2 x i16>
  store <2 x i16> %tmp1, <2 x i16>* %b, align 2   ; *%b = old value of *%c
  %tmp5 = bitcast <2 x i16> %0 to <1 x i32>
  %tmp3 = extractelement <1 x i32> %tmp5, i32 0
  store i32 %tmp3, i32* %c, align 4               ; *%c = old value of *%b
  ret void
}