Fix a bunch of ARM tests to be register allocation independent.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@130800 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 4dfdf242c1
commit feaf34758a
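Every hunk below applies the same idiom: CHECK lines that name a specific physical register (r0, r1, d16, ...) are loosened to a FileCheck regex match ({{r[0-9]+}}), or the register is captured once as a variable ([[R1:r[0-9]+]]) and reused ([[R1]]) when a later line must refer to the same register. The tests then keep passing when the register allocator picks different registers. As a minimal sketch of the idiom (a hypothetical test written for illustration, not part of this commit; the RUN line, the @inc function, and the "OLD:" prefix are assumptions):

; RUN: llc < %s -march=arm | FileCheck %s
define i32 @inc(i32 %a) nounwind {
entry:
; The OLD: line is ignored by FileCheck (it is not a CHECK prefix); it shows the
; fragile form that breaks when the allocator picks something other than r0.
; OLD: add r0, r0, #1
; Register-allocation-independent form: match any register, and capture it in
; [[REG]] so a later CHECK line could require the same register to reappear.
; CHECK: inc
; CHECK: add [[REG:r[0-9]+]], {{r[0-9]+}}, #1
  %0 = add i32 %a, 1
  ret i32 %0
}

A variable defined in one CHECK line ([[REGISTER:(r[0-9]+)]]) matches the same text in subsequent CHECK lines ([[REGISTER]]), which is what the paired ldr/str checks in the hunks below rely on.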
@@ -13,6 +13,7 @@ entry:
; Make sure the scheduler schedules all uses of the preincrement
; induction variable before defining the postincrement value.
; CHECK: t:
; CHECK: %bb
; CHECK-NOT: mov
bb: ; preds = %entry, %bb
%j.05 = phi i32 [ %2, %bb ], [ 0, %entry ]
@@ -14,15 +14,15 @@ for.cond:
br i1 %cmp, label %for.body, label %return

for.body:
; CHECK: %for.body
; CHECK: movs r{{[0-9]+}}, #1
; CHECK: %for.
; CHECK: movs r{{[0-9]+}}, #{{[01]}}
%arrayidx = getelementptr i32* %A, i32 %0
%tmp4 = load i32* %arrayidx, align 4
%cmp6 = icmp eq i32 %tmp4, %value
br i1 %cmp6, label %return, label %for.inc

; CHECK: %for.cond
; CHECK: movs r{{[0-9]+}}, #0
; CHECK: %for.
; CHECK: movs r{{[0-9]+}}, #{{[01]}}

for.inc:
%inc = add i32 %0, 1
@@ -31,8 +31,7 @@ define i32 @f3(i32 %A, i32 %B) nounwind {
entry:
; CHECK: f3
; CHECK: lsr{{.*}} #7
; CHECK: mov r0, r1
; CHECK: bfi r0, r2, #7, #16
; CHECK: bfi {{.*}}, #7, #16
%and = and i32 %A, 8388480 ; <i32> [#uses=1]
%and2 = and i32 %B, -8388481 ; <i32> [#uses=1]
%or = or i32 %and2, %and ; <i32> [#uses=1]
@@ -42,8 +41,8 @@ entry:
; rdar://8752056
define i32 @f4(i32 %a) nounwind {
; CHECK: f4
; CHECK: movw r1, #3137
; CHECK: bfi r1, r0, #15, #5
; CHECK: movw [[R1:r[0-9]+]], #3137
; CHECK: bfi [[R1]], {{r[0-9]+}}, #15, #5
%1 = shl i32 %a, 15
%ins7 = and i32 %1, 1015808
%ins12 = or i32 %ins7, 3137
@@ -10,7 +10,7 @@ entry:

; HARD: test1:
; HARD: vmov.i32 [[REG1:(d[0-9]+)]], #0x80000000
; HARD: vbsl [[REG1]], d2, d0
; HARD: vbsl [[REG1]], d
%0 = tail call float @copysignf(float %x, float %y) nounwind
ret float %0
}
@@ -10,8 +10,8 @@ entry:
br i1 %0, label %return, label %bb

bb:
; CHECK: ldr [[REGISTER:(r[0-9]+)]], [r1], r3
; CHECK: str [[REGISTER]], [r2], #4
; CHECK: ldr [[REGISTER:(r[0-9]+)]], [{{r[0-9]+}}], {{r[0-9]+}}
; CHECK: str [[REGISTER]], [{{r[0-9]+}}], #4
%j.05 = phi i32 [ %2, %bb ], [ 0, %entry ]
%tmp = mul i32 %j.05, %index
%uglygep = getelementptr i8* %src6, i32 %tmp
@@ -5,8 +5,8 @@
define i32 @t1(i32 %c) nounwind readnone {
entry:
; ARM: t1:
; ARM: mov r1, #101
; ARM: orr r1, r1, #1, #24
; ARM: mov [[R1:r[0-9]+]], #101
; ARM: orr [[R1b:r[0-9]+]], [[R1]], #1, #24
; ARM: movgt r0, #123

; ARMT2: t1:
@@ -34,7 +34,7 @@ entry:
; ARMT2: movwgt r0, #357

; THUMB2: t2:
; THUMB2: mov.w r0, #123
; THUMB2: mov{{(s|\.w)}} r0, #123
; THUMB2: movwgt r0, #357

%0 = icmp sgt i32 %c, 1
@@ -53,7 +53,7 @@ entry:
; ARMT2: moveq r0, #1

; THUMB2: t3:
; THUMB2: mov.w r0, #0
; THUMB2: mov{{(s|\.w)}} r0, #0
; THUMB2: moveq r0, #1
%0 = icmp eq i32 %a, 160
%1 = zext i1 %0 to i32
@@ -67,11 +67,11 @@ entry:
; ARM: movlt

; ARMT2: t4:
; ARMT2: movwlt r0, #65365
; ARMT2: movtlt r0, #65365
; ARMT2: movwlt [[R0:r[0-9]+]], #65365
; ARMT2: movtlt [[R0]], #65365

; THUMB2: t4:
; THUMB2: mvnlt.w r0, #11141290
; THUMB2: mvnlt.w [[R0:r[0-9]+]], #11141290
%0 = icmp slt i32 %a, %b
%1 = select i1 %0, i32 4283826005, i32 %x
ret i32 %1
@@ -9,7 +9,7 @@ define i32 @main() nounwind {
entry:
; CHECK: main
; CHECK: push
; CHECK: stmib
; CHECK: stm
%0 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([26 x i8]* @"\01LC1", i32 0, i32 0), i32 -2, i32 -3, i32 2, i32 -6) nounwind ; <i32> [#uses=0]
%1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([32 x i8]* @"\01LC", i32 0, i32 0), i32 0, i32 1, i32 0, i32 1, i32 0, i32 1) nounwind ; <i32> [#uses=0]
ret i32 0
@@ -125,7 +125,7 @@ define <2 x i32> @vld2lanei32(i32* %A, <2 x i32>* %B) nounwind {
;Check for a post-increment updating load.
define <2 x i32> @vld2lanei32_update(i32** %ptr, <2 x i32>* %B) nounwind {
;CHECK: vld2lanei32_update:
;CHECK: vld2.32 {d16[1], d17[1]}, [r1]!
;CHECK: vld2.32 {d16[1], d17[1]}, [{{r[0-9]+}}]!
%A = load i32** %ptr
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <2 x i32>* %B
@@ -153,7 +153,7 @@ define <2 x float> @vld2lanef(float* %A, <2 x float>* %B) nounwind {
define <8 x i16> @vld2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vld2laneQi16:
;Check the (default) alignment.
;CHECK: vld2.16 {d17[1], d19[1]}, [r0]
;CHECK: vld2.16 {d17[1], d19[1]}, [{{r[0-9]+}}]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
%tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 5, i32 1)
@@ -166,7 +166,7 @@ define <8 x i16> @vld2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @vld2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK: vld2laneQi32:
;Check the alignment value. Max for this instruction is 64 bits:
;CHECK: vld2.32 {d17[0], d19[0]}, [r0, :64]
;CHECK: vld2.32 {d17[0], d19[0]}, [{{r[0-9]+}}, :64]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>* %B
%tmp2 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 16)
@@ -222,7 +222,7 @@ define <8 x i8> @vld3lanei8(i8* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @vld3lanei16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vld3lanei16:
;Check the (default) alignment value. VLD3 does not support alignment.
;CHECK: vld3.16 {d16[1], d17[1], d18[1]}, [r0]
;CHECK: vld3.16 {d16[1], d17[1], d18[1]}, [{{r[0-9]+}}]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>* %B
%tmp2 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
@@ -265,7 +265,7 @@ define <2 x float> @vld3lanef(float* %A, <2 x float>* %B) nounwind {
define <8 x i16> @vld3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vld3laneQi16:
;Check the (default) alignment value. VLD3 does not support alignment.
;CHECK: vld3.16 {d16[1], d18[1], d20[1]}, [r0]
;CHECK: vld3.16 {d16[1], d18[1], d20[1]}, [{{r[0-9]+}}]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
%tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 8)
@@ -344,7 +344,7 @@ declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8*, <4 x flo
define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vld4lanei8:
;Check the alignment value. Max for this instruction is 32 bits:
;CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [r0, :32]
;CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [{{r[0-9]+}}, :32]
%tmp1 = load <8 x i8>* %B
%tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
@@ -360,7 +360,7 @@ define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
;Check for a post-increment updating load.
define <8 x i8> @vld4lanei8_update(i8** %ptr, <8 x i8>* %B) nounwind {
;CHECK: vld4lanei8_update:
;CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [r1, :32]!
;CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [{{r[0-9]+}}, :32]!
%A = load i8** %ptr
%tmp1 = load <8 x i8>* %B
%tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
@@ -380,7 +380,7 @@ define <4 x i16> @vld4lanei16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vld4lanei16:
;Check that a power-of-two alignment smaller than the total size of the memory
;being loaded is ignored.
;CHECK: vld4.16 {d16[1], d17[1], d18[1], d19[1]}, [r0]
;CHECK: vld4.16 {d16[1], d17[1], d18[1], d19[1]}, [{{r[0-9]+}}]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>* %B
%tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 4)
@@ -398,7 +398,7 @@ define <2 x i32> @vld4lanei32(i32* %A, <2 x i32>* %B) nounwind {
;CHECK: vld4lanei32:
;Check the alignment value. An 8-byte alignment is allowed here even though
;it is smaller than the total size of the memory being loaded.
;CHECK: vld4.32 {d16[1], d17[1], d18[1], d19[1]}, [r0, :64]
;CHECK: vld4.32 {d16[1], d17[1], d18[1], d19[1]}, [{{r[0-9]+}}, :64]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <2 x i32>* %B
%tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 8)
@@ -431,7 +431,7 @@ define <2 x float> @vld4lanef(float* %A, <2 x float>* %B) nounwind {
define <8 x i16> @vld4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vld4laneQi16:
;Check the alignment value. Max for this instruction is 64 bits:
;CHECK: vld4.16 {d16[1], d18[1], d20[1], d22[1]}, [r0, :64]
;CHECK: vld4.16 {d16[1], d18[1], d20[1], d22[1]}, [{{r[0-9]+}}, :64]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
%tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 16)
@@ -448,7 +448,7 @@ define <8 x i16> @vld4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @vld4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK: vld4laneQi32:
;Check the (default) alignment.
;CHECK: vld4.32 {d17[0], d19[0], d21[0], d23[0]}, [r0]
;CHECK: vld4.32 {d17[0], d19[0], d21[0], d23[0]}, [{{r[0-9]+}}]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>* %B
%tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 1)