test/CodeGen/X86: Add Win64 check patterns.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@127733 91177308-0d34-0410-b5e6-96231b3b80d8
NAKAMURA Takumi 2011-03-16 13:52:51 +00:00
parent ddbfbcf72e
commit 37947c6bad
7 changed files with 141 additions and 22 deletions
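
For reference: on x86-64, the System V (Linux) calling convention passes the first integer arguments in %rdi, %rsi, %rdx and %rcx, while the Win64 convention uses %rcx, %rdx, %r8 and %r9, which is why the WIN64 patterns added below check different registers than the existing CHECK lines. A minimal test in the same style, shown only as a sketch (the file name, function and exact instruction selection are illustrative, not part of this commit):

    ; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
    ; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
    ; Illustrative only: returning the first argument forces a copy from the
    ; first argument register into %eax, which exposes the ABI difference.
    ; CHECK: first:
    ; CHECK: movl %edi, %eax
    ; WIN64: first:
    ; WIN64: movl %ecx, %eax
    define i32 @first(i32 %a, i32 %b) nounwind {
    entry:
      ret i32 %a
    }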

@@ -1,10 +1,16 @@
; RUN: llc < %s -o - -march=x86-64 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; PR8573
; CHECK: foo:
; CHECK: leaq (%rdi), %rax
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: monitor
; WIN64: foo:
; WIN64: leaq (%rcx), %rax
; WIN64-NEXT: movl %edx, %ecx
; WIN64-NEXT: movl %r8d, %edx
; WIN64-NEXT: monitor
define void @foo(i8* %P, i32 %E, i32 %H) nounwind {
entry:
tail call void @llvm.x86.sse3.monitor(i8* %P, i32 %E, i32 %H)
@@ -17,6 +23,9 @@ declare void @llvm.x86.sse3.monitor(i8*, i32, i32) nounwind
; CHECK: movl %edi, %ecx
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: mwait
; WIN64: bar:
; WIN64: movl %edx, %eax
; WIN64-NEXT: mwait
define void @bar(i32 %E, i32 %H) nounwind {
entry:
tail call void @llvm.x86.sse3.mwait(i32 %E, i32 %H)

@@ -1,9 +1,29 @@
; RUN: llc < %s -march=x86-64 > %t
; RUN: grep mov %t | count 6
; RUN: grep {movb %ah, (%rsi)} %t | count 3
; RUN: llc < %s -march=x86 > %t
; RUN: grep mov %t | count 3
; RUN: grep {movb %ah, (%e} %t | count 3
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
; X64: mov
; X64-NEXT: movb %ah, (%rsi)
; X64: mov
; X64-NEXT: movb %ah, (%rsi)
; X64: mov
; X64-NEXT: movb %ah, (%rsi)
; X64-NOT: mov
; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=W64
; W64-NOT: mov
; W64: movb %ch, (%rdx)
; W64-NOT: mov
; W64: movb %ch, (%rdx)
; W64-NOT: mov
; W64: movb %ch, (%rdx)
; W64-NOT: mov
; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
; X32-NOT: mov
; X32: movb %ah, (%e
; X32-NOT: mov
; X32: movb %ah, (%e
; X32-NOT: mov
; X32: movb %ah, (%e
; X32-NOT: mov
; Use h-register extract and store.
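
In the h-register store checks just above, the leading mov on the Linux side reflects that the incoming argument (in %rdi/%edi) must first be copied into a register whose high byte is addressable before movb %ah, (%rsi) can be used, whereas on Win64 the value already arrives in %rcx, so %ch can be stored with no extra copy. A reduced single-function sketch of the same pattern (hypothetical file; that the Linux-side copy lands in %eax is an assumption carried over from the existing checks):

    ; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
    ; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=W64
    ; Illustrative only: store bits 8..15 of the first argument through the
    ; second argument using an h register.
    ; X64: movb %ah, (%rsi)
    ; W64: movb %ch, (%rdx)
    define void @storeh(i32 %x, i8* %p) nounwind {
    entry:
      %t0 = lshr i32 %x, 8
      %t1 = trunc i32 %t0 to i8
      store i8 %t1, i8* %p
      ret void
    }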

@@ -1,4 +1,5 @@
; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=X86-64
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X86-64
; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X86-32
; Use h registers. On x86-64, codegen doesn't support general allocation
@@ -9,6 +10,12 @@ define void @bar64(i64 inreg %x, i8* inreg %p) nounwind {
; X86-64: shrq $8, %rdi
; X86-64: incb %dil
; See the FIXME on regclass GR8.
; This could be transformed optimally to: incb %ch; movb %ch, (%rdx)
; WIN64: bar64:
; WIN64: shrq $8, %rcx
; WIN64: incb %cl
; X86-32: bar64:
; X86-32: incb %ah
%t0 = lshr i64 %x, 8
@@ -23,6 +30,10 @@ define void @bar32(i32 inreg %x, i8* inreg %p) nounwind {
; X86-64: shrl $8, %edi
; X86-64: incb %dil
; WIN64: bar32:
; WIN64: shrl $8, %ecx
; WIN64: incb %cl
; X86-32: bar32:
; X86-32: incb %ah
%t0 = lshr i32 %x, 8
@@ -37,6 +48,10 @@ define void @bar16(i16 inreg %x, i8* inreg %p) nounwind {
; X86-64: shrl $8, %edi
; X86-64: incb %dil
; WIN64: bar16:
; WIN64: shrl $8, %ecx
; WIN64: incb %cl
; X86-32: bar16:
; X86-32: incb %ah
%t0 = lshr i16 %x, 8
@@ -51,6 +66,9 @@ define i64 @qux64(i64 inreg %x) nounwind {
; X86-64: movq %rdi, %rax
; X86-64: movzbl %ah, %eax
; WIN64: qux64:
; WIN64: movzbl %ch, %eax
; X86-32: qux64:
; X86-32: movzbl %ah, %eax
%t0 = lshr i64 %x, 8
@@ -63,6 +81,9 @@ define i32 @qux32(i32 inreg %x) nounwind {
; X86-64: movl %edi, %eax
; X86-64: movzbl %ah, %eax
; WIN64: qux32:
; WIN64: movzbl %ch, %eax
; X86-32: qux32:
; X86-32: movzbl %ah, %eax
%t0 = lshr i32 %x, 8
@@ -75,6 +96,9 @@ define i16 @qux16(i16 inreg %x) nounwind {
; X86-64: movl %edi, %eax
; X86-64: movzbl %ah, %eax
; WIN64: qux16:
; WIN64: movzbl %ch, %eax
; X86-32: qux16:
; X86-32: movzbl %ah, %eax
%t0 = lshr i16 %x, 8

@@ -1,4 +1,9 @@
; RUN: llc < %s -march=x86-64 -mattr=+sse2,-sse41 | grep {pshufd \$3, %xmm0, %xmm0}
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2,-sse41 | FileCheck %s
; CHECK: pshufd $3, %xmm0, %xmm0
; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2,-sse41 | FileCheck %s -check-prefix=WIN64
; %a is passed indirectly on Win64.
; WIN64: movss 12(%rcx), %xmm0
define float @foo(<8 x float> %a) nounwind {
%c = extractelement <8 x float> %a, i32 3
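
As the comment notes, Win64 passes vector arguments indirectly: the callee receives a pointer (here in %rcx) and loads the value itself, while the System V ABI passes the first vector arguments directly in %xmm registers; the same effect shows up in the later hunks below as movdqa/movaps loads from (%rcx) and (%rdx). A minimal sketch in the same style (file name and exact instruction selection are illustrative):

    ; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
    ; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
    ; Illustrative only: on Linux %v arrives in %xmm0; on Win64 it is loaded
    ; through the pointer passed in %rcx.
    ; CHECK: addps %xmm0, %xmm0
    ; WIN64: movaps (%rcx), %xmm0
    ; WIN64: addps %xmm0, %xmm0
    define <4 x float> @double4(<4 x float> %v) nounwind {
    entry:
      %r = fadd <4 x float> %v, %v
      ret <4 x float> %r
    }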

@@ -1,8 +1,13 @@
; RUN: llc < %s -march=x86-64 -mattr=+sse41 -asm-verbose=0 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse41 -asm-verbose=0 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse41 -asm-verbose=0 | FileCheck %s -check-prefix=WIN64
define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK: test1:
; CHECK-NEXT: pmulld
; WIN64: test1:
; WIN64-NEXT: movdqa (%rcx), %xmm0
; WIN64-NEXT: pmulld (%rdx), %xmm0
%C = mul <4 x i32> %A, %B
ret <4 x i32> %C
}
@@ -10,6 +15,11 @@ define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
define <4 x i32> @test1a(<4 x i32> %A, <4 x i32> *%Bp) nounwind {
; CHECK: test1a:
; CHECK-NEXT: pmulld
; WIN64: test1a:
; WIN64-NEXT: movdqa (%rcx), %xmm0
; WIN64-NEXT: pmulld (%rdx), %xmm0
%B = load <4 x i32>* %Bp
%C = mul <4 x i32> %A, %B
ret <4 x i32> %C

@@ -1,4 +1,5 @@
; RUN: llc < %s -march=x86-64 -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-linux -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=W64
; RUN: llc < %s -mcpu=yonah -march=x86 -asm-verbose=0 -o - | FileCheck %s -check-prefix=X32
; PR7518
@@ -15,6 +16,13 @@ define void @test1(<2 x float> %Q, float *%P2) nounwind {
; X64-NEXT: movss %xmm1, (%rdi)
; X64-NEXT: ret
; W64: test1:
; W64-NEXT: movdqa (%rcx), %xmm0
; W64-NEXT: pshufd $1, %xmm0, %xmm1
; W64-NEXT: addss %xmm0, %xmm1
; W64-NEXT: movss %xmm1, (%rdx)
; W64-NEXT: ret
; X32: test1:
; X32-NEXT: pshufd $1, %xmm0, %xmm1
; X32-NEXT: addss %xmm0, %xmm1
@@ -31,6 +39,14 @@ define <2 x float> @test2(<2 x float> %Q, <2 x float> %R, <2 x float> *%P) nounw
; X64: test2:
; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: ret
; W64: test2:
; W64-NEXT: movaps (%rcx), %xmm0
; W64-NEXT: addps (%rdx), %xmm0
; W64-NEXT: ret
; X32: test2:
; X32: addps %xmm1, %xmm0
}
@@ -38,17 +54,35 @@ define <2 x float> @test3(<4 x float> %A) nounwind {
%B = shufflevector <4 x float> %A, <4 x float> undef, <2 x i32> <i32 0, i32 1>
%C = fadd <2 x float> %B, %B
ret <2 x float> %C
; CHECK: test3:
; CHECK-NEXT: addps %xmm0, %xmm0
; CHECK-NEXT: ret
; X64: test3:
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: ret
; W64: test3:
; W64-NEXT: movaps (%rcx), %xmm0
; W64-NEXT: addps %xmm0, %xmm0
; W64-NEXT: ret
; X32: test3:
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: ret
}
define <2 x float> @test4(<2 x float> %A) nounwind {
%C = fadd <2 x float> %A, %A
ret <2 x float> %C
; CHECK: test4:
; CHECK-NEXT: addps %xmm0, %xmm0
; CHECK-NEXT: ret
; X64: test4:
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: ret
; W64: test4:
; W64-NEXT: movaps (%rcx), %xmm0
; W64-NEXT: addps %xmm0, %xmm0
; W64-NEXT: ret
; X32: test4:
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: ret
}
define <4 x float> @test5(<4 x float> %A) nounwind {
@@ -61,10 +95,21 @@ BB:
%E = shufflevector <2 x float> %D, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
ret <4 x float> %E
; CHECK: _test5:
; CHECK-NEXT: addps %xmm0, %xmm0
; CHECK-NEXT: addps %xmm0, %xmm0
; CHECK-NEXT: ret
; X64: test5:
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: ret
; W64: test5:
; W64-NEXT: movaps (%rcx), %xmm0
; W64-NEXT: addps %xmm0, %xmm0
; W64-NEXT: addps %xmm0, %xmm0
; W64-NEXT: ret
; X32: test5:
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: ret
}

@@ -1,4 +1,5 @@
; RUN: llc < %s -o - -march=x86-64 | FileCheck %s
; RUN: llc < %s -o - -mtriple=x86_64-linux | FileCheck %s
; RUN: llc < %s -o - -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; PR4891
; Both loads should happen before either store.
@@ -8,6 +9,11 @@
; CHECK: movl %ecx, (%rdi)
; CHECK: movl %eax, (%rsi)
; WIN64: movl (%rcx), %eax
; WIN64: movl (%rdx), %esi
; WIN64: movl %esi, (%rcx)
; WIN64: movl %eax, (%rdx)
define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind {
entry:
%0 = load <2 x i16>* %b, align 2 ; <<2 x i16>> [#uses=1]