From 0613751dcb3f77730567146d4364785a7e5c06a9 Mon Sep 17 00:00:00 2001
From: Chandler Carruth
Date: Sun, 15 Feb 2015 00:08:01 +0000
Subject: [PATCH] [x86] Teach my test updating script about another quirk of
 the printed asm and port the mmx vector shuffle test to it.

Not thrilled with how it handles the stack manipulation logic, but I'm
much less bothered by that than I am by updating the test manually. =]

If anyone wants to teach the test checks management script about stack
adjustment patterns, that'd be cool too.

llvm-svn: 229268
---
 llvm/test/CodeGen/X86/vector-shuffle-mmx.ll | 77 +++++++++++++++++----
 llvm/utils/update_llc_test_checks.py        |  2 +-
 2 files changed, 65 insertions(+), 14 deletions(-)

diff --git a/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll b/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
index 1d04bc06ffe5..6d1be9149254 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
@@ -1,15 +1,23 @@
-; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s
-; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s
+; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X32 %s
+; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X64 %s
 
 ; If there is no explicit MMX type usage, always promote to XMM.
 
 define void @test0(<1 x i64>* %x) {
-; X32-LABEL: test0
-; X64-LABEL: test0
-; X32: pshufd $213
-; X64: pshufd $213
-; X32-NEXT: movlpd %xmm
-; X64-NEXT: movq %xmm
+; X32-LABEL: test0:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,3]
+; X32-NEXT: movlpd %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: test0:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,3]
+; X64-NEXT: movq %xmm0, (%rdi)
+; X64-NEXT: retq
 entry:
   %tmp2 = load <1 x i64>* %x
   %tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32>
@@ -21,10 +29,42 @@ entry:
 
 define void @test1() {
 ; X32-LABEL: test1:
-; X32: pshuflw
-; X32-NEXT: pshufhw
-; X32-NEXT: pshufd
-; X32: maskmovq
+; X32: ## BB#0: ## %entry
+; X32-NEXT: pushl %edi
+; X32-NEXT: Ltmp0:
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: subl $16, %esp
+; X32-NEXT: Ltmp1:
+; X32-NEXT: .cfi_def_cfa_offset 24
+; X32-NEXT: Ltmp2:
+; X32-NEXT: .cfi_offset %edi, -8
+; X32-NEXT: xorpd %xmm0, %xmm0
+; X32-NEXT: movlpd %xmm0, (%esp)
+; X32-NEXT: movq (%esp), %mm0
+; X32-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
+; X32-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-NEXT: movlpd %xmm0, {{[0-9]+}}(%esp)
+; X32-NEXT: movq {{[0-9]+}}(%esp), %mm1
+; X32-NEXT: xorl %edi, %edi
+; X32-NEXT: maskmovq %mm1, %mm0
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: popl %edi
+; X32-NEXT: retl
+;
+; X64-LABEL: test1:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %mm0
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
+; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %mm1
+; X64-NEXT: xorl %edi, %edi
+; X64-NEXT: maskmovq %mm1, %mm0
+; X64-NEXT: retq
 entry:
   %tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32>
   %tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>)
@@ -41,10 +81,21 @@ entry:
 
 define void @test2() nounwind {
 ; X32-LABEL: test2:
-; X32: movsd
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl L_tmp_V2i$non_lazy_ptr, %eax
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X32-NEXT: movlpd %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movq _tmp_V2i@{{.*}}(%rip), %rax
+; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
+; X64-NEXT: movq %xmm0, (%rax)
+; X64-NEXT: retq
 entry:
   %0 = load <2 x i32>* @tmp_V2i, align 8
   %1 = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
diff --git a/llvm/utils/update_llc_test_checks.py b/llvm/utils/update_llc_test_checks.py
index cb1ae09dedbe..df01d8973c46 100755
--- a/llvm/utils/update_llc_test_checks.py
+++ b/llvm/utils/update_llc_test_checks.py
@@ -69,7 +69,7 @@ def main():
   asm_function_re = re.compile(
       r'^_?(?P<f>[^:]+):[ \t]*#+[ \t]*@(?P=f)\n[^:]*?'
       r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-      r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.(?:sub)?section)',
+      r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section)',
       flags=(re.M | re.S))
   check_prefix_re = re.compile('--check-prefix=(\S+)')
   check_re = re.compile(r'^\s*;\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL)?:')
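
Note on the update_llc_test_checks.py hunk: the only functional change is
the new "\.comm" alternative in the terminator alternation of
asm_function_re. On Darwin, llc can print a .comm directive for a
zero-initialized common global (such as @tmp_V2i above) immediately after
a function's body, and since the non-greedy "body" group only stops at one
of the listed terminators, a function followed only by .comm previously
failed to match at all. Below is a minimal standalone sketch of the
behavior, assuming Python with only the stdlib re module; the _test2 asm
snippet, its @GOTPCREL operand, and the .comm operands are invented for
illustration, not verbatim llc output:

  import re

  # The updated pattern: the final alternation lists the directives that
  # may follow, and therefore terminate, a scraped function body; it now
  # includes Darwin's .comm.
  asm_function_re = re.compile(
      r'^_?(?P<f>[^:]+):[ \t]*#+[ \t]*@(?P=f)\n[^:]*?'
      r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
      r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section)',
      flags=(re.M | re.S))

  # Hypothetical Darwin-style output: the function is followed directly
  # by a .comm directive rather than .cfi_endproc/.globl/.section.
  asm = ('_test2:                  ## @test2\n'
         '## BB#0:                 ## %entry\n'
         '\tmovq\t_tmp_V2i@GOTPCREL(%rip), %rax\n'
         '\tretq\n'
         '\n'
         '\t.comm\t_tmp_V2i,8,3\n')

  m = asm_function_re.search(asm)
  print(m.group('f'))     # test2
  print(m.group('body'))  # everything from '## BB#0:' through 'retq'

Without "\.comm" in the alternation, the same search returns None on this
input, because no terminator is ever found after the body.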