8879a17087

rL230225 made the assumption that only the lower 32 bits of an MMX register load are used as a shift value, when in fact the whole 64 bits are reloaded and treated as an i64 to determine the shift value. This patch reverts rL230225 to ensure that the whole 64 bits of memory are folded, and ensures that the upper 32 bits are zeroed for cases where the shift value comes from a scalar source.

Found during fuzz testing.

Differential Revision: https://reviews.llvm.org/D30833

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@297667 91177308-0d34-0410-b5e6-96231b3b80d8
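
As an illustration of the case being fixed, here is a minimal sketch (hypothetical, not part of the committed test; the function name and pointer argument are inventions for this example) of a variable shift whose count is loaded whole from a 64-bit memory slot, using the register-count intrinsic llvm.x86.mmx.psll.q:

; Hypothetical reduced case: the i64 count is loaded in full. Under the
; rL230225 fold only the low 32 bits of this slot reached psllq; after this
; patch the whole 64-bit load is folded.
define i64 @shift_count_from_memory(i64 %x, i64* %p) nounwind {
entry:
  %val  = bitcast i64 %x to x86_mmx
  %cnt  = load i64, i64* %p, align 8
  %cntx = bitcast i64 %cnt to x86_mmx
  ; llvm.x86.mmx.psll.q takes the count as a full 64-bit x86_mmx value
  %res  = tail call x86_mmx @llvm.x86.mmx.psll.q(x86_mmx %val, x86_mmx %cntx)
  %ret  = bitcast x86_mmx %res to i64
  ret i64 %ret
}

declare x86_mmx @llvm.x86.mmx.psll.q(x86_mmx, x86_mmx)
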
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64

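; t0: pshufw with the 64-bit source argument folded straight off the stack
; as a single memory operand on X86.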
define i32 @t0(i64 %x) nounwind {
; X86-LABEL: t0:
; X86:       # BB#0: # %entry
; X86-NEXT:    pshufw $238, {{[0-9]+}}(%esp), %mm0 # mm0 = mem[2,3,2,3]
; X86-NEXT:    movd %mm0, %eax
; X86-NEXT:    retl
;
; X64-LABEL: t0:
; X64:       # BB#0: # %entry
; X64-NEXT:    movd %rdi, %mm0
; X64-NEXT:    pshufw $238, %mm0, %mm0 # mm0 = mm0[2,3,2,3]
; X64-NEXT:    movd %mm0, %eax
; X64-NEXT:    retq
entry:
  %0 = bitcast i64 %x to <4 x i16>
  %1 = bitcast <4 x i16> %0 to x86_mmx
  %2 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %1, i8 -18)
  %3 = bitcast x86_mmx %2 to <4 x i16>
  %4 = bitcast <4 x i16> %3 to <1 x i64>
  %5 = extractelement <1 x i64> %4, i32 0
  %6 = bitcast i64 %5 to <2 x i32>
  %7 = extractelement <2 x i32> %6, i32 0
  ret i32 %7
}

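; t1: variable psllq where the i32 count is moved into an MMX register with
; movd, zeroing bits 63:32 (psllq reads all 64 bits of the count).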
define i64 @t1(i64 %x, i32 %n) nounwind {
; X86-LABEL: t1:
; X86:       # BB#0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movd 16(%ebp), %mm0
; X86-NEXT:    movq 8(%ebp), %mm1
; X86-NEXT:    psllq %mm0, %mm1
; X86-NEXT:    movq %mm1, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t1:
; X64:       # BB#0: # %entry
; X64-NEXT:    movd %esi, %mm0
; X64-NEXT:    movd %rdi, %mm1
; X64-NEXT:    psllq %mm0, %mm1
; X64-NEXT:    movd %mm1, %rax
; X64-NEXT:    retq
entry:
  %0 = bitcast i64 %x to x86_mmx
  %1 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %0, i32 %n)
  %2 = bitcast x86_mmx %1 to i64
  ret i64 %2
}

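; t2: the shifted operand is built from an i32 scalar with the upper lane
; explicitly zeroed, then OR'd into %x.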
define i64 @t2(i64 %x, i32 %n, i32 %w) nounwind {
; X86-LABEL: t2:
; X86:       # BB#0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movd 16(%ebp), %mm0
; X86-NEXT:    movd 20(%ebp), %mm1
; X86-NEXT:    psllq %mm0, %mm1
; X86-NEXT:    por 8(%ebp), %mm1
; X86-NEXT:    movq %mm1, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t2:
; X64:       # BB#0: # %entry
; X64-NEXT:    movd %esi, %mm0
; X64-NEXT:    movd %edx, %mm1
; X64-NEXT:    psllq %mm0, %mm1
; X64-NEXT:    movd %rdi, %mm0
; X64-NEXT:    por %mm1, %mm0
; X64-NEXT:    movd %mm0, %rax
; X64-NEXT:    retq
entry:
  %0 = insertelement <2 x i32> undef, i32 %w, i32 0
  %1 = insertelement <2 x i32> %0, i32 0, i32 1
  %2 = bitcast <2 x i32> %1 to x86_mmx
  %3 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %2, i32 %n)
  %4 = bitcast i64 %x to x86_mmx
  %5 = tail call x86_mmx @llvm.x86.mmx.por(x86_mmx %4, x86_mmx %3)
  %6 = bitcast x86_mmx %5 to i64
  ret i64 %6
}

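; t3: value and count both come from memory; the 64-bit value loads with a
; full movq while the i32 count uses a zero-extending movd.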
define i64 @t3(<1 x i64>* %y, i32* %n) nounwind {
; X86-LABEL: t3:
; X86:       # BB#0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movq (%ecx), %mm0
; X86-NEXT:    movd (%eax), %mm1
; X86-NEXT:    psllq %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t3:
; X64:       # BB#0: # %entry
; X64-NEXT:    movq (%rdi), %mm0
; X64-NEXT:    movd (%rsi), %mm1
; X64-NEXT:    psllq %mm1, %mm0
; X64-NEXT:    movd %mm0, %rax
; X64-NEXT:    retq
entry:
  %0 = bitcast <1 x i64>* %y to x86_mmx*
  %1 = load x86_mmx, x86_mmx* %0, align 8
  %2 = load i32, i32* %n, align 4
  %3 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %1, i32 %2)
  %4 = bitcast x86_mmx %3 to i64
  ret i64 %4
}

declare x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx, i8)
declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
declare x86_mmx @llvm.x86.mmx.por(x86_mmx, x86_mmx)