commit 49c9300c28

As discussed in D11886, this patch moves the SSE/AVX vector blend folding from PerformINTRINSIC_WO_CHAINCombine to InstCombine (which allows us to remove PerformINTRINSIC_WO_CHAINCombine completely). InstCombiner already had partial support for this; I just had to add support for zero (ConstantAggregateZero) masks and for the case where both selection inputs are the same (allowing us to ignore the mask).

I also moved all the relevant combine tests into InstCombine/blend_x86.ll.

Differential Revision: http://reviews.llvm.org/D11934

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@244723 91177308-0d34-0410-b5e6-96231b3b80d8
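As a rough sketch of the three folds this enables at the IR level (hypothetical functions for illustration only, e.g. fed through `opt -instcombine`; they are not part of this commit):

declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i32)

define <4 x i32> @fold_zero_mask(<4 x i32> %a, <4 x i32> %b) {
  ; a zero mask never selects from %b, so the call folds to %a
  %r = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a, <4 x i32> %b, i32 0)
  ret <4 x i32> %r
}

define <4 x i32> @fold_ones_mask(<4 x i32> %a, <4 x i32> %b) {
  ; an all-ones mask selects every element from %b, so the call folds to %b
  %r = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a, <4 x i32> %b, i32 -1)
  ret <4 x i32> %r
}

define <4 x i32> @fold_same_inputs(<4 x i32> %a) {
  ; both selection inputs are %a, so the mask is irrelevant and the call folds to %a
  %r = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a, <4 x i32> %a, i32 7)
  ret <4 x i32> %r
}

In each case the blend call can be replaced by a plain use of the surviving operand, which is what lets the backend test below expect no vpblend* instruction in the output.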
; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s

; Verify that the backend correctly combines away trivial AVX2 blend builtin intrinsics.
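; The first three tests pass the same value as both blend inputs, so the
; immediate mask is irrelevant and the blend must be combined away.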
define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0) {
  %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a0, i32 7)
  ret <16 x i16> %res
}
; CHECK-LABEL: test_x86_avx2_pblendw
; CHECK-NOT: vpblendw
; CHECK: ret

define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0) {
  %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a0, i32 7)
  ret <4 x i32> %res
}
; CHECK-LABEL: test_x86_avx2_pblendd_128
; CHECK-NOT: vpblendd
; CHECK: ret

define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0) {
  %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a0, i32 7)
  ret <8 x i32> %res
}
; CHECK-LABEL: test_x86_avx2_pblendd_256
; CHECK-NOT: vpblendd
; CHECK: ret
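; The test2 cases use a zero mask, which never selects from the second
; operand, so the blend folds to the first operand.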
define <16 x i16> @test2_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
  %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 0)
  ret <16 x i16> %res
}
; CHECK-LABEL: test2_x86_avx2_pblendw
; CHECK-NOT: vpblendw
; CHECK: ret

define <4 x i32> @test2_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
  %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 0)
  ret <4 x i32> %res
}
; CHECK-LABEL: test2_x86_avx2_pblendd_128
; CHECK-NOT: vpblendd
; CHECK: ret

define <8 x i32> @test2_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
  %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 0)
  ret <8 x i32> %res
}
; CHECK-LABEL: test2_x86_avx2_pblendd_256
; CHECK-NOT: vpblendd
; CHECK: ret
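; The test3 cases use an all-ones mask, which selects every element from
; the second operand, so the blend folds to the second operand.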
define <16 x i16> @test3_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
  %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 -1)
  ret <16 x i16> %res
}
; CHECK-LABEL: test3_x86_avx2_pblendw
; CHECK-NOT: vpblendw
; CHECK: ret

define <4 x i32> @test3_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
  %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 -1)
  ret <4 x i32> %res
}
; CHECK-LABEL: test3_x86_avx2_pblendd_128
; CHECK-NOT: vpblendd
; CHECK: ret

define <8 x i32> @test3_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
  %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 -1)
  ret <8 x i32> %res
}
; CHECK-LABEL: test3_x86_avx2_pblendd_256
; CHECK-NOT: vpblendd
; CHECK: ret
declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i32)
declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i32)
declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i32)