Mirror of https://github.com/RPCS3/llvm.git, synced 2025-02-12 13:48:45 +00:00
AMD family 17h (znver1) scheduler model update.
Summary:
This patch enables the following:
1) Regex based instruction itineraries for integer instructions.
2) The instructions are grouped by their nature (move, arithmetic, logic, misc, control transfer).
3) FP instructions and their itineraries are added, including values for SSE4A, BMI, BMI2 and SHA instructions.

Patch by Ganesh Gopalasubramanian

Reviewers: RKSimon, craig.topper

Subscribers: vprasad, shivaram, ddibyend, andreadb, javed.absar, llvm-commits

Differential Revision: https://reviews.llvm.org/D36617

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@312237 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 4387849902
commit 1914cbfcb6
File diff suppressed because it is too large
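The visible hunks below change only the `# sched:` FileCheck annotations, which LLVM's -print-schedule pass emits as [latency:reciprocal throughput]; for example, `retq # sched: [1:0.50]` asserts a 1-cycle latency with one retq issuable every 0.5 cycles under the ZNVER1 model. Those numbers are driven by TableGen scheduling classes in the (suppressed) scheduler model file. As context for what a "regex based itinerary" looks like, here is a minimal TableGen sketch; the resource and class names (ZnFPU01, ZnWriteFAddYmm) and the regex pattern are illustrative assumptions, not lines copied from this patch:

    // Hypothetical sketch of a regex based scheduling entry in an
    // X86 TableGen scheduler model. ZnFPU01 stands in for a processor
    // resource defined elsewhere in the model.
    def ZnWriteFAddYmm : SchedWriteRes<[ZnFPU01]> {
      let Latency = 3;  // would correspond to the [3:1.00] vaddpd lines below
    }
    // instregex binds every instruction whose TableGen name matches the
    // pattern, so one line covers a whole instruction family.
    def : InstRW<[ZnWriteFAddYmm], (instregex "VADD(PD|PS)Yrr")>;

Whether the printed reciprocal throughput comes out as 1.00, 0.50, or 0.25 depends on the resource's unit count and cycle usage in the model, so the values in the checks below reflect the model's resource definitions rather than the Latency field alone.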
@@ -50,7 +50,7 @@ define <2 x i64> @test_aesdec(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT: vaesdec (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <2 x i64>, <2 x i64> *%a2, align 16
 %2 = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1)
 %3 = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %2, <2 x i64> %1)
@@ -99,7 +99,7 @@ define <2 x i64> @test_aesdeclast(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2)
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT: vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <2 x i64>, <2 x i64> *%a2, align 16
 %2 = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1)
 %3 = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %2, <2 x i64> %1)
@@ -148,7 +148,7 @@ define <2 x i64> @test_aesenc(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT: vaesenc (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <2 x i64>, <2 x i64> *%a2, align 16
 %2 = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1)
 %3 = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %2, <2 x i64> %1)
@@ -197,7 +197,7 @@ define <2 x i64> @test_aesenclast(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2)
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT: vaesenclast (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <2 x i64>, <2 x i64> *%a2, align 16
 %2 = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1)
 %3 = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %2, <2 x i64> %1)
@@ -253,7 +253,7 @@ define <2 x i64> @test_aesimc(<2 x i64> %a0, <2 x i64> *%a1) {
 ; ZNVER1-NEXT: vaesimc (%rdi), %xmm1 # sched: [11:0.50]
 ; ZNVER1-NEXT: vaesimc %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <2 x i64>, <2 x i64> *%a1, align 16
 %2 = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %a0)
 %3 = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %1)
@@ -310,7 +310,7 @@ define <2 x i64> @test_aeskeygenassist(<2 x i64> %a0, <2 x i64> *%a1) {
 ; ZNVER1-NEXT: vaeskeygenassist $7, (%rdi), %xmm1 # sched: [11:0.50]
 ; ZNVER1-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <2 x i64>, <2 x i64> *%a1, align 16
 %2 = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7)
 %3 = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %1, i8 7)

@@ -42,7 +42,7 @@ define <4 x double> @test_addpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fadd <4 x double> %a0, %a1
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = fadd <4 x double> %1, %2
@@ -84,7 +84,7 @@ define <8 x float> @test_addps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fadd <8 x float> %a0, %a1
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = fadd <8 x float> %1, %2
@@ -126,7 +126,7 @@ define <4 x double> @test_addsubpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %1, <4 x double> %2)
@@ -169,7 +169,7 @@ define <8 x float> @test_addsubps(<8 x float> %a0, <8 x float> %a1, <8 x float>
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %1, <8 x float> %2)
@@ -218,7 +218,7 @@ define <4 x double> @test_andnotpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
 ; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = bitcast <4 x double> %a0 to <4 x i64>
 %2 = bitcast <4 x double> %a1 to <4 x i64>
 %3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -273,7 +273,7 @@ define <8 x float> @test_andnotps(<8 x float> %a0, <8 x float> %a1, <8 x float>
 ; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = bitcast <8 x float> %a0 to <4 x i64>
 %2 = bitcast <8 x float> %a1 to <4 x i64>
 %3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -328,7 +328,7 @@ define <4 x double> @test_andpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
 ; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = bitcast <4 x double> %a0 to <4 x i64>
 %2 = bitcast <4 x double> %a1 to <4 x i64>
 %3 = and <4 x i64> %1, %2
@@ -381,7 +381,7 @@ define <8 x float> @test_andps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
 ; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = bitcast <8 x float> %a0 to <4 x i64>
 %2 = bitcast <8 x float> %a1 to <4 x i64>
 %3 = and <4 x i64> %1, %2
@@ -434,7 +434,7 @@ define <4 x double> @test_blendpd(<4 x double> %a0, <4 x double> %a1, <4 x doubl
 ; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = fadd <4 x double> %a1, %1
@@ -477,7 +477,7 @@ define <8 x float> @test_blendps(<8 x float> %a0, <8 x float> %a1, <8 x float> *
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
 ; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 1, i32 10, i32 3, i32 12, i32 13, i32 14, i32 7>
@@ -519,7 +519,7 @@ define <4 x double> @test_blendvpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
 %2 = load <4 x double>, <4 x double> *%a3, align 32
 %3 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %1, <4 x double> %2, <4 x double> %a2)
@@ -562,7 +562,7 @@ define <8 x float> @test_blendvps(<8 x float> %a0, <8 x float> %a1, <8 x float>
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
 %2 = load <8 x float>, <8 x float> *%a3, align 32
 %3 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %1, <8 x float> %2, <8 x float> %a2)
@@ -599,7 +599,7 @@ define <8 x float> @test_broadcastf128(<4 x float> *%a0) {
 ; ZNVER1-LABEL: test_broadcastf128:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <4 x float>, <4 x float> *%a0, align 32
 %2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
 ret <8 x float> %2
@@ -634,7 +634,7 @@ define <4 x double> @test_broadcastsd_ymm(double *%a0) {
 ; ZNVER1-LABEL: test_broadcastsd_ymm:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load double, double *%a0, align 8
 %2 = insertelement <4 x double> undef, double %1, i32 0
 %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> zeroinitializer
@@ -670,7 +670,7 @@ define <4 x float> @test_broadcastss(float *%a0) {
 ; ZNVER1-LABEL: test_broadcastss:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load float, float *%a0, align 4
 %2 = insertelement <4 x float> undef, float %1, i32 0
 %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer
@@ -706,7 +706,7 @@ define <8 x float> @test_broadcastss_ymm(float *%a0) {
 ; ZNVER1-LABEL: test_broadcastss_ymm:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load float, float *%a0, align 4
 %2 = insertelement <8 x float> undef, float %1, i32 0
 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> zeroinitializer
@@ -754,7 +754,7 @@ define <4 x double> @test_cmppd(<4 x double> %a0, <4 x double> %a1, <4 x double>
 ; ZNVER1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fcmp oeq <4 x double> %a0, %a1
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = fcmp oeq <4 x double> %a0, %2
@@ -806,7 +806,7 @@ define <8 x float> @test_cmpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
 ; ZNVER1-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fcmp oeq <8 x float> %a0, %a1
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = fcmp oeq <8 x float> %a0, %2
@@ -858,7 +858,7 @@ define <4 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) {
 ; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [12:1.00]
 ; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [5:1.00]
 ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = sitofp <4 x i32> %a0 to <4 x double>
 %2 = load <4 x i32>, <4 x i32> *%a1, align 16
 %3 = sitofp <4 x i32> %2 to <4 x double>
@@ -909,7 +909,7 @@ define <8 x float> @test_cvtdq2ps(<8 x i32> %a0, <8 x i32> *%a1) {
 ; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [12:1.00]
 ; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [5:1.00]
 ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = sitofp <8 x i32> %a0 to <8 x float>
 %2 = load <8 x i32>, <8 x i32> *%a1, align 16
 %3 = sitofp <8 x i32> %2 to <8 x float>
@@ -957,8 +957,8 @@ define <8 x i32> @test_cvtpd2dq(<4 x double> %a0, <4 x double> *%a1) {
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fptosi <4 x double> %a0 to <4 x i32>
 %2 = load <4 x double>, <4 x double> *%a1, align 32
 %3 = fptosi <4 x double> %2 to <4 x i32>
@@ -1004,10 +1004,10 @@ define <8 x float> @test_cvtpd2ps(<4 x double> %a0, <4 x double> *%a1) {
 ;
 ; ZNVER1-LABEL: test_cvtpd2ps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00]
 ; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fptrunc <4 x double> %a0 to <4 x float>
 %2 = load <4 x double>, <4 x double> *%a1, align 32
 %3 = fptrunc <4 x double> %2 to <4 x float>
@@ -1056,7 +1056,7 @@ define <8 x i32> @test_cvtps2dq(<8 x float> %a0, <8 x float> *%a1) {
 ; ZNVER1-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [12:1.00]
 ; ZNVER1-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [5:1.00]
 ; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fptosi <8 x float> %a0 to <8 x i32>
 %2 = load <8 x float>, <8 x float> *%a1, align 32
 %3 = fptosi <8 x float> %2 to <8 x i32>
@@ -1097,9 +1097,9 @@ define <4 x double> @test_divpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
 ;
 ; ZNVER1-LABEL: test_divpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:1.00]
-; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:15.00]
+; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:22.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fdiv <4 x double> %a0, %a1
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = fdiv <4 x double> %1, %2
@@ -1139,9 +1139,9 @@ define <8 x float> @test_divps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
 ;
 ; ZNVER1-LABEL: test_divps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [15:1.00]
-; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [22:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [12:12.00]
+; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [19:19.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fdiv <8 x float> %a0, %a1
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = fdiv <8 x float> %1, %2
@@ -1181,9 +1181,9 @@ define <8 x float> @test_dpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2
 ;
 ; ZNVER1-LABEL: test_dpps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7)
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %1, <8 x float> %2, i8 7)
@@ -1228,10 +1228,10 @@ define <4 x float> @test_extractf128(<8 x float> %a0, <8 x float> %a1, <4 x floa
 ;
 ; ZNVER1-LABEL: test_extractf128:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.33]
+; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [8:0.50]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <8 x float> %a0, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
 %2 = shufflevector <8 x float> %a1, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
 store <4 x float> %2, <4 x float> *%a2
@@ -1271,9 +1271,9 @@ define <4 x double> @test_haddpd(<4 x double> %a0, <4 x double> %a1, <4 x double
 ;
 ; ZNVER1-LABEL: test_haddpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1314,9 +1314,9 @@ define <8 x float> @test_haddps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%
 ;
 ; ZNVER1-LABEL: test_haddps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1357,9 +1357,9 @@ define <4 x double> @test_hsubpd(<4 x double> %a0, <4 x double> %a1, <4 x double
 ;
 ; ZNVER1-LABEL: test_hsubpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1400,9 +1400,9 @@ define <8 x float> @test_hsubps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%
 ;
 ; ZNVER1-LABEL: test_hsubps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1448,10 +1448,10 @@ define <8 x float> @test_insertf128(<8 x float> %a0, <4 x float> %a1, <4 x float
 ;
 ; ZNVER1-LABEL: test_insertf128:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50]
-; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [2:0.67]
+; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
 ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <4 x float> %a1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
 %2 = shufflevector <8 x float> %a0, <8 x float> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
 %3 = load <4 x float>, <4 x float> *%a2, align 16
@@ -1490,7 +1490,7 @@ define <32 x i8> @test_lddqu(i8* %a0) {
 ; ZNVER1-LABEL: test_lddqu:
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vlddqu (%rdi), %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0)
 ret <32 x i8> %1
 }
@@ -1534,10 +1534,10 @@ define <2 x double> @test_maskmovpd(i8* %a0, <2 x i64> %a1, <2 x double> %a2) {
 ;
 ; ZNVER1-LABEL: test_maskmovpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2
-; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi)
+; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:0.50]
+; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [4:0.50]
 ; ZNVER1-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %a1)
 call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %a1, <2 x double> %a2)
 ret <2 x double> %1
@@ -1583,10 +1583,10 @@ define <4 x double> @test_maskmovpd_ymm(i8* %a0, <4 x i64> %a1, <4 x double> %a2
 ;
 ; ZNVER1-LABEL: test_maskmovpd_ymm:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2
-; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi)
+; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:1.00]
+; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; ZNVER1-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %a1)
 call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %a1, <4 x double> %a2)
 ret <4 x double> %1
@@ -1632,10 +1632,10 @@ define <4 x float> @test_maskmovps(i8* %a0, <4 x i32> %a1, <4 x float> %a2) {
 ;
 ; ZNVER1-LABEL: test_maskmovps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2
-; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
+; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:0.50]
+; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [4:0.50]
 ; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %a1)
 call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %a1, <4 x float> %a2)
 ret <4 x float> %1
@@ -1681,10 +1681,10 @@ define <8 x float> @test_maskmovps_ymm(i8* %a0, <8 x i32> %a1, <8 x float> %a2)
 ;
 ; ZNVER1-LABEL: test_maskmovps_ymm:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2
-; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi)
+; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:1.00]
+; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; ZNVER1-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %a1)
 call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %a1, <8 x float> %a2)
 ret <8 x float> %1
@@ -1727,7 +1727,7 @@ define <4 x double> @test_maxpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1770,7 +1770,7 @@ define <8 x float> @test_maxps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1813,7 +1813,7 @@ define <4 x double> @test_minpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1)
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1856,7 +1856,7 @@ define <8 x float> @test_minps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1905,7 +1905,7 @@ define <4 x double> @test_movapd(<4 x double> *%a0, <4 x double> *%a1) {
 ; ZNVER1-NEXT: vmovapd (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <4 x double>, <4 x double> *%a0, align 32
 %2 = fadd <4 x double> %1, %1
 store <4 x double> %2, <4 x double> *%a1, align 32
@@ -1953,7 +1953,7 @@ define <8 x float> @test_movaps(<8 x float> *%a0, <8 x float> *%a1) {
 ; ZNVER1-NEXT: vmovaps (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <8 x float>, <8 x float> *%a0, align 32
 %2 = fadd <8 x float> %1, %1
 store <8 x float> %2, <8 x float> *%a1, align 32
@@ -2001,7 +2001,7 @@ define <4 x double> @test_movddup(<4 x double> %a0, <4 x double> *%a1) {
 ; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [8:0.50]
 ; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
 %2 = load <4 x double>, <4 x double> *%a1, align 32
 %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -2041,9 +2041,9 @@ define i32 @test_movmskpd(<4 x double> %a0) {
 ;
 ; ZNVER1-LABEL: test_movmskpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
 ret i32 %1
 }
@@ -2081,9 +2081,9 @@ define i32 @test_movmskps(<8 x float> %a0) {
 ;
 ; ZNVER1-LABEL: test_movmskps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
 ret i32 %1
 }
@@ -2124,7 +2124,7 @@ define <4 x double> @test_movntpd(<4 x double> %a0, <4 x double> *%a1) {
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fadd <4 x double> %a0, %a0
 store <4 x double> %1, <4 x double> *%a1, align 32, !nontemporal !0
 ret <4 x double> %1
@@ -2165,7 +2165,7 @@ define <8 x float> @test_movntps(<8 x float> %a0, <8 x float> *%a1) {
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fadd <8 x float> %a0, %a0
 store <8 x float> %1, <8 x float> *%a1, align 32, !nontemporal !0
 ret <8 x float> %1
@@ -2212,7 +2212,7 @@ define <8 x float> @test_movshdup(<8 x float> %a0, <8 x float> *%a1) {
 ; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [8:0.50]
 ; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
 ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
 %2 = load <8 x float>, <8 x float> *%a1, align 32
 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -2261,7 +2261,7 @@ define <8 x float> @test_movsldup(<8 x float> %a0, <8 x float> *%a1) {
 ; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [8:0.50]
 ; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
 ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
 %2 = load <8 x float>, <8 x float> *%a1, align 32
 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -2312,7 +2312,7 @@ define <4 x double> @test_movupd(<4 x double> *%a0, <4 x double> *%a1) {
 ; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <4 x double>, <4 x double> *%a0, align 1
 %2 = fadd <4 x double> %1, %1
 store <4 x double> %2, <4 x double> *%a1, align 1
@@ -2362,7 +2362,7 @@ define <8 x float> @test_movups(<8 x float> *%a0, <8 x float> *%a1) {
 ; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = load <8 x float>, <8 x float> *%a0, align 1
 %2 = fadd <8 x float> %1, %1
 store <8 x float> %2, <8 x float> *%a1, align 1
@@ -2402,9 +2402,9 @@ define <4 x double> @test_mulpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
 ;
 ; ZNVER1-LABEL: test_mulpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
+; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fmul <4 x double> %a0, %a1
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = fmul <4 x double> %1, %2
@@ -2444,9 +2444,9 @@ define <8 x float> @test_mulps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
 ;
 ; ZNVER1-LABEL: test_mulps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
+; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fmul <8 x float> %a0, %a1
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = fmul <8 x float> %1, %2
@@ -2494,7 +2494,7 @@ define <4 x double> @orpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2)
 ; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = bitcast <4 x double> %a0 to <4 x i64>
 %2 = bitcast <4 x double> %a1 to <4 x i64>
 %3 = or <4 x i64> %1, %2
@@ -2547,7 +2547,7 @@ define <8 x float> @test_orps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2
 ; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = bitcast <8 x float> %a0 to <4 x i64>
 %2 = bitcast <8 x float> %a1 to <4 x i64>
 %3 = or <4 x i64> %1, %2
@@ -2600,7 +2600,7 @@ define <2 x double> @test_permilpd(<2 x double> %a0, <2 x double> *%a1) {
 ; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [8:0.50]
 ; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50]
 ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 1, i32 0>
 %2 = load <2 x double>, <2 x double> *%a1, align 16
 %3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> <i32 1, i32 0>
@@ -2649,7 +2649,7 @@ define <4 x double> @test_permilpd_ymm(<4 x double> %a0, <4 x double> *%a1) {
 ; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:0.50]
 ; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
 %2 = load <4 x double>, <4 x double> *%a1, align 32
 %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
@@ -2698,7 +2698,7 @@ define <4 x float> @test_permilps(<4 x float> %a0, <4 x float> *%a1) {
 ; ZNVER1-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50]
 ; ZNVER1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50]
 ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 %2 = load <4 x float>, <4 x float> *%a1, align 16
 %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -2747,7 +2747,7 @@ define <8 x float> @test_permilps_ymm(<8 x float> %a0, <8 x float> *%a1) {
 ; ZNVER1-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:0.50]
 ; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
 ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
 %2 = load <8 x float>, <8 x float> *%a1, align 32
 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -2790,7 +2790,7 @@ define <2 x double> @test_permilvarpd(<2 x double> %a0, <2 x i64> %a1, <2 x i64>
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1)
 %2 = load <2 x i64>, <2 x i64> *%a2, align 16
 %3 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> %2)
@@ -2833,7 +2833,7 @@ define <4 x double> @test_permilvarpd_ymm(<4 x double> %a0, <4 x i64> %a1, <4 x
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1)
 %2 = load <4 x i64>, <4 x i64> *%a2, align 32
 %3 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> %2)
@@ -2876,7 +2876,7 @@ define <4 x float> @test_permilvarps(<4 x float> %a0, <4 x i32> %a1, <4 x i32> *
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1)
 %2 = load <4 x i32>, <4 x i32> *%a2, align 16
 %3 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> %2)
@@ -2919,7 +2919,7 @@ define <8 x float> @test_permilvarps_ymm(<8 x float> %a0, <8 x i32> %a1, <8 x i3
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1)
 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
 %3 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> %2)
@@ -2968,7 +2968,7 @@ define <8 x float> @test_rcpps(<8 x float> %a0, <8 x float> *%a1) {
 ; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [12:0.50]
 ; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:0.50]
 ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0)
 %2 = load <8 x float>, <8 x float> *%a1, align 32
 %3 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %2)
@@ -3015,10 +3015,10 @@ define <4 x double> @test_roundpd(<4 x double> %a0, <4 x double> *%a1) {
 ;
 ; ZNVER1-LABEL: test_roundpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [10:1.00]
-; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [11:1.00]
+; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [4:1.00]
 ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7)
 %2 = load <4 x double>, <4 x double> *%a1, align 32
 %3 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %2, i32 7)
@@ -3065,10 +3065,10 @@ define <8 x float> @test_roundps(<8 x float> %a0, <8 x float> *%a1) {
 ;
 ; ZNVER1-LABEL: test_roundps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [10:1.00]
-; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [11:1.00]
+; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [4:1.00]
 ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7)
 %2 = load <8 x float>, <8 x float> *%a1, align 32
 %3 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %2, i32 7)
@@ -3118,7 +3118,7 @@ define <8 x float> @test_rsqrtps(<8 x float> %a0, <8 x float> *%a1) {
 ; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [12:0.50]
 ; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [5:0.50]
 ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0)
 %2 = load <8 x float>, <8 x float> *%a1, align 32
 %3 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %2)
@@ -3168,7 +3168,7 @@ define <4 x double> @test_shufpd(<4 x double> %a0, <4 x double> %a1, <4 x double
 ; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
 ; ZNVER1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 4, i32 2, i32 7>
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 1, i32 4, i32 2, i32 7>
@@ -3211,7 +3211,7 @@ define <8 x float> @test_shufps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50]
 ; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 0, i32 8, i32 8, i32 4, i32 4, i32 12, i32 12>
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 3, i32 8, i32 8, i32 4, i32 7, i32 12, i32 12>
@@ -3256,10 +3256,10 @@ define <4 x double> @test_sqrtpd(<4 x double> %a0, <4 x double> *%a1) {
 ;
 ; ZNVER1-LABEL: test_sqrtpd:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [27:1.00]
-; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [20:1.00]
+; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [47:47.00]
+; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [40:40.00]
 ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0)
 %2 = load <4 x double>, <4 x double> *%a1, align 32
 %3 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %2)
@@ -3306,10 +3306,10 @@ define <8 x float> @test_sqrtps(<8 x float> %a0, <8 x float> *%a1) {
 ;
 ; ZNVER1-LABEL: test_sqrtps:
 ; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [27:1.00]
-; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [20:1.00]
+; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [35:35.00]
+; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [28:28.00]
 ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0)
 %2 = load <8 x float>, <8 x float> *%a1, align 32
 %3 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %2)
@@ -3353,7 +3353,7 @@ define <4 x double> @test_subpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fsub <4 x double> %a0, %a1
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = fsub <4 x double> %1, %2
@@ -3395,7 +3395,7 @@ define <8 x float> @test_subps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = fsub <8 x float> %a0, %a1
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = fsub <8 x float> %1, %2
@@ -3455,7 +3455,7 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) {
 ; ZNVER1-NEXT: setb %al # sched: [1:0.25]
 ; ZNVER1-NEXT: vtestpd (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1)
 %2 = load <2 x double>, <2 x double> *%a2, align 16
 %3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %2)
@@ -3521,8 +3521,8 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a
 ; ZNVER1-NEXT: setb %al # sched: [1:0.25]
 ; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1)
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %2)
@@ -3584,7 +3584,7 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; ZNVER1-NEXT: setb %al # sched: [1:0.25]
 ; ZNVER1-NEXT: vtestps (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1)
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %2)
@@ -3650,8 +3650,8 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2)
 ; ZNVER1-NEXT: setb %al # sched: [1:0.25]
 ; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1)
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %2)
@@ -3701,7 +3701,7 @@ define <4 x double> @test_unpckhpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
 ; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
 ; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -3744,7 +3744,7 @@ define <8 x float> @test_unpckhps(<8 x float> %a0, <8 x float> %a1, <8 x float>
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.50]
 ; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -3792,7 +3792,7 @@ define <4 x double> @test_unpcklpd(<4 x double> %a0, <4 x double> %a1, <4 x doub
 ; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
 ; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
 %2 = load <4 x double>, <4 x double> *%a2, align 32
 %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -3835,7 +3835,7 @@ define <8 x float> @test_unpcklps(<8 x float> %a0, <8 x float> %a1, <8 x float>
 ; ZNVER1: # BB#0:
 ; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.50]
 ; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
 %2 = load <8 x float>, <8 x float> *%a2, align 32
 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -3883,7 +3883,7 @@ define <4 x double> @test_xorpd(<4 x double> %a0, <4 x double> %a1, <4 x double>
 ; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = bitcast <4 x double> %a0 to <4 x i64>
 %2 = bitcast <4 x double> %a1 to <4 x i64>
 %3 = xor <4 x i64> %1, %2
@@ -3936,7 +3936,7 @@ define <8 x float> @test_xorps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a
 ; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = bitcast <8 x float> %a0 to <4 x i64>
 %2 = bitcast <8 x float> %a1 to <4 x i64>
 %3 = xor <4 x i64> %1, %2
@ -3976,8 +3976,8 @@ define void @test_zeroall() {
|
||||
;
|
||||
; ZNVER1-LABEL: test_zeroall:
|
||||
; ZNVER1: # BB#0:
|
||||
; ZNVER1-NEXT: vzeroall
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: vzeroall # sched: [100:?]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
call void @llvm.x86.avx.vzeroall()
|
||||
ret void
|
||||
}
|
||||
@ -4011,8 +4011,8 @@ define void @test_zeroupper() {
|
||||
;
|
||||
; ZNVER1-LABEL: test_zeroupper:
|
||||
; ZNVER1: # BB#0:
|
||||
; ZNVER1-NEXT: vzeroupper
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
call void @llvm.x86.avx.vzeroupper()
|
||||
ret void
|
||||
}
|
||||
|
@ -31,7 +31,7 @@ define <32 x i8> @test_pabsb(<32 x i8> %a0, <32 x i8> *%a1) {
; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0)
%2 = load <32 x i8>, <32 x i8> *%a1, align 32
%3 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %2)
@ -67,7 +67,7 @@ define <8 x i32> @test_pabsd(<8 x i32> %a0, <8 x i32> *%a1) {
; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0)
%2 = load <8 x i32>, <8 x i32> *%a1, align 32
%3 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %2)
@ -103,7 +103,7 @@ define <16 x i16> @test_pabsw(<16 x i16> %a0, <16 x i16> *%a1) {
; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0)
%2 = load <16 x i16>, <16 x i16> *%a1, align 32
%3 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %2)
@ -135,7 +135,7 @@ define <32 x i8> @test_paddb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <32 x i8> %a0, %a1
%2 = load <32 x i8>, <32 x i8> *%a2, align 32
%3 = add <32 x i8> %1, %2
@ -165,7 +165,7 @@ define <8 x i32> @test_paddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <8 x i32> %a0, %a1
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = add <8 x i32> %1, %2
@ -195,7 +195,7 @@ define <4 x i64> @test_paddq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = add <4 x i64> %1, %2
@ -225,7 +225,7 @@ define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <16 x i16> %a0, %a1
%2 = load <16 x i16>, <16 x i16> *%a2, align 32
%3 = add <16 x i16> %1, %2
@ -259,7 +259,7 @@ define <4 x i64> @test_pand(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; ZNVER1-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = and <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = and <4 x i64> %1, %2
@ -294,7 +294,7 @@ define <4 x i64> @test_pandn(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; ZNVER1-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = xor <4 x i64> %a0, <i64 -1, i64 -1, i64 -1, i64 -1>
%2 = and <4 x i64> %a1, %1
%3 = load <4 x i64>, <4 x i64> *%a2, align 32
@ -325,9 +325,9 @@ define <8 x i32> @test_pmulld(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
;
; ZNVER1-LABEL: test_pmulld:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = mul <8 x i32> %a0, %a1
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = mul <8 x i32> %1, %2
@ -357,7 +357,7 @@ define <16 x i16> @test_pmullw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = mul <16 x i16> %a0, %a1
%2 = load <16 x i16>, <16 x i16> *%a2, align 32
%3 = mul <16 x i16> %1, %2
@ -391,7 +391,7 @@ define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = or <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = or <4 x i64> %1, %2
@ -422,7 +422,7 @@ define <32 x i8> @test_psubb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <32 x i8> %a0, %a1
%2 = load <32 x i8>, <32 x i8> *%a2, align 32
%3 = sub <32 x i8> %1, %2
@ -452,7 +452,7 @@ define <8 x i32> @test_psubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <8 x i32> %a0, %a1
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = sub <8 x i32> %1, %2
@ -482,7 +482,7 @@ define <4 x i64> @test_psubq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = sub <4 x i64> %1, %2
@ -512,7 +512,7 @@ define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <16 x i16> %a0, %a1
%2 = load <16 x i16>, <16 x i16> *%a2, align 32
%3 = sub <16 x i16> %1, %2
@ -546,7 +546,7 @@ define <4 x i64> @test_pxor(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = xor <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = xor <4 x i64> %1, %2

@ -41,7 +41,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; ZNVER1-NEXT: andw (%rdx), %di # sched: [5:0.50]
; ZNVER1-NEXT: addl %edi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a2
%2 = xor i16 %a0, -1
%3 = and i16 %2, %a1
@ -77,7 +77,7 @@ define i32 @test_andn_i32(i32 %a0, i32 %a1, i32 *%a2) {
; ZNVER1-NEXT: andnl (%rdx), %edi, %eax # sched: [5:0.50]
; ZNVER1-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = xor i32 %a0, -1
%3 = and i32 %2, %a1
@ -113,7 +113,7 @@ define i64 @test_andn_i64(i64 %a0, i64 %a1, i64 *%a2) {
; ZNVER1-NEXT: andnq (%rdx), %rdi, %rax # sched: [5:0.50]
; ZNVER1-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = xor i64 %a0, -1
%3 = and i64 %2, %a1
@ -146,10 +146,10 @@ define i32 @test_bextr_i32(i32 %a0, i32 %a1, i32 *%a2) {
;
; ZNVER1-LABEL: test_bextr_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: bextrl %edi, (%rdx), %ecx
; ZNVER1-NEXT: bextrl %edi, %esi, %eax
; ZNVER1-NEXT: bextrl %edi, (%rdx), %ecx # sched: [5:0.50]
; ZNVER1-NEXT: bextrl %edi, %esi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %1, i32 %a0)
%3 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %a1, i32 %a0)
@ -182,10 +182,10 @@ define i64 @test_bextr_i64(i64 %a0, i64 %a1, i64 *%a2) {
;
; ZNVER1-LABEL: test_bextr_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: bextrq %rdi, (%rdx), %rcx
; ZNVER1-NEXT: bextrq %rdi, %rsi, %rax
; ZNVER1-NEXT: bextrq %rdi, (%rdx), %rcx # sched: [5:0.50]
; ZNVER1-NEXT: bextrq %rdi, %rsi, %rax # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %1, i64 %a0)
%3 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %a1, i64 %a0)
@ -218,10 +218,10 @@ define i32 @test_blsi_i32(i32 %a0, i32 *%a1) {
;
; ZNVER1-LABEL: test_blsi_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: blsil (%rsi), %ecx
; ZNVER1-NEXT: blsil %edi, %eax
; ZNVER1-NEXT: blsil (%rsi), %ecx # sched: [6:0.50]
; ZNVER1-NEXT: blsil %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = sub i32 0, %1
%3 = sub i32 0, %a0
@ -255,10 +255,10 @@ define i64 @test_blsi_i64(i64 %a0, i64 *%a1) {
;
; ZNVER1-LABEL: test_blsi_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: blsiq (%rsi), %rcx
; ZNVER1-NEXT: blsiq %rdi, %rax
; ZNVER1-NEXT: blsiq (%rsi), %rcx # sched: [6:0.50]
; ZNVER1-NEXT: blsiq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = sub i64 0, %1
%3 = sub i64 0, %a0
@ -292,10 +292,10 @@ define i32 @test_blsmsk_i32(i32 %a0, i32 *%a1) {
;
; ZNVER1-LABEL: test_blsmsk_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: blsmskl (%rsi), %ecx
; ZNVER1-NEXT: blsmskl %edi, %eax
; ZNVER1-NEXT: blsmskl (%rsi), %ecx # sched: [6:0.50]
; ZNVER1-NEXT: blsmskl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = sub i32 %1, 1
%3 = sub i32 %a0, 1
@ -329,10 +329,10 @@ define i64 @test_blsmsk_i64(i64 %a0, i64 *%a1) {
;
; ZNVER1-LABEL: test_blsmsk_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: blsmskq (%rsi), %rcx
; ZNVER1-NEXT: blsmskq %rdi, %rax
; ZNVER1-NEXT: blsmskq (%rsi), %rcx # sched: [6:0.50]
; ZNVER1-NEXT: blsmskq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = sub i64 %1, 1
%3 = sub i64 %a0, 1
@ -366,10 +366,10 @@ define i32 @test_blsr_i32(i32 %a0, i32 *%a1) {
;
; ZNVER1-LABEL: test_blsr_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: blsrl (%rsi), %ecx
; ZNVER1-NEXT: blsrl %edi, %eax
; ZNVER1-NEXT: blsrl (%rsi), %ecx # sched: [6:0.50]
; ZNVER1-NEXT: blsrl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = sub i32 %1, 1
%3 = sub i32 %a0, 1
@ -403,10 +403,10 @@ define i64 @test_blsr_i64(i64 %a0, i64 *%a1) {
;
; ZNVER1-LABEL: test_blsr_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: blsrq (%rsi), %rcx
; ZNVER1-NEXT: blsrq %rdi, %rax
; ZNVER1-NEXT: blsrq (%rsi), %rcx # sched: [6:0.50]
; ZNVER1-NEXT: blsrq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = sub i64 %1, 1
%3 = sub i64 %a0, 1
@ -443,11 +443,11 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
;
; ZNVER1-LABEL: test_cttz_i16:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: tzcntw (%rsi), %cx
; ZNVER1-NEXT: tzcntw %di, %ax
; ZNVER1-NEXT: tzcntw (%rsi), %cx # sched: [6:0.50]
; ZNVER1-NEXT: tzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.cttz.i16( i16 %1, i1 false )
%3 = tail call i16 @llvm.cttz.i16( i16 %a0, i1 false )
@ -480,10 +480,10 @@ define i32 @test_cttz_i32(i32 %a0, i32 *%a1) {
;
; ZNVER1-LABEL: test_cttz_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: tzcntl (%rsi), %ecx
; ZNVER1-NEXT: tzcntl %edi, %eax
; ZNVER1-NEXT: tzcntl (%rsi), %ecx # sched: [6:0.50]
; ZNVER1-NEXT: tzcntl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = tail call i32 @llvm.cttz.i32( i32 %1, i1 false )
%3 = tail call i32 @llvm.cttz.i32( i32 %a0, i1 false )
@ -516,10 +516,10 @@ define i64 @test_cttz_i64(i64 %a0, i64 *%a1) {
;
; ZNVER1-LABEL: test_cttz_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: tzcntq (%rsi), %rcx
; ZNVER1-NEXT: tzcntq %rdi, %rax
; ZNVER1-NEXT: tzcntq (%rsi), %rcx # sched: [6:0.50]
; ZNVER1-NEXT: tzcntq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = tail call i64 @llvm.cttz.i64( i64 %1, i1 false )
%3 = tail call i64 @llvm.cttz.i64( i64 %a0, i1 false )

@ -22,10 +22,10 @@ define i32 @test_bzhi_i32(i32 %a0, i32 %a1, i32 *%a2) {
;
; ZNVER1-LABEL: test_bzhi_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: bzhil %edi, (%rdx), %ecx
; ZNVER1-NEXT: bzhil %edi, %esi, %eax
; ZNVER1-NEXT: bzhil %edi, (%rdx), %ecx # sched: [5:0.50]
; ZNVER1-NEXT: bzhil %edi, %esi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %1, i32 %a0)
%3 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a1, i32 %a0)
@ -51,10 +51,10 @@ define i64 @test_bzhi_i64(i64 %a0, i64 %a1, i64 *%a2) {
;
; ZNVER1-LABEL: test_bzhi_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: bzhiq %rdi, (%rdx), %rcx
; ZNVER1-NEXT: bzhiq %rdi, %rsi, %rax
; ZNVER1-NEXT: bzhiq %rdi, (%rdx), %rcx # sched: [5:0.50]
; ZNVER1-NEXT: bzhiq %rdi, %rsi, %rax # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %1, i64 %a0)
%3 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a1, i64 %a0)
@ -88,10 +88,10 @@ define i64 @test_mulx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movq %rdx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: movq %rdi, %rdx # sched: [1:0.25]
; ZNVER1-NEXT: mulxq %rsi, %rsi, %rcx # sched: [4:2.00]
; ZNVER1-NEXT: mulxq (%rax), %rdx, %rax # sched: [8:2.00]
; ZNVER1-NEXT: mulxq %rsi, %rsi, %rcx # sched: [3:1.00]
; ZNVER1-NEXT: mulxq (%rax), %rdx, %rax # sched: [8:1.00]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = zext i64 %a0 to i128
%3 = zext i64 %a1 to i128
@ -123,10 +123,10 @@ define i32 @test_pdep_i32(i32 %a0, i32 %a1, i32 *%a2) {
;
; ZNVER1-LABEL: test_pdep_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: pdepl (%rdx), %edi, %ecx
; ZNVER1-NEXT: pdepl %esi, %edi, %eax
; ZNVER1-NEXT: pdepl (%rdx), %edi, %ecx # sched: [100:?]
; ZNVER1-NEXT: pdepl %esi, %edi, %eax # sched: [100:?]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %1)
%3 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %a1)
@ -152,10 +152,10 @@ define i64 @test_pdep_i64(i64 %a0, i64 %a1, i64 *%a2) {
;
; ZNVER1-LABEL: test_pdep_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: pdepq (%rdx), %rdi, %rcx
; ZNVER1-NEXT: pdepq %rsi, %rdi, %rax
; ZNVER1-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [100:?]
; ZNVER1-NEXT: pdepq %rsi, %rdi, %rax # sched: [100:?]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %1)
%3 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %a1)
@ -181,10 +181,10 @@ define i32 @test_pext_i32(i32 %a0, i32 %a1, i32 *%a2) {
;
; ZNVER1-LABEL: test_pext_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: pextl (%rdx), %edi, %ecx
; ZNVER1-NEXT: pextl %esi, %edi, %eax
; ZNVER1-NEXT: pextl (%rdx), %edi, %ecx # sched: [100:?]
; ZNVER1-NEXT: pextl %esi, %edi, %eax # sched: [100:?]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %1)
%3 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %a1)
@ -210,10 +210,10 @@ define i64 @test_pext_i64(i64 %a0, i64 %a1, i64 *%a2) {
;
; ZNVER1-LABEL: test_pext_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: pextq (%rdx), %rdi, %rcx
; ZNVER1-NEXT: pextq %rsi, %rdi, %rax
; ZNVER1-NEXT: pextq (%rdx), %rdi, %rcx # sched: [100:?]
; ZNVER1-NEXT: pextq %rsi, %rdi, %rax # sched: [100:?]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %1)
%3 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %a1)
@ -242,7 +242,7 @@ define i32 @test_rorx_i32(i32 %a0, i32 %a1, i32 *%a2) {
; ZNVER1-NEXT: rorxl $5, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: rorxl $5, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = lshr i32 %a0, 5
%3 = shl i32 %a0, 27
@ -274,7 +274,7 @@ define i64 @test_rorx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; ZNVER1-NEXT: rorxq $5, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: rorxq $5, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = lshr i64 %a0, 5
%3 = shl i64 %a0, 59
@ -306,7 +306,7 @@ define i32 @test_sarx_i32(i32 %a0, i32 %a1, i32 *%a2) {
; ZNVER1-NEXT: sarxl %esi, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: sarxl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = ashr i32 %a0, %a1
%3 = ashr i32 %1, %a1
@ -334,7 +334,7 @@ define i64 @test_sarx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; ZNVER1-NEXT: sarxq %rsi, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: sarxq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = ashr i64 %a0, %a1
%3 = ashr i64 %1, %a1
@ -362,7 +362,7 @@ define i32 @test_shlx_i32(i32 %a0, i32 %a1, i32 *%a2) {
; ZNVER1-NEXT: shlxl %esi, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: shlxl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = shl i32 %a0, %a1
%3 = shl i32 %1, %a1
@ -390,7 +390,7 @@ define i64 @test_shlx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; ZNVER1-NEXT: shlxq %rsi, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: shlxq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = shl i64 %a0, %a1
%3 = shl i64 %1, %a1
@ -418,7 +418,7 @@ define i32 @test_shrx_i32(i32 %a0, i32 %a1, i32 *%a2) {
; ZNVER1-NEXT: shrxl %esi, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: shrxl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = lshr i32 %a0, %a1
%3 = lshr i32 %1, %a1
@ -446,7 +446,7 @@ define i64 @test_shrx_i64(i64 %a0, i64 %a1, i64 *%a2) {
; ZNVER1-NEXT: shrxq %rsi, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: shrxq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = lshr i64 %a0, %a1
%3 = lshr i64 %1, %a1

@ -44,10 +44,10 @@ define <4 x float> @test_vcvtph2ps_128(<8 x i16> %a0, <8 x i16> *%a1) {
;
; ZNVER1-LABEL: test_vcvtph2ps_128:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [100:?]
; ZNVER1-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <8 x i16>, <8 x i16> *%a1
%2 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %1)
%3 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0)
@ -94,10 +94,10 @@ define <8 x float> @test_vcvtph2ps_256(<8 x i16> %a0, <8 x i16> *%a1) {
;
; ZNVER1-LABEL: test_vcvtph2ps_256:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [100:?]
; ZNVER1-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <8 x i16>, <8 x i16> *%a1
%2 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %1)
%3 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0)
@ -139,9 +139,9 @@ define <8 x i16> @test_vcvtps2ph_128(<4 x float> %a0, <4 x float> %a1, <4 x i16>
;
; ZNVER1-LABEL: test_vcvtps2ph_128:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [12:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0)
%2 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a1, i32 0)
%3 = shufflevector <8 x i16> %2, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@ -187,10 +187,10 @@ define <8 x i16> @test_vcvtps2ph_256(<8 x float> %a0, <8 x float> %a1, <8 x i16>
;
; ZNVER1-LABEL: test_vcvtps2ph_256:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [12:1.00]
; ZNVER1-NEXT: vzeroupper
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0)
%2 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a1, i32 0)
store <8 x i16> %2, <8 x i16> *%a2

@ -57,7 +57,7 @@ define i32 @test_lea_offset(i32) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal -24(%rdi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i32 %0, -24
ret i32 %2
}
@ -109,7 +109,7 @@ define i32 @test_lea_offset_big(i32) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal 1024(%rdi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i32 %0, 1024
ret i32 %2
}
@ -169,7 +169,7 @@ define i32 @test_lea_add(i32, i32) {
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add nsw i32 %1, %0
ret i32 %3
}
@ -231,7 +231,7 @@ define i32 @test_lea_add_offset(i32, i32) {
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i32 %0, 16
%4 = add i32 %3, %1
ret i32 %4
@ -297,7 +297,7 @@ define i32 @test_lea_add_offset_big(i32, i32) {
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i32 %0, -4096
%4 = add i32 %3, %1
ret i32 %4
@ -350,7 +350,7 @@ define i32 @test_lea_mul(i32) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 3
ret i32 %2
}
@ -405,7 +405,7 @@ define i32 @test_lea_mul_offset(i32) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 3
%3 = add nsw i32 %2, -32
ret i32 %3
@ -464,7 +464,7 @@ define i32 @test_lea_mul_offset_big(i32) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 9
%3 = add nsw i32 %2, 10000
ret i32 %3
@ -524,7 +524,7 @@ define i32 @test_lea_add_scale(i32, i32) {
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 1
%4 = add nsw i32 %3, %0
ret i32 %4
@ -587,7 +587,7 @@ define i32 @test_lea_add_scale_offset(i32, i32) {
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 2
%4 = add i32 %0, 96
%5 = add i32 %4, %3
@ -654,7 +654,7 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 3
%4 = add i32 %0, -1200
%5 = add i32 %4, %3

@ -50,7 +50,7 @@ define i64 @test_lea_offset(i64) {
; ZNVER1-LABEL: test_lea_offset:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq -24(%rdi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i64 %0, -24
ret i64 %2
}
@ -95,7 +95,7 @@ define i64 @test_lea_offset_big(i64) {
; ZNVER1-LABEL: test_lea_offset_big:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i64 %0, 1024
ret i64 %2
}
@ -141,7 +141,7 @@ define i64 @test_lea_add(i64, i64) {
; ZNVER1-LABEL: test_lea_add:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add nsw i64 %1, %0
ret i64 %3
}
@ -189,7 +189,7 @@ define i64 @test_lea_add_offset(i64, i64) {
; ZNVER1-LABEL: test_lea_add_offset:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i64 %0, 16
%4 = add i64 %3, %1
ret i64 %4
@ -241,7 +241,7 @@ define i64 @test_lea_add_offset_big(i64, i64) {
; ZNVER1-LABEL: test_lea_add_offset_big:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i64 %0, -4096
%4 = add i64 %3, %1
ret i64 %4
@ -287,7 +287,7 @@ define i64 @test_lea_mul(i64) {
; ZNVER1-LABEL: test_lea_mul:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i64 %0, 3
ret i64 %2
}
@ -335,7 +335,7 @@ define i64 @test_lea_mul_offset(i64) {
; ZNVER1-LABEL: test_lea_mul_offset:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i64 %0, 3
%3 = add nsw i64 %2, -32
ret i64 %3
@ -387,7 +387,7 @@ define i64 @test_lea_mul_offset_big(i64) {
; ZNVER1-LABEL: test_lea_mul_offset_big:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i64 %0, 9
%3 = add nsw i64 %2, 10000
ret i64 %3
@ -433,7 +433,7 @@ define i64 @test_lea_add_scale(i64, i64) {
; ZNVER1-LABEL: test_lea_add_scale:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i64 %1, 1
%4 = add nsw i64 %3, %0
ret i64 %4
@ -482,7 +482,7 @@ define i64 @test_lea_add_scale_offset(i64, i64) {
; ZNVER1-LABEL: test_lea_add_scale_offset:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i64 %1, 2
%4 = add i64 %0, 96
%5 = add i64 %4, %3
@ -535,7 +535,7 @@ define i64 @test_lea_add_scale_offset_big(i64, i64) {
; ZNVER1-LABEL: test_lea_add_scale_offset_big:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i64 %1, 3
%4 = add i64 %0, -1200
%5 = add i64 %4, %3

@ -33,11 +33,11 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
;
; ZNVER1-LABEL: test_ctlz_i16:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: lzcntw (%rsi), %cx
; ZNVER1-NEXT: lzcntw %di, %ax
; ZNVER1-NEXT: lzcntw (%rsi), %cx # sched: [6:0.50]
; ZNVER1-NEXT: lzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.ctlz.i16( i16 %1, i1 false )
%3 = tail call i16 @llvm.ctlz.i16( i16 %a0, i1 false )
@ -70,10 +70,10 @@ define i32 @test_ctlz_i32(i32 %a0, i32 *%a1) {
;
; ZNVER1-LABEL: test_ctlz_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: lzcntl (%rsi), %ecx
; ZNVER1-NEXT: lzcntl %edi, %eax
; ZNVER1-NEXT: lzcntl (%rsi), %ecx # sched: [6:0.50]
; ZNVER1-NEXT: lzcntl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = tail call i32 @llvm.ctlz.i32( i32 %1, i1 false )
%3 = tail call i32 @llvm.ctlz.i32( i32 %a0, i1 false )
@ -106,10 +106,10 @@ define i64 @test_ctlz_i64(i64 %a0, i64 *%a1) {
;
; ZNVER1-LABEL: test_ctlz_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: lzcntq (%rsi), %rcx
; ZNVER1-NEXT: lzcntq %rdi, %rax
; ZNVER1-NEXT: lzcntq (%rsi), %rcx # sched: [6:0.50]
; ZNVER1-NEXT: lzcntq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = tail call i64 @llvm.ctlz.i64( i64 %1, i1 false )
%3 = tail call i64 @llvm.ctlz.i64( i64 %a0, i1 false )

@ -46,8 +46,8 @@ define i16 @test_ctlz_i16(i16 *%a0, i16 %a1, i16 *%a2) {
; ZNVER1-LABEL: test_ctlz_i16:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movbew (%rdi), %ax # sched: [5:0.50]
; ZNVER1-NEXT: movbew %si, (%rdx) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: movbew %si, (%rdx) # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a0
%2 = tail call i16 @llvm.bswap.i16( i16 %1 )
%3 = tail call i16 @llvm.bswap.i16( i16 %a1 )
@ -94,8 +94,8 @@ define i32 @test_ctlz_i32(i32 *%a0, i32 %a1, i32 *%a2) {
; ZNVER1-LABEL: test_ctlz_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movbel (%rdi), %eax # sched: [5:0.50]
; ZNVER1-NEXT: movbel %esi, (%rdx) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: movbel %esi, (%rdx) # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a0
%2 = tail call i32 @llvm.bswap.i32( i32 %1 )
%3 = tail call i32 @llvm.bswap.i32( i32 %a1 )
@ -142,8 +142,8 @@ define i64 @test_ctlz_i64(i64 *%a0, i64 %a1, i64 *%a2) {
; ZNVER1-LABEL: test_ctlz_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movbeq (%rdi), %rax # sched: [5:0.50]
; ZNVER1-NEXT: movbeq %rsi, (%rdx) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: movbeq %rsi, (%rdx) # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a0
%2 = tail call i64 @llvm.bswap.i64( i64 %1 )
%3 = tail call i64 @llvm.bswap.i64( i64 %a1 )

@ -57,7 +57,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; ZNVER1-NEXT: popcntw %di, %ax # sched: [3:1.00]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.ctpop.i16( i16 %1 )
%3 = tail call i16 @llvm.ctpop.i16( i16 %a0 )
@ -107,7 +107,7 @@ define i32 @test_ctpop_i32(i32 %a0, i32 *%a1) {
; ZNVER1-NEXT: popcntl (%rsi), %ecx # sched: [10:1.00]
; ZNVER1-NEXT: popcntl %edi, %eax # sched: [3:1.00]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = tail call i32 @llvm.ctpop.i32( i32 %1 )
%3 = tail call i32 @llvm.ctpop.i32( i32 %a0 )
@ -157,7 +157,7 @@ define i64 @test_ctpop_i64(i64 %a0, i64 *%a1) {
; ZNVER1-NEXT: popcntq (%rsi), %rcx # sched: [10:1.00]
; ZNVER1-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = tail call i64 @llvm.ctpop.i64( i64 %1 )
%3 = tail call i64 @llvm.ctpop.i64( i64 %a0 )

@ -29,9 +29,9 @@ define <4 x i32> @test_sha1msg1(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; ZNVER1-LABEL: test_sha1msg1:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: sha1msg1 %xmm1, %xmm0
; ZNVER1-NEXT: sha1msg1 (%rdi), %xmm0
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: sha1msg1 %xmm1, %xmm0 # sched: [2:1.00]
; ZNVER1-NEXT: sha1msg1 (%rdi), %xmm0 # sched: [9:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha1msg1(<4 x i32> %a0, <4 x i32> %a1)
%3 = tail call <4 x i32> @llvm.x86.sha1msg1(<4 x i32> %2, <4 x i32> %1)
@ -60,9 +60,9 @@ define <4 x i32> @test_sha1msg2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; ZNVER1-LABEL: test_sha1msg2:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: sha1msg2 %xmm1, %xmm0
; ZNVER1-NEXT: sha1msg2 (%rdi), %xmm0
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: sha1msg2 %xmm1, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: sha1msg2 (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha1msg2(<4 x i32> %a0, <4 x i32> %a1)
%3 = tail call <4 x i32> @llvm.x86.sha1msg2(<4 x i32> %2, <4 x i32> %1)
@ -91,9 +91,9 @@ define <4 x i32> @test_sha1nexte(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; ZNVER1-LABEL: test_sha1nexte:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: sha1nexte %xmm1, %xmm0
; ZNVER1-NEXT: sha1nexte (%rdi), %xmm0
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: sha1nexte %xmm1, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: sha1nexte (%rdi), %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha1nexte(<4 x i32> %a0, <4 x i32> %a1)
%3 = tail call <4 x i32> @llvm.x86.sha1nexte(<4 x i32> %2, <4 x i32> %1)
@ -122,9 +122,9 @@ define <4 x i32> @test_sha1rnds4(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; ZNVER1-LABEL: test_sha1rnds4:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: sha1rnds4 $3, %xmm1, %xmm0
; ZNVER1-NEXT: sha1rnds4 $3, (%rdi), %xmm0
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: sha1rnds4 $3, %xmm1, %xmm0 # sched: [6:1.00]
; ZNVER1-NEXT: sha1rnds4 $3, (%rdi), %xmm0 # sched: [13:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %a0, <4 x i32> %a1, i8 3)
%3 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %2, <4 x i32> %1, i8 3)
@ -157,9 +157,9 @@ define <4 x i32> @test_sha256msg1(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2)
;
; ZNVER1-LABEL: test_sha256msg1:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: sha256msg1 %xmm1, %xmm0
; ZNVER1-NEXT: sha256msg1 (%rdi), %xmm0
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: sha256msg1 %xmm1, %xmm0 # sched: [2:1.00]
; ZNVER1-NEXT: sha256msg1 (%rdi), %xmm0 # sched: [9:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha256msg1(<4 x i32> %a0, <4 x i32> %a1)
%3 = tail call <4 x i32> @llvm.x86.sha256msg1(<4 x i32> %2, <4 x i32> %1)
@ -188,9 +188,9 @@ define <4 x i32> @test_sha256msg2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2)
;
; ZNVER1-LABEL: test_sha256msg2:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: sha256msg2 %xmm1, %xmm0
; ZNVER1-NEXT: sha256msg2 (%rdi), %xmm0
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: sha256msg2 %xmm1, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: sha256msg2 (%rdi), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha256msg2(<4 x i32> %a0, <4 x i32> %a1)
%3 = tail call <4 x i32> @llvm.x86.sha256msg2(<4 x i32> %2, <4 x i32> %1)
@ -230,10 +230,10 @@ define <4 x i32> @test_sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2,
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovaps %xmm0, %xmm3 # sched: [1:0.50]
; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3
; ZNVER1-NEXT: sha256rnds2 %xmm0, (%rdi), %xmm3
; ZNVER1-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3 # sched: [4:1.00]
; ZNVER1-NEXT: sha256rnds2 %xmm0, (%rdi), %xmm3 # sched: [11:1.00]
; ZNVER1-NEXT: vmovaps %xmm3, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a3
%2 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
%3 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %2, <4 x i32> %1, <4 x i32> %a2)

@ -56,7 +56,7 @@ define <4 x float> @test_addps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
|
||||
; ZNVER1: # BB#0:
|
||||
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
|
||||
; ZNVER1-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = fadd <4 x float> %a0, %a1
|
||||
%2 = load <4 x float>, <4 x float> *%a2, align 16
|
||||
%3 = fadd <4 x float> %1, %2
|
||||
@ -110,7 +110,7 @@ define float @test_addss(float %a0, float %a1, float *%a2) {
|
||||
; ZNVER1: # BB#0:
|
||||
; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
|
||||
; ZNVER1-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = fadd float %a0, %a1
|
||||
%2 = load float, float *%a2, align 4
|
||||
%3 = fadd float %1, %2
|
||||
@ -168,7 +168,7 @@ define <4 x float> @test_andps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
|
||||
; ZNVER1: # BB#0:
|
||||
; ZNVER1-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
|
||||
; ZNVER1-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = bitcast <4 x float> %a0 to <4 x i32>
|
||||
%2 = bitcast <4 x float> %a1 to <4 x i32>
|
||||
%3 = and <4 x i32> %1, %2
|
||||
@ -230,7 +230,7 @@ define <4 x float> @test_andnotps(<4 x float> %a0, <4 x float> %a1, <4 x float>
|
||||
; ZNVER1: # BB#0:
|
||||
; ZNVER1-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
|
||||
; ZNVER1-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = bitcast <4 x float> %a0 to <4 x i32>
|
||||
%2 = bitcast <4 x float> %a1 to <4 x i32>
|
||||
%3 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
@ -298,7 +298,7 @@ define <4 x float> @test_cmpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
|
||||
; ZNVER1-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
|
||||
; ZNVER1-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
|
||||
; ZNVER1-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = fcmp oeq <4 x float> %a0, %a1
|
||||
%2 = load <4 x float>, <4 x float> *%a2, align 16
|
||||
%3 = fcmp oeq <4 x float> %a0, %2
|
||||
@ -355,7 +355,7 @@ define float @test_cmpss(float %a0, float %a1, float *%a2) {
|
||||
; ZNVER1: # BB#0:
|
||||
; ZNVER1-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
|
||||
; ZNVER1-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = insertelement <4 x float> undef, float %a0, i32 0
|
||||
%2 = insertelement <4 x float> undef, float %a1, i32 0
|
||||
%3 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %1, <4 x float> %2, i8 0)
|
||||
@ -478,7 +478,7 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
|
||||
; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
|
||||
; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
|
||||
; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1)
|
||||
%2 = load <4 x float>, <4 x float> *%a2, align 4
|
||||
%3 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %2)
|
||||
@ -542,7 +542,7 @@ define float @test_cvtsi2ss(i32 %a0, i32 *%a1) {
|
||||
; ZNVER1-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
|
||||
; ZNVER1-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
|
||||
; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = sitofp i32 %a0 to float
|
||||
%2 = load i32, i32 *%a1, align 4
|
||||
%3 = sitofp i32 %2 to float
|
||||
@ -605,7 +605,7 @@ define float @test_cvtsi2ssq(i64 %a0, i64 *%a1) {
|
||||
; ZNVER1-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
|
||||
; ZNVER1-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
|
||||
; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = sitofp i64 %a0 to float
|
||||
%2 = load i64, i64 *%a1, align 8
|
||||
%3 = sitofp i64 %2 to float
|
||||
@ -668,7 +668,7 @@ define i32 @test_cvtss2si(float %a0, float *%a1) {
|
||||
; ZNVER1-NEXT: vcvtss2si (%rdi), %eax # sched: [12:1.00]
|
||||
; ZNVER1-NEXT: vcvtss2si %xmm0, %ecx # sched: [5:1.00]
|
||||
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %1)
%3 = load float, float *%a1, align 4
@ -734,7 +734,7 @@ define i64 @test_cvtss2siq(float %a0, float *%a1) {
; ZNVER1-NEXT: vcvtss2si (%rdi), %rax # sched: [12:1.00]
; ZNVER1-NEXT: vcvtss2si %xmm0, %rcx # sched: [5:1.00]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %1)
%3 = load float, float *%a1, align 4
@ -800,7 +800,7 @@ define i32 @test_cvttss2si(float %a0, float *%a1) {
; ZNVER1-NEXT: vcvttss2si (%rdi), %eax # sched: [12:1.00]
; ZNVER1-NEXT: vcvttss2si %xmm0, %ecx # sched: [5:1.00]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptosi float %a0 to i32
%2 = load float, float *%a1, align 4
%3 = fptosi float %2 to i32
@ -863,7 +863,7 @@ define i64 @test_cvttss2siq(float %a0, float *%a1) {
; ZNVER1-NEXT: vcvttss2si (%rdi), %rax # sched: [12:1.00]
; ZNVER1-NEXT: vcvttss2si %xmm0, %rcx # sched: [5:1.00]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptosi float %a0 to i64
%2 = load float, float *%a1, align 4
%3 = fptosi float %2 to i64
@ -918,7 +918,7 @@ define <4 x float> @test_divps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
; ZNVER1-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fdiv <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fdiv <4 x float> %1, %2
@ -972,7 +972,7 @@ define float @test_divss(float %a0, float %a1, float *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
; ZNVER1-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fdiv float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fdiv float %1, %2
@ -1025,8 +1025,8 @@ define void @test_ldmxcsr(i32 %a0) {
; ZNVER1-LABEL: test_ldmxcsr:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:0.50]
; ZNVER1-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = alloca i32, align 4
%2 = bitcast i32* %1 to i8*
store i32 %a0, i32* %1
@ -1082,7 +1082,7 @@ define <4 x float> @test_maxps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %1, <4 x float> %2)
@ -1137,7 +1137,7 @@ define <4 x float> @test_maxss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %1, <4 x float> %2)
@ -1192,7 +1192,7 @@ define <4 x float> @test_minps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %1, <4 x float> %2)
@ -1247,7 +1247,7 @@ define <4 x float> @test_minss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %1, <4 x float> %2)
@ -1310,7 +1310,7 @@ define void @test_movaps(<4 x float> *%a0, <4 x float> *%a1) {
; ZNVER1-NEXT: vmovaps (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x float>, <4 x float> *%a0, align 16
%2 = fadd <4 x float> %1, %1
store <4 x float> %2, <4 x float> *%a1, align 16
@ -1364,7 +1364,7 @@ define <4 x float> @test_movhlps(<4 x float> %a0, <4 x float> %a1) {
; ZNVER1-LABEL: test_movhlps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
ret <4 x float> %1
}
@ -1428,7 +1428,7 @@ define void @test_movhps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
; ZNVER1-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast x86_mmx* %a2 to <2 x float>*
%2 = load <2 x float>, <2 x float> *%1, align 8
%3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@ -1488,7 +1488,7 @@ define <4 x float> @test_movlhps(<4 x float> %a0, <4 x float> %a1) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
%2 = fadd <4 x float> %a1, %1
ret <4 x float> %2
@ -1549,7 +1549,7 @@ define void @test_movlps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
; ZNVER1-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast x86_mmx* %a2 to <2 x float>*
%2 = load <2 x float>, <2 x float> *%1, align 8
%3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@ -1600,8 +1600,8 @@ define i32 @test_movmskps(<4 x float> %a0) {
;
; ZNVER1-LABEL: test_movmskps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovmskps %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vmovmskps %xmm0, %eax # sched: [1:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
ret i32 %1
}
@ -1652,7 +1652,7 @@ define void @test_movntps(<4 x float> %a0, <4 x float> *%a1) {
; ZNVER1-LABEL: test_movntps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
store <4 x float> %a0, <4 x float> *%a1, align 16, !nontemporal !0
ret void
}
@ -1712,7 +1712,7 @@ define void @test_movss_mem(float* %a0, float* %a1) {
; ZNVER1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovss %xmm0, (%rsi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load float, float* %a0, align 1
%2 = fadd float %1, %1
store float %2, float *%a1, align 1
@ -1764,7 +1764,7 @@ define <4 x float> @test_movss_reg(<4 x float> %a0, <4 x float> %a1) {
; ZNVER1-LABEL: test_movss_reg:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
ret <4 x float> %1
}
@ -1824,7 +1824,7 @@ define void @test_movups(<4 x float> *%a0, <4 x float> *%a1) {
; ZNVER1-NEXT: vmovups (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovups %xmm0, (%rsi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x float>, <4 x float> *%a0, align 1
%2 = fadd <4 x float> %1, %1
store <4 x float> %2, <4 x float> *%a1, align 1
@ -1876,9 +1876,9 @@ define <4 x float> @test_mulps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
;
; ZNVER1-LABEL: test_mulps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
; ZNVER1-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fmul <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fmul <4 x float> %1, %2
@ -1930,9 +1930,9 @@ define float @test_mulss(float %a0, float %a1, float *%a2) {
;
; ZNVER1-LABEL: test_mulss:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
; ZNVER1-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fmul float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fmul float %1, %2
@ -1990,7 +1990,7 @@ define <4 x float> @test_orps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <4 x float> %a0 to <4 x i32>
%2 = bitcast <4 x float> %a1 to <4 x i32>
%3 = or <4 x i32> %1, %2
@ -2046,7 +2046,7 @@ define void @test_prefetchnta(i8* %a0) {
; ZNVER1-LABEL: test_prefetchnta:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: prefetchnta (%rdi) # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1)
ret void
}
@ -2109,7 +2109,7 @@ define <4 x float> @test_rcpps(<4 x float> %a0, <4 x float> *%a1) {
; ZNVER1-NEXT: vrcpps (%rdi), %xmm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %2)
@ -2183,7 +2183,7 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
; ZNVER1-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
; ZNVER1-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %1)
%3 = load float, float *%a1, align 4
@ -2251,7 +2251,7 @@ define <4 x float> @test_rsqrtps(<4 x float> %a0, <4 x float> *%a1) {
; ZNVER1-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %2)
@ -2322,10 +2322,10 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
; ZNVER1-LABEL: test_rsqrtss:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
; ZNVER1-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:0.50]
; ZNVER1-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %1)
%3 = load float, float *%a1, align 4
@ -2381,7 +2381,7 @@ define void @test_sfence() {
; ZNVER1-LABEL: test_sfence:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: sfence # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.sse.sfence()
ret void
}
@ -2438,7 +2438,7 @@ define <4 x float> @test_shufps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 0, i32 4, i32 4>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 3, i32 4, i32 4>
@ -2501,7 +2501,7 @@ define <4 x float> @test_sqrtps(<4 x float> %a0, <4 x float> *%a1) {
; ZNVER1-NEXT: vsqrtps (%rdi), %xmm1 # sched: [27:1.00]
; ZNVER1-NEXT: vsqrtps %xmm0, %xmm0 # sched: [20:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %2)
@ -2575,7 +2575,7 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
; ZNVER1-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
; ZNVER1-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %2)
@ -2629,9 +2629,9 @@ define i32 @test_stmxcsr() {
;
; ZNVER1-LABEL: test_stmxcsr:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:0.50]
; ZNVER1-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?]
; ZNVER1-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = alloca i32, align 4
%2 = bitcast i32* %1 to i8*
call void @llvm.x86.sse.stmxcsr(i8* %2)
@ -2687,7 +2687,7 @@ define <4 x float> @test_subps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fsub <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fsub <4 x float> %1, %2
@ -2741,7 +2741,7 @@ define float @test_subss(float %a0, float %a1, float *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fsub float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fsub float %1, %2
@ -2859,7 +2859,7 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 4
%3 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %2)
@ -2919,7 +2919,7 @@ define <4 x float> @test_unpckhps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@ -2977,7 +2977,7 @@ define <4 x float> @test_unpcklps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@ -3035,7 +3035,7 @@ define <4 x float> @test_xorps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <4 x float> %a0 to <4 x i32>
%2 = bitcast <4 x float> %a1 to <4 x i32>
%3 = xor <4 x i32> %1, %2
File diff suppressed because it is too large
@ -56,7 +56,7 @@ define <2 x double> @test_addsubpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %1, <2 x double> %2)
@ -111,7 +111,7 @@ define <4 x float> @test_addsubps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %1, <4 x float> %2)
@ -164,9 +164,9 @@ define <2 x double> @test_haddpd(<2 x double> %a0, <2 x double> %a1, <2 x double
;
; ZNVER1-LABEL: test_haddpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %1, <2 x double> %2)
@ -219,9 +219,9 @@ define <4 x float> @test_haddps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
;
; ZNVER1-LABEL: test_haddps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %1, <4 x float> %2)
@ -274,9 +274,9 @@ define <2 x double> @test_hsubpd(<2 x double> %a0, <2 x double> %a1, <2 x double
;
; ZNVER1-LABEL: test_hsubpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %1, <2 x double> %2)
@ -329,9 +329,9 @@ define <4 x float> @test_hsubps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
;
; ZNVER1-LABEL: test_hsubps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %1, <4 x float> %2)
@ -380,7 +380,7 @@ define <16 x i8> @test_lddqu(i8* %a0) {
; ZNVER1-LABEL: test_lddqu:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vlddqu (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %a0)
ret <16 x i8> %1
}
@ -441,7 +441,7 @@ define void @test_monitor(i8* %a0, i32 %a1, i32 %a2) {
; ZNVER1-NEXT: leaq (%rdi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: movl %esi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: monitor # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse3.monitor(i8* %a0, i32 %a1, i32 %a2)
ret void
}
@ -503,7 +503,7 @@ define <2 x double> @test_movddup(<2 x double> %a0, <2 x double> *%a1) {
; ZNVER1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [8:0.50]
; ZNVER1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
@ -567,7 +567,7 @@ define <4 x float> @test_movshdup(<4 x float> %a0, <4 x float> *%a1) {
; ZNVER1-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [8:0.50]
; ZNVER1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@ -631,7 +631,7 @@ define <4 x float> @test_movsldup(<4 x float> %a0, <4 x float> *%a1) {
; ZNVER1-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [8:0.50]
; ZNVER1-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@ -694,7 +694,7 @@ define void @test_mwait(i32 %a0, i32 %a1) {
; ZNVER1-NEXT: movl %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: movl %esi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: mwait # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse3.mwait(i32 %a0, i32 %a1)
ret void
}
@ -56,7 +56,7 @@ define <2 x double> @test_blendpd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
; ZNVER1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 3>
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fadd <2 x double> %a1, %1
@ -105,7 +105,7 @@ define <4 x float> @test_blendps(<4 x float> %a0, <4 x float> %a1, <4 x float> *
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50]
; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
@ -159,7 +159,7 @@ define <2 x double> @test_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x doub
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
%2 = load <2 x double>, <2 x double> *%a3, align 16
%3 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %1, <2 x double> %2, <2 x double> %a2)
@ -214,7 +214,7 @@ define <4 x float> @test_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
%2 = load <4 x float>, <4 x float> *%a3
%3 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %1, <4 x float> %2, <4 x float> %a2)
@ -261,9 +261,9 @@ define <2 x double> @test_dppd(<2 x double> %a0, <2 x double> %a1, <2 x double>
;
; ZNVER1-LABEL: test_dppd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %1, <2 x double> %2, i8 7)
@ -310,9 +310,9 @@ define <4 x float> @test_dpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2
;
; ZNVER1-LABEL: test_dpps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %1, <4 x float> %2, i8 7)
@ -361,7 +361,7 @@ define <4 x float> @test_insertps(<4 x float> %a0, <4 x float> %a1, float *%a2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:0.50]
; ZNVER1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17)
%2 = load float, float *%a2
%3 = insertelement <4 x float> %1, float %2, i32 3
@ -403,7 +403,7 @@ define <2 x i64> @test_movntdqa(i8* %a0) {
; ZNVER1-LABEL: test_movntdqa:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %a0)
ret <2 x i64> %1
}
@ -450,7 +450,7 @@ define <8 x i16> @test_mpsadbw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7)
%2 = bitcast <8 x i16> %1 to <16 x i8>
%3 = load <16 x i8>, <16 x i8> *%a2, align 16
@ -500,7 +500,7 @@ define <8 x i16> @test_packusdw(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
%2 = bitcast <8 x i16> %1 to <4 x i32>
%3 = load <4 x i32>, <4 x i32> *%a2, align 16
@ -556,7 +556,7 @@ define <16 x i8> @test_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2, <16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2)
%2 = load <16 x i8>, <16 x i8> *%a3, align 16
%3 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %1, <16 x i8> %2, <16 x i8> %a2)
@ -603,9 +603,9 @@ define <8 x i16> @test_pblendw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; ZNVER1-LABEL: test_pblendw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.50]
; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.33]
; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
@ -653,7 +653,7 @@ define <2 x i64> @test_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = icmp eq <2 x i64> %a0, %a1
%2 = sext <2 x i1> %1 to <2 x i64>
%3 = load <2 x i64>, <2 x i64>*%a2, align 16
@ -703,7 +703,7 @@ define i32 @test_pextrb(<16 x i8> %a0, i8 *%a1) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpextrb $3, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <16 x i8> %a0, i32 3
%2 = extractelement <16 x i8> %a0, i32 1
store i8 %2, i8 *%a1
@ -752,7 +752,7 @@ define i32 @test_pextrd(<4 x i32> %a0, i32 *%a1) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpextrd $3, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <4 x i32> %a0, i32 3
%2 = extractelement <4 x i32> %a0, i32 1
store i32 %2, i32 *%a1
@ -800,7 +800,7 @@ define i64 @test_pextrq(<2 x i64> %a0, <2 x i64> %a1, i64 *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpextrq $1, %xmm0, %rax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <2 x i64> %a0, i32 1
%2 = extractelement <2 x i64> %a0, i32 1
store i64 %2, i64 *%a2
@ -848,7 +848,7 @@ define i32 @test_pextrw(<8 x i16> %a0, i16 *%a1) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpextrw $3, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <8 x i16> %a0, i32 3
%2 = extractelement <8 x i16> %a0, i32 1
store i16 %2, i16 *%a1
@ -897,7 +897,7 @@ define <8 x i16> @test_phminposuw(<8 x i16> *%a0) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphminposuw (%rdi), %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: vphminposuw %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <8 x i16>, <8 x i16> *%a0, align 16
%2 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %1)
%3 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %2)
@ -946,7 +946,7 @@ define <16 x i8> @test_pinsrb(<16 x i8> %a0, i8 %a1, i8 *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <16 x i8> %a0, i8 %a1, i32 1
%2 = load i8, i8 *%a2
%3 = insertelement <16 x i8> %1, i8 %2, i32 3
@ -994,7 +994,7 @@ define <4 x i32> @test_pinsrd(<4 x i32> %a0, i32 %a1, i32 *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x i32> %a0, i32 %a1, i32 1
%2 = load i32, i32 *%a2
%3 = insertelement <4 x i32> %1, i32 %2, i32 3
@ -1049,7 +1049,7 @@ define <2 x i64> @test_pinsrq(<2 x i64> %a0, <2 x i64> %a1, i64 %a2, i64 *%a3) {
; ZNVER1-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <2 x i64> %a0, i64 %a2, i32 1
%2 = load i64, i64 *%a3
%3 = insertelement <2 x i64> %a1, i64 %2, i32 1
@ -1098,7 +1098,7 @@ define <16 x i8> @test_pmaxsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %1, <16 x i8> %2)
@ -1147,7 +1147,7 @@ define <4 x i32> @test_pmaxsd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %1, <4 x i32> %2)
@ -1196,7 +1196,7 @@ define <4 x i32> @test_pmaxud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %1, <4 x i32> %2)
@ -1245,7 +1245,7 @@ define <8 x i16> @test_pmaxuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %1, <8 x i16> %2)
@ -1294,7 +1294,7 @@ define <16 x i8> @test_pminsb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %1, <16 x i8> %2)
@ -1343,7 +1343,7 @@ define <4 x i32> @test_pminsd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %1, <4 x i32> %2)
@ -1392,7 +1392,7 @@ define <4 x i32> @test_pminud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %1, <4 x i32> %2)
@ -1441,7 +1441,7 @@ define <8 x i16> @test_pminuw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %1, <8 x i16> %2)
@ -1498,7 +1498,7 @@ define <8 x i16> @test_pmovsxbw(<16 x i8> %a0, <8 x i8> *%a1) {
; ZNVER1-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = sext <8 x i8> %1 to <8 x i16>
%3 = load <8 x i8>, <8 x i8>* %a1, align 1
@ -1556,7 +1556,7 @@ define <4 x i32> @test_pmovsxbd(<16 x i8> %a0, <4 x i8> *%a1) {
; ZNVER1-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = sext <4 x i8> %1 to <4 x i32>
%3 = load <4 x i8>, <4 x i8>* %a1, align 1
@ -1614,7 +1614,7 @@ define <2 x i64> @test_pmovsxbq(<16 x i8> %a0, <2 x i8> *%a1) {
; ZNVER1-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
%2 = sext <2 x i8> %1 to <2 x i64>
%3 = load <2 x i8>, <2 x i8>* %a1, align 1
@ -1672,7 +1672,7 @@ define <2 x i64> @test_pmovsxdq(<4 x i32> %a0, <2 x i32> *%a1) {
; ZNVER1-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%2 = sext <2 x i32> %1 to <2 x i64>
%3 = load <2 x i32>, <2 x i32>* %a1, align 1
@ -1730,7 +1730,7 @@ define <4 x i32> @test_pmovsxwd(<8 x i16> %a0, <4 x i16> *%a1) {
; ZNVER1-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = sext <4 x i16> %1 to <4 x i32>
%3 = load <4 x i16>, <4 x i16>* %a1, align 1
@ -1788,7 +1788,7 @@ define <2 x i64> @test_pmovsxwq(<8 x i16> %a0, <2 x i16> *%a1) {
; ZNVER1-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
%2 = sext <2 x i16> %1 to <2 x i64>
%3 = load <2 x i16>, <2 x i16>* %a1, align 1
@ -1846,7 +1846,7 @@ define <8 x i16> @test_pmovzxbw(<16 x i8> %a0, <8 x i8> *%a1) {
; ZNVER1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = zext <8 x i8> %1 to <8 x i16>
%3 = load <8 x i8>, <8 x i8>* %a1, align 1
@ -1904,7 +1904,7 @@ define <4 x i32> @test_pmovzxbd(<16 x i8> %a0, <4 x i8> *%a1) {
; ZNVER1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = zext <4 x i8> %1 to <4 x i32>
%3 = load <4 x i8>, <4 x i8>* %a1, align 1
@ -1962,7 +1962,7 @@ define <2 x i64> @test_pmovzxbq(<16 x i8> %a0, <2 x i8> *%a1) {
; ZNVER1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
%2 = zext <2 x i8> %1 to <2 x i64>
%3 = load <2 x i8>, <2 x i8>* %a1, align 1
@ -2020,7 +2020,7 @@ define <2 x i64> @test_pmovzxdq(<4 x i32> %a0, <2 x i32> *%a1) {
; ZNVER1-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%2 = zext <2 x i32> %1 to <2 x i64>
%3 = load <2 x i32>, <2 x i32>* %a1, align 1
@ -2078,7 +2078,7 @@ define <4 x i32> @test_pmovzxwd(<8 x i16> %a0, <4 x i16> *%a1) {
; ZNVER1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = zext <4 x i16> %1 to <4 x i32>
%3 = load <4 x i16>, <4 x i16>* %a1, align 1
@ -2136,7 +2136,7 @@ define <2 x i64> @test_pmovzxwq(<8 x i16> %a0, <2 x i16> *%a1) {
; ZNVER1-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
%2 = zext <2 x i16> %1 to <2 x i64>
%3 = load <2 x i16>, <2 x i16>* %a1, align 1
@ -2186,7 +2186,7 @@ define <2 x i64> @test_pmuldq(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
%2 = bitcast <2 x i64> %1 to <4 x i32>
%3 = load <4 x i32>, <4 x i32> *%a2, align 16
@ -2236,7 +2236,7 @@ define <4 x i32> @test_pmulld(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = mul <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = mul <4 x i32> %1, %2
@ -2306,13 +2306,13 @@ define i32 @test_ptest(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
;
; ZNVER1-LABEL: test_ptest:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vptest %xmm1, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vptest %xmm1, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
; ZNVER1-NEXT: vptest (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vptest (%rdi), %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: setb %cl # sched: [1:0.25]
; ZNVER1-NEXT: andb %al, %cl # sched: [1:0.25]
; ZNVER1-NEXT: movzbl %cl, %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1)
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %2)
@ -2367,10 +2367,10 @@ define <2 x double> @test_roundpd(<2 x double> %a0, <2 x double> *%a1) {
;
; ZNVER1-LABEL: test_roundpd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [10:1.00]
; ZNVER1-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [11:1.00]
; ZNVER1-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %2, i32 7)
@ -2425,10 +2425,10 @@ define <4 x float> @test_roundps(<4 x float> %a0, <4 x float> *%a1) {
;
; ZNVER1-LABEL: test_roundps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [10:1.00]
; ZNVER1-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [11:1.00]
; ZNVER1-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %2, i32 7)
@ -2484,10 +2484,10 @@ define <2 x double> @test_roundsd(<2 x double> %a0, <2 x double> %a1, <2 x doubl
;
; ZNVER1-LABEL: test_roundsd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; ZNVER1-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [4:1.00]
; ZNVER1-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7)
%2 = load <2 x double>, <2 x double>* %a2, align 16
%3 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %2, i32 7)
@ -2543,10 +2543,10 @@ define <4 x float> @test_roundss(<4 x float> %a0, <4 x float> %a1, <4 x float> *
;
; ZNVER1-LABEL: test_roundss:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; ZNVER1-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [4:1.00]
; ZNVER1-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %2, i32 7)
@ -56,7 +56,7 @@ define i32 @crc32_32_8(i32 %a0, i8 %a1, i8 *%a2) {
|
||||
; ZNVER1-NEXT: crc32b %sil, %edi # sched: [3:1.00]
|
||||
; ZNVER1-NEXT: crc32b (%rdx), %edi # sched: [10:1.00]
|
||||
; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a0, i8 %a1)
|
||||
%2 = load i8, i8 *%a2
|
||||
%3 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %1, i8 %2)
|
||||
@ -112,7 +112,7 @@ define i32 @crc32_32_16(i32 %a0, i16 %a1, i16 *%a2) {
|
||||
; ZNVER1-NEXT: crc32w %si, %edi # sched: [3:1.00]
|
||||
; ZNVER1-NEXT: crc32w (%rdx), %edi # sched: [10:1.00]
|
||||
; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a0, i16 %a1)
|
||||
%2 = load i16, i16 *%a2
|
||||
%3 = call i32 @llvm.x86.sse42.crc32.32.16(i32 %1, i16 %2)
|
||||
@ -168,7 +168,7 @@ define i32 @crc32_32_32(i32 %a0, i32 %a1, i32 *%a2) {
|
||||
; ZNVER1-NEXT: crc32l %esi, %edi # sched: [3:1.00]
|
||||
; ZNVER1-NEXT: crc32l (%rdx), %edi # sched: [10:1.00]
|
||||
; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a0, i32 %a1)
|
||||
%2 = load i32, i32 *%a2
|
||||
%3 = call i32 @llvm.x86.sse42.crc32.32.32(i32 %1, i32 %2)
|
||||
@ -224,7 +224,7 @@ define i64 @crc32_64_8(i64 %a0, i8 %a1, i8 *%a2) nounwind {
|
||||
; ZNVER1-NEXT: crc32b %sil, %edi # sched: [3:1.00]
|
||||
; ZNVER1-NEXT: crc32b (%rdx), %edi # sched: [10:1.00]
|
||||
; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25]
|
||||
; ZNVER1-NEXT: retq # sched: [5:0.50]
|
||||
; ZNVER1-NEXT: retq # sched: [1:0.50]
|
||||
%1 = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a0, i8 %a1)
|
||||
%2 = load i8, i8 *%a2
|
||||
%3 = call i64 @llvm.x86.sse42.crc32.64.8(i64 %1, i8 %2)
|
||||
@ -280,7 +280,7 @@ define i64 @crc32_64_64(i64 %a0, i64 %a1, i64 *%a2) {
; ZNVER1-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; ZNVER1-NEXT: crc32q (%rdx), %rdi # sched: [10:1.00]
; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a0, i64 %a1)
%2 = load i64, i64 *%a2
%3 = call i64 @llvm.x86.sse42.crc32.64.64(i64 %1, i64 %2)
@ -378,7 +378,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; ZNVER1-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %2, i32 7, i8 7)
@ -456,7 +456,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25]
; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpestrm $7, (%rdi), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7)
@ -526,7 +526,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; ZNVER1-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %2, i8 7)
@ -576,7 +576,7 @@ define <16 x i8> @test_pcmpistrm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %1, <16 x i8> %2, i8 7)
@ -623,9 +623,9 @@ define <2 x i64> @test_pcmpgtq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
;
; ZNVER1-LABEL: test_pcmpgtq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = icmp sgt <2 x i64> %a0, %a1
%2 = sext <2 x i1> %1 to <2 x i64>
%3 = load <2 x i64>, <2 x i64>*%a2, align 16
@ -675,7 +675,7 @@ define <2 x i64> @test_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x i64>, <2 x i64> *%a2, align 16
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
%3 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %2, i8 0)

@ -16,8 +16,8 @@ define <2 x i64> @test_extrq(<2 x i64> %a0, <16 x i8> %a1) {
;
; ZNVER1-LABEL: test_extrq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: extrq %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: extrq %xmm1, %xmm0 # sched: [2:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %a0, <16 x i8> %a1)
ret <2 x i64> %1
}
@ -36,8 +36,8 @@ define <2 x i64> @test_extrqi(<2 x i64> %a0) {
;
; ZNVER1-LABEL: test_extrqi:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: extrq $2, $3, %xmm0
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: extrq $2, $3, %xmm0 # sched: [2:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a0, i8 3, i8 2)
ret <2 x i64> %1
}
@ -56,8 +56,8 @@ define <2 x i64> @test_insertq(<2 x i64> %a0, <2 x i64> %a1) {
;
; ZNVER1-LABEL: test_insertq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: insertq %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: insertq %xmm1, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %a0, <2 x i64> %a1)
ret <2 x i64> %1
}
@ -76,8 +76,8 @@ define <2 x i64> @test_insertqi(<2 x i64> %a0, <2 x i64> %a1) {
;
; ZNVER1-LABEL: test_insertqi:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: insertq $6, $5, %xmm1, %xmm0
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: insertq $6, $5, %xmm1, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 6)
ret <2 x i64> %1
}
@ -96,8 +96,8 @@ define void @test_movntsd(i8* %p, <2 x double> %a) {
;
; ZNVER1-LABEL: test_movntsd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movntsd %xmm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: movntsd %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a)
ret void
}
@ -116,8 +116,8 @@ define void @test_movntss(i8* %p, <4 x float> %a) {
;
; ZNVER1-LABEL: test_movntss:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movntss %xmm0, (%rdi) # sched: [1:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: movntss %xmm0, (%rdi) # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a)
ret void
}

@ -65,7 +65,7 @@ define <16 x i8> @test_pabsb(<16 x i8> %a0, <16 x i8> *%a1) {
; ZNVER1-NEXT: vpabsb (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0)
%2 = load <16 x i8>, <16 x i8> *%a1, align 16
%3 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %2)
@ -130,7 +130,7 @@ define <4 x i32> @test_pabsd(<4 x i32> %a0, <4 x i32> *%a1) {
; ZNVER1-NEXT: vpabsd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0)
%2 = load <4 x i32>, <4 x i32> *%a1, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %2)
@ -195,7 +195,7 @@ define <8 x i16> @test_pabsw(<8 x i16> %a0, <8 x i16> *%a1) {
; ZNVER1-NEXT: vpabsw (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0)
%2 = load <8 x i16>, <8 x i16> *%a1, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %2)
@ -256,7 +256,7 @@ define <8 x i16> @test_palignr(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.25]
; ZNVER1-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = shufflevector <8 x i16> %2, <8 x i16> %1, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
@ -308,9 +308,9 @@ define <4 x i32> @test_phaddd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; ZNVER1-LABEL: test_phaddd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %1, <4 x i32> %2)
@ -363,9 +363,9 @@ define <8 x i16> @test_phaddsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; ZNVER1-LABEL: test_phaddsw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %1, <8 x i16> %2)
@ -418,9 +418,9 @@ define <8 x i16> @test_phaddw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; ZNVER1-LABEL: test_phaddw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %1, <8 x i16> %2)
@ -473,9 +473,9 @@ define <4 x i32> @test_phsubd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
;
; ZNVER1-LABEL: test_phsubd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %1, <4 x i32> %2)
@ -528,9 +528,9 @@ define <8 x i16> @test_phsubsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; ZNVER1-LABEL: test_phsubsw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %1, <8 x i16> %2)
@ -583,9 +583,9 @@ define <8 x i16> @test_phsubw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
;
; ZNVER1-LABEL: test_phsubw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %1, <8 x i16> %2)
@ -640,7 +640,7 @@ define <8 x i16> @test_pmaddubsw(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = bitcast <8 x i16> %1 to <16 x i8>
@ -696,7 +696,7 @@ define <8 x i16> @test_pmulhrsw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %1, <8 x i16> %2)
@ -751,7 +751,7 @@ define <16 x i8> @test_pshufb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> %2)
@ -810,7 +810,7 @@ define <16 x i8> @test_psignb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %1, <16 x i8> %2)
@ -869,7 +869,7 @@ define <4 x i32> @test_psignd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %1, <4 x i32> %2)
@ -928,7 +928,7 @@ define <8 x i16> @test_psignw(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [5:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %1, <8 x i16> %2)