From 7e89a31db38faefd04c658b5dbd671604389f586 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sun, 8 May 2016 07:10:50 +0000
Subject: [PATCH] [X86] Add patterns for 256-bit non-temporal stores when only
 AVX1 is supported. While there, add a predicate to the SSE2 patterns to
 avoid an ordering dependency.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@268872 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrSSE.td     | 26 ++++++++++++++++++++------
 test/CodeGen/X86/nontemporal-2.ll | 39 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+), 6 deletions(-)

diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 1490fd03fe5..587d500dc56 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -3648,6 +3648,18 @@ let Predicates = [HasAVX2, NoVLX] in {
             (VMOVNTDQYmr addr:$dst, VR256:$src)>;
 }
 
+// If integer type isn't available, use the floating point type.
+let Predicates = [HasAVX1Only] in {
+  def : Pat<(alignednontemporalstore (v4i64 VR256:$src), addr:$dst),
+            (VMOVNTPSYmr addr:$dst, VR256:$src)>;
+  def : Pat<(alignednontemporalstore (v8i32 VR256:$src), addr:$dst),
+            (VMOVNTPSYmr addr:$dst, VR256:$src)>;
+  def : Pat<(alignednontemporalstore (v16i16 VR256:$src), addr:$dst),
+            (VMOVNTPSYmr addr:$dst, VR256:$src)>;
+  def : Pat<(alignednontemporalstore (v32i8 VR256:$src), addr:$dst),
+            (VMOVNTPSYmr addr:$dst, VR256:$src)>;
+}
+
 let Predicates = [HasAVX, NoVLX] in {
   def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
             (VMOVNTDQmr addr:$dst, VR128:$src)>;
@@ -3657,12 +3669,14 @@ let Predicates = [HasAVX, NoVLX] in {
             (VMOVNTDQmr addr:$dst, VR128:$src)>;
 }
 
-def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
-          (MOVNTDQmr addr:$dst, VR128:$src)>;
-def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
-          (MOVNTDQmr addr:$dst, VR128:$src)>;
-def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
-          (MOVNTDQmr addr:$dst, VR128:$src)>;
+let Predicates = [UseSSE2] in {
+  def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
+            (MOVNTDQmr addr:$dst, VR128:$src)>;
+  def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
+            (MOVNTDQmr addr:$dst, VR128:$src)>;
+  def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
+            (MOVNTDQmr addr:$dst, VR128:$src)>;
+}
 
 } // AddedComplexity
 
diff --git a/test/CodeGen/X86/nontemporal-2.ll b/test/CodeGen/X86/nontemporal-2.ll
index c9767f88488..58c30f4b4d1 100644
--- a/test/CodeGen/X86/nontemporal-2.ll
+++ b/test/CodeGen/X86/nontemporal-2.ll
@@ -9,6 +9,7 @@ define void @test_zero_v4f32(<4 x float>* %dst) {
 ; CHECK-LABEL: test_zero_v4f32:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <4 x float> zeroinitializer, <4 x float>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -17,6 +18,8 @@ define void @test_zero_v4i32(<4 x i32>* %dst) {
 ; CHECK-LABEL: test_zero_v4i32:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
+  store <4 x i32> zeroinitializer, <4 x i32>* %dst, align 16, !nontemporal !1
   store <4 x i32> zeroinitializer, <4 x i32>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -25,6 +28,7 @@ define void @test_zero_v2f64(<2 x double>* %dst) {
 ; CHECK-LABEL: test_zero_v2f64:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <2 x double> zeroinitializer, <2 x double>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -33,6 +37,7 @@ define void @test_zero_v2i64(<2 x i64>* %dst) {
 ; CHECK-LABEL: test_zero_v2i64:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <2 x i64> zeroinitializer, <2 x i64>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -41,6 +46,7 @@ define void @test_zero_v8i16(<8 x i16>* %dst) {
 ; CHECK-LABEL: test_zero_v8i16:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <8 x i16> zeroinitializer, <8 x i16>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -49,6 +55,7 @@ define void @test_zero_v16i8(<16 x i8>* %dst) {
 ; CHECK-LABEL: test_zero_v16i8:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <16 x i8> zeroinitializer, <16 x i8>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -58,12 +65,14 @@ define void @test_zero_v16i8(<16 x i8>* %dst) {
 define void @test_zero_v8f32(<8 x float>* %dst) {
 ; CHECK-LABEL: test_zero_v8f32:
 ; AVX: vmovntps %ymm
+; AVX2: vmovntps %ymm
   store <8 x float> zeroinitializer, <8 x float>* %dst, align 32, !nontemporal !1
   ret void
 }
 
 define void @test_zero_v8i32(<8 x i32>* %dst) {
 ; CHECK-LABEL: test_zero_v8i32:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntps %ymm
   store <8 x i32> zeroinitializer, <8 x i32>* %dst, align 32, !nontemporal !1
   ret void
@@ -72,12 +81,14 @@ define void @test_zero_v8i32(<8 x i32>* %dst) {
 define void @test_zero_v4f64(<4 x double>* %dst) {
 ; CHECK-LABEL: test_zero_v4f64:
 ; AVX: vmovntps %ymm
+; AVX2: vmovntps %ymm
   store <4 x double> zeroinitializer, <4 x double>* %dst, align 32, !nontemporal !1
   ret void
 }
 
 define void @test_zero_v4i64(<4 x i64>* %dst) {
 ; CHECK-LABEL: test_zero_v4i64:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntps %ymm
   store <4 x i64> zeroinitializer, <4 x i64>* %dst, align 32, !nontemporal !1
   ret void
@@ -85,6 +96,7 @@ define void @test_zero_v4i64(<4 x i64>* %dst) {
 
 define void @test_zero_v16i16(<16 x i16>* %dst) {
 ; CHECK-LABEL: test_zero_v16i16:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntps %ymm
   store <16 x i16> zeroinitializer, <16 x i16>* %dst, align 32, !nontemporal !1
   ret void
@@ -92,6 +104,7 @@ define void @test_zero_v16i16(<16 x i16>* %dst) {
 
 define void @test_zero_v32i8(<32 x i8>* %dst) {
 ; CHECK-LABEL: test_zero_v32i8:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntps %ymm
   store <32 x i8> zeroinitializer, <32 x i8>* %dst, align 32, !nontemporal !1
   ret void
@@ -104,6 +117,7 @@ define void @test_arg_v4f32(<4 x float> %arg, <4 x float>* %dst) {
 ; CHECK-LABEL: test_arg_v4f32:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <4 x float> %arg, <4 x float>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -112,6 +126,7 @@ define void @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %dst) {
 ; CHECK-LABEL: test_arg_v4i32:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <4 x i32> %arg, <4 x i32>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -120,6 +135,7 @@ define void @test_arg_v2f64(<2 x double> %arg, <2 x double>* %dst) {
 ; CHECK-LABEL: test_arg_v2f64:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <2 x double> %arg, <2 x double>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -128,6 +144,7 @@ define void @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %dst) {
 ; CHECK-LABEL: test_arg_v2i64:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <2 x i64> %arg, <2 x i64>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -136,6 +153,7 @@ define void @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %dst) {
 ; CHECK-LABEL: test_arg_v8i16:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <8 x i16> %arg, <8 x i16>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -144,6 +162,7 @@ define void @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %dst) {
 ; CHECK-LABEL: test_arg_v16i8:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   store <16 x i8> %arg, <16 x i8>* %dst, align 16, !nontemporal !1
   ret void
 }
@@ -153,12 +172,14 @@ define void @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %dst) {
 define void @test_arg_v8f32(<8 x float> %arg, <8 x float>* %dst) {
 ; CHECK-LABEL: test_arg_v8f32:
 ; AVX: vmovntps %ymm
+; AVX2: vmovntps %ymm
   store <8 x float> %arg, <8 x float>* %dst, align 32, !nontemporal !1
   ret void
 }
 
 define void @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %dst) {
 ; CHECK-LABEL: test_arg_v8i32:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntps %ymm
   store <8 x i32> %arg, <8 x i32>* %dst, align 32, !nontemporal !1
   ret void
@@ -167,12 +188,14 @@ define void @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %dst) {
 define void @test_arg_v4f64(<4 x double> %arg, <4 x double>* %dst) {
 ; CHECK-LABEL: test_arg_v4f64:
 ; AVX: vmovntps %ymm
+; AVX2: vmovntps %ymm
   store <4 x double> %arg, <4 x double>* %dst, align 32, !nontemporal !1
   ret void
 }
 
 define void @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %dst) {
 ; CHECK-LABEL: test_arg_v4i64:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntps %ymm
   store <4 x i64> %arg, <4 x i64>* %dst, align 32, !nontemporal !1
   ret void
@@ -180,6 +203,7 @@ define void @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %dst) {
 
 define void @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %dst) {
 ; CHECK-LABEL: test_arg_v16i16:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntps %ymm
   store <16 x i16> %arg, <16 x i16>* %dst, align 32, !nontemporal !1
   ret void
@@ -187,6 +211,7 @@ define void @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %dst) {
 
 define void @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %dst) {
 ; CHECK-LABEL: test_arg_v32i8:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntps %ymm
   store <32 x i8> %arg, <32 x i8>* %dst, align 32, !nontemporal !1
   ret void
@@ -200,6 +225,7 @@ define void @test_op_v4f32(<4 x float> %a, <4 x float> %b, <4 x float>* %dst) {
 ; CHECK-LABEL: test_op_v4f32:
 ; SSE: movntps
 ; AVX: vmovntps
+; AVX2: vmovntps
   %r = fadd <4 x float> %a, %b
   store <4 x float> %r, <4 x float>* %dst, align 16, !nontemporal !1
   ret void
@@ -209,6 +235,7 @@ define void @test_op_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32>* %dst) {
 ; CHECK-LABEL: test_op_v4i32:
 ; SSE: movntdq
 ; AVX: vmovntdq
+; AVX2: vmovntdq
   %r = add <4 x i32> %a, %b
   store <4 x i32> %r, <4 x i32>* %dst, align 16, !nontemporal !1
   ret void
@@ -218,6 +245,7 @@ define void @test_op_v2f64(<2 x double> %a, <2 x double> %b, <2 x double>* %dst)
 ; CHECK-LABEL: test_op_v2f64:
 ; SSE: movntpd
 ; AVX: vmovntpd
+; AVX2: vmovntpd
   %r = fadd <2 x double> %a, %b
   store <2 x double> %r, <2 x double>* %dst, align 16, !nontemporal !1
   ret void
@@ -227,6 +255,7 @@ define void @test_op_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64>* %dst) {
 ; CHECK-LABEL: test_op_v2i64:
 ; SSE: movntdq
 ; AVX: vmovntdq
+; AVX2: vmovntdq
   %r = add <2 x i64> %a, %b
   store <2 x i64> %r, <2 x i64>* %dst, align 16, !nontemporal !1
   ret void
@@ -236,6 +265,7 @@ define void @test_op_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16>* %dst) {
 ; CHECK-LABEL: test_op_v8i16:
 ; SSE: movntdq
 ; AVX: vmovntdq
+; AVX2: vmovntdq
   %r = add <8 x i16> %a, %b
   store <8 x i16> %r, <8 x i16>* %dst, align 16, !nontemporal !1
   ret void
@@ -245,6 +275,7 @@ define void @test_op_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8>* %dst) {
 ; CHECK-LABEL: test_op_v16i8:
 ; SSE: movntdq
 ; AVX: vmovntdq
+; AVX2: vmovntdq
   %r = add <16 x i8> %a, %b
   store <16 x i8> %r, <16 x i8>* %dst, align 16, !nontemporal !1
   ret void
@@ -255,6 +286,7 @@ define void @test_op_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8>* %dst) {
 define void @test_op_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
 ; CHECK-LABEL: test_op_v8f32:
 ; AVX: vmovntps %ymm
+; AVX2: vmovntps %ymm
   %r = fadd <8 x float> %a, %b
   store <8 x float> %r, <8 x float>* %dst, align 32, !nontemporal !1
   ret void
@@ -262,6 +294,7 @@ define void @test_op_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
 
 define void @test_op_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %dst) {
 ; CHECK-LABEL: test_op_v8i32:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntdq %ymm
   %r = add <8 x i32> %a, %b
   store <8 x i32> %r, <8 x i32>* %dst, align 32, !nontemporal !1
@@ -271,6 +304,7 @@ define void @test_op_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %dst) {
 define void @test_op_v4f64(<4 x double> %a, <4 x double> %b, <4 x double>* %dst) {
 ; CHECK-LABEL: test_op_v4f64:
 ; AVX: vmovntpd %ymm
+; AVX2: vmovntpd %ymm
   %r = fadd <4 x double> %a, %b
   store <4 x double> %r, <4 x double>* %dst, align 32, !nontemporal !1
   ret void
@@ -278,6 +312,7 @@ define void @test_op_v4f64(<4 x double> %a, <4 x double> %b, <4 x double>* %dst)
 
 define void @test_op_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %dst) {
 ; CHECK-LABEL: test_op_v4i64:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntdq %ymm
   %r = add <4 x i64> %a, %b
   store <4 x i64> %r, <4 x i64>* %dst, align 32, !nontemporal !1
@@ -286,6 +321,7 @@ define void @test_op_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %dst) {
 
 define void @test_op_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %dst) {
 ; CHECK-LABEL: test_op_v16i16:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntdq %ymm
   %r = add <16 x i16> %a, %b
   store <16 x i16> %r, <16 x i16>* %dst, align 32, !nontemporal !1
@@ -294,6 +330,7 @@ define void @test_op_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %dst) {
 
 define void @test_op_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %dst) {
 ; CHECK-LABEL: test_op_v32i8:
+; AVX: vmovntps %ymm
 ; AVX2: vmovntdq %ymm
   %r = add <32 x i8> %a, %b
   store <32 x i8> %r, <32 x i8>* %dst, align 32, !nontemporal !1
@@ -310,6 +347,8 @@ define void @test_unaligned_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %
 ; SSE: movntps %xmm
 ; AVX-NOT: movnt
 ; AVX: vmovups %ymm
+; AVX2-NOT: movnt
+; AVX2: vmovups %ymm
   %r = fadd <8 x float> %a, %b
   store <8 x float> %r, <8 x float>* %dst, align 16, !nontemporal !1
   ret void
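
Editor's note (not part of the patch): below is a minimal standalone sketch of the case the new HasAVX1Only patterns cover, for reproducing the instruction selection by hand. The function name, file name, and llc invocation are illustrative assumptions; the authoritative RUN lines and CHECK prefixes live in test/CodeGen/X86/nontemporal-2.ll.

; Sketch: a 256-bit integer non-temporal store on an AVX1-only target.
; Assumed invocation (hypothetical file name, mirrors an AVX1-only config):
;   llc -mtriple=x86_64-unknown-unknown -mattr=+avx reproducer.ll -o -
; With this patch the store should select vmovntps %ymm0, (%rdi): the
; VMOVNTDQYmr pattern is predicated on HasAVX2 above, so on AVX1-only
; targets the floating-point form is the ymm non-temporal store used,
; per the patch's own comment ("If integer type isn't available, use
; the floating point type").
define void @nt_store_v8i32(<8 x i32> %v, <8 x i32>* %dst) {
  store <8 x i32> %v, <8 x i32>* %dst, align 32, !nontemporal !1
  ret void
}

!1 = !{i32 1}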