From 42694a3a7e75f7249e830ae344d15fa2ce48e874 Mon Sep 17 00:00:00 2001
From: Zvi Rackover
Date: Tue, 6 Dec 2016 19:35:20 +0000
Subject: [PATCH] [X86] Prefer reduced width multiplication over pmulld on
 Silvermont

Summary:
Prefer expansions such as: pmullw, pmulhw, unpacklwd, unpackhwd over pmulld.

On Silvermont [source: Optimization Reference Manual]:
PMULLD has a throughput of 1/11 [instructions/cycle].
PMULHUW/PMULHW/PMULLW have a throughput of 1/2 [instructions/cycle].

Fixes pr31202.

Analysis of this issue was done by Fahana Aleen.

Reviewers: wmi, delena, mkuper

Subscribers: RKSimon, llvm-commits

Differential Revision: https://reviews.llvm.org/D27203

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@288844 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86.td              |  3 ++
 lib/Target/X86/X86ISelLowering.cpp | 13 ++++--
 lib/Target/X86/X86Subtarget.cpp    |  4 ++
 lib/Target/X86/X86Subtarget.h      |  5 +++
 test/CodeGen/X86/slow-pmulld.ll    | 71 ++++++++++++++++++++++++++++++
 5 files changed, 93 insertions(+), 3 deletions(-)
 create mode 100644 test/CodeGen/X86/slow-pmulld.ll

diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index e8fbe82902a..dc18a59a30b 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -99,6 +99,8 @@ def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
                                         "Bit testing of memory is slow">;
 def FeatureSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true",
                                        "SHLD instruction is slow">;
+def FeatureSlowPMULLD : SubtargetFeature<"slow-pmulld", "IsPMULLDSlow", "true",
+                                         "PMULLD instruction is slow">;
 // FIXME: This should not apply to CPUs that do not have SSE.
 def FeatureSlowUAMem16 : SubtargetFeature<"slow-unaligned-mem-16",
                                           "IsUAMem16Slow", "true",
@@ -403,6 +405,7 @@ class SilvermontProc<string Name> : ProcessorModel<Name, SLMModel, [
   FeatureSlowLEA,
   FeatureSlowIncDec,
   FeatureSlowBTMem,
+  FeatureSlowPMULLD,
   FeatureLAHFSAHF
 ]>;
 def : SilvermontProc<"silvermont">;
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index bac2c8e8145..eff9a7a92c0 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -29302,10 +29302,17 @@ static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
 /// generate pmullw+pmulhuw for it (MULU16 mode).
 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
-  // pmulld is supported since SSE41. It is better to use pmulld
-  // instead of pmullw+pmulhw.
+  // Check for legality
   // pmullw/pmulhw are not supported by SSE.
-  if (Subtarget.hasSSE41() || !Subtarget.hasSSE2())
+  if (!Subtarget.hasSSE2())
+    return SDValue();
+
+  // Check for profitability
+  // pmulld is supported since SSE41. It is better to use pmulld
+  // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
+  // the expansion.
+  bool OptForMinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+  if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
     return SDValue();
 
   ShrinkMode Mode;
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index ee37ea7df1c..f1c490ca6b9 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -228,6 +228,9 @@ void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
   else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
            isTargetKFreeBSD() || In64BitMode)
     stackAlignment = 16;
+
+  assert((!isPMULLDSlow() || hasSSE41()) &&
+         "Feature Slow PMULLD can only be set on a subtarget with SSE4.1");
 }
 
 void X86Subtarget::initializeEnvironment() {
@@ -275,6 +278,7 @@
   HasMWAITX = false;
   HasMPX = false;
   IsBTMemSlow = false;
+  IsPMULLDSlow = false;
   IsSHLDSlow = false;
   IsUAMem16Slow = false;
   IsUAMem32Slow = false;
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 6940d1e7ce5..92c16214aa4 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -178,6 +178,10 @@ protected:
   /// True if SHLD instructions are slow.
   bool IsSHLDSlow;
 
+  /// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
+  //  PMULUDQ.
+  bool IsPMULLDSlow;
+
   /// True if unaligned memory accesses of 16-bytes are slow.
   bool IsUAMem16Slow;
 
@@ -452,6 +456,7 @@ public:
   bool hasMWAITX() const { return HasMWAITX; }
   bool isBTMemSlow() const { return IsBTMemSlow; }
   bool isSHLDSlow() const { return IsSHLDSlow; }
+  bool isPMULLDSlow() const { return IsPMULLDSlow; }
   bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
   bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
   bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
diff --git a/test/CodeGen/X86/slow-pmulld.ll b/test/CodeGen/X86/slow-pmulld.ll
new file mode 100644
index 00000000000..ff6682090a2
--- /dev/null
+++ b/test/CodeGen/X86/slow-pmulld.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-64
+
+define <4 x i32> @foo(<4 x i8> %A) {
+; CHECK32-LABEL: foo:
+; CHECK32:       # BB#0:
+; CHECK32-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
+; CHECK32-NEXT:    movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
+; CHECK32-NEXT:    movdqa %xmm0, %xmm2
+; CHECK32-NEXT:    pmullw %xmm1, %xmm0
+; CHECK32-NEXT:    pmulhw %xmm1, %xmm2
+; CHECK32-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK32-NEXT:    retl
+;
+; CHECK64-LABEL: foo:
+; CHECK64:       # BB#0:
+; CHECK64-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
+; CHECK64-NEXT:    movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
+; CHECK64-NEXT:    movdqa %xmm0, %xmm2
+; CHECK64-NEXT:    pmullw %xmm1, %xmm0
+; CHECK64-NEXT:    pmulhw %xmm1, %xmm2
+; CHECK64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK64-NEXT:    retq
+;
+; SSE4-32-LABEL: foo:
+; SSE4-32:       # BB#0:
+; SSE4-32-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT:    retl
+;
+; SSE4-64-LABEL: foo:
+; SSE4-64:       # BB#0:
+; SSE4-64-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT:    retq
+  %z = zext <4 x i8> %A to <4 x i32>
+  %m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <4 x i32> %m
+}
+
+define <4 x i32> @foo_os(<4 x i8> %A) minsize {
+; CHECK32-LABEL: foo_os:
+; CHECK32:       # BB#0:
+; CHECK32-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; CHECK32-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
+; CHECK32-NEXT:    retl
+;
+; CHECK64-LABEL: foo_os:
+; CHECK64:       # BB#0:
+; CHECK64-NEXT:    pand {{.*}}(%rip), %xmm0
+; CHECK64-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; CHECK64-NEXT:    retq
+;
+; SSE4-32-LABEL: foo_os:
+; SSE4-32:       # BB#0:
+; SSE4-32-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT:    retl
+;
+; SSE4-64-LABEL: foo_os:
+; SSE4-64:       # BB#0:
+; SSE4-64-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT:    retq
+  %z = zext <4 x i8> %A to <4 x i32>
+  %m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <4 x i32> %m
+}
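
For readers unfamiliar with the expansion this patch prefers, here is a standalone C++ intrinsics sketch. It is not part of the commit: the helper names, the driver, and the reuse of the test's 18778 multiplier are purely illustrative. It shows how pmullw (low 16 bits of each product) and pmulhuw (high 16 bits; the test's codegen picks the signed pmulhw because its values are known small) interleaved by punpcklwd yield full 32-bit products without pmulld, next to the single-pmulld SSE4.1 form the backend now avoids on slow-pmulld subtargets.

// Illustrative only -- assumed helper names, not LLVM code.
#include <emmintrin.h>   // SSE2: pmullw, pmulhuw, punpcklwd
#include <smmintrin.h>   // SSE4.1: pmovzxwd, pmulld
#include <cstdint>
#include <cstdio>

// Widening 16x16->32 multiply of the low four word lanes using only SSE2,
// mirroring the pmullw + pmulhw/pmulhuw + punpcklwd pattern checked in
// slow-pmulld.ll (the unsigned-high variant is used here).
static __m128i mul_u16_widen_sse2(__m128i a, __m128i b) {
  __m128i lo = _mm_mullo_epi16(a, b);  // pmullw : low 16 bits of each product
  __m128i hi = _mm_mulhi_epu16(a, b);  // pmulhuw: high 16 bits of each product
  return _mm_unpacklo_epi16(lo, hi);   // punpcklwd: rebuild 32-bit products
}

// The pmulld form that the patch avoids on Silvermont, where the commit
// message cites a throughput of only one PMULLD every 11 cycles.
static __m128i mul_u16_widen_sse41(__m128i a, __m128i b) {
  return _mm_mullo_epi32(_mm_cvtepu16_epi32(a),   // pmovzxwd
                         _mm_cvtepu16_epi32(b));  // pmulld
}

int main() {
  // Four zero-extended byte values times 18778, the constant from the test.
  __m128i a = _mm_setr_epi16(3, 7, 100, 255, 0, 0, 0, 0);
  __m128i b = _mm_set1_epi16(18778);

  uint32_t r2[4], r41[4];
  _mm_storeu_si128(reinterpret_cast<__m128i *>(r2),  mul_u16_widen_sse2(a, b));
  _mm_storeu_si128(reinterpret_cast<__m128i *>(r41), mul_u16_widen_sse41(a, b));

  for (int i = 0; i < 4; ++i)
    std::printf("%u %u\n", r2[i], r41[i]);  // the two columns should match
  return 0;
}

Compiled with SSE4.1 enabled (e.g. -msse4.1), both helpers produce the same four products; on a slow-pmulld subtarget the first sequence is what reduceVMULWidth now emits for IR like the test's mul nuw nsw <4 x i32>, unless the function is optimized for minimum size.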