Rename FastString flag.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@300959 91177308-0d34-0410-b5e6-96231b3b80d8
Clement Courbet 2017-04-21 09:20:50 +00:00
parent f3a411fb40
commit e5aaed5aca
6 changed files with 17 additions and 14 deletions


@@ -273,11 +273,14 @@ def FeatureFastSHLDRotate
           "fast-shld-rotate", "HasFastSHLDRotate", "true",
           "SHLD can be used as a faster rotate">;
 
-// String operations (e.g. REP MOVS) are fast. See "REP String Enhancement" in
-// the Intel Software Development Manual.
-def FeatureFastString
+// Ivy Bridge and newer processors have enhanced REP MOVSB and STOSB (aka
+// "string operations"). See "REP String Enhancement" in the Intel Software
+// Development Manual. This feature essentially means that REP MOVSB will copy
+// using the largest available size instead of copying bytes one by one, making
+// it at least as fast as REPMOVS{W,D,Q}.
+def FeatureERMSB
     : SubtargetFeature<
-          "fast-string", "HasFastString", "true",
+          "ermsb", "HasERMSB", "true",
           "REP MOVS/STOS are fast">;
 
 //===----------------------------------------------------------------------===//
@@ -505,7 +508,7 @@ def HSWFeatures : ProcessorFeatures<IVBFeatures.Value, [
   FeatureAVX2,
   FeatureBMI,
   FeatureBMI2,
-  FeatureFastString,
+  FeatureERMSB,
   FeatureFMA,
   FeatureLZCNT,
   FeatureMOVBE,
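
As background, separate from the rename itself: the capability behind FeatureERMSB corresponds to a CPUID bit. Enhanced REP MOVSB/STOSB ("ERMS") is reported in CPUID leaf 7, sub-leaf 0, EBX bit 9 per the Intel SDM. The standalone sketch below checks that bit on the host using the GCC/Clang <cpuid.h> helper; it is illustrative only and not part of the subtarget plumbing this patch touches.

// Illustrative host check for ERMSB (x86 only, GCC/Clang).
#include <cpuid.h>
#include <cstdio>

static bool hostHasERMSB() {
  unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
  // CPUID leaf 7, sub-leaf 0; __get_cpuid_count returns 0 if the leaf is unsupported.
  if (!__get_cpuid_count(7, 0, &EAX, &EBX, &ECX, &EDX))
    return false;
  return (EBX >> 9) & 1; // EBX bit 9: ERMS (enhanced REP MOVSB/STOSB)
}

int main() {
  std::printf("ERMSB supported: %s\n", hostHasERMSB() ? "yes" : "no");
}

On Ivy Bridge and newer parts this should print "yes", matching the processors the new FeatureERMSB comment describes.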


@@ -897,7 +897,7 @@ def NotSlowIncDec : Predicate<"!Subtarget->slowIncDec()">;
 def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
 def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
 def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
-def HasFastString : Predicate<"Subtarget->hasFastString()">;
+def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
 def HasMFence : Predicate<"Subtarget->hasMFence()">;
 
 //===----------------------------------------------------------------------===//


@@ -215,8 +215,8 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
     return SDValue();
 
   MVT AVT;
-  if (Subtarget.hasFastString())
-    // If the target has fast strings, then it's at least as fast to use
+  if (Subtarget.hasERMSB())
+    // If the target has enhanced REPMOVSB, then it's at least as fast to use
     // REP MOVSB instead of REP MOVS{W,D,Q}, and it avoids having to handle
     // BytesLeft.
     AVT = MVT::i8;
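
For context on the BytesLeft remark, not part of the patch: when the expansion uses a wider REP MOVS variant, the byte count rarely divides evenly, so the remainder (BytesLeft) must be copied separately, whereas the i8 element type under ERMSB makes the repeat count exact. The standalone sketch below mimics that width decision, under the simplifying assumption that the non-ERMSB path just picks the widest element the alignment allows (the real EmitTargetCodeForMemcpy logic is more involved).

// Standalone sketch, not LLVM code: contrast the REP MOVS expansion
// plan with and without ERMSB.
#include <cstdint>
#include <cstdio>

struct CopyPlan {
  uint64_t ElementBytes; // operand size of the REP MOVS variant
  uint64_t Reps;         // repeat count (what ends up in RCX)
  uint64_t BytesLeft;    // tail that needs a separate copy
};

static CopyPlan planRepMovs(uint64_t Size, uint64_t Align, bool HasERMSB) {
  uint64_t Elem = 1; // REP MOVSB
  if (!HasERMSB) {
    // Pick the widest element the alignment allows (simplified).
    if (Align % 8 == 0)
      Elem = 8; // REP MOVSQ
    else if (Align % 4 == 0)
      Elem = 4; // REP MOVSD
    else if (Align % 2 == 0)
      Elem = 2; // REP MOVSW
  }
  return {Elem, Size / Elem, Size % Elem};
}

int main() {
  const CopyPlan With = planRepMovs(1003, 8, /*HasERMSB=*/true);
  const CopyPlan Without = planRepMovs(1003, 8, /*HasERMSB=*/false);
  std::printf("ermsb:    %llu x %llu-byte moves, %llu bytes left\n",
              (unsigned long long)With.Reps,
              (unsigned long long)With.ElementBytes,
              (unsigned long long)With.BytesLeft);
  std::printf("no ermsb: %llu x %llu-byte moves, %llu bytes left\n",
              (unsigned long long)Without.Reps,
              (unsigned long long)Without.ElementBytes,
              (unsigned long long)Without.BytesLeft);
}

For a 1003-byte, 8-byte-aligned copy this prints 1003 one-byte moves with nothing left over for the ERMSB plan, versus 125 eight-byte moves plus a 3-byte tail without it, which is exactly the BytesLeft handling the i8 choice avoids.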


@@ -303,7 +303,7 @@ void X86Subtarget::initializeEnvironment() {
   HasFastVectorFSQRT = false;
   HasFastLZCNT = false;
   HasFastSHLDRotate = false;
-  HasFastString = false;
+  HasERMSB = false;
   HasSlowDivide32 = false;
   HasSlowDivide64 = false;
   PadShortFunctions = false;


@@ -232,8 +232,8 @@ protected:
   /// True if SHLD based rotate is fast.
   bool HasFastSHLDRotate;
 
-  /// True if the processor has fast REP MOVS.
-  bool HasFastString;
+  /// True if the processor has enhanced REP MOVSB/STOSB.
+  bool HasERMSB;
 
   /// True if the short functions should be padded to prevent
   /// a stall when returning too early.
@@ -475,7 +475,7 @@ public:
   bool hasFastVectorFSQRT() const { return HasFastVectorFSQRT; }
   bool hasFastLZCNT() const { return HasFastLZCNT; }
   bool hasFastSHLDRotate() const { return HasFastSHLDRotate; }
-  bool hasFastString() const { return HasFastString; }
+  bool hasERMSB() const { return HasERMSB; }
   bool hasSlowDivide32() const { return HasSlowDivide32; }
   bool hasSlowDivide64() const { return HasSlowDivide64; }
   bool padShortFunctions() const { return PadShortFunctions; }


@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=-fast-string < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOFAST
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+fast-string < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=FAST
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=-ermsb < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOFAST
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+ermsb < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=FAST
 ; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=haswell < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=HASWELL
 ; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=generic < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=GENERIC