Optimize shufflevector that copies an i64/f64 and zeros the rest.

Summary:
Also ran clang-format on the function. The added code is the last else-if
block.
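
As a sketch of the pattern in question (mirroring the tests added below;
the function name is illustrative): a shuffle that keeps lane 0 of its
input and fills the other lane from an all-zeros vector now becomes a
single movq (vmovq under AVX), which implicitly zeroes the upper 64 bits,
instead of materializing the zero vector first (note the CHECK-NOT: xor
in the tests):

  define <2 x i64> @copy_low_zero_high(<2 x i64> %v) {
    ; lane 0 <- %v[0], lane 1 <- 0: exactly the effect of (v)movq
    %r = shufflevector <2 x i64> %v, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
    ret <2 x i64> %r
  }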

Reviewers: nadav, craig.topper

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D3518

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@207992 91177308-0d34-0410-b5e6-96231b3b80d8
Filipe Cabecinhas 2014-05-05 19:36:28 +00:00
parent f6518f6034
commit a0fa9eb606
3 changed files with 37 additions and 12 deletions


@@ -7468,9 +7468,8 @@ static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
                      DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
 }
 
-static SDValue
-NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
-                       SelectionDAG &DAG) {
+static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
+                                      SelectionDAG &DAG) {
   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
   MVT VT = Op.getSimpleValueType();
   SDLoc dl(Op);
@@ -7495,33 +7494,43 @@ NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
   // If the shuffle can be profitably rewritten as a narrower shuffle, then
   // do it!
-  if (VT == MVT::v8i16 || VT == MVT::v16i8 ||
-      VT == MVT::v16i16 || VT == MVT::v32i8) {
+  if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
+      VT == MVT::v32i8) {
     SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
     if (NewOp.getNode())
       return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
-  } else if ((VT == MVT::v4i32 ||
-             (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
+  } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
     // FIXME: Figure out a cleaner way to do this.
     // Try to make use of movq to zero out the top part.
     if (ISD::isBuildVectorAllZeros(V2.getNode())) {
       SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
       if (NewOp.getNode()) {
         MVT NewVT = NewOp.getSimpleValueType();
         if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
                                NewVT, true, false))
-          return getVZextMovL(VT, NewVT, NewOp.getOperand(0),
-                              DAG, Subtarget, dl);
+          return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
+                              dl);
       }
     } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
       SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
       if (NewOp.getNode()) {
         MVT NewVT = NewOp.getSimpleValueType();
         if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
-          return getVZextMovL(VT, NewVT, NewOp.getOperand(1),
-                              DAG, Subtarget, dl);
+          return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
+                              dl);
       }
     }
+  } else if ((VT == MVT::v2i64 || VT == MVT::v2f64) && Subtarget->hasSSE2()) {
+    // Emit movq and vmovq to copy an i64 or f64 to a vector and zero the
+    // other bits.
+    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
+      MVT NewVT = SVOp->getSimpleValueType(0);
+      if (isCommutedMOVLMask(SVOp->getMask(), NewVT, true, false))
+        return getVZextMovL(VT, NewVT, SVOp->getOperand(0), DAG, Subtarget, dl);
+    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
+      MVT NewVT = SVOp->getSimpleValueType(0);
+      if (isMOVLMask(SVOp->getMask(), NewVT))
+        return getVZextMovL(VT, NewVT, SVOp->getOperand(1), DAG, Subtarget, dl);
+    }
   }
 
   return SDValue();
 }
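
The new block handles both operand orders: with the all-zeros vector as the
second operand, the mask matches isCommutedMOVLMask (the shape the added
tests use); with it first, isMOVLMask. A minimal sketch of the latter,
swapped shape (hypothetical function, not among the added tests):

  define <2 x i64> @insert_64_zext_swapped(<2 x i64> %i) {
    ; lane 0 <- element 2 of the concatenation (= %i[0]), lane 1 <- 0
    %r = shufflevector <2 x i64> zeroinitializer, <2 x i64> %i, <2 x i32> <i32 2, i32 1>
    ret <2 x i64> %r
  }

Either way the lowering funnels into getVZextMovL, i.e. an
X86ISD::VZEXT_MOVL node that selects to movq/vmovq.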


@@ -306,3 +306,11 @@ define void @test20() {
   store <3 x double> %a1, <3 x double>* undef, align 1
   ret void
 }
+
+define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
+; CHECK-LABEL: test_insert_64_zext
+; CHECK-NOT: xor
+; CHECK: vmovq
+  %1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2>
+  ret <2 x i64> %1
+}


@@ -221,3 +221,11 @@ entry:
   %double2float.i = fptrunc <4 x double> %0 to <4 x float>
   ret <4 x float> %double2float.i
 }
+
+define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
+; CHECK-LABEL: test_insert_64_zext
+; CHECK-NOT: xor
+; CHECK: movq
+  %1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2>
+  ret <2 x i64> %1
+}
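
Both added tests pin down only the i64 half of the title; the v2f64 leg of
the new condition would be exercised by the analogous f64 shuffle. A
hypothetical sketch of such a test (not part of this commit):

  define <2 x double> @test_insert_f64_zext(<2 x double> %d) {
    ; keep the low double, zero the high lane; expected to select movq/vmovq too
    %1 = shufflevector <2 x double> %d, <2 x double> <double 0.0, double undef>, <2 x i32> <i32 0, i32 2>
    ret <2 x double> %1
  }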