[PPC64LE] Fix lowering of BUILD_VECTOR and SHUFFLE_VECTOR for little endian

This patch fixes a couple of lowering issues for little endian
PowerPC.  The code for lowering BUILD_VECTOR contains a number of
optimizations that are only valid for big endian.  For now, we disable
those optimizations for correctness.  In the future, we will add
analogous optimizations that are correct for little endian.

When lowering a SHUFFLE_VECTOR to a VPERM operation, we again need to
make the now-familiar transformation of swapping the input operands
and complementing the permute control vector.  Correctness of this
transformation is tested by the accompanying test case.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@210336 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Bill Schmidt 2014-06-06 14:06:26 +00:00
parent 53b344dcca
commit 6c9eb10784
2 changed files with 100 additions and 3 deletions

View File

@@ -5558,6 +5558,22 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
// The remaining cases assume either big endian element order or
// a splat-size that equates to the element size of the vector
// to be built. An example that doesn't work for little endian is
// {0, -1, 0, -1, 0, -1, 0, -1} which has a splat size of 32 bits
// and a vector element size of 16 bits. The code below will
// produce the vector in big endian element order, which for little
// endian is {-1, 0, -1, 0, -1, 0, -1, 0}.
// For now, just avoid these optimizations in that case.
// FIXME: Develop correct optimizations for LE with mismatched
// splat and element sizes.
if (PPCSubTarget.isLittleEndian() &&
SplatSize != Op.getValueType().getVectorElementType().getSizeInBits())
return SDValue();
// Check to see if this is a wide variety of vsplti*, binop self cases.
static const signed char SplatCsts[] = {
-1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
@@ -5821,21 +5837,36 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
// that it is in input element units, not in bytes. Convert now.
// For little endian, the order of the input vectors is reversed, and
// the permutation mask is complemented with respect to 31. This is
// necessary to produce proper semantics with the big-endian-biased vperm
// instruction.
EVT EltVT = V1.getValueType().getVectorElementType();
unsigned BytesPerElement = EltVT.getSizeInBits()/8;
bool isLittleEndian = PPCSubTarget.isLittleEndian();
SmallVector<SDValue, 16> ResultMask;
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
for (unsigned j = 0; j != BytesPerElement; ++j)
ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
MVT::i32));
if (isLittleEndian)
ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement+j),
MVT::i32));
else
ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
MVT::i32));
}
SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
ResultMask);
return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
if (isLittleEndian)
return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
V2, V1, VPermMask);
else
return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
V1, V2, VPermMask);
}
/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an

View File

@@ -0,0 +1,66 @@
; Verify that a v16i8 shufflevector is lowered correctly for little-endian
; PPC64: the two vperm source registers must be swapped (V2 before V1) and
; every byte of the permute control vector complemented with respect to 31
; (i.e. 31 - index), because vperm itself has big-endian-biased semantics.
; -O0 -fast-isel=false keeps the shuffle from being constant-folded away
; before it reaches SelectionDAG lowering.
; RUN: llc -O0 -fast-isel=false -mcpu=ppc64 < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
; Shuffle of two constant <16 x i8> vectors; the mask picks elements from
; both inputs, so the backend must emit a genuine vperm.
define <16 x i8> @foo() nounwind ssp {
  %1 = shufflevector <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, <16 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 1, i32 6, i32 11>
  ret <16 x i8> %1
}
; LCPI0_0 is the permute control vector: each byte is (31 - mask[i]),
; e.g. mask[0]=0 -> 31, mask[1]=5 -> 26, mask[2]=10 -> 21, ...
; CHECK: .LCPI0_0:
; CHECK: .byte 31
; CHECK: .byte 26
; CHECK: .byte 21
; CHECK: .byte 16
; CHECK: .byte 11
; CHECK: .byte 6
; CHECK: .byte 1
; CHECK: .byte 28
; CHECK: .byte 23
; CHECK: .byte 18
; CHECK: .byte 13
; CHECK: .byte 8
; CHECK: .byte 3
; CHECK: .byte 30
; CHECK: .byte 25
; CHECK: .byte 20
; LCPI0_1 / LCPI0_2 are the materialized constant source vectors {0..15}
; and {16..31}.
; CHECK: .LCPI0_1:
; CHECK: .byte 0
; CHECK: .byte 1
; CHECK: .byte 2
; CHECK: .byte 3
; CHECK: .byte 4
; CHECK: .byte 5
; CHECK: .byte 6
; CHECK: .byte 7
; CHECK: .byte 8
; CHECK: .byte 9
; CHECK: .byte 10
; CHECK: .byte 11
; CHECK: .byte 12
; CHECK: .byte 13
; CHECK: .byte 14
; CHECK: .byte 15
; CHECK: .LCPI0_2:
; CHECK: .byte 16
; CHECK: .byte 17
; CHECK: .byte 18
; CHECK: .byte 19
; CHECK: .byte 20
; CHECK: .byte 21
; CHECK: .byte 22
; CHECK: .byte 23
; CHECK: .byte 24
; CHECK: .byte 25
; CHECK: .byte 26
; CHECK: .byte 27
; CHECK: .byte 28
; CHECK: .byte 29
; CHECK: .byte 30
; CHECK: .byte 31
; The operand-swap check: the vector loaded from LCPI0_2 (the SECOND
; shufflevector operand, {16..31}) must appear as the FIRST vperm source
; register, proving V2/V1 were exchanged for little endian.
; CHECK: foo:
; CHECK: addis [[REG1:[0-9]+]], 2, .LCPI0_2@toc@ha
; CHECK: addi [[REG2:[0-9]+]], [[REG1]], .LCPI0_2@toc@l
; CHECK: lvx [[REG3:[0-9]+]], 0, [[REG2]]
; CHECK: vperm {{[0-9]+}}, [[REG3]], {{[0-9]+}}, {{[0-9]+}}