Remove unneeded ARM-specific DAG nodes for VLD* and VST* Neon operations.
The instructions can be selected directly from the intrinsics. We will need to add some ARM-specific nodes for VLD/VST of 3 and 4 128-bit vectors, but those are not yet implemented.

llvm-svn: 80117
parent c61d5bbbe2
commit 5240e9de02
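For context, the intrinsics in question are the ones the compiler emits for the NEON structured load/store builtins. A minimal sketch (not part of this commit, assuming a NEON-enabled ARM target, e.g. -mfpu=neon): the vld2_u8 builtin from arm_neon.h is lowered to the llvm.arm.neon.vld2 IR intrinsic, which the Select() code in the diff below now matches directly to a VLD2d8 machine instruction instead of going through an intermediate ARMISD::VLD2D node.

// Hypothetical example, not taken from the commit: C++ source whose NEON
// builtin becomes the llvm.arm.neon.vld2 intrinsic handled by the new
// instruction-selection code below.
#include <arm_neon.h>

// De-interleaving load of two 64-bit D registers of u8 elements.
uint8x8x2_t load_two_planes(const uint8_t *p) {
  return vld2_u8(p);  // selected directly to VLD2d8 after this change
}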
lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -1263,117 +1263,6 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
     return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
   }
 
-  case ARMISD::VLD2D: {
-    SDValue MemAddr, MemUpdate, MemOpc;
-    if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
-      return NULL;
-    unsigned Opc = 0;
-    EVT VT = Op.getValueType();
-    switch (VT.getSimpleVT().SimpleTy) {
-    default: llvm_unreachable("unhandled VLD2D type");
-    case MVT::v8i8:  Opc = ARM::VLD2d8; break;
-    case MVT::v4i16: Opc = ARM::VLD2d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VLD2d32; break;
-    }
-    SDValue Chain = N->getOperand(0);
-    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
-    return CurDAG->getTargetNode(Opc, dl, VT, VT, MVT::Other, Ops, 4);
-  }
-
-  case ARMISD::VLD3D: {
-    SDValue MemAddr, MemUpdate, MemOpc;
-    if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
-      return NULL;
-    unsigned Opc = 0;
-    EVT VT = Op.getValueType();
-    switch (VT.getSimpleVT().SimpleTy) {
-    default: llvm_unreachable("unhandled VLD3D type");
-    case MVT::v8i8:  Opc = ARM::VLD3d8; break;
-    case MVT::v4i16: Opc = ARM::VLD3d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VLD3d32; break;
-    }
-    SDValue Chain = N->getOperand(0);
-    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
-    return CurDAG->getTargetNode(Opc, dl, VT, VT, VT, MVT::Other, Ops, 4);
-  }
-
-  case ARMISD::VLD4D: {
-    SDValue MemAddr, MemUpdate, MemOpc;
-    if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
-      return NULL;
-    unsigned Opc = 0;
-    EVT VT = Op.getValueType();
-    switch (VT.getSimpleVT().SimpleTy) {
-    default: llvm_unreachable("unhandled VLD4D type");
-    case MVT::v8i8:  Opc = ARM::VLD4d8; break;
-    case MVT::v4i16: Opc = ARM::VLD4d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VLD4d32; break;
-    }
-    SDValue Chain = N->getOperand(0);
-    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
-    std::vector<EVT> ResTys(4, VT);
-    ResTys.push_back(MVT::Other);
-    return CurDAG->getTargetNode(Opc, dl, ResTys, Ops, 4);
-  }
-
-  case ARMISD::VST2D: {
-    SDValue MemAddr, MemUpdate, MemOpc;
-    if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
-      return NULL;
-    unsigned Opc = 0;
-    switch (N->getOperand(2).getValueType().getSimpleVT().SimpleTy) {
-    default: llvm_unreachable("unhandled VST2D type");
-    case MVT::v8i8:  Opc = ARM::VST2d8; break;
-    case MVT::v4i16: Opc = ARM::VST2d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VST2d32; break;
-    }
-    SDValue Chain = N->getOperand(0);
-    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                            N->getOperand(2), N->getOperand(3), Chain };
-    return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 6);
-  }
-
-  case ARMISD::VST3D: {
-    SDValue MemAddr, MemUpdate, MemOpc;
-    if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
-      return NULL;
-    unsigned Opc = 0;
-    switch (N->getOperand(2).getValueType().getSimpleVT().SimpleTy) {
-    default: llvm_unreachable("unhandled VST3D type");
-    case MVT::v8i8:  Opc = ARM::VST3d8; break;
-    case MVT::v4i16: Opc = ARM::VST3d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VST3d32; break;
-    }
-    SDValue Chain = N->getOperand(0);
-    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                            N->getOperand(2), N->getOperand(3),
-                            N->getOperand(4), Chain };
-    return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 7);
-  }
-
-  case ARMISD::VST4D: {
-    SDValue MemAddr, MemUpdate, MemOpc;
-    if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
-      return NULL;
-    unsigned Opc = 0;
-    switch (N->getOperand(2).getValueType().getSimpleVT().SimpleTy) {
-    default: llvm_unreachable("unhandled VST4D type");
-    case MVT::v8i8:  Opc = ARM::VST4d8; break;
-    case MVT::v4i16: Opc = ARM::VST4d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VST4d32; break;
-    }
-    SDValue Chain = N->getOperand(0);
-    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                            N->getOperand(2), N->getOperand(3),
-                            N->getOperand(4), N->getOperand(5), Chain };
-    return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 8);
-  }
   case ARMISD::VZIP: {
     unsigned Opc = 0;
     EVT VT = N->getValueType(0);
@@ -1425,6 +1314,121 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
     return CurDAG->getTargetNode(Opc, dl, VT, VT,
                                  N->getOperand(0), N->getOperand(1));
   }
+
+  case ISD::INTRINSIC_VOID:
+  case ISD::INTRINSIC_W_CHAIN: {
+    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+    EVT VT = N->getValueType(0);
+    unsigned Opc = 0;
+
+    switch (IntNo) {
+    default:
+      break;
+
+    case Intrinsic::arm_neon_vld2: {
+      SDValue MemAddr, MemUpdate, MemOpc;
+      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
+        return NULL;
+      switch (VT.getSimpleVT().SimpleTy) {
+      default: llvm_unreachable("unhandled vld2 type");
+      case MVT::v8i8:  Opc = ARM::VLD2d8; break;
+      case MVT::v4i16: Opc = ARM::VLD2d16; break;
+      case MVT::v2f32:
+      case MVT::v2i32: Opc = ARM::VLD2d32; break;
+      }
+      SDValue Chain = N->getOperand(0);
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
+      return CurDAG->getTargetNode(Opc, dl, VT, VT, MVT::Other, Ops, 4);
+    }
+
+    case Intrinsic::arm_neon_vld3: {
+      SDValue MemAddr, MemUpdate, MemOpc;
+      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
+        return NULL;
+      switch (VT.getSimpleVT().SimpleTy) {
+      default: llvm_unreachable("unhandled vld3 type");
+      case MVT::v8i8:  Opc = ARM::VLD3d8; break;
+      case MVT::v4i16: Opc = ARM::VLD3d16; break;
+      case MVT::v2f32:
+      case MVT::v2i32: Opc = ARM::VLD3d32; break;
+      }
+      SDValue Chain = N->getOperand(0);
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
+      return CurDAG->getTargetNode(Opc, dl, VT, VT, VT, MVT::Other, Ops, 4);
+    }
+
+    case Intrinsic::arm_neon_vld4: {
+      SDValue MemAddr, MemUpdate, MemOpc;
+      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
+        return NULL;
+      switch (VT.getSimpleVT().SimpleTy) {
+      default: llvm_unreachable("unhandled vld4 type");
+      case MVT::v8i8:  Opc = ARM::VLD4d8; break;
+      case MVT::v4i16: Opc = ARM::VLD4d16; break;
+      case MVT::v2f32:
+      case MVT::v2i32: Opc = ARM::VLD4d32; break;
+      }
+      SDValue Chain = N->getOperand(0);
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
+      std::vector<EVT> ResTys(4, VT);
+      ResTys.push_back(MVT::Other);
+      return CurDAG->getTargetNode(Opc, dl, ResTys, Ops, 4);
+    }
+
+    case Intrinsic::arm_neon_vst2: {
+      SDValue MemAddr, MemUpdate, MemOpc;
+      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
+        return NULL;
+      switch (N->getOperand(3).getValueType().getSimpleVT().SimpleTy) {
+      default: llvm_unreachable("unhandled vst2 type");
+      case MVT::v8i8:  Opc = ARM::VST2d8; break;
+      case MVT::v4i16: Opc = ARM::VST2d16; break;
+      case MVT::v2f32:
+      case MVT::v2i32: Opc = ARM::VST2d32; break;
+      }
+      SDValue Chain = N->getOperand(0);
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
+                              N->getOperand(3), N->getOperand(4), Chain };
+      return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 6);
+    }
+
+    case Intrinsic::arm_neon_vst3: {
+      SDValue MemAddr, MemUpdate, MemOpc;
+      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
+        return NULL;
+      switch (N->getOperand(3).getValueType().getSimpleVT().SimpleTy) {
+      default: llvm_unreachable("unhandled vst3 type");
+      case MVT::v8i8:  Opc = ARM::VST3d8; break;
+      case MVT::v4i16: Opc = ARM::VST3d16; break;
+      case MVT::v2f32:
+      case MVT::v2i32: Opc = ARM::VST3d32; break;
+      }
+      SDValue Chain = N->getOperand(0);
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
+                              N->getOperand(3), N->getOperand(4),
+                              N->getOperand(5), Chain };
+      return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 7);
+    }
+
+    case Intrinsic::arm_neon_vst4: {
+      SDValue MemAddr, MemUpdate, MemOpc;
+      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
+        return NULL;
+      switch (N->getOperand(3).getValueType().getSimpleVT().SimpleTy) {
+      default: llvm_unreachable("unhandled vst4 type");
+      case MVT::v8i8:  Opc = ARM::VST4d8; break;
+      case MVT::v4i16: Opc = ARM::VST4d16; break;
+      case MVT::v2f32:
+      case MVT::v2i32: Opc = ARM::VST4d32; break;
+      }
+      SDValue Chain = N->getOperand(0);
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
+                              N->getOperand(3), N->getOperand(4),
+                              N->getOperand(5), N->getOperand(6), Chain };
+      return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 8);
+    }
+    }
+  }
   }
 
   return SelectCode(Op);
lib/Target/ARM/ARMISelLowering.cpp
@@ -483,12 +483,6 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
   case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
   case ARMISD::VDUP:      return "ARMISD::VDUP";
   case ARMISD::VDUPLANE:  return "ARMISD::VDUPLANE";
-  case ARMISD::VLD2D:     return "ARMISD::VLD2D";
-  case ARMISD::VLD3D:     return "ARMISD::VLD3D";
-  case ARMISD::VLD4D:     return "ARMISD::VLD4D";
-  case ARMISD::VST2D:     return "ARMISD::VST2D";
-  case ARMISD::VST3D:     return "ARMISD::VST3D";
-  case ARMISD::VST4D:     return "ARMISD::VST4D";
  case ARMISD::VEXT:      return "ARMISD::VEXT";
  case ARMISD::VREV64:    return "ARMISD::VREV64";
  case ARMISD::VREV32:    return "ARMISD::VREV32";
@@ -1345,52 +1339,45 @@ SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
 }
 
 static SDValue LowerNeonVLDIntrinsic(SDValue Op, SelectionDAG &DAG,
-                                     unsigned Opcode) {
+                                     unsigned NumVecs) {
   SDNode *Node = Op.getNode();
   EVT VT = Node->getValueType(0);
   DebugLoc dl = Op.getDebugLoc();
 
-  if (!VT.is64BitVector())
-    return SDValue(); // unimplemented
+  // No expansion needed for 64-bit vectors.
+  if (VT.is64BitVector())
+    return SDValue();
 
-  SDValue Ops[] = { Node->getOperand(0),
-                    Node->getOperand(2) };
-  return DAG.getNode(Opcode, dl, Node->getVTList(), Ops, 2);
+  // FIXME: We need to expand VLD3 and VLD4 of 128-bit vectors into separate
+  // operations to load the even and odd registers.
+  return SDValue();
 }
 
 static SDValue LowerNeonVSTIntrinsic(SDValue Op, SelectionDAG &DAG,
-                                     unsigned Opcode, unsigned NumVecs) {
+                                     unsigned NumVecs) {
   SDNode *Node = Op.getNode();
   EVT VT = Node->getOperand(3).getValueType();
   DebugLoc dl = Op.getDebugLoc();
 
-  if (!VT.is64BitVector())
-    return SDValue(); // unimplemented
+  // No expansion needed for 64-bit vectors.
+  if (VT.is64BitVector())
+    return SDValue();
 
-  SmallVector<SDValue, 6> Ops;
-  Ops.push_back(Node->getOperand(0));
-  Ops.push_back(Node->getOperand(2));
-  for (unsigned N = 0; N < NumVecs; ++N)
-    Ops.push_back(Node->getOperand(N + 3));
-  return DAG.getNode(Opcode, dl, MVT::Other, Ops.data(), Ops.size());
+  // FIXME: We need to expand VST3 and VST4 of 128-bit vectors into separate
+  // operations to store the even and odd registers.
+  return SDValue();
 }
 
 SDValue
 ARMTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) {
   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
   switch (IntNo) {
-  case Intrinsic::arm_neon_vld2:
-    return LowerNeonVLDIntrinsic(Op, DAG, ARMISD::VLD2D);
   case Intrinsic::arm_neon_vld3:
-    return LowerNeonVLDIntrinsic(Op, DAG, ARMISD::VLD3D);
+    return LowerNeonVLDIntrinsic(Op, DAG, 3);
   case Intrinsic::arm_neon_vld4:
-    return LowerNeonVLDIntrinsic(Op, DAG, ARMISD::VLD4D);
-  case Intrinsic::arm_neon_vst2:
-    return LowerNeonVSTIntrinsic(Op, DAG, ARMISD::VST2D, 2);
+    return LowerNeonVLDIntrinsic(Op, DAG, 4);
   case Intrinsic::arm_neon_vst3:
-    return LowerNeonVSTIntrinsic(Op, DAG, ARMISD::VST3D, 3);
+    return LowerNeonVSTIntrinsic(Op, DAG, 3);
   case Intrinsic::arm_neon_vst4:
-    return LowerNeonVSTIntrinsic(Op, DAG, ARMISD::VST4D, 4);
+    return LowerNeonVSTIntrinsic(Op, DAG, 4);
   default: return SDValue();    // Don't custom lower most intrinsics.
   }
 }
lib/Target/ARM/ARMISelLowering.h
@@ -119,14 +119,6 @@ namespace llvm {
       VDUP,
       VDUPLANE,
 
-      // Vector load/store with (de)interleaving
-      VLD2D,
-      VLD3D,
-      VLD4D,
-      VST2D,
-      VST3D,
-      VST4D,
-
       // Vector shuffles:
       VEXT,   // extract
       VREV64, // reverse elements within 64-bit doublewords