mirror of
https://github.com/RPCSX/llvm.git
synced 2024-11-23 19:59:57 +00:00
Teach the AArch64 backend about v4f16 and v8f16
This teaches the AArch64 backend to deal with the operations required to deal with the operations on v4f16 and v8f16 which are exposed by NEON intrinsics, plus the add, sub, mul and div operations. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216555 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
b8c95a89e6
commit
5e487f8dc7
@ -196,21 +196,24 @@ namespace llvm {
|
||||
/// is32BitVector - Return true if this is a 32-bit vector type.
|
||||
bool is32BitVector() const {
|
||||
return (SimpleTy == MVT::v4i8 || SimpleTy == MVT::v2i16 ||
|
||||
SimpleTy == MVT::v1i32);
|
||||
SimpleTy == MVT::v1i32 || SimpleTy == MVT::v2f16 ||
|
||||
SimpleTy == MVT::v1f32);
|
||||
}
|
||||
|
||||
/// is64BitVector - Return true if this is a 64-bit vector type.
|
||||
bool is64BitVector() const {
|
||||
return (SimpleTy == MVT::v8i8 || SimpleTy == MVT::v4i16 ||
|
||||
SimpleTy == MVT::v2i32 || SimpleTy == MVT::v1i64 ||
|
||||
SimpleTy == MVT::v1f64 || SimpleTy == MVT::v2f32);
|
||||
SimpleTy == MVT::v4f16 || SimpleTy == MVT::v2f32 ||
|
||||
SimpleTy == MVT::v1f64);
|
||||
}
|
||||
|
||||
/// is128BitVector - Return true if this is a 128-bit vector type.
|
||||
bool is128BitVector() const {
|
||||
return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 ||
|
||||
SimpleTy == MVT::v4i32 || SimpleTy == MVT::v2i64 ||
|
||||
SimpleTy == MVT::v4f32 || SimpleTy == MVT::v2f64);
|
||||
SimpleTy == MVT::v8f16 || SimpleTy == MVT::v4f32 ||
|
||||
SimpleTy == MVT::v2f64);
|
||||
}
|
||||
|
||||
/// is256BitVector - Return true if this is a 256-bit vector type.
|
||||
|
@ -370,9 +370,11 @@ SDValue VectorLegalizer::Promote(SDValue Op) {
|
||||
return PromoteFP_TO_INT(Op, Op->getOpcode() == ISD::FP_TO_SINT);
|
||||
}
|
||||
|
||||
// The rest of the time, vector "promotion" is basically just bitcasting and
|
||||
// doing the operation in a different type. For example, x86 promotes
|
||||
// ISD::AND on v2i32 to v1i64.
|
||||
// There are currently two cases of vector promotion:
|
||||
// 1) Bitcasting a vector of integers to a different type to a vector of the
|
||||
// same overall length. For example, x86 promotes ISD::AND on v2i32 to v1i64.
|
||||
// 2) Extending a vector of floats to a vector of the same number oflarger
|
||||
// floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
|
||||
MVT VT = Op.getSimpleValueType();
|
||||
assert(Op.getNode()->getNumValues() == 1 &&
|
||||
"Can't promote a vector with multiple results!");
|
||||
@ -382,14 +384,23 @@ SDValue VectorLegalizer::Promote(SDValue Op) {
|
||||
|
||||
for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
|
||||
if (Op.getOperand(j).getValueType().isVector())
|
||||
Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j));
|
||||
if (Op.getOperand(j)
|
||||
.getValueType()
|
||||
.getVectorElementType()
|
||||
.isFloatingPoint())
|
||||
Operands[j] = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Op.getOperand(j));
|
||||
else
|
||||
Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j));
|
||||
else
|
||||
Operands[j] = Op.getOperand(j);
|
||||
}
|
||||
|
||||
Op = DAG.getNode(Op.getOpcode(), dl, NVT, Operands);
|
||||
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Op);
|
||||
if (VT.isFloatingPoint() ||
|
||||
(VT.isVector() && VT.getVectorElementType().isFloatingPoint()))
|
||||
return DAG.getNode(ISD::FP_ROUND, dl, VT, Op, DAG.getIntPtrConstant(0));
|
||||
else
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Op);
|
||||
}
|
||||
|
||||
SDValue VectorLegalizer::PromoteINT_TO_FP(SDValue Op) {
|
||||
|
@ -60,18 +60,18 @@ def CC_AArch64_AAPCS : CallingConv<[
|
||||
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
|
||||
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
|
||||
CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
|
||||
CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
|
||||
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
|
||||
CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
|
||||
CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
|
||||
// If more than will fit in registers, pass them on the stack instead.
|
||||
CCIfType<[i1, i8, i16, f16], CCAssignToStack<8, 8>>,
|
||||
CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
|
||||
CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8],
|
||||
CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
|
||||
CCAssignToStack<8, 8>>,
|
||||
CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
|
||||
CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
|
||||
CCAssignToStack<16, 16>>
|
||||
]>;
|
||||
|
||||
@ -96,10 +96,10 @@ def RetCC_AArch64_AAPCS : CallingConv<[
|
||||
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
|
||||
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
|
||||
CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
|
||||
CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
|
||||
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
|
||||
CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
|
||||
CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
|
||||
]>;
|
||||
|
||||
@ -139,19 +139,20 @@ def CC_AArch64_DarwinPCS : CallingConv<[
|
||||
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
|
||||
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
|
||||
CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
|
||||
CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
|
||||
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
|
||||
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
|
||||
CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
|
||||
|
||||
// If more than will fit in registers, pass them on the stack instead.
|
||||
CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
|
||||
CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16", CCAssignToStack<2, 2>>,
|
||||
CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
|
||||
CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8],
|
||||
CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
|
||||
CCAssignToStack<8, 8>>,
|
||||
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
|
||||
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
|
||||
CCAssignToStack<16, 16>>
|
||||
]>;
|
||||
|
||||
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
|
||||
@ -165,8 +166,10 @@ def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
|
||||
// Everything is on the stack.
|
||||
// i128 is split to two i64s, and its stack alignment is 16 bytes.
|
||||
CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
|
||||
CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32], CCAssignToStack<8, 8>>,
|
||||
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
|
||||
CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
|
||||
CCAssignToStack<8, 8>>,
|
||||
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
|
||||
CCAssignToStack<16, 16>>
|
||||
]>;
|
||||
|
||||
// The WebKit_JS calling convention only passes the first argument (the callee)
|
||||
|
@ -2116,7 +2116,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case 32:
|
||||
SubReg = AArch64::ssub;
|
||||
break;
|
||||
case 16: // FALLTHROUGH
|
||||
case 16:
|
||||
SubReg = AArch64::hsub;
|
||||
break;
|
||||
case 8:
|
||||
llvm_unreachable("unexpected zext-requiring extract element!");
|
||||
}
|
||||
@ -2204,9 +2206,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
|
||||
@ -2222,9 +2224,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
|
||||
@ -2240,9 +2242,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
|
||||
@ -2258,9 +2260,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
|
||||
@ -2276,9 +2278,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
|
||||
@ -2294,9 +2296,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
|
||||
@ -2312,9 +2314,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
|
||||
@ -2330,9 +2332,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
|
||||
@ -2348,9 +2350,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
|
||||
@ -2364,7 +2366,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case Intrinsic::aarch64_neon_ld2lane:
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectLoadLane(Node, 2, AArch64::LD2i8);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectLoadLane(Node, 2, AArch64::LD2i16);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2376,7 +2379,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case Intrinsic::aarch64_neon_ld3lane:
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectLoadLane(Node, 3, AArch64::LD3i8);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectLoadLane(Node, 3, AArch64::LD3i16);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2388,7 +2392,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case Intrinsic::aarch64_neon_ld4lane:
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectLoadLane(Node, 4, AArch64::LD4i8);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectLoadLane(Node, 4, AArch64::LD4i16);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2448,9 +2453,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectStore(Node, 2, AArch64::ST1Twov8b);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectStore(Node, 2, AArch64::ST1Twov16b);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectStore(Node, 2, AArch64::ST1Twov4h);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectStore(Node, 2, AArch64::ST1Twov8h);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectStore(Node, 2, AArch64::ST1Twov2s);
|
||||
@ -2467,9 +2472,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectStore(Node, 3, AArch64::ST1Threev8b);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectStore(Node, 3, AArch64::ST1Threev16b);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectStore(Node, 3, AArch64::ST1Threev4h);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectStore(Node, 3, AArch64::ST1Threev8h);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectStore(Node, 3, AArch64::ST1Threev2s);
|
||||
@ -2486,9 +2491,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectStore(Node, 4, AArch64::ST1Fourv8b);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectStore(Node, 4, AArch64::ST1Fourv16b);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectStore(Node, 4, AArch64::ST1Fourv4h);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectStore(Node, 4, AArch64::ST1Fourv8h);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectStore(Node, 4, AArch64::ST1Fourv2s);
|
||||
@ -2505,9 +2510,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectStore(Node, 2, AArch64::ST2Twov8b);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectStore(Node, 2, AArch64::ST2Twov16b);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectStore(Node, 2, AArch64::ST2Twov4h);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectStore(Node, 2, AArch64::ST2Twov8h);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectStore(Node, 2, AArch64::ST2Twov2s);
|
||||
@ -2524,9 +2529,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectStore(Node, 3, AArch64::ST3Threev8b);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectStore(Node, 3, AArch64::ST3Threev16b);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectStore(Node, 3, AArch64::ST3Threev4h);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectStore(Node, 3, AArch64::ST3Threev8h);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectStore(Node, 3, AArch64::ST3Threev2s);
|
||||
@ -2543,9 +2548,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectStore(Node, 4, AArch64::ST4Fourv8b);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectStore(Node, 4, AArch64::ST4Fourv16b);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectStore(Node, 4, AArch64::ST4Fourv4h);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectStore(Node, 4, AArch64::ST4Fourv8h);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectStore(Node, 4, AArch64::ST4Fourv2s);
|
||||
@ -2560,7 +2565,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case Intrinsic::aarch64_neon_st2lane: {
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectStoreLane(Node, 2, AArch64::ST2i8);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectStoreLane(Node, 2, AArch64::ST2i16);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2573,7 +2579,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case Intrinsic::aarch64_neon_st3lane: {
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectStoreLane(Node, 3, AArch64::ST3i8);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectStoreLane(Node, 3, AArch64::ST3i16);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2586,7 +2593,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case Intrinsic::aarch64_neon_st4lane: {
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectStoreLane(Node, 4, AArch64::ST4i8);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectStoreLane(Node, 4, AArch64::ST4i16);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2603,9 +2611,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
|
||||
@ -2622,9 +2630,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
|
||||
@ -2641,9 +2649,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
|
||||
@ -2660,9 +2668,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
|
||||
@ -2679,9 +2687,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
|
||||
@ -2698,9 +2706,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
|
||||
@ -2717,9 +2725,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
|
||||
@ -2736,9 +2744,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
|
||||
@ -2755,9 +2763,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
|
||||
@ -2774,9 +2782,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
|
||||
@ -2791,7 +2799,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case AArch64ISD::LD1LANEpost: {
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2804,7 +2813,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case AArch64ISD::LD2LANEpost: {
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2817,7 +2827,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case AArch64ISD::LD3LANEpost: {
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2830,7 +2841,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
case AArch64ISD::LD4LANEpost: {
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2846,9 +2858,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
|
||||
@ -2866,9 +2878,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
|
||||
@ -2886,9 +2898,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
|
||||
@ -2906,9 +2918,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
|
||||
@ -2926,9 +2938,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
|
||||
@ -2946,9 +2958,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
|
||||
else if (VT == MVT::v16i8)
|
||||
return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
|
||||
else if (VT == MVT::v4i16)
|
||||
else if (VT == MVT::v4i16 || VT == MVT::v4f16)
|
||||
return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
|
||||
else if (VT == MVT::v8i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v8f16)
|
||||
return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
|
||||
else if (VT == MVT::v2i32 || VT == MVT::v2f32)
|
||||
return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
|
||||
@ -2964,7 +2976,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
VT = Node->getOperand(1).getValueType();
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2978,7 +2991,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
VT = Node->getOperand(1).getValueType();
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
@ -2992,7 +3006,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
|
||||
VT = Node->getOperand(1).getValueType();
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i8)
|
||||
return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16)
|
||||
else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
|
||||
VT == MVT::v8f16)
|
||||
return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
|
||||
else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
|
||||
VT == MVT::v2f32)
|
||||
|
@ -108,6 +108,7 @@ AArch64TargetLowering::AArch64TargetLowering(TargetMachine &TM)
|
||||
addDRTypeForNEON(MVT::v2i32);
|
||||
addDRTypeForNEON(MVT::v1i64);
|
||||
addDRTypeForNEON(MVT::v1f64);
|
||||
addDRTypeForNEON(MVT::v4f16);
|
||||
|
||||
addQRTypeForNEON(MVT::v4f32);
|
||||
addQRTypeForNEON(MVT::v2f64);
|
||||
@ -115,6 +116,7 @@ AArch64TargetLowering::AArch64TargetLowering(TargetMachine &TM)
|
||||
addQRTypeForNEON(MVT::v8i16);
|
||||
addQRTypeForNEON(MVT::v4i32);
|
||||
addQRTypeForNEON(MVT::v2i64);
|
||||
addQRTypeForNEON(MVT::v8f16);
|
||||
}
|
||||
|
||||
// Compute derived properties from the register classes
|
||||
@ -289,6 +291,85 @@ AArch64TargetLowering::AArch64TargetLowering(TargetMachine &TM)
|
||||
setOperationAction(ISD::FMUL, MVT::f16, Promote);
|
||||
setOperationAction(ISD::FSUB, MVT::f16, Promote);
|
||||
|
||||
// v4f16 is also a storage-only type, so promote it to v4f32 when that is
|
||||
// known to be safe.
|
||||
setOperationAction(ISD::FADD, MVT::v4f16, Promote);
|
||||
setOperationAction(ISD::FSUB, MVT::v4f16, Promote);
|
||||
setOperationAction(ISD::FMUL, MVT::v4f16, Promote);
|
||||
setOperationAction(ISD::FDIV, MVT::v4f16, Promote);
|
||||
setOperationAction(ISD::FP_EXTEND, MVT::v4f16, Promote);
|
||||
setOperationAction(ISD::FP_ROUND, MVT::v4f16, Promote);
|
||||
AddPromotedToType(ISD::FADD, MVT::v4f16, MVT::v4f32);
|
||||
AddPromotedToType(ISD::FSUB, MVT::v4f16, MVT::v4f32);
|
||||
AddPromotedToType(ISD::FMUL, MVT::v4f16, MVT::v4f32);
|
||||
AddPromotedToType(ISD::FDIV, MVT::v4f16, MVT::v4f32);
|
||||
AddPromotedToType(ISD::FP_EXTEND, MVT::v4f16, MVT::v4f32);
|
||||
AddPromotedToType(ISD::FP_ROUND, MVT::v4f16, MVT::v4f32);
|
||||
|
||||
// Expand all other v4f16 operations.
|
||||
// FIXME: We could generate better code by promoting some operations to
|
||||
// a pair of v4f32s
|
||||
setOperationAction(ISD::FABS, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FCEIL, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FCOPYSIGN, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FCOS, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FFLOOR, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FMA, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FNEARBYINT, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FNEG, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FPOW, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FPOWI, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FREM, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FROUND, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FRINT, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FSIN, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FSINCOS, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FSQRT, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FTRUNC, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::SETCC, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::BR_CC, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::SELECT, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::SELECT_CC, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FEXP, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FEXP2, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FLOG, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FLOG2, MVT::v4f16, Expand);
|
||||
setOperationAction(ISD::FLOG10, MVT::v4f16, Expand);
|
||||
|
||||
|
||||
// v8f16 is also a storage-only type, so expand it.
|
||||
setOperationAction(ISD::FABS, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FADD, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FCEIL, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FCOS, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FFLOOR, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FMA, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FNEARBYINT, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FNEG, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FPOW, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FPOWI, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FREM, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FROUND, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FRINT, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FSIN, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FSINCOS, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FSQRT, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FTRUNC, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::SETCC, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::BR_CC, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::SELECT, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::SELECT_CC, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FP_EXTEND, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FEXP, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FEXP2, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FLOG, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FLOG2, MVT::v8f16, Expand);
|
||||
setOperationAction(ISD::FLOG10, MVT::v8f16, Expand);
|
||||
|
||||
// AArch64 has implementations of a lot of rounding-like FP operations.
|
||||
static MVT RoundingTypes[] = { MVT::f32, MVT::f64};
|
||||
for (unsigned I = 0; I < array_lengthof(RoundingTypes); ++I) {
|
||||
@ -1416,7 +1497,10 @@ static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
|
||||
|
||||
if (VT.getSizeInBits() > InVT.getSizeInBits()) {
|
||||
SDLoc dl(Op);
|
||||
SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v2f64, Op.getOperand(0));
|
||||
MVT ExtVT =
|
||||
MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()),
|
||||
VT.getVectorNumElements());
|
||||
SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0));
|
||||
return DAG.getNode(Op.getOpcode(), dl, VT, Ext);
|
||||
}
|
||||
|
||||
@ -4687,7 +4771,8 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
|
||||
VT.getVectorElementType() == MVT::f32)
|
||||
return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS);
|
||||
// vrev <4 x i16> -> REV32
|
||||
if (VT.getVectorElementType() == MVT::i16)
|
||||
if (VT.getVectorElementType() == MVT::i16 ||
|
||||
VT.getVectorElementType() == MVT::f16)
|
||||
return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS);
|
||||
// vrev <4 x i8> -> REV16
|
||||
assert(VT.getVectorElementType() == MVT::i8);
|
||||
@ -4807,7 +4892,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
|
||||
static unsigned getDUPLANEOp(EVT EltType) {
|
||||
if (EltType == MVT::i8)
|
||||
return AArch64ISD::DUPLANE8;
|
||||
if (EltType == MVT::i16)
|
||||
if (EltType == MVT::i16 || EltType == MVT::f16)
|
||||
return AArch64ISD::DUPLANE16;
|
||||
if (EltType == MVT::i32 || EltType == MVT::f32)
|
||||
return AArch64ISD::DUPLANE32;
|
||||
@ -4937,7 +5022,8 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
|
||||
SDValue SrcLaneV = DAG.getConstant(SrcLane, MVT::i64);
|
||||
|
||||
EVT ScalarVT = VT.getVectorElementType();
|
||||
if (ScalarVT.getSizeInBits() < 32)
|
||||
|
||||
if (ScalarVT.getSizeInBits() < 32 && ScalarVT.isInteger())
|
||||
ScalarVT = MVT::i32;
|
||||
|
||||
return DAG.getNode(
|
||||
@ -5696,11 +5782,12 @@ SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
|
||||
|
||||
// Insertion/extraction are legal for V128 types.
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
|
||||
VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64)
|
||||
VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
|
||||
VT == MVT::v8f16)
|
||||
return Op;
|
||||
|
||||
if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
|
||||
VT != MVT::v1i64 && VT != MVT::v2f32)
|
||||
VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16)
|
||||
return SDValue();
|
||||
|
||||
// For V64 types, we perform insertion by expanding the value
|
||||
@ -5729,11 +5816,12 @@ AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
|
||||
|
||||
// Insertion/extraction are legal for V128 types.
|
||||
if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
|
||||
VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64)
|
||||
VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
|
||||
VT == MVT::v8f16)
|
||||
return Op;
|
||||
|
||||
if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
|
||||
VT != MVT::v1i64 && VT != MVT::v2f32)
|
||||
VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16)
|
||||
return SDValue();
|
||||
|
||||
// For V64 types, we perform extraction by expanding the value
|
||||
|
@ -5262,6 +5262,10 @@ multiclass SIMDZipVector<bits<3>opc, string asm,
|
||||
def v2i64 : BaseSIMDZipVector<0b111, opc, V128,
|
||||
asm, ".2d", OpNode, v2i64>;
|
||||
|
||||
def : Pat<(v4f16 (OpNode V64:$Rn, V64:$Rm)),
|
||||
(!cast<Instruction>(NAME#"v4i16") V64:$Rn, V64:$Rm)>;
|
||||
def : Pat<(v8f16 (OpNode V128:$Rn, V128:$Rm)),
|
||||
(!cast<Instruction>(NAME#"v8i16") V128:$Rn, V128:$Rm)>;
|
||||
def : Pat<(v2f32 (OpNode V64:$Rn, V64:$Rm)),
|
||||
(!cast<Instruction>(NAME#"v2i32") V64:$Rn, V64:$Rm)>;
|
||||
def : Pat<(v4f32 (OpNode V128:$Rn, V128:$Rm)),
|
||||
|
@ -1174,6 +1174,9 @@ defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v16i8, LDRBroW, LDRBroX, bsub>;
|
||||
defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
|
||||
defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
|
||||
|
||||
defm : ScalToVecROLoadPat<ro16, load, i32, v4f16, LDRHroW, LDRHroX, hsub>;
|
||||
defm : ScalToVecROLoadPat<ro16, load, i32, v8f16, LDRHroW, LDRHroX, hsub>;
|
||||
|
||||
defm : ScalToVecROLoadPat<ro32, load, i32, v2i32, LDRSroW, LDRSroX, ssub>;
|
||||
defm : ScalToVecROLoadPat<ro32, load, i32, v4i32, LDRSroW, LDRSroX, ssub>;
|
||||
|
||||
@ -1214,6 +1217,7 @@ let Predicates = [IsLE] in {
|
||||
defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
|
||||
defm : VecROLoadPat<ro64, v8i8, LDRDroW, LDRDroX>;
|
||||
defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
|
||||
defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
|
||||
}
|
||||
|
||||
defm : VecROLoadPat<ro64, v1i64, LDRDroW, LDRDroX>;
|
||||
@ -1227,6 +1231,7 @@ let Predicates = [IsLE] in {
|
||||
defm : VecROLoadPat<ro128, v4i32, LDRQroW, LDRQroX>;
|
||||
defm : VecROLoadPat<ro128, v4f32, LDRQroW, LDRQroX>;
|
||||
defm : VecROLoadPat<ro128, v8i16, LDRQroW, LDRQroX>;
|
||||
defm : VecROLoadPat<ro128, v8f16, LDRQroW, LDRQroX>;
|
||||
defm : VecROLoadPat<ro128, v16i8, LDRQroW, LDRQroX>;
|
||||
}
|
||||
} // AddedComplexity = 10
|
||||
@ -1356,6 +1361,8 @@ let Predicates = [IsLE] in {
|
||||
(LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
|
||||
def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
|
||||
(LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
|
||||
def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
|
||||
(LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
|
||||
}
|
||||
def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
|
||||
(LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
|
||||
@ -1377,6 +1384,8 @@ let Predicates = [IsLE] in {
|
||||
(LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
|
||||
def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
|
||||
(LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
|
||||
def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
|
||||
(LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
|
||||
}
|
||||
def : Pat<(f128 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
|
||||
(LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
|
||||
@ -1513,6 +1522,8 @@ let Predicates = [IsLE] in {
|
||||
(LDURDi GPR64sp:$Rn, simm9:$offset)>;
|
||||
def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
|
||||
(LDURDi GPR64sp:$Rn, simm9:$offset)>;
|
||||
def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
|
||||
(LDURDi GPR64sp:$Rn, simm9:$offset)>;
|
||||
}
|
||||
def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
|
||||
(LDURDi GPR64sp:$Rn, simm9:$offset)>;
|
||||
@ -1533,6 +1544,8 @@ let Predicates = [IsLE] in {
|
||||
(LDURQi GPR64sp:$Rn, simm9:$offset)>;
|
||||
def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
|
||||
(LDURQi GPR64sp:$Rn, simm9:$offset)>;
|
||||
def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
|
||||
(LDURQi GPR64sp:$Rn, simm9:$offset)>;
|
||||
}
|
||||
|
||||
// anyext -> zext
|
||||
@ -1829,6 +1842,7 @@ let Predicates = [IsLE] in {
|
||||
defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
|
||||
defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
|
||||
defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
|
||||
defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
|
||||
}
|
||||
|
||||
defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
|
||||
@ -1843,6 +1857,7 @@ let Predicates = [IsLE] in {
|
||||
defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
|
||||
defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
|
||||
defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
|
||||
defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
|
||||
}
|
||||
} // AddedComplexity = 10
|
||||
|
||||
@ -1893,6 +1908,9 @@ let Predicates = [IsLE] in {
|
||||
def : Pat<(store (v2i32 FPR64:$Rt),
|
||||
(am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
|
||||
(STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
|
||||
def : Pat<(store (v4f16 FPR64:$Rt),
|
||||
(am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
|
||||
(STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
|
||||
}
|
||||
def : Pat<(store (v1f64 FPR64:$Rt),
|
||||
(am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
|
||||
@ -1922,6 +1940,9 @@ let Predicates = [IsLE] in {
|
||||
def : Pat<(store (v2i64 FPR128:$Rt),
|
||||
(am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
|
||||
(STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
|
||||
def : Pat<(store (v8f16 FPR128:$Rt),
|
||||
(am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
|
||||
(STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
|
||||
}
|
||||
def : Pat<(store (f128 FPR128:$Rt),
|
||||
(am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
|
||||
@ -1984,6 +2005,9 @@ let Predicates = [IsLE] in {
|
||||
def : Pat<(store (v2i32 FPR64:$Rt),
|
||||
(am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
|
||||
(STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
|
||||
def : Pat<(store (v4f16 FPR64:$Rt),
|
||||
(am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
|
||||
(STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
|
||||
}
|
||||
def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
|
||||
(STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
|
||||
@ -2014,6 +2038,9 @@ let Predicates = [IsLE] in {
|
||||
def : Pat<(store (v2f64 FPR128:$Rt),
|
||||
(am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
|
||||
(STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
|
||||
def : Pat<(store (v8f16 FPR128:$Rt),
|
||||
(am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
|
||||
(STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
|
||||
}
|
||||
|
||||
// unscaled i64 truncating stores
|
||||
@ -2090,6 +2117,8 @@ def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
|
||||
def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
@ -2103,6 +2132,8 @@ def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
|
||||
//---
|
||||
// (immediate post-indexed)
|
||||
@ -2140,6 +2171,8 @@ def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
|
||||
def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
@ -2153,6 +2186,8 @@ def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
|
||||
(STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// Load/store exclusive instructions.
|
||||
@ -2413,6 +2448,11 @@ def : Pat<(v2f64 (fextend (v2f32 (extract_subvector (v4f32 V128:$Rn),
|
||||
(i64 2))))),
|
||||
(FCVTLv4i32 V128:$Rn)>;
|
||||
|
||||
def : Pat<(v4f32 (fextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
|
||||
def : Pat<(v4f32 (fextend (v4f16 (extract_subvector (v8f16 V128:$Rn),
|
||||
(i64 4))))),
|
||||
(FCVTLv8i16 V128:$Rn)>;
|
||||
|
||||
defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
|
||||
defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
|
||||
defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
|
||||
@ -2424,6 +2464,7 @@ def : Pat<(concat_vectors V64:$Rd,
|
||||
(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
|
||||
(FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
|
||||
def : Pat<(v2f32 (fround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
|
||||
def : Pat<(v4f16 (fround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
|
||||
def : Pat<(concat_vectors V64:$Rd, (v2f32 (fround (v2f64 V128:$Rn)))),
|
||||
(FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
|
||||
defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
|
||||
@ -2506,6 +2547,10 @@ defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>
|
||||
defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
|
||||
defm XTN : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
|
||||
|
||||
def : Pat<(v4f16 (AArch64rev32 V64:$Rn)), (REV32v4i16 V64:$Rn)>;
|
||||
def : Pat<(v4f16 (AArch64rev64 V64:$Rn)), (REV64v4i16 V64:$Rn)>;
|
||||
def : Pat<(v8f16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
|
||||
def : Pat<(v8f16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
|
||||
def : Pat<(v2f32 (AArch64rev64 V64:$Rn)), (REV64v2i32 V64:$Rn)>;
|
||||
def : Pat<(v4f32 (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
|
||||
|
||||
@ -3184,6 +3229,10 @@ def : Pat<(v2i64 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
|
||||
(EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
|
||||
def : Pat<(v2f64 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
|
||||
(EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
|
||||
def : Pat<(v4f16 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
|
||||
(EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
|
||||
def : Pat<(v8f16 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
|
||||
(EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
|
||||
|
||||
// We use EXT to handle extract_subvector to copy the upper 64-bits of a
|
||||
// 128-bit vector.
|
||||
@ -3195,6 +3244,8 @@ def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 2))),
|
||||
(EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
|
||||
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 1))),
|
||||
(EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
|
||||
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 4))),
|
||||
(EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
|
||||
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 2))),
|
||||
(EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
|
||||
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 1))),
|
||||
@ -3307,6 +3358,19 @@ def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
|
||||
(v2f64 (DUPv2i64lane
|
||||
(INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
|
||||
(i64 0)))>;
|
||||
def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
|
||||
(v4f16 (DUPv4i16lane
|
||||
(INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
|
||||
(i64 0)))>;
|
||||
def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
|
||||
(v8f16 (DUPv8i16lane
|
||||
(INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
|
||||
(i64 0)))>;
|
||||
|
||||
def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
|
||||
(DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
|
||||
def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
|
||||
(DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
|
||||
|
||||
def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
|
||||
(DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
|
||||
@ -3428,6 +3492,23 @@ def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
|
||||
def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
|
||||
(INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
|
||||
|
||||
def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
|
||||
(f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
|
||||
(EXTRACT_SUBREG
|
||||
(INSvi16lane
|
||||
(v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
|
||||
VectorIndexS:$imm,
|
||||
(v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
|
||||
(i64 0)),
|
||||
dsub)>;
|
||||
|
||||
def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
|
||||
(f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
|
||||
(INSvi16lane
|
||||
V128:$Rn, VectorIndexH:$imm,
|
||||
(v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
|
||||
(i64 0))>;
|
||||
|
||||
def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
|
||||
(f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
|
||||
(EXTRACT_SUBREG
|
||||
@ -3508,6 +3589,7 @@ multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
|
||||
dsub)>;
|
||||
}
|
||||
|
||||
defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
|
||||
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
|
||||
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
|
||||
defm : Neon_INS_elt_pattern<v16i8, v8i8, i32, INSvi8lane>;
|
||||
@ -3523,6 +3605,8 @@ def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
|
||||
(f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
|
||||
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
|
||||
(f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
|
||||
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
|
||||
(f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
|
||||
def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
|
||||
(f64 (EXTRACT_SUBREG
|
||||
(INSvi64lane (v2f64 (IMPLICIT_DEF)), 0,
|
||||
@ -3533,6 +3617,11 @@ def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
|
||||
(INSvi32lane (v4f32 (IMPLICIT_DEF)), 0,
|
||||
V128:$Rn, VectorIndexS:$idx),
|
||||
ssub))>;
|
||||
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
|
||||
(f16 (EXTRACT_SUBREG
|
||||
(INSvi16lane (v8f16 (IMPLICIT_DEF)), 0,
|
||||
V128:$Rn, VectorIndexH:$idx),
|
||||
hsub))>;
|
||||
|
||||
// All concat_vectors operations are canonicalised to act on i64 vectors for
|
||||
// AArch64. In the general case we need an instruction, which had just as well be
|
||||
@ -3547,6 +3636,7 @@ def : ConcatPat<v2f64, v1f64>;
|
||||
def : ConcatPat<v4i32, v2i32>;
|
||||
def : ConcatPat<v4f32, v2f32>;
|
||||
def : ConcatPat<v8i16, v4i16>;
|
||||
def : ConcatPat<v8f16, v4f16>;
|
||||
def : ConcatPat<v16i8, v8i8>;
|
||||
|
||||
// If the high lanes are undef, though, we can just ignore them:
|
||||
@ -4564,6 +4654,10 @@ def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
|
||||
(LD1Rv2d GPR64sp:$Rn)>;
|
||||
def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
|
||||
(LD1Rv1d GPR64sp:$Rn)>;
|
||||
def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
|
||||
(LD1Rv4h GPR64sp:$Rn)>;
|
||||
def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
|
||||
(LD1Rv8h GPR64sp:$Rn)>;
|
||||
|
||||
class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
|
||||
ValueType VTy, ValueType STy, Instruction LD1>
|
||||
@ -4577,6 +4671,7 @@ def : Ld1Lane128Pat<load, VectorIndexS, v4i32, i32, LD1i32>;
|
||||
def : Ld1Lane128Pat<load, VectorIndexS, v4f32, f32, LD1i32>;
|
||||
def : Ld1Lane128Pat<load, VectorIndexD, v2i64, i64, LD1i64>;
|
||||
def : Ld1Lane128Pat<load, VectorIndexD, v2f64, f64, LD1i64>;
|
||||
def : Ld1Lane128Pat<load, VectorIndexH, v8f16, f16, LD1i16>;
|
||||
|
||||
class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
|
||||
ValueType VTy, ValueType STy, Instruction LD1>
|
||||
@ -4591,6 +4686,7 @@ def : Ld1Lane64Pat<extloadi8, VectorIndexB, v8i8, i32, LD1i8>;
|
||||
def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
|
||||
def : Ld1Lane64Pat<load, VectorIndexS, v2i32, i32, LD1i32>;
|
||||
def : Ld1Lane64Pat<load, VectorIndexS, v2f32, f32, LD1i32>;
|
||||
def : Ld1Lane64Pat<load, VectorIndexH, v4f16, f16, LD1i16>;
|
||||
|
||||
|
||||
defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
|
||||
@ -4618,6 +4714,7 @@ def : St1Lane128Pat<store, VectorIndexS, v4i32, i32, ST1i32>;
|
||||
def : St1Lane128Pat<store, VectorIndexS, v4f32, f32, ST1i32>;
|
||||
def : St1Lane128Pat<store, VectorIndexD, v2i64, i64, ST1i64>;
|
||||
def : St1Lane128Pat<store, VectorIndexD, v2f64, f64, ST1i64>;
|
||||
def : St1Lane128Pat<store, VectorIndexH, v8f16, f16, ST1i16>;
|
||||
|
||||
let AddedComplexity = 15 in
|
||||
class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
|
||||
@ -4632,6 +4729,7 @@ def : St1Lane64Pat<truncstorei8, VectorIndexB, v8i8, i32, ST1i8>;
|
||||
def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
|
||||
def : St1Lane64Pat<store, VectorIndexS, v2i32, i32, ST1i32>;
|
||||
def : St1Lane64Pat<store, VectorIndexS, v2f32, f32, ST1i32>;
|
||||
def : St1Lane64Pat<store, VectorIndexH, v4f16, f16, ST1i16>;
|
||||
|
||||
multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
|
||||
ValueType VTy, ValueType STy, Instruction ST1,
|
||||
@ -4656,6 +4754,7 @@ defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
|
||||
defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
|
||||
defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
|
||||
defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
|
||||
defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
|
||||
|
||||
multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
|
||||
ValueType VTy, ValueType STy, Instruction ST1,
|
||||
@ -4679,6 +4778,7 @@ defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
|
||||
defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
|
||||
defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
|
||||
defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
|
||||
defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
|
||||
|
||||
let mayStore = 1, neverHasSideEffects = 1 in {
|
||||
defm ST2 : SIMDStSingleB<1, 0b000, "st2", VecListTwob, GPR64pi2>;
|
||||
@ -4861,6 +4961,7 @@ let Predicates = [IsLE] in {
|
||||
def : Pat<(v8i8 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
|
||||
def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
|
||||
def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
|
||||
def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
|
||||
def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
|
||||
|
||||
def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
|
||||
@ -4869,6 +4970,8 @@ def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
|
||||
(COPY_TO_REGCLASS V64:$Vn, GPR64)>;
|
||||
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
|
||||
(COPY_TO_REGCLASS V64:$Vn, GPR64)>;
|
||||
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
|
||||
(COPY_TO_REGCLASS V64:$Vn, GPR64)>;
|
||||
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
|
||||
(COPY_TO_REGCLASS V64:$Vn, GPR64)>;
|
||||
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
|
||||
@ -4881,6 +4984,8 @@ def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
|
||||
(REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
|
||||
def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
|
||||
(REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
|
||||
def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
|
||||
(REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
|
||||
def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
|
||||
(REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
|
||||
|
||||
@ -4890,6 +4995,8 @@ def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
|
||||
(REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
|
||||
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
|
||||
(REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
|
||||
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
|
||||
(REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
|
||||
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
|
||||
(REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
|
||||
}
|
||||
@ -4918,6 +5025,7 @@ let Predicates = [IsLE] in {
|
||||
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
|
||||
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
|
||||
def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
|
||||
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
|
||||
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
@ -4927,6 +5035,8 @@ def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
|
||||
(v1i64 (REV64v4i16 FPR64:$src))>;
|
||||
def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))),
|
||||
(v1i64 (REV64v8i8 FPR64:$src))>;
|
||||
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
|
||||
(v1i64 (REV64v4i16 FPR64:$src))>;
|
||||
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
|
||||
(v1i64 (REV64v2i32 FPR64:$src))>;
|
||||
}
|
||||
@ -4939,6 +5049,7 @@ def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
|
||||
def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
|
||||
def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
|
||||
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
|
||||
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
|
||||
@ -4951,6 +5062,8 @@ def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))),
|
||||
(v2i32 (REV64v2i32 FPR64:$src))>;
|
||||
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
|
||||
(v2i32 (REV64v2i32 FPR64:$src))>;
|
||||
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
|
||||
(v2i32 (REV64v4i16 FPR64:$src))>;
|
||||
}
|
||||
def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
|
||||
|
||||
@ -4959,6 +5072,7 @@ def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
|
||||
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
|
||||
def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
|
||||
def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
|
||||
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
|
||||
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
|
||||
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
|
||||
}
|
||||
@ -4971,12 +5085,42 @@ def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))),
|
||||
(v4i16 (REV16v8i8 FPR64:$src))>;
|
||||
def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))),
|
||||
(v4i16 (REV64v4i16 FPR64:$src))>;
|
||||
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))),
|
||||
(v4i16 (REV32v4i16 FPR64:$src))>;
|
||||
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
|
||||
(v4i16 (REV32v4i16 FPR64:$src))>;
|
||||
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
|
||||
(v4i16 (REV64v4i16 FPR64:$src))>;
|
||||
}
|
||||
|
||||
let Predicates = [IsLE] in {
|
||||
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
|
||||
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
|
||||
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
|
||||
def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
|
||||
def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
|
||||
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
|
||||
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
|
||||
(v4f16 (REV64v4i16 FPR64:$src))>;
|
||||
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
|
||||
(v4f16 (REV64v4i16 FPR64:$src))>;
|
||||
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))),
|
||||
(v4f16 (REV64v4i16 FPR64:$src))>;
|
||||
def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))),
|
||||
(v4f16 (REV16v8i8 FPR64:$src))>;
|
||||
def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))),
|
||||
(v4f16 (REV64v4i16 FPR64:$src))>;
|
||||
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
|
||||
(v4f16 (REV64v4i16 FPR64:$src))>;
|
||||
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
|
||||
(v4f16 (REV64v4i16 FPR64:$src))>;
|
||||
}
|
||||
|
||||
|
||||
|
||||
let Predicates = [IsLE] in {
|
||||
def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>;
|
||||
def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
|
||||
@ -4984,6 +5128,7 @@ def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
|
||||
def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
|
||||
def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
|
||||
def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))), (v8i8 FPR64:$src)>;
|
||||
def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))), (v8i8 FPR64:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))),
|
||||
@ -4998,6 +5143,8 @@ def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))),
|
||||
(v8i8 (REV32v8i8 FPR64:$src))>;
|
||||
def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))),
|
||||
(v8i8 (REV64v8i8 FPR64:$src))>;
|
||||
def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))),
|
||||
(v8i8 (REV16v8i8 FPR64:$src))>;
|
||||
}
|
||||
|
||||
let Predicates = [IsLE] in {
|
||||
@ -5005,6 +5152,7 @@ def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))), (f64 FPR64:$src)>;
|
||||
def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))), (f64 FPR64:$src)>;
|
||||
def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))), (f64 FPR64:$src)>;
|
||||
def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))), (f64 FPR64:$src)>;
|
||||
def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))), (f64 FPR64:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))),
|
||||
@ -5015,6 +5163,8 @@ def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))),
|
||||
(f64 (REV64v2i32 FPR64:$src))>;
|
||||
def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))),
|
||||
(f64 (REV64v8i8 FPR64:$src))>;
|
||||
def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))),
|
||||
(f64 (REV64v4i16 FPR64:$src))>;
|
||||
}
|
||||
def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
|
||||
def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
|
||||
@ -5024,6 +5174,7 @@ def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
|
||||
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
|
||||
def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))), (v1f64 FPR64:$src)>;
|
||||
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
|
||||
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
|
||||
@ -5034,6 +5185,8 @@ def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))),
|
||||
(v1f64 (REV64v8i8 FPR64:$src))>;
|
||||
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
|
||||
(v1f64 (REV64v2i32 FPR64:$src))>;
|
||||
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
|
||||
(v1f64 (REV64v4i16 FPR64:$src))>;
|
||||
}
|
||||
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
|
||||
def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
|
||||
@ -5044,6 +5197,7 @@ def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
|
||||
def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
|
||||
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
|
||||
def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
|
||||
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
|
||||
@ -5056,6 +5210,8 @@ def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
|
||||
(v2f32 (REV64v2i32 FPR64:$src))>;
|
||||
def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
|
||||
(v2f32 (REV64v2i32 FPR64:$src))>;
|
||||
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
|
||||
(v2f32 (REV64v4i16 FPR64:$src))>;
|
||||
}
|
||||
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
|
||||
|
||||
@ -5065,6 +5221,7 @@ def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
|
||||
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
|
||||
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
|
||||
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
|
||||
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
|
||||
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
@ -5136,6 +5293,7 @@ def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
|
||||
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
|
||||
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
|
||||
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
|
||||
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
|
||||
@ -5149,6 +5307,8 @@ def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
|
||||
(v2i64 (REV64v16i8 FPR128:$src))>;
|
||||
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
|
||||
(v2i64 (REV64v4i32 FPR128:$src))>;
|
||||
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
|
||||
(v2i64 (REV64v8i16 FPR128:$src))>;
|
||||
}
|
||||
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
|
||||
|
||||
@ -5158,6 +5318,7 @@ def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
|
||||
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
|
||||
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
|
||||
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
|
||||
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
|
||||
@ -5172,6 +5333,8 @@ def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
|
||||
(v4i32 (REV32v16i8 FPR128:$src))>;
|
||||
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
|
||||
(v4i32 (REV64v4i32 FPR128:$src))>;
|
||||
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
|
||||
(v4i32 (REV32v8i16 FPR128:$src))>;
|
||||
}
|
||||
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
|
||||
|
||||
@ -5182,6 +5345,7 @@ def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
|
||||
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
|
||||
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
|
||||
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
|
||||
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
|
||||
@ -5198,6 +5362,36 @@ def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
|
||||
(v8i16 (REV64v8i16 FPR128:$src))>;
|
||||
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
|
||||
(v8i16 (REV32v8i16 FPR128:$src))>;
|
||||
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))),
|
||||
(v8i16 (REV32v8i16 FPR128:$src))>;
|
||||
}
|
||||
|
||||
let Predicates = [IsLE] in {
|
||||
def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))), (v8f16 FPR128:$src)>;
|
||||
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
|
||||
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
|
||||
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
|
||||
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
|
||||
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
|
||||
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))),
|
||||
(v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
|
||||
(REV64v8i16 FPR128:$src),
|
||||
(i32 8)))>;
|
||||
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
|
||||
(v8f16 (REV64v8i16 FPR128:$src))>;
|
||||
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
|
||||
(v8f16 (REV32v8i16 FPR128:$src))>;
|
||||
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))),
|
||||
(v8f16 (REV64v8i16 FPR128:$src))>;
|
||||
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
|
||||
(v8f16 (REV16v16i8 FPR128:$src))>;
|
||||
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
|
||||
(v8f16 (REV64v8i16 FPR128:$src))>;
|
||||
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
|
||||
(v8f16 (REV32v8i16 FPR128:$src))>;
|
||||
}
|
||||
|
||||
let Predicates = [IsLE] in {
|
||||
@ -5207,6 +5401,7 @@ def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
|
||||
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
|
||||
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
|
||||
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
|
||||
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
|
||||
}
|
||||
let Predicates = [IsBE] in {
|
||||
def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
|
||||
@ -5223,6 +5418,8 @@ def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
|
||||
(v16i8 (REV64v16i8 FPR128:$src))>;
|
||||
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
|
||||
(v16i8 (REV32v16i8 FPR128:$src))>;
|
||||
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
|
||||
(v16i8 (REV16v16i8 FPR128:$src))>;
|
||||
}
|
||||
|
||||
def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
|
||||
@ -5246,6 +5443,8 @@ def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (i32 0)),
|
||||
(INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
|
||||
def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (i32 0)),
|
||||
(INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
|
||||
def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (i32 0)),
|
||||
(INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
|
||||
def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (i32 0)),
|
||||
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
|
||||
|
||||
|
@ -390,13 +390,14 @@ def FPR16 : RegisterClass<"AArch64", [f16], 16, (sequence "H%u", 0, 31)> {
|
||||
}
|
||||
def FPR32 : RegisterClass<"AArch64", [f32, i32], 32,(sequence "S%u", 0, 31)>;
|
||||
def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
|
||||
v1i64],
|
||||
v1i64, v4f16],
|
||||
64, (sequence "D%u", 0, 31)>;
|
||||
// We don't (yet) have an f128 legal type, so don't use that here. We
|
||||
// normalize 128-bit vectors to v2f64 for arg passing and such, so use
|
||||
// that here.
|
||||
def FPR128 : RegisterClass<"AArch64",
|
||||
[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128],
|
||||
[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
|
||||
v8f16],
|
||||
128, (sequence "Q%u", 0, 31)>;
|
||||
|
||||
// The lower 16 vector registers. Some instructions can only take registers
|
||||
|
@ -113,7 +113,7 @@ entry:
|
||||
; Check that f16 can be passed and returned (ACLE 2.0 extension)
|
||||
define half @test_half(float, half %arg) {
|
||||
; CHECK-LABEL: test_half:
|
||||
; CHECK: mov v0.16b, v{{[0-9]+}}.16b
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
ret half %arg;
|
||||
}
|
||||
|
||||
@ -123,3 +123,31 @@ define half @test_half_const() {
|
||||
; CHECK: ldr h0, [x{{[0-9]+}}, :lo12:{{.*}}]
|
||||
ret half 0xH4248
|
||||
}
|
||||
|
||||
; Check that v4f16 can be passed and returned in registers
|
||||
define <4 x half> @test_v4_half_register(float, <4 x half> %arg) {
|
||||
; CHECK-LABEL: test_v4_half_register:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
ret <4 x half> %arg;
|
||||
}
|
||||
|
||||
; Check that v8f16 can be passed and returned in registers
|
||||
define <8 x half> @test_v8_half_register(float, <8 x half> %arg) {
|
||||
; CHECK-LABEL: test_v8_half_register:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
ret <8 x half> %arg;
|
||||
}
|
||||
|
||||
; Check that v4f16 can be passed and returned on the stack
|
||||
define <4 x half> @test_v4_half_stack([8 x <2 x double>], <4 x half> %arg) {
|
||||
; CHECK-LABEL: test_v4_half_stack:
|
||||
; CHECK: ldr d0, [sp]
|
||||
ret <4 x half> %arg;
|
||||
}
|
||||
|
||||
; Check that v8f16 can be passed and returned on the stack
|
||||
define <8 x half> @test_v8_half_stack([8 x <2 x double>], <8 x half> %arg) {
|
||||
; CHECK-LABEL: test_v8_half_stack:
|
||||
; CHECK: ldr q0, [sp]
|
||||
ret <8 x half> %arg;
|
||||
}
|
||||
|
@ -3,10 +3,10 @@
|
||||
define half @add_h(half %a, half %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: add_h:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt
|
||||
; CHECK: fadd
|
||||
; CHECK: fcvt
|
||||
; CHECK-DAG: fcvt [[OP1:s[0-9]+]], h0
|
||||
; CHECK-DAG: fcvt [[OP2:s[0-9]+]], h1
|
||||
; CHECK: fadd [[RES:s[0-9]+]], [[OP1]], [[OP2]]
|
||||
; CHECK: fcvt h0, [[RES]]
|
||||
%0 = fadd half %a, %b
|
||||
ret half %0
|
||||
}
|
||||
@ -15,10 +15,10 @@ entry:
|
||||
define half @sub_h(half %a, half %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: sub_h:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt
|
||||
; CHECK: fsub
|
||||
; CHECK: fcvt
|
||||
; CHECK-DAG: fcvt [[OP1:s[0-9]+]], h0
|
||||
; CHECK-DAG: fcvt [[OP2:s[0-9]+]], h1
|
||||
; CHECK: fsub [[RES:s[0-9]+]], [[OP1]], [[OP2]]
|
||||
; CHECK: fcvt h0, [[RES]]
|
||||
%0 = fsub half %a, %b
|
||||
ret half %0
|
||||
}
|
||||
@ -27,10 +27,10 @@ entry:
|
||||
define half @mul_h(half %a, half %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: mul_h:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt
|
||||
; CHECK: fmul
|
||||
; CHECK: fcvt
|
||||
; CHECK-DAG: fcvt [[OP1:s[0-9]+]], h0
|
||||
; CHECK-DAG: fcvt [[OP2:s[0-9]+]], h1
|
||||
; CHECK: fmul [[RES:s[0-9]+]], [[OP1]], [[OP2]]
|
||||
; CHECK: fcvt h0, [[RES]]
|
||||
%0 = fmul half %a, %b
|
||||
ret half %0
|
||||
}
|
||||
@ -39,10 +39,10 @@ entry:
|
||||
define half @div_h(half %a, half %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: div_h:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt
|
||||
; CHECK: fdiv
|
||||
; CHECK: fcvt
|
||||
; CHECK-DAG: fcvt [[OP1:s[0-9]+]], h0
|
||||
; CHECK-DAG: fcvt [[OP2:s[0-9]+]], h1
|
||||
; CHECK: fdiv [[RES:s[0-9]+]], [[OP1]], [[OP2]]
|
||||
; CHECK: fcvt h0, [[RES]]
|
||||
%0 = fdiv half %a, %b
|
||||
ret half %0
|
||||
}
|
||||
@ -51,7 +51,7 @@ entry:
|
||||
define half @load_h(half* %a) {
|
||||
entry:
|
||||
; CHECK-LABEL: load_h:
|
||||
; CHECK: ldr h
|
||||
; CHECK: ldr h0, [x0]
|
||||
%0 = load half* %a, align 4
|
||||
ret half %0
|
||||
}
|
||||
@ -60,42 +60,42 @@ entry:
|
||||
define void @store_h(half* %a, half %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: store_h:
|
||||
; CHECK: str h
|
||||
; CHECK: str h0, [x0]
|
||||
store half %b, half* %a, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
define half @s_to_h(float %a) {
|
||||
; CHECK-LABEL: s_to_h:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt h0, s0
|
||||
%1 = fptrunc float %a to half
|
||||
ret half %1
|
||||
}
|
||||
|
||||
define half @d_to_h(double %a) {
|
||||
; CHECK-LABEL: d_to_h:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt h0, d0
|
||||
%1 = fptrunc double %a to half
|
||||
ret half %1
|
||||
}
|
||||
|
||||
define float @h_to_s(half %a) {
|
||||
; CHECK-LABEL: h_to_s:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt s0, h0
|
||||
%1 = fpext half %a to float
|
||||
ret float %1
|
||||
}
|
||||
|
||||
define double @h_to_d(half %a) {
|
||||
; CHECK-LABEL: h_to_d:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt d0, h0
|
||||
%1 = fpext half %a to double
|
||||
ret double %1
|
||||
}
|
||||
|
||||
define half @bitcast_i_to_h(i16 %a) {
|
||||
; CHECK-LABEL: bitcast_i_to_h:
|
||||
; CHECK: fmov
|
||||
; CHECK: fmov s0, w0
|
||||
%1 = bitcast i16 %a to half
|
||||
ret half %1
|
||||
}
|
||||
@ -103,7 +103,7 @@ define half @bitcast_i_to_h(i16 %a) {
|
||||
|
||||
define i16 @bitcast_h_to_i(half %a) {
|
||||
; CHECK-LABEL: bitcast_h_to_i:
|
||||
; CHECK: fmov
|
||||
; CHECK: fmov w0, s0
|
||||
%1 = bitcast half %a to i16
|
||||
ret i16 %1
|
||||
}
|
||||
|
122
test/CodeGen/AArch64/fp16-v4-instructions.ll
Normal file
122
test/CodeGen/AArch64/fp16-v4-instructions.ll
Normal file
@ -0,0 +1,122 @@
|
||||
; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
|
||||
|
||||
define <4 x half> @add_h(<4 x half> %a, <4 x half> %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: add_h:
|
||||
; CHECK-DAG: fcvtl [[OP1:v[0-9]+\.4s]], v0.4h
|
||||
; CHECK-DAG: fcvtl [[OP2:v[0-9]+\.4s]], v1.4h
|
||||
; CHECK: fadd [[RES:v[0-9]+.4s]], [[OP1]], [[OP2]]
|
||||
; CHECK: fcvtn v0.4h, [[RES]]
|
||||
%0 = fadd <4 x half> %a, %b
|
||||
ret <4 x half> %0
|
||||
}
|
||||
|
||||
|
||||
define <4 x half> @sub_h(<4 x half> %a, <4 x half> %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: sub_h:
|
||||
; CHECK-DAG: fcvtl [[OP1:v[0-9]+\.4s]], v0.4h
|
||||
; CHECK-DAG: fcvtl [[OP2:v[0-9]+\.4s]], v1.4h
|
||||
; CHECK: fsub [[RES:v[0-9]+.4s]], [[OP1]], [[OP2]]
|
||||
; CHECK: fcvtn v0.4h, [[RES]]
|
||||
%0 = fsub <4 x half> %a, %b
|
||||
ret <4 x half> %0
|
||||
}
|
||||
|
||||
|
||||
define <4 x half> @mul_h(<4 x half> %a, <4 x half> %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: mul_h:
|
||||
; CHECK-DAG: fcvtl [[OP1:v[0-9]+\.4s]], v0.4h
|
||||
; CHECK-DAG: fcvtl [[OP2:v[0-9]+\.4s]], v1.4h
|
||||
; CHECK: fmul [[RES:v[0-9]+.4s]], [[OP1]], [[OP2]]
|
||||
; CHECK: fcvtn v0.4h, [[RES]]
|
||||
%0 = fmul <4 x half> %a, %b
|
||||
ret <4 x half> %0
|
||||
}
|
||||
|
||||
|
||||
define <4 x half> @div_h(<4 x half> %a, <4 x half> %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: div_h:
|
||||
; CHECK-DAG: fcvtl [[OP1:v[0-9]+\.4s]], v0.4h
|
||||
; CHECK-DAG: fcvtl [[OP2:v[0-9]+\.4s]], v1.4h
|
||||
; CHECK: fdiv [[RES:v[0-9]+.4s]], [[OP1]], [[OP2]]
|
||||
; CHECK: fcvtn v0.4h, [[RES]]
|
||||
%0 = fdiv <4 x half> %a, %b
|
||||
ret <4 x half> %0
|
||||
}
|
||||
|
||||
|
||||
define <4 x half> @load_h(<4 x half>* %a) {
|
||||
entry:
|
||||
; CHECK-LABEL: load_h:
|
||||
; CHECK: ldr d0, [x0]
|
||||
%0 = load <4 x half>* %a, align 4
|
||||
ret <4 x half> %0
|
||||
}
|
||||
|
||||
|
||||
define void @store_h(<4 x half>* %a, <4 x half> %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: store_h:
|
||||
; CHECK: str d0, [x0]
|
||||
store <4 x half> %b, <4 x half>* %a, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
define <4 x half> @s_to_h(<4 x float> %a) {
|
||||
; CHECK-LABEL: s_to_h:
|
||||
; CHECK: fcvtn v0.4h, v0.4s
|
||||
%1 = fptrunc <4 x float> %a to <4 x half>
|
||||
ret <4 x half> %1
|
||||
}
|
||||
|
||||
define <4 x half> @d_to_h(<4 x double> %a) {
|
||||
; CHECK-LABEL: d_to_h:
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: ins
|
||||
; CHECK-DAG: ins
|
||||
; CHECK-DAG: ins
|
||||
; CHECK-DAG: ins
|
||||
%1 = fptrunc <4 x double> %a to <4 x half>
|
||||
ret <4 x half> %1
|
||||
}
|
||||
|
||||
define <4 x float> @h_to_s(<4 x half> %a) {
|
||||
; CHECK-LABEL: h_to_s:
|
||||
; CHECK: fcvtl v0.4s, v0.4h
|
||||
%1 = fpext <4 x half> %a to <4 x float>
|
||||
ret <4 x float> %1
|
||||
}
|
||||
|
||||
define <4 x double> @h_to_d(<4 x half> %a) {
|
||||
; CHECK-LABEL: h_to_d:
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: ins
|
||||
; CHECK-DAG: ins
|
||||
; CHECK-DAG: ins
|
||||
; CHECK-DAG: ins
|
||||
%1 = fpext <4 x half> %a to <4 x double>
|
||||
ret <4 x double> %1
|
||||
}
|
||||
|
||||
define <4 x half> @bitcast_i_to_h(float, <4 x i16> %a) {
|
||||
; CHECK-LABEL: bitcast_i_to_h:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
%2 = bitcast <4 x i16> %a to <4 x half>
|
||||
ret <4 x half> %2
|
||||
}
|
||||
|
||||
define <4 x i16> @bitcast_h_to_i(float, <4 x half> %a) {
|
||||
; CHECK-LABEL: bitcast_h_to_i:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
%2 = bitcast <4 x half> %a to <4 x i16>
|
||||
ret <4 x i16> %2
|
||||
}
|
255
test/CodeGen/AArch64/fp16-v8-instructions.ll
Normal file
255
test/CodeGen/AArch64/fp16-v8-instructions.ll
Normal file
@ -0,0 +1,255 @@
|
||||
; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
|
||||
|
||||
define <8 x half> @add_h(<8 x half> %a, <8 x half> %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: add_h:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt
|
||||
; CHECK-DAG: fadd
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fadd
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fadd
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fadd
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fadd
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fadd
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fadd
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fadd
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK: fcvt
|
||||
%0 = fadd <8 x half> %a, %b
|
||||
ret <8 x half> %0
|
||||
}
|
||||
|
||||
|
||||
define <8 x half> @sub_h(<8 x half> %a, <8 x half> %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: sub_h:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt
|
||||
; CHECK-DAG: fsub
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fsub
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fsub
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fsub
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fsub
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fsub
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fsub
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fsub
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK: fcvt
|
||||
%0 = fsub <8 x half> %a, %b
|
||||
ret <8 x half> %0
|
||||
}
|
||||
|
||||
|
||||
define <8 x half> @mul_h(<8 x half> %a, <8 x half> %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: mul_h:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt
|
||||
; CHECK-DAG: fmul
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fmul
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fmul
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fmul
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fmul
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fmul
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fmul
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fmul
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK: fcvt
|
||||
%0 = fmul <8 x half> %a, %b
|
||||
ret <8 x half> %0
|
||||
}
|
||||
|
||||
|
||||
define <8 x half> @div_h(<8 x half> %a, <8 x half> %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: div_h:
|
||||
; CHECK: fcvt
|
||||
; CHECK: fcvt
|
||||
; CHECK-DAG: fdiv
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fdiv
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fdiv
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fdiv
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fdiv
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fdiv
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fdiv
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fdiv
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK-DAG: fcvt
|
||||
; CHECK: fcvt
|
||||
%0 = fdiv <8 x half> %a, %b
|
||||
ret <8 x half> %0
|
||||
}
|
||||
|
||||
|
||||
define <8 x half> @load_h(<8 x half>* %a) {
|
||||
entry:
|
||||
; CHECK-LABEL: load_h:
|
||||
; CHECK: ldr q0, [x0]
|
||||
%0 = load <8 x half>* %a, align 4
|
||||
ret <8 x half> %0
|
||||
}
|
||||
|
||||
|
||||
define void @store_h(<8 x half>* %a, <8 x half> %b) {
|
||||
entry:
|
||||
; CHECK-LABEL: store_h:
|
||||
; CHECK: str q0, [x0]
|
||||
store <8 x half> %b, <8 x half>* %a, align 4
|
||||
ret void
|
||||
}
|
||||
|
||||
define <8 x half> @s_to_h(<8 x float> %a) {
|
||||
; CHECK-LABEL: s_to_h:
|
||||
; CHECK-DAG: fcvtn v0.4h, v0.4s
|
||||
; CHECK-DAG: fcvtn [[REG:v[0-9+]]].4h, v1.4s
|
||||
; CHECK: ins v0.d[1], [[REG]].d[0]
|
||||
%1 = fptrunc <8 x float> %a to <8 x half>
|
||||
ret <8 x half> %1
|
||||
}
|
||||
|
||||
define <8 x half> @d_to_h(<8 x double> %a) {
|
||||
; CHECK-LABEL: d_to_h:
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.d
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.d
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.d
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.d
|
||||
; CHECK-DAG: fcvt h
|
||||
; CHECK-DAG: fcvt h
|
||||
; CHECK-DAG: fcvt h
|
||||
; CHECK-DAG: fcvt h
|
||||
; CHECK-DAG: fcvt h
|
||||
; CHECK-DAG: fcvt h
|
||||
; CHECK-DAG: fcvt h
|
||||
; CHECK-DAG: fcvt h
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.h
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.h
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.h
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.h
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.h
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.h
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.h
|
||||
; CHECK-DAG: ins v{{[0-9]+}}.h
|
||||
%1 = fptrunc <8 x double> %a to <8 x half>
|
||||
ret <8 x half> %1
|
||||
}
|
||||
|
||||
define <8 x float> @h_to_s(<8 x half> %a) {
|
||||
; CHECK-LABEL: h_to_s:
|
||||
; CHECK: fcvtl2 v1.4s, v0.8h
|
||||
; CHECK: fcvtl v0.4s, v0.4h
|
||||
%1 = fpext <8 x half> %a to <8 x float>
|
||||
ret <8 x float> %1
|
||||
}
|
||||
|
||||
define <8 x double> @h_to_d(<8 x half> %a) {
|
||||
; CHECK-LABEL: h_to_d:
|
||||
; CHECK-DAG: fcvt d
|
||||
; CHECK-DAG: fcvt d
|
||||
; CHECK-DAG: fcvt d
|
||||
; CHECK-DAG: fcvt d
|
||||
; CHECK-DAG: fcvt d
|
||||
; CHECK-DAG: fcvt d
|
||||
; CHECK-DAG: fcvt d
|
||||
; CHECK-DAG: fcvt d
|
||||
; CHECK-DAG: ins
|
||||
; CHECK-DAG: ins
|
||||
; CHECK-DAG: ins
|
||||
; CHECK-DAG: ins
|
||||
%1 = fpext <8 x half> %a to <8 x double>
|
||||
ret <8 x double> %1
|
||||
}
|
||||
|
||||
|
||||
define <8 x half> @bitcast_i_to_h(float, <8 x i16> %a) {
|
||||
; CHECK-LABEL: bitcast_i_to_h:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
%2 = bitcast <8 x i16> %a to <8 x half>
|
||||
ret <8 x half> %2
|
||||
}
|
||||
|
||||
define <8 x i16> @bitcast_h_to_i(float, <8 x half> %a) {
|
||||
; CHECK-LABEL: bitcast_h_to_i:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
%2 = bitcast <8 x half> %a to <8 x i16>
|
||||
ret <8 x i16> %2
|
||||
}
|
||||
|
203
test/CodeGen/AArch64/fp16-vector-bitcast.ll
Normal file
203
test/CodeGen/AArch64/fp16-vector-bitcast.ll
Normal file
@ -0,0 +1,203 @@
|
||||
; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
|
||||
|
||||
define <4 x i16> @v4f16_to_v4i16(float, <4 x half> %a) #0 {
|
||||
; CHECK-LABEL: v4f16_to_v4i16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <4 x half> %a to <4 x i16>
|
||||
ret <4 x i16> %1
|
||||
}
|
||||
|
||||
define <2 x i32> @v4f16_to_v2i32(float, <4 x half> %a) #0 {
|
||||
; CHECK-LABEL: v4f16_to_v2i32:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <4 x half> %a to <2 x i32>
|
||||
ret <2 x i32> %1
|
||||
}
|
||||
|
||||
define <1 x i64> @v4f16_to_v1i64(float, <4 x half> %a) #0 {
|
||||
; CHECK-LABEL: v4f16_to_v1i64:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <4 x half> %a to <1 x i64>
|
||||
ret <1 x i64> %1
|
||||
}
|
||||
|
||||
define i64 @v4f16_to_i64(float, <4 x half> %a) #0 {
|
||||
; CHECK-LABEL: v4f16_to_i64:
|
||||
; CHECK: fmov x0, d1
|
||||
entry:
|
||||
%1 = bitcast <4 x half> %a to i64
|
||||
ret i64 %1
|
||||
}
|
||||
|
||||
define <2 x float> @v4f16_to_v2float(float, <4 x half> %a) #0 {
|
||||
; CHECK-LABEL: v4f16_to_v2float:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <4 x half> %a to <2 x float>
|
||||
ret <2 x float> %1
|
||||
}
|
||||
|
||||
define <1 x double> @v4f16_to_v1double(float, <4 x half> %a) #0 {
|
||||
; CHECK-LABEL: v4f16_to_v1double:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <4 x half> %a to <1 x double>
|
||||
ret <1 x double> %1
|
||||
}
|
||||
|
||||
define double @v4f16_to_double(float, <4 x half> %a) #0 {
|
||||
; CHECK-LABEL: v4f16_to_double:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <4 x half> %a to double
|
||||
ret double %1
|
||||
}
|
||||
|
||||
|
||||
define <4 x half> @v4i16_to_v4f16(float, <4 x i16> %a) #0 {
|
||||
; CHECK-LABEL: v4i16_to_v4f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <4 x i16> %a to <4 x half>
|
||||
ret <4 x half> %1
|
||||
}
|
||||
|
||||
define <4 x half> @v2i32_to_v4f16(float, <2 x i32> %a) #0 {
|
||||
; CHECK-LABEL: v2i32_to_v4f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <2 x i32> %a to <4 x half>
|
||||
ret <4 x half> %1
|
||||
}
|
||||
|
||||
define <4 x half> @v1i64_to_v4f16(float, <1 x i64> %a) #0 {
|
||||
; CHECK-LABEL: v1i64_to_v4f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <1 x i64> %a to <4 x half>
|
||||
ret <4 x half> %1
|
||||
}
|
||||
|
||||
define <4 x half> @i64_to_v4f16(float, i64 %a) #0 {
|
||||
; CHECK-LABEL: i64_to_v4f16:
|
||||
; CHECK: fmov d0, x0
|
||||
entry:
|
||||
%1 = bitcast i64 %a to <4 x half>
|
||||
ret <4 x half> %1
|
||||
}
|
||||
|
||||
define <4 x half> @v2float_to_v4f16(float, <2 x float> %a) #0 {
|
||||
; CHECK-LABEL: v2float_to_v4f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <2 x float> %a to <4 x half>
|
||||
ret <4 x half> %1
|
||||
}
|
||||
|
||||
define <4 x half> @v1double_to_v4f16(float, <1 x double> %a) #0 {
|
||||
; CHECK-LABEL: v1double_to_v4f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <1 x double> %a to <4 x half>
|
||||
ret <4 x half> %1
|
||||
}
|
||||
|
||||
define <4 x half> @double_to_v4f16(float, double %a) #0 {
|
||||
; CHECK-LABEL: double_to_v4f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast double %a to <4 x half>
|
||||
ret <4 x half> %1
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
define <8 x i16> @v8f16_to_v8i16(float, <8 x half> %a) #0 {
|
||||
; CHECK-LABEL: v8f16_to_v8i16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <8 x half> %a to <8 x i16>
|
||||
ret <8 x i16> %1
|
||||
}
|
||||
|
||||
define <4 x i32> @v8f16_to_v4i32(float, <8 x half> %a) #0 {
|
||||
; CHECK-LABEL: v8f16_to_v4i32:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <8 x half> %a to <4 x i32>
|
||||
ret <4 x i32> %1
|
||||
}
|
||||
|
||||
define <2 x i64> @v8f16_to_v2i64(float, <8 x half> %a) #0 {
|
||||
; CHECK-LABEL: v8f16_to_v2i64:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <8 x half> %a to <2 x i64>
|
||||
ret <2 x i64> %1
|
||||
}
|
||||
|
||||
define <4 x float> @v8f16_to_v4float(float, <8 x half> %a) #0 {
|
||||
; CHECK-LABEL: v8f16_to_v4float:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <8 x half> %a to <4 x float>
|
||||
ret <4 x float> %1
|
||||
}
|
||||
|
||||
define <2 x double> @v8f16_to_v2double(float, <8 x half> %a) #0 {
|
||||
; CHECK-LABEL: v8f16_to_v2double:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <8 x half> %a to <2 x double>
|
||||
ret <2 x double> %1
|
||||
}
|
||||
|
||||
define <8 x half> @v8i16_to_v8f16(float, <8 x i16> %a) #0 {
|
||||
; CHECK-LABEL: v8i16_to_v8f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <8 x i16> %a to <8 x half>
|
||||
ret <8 x half> %1
|
||||
}
|
||||
|
||||
define <8 x half> @v4i32_to_v8f16(float, <4 x i32> %a) #0 {
|
||||
; CHECK-LABEL: v4i32_to_v8f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <4 x i32> %a to <8 x half>
|
||||
ret <8 x half> %1
|
||||
}
|
||||
|
||||
define <8 x half> @v2i64_to_v8f16(float, <2 x i64> %a) #0 {
|
||||
; CHECK-LABEL: v2i64_to_v8f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <2 x i64> %a to <8 x half>
|
||||
ret <8 x half> %1
|
||||
}
|
||||
|
||||
define <8 x half> @v4float_to_v8f16(float, <4 x float> %a) #0 {
|
||||
; CHECK-LABEL: v4float_to_v8f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <4 x float> %a to <8 x half>
|
||||
ret <8 x half> %1
|
||||
}
|
||||
|
||||
define <8 x half> @v2double_to_v8f16(float, <2 x double> %a) #0 {
|
||||
; CHECK-LABEL: v2double_to_v8f16:
|
||||
; CHECK: mov v0.16b, v1.16b
|
||||
entry:
|
||||
%1 = bitcast <2 x double> %a to <8 x half>
|
||||
ret <8 x half> %1
|
||||
}
|
528
test/CodeGen/AArch64/fp16-vector-load-store.ll
Normal file
528
test/CodeGen/AArch64/fp16-vector-load-store.ll
Normal file
@ -0,0 +1,528 @@
|
||||
; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
|
||||
|
||||
; Simple load of v4i16
|
||||
define <4 x half> @load_64(<4 x half>* nocapture readonly %a) #0 {
|
||||
; CHECK-LABEL: load_64:
|
||||
; CHECK: ldr d0, [x0]
|
||||
entry:
|
||||
%0 = load <4 x half>* %a, align 8
|
||||
ret <4 x half> %0
|
||||
}
|
||||
|
||||
; Simple load of v8i16
|
||||
define <8 x half> @load_128(<8 x half>* nocapture readonly %a) #0 {
|
||||
; CHECK-LABEL: load_128:
|
||||
; CHECK: ldr q0, [x0]
|
||||
entry:
|
||||
%0 = load <8 x half>* %a, align 16
|
||||
ret <8 x half> %0
|
||||
}
|
||||
|
||||
; Duplicating load to v4i16
|
||||
define <4 x half> @load_dup_64(half* nocapture readonly %a) #0 {
|
||||
; CHECK-LABEL: load_dup_64:
|
||||
; CHECK: ld1r { v0.4h }, [x0]
|
||||
entry:
|
||||
%0 = load half* %a, align 2
|
||||
%1 = insertelement <4 x half> undef, half %0, i32 0
|
||||
%2 = shufflevector <4 x half> %1, <4 x half> undef, <4 x i32> zeroinitializer
|
||||
ret <4 x half> %2
|
||||
}
|
||||
|
||||
; Duplicating load to v8i16
|
||||
define <8 x half> @load_dup_128(half* nocapture readonly %a) #0 {
|
||||
; CHECK-LABEL: load_dup_128:
|
||||
; CHECK: ld1r { v0.8h }, [x0]
|
||||
entry:
|
||||
%0 = load half* %a, align 2
|
||||
%1 = insertelement <8 x half> undef, half %0, i32 0
|
||||
%2 = shufflevector <8 x half> %1, <8 x half> undef, <8 x i32> zeroinitializer
|
||||
ret <8 x half> %2
|
||||
}
|
||||
|
||||
; Load to one lane of v4f16
|
||||
define <4 x half> @load_lane_64(half* nocapture readonly %a, <4 x half> %b) #0 {
|
||||
; CHECK-LABEL: load_lane_64:
|
||||
; CHECK: ld1 { v0.h }[2], [x0]
|
||||
entry:
|
||||
%0 = load half* %a, align 2
|
||||
%1 = insertelement <4 x half> %b, half %0, i32 2
|
||||
ret <4 x half> %1
|
||||
}
|
||||
|
||||
; Load to one lane of v8f16
|
||||
define <8 x half> @load_lane_128(half* nocapture readonly %a, <8 x half> %b) #0 {
|
||||
; CHECK-LABEL: load_lane_128:
|
||||
; CHECK: ld1 { v0.h }[5], [x0]
|
||||
entry:
|
||||
%0 = load half* %a, align 2
|
||||
%1 = insertelement <8 x half> %b, half %0, i32 5
|
||||
ret <8 x half> %1
|
||||
}
|
||||
|
||||
; Simple store of v4f16
|
||||
define void @store_64(<4 x half>* nocapture %a, <4 x half> %b) #1 {
|
||||
; CHECK-LABEL: store_64:
|
||||
; CHECK: str d0, [x0]
|
||||
entry:
|
||||
store <4 x half> %b, <4 x half>* %a, align 8
|
||||
ret void
|
||||
}
|
||||
|
||||
; Simple store of v8f16
|
||||
define void @store_128(<8 x half>* nocapture %a, <8 x half> %b) #1 {
|
||||
; CHECK-LABEL: store_128:
|
||||
; CHECK: str q0, [x0]
|
||||
entry:
|
||||
store <8 x half> %b, <8 x half>* %a, align 16
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store from one lane of v4f16
|
||||
define void @store_lane_64(half* nocapture %a, <4 x half> %b) #1 {
|
||||
; CHECK-LABEL: store_lane_64:
|
||||
; CHECK: st1 { v0.h }[2], [x0]
|
||||
entry:
|
||||
%0 = extractelement <4 x half> %b, i32 2
|
||||
store half %0, half* %a, align 2
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store from one lane of v8f16
|
||||
define void @store_lane_128(half* nocapture %a, <8 x half> %b) #1 {
|
||||
; CHECK-LABEL: store_lane_128:
|
||||
; CHECK: st1 { v0.h }[5], [x0]
|
||||
entry:
|
||||
%0 = extractelement <8 x half> %b, i32 5
|
||||
store half %0, half* %a, align 2
|
||||
ret void
|
||||
}
|
||||
|
||||
; NEON intrinsics - (de-)interleaving loads and stores
|
||||
declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0v4f16(<4 x half>*)
|
||||
declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0v4f16(<4 x half>*)
|
||||
declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0v4f16(<4 x half>*)
|
||||
declare void @llvm.aarch64.neon.st2.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>*)
|
||||
declare void @llvm.aarch64.neon.st3.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>*)
|
||||
declare void @llvm.aarch64.neon.st4.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, <4 x half>*)
|
||||
declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>*)
|
||||
declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x half>*)
|
||||
declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0v8f16(<8 x half>*)
|
||||
declare void @llvm.aarch64.neon.st2.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>*)
|
||||
declare void @llvm.aarch64.neon.st3.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>*)
|
||||
declare void @llvm.aarch64.neon.st4.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, <8 x half>*)
|
||||
|
||||
; Load 2 x v4f16 with de-interleaving
|
||||
define { <4 x half>, <4 x half> } @load_interleave_64_2(<4 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_interleave_64_2:
|
||||
; CHECK: ld2 { v0.4h, v1.4h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0v4f16(<4 x half>* %a)
|
||||
ret { <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Load 3 x v4f16 with de-interleaving
|
||||
define { <4 x half>, <4 x half>, <4 x half> } @load_interleave_64_3(<4 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_interleave_64_3:
|
||||
; CHECK: ld3 { v0.4h, v1.4h, v2.4h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0v4f16(<4 x half>* %a)
|
||||
ret { <4 x half>, <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Load 4 x v4f16 with de-interleaving
|
||||
define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_interleave_64_4(<4 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_interleave_64_4:
|
||||
; CHECK: ld4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0v4f16(<4 x half>* %a)
|
||||
ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Store 2 x v4f16 with interleaving
|
||||
define void @store_interleave_64_2(<4 x half>* %a, <4 x half> %b, <4 x half> %c) #0 {
|
||||
; CHECK-LABEL: store_interleave_64_2:
|
||||
; CHECK: st2 { v0.4h, v1.4h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st2.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store 3 x v4f16 with interleaving
|
||||
define void @store_interleave_64_3(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
|
||||
; CHECK-LABEL: store_interleave_64_3:
|
||||
; CHECK: st3 { v0.4h, v1.4h, v2.4h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st3.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store 4 x v4f16 with interleaving
|
||||
define void @store_interleave_64_4(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
|
||||
; CHECK-LABEL: store_interleave_64_4:
|
||||
; CHECK: st4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st4.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, <4 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Load 2 x v8f16 with de-interleaving
|
||||
define { <8 x half>, <8 x half> } @load_interleave_128_2(<8 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_interleave_128_2:
|
||||
; CHECK: ld2 { v0.8h, v1.8h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>* %a)
|
||||
ret { <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Load 3 x v8f16 with de-interleaving
|
||||
define { <8 x half>, <8 x half>, <8 x half> } @load_interleave_128_3(<8 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_interleave_128_3:
|
||||
; CHECK: ld3 { v0.8h, v1.8h, v2.8h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x half>* %a)
|
||||
ret { <8 x half>, <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Load 8 x v8f16 with de-interleaving
|
||||
define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_interleave_128_4(<8 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_interleave_128_4:
|
||||
; CHECK: ld4 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0v8f16(<8 x half>* %a)
|
||||
ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Store 2 x v8f16 with interleaving
|
||||
define void @store_interleave_128_2(<8 x half>* %a, <8 x half> %b, <8 x half> %c) #0 {
|
||||
; CHECK-LABEL: store_interleave_128_2:
|
||||
; CHECK: st2 { v0.8h, v1.8h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st2.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store 3 x v8f16 with interleaving
|
||||
define void @store_interleave_128_3(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
|
||||
; CHECK-LABEL: store_interleave_128_3:
|
||||
; CHECK: st3 { v0.8h, v1.8h, v2.8h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st3.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store 8 x v8f16 with interleaving
|
||||
define void @store_interleave_128_4(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
|
||||
; CHECK-LABEL: store_interleave_128_4:
|
||||
; CHECK: st4 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st4.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, <8 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; NEON intrinsics - duplicating loads
|
||||
declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2r.v4f16.p0f16(half*)
|
||||
declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3r.v4f16.p0f16(half*)
|
||||
declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4r.v4f16.p0f16(half*)
|
||||
declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2r.v8f16.p0f16(half*)
|
||||
declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3r.v8f16.p0f16(half*)
|
||||
declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4r.v8f16.p0f16(half*)
|
||||
|
||||
; Load 2 x v4f16 with duplication
|
||||
define { <4 x half>, <4 x half> } @load_dup_64_2(half* %a) #0 {
|
||||
; CHECK-LABEL: load_dup_64_2:
|
||||
; CHECK: ld2r { v0.4h, v1.4h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2r.v4f16.p0f16(half* %a)
|
||||
ret { <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Load 3 x v4f16 with duplication
|
||||
define { <4 x half>, <4 x half>, <4 x half> } @load_dup_64_3(half* %a) #0 {
|
||||
; CHECK-LABEL: load_dup_64_3:
|
||||
; CHECK: ld3r { v0.4h, v1.4h, v2.4h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3r.v4f16.p0f16(half* %a)
|
||||
ret { <4 x half>, <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Load 4 x v4f16 with duplication
|
||||
define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_dup_64_4(half* %a) #0 {
|
||||
; CHECK-LABEL: load_dup_64_4:
|
||||
; CHECK: ld4r { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4r.v4f16.p0f16(half* %a)
|
||||
ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Load 2 x v8f16 with duplication
|
||||
define { <8 x half>, <8 x half> } @load_dup_128_2(half* %a) #0 {
|
||||
; CHECK-LABEL: load_dup_128_2:
|
||||
; CHECK: ld2r { v0.8h, v1.8h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2r.v8f16.p0f16(half* %a)
|
||||
ret { <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Load 3 x v8f16 with duplication
|
||||
define { <8 x half>, <8 x half>, <8 x half> } @load_dup_128_3(half* %a) #0 {
|
||||
; CHECK-LABEL: load_dup_128_3:
|
||||
; CHECK: ld3r { v0.8h, v1.8h, v2.8h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3r.v8f16.p0f16(half* %a)
|
||||
ret { <8 x half>, <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Load 8 x v8f16 with duplication
|
||||
define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_dup_128_4(half* %a) #0 {
|
||||
; CHECK-LABEL: load_dup_128_4:
|
||||
; CHECK: ld4r { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4r.v8f16.p0f16(half* %a)
|
||||
ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
|
||||
; NEON intrinsics - loads and stores to/from one lane
|
||||
declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2lane.v4f16.p0f16(<4 x half>, <4 x half>, i64, half*)
|
||||
declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, i64, half*)
|
||||
declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, i64, half*)
|
||||
declare void @llvm.aarch64.neon.st2lane.v4f16.p0f16(<4 x half>, <4 x half>, i64, half*)
|
||||
declare void @llvm.aarch64.neon.st3lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, i64, half*)
|
||||
declare void @llvm.aarch64.neon.st4lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, i64, half*)
|
||||
declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2lane.v8f16.p0f16(<8 x half>, <8 x half>, i64, half*)
|
||||
declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, i64, half*)
|
||||
declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, i64, half*)
|
||||
declare void @llvm.aarch64.neon.st2lane.v8f16.p0f16(<8 x half>, <8 x half>, i64, half*)
|
||||
declare void @llvm.aarch64.neon.st3lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, i64, half*)
|
||||
declare void @llvm.aarch64.neon.st4lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, i64, half*)
|
||||
|
||||
; Load one lane of 2 x v4f16
|
||||
define { <4 x half>, <4 x half> } @load_lane_64_2(half* %a, <4 x half> %b, <4 x half> %c) #0 {
|
||||
; CHECK-LABEL: load_lane_64_2:
|
||||
; CHECK: ld2 { v0.h, v1.h }[2], [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, i64 2, half* %a)
|
||||
ret { <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Load one lane of 3 x v4f16
|
||||
define { <4 x half>, <4 x half>, <4 x half> } @load_lane_64_3(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
|
||||
; CHECK-LABEL: load_lane_64_3:
|
||||
; CHECK: ld3 { v0.h, v1.h, v2.h }[2], [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, i64 2, half* %a)
|
||||
ret { <4 x half>, <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Load one lane of 4 x v4f16
|
||||
define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_lane_64_4(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
|
||||
; CHECK-LABEL: load_lane_64_4:
|
||||
; CHECK: ld4 { v0.h, v1.h, v2.h, v3.h }[2], [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, i64 2, half* %a)
|
||||
ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Store one lane of 2 x v4f16
|
||||
define void @store_lane_64_2(half* %a, <4 x half> %b, <4 x half> %c) #0 {
|
||||
; CHECK-LABEL: store_lane_64_2:
|
||||
; CHECK: st2 { v0.h, v1.h }[2], [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st2lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, i64 2, half* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store one lane of 3 x v4f16
|
||||
define void @store_lane_64_3(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
|
||||
; CHECK-LABEL: store_lane_64_3:
|
||||
; CHECK: st3 { v0.h, v1.h, v2.h }[2], [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st3lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, i64 2, half* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store one lane of 4 x v4f16
|
||||
define void @store_lane_64_4(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
|
||||
; CHECK-LABEL: store_lane_64_4:
|
||||
; CHECK: st4 { v0.h, v1.h, v2.h, v3.h }[2], [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st4lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, i64 2, half* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Load one lane of 2 x v8f16
|
||||
define { <8 x half>, <8 x half> } @load_lane_128_2(half* %a, <8 x half> %b, <8 x half> %c) #0 {
|
||||
; CHECK-LABEL: load_lane_128_2:
|
||||
; CHECK: ld2 { v0.h, v1.h }[2], [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, i64 2, half* %a)
|
||||
ret { <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Load one lane of 3 x v8f16
|
||||
define { <8 x half>, <8 x half>, <8 x half> } @load_lane_128_3(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
|
||||
; CHECK-LABEL: load_lane_128_3:
|
||||
; CHECK: ld3 { v0.h, v1.h, v2.h }[2], [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, i64 2, half* %a)
|
||||
ret { <8 x half>, <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Load one lane of 8 x v8f16
|
||||
define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_lane_128_4(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
|
||||
; CHECK-LABEL: load_lane_128_4:
|
||||
; CHECK: ld4 { v0.h, v1.h, v2.h, v3.h }[2], [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, i64 2, half* %a)
|
||||
ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Store one lane of 2 x v8f16
|
||||
define void @store_lane_128_2(half* %a, <8 x half> %b, <8 x half> %c) #0 {
|
||||
; CHECK-LABEL: store_lane_128_2:
|
||||
; CHECK: st2 { v0.h, v1.h }[2], [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st2lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, i64 2, half* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store one lane of 3 x v8f16
|
||||
define void @store_lane_128_3(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
|
||||
; CHECK-LABEL: store_lane_128_3:
|
||||
; CHECK: st3 { v0.h, v1.h, v2.h }[2], [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st3lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, i64 2, half* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store one lane of 8 x v8f16
|
||||
define void @store_lane_128_4(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
|
||||
; CHECK-LABEL: store_lane_128_4:
|
||||
; CHECK: st4 { v0.h, v1.h, v2.h, v3.h }[2], [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st4lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, i64 2, half* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; NEON intrinsics - load/store without interleaving
|
||||
declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x2.v4f16.p0v4f16(<4 x half>*)
|
||||
declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x3.v4f16.p0v4f16(<4 x half>*)
|
||||
declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x4.v4f16.p0v4f16(<4 x half>*)
|
||||
declare void @llvm.aarch64.neon.st1x2.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>*)
|
||||
declare void @llvm.aarch64.neon.st1x3.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>*)
|
||||
declare void @llvm.aarch64.neon.st1x4.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, <4 x half>*)
|
||||
declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x2.v8f16.p0v8f16(<8 x half>*)
|
||||
declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x3.v8f16.p0v8f16(<8 x half>*)
|
||||
declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x4.v8f16.p0v8f16(<8 x half>*)
|
||||
declare void @llvm.aarch64.neon.st1x2.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>*)
|
||||
declare void @llvm.aarch64.neon.st1x3.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>*)
|
||||
declare void @llvm.aarch64.neon.st1x4.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, <8 x half>*)
|
||||
|
||||
; Load 2 x v4f16 without de-interleaving
|
||||
define { <4 x half>, <4 x half> } @load_64_2(<4 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_64_2:
|
||||
; CHECK: ld1 { v0.4h, v1.4h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x2.v4f16.p0v4f16(<4 x half>* %a)
|
||||
ret { <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Load 3 x v4f16 without de-interleaving
|
||||
define { <4 x half>, <4 x half>, <4 x half> } @load_64_3(<4 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_64_3:
|
||||
; CHECK: ld1 { v0.4h, v1.4h, v2.4h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x3.v4f16.p0v4f16(<4 x half>* %a)
|
||||
ret { <4 x half>, <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Load 4 x v4f16 without de-interleaving
|
||||
define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_64_4(<4 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_64_4:
|
||||
; CHECK: ld1 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x4.v4f16.p0v4f16(<4 x half>* %a)
|
||||
ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0
|
||||
}
|
||||
|
||||
; Store 2 x v4f16 without interleaving
|
||||
define void @store_64_2(<4 x half>* %a, <4 x half> %b, <4 x half> %c) #0 {
|
||||
; CHECK-LABEL: store_64_2:
|
||||
; CHECK: st1 { v0.4h, v1.4h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st1x2.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store 3 x v4f16 without interleaving
|
||||
define void @store_64_3(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
|
||||
; CHECK-LABEL: store_64_3:
|
||||
; CHECK: st1 { v0.4h, v1.4h, v2.4h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st1x3.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store 4 x v4f16 without interleaving
|
||||
define void @store_64_4(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
|
||||
; CHECK-LABEL: store_64_4:
|
||||
; CHECK: st1 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st1x4.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, <4 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Load 2 x v8f16 without de-interleaving
|
||||
define { <8 x half>, <8 x half> } @load_128_2(<8 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_128_2:
|
||||
; CHECK: ld1 { v0.8h, v1.8h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x2.v8f16.p0v8f16(<8 x half>* %a)
|
||||
ret { <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Load 3 x v8f16 without de-interleaving
|
||||
define { <8 x half>, <8 x half>, <8 x half> } @load_128_3(<8 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_128_3:
|
||||
; CHECK: ld1 { v0.8h, v1.8h, v2.8h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x3.v8f16.p0v8f16(<8 x half>* %a)
|
||||
ret { <8 x half>, <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Load 8 x v8f16 without de-interleaving
|
||||
define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_128_4(<8 x half>* %a) #0 {
|
||||
; CHECK-LABEL: load_128_4:
|
||||
; CHECK: ld1 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
|
||||
entry:
|
||||
%0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x4.v8f16.p0v8f16(<8 x half>* %a)
|
||||
ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0
|
||||
}
|
||||
|
||||
; Store 2 x v8f16 without interleaving
|
||||
define void @store_128_2(<8 x half>* %a, <8 x half> %b, <8 x half> %c) #0 {
|
||||
; CHECK-LABEL: store_128_2:
|
||||
; CHECK: st1 { v0.8h, v1.8h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st1x2.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store 3 x v8f16 without interleaving
|
||||
define void @store_128_3(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
|
||||
; CHECK-LABEL: store_128_3:
|
||||
; CHECK: st1 { v0.8h, v1.8h, v2.8h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st1x3.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half>* %a)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Store 8 x v8f16 without interleaving
|
||||
define void @store_128_4(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
|
||||
; CHECK-LABEL: store_128_4:
|
||||
; CHECK: st1 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
|
||||
entry:
|
||||
tail call void @llvm.aarch64.neon.st1x4.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, <8 x half>* %a)
|
||||
ret void
|
||||
}
|
301
test/CodeGen/AArch64/fp16-vector-shuffle.ll
Normal file
301
test/CodeGen/AArch64/fp16-vector-shuffle.ll
Normal file
@ -0,0 +1,301 @@
|
||||
; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
|
||||
|
||||
; float16x4_t select_64(float16x4_t a, float16x4_t b, uint16x4_t c) { return vbsl_u16(c, a, b); }
|
||||
; Bitwise select (and/andn/or on the i16 bit pattern) of v4f16 must fold to bsl.
define <4 x half> @select_64(<4 x half> %a, <4 x half> %b, <4 x i16> %c) #0 {
; CHECK-LABEL: select_64:
; CHECK: bsl
entry:
  %0 = bitcast <4 x half> %a to <4 x i16>
  %1 = bitcast <4 x half> %b to <4 x i16>
  %vbsl3.i = and <4 x i16> %0, %c
  %2 = xor <4 x i16> %c, <i16 -1, i16 -1, i16 -1, i16 -1>
  %vbsl4.i = and <4 x i16> %1, %2
  %vbsl5.i = or <4 x i16> %vbsl3.i, %vbsl4.i
  %3 = bitcast <4 x i16> %vbsl5.i to <4 x half>
  ret <4 x half> %3
}
|
||||
|
||||
; float16x8_t select_128(float16x8_t a, float16x8_t b, uint16x8_t c) { return vbslq_u16(c, a, b); }
|
||||
; Bitwise select (and/andn/or on the i16 bit pattern) of v8f16 must fold to bsl.
define <8 x half> @select_128(<8 x half> %a, <8 x half> %b, <8 x i16> %c) #0 {
; CHECK-LABEL: select_128:
; CHECK: bsl
entry:
  %0 = bitcast <8 x half> %a to <8 x i16>
  %1 = bitcast <8 x half> %b to <8 x i16>
  %vbsl3.i = and <8 x i16> %0, %c
  %2 = xor <8 x i16> %c, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
  %vbsl4.i = and <8 x i16> %1, %2
  %vbsl5.i = or <8 x i16> %vbsl3.i, %vbsl4.i
  %3 = bitcast <8 x i16> %vbsl5.i to <8 x half>
  ret <8 x half> %3
}
|
||||
|
||||
; float16x4_t lane_64_64(float16x4_t a, float16x4_t b) {
|
||||
; return vcopy_lane_s16(a, 1, b, 2);
|
||||
; }
|
||||
; Single-lane copy between two v4f16 values (shuffle mask picks lane 2 of %b
; into lane 1 of %a) must lower to an ins instruction.
define <4 x half> @lane_64_64(<4 x half> %a, <4 x half> %b) #0 {
; CHECK-LABEL: lane_64_64:
; CHECK: ins
entry:
  %0 = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
  ret <4 x half> %0
}
|
||||
|
||||
; float16x8_t lane_128_64(float16x8_t a, float16x4_t b) {
|
||||
; return vcopyq_lane_s16(a, 1, b, 2);
|
||||
; }
|
||||
; Copy lane 2 of a v4f16 into lane 1 of a v8f16 via i16 bitcasts; must lower to ins.
define <8 x half> @lane_128_64(<8 x half> %a, <4 x half> %b) #0 {
; CHECK-LABEL: lane_128_64:
; CHECK: ins
entry:
  %0 = bitcast <4 x half> %b to <4 x i16>
  %vget_lane = extractelement <4 x i16> %0, i32 2
  %1 = bitcast <8 x half> %a to <8 x i16>
  %vset_lane = insertelement <8 x i16> %1, i16 %vget_lane, i32 1
  %2 = bitcast <8 x i16> %vset_lane to <8 x half>
  ret <8 x half> %2
}
|
||||
|
||||
; float16x4_t lane_64_128(float16x4_t a, float16x8_t b) {
|
||||
; return vcopy_laneq_s16(a, 3, b, 5);
|
||||
; }
|
||||
; Copy lane 5 of a v8f16 into lane 3 of a v4f16 via i16 bitcasts; must lower to ins.
define <4 x half> @lane_64_128(<4 x half> %a, <8 x half> %b) #0 {
; CHECK-LABEL: lane_64_128:
; CHECK: ins
entry:
  %0 = bitcast <8 x half> %b to <8 x i16>
  %vgetq_lane = extractelement <8 x i16> %0, i32 5
  %1 = bitcast <4 x half> %a to <4 x i16>
  %vset_lane = insertelement <4 x i16> %1, i16 %vgetq_lane, i32 3
  %2 = bitcast <4 x i16> %vset_lane to <4 x half>
  ret <4 x half> %2
}
|
||||
|
||||
; float16x8_t lane_128_128(float16x8_t a, float16x8_t b) {
|
||||
; return vcopyq_laneq_s16(a, 3, b, 5);
|
||||
; }
|
||||
; Single-lane copy between two v8f16 values (lane 5 of %b into lane 3 of %a);
; must lower to an ins instruction.
define <8 x half> @lane_128_128(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-LABEL: lane_128_128:
; CHECK: ins
entry:
  %0 = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 4, i32 5, i32 6, i32 7>
  ret <8 x half> %0
}
|
||||
|
||||
; float16x4_t ext_64(float16x4_t a, float16x4_t b) {
|
||||
; return vext_s16(a, b, 3);
|
||||
; }
|
||||
; Concatenate-and-extract shuffle (vext #3) on v4f16 must lower to ext.
define <4 x half> @ext_64(<4 x half> %a, <4 x half> %b) #0 {
; CHECK-LABEL: ext_64:
; CHECK: ext
entry:
  %0 = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
  ret <4 x half> %0
}
|
||||
|
||||
; float16x8_t ext_128(float16x8_t a, float16x8_t b) {
|
||||
; return vextq_s16(a, b, 3);
|
||||
; }
|
||||
; Concatenate-and-extract shuffle (vextq #3) on v8f16 must lower to ext.
define <8 x half> @ext_128(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-LABEL: ext_128:
; CHECK: ext
entry:
  %0 = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
  ret <8 x half> %0
}
|
||||
|
||||
; float16x4_t rev32_64(float16x4_t a) {
|
||||
; return vrev32_s16(a);
|
||||
; }
|
||||
; Pairwise half-word swap within 32-bit chunks of a v4f16 must lower to rev32.
define <4 x half> @rev32_64(<4 x half> %a) #0 {
entry:
; CHECK-LABEL: rev32_64:
; CHECK: rev32
  %0 = shufflevector <4 x half> %a, <4 x half> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x half> %0
}
|
||||
|
||||
; float16x4_t rev64_64(float16x4_t a) {
|
||||
; return vrev64_s16(a);
|
||||
; }
|
||||
; Half-word reversal within a 64-bit chunk of a v4f16 must lower to rev64.
define <4 x half> @rev64_64(<4 x half> %a) #0 {
entry:
; CHECK-LABEL: rev64_64:
; CHECK: rev64
  %0 = shufflevector <4 x half> %a, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x half> %0
}
|
||||
|
||||
; float16x8_t rev32_128(float16x8_t a) {
|
||||
; return vrev32q_s16(a);
|
||||
; }
|
||||
; Pairwise half-word swap within 32-bit chunks of a v8f16 must lower to rev32.
define <8 x half> @rev32_128(<8 x half> %a) #0 {
entry:
; CHECK-LABEL: rev32_128:
; CHECK: rev32
  %0 = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x half> %0
}
|
||||
|
||||
; float16x8_t rev64_128(float16x8_t a) {
|
||||
; return vrev64q_s16(a);
|
||||
; }
|
||||
; Half-word reversal within each 64-bit chunk of a v8f16 must lower to rev64.
define <8 x half> @rev64_128(<8 x half> %a) #0 {
entry:
; CHECK-LABEL: rev64_128:
; CHECK: rev64
  %0 = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
  ret <8 x half> %0
}
|
||||
|
||||
; float16x4_t create_64(long long a) { return vcreate_f16(a); }
|
||||
; Reinterpreting an i64 GPR as a v4f16 (vcreate) must lower to a GPR->FPR fmov.
define <4 x half> @create_64(i64 %a) #0 {
; CHECK-LABEL: create_64:
; CHECK: fmov
entry:
  %0 = bitcast i64 %a to <4 x half>
  ret <4 x half> %0
}
|
||||
|
||||
; float16x4_t dup_64(__fp16 a) { return vdup_n_f16(a); }
|
||||
; Splatting one half into all four lanes (vdup_n_f16) must lower to dup.
define <4 x half> @dup_64(half %a) #0 {
; CHECK-LABEL: dup_64:
; CHECK: dup
entry:
  %vecinit = insertelement <4 x half> undef, half %a, i32 0
  %vecinit1 = insertelement <4 x half> %vecinit, half %a, i32 1
  %vecinit2 = insertelement <4 x half> %vecinit1, half %a, i32 2
  %vecinit3 = insertelement <4 x half> %vecinit2, half %a, i32 3
  ret <4 x half> %vecinit3
}
|
||||
|
||||
; float16x8_t dup_128(__fp16 a) { return vdupq_n_f16(a); }
|
||||
; Splatting one half into all eight lanes (vdupq_n_f16) must lower to dup.
define <8 x half> @dup_128(half %a) #0 {
entry:
; CHECK-LABEL: dup_128:
; CHECK: dup
  %vecinit = insertelement <8 x half> undef, half %a, i32 0
  %vecinit1 = insertelement <8 x half> %vecinit, half %a, i32 1
  %vecinit2 = insertelement <8 x half> %vecinit1, half %a, i32 2
  %vecinit3 = insertelement <8 x half> %vecinit2, half %a, i32 3
  %vecinit4 = insertelement <8 x half> %vecinit3, half %a, i32 4
  %vecinit5 = insertelement <8 x half> %vecinit4, half %a, i32 5
  %vecinit6 = insertelement <8 x half> %vecinit5, half %a, i32 6
  %vecinit7 = insertelement <8 x half> %vecinit6, half %a, i32 7
  ret <8 x half> %vecinit7
}
|
||||
|
||||
; float16x4_t dup_lane_64(float16x4_t a) { return vdup_lane_f16(a, 2); }
|
||||
; Broadcasting lane 2 of a v4f16 across a v4f16 must lower to a lane dup.
define <4 x half> @dup_lane_64(<4 x half> %a) #0 {
entry:
; CHECK-LABEL: dup_lane_64:
; CHECK: dup
  %shuffle = shufflevector <4 x half> %a, <4 x half> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
  ret <4 x half> %shuffle
}
|
||||
|
||||
; float16x8_t dup_lane_128(float16x4_t a) { return vdupq_lane_f16(a, 2); }
|
||||
; Broadcasting lane 2 of a v4f16 across a v8f16 must lower to a lane dup.
define <8 x half> @dup_lane_128(<4 x half> %a) #0 {
entry:
; CHECK-LABEL: dup_lane_128:
; CHECK: dup
  %shuffle = shufflevector <4 x half> %a, <4 x half> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  ret <8 x half> %shuffle
}
|
||||
|
||||
; float16x4_t dup_laneq_64(float16x8_t a) { return vdup_laneq_f16(a, 2); }
|
||||
; Broadcasting lane 2 of a v8f16 across a v4f16 must lower to a lane dup.
define <4 x half> @dup_laneq_64(<8 x half> %a) #0 {
entry:
; CHECK-LABEL: dup_laneq_64:
; CHECK: dup
  %shuffle = shufflevector <8 x half> %a, <8 x half> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
  ret <4 x half> %shuffle
}
|
||||
|
||||
; float16x8_t dup_laneq_128(float16x8_t a) { return vdupq_laneq_f16(a, 2); }
|
||||
; Broadcasting lane 2 of a v8f16 across a v8f16 must lower to a lane dup.
define <8 x half> @dup_laneq_128(<8 x half> %a) #0 {
entry:
; CHECK-LABEL: dup_laneq_128:
; CHECK: dup
  %shuffle = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  ret <8 x half> %shuffle
}
|
||||
|
||||
; float16x8_t vcombine(float16x4_t a, float16x4_t b) { return vcombine_f16(a, b); }
|
||||
; Concatenating two v4f16 halves into a v8f16 must lower to an ins of the high half.
define <8 x half> @vcombine(<4 x half> %a, <4 x half> %b) #0 {
entry:
; CHECK-LABEL: vcombine:
; CHECK: ins
  %shuffle.i = shufflevector <4 x half> %a, <4 x half> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x half> %shuffle.i
}
|
||||
|
||||
; float16x4_t get_high(float16x8_t a) { return vget_high_f16(a); }
|
||||
; Extracting the high four lanes of a v8f16 must lower to ext.
define <4 x half> @get_high(<8 x half> %a) #0 {
; CHECK-LABEL: get_high:
; CHECK: ext
entry:
  %shuffle.i = shufflevector <8 x half> %a, <8 x half> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  ret <4 x half> %shuffle.i
}
|
||||
|
||||
|
||||
; float16x4_t get_low(float16x8_t a) { return vget_low_f16(a); }
|
||||
; Extracting the low four lanes of a v8f16 is a subregister access and must
; not emit an ext instruction.
define <4 x half> @get_low(<8 x half> %a) #0 {
; CHECK-LABEL: get_low:
; CHECK-NOT: ext
entry:
  %shuffle.i = shufflevector <8 x half> %a, <8 x half> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x half> %shuffle.i
}
|
||||
|
||||
; float16x4_t set_lane_64(float16x4_t a, __fp16 b) { return vset_lane_f16(b, a, 2); }
|
||||
; Inserting a scalar half into lane 2 of a v4f16 (via i16 bitcasts) must move
; the scalar to a GPR (fmov) and insert it (ins).
define <4 x half> @set_lane_64(<4 x half> %a, half %b) #0 {
; CHECK-LABEL: set_lane_64:
; CHECK: fmov
; CHECK: ins
entry:
  %0 = bitcast half %b to i16
  %1 = bitcast <4 x half> %a to <4 x i16>
  %vset_lane = insertelement <4 x i16> %1, i16 %0, i32 2
  %2 = bitcast <4 x i16> %vset_lane to <4 x half>
  ret <4 x half> %2
}
|
||||
|
||||
|
||||
; float16x8_t set_lane_128(float16x8_t a, __fp16 b) { return vsetq_lane_f16(b, a, 2); }
|
||||
; Inserting a scalar half into lane 2 of a v8f16 (via i16 bitcasts) must move
; the scalar to a GPR (fmov) and insert it (ins).
define <8 x half> @set_lane_128(<8 x half> %a, half %b) #0 {
; CHECK-LABEL: set_lane_128:
; CHECK: fmov
; CHECK: ins
entry:
  %0 = bitcast half %b to i16
  %1 = bitcast <8 x half> %a to <8 x i16>
  %vset_lane = insertelement <8 x i16> %1, i16 %0, i32 2
  %2 = bitcast <8 x i16> %vset_lane to <8 x half>
  ret <8 x half> %2
}
|
||||
|
||||
; __fp16 get_lane_64(float16x4_t a) { return vget_lane_f16(a, 2); }
|
||||
; Extracting lane 2 of a v4f16 as a scalar half must go through a GPR
; (umov) and back to an FPR (fmov).
define half @get_lane_64(<4 x half> %a) #0 {
; CHECK-LABEL: get_lane_64:
; CHECK: umov
; CHECK: fmov
entry:
  %0 = bitcast <4 x half> %a to <4 x i16>
  %vget_lane = extractelement <4 x i16> %0, i32 2
  %1 = bitcast i16 %vget_lane to half
  ret half %1
}
|
||||
|
||||
; __fp16 get_lane_128(float16x8_t a) { return vgetq_lane_f16(a, 2); }
|
||||
; Extracting lane 2 of a v8f16 as a scalar half must go through a GPR
; (umov) and back to an FPR (fmov).
define half @get_lane_128(<8 x half> %a) #0 {
; CHECK-LABEL: get_lane_128:
; CHECK: umov
; CHECK: fmov
entry:
  %0 = bitcast <8 x half> %a to <8 x i16>
  %vgetq_lane = extractelement <8 x i16> %0, i32 2
  %1 = bitcast i16 %vgetq_lane to half
  ret half %1
}
|
Loading…
Reference in New Issue
Block a user