ARM64: add 128-bit MLA operations to the custom selection code.

Without this change, the llvm_unreachable kicked in. The code pattern
being spotted is rather non-canonical for 128-bit MLAs, but it can
happen and there's no point in generating sub-optimal code for it just
because it looks odd.

Should fix PR19332.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205615 91177308-0d34-0410-b5e6-96231b3b80d8
Tim Northover 2014-04-04 09:03:02 +00:00
parent a669270654
commit 604dff27c9
2 changed files with 35 additions and 3 deletions
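To make the commit message concrete, here is a minimal LLVM IR sketch of the non-canonical pattern being spotted. It is not part of the patch and the function and value names are invented; it simply mirrors the shape of the new regression tests in the second diff below. The lane operand is first narrowed to the upper half of a 128-bit vector and then splatted back out to 128 bits, so the multiply-accumulate itself is 128-bit even though the splat went through a 64-bit value:

; Illustrative only -- not from the patch; the real regression tests are in
; the test-file diff below.
define <8 x i16> @high_half_splat_mla(<8 x i16> %acc, <8 x i16> %x, <8 x i16> %y) {
entry:
  ; Take the upper half of %y, then splat one of its lanes back out to 8 x i16.
  %hi = shufflevector <8 x i16> %y, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %splat = shufflevector <4 x i16> %hi, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  ; 128-bit multiply-accumulate using that splat; with the new switch cases in
  ; the diff below it selects to a single lane-indexed MLA instead of hitting
  ; the llvm_unreachable the commit message mentions.
  %mul = mul <8 x i16> %splat, %x
  %add = add <8 x i16> %mul, %acc
  ret <8 x i16> %add
}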


@@ -431,9 +431,9 @@ static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
   return true;
 }
 
-/// SelectMLAV64LaneV128 - ARM64 supports 64-bit vector MLAs (v4i16 and v2i32)
-/// where one multiplicand is a lane in the upper half of a 128-bit vector.
-/// Recognize and select this so that we don't emit unnecessary lane extracts.
+/// SelectMLAV64LaneV128 - ARM64 supports vector MLAs where one multiplicand is
+/// a lane in the upper half of a 128-bit vector. Recognize and select this so
+/// that we don't emit unnecessary lane extracts.
 SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
   SDValue Op0 = N->getOperand(0);
   SDValue Op1 = N->getOperand(1);
@@ -463,9 +463,15 @@ SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
   case MVT::v4i16:
     MLAOpc = ARM64::MLAv4i16_indexed;
     break;
+  case MVT::v8i16:
+    MLAOpc = ARM64::MLAv8i16_indexed;
+    break;
   case MVT::v2i32:
     MLAOpc = ARM64::MLAv2i32_indexed;
     break;
+  case MVT::v4i32:
+    MLAOpc = ARM64::MLAv4i32_indexed;
+    break;
   }
 
   return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
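For contrast with the two new cases above, here is a sketch of the 64-bit form that SelectMLAV64LaneV128 already handled, as described by the old doc comment. Again this is illustrative only and not from the patch; the names are invented. The new v8i16 and v4i32 cases extend the same opcode mapping to MLAs that are themselves 128-bit, so selection no longer dies on the llvm_unreachable the commit message mentions:

; Illustrative only -- the 64-bit case the old doc comment describes: a v4i16
; MLA whose splatted multiplicand is a lane in the upper half of a 128-bit
; vector.
define <4 x i16> @mla_v4i16_high_laneq(<4 x i16> %acc, <4 x i16> %x, <8 x i16> %y) {
entry:
  ; Splat lane 6 of the 128-bit %y (a lane in its upper half) down to v4i16.
  %splat = shufflevector <8 x i16> %y, <8 x i16> undef, <4 x i32> <i32 6, i32 6, i32 6, i32 6>
  ; 64-bit multiply-accumulate; this is the shape that already mapped to
  ; MLAv4i16_indexed, avoiding an explicit lane extract.
  %mul = mul <4 x i16> %splat, %x
  %add = add <4 x i16> %mul, %acc
  ret <4 x i16> %add
}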


@@ -1598,6 +1598,32 @@ entry:
   ret <2 x i32> %add
 }
 
+define <8 x i16> @not_really_vmlaq_laneq_s16_test(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone ssp {
+entry:
+; CHECK: not_really_vmlaq_laneq_s16_test
+; CHECK-NOT: ext
+; CHECK: mla.8h v0, v1, v2[5]
+; CHECK-NEXT: ret
+  %shuffle1 = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle2 = shufflevector <4 x i16> %shuffle1, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %mul = mul <8 x i16> %shuffle2, %b
+  %add = add <8 x i16> %mul, %a
+  ret <8 x i16> %add
+}
+
+define <4 x i32> @not_really_vmlaq_laneq_s32_test(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone ssp {
+entry:
+; CHECK: not_really_vmlaq_laneq_s32_test
+; CHECK-NOT: ext
+; CHECK: mla.4s v0, v1, v2[3]
+; CHECK-NEXT: ret
+  %shuffle1 = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle2 = shufflevector <2 x i32> %shuffle1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %mul = mul <4 x i32> %shuffle2, %b
+  %add = add <4 x i32> %mul, %a
+  ret <4 x i32> %add
+}
+
 define <4 x i32> @vmull_laneq_s16_test(<4 x i16> %a, <8 x i16> %b) nounwind readnone ssp {
 entry:
 ; CHECK: vmull_laneq_s16_test