[AArch64] Improve load/store optimizer to handle LDUR + LDR.

This patch allows mixing scaled and unscaled loads and stores when forming
load/store pairs.

This is a reapplication of r259812, which had an incorrect assert.  The
test_stur_str_no_assert() test is a reduced version of the issue hit in
the AArch64 self-host.

PR24465

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@260523 91177308-0d34-0410-b5e6-96231b3b80d8
Chad Rosier 2016-02-11 14:25:08 +00:00
parent a63bae5730
commit ac5172baad
2 changed files with 193 additions and 11 deletions
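Before the diff, a rough, self-contained sketch of the decision this patch enables: an LDR/STR immediate is scaled (counted in elements of the access size), an LDUR/STUR immediate is unscaled (counted in bytes), and the pass normalizes the two to the same units before checking that the accesses are adjacent and that the pair offset fits the LDP/STP signed 7-bit scaled immediate. The helper below is made up for illustration and is not the pass's code:

// Standalone sketch: decide whether a scaled access and an unscaled access
// off the same base register could be rewritten as a single LDP/STP.
#include <cassert>
#include <cstdlib>

static bool canPair(int ScaledElemOffset, int UnscaledByteOffset,
                    int MemSize /* bytes per element, e.g. 8 */) {
  // The unscaled byte offset must be a multiple of the access size.
  if (UnscaledByteOffset % MemSize != 0)
    return false;
  int A = ScaledElemOffset;
  int B = UnscaledByteOffset / MemSize;         // now both in element units
  if (std::abs(A - B) != 1)                     // the accesses must be adjacent
    return false;
  int PairOffset = A < B ? A : B;               // the pair addresses the lower slot
  return PairOffset >= -64 && PairOffset <= 63; // signed 7-bit scaled immediate
}

int main() {
  // ldr x1, [x0, #8] (element offset 1) + ldur x2, [x0, #16] (byte offset 16)
  // -> ldp x1, x2, [x0, #8]
  assert(canPair(1, 16, 8));
  // ldr q0, [x0] + ldur q1, [x0, #24]: 24 is not a multiple of 16, so no pair
  // (this is the test_ldrq_ldruq_invalidoffset case below).
  assert(!canPair(0, 24, 16));
  return 0;
}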


@@ -259,6 +259,10 @@ static bool isNarrowLoad(MachineInstr *MI) {
return isNarrowLoad(MI->getOpcode());
}
static bool isNarrowLoadOrStore(unsigned Opc) {
return isNarrowLoad(Opc) || isNarrowStore(Opc);
}
// Scaling factor for unscaled load or store.
static int getMemScale(MachineInstr *MI) {
switch (MI->getOpcode()) {
@@ -825,10 +829,28 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
const MachineOperand &BaseRegOp =
MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);
int Offset = getLdStOffsetOp(I).getImm();
int PairedOffset = getLdStOffsetOp(Paired).getImm();
bool PairedIsUnscaled = isUnscaledLdSt(Paired->getOpcode());
if (IsUnscaled != PairedIsUnscaled) {
// We're trying to pair instructions that differ in how they are scaled. If
// I is scaled then scale the offset of Paired accordingly. Otherwise, do
// the opposite (i.e., make Paired's offset unscaled).
int MemSize = getMemScale(Paired);
if (PairedIsUnscaled) {
// If the unscaled offset isn't a multiple of the MemSize, we can't
// pair the operations together.
assert(!(PairedOffset % getMemScale(Paired)) &&
"Offset should be a multiple of the stride!");
PairedOffset /= MemSize;
} else {
PairedOffset *= MemSize;
}
}
// Which register is Rt and which is Rt2 depends on the offset order.
MachineInstr *RtMI, *Rt2MI;
if (getLdStOffsetOp(I).getImm() ==
getLdStOffsetOp(Paired).getImm() + OffsetStride) {
if (Offset == PairedOffset + OffsetStride) {
RtMI = Paired;
Rt2MI = I;
// Here we swapped the assumption made for SExtIdx.
@@ -841,10 +863,11 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
Rt2MI = Paired;
}
int OffsetImm = getLdStOffsetOp(RtMI).getImm();
// Handle Unscaled.
if (IsUnscaled) {
assert (!(OffsetImm % OffsetStride) && "Unscaled offset cannot be scaled.");
OffsetImm /= OffsetStride;
// Scale the immediate offset, if necessary.
if (isUnscaledLdSt(RtMI->getOpcode())) {
assert(!(OffsetImm % getMemScale(RtMI)) &&
"Unscaled offset cannot be scaled.");
OffsetImm /= getMemScale(RtMI);
}
// Construct the new instruction.
@@ -1039,9 +1062,13 @@ static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
// Convert the byte-offset used by unscaled into an "element" offset used
// by the scaled pair load/store instructions.
if (IsUnscaled)
if (IsUnscaled) {
// If the byte-offset isn't a multiple of the stride, there's no point
// trying to match it.
if (Offset % OffsetStride)
return false;
Offset /= OffsetStride;
}
return Offset <= 63 && Offset >= -64;
}
@@ -1148,7 +1175,21 @@ static bool canMergeOpc(unsigned OpcA, unsigned OpcB, LdStPairFlags &Flags) {
Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
return true;
}
return false;
// If the second instruction isn't even a load/store, bail out.
if (!PairIsValidLdStrOpc)
return false;
// FIXME: We don't support merging narrow loads/stores with mixed
// scaled/unscaled offsets.
if (isNarrowLoadOrStore(OpcA) || isNarrowLoadOrStore(OpcB))
return false;
// Try to match an unscaled load/store with a scaled load/store.
return isUnscaledLdSt(OpcA) != isUnscaledLdSt(OpcB) &&
getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);
// FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
}
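A minimal sketch of the opcode-compatibility check that the canMergeOpc() change above introduces, using a tiny hypothetical opcode enum rather than the real AArch64 MI opcodes: a scaled and an unscaled instruction may merge when they map to the same pair opcode.

// Sketch only: the enum values, helpers, and their behaviour are illustrative.
#include <cassert>

enum Opc { LDRXui, LDURXi, STRXui, STURXi, LDPXi, STPXi, INVALID };

static bool isUnscaled(Opc O) { return O == LDURXi || O == STURXi; }

static Opc matchingPairOpc(Opc O) {
  switch (O) {
  case LDRXui: case LDURXi: return LDPXi;  // 64-bit loads pair as LDP
  case STRXui: case STURXi: return STPXi;  // 64-bit stores pair as STP
  default:                  return INVALID;
  }
}

// Mirrors the new tail of canMergeOpc(): allow a scaled/unscaled mix as long
// as both instructions would use the same pair opcode.
static bool canMergeMixed(Opc A, Opc B) {
  return isUnscaled(A) != isUnscaled(B) &&
         matchingPairOpc(A) == matchingPairOpc(B);
}

int main() {
  assert(canMergeMixed(LDRXui, LDURXi));  // LDR + LDUR -> LDP
  assert(!canMergeMixed(LDRXui, STURXi)); // load/store mix does not pair here
  assert(!canMergeMixed(LDRXui, LDRXui)); // both scaled: handled elsewhere
  return 0;
}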
/// Scan the instructions looking for a load/store that can be combined with the
@@ -1204,6 +1245,23 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// final offset must be in range.
unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
int MIOffset = getLdStOffsetOp(MI).getImm();
bool MIIsUnscaled = isUnscaledLdSt(MI);
if (IsUnscaled != MIIsUnscaled) {
// We're trying to pair instructions that differ in how they are scaled.
// If FirstMI is scaled then scale the offset of MI accordingly.
// Otherwise, do the opposite (i.e., make MI's offset unscaled).
int MemSize = getMemScale(MI);
if (MIIsUnscaled) {
// If the unscaled offset isn't a multiple of the MemSize, we can't
// pair the operations together: bail and keep looking.
if (MIOffset % MemSize)
continue;
MIOffset /= MemSize;
} else {
MIOffset *= MemSize;
}
}
if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
(Offset + OffsetStride == MIOffset))) {
int MinOffset = Offset < MIOffset ? Offset : MIOffset;
@@ -1214,10 +1272,9 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
return E;
// If the resultant immediate offset of merging these instructions
// is out of range for a pairwise instruction, bail and keep looking.
bool MIIsUnscaled = isUnscaledLdSt(MI);
bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
if (!IsNarrowLoad &&
!inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
MemInsns.push_back(MI);
continue;

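The findMatchingInsn() change above bails out and keeps scanning when a candidate's unscaled byte offset is not a multiple of the access size, rather than asserting; that is the situation the test_stur_str_no_assert case in the new test file below exercises. A minimal sketch of that control flow, with made-up candidates and 8-byte accesses:

// Sketch only: scan candidates after a scaled 8-byte load at element offset 0
// and report the first one that can pair with it.
#include <cstdio>
#include <vector>

struct MemOp {
  bool Unscaled; // LDUR/STUR-style byte offset vs. LDR/STR-style element offset
  int Offset;    // byte offset if Unscaled, element offset otherwise
};

int main() {
  const int MemSize = 8;     // access size in bytes
  const int FirstOffset = 0; // starting instruction: ldr x1, [x0]
  std::vector<MemOp> Candidates = {
      {true, 12}, // ldur x2, [x0, #12]: 12 % 8 != 0 -> bail and keep looking
      {true, 8},  // ldur x2, [x0, #8]:  element offset 1 -> adjacent, pair
  };

  for (const MemOp &MI : Candidates) {
    int MIOffset = MI.Offset;
    if (MI.Unscaled) {
      if (MIOffset % MemSize) // can't pair this one; keep scanning
        continue;
      MIOffset /= MemSize;    // normalize to element units
    }
    if (MIOffset == FirstOffset + 1 || MIOffset + 1 == FirstOffset) {
      std::printf("pair found at element offset %d\n", MIOffset);
      break;
    }
  }
  return 0;
}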

@@ -0,0 +1,125 @@
; RUN: llc < %s -march=aarch64 -aarch64-neon-syntax=apple -aarch64-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s
; CHECK-LABEL: test_strd_sturd:
; CHECK-NEXT: stp d0, d1, [x0, #-8]
; CHECK-NEXT: ret
define void @test_strd_sturd(float* %ptr, <2 x float> %v1, <2 x float> %v2) #0 {
%tmp1 = bitcast float* %ptr to <2 x float>*
store <2 x float> %v2, <2 x float>* %tmp1, align 16
%add.ptr = getelementptr inbounds float, float* %ptr, i64 -2
%tmp = bitcast float* %add.ptr to <2 x float>*
store <2 x float> %v1, <2 x float>* %tmp, align 16
ret void
}
; CHECK-LABEL: test_sturd_strd:
; CHECK-NEXT: stp d0, d1, [x0, #-8]
; CHECK-NEXT: ret
define void @test_sturd_strd(float* %ptr, <2 x float> %v1, <2 x float> %v2) #0 {
%add.ptr = getelementptr inbounds float, float* %ptr, i64 -2
%tmp = bitcast float* %add.ptr to <2 x float>*
store <2 x float> %v1, <2 x float>* %tmp, align 16
%tmp1 = bitcast float* %ptr to <2 x float>*
store <2 x float> %v2, <2 x float>* %tmp1, align 16
ret void
}
; CHECK-LABEL: test_strq_sturq:
; CHECK-NEXT: stp q0, q1, [x0, #-16]
; CHECK-NEXT: ret
define void @test_strq_sturq(double* %ptr, <2 x double> %v1, <2 x double> %v2) #0 {
%tmp1 = bitcast double* %ptr to <2 x double>*
store <2 x double> %v2, <2 x double>* %tmp1, align 16
%add.ptr = getelementptr inbounds double, double* %ptr, i64 -2
%tmp = bitcast double* %add.ptr to <2 x double>*
store <2 x double> %v1, <2 x double>* %tmp, align 16
ret void
}
; CHECK-LABEL: test_sturq_strq:
; CHECK-NEXT: stp q0, q1, [x0, #-16]
; CHECK-NEXT: ret
define void @test_sturq_strq(double* %ptr, <2 x double> %v1, <2 x double> %v2) #0 {
%add.ptr = getelementptr inbounds double, double* %ptr, i64 -2
%tmp = bitcast double* %add.ptr to <2 x double>*
store <2 x double> %v1, <2 x double>* %tmp, align 16
%tmp1 = bitcast double* %ptr to <2 x double>*
store <2 x double> %v2, <2 x double>* %tmp1, align 16
ret void
}
; CHECK-LABEL: test_ldrx_ldurx:
; CHECK-NEXT: ldp [[V0:x[0-9]+]], [[V1:x[0-9]+]], [x0, #-8]
; CHECK-NEXT: add x0, [[V0]], [[V1]]
; CHECK-NEXT: ret
define i64 @test_ldrx_ldurx(i64* %p) #0 {
%tmp = load i64, i64* %p, align 4
%add.ptr = getelementptr inbounds i64, i64* %p, i64 -1
%tmp1 = load i64, i64* %add.ptr, align 4
%add = add nsw i64 %tmp1, %tmp
ret i64 %add
}
; CHECK-LABEL: test_ldurx_ldrx:
; CHECK-NEXT: ldp [[V0:x[0-9]+]], [[V1:x[0-9]+]], [x0, #-8]
; CHECK-NEXT: add x0, [[V0]], [[V1]]
; CHECK-NEXT: ret
define i64 @test_ldurx_ldrx(i64* %p) #0 {
%add.ptr = getelementptr inbounds i64, i64* %p, i64 -1
%tmp1 = load i64, i64* %add.ptr, align 4
%tmp = load i64, i64* %p, align 4
%add = add nsw i64 %tmp1, %tmp
ret i64 %add
}
; CHECK-LABEL: test_ldrsw_ldursw:
; CHECK-NEXT: ldpsw [[V0:x[0-9]+]], [[V1:x[0-9]+]], [x0, #-4]
; CHECK-NEXT: add x0, [[V0]], [[V1]]
; CHECK-NEXT: ret
define i64 @test_ldrsw_ldursw(i32* %p) #0 {
%tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 -1
%tmp1 = load i32, i32* %add.ptr, align 4
%sexttmp = sext i32 %tmp to i64
%sexttmp1 = sext i32 %tmp1 to i64
%add = add nsw i64 %sexttmp1, %sexttmp
ret i64 %add
}
; Also make sure we only match valid offsets.
; CHECK-LABEL: test_ldrq_ldruq_invalidoffset:
; CHECK-NEXT: ldr q[[V0:[0-9]+]], [x0]
; CHECK-NEXT: ldur q[[V1:[0-9]+]], [x0, #24]
; CHECK-NEXT: add.2d v0, v[[V0]], v[[V1]]
; CHECK-NEXT: ret
define <2 x i64> @test_ldrq_ldruq_invalidoffset(i64* %p) #0 {
%a1 = bitcast i64* %p to <2 x i64>*
%tmp1 = load <2 x i64>, <2 x i64>* %a1, align 8
%add.ptr2 = getelementptr inbounds i64, i64* %p, i64 3
%a2 = bitcast i64* %add.ptr2 to <2 x i64>*
%tmp2 = load <2 x i64>, <2 x i64>* %a2, align 8
%add = add nsw <2 x i64> %tmp1, %tmp2
ret <2 x i64> %add
}
; Pair an unscaled store with a scaled store where the scaled store has a
; non-zero offset. This should not hit an assert.
; CHECK-LABEL: test_stur_str_no_assert:
; CHECK: stp xzr, xzr, [sp, #16]
; CHECK: ret
define void @test_stur_str_no_assert() #0 {
entry:
%a1 = alloca i64, align 4
%a2 = alloca [12 x i8], align 4
%0 = bitcast i64* %a1 to i8*
%C = getelementptr inbounds [12 x i8], [12 x i8]* %a2, i64 0, i64 4
%1 = bitcast i8* %C to i64*
store i64 0, i64* %1, align 4
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i32 8, i1 false)
ret void
}
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
attributes #0 = { nounwind }