[FastISel][AArch64] Try to fold the offset into the add instruction when simplifying a memory address.

Small optimization in 'simplifyAddress'. When the offset cannot be encoded in
the load/store instruction, we need to materialize the address manually. The
add instruction can encode a wider range of immediates than the load/store
instructions, so this change tries to fold the offset into the add instruction
first, before falling back to materializing the offset in a register.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218031 91177308-0d34-0410-b5e6-96231b3b80d8
Juergen Ributzka 2014-09-18 05:40:47 +00:00
parent ffbd4879eb
commit 22b557d942
2 changed files with 23 additions and 29 deletions
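
To make the encoding ranges concrete, here is a small standalone C++ sketch
(illustrative only; these helpers are not the FastISel API, and the sketch
assumes a 32-bit, 4-byte-scaled access) that checks which offsets fit the
load/store addressing modes versus the ADD-immediate form:

// Illustrative sketch, not LLVM code. Offsets 257 and 16384 (the test cases
// below) fit the add-immediate encoding but not the load/store encodings.
#include <cstdint>
#include <cstdio>

// ADD (immediate): a 12-bit unsigned value, optionally shifted left by 12.
static bool fitsAddImmediate(int64_t Offset) {
  if (Offset < 0)
    return false;
  return Offset < 4096 || ((Offset & 0xfff) == 0 && (Offset >> 12) < 4096);
}

// LDR/STR Wt: 9-bit signed unscaled offset (-256..255), or a 12-bit unsigned
// offset scaled by the 4-byte access size (0..16380 in steps of 4).
static bool fitsLoadStoreOffset(int64_t Offset) {
  bool Unscaled = Offset >= -256 && Offset <= 255;
  bool Scaled = Offset >= 0 && Offset % 4 == 0 && Offset / 4 < 4096;
  return Unscaled || Scaled;
}

int main() {
  const int64_t Offsets[] = {255, 257, 16380, 16384};
  for (int64_t Offset : Offsets)
    std::printf("%6lld: load/store=%d add=%d\n", (long long)Offset,
                fitsLoadStoreOffset(Offset), fitsAddImmediate(Offset));
  return 0;
}

Offsets 257 and 16384 (#4, lsl #12) are rejected by the load/store encodings
but accepted by the add, which is exactly the case the new emitAddSub_ri call
handles before falling back to materializing the constant in a register.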


@@ -918,10 +918,16 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
   // reg+offset into a register.
   if (ImmediateOffsetNeedsLowering) {
     unsigned ResultReg = 0;
-    if (Addr.getReg())
-      ResultReg = fastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(),
-                               /*IsKill=*/false, Offset, MVT::i64);
-    else
+    if (Addr.getReg()) {
+      // Try to fold the immediate into the add instruction.
+      ResultReg = emitAddSub_ri(/*UseAdd=*/true, MVT::i64, Addr.getReg(),
+                                /*IsKill=*/false, Offset);
+      if (!ResultReg) {
+        unsigned ImmReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
+        ResultReg = emitAddSub_rr(/*UseAdd=*/true, MVT::i64, Addr.getReg(),
+                                  /*IsKill=*/false, ImmReg, /*IsKill=*/true);
+      }
+    } else
       ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
     if (!ResultReg)


@@ -154,12 +154,9 @@ define i32 @load_breg_immoff_3(i64 %a) {
 ; Min un-supported unscaled offset
 define i32 @load_breg_immoff_4(i64 %a) {
-; SDAG-LABEL: load_breg_immoff_4
-; SDAG: add [[REG:x[0-9]+]], x0, #257
-; SDAG-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
-; FAST-LABEL: load_breg_immoff_4
-; FAST: add [[REG:x[0-9]+]], x0, {{x[0-9]+}}
-; FAST-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
+; CHECK-LABEL: load_breg_immoff_4
+; CHECK: add [[REG:x[0-9]+]], x0, #257
+; CHECK-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
   %1 = add i64 %a, 257
   %2 = inttoptr i64 %1 to i32*
   %3 = load i32* %2
@@ -178,12 +175,9 @@ define i32 @load_breg_immoff_5(i64 %a) {
 ; Min un-supported scaled offset
 define i32 @load_breg_immoff_6(i64 %a) {
-; SDAG-LABEL: load_breg_immoff_6
-; SDAG: add [[REG:x[0-9]+]], x0, #4, lsl #12
-; SDAG-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
-; FAST-LABEL: load_breg_immoff_6
-; FAST: add [[REG:x[0-9]+]], x0, {{x[0-9]+}}
-; FAST-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
+; CHECK-LABEL: load_breg_immoff_6
+; CHECK: add [[REG:x[0-9]+]], x0, #4, lsl #12
+; CHECK-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
   %1 = add i64 %a, 16384
   %2 = inttoptr i64 %1 to i32*
   %3 = load i32* %2
@@ -226,12 +220,9 @@ define void @store_breg_immoff_3(i64 %a) {
 ; Min un-supported unscaled offset
 define void @store_breg_immoff_4(i64 %a) {
-; SDAG-LABEL: store_breg_immoff_4
-; SDAG: add [[REG:x[0-9]+]], x0, #257
-; SDAG-NEXT: str wzr, {{\[}}[[REG]]{{\]}}
-; FAST-LABEL: store_breg_immoff_4
-; FAST: add [[REG:x[0-9]+]], x0, {{x[0-9]+}}
-; FAST-NEXT: str wzr, {{\[}}[[REG]]{{\]}}
+; CHECK-LABEL: store_breg_immoff_4
+; CHECK: add [[REG:x[0-9]+]], x0, #257
+; CHECK-NEXT: str wzr, {{\[}}[[REG]]{{\]}}
   %1 = add i64 %a, 257
   %2 = inttoptr i64 %1 to i32*
   store i32 0, i32* %2
@@ -250,12 +241,9 @@ define void @store_breg_immoff_5(i64 %a) {
 ; Min un-supported scaled offset
 define void @store_breg_immoff_6(i64 %a) {
-; SDAG-LABEL: store_breg_immoff_6
-; SDAG: add [[REG:x[0-9]+]], x0, #4, lsl #12
-; SDAG-NEXT: str wzr, {{\[}}[[REG]]{{\]}}
-; FAST-LABEL: store_breg_immoff_6
-; FAST: add [[REG:x[0-9]+]], x0, {{x[0-9]+}}
-; FAST-NEXT: str wzr, {{\[}}[[REG]]{{\]}}
+; CHECK-LABEL: store_breg_immoff_6
+; CHECK: add [[REG:x[0-9]+]], x0, #4, lsl #12
+; CHECK-NEXT: str wzr, {{\[}}[[REG]]{{\]}}
   %1 = add i64 %a, 16384
   %2 = inttoptr i64 %1 to i32*
   store i32 0, i32* %2
@@ -319,7 +307,7 @@ define i64 @load_breg_offreg_immoff_2(i64 %a, i64 %b) {
 ; SDAG-NEXT: add [[REG2:x[0-9]+]], [[REG1]], #15, lsl #12
 ; SDAG-NEXT: ldr x0, {{\[}}[[REG2]]{{\]}}
 ; FAST-LABEL: load_breg_offreg_immoff_2
-; FAST: add [[REG:x[0-9]+]], x0, {{x[0-9]+}}
+; FAST: add [[REG:x[0-9]+]], x0, #15, lsl #12
 ; FAST-NEXT: ldr x0, {{\[}}[[REG]], x1{{\]}}
   %1 = add i64 %a, %b
   %2 = add i64 %1, 61440