[SDAG] Teach Chain Analysis about BaseIndexOffset addressing.

While FindBetterNeighborChains already uses BaseIndexOffset to
recognize that two accesses share nearly the same address and should
be improved together, isAlias still relies on the non-index-aware
FindBaseOffset. Adding an equivalent BaseIndexOffset check to isAlias
allows indexed stores to be merged.

FindBaseOffset will be excised in a subsequent patch.
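Roughly, the new check treats each access as a (base, index, constant offset)
triple and proves non-aliasing when the byte ranges are disjoint. A minimal
standalone sketch of that rule (the struct and helper below are illustrative
only, not the SelectionDAG types):

#include <cstdint>

// Illustrative stand-in for a BaseIndexOffset-style decomposition: an access
// address split into a base node, an optional index node, and a constant
// byte offset.
struct AddrParts {
  const void *Base;
  const void *Index;
  int64_t Offset;
};

// Two accesses with identical base and index may alias only if their byte
// intervals [Offset, Offset + Size) intersect.
static bool mayAlias(const AddrParts &A, unsigned SizeA,
                     const AddrParts &B, unsigned SizeB) {
  if (A.Base != B.Base || A.Index != B.Index)
    return true; // bases differ: cannot prove anything, stay conservative
  return !(A.Offset + SizeA <= B.Offset || B.Offset + SizeB <= A.Offset);
}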

Reviewers: jyknight, aditya_nandakumar, bogner

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D31987

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@301187 91177308-0d34-0410-b5e6-96231b3b80d8
Commit: a43d9cd666 (parent 8073bf67dd)
Author: Nirav Dave
Date: 2017-04-24 15:37:20 +0000
3 changed files with 39 additions and 36 deletions


@@ -16088,6 +16088,19 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
if (Op1->isInvariant() && Op0->writeMem())
return false;
unsigned NumBytes0 = Op0->getMemoryVT().getSizeInBits() >> 3;
unsigned NumBytes1 = Op1->getMemoryVT().getSizeInBits() >> 3;
// Check for BaseIndexOffset matching.
BaseIndexOffset BasePtr0 = BaseIndexOffset::match(Op0->getBasePtr(), DAG);
BaseIndexOffset BasePtr1 = BaseIndexOffset::match(Op1->getBasePtr(), DAG);
if (BasePtr0.equalBaseIndex(BasePtr1))
return !((BasePtr0.Offset + NumBytes0 <= BasePtr1.Offset) ||
(BasePtr1.Offset + NumBytes1 <= BasePtr0.Offset));
// FIXME: findBaseOffset and the ConstantValue/GlobalValue/FrameIndex analysis
// below should be modified to use BaseIndexOffset.
// Gather base node and offset information.
SDValue Base0, Base1;
int64_t Offset0, Offset1;
@@ -16099,8 +16112,6 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
Base1, Offset1, GV1, CV1);
// If they have the same base address, then check to see if they overlap.
unsigned NumBytes0 = Op0->getMemoryVT().getSizeInBits() >> 3;
unsigned NumBytes1 = Op1->getMemoryVT().getSizeInBits() >> 3;
if (Base0 == Base1 || (GV0 && (GV0 == GV1)) || (CV0 && (CV0 == CV1)))
return !((Offset0 + NumBytes0) <= Offset1 ||
(Offset1 + NumBytes1) <= Offset0);
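
As a concrete instance of the rule added above: two 4-byte stores at offsets
0 and 4 from the same decomposed base/index never overlap, while stores at
offsets 0 and 2 do. A tiny standalone sanity check (hypothetical helper name,
not LLVM code):

#include <cassert>
#include <cstdint>

// Disjointness rule from the check above: same base and index, and the byte
// ranges [Offset, Offset + NumBytes) do not intersect.
static bool disjoint(int64_t Off0, unsigned NumBytes0,
                     int64_t Off1, unsigned NumBytes1) {
  return Off0 + NumBytes0 <= Off1 || Off1 + NumBytes1 <= Off0;
}

int main() {
  assert(disjoint(0, 4, 4, 4));  // e.g. (%ecx) and 4(%ecx): adjacent, no alias
  assert(!disjoint(0, 4, 2, 4)); // e.g. (%ecx) and 2(%ecx): these overlap
  return 0;
}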


@@ -12,34 +12,35 @@ define void @add(i256* %p, i256* %q) nounwind {
; X32-NEXT: subl $12, %esp
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl 8(%ecx), %edx
; X32-NEXT: movl (%ecx), %ebx
; X32-NEXT: movl 4(%ecx), %edi
; X32-NEXT: movl 8(%ecx), %edi
; X32-NEXT: movl (%ecx), %edx
; X32-NEXT: movl 4(%ecx), %ebx
; X32-NEXT: movl 28(%eax), %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl 24(%eax), %ebp
; X32-NEXT: addl (%eax), %ebx
; X32-NEXT: adcl 4(%eax), %edi
; X32-NEXT: adcl 8(%eax), %edx
; X32-NEXT: addl (%eax), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl 20(%eax), %esi
; X32-NEXT: adcl 4(%eax), %ebx
; X32-NEXT: adcl 8(%eax), %edi
; X32-NEXT: movl %edi, (%esp) # 4-byte Spill
; X32-NEXT: movl 20(%eax), %edi
; X32-NEXT: movl 12(%eax), %edx
; X32-NEXT: movl 16(%eax), %eax
; X32-NEXT: movl 16(%eax), %esi
; X32-NEXT: adcl 12(%ecx), %edx
; X32-NEXT: adcl 16(%ecx), %eax
; X32-NEXT: adcl 20(%ecx), %esi
; X32-NEXT: adcl 24(%ecx), %ebp
; X32-NEXT: movl %ebp, (%esp) # 4-byte Spill
; X32-NEXT: adcl 16(%ecx), %esi
; X32-NEXT: adcl 20(%ecx), %edi
; X32-NEXT: movl %ebp, %eax
; X32-NEXT: adcl 24(%ecx), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp # 4-byte Reload
; X32-NEXT: adcl %ebp, 28(%ecx)
; X32-NEXT: movl (%esp), %ebp # 4-byte Reload
; X32-NEXT: movl %ebp, 8(%ecx)
; X32-NEXT: movl %ebx, 4(%ecx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, (%ecx)
; X32-NEXT: movl %edi, 4(%ecx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: movl %edi, 8(%ecx)
; X32-NEXT: movl %edx, 12(%ecx)
; X32-NEXT: movl %eax, 16(%ecx)
; X32-NEXT: movl %esi, 20(%ecx)
; X32-NEXT: movl (%esp), %eax # 4-byte Reload
; X32-NEXT: movl %esi, 16(%ecx)
; X32-NEXT: movl %edi, 20(%ecx)
; X32-NEXT: movl %eax, 24(%ecx)
; X32-NEXT: addl $12, %esp
; X32-NEXT: popl %esi
@@ -58,9 +59,9 @@ define void @add(i256* %p, i256* %q) nounwind {
; X64-NEXT: adcq 8(%rsi), %rdx
; X64-NEXT: adcq 16(%rsi), %rax
; X64-NEXT: adcq %r8, 24(%rdi)
; X64-NEXT: movq %rcx, (%rdi)
; X64-NEXT: movq %rdx, 8(%rdi)
; X64-NEXT: movq %rax, 16(%rdi)
; X64-NEXT: movq %rdx, 8(%rdi)
; X64-NEXT: movq %rcx, (%rdi)
; X64-NEXT: retq
%a = load i256, i256* %p
%b = load i256, i256* %q
@@ -96,9 +97,9 @@ define void @sub(i256* %p, i256* %q) nounwind {
; X32-NEXT: sbbl 24(%esi), %eax
; X32-NEXT: movl 28(%esi), %esi
; X32-NEXT: sbbl %esi, 28(%ecx)
; X32-NEXT: movl %ebx, (%ecx)
; X32-NEXT: movl %ebp, 4(%ecx)
; X32-NEXT: movl %edi, 8(%ecx)
; X32-NEXT: movl %ebp, 4(%ecx)
; X32-NEXT: movl %ebx, (%ecx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 12(%ecx)
; X32-NEXT: movl (%esp), %esi # 4-byte Reload
@@ -122,9 +123,9 @@ define void @sub(i256* %p, i256* %q) nounwind {
; X64-NEXT: sbbq 8(%rsi), %rdx
; X64-NEXT: sbbq 16(%rsi), %rax
; X64-NEXT: sbbq %r8, 24(%rdi)
; X64-NEXT: movq %rcx, (%rdi)
; X64-NEXT: movq %rdx, 8(%rdi)
; X64-NEXT: movq %rax, 16(%rdi)
; X64-NEXT: movq %rdx, 8(%rdi)
; X64-NEXT: movq %rcx, (%rdi)
; X64-NEXT: retq
%a = load i256, i256* %p
%b = load i256, i256* %q


@@ -29,17 +29,8 @@ entry:
ret void
}
;; CHECK-LABEL: indexed-store-merge
;; We should be able to merge the 4 consecutive stores.
;; FIXMECHECK: movl $0, 2(%rsi,%rdi)
;; CHECK: movb $0, 2(%rsi,%rdi)
;; CHECK: movb $0, 3(%rsi,%rdi)
;; CHECK: movb $0, 4(%rsi,%rdi)
;; CHECK: movb $0, 5(%rsi,%rdi)
;; CHECK: movl $0, 2(%rsi,%rdi)
;; CHECK: movb $0, (%rsi)
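
;; With isAlias now understanding the reg+reg address, the four adjacent byte
;; stores at offsets 2 through 5 of %rsi+%rdi form one contiguous 4-byte
;; region, so they can be merged into the single movl the updated test checks
;; for. A standalone sketch of that adjacency reasoning (hypothetical helper,
;; not the DAGCombiner merge logic):

#include <algorithm>
#include <cstdint>
#include <vector>

// A store slice described by its constant offset from a common base/index
// pair and its size in bytes.
struct StoreSlice { int64_t Offset; unsigned Size; };

// Non-overlapping slices that tile a range back-to-back can be rewritten as
// one wider store.
static bool contiguousRun(std::vector<StoreSlice> S) {
  std::sort(S.begin(), S.end(), [](const StoreSlice &A, const StoreSlice &B) {
    return A.Offset < B.Offset;
  });
  for (size_t I = 1; I < S.size(); ++I)
    if (S[I - 1].Offset + S[I - 1].Size != S[I].Offset)
      return false;
  return true;
}

// The test's movb stores at 2, 3, 4, 5(%rsi,%rdi):
// contiguousRun({{2, 1}, {3, 1}, {4, 1}, {5, 1}}) == true -> one movl at 2.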
define void @indexed-store-merge(i64 %p, i8* %v) {
entry: