1
0
mirror of https://github.com/RPCSX/llvm.git synced 2025-03-02 09:58:06 +00:00

Revert r295314 "[DAGCombiner] Support {a|s}ext, {a|z|s}ext load nodes in load combine"

This change causes some of AMDGPU and PowerPC tests to fail.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@295316 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Artur Pilipenko 2017-02-16 13:04:46 +00:00
parent 40edfac454
commit 914d7a67a3
7 changed files with 148 additions and 41 deletions

@ -4446,8 +4446,6 @@ const Optional<ByteProvider> calculateByteProvider(SDValue Op, unsigned Index,
: calculateByteProvider(Op->getOperand(0), Index - ByteShift,
Depth + 1);
}
case ISD::ANY_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND: {
SDValue NarrowOp = Op->getOperand(0);
unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits();
@ -4455,32 +4453,22 @@ const Optional<ByteProvider> calculateByteProvider(SDValue Op, unsigned Index,
return None;
uint64_t NarrowByteWidth = NarrowBitWidth / 8;
if (Index >= NarrowByteWidth)
return Op.getOpcode() == ISD::ZERO_EXTEND
? Optional<ByteProvider>(ByteProvider::getConstantZero())
: None;
else
return calculateByteProvider(NarrowOp, Index, Depth + 1);
return Index >= NarrowByteWidth
? ByteProvider::getConstantZero()
: calculateByteProvider(NarrowOp, Index, Depth + 1);
}
case ISD::BSWAP:
return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1,
Depth + 1);
case ISD::LOAD: {
auto L = cast<LoadSDNode>(Op.getNode());
if (L->isVolatile() || L->isIndexed())
// TODO: support ext loads
if (L->isVolatile() || L->isIndexed() ||
L->getExtensionType() != ISD::NON_EXTLOAD)
return None;
unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits();
if (NarrowBitWidth % 8 != 0)
return None;
uint64_t NarrowByteWidth = NarrowBitWidth / 8;
if (Index >= NarrowByteWidth)
return L->getExtensionType() == ISD::ZEXTLOAD
? Optional<ByteProvider>(ByteProvider::getConstantZero())
: None;
else
return ByteProvider::getMemory(L, Index);
return ByteProvider::getMemory(L, Index);
}
}
@ -4560,6 +4548,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
LoadSDNode *L = P->Load;
assert(L->hasNUsesOfValue(1, 0) && !L->isVolatile() && !L->isIndexed() &&
(L->getExtensionType() == ISD::NON_EXTLOAD) &&
"Must be enforced by calculateByteProvider");
assert(L->getOffset().isUndef() && "Unindexed load must have undef offset");

@ -336,8 +336,11 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) {
; (i32) p[1] | (sext(p[0] << 16) to i32)
define i32 @load_i32_by_sext_i16(i32* %arg) {
; CHECK-LABEL: load_i32_by_sext_i16:
; CHECK: ldr w0, [x0]
; CHECK: ldrh w8, [x0]
; CHECK-NEXT: ldrh w0, [x0, #2]
; CHECK-NEXT: bfi w0, w8, #16, #16
; CHECK-NEXT: ret
%tmp = bitcast i32* %arg to i16*
%tmp1 = load i16, i16* %tmp, align 4
%tmp2 = sext i16 %tmp1 to i32
@ -396,6 +399,7 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
; CHECK-NEXT: ldur w8, [x8, #13]
; CHECK-NEXT: rev w0, w8
; CHECK-NEXT: ret
%tmp = add nuw nsw i32 %i, 4
%tmp2 = add nuw nsw i32 %i, 3
%tmp3 = add nuw nsw i32 %i, 2

@ -324,8 +324,12 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) {
; (i32) p[0] | (sext(p[1] << 16) to i32)
define i32 @load_i32_by_sext_i16(i32* %arg) {
; CHECK-LABEL: load_i32_by_sext_i16:
; CHECK: ldr w0, [x0]
; CHECK: ldrh w8, [x0]
; CHECK-NEXT: ldrh w9, [x0, #2]
; CHECK-NEXT: bfi w8, w9, #16, #16
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
%tmp = bitcast i32* %arg to i16*
%tmp1 = load i16, i16* %tmp, align 4
%tmp2 = zext i16 %tmp1 to i32
@ -382,6 +386,7 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
; CHECK: add x8, x0, w1, uxtw
; CHECK-NEXT: ldur w0, [x8, #13]
; CHECK-NEXT: ret
%tmp = add nuw nsw i32 %i, 4
%tmp2 = add nuw nsw i32 %i, 3
%tmp3 = add nuw nsw i32 %i, 2

@ -847,15 +847,21 @@ define void @test_insertelement(half* %p, <4 x half>* %q, i32 %i) #0 {
}
; CHECK-ALL-LABEL: test_extractelement:
; CHECK-VFP: push {{{.*}}, lr}
; CHECK-VFP: sub sp, sp, #8
; CHECK-VFP: ldrd
; CHECK-VFP: ldrh
; CHECK-VFP: ldrh
; CHECK-VFP: orr
; CHECK-VFP: str
; CHECK-VFP: ldrh
; CHECK-VFP: ldrh
; CHECK-VFP: orr
; CHECK-VFP: str
; CHECK-VFP: mov
; CHECK-VFP: orr
; CHECK-VFP: ldrh
; CHECK-VFP: strh
; CHECK-VFP: add sp, sp, #8
; CHECK-VFP: pop {{{.*}}, pc}
; CHECK-VFP: bx lr
; CHECK-NOVFP: ldrh
; CHECK-NOVFP: strh
; CHECK-NOVFP: ldrh

@ -456,12 +456,17 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) {
; (i32) p[1] | (sext(p[0] << 16) to i32)
define i32 @load_i32_by_sext_i16(i32* %arg) {
; CHECK-LABEL: load_i32_by_sext_i16:
; CHECK: ldr r0, [r0]
; CHECK: ldrh r1, [r0]
; CHECK-NEXT: ldrh r0, [r0, #2]
; CHECK-NEXT: orr r0, r0, r1, lsl #16
; CHECK-NEXT: mov pc, lr
;
; CHECK-ARMv6-LABEL: load_i32_by_sext_i16:
; CHECK-ARMv6: ldr r0, [r0]
; CHECK-ARMv6: ldrh r1, [r0]
; CHECK-ARMv6-NEXT: ldrh r0, [r0, #2]
; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #16
; CHECK-ARMv6-NEXT: bx lr
%tmp = bitcast i32* %arg to i16*
%tmp1 = load i16, i16* %tmp, align 4
%tmp2 = sext i16 %tmp1 to i32

@ -414,12 +414,17 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) {
; (i32) p[0] | (sext(p[1] << 16) to i32)
define i32 @load_i32_by_sext_i16(i32* %arg) {
; CHECK-LABEL: load_i32_by_sext_i16:
; CHECK: ldr r0, [r0]
; CHECK: ldrh r1, [r0, #2]
; CHECK-NEXT: ldrh r0, [r0]
; CHECK-NEXT: orr r0, r0, r1, lsl #16
; CHECK-NEXT: mov pc, lr
;
; CHECK-ARMv6-LABEL: load_i32_by_sext_i16:
; CHECK-ARMv6: ldr r0, [r0]
; CHECK-ARMv6-NEXT: bx lr
; CHECK-ARMv6: ldrh r1, [r0, #2]
; CHECK-ARMv6-NEXT: ldrh r0, [r0]
; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #16
; CHECK-ARMv6-NEXT: bx lr
%tmp = bitcast i32* %arg to i16*
%tmp1 = load i16, i16* %tmp, align 4
%tmp2 = zext i16 %tmp1 to i32
@ -487,6 +492,7 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
; CHECK-ARMv6: add r0, r0, r1
; CHECK-ARMv6-NEXT: ldr r0, [r0, #13]
; CHECK-ARMv6-NEXT: bx lr
%tmp = add nuw nsw i32 %i, 4
%tmp2 = add nuw nsw i32 %i, 3
%tmp3 = add nuw nsw i32 %i, 2

@ -733,8 +733,16 @@ define i32 @load_i32_by_i8_bswap_base_index_offset(i32* %arg, i32 %arg1) {
; CHECK64-LABEL: load_i32_by_i8_bswap_base_index_offset:
; CHECK64: # BB#0:
; CHECK64-NEXT: movslq %esi, %rax
; CHECK64-NEXT: movl (%rdi,%rax), %eax
; CHECK64-NEXT: bswapl %eax
; CHECK64-NEXT: movzbl (%rdi,%rax), %ecx
; CHECK64-NEXT: shll $24, %ecx
; CHECK64-NEXT: movzbl 1(%rdi,%rax), %edx
; CHECK64-NEXT: shll $16, %edx
; CHECK64-NEXT: orl %ecx, %edx
; CHECK64-NEXT: movzbl 2(%rdi,%rax), %ecx
; CHECK64-NEXT: shll $8, %ecx
; CHECK64-NEXT: orl %edx, %ecx
; CHECK64-NEXT: movzbl 3(%rdi,%rax), %eax
; CHECK64-NEXT: orl %ecx, %eax
; CHECK64-NEXT: retq
%tmp = bitcast i32* %arg to i8*
%tmp2 = getelementptr inbounds i8, i8* %tmp, i32 %arg1
@ -827,12 +835,18 @@ define i32 @load_i32_by_sext_i16(i32* %arg) {
; CHECK-LABEL: load_i32_by_sext_i16:
; CHECK: # BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl (%eax), %eax
; CHECK-NEXT: movzwl (%eax), %ecx
; CHECK-NEXT: movzwl 2(%eax), %eax
; CHECK-NEXT: shll $16, %eax
; CHECK-NEXT: orl %ecx, %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_sext_i16:
; CHECK64: # BB#0:
; CHECK64-NEXT: movl (%rdi), %eax
; CHECK64-NEXT: movzwl (%rdi), %ecx
; CHECK64-NEXT: movzwl 2(%rdi), %eax
; CHECK64-NEXT: shll $16, %eax
; CHECK64-NEXT: orl %ecx, %eax
; CHECK64-NEXT: retq
%tmp = bitcast i32* %arg to i16*
%tmp1 = load i16, i16* %tmp, align 1
@ -851,9 +865,24 @@ define i32 @load_i32_by_sext_i16(i32* %arg) {
define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
; CHECK-LABEL: load_i32_by_i8_base_offset_index:
; CHECK: # BB#0:
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .Lcfi4:
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .Lcfi5:
; CHECK-NEXT: .cfi_offset %esi, -8
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl 12(%eax,%ecx), %eax
; CHECK-NEXT: movzbl 12(%eax,%ecx), %edx
; CHECK-NEXT: movzbl 13(%eax,%ecx), %esi
; CHECK-NEXT: shll $8, %esi
; CHECK-NEXT: orl %edx, %esi
; CHECK-NEXT: movzbl 14(%eax,%ecx), %edx
; CHECK-NEXT: shll $16, %edx
; CHECK-NEXT: orl %esi, %edx
; CHECK-NEXT: movzbl 15(%eax,%ecx), %eax
; CHECK-NEXT: shll $24, %eax
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_base_offset_index:
@ -896,9 +925,24 @@ define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
; CHECK: # BB#0:
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .Lcfi6:
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .Lcfi7:
; CHECK-NEXT: .cfi_offset %esi, -8
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl 13(%eax,%ecx), %eax
; CHECK-NEXT: movzbl 13(%eax,%ecx), %edx
; CHECK-NEXT: movzbl 14(%eax,%ecx), %esi
; CHECK-NEXT: shll $8, %esi
; CHECK-NEXT: orl %edx, %esi
; CHECK-NEXT: movzbl 15(%eax,%ecx), %edx
; CHECK-NEXT: shll $16, %edx
; CHECK-NEXT: orl %esi, %edx
; CHECK-NEXT: movzbl 16(%eax,%ecx), %eax
; CHECK-NEXT: shll $24, %eax
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_base_offset_index_2:
@ -952,15 +996,39 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
define i32 @load_i32_by_i8_zaext_loads(i8* %arg, i32 %arg1) {
; CHECK-LABEL: load_i32_by_i8_zaext_loads:
; CHECK: # BB#0:
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .Lcfi8:
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .Lcfi9:
; CHECK-NEXT: .cfi_offset %esi, -8
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl 12(%eax,%ecx), %eax
; CHECK-NEXT: movzbl 12(%eax,%ecx), %edx
; CHECK-NEXT: movzbl 13(%eax,%ecx), %esi
; CHECK-NEXT: shll $8, %esi
; CHECK-NEXT: orl %edx, %esi
; CHECK-NEXT: movzbl 14(%eax,%ecx), %edx
; CHECK-NEXT: shll $16, %edx
; CHECK-NEXT: orl %esi, %edx
; CHECK-NEXT: movzbl 15(%eax,%ecx), %eax
; CHECK-NEXT: shll $24, %eax
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_zaext_loads:
; CHECK64: # BB#0:
; CHECK64-NEXT: movl %esi, %eax
; CHECK64-NEXT: movl 12(%rdi,%rax), %eax
; CHECK64-NEXT: movzbl 12(%rdi,%rax), %ecx
; CHECK64-NEXT: movzbl 13(%rdi,%rax), %edx
; CHECK64-NEXT: shll $8, %edx
; CHECK64-NEXT: orl %ecx, %edx
; CHECK64-NEXT: movzbl 14(%rdi,%rax), %ecx
; CHECK64-NEXT: shll $16, %ecx
; CHECK64-NEXT: orl %edx, %ecx
; CHECK64-NEXT: movzbl 15(%rdi,%rax), %eax
; CHECK64-NEXT: shll $24, %eax
; CHECK64-NEXT: orl %ecx, %eax
; CHECK64-NEXT: retq
%tmp = add nuw nsw i32 %arg1, 3
%tmp2 = add nuw nsw i32 %arg1, 2
@ -1008,15 +1076,39 @@ define i32 @load_i32_by_i8_zaext_loads(i8* %arg, i32 %arg1) {
define i32 @load_i32_by_i8_zsext_loads(i8* %arg, i32 %arg1) {
; CHECK-LABEL: load_i32_by_i8_zsext_loads:
; CHECK: # BB#0:
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .Lcfi10:
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .Lcfi11:
; CHECK-NEXT: .cfi_offset %esi, -8
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl 12(%eax,%ecx), %eax
; CHECK-NEXT: movzbl 12(%eax,%ecx), %edx
; CHECK-NEXT: movzbl 13(%eax,%ecx), %esi
; CHECK-NEXT: shll $8, %esi
; CHECK-NEXT: orl %edx, %esi
; CHECK-NEXT: movzbl 14(%eax,%ecx), %edx
; CHECK-NEXT: shll $16, %edx
; CHECK-NEXT: orl %esi, %edx
; CHECK-NEXT: movsbl 15(%eax,%ecx), %eax
; CHECK-NEXT: shll $24, %eax
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_zsext_loads:
; CHECK64: # BB#0:
; CHECK64-NEXT: movl %esi, %eax
; CHECK64-NEXT: movl 12(%rdi,%rax), %eax
; CHECK64-NEXT: movzbl 12(%rdi,%rax), %ecx
; CHECK64-NEXT: movzbl 13(%rdi,%rax), %edx
; CHECK64-NEXT: shll $8, %edx
; CHECK64-NEXT: orl %ecx, %edx
; CHECK64-NEXT: movzbl 14(%rdi,%rax), %ecx
; CHECK64-NEXT: shll $16, %ecx
; CHECK64-NEXT: orl %edx, %ecx
; CHECK64-NEXT: movsbl 15(%rdi,%rax), %eax
; CHECK64-NEXT: shll $24, %eax
; CHECK64-NEXT: orl %ecx, %eax
; CHECK64-NEXT: retq
%tmp = add nuw nsw i32 %arg1, 3
%tmp2 = add nuw nsw i32 %arg1, 2