From e1819eb4e384bbd65a7ec3320142f7c885ba175d Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Fri, 10 Jul 2015 22:17:40 +0000
Subject: [PATCH] DAGCombiner: Assume invariant load cannot alias a store

The motivation is to allow GatherAllAliases / FindBetterChain to not
give up on dependent loads of a pointer from constant memory. This is
important for AMDGPU, because most loads are pointers derived from a
load of a kernel argument from constant memory.

llvm-svn: 241948
---
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp        |  9 +++++
 .../AMDGPU/invariant-load-no-alias-store.ll     | 35 +++++++++++++++++++
 2 files changed, 44 insertions(+)
 create mode 100644 test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index e05714342d2..52d620b1d54 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -13844,6 +13844,15 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
   // If they are both volatile then they cannot be reordered.
   if (Op0->isVolatile() && Op1->isVolatile())
     return true;
+  // If one operation reads from invariant memory, and the other may store, they
+  // cannot alias. These should really be checking the equivalent of mayWrite,
+  // but it only matters for memory nodes other than load/store.
+  if (Op0->isInvariant() && Op1->writeMem())
+    return false;
+
+  if (Op1->isInvariant() && Op0->writeMem())
+    return false;
+
   // Gather base node and offset information.
   SDValue Base1, Base2;
   int64_t Offset1, Offset2;
diff --git a/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll b/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll
new file mode 100644
index 00000000000..2a01a621fc4
--- /dev/null
+++ b/test/CodeGen/AMDGPU/invariant-load-no-alias-store.ll
@@ -0,0 +1,35 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GatherAllAliases gives up on trying to analyze cases where the
+; pointer may have been loaded from an aliased store, so make sure
+; that this works and allows moving the stores to a better chain to
+; allow them to be merged when it's clear the pointer is loaded
+; from constant/invariant memory.
+
+; GCN-LABEL: {{^}}test_merge_store_constant_i16_invariant_global_pointer_load:
+; GCN: buffer_load_dwordx2 [[PTR:v\[[0-9]+:[0-9]+\]]],
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x1c8007b
+; GCN: buffer_store_dword [[K]], [[PTR]]
+define void @test_merge_store_constant_i16_invariant_global_pointer_load(i16 addrspace(1)* addrspace(1)* dereferenceable(4096) nonnull %in) #0 {
+  %ptr = load i16 addrspace(1)*, i16 addrspace(1)* addrspace(1)* %in, !invariant.load !0
+  %ptr.1 = getelementptr i16, i16 addrspace(1)* %ptr, i64 1
+  store i16 123, i16 addrspace(1)* %ptr, align 4
+  store i16 456, i16 addrspace(1)* %ptr.1
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_merge_store_constant_i16_invariant_constant_pointer_load:
+; GCN: s_load_dwordx2 s{{\[}}[[SPTR_LO:[0-9]+]]:[[SPTR_HI:[0-9]+]]{{\]}}
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x1c8007b
+; GCN: buffer_store_dword [[K]], s{{\[}}[[SPTR_LO]]:
+define void @test_merge_store_constant_i16_invariant_constant_pointer_load(i16 addrspace(1)* addrspace(2)* dereferenceable(4096) nonnull %in) #0 {
+  %ptr = load i16 addrspace(1)*, i16 addrspace(1)* addrspace(2)* %in, !invariant.load !0
+  %ptr.1 = getelementptr i16, i16 addrspace(1)* %ptr, i64 1
+  store i16 123, i16 addrspace(1)* %ptr, align 4
+  store i16 456, i16 addrspace(1)* %ptr.1
+  ret void
+}
+
+!0 = !{}
+
+attributes #0 = { nounwind }
\ No newline at end of file
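
For contrast, a minimal IR sketch of the case the new early-out does not cover (this is not part of the patch, and the function name @no_invariant_metadata is made up): without !invariant.load on the pointer load, isAlias cannot take the new isInvariant()/writeMem() fast path, so the combiner still has to assume the i16 stores through %ptr may clobber the slot at %in that %ptr was loaded from, and the two stores would not be expected to merge into a single dword store.

; Hypothetical contrast case, not from the patch: same store pattern, but the
; pointer load carries no !invariant.load metadata, so the new check in
; DAGCombiner::isAlias does not fire and conservative chain handling is kept.
define void @no_invariant_metadata(i16 addrspace(1)* addrspace(1)* %in) {
  %ptr = load i16 addrspace(1)*, i16 addrspace(1)* addrspace(1)* %in
  %ptr.1 = getelementptr i16, i16 addrspace(1)* %ptr, i64 1
  store i16 123, i16 addrspace(1)* %ptr, align 4
  store i16 456, i16 addrspace(1)* %ptr.1
  ret void
}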