From c74d601f15cb0ffa4af209a20b3dd09f15bde7c1 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Mon, 10 Feb 2020 10:30:34 -0500
Subject: [PATCH] AMDGPU: Don't report 2-byte alignment as fast

This is apparently worse than 1-byte alignment. This does not attempt
to decompose 2-byte aligned wide stores, but will stop trying to
produce them.

Also fix a bug in the LoadStoreVectorizer which was decreasing the
alignment when vectorizing stack accesses. It was assuming a stack
object was an alloca that could have its base alignment changed, which
is not true if the pointer is derived from a function argument.
---
 lib/Target/AMDGPU/SIISelLowering.cpp          |   4 +-
 .../Vectorize/LoadStoreVectorizer.cpp         |  12 +-
 test/CodeGen/AMDGPU/chain-hi-to-lo.ll         |  13 +-
 .../fast-unaligned-load-store.global.ll       | 328 ++++++++++++++++++
 .../fast-unaligned-load-store.private.ll      | 245 +++++++++++++
 test/CodeGen/AMDGPU/unaligned-load-store.ll   |  21 ++
 .../AMDGPU/adjust-alloca-alignment.ll         |  51 +++
 .../AMDGPU/merge-stores-private.ll            |  24 +-
 .../AMDGPU/merge-stores.ll                    |  26 +-
 9 files changed, 695 insertions(+), 29 deletions(-)
 create mode 100644 test/CodeGen/AMDGPU/fast-unaligned-load-store.global.ll
 create mode 100644 test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll

diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index b6966e66c36..55003521b8b 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1251,9 +1251,11 @@ bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
   // If we have an uniform constant load, it still requires using a slow
   // buffer instruction if unaligned.
   if (IsFast) {
+    // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so
+    // 2-byte alignment is worse than 1 unless doing a 2-byte access.
     *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
                AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
-               (Align % 4 == 0) : true;
+               Align >= 4 : Align != 2;
   }
 
   return true;
diff --git a/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 3b22f3082c3..8ab03c34335 100644
--- a/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -1028,8 +1028,10 @@ bool Vectorizer::vectorizeStoreChain(
     unsigned NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
                                                    StackAdjustedAlignment,
                                                    DL, S0, nullptr, &DT);
-    if (NewAlign != 0)
+    if (NewAlign >= Alignment.value())
       Alignment = Align(NewAlign);
+    else
+      return false;
   }
 
   if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment.value(), AS)) {
@@ -1168,8 +1170,12 @@ bool Vectorizer::vectorizeLoadChain(
       vectorizeLoadChain(Chains.second, InstructionsProcessed);
     }
 
-    Alignment = getOrEnforceKnownAlignment(
-        L0->getPointerOperand(), StackAdjustedAlignment, DL, L0, nullptr, &DT);
+    unsigned NewAlign = getOrEnforceKnownAlignment(
+        L0->getPointerOperand(), StackAdjustedAlignment, DL, L0, nullptr, &DT);
+    if (NewAlign >= Alignment)
+      Alignment = NewAlign;
+    else
+      return false;
   }
 
   if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment, AS)) {
diff --git a/test/CodeGen/AMDGPU/chain-hi-to-lo.ll b/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
index 9ec8b7573ce..0df32537808 100644
--- a/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
+++ b/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
@@ -199,14 +199,17 @@ define amdgpu_kernel void @vload2_private(i16 addrspace(1)* nocapture readonly %
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-NEXT: v_mov_b32_e32 v2, s4
 ; GCN-NEXT: v_mov_b32_e32 v3, s5
-; GCN-NEXT: global_load_ushort v4, v[2:3], off offset:4
-; GCN-NEXT: global_load_dword v2, v[2:3], off
+; GCN-NEXT: global_load_ushort v4, v[2:3], off
 ; GCN-NEXT: v_mov_b32_e32 v0, s6
 ; GCN-NEXT: v_mov_b32_e32 v1, s7
 ; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: buffer_store_short v2, off, s[0:3], s9 offset:4
-; GCN-NEXT: buffer_store_short_d16_hi v2, off, s[0:3], s9 offset:6
-; GCN-NEXT: buffer_store_short v4, off, s[0:3], s9 offset:8
+; GCN-NEXT: buffer_store_short v4, off, s[0:3], s9 offset:4
+; GCN-NEXT: global_load_ushort v4, v[2:3], off offset:2
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_short v4, off, s[0:3], s9 offset:6
+; GCN-NEXT: global_load_ushort v2, v[2:3], off offset:4
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_short v2, off, s[0:3], s9 offset:8
 ; GCN-NEXT: buffer_load_ushort v2, off, s[0:3], s9 offset:4
 ; GCN-NEXT: buffer_load_ushort v4, off, s[0:3], s9 offset:6
 ; GCN-NEXT: s_waitcnt vmcnt(1)
diff --git a/test/CodeGen/AMDGPU/fast-unaligned-load-store.global.ll b/test/CodeGen/AMDGPU/fast-unaligned-load-store.global.ll
new file mode 100644
index 00000000000..34f8706ac66
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fast-unaligned-load-store.global.ll
@@ -0,0 +1,328 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7-ALIGNED %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7-UNALIGNED %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+
+; Should not merge this to a dword load
+define i32 @global_load_2xi16_align2(i16 addrspace(1)* %p) #0 {
+; GFX7-ALIGNED-LABEL: global_load_2xi16_align2:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v2, vcc, 2, v0
+; GFX7-ALIGNED-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; GFX7-ALIGNED-NEXT: flat_load_ushort v0, v[0:1]
+; GFX7-ALIGNED-NEXT: flat_load_ushort v1, v[2:3]
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-ALIGNED-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: global_load_2xi16_align2:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: v_add_i32_e32 v2, vcc, 2, v0
+; GFX7-UNALIGNED-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; GFX7-UNALIGNED-NEXT: flat_load_ushort v0, v[0:1]
+; GFX7-UNALIGNED-NEXT: flat_load_ushort v1, v[2:3]
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-UNALIGNED-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_load_2xi16_align2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_ushort v2, v[0:1], off
+; GFX9-NEXT: global_load_ushort v0, v[0:1], off offset:2
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(1)* %p, align 2
+  %p.1 = load i16, i16 addrspace(1)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should not merge this to a dword store
+define amdgpu_kernel void @global_store_2xi16_align2(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
+; GFX7-ALIGNED-LABEL: global_store_2xi16_align2:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v2, 1
+; GFX7-ALIGNED-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-ALIGNED-NEXT: s_add_u32 s2, s0, 2
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-ALIGNED-NEXT: flat_store_short v[0:1], v2
+; GFX7-ALIGNED-NEXT: s_addc_u32 s3, s1, 0
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v2, 2
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-ALIGNED-NEXT: flat_store_short v[0:1], v2
+; GFX7-ALIGNED-NEXT: s_endpgm
+;
+; GFX7-UNALIGNED-LABEL: global_store_2xi16_align2:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v2, 1
+; GFX7-UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-UNALIGNED-NEXT: s_add_u32 s2, s0, 2
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-UNALIGNED-NEXT: flat_store_short v[0:1], v2
+; GFX7-UNALIGNED-NEXT: s_addc_u32 s3, s1, 0
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v2, 2
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-UNALIGNED-NEXT: flat_store_short v[0:1], v2
+; GFX7-UNALIGNED-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_store_2xi16_align2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX9-NEXT: v_mov_b32_e32 v2, 1
+; GFX9-NEXT: v_mov_b32_e32 v3, 2
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: global_store_short v[0:1], v2, off
+; GFX9-NEXT: global_store_short v[0:1], v3, off offset:2
+; GFX9-NEXT: s_endpgm
+  %gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
+  store i16 1, i16 addrspace(1)* %r, align 2
+  store i16 2, i16 addrspace(1)* %gep.r, align 2
+  ret void
+}
+
+; Should produce align 1 dword when legal
+define i32 @global_load_2xi16_align1(i16 addrspace(1)* %p) #0 {
+; GFX7-ALIGNED-LABEL: global_load_2xi16_align1:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v2, vcc, 2, v0
+; GFX7-ALIGNED-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v4, vcc, 1, v0
+; GFX7-ALIGNED-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GFX7-ALIGNED-NEXT: flat_load_ubyte v6, v[0:1]
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; GFX7-ALIGNED-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX7-ALIGNED-NEXT: flat_load_ubyte v2, v[2:3]
+; GFX7-ALIGNED-NEXT: flat_load_ubyte v3, v[4:5]
+; GFX7-ALIGNED-NEXT: flat_load_ubyte v0, v[0:1]
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v1, 8, v3
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX7-ALIGNED-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX7-ALIGNED-NEXT: v_or_b32_e32 v1, v1, v6
+; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX7-ALIGNED-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: global_load_2xi16_align1:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: flat_load_dword v0, v[0:1]
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_load_2xi16_align1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v0, v[0:1], off
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT: s_mov_b32 s4, 0xffff
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_bfi_b32 v1, v1, 0, v0
+; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(1)* %p, align 1
+  %p.1 = load i16, i16 addrspace(1)* %gep.p, align 1
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should produce align 1 dword when legal
+define amdgpu_kernel void @global_store_2xi16_align1(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
+; GFX7-ALIGNED-LABEL: global_store_2xi16_align1:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v4, 1
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v5, 0
+; GFX7-ALIGNED-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: s_add_u32 s2, s0, 2
+; GFX7-ALIGNED-NEXT: s_addc_u32 s3, s1, 0
+; GFX7-ALIGNED-NEXT: s_add_u32 s4, s0, 1
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-ALIGNED-NEXT: s_addc_u32 s5, s1, 0
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-ALIGNED-NEXT: s_add_u32 s0, s0, 3
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v2, s4
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v3, s5
+; GFX7-ALIGNED-NEXT: flat_store_byte v[0:1], v4
+; GFX7-ALIGNED-NEXT: flat_store_byte v[2:3], v5
+; GFX7-ALIGNED-NEXT: s_addc_u32 s1, s1, 0
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v2, s2
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v4, 2
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v3, s3
+; GFX7-ALIGNED-NEXT: flat_store_byte v[0:1], v5
+; GFX7-ALIGNED-NEXT: flat_store_byte v[2:3], v4
+; GFX7-ALIGNED-NEXT: s_endpgm
+;
+; GFX7-UNALIGNED-LABEL: global_store_2xi16_align1:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v2, 0x20001
+; GFX7-UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-UNALIGNED-NEXT: flat_store_dword v[0:1], v2
+; GFX7-UNALIGNED-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_store_2xi16_align1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x20001
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: global_store_dword v[0:1], v2, off
+; GFX9-NEXT: s_endpgm
+  %gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
+  store i16 1, i16 addrspace(1)* %r, align 1
+  store i16 2, i16 addrspace(1)* %gep.r, align 1
+  ret void
+}
+
+; Should merge this to a dword load
+define i32 @global_load_2xi16_align4(i16 addrspace(1)* %p) #0 {
+; GFX7-LABEL: load_2xi16_align4:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: flat_load_dword v0, v[0:1]
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-ALIGNED-LABEL: global_load_2xi16_align4:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: flat_load_dword v0, v[0:1]
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: global_load_2xi16_align4:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: flat_load_dword v0, v[0:1]
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_load_2xi16_align4:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v0, v[0:1], off
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT: s_mov_b32 s4, 0xffff
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_bfi_b32 v1, v1, 0, v0
+; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(1)* %p, align 4
+  %p.1 = load i16, i16 addrspace(1)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should merge this to a dword store
+define amdgpu_kernel void @global_store_2xi16_align4(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
+; GFX7-LABEL: global_store_2xi16_align4:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-NEXT: v_mov_b32_e32 v2, 0x20001
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_store_dword v[0:1], v2
+; GFX7-NEXT: s_endpgm
+;
+; GFX7-ALIGNED-LABEL: global_store_2xi16_align4:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v2, 0x20001
+; GFX7-ALIGNED-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-ALIGNED-NEXT: flat_store_dword v[0:1], v2
+; GFX7-ALIGNED-NEXT: s_endpgm
+;
+; GFX7-UNALIGNED-LABEL: global_store_2xi16_align4:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v2, 0x20001
+; GFX7-UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-UNALIGNED-NEXT: flat_store_dword v[0:1], v2
+; GFX7-UNALIGNED-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_store_2xi16_align4:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x20001
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: global_store_dword v[0:1], v2, off
+; GFX9-NEXT: s_endpgm
+  %gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
+  store i16 1, i16 addrspace(1)* %r, align 4
+  store i16 2, i16 addrspace(1)* %gep.r, align 2
+  ret void
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll b/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll
new file mode 100644
index 00000000000..0053d2f3019
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll
@@ -0,0 +1,245 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=-unaligned-scratch-access < %s | FileCheck -check-prefixes=GCN,GFX7-ALIGNED %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=+unaligned-scratch-access < %s | FileCheck -check-prefixes=GCN,GFX7-UNALIGNED %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=+unaligned-scratch-access < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+
+; Should not merge this to a dword load
+define i32 @private_load_2xi16_align2(i16 addrspace(5)* %p) #0 {
+; GFX7-ALIGNED-LABEL: private_load_2xi16_align2:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v1, vcc, 2, v0
+; GFX7-ALIGNED-NEXT: buffer_load_ushort v1, v1, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: buffer_load_ushort v0, v0, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(1)
+; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0)
+; GFX7-ALIGNED-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: private_load_2xi16_align2:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: v_add_i32_e32 v1, vcc, 2, v0
+; GFX7-UNALIGNED-NEXT: buffer_load_ushort v1, v1, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT: buffer_load_ushort v0, v0, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(1)
+; GFX7-UNALIGNED-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: private_load_2xi16_align2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_load_ushort v1, v0, s[0:3], s33 offen
+; GFX9-NEXT: buffer_load_ushort v0, v0, s[0:3], s33 offen offset:2
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(5)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(5)* %p, align 2
+  %p.1 = load i16, i16 addrspace(5)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should not merge this to a dword store
+define void @private_store_2xi16_align2(i16 addrspace(5)* %p, i16 addrspace(5)* %r) #0 {
+; GFX7-ALIGNED-LABEL: private_store_2xi16_align2:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v3, 1
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, 2
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v2, vcc, 2, v1
+; GFX7-ALIGNED-NEXT: buffer_store_short v3, v1, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: buffer_store_short v0, v2, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0)
+; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: private_store_2xi16_align2:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v3, 1
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v0, 2
+; GFX7-UNALIGNED-NEXT: v_add_i32_e32 v2, vcc, 2, v1
+; GFX7-UNALIGNED-NEXT: buffer_store_short v3, v1, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT: buffer_store_short v0, v2, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: private_store_2xi16_align2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, 1
+; GFX9-NEXT: buffer_store_short v0, v1, s[0:3], s33 offen
+; GFX9-NEXT: v_mov_b32_e32 v0, 2
+; GFX9-NEXT: buffer_store_short v0, v1, s[0:3], s33 offen offset:2
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %gep.r = getelementptr i16, i16 addrspace(5)* %r, i64 1
+  store i16 1, i16 addrspace(5)* %r, align 2
+  store i16 2, i16 addrspace(5)* %gep.r, align 2
+  ret void
+}
+
+; Should produce align 1 dword when legal
+define i32 @private_load_2xi16_align1(i16 addrspace(5)* %p) #0 {
+; GFX7-ALIGNED-LABEL: private_load_2xi16_align1:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v1, vcc, 3, v0
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v2, vcc, 2, v0
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v3, vcc, 1, v0
+; GFX7-ALIGNED-NEXT: buffer_load_ubyte v1, v1, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: buffer_load_ubyte v3, v3, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: buffer_load_ubyte v2, v2, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: buffer_load_ubyte v0, v0, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(3)
+; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v3, 8, v3
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(1)
+; GFX7-ALIGNED-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0)
+; GFX7-ALIGNED-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-ALIGNED-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: private_load_2xi16_align1:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: buffer_load_dword v0, v0, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: private_load_2xi16_align1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_load_dword v0, v0, s[0:3], s33 offen
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT: s_mov_b32 s4, 0xffff
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_bfi_b32 v1, v1, 0, v0
+; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(5)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(5)* %p, align 1
+  %p.1 = load i16, i16 addrspace(5)* %gep.p, align 1
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should produce align 1 dword when legal
+define void @private_store_2xi16_align1(i16 addrspace(5)* %p, i16 addrspace(5)* %r) #0 {
+; GFX7-ALIGNED-LABEL: private_store_2xi16_align1:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v3, 1
+; GFX7-ALIGNED-NEXT: buffer_store_byte v3, v1, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v2, vcc, 2, v1
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v3, vcc, 1, v1
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v4, 0
+; GFX7-ALIGNED-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, 2
+; GFX7-ALIGNED-NEXT: buffer_store_byte v4, v3, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: buffer_store_byte v4, v1, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: buffer_store_byte v0, v2, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0)
+; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: private_store_2xi16_align1:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v0, 0x20001
+; GFX7-UNALIGNED-NEXT: buffer_store_dword v0, v1, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: private_store_2xi16_align1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x20001
+; GFX9-NEXT: buffer_store_dword v0, v1, s[0:3], s33 offen
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %gep.r = getelementptr i16, i16 addrspace(5)* %r, i64 1
+  store i16 1, i16 addrspace(5)* %r, align 1
+  store i16 2, i16 addrspace(5)* %gep.r, align 1
+  ret void
+}
+
+; Should merge this to a dword load
+define i32 @private_load_2xi16_align4(i16 addrspace(5)* %p) #0 {
+; GFX7-LABEL: load_2xi16_align4:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: flat_load_dword v0, v[0:1]
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-ALIGNED-LABEL: private_load_2xi16_align4:
+; GFX7-ALIGNED: ; %bb.0:
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT: buffer_load_dword v0, v0, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0)
+; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: private_load_2xi16_align4:
+; GFX7-UNALIGNED: ; %bb.0:
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT: buffer_load_dword v0, v0, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: private_load_2xi16_align4:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_load_dword v0, v0, s[0:3], s33 offen
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT: s_mov_b32 s4, 0xffff
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_bfi_b32 v1, v1, 0, v0
+; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(5)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(5)* %p, align 4
+  %p.1 = load i16, i16 addrspace(5)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should merge this to a dword store
+define void @private_store_2xi16_align4(i16 addrspace(5)* %p, i16 addrspace(5)* %r) #0 {
+; GFX7-LABEL: private_store_2xi16_align4:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-NEXT: v_mov_b32_e32 v2, 0x20001
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_store_dword v[0:1], v2
+; GFX7-NEXT: s_endpgm
+;
+; GCN-LABEL: private_store_2xi16_align4:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, 0x20001
+; GCN-NEXT: buffer_store_dword v0, v1, s[0:3], s33 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %gep.r = getelementptr i16, i16 addrspace(5)* %r, i64 1
+  store i16 1, i16 addrspace(5)* %r, align 4
+  store i16 2, i16 addrspace(5)* %gep.r, align 2
+  ret void
+}
diff --git a/test/CodeGen/AMDGPU/unaligned-load-store.ll b/test/CodeGen/AMDGPU/unaligned-load-store.ll
index 9bcf35e13a1..020f677ee3c 100644
--- a/test/CodeGen/AMDGPU/unaligned-load-store.ll
+++ b/test/CodeGen/AMDGPU/unaligned-load-store.ll
@@ -665,4 +665,25 @@ define void @private_store_align2_f64(double addrspace(5)* %out, double %x) #0 {
   ret void
 }
 
+; Should not merge this to a dword store
+define amdgpu_kernel void @global_store_2xi16_align2(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
+  %gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
+  %v = load i16, i16 addrspace(1)* %p, align 2
+  store i16 1, i16 addrspace(1)* %r, align 2
+  store i16 2, i16 addrspace(1)* %gep.r, align 2
+  ret void
+}
+
+; Should not merge this to a word load
+define i32 @load_2xi16_align2(i16 addrspace(1)* %p) #0 {
+  %gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(1)* %p, align 2
+  %p.1 = load i16, i16 addrspace(1)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
 attributes #0 = { nounwind }
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
index b0dd5d185c7..9f85fec33ba 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
@@ -207,4 +207,55 @@ define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i8() {
   ret void
 }
 
+; Make sure we don't think the alignment will increase if the base address isn't an alloca
+; ALL-LABEL: @private_store_2xi16_align2_not_alloca(
+; ALL: store i16
+; ALL: store i16
+define void @private_store_2xi16_align2_not_alloca(i16 addrspace(5)* %p, i16 addrspace(5)* %r) #0 {
+  %gep.r = getelementptr i16, i16 addrspace(5)* %r, i32 1
+  store i16 1, i16 addrspace(5)* %r, align 2
+  store i16 2, i16 addrspace(5)* %gep.r, align 2
+  ret void
+}
+
+; ALL-LABEL: @private_store_2xi16_align1_not_alloca(
+; ALIGNED: store i16
+; ALIGNED: store i16
+; UNALIGNED: store <2 x i16>
+define void @private_store_2xi16_align1_not_alloca(i16 addrspace(5)* %p, i16 addrspace(5)* %r) #0 {
+  %gep.r = getelementptr i16, i16 addrspace(5)* %r, i32 1
+  store i16 1, i16 addrspace(5)* %r, align 1
+  store i16 2, i16 addrspace(5)* %gep.r, align 1
+  ret void
+}
+
+; ALL-LABEL: @private_load_2xi16_align2_not_alloca(
+; ALL: load i16
+; ALL: load i16
+define i32 @private_load_2xi16_align2_not_alloca(i16 addrspace(5)* %p) #0 {
+  %gep.p = getelementptr i16, i16 addrspace(5)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(5)* %p, align 2
+  %p.1 = load i16, i16 addrspace(5)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; ALL-LABEL: @private_load_2xi16_align1_not_alloca(
+; ALIGNED: load i16
+; ALIGNED: load i16
+; UNALIGNED: load <2 x i16>
+define i32 @private_load_2xi16_align1_not_alloca(i16 addrspace(5)* %p) #0 {
+  %gep.p = getelementptr i16, i16 addrspace(5)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(5)* %p, align 1
+  %p.1 = load i16, i16 addrspace(5)* %gep.p, align 1
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
 attributes #0 = { nounwind }
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
index 4292cbcec85..31a1c270bd0 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
@@ -57,20 +57,10 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align1(
 }
 
 ; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32_align2(
-; ALIGNED: store i32 9, i32 addrspace(5)* %out, align 2
-; ALIGNED: store i32 1, i32 addrspace(5)* %out.gep.1, align 2
-; ALIGNED: store i32 23, i32 addrspace(5)* %out.gep.2, align 2
-; ALIGNED: store i32 19, i32 addrspace(5)* %out.gep.3, align 2
-
-; ELT16-UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32> addrspace(5)* %1, align 2
-
-; ELT8-UNALIGNED: store <2 x i32>
-; ELT8-UNALIGNED: store <2 x i32>
-
-; ELT4-UNALIGNED: store i32
-; ELT4-UNALIGNED: store i32
-; ELT4-UNALIGNED: store i32
-; ELT4-UNALIGNED: store i32
+; ALL: store i32
+; ALL: store i32
+; ALL: store i32
+; ALL: store i32
 define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align2(i32 addrspace(5)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
   %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
@@ -127,10 +117,8 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16(i16 add
 }
 
 ; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16_align2(
-; ALIGNED: store i16
-; ALIGNED: store i16
-
-; UNALIGNED: store <2 x i16> <i16 456, i16 123>, <2 x i16> addrspace(5)* %1, align 2
+; ALL: store i16
+; ALL: store i16
 define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align2(i16 addrspace(5)* %out) #0 {
   %out.gep.1 = getelementptr i16, i16 addrspace(5)* %out, i32 1
 
diff --git a/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll b/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
index 0d9a4184e71..8302ad9562f 100644
--- a/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
@@ -49,7 +49,8 @@ define amdgpu_kernel void @merge_global_store_2_constants_0_i16(i16 addrspace(1)
 }
 
 ; CHECK-LABEL: @merge_global_store_2_constants_i16_natural_align
-; CHECK: store <2 x i16>
+; CHECK: store i16
+; CHECK: store i16
 define amdgpu_kernel void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
 
@@ -58,8 +59,19 @@ define amdgpu_kernel void @merge_global_store_2_constants_i16_natural_align(i16
   ret void
 }
 
+; CHECK-LABEL: @merge_global_store_2_constants_i16_align_1
+; CHECK: store <2 x i16>
+define amdgpu_kernel void @merge_global_store_2_constants_i16_align_1(i16 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+
+  store i16 123, i16 addrspace(1)* %out.gep.1, align 1
+  store i16 456, i16 addrspace(1)* %out, align 1
+  ret void
+}
+
 ; CHECK-LABEL: @merge_global_store_2_constants_half_natural_align
-; CHECK: store <2 x half>
+; CHECK: store half
+; CHECK: store half
 define amdgpu_kernel void @merge_global_store_2_constants_half_natural_align(half addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
 
@@ -68,6 +80,16 @@ define amdgpu_kernel void @merge_global_store_2_constants_half_natural_align(hal
   ret void
 }
 
+; CHECK-LABEL: @merge_global_store_2_constants_half_align_1
+; CHECK: store <2 x half>
+define amdgpu_kernel void @merge_global_store_2_constants_half_align_1(half addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
+
+  store half 2.0, half addrspace(1)* %out.gep.1, align 1
+  store half 1.0, half addrspace(1)* %out, align 1
+  ret void
+}
+
 ; CHECK-LABEL: @merge_global_store_2_constants_i32
 ; CHECK: store <2 x i32> <i32 456, i32 123>, <2 x i32> addrspace(1)* %{{[0-9]+}}, align 4
 define amdgpu_kernel void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {