afa2e7e6a6
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html

The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.

This change is the first in a series that allows source and dest to each have
their own alignments by using the alignment attribute on their arguments.

In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
   require that the alignments for source & dest be equal.

For example, code which used to read:

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)

will now read

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)

Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended
sed script may help with updating the majority of your tests, but it does not
catch all possible patterns, so some manual checking and updating will be
required.

s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g

The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with
    differing source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new
    IRBuilder API, and those that use MemIntrinsicInst::[get|set]Alignment()
    to use getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
    MemIntrinsicInst::[get|set]Alignment() methods.

Reviewers: pete, hfinkel, lhames, reames, bollu

Reviewed By: reames

Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits

Differential Revision: https://reviews.llvm.org/D41675

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@322965 91177308-0d34-0410-b5e6-96231b3b80d8
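For reference, the same rewrite applies to @llvm.memset, which loses its
trailing alignment argument in exactly the same way. An illustrative example
following the pattern above (a sketch, not taken verbatim from the patch):
code which used to read

  call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 100, i32 4, i1 false)

will now read

  call void @llvm.memset.p0i8.i32(i8* align 4 %dest, i8 0, i32 100, i1 false)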
; RUN: llc -mtriple x86_64-apple-macosx -mcpu=corei7-avx -combiner-stress-load-slicing < %s -o - | FileCheck %s --check-prefix=STRESS
; RUN: llc -mtriple x86_64-apple-macosx -mcpu=corei7-avx < %s -o - | FileCheck %s --check-prefix=REGULAR
;
; <rdar://problem/14477220>

%class.Complex = type { float, float }

; Check that independent slices lead to independent loads, and that the slices
; then land in different register files.
;
; The layout is:
; LSB 0 1 2 3 | 4 5 6 7 MSB
;      Low       High
; The base address points to 0 and is 8-bytes aligned.
; Low slice starts at 0 (base) and is 8-bytes aligned.
; High slice starts at 4 (base + 4-bytes) and is 4-bytes aligned.
;
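; An illustrative sketch of the expected transformation (not checked by this
; test; the %full/%low/%high names are invented for the example): slicing
; rewrites the single 8-byte load
;   %full = load i64, i64* %addr, align 8
; into two independent 4-byte loads that can feed the FP register file
; directly:
;   %low  = load i32, i32* %addr.low, align 8   ; bytes 0-3, base alignment
;   %high = load i32, i32* %addr.high, align 4  ; bytes 4-7, base + 4 bytes
;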
; STRESS-LABEL: t1:
; Load out[out_start + 8].real, this is base + 8 * 8 + 0.
; STRESS: vmovss 64([[BASE:[^(]+]]), [[OUT_Real:%xmm[0-9]+]]
; Load out[out_start + 8].imm, this is base + 8 * 8 + 4.
; STRESS-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]
; Add low slice: out[out_start].real, this is base + 0.
; STRESS-NEXT: vaddss ([[BASE]]), [[OUT_Real]], [[RES_Real:%xmm[0-9]+]]
; Add high slice: out[out_start].imm, this is base + 4.
; STRESS-NEXT: vaddss 4([[BASE]]), [[OUT_Imm]], [[RES_Imm:%xmm[0-9]+]]
; Swap Imm and Real.
; STRESS-NEXT: vinsertps $16, [[RES_Imm]], [[RES_Real]], [[RES_Vec:%xmm[0-9]+]]
; Put the results back into out[out_start].
; STRESS-NEXT: vmovlps [[RES_Vec]], ([[BASE]])
;
; Same for REGULAR: we eliminate the register bank copy for each slice.
; REGULAR-LABEL: t1:
; Load out[out_start + 8].real, this is base + 8 * 8 + 0.
; REGULAR: vmovss 64([[BASE:[^)]+]]), [[OUT_Real:%xmm[0-9]+]]
; Load out[out_start + 8].imm, this is base + 8 * 8 + 4.
; REGULAR-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]
; Add low slice: out[out_start].real, this is base + 0.
; REGULAR-NEXT: vaddss ([[BASE]]), [[OUT_Real]], [[RES_Real:%xmm[0-9]+]]
; Add high slice: out[out_start].imm, this is base + 4.
; REGULAR-NEXT: vaddss 4([[BASE]]), [[OUT_Imm]], [[RES_Imm:%xmm[0-9]+]]
; Swap Imm and Real.
; REGULAR-NEXT: vinsertps $16, [[RES_Imm]], [[RES_Real]], [[RES_Vec:%xmm[0-9]+]]
; Put the results back into out[out_start].
; REGULAR-NEXT: vmovlps [[RES_Vec]], ([[BASE]])
define void @t1(%class.Complex* nocapture %out, i64 %out_start) {
entry:
  %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
  %tmp = bitcast %class.Complex* %arrayidx to i64*
  %tmp1 = load i64, i64* %tmp, align 8
  %t0.sroa.0.0.extract.trunc = trunc i64 %tmp1 to i32
  %tmp2 = bitcast i32 %t0.sroa.0.0.extract.trunc to float
  %t0.sroa.2.0.extract.shift = lshr i64 %tmp1, 32
  %t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32
  %tmp3 = bitcast i32 %t0.sroa.2.0.extract.trunc to float
  %add = add i64 %out_start, 8
  %arrayidx2 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add
  %i.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 0
  %tmp4 = load float, float* %i.i, align 4
  %add.i = fadd float %tmp4, %tmp2
  %retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0
  %r.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 1
  %tmp5 = load float, float* %r.i, align 4
  %add5.i = fadd float %tmp5, %tmp3
  %retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1
  %ref.tmp.sroa.0.0.cast = bitcast %class.Complex* %arrayidx to <2 x float>*
  store <2 x float> %retval.sroa.0.4.vec.insert.i, <2 x float>* %ref.tmp.sroa.0.0.cast, align 4
  ret void
}

; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1

; Function Attrs: nounwind
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)

; Function Attrs: nounwind
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)

; Check that we do not read outside the chunk of bits of the original load.
;
; The 64-bit load should have been split into one 32-bit and one 16-bit slice.
; The 16-bit slice should be zero-extended to match the final type.
;
; The memory layout is:
; LSB 0 1 2 3 | 4 5 | 6 7 MSB
;      Low           High
; The base address points to 0 and is 8-bytes aligned.
; Low slice starts at 0 (base) and is 8-bytes aligned.
; High slice starts at 6 (base + 6-bytes) and is 2-bytes aligned.
;
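; An illustrative sketch (not checked by the test; the value names are
; invented for the example): the single 8-byte load
;   %full = load i64, i64* %addr, align 8
; is split into a 32-bit slice and a 16-bit slice, and the narrow slice is
; zero-extended so the final add is done on i32:
;   %low    = load i32, i32* %addr.low, align 8    ; bytes 0-3
;   %high   = load i16, i16* %addr.high, align 2   ; bytes 6-7
;   %high32 = zext i16 %high to i32
;   %res    = add i32 %high32, %low
;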
; STRESS-LABEL: t2:
; STRESS: movzwl 6([[BASE:[^)]+]]), %eax
; STRESS-NEXT: addl ([[BASE]]), %eax
; STRESS-NEXT: ret
;
; For the REGULAR heuristic, it is not profitable to slice things that are not
; next to each other in memory. Here we have a hole at bytes #4-5.
; REGULAR-LABEL: t2:
; REGULAR: shrq $48
define i32 @t2(%class.Complex* nocapture %out, i64 %out_start) {
  %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
  %bitcast = bitcast %class.Complex* %arrayidx to i64*
  %chunk64 = load i64, i64* %bitcast, align 8
  %slice32_low = trunc i64 %chunk64 to i32
  %shift48 = lshr i64 %chunk64, 48
  %slice32_high = trunc i64 %shift48 to i32
  %res = add i32 %slice32_high, %slice32_low
  ret i32 %res
}

; Check that we do not optimize overlapping slices.
;
; The 64-bit load should NOT have been split, as the slices overlap.
; First slice uses bytes numbered 0 to 3.
; Second slice uses bytes numbered 6 and 7.
; Third slice uses bytes numbered 4 to 7.
;
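; To make the overlap concrete (an informal note, not checked by the test):
; the byte ranges [0,3], [6,7], and [4,7] are not pairwise disjoint, since
; [4,7] covers [6,7]. The i64 load is therefore kept whole, and both high
; parts are produced by shifting the full value, as the shrq checks below
; verify.
;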
; STRESS-LABEL: t3:
; STRESS: shrq $48
; STRESS: shrq $32
;
; REGULAR-LABEL: t3:
; REGULAR: shrq $48
; REGULAR: shrq $32
define i32 @t3(%class.Complex* nocapture %out, i64 %out_start) {
  %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
  %bitcast = bitcast %class.Complex* %arrayidx to i64*
  %chunk64 = load i64, i64* %bitcast, align 8
  %slice32_low = trunc i64 %chunk64 to i32
  %shift48 = lshr i64 %chunk64, 48
  %slice32_high = trunc i64 %shift48 to i32
  %shift32 = lshr i64 %chunk64, 32
  %slice32_lowhigh = trunc i64 %shift32 to i32
  %tmpres = add i32 %slice32_high, %slice32_low
  %res = add i32 %slice32_lowhigh, %tmpres
  ret i32 %res
}

attributes #1 = { nounwind }