[Alignment] Fix misaligned interleaved loads
Summary:
Tentatively fixing https://bugs.llvm.org/show_bug.cgi?id=45957

Reviewers: craig.topper, nlopes

Subscribers: hiraditya, llvm-commits, RKSimon, jdoerfert, efriedma

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D80276
This commit is contained in:
parent 8541a94229
commit 465e0b55ff
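The functional fix is in X86InterleavedAccessGroup::decompose(): it splits one wide interleaved load into NumLoads consecutive sub-loads of VecBaseTy, and it previously stamped the wide load's alignment onto every sub-load, even though only the first one is guaranteed to sit at that alignment. After the patch, the first sub-load keeps the original alignment and all later ones are clamped to commonAlignment(FirstAlignment, size of VecBaseTy in bytes). Below is a minimal standalone model of that arithmetic, not the LLVM code itself; minAlign, the constants, and main are illustrative stand-ins, with values taken from the vf32 test updated further down.

#include <cstdint>
#include <cstdio>

// Greatest power of two dividing both a and b -- the arithmetic behind
// llvm::MinAlign, which commonAlignment(Align, offset) reduces to.
static uint64_t minAlign(uint64_t a, uint64_t b) {
  uint64_t c = a | b;
  return c & (~c + 1); // isolate the lowest set bit
}

int main() {
  const uint64_t FirstAlignment = 128; // alignment of the original wide load
  const uint64_t SubVecBytes = 16;     // each decomposed load is <16 x i8>
  const unsigned NumLoads = 6;         // <96 x i8> = six <16 x i8> pieces

  // Sub-load i starts at byte offset i * SubVecBytes, so only load 0 may
  // keep the wide load's alignment; later loads get the alignment that a
  // SubVecBytes offset from a FirstAlignment boundary still guarantees.
  uint64_t Alignment = FirstAlignment;
  for (unsigned i = 0; i < NumLoads; ++i) {
    printf("load %u: align %llu\n", i, (unsigned long long)Alignment);
    Alignment = minAlign(FirstAlignment, SubVecBytes);
  }
  return 0;
}

Run as written, it prints align 128 for load 0 and align 16 for loads 1 through 5, matching the FileCheck updates in the vf32 test at the bottom of this diff.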
@@ -150,7 +150,7 @@ bool X86InterleavedAccessGroup::isSupported() const {
   // We support shuffle represents stride 4 for byte type with size of
   // WideInstSize.
   if (ShuffleElemSize == 64 && WideInstSize == 1024 && Factor == 4)
-     return true;
+    return true;
 
   if (ShuffleElemSize == 8 && isa<StoreInst>(Inst) && Factor == 4 &&
       (WideInstSize == 256 || WideInstSize == 512 || WideInstSize == 1024 ||
@@ -211,13 +211,20 @@ void X86InterleavedAccessGroup::decompose(
     VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
   }
   // Generate N loads of T type.
+  assert(VecBaseTy->getPrimitiveSizeInBits().isByteSized() &&
+         "VecBaseTy's size must be a multiple of 8");
+  const Align FirstAlignment = LI->getAlign();
+  const Align SubsequentAlignment = commonAlignment(
+      FirstAlignment, VecBaseTy->getPrimitiveSizeInBits().getFixedSize() / 8);
+  Align Alignment = FirstAlignment;
   for (unsigned i = 0; i < NumLoads; i++) {
     // TODO: Support inbounds GEP.
     Value *NewBasePtr =
         Builder.CreateGEP(VecBaseTy, VecBasePtr, Builder.getInt32(i));
     Instruction *NewLoad =
-        Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlign());
+        Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, Alignment);
     DecomposedVectors.push_back(NewLoad);
+    Alignment = SubsequentAlignment;
   }
 }
 
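Why one SubsequentAlignment value works for every trailing load: sub-load i sits at byte offset i * S, where S is VecBaseTy's size in bytes, and the largest power of two dividing both FirstAlignment and i * S is never smaller than the one dividing FirstAlignment and S. Reusing commonAlignment(FirstAlignment, S) for all i >= 1 may under-state the alignment of some loads (e.g. the one at offset 2 * S), but it is always safe, which is what matters for fixing the miscompile.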
@@ -255,7 +262,7 @@ static void genShuffleBland(MVT VT, ArrayRef<int> Mask,
                             SmallVectorImpl<int> &Out, int LowOffset,
                             int HighOffset) {
   assert(VT.getSizeInBits() >= 256 &&
-    "This function doesn't accept width smaller then 256");
+         "This function doesn't accept width smaller then 256");
   unsigned NumOfElm = VT.getVectorNumElements();
   for (unsigned i = 0; i < Mask.size(); i++)
     Out.push_back(Mask[i] + LowOffset);
@@ -289,7 +296,7 @@ static void reorderSubVector(MVT VT, SmallVectorImpl<Value *> &TransposedMatrix,
   if (VecElems == 16) {
     for (unsigned i = 0; i < Stride; i++)
       TransposedMatrix[i] = Builder.CreateShuffleVector(
-        Vec[i], UndefValue::get(Vec[i]->getType()), VPShuf);
+          Vec[i], UndefValue::get(Vec[i]->getType()), VPShuf);
     return;
   }
 
@@ -298,20 +305,19 @@ static void reorderSubVector(MVT VT, SmallVectorImpl<Value *> &TransposedMatrix,
 
   for (unsigned i = 0; i < (VecElems / 16) * Stride; i += 2) {
     genShuffleBland(VT, VPShuf, OptimizeShuf, (i / Stride) * 16,
-      (i + 1) / Stride * 16);
+                    (i + 1) / Stride * 16);
     Temp[i / 2] = Builder.CreateShuffleVector(
-      Vec[i % Stride], Vec[(i + 1) % Stride], OptimizeShuf);
+        Vec[i % Stride], Vec[(i + 1) % Stride], OptimizeShuf);
     OptimizeShuf.clear();
   }
 
   if (VecElems == 32) {
     std::copy(Temp, Temp + Stride, TransposedMatrix.begin());
     return;
-  }
-  else
+  } else
     for (unsigned i = 0; i < Stride; i++)
       TransposedMatrix[i] =
-        Builder.CreateShuffleVector(Temp[2 * i], Temp[2 * i + 1], Concat);
+          Builder.CreateShuffleVector(Temp[2 * i], Temp[2 * i + 1], Concat);
 }
 
 void X86InterleavedAccessGroup::interleave8bitStride4VF8(
@@ -682,7 +688,7 @@ void X86InterleavedAccessGroup::interleave8bitStride3(
 
   unsigned NumOfElm = VT.getVectorNumElements();
   group2Shuffle(VT, GroupSize, VPShuf);
-  reorderSubVector(VT, TransposedMatrix, Vec, VPShuf, NumOfElm,3, Builder);
+  reorderSubVector(VT, TransposedMatrix, Vec, VPShuf, NumOfElm, 3, Builder);
 }
 
 void X86InterleavedAccessGroup::transpose_4x4(
@@ -8,15 +8,15 @@ define <32 x i8> @interleaved_load_vf32_i8_stride3(<96 x i8>* %ptr){
 ; AVX2-NEXT: [[TMP2:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 0
 ; AVX2-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[TMP2]], align 128
 ; AVX2-NEXT: [[TMP4:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 1
-; AVX2-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[TMP4]], align 128
+; AVX2-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[TMP4]], align 16
 ; AVX2-NEXT: [[TMP6:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 2
-; AVX2-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* [[TMP6]], align 128
+; AVX2-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* [[TMP6]], align 16
 ; AVX2-NEXT: [[TMP8:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 3
-; AVX2-NEXT: [[TMP9:%.*]] = load <16 x i8>, <16 x i8>* [[TMP8]], align 128
+; AVX2-NEXT: [[TMP9:%.*]] = load <16 x i8>, <16 x i8>* [[TMP8]], align 16
 ; AVX2-NEXT: [[TMP10:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 4
-; AVX2-NEXT: [[TMP11:%.*]] = load <16 x i8>, <16 x i8>* [[TMP10]], align 128
+; AVX2-NEXT: [[TMP11:%.*]] = load <16 x i8>, <16 x i8>* [[TMP10]], align 16
 ; AVX2-NEXT: [[TMP12:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 5
-; AVX2-NEXT: [[TMP13:%.*]] = load <16 x i8>, <16 x i8>* [[TMP12]], align 128
+; AVX2-NEXT: [[TMP13:%.*]] = load <16 x i8>, <16 x i8>* [[TMP12]], align 16
 ; AVX2-NEXT: [[TMP14:%.*]] = shufflevector <16 x i8> [[TMP3]], <16 x i8> [[TMP9]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
 ; AVX2-NEXT: [[TMP15:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> [[TMP11]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
 ; AVX2-NEXT: [[TMP16:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP13]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
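Here the <96 x i8> source is 128-byte aligned and is now split into six <16 x i8> loads at 16-byte steps: load 0 keeps align 128, while loads 1 through 5 drop from the previously over-stated align 128 to commonAlignment(128, 16) = 16. The vf16 test below is the same pattern from a 64-byte-aligned <48 x i8> source, again yielding commonAlignment(64, 16) = 16.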
@@ -50,9 +50,9 @@ define <16 x i8> @interleaved_load_vf16_i8_stride3(<48 x i8>* %ptr){
 ; AVX2-NEXT: [[TMP2:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 0
 ; AVX2-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[TMP2]], align 64
 ; AVX2-NEXT: [[TMP4:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 1
-; AVX2-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[TMP4]], align 64
+; AVX2-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[TMP4]], align 16
 ; AVX2-NEXT: [[TMP6:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 2
-; AVX2-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* [[TMP6]], align 64
+; AVX2-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* [[TMP6]], align 16
 ; AVX2-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP3]], <16 x i8> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 2, i32 5, i32 8, i32 11, i32 14, i32 1, i32 4, i32 7, i32 10, i32 13>
 ; AVX2-NEXT: [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 2, i32 5, i32 8, i32 11, i32 14, i32 1, i32 4, i32 7, i32 10, i32 13>
 ; AVX2-NEXT: [[TMP10:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 2, i32 5, i32 8, i32 11, i32 14, i32 1, i32 4, i32 7, i32 10, i32 13>