mirror of
https://github.com/capstone-engine/llvm-capstone.git
synced 2025-04-01 12:43:47 +00:00
GlobalISel: Don't reduce elements for atomic load/store
This is invalid for the same reason as in the narrowScalar handling for load.

llvm-svn: 352334
This commit is contained in:
Parent: ebe6b43aec
Commit: cfca2a7adf
@@ -1445,6 +1445,14 @@ LegalizerHelper::fewerElementsVectorLoadStore(MachineInstr &MI, unsigned TypeIdx
   if (TypeIdx != 0)
     return UnableToLegalize;
 
+  MachineMemOperand *MMO = *MI.memoperands_begin();
+
+  // This implementation doesn't work for atomics. Give up instead of doing
+  // something invalid.
+  if (MMO->getOrdering() != AtomicOrdering::NotAtomic ||
+      MMO->getFailureOrdering() != AtomicOrdering::NotAtomic)
+    return UnableToLegalize;
+
   bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
   unsigned ValReg = MI.getOperand(0).getReg();
   unsigned AddrReg = MI.getOperand(1).getReg();
@@ -1459,7 +1467,7 @@ LegalizerHelper::fewerElementsVectorLoadStore(MachineInstr &MI, unsigned TypeIdx
   const LLT OffsetTy =
       LLT::scalar(MRI.getType(AddrReg).getScalarSizeInBits());
   MachineFunction &MF = *MI.getMF();
-  MachineMemOperand *MMO = *MI.memoperands_begin();
+
   for (unsigned Idx = 0; Idx < NumParts; ++Idx) {
     unsigned Adjustment = Idx * NarrowTy.getSizeInBits() / 8;
     unsigned Alignment = MinAlign(MMO->getAlignment(), Adjustment);
46  llvm/test/CodeGen/AMDGPU/GlobalISel/no-legalize-atomic.mir  (new file)
@@ -0,0 +1,46 @@
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer -global-isel-abort=0 -o - %s | FileCheck %s
+
+# CHECK: %1:_(<8 x s32>) = G_LOAD %0(p1) :: (load monotonic 32, addrspace 1)
+# CHECK: G_STORE %1(<8 x s32>), %0(p1) :: (store monotonic 32, addrspace 1)
+# CHECK: %1:_(s256) = G_LOAD %0(p1) :: (load monotonic 32, addrspace 1)
+# CHECK: G_STORE %1(s256), %0(p1) :: (store monotonic 32, addrspace 1)
+
+---
+name: test_atomic_load_global_v8s32
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(<8 x s32>) = G_LOAD %0 :: (load monotonic 32, addrspace 1, align 32)
+    $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
+...
+
+---
+name: test_atomic_store_global_v8s32
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
+    G_STORE %1, %0 :: (store monotonic 32, addrspace 1, align 32)
+...
+
+---
+name: test_atomic_load_global_s256
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(s256) = G_LOAD %0 :: (load monotonic 32, addrspace 1, align 32)
+    $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
+...
+
+---
+name: test_atomic_store_global_s256
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
+    G_STORE %1, %0 :: (store monotonic 32, addrspace 1, align 32)
+...
|
Loading…
x
Reference in New Issue
Block a user