GlobalISel: support translating volatile loads and stores.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@284603 91177308-0d34-0410-b5e6-96231b3b80d8
parent 57ea7480ce
commit dc8499fbc7
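The core of the change is visible in the first hunk below: instead of hard-coding MachineMemOperand::MOLoad or MOStore, the translator now starts from MOVolatile or MONone depending on the IR instruction and ORs in the access kind. As a minimal standalone sketch of that flag-combination idiom, using hypothetical stand-in enums rather than the real llvm::MachineMemOperand API:

// Illustrative sketch only: stand-in flag values, not LLVM's
// MachineMemOperand flags. It mirrors the idiom the patch introduces:
// pick MOVolatile or MONone from the IR instruction, then OR in the
// access kind (MOLoad or MOStore).
#include <cstdint>
#include <cstdio>

enum MemFlags : uint32_t {
  MONone     = 0,
  MOLoad     = 1u << 0,
  MOStore    = 1u << 1,
  MOVolatile = 1u << 2,
};

// Hypothetical helpers mirroring translateLoad/translateStore's flag setup.
static uint32_t loadFlags(bool IsVolatile) {
  uint32_t Flags = IsVolatile ? MOVolatile : MONone;
  Flags |= MOLoad;
  return Flags;
}

static uint32_t storeFlags(bool IsVolatile) {
  uint32_t Flags = IsVolatile ? MOVolatile : MONone;
  Flags |= MOStore;
  return Flags;
}

int main() {
  std::printf("plain load:     0x%x\n", (unsigned)loadFlags(false)); // MOLoad
  std::printf("volatile load:  0x%x\n", (unsigned)loadFlags(true));  // MOLoad | MOVolatile
  std::printf("volatile store: 0x%x\n", (unsigned)storeFlags(true)); // MOStore | MOVolatile
  return 0;
}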
@@ -168,31 +168,36 @@ bool IRTranslator::translateBr(const User &U) {
 bool IRTranslator::translateLoad(const User &U) {
   const LoadInst &LI = cast<LoadInst>(U);
 
-  if (!TPC->isGlobalISelAbortEnabled() && !LI.isSimple())
+  if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
     return false;
 
-  assert(LI.isSimple() && "only simple loads are supported at the moment");
+  assert(!LI.isAtomic() && "only non-atomic loads are supported at the moment");
+  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
+                               : MachineMemOperand::MONone;
+  Flags |= MachineMemOperand::MOLoad;
 
   MachineFunction &MF = MIRBuilder.getMF();
   unsigned Res = getOrCreateVReg(LI);
   unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
   LLT VTy{*LI.getType(), *DL}, PTy{*LI.getPointerOperand()->getType(), *DL};
 
   MIRBuilder.buildLoad(
       Res, Addr,
-      *MF.getMachineMemOperand(
-          MachinePointerInfo(LI.getPointerOperand()), MachineMemOperand::MOLoad,
-          DL->getTypeStoreSize(LI.getType()), getMemOpAlignment(LI)));
+      *MF.getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
+                               Flags, DL->getTypeStoreSize(LI.getType()),
+                               getMemOpAlignment(LI)));
   return true;
 }
 
 bool IRTranslator::translateStore(const User &U) {
   const StoreInst &SI = cast<StoreInst>(U);
 
-  if (!TPC->isGlobalISelAbortEnabled() && !SI.isSimple())
+  if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
     return false;
 
-  assert(SI.isSimple() && "only simple loads are supported at the moment");
+  assert(!SI.isAtomic() && "only non-atomic stores supported at the moment");
+  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
+                               : MachineMemOperand::MONone;
+  Flags |= MachineMemOperand::MOStore;
 
   MachineFunction &MF = MIRBuilder.getMF();
   unsigned Val = getOrCreateVReg(*SI.getValueOperand());
@@ -201,12 +206,10 @@ bool IRTranslator::translateStore(const User &U) {
       PTy{*SI.getPointerOperand()->getType(), *DL};
 
   MIRBuilder.buildStore(
-      Val, Addr,
-      *MF.getMachineMemOperand(
-          MachinePointerInfo(SI.getPointerOperand()),
-          MachineMemOperand::MOStore,
-          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
-          getMemOpAlignment(SI)));
+      Val, Addr, *MF.getMachineMemOperand(
+                     MachinePointerInfo(SI.getPointerOperand()), Flags,
+                     DL->getTypeStoreSize(SI.getValueOperand()->getType()),
+                     getMemOpAlignment(SI)));
   return true;
 }
@@ -271,14 +271,20 @@ define void @trunc(i64 %a) {
 ; CHECK: [[ADDR42:%[0-9]+]](p42) = COPY %x1
 ; CHECK: [[VAL1:%[0-9]+]](s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr, align 16)
 ; CHECK: [[VAL2:%[0-9]+]](s64) = G_LOAD [[ADDR42]](p42) :: (load 8 from %ir.addr42)
-; CHECK: [[SUM:%.*]](s64) = G_ADD [[VAL1]], [[VAL2]]
-; CHECK: %x0 = COPY [[SUM]]
+; CHECK: [[SUM2:%.*]](s64) = G_ADD [[VAL1]], [[VAL2]]
+; CHECK: [[VAL3:%[0-9]+]](s64) = G_LOAD [[ADDR]](p0) :: (volatile load 8 from %ir.addr)
+; CHECK: [[SUM3:%[0-9]+]](s64) = G_ADD [[SUM2]], [[VAL3]]
+; CHECK: %x0 = COPY [[SUM3]]
 ; CHECK: RET_ReallyLR implicit %x0
 define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
   %val1 = load i64, i64* %addr, align 16
+
   %val2 = load i64, i64 addrspace(42)* %addr42
-  %sum = add i64 %val1, %val2
-  ret i64 %sum
+  %sum2 = add i64 %val1, %val2
+
+  %val3 = load volatile i64, i64* %addr
+  %sum3 = add i64 %sum2, %val3
+  ret i64 %sum3
 }
 
 ; CHECK-LABEL: name: store
@@ -288,10 +294,12 @@ define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
 ; CHECK: [[VAL2:%[0-9]+]](s64) = COPY %x3
 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (store 8 into %ir.addr, align 16)
 ; CHECK: G_STORE [[VAL2]](s64), [[ADDR42]](p42) :: (store 8 into %ir.addr42)
+; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (volatile store 8 into %ir.addr)
 ; CHECK: RET_ReallyLR
 define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2) {
   store i64 %val1, i64* %addr, align 16
   store i64 %val2, i64 addrspace(42)* %addr42
+  store volatile i64 %val1, i64* %addr
   %sum = add i64 %val1, %val2
   ret void
 }