Revert r319691: [globalisel][tablegen] Split atomic load/store into separate opcode and enable for AArch64.

Some concerns were raised about the direction. Reverting while we discuss it and look into an alternative.

llvm-svn: 319739
Daniel Sanders 2017-12-05 05:52:07 +00:00
parent 91eccfcbfe
commit f0a9960826
17 changed files with 99 additions and 742 deletions

View File

@@ -277,7 +277,7 @@ bool InstructionSelector::executeMatchTable(
return false;
for (const auto &MMO : State.MIs[InsnID]->memoperands())
if (isAtLeastOrStrongerThan(MMO->getOrdering(), Ordering))
if (!isStrongerThan(Ordering, MMO->getOrdering()))
if (handleReject() == RejectAndGiveUp)
return false;
break;
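
For context: GIM_CheckAtomicOrdering is one step in the flat match table that the tablegen emitter generates, rejecting a candidate instruction whose memory operands do not satisfy the required ordering. In the two stacked `if` lines above, the first appears to be the predicate r319691 introduced and the second the restored original. A minimal, self-contained sketch of what the restored NotAtomic check accomplishes (Ordering, MemOp, and matchesNonAtomic are illustrative names, not LLVM's real API):

    #include <cstdio>
    #include <vector>

    // Simplified stand-in for llvm::AtomicOrdering; NotAtomic is the weakest.
    enum class Ordering { NotAtomic, Unordered, Monotonic, Acquire, Release,
                          AcquireRelease, SequentiallyConsistent };

    // Stand-in for MachineMemOperand: only the ordering matters here.
    struct MemOp { Ordering Ord; };

    // The CheckMMOIsNonAtomic predicate boils down to: a plain load/store
    // pattern may only match if every memory operand is non-atomic.
    bool matchesNonAtomic(const std::vector<MemOp> &MemOps) {
      for (const MemOp &MMO : MemOps)
        if (MMO.Ord != Ordering::NotAtomic)
          return false; // reject: atomic accesses must not match plain patterns
      return true;
    }

    int main() {
      std::printf("%d\n", matchesNonAtomic({{Ordering::NotAtomic}})); // 1: matches
      std::printf("%d\n", matchesNonAtomic({{Ordering::Acquire}}));   // 0: falls back
    }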

View File

@@ -571,30 +571,6 @@ public:
MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
MachineMemOperand &MMO);
/// Build and insert `Res<def> = G_ATOMIC_LOAD Addr, MMO`.
///
/// Loads the value stored at \p Addr. Puts the result in \p Res.
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p Res must be a generic virtual register.
/// \pre \p Addr must be a generic virtual register with pointer type.
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildAtomicLoad(unsigned Res, unsigned Addr,
MachineMemOperand &MMO);
/// Build and insert `G_ATOMIC_STORE Val, Addr, MMO`.
///
/// Stores the value \p Val to \p Addr.
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p Val must be a generic virtual register.
/// \pre \p Addr must be a generic virtual register with pointer type.
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildAtomicStore(unsigned Val, unsigned Addr,
MachineMemOperand &MMO);
/// Build and insert `Res0<def>, ... = G_EXTRACT Src, Idx0`.
///
/// \pre setBasicBlock or setMI must have been called.

View File

@@ -265,12 +265,6 @@ HANDLE_TARGET_OPCODE(G_LOAD)
/// Generic store.
HANDLE_TARGET_OPCODE(G_STORE)
/// Generic atomic load
HANDLE_TARGET_OPCODE(G_ATOMIC_LOAD)
/// Generic atomic store
HANDLE_TARGET_OPCODE(G_ATOMIC_STORE)
/// Generic atomic cmpxchg with internal success check.
HANDLE_TARGET_OPCODE(G_ATOMIC_CMPXCHG_WITH_SUCCESS)

View File

@@ -484,28 +484,6 @@ def G_STORE : GenericInstruction {
let mayStore = 1;
}
// Generic atomic load. Expects a MachineMemOperand in addition to explicit
// operands. Technically, we could have handled this as a G_LOAD; however, we
// decided to keep it separate on the basis that atomic loads tend to have
// very different handling to non-atomic loads.
def G_ATOMIC_LOAD : GenericInstruction {
let OutOperandList = (outs type0:$dst);
let InOperandList = (ins ptype1:$addr);
let hasSideEffects = 0;
let mayLoad = 1;
}
// Generic atomic store. Expects a MachineMemOperand in addition to explicit
// operands. Technically, we could have handled this as a G_STORE; however, we
// decided to keep it separate on the basis that atomic stores tend to have
// very different handling to non-atomic stores.
def G_ATOMIC_STORE : GenericInstruction {
let OutOperandList = (outs);
let InOperandList = (ins type0:$src, ptype1:$addr);
let hasSideEffects = 0;
let mayStore = 1;
}
// Generic atomic cmpxchg with internal success check. Expects a
// MachineMemOperand in addition to explicit operands.
def G_ATOMIC_CMPXCHG_WITH_SUCCESS : GenericInstruction {

View File

@@ -23,6 +23,11 @@
class GINodeEquiv<Instruction i, SDNode node> {
Instruction I = i;
SDNode Node = node;
// SelectionDAG has separate nodes for atomic and non-atomic memory operations
// (ISD::LOAD, ISD::ATOMIC_LOAD, ISD::STORE, ISD::ATOMIC_STORE) but GlobalISel
// stores this information in the MachineMemOperand.
bit CheckMMOIsNonAtomic = 0;
}
// These are defined in the same order as the G_* instructions.
@@ -75,16 +80,19 @@ def : GINodeEquiv<G_BSWAP, bswap>;
// Broadly speaking G_LOAD is equivalent to ISD::LOAD but there are some
// complications that tablegen must take care of. For example, Predicates such
// as isSignExtLoad require that this is not a perfect 1:1 mapping since a
// sign-extending load is (G_SEXT (G_LOAD x)) in GlobalISel.
def : GINodeEquiv<G_LOAD, ld>;
// sign-extending load is (G_SEXT (G_LOAD x)) in GlobalISel. Additionally,
// G_LOAD handles both atomic and non-atomic loads, whereas SelectionDAG had
// separate nodes for them. This GINodeEquiv maps the non-atomic loads to
// G_LOAD with a non-atomic MachineMemOperand.
def : GINodeEquiv<G_LOAD, ld> { let CheckMMOIsNonAtomic = 1; }
// Broadly speaking G_STORE is equivalent to ISD::STORE but there are some
// complications that tablegen must take care of. For example, predicates such
// as isTruncStore require that this is not a perfect 1:1 mapping since a
// truncating store is (G_STORE (G_TRUNCATE x)) in GlobalISel.
def : GINodeEquiv<G_STORE, st>;
def : GINodeEquiv<G_ATOMIC_LOAD, atomic_load>;
def : GINodeEquiv<G_ATOMIC_STORE, atomic_store>;
// truncating store is (G_STORE (G_TRUNCATE x)) in GlobalISel. Additionally,
// G_STORE handles both atomic and non-atomic stores, whereas SelectionDAG had
// separate nodes for them. This GINodeEquiv maps the non-atomic stores to
// G_STORE with a non-atomic MachineMemOperand.
def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = 1; }
def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap>;
def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap>;
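
The restored CheckMMOIsNonAtomic bit is how the DAG-to-GlobalISel equivalence records that `ld`/`st` only correspond to G_LOAD/G_STORE when the memory operand is non-atomic. A hedged sketch of the shape of that mapping; the names and the std::map are invented for illustration, since the real data lives in TableGen records:

    #include <cassert>
    #include <map>
    #include <string>

    // Sketch of what a GINodeEquiv record describes.
    struct NodeEquiv {
      std::string GenericOpcode; // e.g. "G_LOAD"
      bool CheckMMOIsNonAtomic;  // emit a NotAtomic ordering check when set
    };

    int main() {
      const std::map<std::string, NodeEquiv> Equivs = {
          // Atomic and non-atomic loads/stores now share one opcode, so the
          // non-atomic DAG nodes must additionally constrain the MMO.
          {"ld", {"G_LOAD", true}},
          {"st", {"G_STORE", true}},
          // cmpxchg/rmw nodes are inherently atomic; no extra check needed.
          {"atomic_cmp_swap", {"G_ATOMIC_CMPXCHG", false}},
          {"atomic_swap", {"G_ATOMICRMW_XCHG", false}},
      };
      assert(Equivs.at("ld").CheckMMOIsNonAtomic);
    }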

View File

@@ -345,16 +345,6 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
unsigned Res = getOrCreateVReg(LI);
unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
if (LI.getOrdering() != AtomicOrdering::NotAtomic) {
MIRBuilder.buildAtomicLoad(
Res, Addr,
*MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
Flags, DL->getTypeStoreSize(LI.getType()),
getMemOpAlignment(LI), AAMDNodes(), nullptr,
LI.getSyncScopeID(), LI.getOrdering()));
return true;
}
MIRBuilder.buildLoad(
Res, Addr,
*MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
@@ -376,17 +366,6 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
unsigned Val = getOrCreateVReg(*SI.getValueOperand());
unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
if (SI.getOrdering() != AtomicOrdering::NotAtomic) {
MIRBuilder.buildAtomicStore(
Val, Addr,
*MF->getMachineMemOperand(
MachinePointerInfo(SI.getPointerOperand()), Flags,
DL->getTypeStoreSize(SI.getValueOperand()->getType()),
getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSyncScopeID(),
SI.getOrdering()));
return true;
}
MIRBuilder.buildStore(
Val, Addr,
*MF->getMachineMemOperand(
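
Post-revert, translateLoad/translateStore no longer branch on atomicity: the ordering and sync scope simply travel on the MachineMemOperand attached to a single G_LOAD/G_STORE. A toy model of that control flow; IRLoad and MMO are invented stand-ins, not LLVM types:

    #include <cstdint>
    #include <cstdio>

    enum class Ordering { NotAtomic, Unordered, Monotonic, Acquire, Release,
                          AcquireRelease, SequentiallyConsistent };
    struct IRLoad { Ordering Ord; uint64_t Size; };
    struct MMO { Ordering Ord; uint64_t Size; };

    // Post-revert shape of translateLoad: no branch on atomicity; the ordering
    // simply rides along on the memory operand of a single G_LOAD.
    MMO translateLoad(const IRLoad &LI) {
      // (the deleted code branched to a distinct buildAtomicLoad here instead)
      return MMO{LI.Ord, LI.Size};
    }

    int main() {
      MMO M = translateLoad({Ordering::SequentiallyConsistent, 8});
      std::printf("ordering travels on the MMO: %d\n", static_cast<int>(M.Ord));
    }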

View File

@@ -295,8 +295,6 @@ MachineInstrBuilder MachineIRBuilder::buildLoad(unsigned Res, unsigned Addr,
MachineMemOperand &MMO) {
assert(MRI->getType(Res).isValid() && "invalid operand type");
assert(MRI->getType(Addr).isPointer() && "invalid operand type");
assert(MMO.getOrdering() == AtomicOrdering::NotAtomic &&
"invalid atomic ordering");
return buildInstr(TargetOpcode::G_LOAD)
.addDef(Res)
@@ -308,8 +306,6 @@ MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
MachineMemOperand &MMO) {
assert(MRI->getType(Val).isValid() && "invalid operand type");
assert(MRI->getType(Addr).isPointer() && "invalid operand type");
assert(MMO.getOrdering() == AtomicOrdering::NotAtomic &&
"invalid atomic ordering");
return buildInstr(TargetOpcode::G_STORE)
.addUse(Val)
@@ -317,34 +313,6 @@ MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
.addMemOperand(&MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicLoad(unsigned Res,
unsigned Addr,
MachineMemOperand &MMO) {
assert(MRI->getType(Res).isValid() && "invalid operand type");
assert(MRI->getType(Addr).isPointer() && "invalid operand type");
assert(MMO.getOrdering() != AtomicOrdering::NotAtomic &&
"invalid atomic ordering");
return buildInstr(TargetOpcode::G_ATOMIC_LOAD)
.addDef(Res)
.addUse(Addr)
.addMemOperand(&MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicStore(unsigned Val,
unsigned Addr,
MachineMemOperand &MMO) {
assert(MRI->getType(Val).isValid() && "invalid operand type");
assert(MRI->getType(Addr).isPointer() && "invalid operand type");
assert(MMO.getOrdering() != AtomicOrdering::NotAtomic &&
"invalid atomic ordering");
return buildInstr(TargetOpcode::G_ATOMIC_STORE)
.addUse(Val)
.addUse(Addr)
.addMemOperand(&MMO);
}
MachineInstrBuilder MachineIRBuilder::buildUAdde(unsigned Res,
unsigned CarryOut,
unsigned Op0, unsigned Op1,
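
With the "invalid atomic ordering" assertions deleted and buildAtomicLoad/buildAtomicStore removed, buildLoad and buildStore become the single entry points for both kinds of access. A sketch of the resulting API surface under those assumptions; ToyBuilder and MMO are toy stand-ins for MachineIRBuilder and MachineMemOperand:

    #include <cassert>

    enum class Ordering { NotAtomic, Acquire, SequentiallyConsistent };
    struct MMO { Ordering Ord; };

    // Post-revert shape of the builder surface: one buildLoad for everything.
    struct ToyBuilder {
      int NumInstrs = 0;
      int buildLoad(int Res, int Addr, const MMO &M) {
        // The deleted assert on M.Ord == Ordering::NotAtomic no longer applies,
        // so atomic callers use this entry point instead of buildAtomicLoad.
        (void)Res; (void)Addr; (void)M;
        return ++NumInstrs; // stand-in for the created G_LOAD
      }
    };

    int main() {
      ToyBuilder B;
      int Plain  = B.buildLoad(1, 0, MMO{Ordering::NotAtomic});
      int Atomic = B.buildLoad(2, 0, MMO{Ordering::Acquire}); // now permitted
      assert(Plain == 1 && Atomic == 2);
    }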

View File

@@ -889,6 +889,12 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
return false;
}
auto &MemOp = **I.memoperands_begin();
if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
DEBUG(dbgs() << "Atomic load/store not supported yet\n");
return false;
}
const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
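
Because a G_LOAD can now carry an atomic MMO, each target selector regrows the guard shown above: detect an atomic ordering, emit a debug message, and return false so the instruction takes the fallback path. The same pattern reappears in the ARM and X86 hunks below. A self-contained model of the guard, with invented stub types:

    #include <cstdio>

    enum class Ordering { NotAtomic, Monotonic, SequentiallyConsistent };
    struct MemOp { Ordering Ord; };
    struct InstrStub { MemOp MMO; };

    // Shape of the guard each selector regrows: an atomic MMO means "not yet
    // supported", so selection fails and the fallback path takes over.
    bool selectLoadStore(const InstrStub &I) {
      if (I.MMO.Ord != Ordering::NotAtomic) {
        std::fprintf(stderr, "Atomic load/store not supported yet\n");
        return false; // reject; GlobalISel falls back or reports the failure
      }
      return true; // proceed with ordinary load/store selection
    }

    int main() { return selectLoadStore({MemOp{Ordering::NotAtomic}}) ? 0 : 1; }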

View File

@@ -231,14 +231,6 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
setAction({MemOp, 1, p0}, Legal);
}
for (unsigned MemOp : {G_ATOMIC_LOAD, G_ATOMIC_STORE}) {
for (auto Ty : {s8, s16, s32, s64, p0})
setAction({MemOp, Ty}, Legal);
// And everything's fine in addrspace 0.
setAction({MemOp, 1, p0}, Legal);
}
// Constants
for (auto Ty : {s32, s64}) {
setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
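
The deleted loop above was the only legalizer state for the separate atomic opcodes; after the revert, the existing G_LOAD/G_STORE legality rows cover atomic accesses as well, since atomicity lives in the MMO rather than in the opcode. A toy model of that action table; the names are invented and the real API is LegalizerInfo::setAction:

    #include <cassert>
    #include <map>
    #include <utility>

    enum Action { Legal, NarrowScalar };
    enum Opcode { G_LOAD, G_STORE };

    int main() {
      std::map<std::pair<Opcode, int>, Action> Table;
      // No G_ATOMIC_LOAD/G_ATOMIC_STORE rows remain to populate: the G_LOAD
      // and G_STORE entries cover atomic accesses too, because atomicity is
      // a property of the MMO rather than of the opcode.
      for (Opcode Op : {G_LOAD, G_STORE})
        for (int Bits : {8, 16, 32, 64})
          Table[{Op, Bits}] = Legal;
      assert(Table.at({G_LOAD, 32}) == Legal);
    }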

View File

@@ -801,6 +801,12 @@ bool ARMInstructionSelector::select(MachineInstr &I,
return selectGlobal(MIB, MRI);
case G_STORE:
case G_LOAD: {
const auto &MemOp = **I.memoperands_begin();
if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
DEBUG(dbgs() << "Atomic load/store not supported yet\n");
return false;
}
unsigned Reg = I.getOperand(0).getReg();
unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();

View File

@@ -484,6 +484,11 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
auto &MemOp = **I.memoperands_begin();
if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
DEBUG(dbgs() << "Atomic load/store not supported yet\n");
return false;
}
unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
if (NewOpc == Opc)
return false;

View File

@@ -40,10 +40,10 @@ define [1 x double] @constant() {
ret [1 x double] [double 1.0]
}
; The key problem here is that we may fail to create an MBB referenced by a
; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
; happen.
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate constant: [1 x double] (in function: pending_phis)
; The key problem here is that we may fail to create an MBB referenced by a
; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
; happen.
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6, %2; mem:ST4[%addr] GPR:%6,%2 (in function: pending_phis)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -54,7 +54,7 @@ end:
ret i32 %res
true:
%t = extractvalue [1 x double] [double 1.0], 0
store atomic i32 42, i32* %addr seq_cst, align 4
br label %end
false:
@@ -90,6 +90,16 @@ define i128 @sequence_sizes([8 x i8] %in) {
ret i128 undef
}
; Just to make sure we don't accidentally emit a normal load/store.
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2<def>(s64) = G_LOAD %0; mem:LD8[%addr] GPR:%2,%0 (in function: atomic_ops)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
define i64 @atomic_ops(i64* %addr) {
store atomic i64 0, i64* %addr unordered, align 8
%res = load atomic i64, i64* %addr seq_cst, align 8
ret i64 %res
}
; Make sure we don't mess up metadata arguments.
declare void @llvm.write_register.i64(metadata, i64)

View File

@@ -1332,12 +1332,12 @@ define void @test_lifetime_intrin() {
define void @test_load_store_atomics(i8* %addr) {
; CHECK-LABEL: name: test_load_store_atomics
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
; CHECK: [[V0:%[0-9]+]]:_(s8) = G_ATOMIC_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
; CHECK: G_ATOMIC_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
; CHECK: [[V1:%[0-9]+]]:_(s8) = G_ATOMIC_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
; CHECK: G_ATOMIC_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
; CHECK: [[V2:%[0-9]+]]:_(s8) = G_ATOMIC_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst 1 from %ir.addr)
; CHECK: G_ATOMIC_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic 1 into %ir.addr)
; CHECK: [[V0:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
; CHECK: [[V1:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
; CHECK: [[V2:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst 1 from %ir.addr)
; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic 1 into %ir.addr)
%v0 = load atomic i8, i8* %addr unordered, align 1
store atomic i8 %v0, i8* %addr monotonic, align 1

View File

@@ -1,91 +0,0 @@
# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--"
define void @test_load(i8* %addr) {
entry:
ret void
}
define void @test_store(i8* %addr) {
entry:
ret void
}
...
---
name: test_load
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
- { id: 3, class: _ }
- { id: 4, class: _ }
- { id: 5, class: _ }
- { id: 6, class: _ }
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_load
%0(p0) = COPY %x0
; CHECK: %1:_(s8) = G_ATOMIC_LOAD %0(p0) :: (load unordered 1 from %ir.addr)
%1(s8) = G_ATOMIC_LOAD %0 :: (load unordered 1 from %ir.addr)
%10:_(s32) = G_ANYEXT %1
%w0 = COPY %10
; CHECK: %2:_(s16) = G_ATOMIC_LOAD %0(p0) :: (load unordered 2 from %ir.addr)
%2(s16) = G_ATOMIC_LOAD %0 :: (load unordered 2 from %ir.addr)
%11:_(s32) = G_ANYEXT %2
%w0 = COPY %11
; CHECK: %3:_(s32) = G_ATOMIC_LOAD %0(p0) :: (load unordered 4 from %ir.addr)
%3(s32) = G_ATOMIC_LOAD %0 :: (load unordered 4 from %ir.addr)
%w0 = COPY %3
; CHECK: %4:_(s64) = G_ATOMIC_LOAD %0(p0) :: (load unordered 8 from %ir.addr)
%4(s64) = G_ATOMIC_LOAD %0 :: (load unordered 8 from %ir.addr)
%x0 = COPY %4
%5(p0) = G_ATOMIC_LOAD %0(p0) :: (load unordered 8 from %ir.addr)
%12:_(s64) = G_PTRTOINT %5
%x0 = COPY %12
...
---
name: test_store
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
- { id: 3, class: _ }
- { id: 4, class: _ }
- { id: 5, class: _ }
- { id: 6, class: _ }
- { id: 7, class: _ }
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_store
%0(p0) = COPY %x0
%1(s32) = COPY %w1
; CHECK: G_ATOMIC_STORE %2(s8), %0(p0) :: (store unordered 1 into %ir.addr)
%2(s8) = G_TRUNC %1
G_ATOMIC_STORE %2, %0 :: (store unordered 1 into %ir.addr)
; CHECK: G_ATOMIC_STORE %3(s16), %0(p0) :: (store unordered 2 into %ir.addr)
%3(s16) = G_TRUNC %1
G_ATOMIC_STORE %3, %0 :: (store unordered 2 into %ir.addr)
; CHECK: G_ATOMIC_STORE %1(s32), %0(p0) :: (store unordered 4 into %ir.addr)
G_ATOMIC_STORE %1, %0 :: (store unordered 4 into %ir.addr)
; CHECK: G_ATOMIC_STORE %4(s64), %0(p0) :: (store unordered 8 into %ir.addr)
%4(s64) = G_PTRTOINT %0(p0)
G_ATOMIC_STORE %4, %0 :: (store unordered 8 into %ir.addr)
; CHECK: G_ATOMIC_STORE %0(p0), %0(p0) :: (store unordered 8 into %ir.addr)
G_ATOMIC_STORE %0(p0), %0(p0) :: (store unordered 8 into %ir.addr)
...

View File

@@ -1,431 +0,0 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
define void @load_s8_gpr_unordered(i64* %addr) { ret void }
define void @load_s8_gpr_monotonic(i64* %addr) { ret void }
define void @load_s8_gpr_acquire(i64* %addr) { ret void }
define void @load_s8_gpr_release(i64* %addr) { ret void }
define void @load_s8_gpr_acq_rel(i64* %addr) { ret void }
define void @load_s8_gpr_seq_cst(i64* %addr) { ret void }
define void @load_s32_gpr_unordered(i64* %addr) { ret void }
define void @load_s32_gpr_monotonic(i64* %addr) { ret void }
define void @load_s32_gpr_acquire(i64* %addr) { ret void }
define void @load_s32_gpr_release(i64* %addr) { ret void }
define void @load_s32_gpr_acq_rel(i64* %addr) { ret void }
define void @load_s32_gpr_seq_cst(i64* %addr) { ret void }
define void @load_s64_gpr_unordered(i64* %addr) { ret void }
define void @load_s64_gpr_monotonic(i64* %addr) { ret void }
define void @load_s64_gpr_acquire(i64* %addr) { ret void }
define void @load_s64_gpr_release(i64* %addr) { ret void }
define void @load_s64_gpr_acq_rel(i64* %addr) { ret void }
define void @load_s64_gpr_seq_cst(i64* %addr) { ret void }
...
---
name: load_s8_gpr_unordered
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s8_gpr_unordered
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load release 4 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s8) = G_ATOMIC_LOAD %0 :: (load release 4 from %ir.addr)
%2:gpr(s32) = G_ANYEXT %1
%x0 = COPY %2(s32)
...
---
name: load_s8_gpr_monotonic
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s8_gpr_monotonic
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load release 4 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s8) = G_ATOMIC_LOAD %0 :: (load release 4 from %ir.addr)
%2:gpr(s32) = G_ANYEXT %1
%x0 = COPY %2(s32)
...
---
name: load_s8_gpr_acquire
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s8_gpr_acquire
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDARB [[COPY]] :: (load acquire 1 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s8) = G_ATOMIC_LOAD %0 :: (load acquire 1 from %ir.addr)
%2:gpr(s32) = G_ANYEXT %1
%x0 = COPY %2(s32)
...
---
name: load_s8_gpr_release
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s8_gpr_release
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load release 1 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s8) = G_ATOMIC_LOAD %0 :: (load release 1 from %ir.addr)
%2:gpr(s32) = G_ANYEXT %1
%x0 = COPY %2(s32)
...
---
name: load_s8_gpr_acq_rel
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s8_gpr_acq_rel
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDARB [[COPY]] :: (load acq_rel 1 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s8) = G_ATOMIC_LOAD %0 :: (load acq_rel 1 from %ir.addr)
%2:gpr(s32) = G_ANYEXT %1
%x0 = COPY %2(s32)
...
---
name: load_s8_gpr_seq_cst
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s8_gpr_seq_cst
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDARB [[COPY]] :: (load seq_cst 1 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s8) = G_ATOMIC_LOAD %0 :: (load seq_cst 1 from %ir.addr)
%2:gpr(s32) = G_ANYEXT %1
%x0 = COPY %2(s32)
...
---
name: load_s32_gpr_unordered
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s32_gpr_unordered
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load release 4 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s32) = G_ATOMIC_LOAD %0 :: (load release 4 from %ir.addr)
%x0 = COPY %1(s32)
...
---
name: load_s32_gpr_monotonic
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s32_gpr_monotonic
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load release 4 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s32) = G_ATOMIC_LOAD %0 :: (load release 4 from %ir.addr)
%x0 = COPY %1(s32)
...
---
name: load_s32_gpr_acquire
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s32_gpr_acquire
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDARW [[COPY]] :: (load acquire 4 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s32) = G_ATOMIC_LOAD %0 :: (load acquire 4 from %ir.addr)
%x0 = COPY %1(s32)
...
---
name: load_s32_gpr_release
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s32_gpr_release
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load release 4 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s32) = G_ATOMIC_LOAD %0 :: (load release 4 from %ir.addr)
%x0 = COPY %1(s32)
...
---
name: load_s32_gpr_acq_rel
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s32_gpr_acq_rel
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDARW [[COPY]] :: (load acq_rel 4 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s32) = G_ATOMIC_LOAD %0 :: (load acq_rel 4 from %ir.addr)
%x0 = COPY %1(s32)
...
---
name: load_s32_gpr_seq_cst
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s32_gpr_seq_cst
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[T0:%[0-9]+]]:gpr32 = LDARW [[COPY]] :: (load seq_cst 4 from %ir.addr)
; CHECK: %x0 = COPY [[T0]]
%0(p0) = COPY %x0
%1(s32) = G_ATOMIC_LOAD %0 :: (load seq_cst 4 from %ir.addr)
%x0 = COPY %1(s32)
...
---
name: load_s64_gpr_unordered
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s64_gpr_unordered
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[COPY]], 0 :: (load release 8 from %ir.addr)
; CHECK: %x0 = COPY [[LDRXui]]
%0(p0) = COPY %x0
%1(s64) = G_ATOMIC_LOAD %0 :: (load release 8 from %ir.addr)
%x0 = COPY %1(s64)
...
---
name: load_s64_gpr_monotonic
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s64_gpr_monotonic
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[COPY]], 0 :: (load release 8 from %ir.addr)
; CHECK: %x0 = COPY [[LDRXui]]
%0(p0) = COPY %x0
%1(s64) = G_ATOMIC_LOAD %0 :: (load release 8 from %ir.addr)
%x0 = COPY %1(s64)
...
---
name: load_s64_gpr_acquire
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s64_gpr_acquire
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDARX [[COPY]] :: (load acquire 8 from %ir.addr)
; CHECK: %x0 = COPY [[LDRXui]]
%0(p0) = COPY %x0
%1(s64) = G_ATOMIC_LOAD %0 :: (load acquire 8 from %ir.addr)
%x0 = COPY %1(s64)
...
---
name: load_s64_gpr_release
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s64_gpr_release
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[COPY]], 0 :: (load release 8 from %ir.addr)
; CHECK: %x0 = COPY [[LDRXui]]
%0(p0) = COPY %x0
%1(s64) = G_ATOMIC_LOAD %0 :: (load release 8 from %ir.addr)
%x0 = COPY %1(s64)
...
---
name: load_s64_gpr_acq_rel
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s64_gpr_acq_rel
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDARX [[COPY]] :: (load acq_rel 8 from %ir.addr)
; CHECK: %x0 = COPY [[LDRXui]]
%0(p0) = COPY %x0
%1(s64) = G_ATOMIC_LOAD %0 :: (load acq_rel 8 from %ir.addr)
%x0 = COPY %1(s64)
...
---
name: load_s64_gpr_seq_cst
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: load_s64_gpr_seq_cst
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDARX [[COPY]] :: (load seq_cst 8 from %ir.addr)
; CHECK: %x0 = COPY [[LDRXui]]
%0(p0) = COPY %x0
%1(s64) = G_ATOMIC_LOAD %0 :: (load seq_cst 8 from %ir.addr)
%x0 = COPY %1(s64)
...

View File

@@ -832,6 +832,7 @@ def MOVfpimmz : I<(outs FPR32:$dst), (ins f32imm:$imm), [(set FPR32:$dst, fpimmz
// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 22*/ [[LABEL:[0-9]+]],
// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
// CHECK-NEXT: // MIs[0] dst
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
@@ -860,6 +861,7 @@ def LOAD : I<(outs GPR32:$dst), (ins GPR32:$src1),
// CHECK-NEXT: // MIs[0] Operand 1
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s16,
// CHECK-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_LOAD,
// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/1, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
// CHECK-NEXT: // MIs[1] Operand 0
// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_s16,
// CHECK-NEXT: // MIs[1] src1
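
The two restored CHECK-NEXT lines above show the emitter prepending a GIM_CheckAtomicOrdering record (with NotAtomic) right after each G_LOAD opcode check. To make the table format concrete, here is a toy interpreter over such a flat int64_t stream; the opcode names and numeric values are invented for the sketch, not the real enums:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Toy flavour of the generated table: a flat stream of checks, mirroring
    // the CHECK-NEXT lines above.
    enum Check : int64_t { CheckOpcode, CheckAtomicOrdering, Accept };
    struct Insn { int64_t Opcode; int64_t MMOOrdering; };

    bool executeTable(const std::vector<int64_t> &Table, const Insn &MI) {
      size_t Idx = 0;
      while (Idx < Table.size()) {
        switch (Table[Idx++]) {
        case CheckOpcode:
          if (MI.Opcode != Table[Idx++]) return false;
          break;
        case CheckAtomicOrdering: // reject unless the MMO ordering matches
          if (MI.MMOOrdering != Table[Idx++]) return false;
          break;
        case Accept:
          return true;
        }
      }
      return false;
    }

    int main() {
      const int64_t G_LOAD = 100, NotAtomic = 0, Acquire = 4;
      const std::vector<int64_t> Table = {CheckOpcode, G_LOAD,
                                          CheckAtomicOrdering, NotAtomic,
                                          Accept};
      assert(executeTable(Table, {G_LOAD, NotAtomic})); // plain load matches
      assert(!executeTable(Table, {G_LOAD, Acquire}));  // atomic load rejected
    }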

View File

@@ -2378,9 +2378,8 @@ private:
CodeGenRegBank CGRegs;
/// Keep track of the equivalence between SDNodes and Instruction by mapping
/// SDNodes to the GINodeEquiv mapping. We map to the GINodeEquiv in case we
/// need to check for attributes on the relation such as (the now removed)
/// CheckMMOIsNonAtomic.
/// SDNodes to the GINodeEquiv mapping. We need to map to the GINodeEquiv to
/// check for attributes on the relation such as CheckMMOIsNonAtomic.
/// This is defined using 'GINodeEquiv' in the target description.
DenseMap<Record *, Record *> NodeEquivs;
@@ -2399,8 +2398,6 @@ private:
Record *findNodeEquiv(Record *N) const;
Error importRulePredicates(RuleMatcher &M, ArrayRef<Predicate> Predicates);
Error importInstructionPredicates(InstructionMatcher &InsnMatcher,
const TreePatternNode *Src) const;
Expected<InstructionMatcher &> createAndImportSelDAGMatcher(
RuleMatcher &Rule, InstructionMatcher &InsnMatcher,
const TreePatternNode *Src, unsigned &TempOpIdx) const;
@@ -2486,8 +2483,45 @@ GlobalISelEmitter::importRulePredicates(RuleMatcher &M,
return Error::success();
}
Error GlobalISelEmitter::importInstructionPredicates(
InstructionMatcher &InsnMatcher, const TreePatternNode *Src) const {
Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
RuleMatcher &Rule, InstructionMatcher &InsnMatcher,
const TreePatternNode *Src, unsigned &TempOpIdx) const {
Record *SrcGIEquivOrNull = nullptr;
const CodeGenInstruction *SrcGIOrNull = nullptr;
// Start with the defined operands (i.e., the results of the root operator).
if (Src->getExtTypes().size() > 1)
return failedImport("Src pattern has multiple results");
if (Src->isLeaf()) {
Init *SrcInit = Src->getLeafValue();
if (isa<IntInit>(SrcInit)) {
InsnMatcher.addPredicate<InstructionOpcodeMatcher>(
&Target.getInstruction(RK.getDef("G_CONSTANT")));
} else
return failedImport(
"Unable to deduce gMIR opcode to handle Src (which is a leaf)");
} else {
SrcGIEquivOrNull = findNodeEquiv(Src->getOperator());
if (!SrcGIEquivOrNull)
return failedImport("Pattern operator lacks an equivalent Instruction" +
explainOperator(Src->getOperator()));
SrcGIOrNull = &Target.getInstruction(SrcGIEquivOrNull->getValueAsDef("I"));
// The operators look good: match the opcode
InsnMatcher.addPredicate<InstructionOpcodeMatcher>(SrcGIOrNull);
}
unsigned OpIdx = 0;
for (const TypeSetByHwMode &VTy : Src->getExtTypes()) {
// Results don't have a name unless they are the root node. The caller will
// set the name if appropriate.
OperandMatcher &OM = InsnMatcher.addOperand(OpIdx++, "", TempOpIdx);
if (auto Error = OM.addTypeCheckPredicate(VTy, false /* OperandIsAPointer */))
return failedImport(toString(std::move(Error)) +
" for result of Src pattern operator");
}
for (const auto &Predicate : Src->getPredicateFns()) {
if (Predicate.isAlwaysTrue())
continue;
@@ -2576,50 +2610,9 @@ Error GlobalISelEmitter::importInstructionPredicates(
return failedImport("Src pattern child has predicate (" +
explainPredicates(Src) + ")");
}
if (SrcGIEquivOrNull && SrcGIEquivOrNull->getValueAsBit("CheckMMOIsNonAtomic"))
InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("NotAtomic");
return Error::success();
}
Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
RuleMatcher &Rule, InstructionMatcher &InsnMatcher,
const TreePatternNode *Src, unsigned &TempOpIdx) const {
Record *SrcGIEquivOrNull = nullptr;
const CodeGenInstruction *SrcGIOrNull = nullptr;
// Start with the defined operands (i.e., the results of the root operator).
if (Src->getExtTypes().size() > 1)
return failedImport("Src pattern has multiple results");
if (Src->isLeaf()) {
Init *SrcInit = Src->getLeafValue();
if (isa<IntInit>(SrcInit)) {
InsnMatcher.addPredicate<InstructionOpcodeMatcher>(
&Target.getInstruction(RK.getDef("G_CONSTANT")));
} else
return failedImport(
"Unable to deduce gMIR opcode to handle Src (which is a leaf)");
} else {
SrcGIEquivOrNull = findNodeEquiv(Src->getOperator());
if (!SrcGIEquivOrNull)
return failedImport("Pattern operator lacks an equivalent Instruction" +
explainOperator(Src->getOperator()));
SrcGIOrNull = &Target.getInstruction(SrcGIEquivOrNull->getValueAsDef("I"));
// The operators look good: match the opcode
InsnMatcher.addPredicate<InstructionOpcodeMatcher>(SrcGIOrNull);
}
unsigned OpIdx = 0;
for (const TypeSetByHwMode &VTy : Src->getExtTypes()) {
// Results don't have a name unless they are the root node. The caller will
// set the name if appropriate.
OperandMatcher &OM = InsnMatcher.addOperand(OpIdx++, "", TempOpIdx);
if (auto Error = OM.addTypeCheckPredicate(VTy, false /* OperandIsAPointer */))
return failedImport(toString(std::move(Error)) +
" for result of Src pattern operator");
}
if (Src->isLeaf()) {
Init *SrcInit = Src->getLeafValue();
if (IntInit *SrcIntInit = dyn_cast<IntInit>(SrcInit)) {
@@ -2638,8 +2631,6 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
// here since we don't support ImmLeaf predicates yet. However, we still
// need to note the hidden operand to get GIM_CheckNumOperands correct.
InsnMatcher.addOperand(OpIdx++, "", TempOpIdx);
if (auto Error = importInstructionPredicates(InsnMatcher, Src))
return std::move(Error);
return InsnMatcher;
}
@@ -2674,8 +2665,6 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
}
}
if (auto Error = importInstructionPredicates(InsnMatcher, Src))
return std::move(Error);
return InsnMatcher;
}
@@ -3709,40 +3698,6 @@ TreePatternNode *GlobalISelEmitter::fixupPatternNode(TreePatternNode *N) {
return Ext;
}
}
if (N->getOperator()->getName() == "atomic_load") {
// If it's an atomic load we need to adapt the pattern slightly. We need
// to split the node into (anyext (atomic_load ...)), and then apply the
// <<atomic_load_TY>> predicate by updating the result type of the load.
//
// For example:
// (atomic_load:[i32] [iPTR])<<atomic_load_i16>>
// must be transformed into:
// (anyext:[i32] (atomic_load:[i16] [iPTR]))
std::vector<TreePredicateFn> Predicates;
Record *MemVT = nullptr;
for (const auto &P : N->getPredicateFns()) {
if (P.isAtomic() && P.getMemoryVT()) {
MemVT = P.getMemoryVT();
continue;
}
Predicates.push_back(P);
}
if (MemVT) {
TypeSetByHwMode ValueTy = getValueType(MemVT);
if (ValueTy != N->getType(0)) {
TreePatternNode *Ext =
new TreePatternNode(RK.getDef("anyext"), {N}, 1);
Ext->setType(0, N->getType(0));
N->clearPredicateFns();
N->setPredicateFns(Predicates);
N->setType(0, ValueTy);
return Ext;
}
}
}
}
return N;
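
For reference, the transformation this removed fixup performed: split a predicated atomic_load node so that the load produces the memory type and an anyext restores the original result width. A standalone toy version under those assumptions; PatNode and the string-typed "types" are invented for illustration:

    #include <cassert>
    #include <memory>
    #include <string>

    // Toy pattern-tree node. The real code works on TreePatternNode and a
    // TypeSetByHwMode derived from the predicate's memory VT.
    struct PatNode {
      std::string Operator;
      std::string ResultType;
      std::unique_ptr<PatNode> Child;
    };

    // (atomic_load:i32)<<atomic_load_i16>> becomes (anyext:i32 (atomic_load:i16)).
    std::unique_ptr<PatNode> fixupAtomicLoad(std::unique_ptr<PatNode> N,
                                             const std::string &MemVT) {
      if (N->Operator != "atomic_load" || MemVT == N->ResultType)
        return N; // nothing to split
      auto Ext = std::make_unique<PatNode>();
      Ext->Operator = "anyext";
      Ext->ResultType = N->ResultType; // the extension keeps the original width
      N->ResultType = MemVT;           // the load now produces the memory type
      Ext->Child = std::move(N);
      return Ext;
    }

    int main() {
      auto N = std::make_unique<PatNode>(PatNode{"atomic_load", "i32", nullptr});
      auto Fixed = fixupAtomicLoad(std::move(N), "i16");
      assert(Fixed->Operator == "anyext" && Fixed->Child->ResultType == "i16");
    }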