mirror of
https://github.com/RPCS3/llvm.git
synced 2025-01-27 05:32:22 +00:00
Add remaining 64-bit atomic patterns for x86-64.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55029 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
3e697cfa97
commit
a99e38495f
@ -6568,6 +6568,38 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
|
||||
X86::NOT8r, X86::AL,
|
||||
X86::GR8RegisterClass, true);
|
||||
// FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
|
||||
case X86::ATOMAND64:
|
||||
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
|
||||
X86::AND64ri32, X86::MOV64rm,
|
||||
X86::LCMPXCHG64, X86::MOV64rr,
|
||||
X86::NOT64r, X86::RAX,
|
||||
X86::GR64RegisterClass);
|
||||
case X86::ATOMOR64:
|
||||
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
|
||||
X86::OR64ri32, X86::MOV64rm,
|
||||
X86::LCMPXCHG64, X86::MOV64rr,
|
||||
X86::NOT64r, X86::RAX,
|
||||
X86::GR64RegisterClass);
|
||||
case X86::ATOMXOR64:
|
||||
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
|
||||
X86::XOR64ri32, X86::MOV64rm,
|
||||
X86::LCMPXCHG64, X86::MOV64rr,
|
||||
X86::NOT64r, X86::RAX,
|
||||
X86::GR64RegisterClass);
|
||||
case X86::ATOMNAND64:
|
||||
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
|
||||
X86::AND64ri32, X86::MOV64rm,
|
||||
X86::LCMPXCHG64, X86::MOV64rr,
|
||||
X86::NOT64r, X86::RAX,
|
||||
X86::GR64RegisterClass, true);
|
||||
case X86::ATOMMIN64:
|
||||
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
|
||||
case X86::ATOMMAX64:
|
||||
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr);
|
||||
case X86::ATOMUMIN64:
|
||||
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
|
||||
case X86::ATOMUMAX64:
|
||||
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1148,6 +1148,34 @@ def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val)
|
||||
[(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
|
||||
}
|
||||
|
||||
// Atomic exchange, and, or, xor, nand, min/max (64-bit).
// These are pseudo-instructions (opcode 0) expanded after ISel by the
// custom inserter (EmitInstrWithCustomInserter) into compare-and-exchange
// loops built around LCMPXCHG64; hence $val is tied to $dst, EFLAGS is
// clobbered by the expansion, and usesCustomDAGSchedInserter = 1.
let Constraints = "$val = $dst", Defs = [EFLAGS],
    usesCustomDAGSchedInserter = 1 in {
def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMAND64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_and addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "#ATOMOR64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_or addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMXOR64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_xor addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                   "#ATOMNAND64 PSEUDO!",
                   [(set GR64:$dst, (atomic_load_nand addr:$ptr, GR64:$val))]>;
def ATOMMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMMIN64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_min addr:$ptr, GR64:$val))]>;
def ATOMMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMMAX64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_max addr:$ptr, GR64:$val))]>;
def ATOMUMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                   "#ATOMUMIN64 PSEUDO!",
                   [(set GR64:$dst, (atomic_load_umin addr:$ptr, GR64:$val))]>;
def ATOMUMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                   "#ATOMUMAX64 PSEUDO!",
                   [(set GR64:$dst, (atomic_load_umax addr:$ptr, GR64:$val))]>;
}
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// Non-Instruction Patterns
|
||||
|
Loading…
x
Reference in New Issue
Block a user