Generic expansion for atomic load/store into cmpxchg/atomicrmw xchg; implements 64-bit atomic load/store for ARM.
llvm-svn: 138872
commit 8ae6a88723
parent a2f9012605
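In a nutshell: when the integer legalizer has to expand an ATOMIC_LOAD or ATOMIC_STORE node (for example a 64-bit atomic access on 32-bit ARM), it now rewrites the load as a compare-and-swap of 0 with 0, which never modifies memory but still returns the current contents, and rewrites the store as an exchange whose old value is discarded. A rough IR-level sketch of the same idea (illustrative only; the function names are made up, and the commit itself operates on SelectionDAG nodes rather than IR):

define i64 @load64_as_cmpxchg(i64* %ptr) {
  ; cmpxchg %ptr, 0, 0: if *%ptr is 0, store 0 back (a no-op); otherwise
  ; store nothing. Either way the returned value is the current contents.
  %v = cmpxchg i64* %ptr, i64 0, i64 0 seq_cst
  ret i64 %v
}

define void @store64_as_xchg(i64* %ptr, i64 %val) {
  ; atomicrmw xchg writes %val and returns the old value, which a plain
  ; atomic store has no use for.
  %old = atomicrmw xchg i64* %ptr, i64 %val seq_cst
  ret void
}

On ARM both forms lower to the ldrexd/strexd loops checked by the tests below.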
@@ -1057,6 +1057,7 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
   case ISD::UDIV: ExpandIntRes_UDIV(N, Lo, Hi); break;
   case ISD::UREM: ExpandIntRes_UREM(N, Lo, Hi); break;
   case ISD::ZERO_EXTEND: ExpandIntRes_ZERO_EXTEND(N, Lo, Hi); break;
+  case ISD::ATOMIC_LOAD: ExpandIntRes_ATOMIC_LOAD(N, Lo, Hi); break;

   case ISD::ATOMIC_LOAD_ADD:
   case ISD::ATOMIC_LOAD_SUB:
@@ -2323,6 +2324,20 @@ void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N,
   }
 }

+void DAGTypeLegalizer::ExpandIntRes_ATOMIC_LOAD(SDNode *N,
+                                                SDValue &Lo, SDValue &Hi) {
+  DebugLoc dl = N->getDebugLoc();
+  EVT VT = cast<AtomicSDNode>(N)->getMemoryVT();
+  SDValue Zero = DAG.getConstant(0, VT);
+  SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT,
+                               N->getOperand(0),
+                               N->getOperand(1), Zero, Zero,
+                               cast<AtomicSDNode>(N)->getMemOperand(),
+                               cast<AtomicSDNode>(N)->getOrdering(),
+                               cast<AtomicSDNode>(N)->getSynchScope());
+  ReplaceValueWith(SDValue(N, 0), Swap.getValue(0));
+  ReplaceValueWith(SDValue(N, 1), Swap.getValue(1));
+}

 //===----------------------------------------------------------------------===//
 // Integer Operand Expansion
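Note (editor's gloss, not part of the patch): ExpandIntRes_ATOMIC_LOAD does not compute Lo/Hi itself; it builds an ATOMIC_CMP_SWAP that compares the memory contents against zero and swaps in zero, which leaves memory unchanged, and then replaces both results of the original ATOMIC_LOAD (the loaded value and the chain) with the corresponding results of that swap.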
@@ -2367,6 +2382,8 @@ bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
   case ISD::ROTR: Res = ExpandIntOp_Shift(N); break;
   case ISD::RETURNADDR:
   case ISD::FRAMEADDR: Res = ExpandIntOp_RETURNADDR(N); break;
+
+  case ISD::ATOMIC_STORE: Res = ExpandIntOp_ATOMIC_STORE(N); break;
   }

   // If the result is null, the sub-method took care of registering results etc.
@@ -2744,6 +2761,19 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
   return MakeLibCall(LC, DstVT, &Op, 1, true, dl);
 }
+
+SDValue DAGTypeLegalizer::ExpandIntOp_ATOMIC_STORE(SDNode *N) {
+  DebugLoc dl = N->getDebugLoc();
+  SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
+                               cast<AtomicSDNode>(N)->getMemoryVT(),
+                               N->getOperand(0),
+                               N->getOperand(1), N->getOperand(2),
+                               cast<AtomicSDNode>(N)->getMemOperand(),
+                               cast<AtomicSDNode>(N)->getOrdering(),
+                               cast<AtomicSDNode>(N)->getSynchScope());
+  return Swap.getValue(1);
+}
+

 SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
   SDValue InOp0 = N->getOperand(0);
   EVT InVT = InOp0.getValueType();
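Note (editor's gloss, not part of the patch): ExpandIntOp_ATOMIC_STORE turns the ATOMIC_STORE into an ATOMIC_SWAP with the same chain, pointer and value operands, and returns only Swap.getValue(1), the chain; the value the exchange loads back is simply dropped, since a plain atomic store produces no value.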
@@ -320,6 +320,8 @@ private:
   void ExpandIntRes_UADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandIntRes_XMULO (SDNode *N, SDValue &Lo, SDValue &Hi);

+  void ExpandIntRes_ATOMIC_LOAD (SDNode *N, SDValue &Lo, SDValue &Hi);
+
   void ExpandShiftByConstant(SDNode *N, unsigned Amt,
                              SDValue &Lo, SDValue &Hi);
   bool ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -339,6 +341,7 @@ private:
   SDValue ExpandIntOp_TRUNCATE(SDNode *N);
   SDValue ExpandIntOp_UINT_TO_FP(SDNode *N);
   SDValue ExpandIntOp_RETURNADDR(SDNode *N);
+  SDValue ExpandIntOp_ATOMIC_STORE(SDNode *N);

   void IntegerExpandSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
                                   ISD::CondCode &CCCode, DebugLoc dl);
@@ -6,7 +6,7 @@ define i64 @test1(i64* %ptr, i64 %val) {
 ; CHECK: ldrexd r2, r3
 ; CHECK: adds r0, r2
 ; CHECK: adc r1, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -20,7 +20,7 @@ define i64 @test2(i64* %ptr, i64 %val) {
 ; CHECK: ldrexd r2, r3
 ; CHECK: subs r0, r2
 ; CHECK: sbc r1, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -34,7 +34,7 @@ define i64 @test3(i64* %ptr, i64 %val) {
 ; CHECK: ldrexd r2, r3
 ; CHECK: and r0, r2
 ; CHECK: and r1, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -48,7 +48,7 @@ define i64 @test4(i64* %ptr, i64 %val) {
 ; CHECK: ldrexd r2, r3
 ; CHECK: orr r0, r2
 ; CHECK: orr r1, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -62,7 +62,7 @@ define i64 @test5(i64* %ptr, i64 %val) {
 ; CHECK: ldrexd r2, r3
 ; CHECK: eor r0, r2
 ; CHECK: eor r1, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -74,7 +74,7 @@ define i64 @test6(i64* %ptr, i64 %val) {
 ; CHECK: test6
 ; CHECK: dmb ish
 ; CHECK: ldrexd r2, r3
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
@@ -89,10 +89,40 @@ define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
 ; CHECK: cmp r2
 ; CHECK: cmpeq r3
 ; CHECK: bne
-; CHECK: strexd {{r[0-9]+}}, r0, r1
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
 ; CHECK: cmp
 ; CHECK: bne
 ; CHECK: dmb ish
   %r = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst
   ret i64 %r
 }
+
+; Compiles down to cmpxchg
+; FIXME: Should compile to a single ldrexd
+define i64 @test8(i64* %ptr) {
+; CHECK: test8
+; CHECK: ldrexd r2, r3
+; CHECK: cmp r2
+; CHECK: cmpeq r3
+; CHECK: bne
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+  %r = load atomic i64* %ptr seq_cst, align 8
+  ret i64 %r
+}
+
+; Compiles down to atomicrmw xchg; there really isn't any more efficient
+; way to write it.
+define void @test9(i64* %ptr, i64 %val) {
+; CHECK: test9
+; CHECK: dmb ish
+; CHECK: ldrexd r2, r3
+; CHECK: strexd {{[a-z0-9]+}}, r0, r1
+; CHECK: cmp
+; CHECK: bne
+; CHECK: dmb ish
+  store atomic i64 %val, i64* %ptr seq_cst, align 8
+  ret void
+}
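Note (editor's gloss, not part of the patch): the strexd CHECK lines in test1-test7 are loosened from {{r[0-9]+}} to {{[a-z0-9]+}}, presumably so the status operand can match any register name the allocator picks, not just an r-numbered one. test8 and test9 cover the new paths: a 64-bit seq_cst load currently goes through the same ldrexd/strexd loop as a cmpxchg (hence the FIXME that a single ldrexd would suffice), and a 64-bit seq_cst store becomes the exchange loop bracketed by dmb ish barriers.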