Representation of 'atomic load' and 'atomic store' in IR.

llvm-svn: 137170
Eli Friedman 2011-08-09 23:02:53 +00:00
parent 8ad37f68a2
commit 5a2d27800e
12 changed files with 390 additions and 78 deletions


@ -3,6 +3,7 @@
<html>
<head>
<title>LLVM Atomic Instructions and Concurrency Guide</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<link rel="stylesheet" href="llvm.css" type="text/css">
</head>
<body>


@ -1583,8 +1583,10 @@ as if it writes to the relevant surrounding bytes.
<div class="doc_text">

<p>Atomic instructions (<a href="#i_cmpxchg"><code>cmpxchg</code></a>,
-<a href="#i_atomicrmw"><code>atomicrmw</code></a>, and
-<a href="#i_fence"><code>fence</code></a>) take an ordering parameter
<a href="#i_atomicrmw"><code>atomicrmw</code></a>,
<a href="#i_fence"><code>fence</code></a>,
<a href="#i_load"><code>atomic load</code></a>, and
<a href="#i_store"><code>atomic store</code></a>) take an ordering parameter
that determines which other atomic instructions on the same address they
<i>synchronize with</i>. These semantics are borrowed from Java and C++0x,
but are somewhat more colloquial. If these descriptions aren't precise enough,
@ -1592,11 +1594,7 @@ check those specs. <a href="#i_fence"><code>fence</code></a> instructions
treat these orderings somewhat differently since they don't take an address.
See that instruction's documentation for details.</p>

-<!-- FIXME Note atomic load+store here once those get added. -->

<dl>
-<!-- FIXME: unordered is intended to be used for atomic load and store;
-     it isn't allowed for any instruction yet. -->
<dt><code>unordered</code></dt>
<dd>The set of values that can be read is governed by the happens-before
partial order. A value cannot be read unless some operation wrote it.
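As a rough sketch of the "synchronize with" relationship described above, using the atomic load and store forms this patch introduces (the %flag/%data names and i32 types are purely illustrative, not taken from the patch):

  ; thread 1: publish %data, then set the flag with a release store
  store i32 42, i32* %data
  atomic store i32 1, i32* %flag release, align 4

  ; thread 2: an acquire load that observes the flag store synchronizes
  ; with it, making the earlier store to %data visible
  %f = atomic load i32* %flag acquire, align 4
  %d = load i32* %data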
@ -4572,8 +4570,8 @@ that the invoke/unwind semantics are likely to change in future versions.</p>
<h5>Syntax:</h5>
<pre>
- &lt;result&gt; = load &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;]
- &lt;result&gt; = volatile load &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;]
  &lt;result&gt; = [volatile] load &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;]
  &lt;result&gt; = atomic [volatile] load &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt;
  !&lt;index&gt; = !{ i32 1 }
</pre>
@ -4588,6 +4586,19 @@ that the invoke/unwind semantics are likely to change in future versions.</p>
   number or order of execution of this <tt>load</tt> with other <a
   href="#volatile">volatile operations</a>.</p>
<p>If the <code>load</code> is marked as <code>atomic</code>, it takes an extra
<a href="#ordering">ordering</a> and optional <code>singlethread</code>
argument. The <code>release</code> and <code>acq_rel</code> orderings are
not valid on <code>load</code> instructions. Atomic loads produce <a
href="#memorymodel">defined</a> results when they may see multiple atomic
stores. The type of the pointee must be an integer type whose bit width
is a power of two greater than or equal to eight and less than or equal
to a target-specific size limit. <code>align</code> must be explicitly
specified on atomic loads, and the load has undefined behavior if the
alignment is not set to a value which is at least the size in bytes of
the pointee. <code>!nontemporal</code> does not have any defined semantics
for atomic loads.</p>
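A few illustrative forms of the extended load syntax follow; the pointer name, the i32 type, and the seq_cst keyword are assumptions for this sketch rather than text from the patch:

  %v1 = load i32* %ptr                                          ; ordinary load
  %v2 = atomic load i32* %ptr acquire, align 4                  ; atomic load, acquire ordering
  %v3 = atomic volatile load i32* %ptr seq_cst, align 4         ; atomic and volatile
  %v4 = atomic load i32* %ptr singlethread unordered, align 4   ; ordered only within one thread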
<p>The optional constant <tt>align</tt> argument specifies the alignment of the
   operation (that is, the alignment of the memory address). A value of 0 or an
   omitted <tt>align</tt> argument means that the operation has the preferential
@ -4631,8 +4642,8 @@ that the invoke/unwind semantics are likely to change in future versions.</p>
<h5>Syntax:</h5>
<pre>
- store &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;]          <i>; yields {void}</i>
- volatile store &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;] <i>; yields {void}</i>
  [volatile] store &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;] <i>; yields {void}</i>
  atomic [volatile] store &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt; <i>; yields {void}</i>
</pre>

<h5>Overview:</h5>
@ -4648,6 +4659,19 @@ that the invoke/unwind semantics are likely to change in future versions.</p>
   order of execution of this <tt>store</tt> with other <a
   href="#volatile">volatile operations</a>.</p>
<p>If the <code>store</code> is marked as <code>atomic</code>, it takes an extra
<a href="#ordering">ordering</a> and optional <code>singlethread</code>
argument. The <code>acquire</code> and <code>acq_rel</code> orderings aren't
valid on <code>store</code> instructions. Atomic loads produce <a
href="#memorymodel">defined</a> results when they may see multiple atomic
stores. The type of the pointee must be an integer type whose bit width
is a power of two greater than or equal to eight and less than or equal
to a target-specific size limit. <code>align</code> must be explicitly
specified on atomic stores, and the store has undefined behavior if the
alignment is not set to a value which is at least the size in bytes of
the pointee. <code>!nontemporal</code> does not have any defined semantics
for atomic stores.</p>
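The corresponding store forms, again as a hedged sketch with illustrative names and types:

  store i32 %val, i32* %ptr                                       ; ordinary store
  atomic store i32 %val, i32* %ptr release, align 4               ; atomic store, release ordering
  atomic volatile store i32 %val, i32* %ptr seq_cst, align 4      ; atomic and volatile
  atomic store i32 %val, i32* %ptr singlethread unordered, align 4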
<p>The optional constant "align" argument specifies the alignment of the
   operation (that is, the alignment of the memory address). A value of 0 or an
   omitted "align" argument means that the operation has the preferential
@ -4730,9 +4754,6 @@ operations and/or fences.</p>
   specifies that the fence only synchronizes with other fences in the same
   thread. (This is useful for interacting with signal handlers.)</p>

-<p>FIXME: This instruction is a work in progress; until it is finished, use
-   llvm.memory.barrier.

<h5>Example:</h5>
<pre>
  fence acquire                          <i>; yields {void}</i>


@ -307,7 +307,11 @@ namespace bitc {
    FUNC_CODE_INST_ATOMICRMW = 38,  // ATOMICRMW: [ptrty,ptr,val, operation,
                                    //             align, vol,
                                    //             ordering, synchscope]
-   FUNC_CODE_INST_RESUME = 39      // RESUME:    [opval]
    FUNC_CODE_INST_RESUME = 39,     // RESUME:    [opval]
    FUNC_CODE_INST_LOADATOMIC = 40, // LOAD: [opty, op, align, vol,
                                    //        ordering, synchscope]
    FUNC_CODE_INST_STOREATOMIC = 41 // STORE: [ptrty,ptr,val, align, vol
                                    //         ordering, synchscope]
  };
} // End bitc namespace
} // End llvm namespace


@ -142,12 +142,20 @@ public:
  LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
           Instruction *InsertBefore = 0);
- LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
-          unsigned Align, Instruction *InsertBefore = 0);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, Instruction *InsertBefore = 0);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, BasicBlock *InsertAtEnd);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, AtomicOrdering Order,
           SynchronizationScope SynchScope = CrossThread,
           Instruction *InsertBefore = 0);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, AtomicOrdering Order,
           SynchronizationScope SynchScope,
           BasicBlock *InsertAtEnd);
  LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
  LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
@ -171,11 +179,47 @@ public:
  /// getAlignment - Return the alignment of the access that is being performed
  ///
  unsigned getAlignment() const {
-   return (1 << (getSubclassDataFromInstruction() >> 1)) >> 1;
    return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
  }

  void setAlignment(unsigned Align);
  /// Returns the ordering effect of this load.
AtomicOrdering getOrdering() const {
return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
}
/// Set the ordering constraint on this load. May not be Release or
/// AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
(Ordering << 7));
}
SynchronizationScope getSynchScope() const {
return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
}
/// Specify whether this load is ordered with respect to all
/// concurrently executing threads, or only with respect to signal handlers
/// executing in the same thread.
void setSynchScope(SynchronizationScope xthread) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
(xthread << 6));
}
bool isAtomic() const { return getOrdering() != NotAtomic; }
void setAtomic(AtomicOrdering Ordering,
SynchronizationScope SynchScope = CrossThread) {
setOrdering(Ordering);
setSynchScope(SynchScope);
}
bool isSimple() const { return !isAtomic() && !isVolatile(); }
bool isUnordered() const {
return getOrdering() <= Unordered && !isVolatile();
}
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
@ -222,19 +266,27 @@ public:
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
            Instruction *InsertBefore = 0);
- StoreInst(Value *Val, Value *Ptr, bool isVolatile,
-           unsigned Align, Instruction *InsertBefore = 0);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, Instruction *InsertBefore = 0);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope = CrossThread,
Instruction *InsertBefore = 0);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);
- /// isVolatile - Return true if this is a load from a volatile memory
  /// isVolatile - Return true if this is a store to a volatile memory
  /// location.
  ///
  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }

- /// setVolatile - Specify whether this is a volatile load or not.
  /// setVolatile - Specify whether this is a volatile store or not.
  ///
  void setVolatile(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
@ -247,11 +299,47 @@ public:
  /// getAlignment - Return the alignment of the access that is being performed
  ///
  unsigned getAlignment() const {
-   return (1 << (getSubclassDataFromInstruction() >> 1)) >> 1;
    return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
  }

  void setAlignment(unsigned Align);
/// Returns the ordering effect of this store.
AtomicOrdering getOrdering() const {
return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
}
/// Set the ordering constraint on this store. May not be Acquire or
/// AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
(Ordering << 7));
}
SynchronizationScope getSynchScope() const {
return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
}
/// Specify whether this store instruction is ordered with respect to all
/// concurrently executing threads, or only with respect to signal handlers
/// executing in the same thread.
void setSynchScope(SynchronizationScope xthread) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
(xthread << 6));
}
bool isAtomic() const { return getOrdering() != NotAtomic; }
void setAtomic(AtomicOrdering Ordering,
SynchronizationScope SynchScope = CrossThread) {
setOrdering(Ordering);
setSynchScope(SynchScope);
}
bool isSimple() const { return !isAtomic() && !isVolatile(); }
bool isUnordered() const {
return getOrdering() <= Unordered && !isVolatile();
}
  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }
@ -319,18 +407,8 @@ public:
  /// Set the ordering constraint on this fence. May only be Acquire, Release,
  /// AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
-   switch (Ordering) {
-   case Acquire:
-   case Release:
-   case AcquireRelease:
-   case SequentiallyConsistent:
-     setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
-                                (Ordering << 1));
-     return;
-   default:
-     llvm_unreachable("FenceInst ordering must be Acquire, Release,"
-                      " AcquireRelease, or SequentiallyConsistent");
-   }
    setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
                               (Ordering << 1));
  }

  SynchronizationScope getSynchScope() const {
@ -555,7 +633,7 @@ public:
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != NotAtomic &&
           "atomicrmw instructions can only be atomic.");
-   setInstructionSubclassData((getSubclassDataFromInstruction() & ~28) |
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
                               (Ordering << 2));
  }
@ -569,7 +647,7 @@ public:
  /// Returns the ordering constraint on this RMW.
  AtomicOrdering getOrdering() const {
-   return AtomicOrdering((getSubclassDataFromInstruction() & 28) >> 2);
    return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
  }

  /// Returns whether this RMW is atomic between threads or only within a


@ -2949,16 +2949,23 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
  case lltok::kw_tail:      return ParseCall(Inst, PFS, true);
  // Memory.
  case lltok::kw_alloca:    return ParseAlloc(Inst, PFS);
- case lltok::kw_load:      return ParseLoad(Inst, PFS, false);
- case lltok::kw_store:     return ParseStore(Inst, PFS, false);
  case lltok::kw_load:      return ParseLoad(Inst, PFS, false, false);
  case lltok::kw_store:     return ParseStore(Inst, PFS, false, false);
  case lltok::kw_cmpxchg:   return ParseCmpXchg(Inst, PFS, false);
  case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS, false);
  case lltok::kw_fence:     return ParseFence(Inst, PFS);
case lltok::kw_atomic: {
bool isVolatile = EatIfPresent(lltok::kw_volatile);
if (EatIfPresent(lltok::kw_load))
return ParseLoad(Inst, PFS, true, isVolatile);
else if (EatIfPresent(lltok::kw_store))
return ParseStore(Inst, PFS, true, isVolatile);
}
  case lltok::kw_volatile:
    if (EatIfPresent(lltok::kw_load))
-     return ParseLoad(Inst, PFS, true);
      return ParseLoad(Inst, PFS, false, true);
    else if (EatIfPresent(lltok::kw_store))
-     return ParseStore(Inst, PFS, true);
      return ParseStore(Inst, PFS, false, true);
    else if (EatIfPresent(lltok::kw_cmpxchg))
      return ParseCmpXchg(Inst, PFS, true);
    else if (EatIfPresent(lltok::kw_atomicrmw))
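For reference, a sketch of the token order this dispatch accepts: the atomic keyword comes first, then an optional volatile, then the opcode (value and pointer names below are illustrative):

  %v = atomic load i32* %p acquire, align 4
  %w = atomic volatile load i32* %p seq_cst, align 4
  atomic store i32 %v, i32* %p release, align 4
  atomic volatile store i32 %w, i32* %p singlethread release, align 4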
@ -3635,34 +3642,48 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
}

/// ParseLoad
-///   ::= 'volatile'? 'load' TypeAndValue (',' OptionalInfo)?
///   ::= 'volatile'? 'load' TypeAndValue (',' 'align' i32)?
//    ::= 'atomic' 'volatile'? 'load' TypeAndValue
//        'singlethread'? AtomicOrdering (',' 'align' i32)?
int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
-                       bool isVolatile) {
                        bool isAtomic, bool isVolatile) {
  Value *Val; LocTy Loc;
  unsigned Alignment = 0;
  bool AteExtraComma = false;
  AtomicOrdering Ordering = NotAtomic;
  SynchronizationScope Scope = CrossThread;
  if (ParseTypeAndValue(Val, Loc, PFS) ||
      ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
      ParseOptionalCommaAlign(Alignment, AteExtraComma))
    return true;

  if (!Val->getType()->isPointerTy() ||
      !cast<PointerType>(Val->getType())->getElementType()->isFirstClassType())
    return Error(Loc, "load operand must be a pointer to a first class type");
  if (isAtomic && !Alignment)
    return Error(Loc, "atomic load must have explicit non-zero alignment");
  if (Ordering == Release || Ordering == AcquireRelease)
    return Error(Loc, "atomic load cannot use Release ordering");

- Inst = new LoadInst(Val, "", isVolatile, Alignment);
  Inst = new LoadInst(Val, "", isVolatile, Alignment, Ordering, Scope);
  return AteExtraComma ? InstExtraComma : InstNormal;
}
/// ParseStore
///   ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
///   ::= 'atomic' 'volatile'? 'store' TypeAndValue ',' TypeAndValue
///       'singlethread'? AtomicOrdering (',' 'align' i32)?
int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
-                        bool isVolatile) {
                         bool isAtomic, bool isVolatile) {
  Value *Val, *Ptr; LocTy Loc, PtrLoc;
  unsigned Alignment = 0;
  bool AteExtraComma = false;
  AtomicOrdering Ordering = NotAtomic;
  SynchronizationScope Scope = CrossThread;
  if (ParseTypeAndValue(Val, Loc, PFS) ||
      ParseToken(lltok::comma, "expected ',' after store operand") ||
      ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
      ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
      ParseOptionalCommaAlign(Alignment, AteExtraComma))
    return true;
@ -3672,8 +3693,12 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
    return Error(Loc, "store operand must be a first class value");
  if (cast<PointerType>(Ptr->getType())->getElementType() != Val->getType())
    return Error(Loc, "stored value and pointer type do not match");
  if (isAtomic && !Alignment)
    return Error(Loc, "atomic store must have explicit non-zero alignment");
  if (Ordering == Acquire || Ordering == AcquireRelease)
    return Error(Loc, "atomic store cannot use Acquire ordering");

- Inst = new StoreInst(Val, Ptr, isVolatile, Alignment);
  Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
  return AteExtraComma ? InstExtraComma : InstNormal;
}
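A sketch of inputs these new checks reject, using the error strings added above (names are illustrative):

  %v = atomic load i32* %p acquire               ; atomic load must have explicit non-zero alignment
  %w = atomic load i32* %p release, align 4      ; atomic load cannot use Release ordering
  atomic store i32 %v, i32* %p seq_cst           ; atomic store must have explicit non-zero alignment
  atomic store i32 %v, i32* %p acquire, align 4  ; atomic store cannot use Acquire ordering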


@ -362,8 +362,10 @@ namespace llvm {
    int ParsePHI(Instruction *&I, PerFunctionState &PFS);
    bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
    int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
-   int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
-   int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
    int ParseLoad(Instruction *&I, PerFunctionState &PFS,
                  bool isAtomic, bool isVolatile);
    int ParseStore(Instruction *&I, PerFunctionState &PFS,
                   bool isAtomic, bool isVolatile);
    int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
    int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
    int ParseFence(Instruction *&I, PerFunctionState &PFS);


@ -2567,6 +2567,28 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
      InstructionList.push_back(I);
      break;
    }
case bitc::FUNC_CODE_INST_LOADATOMIC: {
// LOADATOMIC: [opty, op, align, vol, ordering, synchscope]
unsigned OpNum = 0;
Value *Op;
if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
OpNum+4 != Record.size())
return Error("Invalid LOADATOMIC record");
AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
if (Ordering == NotAtomic || Ordering == Release ||
Ordering == AcquireRelease)
return Error("Invalid LOADATOMIC record");
if (Ordering != NotAtomic && Record[OpNum] == 0)
return Error("Invalid LOADATOMIC record");
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
I = new LoadInst(Op, "", Record[OpNum+1], (1 << Record[OpNum]) >> 1,
Ordering, SynchScope);
InstructionList.push_back(I);
break;
}
    case bitc::FUNC_CODE_INST_STORE: { // STORE2:[ptrty, ptr, val, align, vol]
      unsigned OpNum = 0;
      Value *Val, *Ptr;
@ -2580,6 +2602,29 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
      InstructionList.push_back(I);
      break;
    }
case bitc::FUNC_CODE_INST_STOREATOMIC: {
// STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, synchscope]
unsigned OpNum = 0;
Value *Val, *Ptr;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
getValue(Record, OpNum,
cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
OpNum+4 != Record.size())
return Error("Invalid STOREATOMIC record");
AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
if (Ordering == NotAtomic || Ordering == Release ||
Ordering == AcquireRelease)
return Error("Invalid STOREATOMIC record");
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
if (Ordering != NotAtomic && Record[OpNum] == 0)
return Error("Invalid STOREATOMIC record");
I = new StoreInst(Val, Ptr, Record[OpNum+1], (1 << Record[OpNum]) >> 1,
Ordering, SynchScope);
InstructionList.push_back(I);
break;
}
    case bitc::FUNC_CODE_INST_CMPXCHG: {
      // CMPXCHG:[ptrty, ptr, cmp, new, vol, ordering, synchscope]
      unsigned OpNum = 0;
@ -2592,7 +2637,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
          OpNum+3 != Record.size())
        return Error("Invalid CMPXCHG record");
      AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+1]);
-     if (Ordering == NotAtomic)
      if (Ordering == NotAtomic || Ordering == Unordered)
        return Error("Invalid CMPXCHG record");
      SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+2]);
      I = new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, SynchScope);
@ -2614,7 +2659,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
          Operation > AtomicRMWInst::LAST_BINOP)
        return Error("Invalid ATOMICRMW record");
      AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
-     if (Ordering == NotAtomic)
      if (Ordering == NotAtomic || Ordering == Unordered)
        return Error("Invalid ATOMICRMW record");
      SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
      I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);


@ -1175,19 +1175,34 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
    break;
  case Instruction::Load:
-   Code = bitc::FUNC_CODE_INST_LOAD;
-   if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE))  // ptr
-     AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
    if (cast<LoadInst>(I).isAtomic()) {
      Code = bitc::FUNC_CODE_INST_LOADATOMIC;
      PushValueAndType(I.getOperand(0), InstID, Vals, VE);
    } else {
      Code = bitc::FUNC_CODE_INST_LOAD;
      if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE))  // ptr
        AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
    }
    Vals.push_back(Log2_32(cast<LoadInst>(I).getAlignment())+1);
    Vals.push_back(cast<LoadInst>(I).isVolatile());
    if (cast<LoadInst>(I).isAtomic()) {
      Vals.push_back(GetEncodedOrdering(cast<LoadInst>(I).getOrdering()));
      Vals.push_back(GetEncodedSynchScope(cast<LoadInst>(I).getSynchScope()));
    }
    break;
  case Instruction::Store:
-   Code = bitc::FUNC_CODE_INST_STORE;
    if (cast<StoreInst>(I).isAtomic())
      Code = bitc::FUNC_CODE_INST_STOREATOMIC;
    else
      Code = bitc::FUNC_CODE_INST_STORE;
    PushValueAndType(I.getOperand(1), InstID, Vals, VE);  // ptrty + ptr
    Vals.push_back(VE.getValueID(I.getOperand(0)));       // val.
    Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1);
    Vals.push_back(cast<StoreInst>(I).isVolatile());
    if (cast<StoreInst>(I).isAtomic()) {
      Vals.push_back(GetEncodedOrdering(cast<StoreInst>(I).getOrdering()));
      Vals.push_back(GetEncodedSynchScope(cast<StoreInst>(I).getSynchScope()));
    }
    break;
  case Instruction::AtomicCmpXchg:
    Code = bitc::FUNC_CODE_INST_CMPXCHG;


@ -190,6 +190,16 @@ static bool LowerFenceInst(FenceInst *FI) {
  return true;
}
static bool LowerLoadInst(LoadInst *LI) {
LI->setAtomic(NotAtomic);
return true;
}
static bool LowerStoreInst(StoreInst *SI) {
SI->setAtomic(NotAtomic);
return true;
}
namespace {
  struct LowerAtomic : public BasicBlockPass {
    static char ID;
@ -208,6 +218,13 @@ namespace {
          Changed |= LowerAtomicCmpXchgInst(CXI);
        else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(Inst))
          Changed |= LowerAtomicRMWInst(RMWI);
else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
if (LI->isAtomic())
LowerLoadInst(LI);
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (SI->isAtomic())
LowerStoreInst(SI);
}
      }
      return Changed;
    }
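In IR terms the pass simply clears the atomic marker via setAtomic(NotAtomic); a hypothetical before/after (pointer names and types are illustrative):

  ; before
  %v = atomic load i32* %p seq_cst, align 4
  atomic store i32 %v, i32* %q seq_cst, align 4
  ; after LowerLoadInst/LowerStoreInst reset the ordering to NotAtomic
  %v = load i32* %p, align 4
  store i32 %v, i32* %q, align 4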


@ -1659,14 +1659,18 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
    Out << '%' << SlotNum << " = ";
  }
// If this is an atomic load or store, print out the atomic marker.
if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
(isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
Out << "atomic ";
  // If this is a volatile load or store, print out the volatile marker.
  if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
-     (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile())) {
      (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()))
    Out << "volatile ";
- } else if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall()) {
-   // If this is a call, check if it's a tail call.
  if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall())
    Out << "tail ";
- }

  // Print out the opcode...
  Out << I.getOpcodeName();
@ -1913,11 +1917,17 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
    }
  }
- // Print post operand alignment for load/store.
- if (isa<LoadInst>(I) && cast<LoadInst>(I).getAlignment()) {
-   Out << ", align " << cast<LoadInst>(I).getAlignment();
- } else if (isa<StoreInst>(I) && cast<StoreInst>(I).getAlignment()) {
-   Out << ", align " << cast<StoreInst>(I).getAlignment();
  // Print atomic ordering/alignment for memory operations
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (LI->isAtomic())
      writeAtomic(LI->getOrdering(), LI->getSynchScope());
    if (LI->getAlignment())
      Out << ", align " << LI->getAlignment();
  } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    if (SI->isAtomic())
      writeAtomic(SI->getOrdering(), SI->getSynchScope());
    if (SI->getAlignment())
      Out << ", align " << SI->getAlignment();
  } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
  } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
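Assuming writeAtomic emits the scope and ordering the same way it already does for cmpxchg and atomicrmw, the printed form should look roughly like this sketch, with the atomic marker before the opcode and the ordering before the alignment:

  %v = atomic volatile load i32* %p acquire, align 4
  atomic store i32 %v, i32* %q singlethread release, align 4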


@ -822,6 +822,8 @@ bool AllocaInst::isStaticAlloca() const {
void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
@ -829,6 +831,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
                     Load, Ptr, InsertBef) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
  setName(Name);
}
@ -838,6 +841,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
                     Load, Ptr, InsertAE) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
  setName(Name);
}
@ -848,6 +852,18 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                     Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
BasicBlock *InsertAE)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(0);
setAtomic(NotAtomic);
  AssertOK();
  setName(Name);
}
@ -858,6 +874,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                     Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(NotAtomic);
  AssertOK();
  setName(Name);
}
@ -868,27 +885,43 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                     Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(NotAtomic);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SynchronizationScope SynchScope,
                   Instruction *InsertBef)
  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                     Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SynchScope);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SynchronizationScope SynchScope,
                   BasicBlock *InsertAE)
  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                     Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
- setAlignment(0);
  setAlignment(Align);
  setAtomic(Order, SynchScope);
  AssertOK();
  setName(Name);
}
LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                     Load, Ptr, InsertBef) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}
@ -898,6 +931,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
                     Load, Ptr, InsertAE) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}
@ -908,6 +942,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
                     Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}
@ -918,6 +953,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
                     Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}
@ -926,7 +962,7 @@ void LoadInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
- setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1)<<1));
  assert(getAlignment() == Align && "Alignment representation error!");
}
@ -942,6 +978,8 @@ void StoreInst::AssertOK() {
  assert(getOperand(0)->getType() ==
         cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}
@ -954,6 +992,7 @@ StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
  Op<1>() = addr;
  setVolatile(false);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
}
@ -966,6 +1005,7 @@ StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
  Op<1>() = addr;
  setVolatile(false);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
}
@ -979,6 +1019,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
}
@ -992,19 +1033,23 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(NotAtomic);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
-                    unsigned Align, BasicBlock *InsertAtEnd)
                     unsigned Align, AtomicOrdering Order,
                     SynchronizationScope SynchScope,
                     Instruction *InsertBefore)
  : Instruction(Type::getVoidTy(val->getContext()), Store,
                OperandTraits<StoreInst>::op_begin(this),
                OperandTraits<StoreInst>::operands(this),
-               InsertAtEnd) {
                InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SynchScope);
  AssertOK();
}
@ -1018,6 +1063,37 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
OperandTraits<StoreInst>::operands(this),
InsertAtEnd) {
Op<0>() = val;
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
setAtomic(NotAtomic);
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
OperandTraits<StoreInst>::operands(this),
InsertAtEnd) {
Op<0>() = val;
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
setAtomic(Order, SynchScope);
  AssertOK();
}
@ -1025,7 +1101,7 @@ void StoreInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
- setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1) << 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}
@ -3158,14 +3234,14 @@ AllocaInst *AllocaInst::clone_impl() const {
}

LoadInst *LoadInst::clone_impl() const {
- return new LoadInst(getOperand(0),
-                     Twine(), isVolatile(),
-                     getAlignment());
  return new LoadInst(getOperand(0), Twine(), isVolatile(),
                      getAlignment(), getOrdering(), getSynchScope());
}

StoreInst *StoreInst::clone_impl() const {
- return new StoreInst(getOperand(0), getOperand(1),
-                      isVolatile(), getAlignment());
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
                       getAlignment(), getOrdering(), getSynchScope());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {


@ -1297,6 +1297,15 @@ void Verifier::visitLoadInst(LoadInst &LI) {
  Type *ElTy = PTy->getElementType();
  Assert2(ElTy == LI.getType(),
          "Load result type does not match pointer operand type!", &LI, ElTy);
if (LI.isAtomic()) {
Assert1(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
"Load cannot have Release ordering", &LI);
Assert1(LI.getAlignment() != 0,
"Atomic load must specify explicit alignment", &LI);
} else {
Assert1(LI.getSynchScope() == CrossThread,
"Non-atomic load cannot have SynchronizationScope specified", &LI);
}
  visitInstruction(LI);
}
@ -1307,6 +1316,15 @@ void Verifier::visitStoreInst(StoreInst &SI) {
  Assert2(ElTy == SI.getOperand(0)->getType(),
          "Stored value type does not match pointer operand type!",
          &SI, ElTy);
if (SI.isAtomic()) {
Assert1(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
"Store cannot have Acquire ordering", &SI);
Assert1(SI.getAlignment() != 0,
"Atomic store must specify explicit alignment", &SI);
} else {
Assert1(SI.getSynchScope() == CrossThread,
"Non-atomic store cannot have SynchronizationScope specified", &SI);
}
  visitInstruction(SI);
}