-rw-r--r--  docs/LangRef.html                              272
-rw-r--r--  include/llvm/CodeGen/SelectionDAG.h             10
-rw-r--r--  include/llvm/CodeGen/SelectionDAGNodes.h       150
-rw-r--r--  include/llvm/Intrinsics.td                      18
-rw-r--r--  lib/AsmParser/llvmAsmParser.cpp.cvs            590
-rw-r--r--  lib/AsmParser/llvmAsmParser.h.cvs                2
-rw-r--r--  lib/AsmParser/llvmAsmParser.y.cvs               20
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp        30
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp       79
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp   18
-rw-r--r--  lib/Target/Alpha/AlphaInstrInfo.td               8
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp          16
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.h             4
-rw-r--r--  lib/Target/TargetSelectionDAG.td                71
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp              34
-rw-r--r--  lib/Target/X86/X86ISelLowering.h                 6
-rw-r--r--  lib/Target/X86/X86Instr64bit.td                  2
-rw-r--r--  lib/Target/X86/X86InstrInfo.td                  10
-rw-r--r--  lib/VMCore/AutoUpgrade.cpp                      24
-rw-r--r--  test/CodeGen/PowerPC/atomic-1.ll                12
-rw-r--r--  test/CodeGen/PowerPC/atomic-2.ll                 8
-rw-r--r--  test/CodeGen/X86/atomic_op.ll                   18
-rw-r--r--  utils/TableGen/CodeGenDAGPatterns.cpp            2
-rw-r--r--  utils/TableGen/CodeGenTarget.cpp                 2
-rw-r--r--  utils/TableGen/CodeGenTarget.h                   5
-rw-r--r--  utils/TableGen/DAGISelEmitter.cpp                7
26 files changed, 911 insertions(+), 507 deletions(-)
diff --git a/docs/LangRef.html b/docs/LangRef.html
index 01b971bf2f..4de69b0e72 100644
--- a/docs/LangRef.html
+++ b/docs/LangRef.html
@@ -216,9 +216,18 @@
<li><a href="#int_atomics">Atomic intrinsics</a>
<ol>
<li><a href="#int_memory_barrier"><tt>llvm.memory_barrier</tt></a></li>
- <li><a href="#int_atomic_lcs"><tt>llvm.atomic.lcs</tt></a></li>
- <li><a href="#int_atomic_las"><tt>llvm.atomic.las</tt></a></li>
+ <li><a href="#int_atomic_cmp_swap"><tt>llvm.atomic.cmp.swap</tt></a></li>
<li><a href="#int_atomic_swap"><tt>llvm.atomic.swap</tt></a></li>
+ <li><a href="#int_atomic_load_add"><tt>llvm.atomic.load.add</tt></a></li>
+ <li><a href="#int_atomic_load_sub"><tt>llvm.atomic.load.sub</tt></a></li>
+ <li><a href="#int_atomic_load_and"><tt>llvm.atomic.load.and</tt></a></li>
+ <li><a href="#int_atomic_load_nand"><tt>llvm.atomic.load.nand</tt></a></li>
+ <li><a href="#int_atomic_load_or"><tt>llvm.atomic.load.or</tt></a></li>
+ <li><a href="#int_atomic_load_xor"><tt>llvm.atomic.load.xor</tt></a></li>
+ <li><a href="#int_atomic_load_max"><tt>llvm.atomic.load.max</tt></a></li>
+ <li><a href="#int_atomic_load_min"><tt>llvm.atomic.load.min</tt></a></li>
+ <li><a href="#int_atomic_load_umax"><tt>llvm.atomic.load.umax</tt></a></li>
+ <li><a href="#int_atomic_load_umin"><tt>llvm.atomic.load.umin</tt></a></li>
</ol>
</li>
<li><a href="#int_general">General intrinsics</a>
@@ -5777,19 +5786,19 @@ i1 &lt;device&gt; )
<!-- _______________________________________________________________________ -->
<div class="doc_subsubsection">
- <a name="int_atomic_lcs">'<tt>llvm.atomic.lcs.*</tt>' Intrinsic</a>
+ <a name="int_atomic_cmp_swap">'<tt>llvm.atomic.cmp.swap.*</tt>' Intrinsic</a>
</div>
<div class="doc_text">
<h5>Syntax:</h5>
<p>
- This is an overloaded intrinsic. You can use <tt>llvm.atomic.lcs</tt> on any
+ This is an overloaded intrinsic. You can use <tt>llvm.atomic.cmp.swap</tt> on any
integer bit width. Not all targets support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.lcs.i8( i8* &lt;ptr&gt;, i8 &lt;cmp&gt;, i8 &lt;val&gt; )
-declare i16 @llvm.atomic.lcs.i16( i16* &lt;ptr&gt;, i16 &lt;cmp&gt;, i16 &lt;val&gt; )
-declare i32 @llvm.atomic.lcs.i32( i32* &lt;ptr&gt;, i32 &lt;cmp&gt;, i32 &lt;val&gt; )
-declare i64 @llvm.atomic.lcs.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val&gt; )
+declare i8 @llvm.atomic.cmp.swap.i8( i8* &lt;ptr&gt;, i8 &lt;cmp&gt;, i8 &lt;val&gt; )
+declare i16 @llvm.atomic.cmp.swap.i16( i16* &lt;ptr&gt;, i16 &lt;cmp&gt;, i16 &lt;val&gt; )
+declare i32 @llvm.atomic.cmp.swap.i32( i32* &lt;ptr&gt;, i32 &lt;cmp&gt;, i32 &lt;val&gt; )
+declare i64 @llvm.atomic.cmp.swap.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val&gt; )
</pre>
<h5>Overview:</h5>
@@ -5799,7 +5808,7 @@ declare i64 @llvm.atomic.lcs.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val
</p>
<h5>Arguments:</h5>
<p>
- The <tt>llvm.atomic.lcs</tt> intrinsic takes three arguments. The result as
+ The <tt>llvm.atomic.cmp.swap</tt> intrinsic takes three arguments. The result as
well as both <tt>cmp</tt> and <tt>val</tt> must be integer values with the
same bit width. The <tt>ptr</tt> argument must be a pointer to a value of
this integer type. While any bit width integer may be used, targets may only
@@ -5821,13 +5830,13 @@ declare i64 @llvm.atomic.lcs.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val
store i32 4, %ptr
%val1 = add i32 4, 4
-%result1 = call i32 @llvm.atomic.lcs.i32( i32* %ptr, i32 4, %val1 )
+%result1 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 4, i32 %val1 )
<i>; yields {i32}:result1 = 4</i>
%stored1 = icmp eq i32 %result1, 4 <i>; yields {i1}:stored1 = true</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 8</i>
%val2 = add i32 1, 1
-%result2 = call i32 @llvm.atomic.lcs.i32( i32* %ptr, i32 5, %val2 )
+%result2 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 5, i32 %val2 )
<i>; yields {i32}:result2 = 8</i>
%stored2 = icmp eq i32 %result2, 5 <i>; yields {i1}:stored2 = false</i>
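A minimal usage sketch, not part of this patch: the Intrinsics.td hunk below maps this intrinsic to the GCC builtin __sync_val_compare_and_swap, so a compare-and-swap retry loop like the following C++ lowers to llvm.atomic.cmp.swap; the function name atomicAdd is hypothetical.

// Sketch: atomically add 'delta' to *p with a compare-and-swap retry loop.
// __sync_val_compare_and_swap returns the value *p held before the call;
// the store happened iff that return value equals 'expected'.
static int atomicAdd(volatile int *p, int delta) {
  int expected, desired;
  do {
    expected = *p;                // read the current value
    desired  = expected + delta;  // compute the replacement
  } while (__sync_val_compare_and_swap(p, expected, desired) != expected);
  return desired;                 // the value we successfully installed
}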
@@ -5861,7 +5870,7 @@ declare i64 @llvm.atomic.swap.i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
<h5>Arguments:</h5>
<p>
- The <tt>llvm.atomic.ls</tt> intrinsic takes two arguments. Both the
+ The <tt>llvm.atomic.swap</tt> intrinsic takes two arguments. Both the
<tt>val</tt> argument and the result must be integers of the same bit width.
The first argument, <tt>ptr</tt>, must be a pointer to a value of this
integer type. The targets may only lower integer representations they
@@ -5896,19 +5905,19 @@ declare i64 @llvm.atomic.swap.i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
<!-- _______________________________________________________________________ -->
<div class="doc_subsubsection">
- <a name="int_atomic_las">'<tt>llvm.atomic.las.*</tt>' Intrinsic</a>
+ <a name="int_atomic_load_add">'<tt>llvm.atomic.load.add.*</tt>' Intrinsic</a>
</div>
<div class="doc_text">
<h5>Syntax:</h5>
<p>
- This is an overloaded intrinsic. You can use <tt>llvm.atomic.las</tt> on any
+ This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.add</tt> on any
integer bit width. Not all targets support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.las.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.las.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.las.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.las.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.add.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.add.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.add.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.add.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<h5>Overview:</h5>
@@ -5935,16 +5944,235 @@ declare i64 @llvm.atomic.las.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<pre>
%ptr = malloc i32
store i32 4, %ptr
-%result1 = call i32 @llvm.atomic.las.i32( i32* %ptr, i32 4 )
+%result1 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 4 )
<i>; yields {i32}:result1 = 4</i>
-%result2 = call i32 @llvm.atomic.las.i32( i32* %ptr, i32 2 )
+%result2 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 2 )
<i>; yields {i32}:result2 = 8</i>
-%result3 = call i32 @llvm.atomic.las.i32( i32* %ptr, i32 5 )
+%result3 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 5 )
<i>; yields {i32}:result3 = 10</i>
-%memval = load i32* %ptr <i>; yields {i32}:memval1 = 15</i>
+%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 15</i>
</pre>
</div>
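A minimal usage sketch, not part of this patch: the Intrinsics.td hunk below ties int_atomic_load_add to the GCC builtin __sync_fetch_and_add, so a shared counter like the following C++ compiles down to llvm.atomic.load.add; the function name nextTicket is hypothetical.

// Sketch: a ticket counter on top of fetch-and-add. The builtin returns the
// value *counter held before the increment, matching the intrinsic's result.
static unsigned nextTicket(volatile unsigned *counter) {
  return __sync_fetch_and_add(counter, 1u);
}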
+<!-- _______________________________________________________________________ -->
+<div class="doc_subsubsection">
+ <a name="int_atomic_load_sub">'<tt>llvm.atomic.load.sub.*</tt>' Intrinsic</a>
+
+</div>
+<div class="doc_text">
+<h5>Syntax:</h5>
+<p>
+ This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.sub</tt> on
+ any integer bit width. Not all targets support all bit widths however.</p>
+<pre>
+declare i8 @llvm.atomic.load.sub.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.sub.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.sub.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.sub.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+
+</pre>
+<h5>Overview:</h5>
+<p>
+  This intrinsic subtracts <tt>delta</tt> from the value stored in memory at
+ <tt>ptr</tt>. It yields the original value at <tt>ptr</tt>.
+</p>
+<h5>Arguments:</h5>
+<p>
+
+ The intrinsic takes two arguments, the first a pointer to an integer value
+ and the second an integer value. The result is also an integer value. These
+ integer types can have any bit width, but they must all have the same bit
+ width. The targets may only lower integer representations they support.
+</p>
+<h5>Semantics:</h5>
+<p>
+  This intrinsic does a series of operations atomically. It first loads the
+  value stored at <tt>ptr</tt>, then subtracts <tt>delta</tt> and stores the
+  result back to <tt>ptr</tt>. It yields the original value stored at <tt>ptr</tt>.
+</p>
+
+<h5>Examples:</h5>
+<pre>
+%ptr = malloc i32
+ store i32 8, %ptr
+%result1 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 4 )
+ <i>; yields {i32}:result1 = 8</i>
+%result2 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 2 )
+ <i>; yields {i32}:result2 = 4</i>
+%result3 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 5 )
+ <i>; yields {i32}:result3 = 2</i>
+%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = -3</i>
+</pre>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<div class="doc_subsubsection">
+ <a name="int_atomic_load_and">'<tt>llvm.atomic.load.and.*</tt>' Intrinsic</a><br>
+ <a name="int_atomic_load_nand">'<tt>llvm.atomic.load.nand.*</tt>' Intrinsic</a><br>
+ <a name="int_atomic_load_or">'<tt>llvm.atomic.load.or.*</tt>' Intrinsic</a><br>
+ <a name="int_atomic_load_xor">'<tt>llvm.atomic.load.xor.*</tt>' Intrinsic</a><br>
+
+</div>
+<div class="doc_text">
+<h5>Syntax:</h5>
+<p>
+  These are overloaded intrinsics. You can use <tt>llvm.atomic.load.and</tt>,
+  <tt>llvm.atomic.load.nand</tt>, <tt>llvm.atomic.load.or</tt>, and
+  <tt>llvm.atomic.load.xor</tt> on any integer bit width. Not all targets
+  support all bit widths however.</p>
+<pre>
+declare i8 @llvm.atomic.load.and.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.and.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.and.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.and.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+
+</pre>
+
+<pre>
+declare i8 @llvm.atomic.load.or.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.or.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.or.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.or.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+
+</pre>
+
+<pre>
+declare i8 @llvm.atomic.load.nand.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.nand.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.nand.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.nand.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+
+</pre>
+
+<pre>
+declare i8 @llvm.atomic.load.xor.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.xor.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.xor.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.xor.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+
+</pre>
+<h5>Overview:</h5>
+<p>
+  These intrinsics apply a bitwise operation (and, nand, or, xor) between
+  <tt>delta</tt> and the value stored in memory at <tt>ptr</tt>. They yield
+  the original value at <tt>ptr</tt>.
+</p>
+<h5>Arguments:</h5>
+<p>
+
+ These intrinsics take two arguments, the first a pointer to an integer value
+ and the second an integer value. The result is also an integer value. These
+ integer types can have any bit width, but they must all have the same bit
+ width. The targets may only lower integer representations they support.
+</p>
+<h5>Semantics:</h5>
+<p>
+  These intrinsics do a series of operations atomically. They first load the
+  value stored at <tt>ptr</tt>, then apply the bitwise operation between the
+  loaded value and <tt>delta</tt>, and store the result back to <tt>ptr</tt>.
+  They yield the original value stored at <tt>ptr</tt>.
+</p>
+
+<h5>Examples:</h5>
+<pre>
+%ptr = malloc i32
+ store i32 0x0F0F, %ptr
+%result0 = call i32 @llvm.atomic.load.nand.i32( i32* %ptr, i32 0xFF )
+ <i>; yields {i32}:result0 = 0x0F0F</i>
+%result1 = call i32 @llvm.atomic.load.and.i32( i32* %ptr, i32 0xFF )
+ <i>; yields {i32}:result1 = 0xFFFFFFF0</i>
+%result2 = call i32 @llvm.atomic.load.or.i32( i32* %ptr, i32 0x0F )
+                                <i>; yields {i32}:result2 = 0xF0</i>
+%result3 = call i32 @llvm.atomic.load.xor.i32( i32* %ptr, i32 0x0F )
+                                <i>; yields {i32}:result3 = 0xFF</i>
+%memval1 = load i32* %ptr      <i>; yields {i32}:memval1 = 0xF0</i>
+</pre>
+</div>
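A minimal sketch, not part of this patch, of the (non-atomic) update rule each variant applies before storing back to <tt>ptr</tt>, inferred from the example above, where nand stores the complement of the and.

// Sketch: plain C++ equivalents of the value stored back by each variant.
// The intrinsic itself returns the *original* value, not these results.
unsigned andStep (unsigned old, unsigned delta) { return old & delta;    }
unsigned nandStep(unsigned old, unsigned delta) { return ~(old & delta); }
unsigned orStep  (unsigned old, unsigned delta) { return old | delta;    }
unsigned xorStep (unsigned old, unsigned delta) { return old ^ delta;    }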
+
+
+<!-- _______________________________________________________________________ -->
+<div class="doc_subsubsection">
+ <a name="int_atomic_load_max">'<tt>llvm.atomic.load.max.*</tt>' Intrinsic</a><br>
+ <a name="int_atomic_load_min">'<tt>llvm.atomic.load.min.*</tt>' Intrinsic</a><br>
+ <a name="int_atomic_load_umax">'<tt>llvm.atomic.load.umax.*</tt>' Intrinsic</a><br>
+ <a name="int_atomic_load_umin">'<tt>llvm.atomic.load.umin.*</tt>' Intrinsic</a><br>
+
+</div>
+<div class="doc_text">
+<h5>Syntax:</h5>
+<p>
+  These are overloaded intrinsics. You can use <tt>llvm.atomic.load.max</tt>,
+  <tt>llvm.atomic.load.min</tt>, <tt>llvm.atomic.load.umax</tt>, and
+  <tt>llvm.atomic.load.umin</tt> on any integer bit width. Not all targets
+  support all bit widths however.</p>
+<pre>
+declare i8 @llvm.atomic.load.max.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.max.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.max.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.max.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+
+</pre>
+
+<pre>
+declare i8 @llvm.atomic.load.min.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.min.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.min.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.min.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+
+</pre>
+
+<pre>
+declare i8 @llvm.atomic.load.umax.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.umax.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.umax.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.umax.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+
+</pre>
+
+<pre>
+declare i8 @llvm.atomic.load.umin.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.umin.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.umin.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.umin.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+
+</pre>
+<h5>Overview:</h5>
+<p>
+  These intrinsics take the signed or unsigned minimum or maximum of
+  <tt>delta</tt> and the value stored in memory at <tt>ptr</tt>. They yield
+  the original value at <tt>ptr</tt>.
+</p>
+<h5>Arguments:</h5>
+<p>
+
+ These intrinsics take two arguments, the first a pointer to an integer value
+ and the second an integer value. The result is also an integer value. These
+ integer types can have any bit width, but they must all have the same bit
+ width. The targets may only lower integer representations they support.
+</p>
+<h5>Semantics:</h5>
+<p>
+  These intrinsics do a series of operations atomically. They first load the
+  value stored at <tt>ptr</tt>, then take the signed or unsigned min or max of
+  <tt>delta</tt> and the loaded value, and store the result back to
+  <tt>ptr</tt>. They yield the original value stored at <tt>ptr</tt>.
+</p>
+
+<h5>Examples:</h5>
+<pre>
+%ptr = malloc i32
+ store i32 7, %ptr
+%result0 = call i32 @llvm.atomic.load.min.i32( i32* %ptr, i32 -2 )
+ <i>; yields {i32}:result0 = 7</i>
+%result1 = call i32 @llvm.atomic.load.max.i32( i32* %ptr, i32 8 )
+ <i>; yields {i32}:result1 = -2</i>
+%result2 = call i32 @llvm.atomic.load.umin.i32( i32* %ptr, i32 10 )
+ <i>; yields {i32}:result2 = 8</i>
+%result3 = call i32 @llvm.atomic.load.umax.i32( i32* %ptr, i32 30 )
+ <i>; yields {i32}:result3 = 8</i>
+%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 30</i>
+</pre>
+</div>
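A minimal sketch, not part of this patch, of the (non-atomic) update rule for the min/max family; the signed variants compare as signed integers, the u-prefixed variants as unsigned.

// Sketch: plain C++ equivalents of the value stored back by each variant.
// The intrinsic itself returns the *original* value, not these results.
int      maxStep (int old, int delta)           { return old > delta ? old : delta; }
int      minStep (int old, int delta)           { return old < delta ? old : delta; }
unsigned umaxStep(unsigned old, unsigned delta) { return old > delta ? old : delta; }
unsigned uminStep(unsigned old, unsigned delta) { return old < delta ? old : delta; }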
<!-- ======================================================================= -->
<div class="doc_subsection">
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index bd9a8957fd..4c2ddce8a9 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -368,14 +368,16 @@ public:
SDOperand SV);
/// getAtomic - Gets a node for an atomic op, produces result and chain, takes
- // 3 operands
+ /// 3 operands
SDOperand getAtomic(unsigned Opcode, SDOperand Chain, SDOperand Ptr,
- SDOperand Cmp, SDOperand Swp, MVT VT);
+ SDOperand Cmp, SDOperand Swp, MVT VT, const Value* PtrVal,
+ unsigned Alignment=0);
/// getAtomic - Gets a node for an atomic op, produces result and chain, takes
- // 2 operands
+ /// 2 operands
SDOperand getAtomic(unsigned Opcode, SDOperand Chain, SDOperand Ptr,
- SDOperand Val, MVT VT);
+ SDOperand Val, MVT VT, const Value* PtrVal,
+ unsigned Alignment = 0);
/// getLoad - Loads are not normal binary operators: their result type is not
/// determined by their operands, and they produce a value AND a token chain.
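A caller's sketch, not part of this patch, of the widened two-operand getAtomic overload above; the surrounding function, the MVT::i32 choice, and the alignment of 4 are illustrative assumptions.

// Sketch: building an atomic load-add node with the two-operand overload.
// 'Op' is assumed to be an incoming node whose operands are (chain, ptr, val);
// 'PtrV' is the IR Value for the address, used to describe the memory access.
SDOperand lowerAtomicAdd(SDOperand Op, SelectionDAG &DAG, const Value *PtrV) {
  SDOperand Chain = Op.getOperand(0);  // incoming chain
  SDOperand Ptr   = Op.getOperand(1);  // address to update
  SDOperand Val   = Op.getOperand(2);  // amount to add
  // PtrVal and Alignment are the parameters this patch threads through, so
  // the node can carry alias-analysis information like loads and stores do.
  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Chain, Ptr, Val,
                       MVT::i32, PtrV, /*Alignment=*/4);
}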
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index 1c3a50de14..67daaafd32 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -584,17 +584,17 @@ namespace ISD {
// and produces an output chain.
MEMBARRIER,
- // Val, OUTCHAIN = ATOMIC_LCS(INCHAIN, ptr, cmp, swap)
+ // Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
    // this corresponds to the atomic.cmp.swap intrinsic.
// cmp is compared to *ptr, and if equal, swap is stored in *ptr.
// the return is always the original value in *ptr
- ATOMIC_LCS,
+ ATOMIC_CMP_SWAP,
- // Val, OUTCHAIN = ATOMIC_LAS(INCHAIN, ptr, amt)
+ // Val, OUTCHAIN = ATOMIC_LOAD_ADD(INCHAIN, ptr, amt)
    // this corresponds to the atomic.load.add intrinsic.
// *ptr + amt is stored to *ptr atomically.
// the return is always the original value in *ptr
- ATOMIC_LAS,
+ ATOMIC_LOAD_ADD,
// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
// this corresponds to the atomic.swap intrinsic.
@@ -602,11 +602,11 @@ namespace ISD {
// the return is always the original value in *ptr
ATOMIC_SWAP,
- // Val, OUTCHAIN = ATOMIC_LSS(INCHAIN, ptr, amt)
+ // Val, OUTCHAIN = ATOMIC_LOAD_SUB(INCHAIN, ptr, amt)
    // this corresponds to the atomic.load.sub intrinsic.
// *ptr - amt is stored to *ptr atomically.
// the return is always the original value in *ptr
- ATOMIC_LSS,
+ ATOMIC_LOAD_SUB,
    // Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
    // this corresponds to the atomic.load.[OpName] intrinsic.
@@ -1437,32 +1437,122 @@ public:
SDUse getValue() const { return Op; }
};
-class AtomicSDNode : public SDNode {
+/// MemSDNode - Abstract base class for memory operations.
+class MemSDNode : public SDNode {
+ virtual void ANCHOR(); // Out-of-line virtual method to give class a home.
+
+private:
+ //! SrcValue - Memory location for alias analysis.
+ const Value *SrcValue;
+
+ //! Alignment - Alignment of memory location in bytes.
+ unsigned Alignment;
+
+public:
+ MemSDNode(unsigned Opc, SDVTList VTs, const Value *SrcValue,
+ unsigned Alignment)
+ : SDNode(Opc, VTs), SrcValue(SrcValue), Alignment(Alignment) {}
+
+ virtual ~MemSDNode() {}
+
+ /// Returns alignment and volatility of the memory access
+ unsigned getAlignment() const { return Alignment; }
+ virtual bool isVolatile() const = 0;
+
+  /// Returns the SrcValue and offset that describe the location of the access
+ const Value *getSrcValue() const { return SrcValue; }
+ virtual int getSrcValueOffset() const = 0;
+
+ /// getMemOperand - Return a MachineMemOperand object describing the memory
+ /// reference performed by operation.
+ virtual MachineMemOperand getMemOperand() const = 0;
+
+ // Methods to support isa and dyn_cast
+ static bool classof(const MemSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::LOAD ||
+ N->getOpcode() == ISD::STORE ||
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
+ N->getOpcode() == ISD::ATOMIC_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX;
+ }
+};
+
+/// Atomic operations node
+class AtomicSDNode : public MemSDNode {
virtual void ANCHOR(); // Out-of-line virtual method to give class a home.
SDUse Ops[4];
MVT OrigVT;
-public:
+
+public:
+  // Opc: opcode for the atomic operation
+  // VTL: value type list
+  // Chain: memory chain operand
+  // Ptr: address to update, as an SDOperand
+  // Cmp: compare value
+  // Swp: swap value
+  // VT: resulting value type
+  // SrcVal: address to update, as a Value (used for the MemOperand)
+  // Align: alignment of the memory
AtomicSDNode(unsigned Opc, SDVTList VTL, SDOperand Chain, SDOperand Ptr,
- SDOperand Cmp, SDOperand Swp, MVT VT)
- : SDNode(Opc, VTL) {
+ SDOperand Cmp, SDOperand Swp, MVT VT, const Value* SrcVal,
+ unsigned Align=0)
+ : MemSDNode(Opc, VTL, SrcVal, Align), OrigVT(VT) {
Ops[0] = Chain;
Ops[1] = Ptr;
Ops[2] = Swp;
Ops[3] = Cmp;
InitOperands(Ops, 4);
- OrigVT=VT;
}
AtomicSDNode(unsigned Opc, SDVTList VTL, SDOperand Chain, SDOperand Ptr,
- SDOperand Val, MVT VT)
- : SDNode(Opc, VTL) {
+ SDOperand Val, MVT VT, const Value* SrcVal, unsigned Align=0)
+ : MemSDNode(Opc, VTL, SrcVal, Align), OrigVT(VT) {
Ops[0] = Chain;
Ops[1] = Ptr;
Ops[2] = Val;
InitOperands(Ops, 3);
- OrigVT=VT;
}
+
MVT getVT() const { return OrigVT; }
- bool isCompareAndSwap() const { return getOpcode() == ISD::ATOMIC_LCS; }
+ const SDOperand &getChain() const { return getOperand(0); }
+ const SDOperand &getBasePtr() const { return getOperand(1); }
+ const SDOperand &getVal() const { return getOperand(2); }
+
+ bool isCompareAndSwap() const { return getOpcode() == ISD::ATOMIC_CMP_SWAP; }
+
+ // Implementation for MemSDNode
+ virtual int getSrcValueOffset() const { return 0; }
+ virtual bool isVolatile() const { return true; }
+
+ /// getMemOperand - Return a MachineMemOperand object describing the memory
+ /// reference performed by this atomic load/store.
+ virtual MachineMemOperand getMemOperand() const;
+
+ // Methods to support isa and dyn_cast
+ static bool classof(const AtomicSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
+ N->getOpcode() == ISD::ATOMIC_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX;
+ }
};
class StringSDNode : public SDNode {
@@ -1934,7 +2024,7 @@ public:
/// LSBaseSDNode - Base class for LoadSDNode and StoreSDNode
///
-class LSBaseSDNode : public SDNode {
+class LSBaseSDNode : public MemSDNode {
private:
// AddrMode - unindexed, pre-indexed, post-indexed.
ISD::MemIndexedMode AddrMode;
@@ -1942,17 +2032,12 @@ private:
// MemoryVT - VT of in-memory value.
MVT MemoryVT;
- //! SrcValue - Memory location for alias analysis.
- const Value *SrcValue;
-
- //! SVOffset - Memory location offset.
+  //! SVOffset - Memory location offset. Note that SrcValue is defined in MemSDNode
int SVOffset;
- //! Alignment - Alignment of memory location in bytes.
- unsigned Alignment;
-
- //! IsVolatile - True if the store is volatile.
+ //! IsVolatile - True if the load/store is volatile.
bool IsVolatile;
+
protected:
//! Operand array for load and store
/*!
@@ -1965,9 +2050,8 @@ public:
LSBaseSDNode(ISD::NodeType NodeTy, SDOperand *Operands, unsigned numOperands,
SDVTList VTs, ISD::MemIndexedMode AM, MVT VT,
const Value *SV, int SVO, unsigned Align, bool Vol)
- : SDNode(NodeTy, VTs),
- AddrMode(AM), MemoryVT(VT),
- SrcValue(SV), SVOffset(SVO), Alignment(Align), IsVolatile(Vol) {
+ : MemSDNode(NodeTy, VTs, SV, Align), AddrMode(AM), MemoryVT(VT),
+ SVOffset(SVO), IsVolatile(Vol) {
for (unsigned i = 0; i != numOperands; ++i)
Ops[i] = Operands[i];
InitOperands(Ops, numOperands);
@@ -1984,12 +2068,8 @@ public:
return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
}
- const Value *getSrcValue() const { return SrcValue; }
- int getSrcValueOffset() const { return SVOffset; }
- unsigned getAlignment() const { return Alignment; }
MVT getMemoryVT() const { return MemoryVT; }
- bool isVolatile() const { return IsVolatile; }
-
+
ISD::MemIndexedMode getAddressingMode() const { return AddrMode; }
/// isIndexed - Return true if this is a pre/post inc/dec load/store.
@@ -1998,9 +2078,13 @@ public:
/// isUnindexed - Return true if this is NOT a pre/post inc/dec load/store.
bool isUnindexed() const { return AddrMode == ISD::UNINDEXED; }
+ // Implementation for MemSDNode
+ virtual int getSrcValueOffset() const { return SVOffset; }
+ virtual bool isVolatile() const { return IsVolatile; }
+
/// getMemOperand - Return a MachineMemOperand object describing the memory
/// reference performed by this load or store.
- MachineMemOperand getMemOperand() const;
+ virtual MachineMemOperand getMemOperand() const;
static bool classof(const LSBaseSDNode *) { return true; }
static bool classof(const SDNode *N) {
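A consumer's sketch, not part of this patch, of the new class hierarchy: any load, store, or atomic node can now be queried uniformly through MemSDNode, then narrowed to AtomicSDNode; describeMemNode is a hypothetical helper.

// Sketch: uniform memory-node queries enabled by the MemSDNode base class.
static void describeMemNode(SDNode *N) {
  if (MemSDNode *M = dyn_cast<MemSDNode>(N)) {
    unsigned Align   = M->getAlignment();   // shared by loads, stores, atomics
    const Value *Loc = M->getSrcValue();    // location for alias analysis
    bool Vol         = M->isVolatile();     // virtual; AtomicSDNode answers true
    (void)Align; (void)Loc; (void)Vol;
    if (AtomicSDNode *A = dyn_cast<AtomicSDNode>(N)) {
      SDOperand Ptr = A->getBasePtr();      // accessor added by this patch
      if (A->isCompareAndSwap()) {
        // Only ISD::ATOMIC_CMP_SWAP nodes take the four-operand form.
      }
      (void)Ptr;
    }
  }
}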
diff --git a/include/llvm/Intrinsics.td b/include/llvm/Intrinsics.td
index 866107cbfb..aea92c9071 100644
--- a/include/llvm/Intrinsics.td
+++ b/include/llvm/Intrinsics.td
@@ -270,26 +270,26 @@ def int_init_trampoline : Intrinsic<[llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty,
def int_memory_barrier : Intrinsic<[llvm_void_ty, llvm_i1_ty, llvm_i1_ty,
llvm_i1_ty, llvm_i1_ty, llvm_i1_ty], []>;
-def int_atomic_lcs : Intrinsic<[llvm_anyint_ty,
+def int_atomic_cmp_swap : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>, LLVMMatchType<0>],
[IntrWriteArgMem]>,
- GCCBuiltin<"__sync_val_compare_and_swap">;
-def int_atomic_las : Intrinsic<[llvm_anyint_ty,
+ GCCBuiltin<"__sync_val_compare_and_swap">;
+def int_atomic_load_add : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
- GCCBuiltin<"__sync_fetch_and_add">;
-def int_atomic_swap : Intrinsic<[llvm_anyint_ty,
+ GCCBuiltin<"__sync_fetch_and_add">;
+def int_atomic_swap : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
- GCCBuiltin<"__sync_lock_test_and_set">;
-def int_atomic_lss : Intrinsic<[llvm_anyint_ty,
+ GCCBuiltin<"__sync_lock_test_and_set">;
+def int_atomic_load_sub : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
[IntrWriteArgMem]>,
- GCCBuiltin<"__sync_fetch_and_sub">;
+ GCCBuiltin<"__sync_fetch_and_sub">;
def int_atomic_load_and : Intrinsic<[llvm_anyint_ty,
LLVMPointerType<LLVMMatchType<0>>,