author      Anton Korobeynikov <asl@math.spbu.ru>   2007-03-22 00:02:17 +0000
committer   Anton Korobeynikov <asl@math.spbu.ru>   2007-03-22 00:02:17 +0000
commit      ec43a0662a468ca0b48580e174ca861512e778cd (patch)
tree        6d59dc1b3c721cb35da414042a5e742190a33fb3 /docs
parent      5d522f36ca33e7d6ee1b75dca348586c16f95138 (diff)
More LangRef fixes. Corrected names of intrinsics.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@35249 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'docs')
-rw-r--r--   docs/LangRef.html   |   74
1 file changed, 37 insertions, 37 deletions
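The fix throughout is the same: intrinsic functions are global identifiers, so in LLVM assembly they take the '@' prefix, while '%' is reserved for local values. A minimal sketch of the corrected naming in use (the wrapper function @swap_endian and its value names are hypothetical, not part of this patch):

    ; Intrinsics are declared and called like ordinary functions,
    ; using the global '@' prefix; locals such as %x keep '%'.
    declare i32 @llvm.bswap.i32(i32)

    define i32 @swap_endian(i32 %x) {
    entry:
      %r = call i32 @llvm.bswap.i32(i32 %x)   ; byte-swap the argument
      ret i32 %r
    }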
diff --git a/docs/LangRef.html b/docs/LangRef.html
index d1061412a6..60777ade19 100644
--- a/docs/LangRef.html
+++ b/docs/LangRef.html
@@ -3689,7 +3689,7 @@ last argument of the function, the compiler can figure that out.</p>
<div class="doc_text">
<h5>Syntax:</h5>
-<pre> declare void %llvm.va_end(i8* &lt;arglist&gt;)<br></pre>
+<pre> declare void @llvm.va_end(i8* &lt;arglist&gt;)<br></pre>
<h5>Overview:</h5>
<p>The '<tt>llvm.va_end</tt>' intrinsic destroys <tt>&lt;arglist&gt;</tt>
@@ -3720,7 +3720,7 @@ with calls to <tt>llvm.va_end</tt>.</p>
<h5>Syntax:</h5>
<pre>
- declare void %llvm.va_copy(i8* &lt;destarglist&gt;, i8* &lt;srcarglist&gt;)
+ declare void @llvm.va_copy(i8* &lt;destarglist&gt;, i8* &lt;srcarglist&gt;)
</pre>
<h5>Overview:</h5>
@@ -3773,7 +3773,7 @@ href="GarbageCollection.html">Accurate Garbage Collection with LLVM</a>.
<h5>Syntax:</h5>
<pre>
- declare void %llvm.gcroot(&lt;ty&gt;** %ptrloc, &lt;ty2&gt;* %metadata)
+ declare void @llvm.gcroot(&lt;ty&gt;** %ptrloc, &lt;ty2&gt;* %metadata)
</pre>
<h5>Overview:</h5>
@@ -3807,7 +3807,7 @@ the runtime to find the pointer at GC safe points.
<h5>Syntax:</h5>
<pre>
- declare i8 * %llvm.gcread(i8 * %ObjPtr, i8 ** %Ptr)
+ declare i8 * @llvm.gcread(i8 * %ObjPtr, i8 ** %Ptr)
</pre>
<h5>Overview:</h5>
@@ -3842,7 +3842,7 @@ garbage collector runtime, as needed.</p>
<h5>Syntax:</h5>
<pre>
- declare void %llvm.gcwrite(i8 * %P1, i8 * %Obj, i8 ** %P2)
+ declare void @llvm.gcwrite(i8 * %P1, i8 * %Obj, i8 ** %P2)
</pre>
<h5>Overview:</h5>
@@ -3890,7 +3890,7 @@ be implemented with code generator support.
<h5>Syntax:</h5>
<pre>
- declare i8 *%llvm.returnaddress(i32 &lt;level&gt;)
+ declare i8 *@llvm.returnaddress(i32 &lt;level&gt;)
</pre>
<h5>Overview:</h5>
@@ -3935,7 +3935,7 @@ source-language caller.
<h5>Syntax:</h5>
<pre>
- declare i8 *%llvm.frameaddress(i32 &lt;level&gt;)
+ declare i8 *@llvm.frameaddress(i32 &lt;level&gt;)
</pre>
<h5>Overview:</h5>
@@ -3978,7 +3978,7 @@ source-language caller.
<h5>Syntax:</h5>
<pre>
- declare i8 *%llvm.stacksave()
+ declare i8 *@llvm.stacksave()
</pre>
<h5>Overview:</h5>
@@ -4013,7 +4013,7 @@ that were allocated after the <tt>llvm.stacksave</tt> was executed.
<h5>Syntax:</h5>
<pre>
- declare void %llvm.stackrestore(i8 * %ptr)
+ declare void @llvm.stackrestore(i8 * %ptr)
</pre>
<h5>Overview:</h5>
@@ -4044,7 +4044,7 @@ See the description for <a href="#i_stacksave"><tt>llvm.stacksave</tt></a>.
<h5>Syntax:</h5>
<pre>
- declare void %llvm.prefetch(i8 * &lt;address&gt;,
+ declare void @llvm.prefetch(i8 * &lt;address&gt;,
i32 &lt;rw&gt;, i32 &lt;locality&gt;)
</pre>
@@ -4089,7 +4089,7 @@ performance.
<h5>Syntax:</h5>
<pre>
- declare void %llvm.pcmarker( i32 &lt;id&gt; )
+ declare void @llvm.pcmarker( i32 &lt;id&gt; )
</pre>
<h5>Overview:</h5>
@@ -4130,7 +4130,7 @@ support this intrinisic may ignore it.
<h5>Syntax:</h5>
<pre>
- declare i64 %llvm.readcyclecounter( )
+ declare i64 @llvm.readcyclecounter( )
</pre>
<h5>Overview:</h5>
@@ -4178,9 +4178,9 @@ for more efficient code generation.
<h5>Syntax:</h5>
<pre>
- declare void %llvm.memcpy.i32(i8 * &lt;dest&gt;, i8 * &lt;src&gt;,
+ declare void @llvm.memcpy.i32(i8 * &lt;dest&gt;, i8 * &lt;src&gt;,
i32 &lt;len&gt;, i32 &lt;align&gt;)
- declare void %llvm.memcpy.i64(i8 * &lt;dest&gt;, i8 * &lt;src&gt;,
+ declare void @llvm.memcpy.i64(i8 * &lt;dest&gt;, i8 * &lt;src&gt;,
i64 &lt;len&gt;, i32 &lt;align&gt;)
</pre>
@@ -4232,9 +4232,9 @@ be set to 0 or 1.
<h5>Syntax:</h5>
<pre>
- declare void %llvm.memmove.i32(i8 * &lt;dest&gt;, i8 * &lt;src&gt;,
+ declare void @llvm.memmove.i32(i8 * &lt;dest&gt;, i8 * &lt;src&gt;,
i32 &lt;len&gt;, i32 &lt;align&gt;)
- declare void %llvm.memmove.i64(i8 * &lt;dest&gt;, i8 * &lt;src&gt;,
+ declare void @llvm.memmove.i64(i8 * &lt;dest&gt;, i8 * &lt;src&gt;,
i64 &lt;len&gt;, i32 &lt;align&gt;)
</pre>
@@ -4287,9 +4287,9 @@ be set to 0 or 1.
<h5>Syntax:</h5>
<pre>
- declare void %llvm.memset.i32(i8 * &lt;dest&gt;, i8 &lt;val&gt;,
+ declare void @llvm.memset.i32(i8 * &lt;dest&gt;, i8 &lt;val&gt;,
i32 &lt;len&gt;, i32 &lt;align&gt;)
- declare void %llvm.memset.i64(i8 * &lt;dest&gt;, i8 &lt;val&gt;,
+ declare void @llvm.memset.i64(i8 * &lt;dest&gt;, i8 &lt;val&gt;,
i64 &lt;len&gt;, i32 &lt;align&gt;)
</pre>
@@ -4340,8 +4340,8 @@ this can be specified as the fourth argument, otherwise it should be set to 0 or
<h5>Syntax:</h5>
<pre>
- declare float %llvm.sqrt.f32(float %Val)
- declare double %llvm.sqrt.f64(double %Val)
+ declare float @llvm.sqrt.f32(float %Val)
+ declare double @llvm.sqrt.f64(double %Val)
</pre>
<h5>Overview:</h5>
@@ -4376,8 +4376,8 @@ floating point number.
<h5>Syntax:</h5>
<pre>
- declare float %llvm.powi.f32(float %Val, i32 %power)
- declare double %llvm.powi.f64(double %Val, i32 %power)
+ declare float @llvm.powi.f32(float %Val, i32 %power)
+ declare double @llvm.powi.f64(double %Val, i32 %power)
</pre>
<h5>Overview:</h5>
@@ -4425,9 +4425,9 @@ These allow efficient code generation for some algorithms.
<h5>Syntax:</h5>
<pre>
- declare i16 %llvm.bswap.i16(i16 &lt;id&gt;)
- declare i32 %llvm.bswap.i32(i32 &lt;id&gt;)
- declare i64 %llvm.bswap.i64(i64 &lt;id&gt;)
+ declare i16 @llvm.bswap.i16(i16 &lt;id&gt;)
+ declare i32 @llvm.bswap.i32(i32 &lt;id&gt;)
+ declare i64 @llvm.bswap.i64(i64 &lt;id&gt;)
</pre>
<h5>Overview:</h5>
@@ -4460,10 +4460,10 @@ intrinsic extends this concept to 64 bits.
<h5>Syntax:</h5>
<pre>
- declare i8 %llvm.ctpop.i8 (i8 &lt;src&gt;)
- declare i16 %llvm.ctpop.i16(i16 &lt;src&gt;)
- declare i32 %llvm.ctpop.i32(i32 &lt;src&gt;)
- declare i64 %llvm.ctpop.i64(i64 &lt;src&gt;)
+ declare i8 @llvm.ctpop.i8 (i8 &lt;src&gt;)
+ declare i16 @llvm.ctpop.i16(i16 &lt;src&gt;)
+ declare i32 @llvm.ctpop.i32(i32 &lt;src&gt;)
+ declare i64 @llvm.ctpop.i64(i64 &lt;src&gt;)
</pre>
<h5>Overview:</h5>
@@ -4496,10 +4496,10 @@ The '<tt>llvm.ctpop</tt>' intrinsic counts the 1's in a variable.
<h5>Syntax:</h5>
<pre>
- declare i8 %llvm.ctlz.i8 (i8 &lt;src&gt;)
- declare i16 %llvm.ctlz.i16(i16 &lt;src&gt;)
- declare i32 %llvm.ctlz.i32(i32 &lt;src&gt;)
- declare i64 %llvm.ctlz.i64(i64 &lt;src&gt;)
+ declare i8 @llvm.ctlz.i8 (i8 &lt;src&gt;)
+ declare i16 @llvm.ctlz.i16(i16 &lt;src&gt;)
+ declare i32 @llvm.ctlz.i32(i32 &lt;src&gt;)
+ declare i64 @llvm.ctlz.i64(i64 &lt;src&gt;)
</pre>
<h5>Overview:</h5>
@@ -4536,10 +4536,10 @@ of src. For example, <tt>llvm.ctlz(i32 2) = 30</tt>.
<h5>Syntax:</h5>
<pre>
- declare i8 %llvm.cttz.i8 (i8 &lt;src&gt;)
- declare i16 %llvm.cttz.i16(i16 &lt;src&gt;)
- declare i32 %llvm.cttz.i32(i32 &lt;src&gt;)
- declare i64 %llvm.cttz.i64(i64 &lt;src&gt;)
+ declare i8 @llvm.cttz.i8 (i8 &lt;src&gt;)
+ declare i16 @llvm.cttz.i16(i16 &lt;src&gt;)
+ declare i32 @llvm.cttz.i32(i32 &lt;src&gt;)
+ declare i64 @llvm.cttz.i64(i64 &lt;src&gt;)
</pre>
<h5>Overview:</h5>