author     Chandler Carruth <chandlerc@gmail.com>    2011-12-12 11:59:10 +0000
committer  Chandler Carruth <chandlerc@gmail.com>    2011-12-12 11:59:10 +0000
commit     ddbc274169ed4ee0e0ac32ed194b925a180202fe (patch)
tree       a15e94ed378c7e90d4b6985af905b07f82ad8a36 /test/Feature
parent     2106badea341062643d4e11d6e9975b871fa61b9 (diff)
Manually upgrade the test suite to specify the flag to cttz and ctlz.
I followed three heuristics for deciding whether to set 'true' or 'false':

- Everything target independent got 'true', as that is the expected common output of the GCC builtins.
- If the target arch only has one way of implementing this operation, set the flag in the way that exercises the most of the codegen. For most architectures this is also the likely path from a GCC builtin, with 'true' being set. It will (eventually) require lowering away that difference, and then lowering to the architecture's operation.
- Otherwise, set the flag differently depending on which target operation should be tested.

Let me know if anyone has any issue with this pattern or would like specific tests of another form. This should allow the x86 codegen to just iteratively improve as I teach the backend how to differentiate between the two forms, and everything else should remain exactly the same.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146370 91177308-0d34-0410-b5e6-96231b3b80d8
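For reference, a minimal sketch (not part of this commit) of what the new i1 flag means for ctlz/cttz: when it is true, the result is undefined for a zero input, which lets targets lower the call directly onto hardware count/scan instructions whose zero-input behavior is undefined; when it is false, the intrinsic must return the operand's bit width for a zero input, which can require extra lowering on such targets. The function names below are illustrative only.

declare i32 @llvm.ctlz.i32(i32, i1)

; Result is undefined when %x is 0; maps cheaply onto hardware
; instructions that leave the zero case undefined.
define i32 @ctlz_zero_undef(i32 %x) {
  %r = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
  ret i32 %r
}

; Result must be 32 when %x is 0, so targets without a fully defined
; instruction need additional lowering (e.g. a select around the count).
define i32 @ctlz_zero_defined(i32 %x) {
  %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
  ret i32 %r
}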
Diffstat (limited to 'test/Feature')
-rw-r--r--  test/Feature/intrinsics.ll  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/test/Feature/intrinsics.ll b/test/Feature/intrinsics.ll
index fc13d5a631..c4e3db6174 100644
--- a/test/Feature/intrinsics.ll
+++ b/test/Feature/intrinsics.ll
@@ -15,21 +15,21 @@ declare i32 @llvm.ctpop.i32(i32)
declare i64 @llvm.ctpop.i64(i64)
-declare i8 @llvm.cttz.i8(i8)
+declare i8 @llvm.cttz.i8(i8, i1)
-declare i16 @llvm.cttz.i16(i16)
+declare i16 @llvm.cttz.i16(i16, i1)
-declare i32 @llvm.cttz.i32(i32)
+declare i32 @llvm.cttz.i32(i32, i1)
-declare i64 @llvm.cttz.i64(i64)
+declare i64 @llvm.cttz.i64(i64, i1)
-declare i8 @llvm.ctlz.i8(i8)
+declare i8 @llvm.ctlz.i8(i8, i1)
-declare i16 @llvm.ctlz.i16(i16)
+declare i16 @llvm.ctlz.i16(i16, i1)
-declare i32 @llvm.ctlz.i32(i32)
+declare i32 @llvm.ctlz.i32(i32, i1)
-declare i64 @llvm.ctlz.i64(i64)
+declare i64 @llvm.ctlz.i64(i64, i1)
declare float @llvm.sqrt.f32(float)
@@ -46,14 +46,14 @@ define void @libm() {
call i16 @llvm.ctpop.i16( i16 11 ) ; <i32>:6 [#uses=0]
call i32 @llvm.ctpop.i32( i32 12 ) ; <i32>:7 [#uses=0]
call i64 @llvm.ctpop.i64( i64 13 ) ; <i32>:8 [#uses=0]
- call i8 @llvm.ctlz.i8( i8 14 ) ; <i32>:9 [#uses=0]
- call i16 @llvm.ctlz.i16( i16 15 ) ; <i32>:10 [#uses=0]
- call i32 @llvm.ctlz.i32( i32 16 ) ; <i32>:11 [#uses=0]
- call i64 @llvm.ctlz.i64( i64 17 ) ; <i32>:12 [#uses=0]
- call i8 @llvm.cttz.i8( i8 18 ) ; <i32>:13 [#uses=0]
- call i16 @llvm.cttz.i16( i16 19 ) ; <i32>:14 [#uses=0]
- call i32 @llvm.cttz.i32( i32 20 ) ; <i32>:15 [#uses=0]
- call i64 @llvm.cttz.i64( i64 21 ) ; <i32>:16 [#uses=0]
+ call i8 @llvm.ctlz.i8( i8 14, i1 true ) ; <i32>:9 [#uses=0]
+ call i16 @llvm.ctlz.i16( i16 15, i1 true ) ; <i32>:10 [#uses=0]
+ call i32 @llvm.ctlz.i32( i32 16, i1 true ) ; <i32>:11 [#uses=0]
+ call i64 @llvm.ctlz.i64( i64 17, i1 true ) ; <i32>:12 [#uses=0]
+ call i8 @llvm.cttz.i8( i8 18, i1 true ) ; <i32>:13 [#uses=0]
+ call i16 @llvm.cttz.i16( i16 19, i1 true ) ; <i32>:14 [#uses=0]
+ call i32 @llvm.cttz.i32( i32 20, i1 true ) ; <i32>:15 [#uses=0]
+ call i64 @llvm.cttz.i64( i64 21, i1 true ) ; <i32>:16 [#uses=0]
ret void
}