author    Karl Schimpf <kschimpf@google.com>  2013-09-03 13:59:55 -0700
committer Karl Schimpf <kschimpf@google.com>  2013-09-03 13:59:55 -0700
commit    30aa17affbfa35a9d32895ff6f4b5f5fbfc9575a (patch)
tree      387b9668a029e22fcbcbf428bc46b195e5c8e146 /test
parent    264065105b2b9be73662b3c7e5a66c9d70d26a2c (diff)
Remove all remaining pointer casts from PNaCl bitcode files.
For PNaClVersion==2, removes the remaining pointer casts from bitcode files. This includes:

* The return value can be cast to a scalar value.
* Intrinsic calls may return an inherent pointer, which may need casting to a scalar value.

Also modifies the tests bitcast-elide.ll and inttoptr-elide.ll by removing tests that assumed there were remaining pointer bitcasts that do not get removed.

BUG= https://code.google.com/p/nativeclient/issues/detail?id=3544
R=jvoung@chromium.org

Review URL: https://codereview.chromium.org/23524003
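For reference, the ReturnPtrIntrinsic test added to call-elide.ll below condenses the pattern this change targets. Under PNaClVersion==2 the writer elides the ptrtoint record on the intrinsic's pointer result (PF2 emits only INST_CALL and INST_RET, with no INST_CAST), and the reader reconstitutes the cast when rebuilding the IR (TD2). A minimal sketch of that IR, annotated here only for illustration:

declare i8* @llvm.nacl.read.tp()

define i32 @ReturnPtrIntrinsic() {
  %1 = call i8* @llvm.nacl.read.tp()   ; intrinsic returns an inherent pointer
  %2 = ptrtoint i8* %1 to i32          ; cast record elided in PNaClVersion==2 bitcode
  ret i32 %2                           ; reader re-inserts the ptrtoint on read-back
}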
Diffstat (limited to 'test')
-rw-r--r--  test/NaCl/Bitcode/bitcast-elide.ll  | 35
-rw-r--r--  test/NaCl/Bitcode/call-elide.ll     | 57
-rw-r--r--  test/NaCl/Bitcode/inttoptr-elide.ll | 35
3 files changed, 47 insertions(+), 80 deletions(-)
diff --git a/test/NaCl/Bitcode/bitcast-elide.ll b/test/NaCl/Bitcode/bitcast-elide.ll
index 418b593762..bc1edb7bf5 100644
--- a/test/NaCl/Bitcode/bitcast-elide.ll
+++ b/test/NaCl/Bitcode/bitcast-elide.ll
@@ -93,41 +93,6 @@ define void @SimpleLoadAlloca() {
; ------------------------------------------------------
-; Test that we don't elide an bitcast if one of its uses is not a load.
-define i32* @NonsimpleLoad(i32 %i) {
- %1 = bitcast [4 x i8]* @bytes to i32*
- %2 = load i32* %1, align 4
- ret i32* %1
-}
-
-; TD1: define i32* @NonsimpleLoad(i32 %i) {
-; TD1-NEXT: %1 = bitcast [4 x i8]* @bytes to i32*
-; TD1-NEXT: %2 = load i32* %1, align 4
-; TD1-NEXT: ret i32* %1
-; TD1-NEXT: }
-
-; PF1: <FUNCTION_BLOCK>
-; PF1-NEXT: <DECLAREBLOCKS op0=1/>
-; PF1-NEXT: <INST_CAST op0=2 op1=1 op2=11/>
-; PF1-NEXT: <INST_LOAD op0=1 op1=3 op2=0/>
-; PF1-NEXT: <INST_RET op0=2/>
-; PF1: </FUNCTION_BLOCK>
-
-; TD2: define i32* @NonsimpleLoad(i32 %i) {
-; TD2-NEXT: %1 = bitcast [4 x i8]* @bytes to i32*
-; TD2-NEXT: %2 = load i32* %1, align 4
-; TD2-NEXT: ret i32* %1
-; TD2-NEXT: }
-
-; PF2: <FUNCTION_BLOCK>
-; PF2-NEXT: <DECLAREBLOCKS op0=1/>
-; PF2-NEXT: <INST_CAST op0=2 op1=1 op2=11/>
-; PF2-NEXT: <INST_LOAD op0=1 op1=3 op2=0/>
-; PF2-NEXT: <INST_RET op0=2/>
-; PF2: </FUNCTION_BLOCK>
-
-; ------------------------------------------------------
-
; Test that we can handle multiple bitcasts.
define i32 @TwoLoads(i32 %i) {
%1 = bitcast [4 x i8]* @bytes to i32*
diff --git a/test/NaCl/Bitcode/call-elide.ll b/test/NaCl/Bitcode/call-elide.ll
index b3acb36a9d..c0b9cfc31b 100644
--- a/test/NaCl/Bitcode/call-elide.ll
+++ b/test/NaCl/Bitcode/call-elide.ll
@@ -37,7 +37,7 @@ define void @DirectCall() {
; PF1: <FUNCTION_BLOCK>
; PF1: </CONSTANTS_BLOCK>
-; PF1-NEXT: <INST_CALL op0=0 op1=14 op2=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=16 op2=1/>
; PF1-NEXT: <INST_RET/>
; PF1-NEXT: </FUNCTION_BLOCK>
@@ -48,7 +48,7 @@ define void @DirectCall() {
; PF2: <FUNCTION_BLOCK>
; PF2: </CONSTANTS_BLOCK>
-; PF2-NEXT: <INST_CALL op0=0 op1=14 op2=1/>
+; PF2-NEXT: <INST_CALL op0=0 op1=16 op2=1/>
; PF2-NEXT: <INST_RET/>
; PF2-NEXT: </FUNCTION_BLOCK>
@@ -73,7 +73,7 @@ define void @DirectCallIntToPtrArg(i32 %i) {
; PF1: <FUNCTION_BLOCK>
; PF1-NEXT: <DECLAREBLOCKS op0=1/>
; PF1-NEXT: <INST_CAST op0=1 op1=4 op2=10/>
-; PF1-NEXT: <INST_CALL op0=0 op1=14 op2=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=16 op2=1/>
; PF1-NEXT: <INST_RET/>
; PF1: </FUNCTION_BLOCK>
@@ -85,7 +85,7 @@ define void @DirectCallIntToPtrArg(i32 %i) {
; PF2: <FUNCTION_BLOCK>
; PF2-NEXT: <DECLAREBLOCKS op0=1/>
-; PF2-NEXT: <INST_CALL op0=0 op1=13 op2=1/>
+; PF2-NEXT: <INST_CALL op0=0 op1=15 op2=1/>
; PF2-NEXT: <INST_RET/>
; PF2: </FUNCTION_BLOCK>
@@ -110,7 +110,7 @@ define void @DirectCallPtrToIntArg() {
; PF1: </CONSTANTS_BLOCK>
; PF1-NEXT: <INST_ALLOCA op0=1 op1=4/>
; PF1-NEXT: <INST_CAST op0=1 op1=0 op2=9/>
-; PF1-NEXT: <INST_CALL op0=0 op1=16 op2=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=18 op2=1/>
; PF1-NEXT: <INST_RET/>
; PF1-NEXT: </FUNCTION_BLOCK>
@@ -124,7 +124,7 @@ define void @DirectCallPtrToIntArg() {
; PF2: <FUNCTION_BLOCK>
; PF2: </CONSTANTS_BLOCK>
; PF2-NEXT: <INST_ALLOCA op0=1 op1=4/>
-; PF2-NEXT: <INST_CALL op0=0 op1=15 op2=1/>
+; PF2-NEXT: <INST_CALL op0=0 op1=17 op2=1/>
; PF2-NEXT: <INST_RET/>
; PF2-NEXT: </FUNCTION_BLOCK>
@@ -146,7 +146,7 @@ define void @DirectCallBitcastArg(i32 %i) {
; PF1: <FUNCTION_BLOCK>
; PF1-NEXT: <DECLAREBLOCKS op0=1/>
; PF1-NEXT: <INST_CAST op0=2 op1=4 op2=11/>
-; PF1-NEXT: <INST_CALL op0=0 op1=14 op2=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=16 op2=1/>
; PF1-NEXT: <INST_RET/>
; PF1: </FUNCTION_BLOCK>
@@ -158,7 +158,7 @@ define void @DirectCallBitcastArg(i32 %i) {
; PF2: <FUNCTION_BLOCK>
; PF2-NEXT: <DECLAREBLOCKS op0=1/>
-; PF2-NEXT: <INST_CALL op0=0 op1=13 op2=2/>
+; PF2-NEXT: <INST_CALL op0=0 op1=15 op2=2/>
; PF2-NEXT: <INST_RET/>
; PF2: </FUNCTION_BLOCK>
@@ -180,7 +180,7 @@ define void @DirectCallScalarArg(i32* %ptr) {
; PF1: <FUNCTION_BLOCK>
; PF1-NEXT: <DECLAREBLOCKS op0=1/>
; PF1-NEXT: <INST_CAST op0=2 op1=0 op2=9/>
-; PF1-NEXT: <INST_CALL op0=0 op1=15 op2=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=17 op2=1/>
; PF1-NEXT: <INST_RET/>
; PF1: </FUNCTION_BLOCK>
@@ -192,7 +192,7 @@ define void @DirectCallScalarArg(i32* %ptr) {
; PF2: <FUNCTION_BLOCK>
; PF2-NEXT: <DECLAREBLOCKS op0=1/>
-; PF2-NEXT: <INST_CALL op0=0 op1=14 op2=2/>
+; PF2-NEXT: <INST_CALL op0=0 op1=16 op2=2/>
; PF2-NEXT: <INST_RET/>
; PF2: </FUNCTION_BLOCK>
@@ -389,3 +389,40 @@ define void @IndirectCallScalarArg(i32 %i, i32* %ptr) {
; PF2-NEXT: <INST_CALL_INDIRECT op0=0 op1=2 op2=2 op3=3/>
; PF2-NEXT: <INST_RET/>
; PF2: </FUNCTION_BLOCK>
+
+; ------------------------------------------------------
+; Test how we handle intrinsics that can return (inherent) pointers, and
+; return statements that expect scalar values.
+
+declare i8* @llvm.nacl.read.tp()
+
+define i32 @ReturnPtrIntrinsic() {
+ %1 = call i8* @llvm.nacl.read.tp()
+ %2 = ptrtoint i8* %1 to i32
+ ret i32 %2
+}
+
+; TD1: define i32 @ReturnPtrIntrinsic() {
+; TD1-NEXT: %1 = call i8* @llvm.nacl.read.tp()
+; TD1-NEXT: %2 = ptrtoint i8* %1 to i32
+; TD1-NEXT: ret i32 %2
+; TD1-NEXT: }
+
+; PF1: <FUNCTION_BLOCK>
+; PF1-NEXT: <DECLAREBLOCKS op0=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=3/>
+; PF1-NEXT: <INST_CAST op0=1 op1=0 op2=9/>
+; PF1-NEXT: <INST_RET op0=1/>
+; PF1-NEXT: </FUNCTION_BLOCK>
+
+; TD2: define i32 @ReturnPtrIntrinsic() {
+; TD2-NEXT: %1 = call i8* @llvm.nacl.read.tp()
+; TD2-NEXT: %2 = ptrtoint i8* %1 to i32
+; TD2-NEXT: ret i32 %2
+; TD2-NEXT: }
+
+; PF2: <FUNCTION_BLOCK>
+; PF2-NEXT: <DECLAREBLOCKS op0=1/>
+; PF2-NEXT: <INST_CALL op0=0 op1=3/>
+; PF2-NEXT: <INST_RET op0=1/>
+; PF2-NEXT: </FUNCTION_BLOCK>
diff --git a/test/NaCl/Bitcode/inttoptr-elide.ll b/test/NaCl/Bitcode/inttoptr-elide.ll
index a01aaae1f7..40859ed9a6 100644
--- a/test/NaCl/Bitcode/inttoptr-elide.ll
+++ b/test/NaCl/Bitcode/inttoptr-elide.ll
@@ -50,41 +50,6 @@ define void @SimpleLoad(i32 %i) {
; ------------------------------------------------------
-; Test that we don't elide an inttoptr if one of its uses is not a load.
-define i32* @NonsimpleLoad(i32 %i) {
- %1 = inttoptr i32 %i to i32*
- %2 = load i32* %1, align 4
- ret i32* %1
-}
-
-; TD1: define i32* @NonsimpleLoad(i32 %i) {
-; TD1-NEXT: %1 = inttoptr i32 %i to i32*
-; TD1-NEXT: %2 = load i32* %1, align 4
-; TD1-NEXT: ret i32* %1
-; TD1-NEXT: }
-
-; PF1: <FUNCTION_BLOCK>
-; PF1-NEXT: <DECLAREBLOCKS op0=1/>
-; PF1-NEXT: <INST_CAST op0=1 op1=1 op2=10/>
-; PF1-NEXT: <INST_LOAD op0=1 op1=3 op2=0/>
-; PF1-NEXT: <INST_RET op0=2/>
-; PF1: </FUNCTION_BLOCK>
-
-; TD2: define i32* @NonsimpleLoad(i32 %i) {
-; TD2-NEXT: %1 = inttoptr i32 %i to i32*
-; TD2-NEXT: %2 = load i32* %1, align 4
-; TD2-NEXT: ret i32* %1
-; TD2-NEXT: }
-
-; PF2: <FUNCTION_BLOCK>
-; PF2-NEXT: <DECLAREBLOCKS op0=1/>
-; PF2-NEXT: <INST_CAST op0=1 op1=1 op2=10/>
-; PF2-NEXT: <INST_LOAD op0=1 op1=3 op2=0/>
-; PF2-NEXT: <INST_RET op0=2/>
-; PF2: </FUNCTION_BLOCK>
-
-; ------------------------------------------------------
-
; Test that we can handle multiple inttoptr of loads.
define i32 @TwoLoads(i32 %i) {
%1 = inttoptr i32 %i to i32*