-rw-r--r--  lib/Bitcode/NaCl/Reader/NaClBitcodeReader.cpp    |  2
-rw-r--r--  lib/Bitcode/NaCl/Writer/NaClBitcodeWriter.cpp    | 11
-rw-r--r--  lib/Bitcode/NaCl/Writer/NaClValueEnumerator.cpp  | 45
-rw-r--r--  test/NaCl/Bitcode/bitcast-elide.ll               | 35
-rw-r--r--  test/NaCl/Bitcode/call-elide.ll                  | 57
-rw-r--r--  test/NaCl/Bitcode/inttoptr-elide.ll              | 35
6 files changed, 89 insertions(+), 96 deletions(-)
diff --git a/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.cpp b/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.cpp
index 994bf712ae..aac03678d1 100644
--- a/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.cpp
+++ b/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.cpp
@@ -1429,7 +1429,7 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
if (OpNum != Record.size())
return Error("Invalid RET record");
- I = ReturnInst::Create(Context, Op);
+ I = ReturnInst::Create(Context, ConvertOpToScalar(Op, CurBBNo));
break;
}
case naclbitc::FUNC_CODE_INST_BR: { // BR: [bb#, bb#, opval] or [bb#]
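Reader side: Ret is now treated as a use that expects a scalar value (see the ExpectsIntPtrType change below), so the writer may elide a ptrtoint whose result is only consumed by a return. Wrapping the RET operand in ConvertOpToScalar lets the reader rebuild that cast when the restored operand is still a pointer. A rough sketch of the round trip, mirroring the new ReturnPtrIntrinsic test added to call-elide.ll further down:

    ; IR as written by the frontend (and as reconstructed by the reader):
    %1 = call i8* @llvm.nacl.read.tp()
    %2 = ptrtoint i8* %1 to i32
    ret i32 %2

    ; In PNaCl version 2 bitcode the INST_CAST record for %2 is dropped;
    ; ConvertOpToScalar(Op, CurBBNo) re-inserts the ptrtoint before the ret.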
diff --git a/lib/Bitcode/NaCl/Writer/NaClBitcodeWriter.cpp b/lib/Bitcode/NaCl/Writer/NaClBitcodeWriter.cpp
index ea5e9cde54..4a6a454796 100644
--- a/lib/Bitcode/NaCl/Writer/NaClBitcodeWriter.cpp
+++ b/lib/Bitcode/NaCl/Writer/NaClBitcodeWriter.cpp
@@ -718,7 +718,16 @@ static bool WriteInstruction(const Instruction &I, unsigned InstID,
AbbrevToUse = FUNCTION_INST_CAST_ABBREV;
pushValue(I.getOperand(0), InstID, Vals, VE, Stream);
Vals.push_back(VE.getTypeID(I.getType()));
- Vals.push_back(GetEncodedCastOpcode(I.getOpcode(), I));
+ unsigned Opcode = I.getOpcode();
+ Vals.push_back(GetEncodedCastOpcode(Opcode, I));
+ if (PNaClVersion >= 2 &&
+ (Opcode == Instruction::PtrToInt ||
+ Opcode == Instruction::IntToPtr ||
+ (Opcode == Instruction::BitCast &&
+ (I.getOperand(0)->getType()->isPointerTy() ||
+ I.getType()->isPointerTy())))) {
+ ReportIllegalValue("(PNaCl ABI) pointer cast", I);
+ }
} else if (isa<BinaryOperator>(I)) {
// BINOP: [opval, opval, opcode[, flags]]
Code = naclbitc::FUNC_CODE_INST_BINOP;
diff --git a/lib/Bitcode/NaCl/Writer/NaClValueEnumerator.cpp b/lib/Bitcode/NaCl/Writer/NaClValueEnumerator.cpp
index 8cfdf13240..5dbc525eec 100644
--- a/lib/Bitcode/NaCl/Writer/NaClValueEnumerator.cpp
+++ b/lib/Bitcode/NaCl/Writer/NaClValueEnumerator.cpp
@@ -18,6 +18,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/Debug.h"
@@ -466,11 +467,13 @@ static bool AllUsesExpectsNormalizedPtr(const Instruction *I2P) {
return true;
}
-// Given Value that uses scalar value Arg, returns true if the bitcode
-// writer can assume that Value always expects Arg to be scalar. This
-// function is used to infer cases where PtrToInt casts can be
-// removed.
-static bool ExpectsScalarValue(const Value *V, const Instruction *Arg) {
+// Given a value V that uses the instruction Arg, returns true if the
+// bitcode writer can assume that V always expects Arg to be scalar.
+// Assumes that the type of Arg is the integer type used to model
+// pointers. Hence, this function determines whether the reader would
+// have to convert a pointer back to an integer. This function is
+// used to infer cases where PtrToInt casts can be removed.
+static bool ExpectsIntPtrType(const Value *V, const Instruction *Arg) {
const Instruction *I = dyn_cast<Instruction>(V);
if (I == 0) return false;
@@ -487,6 +490,7 @@ static bool ExpectsScalarValue(const Value *V, const Instruction *Arg) {
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::ICmp:
+ case Instruction::Ret:
return true;
case Instruction::Store:
return Arg == I->getOperand(0);
@@ -495,8 +499,12 @@ static bool ExpectsScalarValue(const Value *V, const Instruction *Arg) {
return Arg == Op->getTrueValue() || Arg == Op->getFalseValue();
}
case Instruction::Call: {
- // All operands (except the first, which must be a function pointer),
- // can be scalar values.
+ // All operands (except the first, which must be a function
+ // pointer) must be scalar values. Note: for non-intrinsic calls
+ // this is a requirement of the PNaCl ABI. For intrinsic calls,
+ // the fact that Arg's type is an integer type implies that the
+ // intrinsic requires a scalar value at that position.
const CallInst *Call = cast<CallInst>(I);
return Call->getCalledValue() != Arg;
}
@@ -504,13 +512,12 @@ static bool ExpectsScalarValue(const Value *V, const Instruction *Arg) {
}
}
-// Returns true if the bitcode reader and writer can assume that the
-// uses of the given PtrToInt expect scalar values (i.e. non-pointer),
-// and hence, we can elide the PtrToInt cast.
-static bool AllUsesExpectsScalarValue(const Instruction *I) {
+// Returns true if all uses of I expect I to be scalar, given that
+// the type of I is the integer type used to represent pointers.
+static bool AllUsesExpectsIntPtrType(const Instruction *I) {
for (Value::const_use_iterator Use = I->use_begin(), UseEnd = I->use_end();
Use != UseEnd; ++Use) {
- if (!ExpectsScalarValue(*Use, I)) return false;
+ if (!ExpectsIntPtrType(*Use, I)) return false;
}
// If reached, all uses expect a scalar value (and hence we know how
// to automatically add it back), or there were no uses (and hence
@@ -518,10 +525,20 @@ static bool AllUsesExpectsScalarValue(const Instruction *I) {
return true;
}
+// Returns true if the value is an intrinsic instruction that
+// returns a pointer value.
+static inline bool IsIntrinsicReturningPtr(const Value *V) {
+ if (const IntrinsicInst *ICall = dyn_cast<IntrinsicInst>(V)) {
+ return V->getType()->isPointerTy();
+ }
+ return false;
+}
+
// Returns true if the value is an InherentPtr (as defined in
// llvm/lib/Transforms/NaCl/ReplacePtrsWithInts.cpp).
static inline bool IsInherentPtr(const Value *V) {
- return isa<AllocaInst>(V) || isa<GlobalValue>(V);
+ return isa<AllocaInst>(V) || isa<GlobalValue>(V) ||
+ IsIntrinsicReturningPtr(V);
}
// Note: This function is based on the comments in
@@ -547,7 +564,7 @@ const Value *NaClValueEnumerator::ElideCasts(const Value *V) {
case Instruction::PtrToInt:
if (IsIntPtrType(I->getType()) &&
IsInherentPtr(I->getOperand(0)) &&
- AllUsesExpectsScalarValue(I)) {
+ AllUsesExpectsIntPtrType(I)) {
V = I->getOperand(0);
}
break;
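Writer side: after this change, ElideCasts drops a PtrToInt when its result type is the integer type used to model pointers, its operand is an InherentPtr (an alloca, a global, or now an intrinsic call that returns a pointer), and every use is one the reader knows how to rebuild the cast for (integer-to-float casts, icmp, select operands, stored values, call arguments, and now ret). A minimal illustrative sketch, not taken from the test files:

    %buf = alloca i8, i32 4, align 8     ; alloca: an InherentPtr
    %addr = ptrtoint i8* %buf to i32     ; elidable: its only use expects an i32
    ret i32 %addr

A ptrtoint whose operand is not an InherentPtr, or that has a use the reader cannot rebuild the cast for, is still written out as an INST_CAST record.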
diff --git a/test/NaCl/Bitcode/bitcast-elide.ll b/test/NaCl/Bitcode/bitcast-elide.ll
index 418b593762..bc1edb7bf5 100644
--- a/test/NaCl/Bitcode/bitcast-elide.ll
+++ b/test/NaCl/Bitcode/bitcast-elide.ll
@@ -93,41 +93,6 @@ define void @SimpleLoadAlloca() {
; ------------------------------------------------------
-; Test that we don't elide an bitcast if one of its uses is not a load.
-define i32* @NonsimpleLoad(i32 %i) {
- %1 = bitcast [4 x i8]* @bytes to i32*
- %2 = load i32* %1, align 4
- ret i32* %1
-}
-
-; TD1: define i32* @NonsimpleLoad(i32 %i) {
-; TD1-NEXT: %1 = bitcast [4 x i8]* @bytes to i32*
-; TD1-NEXT: %2 = load i32* %1, align 4
-; TD1-NEXT: ret i32* %1
-; TD1-NEXT: }
-
-; PF1: <FUNCTION_BLOCK>
-; PF1-NEXT: <DECLAREBLOCKS op0=1/>
-; PF1-NEXT: <INST_CAST op0=2 op1=1 op2=11/>
-; PF1-NEXT: <INST_LOAD op0=1 op1=3 op2=0/>
-; PF1-NEXT: <INST_RET op0=2/>
-; PF1: </FUNCTION_BLOCK>
-
-; TD2: define i32* @NonsimpleLoad(i32 %i) {
-; TD2-NEXT: %1 = bitcast [4 x i8]* @bytes to i32*
-; TD2-NEXT: %2 = load i32* %1, align 4
-; TD2-NEXT: ret i32* %1
-; TD2-NEXT: }
-
-; PF2: <FUNCTION_BLOCK>
-; PF2-NEXT: <DECLAREBLOCKS op0=1/>
-; PF2-NEXT: <INST_CAST op0=2 op1=1 op2=11/>
-; PF2-NEXT: <INST_LOAD op0=1 op1=3 op2=0/>
-; PF2-NEXT: <INST_RET op0=2/>
-; PF2: </FUNCTION_BLOCK>
-
-; ------------------------------------------------------
-
; Test that we can handle multiple bitcasts.
define i32 @TwoLoads(i32 %i) {
%1 = bitcast [4 x i8]* @bytes to i32*
diff --git a/test/NaCl/Bitcode/call-elide.ll b/test/NaCl/Bitcode/call-elide.ll
index b3acb36a9d..c0b9cfc31b 100644
--- a/test/NaCl/Bitcode/call-elide.ll
+++ b/test/NaCl/Bitcode/call-elide.ll
@@ -37,7 +37,7 @@ define void @DirectCall() {
; PF1: <FUNCTION_BLOCK>
; PF1: </CONSTANTS_BLOCK>
-; PF1-NEXT: <INST_CALL op0=0 op1=14 op2=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=16 op2=1/>
; PF1-NEXT: <INST_RET/>
; PF1-NEXT: </FUNCTION_BLOCK>
@@ -48,7 +48,7 @@ define void @DirectCall() {
; PF2: <FUNCTION_BLOCK>
; PF2: </CONSTANTS_BLOCK>
-; PF2-NEXT: <INST_CALL op0=0 op1=14 op2=1/>
+; PF2-NEXT: <INST_CALL op0=0 op1=16 op2=1/>
; PF2-NEXT: <INST_RET/>
; PF2-NEXT: </FUNCTION_BLOCK>
@@ -73,7 +73,7 @@ define void @DirectCallIntToPtrArg(i32 %i) {
; PF1: <FUNCTION_BLOCK>
; PF1-NEXT: <DECLAREBLOCKS op0=1/>
; PF1-NEXT: <INST_CAST op0=1 op1=4 op2=10/>
-; PF1-NEXT: <INST_CALL op0=0 op1=14 op2=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=16 op2=1/>
; PF1-NEXT: <INST_RET/>
; PF1: </FUNCTION_BLOCK>
@@ -85,7 +85,7 @@ define void @DirectCallIntToPtrArg(i32 %i) {
; PF2: <FUNCTION_BLOCK>
; PF2-NEXT: <DECLAREBLOCKS op0=1/>
-; PF2-NEXT: <INST_CALL op0=0 op1=13 op2=1/>
+; PF2-NEXT: <INST_CALL op0=0 op1=15 op2=1/>
; PF2-NEXT: <INST_RET/>
; PF2: </FUNCTION_BLOCK>
@@ -110,7 +110,7 @@ define void @DirectCallPtrToIntArg() {
; PF1: </CONSTANTS_BLOCK>
; PF1-NEXT: <INST_ALLOCA op0=1 op1=4/>
; PF1-NEXT: <INST_CAST op0=1 op1=0 op2=9/>
-; PF1-NEXT: <INST_CALL op0=0 op1=16 op2=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=18 op2=1/>
; PF1-NEXT: <INST_RET/>
; PF1-NEXT: </FUNCTION_BLOCK>
@@ -124,7 +124,7 @@ define void @DirectCallPtrToIntArg() {
; PF2: <FUNCTION_BLOCK>
; PF2: </CONSTANTS_BLOCK>
; PF2-NEXT: <INST_ALLOCA op0=1 op1=4/>
-; PF2-NEXT: <INST_CALL op0=0 op1=15 op2=1/>
+; PF2-NEXT: <INST_CALL op0=0 op1=17 op2=1/>
; PF2-NEXT: <INST_RET/>
; PF2-NEXT: </FUNCTION_BLOCK>
@@ -146,7 +146,7 @@ define void @DirectCallBitcastArg(i32 %i) {
; PF1: <FUNCTION_BLOCK>
; PF1-NEXT: <DECLAREBLOCKS op0=1/>
; PF1-NEXT: <INST_CAST op0=2 op1=4 op2=11/>
-; PF1-NEXT: <INST_CALL op0=0 op1=14 op2=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=16 op2=1/>
; PF1-NEXT: <INST_RET/>
; PF1: </FUNCTION_BLOCK>
@@ -158,7 +158,7 @@ define void @DirectCallBitcastArg(i32 %i) {
; PF2: <FUNCTION_BLOCK>
; PF2-NEXT: <DECLAREBLOCKS op0=1/>
-; PF2-NEXT: <INST_CALL op0=0 op1=13 op2=2/>
+; PF2-NEXT: <INST_CALL op0=0 op1=15 op2=2/>
; PF2-NEXT: <INST_RET/>
; PF2: </FUNCTION_BLOCK>
@@ -180,7 +180,7 @@ define void @DirectCallScalarArg(i32* %ptr) {
; PF1: <FUNCTION_BLOCK>
; PF1-NEXT: <DECLAREBLOCKS op0=1/>
; PF1-NEXT: <INST_CAST op0=2 op1=0 op2=9/>
-; PF1-NEXT: <INST_CALL op0=0 op1=15 op2=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=17 op2=1/>
; PF1-NEXT: <INST_RET/>
; PF1: </FUNCTION_BLOCK>
@@ -192,7 +192,7 @@ define void @DirectCallScalarArg(i32* %ptr) {
; PF2: <FUNCTION_BLOCK>
; PF2-NEXT: <DECLAREBLOCKS op0=1/>
-; PF2-NEXT: <INST_CALL op0=0 op1=14 op2=2/>
+; PF2-NEXT: <INST_CALL op0=0 op1=16 op2=2/>
; PF2-NEXT: <INST_RET/>
; PF2: </FUNCTION_BLOCK>
@@ -389,3 +389,40 @@ define void @IndirectCallScalarArg(i32 %i, i32* %ptr) {
; PF2-NEXT: <INST_CALL_INDIRECT op0=0 op1=2 op2=2 op3=3/>
; PF2-NEXT: <INST_RET/>
; PF2: </FUNCTION_BLOCK>
+
+; ------------------------------------------------------
+; Test how we handle intrinsics that can return (inherent) pointers, and
+; return statements that expect scalar values.
+
+declare i8* @llvm.nacl.read.tp()
+
+define i32 @ReturnPtrIntrinsic() {
+ %1 = call i8* @llvm.nacl.read.tp()
+ %2 = ptrtoint i8* %1 to i32
+ ret i32 %2
+}
+
+; TD1: define i32 @ReturnPtrIntrinsic() {
+; TD1-NEXT: %1 = call i8* @llvm.nacl.read.tp()
+; TD1-NEXT: %2 = ptrtoint i8* %1 to i32
+; TD1-NEXT: ret i32 %2
+; TD1-NEXT: }
+
+; PF1: <FUNCTION_BLOCK>
+; PF1-NEXT: <DECLAREBLOCKS op0=1/>
+; PF1-NEXT: <INST_CALL op0=0 op1=3/>
+; PF1-NEXT: <INST_CAST op0=1 op1=0 op2=9/>
+; PF1-NEXT: <INST_RET op0=1/>
+; PF1-NEXT: </FUNCTION_BLOCK>
+
+; TD2: define i32 @ReturnPtrIntrinsic() {
+; TD2-NEXT: %1 = call i8* @llvm.nacl.read.tp()
+; TD2-NEXT: %2 = ptrtoint i8* %1 to i32
+; TD2-NEXT: ret i32 %2
+; TD2-NEXT: }
+
+; PF2: <FUNCTION_BLOCK>
+; PF2-NEXT: <DECLAREBLOCKS op0=1/>
+; PF2-NEXT: <INST_CALL op0=0 op1=3/>
+; PF2-NEXT: <INST_RET op0=1/>
+; PF2-NEXT: </FUNCTION_BLOCK>
diff --git a/test/NaCl/Bitcode/inttoptr-elide.ll b/test/NaCl/Bitcode/inttoptr-elide.ll
index a01aaae1f7..40859ed9a6 100644
--- a/test/NaCl/Bitcode/inttoptr-elide.ll
+++ b/test/NaCl/Bitcode/inttoptr-elide.ll
@@ -50,41 +50,6 @@ define void @SimpleLoad(i32 %i) {
; ------------------------------------------------------
-; Test that we don't elide an inttoptr if one of its uses is not a load.
-define i32* @NonsimpleLoad(i32 %i) {
- %1 = inttoptr i32 %i to i32*
- %2 = load i32* %1, align 4
- ret i32* %1
-}
-
-; TD1: define i32* @NonsimpleLoad(i32 %i) {
-; TD1-NEXT: %1 = inttoptr i32 %i to i32*
-; TD1-NEXT: %2 = load i32* %1, align 4
-; TD1-NEXT: ret i32* %1
-; TD1-NEXT: }
-
-; PF1: <FUNCTION_BLOCK>
-; PF1-NEXT: <DECLAREBLOCKS op0=1/>
-; PF1-NEXT: <INST_CAST op0=1 op1=1 op2=10/>
-; PF1-NEXT: <INST_LOAD op0=1 op1=3 op2=0/>
-; PF1-NEXT: <INST_RET op0=2/>
-; PF1: </FUNCTION_BLOCK>
-
-; TD2: define i32* @NonsimpleLoad(i32 %i) {
-; TD2-NEXT: %1 = inttoptr i32 %i to i32*
-; TD2-NEXT: %2 = load i32* %1, align 4
-; TD2-NEXT: ret i32* %1
-; TD2-NEXT: }
-
-; PF2: <FUNCTION_BLOCK>
-; PF2-NEXT: <DECLAREBLOCKS op0=1/>
-; PF2-NEXT: <INST_CAST op0=1 op1=1 op2=10/>
-; PF2-NEXT: <INST_LOAD op0=1 op1=3 op2=0/>
-; PF2-NEXT: <INST_RET op0=2/>
-; PF2: </FUNCTION_BLOCK>
-
-; ------------------------------------------------------
-
; Test that we can handle multiple inttoptr of loads.
define i32 @TwoLoads(i32 %i) {
%1 = inttoptr i32 %i to i32*