author     Eli Bendersky <eliben@chromium.org>  2013-07-18 18:00:27 -0700
committer  Eli Bendersky <eliben@chromium.org>  2013-07-18 18:00:27 -0700
commit     4412ea4b8e019d00dc7574fe1723eea0473a8ec1 (patch)
tree       2badd5ce0727bfad02f10d0d82c8bcfa65677676 /lib
parent     4a9f2a703db400ccf760f34101bcdd57642f96e4 (diff)
parent     5b548094edef39376e17445aea28ad2b37d701c4 (diff)
Merge remote-tracking branch 'origin/master'
Diffstat (limited to 'lib')
-rw-r--r--  lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp   119
-rw-r--r--  lib/Analysis/NaCl/PNaClABIVerifyModule.cpp       16
-rw-r--r--  lib/Bitcode/NaCl/Reader/NaClBitcodeReader.cpp    32
-rw-r--r--  lib/Bitcode/NaCl/Reader/NaClBitcodeReader.h       1
-rw-r--r--  lib/IR/CMakeLists.txt                             1
-rw-r--r--  lib/IR/NaClAtomicIntrinsics.cpp                  84
-rw-r--r--  lib/Target/X86/X86ISelDAGToDAG.cpp               64
-rw-r--r--  lib/Transforms/NaCl/CMakeLists.txt                5
-rw-r--r--  lib/Transforms/NaCl/PNaClABISimplify.cpp          2
-rw-r--r--  lib/Transforms/NaCl/ReplacePtrsWithInts.cpp      14
-rw-r--r--  lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp  263
-rw-r--r--  lib/Transforms/NaCl/RewriteAtomics.cpp          332
12 files changed, 811 insertions, 122 deletions
diff --git a/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp b/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp
index 80d7da3f19..5318fc8af0 100644
--- a/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp
+++ b/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/NaCl.h"
#include "llvm/IR/Function.h"
@@ -19,6 +20,8 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/NaClAtomicIntrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
@@ -49,6 +52,10 @@ class PNaClABIVerifyFunctions : public FunctionPass {
if (ReporterIsOwned)
delete Reporter;
}
+ virtual bool doInitialization(Module &M) {
+ AtomicIntrinsics.reset(new NaCl::AtomicIntrinsics(M.getContext()));
+ return false;
+ }
bool runOnFunction(Function &F);
virtual void print(raw_ostream &O, const Module *M) const;
private:
@@ -56,6 +63,7 @@ class PNaClABIVerifyFunctions : public FunctionPass {
const char *checkInstruction(const Instruction *Inst);
PNaClABIErrorReporter *Reporter;
bool ReporterIsOwned;
+ OwningPtr<NaCl::AtomicIntrinsics> AtomicIntrinsics;
};
} // and anonymous namespace
@@ -144,16 +152,7 @@ static bool isValidScalarOperand(const Value *Val) {
isa<UndefValue>(Val));
}
-static bool isAllowedAlignment(unsigned Alignment, Type *Ty, bool IsAtomic) {
- if (IsAtomic) {
- // For atomic operations, the alignment must match the size of the type.
- if (Ty->isIntegerTy()) {
- unsigned Bits = Ty->getIntegerBitWidth();
- return Bits % 8 == 0 && Alignment == Bits / 8;
- }
- return (Ty->isDoubleTy() && Alignment == 8) ||
- (Ty->isFloatTy() && Alignment == 4);
- }
+static bool isAllowedAlignment(unsigned Alignment, Type *Ty) {
// Non-atomic integer operations must always use "align 1", since we
// do not want the backend to generate code with non-portable
// undefined behaviour (such as misaligned access faults) if user
@@ -169,6 +168,51 @@ static bool isAllowedAlignment(unsigned Alignment, Type *Ty, bool IsAtomic) {
(Ty->isFloatTy() && Alignment == 4);
}
+static bool hasAllowedAtomicRMWOperation(
+ const NaCl::AtomicIntrinsics::AtomicIntrinsic *I, const CallInst *Call) {
+ for (size_t P = 0; P != I->NumParams; ++P) {
+ if (I->ParamType[P] != NaCl::AtomicIntrinsics::RMW)
+ continue;
+
+ const Value *Operation = Call->getOperand(P);
+ if (!Operation)
+ return false;
+ const Constant *C = dyn_cast<Constant>(Operation);
+ if (!C)
+ return false;
+ const APInt &I = C->getUniqueInteger();
+ if (I.ule(NaCl::AtomicInvalid) || I.uge(NaCl::AtomicNum))
+ return false;
+ }
+ return true;
+}
+
+static bool hasAllowedAtomicMemoryOrder(
+ const NaCl::AtomicIntrinsics::AtomicIntrinsic *I, const CallInst *Call) {
+ for (size_t P = 0; P != I->NumParams; ++P) {
+ if (I->ParamType[P] != NaCl::AtomicIntrinsics::Mem)
+ continue;
+
+ const Value *MemoryOrder = Call->getOperand(P);
+ if (!MemoryOrder)
+ return false;
+ const Constant *C = dyn_cast<Constant>(MemoryOrder);
+ if (!C)
+ return false;
+ const APInt &I = C->getUniqueInteger();
+ if (I.ule(NaCl::MemoryOrderInvalid) || I.uge(NaCl::MemoryOrderNum))
+ return false;
+ // TODO For now only sequential consistency is allowed. When more
+ // are allowed we need to validate that the memory order is
+ // allowed on the specific atomic operation (e.g. no store
+ // acquire, and relationship between success/failure memory
+ // order on compare exchange).
+ if (I != NaCl::MemoryOrderSequentiallyConsistent)
+ return false;
+ }
+ return true;
+}
+
// Check the instruction's opcode and its operands. The operands may
// require opcode-specific checking.
//
@@ -198,6 +242,10 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) {
// ExtractValue and InsertValue operate on struct values.
case Instruction::ExtractValue:
case Instruction::InsertValue:
+ // Atomics should become NaCl intrinsics.
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::Fence:
return "bad instruction opcode";
default:
return "unknown instruction opcode";
@@ -216,8 +264,6 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) {
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
- // Memory instructions
- case Instruction::Fence:
// Conversion operations
case Instruction::Trunc:
case Instruction::ZExt:
@@ -256,32 +302,32 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) {
// Memory accesses.
case Instruction::Load: {
const LoadInst *Load = cast<LoadInst>(Inst);
+ PtrOperandIndex = Load->getPointerOperandIndex();
+ if (Load->isAtomic())
+ return "atomic load";
+ if (Load->isVolatile())
+ return "volatile load";
if (!isAllowedAlignment(Load->getAlignment(),
- Load->getType(),
- Load->isAtomic()))
+ Load->getType()))
return "bad alignment";
- PtrOperandIndex = 0;
if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex)))
return "bad pointer";
break;
}
case Instruction::Store: {
const StoreInst *Store = cast<StoreInst>(Inst);
+ PtrOperandIndex = Store->getPointerOperandIndex();
+ if (Store->isAtomic())
+ return "atomic store";
+ if (Store->isVolatile())
+ return "volatile store";
if (!isAllowedAlignment(Store->getAlignment(),
- Store->getValueOperand()->getType(),
- Store->isAtomic()))
+ Store->getValueOperand()->getType()))
return "bad alignment";
- PtrOperandIndex = 1;
if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex)))
return "bad pointer";
break;
}
- case Instruction::AtomicCmpXchg:
- case Instruction::AtomicRMW:
- PtrOperandIndex = 0;
- if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex)))
- return "bad pointer";
- break;
// Casts.
case Instruction::BitCast:
@@ -332,6 +378,7 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) {
isa<MDNode>(Arg)))
return "bad intrinsic operand";
}
+
// Disallow alignments other than 1 on memcpy() etc., for the
// same reason that we disallow them on integer loads and
// stores.
@@ -344,6 +391,30 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) {
return "bad alignment";
}
}
+
+ // Disallow NaCl atomic intrinsics which don't have valid
+ // constant NaCl::AtomicOperation and NaCl::MemoryOrder
+ // parameters.
+ switch (Call->getIntrinsicID()) {
+ default: break; // Non-atomic intrinsic.
+ case Intrinsic::nacl_atomic_load:
+ case Intrinsic::nacl_atomic_store:
+ case Intrinsic::nacl_atomic_rmw:
+ case Intrinsic::nacl_atomic_cmpxchg:
+ case Intrinsic::nacl_atomic_fence: {
+ // All overloads have memory order and RMW operation in the
+ // same parameter, arbitrarily use the I32 overload.
+ Type *T = Type::getInt32Ty(
+ Inst->getParent()->getParent()->getContext());
+ const NaCl::AtomicIntrinsics::AtomicIntrinsic *I =
+ AtomicIntrinsics->find(Call->getIntrinsicID(), T);
+ if (!hasAllowedAtomicMemoryOrder(I, Call))
+ return "invalid memory order";
+ if (!hasAllowedAtomicRMWOperation(I, Call))
+ return "invalid atomicRMW operation";
+ } break;
+ }
+
// Allow the instruction and skip the later checks.
return NULL;
}
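The hunks above make the function verifier reject raw atomic and volatile memory instructions and instead validate calls to the new llvm.nacl.atomic.* intrinsics: the memory-order and RMW-operation arguments must be compile-time constants strictly between the Invalid and Num sentinels, and for now only sequential consistency is accepted. A minimal standalone sketch of that range check, using placeholder enum values rather than the real NaCl headers:

// Standalone illustration only; the enum values are placeholders, not the
// actual NaCl ABI constants.
#include <cstdint>
#include <iostream>

enum MemoryOrder : uint64_t {
  MemoryOrderInvalid = 0,            // sentinel: never valid
  MemoryOrderRelaxed,
  MemoryOrderAcquire,
  MemoryOrderRelease,
  MemoryOrderAcquireRelease,
  MemoryOrderSequentiallyConsistent,
  MemoryOrderNum                     // sentinel: one past the last value
};

// Mirrors the verifier's rule: the operand must lie strictly between the
// Invalid and Num sentinels, and (for now) must be sequentially consistent.
static bool isAllowedMemoryOrder(uint64_t V) {
  if (V <= MemoryOrderInvalid || V >= MemoryOrderNum)
    return false;
  return V == MemoryOrderSequentiallyConsistent;
}

int main() {
  std::cout << isAllowedMemoryOrder(MemoryOrderSequentiallyConsistent)  // 1
            << isAllowedMemoryOrder(MemoryOrderAcquire)                 // 0
            << isAllowedMemoryOrder(42) << "\n";                        // 0
}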
diff --git a/lib/Analysis/NaCl/PNaClABIVerifyModule.cpp b/lib/Analysis/NaCl/PNaClABIVerifyModule.cpp
index 17852ebbef..a418246bae 100644
--- a/lib/Analysis/NaCl/PNaClABIVerifyModule.cpp
+++ b/lib/Analysis/NaCl/PNaClABIVerifyModule.cpp
@@ -13,14 +13,16 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Pass.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/NaCl.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -173,6 +175,7 @@ void PNaClABIVerifyModule::checkGlobalValueCommon(const GlobalValue *GV) {
AllowedIntrinsics::AllowedIntrinsics(LLVMContext *Context) : Context(Context) {
Type *I8Ptr = Type::getInt8PtrTy(*Context);
+ Type *I8 = Type::getInt8Ty(*Context);
Type *I16 = Type::getInt16Ty(*Context);
Type *I32 = Type::getInt32Ty(*Context);
Type *I64 = Type::getInt64Ty(*Context);
@@ -203,6 +206,15 @@ AllowedIntrinsics::AllowedIntrinsics(LLVMContext *Context) : Context(Context) {
addIntrinsic(Intrinsic::sqrt, Float);
addIntrinsic(Intrinsic::sqrt, Double);
+ Type *AtomicTypes[] = { I8, I16, I32, I64 };
+ for (size_t T = 0, E = array_lengthof(AtomicTypes); T != E; ++T) {
+ addIntrinsic(Intrinsic::nacl_atomic_load, AtomicTypes[T]);
+ addIntrinsic(Intrinsic::nacl_atomic_store, AtomicTypes[T]);
+ addIntrinsic(Intrinsic::nacl_atomic_rmw, AtomicTypes[T]);
+ addIntrinsic(Intrinsic::nacl_atomic_cmpxchg, AtomicTypes[T]);
+ }
+ addIntrinsic(Intrinsic::nacl_atomic_fence);
+
// Stack save and restore are used to support C99 VLAs.
addIntrinsic(Intrinsic::stacksave);
addIntrinsic(Intrinsic::stackrestore);
@@ -221,7 +233,7 @@ AllowedIntrinsics::AllowedIntrinsics(LLVMContext *Context) : Context(Context) {
bool AllowedIntrinsics::isAllowed(const Function *Func) {
// Keep 3 categories of intrinsics for now.
// (1) Allowed always, provided the exact name and type match.
- // (2) Never allowed
+ // (2) Never allowed.
// (3) "Dev" intrinsics, which may or may not be allowed.
// "Dev" intrinsics are controlled by the PNaClABIAllowDevIntrinsics flag.
// Please keep these sorted or grouped in a sensible way, within
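The module verifier's whitelist now admits each atomic intrinsic once per integer overload (i8, i16, i32, i64), plus the non-overloaded fence. A standalone sketch of that register-one-entry-per-overload pattern; the mangled names below follow LLVM's usual ".iN" overload suffixes and are illustrative, not copied from the verifier:

// Standalone sketch of an exact-name whitelist; not the verifier's real API.
#include <iostream>
#include <set>
#include <string>

int main() {
  std::set<std::string> Allowed;
  const char *Ops[] = { "load", "store", "rmw", "cmpxchg" };
  const char *Suffixes[] = { ".i8", ".i16", ".i32", ".i64" };
  // One whitelist entry per (operation, overload type) pair.
  for (const char *Op : Ops)
    for (const char *S : Suffixes)
      Allowed.insert(std::string("llvm.nacl.atomic.") + Op + S);
  Allowed.insert("llvm.nacl.atomic.fence");  // fence is not overloaded

  std::cout << Allowed.count("llvm.nacl.atomic.rmw.i32") << "\n";  // 1
  std::cout << Allowed.count("llvm.nacl.atomic.rmw.i1") << "\n";   // 0
}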
diff --git a/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.cpp b/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.cpp
index 5f14a639ba..0b7506fad6 100644
--- a/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.cpp
+++ b/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.cpp
@@ -1657,7 +1657,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
if (Stream.EnterSubBlock(naclbitc::FUNCTION_BLOCK_ID))
return Error("Malformed block record");
- InstructionList.clear();
unsigned ModuleValueListSize = ValueList.size();
// Add all the function arguments to the value table.
@@ -1732,7 +1731,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
int Opc = GetDecodedBinaryOpcode(Record[OpNum++], LHS->getType());
if (Opc == -1) return Error("Invalid BINOP record");
I = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
- InstructionList.push_back(I);
if (OpNum < Record.size()) {
if (Opc == Instruction::Add ||
Opc == Instruction::Sub ||
@@ -1779,7 +1777,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
if (Opc == -1 || ResTy == 0)
return Error("Invalid CAST record");
I = CastInst::Create((Instruction::CastOps)Opc, Op, ResTy);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_INBOUNDS_GEP:
@@ -1798,7 +1795,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
}
I = GetElementPtrInst::Create(BasePtr, GEPIdx);
- InstructionList.push_back(I);
if (BitCode == naclbitc::FUNC_CODE_INST_INBOUNDS_GEP)
cast<GetElementPtrInst>(I)->setIsInBounds(true);
break;
@@ -1821,7 +1817,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
}
I = ExtractValueInst::Create(Agg, EXTRACTVALIdx);
- InstructionList.push_back(I);
break;
}
@@ -1845,7 +1840,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
}
I = InsertValueInst::Create(Agg, Val, INSERTVALIdx);
- InstructionList.push_back(I);
break;
}
@@ -1860,7 +1854,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
return Error("Invalid SELECT record");
I = SelectInst::Create(Cond, TrueVal, FalseVal);
- InstructionList.push_back(I);
break;
}
@@ -1887,7 +1880,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
}
I = SelectInst::Create(Cond, TrueVal, FalseVal);
- InstructionList.push_back(I);
break;
}
@@ -1898,7 +1890,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
popValue(Record, &OpNum, NextValueNo, &Idx))
return Error("Invalid EXTRACTELT record");
I = ExtractElementInst::Create(Vec, Idx);
- InstructionList.push_back(I);
break;
}
@@ -1910,7 +1901,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
popValue(Record, &OpNum, NextValueNo, &Idx))
return Error("Invalid INSERTELT record");
I = InsertElementInst::Create(Vec, Elt, Idx);
- InstructionList.push_back(I);
break;
}
@@ -1924,7 +1914,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
if (popValue(Record, &OpNum, NextValueNo, &Mask))
return Error("Invalid SHUFFLEVEC record");
I = new ShuffleVectorInst(Vec1, Vec2, Mask);
- InstructionList.push_back(I);
break;
}
@@ -1946,7 +1935,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
I = new FCmpInst((FCmpInst::Predicate)Record[OpNum], LHS, RHS);
else
I = new ICmpInst((ICmpInst::Predicate)Record[OpNum], LHS, RHS);
- InstructionList.push_back(I);
break;
}
@@ -1955,7 +1943,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
unsigned Size = Record.size();
if (Size == 0) {
I = ReturnInst::Create(Context);
- InstructionList.push_back(I);
break;
}
@@ -1967,7 +1954,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
return Error("Invalid RET record");
I = ReturnInst::Create(Context, Op);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_BR: { // BR: [bb#, bb#, opval] or [bb#]
@@ -1979,7 +1965,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
if (Record.size() == 1) {
I = BranchInst::Create(TrueDest);
- InstructionList.push_back(I);
}
else {
BasicBlock *FalseDest = getBasicBlock(Record[1]);
@@ -1987,7 +1972,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
if (FalseDest == 0 || Cond == 0)
return Error("Invalid BR record");
I = BranchInst::Create(TrueDest, FalseDest, Cond);
- InstructionList.push_back(I);
}
break;
}
@@ -2006,7 +1990,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
unsigned NumCases = Record[3];
SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases);
- InstructionList.push_back(SI);
unsigned CurIdx = 4;
for (unsigned i = 0; i != NumCases; ++i) {
@@ -2053,7 +2036,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
return Error("Invalid INDIRECTBR record");
unsigned NumDests = Record.size()-2;
IndirectBrInst *IBI = IndirectBrInst::Create(Address, NumDests);
- InstructionList.push_back(IBI);
for (unsigned i = 0, e = NumDests; i != e; ++i) {
if (BasicBlock *DestBB = getBasicBlock(Record[2+i])) {
IBI->addDestination(DestBB);
@@ -2075,12 +2057,10 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
if (popValue(Record, &Idx, NextValueNo, &Val))
return Error("Invalid RESUME record");
I = ResumeInst::Create(Val);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE
I = new UnreachableInst(Context);
- InstructionList.push_back(I);
break;
case naclbitc::FUNC_CODE_INST_PHI: { // PHI: [ty, val0,bb0, ...]
if (Record.size() < 1 || ((Record.size()-1)&1))
@@ -2089,7 +2069,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
if (!Ty) return Error("Invalid PHI record");
PHINode *PN = PHINode::Create(Ty, (Record.size()-1)/2);
- InstructionList.push_back(PN);
for (unsigned i = 0, e = Record.size()-1; i != e; i += 2) {
Value *V;
@@ -2143,7 +2122,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
}
I = LP;
- InstructionList.push_back(I);
break;
}
@@ -2156,7 +2134,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
return Error("Invalid ALLOCA record");
unsigned Align = Record[1];
I = new AllocaInst(Type::getInt8Ty(Context), Size, (1 << Align) >> 1);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_LOAD: { // LOAD: [op, align, vol]
@@ -2167,7 +2144,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
return Error("Invalid LOAD record");
I = new LoadInst(Op, "", Record[OpNum+1], (1 << Record[OpNum]) >> 1);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_LOADATOMIC: {
@@ -2189,7 +2165,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
I = new LoadInst(Op, "", Record[OpNum+1], (1 << Record[OpNum]) >> 1,
Ordering, SynchScope);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_STORE: { // STORE2:[ptr, val, align, vol]
@@ -2201,7 +2176,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
return Error("Invalid STORE record");
I = new StoreInst(Val, Ptr, Record[OpNum+1], (1 << Record[OpNum]) >> 1);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_STOREATOMIC: {
@@ -2223,7 +2197,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
I = new StoreInst(Val, Ptr, Record[OpNum+1], (1 << Record[OpNum]) >> 1,
Ordering, SynchScope);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_CMPXCHG: {
@@ -2241,7 +2214,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+2]);
I = new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, SynchScope);
cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_ATOMICRMW: {
@@ -2262,7 +2234,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
cast<AtomicRMWInst>(I)->setVolatile(Record[OpNum+1]);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, synchscope]
@@ -2274,7 +2245,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
return Error("Invalid FENCE record");
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[1]);
I = new FenceInst(Context, Ordering, SynchScope);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_CALL: {
@@ -2319,7 +2289,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
}
I = CallInst::Create(Callee, Args);
- InstructionList.push_back(I);
cast<CallInst>(I)->setCallingConv(GetDecodedCallingConv(CCInfo>>1));
cast<CallInst>(I)->setTailCall(CCInfo & 1);
break;
@@ -2333,7 +2302,6 @@ bool NaClBitcodeReader::ParseFunctionBody(Function *F) {
if (!OpTy || !Op || !ResTy)
return Error("Invalid VAARG record");
I = new VAArgInst(Op, ResTy);
- InstructionList.push_back(I);
break;
}
case naclbitc::FUNC_CODE_INST_FORWARDTYPEREF:
diff --git a/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.h b/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.h
index 454875796a..4105eb53b8 100644
--- a/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.h
+++ b/lib/Bitcode/NaCl/Reader/NaClBitcodeReader.h
@@ -130,7 +130,6 @@ class NaClBitcodeReader : public GVMaterializer {
std::vector<Type*> TypeList;
NaClBitcodeReaderValueList ValueList;
- SmallVector<Instruction *, 64> InstructionList;
SmallVector<SmallVector<uint64_t, 64>, 64> UseListRecords;
std::vector<std::pair<GlobalAlias*, unsigned> > AliasInits;
diff --git a/lib/IR/CMakeLists.txt b/lib/IR/CMakeLists.txt
index c2a4ee3aae..e60a04ae3b 100644
--- a/lib/IR/CMakeLists.txt
+++ b/lib/IR/CMakeLists.txt
@@ -25,6 +25,7 @@ add_llvm_library(LLVMCore
LeakDetector.cpp
Metadata.cpp
Module.cpp
+ NaClAtomicIntrinsics.cpp
Pass.cpp
PassManager.cpp
PassRegistry.cpp
diff --git a/lib/IR/NaClAtomicIntrinsics.cpp b/lib/IR/NaClAtomicIntrinsics.cpp
new file mode 100644
index 0000000000..5f463380c4
--- /dev/null
+++ b/lib/IR/NaClAtomicIntrinsics.cpp
@@ -0,0 +1,84 @@
+//=== llvm/IR/NaClAtomicIntrinsics.cpp - NaCl Atomic Intrinsics -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes atomic intrinsic functions that are specific to NaCl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/NaClAtomicIntrinsics.h"
+#include "llvm/IR/Type.h"
+
+namespace llvm {
+
+namespace NaCl {
+
+AtomicIntrinsics::AtomicIntrinsics(LLVMContext &C) {
+ Type *IT[NumAtomicIntrinsicOverloadTypes] = { Type::getInt8Ty(C),
+ Type::getInt16Ty(C),
+ Type::getInt32Ty(C),
+ Type::getInt64Ty(C) };
+ size_t CurIntrin = 0;
+
+ // Initialize each of the atomic intrinsics and their overloads. They
+ // have up to 5 parameters, the following macro will take care of
+ // overloading.
+#define INIT(P0, P1, P2, P3, P4, INTRIN) \
+ do { \
+ for (size_t CurType = 0; CurType != NumAtomicIntrinsicOverloadTypes; \
+ ++CurType) { \
+ size_t Param = 0; \
+ I[CurIntrin][CurType].OverloadedType = IT[CurType]; \
+ I[CurIntrin][CurType].ID = Intrinsic::nacl_atomic_##INTRIN; \
+ I[CurIntrin][CurType].Overloaded = \
+ P0 == Int || P0 == Ptr || P1 == Int || P1 == Ptr || P2 == Int || \
+ P2 == Ptr || P3 == Int || P3 == Ptr || P4 == Int || P4 == Ptr; \
+ I[CurIntrin][CurType].NumParams = \
+ (P0 != NoP) + (P1 != NoP) + (P2 != NoP) + (P3 != NoP) + (P4 != NoP); \
+ I[CurIntrin][CurType].ParamType[Param++] = P0; \
+ I[CurIntrin][CurType].ParamType[Param++] = P1; \
+ I[CurIntrin][CurType].ParamType[Param++] = P2; \
+ I[CurIntrin][CurType].ParamType[Param++] = P3; \
+ I[CurIntrin][CurType].ParamType[Param++] = P4; \
+ } \
+ ++CurIntrin; \
+ } while (0)
+
+ INIT(Ptr, Mem, NoP, NoP, NoP, load);
+ INIT(Ptr, Int, Mem, NoP, NoP, store);
+ INIT(RMW, Ptr, Int, Mem, NoP, rmw);
+ INIT(Ptr, Int, Int, Mem, Mem, cmpxchg);
+ INIT(Mem, NoP, NoP, NoP, NoP, fence);
+}
+
+AtomicIntrinsics::View AtomicIntrinsics::allIntrinsicsAndOverloads() const {
+ return View(&I[0][0], NumAtomicIntrinsics * NumAtomicIntrinsicOverloadTypes);
+}
+
+AtomicIntrinsics::View AtomicIntrinsics::overloadsFor(Intrinsic::ID ID) const {
+ // Overloads are stored consecutively.
+ View R = allIntrinsicsAndOverloads();
+ for (const AtomicIntrinsic *AI = R.begin(), *E = R.end(); AI != E; ++AI)
+ if (AI->ID == ID)
+ return View(AI, NumAtomicIntrinsicOverloadTypes);
+ llvm_unreachable("unhandled atomic intrinsic");
+}
+
+const AtomicIntrinsics::AtomicIntrinsic *
+AtomicIntrinsics::find(Intrinsic::ID ID, Type *OverloadedType) const {
+ View R = allIntrinsicsAndOverloads();
+ for (const AtomicIntrinsic *AI = R.begin(), *E = R.end(); AI != E; ++AI)
+ if (AI->ID == ID && AI->OverloadedType == OverloadedType)
+ return AI;
+ llvm_unreachable("unhandled atomic intrinsic");
+}
+
+} // End NaCl namespace
+
+} // End llvm namespace
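NaClAtomicIntrinsics.cpp builds a flat descriptor table, one row per intrinsic and one column per integer overload, which find() and overloadsFor() then scan linearly. A standalone sketch of the same table-plus-linear-search pattern, with simplified placeholder IDs and types instead of the llvm::NaCl definitions:

// Standalone model of the descriptor table; IDs, parameter kinds, and types
// are simplified placeholders, not the llvm::NaCl definitions.
#include <cstddef>
#include <initializer_list>
#include <iostream>

enum ParamKind { NoP, Ptr, Int, RMW, Mem };
enum IntrinsicID { AtomicLoad, AtomicStore, AtomicRMW, AtomicCmpxchg,
                   AtomicFence, NumAtomicIntrinsics };
enum OverloadType { I8, I16, I32, I64, NumOverloadTypes };

struct AtomicIntrinsic {
  IntrinsicID ID;
  OverloadType Overload;
  ParamKind ParamType[5];
  size_t NumParams;
};

// Overloads of one intrinsic sit consecutively, mirroring the real layout.
static AtomicIntrinsic Table[NumAtomicIntrinsics][NumOverloadTypes];

static void init(IntrinsicID ID, std::initializer_list<ParamKind> Params) {
  for (int T = 0; T != NumOverloadTypes; ++T) {
    AtomicIntrinsic &AI = Table[ID][T];
    AI.ID = ID;
    AI.Overload = static_cast<OverloadType>(T);
    AI.NumParams = 0;
    for (ParamKind P : Params)
      if (P != NoP)
        AI.ParamType[AI.NumParams++] = P;
  }
}

// Linear search over the flat table, as in AtomicIntrinsics::find().
static const AtomicIntrinsic *find(IntrinsicID ID, OverloadType T) {
  for (auto &Row : Table)
    for (auto &AI : Row)
      if (AI.ID == ID && AI.Overload == T)
        return &AI;
  return nullptr;
}

int main() {
  init(AtomicLoad,    { Ptr, Mem });
  init(AtomicStore,   { Ptr, Int, Mem });
  init(AtomicRMW,     { RMW, Ptr, Int, Mem });
  init(AtomicCmpxchg, { Ptr, Int, Int, Mem, Mem });
  init(AtomicFence,   { Mem });

  const AtomicIntrinsic *AI = find(AtomicRMW, I32);
  std::cout << AI->NumParams << "\n";  // 4
}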
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index cfd0d95c77..791b960b0a 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -78,6 +78,24 @@ namespace {
return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
}
+ // @LOCALMOD-BEGIN
+ /// clearSymbolicDisplacement - Remove all sources of symbolic
+ /// constant displacement from the addressing mode. This is
+ /// needed when the symbolic constant is pulled out of the address
+ /// computation, e.g. when
+ /// ... DISP(%r15,%rax,1) ...
+ /// is replaced with
+ /// lea DISP(%rax),%tmp
+ /// ... (%r15,%tmp,1) ...
+ void clearSymbolicDisplacement() {
+ GV = 0;
+ CP = 0;
+ BlockAddr = 0;
+ ES = 0;
+ JT = -1;
+ }
+ // @LOCALMOD-END
+
bool hasBaseOrIndexReg() const {
return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
}
@@ -1646,16 +1664,31 @@ void X86DAGToDAGISel::LegalizeAddressingModeForNaCl(SDValue N,
// Case 2 above comprises two sub-cases:
// sub-case 1: Prevent negative indexes
- bool NeedsFixing1 =
- (AM.BaseType == X86ISelAddressMode::FrameIndexBase || AM.GV || AM.CP) &&
- AM.IndexReg.getNode() &&
- AM.Disp > 0;
- // sub-case 2: Both index and base registers are being used
+ // This is relevant only if there is an index register involved.
+ if (!AM.IndexReg.getNode())
+ return;
+
+ // There are two situations to deal with. The first is as described
+ // above, which is essentially a potentially negative index into an
+ // interior pointer to a stack-allocated structure. The second is a
+ // potentially negative index into an interior pointer to a global
+ // array. In this case, the global array will be a symbolic
+ // displacement. In theory, we could recognize that it is an
+ // interior pointer only when the concrete displacement AM.Disp is
+ // nonzero, but there is at least one test case (aha.c in the LLVM
+ // test suite) that incorrectly uses a negative index to dereference
+ // a global array, so we conservatively apply the translation to all
+ // global dereferences.
+ bool HasSymbolic = AM.hasSymbolicDisplacement();
+ bool NeedsFixing1 = HasSymbolic ||
+ (AM.BaseType == X86ISelAddressMode::FrameIndexBase && AM.Disp > 0);
+
+ // sub-case 2: Both index and base registers are being used. The
+ // test for index register being used is done above, so we only need
+ // to test for a base register being used.
bool NeedsFixing2 =
- (AM.BaseType == X86ISelAddressMode::RegBase) &&
- AM.Base_Reg.getNode() &&
- AM.IndexReg.getNode();
+ (AM.BaseType == X86ISelAddressMode::RegBase) && AM.Base_Reg.getNode();
if (!NeedsFixing1 && !NeedsFixing2)
return;
@@ -1675,8 +1708,18 @@ void X86DAGToDAGISel::LegalizeAddressingModeForNaCl(SDValue N,
NewNodes.push_back(ShlNode.getNode());
NewIndex = ShlNode;
}
- if (AM.Disp > 0) {
- SDValue DispNode = CurDAG->getConstant(AM.Disp, N.getValueType());
+ if (AM.Disp > 0 || HasSymbolic) {
+ // If the addressing mode has a potentially problematic
+ // displacement, we pull it out into a new instruction node. If
+ // it contains a symbolic displacement, we need to express it as a
+ // Wrapper node to make LLVM work.
+ SDValue Base, Scale, Index, Disp, Segment;
+ getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
+ SDValue DispNode;
+ if (HasSymbolic)
+ DispNode = CurDAG->getNode(X86ISD::Wrapper, dl, N.getValueType(), Disp);
+ else
+ DispNode = CurDAG->getConstant(AM.Disp, N.getValueType());
NewNodes.push_back(DispNode.getNode());
SDValue AddNode = CurDAG->getNode(ISD::ADD, dl, N.getValueType(),
@@ -1693,6 +1736,7 @@ void X86DAGToDAGISel::LegalizeAddressingModeForNaCl(SDValue N,
AM.setBaseReg(SDValue());
}
AM.Disp = 0;
+ AM.clearSymbolicDisplacement();
AM.Scale = 1;
AM.IndexReg = NewIndex;
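The LOCALMOD changes above preserve NaCl's x86-64 sandboxing invariant: any displacement that could take the address backwards (a concrete offset or a symbolic global address) is pulled out of the r15-based addressing mode, folded together with the scaled index into a 32-bit scratch value, and only then added to the reserved base register, so the effective address can never fall below the sandbox. A standalone arithmetic sketch of that invariant (the constants and register names are illustrative, not actual codegen):

// Illustrative model only: shows why truncating the untrusted part of the
// address to 32 bits before adding the sandbox base bounds the result.
#include <cstdint>
#include <iostream>

static uint64_t sandboxedAddress(uint64_t SandboxBase, uint64_t ArrayBase,
                                 int64_t Index, int64_t Scale, int64_t Disp) {
  // lea DISP(%rax,...),%tmp : everything untrusted collapses into 32 bits.
  uint32_t Tmp = static_cast<uint32_t>(ArrayBase + Index * Scale + Disp);
  // (%r15,%tmp,1) : the 64-bit add with the reserved base happens last.
  return SandboxBase + Tmp;
}

int main() {
  const uint64_t SandboxBase = 0x100000000ULL;  // hypothetical %r15 value
  uint64_t Addr = sandboxedAddress(SandboxBase, 0x1000, /*Index=*/-4,
                                   /*Scale=*/8, /*Disp=*/0x20);
  // Even with a negative index, the result stays within
  // [SandboxBase, SandboxBase + 4GiB).
  std::cout << std::hex << Addr << "\n";
  std::cout << (Addr >= SandboxBase &&
                Addr < SandboxBase + (1ULL << 32)) << "\n";  // 1
}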
diff --git a/lib/Transforms/NaCl/CMakeLists.txt b/lib/Transforms/NaCl/CMakeLists.txt
index 9cf164926b..e91d79f184 100644
--- a/lib/Transforms/NaCl/CMakeLists.txt
+++ b/lib/Transforms/NaCl/CMakeLists.txt
@@ -12,15 +12,16 @@ add_llvm_library(LLVMNaClTransforms
ExpandTlsConstantExpr.cpp
ExpandUtils.cpp
ExpandVarArgs.cpp
- InsertDivideCheck.cpp
FlattenGlobals.cpp
GlobalCleanup.cpp
+ InsertDivideCheck.cpp
PNaClABISimplify.cpp
PromoteI1Ops.cpp
PromoteIntegers.cpp
ReplacePtrsWithInts.cpp
- RewriteLLVMIntrinsics.cpp
ResolvePNaClIntrinsics.cpp
+ RewriteAtomics.cpp
+ RewriteLLVMIntrinsics.cpp
RewritePNaClLibraryCalls.cpp
StripAttributes.cpp
StripMetadata.cpp
diff --git a/lib/Transforms/NaCl/PNaClABISimplify.cpp b/lib/Transforms/NaCl/PNaClABISimplify.cpp
index f8f78135a8..189d87f2cb 100644
--- a/lib/Transforms/NaCl/PNaClABISimplify.cpp
+++ b/lib/Transforms/NaCl/PNaClABISimplify.cpp
@@ -94,6 +94,8 @@ void llvm::PNaClABISimplifyAddPostOptPasses(PassManager &PM) {
// ExpandGetElementPtr must follow ExpandConstantExpr to expand the
// getelementptr instructions it creates.
PM.add(createExpandGetElementPtrPass());
+ // Rewrite atomic and volatile instructions with intrinsic calls.
+ PM.add(createRewriteAtomicsPass());
// ReplacePtrsWithInts assumes that getelementptr instructions and
// ConstantExprs have already been expanded out.
PM.add(createReplacePtrsWithIntsPass());
diff --git a/lib/Transforms/NaCl/ReplacePtrsWithInts.cpp b/lib/Transforms/NaCl/ReplacePtrsWithInts.cpp
index 0a00fe4361..ae6015b54e 100644
--- a/lib/Transforms/NaCl/ReplacePtrsWithInts.cpp
+++ b/lib/Transforms/NaCl/ReplacePtrsWithInts.cpp
@@ -486,19 +486,19 @@ static void ConvertInstruction(DataLayout *DL, Type *IntPtrType,
Value *Alloca2 = new PtrToIntInst(Tmp, IntPtrType,
Tmp->getName() + ".asint", Inst);
FC->recordConvertedAndErase(Alloca, Alloca2);
- } else if (// These atomics only operate on integer pointers, not
- // other pointers, so we don't need to recreate the
- // instruction.
- isa<AtomicCmpXchgInst>(Inst) ||
- isa<AtomicRMWInst>(Inst) ||
- // Handle these instructions as a convenience to allow
+ } else if (// Handle these instructions as a convenience to allow
// the pass to be used in more situations, even though we
// don't expect them in PNaCl's stable ABI.
isa<GetElementPtrInst>(Inst) ||
isa<VAArgInst>(Inst) ||
isa<IndirectBrInst>(Inst) ||
isa<ExtractValueInst>(Inst) ||
- isa<InsertValueInst>(Inst)) {
+ isa<InsertValueInst>(Inst) ||
+ // These atomics only operate on integer pointers, not
+ // other pointers, so we don't need to recreate the
+ // instruction.
+ isa<AtomicCmpXchgInst>(Inst) ||
+ isa<AtomicRMWInst>(Inst)) {
FC->convertInPlace(Inst);
}
}
diff --git a/lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp b/lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp
index e4efeb67c3..9b526a102a 100644
--- a/lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp
+++ b/