diff options
author | JF Bastien <jfb@chromium.org> | 2013-07-13 13:29:35 -0700 |
---|---|---|
committer | JF Bastien <jfb@chromium.org> | 2013-07-13 13:29:35 -0700 |
commit | 4c1316ea42eb48ec8da6753f3e0319b676e50a75 (patch) | |
tree | 7ad96b9cafa6c054d79d91b16e89f89b1eaee2c8 /lib/Analysis | |
parent | c75199c649c739aade160289d93f257edc798cde (diff) |
Concurrency support for PNaCl ABI
Add portable support for concurrency in PNaCl's ABI:
- Promote volatile to atomic.
- Promote all memory ordering to sequential consistency.
- Rewrite all atomic operations to frozen NaCl intrinsics for pexe.
- Rewrite atomic intrinsics to LLVM instructions for translation.
This change also adds documentation to the PNaCl language reference, as
well as tests where it makes sense.
A future CL could clean up more of our code that mentions atomics,
volatiles, and memory orderings.
Multiple reviewers because this is a big patch:
- eliben: LLVM-fu and ResolvePNaClIntrinsics.
- dschuff: ABI stability.
- mseaborn: ABI stability.
- sehr: Tron-duty (fight for the user's programs to work).
BUG= https://code.google.com/p/nativeclient/issues/detail?id=3475
R=dschuff@chromium.org, eliben@chromium.org, sehr@google.com
TEST= (cd ./pnacl/build/llvm_x86_64; ninja check-all) && ./pnacl/test.sh test-x86-32 && ./pnacl/test.sh test-x86-64 && ./pnacl/test.sh test-arm && ./pnacl/test.sh test-x86-32-sbtc && ./pnacl/test.sh test-x86-64-sbtc && ./pnacl/test.sh test-arm-sbtc
Review URL: https://codereview.chromium.org/17777004
Diffstat (limited to 'lib/Analysis')
-rw-r--r-- | lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp | 119 | ||||
-rw-r--r-- | lib/Analysis/NaCl/PNaClABIVerifyModule.cpp | 16 |
2 files changed, 109 insertions, 26 deletions
diff --git a/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp b/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp index 80d7da3f19..5318fc8af0 100644 --- a/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp +++ b/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp @@ -12,6 +12,7 @@ // //===----------------------------------------------------------------------===// +#include "llvm/ADT/OwningPtr.h" #include "llvm/ADT/Twine.h" #include "llvm/Analysis/NaCl.h" #include "llvm/IR/Function.h" @@ -19,6 +20,8 @@ #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/NaClAtomicIntrinsics.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/raw_ostream.h" @@ -49,6 +52,10 @@ class PNaClABIVerifyFunctions : public FunctionPass { if (ReporterIsOwned) delete Reporter; } + virtual bool doInitialization(Module &M) { + AtomicIntrinsics.reset(new NaCl::AtomicIntrinsics(M.getContext())); + return false; + } bool runOnFunction(Function &F); virtual void print(raw_ostream &O, const Module *M) const; private: @@ -56,6 +63,7 @@ class PNaClABIVerifyFunctions : public FunctionPass { const char *checkInstruction(const Instruction *Inst); PNaClABIErrorReporter *Reporter; bool ReporterIsOwned; + OwningPtr<NaCl::AtomicIntrinsics> AtomicIntrinsics; }; } // and anonymous namespace @@ -144,16 +152,7 @@ static bool isValidScalarOperand(const Value *Val) { isa<UndefValue>(Val)); } -static bool isAllowedAlignment(unsigned Alignment, Type *Ty, bool IsAtomic) { - if (IsAtomic) { - // For atomic operations, the alignment must match the size of the type. 
- if (Ty->isIntegerTy()) { - unsigned Bits = Ty->getIntegerBitWidth(); - return Bits % 8 == 0 && Alignment == Bits / 8; - } - return (Ty->isDoubleTy() && Alignment == 8) || - (Ty->isFloatTy() && Alignment == 4); - } +static bool isAllowedAlignment(unsigned Alignment, Type *Ty) { // Non-atomic integer operations must always use "align 1", since we // do not want the backend to generate code with non-portable // undefined behaviour (such as misaligned access faults) if user @@ -169,6 +168,51 @@ static bool isAllowedAlignment(unsigned Alignment, Type *Ty, bool IsAtomic) { (Ty->isFloatTy() && Alignment == 4); } +static bool hasAllowedAtomicRMWOperation( + const NaCl::AtomicIntrinsics::AtomicIntrinsic *I, const CallInst *Call) { + for (size_t P = 0; P != I->NumParams; ++P) { + if (I->ParamType[P] != NaCl::AtomicIntrinsics::RMW) + continue; + + const Value *Operation = Call->getOperand(P); + if (!Operation) + return false; + const Constant *C = dyn_cast<Constant>(Operation); + if (!C) + return false; + const APInt &I = C->getUniqueInteger(); + if (I.ule(NaCl::AtomicInvalid) || I.uge(NaCl::AtomicNum)) + return false; + } + return true; +} + +static bool hasAllowedAtomicMemoryOrder( + const NaCl::AtomicIntrinsics::AtomicIntrinsic *I, const CallInst *Call) { + for (size_t P = 0; P != I->NumParams; ++P) { + if (I->ParamType[P] != NaCl::AtomicIntrinsics::Mem) + continue; + + const Value *MemoryOrder = Call->getOperand(P); + if (!MemoryOrder) + return false; + const Constant *C = dyn_cast<Constant>(MemoryOrder); + if (!C) + return false; + const APInt &I = C->getUniqueInteger(); + if (I.ule(NaCl::MemoryOrderInvalid) || I.uge(NaCl::MemoryOrderNum)) + return false; + // TODO For now only sequential consistency is allowed. When more + // are allowed we need to validate that the memory order is + // allowed on the specific atomic operation (e.g. no store + // acquire, and relationship between success/failure memory + // order on compare exchange). 
+ if (I != NaCl::MemoryOrderSequentiallyConsistent) + return false; + } + return true; +} + // Check the instruction's opcode and its operands. The operands may // require opcode-specific checking. // @@ -198,6 +242,10 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { // ExtractValue and InsertValue operate on struct values. case Instruction::ExtractValue: case Instruction::InsertValue: + // Atomics should become NaCl intrinsics. + case Instruction::AtomicCmpXchg: + case Instruction::AtomicRMW: + case Instruction::Fence: return "bad instruction opcode"; default: return "unknown instruction opcode"; @@ -216,8 +264,6 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { case Instruction::And: case Instruction::Or: case Instruction::Xor: - // Memory instructions - case Instruction::Fence: // Conversion operations case Instruction::Trunc: case Instruction::ZExt: @@ -256,32 +302,32 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { // Memory accesses. 
case Instruction::Load: { const LoadInst *Load = cast<LoadInst>(Inst); + PtrOperandIndex = Load->getPointerOperandIndex(); + if (Load->isAtomic()) + return "atomic load"; + if (Load->isVolatile()) + return "volatile load"; if (!isAllowedAlignment(Load->getAlignment(), - Load->getType(), - Load->isAtomic())) + Load->getType())) return "bad alignment"; - PtrOperandIndex = 0; if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex))) return "bad pointer"; break; } case Instruction::Store: { const StoreInst *Store = cast<StoreInst>(Inst); + PtrOperandIndex = Store->getPointerOperandIndex(); + if (Store->isAtomic()) + return "atomic store"; + if (Store->isVolatile()) + return "volatile store"; if (!isAllowedAlignment(Store->getAlignment(), - Store->getValueOperand()->getType(), - Store->isAtomic())) + Store->getValueOperand()->getType())) return "bad alignment"; - PtrOperandIndex = 1; if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex))) return "bad pointer"; break; } - case Instruction::AtomicCmpXchg: - case Instruction::AtomicRMW: - PtrOperandIndex = 0; - if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex))) - return "bad pointer"; - break; // Casts. case Instruction::BitCast: @@ -332,6 +378,7 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { isa<MDNode>(Arg))) return "bad intrinsic operand"; } + // Disallow alignments other than 1 on memcpy() etc., for the // same reason that we disallow them on integer loads and // stores. @@ -344,6 +391,30 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { return "bad alignment"; } } + + // Disallow NaCl atomic intrinsics which don't have valid + // constant NaCl::AtomicOperation and NaCl::MemoryOrder + // parameters. + switch (Call->getIntrinsicID()) { + default: break; // Non-atomic intrinsic. 
+ case Intrinsic::nacl_atomic_load: + case Intrinsic::nacl_atomic_store: + case Intrinsic::nacl_atomic_rmw: + case Intrinsic::nacl_atomic_cmpxchg: + case Intrinsic::nacl_atomic_fence: { + // All overloads have memory order and RMW operation in the + // same parameter, arbitrarily use the I32 overload. + Type *T = Type::getInt32Ty( + Inst->getParent()->getParent()->getContext()); + const NaCl::AtomicIntrinsics::AtomicIntrinsic *I = + AtomicIntrinsics->find(Call->getIntrinsicID(), T); + if (!hasAllowedAtomicMemoryOrder(I, Call)) + return "invalid memory order"; + if (!hasAllowedAtomicRMWOperation(I, Call)) + return "invalid atomicRMW operation"; + } break; + } + // Allow the instruction and skip the later checks. return NULL; } diff --git a/lib/Analysis/NaCl/PNaClABIVerifyModule.cpp b/lib/Analysis/NaCl/PNaClABIVerifyModule.cpp index 17852ebbef..a418246bae 100644 --- a/lib/Analysis/NaCl/PNaClABIVerifyModule.cpp +++ b/lib/Analysis/NaCl/PNaClABIVerifyModule.cpp @@ -13,14 +13,16 @@ // //===----------------------------------------------------------------------===// -#include "llvm/Pass.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/Twine.h" #include "llvm/Analysis/NaCl.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" +#include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" @@ -173,6 +175,7 @@ void PNaClABIVerifyModule::checkGlobalValueCommon(const GlobalValue *GV) { AllowedIntrinsics::AllowedIntrinsics(LLVMContext *Context) : Context(Context) { Type *I8Ptr = Type::getInt8PtrTy(*Context); + Type *I8 = Type::getInt8Ty(*Context); Type *I16 = Type::getInt16Ty(*Context); Type *I32 = Type::getInt32Ty(*Context); Type *I64 = Type::getInt64Ty(*Context); @@ -203,6 +206,15 @@ AllowedIntrinsics::AllowedIntrinsics(LLVMContext *Context) : Context(Context) { addIntrinsic(Intrinsic::sqrt, Float); 
addIntrinsic(Intrinsic::sqrt, Double); + Type *AtomicTypes[] = { I8, I16, I32, I64 }; + for (size_t T = 0, E = array_lengthof(AtomicTypes); T != E; ++T) { + addIntrinsic(Intrinsic::nacl_atomic_load, AtomicTypes[T]); + addIntrinsic(Intrinsic::nacl_atomic_store, AtomicTypes[T]); + addIntrinsic(Intrinsic::nacl_atomic_rmw, AtomicTypes[T]); + addIntrinsic(Intrinsic::nacl_atomic_cmpxchg, AtomicTypes[T]); + } + addIntrinsic(Intrinsic::nacl_atomic_fence); + // Stack save and restore are used to support C99 VLAs. addIntrinsic(Intrinsic::stacksave); addIntrinsic(Intrinsic::stackrestore); @@ -221,7 +233,7 @@ AllowedIntrinsics::AllowedIntrinsics(LLVMContext *Context) : Context(Context) { bool AllowedIntrinsics::isAllowed(const Function *Func) { // Keep 3 categories of intrinsics for now. // (1) Allowed always, provided the exact name and type match. - // (2) Never allowed + // (2) Never allowed. // (3) "Dev" intrinsics, which may or may not be allowed. // "Dev" intrinsics are controlled by the PNaClABIAllowDevIntrinsics flag. // Please keep these sorted or grouped in a sensible way, within |