27 files changed, 927 insertions, 2 deletions
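The patch adds overloaded __atomic_* builtins plus two fences. For orientation, a hypothetical usage sketch (not part of the patch): the ordering arguments use the integer encoding assumed by the CodeGen changes below (0 = relaxed, 1 = consume, 2 = acquire, 3 = release, 4 = acq_rel, 5 = seq_cst), and the exact signatures are enforced by the new custom type checking in Sema, most of which is outside this excerpt.

    // Hypothetical usage of the new builtins; illustrative only.
    _Atomic(int) counter;

    void demo(void) {
      __atomic_store(&counter, 1, 5);                // seq_cst store
      int old = __atomic_fetch_add(&counter, 2, 5);  // returns the old value
      int expected = old + 2;
      // Compare-exchange takes separate success/failure orderings; on failure
      // the observed value is written back through the second argument.
      int ok = __atomic_compare_exchange_strong(&counter, &expected, 0, 5, 5);
      int now = __atomic_load(&counter, 5);
      __atomic_thread_fence(5);
      (void)ok; (void)now;
    }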
diff --git a/include/clang/AST/Expr.h b/include/clang/AST/Expr.h
index 882124bfdd..d7b6247b63 100644
--- a/include/clang/AST/Expr.h
+++ b/include/clang/AST/Expr.h
@@ -4162,6 +4162,152 @@ public:
   // Iterators
   child_range children() { return child_range(&SrcExpr, &SrcExpr+1); }
 };
+
+/// AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*,
+/// __atomic_load, __atomic_store, and __atomic_compare_exchange_*, for the
+/// similarly-named C++0x functions. All of these builtins take one primary
+/// pointer and at least one memory order.
+class AtomicExpr : public Expr {
+public:
+  enum AtomicOp { Load, Store, CmpXchgStrong, CmpXchgWeak, Xchg,
+                  Add, Sub, And, Or, Xor };
+private:
+  enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, END_EXPR };
+  Stmt* SubExprs[END_EXPR];
+  unsigned NumSubExprs;
+  SourceLocation BuiltinLoc, RParenLoc;
+  AtomicOp Op;
+
+public:
+  // Constructor for Load
+  AtomicExpr(SourceLocation BLoc, Expr *ptr, Expr *order, QualType t,
+             AtomicOp op, SourceLocation RP,
+             bool TypeDependent, bool ValueDependent)
+    : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
+           TypeDependent, ValueDependent,
+           ptr->isInstantiationDependent(),
+           ptr->containsUnexpandedParameterPack()),
+      BuiltinLoc(BLoc), RParenLoc(RP), Op(op) {
+    assert(op == Load && "single-argument atomic must be load");
+    SubExprs[PTR] = ptr;
+    SubExprs[ORDER] = order;
+    NumSubExprs = 2;
+  }
+
+  // Constructor for Store, Xchg, Add, Sub, And, Or, Xor
+  AtomicExpr(SourceLocation BLoc, Expr *ptr, Expr *val, Expr *order,
+             QualType t, AtomicOp op, SourceLocation RP,
+             bool TypeDependent, bool ValueDependent)
+    : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
+           TypeDependent, ValueDependent,
+           (ptr->isInstantiationDependent() ||
+            val->isInstantiationDependent()),
+           (ptr->containsUnexpandedParameterPack() ||
+            val->containsUnexpandedParameterPack())),
+      BuiltinLoc(BLoc), RParenLoc(RP), Op(op) {
+    assert(!isCmpXChg() && op != Load &&
+           "two-argument atomic must be store or binop");
+    SubExprs[PTR] = ptr;
+    SubExprs[ORDER] = order;
+    SubExprs[VAL1] = val;
+    NumSubExprs = 3;
+  }
+
+  // Constructor for CmpXchgStrong, CmpXchgWeak
+  AtomicExpr(SourceLocation BLoc, Expr *ptr, Expr *val1, Expr *val2,
+             Expr *order, Expr *order_fail, QualType t, AtomicOp op,
+             SourceLocation RP, bool TypeDependent, bool ValueDependent)
+    : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
+           TypeDependent, ValueDependent,
+           (ptr->isInstantiationDependent() ||
+            val1->isInstantiationDependent() ||
+            val2->isInstantiationDependent()),
+           (ptr->containsUnexpandedParameterPack() ||
+            val1->containsUnexpandedParameterPack() ||
+            val2->containsUnexpandedParameterPack())),
+      BuiltinLoc(BLoc), RParenLoc(RP), Op(op) {
+    assert(isCmpXChg() && "three-argument atomic must be cmpxchg");
+    SubExprs[PTR] = ptr;
+    SubExprs[ORDER] = order;
+    SubExprs[VAL1] = val1;
+    SubExprs[VAL2] = val2;
+    SubExprs[ORDER_FAIL] = order_fail;
+    NumSubExprs = 5;
+  }
+
+  /// \brief Build an empty AtomicExpr.
+  explicit AtomicExpr(EmptyShell Empty) : Expr(AtomicExprClass, Empty) { }
+
+  Expr *getPtr() const {
+    return cast<Expr>(SubExprs[PTR]);
+  }
+  void setPtr(Expr *E) {
+    SubExprs[PTR] = E;
+  }
+  Expr *getOrder() const {
+    return cast<Expr>(SubExprs[ORDER]);
+  }
+  void setOrder(Expr *E) {
+    SubExprs[ORDER] = E;
+  }
+  Expr *getVal1() const {
+    assert(NumSubExprs >= 3);
+    return cast<Expr>(SubExprs[VAL1]);
+  }
+  void setVal1(Expr *E) {
+    assert(NumSubExprs >= 3);
+    SubExprs[VAL1] = E;
+  }
+  Expr *getOrderFail() const {
+    assert(NumSubExprs == 5);
+    return cast<Expr>(SubExprs[ORDER_FAIL]);
+  }
+  void setOrderFail(Expr *E) {
+    assert(NumSubExprs == 5);
+    SubExprs[ORDER_FAIL] = E;
+  }
+  Expr *getVal2() const {
+    assert(NumSubExprs == 5);
+    return cast<Expr>(SubExprs[VAL2]);
+  }
+  void setVal2(Expr *E) {
+    assert(NumSubExprs == 5);
+    SubExprs[VAL2] = E;
+  }
+
+  AtomicOp getOp() const { return Op; }
+  void setOp(AtomicOp op) { Op = op; }
+  unsigned getNumSubExprs() { return NumSubExprs; }
+  void setNumSubExprs(unsigned num) { NumSubExprs = num; }
+
+  bool isVolatile() const {
+    return getPtr()->getType()->getPointeeType().isVolatileQualified();
+  }
+
+  bool isCmpXChg() const {
+    return getOp() == AtomicExpr::CmpXchgStrong ||
+           getOp() == AtomicExpr::CmpXchgWeak;
+  }
+
+  SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
+  void setBuiltinLoc(SourceLocation L) { BuiltinLoc = L; }
+
+  SourceLocation getRParenLoc() const { return RParenLoc; }
+  void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+  SourceRange getSourceRange() const {
+    return SourceRange(BuiltinLoc, RParenLoc);
+  }
+  static bool classof(const Stmt *T) {
+    return T->getStmtClass() == AtomicExprClass;
+  }
+  static bool classof(const AtomicExpr *) { return true; }
+
+  // Iterators
+  child_range children() {
+    return child_range(SubExprs, SubExprs+NumSubExprs);
+  }
+};
 }  // end namespace clang
 
 #endif
diff --git a/include/clang/AST/RecursiveASTVisitor.h b/include/clang/AST/RecursiveASTVisitor.h
index a224bb0a40..0ec09c9b09 100644
--- a/include/clang/AST/RecursiveASTVisitor.h
+++ b/include/clang/AST/RecursiveASTVisitor.h
@@ -1992,6 +1992,7 @@ DEF_TRAVERSE_STMT(SizeOfPackExpr, { })
 DEF_TRAVERSE_STMT(SubstNonTypeTemplateParmPackExpr, { })
 DEF_TRAVERSE_STMT(SubstNonTypeTemplateParmExpr, { })
 DEF_TRAVERSE_STMT(MaterializeTemporaryExpr, { })
+DEF_TRAVERSE_STMT(AtomicExpr, { })
 
 // These literals (all of them) do not need any action.
 DEF_TRAVERSE_STMT(IntegerLiteral, { })
diff --git a/include/clang/Basic/Builtins.def b/include/clang/Basic/Builtins.def
index 50b51c7a4f..60bcde372a 100644
--- a/include/clang/Basic/Builtins.def
+++ b/include/clang/Basic/Builtins.def
@@ -585,7 +585,18 @@ BUILTIN(__sync_swap_4, "iiD*i.", "n")
 BUILTIN(__sync_swap_8, "LLiLLiD*LLi.", "n")
 BUILTIN(__sync_swap_16, "LLLiLLLiD*LLLi.", "n")
 
-
+BUILTIN(__atomic_load, "v.", "t")
+BUILTIN(__atomic_store, "v.", "t")
+BUILTIN(__atomic_exchange, "v.", "t")
+BUILTIN(__atomic_compare_exchange_strong, "v.", "t")
+BUILTIN(__atomic_compare_exchange_weak, "v.", "t")
+BUILTIN(__atomic_fetch_add, "v.", "t")
+BUILTIN(__atomic_fetch_sub, "v.", "t")
+BUILTIN(__atomic_fetch_and, "v.", "t")
+BUILTIN(__atomic_fetch_or, "v.", "t")
+BUILTIN(__atomic_fetch_xor, "v.", "t")
+BUILTIN(__atomic_thread_fence, "vi", "t")
+BUILTIN(__atomic_signal_fence, "vi", "t")
 
 // Non-overloaded atomic builtins.
 BUILTIN(__sync_synchronize, "v.", "n")
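Unlike the fences, which have a fixed "vi" (void, taking int) signature, the overloaded operations are declared with a placeholder "v." signature; the "t" attribute presumably routes them through the custom type checking added to Sema below (SemaAtomicOpsOverloaded), which derives the real result type from the atomic pointee. An illustrative consequence (hypothetical):

    // The result type follows the _Atomic pointee rather than a fixed
    // prototype (illustrative; enforced by SemaAtomicOpsOverloaded).
    _Atomic(long) big;
    _Atomic(short) small;

    void widths(void) {
      long a = __atomic_fetch_add(&big, 1L, 5);    // result has type long
      short b = __atomic_fetch_add(&small, 1, 5);  // result has type short
      (void)a; (void)b;
    }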
diff --git a/include/clang/Basic/DiagnosticSemaKinds.td b/include/clang/Basic/DiagnosticSemaKinds.td
index a1be656fac..240401b393 100644
--- a/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/include/clang/Basic/DiagnosticSemaKinds.td
@@ -4012,6 +4012,15 @@ def err_atomic_builtin_must_be_pointer_intptr : Error<
 def err_atomic_builtin_pointer_size : Error<
   "first argument to atomic builtin must be a pointer to 1,2,4,8 or 16 byte "
   "type (%0 invalid)">;
+def err_atomic_op_needs_atomic : Error<
+  "first argument to atomic operation must be a pointer to _Atomic "
+  "type (%0 invalid)">;
+def err_atomic_op_needs_atomic_int_or_ptr : Error<
+  "first argument to atomic operation must be a pointer to atomic "
+  "integer or pointer (%0 invalid)">;
+def err_atomic_op_logical_needs_atomic_int : Error<
+  "first argument to logical atomic operation must be a pointer to atomic "
+  "integer (%0 invalid)">;
 
 def err_deleted_function_use : Error<"attempt to use a deleted function">;
diff --git a/include/clang/Basic/StmtNodes.td b/include/clang/Basic/StmtNodes.td
index 73996e43d5..7b3d7762c2 100644
--- a/include/clang/Basic/StmtNodes.td
+++ b/include/clang/Basic/StmtNodes.td
@@ -78,6 +78,9 @@ def ParenListExpr : DStmt<Expr>;
 def VAArgExpr : DStmt<Expr>;
 def GenericSelectionExpr : DStmt<Expr>;
 
+// Atomic expressions
+def AtomicExpr : DStmt<Expr>;
+
 // GNU Extensions.
 def AddrLabelExpr : DStmt<Expr>;
 def StmtExpr : DStmt<Expr>;
diff --git a/include/clang/Sema/Sema.h b/include/clang/Sema/Sema.h
index 39c9899761..fec14c5826 100644
--- a/include/clang/Sema/Sema.h
+++ b/include/clang/Sema/Sema.h
@@ -6084,6 +6084,8 @@ private:
   bool SemaBuiltinObjectSize(CallExpr *TheCall);
   bool SemaBuiltinLongjmp(CallExpr *TheCall);
   ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
+  ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
+                                     AtomicExpr::AtomicOp Op);
   bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                               llvm::APSInt &Result);
diff --git a/include/clang/Serialization/ASTBitCodes.h b/include/clang/Serialization/ASTBitCodes.h
index 9a8eba2427..1efd754217 100644
--- a/include/clang/Serialization/ASTBitCodes.h
+++ b/include/clang/Serialization/ASTBitCodes.h
@@ -973,7 +973,9 @@ namespace clang {
       EXPR_BLOCK_DECL_REF,
       /// \brief A GenericSelectionExpr record.
       EXPR_GENERIC_SELECTION,
-
+      /// \brief An AtomicExpr record.
+      EXPR_ATOMIC,
+
       // Objective-C
 
       /// \brief An ObjCStringLiteral record.
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index dc37ac9226..465b490d47 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -1538,6 +1538,7 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
   }
   case CompoundAssignOperatorClass:
   case VAArgExprClass:
+  case AtomicExprClass:
     return false;
 
   case ConditionalOperatorClass: {
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index 624e9d2944..49c68213aa 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -162,6 +162,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
   case Expr::SubstNonTypeTemplateParmPackExprClass:
   case Expr::AsTypeExprClass:
   case Expr::ObjCIndirectCopyRestoreExprClass:
+  case Expr::AtomicExprClass:
    return Cl::CL_PRValue;
 
   // Next come the complicated cases.
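The three diagnostics correspond to the argument checks: every operation requires a pointer to an _Atomic type, the arithmetic forms additionally require an atomic integer or pointer, and the and/or/xor forms require an atomic integer. For example (hypothetical code; diagnostic wording as defined above):

    int plain;
    _Atomic(float) f;

    void rejects(void) {
      __atomic_load(&plain, 5);      // error: first argument to atomic
                                     // operation must be a pointer to
                                     // _Atomic type ('int *' invalid)
      __atomic_fetch_and(&f, 1, 5);  // error: ...logical atomic operation
                                     // must be a pointer to atomic integer
    }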
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index c0f913d7ed..85cb40f9e0 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -2857,6 +2857,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
   case Expr::AsTypeExprClass:
   case Expr::ObjCIndirectCopyRestoreExprClass:
   case Expr::MaterializeTemporaryExprClass:
+  case Expr::AtomicExprClass:
     return ICEDiag(2, E->getLocStart());
 
   case Expr::InitListExprClass:
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index 2b93250fad..d4ac7229b2 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -2255,6 +2255,7 @@ recurse:
   case Expr::CXXNoexceptExprClass:
   case Expr::CUDAKernelCallExprClass:
   case Expr::AsTypeExprClass:
+  case Expr::AtomicExprClass:
   {
     // As bad as this diagnostic is, it's better than crashing.
     DiagnosticsEngine &Diags = Context.getDiags();
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index dfa2612152..78d9a596fb 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -1011,6 +1011,59 @@ void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) {
   OS << ")";
 }
 
+void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
+  const char *Name;
+  switch (Node->getOp()) {
+    case AtomicExpr::Load:
+      Name = "__atomic_load(";
+      break;
+    case AtomicExpr::Store:
+      Name = "__atomic_store(";
+      break;
+    case AtomicExpr::CmpXchgStrong:
+      Name = "__atomic_compare_exchange_strong(";
+      break;
+    case AtomicExpr::CmpXchgWeak:
+      Name = "__atomic_compare_exchange_weak(";
+      break;
+    case AtomicExpr::Xchg:
+      Name = "__atomic_exchange(";
+      break;
+    case AtomicExpr::Add:
+      Name = "__atomic_fetch_add(";
+      break;
+    case AtomicExpr::Sub:
+      Name = "__atomic_fetch_sub(";
+      break;
+    case AtomicExpr::And:
+      Name = "__atomic_fetch_and(";
+      break;
+    case AtomicExpr::Or:
+      Name = "__atomic_fetch_or(";
+      break;
+    case AtomicExpr::Xor:
+      Name = "__atomic_fetch_xor(";
+      break;
+  }
+  OS << Name;
+  PrintExpr(Node->getPtr());
+  OS << ", ";
+  if (Node->getOp() != AtomicExpr::Load) {
+    PrintExpr(Node->getVal1());
+    OS << ", ";
+  }
+  if (Node->isCmpXChg()) {
+    PrintExpr(Node->getVal2());
+    OS << ", ";
+  }
+  PrintExpr(Node->getOrder());
+  if (Node->isCmpXChg()) {
+    OS << ", ";
+    PrintExpr(Node->getOrderFail());
+  }
+  OS << ")";
+}
+
 // C++
 void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
   const char *OpStrings[NUM_OVERLOADED_OPERATORS] = {
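Per the printer logic, subexpressions come out in source order: the pointer, then the value operand(s) for non-loads, then the success ordering, with the failure ordering last for the compare-exchange forms. A weak compare-exchange therefore prints as, for example (hypothetical operand names):

    __atomic_compare_exchange_weak(&obj, &expected, desired, 5, 5)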
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index 12321ef0d6..df49e843f9 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -468,6 +468,10 @@ void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
   }
 }
 
+void StmtProfiler::VisitAtomicExpr(const AtomicExpr *S) {
+  VisitExpr(S);
+}
+
 static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
                                           UnaryOperatorKind &UnaryOp,
                                           BinaryOperatorKind &BinaryOp) {
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index f7179befeb..ec0ca42422 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -948,6 +948,72 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     return RValue::get(0);
   }
 
+  case Builtin::BI__atomic_thread_fence:
+  case Builtin::BI__atomic_signal_fence: {
+    llvm::SynchronizationScope Scope;
+    if (BuiltinID == Builtin::BI__atomic_signal_fence)
+      Scope = llvm::SingleThread;
+    else
+      Scope = llvm::CrossThread;
+    Value *Order = EmitScalarExpr(E->getArg(0));
+    if (isa<llvm::ConstantInt>(Order)) {
+      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+      switch (ord) {
+      case 0:  // memory_order_relaxed
+      default: // invalid order
+        break;
+      case 1:  // memory_order_consume
+      case 2:  // memory_order_acquire
+        Builder.CreateFence(llvm::Acquire, Scope);
+        break;
+      case 3:  // memory_order_release
+        Builder.CreateFence(llvm::Release, Scope);
+        break;
+      case 4:  // memory_order_acq_rel
+        Builder.CreateFence(llvm::AcquireRelease, Scope);
+        break;
+      case 5:  // memory_order_seq_cst
+        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+        break;
+      }
+      return RValue::get(0);
+    }
+
+    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
+    AcquireBB = createBasicBlock("acquire", CurFn);
+    ReleaseBB = createBasicBlock("release", CurFn);
+    AcqRelBB = createBasicBlock("acqrel", CurFn);
+    SeqCstBB = createBasicBlock("seqcst", CurFn);
+    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
+
+    Builder.SetInsertPoint(AcquireBB);
+    Builder.CreateFence(llvm::Acquire, Scope);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(1), AcquireBB);
+    SI->addCase(Builder.getInt32(2), AcquireBB);
+
+    Builder.SetInsertPoint(ReleaseBB);
+    Builder.CreateFence(llvm::Release, Scope);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(3), ReleaseBB);
+
+    Builder.SetInsertPoint(AcqRelBB);
+    Builder.CreateFence(llvm::AcquireRelease, Scope);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(4), AcqRelBB);
+
+    Builder.SetInsertPoint(SeqCstBB);
+    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+    Builder.SetInsertPoint(ContBB);
+    return RValue::get(0);
+  }
+
   // Library functions with special handling.
   case Builtin::BIsqrt:
   case Builtin::BIsqrtf:
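With a constant ordering, the fast path above emits a single fence instruction (or nothing at all for relaxed or invalid orders); only a non-constant ordering pays for the runtime switch. A sketch of the expected lowering, with the IR shown in comments (LLVM 3.0-era syntax; illustrative only):

    void fences(void) {
      __atomic_thread_fence(5);  // => fence seq_cst
      __atomic_signal_fence(2);  // => fence singlethread acquire
      __atomic_thread_fence(0);  // relaxed: no fence emitted
    }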
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index b242061cf1..cb60df181d 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -2478,3 +2478,280 @@ EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
 
   return MakeAddrLValue(AddV, MPT->getPointeeType());
 }
+
+static void
+EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
+             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
+             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
+  if (E->isCmpXChg()) {
+    // Note that cmpxchg only supports specifying one ordering and
+    // doesn't support weak cmpxchg, at least at the moment.
+    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+    LoadVal1->setAlignment(Align);
+    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
+    LoadVal2->setAlignment(Align);
+    llvm::AtomicCmpXchgInst *CXI =
+        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
+    CXI->setVolatile(E->isVolatile());
+    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
+    StoreVal1->setAlignment(Align);
+    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
+    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
+    return;
+  }
+
+  if (E->getOp() == AtomicExpr::Load) {
+    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
+    Load->setAtomic(Order);
+    Load->setAlignment(Size);
+    Load->setVolatile(E->isVolatile());
+    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
+    StoreDest->setAlignment(Align);
+    return;
+  }
+
+  if (E->getOp() == AtomicExpr::Store) {
+    assert(!Dest && "Store does not return a value");
+    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+    LoadVal1->setAlignment(Align);
+    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
+    Store->setAtomic(Order);
+    Store->setAlignment(Size);
+    Store->setVolatile(E->isVolatile());
+    return;
+  }
+
+  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+  switch (E->getOp()) {
+  case AtomicExpr::CmpXchgWeak:
+  case AtomicExpr::CmpXchgStrong:
+  case AtomicExpr::Store:
+  case AtomicExpr::Load:  assert(0 && "Already handled!");
+  case AtomicExpr::Add:   Op = llvm::AtomicRMWInst::Add;  break;
+  case AtomicExpr::Sub:   Op = llvm::AtomicRMWInst::Sub;  break;
+  case AtomicExpr::And:   Op = llvm::AtomicRMWInst::And;  break;
+  case AtomicExpr::Or:    Op = llvm::AtomicRMWInst::Or;   break;
+  case AtomicExpr::Xor:   Op = llvm::AtomicRMWInst::Xor;  break;
+  case AtomicExpr::Xchg:  Op = llvm::AtomicRMWInst::Xchg; break;
+  }
+  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+  LoadVal1->setAlignment(Align);
+  llvm::AtomicRMWInst *RMWI =
+      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
+  RMWI->setVolatile(E->isVolatile());
+  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest);
+  StoreDest->setAlignment(Align);
+}
+
+// This function emits any expression (scalar, complex, or aggregate)
+// into a temporary alloca.
+static llvm::Value *
+EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
+  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
+  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
+                       /*Init*/ true);
+  return DeclPtr;
+}
+
+static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
+                                  llvm::Value *Dest) {
+  if (Ty->isAnyComplexType())
+    return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
+  if (CGF.hasAggregateLLVMType(Ty))
+    return RValue::getAggregate(Dest);
+  return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
+}
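All the non-compare-exchange forms funnel into a single atomicrmw (or an atomic load/store), with both the value operand and the result passed through memory temporaries. Roughly, for a seq_cst fetch-add on an _Atomic(int), the sequence looks like this (sketch in comments; old typed-pointer IR syntax, temporary names taken from the code above):

    // int old = __atomic_fetch_add(&counter, 2, 5); roughly becomes:
    //   %1 = load i32* %.atomictmp         ; Val1 read back from its temporary
    //   %2 = atomicrmw add i32* %counter, i32 %1 seq_cst
    //   store i32 %2, i32* %.atomicdst     ; old value returned through Dest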
+RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
+  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
+  QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType();
+  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
+  uint64_t Size = sizeChars.getQuantity();
+  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
+  unsigned Align = alignChars.getQuantity();
+  // FIXME: Bound on Size should not be hardcoded.
+  bool UseLibcall = (sizeChars != alignChars || !llvm::isPowerOf2_64(Size) ||
+                     Size > 8);
+
+  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
+  Ptr = EmitScalarExpr(E->getPtr());
+  Order = EmitScalarExpr(E->getOrder());
+  if (E->isCmpXChg()) {
+    Val1 = EmitScalarExpr(E->getVal1());
+    Val2 = EmitValToTemp(*this, E->getVal2());
+    OrderFail = EmitScalarExpr(E->getOrderFail());
+    (void)OrderFail;  // OrderFail is unused at the moment
+  } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) &&
+             MemTy->isPointerType()) {
+    // For pointers, we're required to do a bit of math: adding 1 to an int*
+    // is not the same as adding 1 to a uintptr_t.
+    QualType Val1Ty = E->getVal1()->getType();
+    llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+    uint64_t PointeeIncAmt =
+        getContext().getTypeSizeInChars(MemTy->getPointeeType()).getQuantity();
+    llvm::Value *PointeeIncAmtVal =
+        llvm::ConstantInt::get(Val1Scalar->getType(), PointeeIncAmt);
+    Val1Scalar = Builder.CreateMul(Val1Scalar, PointeeIncAmtVal);
+    Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
+    EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+  } else if (E->getOp() != AtomicExpr::Load) {
+    Val1 = EmitValToTemp(*this, E->getVal1());
+  }
+
+  if (E->getOp() != AtomicExpr::Store && !Dest)
+    Dest = CreateMemTemp(E->getType(), ".atomicdst");
+
+  if (UseLibcall) {
+    // FIXME: Finalize what the libcalls are actually supposed to look like.
+    // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
+    return EmitUnsupportedRValue(E, "atomic library call");
+  }
+#if 0
+  if (UseLibcall) {
+    const char* LibCallName;
+    switch (E->getOp()) {
+    case AtomicExpr::CmpXchgWeak:
+      LibCallName = "__atomic_compare_exchange_generic"; break;
+    case AtomicExpr::CmpXchgStrong:
+      LibCallName = "__atomic_compare_exchange_generic"; break;
+    case AtomicExpr::Add:   LibCallName = "__atomic_fetch_add_generic"; break;
+    case AtomicExpr::Sub:   LibCallName = "__atomic_fetch_sub_generic"; break;
+    case AtomicExpr::And:   LibCallName = "__atomic_fetch_and_generic"; break;
+    case AtomicExpr::Or:    LibCallName = "__atomic_fetch_or_generic"; break;
+    case AtomicExpr::Xor:   LibCallName = "__atomic_fetch_xor_generic"; break;
+    case AtomicExpr::Xchg:  LibCallName = "__atomic_exchange_generic"; break;
+    case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break;
+    case AtomicExpr::Load:  LibCallName = "__atomic_load_generic"; break;
+    }
+    llvm::SmallVector<QualType, 4> Params;
+    CallArgList Args;
+    QualType RetTy = getContext().VoidTy;
+    if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg())
+      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+               getContext().VoidPtrTy);
+    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
+             getContext().VoidPtrTy);
+    if (E->getOp() != AtomicExpr::Load)
+      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+               getContext().VoidPtrTy);
+    if (E->isCmpXChg()) {
+      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
+               getContext().VoidPtrTy);
+      RetTy = getContext().IntTy;
+    }
+    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+             getContext().getSizeType());
+    const CGFunctionInfo &FuncInfo =
+        CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo());
+    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false);
+    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+    if (E->isCmpXChg())
+      return Res;
+    if (E->getOp() == AtomicExpr::Store)
+      return RValue::get(0);
+    return ConvertTempToRValue(*this, E->getType(), Dest);
+  }
+#endif
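Note the pointer-arithmetic scaling performed above for Add/Sub on atomic pointers: the value operand is pre-multiplied by the pointee size, since the atomicrmw is emitted on the integer representation of the pointer. The effect, illustratively:

    _Atomic(int *) cursor;

    void advance(void) {
      // Moves the pointer forward by 4 elements, i.e. 4 * sizeof(int) bytes,
      // matching ordinary pointer arithmetic; the multiply happens before
      // the atomicrmw is emitted.
      __atomic_fetch_add(&cursor, 4, 5);
    }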
+  llvm::Type *IPtrTy =
+      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
+  llvm::Value *OrigDest = Dest;
+  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
+  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
+  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
+  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
+
+  if (isa<llvm::ConstantInt>(Order)) {
+    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+    switch (ord) {
+    case 0:  // memory_order_relaxed
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                   llvm::Monotonic);
+      break;
+    case 1:  // memory_order_consume
+    case 2:  // memory_order_acquire
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                   llvm::Acquire);
+      break;
+    case 3:  // memory_order_release
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                   llvm::Release);
+      break;
+    case 4:  // memory_order_acq_rel
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                   llvm::AcquireRelease);
+      break;
+    case 5:  // memory_order_seq_cst
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                   llvm::SequentiallyConsistent);
+      break;
+    default:  // invalid order
+      // We should not ever get here normally, but it's hard to
+      // enforce that in general.
+      break;
+    }
+    if (E->getOp() == AtomicExpr::Store)
+      return RValue::get(0);
+    return ConvertTempToRValue(*this, E->getType(), OrigDest);
+  }
+
+  // Long case, when Order isn't obviously constant.
+
+  // Create all the relevant BB's
+  llvm::BasicBlock *MonotonicBB, *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
+  MonotonicBB = createBasicBlock("monotonic", CurFn);
+  if (E->getOp() != AtomicExpr::Store)
+    AcquireBB = createBasicBlock("acquire", CurFn);
+  if (E->getOp() != AtomicExpr::Load)
+    ReleaseBB = createBasicBlock("release", CurFn);
+  if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store)
+    AcqRelBB = createBasicBlock("acqrel", CurFn);
+  SeqCstBB = createBasicBlock("seqcst", CurFn);
+  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+  // Create the switch for the split
+  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
+  // doesn't matter unless someone is crazy enough to use something that
+  // doesn't fold to a constant for the ordering.
+  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
+
+  // Emit all the different atomics
+  Builder.SetInsertPoint(MonotonicBB);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+               llvm::Monotonic);
+  Builder.CreateBr(ContBB);
+  if (E->getOp() != AtomicExpr::Store) {
+    Builder.SetInsertPoint(AcquireBB);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                 llvm::Acquire);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(1), AcquireBB);
+    SI->addCase(Builder.getInt32(2), AcquireBB);
+  }
+  if (E->getOp() != AtomicExpr::Load) {
+    Builder.SetInsertPoint(ReleaseBB);
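When the ordering is not a compile-time constant, this path mirrors the fence lowering earlier in the patch: a switch on the order operand feeds one basic block per ordering, and blocks that make no sense for the operation (release for a load, acquire for a store) are simply never created, so those order values fall through to the monotonic default. For an atomic load the resulting CFG looks roughly like this (sketch in comments; illustrative only):

    // int v = __atomic_load(&counter, order); with non-constant 'order':
    //   switch i32 %order, label %monotonic [ i32 1, label %acquire
    //                                         i32 2, label %acquire
    //                                         i32 5, label %seqcst ]
    // monotonic:                          ; default: relaxed or invalid order
    //   %v0 = load atomic i32* %counter monotonic, align 4
    //   br label %atomic.continue
    // acquire:
    //   %v1 = load atomic i32* %counter acquire, align 4
    //   br label %atomic.continue
    // seqcst:
    //   %v2 = load atomic i32* %counter seq_cst, align 4
    //   br label %atomic.continue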