-rw-r--r--  include/clang/AST/Expr.h                    |  37
-rw-r--r--  include/clang/Basic/Builtins.def            |  66
-rw-r--r--  include/clang/Basic/DiagnosticSemaKinds.td  |  41
-rw-r--r--  lib/AST/Expr.cpp                            |  46
-rw-r--r--  lib/AST/StmtPrinter.cpp                     |  54
-rw-r--r--  lib/CodeGen/CGExpr.cpp                      | 225
-rw-r--r--  lib/Sema/SemaChecking.cpp                   | 310
-rw-r--r--  test/CodeGen/atomic-ops.c                   | 143
-rw-r--r--  test/CodeGen/atomic_init.c                  |  14
-rw-r--r--  test/Misc/serialized-diags.c                |   3
-rw-r--r--  test/Sema/atomic-ops.c                      |  74
11 files changed, 732 insertions, 281 deletions
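In brief: the patch retires the old placeholder __atomic_* builtins and implements the
GNU-compatible family (__atomic_load{,_n}, __atomic_store{,_n}, __atomic_exchange{,_n},
__atomic_compare_exchange{,_n}, and the fetch-op/op-fetch pairs), routing all of them
through AtomicExpr alongside the existing __c11_atomic_* builtins. As a rough usage
sketch of what the patch enables (variable and function names here are illustrative,
not from the patch; the signatures and __ATOMIC_* constants follow the documented GNU
builtin interface):

#include <stdbool.h>

/* Sketch: exercising the GNU __atomic builtins this patch implements.
 * `counter`, `bump_if_expected`, and `next_ticket` are illustrative names. */
static int counter;

bool bump_if_expected(int expected) {
  /* The _n variants pass and return values directly. */
  int observed = __atomic_load_n(&counter, __ATOMIC_ACQUIRE);
  if (observed != expected)
    return false;
  /* The weak flag and the separate success/failure orderings map onto the
   * WEAK and ORDER_FAIL subexpression slots added to AtomicExpr below. */
  return __atomic_compare_exchange_n(&counter, &expected, expected + 1,
                                     /*weak=*/false,
                                     __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
}

int next_ticket(void) {
  /* __atomic_add_fetch returns the updated value; __atomic_fetch_add would
   * return the prior one (see the PostOp re-application in CGExpr.cpp). */
  return __atomic_add_fetch(&counter, 1, __ATOMIC_SEQ_CST);
}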
diff --git a/include/clang/AST/Expr.h b/include/clang/AST/Expr.h
index 0db9195a27..558bd00ba9 100644
--- a/include/clang/AST/Expr.h
+++ b/include/clang/AST/Expr.h
@@ -4470,14 +4470,21 @@ public:
 /// AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*,
 /// __atomic_load, __atomic_store, and __atomic_compare_exchange_*, for the
-/// similarly-named C++11 instructions. All of these instructions take one
-/// primary pointer and at least one memory order.
+/// similarly-named C++11 instructions, and __c11 variants for <stdatomic.h>.
+/// All of these instructions take one primary pointer and at least one memory
+/// order.
 class AtomicExpr : public Expr {
 public:
-  enum AtomicOp { Load, Store, CmpXchgStrong, CmpXchgWeak, Xchg,
-                  Add, Sub, And, Or, Xor, Init };
+  enum AtomicOp {
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) AO ## ID,
+#include "clang/Basic/Builtins.def"
+    // Avoid trailing comma
+    BI_First = 0
+  };
+
 private:
-  enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, END_EXPR };
+  enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, WEAK, END_EXPR };
   Stmt* SubExprs[END_EXPR];
   unsigned NumSubExprs;
   SourceLocation BuiltinLoc, RParenLoc;
@@ -4503,19 +4510,25 @@ public:
     return cast<Expr>(SubExprs[ORDER]);
   }
   Expr *getVal1() const {
-    if (Op == Init)
+    if (Op == AO__c11_atomic_init)
       return cast<Expr>(SubExprs[ORDER]);
-    assert(NumSubExprs >= 3);
+    assert(NumSubExprs > VAL1);
     return cast<Expr>(SubExprs[VAL1]);
   }
   Expr *getOrderFail() const {
-    assert(NumSubExprs == 5);
+    assert(NumSubExprs > ORDER_FAIL);
     return cast<Expr>(SubExprs[ORDER_FAIL]);
   }
   Expr *getVal2() const {
-    assert(NumSubExprs == 5);
+    if (Op == AO__atomic_exchange)
+      return cast<Expr>(SubExprs[ORDER_FAIL]);
+    assert(NumSubExprs > VAL2);
     return cast<Expr>(SubExprs[VAL2]);
   }
+  Expr *getWeak() const {
+    assert(NumSubExprs > WEAK);
+    return cast<Expr>(SubExprs[WEAK]);
+  }
   AtomicOp getOp() const { return Op; }
   unsigned getNumSubExprs() { return NumSubExprs; }
@@ -4527,8 +4540,10 @@ public:
   }

   bool isCmpXChg() const {
-    return getOp() == AtomicExpr::CmpXchgStrong ||
-           getOp() == AtomicExpr::CmpXchgWeak;
+    return getOp() == AO__c11_atomic_compare_exchange_strong ||
+           getOp() == AO__c11_atomic_compare_exchange_weak ||
+           getOp() == AO__atomic_compare_exchange ||
+           getOp() == AO__atomic_compare_exchange_n;
   }

   SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
diff --git a/include/clang/Basic/Builtins.def b/include/clang/Basic/Builtins.def
index 82f0463d8c..2823190ca4 100644
--- a/include/clang/Basic/Builtins.def
+++ b/include/clang/Basic/Builtins.def
@@ -594,37 +594,55 @@ BUILTIN(__sync_swap_4, "iiD*i.", "tn")
 BUILTIN(__sync_swap_8, "LLiLLiD*LLi.", "tn")
 BUILTIN(__sync_swap_16, "LLLiLLLiD*LLLi.", "tn")

+// Some of our atomics builtins are handled by AtomicExpr rather than
+// as normal builtin CallExprs. This macro is used for such builtins.
+#ifndef ATOMIC_BUILTIN
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
 // C11 _Atomic operations for <stdatomic.h>.
-BUILTIN(__c11_atomic_load, "v.", "t")
-BUILTIN(__c11_atomic_store, "v.", "t")
-BUILTIN(__c11_atomic_exchange, "v.", "t")
-BUILTIN(__c11_atomic_compare_exchange_strong, "v.", "t")
-BUILTIN(__c11_atomic_compare_exchange_weak, "v.", "t")
-BUILTIN(__c11_atomic_fetch_add, "v.", "t")
-BUILTIN(__c11_atomic_fetch_sub, "v.", "t")
-BUILTIN(__c11_atomic_fetch_and, "v.", "t")
-BUILTIN(__c11_atomic_fetch_or, "v.", "t")
-BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_init, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_load, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_store, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_exchange, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_compare_exchange_strong, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_compare_exchange_weak, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_sub, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_and, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_or, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
 BUILTIN(__c11_atomic_thread_fence, "vi", "n")
 BUILTIN(__c11_atomic_signal_fence, "vi", "n")
-BUILTIN(__c11_atomic_init, "v.", "t")
 BUILTIN(__c11_atomic_is_lock_free, "iz", "n")

-// FIXME: Convert these to implementing GNU atomic builtins.
-BUILTIN(__atomic_load, "v.", "t")
-BUILTIN(__atomic_store, "v.", "t")
-BUILTIN(__atomic_exchange, "v.", "t")
-BUILTIN(__atomic_compare_exchange_strong, "v.", "t")
-BUILTIN(__atomic_compare_exchange_weak, "v.", "t")
-BUILTIN(__atomic_fetch_add, "v.", "t")
-BUILTIN(__atomic_fetch_sub, "v.", "t")
-BUILTIN(__atomic_fetch_and, "v.", "t")
-BUILTIN(__atomic_fetch_or, "v.", "t")
-BUILTIN(__atomic_fetch_xor, "v.", "t")
+// GNU atomic builtins.
+ATOMIC_BUILTIN(__atomic_load, "v.", "t")
+ATOMIC_BUILTIN(__atomic_load_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_store, "v.", "t")
+ATOMIC_BUILTIN(__atomic_store_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_exchange, "v.", "t")
+ATOMIC_BUILTIN(__atomic_exchange_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_compare_exchange, "v.", "t")
+ATOMIC_BUILTIN(__atomic_compare_exchange_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_sub, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_and, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_or, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__atomic_add_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_sub_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_and_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_or_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_xor_fetch, "v.", "t")
+BUILTIN(__atomic_test_and_set, "vv*i", "n")
+BUILTIN(__atomic_clear, "vb*i", "n")
 BUILTIN(__atomic_thread_fence, "vi", "n")
 BUILTIN(__atomic_signal_fence, "vi", "n")
-BUILTIN(__atomic_init, "v.", "t")
-BUILTIN(__atomic_is_lock_free, "iz", "n")
+BUILTIN(__atomic_always_lock_free, "izv*", "n")
+BUILTIN(__atomic_is_lock_free, "izv*", "n")
+
+#undef ATOMIC_BUILTIN

 // Non-overloaded atomic builtins.
 BUILTIN(__sync_synchronize, "v.", "n")
diff --git a/include/clang/Basic/DiagnosticSemaKinds.td b/include/clang/Basic/DiagnosticSemaKinds.td
index 39bd5ac94e..86d139da0c 100644
--- a/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/include/clang/Basic/DiagnosticSemaKinds.td
@@ -4367,22 +4367,22 @@ def ext_typecheck_convert_pointer_int : ExtWarn<
   "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
   " %0 "
   "%select{from|to parameter of type|from a function with result type|to type|"
-  "with an expression of type|to parameter of type|to type}2 %1; "
-  "%select{|dereference with *|"
-  "take the address with &|"
-  "remove *|"
-  "remove &}3">,
+  "with an expression of type|to parameter of type|to type}2 %1"
+  "%select{|; dereference with *|"
+  "; take the address with &|"
+  "; remove *|"
+  "; remove &}3">,
   InGroup<IntConversion>;
 def ext_typecheck_convert_int_pointer : ExtWarn<
   "incompatible integer to pointer conversion "
   "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
   " %0 "
   "%select{from|to parameter of type|from a function with result type|to type|"
-  "with an expression of type|to parameter of type|to type}2 %1; "
-  "%select{|dereference with *|"
-  "take the address with &|"
-  "remove *|"
-  "remove &}3">,
+  "with an expression of type|to parameter of type|to type}2 %1"
+  "%select{|; dereference with *|"
+  "; take the address with &|"
+  "; remove *|"
+  "; remove &}3">,
   InGroup<IntConversion>;
 def ext_typecheck_convert_pointer_void_func : Extension<
   "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
@@ -4403,10 +4403,10 @@ def ext_typecheck_convert_incompatible_pointer : ExtWarn<
   " %0 "
   "%select{from|to parameter of type|from a function with result type|to type|"
   "with an expression of type|to parameter of type|to type}2 %1"
-  "%select{|dereference with *|"
-  "take the address with &|"
-  "remove *|"
-  "remove &}3">,
+  "%select{|; dereference with *|"
+  "; take the address with &|"
+  "; remove *|"
+  "; remove &}3">,
   InGroup<IncompatiblePointerTypes>;
 def ext_typecheck_convert_discards_qualifiers : ExtWarn<
   "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
@@ -4522,12 +4522,15 @@ def err_atomic_builtin_pointer_size : Error<
 def err_atomic_op_needs_atomic : Error<
   "first argument to atomic operation must be a pointer to _Atomic "
   "type (%0 invalid)">;
+def err_atomic_op_needs_trivial_copy : Error<
+  "first argument to atomic operation must be a pointer to a trivially-copyable"
+  " type (%0 invalid)">;
 def err_atomic_op_needs_atomic_int_or_ptr : Error<
-  "first argument to atomic operation must be a pointer to atomic "
-  "integer or pointer (%0 invalid)">;
-def err_atomic_op_logical_needs_atomic_int : Error<
-  "first argument to logical atomic operation must be a pointer to atomic "
-  "integer (%0 invalid)">;
+  "first argument to atomic operation must be a pointer to %select{|atomic }0"
+  "integer or pointer (%1 invalid)">;
+def err_atomic_op_bitwise_needs_atomic_int : Error<
+  "first argument to bitwise atomic operation must be a pointer to "
+  "%select{|atomic }0integer (%1 invalid)">;
 def err_deleted_function_use : Error<"attempt to use a deleted function">;
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 2bb79a0a4a..eb185b2c5a 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -3858,20 +3858,44 @@ AtomicExpr::AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr,
 unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
   switch (Op) {
-  case Init:
-  case Load:
+  case AO__c11_atomic_init:
+  case AO__c11_atomic_load:
+  case AO__atomic_load_n:
     return 2;
-  case Store:
-  case Xchg:
-  case Add:
-  case Sub:
-  case And:
-  case Or:
-  case Xor:
+
+  case AO__c11_atomic_store:
+  case AO__c11_atomic_exchange:
+  case AO__atomic_load:
+  case AO__atomic_store:
+  case AO__atomic_store_n:
+  case AO__atomic_exchange_n:
+  case AO__c11_atomic_fetch_add:
+  case AO__c11_atomic_fetch_sub:
+  case AO__c11_atomic_fetch_and:
+  case AO__c11_atomic_fetch_or:
+  case AO__c11_atomic_fetch_xor:
+  case AO__atomic_fetch_add:
+  case AO__atomic_fetch_sub:
+  case AO__atomic_fetch_and:
+  case AO__atomic_fetch_or:
+  case AO__atomic_fetch_xor:
+  case AO__atomic_add_fetch:
+  case AO__atomic_sub_fetch:
+  case AO__atomic_and_fetch:
+  case AO__atomic_or_fetch:
+  case AO__atomic_xor_fetch:
     return 3;
-  case CmpXchgStrong:
-  case CmpXchgWeak:
+
+  case AO__atomic_exchange:
+    return 4;
+
+  case AO__c11_atomic_compare_exchange_strong:
+  case AO__c11_atomic_compare_exchange_weak:
     return 5;
+
+  case AO__atomic_compare_exchange:
+  case AO__atomic_compare_exchange_n:
+    return 6;
   }
   llvm_unreachable("unknown atomic op");
 }
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 651b88b5d3..3a44183e20 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -1108,52 +1108,34 @@ void StmtPrinter::VisitPseudoObjectExpr(PseudoObjectExpr *Node) {
 void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
   const char *Name = 0;
   switch (Node->getOp()) {
-  case AtomicExpr::Init:
-    Name = "__c11_atomic_init(";
-    break;
-  case AtomicExpr::Load:
-    Name = "__c11_atomic_load(";
-    break;
-  case AtomicExpr::Store:
-    Name = "__c11_atomic_store(";
-    break;
-  case AtomicExpr::CmpXchgStrong:
-    Name = "__c11_atomic_compare_exchange_strong(";
-    break;
-  case AtomicExpr::CmpXchgWeak:
-    Name = "__c11_atomic_compare_exchange_weak(";
-    break;
-  case AtomicExpr::Xchg:
-    Name = "__c11_atomic_exchange(";
-    break;
-  case AtomicExpr::Add:
-    Name = "__c11_atomic_fetch_add(";
-    break;
-  case AtomicExpr::Sub:
-    Name = "__c11_atomic_fetch_sub(";
-    break;
-  case AtomicExpr::And:
-    Name = "__c11_atomic_fetch_and(";
-    break;
-  case AtomicExpr::Or:
-    Name = "__c11_atomic_fetch_or(";
-    break;
-  case AtomicExpr::Xor:
-    Name = "__c11_atomic_fetch_xor(";
-    break;
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+  case AtomicExpr::AO ## ID: \
+    Name = #ID "("; \
+    break;
+#include "clang/Basic/Builtins.def"
   }
   OS << Name;
+
+  // AtomicExpr stores its subexpressions in a permuted order.
   PrintExpr(Node->getPtr());
   OS << ", ";
-  if (Node->getOp() != AtomicExpr::Load) {
+  if (Node->getOp() != AtomicExpr::AO__c11_atomic_load &&
+      Node->getOp() != AtomicExpr::AO__atomic_load_n) {
     PrintExpr(Node->getVal1());
     OS << ", ";
   }
-  if (Node->isCmpXChg()) {
+  if (Node->getOp() == AtomicExpr::AO__atomic_exchange ||
+      Node->isCmpXChg()) {
     PrintExpr(Node->getVal2());
     OS << ", ";
   }
-  if (Node->getOp() != AtomicExpr::Init)
+  if (Node->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+      Node->getOp() == AtomicExpr::AO__atomic_compare_exchange_n) {
+    PrintExpr(Node->getWeak());
+    OS << ", ";
+  }
+  if (Node->getOp() != AtomicExpr::AO__c11_atomic_init)
     PrintExpr(Node->getOrder());
   if (Node->isCmpXChg()) {
     OS << ", ";
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 260fa5b529..147e7276bc 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -2688,7 +2688,17 @@ static void
 EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
              llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
              uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
-  if (E->isCmpXChg()) {
+  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+
+  switch (E->getOp()) {
+  case AtomicExpr::AO__c11_atomic_init:
+    llvm_unreachable("Already handled!");
+
+  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+  case AtomicExpr::AO__atomic_compare_exchange:
+  case AtomicExpr::AO__atomic_compare_exchange_n: {
     // Note that cmpxchg only supports specifying one ordering and
     // doesn't support weak cmpxchg, at least at the moment.
     llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
@@ -2705,7 +2715,9 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
     return;
   }

-  if (E->getOp() == AtomicExpr::Load) {
+  case AtomicExpr::AO__c11_atomic_load:
+  case AtomicExpr::AO__atomic_load_n:
+  case AtomicExpr::AO__atomic_load: {
     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
     Load->setAtomic(Order);
     Load->setAlignment(Size);
@@ -2715,7 +2727,9 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
     return;
   }

-  if (E->getOp() == AtomicExpr::Store) {
+  case AtomicExpr::AO__c11_atomic_store:
+  case AtomicExpr::AO__atomic_store:
+  case AtomicExpr::AO__atomic_store_n: {
     assert(!Dest && "Store does not return a value");
     llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
     LoadVal1->setAlignment(Align);
@@ -2726,26 +2740,66 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
     return;
   }

-  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
-  switch (E->getOp()) {
-  case AtomicExpr::CmpXchgWeak:
-  case AtomicExpr::CmpXchgStrong:
-  case AtomicExpr::Store:
-  case AtomicExpr::Init:
-  case AtomicExpr::Load: assert(0 && "Already handled!");
-  case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break;
-  case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break;
-  case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break;
-  case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break;
-  case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break;
-  case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break;
+  case AtomicExpr::AO__c11_atomic_exchange:
+  case AtomicExpr::AO__atomic_exchange_n:
+  case AtomicExpr::AO__atomic_exchange:
+    Op = llvm::AtomicRMWInst::Xchg;
+    break;
+
+  case AtomicExpr::AO__atomic_add_fetch:
+    PostOp = llvm::Instruction::Add;
+    // Fall through.
+  case AtomicExpr::AO__c11_atomic_fetch_add:
+  case AtomicExpr::AO__atomic_fetch_add:
+    Op = llvm::AtomicRMWInst::Add;
+    break;
+
+  case AtomicExpr::AO__atomic_sub_fetch:
+    PostOp = llvm::Instruction::Sub;
+    // Fall through.
+  case AtomicExpr::AO__c11_atomic_fetch_sub:
+  case AtomicExpr::AO__atomic_fetch_sub:
+    Op = llvm::AtomicRMWInst::Sub;
+    break;
+
+  case AtomicExpr::AO__atomic_and_fetch:
+    PostOp = llvm::Instruction::And;
+    // Fall through.
+  case AtomicExpr::AO__c11_atomic_fetch_and:
+  case AtomicExpr::AO__atomic_fetch_and:
+    Op = llvm::AtomicRMWInst::And;
+    break;
+
+  case AtomicExpr::AO__atomic_or_fetch:
+    PostOp = llvm::Instruction::Or;
+    // Fall through.
+  case AtomicExpr::AO__c11_atomic_fetch_or:
+  case AtomicExpr::AO__atomic_fetch_or:
+    Op = llvm::AtomicRMWInst::Or;
+    break;
+
+  case AtomicExpr::AO__atomic_xor_fetch:
+    PostOp = llvm::Instruction::Xor;
+    // Fall through.
+  case AtomicExpr::AO__c11_atomic_fetch_xor:
+  case AtomicExpr::AO__atomic_fetch_xor:
+    Op = llvm::AtomicRMWInst::Xor;
+    break;
   }
+
   llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
   LoadVal1->setAlignment(Align);
   llvm::AtomicRMWInst *RMWI =
       CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
   RMWI->setVolatile(E->isVolatile());
-  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest);
+
+  // For __atomic_*_fetch operations, perform the operation again to
+  // determine the value which was written.
+  llvm::Value *Result = RMWI;
+  if (PostOp)
+    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
+
+  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
   StoreDest->setAlignment(Align);
 }
@@ -2770,7 +2824,9 @@ static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
-  QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType();
+  QualType MemTy = AtomicTy;
+  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
+    MemTy = AT->getValueType();
   CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
   uint64_t Size = sizeChars.getQuantity();
   CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
@@ -2784,7 +2840,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
   llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
   Ptr = EmitScalarExpr(E->getPtr());

-  if (E->getOp() == AtomicExpr::Init) {
+  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
     assert(!Dest && "Init does not return a value");
     if (!hasAggregateLLVMType(E->getVal1()->getType())) {
       llvm::StoreInst *Store =
@@ -2805,26 +2861,80 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
   }

   Order = EmitScalarExpr(E->getOrder());
-  if (E->isCmpXChg()) {
+
+  switch (E->getOp()) {
+  case AtomicExpr::AO__c11_atomic_init:
+    llvm_unreachable("Already handled!");
+
+  case AtomicExpr::AO__c11_atomic_load:
+  case AtomicExpr::AO__atomic_load_n:
+    break;
+
+  case AtomicExpr::AO__atomic_load:
+    Dest = EmitScalarExpr(E->getVal1());
+    break;
+
+  case AtomicExpr::AO__atomic_store:
+    Val1 = EmitScalarExpr(E->getVal1());
+    break;
+
+  case AtomicExpr::AO__atomic_exchange:
+    Val1 = EmitScalarExpr(E->getVal1());
+    Dest = EmitScalarExpr(E->getVal2());
+    break;
+
+  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+  case AtomicExpr::AO__atomic_compare_exchange_n:
+  case AtomicExpr::AO__atomic_compare_exchange:
     Val1 = EmitScalarExpr(E->getVal1());
-    Val2 = EmitValToTemp(*this, E->getVal2());
+    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
+      Val2 = EmitScalarExpr(E->getVal2());
+    else
+      Val2 = EmitValToTemp(*this, E->getVal2());
     OrderFail = EmitScalarExpr(E->getOrderFail());
-  } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) &&
-             MemTy->isPointerType()) {
-    // For pointers, we're required to do a bit of math: adding 1 to an int*
-    // is not the same as adding 1 to a uintptr_t.
-    QualType Val1Ty = E->getVal1()->getType();
-    llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
-    CharUnits PointeeIncAmt =
-        getContext().getTypeSizeInChars(MemTy->getPointeeType());
-    Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
-    Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
-    EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
-  } else if (E->getOp() != AtomicExpr::Load) {
+    // Evaluate and discard the 'weak' argument.
+    if (E->getNumSubExprs() == 6)
+      EmitScalarExpr(E->getWeak());
+    break;
+
+  case AtomicExpr::AO__c11_atomic_fetch_add:
+  case AtomicExpr::AO__c11_atomic_fetch_sub:
+  case AtomicExpr::AO__atomic_fetch_add:
+  case AtomicExpr::AO__atomic_fetch_sub:
+  case AtomicExpr::AO__atomic_add_fetch:
+  case AtomicExpr::AO__atomic_sub_fetch:
+    if (MemTy->isPointerType()) {
+      // For pointer arithmetic, we're required to do a bit of math:
+      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+      QualType Val1Ty = E->getVal1()->getType();
+      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+      CharUnits PointeeIncAmt =
+          getContext().getTypeSizeInChars(MemTy->getPointeeType());
+      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
+      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
+      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+      break;
+    }
+    // Fall through.
+  case AtomicExpr::AO__c11_atomic_store:
+  case AtomicExpr::AO__c11_atomic_exchange:
+  case AtomicExpr::AO__atomic_store_n:
+  case AtomicExpr::AO__atomic_exchange_n:
+  case AtomicExpr::AO__c11_atomic_fetch_and:
+  case AtomicExpr::AO__c11_atomic_fetch_or:
+  case AtomicExpr::AO__c11_atomic_fetch_xor:
+  case AtomicExpr::AO__atomic_fetch_and:
+  case AtomicExpr::AO__atomic_fetch_or:
+  case AtomicExpr::AO__atomic_fetch_xor:
+  case AtomicExpr::AO__atomic_and_fetch:
+  case AtomicExpr::AO__atomic_or_fetch:
+  case AtomicExpr::AO__atomic_xor_fetch:
     Val1 = EmitValToTemp(*this, E->getVal1());
+    break;
   }

-  if (E->getOp() != AtomicExpr::Store && !Dest)
+  if (!E->getType()->isVoidType() && !Dest)
     Dest = CreateMemTemp(E->getType(), ".atomicdst");

   // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
@@ -2846,9 +2956,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
-   //                                void *desired, int success, int failure)
-   case AtomicExpr::CmpXchgWeak:
-   case AtomicExpr::CmpXchgStrong:
+   //                                void *desired, int success, int failure)
+   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+   case AtomicExpr::AO__atomic_compare_exchange:
+   case AtomicExpr::AO__atomic_compare_exchange_n:
     LibCallName = "__atomic_compare_exchange";
     RetTy = getContext().BoolTy;
     Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
             getContext().VoidPtrTy);
@@ -2861,7 +2973,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     break;
   // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
   //                        int order)
-  case AtomicExpr::Xchg:
+  case AtomicExpr::AO__c11_atomic_exchange:
+  case AtomicExpr::AO__atomic_exchange_n:
+  case AtomicExpr::AO__atomic_exchange:
     LibCallName = "__atomic_exchange";
     Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
              getContext().VoidPtrTy);
@@ -2869,13 +2983,17 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
              getContext().VoidPtrTy);
     break;
   // void __atomic_store(size_t size, void *mem, void *val, int order)
-  case AtomicExpr::Store:
+  case AtomicExpr::AO__c11_atomic_store:
+  case AtomicExpr::AO__atomic_store:
+  case AtomicExpr::AO__atomic_store_n:
     LibCallName = "__atomic_store";
     Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
              getContext().VoidPtrTy);
     break;
   // void __atomic_load(size_t size, void *mem, void *return, int order)
-  case AtomicExpr::Load:
+  case AtomicExpr::AO__c11_atomic_load:
+  case AtomicExpr::AO__atomic_load:
+  case AtomicExpr::AO__atomic_load_n:
     LibCallName = "__atomic_load";
     Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
              getContext().VoidPtrTy);
@@ -2903,7 +3021,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
   RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
   if (E->isCmpXChg())
     return Res;
-  if (E->getOp() == AtomicExpr::Store)
+  if (E->getType()->isVoidType())
     return RValue::get(0);
   return ConvertTempToRValue(*this, E->getType(), Dest);
 }
@@ -2943,24 +3061,31 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     default: // invalid order
       // We should not ever get here normally, but it's hard to
      // enforce that in general.
-      break;
+      break;
    }
-    if (E->getOp() == AtomicExpr::Store || E->getOp() == AtomicExpr::Init)
+    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), OrigDest);
  }

   // Long case, when Order isn't obviously constant.
+  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+                 E->getOp() == AtomicExpr::AO__atomic_store ||
+                 E->getOp() == AtomicExpr::AO__atomic_store_n;
+  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+                E->getOp() == AtomicExpr::AO__atomic_load ||
+                E->getOp() == AtomicExpr::AO__atomic_load_n;
+
   // Create all the relevant BB's
   llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                    *AcqRelBB = 0, *SeqCstBB = 0;
   MonotonicBB = createBasicBlock("monotonic", CurFn);
-  if (E->getOp() != AtomicExpr::Store)
+  if (!IsStore)
     AcquireBB = createBasicBlock("acquire", CurFn);
-  if (E->getOp() != AtomicExpr::Load)
+  if (!IsLoad)
     ReleaseBB = createBasicBlock("release", CurFn);
-  if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store)
+  if (!IsLoad && !IsStore)
     AcqRelBB = createBasicBlock("acqrel", CurFn);
   SeqCstBB = createBasicBlock("seqcst", CurFn);
   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
@@ -2977,7 +3102,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                llvm::Monotonic);
   Builder.CreateBr(ContBB);
-  if (E->getOp() != AtomicExpr::Store) {
+  if (!IsStore) {
     Builder.SetInsertPoint(AcquireBB);
     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                  llvm::Acquire);
@@ -2985,14 +3110,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     SI->addCase(Builder.getInt32(1), AcquireBB);
     SI->addCase(Builder.getInt32(2), AcquireBB);
   }
-  if (E->getOp() != AtomicExpr::Load) {
+  if (!IsLoad) {
     Builder.SetInsertPoint(ReleaseBB);
     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                  llvm::Release);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32(3), ReleaseBB);
   }
-  if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) {
+  if (!IsLoad && !IsStore) {
     Builder.SetInsertPoint(AcqRelBB);
     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                  llvm::AcquireRelease);
@@ -3007,7 +3132,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {

   // Cleanup and return
   Builder.SetInsertPoint(ContBB);
-  if (E->getOp() == AtomicExpr::Store)
+  if (E->getType()->isVoidType())
     return RValue::get(0);
   return ConvertTempToRValue(*this, E->getType(), OrigDest);
 }
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index c4ed0b0e52..1606e336ee 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -250,41 +250,11 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
   case Builtin::BI__sync_swap_8:
   case Builtin::BI__sync_swap_16:
     return SemaBuiltinAtomicOverloaded(move(TheCallResult));
-  case Builtin::BI__atomic_load:
-  case Builtin::BI__c11_atomic_load:
-    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Load);
-  case Builtin::BI__atomic_store:
-  case Builtin::BI__c11_atomic_store:
-    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Store);
-  case Builtin::BI__atomic_init:
-  case Builtin::BI__c11_atomic_init:
-    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Init);
-  case Builtin::BI__atomic_exchange:
-  case Builtin::BI__c11_atomic_exchange:
-    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Xchg);
-  case Builtin::BI__atomic_compare_exchange_strong:
-  case Builtin::BI__c11_atomic_compare_exchange_strong:
-    return SemaAtomicOpsOverloaded(move(TheCallResult),
-                                   AtomicExpr::CmpXchgStrong);
-  case Builtin::BI__atomic_compare_exchange_weak:
-  case Builtin::BI__c11_atomic_compare_exchange_weak:
-    return SemaAtomicOpsOverloaded(move(TheCallResult),
-                                   AtomicExpr::CmpXchgWeak);
-  case Builtin::BI__atomic_fetch_add:
-  case Builtin::BI__c11_atomic_fetch_add:
-    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Add);
-  case Builtin::BI__atomic_fetch_sub:
-  case Builtin::BI__c11_atomic_fetch_sub:
-    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Sub);
-  case Builtin::BI__atomic_fetch_and:
-  case Builtin::BI__c11_atomic_fetch_and:
-    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::And);
-  case Builtin::BI__atomic_fetch_or:
-  case Builtin::BI__c11_atomic_fetch_or:
-    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Or);
-  case Builtin::BI__atomic_fetch_xor:
-  case Builtin::BI__c11_atomic_fetch_xor:
-    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Xor);
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+  case Builtin::BI##ID: \
+    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::AO##ID);
+#include "clang/Basic/Builtins.def"
   case Builtin::BI__builtin_annotation:
     if (CheckBuiltinAnnotationString(*this, TheCall->getArg(1)))
       return ExprError();
@@ -515,75 +485,175 @@ bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
   return false;
 }

-ExprResult
-Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op) {
+ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
+                                         AtomicExpr::AtomicOp Op) {
   CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
   DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

-  // All these operations take one of the following four forms:
-  //   T   __c11_atomic_load(_Atomic(T)*, int)                      (loads)
-  //   T*  __c11_atomic_add(_Atomic(T*)*, ptrdiff_t, int)   (pointer add/sub)
-  //   int __c11_atomic_compare_exchange_strong(_Atomic(T)*, T*, T, int, int)
-  //                                                                (cmpxchg)
-  //   T   __c11_atomic_exchange(_Atomic(T)*, T, int)       (everything else)
-  // where T is an appropriate type, and the int paremeterss are for orderings.
-  unsigned NumVals = 1;
-  unsigned NumOrders = 1;
-  if (Op == AtomicExpr::Load) {
-    NumVals = 0;
-  } else if (Op == AtomicExpr::CmpXchgWeak || Op == AtomicExpr::CmpXchgStrong) {
-    NumVals = 2;
-    NumOrders = 2;
-  }
-  if (Op == AtomicExpr::Init)
-    NumOrders = 0;
-
-  if (TheCall->getNumArgs() < NumVals+NumOrders+1) {
+  // All these operations take one of the following forms:
+  enum {
+    // C    __c11_atomic_init(A *, C)
+    Init,
+    // C    __c11_atomic_load(A *, int)
+    Load,
+    // void __atomic_load(A *, CP, int)
+    Copy,
+    // C    __c11_atomic_add(A *, M, int)
+    Arithmetic,
+    // C    __atomic_exchange_n(A *, CP, int)
+    Xchg,
+    // void __atomic_exchange(A *, C *, CP, int)
+    GNUXchg,
+    // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
+    C11CmpXchg,
+    // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
+    GNUCmpXchg
+  } Form = Init;
+  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 4, 5, 6 };
+  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 2, 2, 3 };
+  // where:
+  //   C is an appropriate type,
+  //   A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
+  //   CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
+  //   M is C if C is an integer, and ptrdiff_t if C is a pointer, and
+  //   the int parameters are for orderings.
+
+  assert(AtomicExpr::AO__c11_atomic_init == 0 &&
+         AtomicExpr::AO__c11_atomic_fetch_xor + 1 == AtomicExpr::AO__atomic_load
+         && "need to update code for modified C11 atomics");
+  bool IsC11 = Op >= AtomicExpr::AO__c11_atomic_init &&
+               Op <= AtomicExpr::AO__c11_atomic_fetch_xor;
+  bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
+             Op == AtomicExpr::AO__atomic_store_n ||
+             Op == AtomicExpr::AO__atomic_exchange_n ||
+             Op == AtomicExpr::AO__atomic_compare_exchange_n;
+  bool IsAddSub = false;
+
+  switch (Op) {
+  case AtomicExpr::AO__c11_atomic_init:
+    Form = Init;
+    break;
+
+  case AtomicExpr::AO__c11_atomic_load:
+  case AtomicExpr::AO__atomic_load_n:
+    Form = Load;
+    break;
+
+  case AtomicExpr::AO__c11_atomic_store:
+  case AtomicExpr::AO__atomic_load:
+  case AtomicExpr::AO__atomic_store:
+  case AtomicExpr::AO__atomic_store_n:
+    Form = Copy;
+    break;
+
+  case AtomicExpr::AO__c11_atomic_fetch_add:
+  case AtomicExpr::AO__c11_atomic_fetch_sub:
+  case AtomicExpr::AO__atomic_fetch_add:
+  case AtomicExpr::AO__atomic_fetch_sub:
+  case AtomicExpr::AO__atomic_add_fetch:
+  case AtomicExpr::AO__atomic_sub_fetch:
+    IsAddSub = true;
+    // Fall through.
+  case AtomicExpr::AO__c11_atomic_fetch_and:
+  case AtomicExpr::AO__c11_atomic_fetch_or:
+  case AtomicExpr::AO__c11_atomic_fetch_xor:
+  case AtomicExp
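For reference, the call forms enumerated in the new SemaAtomicOpsOverloaded correspond
to source-level calls along the following lines; a hedged sketch with Payload and
shared as illustrative names, using the documented GNU builtin signatures, which
operate on plain trivially-copyable objects rather than _Atomic ones:

#include <stdbool.h>

/* Illustrative type and object -- not from the patch. */
typedef struct { int lo, hi; } Payload;
static Payload shared;

/* 'Copy' form: void __atomic_load(A *, CP, int). The non-_n builtins move
 * values through pointers, so any trivially-copyable type works. */
void read_payload(Payload *out) {
  __atomic_load(&shared, out, __ATOMIC_ACQUIRE);
}

/* 'GNUCmpXchg' form:
 * bool __atomic_compare_exchange(A *, C *, CP, bool, int, int). */
bool replace_payload(Payload *expected, Payload *desired) {
  return __atomic_compare_exchange(&shared, expected, desired,
                                   /*weak=*/false,
                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}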