author    Richard Smith <richard-llvm@metafoo.co.uk>    2012-04-13 00:45:38 +0000
committer Richard Smith <richard-llvm@metafoo.co.uk>    2012-04-13 00:45:38 +0000
commit    2c39d71bb7cefdfe6116fa52454f3b3dc5abd517 (patch)
tree      392cd2bf791d9bd77edc058462ae4a43678adf51 /lib/CodeGen
parent    e2d87bc0be8e5a5b2e45118675ff878d89daee44 (diff)
Implement the missing pieces needed to support libstdc++4.7's <atomic>:
__atomic_test_and_set, __atomic_clear, plus a pile of undocumented __GCC_*
predefined macros.

Implement library fallback for __atomic_is_lock_free and
__c11_atomic_is_lock_free, and implement __atomic_always_lock_free.

Contrary to their documentation, GCC's __atomic_fetch_add family don't
multiply the operand by sizeof(T) when operating on a pointer type.
libstdc++ relies on this quirk. Remove this handling for all but the
__c11_atomic_fetch_add and __c11_atomic_fetch_sub builtins.

Contrary to their documentation, __atomic_test_and_set and __atomic_clear
take a first argument of type 'volatile void *', not 'void *' or 'bool *',
and __atomic_is_lock_free and __atomic_always_lock_free have an argument of
type 'const volatile void *', not 'void *'.

With this change, libstdc++4.7's <atomic> passes libc++'s atomic test suite,
except for a couple of libstdc++ bugs and some cases where libc++'s test
suite tests for properties which implementations have latitude to vary.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@154640 91177308-0d34-0410-b5e6-96231b3b80d8
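For illustration, a minimal sketch of how the builtins described above are used
from C or C++, assuming Clang or GCC with the __ATOMIC_* ordering macros; the
names spin_lock_sketch, lock_free_query_sketch, flag and obj are hypothetical:

    // Illustrative sketch only; not part of this commit.
    void spin_lock_sketch() {
      static volatile char flag = 0;
      // __atomic_test_and_set atomically stores 1 and returns the previous
      // value as a bool; its first parameter is 'volatile void *'.
      while (__atomic_test_and_set(&flag, __ATOMIC_ACQUIRE)) {
        // spin until the previous value was 0
      }
      // ... critical section ...
      // __atomic_clear atomically stores 0; only relaxed, release and
      // seq_cst orderings are valid here.
      __atomic_clear(&flag, __ATOMIC_RELEASE);
    }

    bool lock_free_query_sketch() {
      static long obj;
      // __atomic_always_lock_free folds to a compile-time constant; when the
      // answer is not known at compile time, __atomic_is_lock_free may fall
      // back to the library call bool __atomic_is_lock_free(size_t, void *).
      return __atomic_always_lock_free(sizeof(obj), 0) ||
             __atomic_is_lock_free(sizeof(obj), &obj);
    }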
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp  173
-rw-r--r--  lib/CodeGen/CGExpr.cpp      10
2 files changed, 179 insertions, 4 deletions
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 5eab5db94a..e30b5136ba 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -966,6 +966,179 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(0);
}
+ case Builtin::BI__c11_atomic_is_lock_free:
+ case Builtin::BI__atomic_is_lock_free: {
+ // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
+ // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
+ // _Atomic(T) is always properly-aligned.
+ const char *LibCallName = "__atomic_is_lock_free";
+ CallArgList Args;
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
+ getContext().getSizeType());
+ if (BuiltinID == Builtin::BI__atomic_is_lock_free)
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
+ getContext().VoidPtrTy);
+ else
+ Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
+ getContext().VoidPtrTy);
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFunctionCall(E->getType(), Args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ }
+
+ case Builtin::BI__atomic_test_and_set: {
+ // Look at the argument type to determine whether this is a volatile
+ // operation. The parameter type is always volatile.
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(1);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ AtomicRMWInst *Result = 0;
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Monotonic);
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Acquire);
+ break;
+ case 3: // memory_order_release
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Release);
+ break;
+ case 4: // memory_order_acq_rel
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::AcquireRelease);
+ break;
+ case 5: // memory_order_seq_cst
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::SequentiallyConsistent);
+ break;
+ }
+ Result->setVolatile(Volatile);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[5] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("acquire", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("acqrel", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[5] = {
+ llvm::Monotonic, llvm::Acquire, llvm::Release,
+ llvm::AcquireRelease, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ Builder.SetInsertPoint(ContBB);
+ PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
+
+ for (unsigned i = 0; i < 5; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal, Orders[i]);
+ RMW->setVolatile(Volatile);
+ Result->addIncoming(RMW, BBs[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(1), BBs[1]);
+ SI->addCase(Builder.getInt32(2), BBs[1]);
+ SI->addCase(Builder.getInt32(3), BBs[2]);
+ SI->addCase(Builder.getInt32(4), BBs[3]);
+ SI->addCase(Builder.getInt32(5), BBs[4]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ case Builtin::BI__atomic_clear: {
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(0);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Store->setOrdering(llvm::Monotonic);
+ break;
+ case 3: // memory_order_release
+ Store->setOrdering(llvm::Release);
+ break;
+ case 5: // memory_order_seq_cst
+ Store->setOrdering(llvm::SequentiallyConsistent);
+ break;
+ }
+ return RValue::get(0);
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[3] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[3] = {
+ llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ for (unsigned i = 0; i < 3; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ Store->setOrdering(Orders[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(3), BBs[1]);
+ SI->addCase(Builder.getInt32(5), BBs[2]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(0);
+ }
+
case Builtin::BI__atomic_thread_fence:
case Builtin::BI__atomic_signal_fence:
case Builtin::BI__c11_atomic_thread_fence:
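For context, a minimal sketch (not part of the patch) of a call that exercises
the non-constant-order path in the new code above: when the ordering argument is
only known at run time, the builtin cannot be lowered to a single atomicrmw and
instead goes through the switch over the monotonic/acquire/release/acqrel/seqcst
blocks. The wrapper name and parameters below are hypothetical.

    // 'order' is only known at run time, so the lowering above emits a
    // switch that selects the matching atomicrmw xchg instruction.
    bool test_and_set_runtime_order(volatile char *flag, int order) {
      return __atomic_test_and_set(flag, order);
    }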
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 0c33fb5371..c92cbb2010 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -2904,13 +2904,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_fetch_add:
- case AtomicExpr::AO__atomic_fetch_sub:
- case AtomicExpr::AO__atomic_add_fetch:
- case AtomicExpr::AO__atomic_sub_fetch:
if (MemTy->isPointerType()) {
// For pointer arithmetic, we're required to do a bit of math:
// adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+ // ... but only for the C11 builtins. The GNU builtins expect the
+ // user to multiply by sizeof(T).
QualType Val1Ty = E->getVal1()->getType();
llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
CharUnits PointeeIncAmt =
@@ -2921,6 +2919,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
break;
}
// Fall through.
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__atomic_store_n:
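For illustration, a minimal sketch of the pointer-arithmetic difference noted in
the new comment above, assuming Clang (which also accepts C11 _Atomic in C++ as
an extension): the __c11 builtins scale the addend by sizeof(T), while the GNU
builtins leave that multiplication to the caller, which is the quirk libstdc++
relies on. The function name is hypothetical.

    int *advance_by_one_element(int *p) {
      _Atomic(int *) ap = p;
      // __c11_atomic_fetch_add scales the operand: this adds sizeof(int).
      __c11_atomic_fetch_add(&ap, 1, __ATOMIC_SEQ_CST);

      int *q = p;
      // The GNU builtin does not scale, so the caller multiplies by
      // sizeof(int) to advance by one element.
      __atomic_fetch_add(&q, sizeof(int), __ATOMIC_SEQ_CST);
      return q;
    }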