Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ExprConstant.cpp           |  52
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp          | 173
-rw-r--r--  lib/CodeGen/CGExpr.cpp             |  10
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp  |  40
4 files changed, 245 insertions(+), 30 deletions(-)
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index ce41308344..01c9fe7cd8 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -4306,7 +4306,7 @@ bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) {
}
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
- switch (E->isBuiltinCall()) {
+ switch (unsigned BuiltinOp = E->isBuiltinCall()) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -4365,6 +4365,7 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Error(E);
+ case Builtin::BI__atomic_always_lock_free:
case Builtin::BI__atomic_is_lock_free:
case Builtin::BI__c11_atomic_is_lock_free: {
APSInt SizeVal;
@@ -4382,32 +4383,31 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
// Check power-of-two.
CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
- if (!Size.isPowerOfTwo())
-#if 0
- // FIXME: Suppress this folding until the ABI for the promotion width
- // settles.
- return Success(0, E);
-#else
- return Error(E);
-#endif
-
-#if 0
- // Check against promotion width.
- // FIXME: Suppress this folding until the ABI for the promotion width
- // settles.
- unsigned PromoteWidthBits =
- Info.Ctx.getTargetInfo().getMaxAtomicPromoteWidth();
- if (Size > Info.Ctx.toCharUnitsFromBits(PromoteWidthBits))
- return Success(0, E);
-#endif
-
- // Check against inlining width.
- unsigned InlineWidthBits =
- Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
- if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits))
- return Success(1, E);
+ if (Size.isPowerOfTwo()) {
+ // Check against inlining width.
+ unsigned InlineWidthBits =
+ Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
+ if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
+ if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
+ Size == CharUnits::One() ||
+ E->getArg(1)->isNullPointerConstant(Info.Ctx,
+ Expr::NPC_NeverValueDependent))
+ // OK, we will inline appropriately-aligned operations of this size,
+ // and _Atomic(T) is appropriately-aligned.
+ return Success(1, E);
+
+ QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
+ castAs<PointerType>()->getPointeeType();
+ if (!PointeeType->isIncompleteType() &&
+ Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
+ // OK, we will inline operations on this object.
+ return Success(1, E);
+ }
+ }
+ }
- return Error(E);
+ return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
+ Success(0, E) : Error(E);
}
}
}
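
With the evaluator change above, both builtins can now fold at compile time. Below is a minimal sketch of the observable behaviour, not part of the patch; it assumes a target whose maximum inline atomic width is 64 bits, so the folded answers are assumptions rather than guarantees:

    // Illustrative only. Assumes a 64-bit target with a 64-bit maximum
    // inline atomic width.
    #include <cstddef>
    #include <cstdint>

    // __atomic_always_lock_free always folds to a constant, so it can feed a
    // static_assert: a power-of-two size up to the inline width, with a null
    // pointer standing in for "naturally aligned", folds to true.
    static_assert(__atomic_always_lock_free(sizeof(std::uint32_t), nullptr),
                  "4-byte naturally aligned objects are always lock-free here");

    // Oversized objects fold to 0 rather than failing to fold, which is what
    // the Success(0, E) path for BI__atomic_always_lock_free above provides.
    static_assert(!__atomic_always_lock_free(16, nullptr),
                  "16-byte objects exceed the assumed inline width");

    // __atomic_is_lock_free may still fold (e.g. for an aligned int), but
    // when it cannot, it is answered at run time by the
    // __atomic_is_lock_free libcall emitted in CGBuiltin.cpp below.
    bool query(std::size_t n, void *p) { return __atomic_is_lock_free(n, p); }
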
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 5eab5db94a..e30b5136ba 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -966,6 +966,179 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(0);
}
+ case Builtin::BI__c11_atomic_is_lock_free:
+ case Builtin::BI__atomic_is_lock_free: {
+ // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
+ // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
+ // _Atomic(T) is always properly-aligned.
+ const char *LibCallName = "__atomic_is_lock_free";
+ CallArgList Args;
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
+ getContext().getSizeType());
+ if (BuiltinID == Builtin::BI__atomic_is_lock_free)
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
+ getContext().VoidPtrTy);
+ else
+ Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
+ getContext().VoidPtrTy);
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFunctionCall(E->getType(), Args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ }
+
+ case Builtin::BI__atomic_test_and_set: {
+ // Look at the argument type to determine whether this is a volatile
+ // operation. The parameter type is always volatile.
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(1);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ AtomicRMWInst *Result = 0;
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Monotonic);
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Acquire);
+ break;
+ case 3: // memory_order_release
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Release);
+ break;
+ case 4: // memory_order_acq_rel
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::AcquireRelease);
+ break;
+ case 5: // memory_order_seq_cst
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::SequentiallyConsistent);
+ break;
+ }
+ Result->setVolatile(Volatile);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[5] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("acquire", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("acqrel", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[5] = {
+ llvm::Monotonic, llvm::Acquire, llvm::Release,
+ llvm::AcquireRelease, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ Builder.SetInsertPoint(ContBB);
+ PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
+
+ for (unsigned i = 0; i < 5; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal, Orders[i]);
+ RMW->setVolatile(Volatile);
+ Result->addIncoming(RMW, BBs[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(1), BBs[1]);
+ SI->addCase(Builder.getInt32(2), BBs[1]);
+ SI->addCase(Builder.getInt32(3), BBs[2]);
+ SI->addCase(Builder.getInt32(4), BBs[3]);
+ SI->addCase(Builder.getInt32(5), BBs[4]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ case Builtin::BI__atomic_clear: {
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(0);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Store->setOrdering(llvm::Monotonic);
+ break;
+ case 3: // memory_order_release
+ Store->setOrdering(llvm::Release);
+ break;
+ case 5: // memory_order_seq_cst
+ Store->setOrdering(llvm::SequentiallyConsistent);
+ break;
+ }
+ return RValue::get(0);
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[3] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[3] = {
+ llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ for (unsigned i = 0; i < 3; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ Store->setOrdering(Orders[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(3), BBs[1]);
+ SI->addCase(Builder.getInt32(5), BBs[2]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(0);
+ }
+
case Builtin::BI__atomic_thread_fence:
case Builtin::BI__atomic_signal_fence:
case Builtin::BI__c11_atomic_thread_fence:
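
The test-and-set and clear lowerings above are enough to build a flag-based lock in user code. The sketch below is not part of the patch and its type and function names are hypothetical; with a constant memory order the call takes the direct switch(ord) path above, otherwise the generated IR dispatches through the runtime switch over basic blocks:

    // Illustrative user-level code built on the two builtins lowered above.
    struct SpinFlag { volatile unsigned char flag = 0; };

    static void spin_lock(SpinFlag &f) {
      // Lowered to an atomic xchg of __GCC_ATOMIC_TEST_AND_SET_TRUEVAL (1)
      // with acquire ordering; returns true while the flag was already set.
      while (__atomic_test_and_set(&f.flag, __ATOMIC_ACQUIRE))
        ; // spin
    }

    static void spin_unlock(SpinFlag &f) {
      // Lowered to an atomic store of 0 with release ordering.
      __atomic_clear(&f.flag, __ATOMIC_RELEASE);
    }
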
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 0c33fb5371..c92cbb2010 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -2904,13 +2904,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_fetch_add:
- case AtomicExpr::AO__atomic_fetch_sub:
- case AtomicExpr::AO__atomic_add_fetch:
- case AtomicExpr::AO__atomic_sub_fetch:
if (MemTy->isPointerType()) {
// For pointer arithmetic, we're required to do a bit of math:
// adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+ // ... but only for the C11 builtins. The GNU builtins expect the
+ // user to multiply by sizeof(T).
QualType Val1Ty = E->getVal1()->getType();
llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
CharUnits PointeeIncAmt =
@@ -2921,6 +2919,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
break;
}
// Fall through.
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__atomic_store_n:
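
The hunk above now scales pointer arithmetic only for the __c11 builtins; the GNU __atomic_* forms fall through with the plain integer cases. A small sketch of the resulting difference, not from the patch:

    // Illustrative only: element-wise arithmetic (the scaled C11-style path)
    // versus the GNU builtin, which adds a raw byte offset.
    #include <atomic>

    int *advance_by_one(std::atomic<int*> &p) {
      // Standard atomics advance by whole elements: this adds sizeof(int)
      // bytes to the stored pointer and returns the old value.
      return p.fetch_add(1);
    }

    int *advance_by_one_gnu(int **p) {
      // The GNU builtin adds exactly the byte count it is given, so the
      // caller multiplies by sizeof(int) to step over one element.
      return __atomic_fetch_add(p, sizeof(int), __ATOMIC_SEQ_CST);
    }
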
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index c1b9d57f09..93d49b0563 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -202,6 +202,20 @@ static void DefineExactWidthIntType(TargetInfo::IntType Ty,
ConstSuffix);
}
+/// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with
+/// the specified properties.
+static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
+ unsigned InlineWidth) {
+ // Fully-aligned, power-of-2 sizes no larger than the inline
+ // width will be inlined as lock-free operations.
+ if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
+ TypeWidth <= InlineWidth)
+ return "2"; // "always lock free"
+ // We cannot be certain what operations the lib calls might be
+ // able to implement as lock-free on future processors.
+ return "1"; // "sometimes lock free"
+}
+
/// \brief Add definitions required for a smooth interaction between
/// Objective-C++ automated reference counting and libstdc++ (4.2).
static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
@@ -521,6 +535,32 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
else
Builder.defineMacro("__GNUC_STDC_INLINE__");
+ // The value written by __atomic_test_and_set.
+ // FIXME: This is target-dependent.
+ Builder.defineMacro("__GCC_ATOMIC_TEST_AND_SET_TRUEVAL", "1");
+
+ // Used by libstdc++ to implement ATOMIC_<foo>_LOCK_FREE.
+ unsigned InlineWidthBits = TI.getMaxAtomicInlineWidth();
+#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
+ Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \
+ getLockFreeValue(TI.get##Type##Width(), \
+ TI.get##Type##Align(), \
+ InlineWidthBits));
+ DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
+ DEFINE_LOCK_FREE_MACRO(CHAR, Char);
+ DEFINE_LOCK_FREE_MACRO(CHAR16_T, Char16);
+ DEFINE_LOCK_FREE_MACRO(CHAR32_T, Char32);
+ DEFINE_LOCK_FREE_MACRO(WCHAR_T, WChar);
+ DEFINE_LOCK_FREE_MACRO(SHORT, Short);
+ DEFINE_LOCK_FREE_MACRO(INT, Int);
+ DEFINE_LOCK_FREE_MACRO(LONG, Long);
+ DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
+ Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE",
+ getLockFreeValue(TI.getPointerWidth(0),
+ TI.getPointerAlign(0),
+ InlineWidthBits));
+#undef DEFINE_LOCK_FREE_MACRO
+
if (LangOpts.NoInlineDefine)
Builder.defineMacro("__NO_INLINE__");
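
These predefined macros are meant to be consumed by library headers. A hedged sketch of the intended use follows; the ATOMIC_*_LOCK_FREE mapping mirrors the libstdc++ pattern the comment above refers to and is not part of this patch:

    // Illustrative only: how a library header might surface the new macros.
    #define ATOMIC_BOOL_LOCK_FREE    __GCC_ATOMIC_BOOL_LOCK_FREE
    #define ATOMIC_INT_LOCK_FREE     __GCC_ATOMIC_INT_LOCK_FREE
    #define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE

    // 2 means "always lock free": the type's width equals its alignment, is
    // a power of two, and fits the target's inline atomic width. 1 means the
    // answer may depend on the runtime library, so it is only "sometimes".
    #if ATOMIC_INT_LOCK_FREE == 2
    // Safe to rely on lock-free int atomics unconditionally on this target.
    #endif
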