author     Chris Lattner <sabre@nondot.org>   2009-05-08 06:58:22 +0000
committer  Chris Lattner <sabre@nondot.org>   2009-05-08 06:58:22 +0000
commit     5caa370ea6f70bd3e7e4a9cc3b69ac1a849c8534 (patch)
tree       325c140baaa5ecb1abc5f7d3a8353040d468253e /lib
parent     fb1e3310da7e3886c8057a5f009d2cdf30d8804f (diff)
reimplement __sync_* builtins to be variadic and to follow the same
semantic rules that gcc and icc use.  This implements the variadic and
concrete versions as builtins and has sema do the disambiguation.  There
are probably a bunch of details to finish up but this seems like a large
monotonic step forward :)

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@71212 91177308-0d34-0410-b5e6-96231b3b80d8
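
For context, a minimal usage sketch (not part of the commit): after this change the generic, variadic form of a builtin such as __sync_fetch_and_add is type-checked in Sema and retargeted to the concrete, width-suffixed builtin chosen from the pointee type of the first argument, which CodeGen then lowers to the corresponding llvm.atomic.* intrinsic. The counter names below are made up for illustration.

    #include <cstdint>

    static std::int32_t counter32 = 0;
    static std::int64_t counter64 = 0;

    void bump() {
      // Sema sees the generic, variadic __sync_fetch_and_add, infers the value
      // type from the pointer argument, and rewrites the callee to the sized
      // builtin: __sync_fetch_and_add_4 here...
      __sync_fetch_and_add(&counter32, 1);
      // ...and __sync_fetch_and_add_8 here; CGBuiltin.cpp then emits the
      // llvm.atomic.load.add intrinsic (Intrinsic::atomic_load_add).
      __sync_fetch_and_add(&counter64, 1);
    }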
Diffstat (limited to 'lib')
-rw-r--r--   lib/CodeGen/CGBuiltin.cpp    112
-rw-r--r--   lib/Sema/Sema.h                1
-rw-r--r--   lib/Sema/SemaChecking.cpp    184
3 files changed, 279 insertions, 18 deletions
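
The core of the Sema change is the mapping from (generic builtin, pointee size in bytes) to a concrete builtin ID. A condensed, stand-alone sketch of that lookup follows; it is illustrative only, since the real code in SemaChecking.cpp below indexes a BuiltinIndices table of Builtin::BI IDs rather than building names as strings.

    #include <cstdio>
    #include <string>

    // Map the pointee size (in bytes) to the index of the width suffix,
    // mirroring the switch on Context.getTypeSize(ValType)/8 in the patch.
    static int sizeIndex(unsigned Bytes) {
      switch (Bytes) {
      case 1:  return 0;
      case 2:  return 1;
      case 4:  return 2;
      case 8:  return 3;
      case 16: return 4;
      default: return -1;  // Sema emits err_atomic_builtin_pointer_size here.
      }
    }

    // Build the concrete builtin name, e.g. __sync_fetch_and_add on a 2-byte
    // object -> __sync_fetch_and_add_2 (the example in the patch's comment).
    static std::string concreteName(const std::string &Generic, unsigned Bytes) {
      static const char *const Suffix[5] = {"_1", "_2", "_4", "_8", "_16"};
      int Idx = sizeIndex(Bytes);
      return Idx < 0 ? std::string() : Generic + Suffix[Idx];
    }

    int main() {
      std::printf("%s\n", concreteName("__sync_fetch_and_add", 2).c_str());
    }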
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index d14bb53e6f..8e78c2fd7e 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -315,9 +315,52 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(EmitScalarExpr(E->getArg(0)));
}
case Builtin::BI__sync_fetch_and_add:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ assert(0 && "Shouldn't make it through sema");
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
+
+ // Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
case Builtin::BI__sync_fetch_and_max:
@@ -326,30 +369,49 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
case Builtin::BI__sync_fetch_and_umax:
return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
- case Builtin::BI__sync_fetch_and_and:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
- case Builtin::BI__sync_fetch_and_or:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
- case Builtin::BI__sync_fetch_and_xor:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
- case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
llvm::Instruction::Add);
- case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
llvm::Instruction::Sub);
- case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
llvm::Instruction::And);
- case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
llvm::Instruction::Or);
- case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
llvm::Instruction::Xor);
- case Builtin::BI__sync_val_compare_and_swap: {
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
+ {
const llvm::Type *ResType[2];
ResType[0]= ConvertType(E->getType());
ResType[1] = ConvertType(E->getArg(0)->getType());
@@ -360,10 +422,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
EmitScalarExpr(E->getArg(2))));
}
- case Builtin::BI__sync_bool_compare_and_swap: {
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
+ {
const llvm::Type *ResType[2];
- ResType[0]= ConvertType(E->getType());
- ResType[1] = ConvertType(E->getArg(0)->getType());
+ ResType[0]= ConvertType(E->getArg(1)->getType());
+ ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
Value *OldVal = EmitScalarExpr(E->getArg(1));
Value *PrevVal = Builder.CreateCall3(AtomF,
@@ -375,9 +442,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
}
- case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
-
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16:
+ assert(0 && "FIXME: Implement");
// Library functions with special handling.
diff --git a/lib/Sema/Sema.h b/lib/Sema/Sema.h
index 2700600d40..389d554813 100644
--- a/lib/Sema/Sema.h
+++ b/lib/Sema/Sema.h
@@ -2590,6 +2590,7 @@ private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinObjectSize(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
+ bool SemaBuiltinAtomicOverloaded(CallExpr *TheCall);
bool SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg);
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index be1faace9c..e8b908ed3f 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -139,6 +139,23 @@ Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
if (SemaBuiltinLongjmp(TheCall))
return ExprError();
return move(TheCallResult);
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ if (SemaBuiltinAtomicOverloaded(TheCall))
+ return ExprError();
+ return move(TheCallResult);
}
// FIXME: This mechanism should be abstracted to be less fragile and
@@ -162,6 +179,173 @@ Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
return move(TheCallResult);
}
+/// SemaBuiltinAtomicOverloaded - We have a call to a function like
+/// __sync_fetch_and_add, which is an overloaded function based on the pointer
+/// type of its first argument. The main ActOnCallExpr routines have already
+/// promoted the types of arguments because all of these calls are prototyped as
+/// void(...).
+///
+/// This function goes through and does final semantic checking for these
+/// builtins.
+bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) {
+ DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
+
+ // Ensure that we have at least one argument to do type inference from.
+ if (TheCall->getNumArgs() < 1)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 << TheCall->getCallee()->getSourceRange();
+
+ // Inspect the first argument of the atomic builtin. This should always be
+ // a pointer type, whose element is an integral scalar or pointer type.
+ // Because it is a pointer type, we don't have to worry about any implicit
+ // casts here.
+ Expr *FirstArg = TheCall->getArg(0);
+ if (!FirstArg->getType()->isPointerType())
+ return Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+
+ QualType ValType = FirstArg->getType()->getAsPointerType()->getPointeeType();
+ if (!ValType->isIntegerType() && !ValType->isPointerType() &&
+ !ValType->isBlockPointerType())
+ return Diag(DRE->getLocStart(),
+ diag::err_atomic_builtin_must_be_pointer_intptr)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+
+ // We need to figure out which concrete builtin this maps onto. For example,
+ // __sync_fetch_and_add with a 2 byte object turns into
+ // __sync_fetch_and_add_2.
+#define BUILTIN_ROW(x) \
+ { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
+ Builtin::BI##x##_8, Builtin::BI##x##_16 }
+
+ static const unsigned BuiltinIndices[][5] = {
+ BUILTIN_ROW(__sync_fetch_and_add),
+ BUILTIN_ROW(__sync_fetch_and_sub),
+ BUILTIN_ROW(__sync_fetch_and_or),
+ BUILTIN_ROW(__sync_fetch_and_and),
+ BUILTIN_ROW(__sync_fetch_and_xor),
+
+ BUILTIN_ROW(__sync_add_and_fetch),
+ BUILTIN_ROW(__sync_sub_and_fetch),
+ BUILTIN_ROW(__sync_and_and_fetch),
+ BUILTIN_ROW(__sync_or_and_fetch),
+ BUILTIN_ROW(__sync_xor_and_fetch),
+
+ BUILTIN_ROW(__sync_val_compare_and_swap),
+ BUILTIN_ROW(__sync_bool_compare_and_swap),
+ BUILTIN_ROW(__sync_lock_test_and_set),
+ BUILTIN_ROW(__sync_lock_release)
+ };
+#undef BUILTIN_ROW
+
+ // Determine the index of the size.
+ unsigned SizeIndex;
+ switch (Context.getTypeSize(ValType)/8) {
+ case 1: SizeIndex = 0; break;
+ case 2: SizeIndex = 1; break;
+ case 4: SizeIndex = 2; break;
+ case 8: SizeIndex = 3; break;
+ case 16: SizeIndex = 4; break;
+ default:
+ return Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ }
+
+ // Each of these builtins has one pointer argument, followed by some number of
+ // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
+ // that we ignore. Find out which row of BuiltinIndices to read from as well
+ // as the number of fixed args.
+ unsigned BuiltinID = FDecl->getBuiltinID(Context);
+ unsigned BuiltinIndex, NumFixed = 1;
+ switch (BuiltinID) {
+ default: assert(0 && "Unknown overloaded atomic builtin!");
+ case Builtin::BI__sync_fetch_and_add: BuiltinIndex = 0; break;
+ case Builtin::BI__sync_fetch_and_sub: BuiltinIndex = 1; break;
+ case Builtin::BI__sync_fetch_and_or: BuiltinIndex = 2; break;
+ case Builtin::BI__sync_fetch_and_and: BuiltinIndex = 3; break;
+ case Builtin::BI__sync_fetch_and_xor: BuiltinIndex = 4; break;
+
+ case Builtin::BI__sync_add_and_fetch: BuiltinIndex = 5; break;
+ case Builtin::BI__sync_sub_and_fetch: BuiltinIndex = 6; break;
+ case Builtin::BI__sync_and_and_fetch: BuiltinIndex = 7; break;
+ case Builtin::BI__sync_or_and_fetch: BuiltinIndex = 8; break;
+ case Builtin::BI__sync_xor_and_fetch: BuiltinIndex = 9; break;
+
+ case Builtin::BI__sync_val_compare_and_swap:
+ BuiltinIndex = 10;
+ NumFixed = 2;
+ break;
+ case Builtin::BI__sync_bool_compare_and_swap:
+ BuiltinIndex = 11;
+ NumFixed = 2;
+ break;
+ case Builtin::BI__sync_lock_test_and_set: BuiltinIndex = 12; break;
+ case Builtin::BI__sync_lock_release:
+ BuiltinIndex = 13;
+ NumFixed = 0;
+ break;
+ }
+
+ // Now that we know how many fixed arguments we expect, first check that we
+ // have at least that many.
+ if (TheCall->getNumArgs() < 1+NumFixed)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 << TheCall->getCallee()->getSourceRange();
+
+ // Next, walk the valid ones promoting to the right type.
+ for (unsigned i = 0; i != NumFixed; ++i) {
+ Expr *Arg = TheCall->getArg(i+1);
+
+ // If the argument is an implicit cast, then there was a promotion due to
+ // "...", just remove it now.
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ Arg = ICE->getSubExpr();
+ ICE->setSubExpr(0);
+ ICE->Destroy(Context);
+ TheCall->setArg(i+1, Arg);
+ }
+
+ // GCC does an implicit conversion to the pointer or integer ValType. This
+ // can fail in some cases (1i -> int**), check for this error case now.
+ if (CheckCastTypes(Arg->getSourceRange(), ValType, Arg))
+ return true;
+
+ // Okay, we have something that *can* be converted to the right type. Check
+ // to see if there is a potentially weird extension going on here. This can
+ // happen when you do an atomic operation on something like a char* and
+ // pass in 42. The 42 gets converted to char. This is even more strange
+ // for things like 45.123 -> char, etc.
+ // FIXME: Do this check.
+ ImpCastExprToType(Arg, ValType, false);
+ TheCall->setArg(i+1, Arg);
+ }
+
+ // Okay, if we get here, everything is good. Get the decl for the concrete
+ // builtin.
+ unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
+ const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID);
+ IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName);
+ FunctionDecl *NewBuiltinDecl =
+ cast<FunctionDecl>(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID,
+ TUScope, false, DRE->getLocStart()));
+ // Switch the DeclRefExpr to refer to the new decl.
+ DRE->setDecl(NewBuiltinDecl);
+ DRE->setType(NewBuiltinDecl->getType());
+
+ // Set the callee in the CallExpr.
+ // FIXME: This leaks the original parens and implicit casts.
+ Expr *PromotedCall = DRE;
+ UsualUnaryConversions(PromotedCall);
+ TheCall->setCallee(PromotedCall);
+
+
+ // Change the result type of the call to match the result type of the decl.
+ TheCall->setType(NewBuiltinDecl->getResultType());
+ return false;
+}
+
+
/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct
/// FIXME: GCC currently emits the following warning: