Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h                 |   2
-rw-r--r--  lib/CodeGen/BackendUtil.cpp           |  23
-rw-r--r--  lib/CodeGen/CGAtomic.cpp              |  11
-rw-r--r--  lib/CodeGen/CGBlocks.cpp              |  93
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp             |  45
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp              |  33
-rw-r--r--  lib/CodeGen/CGCXXABI.h                |  57
-rw-r--r--  lib/CodeGen/CGCall.cpp                | 214
-rw-r--r--  lib/CodeGen/CGCall.h                  |  19
-rw-r--r--  lib/CodeGen/CGClass.cpp               |  28
-rw-r--r--  lib/CodeGen/CGCleanup.cpp             |  12
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp           | 184
-rw-r--r--  lib/CodeGen/CGDebugInfo.h             |  18
-rw-r--r--  lib/CodeGen/CGDecl.cpp                | 261
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp             |  89
-rw-r--r--  lib/CodeGen/CGException.cpp           |  12
-rw-r--r--  lib/CodeGen/CGExpr.cpp                | 374
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp             |  11
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp             |   2
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp         |   4
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp        |   9
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp          | 105
-rw-r--r--  lib/CodeGen/CGObjC.cpp                |  90
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp             |  27
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp         |   2
-rw-r--r--  lib/CodeGen/CGRTTI.cpp                |   6
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp |   8
-rw-r--r--  lib/CodeGen/CGStmt.cpp                |  86
-rw-r--r--  lib/CodeGen/CGValue.h                 |  31
-rw-r--r--  lib/CodeGen/CMakeLists.txt            |   1
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp         |   2
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp       | 171
-rw-r--r--  lib/CodeGen/CodeGenFunction.h         | 153
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp         | 176
-rw-r--r--  lib/CodeGen/CodeGenModule.h           | 117
-rw-r--r--  lib/CodeGen/CodeGenTBAA.cpp           | 137
-rw-r--r--  lib/CodeGen/CodeGenTBAA.h             |  65
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp          |  14
-rw-r--r--  lib/CodeGen/CodeGenTypes.h            |  13
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp         | 220
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp       | 505
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp         |  14
-rw-r--r--  lib/CodeGen/TargetInfo.cpp            | 624
43 files changed, 3087 insertions, 981 deletions
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 35780f1556..df6dc7216a 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -22,6 +22,7 @@ namespace llvm {
namespace clang {
class ASTContext;
+ class TargetInfo;
namespace CodeGen {
class CGFunctionInfo;
@@ -196,6 +197,7 @@ namespace clang {
ASTContext &getContext() const;
llvm::LLVMContext &getVMContext() const;
const llvm::DataLayout &getDataLayout() const;
+ const TargetInfo &getTarget() const;
/// Return the calling convention to use for system runtime
/// functions.
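
The TargetInfo forward declaration and getTarget() accessor added here anchor a refactoring that recurs throughout this diff: call sites stop spelling getContext().getTargetInfo() and use getTarget() directly. A minimal self-contained sketch of the delegation, using stand-in types rather than the real Clang classes:

    // Stand-ins only: the real classes live in clang's TargetInfo.h and
    // CodeGenTypes.h; this just shows the shape of the accessor chain.
    struct TargetInfo {
      unsigned getPointerWidth(unsigned AddrSpace) const { return 64; }
    };

    struct CodeGenTypes {
      const TargetInfo &TI;
      const TargetInfo &getTarget() const { return TI; }
    };

    struct ABIInfo {
      const CodeGenTypes &CGT;
      // One accessor instead of getContext().getTargetInfo() at every use.
      const TargetInfo &getTarget() const { return CGT.getTarget(); }
    };
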
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index ab65801ce5..45079c0989 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -302,14 +302,19 @@ void EmitAssemblyHelper::CreatePasses(TargetMachine *TM) {
// Set up the per-module pass manager.
PassManager *MPM = getPerModulePasses(TM);
- if (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes) {
- MPM->add(createGCOVProfilerPass(CodeGenOpts.EmitGcovNotes,
- CodeGenOpts.EmitGcovArcs,
- CodeGenOpts.CoverageVersion,
- CodeGenOpts.CoverageExtraChecksum,
- CodeGenOpts.DisableRedZone,
- CodeGenOpts.CoverageFunctionNamesInData));
-
+ if (!CodeGenOpts.DisableGCov &&
+ (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)) {
+ // Not using 'GCOVOptions::getDefault' allows us to avoid exiting if
+ // LLVM's -default-gcov-version flag is set to something invalid.
+ GCOVOptions Options;
+ Options.EmitNotes = CodeGenOpts.EmitGcovNotes;
+ Options.EmitData = CodeGenOpts.EmitGcovArcs;
+ memcpy(Options.Version, CodeGenOpts.CoverageVersion, 4);
+ Options.UseCfgChecksum = CodeGenOpts.CoverageExtraChecksum;
+ Options.NoRedZone = CodeGenOpts.DisableRedZone;
+ Options.FunctionNamesInData =
+ !CodeGenOpts.CoverageNoFunctionNamesInData;
+ MPM->add(createGCOVProfilerPass(Options));
if (CodeGenOpts.getDebugInfo() == CodeGenOptions::NoDebugInfo)
MPM->add(createStripSymbolsPass(true));
}
@@ -452,6 +457,7 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
Options.TrapFuncName = CodeGenOpts.TrapFuncName;
Options.PositionIndependentExecutable = LangOpts.PIELevel != 0;
Options.SSPBufferSize = CodeGenOpts.SSPBufferSize;
+ Options.EnableSegmentedStacks = CodeGenOpts.EnableSegmentedStacks;
TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
FeaturesStr, Options,
@@ -522,6 +528,7 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
Action != Backend_EmitBC &&
Action != Backend_EmitLL);
TargetMachine *TM = CreateTargetMachine(UsesCodeGen);
+ if (UsesCodeGen && !TM) return;
CreatePasses(TM);
switch (Action) {
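
The GCOV hunk above populates a GCOVOptions struct field-by-field instead of starting from GCOVOptions::getDefault(), precisely so that an invalid -default-gcov-version value never triggers the hard exit inside getDefault(). A self-contained analogue of that pattern in plain C++ (illustrative names and the "402*" default are assumptions, not the LLVM API):

    #include <cstring>

    struct GcovOpts {
      bool EmitNotes = false;
      bool EmitData = false;
      char Version[4] = {'4', '0', '2', '*'};
      bool UseCfgChecksum = false;
      bool NoRedZone = false;
      bool FunctionNamesInData = true;
    };

    // Mirror of the hunk: every field comes from the frontend's options, and
    // no defaulting helper that can exit() is ever consulted.
    GcovOpts makeGcovOpts(bool notes, bool data, const char version[4],
                          bool extraChecksum, bool disableRedZone,
                          bool noNamesInData) {
      GcovOpts O;
      O.EmitNotes = notes;
      O.EmitData = data;
      std::memcpy(O.Version, version, 4);
      O.UseCfgChecksum = extraChecksum;
      O.NoRedZone = disableRedZone;
      O.FunctionNamesInData = !noNamesInData;
      return O;
    }
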
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 817d5c4cc6..0b48a5c664 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -327,7 +327,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
unsigned Align = alignChars.getQuantity();
unsigned MaxInlineWidthInBits =
- getContext().getTargetInfo().getMaxAtomicInlineWidth();
+ getTarget().getMaxAtomicInlineWidth();
bool UseLibcall = (Size != Align ||
getContext().toBits(sizeChars) > MaxInlineWidthInBits);
@@ -483,15 +483,6 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
getContext().VoidPtrTy);
break;
-#if 0
- // These are only defined for 1-16 byte integers. It is not clear what
- // their semantics would be on anything else...
- case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
- case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
- case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
- case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
- case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
-#endif
default: return EmitUnsupportedRValue(E, "atomic library call");
}
// order is always the last parameter
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index b9f466117c..ded019e64a 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -695,8 +695,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
llvm::Constant *blockFn
= CodeGenFunction(CGM, true).GenerateBlockFunction(CurGD, blockInfo,
- CurFuncDecl, LocalDeclMap,
- isLambdaConv);
+ LocalDeclMap,
+ isLambdaConv);
blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
// If there is nothing to capture, we can emit this as a global block.
@@ -753,6 +753,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (capture.isConstant()) continue;
QualType type = variable->getType();
+ CharUnits align = getContext().getDeclAlign(variable);
// This will be a [[type]]*, except that a byref entry will just be
// an i8**.
@@ -796,21 +797,21 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (ci->isByRef()) {
// Get a void* that points to the byref struct.
if (ci->isNested())
- src = Builder.CreateLoad(src, "byref.capture");
+ src = Builder.CreateAlignedLoad(src, align.getQuantity(),
+ "byref.capture");
else
src = Builder.CreateBitCast(src, VoidPtrTy);
// Write that void* into the capture field.
- Builder.CreateStore(src, blockField);
+ Builder.CreateAlignedStore(src, blockField, align.getQuantity());
// If we have a copy constructor, evaluate that into the block field.
} else if (const Expr *copyExpr = ci->getCopyExpr()) {
if (blockDecl->isConversionFromLambda()) {
// If we have a lambda conversion, emit the expression
// directly into the block instead.
- CharUnits Align = getContext().getTypeAlignInChars(type);
AggValueSlot Slot =
- AggValueSlot::forAddr(blockField, Align, Qualifiers(),
+ AggValueSlot::forAddr(blockField, align, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -821,7 +822,27 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// If it's a reference variable, copy the reference into the block field.
} else if (type->isReferenceType()) {
- Builder.CreateStore(Builder.CreateLoad(src, "ref.val"), blockField);
+ llvm::Value *ref =
+ Builder.CreateAlignedLoad(src, align.getQuantity(), "ref.val");
+ Builder.CreateAlignedStore(ref, blockField, align.getQuantity());
+
+ // If this is an ARC __strong block-pointer variable, don't do a
+ // block copy.
+ //
+ // TODO: this can be generalized into the normal initialization logic:
+ // we should never need to do a block-copy when initializing a local
+ // variable, because the local variable's lifetime should be strictly
+ // contained within the stack block's.
+ } else if (type.getObjCLifetime() == Qualifiers::OCL_Strong &&
+ type->isBlockPointerType()) {
+ // Load the block and do a simple retain.
+ LValue srcLV = MakeAddrLValue(src, type, align);
+ llvm::Value *value = EmitLoadOfScalar(srcLV);
+ value = EmitARCRetainNonBlock(value);
+
+ // Do a primitive store to the block field.
+ LValue destLV = MakeAddrLValue(blockField, type, align);
+ EmitStoreOfScalar(value, destLV, /*init*/ true);
// Otherwise, fake up a POD copy into the block field.
} else {
@@ -839,8 +860,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
&declRef, VK_RValue);
EmitExprAsInit(&l2r, &blockFieldPseudoVar,
- MakeAddrLValue(blockField, type,
- getContext().getDeclAlign(variable)),
+ MakeAddrLValue(blockField, type, align),
/*captured by init*/ false);
}
@@ -1014,7 +1034,7 @@ CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(),
blockInfo,
- 0, LocalDeclMap,
+ LocalDeclMap,
false);
}
blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
@@ -1068,7 +1088,6 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
llvm::Function *
CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &blockInfo,
- const Decl *outerFnDecl,
const DeclMapTy &ldm,
bool IsLambdaConversionToBlock) {
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
@@ -1128,7 +1147,6 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
// Begin generating the function.
StartFunction(blockDecl, fnType->getResultType(), fn, fnInfo, args,
blockInfo.getBlockExpr()->getBody()->getLocStart());
- CurFuncDecl = outerFnDecl; // StartFunction sets this to blockDecl
// Okay. Undo some of what StartFunction did.
@@ -1138,6 +1156,22 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
BlockPointer = Builder.CreateBitCast(blockAddr,
blockInfo.StructureType->getPointerTo(),
"block");
+ // At -O0 we generate an explicit alloca for the BlockPointer, so the RA
+ // won't delete the dbg.declare intrinsics for captured variables.
+ llvm::Value *BlockPointerDbgLoc = BlockPointer;
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ // Allocate a stack slot for it, so we can point the debugger to it
+ llvm::AllocaInst *Alloca = CreateTempAlloca(BlockPointer->getType(),
+ "block.addr");
+ unsigned Align = getContext().getDeclAlign(&selfDecl).getQuantity();
+ Alloca->setAlignment(Align);
+ // Set the DebugLocation to empty, so the store is recognized as a
+ // frame setup instruction by llvm::DwarfDebug::beginFunction().
+ Builder.DisableDebugLocations();
+ Builder.CreateAlignedStore(BlockPointer, Alloca, Align);
+ Builder.EnableDebugLocations();
+ BlockPointerDbgLoc = Alloca;
+ }
// If we have a C++ 'this' reference, go ahead and force it into
// existence now.
@@ -1148,22 +1182,6 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
CXXThisValue = Builder.CreateLoad(addr, "this");
}
- // LoadObjCSelf() expects there to be an entry for 'self' in LocalDeclMap;
- // appease it.
- if (const ObjCMethodDecl *method
- = dyn_cast_or_null<ObjCMethodDecl>(CurFuncDecl)) {
- const VarDecl *self = method->getSelfDecl();
-
- // There might not be a capture for 'self', but if there is...
- if (blockInfo.Captures.count(self)) {
- const CGBlockInfo::Capture &capture = blockInfo.getCapture(self);
- llvm::Value *selfAddr = Builder.CreateStructGEP(BlockPointer,
- capture.getIndex(),
- "block.captured-self");
- LocalDeclMap[self] = selfAddr;
- }
- }
-
// Also force all the constant captures.
for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
ce = blockDecl->capture_end(); ci != ce; ++ci) {
@@ -1177,7 +1195,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
CreateMemTemp(variable->getType(), "block.captured-const");
alloca->setAlignment(align);
- Builder.CreateStore(capture.getConstant(), alloca, align);
+ Builder.CreateAlignedStore(capture.getConstant(), alloca, align);
LocalDeclMap[variable] = alloca;
}
@@ -1216,13 +1234,13 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
continue;
}
- DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointer,
+ DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointerDbgLoc,
Builder, blockInfo);
}
}
// Recover location if it was changed in the above loop.
DI->EmitLocation(Builder,
- cast<CompoundStmt>(blockDecl->getBody())->getRBracLoc());
+ cast<CompoundStmt>(blockDecl->getBody())->getRBracLoc());
}
// And resume where we left off.
@@ -1297,7 +1315,6 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
SourceLocation(),
SourceLocation(), II, C.VoidTy, 0,
SC_Static,
- SC_None,
false,
false);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
@@ -1472,7 +1489,6 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
SourceLocation(),
SourceLocation(), II, C.VoidTy, 0,
SC_Static,
- SC_None,
false, false);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
@@ -1547,7 +1563,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
// Destroy strong objects with a call if requested.
} else if (useARCStrongDestroy) {
- EmitARCDestroyStrong(srcField, /*precise*/ false);
+ EmitARCDestroyStrong(srcField, ARCImpreciseLifetime);
// Otherwise we call _Block_object_dispose. It wouldn't be too
// hard to just emit this as a cleanup if we wanted to make sure
@@ -1656,7 +1672,7 @@ public:
}
void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
- CGF.EmitARCDestroyStrong(field, /*precise*/ false);
+ CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime);
}
void profileImpl(llvm::FoldingSetNodeID &id) const {
@@ -1686,7 +1702,7 @@ public:
}
void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
- CGF.EmitARCDestroyStrong(field, /*precise*/ false);
+ CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime);
}
void profileImpl(llvm::FoldingSetNodeID &id) const {
@@ -1763,7 +1779,6 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
SourceLocation(),
SourceLocation(), II, R, 0,
SC_Static,
- SC_None,
false, false);
// Initialize debug info if necessary.
@@ -1838,7 +1853,6 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
SourceLocation(),
SourceLocation(), II, R, 0,
SC_Static,
- SC_None,
false, false);
// Initialize debug info if necessary.
CGF.maybeInitializeDebugInfo();
@@ -2047,7 +2061,8 @@ llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
bool Packed = false;
CharUnits Align = getContext().getDeclAlign(D);
- if (Align > getContext().toCharUnitsFromBits(Target.getPointerAlign(0))) {
+ if (Align >
+ getContext().toCharUnitsFromBits(getTarget().getPointerAlign(0))) {
// We have to insert padding.
// The struct above has 2 32-bit integers.
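
Several hunks in CGBlocks.cpp above replace plain CreateLoad/CreateStore with aligned variants driven by getDeclAlign(variable), so the capture copy into the block literal carries the variable's declared alignment. A hedged sketch against the LLVM 3.3-era IRBuilder signatures (which took a raw unsigned alignment); copyCapture is a hypothetical helper, not a Clang function:

    #include "llvm/IR/IRBuilder.h"

    // Copy one captured variable into its slot in the block literal,
    // propagating the declared alignment onto both memory operations.
    llvm::Value *copyCapture(llvm::IRBuilder<> &B, llvm::Value *srcAddr,
                             llvm::Value *blockField, unsigned Align) {
      llvm::Value *V = B.CreateAlignedLoad(srcAddr, Align, "capture.val");
      B.CreateAlignedStore(V, blockField, Align);
      return V;
    }
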
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index e9c1431d30..a655966d6e 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -296,7 +296,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *ZeroUndef = Builder.getInt1(Target.isCLZForZeroUndef());
+ Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
@@ -313,7 +313,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *ZeroUndef = Builder.getInt1(Target.isCLZForZeroUndef());
+ Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
@@ -1434,7 +1434,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
if (const char *Prefix =
- llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
+ llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()))
IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
if (IntrinsicID != Intrinsic::not_intrinsic) {
@@ -1505,7 +1505,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- switch (Target.getTriple().getArch()) {
+ switch (getTarget().getTriple().getArch()) {
+ case llvm::Triple::aarch64:
+ return EmitAArch64BuiltinExpr(BuiltinID, E);
case llvm::Triple::arm:
case llvm::Triple::thumb:
return EmitARMBuiltinExpr(BuiltinID, E);
@@ -1625,6 +1627,25 @@ CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
return std::make_pair(EmitScalarExpr(Addr), Align);
}
+Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ if (BuiltinID == AArch64::BI__clear_cache) {
+ assert(E->getNumArgs() == 2 &&
+ "Variadic __clear_cache slipped through on AArch64");
+
+ const FunctionDecl *FD = E->getDirectCallee();
+ SmallVector<Value *, 2> Ops;
+ for (unsigned i = 0; i < E->getNumArgs(); i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ StringRef Name = FD->getName();
+ return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
+ }
+
+ return 0;
+}
+
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == ARM::BI__clear_cache) {
@@ -1856,7 +1877,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Generate target-independent intrinsic; also need to add second argument
// for whether or not clz of zero is undefined; on ARM it isn't.
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ty);
- Ops.push_back(Builder.getInt1(Target.isCLZForZeroUndef()));
+ Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
return EmitNeonCall(F, Ops, "vclz");
}
case ARM::BI__builtin_neon_vcnt_v:
@@ -2713,7 +2734,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_rdrand16_step:
case X86::BI__builtin_ia32_rdrand32_step:
- case X86::BI__builtin_ia32_rdrand64_step: {
+ case X86::BI__builtin_ia32_rdrand64_step:
+ case X86::BI__builtin_ia32_rdseed16_step:
+ case X86::BI__builtin_ia32_rdseed32_step:
+ case X86::BI__builtin_ia32_rdseed64_step: {
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
@@ -2726,6 +2750,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_rdrand64_step:
ID = Intrinsic::x86_rdrand_64;
break;
+ case X86::BI__builtin_ia32_rdseed16_step:
+ ID = Intrinsic::x86_rdseed_16;
+ break;
+ case X86::BI__builtin_ia32_rdseed32_step:
+ ID = Intrinsic::x86_rdseed_32;
+ break;
+ case X86::BI__builtin_ia32_rdseed64_step:
+ ID = Intrinsic::x86_rdseed_64;
+ break;
}
Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
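
The new EmitAArch64BuiltinExpr above starts out handling only __clear_cache, lowering it to a nounwind runtime call. For illustration (not part of the patch), the GCC-compatible source form that reaches this path:

    // Flush the instruction cache over [begin, end), e.g. after JITting code.
    void flush_jit_region(char *begin, char *end) {
      __builtin___clear_cache(begin, end);
    }
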
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index f9fea57ea6..68fecb2d0c 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -19,8 +19,7 @@ using namespace CodeGen;
CGCXXABI::~CGCXXABI() { }
-static void ErrorUnsupportedABI(CodeGenFunction &CGF,
- StringRef S) {
+void CGCXXABI::ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S) {
DiagnosticsEngine &Diags = CGF.CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot yet compile %0 in this ABI");
@@ -29,8 +28,7 @@ static void ErrorUnsupportedABI(CodeGenFunction &CGF,
<< S;
}
-static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
- QualType T) {
+llvm::Constant *CGCXXABI::GetBogusMemberPointer(QualType T) {
return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
}
@@ -67,12 +65,12 @@ llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src) {
ErrorUnsupportedABI(CGF, "member function pointer conversions");
- return GetBogusMemberPointer(CGM, E->getType());
+ return GetBogusMemberPointer(E->getType());
}
llvm::Constant *CGCXXABI::EmitMemberPointerConversion(const CastExpr *E,
llvm::Constant *Src) {
- return GetBogusMemberPointer(CGM, E->getType());
+ return GetBogusMemberPointer(E->getType());
}
llvm::Value *
@@ -95,22 +93,22 @@ CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
llvm::Constant *
CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
- return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+ return GetBogusMemberPointer(QualType(MPT, 0));
}
llvm::Constant *CGCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
- return GetBogusMemberPointer(CGM,
+ return GetBogusMemberPointer(
CGM.getContext().getMemberPointerType(MD->getType(),
MD->getParent()->getTypeForDecl()));
}
llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset) {
- return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+ return GetBogusMemberPointer(QualType(MPT, 0));
}
llvm::Constant *CGCXXABI::EmitMemberPointer(const APValue &MP, QualType MPT) {
- return GetBogusMemberPointer(CGM, MPT);
+ return GetBogusMemberPointer(MPT);
}
bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
@@ -222,8 +220,12 @@ void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
}
void CGCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
+ const VarDecl &D,
llvm::Constant *dtor,
llvm::Constant *addr) {
+ if (D.getTLSKind())
+ CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
+
// The default behavior is to use atexit.
CGF.registerGlobalDtorWithAtExit(dtor, addr);
}
@@ -257,3 +259,14 @@ llvm::BasicBlock *CGCXXABI::EmitCtorCompleteObjectHandler(
ErrorUnsupportedABI(CGF, "complete object detection in ctor");
return 0;
}
+
+void CGCXXABI::EmitThreadLocalInitFuncs(
+ llvm::ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
+ llvm::Function *InitFunc) {
+}
+
+LValue CGCXXABI::EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
+ const DeclRefExpr *DRE) {
+ ErrorUnsupportedABI(CGF, "odr-use of thread_local global");
+ return LValue();
+}
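
The registerGlobalDtor check and the EmitThreadLocal* hooks added above exist for C++11 thread_local support. As a hedged illustration (not from the patch), the kind of translation unit that reaches them: a thread_local with non-trivial destruction hits the registerGlobalDtor diagnostic in the default ABI, and any odr-use from a function goes through EmitThreadLocalDeclRefExpr:

    struct Logger {
      ~Logger() {}                 // user-provided, hence non-trivial
    };

    thread_local Logger TLSLog;    // needs per-thread destruction

    Logger &current() {
      return TLSLog;               // odr-use of a thread_local global
    }
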
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index cdc87b70e5..1e4da631d6 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -54,6 +54,12 @@ protected:
return CGF.CXXABIThisValue;
}
+ /// Issue a diagnostic about unsupported features in the ABI.
+ void ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S);
+
+ /// Get a null value for unsupported member pointers.
+ llvm::Constant *GetBogusMemberPointer(QualType T);
+
// FIXME: Every place that calls getVTT{Decl,Value} is something
// that needs to be abstracted properly.
ImplicitParamDecl *&getVTTDecl(CodeGenFunction &CGF) {
@@ -91,6 +97,31 @@ public:
return *MangleCtx;
}
+ /// Returns true if the given instance method is one of the
+ /// kinds that the ABI says returns 'this'.
+ virtual bool HasThisReturn(GlobalDecl GD) const { return false; }
+
+ /// Returns true if the given record type should be returned indirectly.
+ virtual bool isReturnTypeIndirect(const CXXRecordDecl *RD) const = 0;
+
+ /// Specify how one should pass an argument of a record type.
+ enum RecordArgABI {
+ /// Pass it using the normal C aggregate rules for the ABI, potentially
+ /// introducing extra copies and passing some or all of it in registers.
+ RAA_Default = 0,
+
+ /// Pass it on the stack using its defined layout. The argument must be
+ /// evaluated directly into the correct stack position in the arguments area,
+ /// and the call machinery must not move it or introduce extra copies.
+ RAA_DirectInMemory,
+
+ /// Pass it as a pointer to temporary memory.
+ RAA_Indirect
+ };
+
+ /// Returns how an argument of the given record type should be passed.
+ virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const = 0;
+
/// Find the LLVM type used to represent the given member pointer
/// type.
virtual llvm::Type *
@@ -209,7 +240,8 @@ public:
/// Emit the ABI-specific prolog for the function.
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
- virtual void EmitConstructorCall(CodeGenFunction &CGF,
+ /// Emit the constructor call. Return the function that is called.
+ virtual llvm::Value *EmitConstructorCall(CodeGenFunction &CGF,
const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
bool Delegating,
@@ -319,8 +351,27 @@ public:
///
/// \param dtor - a function taking a single pointer argument
/// \param addr - a pointer to pass to the destructor function.
- virtual void registerGlobalDtor(CodeGenFunction &CGF, llvm::Constant *dtor,
- llvm::Constant *addr);
+ virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *dtor, llvm::Constant *addr);
+
+ /*************************** thread_local initialization ********************/
+
+ /// Emits ABI-required functions necessary to initialize thread_local
+ /// variables in this translation unit.
+ ///
+ /// \param Decls The thread_local declarations in this translation unit.
+ /// \param InitFunc If this translation unit contains any non-constant
+ /// initialization or non-trivial destruction for thread_local
+ /// variables, a function to perform the initialization. Otherwise, 0.
+ virtual void EmitThreadLocalInitFuncs(
+ llvm::ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
+ llvm::Function *InitFunc);
+
+ /// Emit a reference to a non-local thread_local variable (including
+ /// triggering the initialization of all thread_local variables in its
+ /// translation unit).
+ virtual LValue EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
+ const DeclRefExpr *DRE);
};
// Create an instance of a C++ ABI class:
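
The RecordArgABI enum added above classifies by-value record arguments. A hedged illustration of the C++ types that typically land in each bucket; the real classification is made per-ABI by getRecordArgABI overrides, so these comments describe common behavior, not a guarantee:

    struct Pod { int x, y; };      // typically RAA_Default: plain C aggregate rules

    struct Watched {
      Watched() {}
      Watched(const Watched &) {}  // non-trivial copy: the 32-bit MS ABI
      ~Watched() {}                // constructs it in its stack slot
    };                             // (RAA_DirectInMemory), while Itanium
                                   // passes a pointer to a hidden copy
                                   // (RAA_Indirect)
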
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index fa5ce7fdca..b0f460ec0e 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -77,9 +77,7 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
// When translating an unprototyped function type, always use a
// variadic type.
return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
- ArrayRef<CanQualType>(),
- FTNP->getExtInfo(),
- RequiredArgs(0));
+ None, FTNP->getExtInfo(), RequiredArgs(0));
}
/// Arrange the LLVM function layout for a value of the given function
@@ -257,10 +255,8 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
// non-variadic type.
if (isa<FunctionNoProtoType>(FTy)) {
CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
- return arrangeLLVMFunctionInfo(noProto->getResultType(),
- ArrayRef<CanQualType>(),
- noProto->getExtInfo(),
- RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(noProto->getResultType(), None,
+ noProto->getExtInfo(), RequiredArgs::All);
}
assert(isa<FunctionProtoType>(FTy));
@@ -420,7 +416,7 @@ CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
}
const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
- return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
+ return arrangeLLVMFunctionInfo(getContext().VoidTy, None,
FunctionType::ExtInfo(), RequiredArgs::All);
}
@@ -837,12 +833,11 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
default:
return false;
case BuiltinType::Float:
- return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
+ return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
case BuiltinType::Double:
- return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
+ return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
case BuiltinType::LongDouble:
- return getContext().getTargetInfo().useObjCFPRetForRealType(
- TargetInfo::LongDouble);
+ return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
}
}
@@ -853,7 +848,7 @@ bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
if (BT->getKind() == BuiltinType::LongDouble)
- return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
+ return getTarget().useObjCFP2RetForComplexLongDouble();
}
}
@@ -1027,63 +1022,27 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
} else {
// Attributes that should go on the function, but not the call site.
- if (!CodeGenOpts.CodeModel.empty())
- FuncAttrs.addAttribute("code-model", CodeGenOpts.CodeModel);
- if (!CodeGenOpts.RelocationModel.empty())
- FuncAttrs.addAttribute("relocation-model", CodeGenOpts.RelocationModel);
-
- if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
- FuncAttrs.addAttribute("float-abi", "soft");
- else if (CodeGenOpts.FloatABI == "hard")
- FuncAttrs.addAttribute("float-abi", "hard");
-
if (!CodeGenOpts.DisableFPElim) {
- /* ignore */ ;
+ FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
+ FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "false");
} else if (CodeGenOpts.OmitLeafFramePointer) {
- FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
+ FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
+ FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
} else {
- FuncAttrs.addAttribute("no-frame-pointer-elim");
- FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
+ FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
+ FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
}
- switch (CodeGenOpts.getFPContractMode()) {
- case CodeGenOptions::FPC_Off:
- FuncAttrs.addAttribute("fp-contract-model", "strict");
- break;
- case CodeGenOptions::FPC_On:
- FuncAttrs.addAttribute("fp-contract-model", "standard");
- break;
- case CodeGenOptions::FPC_Fast:
- FuncAttrs.addAttribute("fp-contract-model", "fast");
- break;
- }
-
- if (CodeGenOpts.LessPreciseFPMAD)
- FuncAttrs.addAttribute("less-precise-fpmad");
- if (CodeGenOpts.NoInfsFPMath)
- FuncAttrs.addAttribute("no-infs-fp-math");
- if (CodeGenOpts.NoNaNsFPMath)
- FuncAttrs.addAttribute("no-nans-fp-math");
- if (CodeGenOpts.NoZeroInitializedInBSS)
- FuncAttrs.addAttribute("no-zero-init-in-bss");
- if (CodeGenOpts.UnsafeFPMath)
- FuncAttrs.addAttribute("unsafe-fp-math");
- if (CodeGenOpts.SoftFloat)
- FuncAttrs.addAttribute("use-soft-float");
- if (CodeGenOpts.StackAlignment)
- FuncAttrs.addAttribute("stack-align-override",
- llvm::utostr(CodeGenOpts.StackAlignment));
- if (CodeGenOpts.StackRealignment)
- FuncAttrs.addAttribute("realign-stack");
- if (CodeGenOpts.DisableTailCalls)
- FuncAttrs.addAttribute("disable-tail-calls");
- if (!CodeGenOpts.TrapFuncName.empty())
- FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
- if (LangOpts.PIELevel != 0)
- FuncAttrs.addAttribute("pie");
- if (CodeGenOpts.SSPBufferSize)
- FuncAttrs.addAttribute("ssp-buffers-size",
- llvm::utostr(CodeGenOpts.SSPBufferSize));
+ FuncAttrs.addAttribute("less-precise-fpmad",
+ CodeGenOpts.LessPreciseFPMAD ? "true" : "false");
+ FuncAttrs.addAttribute("no-infs-fp-math",
+ CodeGenOpts.NoInfsFPMath ? "true" : "false");
+ FuncAttrs.addAttribute("no-nans-fp-math",
+ CodeGenOpts.NoNaNsFPMath ? "true" : "false");
+ FuncAttrs.addAttribute("unsafe-fp-math",
+ CodeGenOpts.UnsafeFPMath ? "true" : "false");
+ FuncAttrs.addAttribute("use-soft-float",
+ CodeGenOpts.SoftFloat ? "true" : "false");
}
QualType RetTy = FI.getReturnType();
@@ -1233,7 +1192,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// initialize the return value. TODO: it might be nice to have
// a more general mechanism for this that didn't require synthesized
// return statements.
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
if (FD->hasImplicitReturnZero()) {
QualType RetTy = FD->getResultType().getUnqualifiedType();
llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
@@ -1650,7 +1609,20 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
return store;
}
-void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
+/// Check whether 'this' argument of a callsite matches 'this' of the caller.
+static bool checkThisPointer(llvm::Value *ThisArg, llvm::Value *This) {
+ if (ThisArg == This)
+ return true;
+ // Check whether ThisArg is a bitcast of This.
+ llvm::BitCastInst *Bitcast;
+ if ((Bitcast = dyn_cast<llvm::BitCastInst>(ThisArg)) &&
+ Bitcast->getOperand(0) == This)
+ return true;
+ return false;
+}
+
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
+ bool EmitRetDbgLoc) {
// Functions with no result always return void.
if (ReturnValue == 0) {
Builder.CreateRetVoid();
@@ -1695,8 +1667,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// If there is a dominating store to ReturnValue, we can elide
// the load, zap the store, and usually zap the alloca.
if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
+ // Reuse the debug location from the store unless we're told not to.
+ if (EmitRetDbgLoc)
+ RetDbgLoc = SI->getDebugLoc();
// Get the stored value and nuke the now-dead store.
- RetDbgLoc = SI->getDebugLoc();
RV = SI->getValueOperand();
SI->eraseFromParent();
@@ -1741,6 +1715,19 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
llvm_unreachable("Invalid ABI kind for return argument");
}
+ // If this function returns 'this', the last instruction is a CallInst
+ // that returns 'this', and 'this' argument of the CallInst points to
+ // the same object as CXXThisValue, use the return value from the CallInst.
+ // We will not need to keep 'this' alive through the callsite. It also enables
+ // optimizations in the backend, such as tail call optimization.
+ if (CalleeWithThisReturn && CGM.getCXXABI().HasThisReturn(CurGD)) {
+ llvm::BasicBlock *IP = Builder.GetInsertBlock();
+ llvm::CallInst *Callsite;
+ if (!IP->empty() && (Callsite = dyn_cast<llvm::CallInst>(&IP->back())) &&
+ Callsite->getCalledFunction() == CalleeWithThisReturn &&
+ checkThisPointer(Callsite->getOperand(0), CXXThisValue))
+ RV = Builder.CreateBitCast(Callsite, RetAI.getCoerceToType());
+ }
llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
if (!RetDbgLoc.isUnknown())
Ret->setDebugLoc(RetDbgLoc);
@@ -1782,7 +1769,8 @@ static bool isProvablyNonNull(llvm::Value *addr) {
/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
const CallArgList::Writeback &writeback) {
- llvm::Value *srcAddr = writeback.Address;
+ const LValue &srcLV = writeback.Source;
+ llvm::Value *srcAddr = srcLV.getAddress();
assert(!isProvablyNull(srcAddr) &&
"shouldn't have writeback for provably null argument");
@@ -1809,9 +1797,35 @@ static void emitWriteback(CodeGenFunction &CGF,
"icr.writeback-cast");
// Perform the writeback.
- QualType srcAddrType = writeback.AddressType;
- CGF.EmitStoreThroughLValue(RValue::get(value),
- CGF.MakeAddrLValue(srcAddr, srcAddrType));
+
+ // If we have a "to use" value, it's something we need to emit a use
+ // of. This has to be carefully threaded in: if it's done after the
+ // release it's potentially undefined behavior (and the optimizer
+ // will ignore it), and if it happens before the retain then the
+ // optimizer could move the release there.
+ if (writeback.ToUse) {
+ assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
+
+ // Retain the new value. No need to block-copy here: the block's
+ // being passed up the stack.
+ value = CGF.EmitARCRetainNonBlock(value);
+
+ // Emit the intrinsic use here.
+ CGF.EmitARCIntrinsicUse(writeback.ToUse);
+
+ // Load the old value (primitively).
+ llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV);
+
+ // Put the new value in place (primitively).
+ CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
+
+ // Release the old value.
+ CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
+
+ // Otherwise, we can just do a normal lvalue store.
+ } else {
+ CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
+ }
// Jump to the continuation block.
if (!provablyNonNull)
@@ -1825,11 +1839,33 @@ static void emitWritebacks(CodeGenFunction &CGF,
emitWriteback(CGF, *i);
}
+static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
+ if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
+ if (uop->getOpcode() == UO_AddrOf)
+ return uop->getSubExpr();
+ return 0;
+}
+
/// Emit an argument that's being passed call-by-writeback. That is,
/// we are passing the address of
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
const ObjCIndirectCopyRestoreExpr *CRE) {
- llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
+ LValue srcLV;
+
+ // Make an optimistic effort to emit the address as an l-value.
+ // This can fail if the argument expression is more complicated.
+ if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
+ srcLV = CGF.EmitLValue(lvExpr);
+
+ // Otherwise, just emit it as a scalar.
+ } else {
+ llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
+
+ QualType srcAddrType =
+ CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
+ srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
+ }
+ llvm::Value *srcAddr = srcLV.getAddress();
// The dest and src types don't necessarily match in LLVM terms
// because of the crazy ObjC compatibility rules.
@@ -1844,9 +1880,6 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
return;
}
- QualType srcAddrType =
- CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
-
// Create the temporary.
llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
"icr.temp");
@@ -1866,6 +1899,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
}
llvm::BasicBlock *contBB = 0;
+ llvm::BasicBlock *originBB = 0;
// If the address is *not* known to be non-null, we need to switch.
llvm::Value *finalArgument;
@@ -1883,6 +1917,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// If we need to copy, then the load has to be conditional, which
// means we need control flow.
if (shouldCopy) {
+ originBB = CGF.Builder.GetInsertBlock();
contBB = CGF.createBasicBlock("icr.cont");
llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
@@ -1891,9 +1926,10 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
}
}
+ llvm::Value *valueToUse = 0;
+
// Perform a copy if necessary.
if (shouldCopy) {
- LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
assert(srcRV.isScalar());
@@ -1903,15 +1939,37 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// Use an ordinary store, not a store-to-lvalue.
CGF.Builder.CreateStore(src, temp);
+
+ // If optimization is enabled, and the value was held in a
+ // __strong variable, we need to tell the optimizer that this
+ // value has to stay alive until we're doing the store back.
+ // This is because the temporary is effectively unretained,
+ // and so otherwise we can violate the high-level semantics.
+ if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
+ srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ valueToUse = src;
+ }
}
// Finish the control flow if we needed it.
if (shouldCopy && !provablyNonNull) {
+ llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
CGF.EmitBlock(contBB);
+
+ // Make a phi for the value to intrinsically use.
+ if (valueToUse) {
+ llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
+ "icr.to-use");
+ phiToUse->addIncoming(valueToUse, copyBB);
+ phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
+ originBB);
+ valueToUse = phiToUse;
+ }
+
condEval.end(CGF);
}
- args.addWriteback(srcAddr, srcAddrType, temp);
+ args.addWriteback(srcLV, temp, valueToUse);
args.add(RValue::get(finalArgument), CRE->getType());
}
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 0b68617f3d..85c3320ec0 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -56,14 +56,15 @@ namespace CodeGen {
public SmallVector<CallArg, 16> {
public:
struct Writeback {
- /// The original argument.
- llvm::Value *Address;
-
- /// The pointee type of the original argument.
- QualType AddressType;
+ /// The original argument. Note that the argument l-value
+ /// is potentially null.
+ LValue Source;
/// The temporary alloca.
llvm::Value *Temporary;
+
+ /// A value to "use" after the writeback, or null.
+ llvm::Value *ToUse;
};
void add(RValue rvalue, QualType type, bool needscopy = false) {
@@ -76,12 +77,12 @@ namespace CodeGen {
other.Writebacks.begin(), other.Writebacks.end());
}
- void addWriteback(llvm::Value *address, QualType addressType,
- llvm::Value *temporary) {
+ void addWriteback(LValue srcLV, llvm::Value *temporary,
+ llvm::Value *toUse) {
Writeback writeback;
- writeback.Address = address;
- writeback.AddressType = addressType;
+ writeback.Source = srcLV;
writeback.Temporary = temporary;
+ writeback.ToUse = toUse;
Writebacks.push_back(writeback);
}
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 287d164cb9..3fd075701d 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -290,7 +290,7 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
return 0;
}
- const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent();
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
llvm::Value *VTT;
@@ -710,7 +710,7 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// Before we go any further, try the complete->base constructor
// delegation optimization.
if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
- CGM.getContext().getTargetInfo().getCXXABI().hasConstructorVariants()) {
+ CGM.getTarget().getCXXABI().hasConstructorVariants()) {
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitLocation(Builder, Ctor->getLocEnd());
EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
@@ -1139,6 +1139,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
InitializeVTablePointers(ClassDecl);
// And finally, initialize class members.
+ FieldConstructionScope FCS(*this, CXXThisValue);
ConstructorMemcpyizer CM(*this, CD, Args);
for (; B != E; B++) {
CXXCtorInitializer *Member = (*B);
@@ -1278,7 +1279,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
EnterDtorCleanups(Dtor, Dtor_Complete);
if (!isTryBody &&
- CGM.getContext().getTargetInfo().getCXXABI().hasDestructorVariants()) {
+ CGM.getTarget().getCXXABI().hasDestructorVariants()) {
EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
/*Delegating=*/false, LoadCXXThis());
break;
@@ -1666,8 +1667,11 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
}
// Non-trivial constructors are handled in an ABI-specific manner.
- CGM.getCXXABI().EmitConstructorCall(*this, D, Type, ForVirtualBase,
- Delegating, This, ArgBeg, ArgEnd);
+ llvm::Value *Callee = CGM.getCXXABI().EmitConstructorCall(*this, D, Type,
+ ForVirtualBase, Delegating, This, ArgBeg, ArgEnd);
+ if (CGM.getCXXABI().HasThisReturn(CurGD) &&
+ CGM.getCXXABI().HasThisReturn(GlobalDecl(D, Type)))
+ CalleeWithThisReturn = Callee;
}
void
@@ -1756,9 +1760,12 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
EmitDelegateCallArg(DelegateArgs, param);
}
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
- CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
- ReturnValueSlot(), DelegateArgs, Ctor);
+ Callee, ReturnValueSlot(), DelegateArgs, Ctor);
+ if (CGM.getCXXABI().HasThisReturn(CurGD) &&
+ CGM.getCXXABI().HasThisReturn(GlobalDecl(Ctor, CtorType)))
+ CalleeWithThisReturn = Callee;
}
namespace {
@@ -1825,6 +1832,9 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
VTT, getContext().getPointerType(getContext().VoidPtrTy),
0, 0);
+ if (CGM.getCXXABI().HasThisReturn(CurGD) &&
+ CGM.getCXXABI().HasThisReturn(GlobalDecl(DD, Type)))
+ CalleeWithThisReturn = Callee;
}
namespace {
@@ -2222,10 +2232,10 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
}
void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
- if (cast<CXXMethodDecl>(CurFuncDecl)->isVariadic()) {
+ if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
// FIXME: Making this work correctly is nasty because it requires either
// cloning the body of the call operator or making the call operator forward.
- CGM.ErrorUnsupported(CurFuncDecl, "lambda conversion to variadic function");
+ CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
return;
}
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 861d31fb7f..ba6b56c267 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -371,7 +371,8 @@ void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
}
/// Pops cleanup blocks until the given savepoint is reached.
-void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
+ SourceLocation EHLoc) {
assert(Old.isValid());
while (EHStack.stable_begin() != Old) {
@@ -383,7 +384,7 @@ void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
bool FallThroughIsBranchThrough =
Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
- PopCleanupBlock(FallThroughIsBranchThrough);
+ PopCleanupBlock(FallThroughIsBranchThrough, EHLoc);
}
}
@@ -532,7 +533,8 @@ static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
-void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
+void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough,
+ SourceLocation EHLoc) {
assert(!EHStack.empty() && "cleanup stack is empty!");
assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
@@ -833,6 +835,9 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Emit the EH cleanup if required.
if (RequiresEHCleanup) {
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, EHLoc);
+
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
EmitBlock(EHEntry);
@@ -840,6 +845,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// We only actually emit the cleanup code if the cleanup is either
// active or was used before it was deactivated.
if (EHActiveFlag || IsActive) {
+
cleanupFlags.setIsForEHCleanup();
EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
}
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index a139597c26..ddcb931e46 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -89,7 +89,7 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
}
/// getContextDescriptor - Get context info for the decl.
-llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) {
+llvm::DIScope CGDebugInfo::getContextDescriptor(const Decl *Context) {
if (!Context)
return TheCU;
@@ -97,20 +97,17 @@ llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) {
I = RegionMap.find(Context);
if (I != RegionMap.end()) {
llvm::Value *V = I->second;
- return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(V));
+ return llvm::DIScope(dyn_cast_or_null<llvm::MDNode>(V));
}
// Check namespace.
if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
- return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl));
+ return getOrCreateNameSpace(NSDecl);
- if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context)) {
- if (!RDecl->isDependentType()) {
- llvm::DIType Ty = getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context))
+ if (!RDecl->isDependentType())
+ return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
getOrCreateMainFile());
- return llvm::DIDescriptor(Ty);
- }
- }
return TheCU;
}
@@ -262,9 +259,9 @@ unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
}
/// getColumnNumber - Get column number for the location.
-unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
+unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) {
// We may not want column information at all.
- if (!CGM.getCodeGenOpts().DebugColumnInfo)
+ if (!Force && !CGM.getCodeGenOpts().DebugColumnInfo)
return 0;
// If the location is invalid then use the current column.
@@ -392,21 +389,12 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
llvm::DIType ISATy = DBuilder.createPointerType(ClassTy, Size);
- llvm::DIType FwdTy =
+ ObjTy =
DBuilder.createStructType(TheCU, "objc_object", getOrCreateMainFile(),
0, 0, 0, 0, llvm::DIType(), llvm::DIArray());
- llvm::TrackingVH<llvm::MDNode> ObjNode(FwdTy);
- SmallVector<llvm::Value *, 1> EltTys;
- llvm::DIType FieldTy =
- DBuilder.createMemberType(llvm::DIDescriptor(ObjNode), "isa",
- getOrCreateMainFile(), 0, Size,
- 0, 0, 0, ISATy);
- EltTys.push_back(FieldTy);
- llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
-
- ObjNode->replaceOperandWith(10, Elements);
- ObjTy = llvm::DIType(ObjNode);
+ ObjTy.setTypeArray(DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
+ ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0, 0, ISATy)));
return ObjTy;
}
case BuiltinType::ObjCSel: {
@@ -651,7 +639,7 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
// Size is always the size of a pointer. We can't use getTypeSize here
// because that does not return the correct value for references.
unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
- uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
+ uint64_t Size = CGM.getTarget().getPointerWidth(AS);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
return DBuilder.createPointerType(CreatePointeeType(PointeeTy, Unit),
@@ -993,7 +981,7 @@ llvm::DIType CGDebugInfo::getOrCreateInstanceMethodType(
const PointerType *ThisPtrTy = cast<PointerType>(ThisPtr);
QualType PointeeTy = ThisPtrTy->getPointeeType();
unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
- uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
+ uint64_t Size = CGM.getTarget().getPointerWidth(AS);
uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
llvm::DIType PointeeType = getOrCreateType(PointeeTy, Unit);
llvm::DIType ThisPtrType = DBuilder.createPointerType(PointeeType, Size, Align);
@@ -1332,15 +1320,16 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
- llvm::DIType FwdDecl = getOrCreateLimitedType(QualType(Ty, 0), DefUnit);
+ llvm::DICompositeType FwdDecl(
+ getOrCreateLimitedType(QualType(Ty, 0), DefUnit));
+ assert(FwdDecl.Verify() &&
+ "The debug type of a RecordType should be a DICompositeType");
if (FwdDecl.isForwardDecl())
return FwdDecl;
- llvm::TrackingVH<llvm::MDNode> FwdDeclNode(FwdDecl);
-
// Push the struct on region stack.
- LexicalBlockStack.push_back(FwdDeclNode);
+ LexicalBlockStack.push_back(&*FwdDecl);
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
// Add this to the completed-type cache while we're completing it recursively.
@@ -1374,19 +1363,10 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
RegionMap.erase(Ty->getDecl());
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
- // FIXME: Magic numbers ahoy! These should be changed when we
- // get some enums in llvm/Analysis/DebugInfo.h to refer to
- // them.
- if (RD->isUnion())
- FwdDeclNode->replaceOperandWith(10, Elements);
- else if (CXXDecl) {
- FwdDeclNode->replaceOperandWith(10, Elements);
- FwdDeclNode->replaceOperandWith(13, TParamsArray);
- } else
- FwdDeclNode->replaceOperandWith(10, Elements);
+ FwdDecl.setTypeArray(Elements, TParamsArray);
- RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDeclNode);
- return llvm::DIType(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+ return FwdDecl;
}
/// CreateType - get objective-c object type.
@@ -1429,7 +1409,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
if (ID->getImplementation())
Flags |= llvm::DIDescriptor::FlagObjcClassComplete;
- llvm::DIType RealDecl =
+ llvm::DICompositeType RealDecl =
DBuilder.createStructType(Unit, ID->getName(), DefUnit,
Line, Size, Align, Flags,
llvm::DIType(), llvm::DIArray(), RuntimeLang);
@@ -1439,9 +1419,8 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
QualType QualTy = QualType(Ty, 0);
CompletedTypeCache[QualTy.getAsOpaquePtr()] = RealDecl;
// Push the struct on region stack.
- llvm::TrackingVH<llvm::MDNode> FwdDeclNode(RealDecl);
- LexicalBlockStack.push_back(FwdDeclNode);
+ LexicalBlockStack.push_back(static_cast<llvm::MDNode*>(RealDecl));
RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
// Convert all the elements.
@@ -1561,7 +1540,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
}
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
- FwdDeclNode->replaceOperandWith(10, Elements);
+ RealDecl.setTypeArray(Elements);
// If the implementation is not yet set, we do not want to mark it
// as complete. An implementation may declare additional
@@ -1570,7 +1549,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
CompletedTypeCache.erase(QualTy.getAsOpaquePtr());
LexicalBlockStack.pop_back();
- return llvm::DIType(FwdDeclNode);
+ return RealDecl;
}
llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) {
@@ -1717,7 +1696,7 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
unsigned Line = getLineNumber(ED->getLocation());
llvm::DIDescriptor EnumContext =
getContextDescriptor(cast<Decl>(ED->getDeclContext()));
- llvm::DIType ClassTy = ED->isScopedUsingClassTag() ?
+ llvm::DIType ClassTy = ED->isFixed() ?
getOrCreateType(ED->getIntegerType(), DefUnit) : llvm::DIType();
llvm::DIType DbgTy =
DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
@@ -1872,9 +1851,9 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
TC = llvm::DIType(cast<llvm::MDNode>(it->second.first));
else
TC = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- Decl->getName(), TheCU, Unit,
- getLineNumber(Decl->getLocation()),
- TheCU.getLanguage());
+ Decl->getName(), TheCU, Unit,
+ getLineNumber(Decl->getLocation()),
+ TheCU.getLanguage());
// Store the forward declaration in the cache.
ObjCInterfaceCache[TyPtr] = std::make_pair(TC, Checksum(Decl));
@@ -1891,7 +1870,7 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
/// Currently the checksum merely consists of the number of ivars.
unsigned CGDebugInfo::Checksum(const ObjCInterfaceDecl
- *InterfaceDecl) {
+ *InterfaceDecl) {
unsigned IvarNo = 0;
for (const ObjCIvarDecl *Ivar = InterfaceDecl->all_declared_ivar_begin();
Ivar != 0; Ivar = Ivar->getNextIvar()) ++IvarNo;
@@ -2041,7 +2020,7 @@ llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
- llvm::TrackingVH<llvm::MDNode> RealDecl;
+ llvm::DICompositeType RealDecl;
if (RD->isUnion())
RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
@@ -2055,14 +2034,14 @@ llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
llvm::DIArray());
} else
RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, llvm::DIType(), llvm::DIArray());
+ Size, Align, 0, llvm::DIType(), llvm::DIArray());
RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = llvm::DIType(RealDecl);
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
if (CXXDecl) {
// A class's primary base or the class itself contains the vtable.
- llvm::MDNode *ContainingType = NULL;
+ llvm::DICompositeType ContainingType;
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
// Seek non-virtual primary base root.
@@ -2074,13 +2053,12 @@ llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
else
break;
}
- ContainingType =
- getOrCreateType(QualType(PBase->getTypeForDecl(), 0), DefUnit);
- }
- else if (CXXDecl->isDynamicClass())
+ ContainingType = llvm::DICompositeType(
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0), DefUnit));
+ } else if (CXXDecl->isDynamicClass())
ContainingType = RealDecl;
- RealDecl->replaceOperandWith(12, ContainingType);
+ RealDecl.setContainingType(ContainingType);
}
return llvm::DIType(RealDecl);
}
@@ -2167,8 +2145,9 @@ llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
// First element is always return type. For 'void' functions it is NULL.
Elts.push_back(getOrCreateType(OMethod->getResultType(), F));
// "self" pointer is always first argument.
- llvm::DIType SelfTy = getOrCreateType(OMethod->getSelfDecl()->getType(), F);
- Elts.push_back(DBuilder.createObjectPointerType(SelfTy));
+ QualType SelfDeclTy = OMethod->getSelfDecl()->getType();
+ llvm::DIType SelfTy = getOrCreateType(SelfDeclTy, F);
+ Elts.push_back(CreateSelfType(SelfDeclTy, SelfTy));
// "_cmd" pointer is always second argument.
llvm::DIType CmdTy = getOrCreateType(OMethod->getCmdDecl()->getType(), F);
Elts.push_back(DBuilder.createArtificialType(CmdTy));
@@ -2224,13 +2203,18 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
}
}
Name = getFunctionName(FD);
- // Use mangled name as linkage name for c/c++ functions.
+ // Use mangled name as linkage name for C/C++ functions.
if (FD->hasPrototype()) {
LinkageName = CGM.getMangledName(GD);
Flags |= llvm::DIDescriptor::FlagPrototyped;
}
+ // No need to replicate the linkage name if it isn't different from the
+ // subprogram name; there's no need to have it at all unless coverage is
+ // enabled or debug info is set to more than just line tables.
if (LinkageName == Name ||
- CGM.getCodeGenOpts().getDebugInfo() <= CodeGenOptions::DebugLineTablesOnly)
+ (!CGM.getCodeGenOpts().EmitGcovArcs &&
+ !CGM.getCodeGenOpts().EmitGcovNotes &&
+ CGM.getCodeGenOpts().getDebugInfo() <= CodeGenOptions::DebugLineTablesOnly))
LinkageName = StringRef();
if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
@@ -2291,7 +2275,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
/// EmitLocation - Emit metadata to indicate a change in line/column
/// information in the source file.
-void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
+void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc,
+ bool ForceColumnInfo) {
// Update our current location
setLocation(Loc);
@@ -2312,9 +2297,10 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
PrevLoc = CurLoc;
llvm::MDNode *Scope = LexicalBlockStack.back();
- Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(CurLoc),
- getColumnNumber(CurLoc),
- Scope));
+ Builder.SetCurrentDebugLocation(
+     llvm::DebugLoc::get(getLineNumber(CurLoc),
+                         getColumnNumber(CurLoc, ForceColumnInfo),
+                         Scope));
}
/// CreateLexicalBlock - Creates a new lexical block node and pushes it on
@@ -2409,7 +2395,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
CharUnits Align = CGM.getContext().getDeclAlign(VD);
if (Align > CGM.getContext().toCharUnitsFromBits(
- CGM.getContext().getTargetInfo().getPointerAlign(0))) {
+ CGM.getTarget().getPointerAlign(0))) {
CharUnits FieldOffsetInBytes
= CGM.getContext().toCharUnitsFromBits(FieldOffset);
CharUnits AlignedOffsetInBytes
@@ -2505,7 +2491,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
offset = CGM.getContext().toCharUnitsFromBits(
- CGM.getContext().getTargetInfo().getPointerWidth(0));
+ CGM.getTarget().getPointerWidth(0));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
@@ -2593,6 +2579,19 @@ void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
}
+/// Look up the completed type for a self pointer in the TypeCache and
+/// create a copy of it with the ObjectPointer and Artificial flags
+/// set. If the type is not cached, the type passed in is used as-is.
+/// That should never happen, though, since creating a type for the
+/// implicit self argument implies that we already parsed the interface
+/// definition and the ivar declarations in the implementation.
+llvm::DIType CGDebugInfo::CreateSelfType(const QualType &QualTy, llvm::DIType Ty) {
+ llvm::DIType CachedTy = getTypeOrNull(QualTy);
+ if (CachedTy.Verify()) Ty = CachedTy;
+ else DEBUG(llvm::dbgs() << "No cached type for self.");
+ return DBuilder.createObjectPointerType(Ty);
+}
+
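For readers skimming the hunk, a minimal standalone sketch of the lookup-then-decorate pattern CreateSelfType uses. Everything below is invented for illustration (DebugType, decorate, selfType, the map); the real entry points are getTypeOrNull and DIBuilder::createObjectPointerType.

    #include <map>
    #include <string>
    // Stand-ins: 'DebugType' plays llvm::DIType, 'decorate' plays
    // DIBuilder::createObjectPointerType, the map plays the TypeCache.
    using DebugType = std::string;
    static DebugType decorate(const DebugType &T) {
      return T + " [object_pointer, artificial]";
    }
    static DebugType selfType(const std::map<std::string, DebugType> &Cache,
                              const std::string &Key, const DebugType &Fallback) {
      auto It = Cache.find(Key);
      // Prefer the completed cached type; fall back to the caller's type
      // otherwise (which, per the comment above, should never happen).
      return decorate(It != Cache.end() ? It->second : Fallback);
    }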
void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
llvm::Value *Storage,
CGBuilderTy &Builder,
@@ -2616,7 +2615,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
// Self is passed along as an implicit non-arg variable in a
// block. Mark it as the object pointer.
if (isa<ImplicitParamDecl>(VD) && VD->getName() == "self")
- Ty = DBuilder.createObjectPointerType(Ty);
+ Ty = CreateSelfType(VD->getType(), Ty);
// Get location information.
unsigned Line = getLineNumber(VD->getLocation());
@@ -2630,6 +2629,8 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
SmallVector<llvm::Value *, 9> addr;
llvm::Type *Int64Ty = CGM.Int64Ty;
+ if (isa<llvm::AllocaInst>(Storage))
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
if (isByRef) {
@@ -2651,6 +2652,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
DBuilder.createComplexVariable(llvm::dwarf::DW_TAG_auto_variable,
llvm::DIDescriptor(LexicalBlockStack.back()),
VD->getName(), Unit, Line, Ty, addr);
+
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DBuilder.insertDeclare(Storage, D, Builder.GetInsertPoint());
@@ -2678,7 +2680,8 @@ namespace {
}
void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
- llvm::Value *addr,
+ llvm::Value *Arg,
+ llvm::Value *LocalAddr,
CGBuilderTy &Builder) {
assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
ASTContext &C = CGM.getContext();
@@ -2805,21 +2808,27 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
// Get overall information about the block.
unsigned flags = llvm::DIDescriptor::FlagArtificial;
llvm::MDNode *scope = LexicalBlockStack.back();
- StringRef name = ".block_descriptor";
// Create the descriptor for the parameter.
llvm::DIVariable debugVar =
DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_arg_variable,
llvm::DIDescriptor(scope),
- name, tunit, line, type,
+ Arg->getName(), tunit, line, type,
CGM.getLangOpts().Optimize, flags,
- cast<llvm::Argument>(addr)->getArgNo() + 1);
-
- // Insert an llvm.dbg.value into the current block.
- llvm::Instruction *declare =
- DBuilder.insertDbgValueIntrinsic(addr, 0, debugVar,
- Builder.GetInsertBlock());
- declare->setDebugLoc(llvm::DebugLoc::get(line, column, scope));
+ cast<llvm::Argument>(Arg)->getArgNo() + 1);
+
+ if (LocalAddr) {
+ // Insert an llvm.dbg.value into the current block.
+ llvm::Instruction *DbgVal =
+ DBuilder.insertDbgValueIntrinsic(LocalAddr, 0, debugVar,
+ Builder.GetInsertBlock());
+ DbgVal->setDebugLoc(llvm::DebugLoc::get(line, column, scope));
+ }
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *DbgDecl =
+ DBuilder.insertDeclare(Arg, debugVar, Builder.GetInsertBlock());
+ DbgDecl->setDebugLoc(llvm::DebugLoc::get(line, column, scope));
}
/// getStaticDataMemberDeclaration - If D is an out-of-class definition of
@@ -2920,6 +2929,16 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
getStaticDataMemberDeclaration(VD));
}
+void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) {
+ llvm::DIScope Scope =
+ LexicalBlockStack.empty()
+ ? getContextDescriptor(cast<Decl>(UD.getDeclContext()))
+ : llvm::DIScope(LexicalBlockStack.back());
+ DBuilder.createImportedModule(
+ Scope, getOrCreateNameSpace(UD.getNominatedNamespace()),
+ getLineNumber(UD.getLocation()));
+}
+
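For reference, the construct the new EmitUsingDirective hook handles; the example is illustrative, not from the patch:

    namespace util { int answer() { return 42; } }
    int demo() {
      using namespace util; // UsingDirectiveDecl: now reaches the debug-info
      return answer();      // emitter, which records an imported module so
    }                       // debuggers can resolve 'answer' unqualified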
/// getOrCreateNameSpace - Return namespace descriptor for the given
/// namespace decl.
llvm::DINameSpace
@@ -2955,9 +2974,8 @@ void CGDebugInfo::finalize() {
RepTy = llvm::DIType(cast<llvm::MDNode>(V));
}
- if (Ty.Verify() && Ty.isForwardDecl() && RepTy.Verify()) {
+ if (Ty.Verify() && Ty.isForwardDecl() && RepTy.Verify())
Ty.replaceAllUsesWith(RepTy);
- }
}
// We keep our own list of retained types, because we need to look
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 2e896cfe22..4080492a1c 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -51,7 +51,7 @@ class CGDebugInfo {
SourceLocation CurLoc, PrevLoc;
llvm::DIType VTablePtrType;
llvm::DIType ClassTy;
- llvm::DIType ObjTy;
+ llvm::DICompositeType ObjTy;
llvm::DIType SelTy;
llvm::DIType OCLImage1dDITy, OCLImage1dArrayDITy, OCLImage1dBufferDITy;
llvm::DIType OCLImage2dDITy, OCLImage2dArrayDITy;
@@ -119,6 +119,7 @@ class CGDebugInfo {
llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const AtomicType *Ty, llvm::DIFile F);
llvm::DIType CreateEnumType(const EnumDecl *ED);
+ llvm::DIType CreateSelfType(const QualType &QualTy, llvm::DIType Ty);
llvm::DIType getTypeOrNull(const QualType);
llvm::DIType getCompletedTypeOrNull(const QualType);
llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
@@ -207,7 +208,9 @@ public:
/// EmitLocation - Emit metadata to indicate a change in line/column
/// information in the source file.
- void EmitLocation(CGBuilderTy &Builder, SourceLocation Loc);
+ /// \param ForceColumnInfo Assume DebugColumnInfo option is true.
+ void EmitLocation(CGBuilderTy &Builder, SourceLocation Loc,
+ bool ForceColumnInfo = false);
/// EmitFunctionStart - Emit a call to llvm.dbg.function.start to indicate
/// start of a new function.
@@ -246,7 +249,8 @@ public:
/// llvm.dbg.declare for the block-literal argument to a block
/// invocation function.
void EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
- llvm::Value *addr,
+ llvm::Value *Arg,
+ llvm::Value *LocalAddr,
CGBuilderTy &Builder);
/// EmitGlobalVariable - Emit information about a global variable.
@@ -258,6 +262,9 @@ public:
/// EmitGlobalVariable - Emit global variable's debug info.
void EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init);
+ /// \brief - Emit C++ using directive.
+ void EmitUsingDirective(const UsingDirectiveDecl &UD);
+
/// getOrCreateRecordType - Emit record type's standalone debug info.
llvm::DIType getOrCreateRecordType(QualType Ty, SourceLocation L);
@@ -277,7 +284,7 @@ private:
uint64_t *OffSet);
/// getContextDescriptor - Get context info for the decl.
- llvm::DIDescriptor getContextDescriptor(const Decl *Decl);
+ llvm::DIScope getContextDescriptor(const Decl *Decl);
/// createRecordFwdDecl - Create a forward decl for a RecordType in a given
/// context.
@@ -356,7 +363,8 @@ private:
/// getColumnNumber - Get column number for the location. If location is
/// invalid then use current location.
- unsigned getColumnNumber(SourceLocation Loc);
+ /// \param Force Assume DebugColumnInfo option is true.
+ unsigned getColumnNumber(SourceLocation Loc, bool Force=false);
};
} // namespace CodeGen
} // namespace clang
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 0e001301ae..3ce6dec6a5 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -45,6 +45,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::CXXDestructor:
case Decl::CXXConversion:
case Decl::Field:
+ case Decl::MSProperty:
case Decl::IndirectField:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
@@ -69,6 +70,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Friend:
case Decl::FriendTemplate:
case Decl::Block:
+ case Decl::Captured:
case Decl::ClassScopeFunctionSpecialization:
llvm_unreachable("Declaration should not be in declstmts!");
case Decl::Function: // void X();
@@ -78,15 +80,19 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::CXXRecord: // struct/union/class X; [C++]
case Decl::Using: // using X; [C++]
case Decl::UsingShadow:
- case Decl::UsingDirective: // using namespace X; [C++]
case Decl::NamespaceAlias:
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
case Decl::Label: // __label__ x;
case Decl::Import:
+ case Decl::OMPThreadPrivate:
case Decl::Empty:
// None of these decls require codegen support.
return;
+ case Decl::UsingDirective: // using namespace X; [C++]
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
+ return;
case Decl::Var: {
const VarDecl &VD = cast<VarDecl>(D);
assert(VD.isLocalVarDecl() &&
@@ -108,7 +114,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
- switch (D.getStorageClassAsWritten()) {
+ switch (D.getStorageClass()) {
case SC_None:
case SC_Auto:
case SC_Register:
@@ -197,7 +203,7 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
if (Linkage != llvm::GlobalValue::InternalLinkage)
GV->setVisibility(CurFn->getVisibility());
- if (D.isThreadSpecified())
+ if (D.getTLSKind())
CGM.setTLSMode(GV, D);
return GV;
@@ -451,6 +457,22 @@ namespace {
CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
}
};
+
+ /// A cleanup to call @llvm.lifetime.end.
+ class CallLifetimeEnd : public EHScopeStack::Cleanup {
+ llvm::Value *Addr;
+ llvm::Value *Size;
+ public:
+ CallLifetimeEnd(llvm::Value *addr, llvm::Value *size)
+ : Addr(addr), Size(size) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::Value *castAddr = CGF.Builder.CreateBitCast(Addr, CGF.Int8PtrTy);
+ CGF.Builder.CreateCall2(CGF.CGM.getLLVMLifetimeEndFn(),
+ Size, castAddr)
+ ->setDoesNotThrow();
+ }
+ };
}
/// EmitAutoVarWithLifetime - Does the setup required for an automatic
@@ -627,7 +649,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
- EmitARCRelease(oldValue, /*precise*/ false);
+ EmitARCRelease(oldValue, ARCImpreciseLifetime);
return;
}
@@ -755,7 +777,6 @@ static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
// If a global is all zeros, always use a memset.
if (isa<llvm::ConstantAggregateZero>(Init)) return true;
-
// If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
// do it if it will require 6 or fewer scalar stores.
// TODO: Should the budget depend on the size? Avoiding a large global warrants
@@ -767,6 +788,23 @@ static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
}
+/// Should we use the LLVM lifetime intrinsics for the given local variable?
+static bool shouldUseLifetimeMarkers(CodeGenFunction &CGF, const VarDecl &D,
+ unsigned Size) {
+ // Always emit lifetime markers in -fsanitize=use-after-scope mode.
+ if (CGF.getLangOpts().Sanitize.UseAfterScope)
+ return true;
+ // For now, only in optimized builds.
+ if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0)
+ return false;
+
+ // Only mark objects larger than 32 bytes; we don't want to increase
+ // compile time by marking tiny objects.
+ unsigned SizeThreshold = 32;
+
+ return Size > SizeThreshold;
+}
+
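A sketch of the effect in an optimized build; the IR in the comments is approximate for this vintage of LLVM, and the function is invented:

    // For a local past the 32-byte threshold, the emitted IR brackets the
    // variable's lifetime, roughly:
    //   %big = alloca [64 x i8]
    //   %p   = bitcast [64 x i8]* %big to i8*
    //   call void @llvm.lifetime.start(i64 64, i8* %p)
    //   ...
    //   call void @llvm.lifetime.end(i64 64, i8* %p)  ; from the cleanup below
    void markers_demo() {
      char big[64]; // 64 > 32-byte threshold, so markers appear at -O1 and up
      (void)big;
    }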
/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
@@ -797,85 +835,91 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
- if (!Target.useGlobalsForAutomaticVariables()) {
- bool NRVO = getLangOpts().ElideConstructors &&
- D.isNRVOVariable();
-
- // If this value is a POD array or struct with a statically
- // determinable constant initializer, there are optimizations we can do.
- //
- // TODO: We should constant-evaluate the initializer of any variable,
- // as long as it is initialized by a constant expression. Currently,
- // isConstantInitializer produces wrong answers for structs with
- // reference or bitfield members, and a few other cases, and checking
- // for POD-ness protects us from some of these.
- if (D.getInit() &&
- (Ty->isArrayType() || Ty->isRecordType()) &&
- (Ty.isPODType(getContext()) ||
- getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
- D.getInit()->isConstantInitializer(getContext(), false)) {
-
- // If the variable's a const type, and it's neither an NRVO
- // candidate nor a __block variable and has no mutable members,
- // emit it as a global instead.
- if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
- CGM.isTypeConstant(Ty, true)) {
- EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
-
- emission.Address = 0; // signal this condition to later callbacks
- assert(emission.wasEmittedAsGlobal());
- return emission;
- }
-
- // Otherwise, tell the initialization code that we're in this case.
- emission.IsConstantAggregate = true;
+ bool NRVO = getLangOpts().ElideConstructors &&
+ D.isNRVOVariable();
+
+ // If this value is a POD array or struct with a statically
+ // determinable constant initializer, there are optimizations we can do.
+ //
+ // TODO: We should constant-evaluate the initializer of any variable,
+ // as long as it is initialized by a constant expression. Currently,
+ // isConstantInitializer produces wrong answers for structs with
+ // reference or bitfield members, and a few other cases, and checking
+ // for POD-ness protects us from some of these.
+ if (D.getInit() &&
+ (Ty->isArrayType() || Ty->isRecordType()) &&
+ (Ty.isPODType(getContext()) ||
+ getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
+ D.getInit()->isConstantInitializer(getContext(), false)) {
+
+ // If the variable's a const type, and it's neither an NRVO
+ // candidate nor a __block variable and has no mutable members,
+ // emit it as a global instead.
+ if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
+ CGM.isTypeConstant(Ty, true)) {
+ EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
+
+ emission.Address = 0; // signal this condition to later callbacks
+ assert(emission.wasEmittedAsGlobal());
+ return emission;
}
- // A normal fixed sized variable becomes an alloca in the entry block,
- // unless it's an NRVO variable.
- llvm::Type *LTy = ConvertTypeForMem(Ty);
+ // Otherwise, tell the initialization code that we're in this case.
+ emission.IsConstantAggregate = true;
+ }
- if (NRVO) {
- // The named return value optimization: allocate this variable in the
- // return slot, so that we can elide the copy when returning this
- // variable (C++0x [class.copy]p34).
- DeclPtr = ReturnValue;
-
- if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
- if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
- // Create a flag that is used to indicate when the NRVO was applied
- // to this variable. Set it to zero to indicate that NRVO was not
- // applied.
- llvm::Value *Zero = Builder.getFalse();
- llvm::Value *NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
- EnsureInsertPoint();
- Builder.CreateStore(Zero, NRVOFlag);
-
- // Record the NRVO flag for this variable.
- NRVOFlags[&D] = NRVOFlag;
- emission.NRVOFlag = NRVOFlag;
- }
+ // A normal fixed sized variable becomes an alloca in the entry block,
+ // unless it's an NRVO variable.
+ llvm::Type *LTy = ConvertTypeForMem(Ty);
+
+ if (NRVO) {
+ // The named return value optimization: allocate this variable in the
+ // return slot, so that we can elide the copy when returning this
+ // variable (C++0x [class.copy]p34).
+ DeclPtr = ReturnValue;
+
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
+ // Create a flag that is used to indicate when the NRVO was applied
+ // to this variable. Set it to zero to indicate that NRVO was not
+ // applied.
+ llvm::Value *Zero = Builder.getFalse();
+ llvm::Value *NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
+ EnsureInsertPoint();
+ Builder.CreateStore(Zero, NRVOFlag);
+
+ // Record the NRVO flag for this variable.
+ NRVOFlags[&D] = NRVOFlag;
+ emission.NRVOFlag = NRVOFlag;
}
- } else {
- if (isByRef)
- LTy = BuildByRefType(&D);
-
- llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
- Alloc->setName(D.getName());
-
- CharUnits allocaAlignment = alignment;
- if (isByRef)
- allocaAlignment = std::max(allocaAlignment,
- getContext().toCharUnitsFromBits(Target.getPointerAlign(0)));
- Alloc->setAlignment(allocaAlignment.getQuantity());
- DeclPtr = Alloc;
}
} else {
- // Targets that don't support recursion emit locals as globals.
- const char *Class =
- D.getStorageClass() == SC_Register ? ".reg." : ".auto.";
- DeclPtr = CreateStaticVarDecl(D, Class,
- llvm::GlobalValue::InternalLinkage);
+ if (isByRef)
+ LTy = BuildByRefType(&D);
+
+ llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
+ Alloc->setName(D.getName());
+
+ CharUnits allocaAlignment = alignment;
+ if (isByRef)
+ allocaAlignment = std::max(allocaAlignment,
+ getContext().toCharUnitsFromBits(getTarget().getPointerAlign(0)));
+ Alloc->setAlignment(allocaAlignment.getQuantity());
+ DeclPtr = Alloc;
+
+ // Emit a lifetime intrinsic if meaningful; there's no point
+ // in doing this if we don't have a valid insertion point.
+ uint64_t size = CGM.getDataLayout().getTypeAllocSize(LTy);
+ if (HaveInsertPoint() && shouldUseLifetimeMarkers(*this, D, size)) {
+ llvm::Value *sizeV = llvm::ConstantInt::get(Int64Ty, size);
+
+ emission.SizeForLifetimeMarkers = sizeV;
+ llvm::Value *castAddr = Builder.CreateBitCast(Alloc, Int8PtrTy);
+ Builder.CreateCall2(CGM.getLLVMLifetimeStartFn(), sizeV, castAddr)
+ ->setDoesNotThrow();
+ } else {
+ assert(!emission.useLifetimeMarkers());
+ }
}
} else {
EnsureInsertPoint();
@@ -920,11 +964,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
- if (Target.useGlobalsForAutomaticVariables()) {
- DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr),
- &D);
- } else
- DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
}
}
@@ -1214,6 +1254,14 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
const VarDecl &D = *emission.Variable;
+ // Make sure we call @llvm.lifetime.end. This needs to happen
+ // *last*, so the cleanup needs to be pushed *first*.
+ if (emission.useLifetimeMarkers()) {
+ EHStack.pushCleanup<CallLifetimeEnd>(NormalCleanup,
+ emission.getAllocatedAddress(),
+ emission.getSizeForLifetimeMarkers());
+ }
+
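The push-first/run-last rule is just destructor (LIFO) order; a standalone illustration with an invented Cleanup type:

    #include <cstdio>
    struct Cleanup {
      const char *Name;
      ~Cleanup() { std::puts(Name); }
    };
    void lifo_demo() {
      Cleanup a{"llvm.lifetime.end (pushed first, runs last)"};
      Cleanup b{"type's destructor (pushed second, runs first)"};
    } // prints b's line, then a's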
// Check the type for a cleanup.
if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
emitAutoVarTypeCleanup(emission, dtorKind);
@@ -1484,18 +1532,37 @@ void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
elementType, destroyer);
}
+/// Lazily declare the @llvm.lifetime.start intrinsic.
+llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
+ if (LifetimeStartFn) return LifetimeStartFn;
+ LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
+ llvm::Intrinsic::lifetime_start);
+ return LifetimeStartFn;
+}
+
+/// Lazily declare the @llvm.lifetime.end intrinsic.
+llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
+ if (LifetimeEndFn) return LifetimeEndFn;
+ LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
+ llvm::Intrinsic::lifetime_end);
+ return LifetimeEndFn;
+}
+
namespace {
/// A cleanup to perform a release of an object at the end of a
/// function. This is used to balance out the incoming +1 of a
/// ns_consumed argument when we can't reasonably do that just by
/// not doing the initial retain for a __block argument.
struct ConsumeARCParameter : EHScopeStack::Cleanup {
- ConsumeARCParameter(llvm::Value *param) : Param(param) {}
+ ConsumeARCParameter(llvm::Value *param,
+ ARCPreciseLifetime_t precise)
+ : Param(param), Precise(precise) {}
llvm::Value *Param;
+ ARCPreciseLifetime_t Precise;
void Emit(CodeGenFunction &CGF, Flags flags) {
- CGF.EmitARCRelease(Param, /*precise*/ false);
+ CGF.EmitARCRelease(Param, Precise);
}
};
}
@@ -1510,17 +1577,29 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
Arg->setName(D.getName());
+ QualType Ty = D.getType();
+
// Use better IR generation for certain implicit parameters.
if (isa<ImplicitParamDecl>(D)) {
// The only implicit argument a block has is its literal.
if (BlockInfo) {
LocalDeclMap[&D] = Arg;
+ llvm::Value *LocalAddr = 0;
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ // Allocate a stack slot to let the debug info survive the RA.
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
+ D.getName() + ".addr");
+ Alloc->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ LValue lv = MakeAddrLValue(Alloc, Ty, getContext().getDeclAlign(&D));
+ EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
+ LocalAddr = Builder.CreateLoad(Alloc);
+ }
if (CGDebugInfo *DI = getDebugInfo()) {
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
- DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, Builder);
+ DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, LocalAddr, Builder);
}
}
@@ -1528,8 +1607,6 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
}
}
- QualType Ty = D.getType();
-
llvm::Value *DeclPtr;
// If this is an aggregate or variable sized value, reuse the input pointer.
if (!Ty->isConstantSizeType() ||
@@ -1585,8 +1662,12 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
}
} else {
// Push the cleanup for a consumed parameter.
- if (isConsumed)
- EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg);
+ if (isConsumed) {
+ ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
+ ? ARCPreciseLifetime : ARCImpreciseLifetime);
+ EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg,
+ precise);
+ }
if (lt == Qualifiers::OCL_Weak) {
EmitARCInitWeak(DeclPtr, Arg);
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 0448d31f40..9ffcff2766 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -39,7 +39,7 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
CodeGenModule &CGM = CGF.CGM;
if (lv.isObjCStrong())
CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
- DeclPtr, D.isThreadSpecified());
+ DeclPtr, D.getTLSKind());
else if (lv.isObjCWeak())
CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
DeclPtr);
@@ -80,6 +80,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
case QualType::DK_objc_strong_lifetime:
case QualType::DK_objc_weak_lifetime:
// We don't care about releasing objects during process teardown.
+ assert(!D.getTLSKind() && "should have rejected this");
return;
}
@@ -105,7 +106,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
}
- CGM.getCXXABI().registerGlobalDtor(CGF, function, argument);
+ CGM.getCXXABI().registerGlobalDtor(CGF, D, function, argument);
}
/// Emit code to cause the variable at the given address to be considered as
@@ -155,7 +156,8 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
static llvm::Function *
CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
llvm::FunctionType *ty,
- const Twine &name);
+ const Twine &name,
+ bool TLS = false);
/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
@@ -224,14 +226,14 @@ void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
static llvm::Function *
CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
llvm::FunctionType *FTy,
- const Twine &Name) {
+ const Twine &Name, bool TLS) {
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
- if (!CGM.getLangOpts().AppleKext) {
+ if (!CGM.getLangOpts().AppleKext && !TLS) {
// Set the section if needed.
if (const char *Section =
- CGM.getContext().getTargetInfo().getStaticInitSectionSpecifier())
+ CGM.getTarget().getStaticInitSectionSpecifier())
Fn->setSection(Section);
}
@@ -263,12 +265,20 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
PerformInit);
- if (D->hasAttr<InitPriorityAttr>()) {
+ if (D->getTLSKind()) {
+ // FIXME: Should we support init_priority for thread_local?
+ // FIXME: Ideally, initialization of instantiated thread_local static data
+ // members of class templates should not trigger initialization of other
+ // entities in the TU.
+ // FIXME: We only need to register one __cxa_thread_atexit function for the
+ // entire TU.
+ CXXThreadLocalInits.push_back(Fn);
+ } else if (D->hasAttr<InitPriorityAttr>()) {
unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
DelayedCXXInitPosition.erase(D);
- } else {
+ } else {
llvm::DenseMap<const Decl *, unsigned>::iterator I =
DelayedCXXInitPosition.find(D);
if (I == DelayedCXXInitPosition.end()) {
@@ -281,6 +291,27 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
}
}
+void CodeGenModule::EmitCXXThreadLocalInitFunc() {
+ llvm::Function *InitFn = 0;
+ if (!CXXThreadLocalInits.empty()) {
+ // Generate a guarded initialization function.
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ InitFn = CreateGlobalInitOrDestructFunction(*this, FTy, "__tls_init",
+ /*TLS*/ true);
+ llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
+ getModule(), Int8Ty, false, llvm::GlobalVariable::InternalLinkage,
+ llvm::ConstantInt::get(Int8Ty, 0), "__tls_guard");
+ Guard->setThreadLocal(true);
+ CodeGenFunction(*this)
+ .GenerateCXXGlobalInitFunc(InitFn, CXXThreadLocalInits, Guard);
+ }
+
+ getCXXABI().EmitThreadLocalInitFuncs(CXXThreadLocals, InitFn);
+
+ CXXThreadLocalInits.clear();
+ CXXThreadLocals.clear();
+}
+
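A hand-written approximation of what this emits for one dynamically initialized thread_local; tls_x and compute are invented, and the guard/init names mirror the __tls_guard and __tls_init created above:

    static int compute() { return 42; }
    static thread_local bool tls_guard = false; // zero-initialized guard
    static thread_local int tls_x; // stands for: thread_local int tls_x = compute();
    static void tls_init() {
      if (tls_guard) return; // the "guard.uninitialized" check
      tls_guard = true;      // mark initialized before running initializers
      tls_x = compute();
    }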
void
CodeGenModule::EmitCXXGlobalInitFunc() {
while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
@@ -320,9 +351,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
for (; I < PrioE; ++I)
LocalCXXGlobalInits.push_back(I->second);
- CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
- &LocalCXXGlobalInits[0],
- LocalCXXGlobalInits.size());
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
AddGlobalCtor(Fn, Priority);
}
}
@@ -330,9 +359,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
llvm::Function *Fn =
CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
- CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
- &CXXGlobalInits[0],
- CXXGlobalInits.size());
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
AddGlobalCtor(Fn);
CXXGlobalInits.clear();
@@ -379,9 +406,10 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
FinishFunction();
}
-void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
- llvm::Constant **Decls,
- unsigned NumDecls) {
+void
+CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ ArrayRef<llvm::Constant *> Decls,
+ llvm::GlobalVariable *Guard) {
// Initialize debug info if needed.
maybeInitializeDebugInfo();
@@ -389,6 +417,22 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
+ llvm::BasicBlock *ExitBlock = 0;
+ if (Guard) {
+ // If we have a guard variable, check whether we've already performed these
+ // initializations. This happens for TLS initialization functions.
+ llvm::Value *GuardVal = Builder.CreateLoad(Guard);
+ llvm::Value *Uninit = Builder.CreateIsNull(GuardVal, "guard.uninitialized");
+ // Mark as initialized before initializing anything else. If the
+ // initializers use previously-initialized thread_local vars, that's
+ // probably supposed to be OK, but the standard doesn't say.
+ Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(), 1), Guard);
+ llvm::BasicBlock *InitBlock = createBasicBlock("init");
+ ExitBlock = createBasicBlock("exit");
+ Builder.CreateCondBr(Uninit, InitBlock, ExitBlock);
+ EmitBlock(InitBlock);
+ }
+
RunCleanupsScope Scope(*this);
// When building in Objective-C++ ARC mode, create an autorelease pool
@@ -397,13 +441,18 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
llvm::Value *token = EmitObjCAutoreleasePoolPush();
EmitObjCAutoreleasePoolCleanup(token);
}
-
- for (unsigned i = 0; i != NumDecls; ++i)
+
+ for (unsigned i = 0, e = Decls.size(); i != e; ++i)
if (Decls[i])
EmitRuntimeCall(Decls[i]);
Scope.ForceCleanup();
-
+
+ if (ExitBlock) {
+ Builder.CreateBr(ExitBlock);
+ EmitBlock(ExitBlock);
+ }
+
FinishFunction();
}
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 36642bcc48..a088d78641 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -419,14 +419,16 @@ llvm::Value *CodeGenFunction::getSelectorFromSlot() {
return Builder.CreateLoad(getEHSelectorSlot(), "sel");
}
-void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
+void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
+ bool KeepInsertionPoint) {
if (!E->getSubExpr()) {
EmitNoreturnRuntimeCallOrInvoke(getReThrowFn(CGM),
ArrayRef<llvm::Value*>());
// throw is an expression, and the expression emitters expect us
// to leave ourselves at a valid insertion point.
- EmitBlock(createBasicBlock("throw.cont"));
+ if (KeepInsertionPoint)
+ EmitBlock(createBasicBlock("throw.cont"));
return;
}
@@ -440,7 +442,8 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
CGM.getObjCRuntime().EmitThrowStmt(*this, S, false);
// This will clear insertion point which was not cleared in
// call to EmitThrowStmt.
- EmitBlock(createBasicBlock("throw.cont"));
+ if (KeepInsertionPoint)
+ EmitBlock(createBasicBlock("throw.cont"));
return;
}
@@ -478,7 +481,8 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
// throw is an expression, and the expression emitters expect us
// to leave ourselves at a valid insertion point.
- EmitBlock(createBasicBlock("throw.cont"));
+ if (KeepInsertionPoint)
+ EmitBlock(createBasicBlock("throw.cont"));
}
void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
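Why the default keeps an insertion point: a throw can occur mid-expression, so the expression emitters must be able to keep going in a throw.cont block. An illustrative example, not from the patch:

    #include <stdexcept>
    int pick(bool ok) {
      // The conditional operator makes the throw part of a larger
      // expression; codegen continues emitting after it.
      return ok ? 42 : throw std::runtime_error("no value");
    }
    // Callers that know the throw ends the statement can now pass
    // KeepInsertionPoint=false and skip the dead throw.cont block.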
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 830b1218c1..64670c5e81 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -184,12 +184,16 @@ CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
// Create the reference temporary.
- llvm::GlobalValue *RefTemp =
+ llvm::GlobalVariable *RefTemp =
new llvm::GlobalVariable(CGF.CGM.getModule(),
RefTempTy, /*isConstant=*/false,
llvm::GlobalValue::InternalLinkage,
llvm::Constant::getNullValue(RefTempTy),
Name.str());
+ // If we're binding to a thread_local variable, the temporary is also
+ // thread local.
+ if (VD->getTLSKind())
+ CGF.CGM.setTLSMode(RefTemp, *VD);
return RefTemp;
}
}
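The case being handled, at the source level (illustrative):

    // A lifetime-extended temporary bound to a thread_local reference: its
    // backing storage must itself be thread-local, or every thread would
    // share (and race on) a single global temporary.
    thread_local const int &tl_ref = 1729;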
@@ -201,6 +205,7 @@ static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
llvm::Value *&ReferenceTemporary,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
+ const InitListExpr *&ReferenceInitializerList,
QualType &ObjCARCReferenceLifetimeType,
const NamedDecl *InitializedDecl) {
const MaterializeTemporaryExpr *M = NULL;
@@ -222,156 +227,157 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
ReferenceTemporary,
ReferenceTemporaryDtor,
+ ReferenceInitializerList,
ObjCARCReferenceLifetimeType,
InitializedDecl);
}
- RValue RV;
if (E->isGLValue()) {
// Emit the expression as an lvalue.
LValue LV = CGF.EmitLValue(E);
+ assert(LV.isSimple());
+ return LV.getAddress();
+ }
+
+ if (!ObjCARCReferenceLifetimeType.isNull()) {
+ ReferenceTemporary = CreateReferenceTemporary(CGF,
+ ObjCARCReferenceLifetimeType,
+ InitializedDecl);
- if (LV.isSimple())
- return LV.getAddress();
- // We have to load the lvalue.
- RV = CGF.EmitLoadOfLValue(LV);
- } else {
- if (!ObjCARCReferenceLifetimeType.isNull()) {
- ReferenceTemporary = CreateReferenceTemporary(CGF,
- ObjCARCReferenceLifetimeType,
- InitializedDecl);
-
-
- LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
- ObjCARCReferenceLifetimeType);
+ LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
+ ObjCARCReferenceLifetimeType);
- CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
- RefTempDst, false);
-
- bool ExtendsLifeOfTemporary = false;
- if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
- if (Var->extendsLifetimeOfTemporary())
- ExtendsLifeOfTemporary = true;
- } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
+ CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
+ RefTempDst, false);
+
+ bool ExtendsLifeOfTemporary = false;
+ if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (Var->extendsLifetimeOfTemporary())
ExtendsLifeOfTemporary = true;
- }
-
- if (!ExtendsLifeOfTemporary) {
- // Since the lifetime of this temporary isn't going to be extended,
- // we need to clean it up ourselves at the end of the full expression.
- switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- break;
-
- case Qualifiers::OCL_Strong: {
- assert(!ObjCARCReferenceLifetimeType->isArrayType());
- CleanupKind cleanupKind = CGF.getARCCleanupKind();
- CGF.pushDestroy(cleanupKind,
- ReferenceTemporary,
- ObjCARCReferenceLifetimeType,
- CodeGenFunction::destroyARCStrongImprecise,
- cleanupKind & EHCleanup);
- break;
- }
+ } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
+ ExtendsLifeOfTemporary = true;
+ }
+
+ if (!ExtendsLifeOfTemporary) {
+ // Since the lifetime of this temporary isn't going to be extended,
+ // we need to clean it up ourselves at the end of the full expression.
+ switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
- case Qualifiers::OCL_Weak:
- assert(!ObjCARCReferenceLifetimeType->isArrayType());
- CGF.pushDestroy(NormalAndEHCleanup,
- ReferenceTemporary,
- ObjCARCReferenceLifetimeType,
- CodeGenFunction::destroyARCWeak,
- /*useEHCleanupForArray*/ true);
- break;
- }
+ case Qualifiers::OCL_Strong: {
+ assert(!ObjCARCReferenceLifetimeType->isArrayType());
+ CleanupKind cleanupKind = CGF.getARCCleanupKind();
+ CGF.pushDestroy(cleanupKind,
+ ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ CodeGenFunction::destroyARCStrongImprecise,
+ cleanupKind & EHCleanup);
+ break;
+ }
- ObjCARCReferenceLifetimeType = QualType();
+ case Qualifiers::OCL_Weak:
+ assert(!ObjCARCReferenceLifetimeType->isArrayType());
+ CGF.pushDestroy(NormalAndEHCleanup,
+ ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ CodeGenFunction::destroyARCWeak,
+ /*useEHCleanupForArray*/ true);
+ break;
}
- return ReferenceTemporary;
+ ObjCARCReferenceLifetimeType = QualType();
}
+
+ return ReferenceTemporary;
+ }
- SmallVector<SubobjectAdjustment, 2> Adjustments;
- E = E->skipRValueSubobjectAdjustments(Adjustments);
- if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
- if (opaque->getType()->isRecordType())
- return CGF.EmitOpaqueValueLValue(opaque).getAddress();
-
- // Create a reference temporary if necessary.
- AggValueSlot AggSlot = AggValueSlot::ignored();
- if (CGF.hasAggregateEvaluationKind(E->getType())) {
- ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
- InitializedDecl);
- CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
- AggValueSlot::IsDestructed_t isDestructed
- = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
- AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
- Qualifiers(), isDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ E = E->skipRValueSubobjectAdjustments(Adjustments);
+ if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
+ if (opaque->getType()->isRecordType())
+ return CGF.EmitOpaqueValueLValue(opaque).getAddress();
+
+ // Create a reference temporary if necessary.
+ AggValueSlot AggSlot = AggValueSlot::ignored();
+ if (CGF.hasAggregateEvaluationKind(E->getType())) {
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
+ AggValueSlot::IsDestructed_t isDestructed
+ = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
+ Qualifiers(), isDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ }
+
+ if (InitializedDecl) {
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
+ if (ILE->initializesStdInitializerList()) {
+ ReferenceInitializerList = ILE;
+ }
}
-
- if (InitializedDecl) {
+ else if (const RecordType *RT =
+ E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()){
// Get the destructor for the reference temporary.
- if (const RecordType *RT =
- E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (!ClassDecl->hasTrivialDestructor())
- ReferenceTemporaryDtor = ClassDecl->getDestructor();
- }
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!ClassDecl->hasTrivialDestructor())
+ ReferenceTemporaryDtor = ClassDecl->getDestructor();
}
+ }
- RV = CGF.EmitAnyExpr(E, AggSlot);
-
- // Check if need to perform derived-to-base casts and/or field accesses, to
- // get from the temporary object we created (and, potentially, for which we
- // extended the lifetime) to the subobject we're binding the reference to.
- if (!Adjustments.empty()) {
- llvm::Value *Object = RV.getAggregateAddr();
- for (unsigned I = Adjustments.size(); I != 0; --I) {
- SubobjectAdjustment &Adjustment = Adjustments[I-1];
- switch (Adjustment.Kind) {
- case SubobjectAdjustment::DerivedToBaseAdjustment:
- Object =
- CGF.GetAddressOfBaseClass(Object,
- Adjustment.DerivedToBase.DerivedClass,
- Adjustment.DerivedToBase.BasePath->path_begin(),
- Adjustment.DerivedToBase.BasePath->path_end(),
- /*NullCheckValue=*/false);
- break;
-
- case SubobjectAdjustment::FieldAdjustment: {
- LValue LV = CGF.MakeAddrLValue(Object, E->getType());
- LV = CGF.EmitLValueForField(LV, Adjustment.Field);
- if (LV.isSimple()) {
- Object = LV.getAddress();
- break;
- }
+ RValue RV = CGF.EmitAnyExpr(E, AggSlot);
+
+ // Check whether we need to perform derived-to-base casts and/or field accesses, to
+ // get from the temporary object we created (and, potentially, for which we
+ // extended the lifetime) to the subobject we're binding the reference to.
+ if (!Adjustments.empty()) {
+ llvm::Value *Object = RV.getAggregateAddr();
+ for (unsigned I = Adjustments.size(); I != 0; --I) {
+ SubobjectAdjustment &Adjustment = Adjustments[I-1];
+ switch (Adjustment.Kind) {
+ case SubobjectAdjustment::DerivedToBaseAdjustment:
+ Object =
+ CGF.GetAddressOfBaseClass(Object,
+ Adjustment.DerivedToBase.DerivedClass,
+ Adjustment.DerivedToBase.BasePath->path_begin(),
+ Adjustment.DerivedToBase.BasePath->path_end(),
+ /*NullCheckValue=*/false);
+ break;
- // For non-simple lvalues, we actually have to create a copy of
- // the object we're binding to.
- QualType T = Adjustment.Field->getType().getNonReferenceType()
- .getUnqualifiedType();
- Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
- LValue TempLV = CGF.MakeAddrLValue(Object,
- Adjustment.Field->getType());
- CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
+ case SubobjectAdjustment::FieldAdjustment: {
+ LValue LV = CGF.MakeAddrLValue(Object, E->getType());
+ LV = CGF.EmitLValueForField(LV, Adjustment.Field);
+ if (LV.isSimple()) {
+ Object = LV.getAddress();
break;
}
-
- case SubobjectAdjustment::MemberPointerAdjustment: {
- llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
- Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
- CGF, Object, Ptr, Adjustment.Ptr.MPT);
- break;
- }
- }
+
+ // For non-simple lvalues, we actually have to create a copy of
+ // the object we're binding to.
+ QualType T = Adjustment.Field->getType().getNonReferenceType()
+ .getUnqualifiedType();
+ Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
+ LValue TempLV = CGF.MakeAddrLValue(Object,
+ Adjustment.Field->getType());
+ CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
+ break;
}
- return Object;
+ case SubobjectAdjustment::MemberPointerAdjustment: {
+ llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
+ Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
+ CGF, Object, Ptr, Adjustment.Ptr.MPT);
+ break;
+ }
+ }
}
+
+ return Object;
}
if (RV.isAggregate())
@@ -396,9 +402,11 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
const NamedDecl *InitializedDecl) {
llvm::Value *ReferenceTemporary = 0;
const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
+ const InitListExpr *ReferenceInitializerList = 0;
QualType ObjCARCReferenceLifetimeType;
llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
ReferenceTemporaryDtor,
+ ReferenceInitializerList,
ObjCARCReferenceLifetimeType,
InitializedDecl);
if (SanitizePerformTypeCheck && !E->getType()->isFunctionType()) {
@@ -410,7 +418,8 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
QualType Ty = E->getType();
EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
}
- if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
+ if (!ReferenceTemporaryDtor && !ReferenceInitializerList &&
+ ObjCARCReferenceLifetimeType.isNull())
return RValue::get(Value);
// Make sure to call the destructor for the reference temporary.
@@ -429,9 +438,15 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
CleanupArg = cast<llvm::Constant>(ReferenceTemporary);
}
- CGM.getCXXABI().registerGlobalDtor(*this, CleanupFn, CleanupArg);
+ CGM.getCXXABI().registerGlobalDtor(*this, *VD, CleanupFn, CleanupArg);
+ } else if (ReferenceInitializerList) {
+ // FIXME: This is wrong. We need to register a global destructor to clean
+ // up the initializer_list object, rather than adding it as a local
+ // cleanup.
+ EmitStdInitializerListCleanup(ReferenceTemporary,
+ ReferenceInitializerList);
} else {
- assert(!ObjCARCReferenceLifetimeType.isNull());
+ assert(!ObjCARCReferenceLifetimeType.isNull() && !VD->getTLSKind());
// Note: We intentionally do not register a global "destructor" to
// release the object.
}
@@ -445,6 +460,9 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
destroyCXXObject, getLangOpts().Exceptions);
else
PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
+ } else if (ReferenceInitializerList) {
+ EmitStdInitializerListCleanup(ReferenceTemporary,
+ ReferenceInitializerList);
} else {
switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
case Qualifiers::OCL_None:
@@ -885,6 +903,10 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
case Expr::CXXDefaultArgExprClass:
return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
+ case Expr::CXXDefaultInitExprClass: {
+ CXXDefaultInitExprScope Scope(*this);
+ return EmitLValue(cast<CXXDefaultInitExpr>(E)->getExpr());
+ }
case Expr::CXXTypeidExprClass:
return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
@@ -1044,7 +1066,8 @@ CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getAlignment().getQuantity(),
- lvalue.getType(), lvalue.getTBAAInfo());
+ lvalue.getType(), lvalue.getTBAAInfo(),
+ lvalue.getTBAABaseType(), lvalue.getTBAAOffset());
}
static bool hasBooleanRepresentation(QualType Ty) {
@@ -1106,7 +1129,9 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
unsigned Alignment, QualType Ty,
- llvm::MDNode *TBAAInfo) {
+ llvm::MDNode *TBAAInfo,
+ QualType TBAABaseType,
+ uint64_t TBAAOffset) {
// For better performance, handle vector loads differently.
if (Ty->isVectorType()) {
llvm::Value *V;
@@ -1158,8 +1183,11 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
Load->setVolatile(true);
if (Alignment)
Load->setAlignment(Alignment);
- if (TBAAInfo)
- CGM.DecorateInstruction(Load, TBAAInfo);
+ if (TBAAInfo) {
+ llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
+ TBAAOffset);
+ CGM.DecorateInstruction(Load, TBAAPath, false/*ConvertTypeToTag*/);
+ }
if ((SanOpts->Bool && hasBooleanRepresentation(Ty)) ||
(SanOpts->Enum && Ty->getAs<EnumType>())) {
@@ -1217,7 +1245,8 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment,
QualType Ty,
llvm::MDNode *TBAAInfo,
- bool isInit) {
+ bool isInit, QualType TBAABaseType,
+ uint64_t TBAAOffset) {
// Handle vectors differently to get better performance.
if (Ty->isVectorType()) {
@@ -1268,15 +1297,19 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
if (Alignment)
Store->setAlignment(Alignment);
- if (TBAAInfo)
- CGM.DecorateInstruction(Store, TBAAInfo);
+ if (TBAAInfo) {
+ llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
+ TBAAOffset);
+ CGM.DecorateInstruction(Store, TBAAPath, false/*ConvertTypeToTag*/);
+ }
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
bool isInit) {
EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getAlignment().getQuantity(), lvalue.getType(),
- lvalue.getTBAAInfo(), isInit);
+ lvalue.getTBAAInfo(), isInit, lvalue.getTBAABaseType(),
+ lvalue.getTBAAOffset());
}
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
@@ -1645,7 +1678,7 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
if (VD->hasGlobalStorage()) {
LV.setGlobalObjCRef(true);
- LV.setThreadLocalRef(VD->isThreadSpecified());
+ LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
}
}
LV.setObjCArray(E->getType()->isArrayType());
@@ -1795,15 +1828,15 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
// Check if this is a global variable.
- if (VD->hasLinkage() || VD->isStaticDataMember())
+ if (VD->hasLinkage() || VD->isStaticDataMember()) {
+ // If it's thread_local, emit a call to its wrapper function instead.
+ if (VD->getTLSKind() == VarDecl::TLS_Dynamic)
+ return CGM.getCXXABI().EmitThreadLocalDeclRefExpr(*this, E);
return EmitGlobalVarDeclLValue(*this, E, VD);
+ }
bool isBlockVariable = VD->hasAttr<BlocksAttr>();
- bool NonGCable = VD->hasLocalStorage() &&
- !VD->getType()->isReferenceType() &&
- !isBlockVariable;
-
llvm::Value *V = LocalDeclMap.lookup(VD);
if (!V && VD->isStaticLocal())
V = CGM.getStaticLocalDeclAddress(VD);
@@ -1837,10 +1870,20 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
LV = MakeAddrLValue(V, T, Alignment);
}
+ bool isLocalStorage = VD->hasLocalStorage();
+
+ bool NonGCable = isLocalStorage &&
+ !VD->getType()->isReferenceType() &&
+ !isBlockVariable;
if (NonGCable) {
LV.getQuals().removeObjCGCAttr();
LV.setNonGC(true);
}
+
+ bool isImpreciseLifetime =
+ (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
+ if (isImpreciseLifetime)
+ LV.setARCPreciseLifetime(ARCImpreciseLifetime);
setObjCGCLValueClass(getContext(), E, LV);
return LV;
}
@@ -2072,6 +2115,15 @@ llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
llvm::Type *TargetTy = IntPtrTy;
+ // Floating-point types which fit into intptr_t are bitcast to integers
+ // and then passed directly (after zero-extension, if necessary).
+ if (V->getType()->isFloatingPointTy()) {
+ unsigned Bits = V->getType()->getPrimitiveSizeInBits();
+ if (Bits <= TargetTy->getIntegerBitWidth())
+ V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
+ Bits));
+ }
+
// Integers which fit in intptr_t are zero-extended and passed directly.
if (V->getType()->isIntegerTy() &&
V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
@@ -2079,7 +2131,7 @@ llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
// Pointers are passed directly, everything else is passed by address.
if (!V->getType()->isPointerTy()) {
- llvm::Value *Ptr = Builder.CreateAlloca(V->getType());
+ llvm::Value *Ptr = CreateTempAlloca(V->getType());
Builder.CreateStore(V, Ptr);
V = Ptr;
}
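The float path, sketched in plain C++; bitsForHandler is an invented stand-in for the IR-level bitcast plus zero-extension:

    #include <cstdint>
    #include <cstring>
    uint64_t bitsForHandler(float V) {
      uint32_t Bits;
      std::memcpy(&Bits, &V, sizeof Bits); // the CreateBitCast step
      return Bits; // zero-extends into the intptr-sized handler argument
    }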
@@ -2443,6 +2495,17 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
llvm_unreachable("Unhandled member declaration!");
}
+/// Given that we are currently emitting a lambda, emit an l-value for
+/// one of its members.
+LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
+ assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
+ assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
+ QualType LambdaTagType =
+ getContext().getTagDeclType(Field->getParent());
+ LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
+ return EmitLValueForField(LambdaLV, Field);
+}
+
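What EmitLValueForLambdaField models at the source level (illustrative):

    int capture_demo() {
      int n = 7;
      auto f = [n] { return n + 1; }; // 'n' becomes a field of the closure type
      return f();                     // inside operator(), 'n' is reached
    }                                 // through 'this', the CXXABIThisValue path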
LValue CodeGenFunction::EmitLValueForField(LValue base,
const FieldDecl *field) {
if (field->isBitField()) {
@@ -2479,9 +2542,12 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
llvm::Value *addr = base.getAddress();
unsigned cvr = base.getVRQualifiers();
+ bool TBAAPath = CGM.getCodeGenOpts().StructPathTBAA;
if (rec->isUnion()) {
// For unions, there is no pointer adjustment.
assert(!type->isReferenceType() && "union has reference member");
+ // TODO: handle path-aware TBAA for unions.
+ TBAAPath = false;
} else {
// For structs, we GEP to the field that the record layout suggests.
unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
@@ -2493,6 +2559,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
if (cvr & Qualifiers::Volatile) load->setVolatile(true);
load->setAlignment(alignment.getQuantity());
+ // Loading the reference will disable path-aware TBAA.
+ TBAAPath = false;
if (CGM.shouldUseTBAA()) {
llvm::MDNode *tbaa;
if (mayAlias)
@@ -2526,6 +2594,16 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
LValue LV = MakeAddrLValue(addr, type, alignment);
LV.getQuals().addCVRQualifiers(cvr);
+ if (TBAAPath) {
+ const ASTRecordLayout &Layout =
+ getContext().getASTRecordLayout(field->getParent());
+ // Set the base type to be the base type of the base LValue and
+ // update offset to be relative to the base type.
+ LV.setTBAABaseType(mayAlias ? getContext().CharTy : base.getTBAABaseType());
+ LV.setTBAAOffset(mayAlias ? 0 : base.getTBAAOffset() +
+ Layout.getFieldOffset(field->getFieldIndex()) /
+ getContext().getCharWidth());
+ }
// __weak attribute on a field is ignored.
if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
@@ -2849,8 +2927,14 @@ RValue CodeGenFunction::EmitRValueForField(LValue LV,
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
- if (CGDebugInfo *DI = getDebugInfo())
- DI->EmitLocation(Builder, E->getLocStart());
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ SourceLocation Loc = E->getLocStart();
+ // Force column info to be generated so we can differentiate
+ // multiple call sites on the same line in the debug info.
+ const FunctionDecl* Callee = E->getDirectCallee();
+ bool ForceColumnInfo = Callee && Callee->isInlineSpecified();
+ DI->EmitLocation(Builder, Loc, ForceColumnInfo);
+ }
// Builtins never have block type.
if (E->getCallee()->getType()->isBlockPointerType())
@@ -2907,7 +2991,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
case Qualifiers::OCL_Strong:
EmitARCRelease(Builder.CreateLoad(BaseValue,
PseudoDtor->getDestroyedType().isVolatileQualified()),
- /*precise*/ true);
+ ARCPreciseLifetime);
break;
case Qualifiers::OCL_Weak:
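
The struct-path TBAA bookkeeping in EmitLValueForField composes offsets as member accesses nest. A worked example, not from the patch, assuming 4-byte ints and no padding (the types are hypothetical):

struct Inner { int x; int y; };
struct Outer { int a; Inner in; };
// For an access 'o.in.y':
//   o.in  ->  TBAABaseType = Outer, TBAAOffset = 4   (offsetof(Outer, in))
//   .y    ->  TBAABaseType = Outer, TBAAOffset = 4 + 4 = 8
// A may-alias field would instead reset the base type to char at offset 0.
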
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 1ac13c01ed..b974e1dcc6 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -170,6 +170,10 @@ public:
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
Visit(DAE->getExpr());
}
+ void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
+ CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
+ Visit(DIE->getExpr());
+ }
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
void VisitCXXConstructExpr(const CXXConstructExpr *E);
void VisitLambdaExpr(LambdaExpr *E);
@@ -1189,7 +1193,10 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// the optimizer, especially with bitfields.
unsigned NumInitElements = E->getNumInits();
RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
-
+
+ // Prepare a 'this' for CXXDefaultInitExprs.
+ CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddr());
+
if (record->isUnion()) {
// Only initialize one field of a union. The field itself is
// specified by the initializer list.
@@ -1341,7 +1348,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
// Reference values are always non-null and have the width of a pointer.
if (Field->getType()->isReferenceType())
NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
- CGF.getContext().getTargetInfo().getPointerWidth(0));
+ CGF.getTarget().getPointerWidth(0));
else
NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
}
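
The FieldConstructionScope/CXXDefaultInitExprScope pair exists for C++11 in-class initializers that mention the object being built. A minimal sketch (the type is hypothetical):

struct S {
  int a = 1;
  int *p = &a;   // this initializer is a CXXDefaultInitExpr that uses 'this'
};
// Whenever S's default initializers are emitted (from a constructor, or from
// an initializer list that doesn't cover 'p'), FieldConstructionScope stashes
// the address of the object under construction, and CXXDefaultInitExprScope
// installs it as CXXThisValue while '&a' is evaluated.
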
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 13ae8bb01a..83c8ace98c 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -1451,7 +1451,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
ElementType.isVolatileQualified());
- CGF.EmitARCRelease(PtrValue, /*precise*/ true);
+ CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
break;
}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 5fc73aa790..36f974a313 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -182,6 +182,10 @@ public:
ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
return Visit(DAE->getExpr());
}
+ ComplexPairTy VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
+ CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
+ return Visit(DIE->getExpr());
+ }
ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope Scope(CGF);
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index e3e5d66605..f5c8187c26 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -757,6 +757,12 @@ public:
return Visit(DAE->getExpr());
}
+ llvm::Constant *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
+ // No need for a DefaultInitExprScope: we don't handle 'this' in a
+ // constant expression.
+ return Visit(DIE->getExpr());
+ }
+
llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
return Visit(E->GetTemporaryExpr());
}
@@ -1018,8 +1024,7 @@ llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
if (const CXXConstructExpr *E =
dyn_cast_or_null<CXXConstructExpr>(D.getInit())) {
const CXXConstructorDecl *CD = E->getConstructor();
- if (CD->isTrivial() && CD->isDefaultConstructor() &&
- Ty->getAsCXXRecordDecl()->hasTrivialDestructor())
+ if (CD->isTrivial() && CD->isDefaultConstructor())
return EmitNullConstant(D.getType());
}
}
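
With the destructor check dropped, a global like the following can be emitted as a constant zero initializer even though its destructor is non-trivial (illustrative; Logger is hypothetical):

struct Logger {
  int fd;
  Logger() = default;   // trivial default constructor
  ~Logger();            // non-trivial destructor, defined elsewhere
};
Logger g;  // emitted as a null constant; ~Logger() is registered separately
           // (e.g. via __cxa_atexit), so it never affected the initializer
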
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 56b150ad38..c1c252d12b 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -344,6 +344,10 @@ public:
Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
return Visit(DAE->getExpr());
}
+ Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
+ CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
+ return Visit(DIE->getExpr());
+ }
Value *VisitCXXThisExpr(CXXThisExpr *TE) {
return CGF.LoadCXXThis();
}
@@ -583,62 +587,93 @@ void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
Check = Builder.CreateAnd(GE, LE);
}
} else {
- // Floating-point to integer or floating-point to floating-point. This has
- // undefined behavior if the source is +-Inf, NaN, or doesn't fit into the
- // destination type.
const llvm::fltSemantics &SrcSema =
CGF.getContext().getFloatTypeSemantics(OrigSrcType);
- APFloat MaxSrc(SrcSema, APFloat::uninitialized);
- APFloat MinSrc(SrcSema, APFloat::uninitialized);
-
if (isa<llvm::IntegerType>(DstTy)) {
+ // Floating-point to integer. This has undefined behavior if the source is
+ // +-Inf, NaN, or doesn't fit into the destination type (after truncation
+ // to an integer).
unsigned Width = CGF.getContext().getIntWidth(DstType);
bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
APSInt Min = APSInt::getMinValue(Width, Unsigned);
+ APFloat MinSrc(SrcSema, APFloat::uninitialized);
if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
APFloat::opOverflow)
// Don't need an overflow check for lower bound. Just check for
// -Inf/NaN.
- MinSrc = APFloat::getLargest(SrcSema, true);
+ MinSrc = APFloat::getInf(SrcSema, true);
+ else
+ // Find the largest value which is too small to represent (before
+ // truncation toward zero).
+ MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
APSInt Max = APSInt::getMaxValue(Width, Unsigned);
+ APFloat MaxSrc(SrcSema, APFloat::uninitialized);
if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
APFloat::opOverflow)
// Don't need an overflow check for upper bound. Just check for
// +Inf/NaN.
- MaxSrc = APFloat::getLargest(SrcSema, false);
+ MaxSrc = APFloat::getInf(SrcSema, false);
+ else
+ // Find the smallest value which is too large to represent (before
+ // truncation toward zero).
+ MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
+
+ // If we're converting from __half, convert the range to float to match
+ // the type of src.
+ if (OrigSrcType->isHalfType()) {
+ const llvm::fltSemantics &Sema =
+ CGF.getContext().getFloatTypeSemantics(SrcType);
+ bool IsInexact;
+ MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ }
+
+ llvm::Value *GE =
+ Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
+ llvm::Value *LE =
+ Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
+ Check = Builder.CreateAnd(GE, LE);
} else {
+ // FIXME: Maybe split this sanitizer out from float-cast-overflow.
+ //
+ // Floating-point to floating-point. This has undefined behavior if the
+ // source is not in the range of representable values of the destination
+ // type. The C and C++ standards are spectacularly unclear here. We
+ // diagnose finite out-of-range conversions, but allow infinities and NaNs
+ // to convert to the corresponding value in the smaller type.
+ //
+ // C11 Annex F gives all such conversions defined behavior for IEC 60559
+ // conforming implementations. Unfortunately, LLVM's fptrunc instruction
+ // does not.
+
+ // Converting from a lower rank to a higher rank can never have
+ // undefined behavior, since higher-rank types must have a superset
+ // of values of lower-rank types.
+ if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
+ return;
+
+ assert(!OrigSrcType->isHalfType() &&
+ "should not check conversion from __half, it has the lowest rank");
+
const llvm::fltSemantics &DstSema =
CGF.getContext().getFloatTypeSemantics(DstType);
- bool IsInexact;
-
- MinSrc = APFloat::getLargest(DstSema, true);
- if (MinSrc.convert(SrcSema, APFloat::rmTowardZero, &IsInexact) &
- APFloat::opOverflow)
- MinSrc = APFloat::getLargest(SrcSema, true);
+ APFloat MinBad = APFloat::getLargest(DstSema, false);
+ APFloat MaxBad = APFloat::getInf(DstSema, false);
- MaxSrc = APFloat::getLargest(DstSema, false);
- if (MaxSrc.convert(SrcSema, APFloat::rmTowardZero, &IsInexact) &
- APFloat::opOverflow)
- MaxSrc = APFloat::getLargest(SrcSema, false);
- }
-
- // If we're converting from __half, convert the range to float to match
- // the type of src.
- if (OrigSrcType->isHalfType()) {
- const llvm::fltSemantics &Sema =
- CGF.getContext().getFloatTypeSemantics(SrcType);
bool IsInexact;
- MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
- MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
+ MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
+
+ Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
+ llvm::Value *GE =
+ Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
+ llvm::Value *LE =
+ Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
+ Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
}
-
- llvm::Value *GE =
- Builder.CreateFCmpOGE(Src, llvm::ConstantFP::get(VMContext, MinSrc));
- llvm::Value *LE =
- Builder.CreateFCmpOLE(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
- Check = Builder.CreateAnd(GE, LE);
}
// FIXME: Provide a SourceLocation.
@@ -1603,8 +1638,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
else {
llvm::APFloat F(static_cast<float>(amount));
bool ignored;
- F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
- &ignored);
+ F.convert(CGF.getTarget().getLongDoubleFormat(),
+ llvm::APFloat::rmTowardZero, &ignored);
amt = llvm::ConstantFP::get(VMContext, F);
}
value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
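
Worked numbers for the new bounds (illustrative arithmetic, not from the patch):

// float -> signed char: Min = -128, Max = 127, so
//   MinSrc = -128.0f - 1.0f = -129.0f   // largest value too small to fit
//   MaxSrc =  127.0f + 1.0f =  128.0f   // smallest value too large to fit
// emitted check: (src > -129.0f) & (src < 128.0f)
//   -128.9f passes (it truncates to -128); -129.0f, +-inf, and NaN fail.
//
// double -> float: MinBad = FLT_MAX (exactly representable as a double) and
// MaxBad = +inf, so the diagnostic fires only for finite values with
// FLT_MAX < fabs(src) < inf; infinities and NaNs deliberately pass through.
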
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index ad7d62951a..713509bf67 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
@@ -109,32 +110,50 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
if (DLE)
Keys = CreateMemTemp(ElementArrayType, "keys");
+ // In ARC, we may need to do extra work to keep all the keys and
+ // values alive until after the call.
+ SmallVector<llvm::Value *, 16> NeededObjects;
+ bool TrackNeededObjects =
+ (getLangOpts().ObjCAutoRefCount &&
+ CGM.getCodeGenOpts().OptimizationLevel != 0);
+
// Perform the actual initialization of the array(s).
for (uint64_t i = 0; i < NumElements; i++) {
if (ALE) {
- // Emit the initializer.
+ // Emit the element and store it to the appropriate array slot.
const Expr *Rhs = ALE->getElement(i);
LValue LV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
ElementType,
Context.getTypeAlignInChars(Rhs->getType()),
Context);
- EmitScalarInit(Rhs, /*D=*/0, LV, /*capturedByInit=*/false);
+
+ llvm::Value *value = EmitScalarExpr(Rhs);
+ EmitStoreThroughLValue(RValue::get(value), LV, true);
+ if (TrackNeededObjects) {
+ NeededObjects.push_back(value);
+ }
} else {
- // Emit the key initializer.
+ // Emit the key and store it to the appropriate array slot.
const Expr *Key = DLE->getKeyValueElement(i).Key;
LValue KeyLV = LValue::MakeAddr(Builder.CreateStructGEP(Keys, i),
ElementType,
Context.getTypeAlignInChars(Key->getType()),
Context);
- EmitScalarInit(Key, /*D=*/0, KeyLV, /*capturedByInit=*/false);
+ llvm::Value *keyValue = EmitScalarExpr(Key);
+ EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);
- // Emit the value initializer.
+ // Emit the value and store it to the appropriate array slot.
const Expr *Value = DLE->getKeyValueElement(i).Value;
LValue ValueLV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
ElementType,
Context.getTypeAlignInChars(Value->getType()),
Context);
- EmitScalarInit(Value, /*D=*/0, ValueLV, /*capturedByInit=*/false);
+ llvm::Value *valueValue = EmitScalarExpr(Value);
+ EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
+ if (TrackNeededObjects) {
+ NeededObjects.push_back(keyValue);
+ NeededObjects.push_back(valueValue);
+ }
}
}
@@ -172,6 +191,15 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
Sel,
Receiver, Args, Class,
MethodWithObjects);
+
+ // The above message send needs these objects, but in ARC they are
+ // passed in a buffer that is essentially __unsafe_unretained.
+ // Therefore we must prevent the optimizer from releasing them until
+ // after the call.
+ if (TrackNeededObjects) {
+ EmitARCIntrinsicUse(NeededObjects);
+ }
+
return Builder.CreateBitCast(result.getScalarVal(),
ConvertType(E->getType()));
}
@@ -686,7 +714,7 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
}
llvm::Triple::ArchType arch =
- CGM.getContext().getTargetInfo().getTriple().getArch();
+ CGM.getTarget().getTriple().getArch();
// Most architectures require memory to fit within a single cache
// line, so the alignment has to be at least the size of the access.
@@ -1189,7 +1217,8 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
selfDecl->getType(), CK_LValueToRValue, &self,
VK_RValue);
ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
- SourceLocation(), &selfLoad, true, true);
+ SourceLocation(), SourceLocation(),
+ &selfLoad, true, true);
ParmVarDecl *argDecl = *setterMethod->param_begin();
QualType argType = argDecl->getType().getNonReferenceType();
@@ -1372,8 +1401,10 @@ bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
}
llvm::Value *CodeGenFunction::LoadObjCSelf() {
- const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
- return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
+ VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
+ DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
+ Self->getType(), VK_LValue, SourceLocation());
+ return EmitLoadOfScalar(EmitDeclRefLValue(&DRE));
}
QualType CodeGenFunction::TypeOfSelfObject() {
@@ -1686,7 +1717,8 @@ namespace {
llvm::Value *object;
void Emit(CodeGenFunction &CGF, Flags flags) {
- CGF.EmitARCRelease(object, /*precise*/ true);
+ // Releases at the end of the full-expression are imprecise.
+ CGF.EmitARCRelease(object, ARCImpreciseLifetime);
}
};
}
@@ -1706,6 +1738,21 @@ llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
return EmitARCRetainAutorelease(type, value);
}
+/// Given a number of pointers, inform the optimizer that they're
+/// being intrinsically used up until this point in the program.
+void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
+ llvm::Constant *&fn = CGM.getARCEntrypoints().clang_arc_use;
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGM.VoidTy, ArrayRef<llvm::Type*>(), true);
+ fn = CGM.CreateRuntimeFunction(fnType, "clang.arc.use");
+ }
+
+ // This isn't really a "runtime" function, but as an intrinsic it
+ // doesn't really matter as long as we align things up.
+ EmitNounwindRuntimeCall(fn, values);
+}
+
static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
llvm::FunctionType *type,
@@ -1940,7 +1987,8 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
/// Release the given object.
/// call void \@objc_release(i8* %value)
-void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
+void CodeGenFunction::EmitARCRelease(llvm::Value *value,
+ ARCPreciseLifetime_t precise) {
if (isa<llvm::ConstantPointerNull>(value)) return;
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
@@ -1956,7 +2004,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
// Call objc_release.
llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
- if (!precise) {
+ if (precise == ARCImpreciseLifetime) {
SmallVector<llvm::Value*,1> args;
call->setMetadata("clang.imprecise_release",
llvm::MDNode::get(Builder.getContext(), args));
@@ -1972,7 +2020,8 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
/// At -O1 and above, just load and call objc_release.
///
/// call void \@objc_storeStrong(i8** %addr, i8* null)
-void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr, bool precise) {
+void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr,
+ ARCPreciseLifetime_t precise) {
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
llvm::PointerType *addrTy = cast<llvm::PointerType>(addr->getType());
llvm::Value *null = llvm::ConstantPointerNull::get(
@@ -2042,7 +2091,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
EmitStoreOfScalar(newValue, dst);
// Finally, release the old value.
- EmitARCRelease(oldValue, /*precise*/ false);
+ EmitARCRelease(oldValue, dst.isARCPreciseLifetime());
return newValue;
}
@@ -2210,7 +2259,8 @@ void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
}
- EmitNounwindRuntimeCall(fn, value);
+ // objc_autoreleasePoolPop can throw.
+ EmitRuntimeCallOrInvoke(fn, value);
}
/// Produce the code to do an MRR version objc_autoreleasepool_push.
@@ -2254,13 +2304,13 @@ void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
- CGF.EmitARCDestroyStrong(addr, /*precise*/ true);
+ CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
}
void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
- CGF.EmitARCDestroyStrong(addr, /*precise*/ false);
+ CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
}
void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
@@ -2737,7 +2787,7 @@ CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
llvm::Value *oldValue =
EmitLoadOfScalar(lvalue);
EmitStoreOfScalar(value, lvalue);
- EmitARCRelease(oldValue, /*precise*/ false);
+ EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
} else {
value = EmitARCStoreStrong(lvalue, value, ignored);
}
@@ -2829,7 +2879,6 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
SourceLocation(),
SourceLocation(), II, C.VoidTy, 0,
SC_Static,
- SC_None,
false,
false);
@@ -2914,7 +2963,6 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
SourceLocation(),
SourceLocation(), II, C.VoidTy, 0,
SC_Static,
- SC_None,
false,
false);
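
The lifetime problem clang.arc.use solves has a familiar C++ shape. This is an analogy only, with hypothetical Obj/make/consume; ARC gets the same guarantee by emitting the intrinsic immediately after the message send:

#include <memory>

struct Obj;
std::shared_ptr<Obj> make();                 // hypothetical factory
void consume(Obj *const *buf, unsigned n);   // callee sees raw pointers only

void example() {
  std::shared_ptr<Obj> a = make(), b = make();
  Obj *buf[] = { a.get(), b.get() };  // like the __unsafe_unretained buffer
  consume(buf, 2);                    // the owners must stay alive across this
}
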
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 975f9ac4af..e8498b06ad 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -1617,7 +1617,7 @@ struct NullReturnState {
RValue RV = I->RV;
assert(RV.isScalar() &&
"NullReturnState::complete - arg not on object");
- CGF.EmitARCRelease(RV.getScalarVal(), true);
+ CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime);
}
}
}
@@ -1949,8 +1949,8 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
bool hasUnion = false;
SkipIvars.clear();
IvarsInfo.clear();
- unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
- unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+ unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
// __isa is the first field in the block descriptor and, by the runtime's
// convention, must be assumed to be GC'able.
@@ -2077,7 +2077,7 @@ void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
if (RecFields.empty())
return;
- unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+ unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
const FieldDecl *Field = RecFields[i];
@@ -2316,8 +2316,8 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
if (RunSkipBlockVars.empty())
return nullPtr;
- unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
- unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+ unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
unsigned WordSizeInBytes = WordSizeInBits/ByteSizeInBits;
// Sort on byte position; captures might not be allocated in order,
@@ -2393,7 +2393,7 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
printf("\n Inline instruction for BYREF variable layout: ");
else
printf("\n Inline instruction for block variable layout: ");
- printf("0x0%llx\n", (unsigned long long)Result);
+ printf("0x0%" PRIx64 "\n", Result);
}
if (WordSizeInBytes == 8) {
const llvm::APInt Instruction(64, Result);
@@ -2468,8 +2468,8 @@ llvm::Constant *CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM,
RunSkipBlockVars.clear();
bool hasUnion = false;
- unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
- unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+ unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
unsigned WordSizeInBytes = WordSizeInBits/ByteSizeInBits;
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
@@ -3519,6 +3519,9 @@ namespace {
if (isa<ObjCAtTryStmt>(S)) {
if (const ObjCAtFinallyStmt* FinallyStmt =
cast<ObjCAtTryStmt>(S).getFinallyStmt()) {
+ // Don't try to do the @finally if this is an EH cleanup.
+ if (flags.isForEHCleanup()) return;
+
// Save the current cleanup destination in case there's
// control flow inside the finally statement.
llvm::Value *CurCleanupDest =
@@ -3860,7 +3863,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::Value *PropagatingExnVar = 0;
// Push a normal cleanup to leave the try scope.
- CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalCleanup, &S,
+ CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalAndEHCleanup, &S,
SyncArgSlot,
CallTryExitVar,
ExceptionData,
@@ -4534,8 +4537,8 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
if (RecFields.empty())
return;
- unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
- unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+ unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
if (!RD && CGM.getLangOpts().ObjCAutoRefCount) {
const FieldDecl *FirstField = RecFields[0];
FirstFieldDelta =
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index abd10a29c9..9c0d5189f8 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -117,7 +117,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
// a synthesized ivar can never be a bit-field, so this is safe.
uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
- uint64_t AlignmentBits = CGF.CGM.getContext().getTargetInfo().getCharAlign();
+ uint64_t AlignmentBits = CGF.CGM.getTarget().getCharAlign();
uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
CharUnits StorageSize =
CGF.CGM.getContext().toCharUnitsFromBits(
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index 869843cbd4..40dc6bfa3b 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -412,6 +412,9 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
case Type::RValueReference:
llvm_unreachable("References shouldn't get here");
+ case Type::Auto:
+ llvm_unreachable("Undeduced auto type shouldn't get here");
+
case Type::Builtin:
// GCC treats vector and complex types as fundamental types.
case Type::Vector:
@@ -619,6 +622,9 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
case Type::RValueReference:
llvm_unreachable("References shouldn't get here");
+ case Type::Auto:
+ llvm_unreachable("Undeduced auto type shouldn't get here");
+
case Type::ConstantArray:
case Type::IncompleteArray:
case Type::VariableArray:
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 2c6438b0b6..30ab528ffb 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -276,7 +276,7 @@ bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
- unsigned CharAlign = Types.getContext().getTargetInfo().getCharAlign();
+ unsigned CharAlign = Types.getTarget().getCharAlign();
assert(FirstFieldOffset % CharAlign == 0 &&
"First field offset is misaligned");
CharUnits FirstFieldOffsetInBytes
@@ -352,7 +352,7 @@ bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
"End offset is not past the end of the known storage bits.");
uint64_t SpaceBits = EndOffset - FirstFieldOffset;
- uint64_t LongBits = Types.getContext().getTargetInfo().getLongWidth();
+ uint64_t LongBits = Types.getTarget().getLongWidth();
uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
llvm::NextPowerOf2(StorageBits % LongBits - 1);
assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
@@ -455,7 +455,7 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
return 0;
unsigned StorageBits = llvm::RoundUpToAlignment(
- FieldSize, Types.getContext().getTargetInfo().getCharAlign());
+ FieldSize, Types.getTarget().getCharAlign());
CharUnits NumBytesToAppend
= Types.getContext().toCharUnitsFromBits(StorageBits);
@@ -814,7 +814,7 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
// Lay out the virtual bases. The MS ABI uses a different
// algorithm here due to the lack of primary virtual bases.
- if (Types.getContext().getTargetInfo().getCXXABI().hasPrimaryVBases()) {
+ if (Types.getTarget().getCXXABI().hasPrimaryVBases()) {
RD->getIndirectPrimaryBases(IndirectPrimaryBases);
if (Layout.isPrimaryBaseVirtual())
IndirectPrimaryBases.insert(Layout.getPrimaryBase());
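
The widening formula above, in numbers (illustrative, with LongBits = 64):

// StorageBits = 17:
//   WidenedBits = (17 / 64) * 64 + NextPowerOf2(17 % 64 - 1)
//               = 0              + NextPowerOf2(16)
//               = 32
// so a 17-bit run of bitfields is accessed as one 32-bit unit, provided
// there is enough known storage (SpaceBits) before the next field.
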
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 9e7ddfbdc1..5e2ebe0d9c 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -37,6 +37,9 @@ void CodeGenFunction::EmitStopPoint(const Stmt *S) {
else
Loc = S->getLocStart();
DI->EmitLocation(Builder, Loc);
+
+  LastStopPoint = Loc;
}
}
@@ -134,7 +137,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
case Stmt::GCCAsmStmtClass: // Intentional fall-through.
case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
-
+ case Stmt::CapturedStmtClass:
+ EmitCapturedStmt(cast<CapturedStmt>(*S));
+ break;
case Stmt::ObjCAtTryStmtClass:
EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
break;
@@ -319,6 +324,12 @@ CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
}
void CodeGenFunction::EmitLabel(const LabelDecl *D) {
+ // Add this label to the current lexical scope if we're within any
+ // normal cleanups. Jumps "in" to this label --- when permitted by
+ // the language --- may need to be routed around such cleanups.
+ if (EHStack.hasNormalCleanups() && CurLexicalScope)
+ CurLexicalScope->addLabel(D);
+
JumpDest &Dest = LabelMap[D];
// If we didn't need a forward reference to this label, just go
@@ -330,16 +341,36 @@ void CodeGenFunction::EmitLabel(const LabelDecl *D) {
// it from the branch-fixups list.
} else {
assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
- Dest = JumpDest(Dest.getBlock(),
- EHStack.stable_begin(),
- Dest.getDestIndex());
-
+ Dest.setScopeDepth(EHStack.stable_begin());
ResolveBranchFixups(Dest.getBlock());
}
EmitBlock(Dest.getBlock());
}
+/// Change the cleanup scope of the labels in this lexical scope to
+/// match the scope of the enclosing context.
+void CodeGenFunction::LexicalScope::rescopeLabels() {
+ assert(!Labels.empty());
+ EHScopeStack::stable_iterator innermostScope
+ = CGF.EHStack.getInnermostNormalCleanup();
+
+ // Change the scope depth of all the labels.
+ for (SmallVectorImpl<const LabelDecl*>::const_iterator
+ i = Labels.begin(), e = Labels.end(); i != e; ++i) {
+ assert(CGF.LabelMap.count(*i));
+ JumpDest &dest = CGF.LabelMap.find(*i)->second;
+ assert(dest.getScopeDepth().isValid());
+ assert(innermostScope.encloses(dest.getScopeDepth()));
+ dest.setScopeDepth(innermostScope);
+ }
+
+ // Reparent the labels if the new scope also has cleanups.
+ if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
+ ParentScope->Labels.append(Labels.begin(), Labels.end());
+ }
+}
+
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
EmitLabel(S.getDecl());
@@ -768,8 +799,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// FIXME: Clean this up by using an LValue for ReturnTemp,
// EmitStoreThroughLValue, and EmitAnyExpr.
- if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
- !Target.useGlobalsForAutomaticVariables()) {
+ if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
// Apply the named return value optimization for this return statement,
// which means doing nothing: the appropriate result has already been
// constructed into the NRVO variable.
@@ -812,6 +842,10 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
}
}
+ NumReturnExprs += 1;
+ if (RV == 0 || RV->isEvaluatable(getContext()))
+ NumSimpleReturnExprs += 1;
+
cleanupScope.ForceCleanup();
EmitBranchThroughCleanup(ReturnBlock);
}
@@ -1424,7 +1458,7 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
if (StrVal[i] != '\n') continue;
SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
- CGF.Target);
+ CGF.getTarget());
Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
LineLoc.getRawEncoding()));
}
@@ -1442,18 +1476,23 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
- TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
- S.getOutputName(i));
- bool IsValid = Target.validateOutputConstraint(Info); (void)IsValid;
+ StringRef Name;
+ if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
+ Name = GAS->getOutputName(i);
+ TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
+ bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
assert(IsValid && "Failed to parse output constraint");
OutputConstraintInfos.push_back(Info);
}
for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
- TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
- S.getInputName(i));
- bool IsValid = Target.validateInputConstraint(OutputConstraintInfos.data(),
- S.getNumOutputs(), Info);
+ StringRef Name;
+ if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
+ Name = GAS->getInputName(i);
+ TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
+ bool IsValid =
+ getTarget().validateInputConstraint(OutputConstraintInfos.data(),
+ S.getNumOutputs(), Info);
assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
InputConstraintInfos.push_back(Info);
}
@@ -1477,13 +1516,14 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Simplify the output constraint.
std::string OutputConstraint(S.getOutputConstraint(i));
- OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
+ OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
+ getTarget());
const Expr *OutExpr = S.getOutputExpr(i);
OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
- Target, CGM, S);
+ getTarget(), CGM, S);
LValue Dest = EmitLValue(OutExpr);
if (!Constraints.empty())
@@ -1564,13 +1604,13 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Simplify the input constraint.
std::string InputConstraint(S.getInputConstraint(i));
- InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
+ InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
&OutputConstraintInfos);
InputConstraint =
AddVariableConstraints(InputConstraint,
*InputExpr->IgnoreParenNoopCasts(getContext()),
- Target, CGM, S);
+ getTarget(), CGM, S);
llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
@@ -1622,7 +1662,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
StringRef Clobber = S.getClobber(i);
if (Clobber != "memory" && Clobber != "cc")
- Clobber = Target.getNormalizedGCCRegisterName(Clobber);
+ Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
if (i != 0 || NumConstraints != 0)
Constraints += ',';
@@ -1633,7 +1673,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
// Add machine specific clobbers
- std::string MachineClobbers = Target.getClobbers();
+ std::string MachineClobbers = getTarget().getClobbers();
if (!MachineClobbers.empty()) {
if (!Constraints.empty())
Constraints += ',';
@@ -1710,3 +1750,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
}
}
+
+void CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S) {
+ llvm_unreachable("not implemented yet");
+}
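
The label-rescoping machinery added above covers jumps such as this one (illustrative; Guard is hypothetical):

struct Guard { ~Guard(); };    // the destructor is a normal cleanup

void f(bool again) {
  Guard outer;                 // cleanup active when the label is emitted
  {                            // inner LexicalScope with no declarations
  retry:
    if (!again) return;
    again = false;
  }
  goto retry;  // legal: the block declares nothing, so nothing is skipped;
               // it works because ~LexicalScope rescoped 'retry' to the
               // depth of outer's cleanup
}
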
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 6b0c271a08..b625b866c0 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -97,6 +97,10 @@ public:
}
};
+/// Does an ARC strong l-value have precise lifetime?
+enum ARCPreciseLifetime_t {
+ ARCImpreciseLifetime, ARCPreciseLifetime
+};
/// LValue - This represents an lvalue reference. Because C/C++ allow
/// bitfields, this is not a simple LLVM pointer, it may be a pointer plus a
@@ -147,8 +151,17 @@ class LValue {
// Lvalue is a thread local reference
bool ThreadLocalRef : 1;
+ // Lvalue has ARC imprecise lifetime. We store this inverted to try
+ // to make the default bitfield pattern all-zeroes.
+ bool ImpreciseLifetime : 1;
+
Expr *BaseIvarExp;
+ /// Used by struct-path-aware TBAA.
+ QualType TBAABaseType;
+ /// Offset relative to the base type.
+ uint64_t TBAAOffset;
+
/// TBAAInfo - TBAA information to attach to dereferences of this LValue.
llvm::MDNode *TBAAInfo;
@@ -164,8 +177,13 @@ private:
// Initialize Objective-C flags.
this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
+ this->ImpreciseLifetime = false;
this->ThreadLocalRef = false;
this->BaseIvarExp = 0;
+
+ // Initialize fields for TBAA.
+ this->TBAABaseType = Type;
+ this->TBAAOffset = 0;
this->TBAAInfo = TBAAInfo;
}
@@ -202,6 +220,13 @@ public:
bool isThreadLocalRef() const { return ThreadLocalRef; }
void setThreadLocalRef(bool Value) { ThreadLocalRef = Value;}
+ ARCPreciseLifetime_t isARCPreciseLifetime() const {
+ return ARCPreciseLifetime_t(!ImpreciseLifetime);
+ }
+ void setARCPreciseLifetime(ARCPreciseLifetime_t value) {
+ ImpreciseLifetime = (value == ARCImpreciseLifetime);
+ }
+
bool isObjCWeak() const {
return Quals.getObjCGCAttr() == Qualifiers::Weak;
}
@@ -216,6 +241,12 @@ public:
Expr *getBaseIvarExp() const { return BaseIvarExp; }
void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
+ QualType getTBAABaseType() const { return TBAABaseType; }
+ void setTBAABaseType(QualType T) { TBAABaseType = T; }
+
+ uint64_t getTBAAOffset() const { return TBAAOffset; }
+ void setTBAAOffset(uint64_t O) { TBAAOffset = O; }
+
llvm::MDNode *getTBAAInfo() const { return TBAAInfo; }
void setTBAAInfo(llvm::MDNode *N) { TBAAInfo = N; }
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 98008ccde6..9ca2295a92 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -2,6 +2,7 @@ set(LLVM_LINK_COMPONENTS
asmparser
bitreader
bitwriter
+ irreader
instrumentation
ipo
linker
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 8591961928..679cfeb6ed 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -23,9 +23,9 @@
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker.h"
#include "llvm/Pass.h"
-#include "llvm/Support/IRReader.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/Timer.h"
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 27ef65fa94..75c60edbba 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/OpenCL.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
@@ -30,8 +31,7 @@ using namespace clang;
using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
- : CodeGenTypeCache(cgm), CGM(cgm),
- Target(CGM.getContext().getTargetInfo()),
+ : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
Builder(cgm.getModule().getContext()),
SanitizePerformTypeCheck(CGM.getSanOpts().Null |
CGM.getSanOpts().Alignment |
@@ -41,11 +41,14 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
- DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
+ DebugInfo(0), DisableDebugInfo(false), CalleeWithThisReturn(0),
+ DidCallStackSave(false),
IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
+ NumReturnExprs(0), NumSimpleReturnExprs(0),
CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0),
+ CXXDefaultInitExprThis(0),
CXXStructorImplicitParamDecl(0), CXXStructorImplicitParamValue(0),
- OutermostConditional(0), TerminateLandingPad(0),
+ OutermostConditional(0), CurLexicalScope(0), TerminateLandingPad(0),
TerminateHandler(0), TrapBB(0) {
if (!suppressNewContext)
CGM.getCXXABI().getMangleContext().startNewFunction();
@@ -89,6 +92,9 @@ TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
#include "clang/AST/TypeNodes.def"
llvm_unreachable("non-canonical or dependent type in IR-generation");
+ case Type::Auto:
+ llvm_unreachable("undeduced auto type in IR-generation");
+
// Various scalar types.
case Type::Builtin:
case Type::Pointer:
@@ -182,15 +188,36 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
assert(BreakContinueStack.empty() &&
"mismatched push/pop in break/continue stack!");
- if (CGDebugInfo *DI = getDebugInfo())
- DI->EmitLocation(Builder, EndLoc);
+  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0 &&
+                               NumSimpleReturnExprs == NumReturnExprs;
+ // If the function contains only a simple return statement, the
+ // cleanup code may become the first breakpoint in the function. To
+ // be safe, set the debug location for it to the location of the
+ // return statement. Otherwise point it to end of the function's
+ // lexical scope.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (OnlySimpleReturnStmts)
+ DI->EmitLocation(Builder, LastStopPoint);
+ else
+ DI->EmitLocation(Builder, EndLoc);
+ }
// Pop any cleanups that might have been associated with the
// parameters. Do this in whatever block we're currently in; it's
// important to do this before we enter the return block or return
// edges will be *really* confused.
- if (EHStack.stable_begin() != PrologueCleanupDepth)
- PopCleanupBlocks(PrologueCleanupDepth);
+ bool EmitRetDbgLoc = true;
+ if (EHStack.stable_begin() != PrologueCleanupDepth) {
+ PopCleanupBlocks(PrologueCleanupDepth, EndLoc);
+
+ // Make sure the line table doesn't jump back into the body for
+ // the ret after it's been at EndLoc.
+ EmitRetDbgLoc = false;
+
+ if (CGDebugInfo *DI = getDebugInfo())
+ if (OnlySimpleReturnStmts)
+ DI->EmitLocation(Builder, EndLoc);
+ }
// Emit function epilog (to return).
EmitReturnBlock();
@@ -203,7 +230,7 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
DI->EmitFunctionEnd(Builder);
}
- EmitFunctionEpilog(*CurFnInfo);
+ EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc);
EmitEndEHSpec(CurCodeDecl);
assert(EHStack.empty() &&
@@ -277,35 +304,112 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
void CodeGenFunction::EmitMCountInstrumentation() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
- llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
- Target.getMCountName());
+ llvm::Constant *MCountFn =
+ CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
EmitNounwindRuntimeCall(MCountFn);
}
// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
-// FIXME: Add type, address, and access qualifiers.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
CodeGenModule &CGM, llvm::LLVMContext &Context,
- SmallVector <llvm::Value*, 5> &kernelMDArgs) {
-
- // Create MDNodes that represents the kernel arg metadata.
+ SmallVector <llvm::Value*, 5> &kernelMDArgs,
+ CGBuilderTy& Builder, ASTContext &ASTCtx) {
+ // Create MDNodes that represent the kernel arg metadata.
// Each MDNode is a list in the form of "key" followed by N values, where N
// is the number of kernel arguments.
+ // MDNode for the kernel argument address space qualifiers.
+ SmallVector<llvm::Value*, 8> addressQuals;
+ addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));
+
+ // MDNode for the kernel argument access qualifiers (images only).
+ SmallVector<llvm::Value*, 8> accessQuals;
+ accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));
+
+ // MDNode for the kernel argument type names.
+ SmallVector<llvm::Value*, 8> argTypeNames;
+ argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));
+
+ // MDNode for the kernel argument type qualifiers.
+ SmallVector<llvm::Value*, 8> argTypeQuals;
+ argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));
+
// MDNode for the kernel argument names.
SmallVector<llvm::Value*, 8> argNames;
argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));
for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
const ParmVarDecl *parm = FD->getParamDecl(i);
+ QualType ty = parm->getType();
+ std::string typeQuals;
+
+ if (ty->isPointerType()) {
+ QualType pointeeTy = ty->getPointeeType();
+
+ // Get address qualifier.
+ addressQuals.push_back(Builder.getInt32(ASTCtx.getTargetAddressSpace(
+ pointeeTy.getAddressSpace())));
+
+ // Get argument type name.
+ std::string typeName = pointeeTy.getUnqualifiedType().getAsString() + "*";
+
+      // Turn "unsigned type" into "utype".
+ std::string::size_type pos = typeName.find("unsigned");
+ if (pos != std::string::npos)
+ typeName.erase(pos+1, 8);
+
+ argTypeNames.push_back(llvm::MDString::get(Context, typeName));
+
+ // Get argument type qualifiers:
+ if (ty.isRestrictQualified())
+ typeQuals = "restrict";
+ if (pointeeTy.isConstQualified() ||
+ (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
+ typeQuals += typeQuals.empty() ? "const" : " const";
+ if (pointeeTy.isVolatileQualified())
+ typeQuals += typeQuals.empty() ? "volatile" : " volatile";
+ } else {
+ addressQuals.push_back(Builder.getInt32(0));
+
+ // Get argument type name.
+ std::string typeName = ty.getUnqualifiedType().getAsString();
+
+      // Turn "unsigned type" into "utype".
+ std::string::size_type pos = typeName.find("unsigned");
+ if (pos != std::string::npos)
+ typeName.erase(pos+1, 8);
+
+ argTypeNames.push_back(llvm::MDString::get(Context, typeName));
+
+ // Get argument type qualifiers:
+ if (ty.isConstQualified())
+ typeQuals = "const";
+ if (ty.isVolatileQualified())
+ typeQuals += typeQuals.empty() ? "volatile" : " volatile";
+ }
+
+ argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));
+
+ // Get image access qualifier:
+ if (ty->isImageType()) {
+ if (parm->hasAttr<OpenCLImageAccessAttr>() &&
+ parm->getAttr<OpenCLImageAccessAttr>()->getAccess() == CLIA_write_only)
+ accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
+ else
+ accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
+ } else
+ accessQuals.push_back(llvm::MDString::get(Context, "none"));
// Get argument name.
argNames.push_back(llvm::MDString::get(Context, parm->getName()));
-
}
- // Add MDNode to the list of all metadata.
+
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}
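
For a kernel like the one below, the nodes assembled here would come out roughly as follows. This is a sketch: the address-space numbers assume a target mapping __global to LLVM address space 1, and it assumes image arguments are not pointer types at this level, so they take the else branch:

// kernel void k(global const float *in, write_only image2d_t img, int n);
//
// kernel_arg_addr_space  : 1, 0, 0
// kernel_arg_access_qual : "none", "write_only", "none"
// kernel_arg_type        : "float*", "image2d_t", "int"
// kernel_arg_type_qual   : "const", "", ""
// kernel_arg_name        : "in", "img", "n"
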
@@ -321,7 +425,8 @@ void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
kernelMDArgs.push_back(Fn);
if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
- GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs);
+ GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs,
+ Builder, getContext());
if (FD->hasAttr<VecTypeHintAttr>()) {
VecTypeHintAttr *attr = FD->getAttr<VecTypeHintAttr>();
@@ -368,7 +473,8 @@ void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
OpenCLKernelMetadata->addOperand(kernelMDNode);
}
-void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
+void CodeGenFunction::StartFunction(GlobalDecl GD,
+ QualType RetTy,
llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
const FunctionArgList &Args,
@@ -376,7 +482,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
const Decl *D = GD.getDecl();
DidCallStackSave = false;
- CurCodeDecl = CurFuncDecl = D;
+ CurCodeDecl = D;
+ CurFuncDecl = (D ? D->getNonClosureContext() : 0);
FnRetTy = RetTy;
CurFn = Fn;
CurFnInfo = &FnInfo;
@@ -475,12 +582,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
LambdaThisCaptureField);
if (LambdaThisCaptureField) {
// If this lambda captures this, load it.
- QualType LambdaTagType =
- getContext().getTagDeclType(LambdaThisCaptureField->getParent());
- LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
- LambdaTagType);
- LValue ThisLValue = EmitLValueForField(LambdaLV,
- LambdaThisCaptureField);
+ LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
}
} else {
@@ -564,6 +666,10 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
SourceRange BodyRange;
if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
+ // CalleeWithThisReturn keeps track of the last callee inside this function
+ // that returns 'this'. Before starting the function, we set it to null.
+ CalleeWithThisReturn = 0;
+
// Emit the standard function prologue.
StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());
@@ -615,6 +721,9 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// Emit the standard function epilogue.
FinishFunction(BodyRange.getEnd());
+ // CalleeWithThisReturn keeps track of the last callee inside this function
+ // that returns 'this'. After finishing the function, we set it to null.
+ CalleeWithThisReturn = 0;
// If we haven't marked the function nothrow through other means, do
// a quick pass now to see if we can.
@@ -819,6 +928,16 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
return;
}
+ if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
+ // Conditional operator handling can give us a throw expression as a
+ // condition for a case like:
+    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
+ // Fold this to:
+ // br(c, throw x, br(y, t, f))
+ EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
+ return;
+ }
+
// Emit the code with the fully general case.
llvm::Value *CondV = EvaluateExprAsBool(Cond);
Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
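
A source-level trigger for the new fold (illustrative):

int pick(bool c, int x, int y) {
  // When the conditional operator is split for branching, one arm of the
  // condition becomes a bare CXXThrowExpr; the fold emits the throw directly
  // instead of trying to evaluate it as a boolean.
  if (c ? throw x : y) return 1;
  return 2;
}
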
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 0155f033e6..ff74c15c38 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -562,6 +562,11 @@ public:
EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
unsigned getDestIndex() const { return Index; }
+ // This should be used cautiously.
+ void setScopeDepth(EHScopeStack::stable_iterator depth) {
+ ScopeDepth = depth;
+ }
+
private:
llvm::BasicBlock *Block;
EHScopeStack::stable_iterator ScopeDepth;
@@ -574,8 +579,8 @@ public:
typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
CGBuilderTy Builder;
- /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
- /// This excludes BlockDecls.
+ /// CurFuncDecl - Holds the Decl for the current outermost
+ /// non-closure context.
const Decl *CurFuncDecl;
/// CurCodeDecl - This is the inner-most code context, which includes blocks.
const Decl *CurCodeDecl;
@@ -779,7 +784,9 @@ public:
/// PopCleanupBlock - Will pop the cleanup entry on the stack and
/// process all branch fixups.
- void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
+ /// \param EHLoc - Optional debug location for EH code.
+ void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
+                        SourceLocation EHLoc = SourceLocation());
/// DeactivateCleanupBlock - Deactivates the given cleanup block.
/// The block cannot be reactivated. Pops it if it's the top of the
@@ -853,6 +860,8 @@ public:
class LexicalScope: protected RunCleanupsScope {
SourceRange Range;
+ SmallVector<const LabelDecl*, 4> Labels;
+ LexicalScope *ParentScope;
LexicalScope(const LexicalScope &) LLVM_DELETED_FUNCTION;
void operator=(const LexicalScope &) LLVM_DELETED_FUNCTION;
@@ -860,35 +869,47 @@ public:
public:
/// \brief Enter a new cleanup scope.
explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
- : RunCleanupsScope(CGF), Range(Range) {
+ : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
+ CGF.CurLexicalScope = this;
if (CGDebugInfo *DI = CGF.getDebugInfo())
DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
}
+ void addLabel(const LabelDecl *label) {
+ assert(PerformCleanup && "adding label to dead scope?");
+ Labels.push_back(label);
+ }
+
/// \brief Exit this cleanup scope, emitting any accumulated
/// cleanups.
~LexicalScope() {
- if (PerformCleanup) endLexicalScope();
+ if (CGDebugInfo *DI = CGF.getDebugInfo())
+ DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+
+ // If we should perform a cleanup, force them now. Note that
+ // this ends the cleanup scope before rescoping any labels.
+ if (PerformCleanup) ForceCleanup();
}
/// \brief Force the emission of cleanups now, instead of waiting
/// until this object is destroyed.
void ForceCleanup() {
+ CGF.CurLexicalScope = ParentScope;
RunCleanupsScope::ForceCleanup();
- endLexicalScope();
- }
- private:
- void endLexicalScope() {
- if (CGDebugInfo *DI = CGF.getDebugInfo())
- DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+ if (!Labels.empty())
+ rescopeLabels();
}
+
+ void rescopeLabels();
};
/// PopCleanupBlocks - Takes the old cleanup stack size and emits
/// the cleanup blocks that have been added.
- void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
+ /// \param EHLoc - Optional debug location for EH code.
+ void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
+                        SourceLocation EHLoc = SourceLocation());
void ResolveBranchFixups(llvm::BasicBlock *Target);
@@ -1131,6 +1152,10 @@ private:
CGDebugInfo *DebugInfo;
bool DisableDebugInfo;
+  /// If the current function returns 'this', use this field to keep track of
+ /// the callee that returns 'this'.
+ llvm::Value *CalleeWithThisReturn;
+
/// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
/// calling llvm.stacksave for multiple VLAs in the same scope.
bool DidCallStackSave;
@@ -1185,12 +1210,62 @@ private:
/// lazily by getUnreachableBlock().
llvm::BasicBlock *UnreachableBlock;
+  /// Count of the number of return expressions in the function.
+ unsigned NumReturnExprs;
+
+ /// Count the number of simple (constant) return expressions in the function.
+ unsigned NumSimpleReturnExprs;
+
+ /// The last regular (non-return) debug location (breakpoint) in the function.
+ SourceLocation LastStopPoint;
+
+public:
+ /// A scope within which we are constructing the fields of an object which
+ /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
+ /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
+ class FieldConstructionScope {
+ public:
+ FieldConstructionScope(CodeGenFunction &CGF, llvm::Value *This)
+ : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
+ CGF.CXXDefaultInitExprThis = This;
+ }
+ ~FieldConstructionScope() {
+ CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
+ }
+
+ private:
+ CodeGenFunction &CGF;
+ llvm::Value *OldCXXDefaultInitExprThis;
+ };
+
+ /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
+ /// is overridden to be the object under construction.
+ class CXXDefaultInitExprScope {
+ public:
+ CXXDefaultInitExprScope(CodeGenFunction &CGF)
+ : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue) {
+ CGF.CXXThisValue = CGF.CXXDefaultInitExprThis;
+ }
+ ~CXXDefaultInitExprScope() {
+ CGF.CXXThisValue = OldCXXThisValue;
+ }
+
+ public:
+ CodeGenFunction &CGF;
+ llvm::Value *OldCXXThisValue;
+ };
+
+private:
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
ImplicitParamDecl *CXXABIThisDecl;
llvm::Value *CXXABIThisValue;
llvm::Value *CXXThisValue;
+ /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
+ /// this expression.
+ llvm::Value *CXXDefaultInitExprThis;
+
/// CXXStructorImplicitParamDecl - When generating code for a constructor or
/// destructor, this will hold the implicit argument (e.g. VTT).
ImplicitParamDecl *CXXStructorImplicitParamDecl;
@@ -1201,6 +1276,8 @@ private:
/// temporary should be destroyed conditionally.
ConditionalEvaluation *OutermostConditional;
+ /// The current lexical scope.
+ LexicalScope *CurLexicalScope;
/// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
/// type as well as the field number that contains the actual data.
@@ -1277,6 +1354,7 @@ public:
return getInvokeDestImpl();
}
+ const TargetInfo &getTarget() const { return Target; }
llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
//===--------------------------------------------------------------------===//
@@ -1377,7 +1455,6 @@ public:
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &Info,
- const Decl *OuterFuncDecl,
const DeclMapTy &ldm,
bool IsLambdaConversionToBlock);
@@ -1408,7 +1485,8 @@ public:
void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo);
- void StartFunction(GlobalDecl GD, QualType RetTy,
+ void StartFunction(GlobalDecl GD,
+ QualType RetTy,
llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
const FunctionArgList &Args,
@@ -1498,7 +1576,7 @@ public:
/// EmitFunctionEpilog - Emit the target specific LLVM code to return the
/// given temporary.
- void EmitFunctionEpilog(const CGFunctionInfo &FI);
+ void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc);
/// EmitStartEHSpec - Emit the start of the exception spec.
void EmitStartEHSpec(const Decl *D);
@@ -1997,18 +2075,34 @@ public:
/// initializer.
bool IsConstantAggregate;
+ /// Non-null if we should use lifetime annotations.
+ llvm::Value *SizeForLifetimeMarkers;
+
struct Invalid {};
AutoVarEmission(Invalid) : Variable(0) {}
AutoVarEmission(const VarDecl &variable)
: Variable(&variable), Address(0), NRVOFlag(0),
- IsByRef(false), IsConstantAggregate(false) {}
+ IsByRef(false), IsConstantAggregate(false),
+ SizeForLifetimeMarkers(0) {}
bool wasEmittedAsGlobal() const { return Address == 0; }
public:
static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
+ bool useLifetimeMarkers() const { return SizeForLifetimeMarkers != 0; }
+ llvm::Value *getSizeForLifetimeMarkers() const {
+ assert(useLifetimeMarkers());
+ return SizeForLifetimeMarkers;
+ }
+
+ /// Returns the raw, allocated address, which is not necessarily
+ /// the address of the object itself.
+ llvm::Value *getAllocatedAddress() const {
+ return Address;
+ }
+
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
/// __block decls.
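The new SizeForLifetimeMarkers field above drives bracketing of local allocas with llvm.lifetime intrinsics so the backend can reuse stack slots. A rough sketch of the idea using the modern IRBuilder helpers (the patch itself calls its cached LifetimeStartFn/LifetimeEndFn declarations instead; B, DL, and Ty are assumed to be in scope):

    // Bracket an alloca with lifetime markers.
    llvm::AllocaInst *Slot = B.CreateAlloca(Ty, nullptr, "tmp");
    uint64_t Size = DL.getTypeAllocSize(Ty);    // object size in bytes
    llvm::ConstantInt *SizeV = B.getInt64(Size);
    B.CreateLifetimeStart(Slot, SizeV);         // the slot becomes live here
    // ... emit uses of Slot ...
    B.CreateLifetimeEnd(Slot, SizeV);           // the slot may be reused after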
@@ -2094,6 +2188,7 @@ public:
void EmitCaseStmt(const CaseStmt &S);
void EmitCaseStmtRange(const CaseStmt &S);
void EmitAsmStmt(const AsmStmt &S);
+ void EmitCapturedStmt(const CapturedStmt &S);
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
@@ -2172,7 +2267,9 @@ public:
/// the LLVM value representation.
llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
unsigned Alignment, QualType Ty,
- llvm::MDNode *TBAAInfo = 0);
+ llvm::MDNode *TBAAInfo = 0,
+ QualType TBAABaseTy = QualType(),
+ uint64_t TBAAOffset = 0);
/// EmitLoadOfScalar - Load a scalar value from an address, taking
/// care to appropriately convert from the memory representation to
@@ -2185,7 +2282,9 @@ public:
/// the LLVM value representation.
void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment, QualType Ty,
- llvm::MDNode *TBAAInfo = 0, bool isInit=false);
+ llvm::MDNode *TBAAInfo = 0, bool isInit = false,
+ QualType TBAABaseTy = QualType(),
+ uint64_t TBAAOffset = 0);
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the memory representation to
@@ -2284,6 +2383,7 @@ public:
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
+ LValue EmitLValueForLambdaField(const FieldDecl *Field);
/// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
/// if the Field is a reference, this will return the address of the reference
@@ -2403,6 +2503,7 @@ public:
/// is unhandled by the current target.
llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitNeonCall(llvm::Function *F,
SmallVectorImpl<llvm::Value*> &O,
@@ -2446,14 +2547,14 @@ public:
llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
- bool ignored);
+ bool resultIgnored);
llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
- bool ignored);
+ bool resultIgnored);
llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
- void EmitARCDestroyStrong(llvm::Value *addr, bool precise);
- void EmitARCRelease(llvm::Value *value, bool precise);
+ void EmitARCDestroyStrong(llvm::Value *addr, ARCPreciseLifetime_t precise);
+ void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
llvm::Value *EmitARCAutorelease(llvm::Value *value);
llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
@@ -2474,6 +2575,8 @@ public:
llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
+ void EmitARCIntrinsicUse(llvm::ArrayRef<llvm::Value*> values);
+
static Destroyer destroyARCStrongImprecise;
static Destroyer destroyARCStrongPrecise;
static Destroyer destroyARCWeak;
@@ -2580,8 +2683,8 @@ public:
/// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
- llvm::Constant **Decls,
- unsigned NumDecls);
+ ArrayRef<llvm::Constant *> Decls,
+ llvm::GlobalVariable *Guard = 0);
/// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
/// variables.
@@ -2605,7 +2708,7 @@ public:
}
void enterNonTrivialFullExpression(const ExprWithCleanups *E);
- void EmitCXXThrowExpr(const CXXThrowExpr *E);
+ void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 402b309f8e..0b03a3c4b6 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -35,7 +35,6 @@
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Basic/TargetOptions.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/Triple.h"
@@ -55,7 +54,7 @@ using namespace CodeGen;
static const char AnnotationSection[] = "llvm.metadata";
static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
- switch (CGM.getContext().getTargetInfo().getCXXABI().getKind()) {
+ switch (CGM.getTarget().getCXXABI().getKind()) {
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
@@ -70,22 +69,20 @@ static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
- const TargetOptions &TO, llvm::Module &M,
- const llvm::DataLayout &TD,
+ llvm::Module &M, const llvm::DataLayout &TD,
DiagnosticsEngine &diags)
- : Context(C), LangOpts(C.getLangOpts()), CodeGenOpts(CGO), TargetOpts(TO),
- TheModule(M), TheDataLayout(TD), TheTargetCodeGenInfo(0), Diags(diags),
- ABI(createCXXABI(*this)),
- Types(*this),
- TBAA(0),
- VTables(*this), ObjCRuntime(0), OpenCLRuntime(0), CUDARuntime(0),
+ : Context(C), LangOpts(C.getLangOpts()), CodeGenOpts(CGO), TheModule(M),
+ Diags(diags), TheDataLayout(TD), Target(C.getTargetInfo()),
+ ABI(createCXXABI(*this)), VMContext(M.getContext()), TBAA(0),
+ TheTargetCodeGenInfo(0), Types(*this), VTables(*this),
+ ObjCRuntime(0), OpenCLRuntime(0), CUDARuntime(0),
DebugInfo(0), ARCData(0), NoObjCARCExceptionsMetadata(0),
RRData(0), CFConstantStringClassRef(0),
ConstantStringClassRef(0), NSConstantStringType(0),
- VMContext(M.getContext()),
NSConcreteGlobalBlock(0), NSConcreteStackBlock(0),
BlockObjectAssign(0), BlockObjectDispose(0),
BlockDescriptorType(0), GenericBlockLiteralType(0),
+ LifetimeStartFn(0), LifetimeEndFn(0),
SanitizerBlacklist(CGO.SanitizerBlacklistFile),
SanOpts(SanitizerBlacklist.isIn(M) ?
SanitizerOptions::Disabled : LangOpts.Sanitize) {
@@ -179,15 +176,17 @@ void CodeGenModule::Release() {
EmitDeferred();
EmitCXXGlobalInitFunc();
EmitCXXGlobalDtorFunc();
+ EmitCXXThreadLocalInitFunc();
if (ObjCRuntime)
if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
AddGlobalCtor(ObjCInitFunction);
EmitCtorList(GlobalCtors, "llvm.global_ctors");
EmitCtorList(GlobalDtors, "llvm.global_dtors");
EmitGlobalAnnotations();
+ EmitStaticExternCAliases();
EmitLLVMUsed();
- if (CodeGenOpts.ModulesAutolink) {
+ if (CodeGenOpts.Autolink && Context.getLangOpts().Modules) {
EmitModuleLinkOptions();
}
@@ -226,13 +225,32 @@ llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
return TBAA->getTBAAStructInfo(QTy);
}
-void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
- llvm::MDNode *TBAAInfo) {
- Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
+llvm::MDNode *CodeGenModule::getTBAAStructTypeInfo(QualType QTy) {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAStructTypeInfo(QTy);
}
-bool CodeGenModule::isTargetDarwin() const {
- return getContext().getTargetInfo().getTriple().isOSDarwin();
+llvm::MDNode *CodeGenModule::getTBAAStructTagInfo(QualType BaseTy,
+ llvm::MDNode *AccessN,
+ uint64_t O) {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAStructTagInfo(BaseTy, AccessN, O);
+}
+
+/// Decorate the instruction with a TBAA tag. For scalar TBAA, the tag
+/// is the same as the type. For struct-path aware TBAA, the tag
+/// is different from the type: base type, access type and offset.
+/// When ConvertTypeToTag is true, we create a tag based on the scalar type.
+void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo,
+ bool ConvertTypeToTag) {
+ if (ConvertTypeToTag && TBAA && CodeGenOpts.StructPathTBAA)
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa,
+ TBAA->getTBAAScalarTagInfo(TBAAInfo));
+ else
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
}
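For struct-path TBAA, an access tag is a triple of base type, access type, and offset, rather than just the scalar type node. A hedged sketch of what such metadata looks like when built by hand with llvm::MDBuilder (Ctx is an llvm::LLVMContext; the node names are illustrative):

    // For an access such as:  struct S { int a; float b; };  s.b = 1.0f;
    // the store is tagged (S, float, 4) instead of plain "float".
    llvm::MDBuilder MDB(Ctx);
    llvm::MDNode *Root  = MDB.createTBAARoot("Simple C/C++ TBAA");
    llvm::MDNode *Char  = MDB.createTBAAScalarTypeNode("omnipotent char", Root);
    llvm::MDNode *Int   = MDB.createTBAAScalarTypeNode("int", Char);
    llvm::MDNode *Float = MDB.createTBAAScalarTypeNode("float", Char);
    llvm::MDNode *S     = MDB.createTBAAStructTypeNode(
        "_ZTS1S", {{Int, 0}, {Float, 4}});      // (field type, offset) pairs
    llvm::MDNode *Tag   = MDB.createTBAAStructTagNode(S, Float, /*Offset=*/4);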
void CodeGenModule::Error(SourceLocation loc, StringRef error) {
@@ -308,7 +326,7 @@ static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(
void CodeGenModule::setTLSMode(llvm::GlobalVariable *GV,
const VarDecl &D) const {
- assert(D.isThreadSpecified() && "setting TLS mode on non-TLS var!");
+ assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
llvm::GlobalVariable::ThreadLocalMode TLM;
TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
@@ -656,7 +674,9 @@ void CodeGenModule::SetCommonAttributes(const Decl *D,
if (const SectionAttr *SA = D->getAttr<SectionAttr>())
GV->setSection(SA->getName());
- getTargetCodeGenInfo().SetTargetAttributes(D, GV, *this);
+ // Aliases cannot have attributes; filter them out here.
+ if (!isa<llvm::GlobalAlias>(GV))
+ getTargetCodeGenInfo().SetTargetAttributes(D, GV, *this);
}
void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
@@ -1458,8 +1478,11 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
}
- if (D->isThreadSpecified())
+ if (D->getTLSKind()) {
+ if (D->getTLSKind() == VarDecl::TLS_Dynamic)
+ CXXThreadLocals.push_back(std::make_pair(D, GV));
setTLSMode(GV, *D);
+ }
}
if (AddrSpace != Ty->getAddressSpace())
@@ -1599,7 +1622,8 @@ CodeGenModule::MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
D->getDeclContext()),
D->getLocStart(), D->getLocation(),
name, arrayType, sourceInfo,
- SC_Static, SC_Static);
+ SC_Static);
+ backingArray->setTSCSpec(D->getTSCSpec());
// Now clone the InitListExpr to initialize the array instead.
// Incredible hack: we want to use the existing InitListExpr here, so we need
@@ -1690,6 +1714,39 @@ unsigned CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D,
return AddrSpace;
}
+template<typename SomeDecl>
+void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
+ llvm::GlobalValue *GV) {
+ if (!getLangOpts().CPlusPlus)
+ return;
+
+ // Must have 'used' attribute, or else inline assembly can't rely on
+ // the name existing.
+ if (!D->template hasAttr<UsedAttr>())
+ return;
+
+ // Must have internal linkage and an ordinary name.
+ if (!D->getIdentifier() || D->getLinkage() != InternalLinkage)
+ return;
+
+ // Must be in an extern "C" context. Entities declared directly within
+ // a record are not extern "C" even if the record is in such a context.
+ const SomeDecl *First = D->getFirstDeclaration();
+ if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
+ return;
+
+ // OK, this is an internal linkage entity inside an extern "C" linkage
+ // specification. Make a note of that so we can give it the "expected"
+ // mangled name if nothing else is using that name.
+ std::pair<StaticExternCMap::iterator, bool> R =
+ StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));
+
+ // If we have multiple internal linkage entities with the same name
+ // in extern "C" regions, none of them gets that name.
+ if (!R.second)
+ R.first->second = 0;
+}
+
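The case this handles looks roughly like the following (hypothetical names); clang gives the static function a mangled internal name, which inline assembly cannot see:

    extern "C" {
      __attribute__((used))
      static int helper(int x) { return x + 1; }  // emitted as, e.g., _ZL6helperi
    }
    // Inline assembly in the same TU expects the plain name:
    //   asm("call helper");
    // so EmitStaticExternCAliases (further below) emits an alias "helper" for
    // the mangled symbol, provided nothing else in the module claims that name.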
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
llvm::Constant *Init = 0;
QualType ASTTy = D->getType();
@@ -1788,6 +1845,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
cast<llvm::GlobalValue>(Entry)->eraseFromParent();
}
+ MaybeHandleStaticInExternC(D, GV);
+
if (D->hasAttr<AnnotateAttr>())
AddGlobalAnnotations(D, GV);
@@ -1853,11 +1912,17 @@ CodeGenModule::GetLLVMLinkageVarDefinition(const VarDecl *D,
((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
D->getAttr<CommonAttr>()) &&
!D->hasExternalStorage() && !D->getInit() &&
- !D->getAttr<SectionAttr>() && !D->isThreadSpecified() &&
+ !D->getAttr<SectionAttr>() && !D->getTLSKind() &&
!D->getAttr<WeakImportAttr>()) {
// Thread local vars aren't considered common linkage.
return llvm::GlobalVariable::CommonLinkage;
- }
+ } else if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
+ getTarget().getTriple().isMacOSX())
+ // On Darwin, the backing variable for a C++11 thread_local variable always
+ // has internal linkage; all accesses should just be calls to the
+ // Itanium-specified entry point, which has the normal linkage of the
+ // variable.
+ return llvm::GlobalValue::InternalLinkage;
return llvm::GlobalVariable::ExternalLinkage;
}
@@ -2066,6 +2131,8 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
// FIXME: this is redundant with part of SetFunctionDefinitionAttributes
setGlobalVisibility(Fn, D);
+ MaybeHandleStaticInExternC(D, Fn);
+
CodeGenFunction(*this).GenerateCode(D, Fn, FI);
SetFunctionDefinitionAttributes(D, Fn);
@@ -2214,7 +2281,8 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
-
+ llvm::Value *V;
+
// If we don't already have it, get __CFConstantStringClassReference.
if (!CFConstantStringClassRef) {
llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
@@ -2222,9 +2290,11 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::Constant *GV = CreateRuntimeVariable(Ty,
"__CFConstantStringClassReference");
// Decay array -> ptr
- CFConstantStringClassRef =
- llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+ V = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+ CFConstantStringClassRef = V;
}
+ else
+ V = CFConstantStringClassRef;
QualType CFTy = getContext().getCFConstantStringType();
@@ -2234,7 +2304,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::Constant *Fields[4];
// Class pointer.
- Fields[0] = CFConstantStringClassRef;
+ Fields[0] = cast<llvm::ConstantExpr>(V);
// Flags.
llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
@@ -2269,6 +2339,8 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
Linkage, C, ".str");
GV->setUnnamedAddr(true);
+ // Don't enforce the target's minimum global alignment, since the only use
+ // of the string is via this class initializer.
if (isUTF16) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
GV->setAlignment(Align.getQuantity());
@@ -2293,7 +2365,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_cfstring_");
- if (const char *Sect = getContext().getTargetInfo().getCFStringSection())
+ if (const char *Sect = getTarget().getCFStringSection())
GV->setSection(Sect);
Entry.setValue(GV);
@@ -2321,7 +2393,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
-
+ llvm::Value *V;
// If we don't already have it, get _NSConstantStringClassReference.
if (!ConstantStringClassRef) {
std::string StringClass(getLangOpts().ObjCConstantStringClass);
@@ -2334,8 +2406,8 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
GV = getObjCRuntime().GetClassGlobal(str);
// Make sure the result is of the correct type.
llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
- ConstantStringClassRef =
- llvm::ConstantExpr::getBitCast(GV, PTy);
+ V = llvm::ConstantExpr::getBitCast(GV, PTy);
+ ConstantStringClassRef = V;
} else {
std::string str =
StringClass.empty() ? "_NSConstantStringClassReference"
@@ -2343,10 +2415,12 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
GV = CreateRuntimeVariable(PTy, str);
// Decay array -> ptr
- ConstantStringClassRef =
- llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+ V = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+ ConstantStringClassRef = V;
}
}
+ else
+ V = ConstantStringClassRef;
if (!NSConstantStringType) {
// Construct the type for a constant NSString.
@@ -2385,7 +2459,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
llvm::Constant *Fields[3];
// Class pointer.
- Fields[0] = ConstantStringClassRef;
+ Fields[0] = cast<llvm::ConstantExpr>(V);
// String pointer.
llvm::Constant *C =
@@ -2400,6 +2474,8 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
".str");
GV->setUnnamedAddr(true);
+ // Don't enforce the target's minimum global alignment, since the only use
+ // of the string is via this class initializer.
CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
GV->setAlignment(Align.getQuantity());
Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
@@ -2416,8 +2492,8 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
// FIXME. Fix section.
if (const char *Sect =
LangOpts.ObjCRuntime.isNonFragile()
- ? getContext().getTargetInfo().getNSStringNonFragileABISection()
- : getContext().getTargetInfo().getNSStringSection())
+ ? getTarget().getNSStringNonFragileABISection()
+ : getTarget().getNSStringSection())
GV->setSection(Sect);
Entry.setValue(GV);
@@ -2504,7 +2580,7 @@ CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
/// constant array for the given string literal.
llvm::Constant *
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
- CharUnits Align = getContext().getTypeAlignInChars(S->getType());
+ CharUnits Align = getContext().getAlignOfGlobalVarInChars(S->getType());
if (S->isAscii() || S->isUTF8()) {
SmallString<64> Str(S->getString());
@@ -2573,6 +2649,10 @@ llvm::Constant *CodeGenModule::GetAddrOfConstantString(StringRef Str,
if (!GlobalName)
GlobalName = ".str";
+ if (Alignment == 0)
+ Alignment = getContext().getAlignOfGlobalVarInChars(getContext().CharTy)
+ .getQuantity();
+
// Don't share any string literals if strings aren't constant.
if (LangOpts.WritableStrings)
return GenerateStringLiteral(Str, false, *this, GlobalName, Alignment);
@@ -2753,7 +2833,6 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// No code generation needed.
case Decl::UsingShadow:
case Decl::Using:
- case Decl::UsingDirective:
case Decl::ClassTemplate:
case Decl::FunctionTemplate:
case Decl::TypeAliasTemplate:
@@ -2761,6 +2840,10 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::Block:
case Decl::Empty:
break;
+ case Decl::UsingDirective: // using namespace X; [C++]
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
+ return;
case Decl::CXXConstructor:
// Skip function templates
if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
@@ -2886,6 +2969,23 @@ static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
+/// For each function which is declared within an extern "C" region and marked
+/// as 'used', but has internal linkage, create an alias from the unmangled
+/// name to the mangled name if possible. People expect to be able to refer
+/// to such functions with an unmangled name from inline assembly within the
+/// same translation unit.
+void CodeGenModule::EmitStaticExternCAliases() {
+ for (StaticExternCMap::iterator I = StaticExternCValues.begin(),
+ E = StaticExternCValues.end();
+ I != E; ++I) {
+ IdentifierInfo *Name = I->first;
+ llvm::GlobalValue *Val = I->second;
+ if (Val && !getModule().getNamedValue(Name->getName()))
+ AddUsedGlobal(new llvm::GlobalAlias(Val->getType(), Val->getLinkage(),
+ Name->getName(), Val, &getModule()));
+ }
+}
+
/// Emits metadata nodes associating all the global values in the
/// current module with the Decls they came from. This is useful for
/// projects using IR gen as a subroutine.
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 2bddb6f79f..91138c607c 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -67,7 +67,6 @@ namespace clang {
class VarDecl;
class LangOptions;
class CodeGenOptions;
- class TargetOptions;
class DiagnosticsEngine;
class AnnotateAttr;
class CXXDestructorDecl;
@@ -217,6 +216,9 @@ struct ARCEntrypoints {
/// A void(void) inline asm to use to mark that the return value of
/// a call will be immediately retain.
llvm::InlineAsm *retainAutoreleasedReturnValueMarker;
+
+ /// void clang.arc.use(...);
+ llvm::Constant *clang_arc_use;
};
/// CodeGenModule - This class organizes the cross-function state that is used
@@ -230,15 +232,22 @@ class CodeGenModule : public CodeGenTypeCache {
ASTContext &Context;
const LangOptions &LangOpts;
const CodeGenOptions &CodeGenOpts;
- const TargetOptions &TargetOpts;
llvm::Module &TheModule;
- const llvm::DataLayout &TheDataLayout;
- mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
DiagnosticsEngine &Diags;
+ const llvm::DataLayout &TheDataLayout;
+ const TargetInfo &Target;
CGCXXABI &ABI;
- CodeGenTypes Types;
- CodeGenTBAA *TBAA;
+ llvm::LLVMContext &VMContext;
+ CodeGenTBAA *TBAA;
+
+ mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
+
+ // This should not be moved earlier, since its initialization depends on
+ // some of the previous reference members already being initialized; it
+ // also checks whether TheTargetCodeGenInfo is null.
+ CodeGenTypes Types;
+
/// VTables - Holds information about C++ vtables.
CodeGenVTables VTables;
friend class CodeGenVTables;
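The ordering comment matters because C++ initializes non-static data members in declaration order, regardless of the mem-initializer list. A minimal illustration of the hazard (illustrative types only):

    struct M {
      int &Ref;      // must be declared before anything that reads it
      int Cached;
      M(int &V) : Ref(V), Cached(Ref + 1) {}  // fine: Ref is bound first
    };
    // Declaring Cached before Ref would make Cached(Ref + 1) read an
    // unbound reference, even with the same mem-initializer list.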
@@ -252,8 +261,8 @@ class CodeGenModule : public CodeGenTypeCache {
RREntrypoints *RRData;
// WeakRefReferences - A set of references that have only been seen via
- // a weakref so far. This is used to remove the weak of the reference if we ever
- // see a direct reference or a definition.
+ // a weakref so far. This is used to remove the weakness of the reference
+ // if we ever see a direct reference or a definition.
llvm::SmallPtrSet<llvm::GlobalValue*, 10> WeakRefReferences;
/// DeferredDecls - This contains all the decls which have definitions but
@@ -302,6 +311,20 @@ class CodeGenModule : public CodeGenTypeCache {
llvm::DenseMap<QualType, llvm::Constant *> AtomicSetterHelperFnMap;
llvm::DenseMap<QualType, llvm::Constant *> AtomicGetterHelperFnMap;
+ /// Map used to track internal linkage functions declared within
+ /// extern "C" regions.
+ typedef llvm::MapVector<IdentifierInfo *,
+ llvm::GlobalValue *> StaticExternCMap;
+ StaticExternCMap StaticExternCValues;
+
+ /// \brief thread_local variables defined or used in this TU.
+ std::vector<std::pair<const VarDecl *, llvm::GlobalVariable *> >
+ CXXThreadLocals;
+
+ /// \brief thread_local variables with initializers that need to run
+ /// before any thread_local variable in this TU is odr-used.
+ std::vector<llvm::Constant*> CXXThreadLocalInits;
+
/// CXXGlobalInits - Global variables with initializers that need to run
/// before main.
std::vector<llvm::Constant*> CXXGlobalInits;
@@ -337,11 +360,11 @@ class CodeGenModule : public CodeGenTypeCache {
/// CFConstantStringClassRef - Cached reference to the class for constant
/// strings. This value has type int * but is actually an Obj-C class pointer.
- llvm::Constant *CFConstantStringClassRef;
+ llvm::WeakVH CFConstantStringClassRef;
/// ConstantStringClassRef - Cached reference to the class for constant
/// strings. This value has type int * but is actually an Obj-C class pointer.
- llvm::Constant *ConstantStringClassRef;
+ llvm::WeakVH ConstantStringClassRef;
/// \brief The LLVM type corresponding to NSConstantString.
llvm::StructType *NSConstantStringType;
@@ -360,7 +383,6 @@ class CodeGenModule : public CodeGenTypeCache {
bool isTriviallyRecursive(const FunctionDecl *F);
bool shouldEmitFunction(const FunctionDecl *F);
- llvm::LLVMContext &VMContext;
/// @name Cache for Blocks Runtime Globals
/// @{
@@ -378,6 +400,12 @@ class CodeGenModule : public CodeGenTypeCache {
int GlobalUniqueCount;
} Block;
+ /// void @llvm.lifetime.start(i64 %size, i8* nocapture <ptr>)
+ llvm::Constant *LifetimeStartFn;
+
+ /// void @llvm.lifetime.end(i64 %size, i8* nocapture <ptr>)
+ llvm::Constant *LifetimeEndFn;
+
GlobalDecl initializedGlobalDecl;
llvm::BlackList SanitizerBlacklist;
@@ -387,8 +415,8 @@ class CodeGenModule : public CodeGenTypeCache {
/// @}
public:
CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts, llvm::Module &M,
- const llvm::DataLayout &TD, DiagnosticsEngine &Diags);
+ llvm::Module &M, const llvm::DataLayout &TD,
+ DiagnosticsEngine &Diags);
~CodeGenModule();
@@ -418,9 +446,6 @@ public:
return *CUDARuntime;
}
- /// getCXXABI() - Return a reference to the configured C++ ABI.
- CGCXXABI &getCXXABI() { return ABI; }
-
ARCEntrypoints &getARCEntrypoints() const {
assert(getLangOpts().ObjCAutoRefCount && ARCData != 0);
return *ARCData;
@@ -474,32 +499,45 @@ public:
}
ASTContext &getContext() const { return Context; }
- const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
const LangOptions &getLangOpts() const { return LangOpts; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
llvm::Module &getModule() const { return TheModule; }
- CodeGenTypes &getTypes() { return Types; }
- CodeGenVTables &getVTables() { return VTables; }
- VTableContext &getVTableContext() { return VTables.getVTableContext(); }
DiagnosticsEngine &getDiags() const { return Diags; }
const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
- const TargetInfo &getTarget() const { return Context.getTargetInfo(); }
+ const TargetInfo &getTarget() const { return Target; }
+ CGCXXABI &getCXXABI() { return ABI; }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
- const TargetCodeGenInfo &getTargetCodeGenInfo();
- bool isTargetDarwin() const;
-
+
bool shouldUseTBAA() const { return TBAA != 0; }
+ const TargetCodeGenInfo &getTargetCodeGenInfo();
+
+ CodeGenTypes &getTypes() { return Types; }
+
+ CodeGenVTables &getVTables() { return VTables; }
+ VTableContext &getVTableContext() { return VTables.getVTableContext(); }
+
llvm::MDNode *getTBAAInfo(QualType QTy);
llvm::MDNode *getTBAAInfoForVTablePtr();
llvm::MDNode *getTBAAStructInfo(QualType QTy);
+ /// Return the MDNode in the type DAG for the given struct type.
+ llvm::MDNode *getTBAAStructTypeInfo(QualType QTy);
+ /// Return the path-aware tag for the given base type, access node, and offset.
+ llvm::MDNode *getTBAAStructTagInfo(QualType BaseTy, llvm::MDNode *AccessN,
+ uint64_t O);
bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor);
bool isPaddedAtomicType(QualType type);
bool isPaddedAtomicType(const AtomicType *type);
- static void DecorateInstruction(llvm::Instruction *Inst,
- llvm::MDNode *TBAAInfo);
+ /// Decorate the instruction with a TBAA tag. For scalar TBAA, the tag
+ /// is the same as the type. For struct-path aware TBAA, the tag
+ /// is different from the type: base type, access type and offset.
+ /// When ConvertTypeToTag is true, we create a tag based on the scalar type.
+ void DecorateInstruction(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo,
+ bool ConvertTypeToTag = true);
/// getSize - Emit the given number of characters as a value of type size_t.
llvm::ConstantInt *getSize(CharUnits numChars);
@@ -549,8 +587,8 @@ public:
return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
}
- /// CreateOrReplaceCXXRuntimeVariable - Will return a global variable of the given
- /// type. If a variable with a different type already exists then a new
+ /// CreateOrReplaceCXXRuntimeVariable - Will return a global variable of the
+ /// given type. If a variable with a different type already exists then a new
/// variable with the right type will be created and all uses of the old
/// variable will be replaced with a bitcast to the new variable.
llvm::GlobalVariable *
@@ -675,7 +713,7 @@ public:
/// (if one is created).
llvm::Constant *GetAddrOfConstantString(StringRef Str,
const char *GlobalName=0,
- unsigned Alignment=1);
+ unsigned Alignment=0);
/// GetAddrOfConstantCString - Returns a pointer to a character array
/// containing the literal and a terminating '\0' character. The result has
@@ -685,7 +723,7 @@ public:
/// created).
llvm::Constant *GetAddrOfConstantCString(const std::string &str,
const char *GlobalName=0,
- unsigned Alignment=1);
+ unsigned Alignment=0);
/// GetAddrOfConstantCompoundLiteral - Returns a pointer to a constant global
/// variable for the given file-scope compound literal expression.
@@ -712,8 +750,7 @@ public:
llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID);
- llvm::Function *getIntrinsic(unsigned IID, ArrayRef<llvm::Type*> Tys =
- ArrayRef<llvm::Type*>());
+ llvm::Function *getIntrinsic(unsigned IID, ArrayRef<llvm::Type*> Tys = None);
/// EmitTopLevelDecl - Emit code for a single top level declaration.
void EmitTopLevelDecl(Decl *D);
@@ -722,6 +759,12 @@ public:
// variable has been instantiated.
void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
+ /// \brief If the declaration has internal linkage but is inside an
+ /// extern "C" linkage specification, prepare to emit an alias for it
+ /// to the expected name.
+ template<typename SomeDecl>
+ void MaybeHandleStaticInExternC(const SomeDecl *D, llvm::GlobalValue *GV);
+
/// AddUsedGlobal - Add a global which should be forced to be
/// present in the object file; these are emitted to the llvm.used
/// metadata global.
@@ -754,6 +797,9 @@ public:
///@}
+ llvm::Constant *getLLVMLifetimeStartFn();
+ llvm::Constant *getLLVMLifetimeEndFn();
+
// UpdateCompleteType - Make sure that this type is translated.
void UpdateCompletedType(const TagDecl *TD);
@@ -987,6 +1033,9 @@ private:
/// a C++ destructor Decl.
void EmitCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type);
+ /// \brief Emit the function that initializes C++ thread_local variables.
+ void EmitCXXThreadLocalInitFunc();
+
/// EmitCXXGlobalInitFunc - Emit the function that initializes C++ globals.
void EmitCXXGlobalInitFunc();
@@ -1031,6 +1080,10 @@ private:
/// \brief Emit the link options introduced by imported modules.
void EmitModuleLinkOptions();
+ /// \brief Emit aliases for internal-linkage declarations inside "C" language
+ /// linkage specifications, giving them the "expected" name where possible.
+ void EmitStaticExternCAliases();
+
void EmitDeclMetadata();
/// EmitCoverageFile - Emit the llvm.gcov metadata used to tell LLVM where
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index cfa141ea9a..5ff1560a48 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
@@ -49,13 +50,25 @@ llvm::MDNode *CodeGenTBAA::getRoot() {
return Root;
}
+// For struct-path aware TBAA, the scalar type has the same format as
+// the struct type: name, offset, pointer to another node in the type DAG.
+// For scalar TBAA, the scalar type is the same as the scalar tag:
+// name and a parent pointer.
+llvm::MDNode *CodeGenTBAA::createTBAAScalarType(StringRef Name,
+ llvm::MDNode *Parent) {
+ if (CodeGenOpts.StructPathTBAA)
+ return MDHelper.createTBAAScalarTypeNode(Name, Parent);
+ else
+ return MDHelper.createTBAANode(Name, Parent);
+}
+
llvm::MDNode *CodeGenTBAA::getChar() {
// Define the root of the tree for user-accessible memory. C and C++
// give special powers to char and certain similar types. However,
// these special powers only cover user-accessible memory, and don't
// include things like vtables.
if (!Char)
- Char = MDHelper.createTBAANode("omnipotent char", getRoot());
+ Char = createTBAAScalarType("omnipotent char", getRoot());
return Char;
}
@@ -123,7 +136,7 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
// "underlying types".
default:
return MetadataCache[Ty] =
- MDHelper.createTBAANode(BTy->getName(Features), getChar());
+ createTBAAScalarType(BTy->getName(Features), getChar());
}
}
@@ -131,8 +144,8 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
// TODO: Implement C++'s type "similarity" and consider dis-"similar"
// pointers distinct.
if (Ty->isPointerType())
- return MetadataCache[Ty] = MDHelper.createTBAANode("any pointer",
- getChar());
+ return MetadataCache[Ty] = createTBAAScalarType("any pointer",
+ getChar());
// Enum types are distinct types. In C++ they have "underlying types",
// however they aren't related for TBAA.
@@ -159,7 +172,7 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
llvm::raw_svector_ostream Out(OutName);
MContext.mangleCXXRTTIName(QualType(ETy, 0), Out);
Out.flush();
- return MetadataCache[Ty] = MDHelper.createTBAANode(OutName, getChar());
+ return MetadataCache[Ty] = createTBAAScalarType(OutName, getChar());
}
// For now, handle any other kind of type conservatively.
@@ -167,7 +180,7 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
}
llvm::MDNode *CodeGenTBAA::getTBAAInfoForVTablePtr() {
- return MDHelper.createTBAANode("vtable pointer", getRoot());
+ return createTBAAScalarType("vtable pointer", getRoot());
}
bool
@@ -191,8 +204,18 @@ CodeGenTBAA::CollectFields(uint64_t BaseOffset,
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
unsigned idx = 0;
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->isMsStruct(Context);
for (RecordDecl::field_iterator i = RD->field_begin(),
e = RD->field_end(); i != e; ++i, ++idx) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are ignored.
+ if (Context.ZeroBitfieldFollowsNonBitfield(*i, LastFD)) {
+ --idx;
+ continue;
+ }
+ LastFD = *i;
+ }
uint64_t Offset = BaseOffset +
Layout.getFieldOffset(idx) / Context.getCharWidth();
QualType FieldQTy = i->getType();
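Under MS-compatible record layout, a zero-length bitfield that immediately follows a non-bitfield member is ignored, so it contributes no entry to the field list being built here. A small example of the rule (using clang's ms_struct layout):

    struct __attribute__((ms_struct)) Z {
      char c;
      int  : 0;   // follows a non-bitfield member: ignored under ms_struct
      char d;     // only 'c' and 'd' produce TBAA struct fields
    };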
@@ -207,7 +230,9 @@ CodeGenTBAA::CollectFields(uint64_t BaseOffset,
uint64_t Offset = BaseOffset;
uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
llvm::MDNode *TBAAInfo = MayAlias ? getChar() : getTBAAInfo(QTy);
- Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAAInfo));
+ llvm::MDNode *TBAATag = CodeGenOpts.StructPathTBAA ?
+ getTBAAScalarTagInfo(TBAAInfo) : TBAAInfo;
+ Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
return true;
}
@@ -225,3 +250,101 @@ CodeGenTBAA::getTBAAStructInfo(QualType QTy) {
// For now, handle any other kind of type conservatively.
return StructMetadataCache[Ty] = NULL;
}
+
+/// Check if the given type can be handled by path-aware TBAA.
+static bool isTBAAPathStruct(QualType QTy) {
+ if (const RecordType *TTy = QTy->getAs<RecordType>()) {
+ const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+ // RD can be struct, union, class, interface or enum.
+ // For now, we only handle struct and class.
+ if (RD->isStruct() || RD->isClass())
+ return true;
+ }
+ return false;
+}
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) {
+ const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
+ assert(isTBAAPathStruct(QTy));
+
+ if (llvm::MDNode *N = StructTypeMetadataCache[Ty])
+ return N;
+
+ if (const RecordType *TTy = QTy->getAs<RecordType>()) {
+ const RecordDecl *RD = TTy->getDecl()->getDefinition();
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ SmallVector <std::pair<llvm::MDNode*, uint64_t>, 4> Fields;
+ unsigned idx = 0;
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->isMsStruct(Context);
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i, ++idx) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are ignored.
+ if (Context.ZeroBitfieldFollowsNonBitfield(*i, LastFD)) {
+ --idx;
+ continue;
+ }
+ LastFD = *i;
+ }
+
+ QualType FieldQTy = i->getType();
+ llvm::MDNode *FieldNode;
+ if (isTBAAPathStruct(FieldQTy))
+ FieldNode = getTBAAStructTypeInfo(FieldQTy);
+ else
+ FieldNode = getTBAAInfo(FieldQTy);
+ if (!FieldNode)
+ return StructTypeMetadataCache[Ty] = NULL;
+ Fields.push_back(std::make_pair(
+ FieldNode, Layout.getFieldOffset(idx) / Context.getCharWidth()));
+ }
+
+ // TODO: This is using the RTTI name. Is there a better way to get
+ // a unique string for a type?
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ MContext.mangleCXXRTTIName(QualType(Ty, 0), Out);
+ Out.flush();
+ // Create the struct type node with a vector of pairs (offset, type).
+ return StructTypeMetadataCache[Ty] =
+ MDHelper.createTBAAStructTypeNode(OutName, Fields);
+ }
+
+ return StructTypeMetadataCache[Ty] = NULL;
+}
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAStructTagInfo(QualType BaseQTy, llvm::MDNode *AccessNode,
+ uint64_t Offset) {
+ if (!CodeGenOpts.StructPathTBAA)
+ return AccessNode;
+
+ const Type *BTy = Context.getCanonicalType(BaseQTy).getTypePtr();
+ TBAAPathTag PathTag = TBAAPathTag(BTy, AccessNode, Offset);
+ if (llvm::MDNode *N = StructTagMetadataCache[PathTag])
+ return N;
+
+ llvm::MDNode *BNode = 0;
+ if (isTBAAPathStruct(BaseQTy))
+ BNode = getTBAAStructTypeInfo(BaseQTy);
+ if (!BNode)
+ return StructTagMetadataCache[PathTag] =
+ MDHelper.createTBAAStructTagNode(AccessNode, AccessNode, 0);
+
+ return StructTagMetadataCache[PathTag] =
+ MDHelper.createTBAAStructTagNode(BNode, AccessNode, Offset);
+}
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAScalarTagInfo(llvm::MDNode *AccessNode) {
+ if (llvm::MDNode *N = ScalarTagMetadataCache[AccessNode])
+ return N;
+
+ return ScalarTagMetadataCache[AccessNode] =
+ MDHelper.createTBAAStructTagNode(AccessNode, AccessNode, 0);
+}
diff --git a/lib/CodeGen/CodeGenTBAA.h b/lib/CodeGen/CodeGenTBAA.h
index 4c6e045fce..f0c9e06f02 100644
--- a/lib/CodeGen/CodeGenTBAA.h
+++ b/lib/CodeGen/CodeGenTBAA.h
@@ -35,6 +35,14 @@ namespace clang {
namespace CodeGen {
class CGRecordLayout;
+ struct TBAAPathTag {
+ TBAAPathTag(const Type *B, const llvm::MDNode *A, uint64_t O)
+ : BaseT(B), AccessN(A), Offset(O) {}
+ const Type *BaseT;
+ const llvm::MDNode *AccessN;
+ uint64_t Offset;
+ };
+
/// CodeGenTBAA - This class organizes the cross-module state that is used
/// while lowering AST types to LLVM types.
class CodeGenTBAA {
@@ -46,8 +54,15 @@ class CodeGenTBAA {
// MDHelper - Helper for creating metadata.
llvm::MDBuilder MDHelper;
- /// MetadataCache - This maps clang::Types to llvm::MDNodes describing them.
+ /// MetadataCache - This maps clang::Types to scalar llvm::MDNodes describing
+ /// them.
llvm::DenseMap<const Type *, llvm::MDNode *> MetadataCache;
+ /// This maps clang::Types to a struct node in the type DAG.
+ llvm::DenseMap<const Type *, llvm::MDNode *> StructTypeMetadataCache;
+ /// This maps TBAAPathTags to a tag node.
+ llvm::DenseMap<TBAAPathTag, llvm::MDNode *> StructTagMetadataCache;
+ /// This maps a scalar type to a scalar tag node.
+ llvm::DenseMap<const llvm::MDNode *, llvm::MDNode *> ScalarTagMetadataCache;
/// StructMetadataCache - This maps clang::Types to llvm::MDNodes describing
/// them for struct assignments.
@@ -71,6 +86,11 @@ class CodeGenTBAA {
SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &Fields,
bool MayAlias);
+ /// A wrapper function to create a scalar type. For struct-path aware TBAA,
+ /// the scalar type has the same format as the struct type: name, offset,
+ /// pointer to another node in the type DAG.
+ llvm::MDNode *createTBAAScalarType(StringRef Name, llvm::MDNode *Parent);
+
public:
CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
const CodeGenOptions &CGO,
@@ -89,9 +109,52 @@ public:
/// getTBAAStructInfo - Get the TBAAStruct MDNode to be used for a memcpy of
/// the given type.
llvm::MDNode *getTBAAStructInfo(QualType QTy);
+
+ /// Get the MDNode in the type DAG for the given struct type QType.
+ llvm::MDNode *getTBAAStructTypeInfo(QualType QType);
+ /// Get the tag MDNode for a given base type, the actual scalar access MDNode
+ /// and offset into the base type.
+ llvm::MDNode *getTBAAStructTagInfo(QualType BaseQType,
+ llvm::MDNode *AccessNode, uint64_t Offset);
+
+ /// Get the scalar tag MDNode for a given scalar type.
+ llvm::MDNode *getTBAAScalarTagInfo(llvm::MDNode *AccessNode);
};
} // end namespace CodeGen
} // end namespace clang
+namespace llvm {
+
+template<> struct DenseMapInfo<clang::CodeGen::TBAAPathTag> {
+ static clang::CodeGen::TBAAPathTag getEmptyKey() {
+ return clang::CodeGen::TBAAPathTag(
+ DenseMapInfo<const clang::Type *>::getEmptyKey(),
+ DenseMapInfo<const MDNode *>::getEmptyKey(),
+ DenseMapInfo<uint64_t>::getEmptyKey());
+ }
+
+ static clang::CodeGen::TBAAPathTag getTombstoneKey() {
+ return clang::CodeGen::TBAAPathTag(
+ DenseMapInfo<const clang::Type *>::getTombstoneKey(),
+ DenseMapInfo<const MDNode *>::getTombstoneKey(),
+ DenseMapInfo<uint64_t>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const clang::CodeGen::TBAAPathTag &Val) {
+ return DenseMapInfo<const clang::Type *>::getHashValue(Val.BaseT) ^
+ DenseMapInfo<const MDNode *>::getHashValue(Val.AccessN) ^
+ DenseMapInfo<uint64_t>::getHashValue(Val.Offset);
+ }
+
+ static bool isEqual(const clang::CodeGen::TBAAPathTag &LHS,
+ const clang::CodeGen::TBAAPathTag &RHS) {
+ return LHS.BaseT == RHS.BaseT &&
+ LHS.AccessN == RHS.AccessN &&
+ LHS.Offset == RHS.Offset;
+ }
+};
+
+} // end namespace llvm
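With this DenseMapInfo specialization, TBAAPathTag works directly as a DenseMap key, which is exactly how StructTagMetadataCache uses it. A trivial usage sketch (BaseTy, AccessNode, and Tag are assumed to be valid values already in scope):

    llvm::DenseMap<clang::CodeGen::TBAAPathTag, llvm::MDNode *> Cache;
    clang::CodeGen::TBAAPathTag Key(BaseTy, AccessNode, /*Offset=*/4);
    Cache[Key] = Tag;                   // hashed via the specialization above
    assert(Cache.lookup(Key) == Tag);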
+
#endif
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 8fc78e3de6..4240216b23 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -28,12 +28,12 @@
using namespace clang;
using namespace CodeGen;
-CodeGenTypes::CodeGenTypes(CodeGenModule &CGM)
- : Context(CGM.getContext()), Target(Context.getTargetInfo()),
- TheModule(CGM.getModule()), TheDataLayout(CGM.getDataLayout()),
- TheABIInfo(CGM.getTargetCodeGenInfo().getABIInfo()),
- TheCXXABI(CGM.getCXXABI()),
- CodeGenOpts(CGM.getCodeGenOpts()), CGM(CGM) {
+CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
+ : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
+ TheDataLayout(cgm.getDataLayout()),
+ Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
+ CodeGenOpts(cgm.getCodeGenOpts()),
+ TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
SkippedLayout = false;
}
@@ -392,6 +392,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
break;
}
+ case Type::Auto:
+ llvm_unreachable("Unexpected undeduced auto type!");
case Type::Complex: {
llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
ResultType = llvm::StructType::get(EltTy, EltTy, NULL);
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 11fd76fb19..452375f374 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -60,14 +60,17 @@ namespace CodeGen {
class CodeGenTypes {
public:
// Some of this stuff should probably be left on the CGM.
+ CodeGenModule &CGM;
ASTContext &Context;
- const TargetInfo &Target;
llvm::Module &TheModule;
const llvm::DataLayout &TheDataLayout;
- const ABIInfo &TheABIInfo;
+ const TargetInfo &Target;
CGCXXABI &TheCXXABI;
const CodeGenOptions &CodeGenOpts;
- CodeGenModule &CGM;
+
+ // This should not be moved earlier, since its initialization depends on
+ // some of the previous reference members already being initialized.
+ const ABIInfo &TheABIInfo;
private:
/// The opaque type map for Objective-C interfaces. All direct
@@ -107,14 +110,14 @@ private:
llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
public:
- CodeGenTypes(CodeGenModule &CGM);
+ CodeGenTypes(CodeGenModule &cgm);
~CodeGenTypes();
const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
- const TargetInfo &getTarget() const { return Target; }
ASTContext &getContext() const { return Context; }
const ABIInfo &getABIInfo() const { return TheABIInfo; }
const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
+ const TargetInfo &getTarget() const { return Target; }
CGCXXABI &getCXXABI() const { return TheCXXABI; }
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index b314fd6a39..9ab3f8c3ee 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -39,7 +39,6 @@ private:
protected:
bool UseARMMethodPtrABI;
bool UseARMGuardVarABI;
-
// It's a little silly for us to cache this.
llvm::IntegerType *getPtrDiffTy() {
if (!PtrDiffTy) {
@@ -117,7 +116,7 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
- void EmitConstructorCall(CodeGenFunction &CGF,
+ llvm::Value *EmitConstructorCall(CodeGenFunction &CGF,
const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
bool Delegating,
@@ -147,8 +146,16 @@ public:
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr, bool PerformInit);
- void registerGlobalDtor(CodeGenFunction &CGF, llvm::Constant *dtor,
- llvm::Constant *addr);
+ void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *dtor, llvm::Constant *addr);
+
+ llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
+ llvm::GlobalVariable *Var);
+ void EmitThreadLocalInitFuncs(
+ llvm::ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
+ llvm::Function *InitFunc);
+ LValue EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
+ const DeclRefExpr *DRE);
};
class ARMCXXABI : public ItaniumCXXABI {
@@ -184,11 +191,11 @@ public:
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr,
CharUnits cookieSize);
-private:
/// \brief Returns true if the given instance method is one of the
/// kinds that the ARM ABI says returns 'this'.
- static bool HasThisReturn(GlobalDecl GD) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ bool HasThisReturn(GlobalDecl GD) const {
+ const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(GD.getDecl());
+ if (!MD) return false;
return ((isa<CXXDestructorDecl>(MD) && GD.getDtorType() != Dtor_Deleting) ||
(isa<CXXConstructorDecl>(MD)));
}
@@ -196,7 +203,7 @@ private:
}
CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
- switch (CGM.getContext().getTargetInfo().getCXXABI().getKind()) {
+ switch (CGM.getTarget().getCXXABI().getKind()) {
// For IR-generation purposes, there's no significant difference
// between the ARM and iOS ABIs.
case TargetCXXABI::GenericARM:
@@ -230,8 +237,8 @@ CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
if (MPT->isMemberDataPointer())
- return getPtrDiffTy();
- return llvm::StructType::get(getPtrDiffTy(), getPtrDiffTy(), NULL);
+ return CGM.PtrDiffTy;
+ return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, NULL);
}
/// In the Itanium and ARM ABIs, method pointers have the form:
@@ -270,8 +277,7 @@ ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
CGM.getTypes().GetFunctionType(
CGM.getTypes().arrangeCXXMethodType(RD, FPT));
- llvm::IntegerType *ptrdiff = getPtrDiffTy();
- llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(ptrdiff, 1);
+ llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
@@ -345,7 +351,7 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
llvm::Value *Base,
llvm::Value *MemPtr,
const MemberPointerType *MPT) {
- assert(MemPtr->getType() == getPtrDiffTy());
+ assert(MemPtr->getType() == CGM.PtrDiffTy);
CGBuilderTy &Builder = CGF.Builder;
@@ -493,14 +499,12 @@ ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
- llvm::Type *ptrdiff_t = getPtrDiffTy();
-
// Itanium C++ ABI 2.3:
// A NULL pointer is represented as -1.
if (MPT->isMemberDataPointer())
- return llvm::ConstantInt::get(ptrdiff_t, -1ULL, /*isSigned=*/true);
+ return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
- llvm::Constant *Zero = llvm::ConstantInt::get(ptrdiff_t, 0);
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
llvm::Constant *Values[2] = { Zero, Zero };
return llvm::ConstantStruct::getAnon(Values);
}
@@ -511,7 +515,7 @@ ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
// Itanium C++ ABI 2.3:
// A pointer to data member is an offset from the base address of
// the class object containing it, represented as a ptrdiff_t
- return llvm::ConstantInt::get(getPtrDiffTy(), offset.getQuantity());
+ return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
@@ -524,7 +528,6 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
MD = MD->getCanonicalDecl();
CodeGenTypes &Types = CGM.getTypes();
- llvm::Type *ptrdiff_t = getPtrDiffTy();
// Get the function pointer (or index if this is a virtual function).
llvm::Constant *MemPtr[2];
@@ -543,16 +546,16 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
// least significant bit of adj then makes exactly the same
// discrimination as the least significant bit of ptr does for
// Itanium.
- MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset);
- MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
+ MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
+ MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
2 * ThisAdjustment.getQuantity() + 1);
} else {
// Itanium C++ ABI 2.3:
// For a virtual function, [the pointer field] is 1 plus the
// virtual table offset (in bytes) of the function,
// represented as a ptrdiff_t.
- MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset + 1);
- MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
+ MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
+ MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
ThisAdjustment.getQuantity());
}
} else {
@@ -565,7 +568,7 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
} else {
// Use an arbitrary non-function type to tell GetAddrOfFunction that the
// function type is incomplete.
- Ty = ptrdiff_t;
+ Ty = CGM.PtrDiffTy;
}
llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
@@ -696,7 +699,7 @@ ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
/// For member data pointers, this is just a check against -1.
if (MPT->isMemberDataPointer()) {
- assert(MemPtr->getType() == getPtrDiffTy());
+ assert(MemPtr->getType() == CGM.PtrDiffTy);
llvm::Value *NegativeOne =
llvm::Constant::getAllOnesValue(MemPtr->getType());
return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
@@ -852,7 +855,7 @@ void ARMCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
-void ItaniumCXXABI::EmitConstructorCall(CodeGenFunction &CGF,
+llvm::Value *ItaniumCXXABI::EmitConstructorCall(CodeGenFunction &CGF,
const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
bool Delegating,
@@ -867,6 +870,7 @@ void ItaniumCXXABI::EmitConstructorCall(CodeGenFunction &CGF,
// FIXME: Provide a source location here.
CGF.EmitCXXMemberCall(D, SourceLocation(), Callee, ReturnValueSlot(), This,
VTT, VTTTy, ArgBeg, ArgEnd);
+ return Callee;
}
RValue ItaniumCXXABI::EmitVirtualDestructorCall(CodeGenFunction &CGF,
@@ -1075,10 +1079,10 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
bool shouldPerformInit) {
CGBuilderTy &Builder = CGF.Builder;
- // We only need to use thread-safe statics for local variables;
+ // We only need to use thread-safe statics for local non-TLS variables;
// global initialization is always single-threaded.
- bool threadsafe =
- (getContext().getLangOpts().ThreadsafeStatics && D.isLocalVarDecl());
+ bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
+ D.isLocalVarDecl() && !D.getTLSKind();
// If we have a global variable with internal linkage and thread-safe statics
// are disabled, we can just let the guard variable be of type i8.
@@ -1113,6 +1117,8 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
llvm::ConstantInt::get(guardTy, 0),
guardName.str());
guard->setVisibility(var->getVisibility());
+ // If the variable is thread-local, so is its guard variable.
+ guard->setThreadLocalMode(var->getThreadLocalMode());
CGM.setStaticLocalDeclGuardAddress(&D, guard);
}
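For reference, the guard this creates feeds the usual Itanium protocol; a hand-written sketch of the logic clang emits for a guarded local (not clang source — the real fast path is a single acquire load of the guard's first byte):

    extern "C" int  __cxa_guard_acquire(unsigned long long *);
    extern "C" void __cxa_guard_release(unsigned long long *);

    static unsigned long long guard;   // low byte: "already initialized" flag
    static int w;
    int makeWidget();                  // hypothetical initializer

    int &lazyW() {                     // models: static int w = makeWidget();
      if (*(volatile char *)&guard == 0)        // fast path
        if (__cxa_guard_acquire(&guard)) {      // 1 => we run the initializer
          w = makeWidget();
          __cxa_guard_release(&guard);          // publish: sets the guard byte
        }
      return w;
    }

With this patch, when the local is thread_local the guard variable becomes thread-local too, so the "initialized" flag is tracked per thread.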
@@ -1213,7 +1219,14 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
llvm::Constant *dtor,
- llvm::Constant *addr) {
+ llvm::Constant *addr,
+ bool TLS) {
+ const char *Name = "__cxa_atexit";
+ if (TLS) {
+ const llvm::Triple &T = CGF.getTarget().getTriple();
+ Name = T.isMacOSX() ? "_tlv_atexit" : "__cxa_thread_atexit";
+ }
+
// We're assuming that the destructor function is something we can
// reasonably call with the default CC. Go ahead and cast it to the
// right prototype.
@@ -1226,8 +1239,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
llvm::FunctionType::get(CGF.IntTy, paramTys, false);
// Fetch the actual function.
- llvm::Constant *atexit =
- CGF.CGM.CreateRuntimeFunction(atexitTy, "__cxa_atexit");
+ llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
fn->setDoesNotThrow();
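All three runtime entry points are declared here with the single __cxa_atexit prototype, which is why one call-emission path can target any of them. Spelled out as C++ declarations (matching the shared atexitTy built above):

    extern "C" {
      int __cxa_atexit(void (*dtor)(void *), void *obj, void *dso_handle);
      int __cxa_thread_atexit(void (*dtor)(void *), void *obj, void *dso_handle);
      int _tlv_atexit(void (*dtor)(void *), void *obj, void *dso_handle); // Darwin
    }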
@@ -1245,12 +1257,15 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
/// Register a global destructor as best as we know how.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
+ const VarDecl &D,
llvm::Constant *dtor,
llvm::Constant *addr) {
// Use __cxa_atexit if available.
- if (CGM.getCodeGenOpts().CXAAtExit) {
- return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr);
- }
+ if (CGM.getCodeGenOpts().CXAAtExit)
+ return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
+
+ if (D.getTLSKind())
+ CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
// In Apple kexts, we want to add a global destructor entry.
// FIXME: shouldn't this be guarded by some variable?
@@ -1261,3 +1276,138 @@ void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
CGF.registerGlobalDtorWithAtExit(dtor, addr);
}
+
+/// Get the appropriate linkage for the wrapper function. This is essentially
+/// the weak form of the variable's linkage; every translation unit that needs
+/// the wrapper emits a copy, and we want the linker to merge them.
+static llvm::GlobalValue::LinkageTypes getThreadLocalWrapperLinkage(
+ llvm::GlobalValue::LinkageTypes VarLinkage) {
+ if (llvm::GlobalValue::isLinkerPrivateLinkage(VarLinkage))
+ return llvm::GlobalValue::LinkerPrivateWeakLinkage;
+ // For internal linkage variables, we don't need an external or weak wrapper.
+ if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
+ return VarLinkage;
+ return llvm::GlobalValue::WeakODRLinkage;
+}
+
+llvm::Function *
+ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
+ llvm::GlobalVariable *Var) {
+ // Mangle the name for the thread_local wrapper function.
+ SmallString<256> WrapperName;
+ {
+ llvm::raw_svector_ostream Out(WrapperName);
+ getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
+ Out.flush();
+ }
+
+ if (llvm::Value *V = Var->getParent()->getNamedValue(WrapperName))
+ return cast<llvm::Function>(V);
+
+ llvm::Type *RetTy = Var->getType();
+ if (VD->getType()->isReferenceType())
+ RetTy = RetTy->getPointerElementType();
+
+ llvm::FunctionType *FnTy = llvm::FunctionType::get(RetTy, false);
+ llvm::Function *Wrapper = llvm::Function::Create(
+ FnTy, getThreadLocalWrapperLinkage(Var->getLinkage()), WrapperName.str(),
+ &CGM.getModule());
+ // Always resolve references to the wrapper at link time.
+ Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ return Wrapper;
+}
+
+void ItaniumCXXABI::EmitThreadLocalInitFuncs(
+ llvm::ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
+ llvm::Function *InitFunc) {
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ const VarDecl *VD = Decls[I].first;
+ llvm::GlobalVariable *Var = Decls[I].second;
+
+ // Mangle the name for the thread_local initialization function.
+ SmallString<256> InitFnName;
+ {
+ llvm::raw_svector_ostream Out(InitFnName);
+ getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
+ Out.flush();
+ }
+
+ // If we have a definition for the variable, emit the initialization
+ // function as an alias to the global Init function (if any). Otherwise,
+ // produce a declaration of the initialization function.
+ llvm::GlobalValue *Init = 0;
+ bool InitIsInitFunc = false;
+ if (VD->hasDefinition()) {
+ InitIsInitFunc = true;
+ if (InitFunc)
+ Init =
+ new llvm::GlobalAlias(InitFunc->getType(), Var->getLinkage(),
+ InitFnName.str(), InitFunc, &CGM.getModule());
+ } else {
+ // Emit a weak global function referring to the initialization function.
+ // This function will not exist if the TU defining the thread_local
+ // variable in question does not need any dynamic initialization for
+ // its thread_local variables.
+ llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
+ Init = llvm::Function::Create(
+ FnTy, llvm::GlobalVariable::ExternalWeakLinkage, InitFnName.str(),
+ &CGM.getModule());
+ }
+
+ if (Init)
+ Init->setVisibility(Var->getVisibility());
+
+ llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
+ llvm::LLVMContext &Context = CGM.getModule().getContext();
+ llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
+ CGBuilderTy Builder(Entry);
+ if (InitIsInitFunc) {
+ if (Init)
+ Builder.CreateCall(Init);
+ } else {
+ // Don't know whether we have an init function. Call it if it exists.
+ llvm::Value *Have = Builder.CreateIsNotNull(Init);
+ llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
+ llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
+ Builder.CreateCondBr(Have, InitBB, ExitBB);
+
+ Builder.SetInsertPoint(InitBB);
+ Builder.CreateCall(Init);
+ Builder.CreateBr(ExitBB);
+
+ Builder.SetInsertPoint(ExitBB);
+ }
+
+ // For a reference, the result of the wrapper function is a pointer to
+ // the referenced object.
+ llvm::Value *Val = Var;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = Builder.CreateLoad(Val);
+ LI->setAlignment(CGM.getContext().getDeclAlign(VD).getQuantity());
+ Val = LI;
+ }
+
+ Builder.CreateRet(Val);
+ }
+}
+
+LValue ItaniumCXXABI::EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
+ const DeclRefExpr *DRE) {
+ const VarDecl *VD = cast<VarDecl>(DRE->getDecl());
+ QualType T = VD->getType();
+ llvm::Type *Ty = CGF.getTypes().ConvertTypeForMem(T);
+ llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD, Ty);
+ llvm::Function *Wrapper =
+ getOrCreateThreadLocalWrapper(VD, cast<llvm::GlobalVariable>(Val));
+
+ Val = CGF.Builder.CreateCall(Wrapper);
+
+ LValue LV;
+ if (VD->getType()->isReferenceType())
+ LV = CGF.MakeNaturalAlignAddrLValue(Val, T);
+ else
+ LV = CGF.MakeAddrLValue(Val, DRE->getType(),
+ CGF.getContext().getDeclAlign(VD));
+ // FIXME: need setObjCGCLValueClass?
+ return LV;
+}
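Taken together, the wrapper and per-variable init function behave roughly like the following C++ sketch; the function names stand in for the mangled _ZTW/_ZTH symbols, and this shows the intent rather than the exact IR:

    int compute();
    void tls_init();                  // stands in for _ZTH3tls (may not exist)
    thread_local int tls = compute();

    int &tls_wrapper() {              // stands in for _ZTW3tls: weak_odr, hidden
      tls_init();                     // run this thread's dynamic init if needed
      return tls;
    }
    // EmitThreadLocalDeclRefExpr rewrites every use of 'tls' into a call to
    // the wrapper instead of a direct access.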
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index fb6b86d878..f5242eaaa2 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -28,6 +28,17 @@ class MicrosoftCXXABI : public CGCXXABI {
public:
MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM) {}
+ bool isReturnTypeIndirect(const CXXRecordDecl *RD) const {
+ // Structures that are not C++03 PODs are always indirect.
+ return !RD->isPOD();
+ }
+
+ RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const {
+ if (RD->hasNonTrivialCopyConstructor())
+ return RAA_DirectInMemory;
+ return RAA_Default;
+ }
+
StringRef GetPureVirtualCallName() { return "_purecall"; }
// No known support for deleted functions in MSVC yet, so this choice is
// arbitrary.
@@ -55,7 +66,7 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
- void EmitConstructorCall(CodeGenFunction &CGF,
+ llvm::Value *EmitConstructorCall(CodeGenFunction &CGF,
const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
bool Delegating,
@@ -109,6 +120,64 @@ public:
llvm::Value *allocPtr,
CharUnits cookieSize);
static bool needThisReturn(GlobalDecl GD);
+
+private:
+ llvm::Constant *getZeroInt() {
+ return llvm::ConstantInt::get(CGM.IntTy, 0);
+ }
+
+ llvm::Constant *getAllOnesInt() {
+ return llvm::Constant::getAllOnesValue(CGM.IntTy);
+ }
+
+ void
+ GetNullMemberPointerFields(const MemberPointerType *MPT,
+ llvm::SmallVectorImpl<llvm::Constant *> &fields);
+
+ llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const CXXRecordDecl *RD,
+ llvm::Value *Base,
+ llvm::Value *VirtualBaseAdjustmentOffset,
+ llvm::Value *VBPtrOffset /* optional */);
+
+ /// \brief Emits a full member pointer with the fields common to data and
+ /// function member pointers.
+ llvm::Constant *EmitFullMemberPointer(llvm::Constant *FirstField,
+ bool IsMemberFunction,
+ const CXXRecordDecl *RD);
+
+public:
+ virtual llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
+
+ virtual bool isZeroInitializable(const MemberPointerType *MPT);
+
+ virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+
+ virtual llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset);
+ virtual llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
+ virtual llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+
+ virtual llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality);
+
+ virtual llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ virtual llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ virtual llvm::Value *
+ EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
};
}
@@ -238,7 +307,7 @@ void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
}
}
-void MicrosoftCXXABI::EmitConstructorCall(CodeGenFunction &CGF,
+llvm::Value *MicrosoftCXXABI::EmitConstructorCall(CodeGenFunction &CGF,
const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
bool Delegating,
@@ -259,6 +328,7 @@ void MicrosoftCXXABI::EmitConstructorCall(CodeGenFunction &CGF,
CGF.EmitCXXMemberCall(D, SourceLocation(), Callee, ReturnValueSlot(), This,
ImplicitParam, ImplicitParamTy,
ArgBeg, ArgEnd);
+ return Callee;
}
RValue MicrosoftCXXABI::EmitVirtualDestructorCall(CodeGenFunction &CGF,
@@ -347,10 +417,441 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
// Not sure whether we want thread-safe static local variables as VS
// doesn't make them thread-safe.
+ if (D.getTLSKind())
+ CGM.ErrorUnsupported(&D, "dynamic TLS initialization");
+
// Emit the initializer and add a global destructor if appropriate.
CGF.EmitCXXGlobalVarDeclInit(D, DeclPtr, PerformInit);
}
+// Member pointer helpers.
+static bool hasVBPtrOffsetField(MSInheritanceModel Inheritance) {
+ return Inheritance == MSIM_Unspecified;
+}
+
+static bool hasOnlyOneField(MSInheritanceModel Inheritance) {
+ return Inheritance <= MSIM_SinglePolymorphic;
+}
+
+// Only member pointers to functions need a this adjustment, since it can be
+// combined with the field offset for data pointers.
+static bool hasNonVirtualBaseAdjustmentField(bool IsMemberFunction,
+ MSInheritanceModel Inheritance) {
+ return (IsMemberFunction && Inheritance >= MSIM_Multiple);
+}
+
+static bool hasVirtualBaseAdjustmentField(MSInheritanceModel Inheritance) {
+ return Inheritance >= MSIM_Virtual;
+}
+
+// Use zero for the field offset of a null data member pointer if we can
+// guarantee that zero is not a valid field offset, or if the member pointer has
+// multiple fields. Polymorphic classes have a vfptr at offset zero, so we can
+// use zero for null. If there are multiple fields, we can use zero even if it
+// is a valid field offset because null-ness testing will check the other
+// fields.
+static bool nullFieldOffsetIsZero(MSInheritanceModel Inheritance) {
+ return Inheritance != MSIM_Multiple && Inheritance != MSIM_Single;
+}
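As a concrete illustration of the null-encoding rules (a sketch; the inheritance model is inferred per class):

    struct Single { int a, b; };                 // MSIM_Single
    struct Poly   { virtual void f(); int a; };  // MSIM_SinglePolymorphic
    // int Single::*p = nullptr;  =>  i32 -1  (0 is the valid offset of 'a')
    // int Poly::*q   = nullptr;  =>  i32  0  (offset 0 holds the vfptr, so
    //                                         no field can live there)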
+
+bool MicrosoftCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ // Null-ness for function memptrs only depends on the first field, which is
+ // the function pointer. The rest don't matter, so we can zero initialize.
+ if (MPT->isMemberFunctionPointer())
+ return true;
+
+ // The virtual base adjustment field is always -1 for null, so if we have one
+ // we can't zero initialize. The field offset is sometimes also -1 if 0 is a
+ // valid field offset.
+ const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+ MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+ return (!hasVirtualBaseAdjustmentField(Inheritance) &&
+ nullFieldOffsetIsZero(Inheritance));
+}
+
+llvm::Type *
+MicrosoftCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+ MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+ llvm::SmallVector<llvm::Type *, 4> fields;
+ if (MPT->isMemberFunctionPointer())
+ fields.push_back(CGM.VoidPtrTy); // FunctionPointerOrVirtualThunk
+ else
+ fields.push_back(CGM.IntTy); // FieldOffset
+
+ if (hasNonVirtualBaseAdjustmentField(MPT->isMemberFunctionPointer(),
+ Inheritance))
+ fields.push_back(CGM.IntTy);
+ if (hasVBPtrOffsetField(Inheritance))
+ fields.push_back(CGM.IntTy);
+ if (hasVirtualBaseAdjustmentField(Inheritance))
+ fields.push_back(CGM.IntTy); // VirtualBaseAdjustmentOffset
+
+ if (fields.size() == 1)
+ return fields[0];
+ return llvm::StructType::get(CGM.getLLVMContext(), fields);
+}
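For reference, a sketch of the member function pointer types this produces under each model, assuming a 32-bit MSVC-like target:

    struct S { void f(); };                  // single:      i8*
    struct A { void f(); }; struct B { void g(); };
    struct M : A, B { };                     // multiple:    { i8*, i32 }
    struct V : virtual A { };                // virtual:     { i8*, i32, i32 }
    struct U;                                // unspecified (incomplete class):
    typedef void (U::*UPtr)();               //              { i8*, i32, i32, i32 }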
+
+void MicrosoftCXXABI::
+GetNullMemberPointerFields(const MemberPointerType *MPT,
+ llvm::SmallVectorImpl<llvm::Constant *> &fields) {
+ assert(fields.empty());
+ const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+ MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+ if (MPT->isMemberFunctionPointer()) {
+ // FunctionPointerOrVirtualThunk
+ fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy));
+ } else {
+ if (nullFieldOffsetIsZero(Inheritance))
+ fields.push_back(getZeroInt()); // FieldOffset
+ else
+ fields.push_back(getAllOnesInt()); // FieldOffset
+ }
+
+ if (hasNonVirtualBaseAdjustmentField(MPT->isMemberFunctionPointer(),
+ Inheritance))
+ fields.push_back(getZeroInt());
+ if (hasVBPtrOffsetField(Inheritance))
+ fields.push_back(getZeroInt());
+ if (hasVirtualBaseAdjustmentField(Inheritance))
+ fields.push_back(getAllOnesInt());
+}
+
+llvm::Constant *
+MicrosoftCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ llvm::SmallVector<llvm::Constant *, 4> fields;
+ GetNullMemberPointerFields(MPT, fields);
+ if (fields.size() == 1)
+ return fields[0];
+ llvm::Constant *Res = llvm::ConstantStruct::getAnon(fields);
+ assert(Res->getType() == ConvertMemberPointerType(MPT));
+ return Res;
+}
+
+llvm::Constant *
+MicrosoftCXXABI::EmitFullMemberPointer(llvm::Constant *FirstField,
+                                       bool IsMemberFunction,
+                                       const CXXRecordDecl *RD) {
+ MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+
+  // Single inheritance class member pointers are represented as scalars
+  // instead of aggregates.
+ if (hasOnlyOneField(Inheritance))
+ return FirstField;
+
+ llvm::SmallVector<llvm::Constant *, 4> fields;
+ fields.push_back(FirstField);
+
+ if (hasNonVirtualBaseAdjustmentField(IsMemberFunction, Inheritance))
+ fields.push_back(getZeroInt());
+
+ if (hasVBPtrOffsetField(Inheritance)) {
+ int64_t VBPtrOffset =
+ getContext().getASTRecordLayout(RD).getVBPtrOffset().getQuantity();
+ if (VBPtrOffset == -1)
+ VBPtrOffset = 0;
+ fields.push_back(llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset));
+ }
+
+ // The rest of the fields are adjusted by conversions to a more derived class.
+ if (hasVirtualBaseAdjustmentField(Inheritance))
+ fields.push_back(getZeroInt());
+
+ return llvm::ConstantStruct::getAnon(fields);
+}
+
+llvm::Constant *
+MicrosoftCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
+ const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+ llvm::Constant *FirstField =
+ llvm::ConstantInt::get(CGM.IntTy, offset.getQuantity());
+ return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/false, RD);
+}
+
+llvm::Constant *
+MicrosoftCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ assert(MD->isInstance() && "Member function must not be static!");
+ MD = MD->getCanonicalDecl();
+ const CXXRecordDecl *RD = MD->getParent();
+ CodeGenTypes &Types = CGM.getTypes();
+
+ llvm::Constant *FirstField;
+ if (MD->isVirtual()) {
+ // FIXME: We have to instantiate a thunk that loads the vftable and jumps to
+ // the right offset.
+ FirstField = llvm::Constant::getNullValue(CGM.VoidPtrTy);
+ } else {
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ llvm::Type *Ty;
+ // Check whether the function has a computable LLVM signature.
+ if (Types.isFuncTypeConvertible(FPT)) {
+ // The function has a computable LLVM signature; use the correct type.
+ Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
+ } else {
+ // Use an arbitrary non-function type to tell GetAddrOfFunction that the
+ // function type is incomplete.
+ Ty = CGM.PtrDiffTy;
+ }
+ FirstField = CGM.GetAddrOfFunction(MD, Ty);
+ FirstField = llvm::ConstantExpr::getBitCast(FirstField, CGM.VoidPtrTy);
+ }
+
+ // The rest of the fields are common with data member pointers.
+ return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/true, RD);
+}
+
+llvm::Constant *
+MicrosoftCXXABI::EmitMemberPointer(const APValue &MP, QualType MPT) {
+ // FIXME PR15875: Implement member pointer conversions for Constants.
+  const CXXRecordDecl *RD =
+      MPT->castAs<MemberPointerType>()->getClass()->getAsCXXRecordDecl();
+ return EmitFullMemberPointer(llvm::Constant::getNullValue(CGM.VoidPtrTy),
+ /*IsMemberFunction=*/true, RD);
+}
+
+/// Member pointers are the same if they're either bitwise identical *or* both
+/// null. Null-ness for function members is determined by the first field,
+/// while for data member pointers we must compare all fields.
+llvm::Value *
+MicrosoftCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // Handle != comparisons by switching the sense of all boolean operations.
+ llvm::ICmpInst::Predicate Eq;
+ llvm::Instruction::BinaryOps And, Or;
+ if (Inequality) {
+ Eq = llvm::ICmpInst::ICMP_NE;
+ And = llvm::Instruction::Or;
+ Or = llvm::Instruction::And;
+ } else {
+ Eq = llvm::ICmpInst::ICMP_EQ;
+ And = llvm::Instruction::And;
+ Or = llvm::Instruction::Or;
+ }
+
+ // If this is a single field member pointer (single inheritance), this is a
+ // single icmp.
+ const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+ MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+ if (hasOnlyOneField(Inheritance))
+ return Builder.CreateICmp(Eq, L, R);
+
+ // Compare the first field.
+ llvm::Value *L0 = Builder.CreateExtractValue(L, 0, "lhs.0");
+ llvm::Value *R0 = Builder.CreateExtractValue(R, 0, "rhs.0");
+ llvm::Value *Cmp0 = Builder.CreateICmp(Eq, L0, R0, "memptr.cmp.first");
+
+ // Compare everything other than the first field.
+ llvm::Value *Res = 0;
+ llvm::StructType *LType = cast<llvm::StructType>(L->getType());
+ for (unsigned I = 1, E = LType->getNumElements(); I != E; ++I) {
+ llvm::Value *LF = Builder.CreateExtractValue(L, I);
+ llvm::Value *RF = Builder.CreateExtractValue(R, I);
+ llvm::Value *Cmp = Builder.CreateICmp(Eq, LF, RF, "memptr.cmp.rest");
+ if (Res)
+ Res = Builder.CreateBinOp(And, Res, Cmp);
+ else
+ Res = Cmp;
+ }
+
+ // Check if the first field is 0 if this is a function pointer.
+ if (MPT->isMemberFunctionPointer()) {
+ // (l1 == r1 && ...) || l0 == 0
+ llvm::Value *Zero = llvm::Constant::getNullValue(L0->getType());
+ llvm::Value *IsZero = Builder.CreateICmp(Eq, L0, Zero, "memptr.cmp.iszero");
+ Res = Builder.CreateBinOp(Or, Res, IsZero);
+ }
+
+  // Combine the result with the comparison of the first field, which must
+  // be true for the overall comparison to succeed.
+ return Builder.CreateBinOp(And, Res, Cmp0, "memptr.cmp");
+}
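Written out as a scalar C++ model for a two-field function member pointer, the emitted equality predicate is (a sketch of the And/Or construction above; 'ptr' is field 0 and 'adj' stands for the remaining fields):

    static bool memptrEq(void *lptr, int ladj, void *rptr, int radj) {
      bool rest   = (ladj == radj);     // everything other than field 0
      bool isNull = (lptr == nullptr);  // null function memptrs ignore the rest
      return (rest || isNull) && (lptr == rptr);
    }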
+
+llvm::Value *
+MicrosoftCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::SmallVector<llvm::Constant *, 4> fields;
+ // We only need one field for member functions.
+ if (MPT->isMemberFunctionPointer())
+ fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy));
+ else
+ GetNullMemberPointerFields(MPT, fields);
+ assert(!fields.empty());
+ llvm::Value *FirstField = MemPtr;
+ if (MemPtr->getType()->isStructTy())
+ FirstField = Builder.CreateExtractValue(MemPtr, 0);
+ llvm::Value *Res = Builder.CreateICmpNE(FirstField, fields[0], "memptr.cmp0");
+
+ // For function member pointers, we only need to test the function pointer
+  // field. The other fields, if any, can be garbage.
+ if (MPT->isMemberFunctionPointer())
+ return Res;
+
+ // Otherwise, emit a series of compares and combine the results.
+ for (int I = 1, E = fields.size(); I < E; ++I) {
+ llvm::Value *Field = Builder.CreateExtractValue(MemPtr, I);
+ llvm::Value *Next = Builder.CreateICmpNE(Field, fields[I], "memptr.cmp");
+ Res = Builder.CreateAnd(Res, Next, "memptr.tobool");
+ }
+ return Res;
+}
+
+// Returns an adjusted base cast to i8*, since we do more address arithmetic on
+// it.
+llvm::Value *
+MicrosoftCXXABI::AdjustVirtualBase(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD, llvm::Value *Base,
+ llvm::Value *VirtualBaseAdjustmentOffset,
+ llvm::Value *VBPtrOffset) {
+ CGBuilderTy &Builder = CGF.Builder;
+ Base = Builder.CreateBitCast(Base, CGM.Int8PtrTy);
+ llvm::BasicBlock *OriginalBB = 0;
+ llvm::BasicBlock *SkipAdjustBB = 0;
+ llvm::BasicBlock *VBaseAdjustBB = 0;
+
+ // In the unspecified inheritance model, there might not be a vbtable at all,
+ // in which case we need to skip the virtual base lookup. If there is a
+ // vbtable, the first entry is a no-op entry that gives back the original
+ // base, so look for a virtual base adjustment offset of zero.
+ if (VBPtrOffset) {
+ OriginalBB = Builder.GetInsertBlock();
+ VBaseAdjustBB = CGF.createBasicBlock("memptr.vadjust");
+ SkipAdjustBB = CGF.createBasicBlock("memptr.skip_vadjust");
+ llvm::Value *IsVirtual =
+ Builder.CreateICmpNE(VirtualBaseAdjustmentOffset, getZeroInt(),
+ "memptr.is_vbase");
+ Builder.CreateCondBr(IsVirtual, VBaseAdjustBB, SkipAdjustBB);
+ CGF.EmitBlock(VBaseAdjustBB);
+ }
+
+ // If we weren't given a dynamic vbptr offset, RD should be complete and we'll
+ // know the vbptr offset.
+ if (!VBPtrOffset) {
+ CharUnits offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
+ VBPtrOffset = llvm::ConstantInt::get(CGM.IntTy, offs.getQuantity());
+ }
+ // Load the vbtable pointer from the vbtable offset in the instance.
+ llvm::Value *VBPtr =
+ Builder.CreateInBoundsGEP(Base, VBPtrOffset, "memptr.vbptr");
+ llvm::Value *VBTable =
+ Builder.CreateBitCast(VBPtr, CGM.Int8PtrTy->getPointerTo(0));
+ VBTable = Builder.CreateLoad(VBTable, "memptr.vbtable");
+ // Load an i32 offset from the vb-table.
+ llvm::Value *VBaseOffs =
+ Builder.CreateInBoundsGEP(VBTable, VirtualBaseAdjustmentOffset);
+ VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
+ VBaseOffs = Builder.CreateLoad(VBaseOffs, "memptr.vbase_offs");
+ // Add it to VBPtr. GEP will sign extend the i32 value for us.
+ llvm::Value *AdjustedBase = Builder.CreateInBoundsGEP(VBPtr, VBaseOffs);
+
+ // Merge control flow with the case where we didn't have to adjust.
+ if (VBaseAdjustBB) {
+ Builder.CreateBr(SkipAdjustBB);
+ CGF.EmitBlock(SkipAdjustBB);
+ llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base");
+ Phi->addIncoming(Base, OriginalBB);
+ Phi->addIncoming(AdjustedBase, VBaseAdjustBB);
+ return Phi;
+ }
+ return AdjustedBase;
+}
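In scalar terms the adjustment amounts to the following C++ sketch; note that the result is relative to the vbptr, not the original base:

    static char *adjustVirtualBase(char *base, int vbptrOffset,
                                   int vbtableOffset) {
      char *vbptr   = base + vbptrOffset;                // memptr.vbptr
      char *vbtable = *(char **)vbptr;                   // memptr.vbtable
      int   delta   = *(int *)(vbtable + vbtableOffset); // memptr.vbase_offs
      return vbptr + delta;                              // adjusted base
    }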
+
+llvm::Value *
+MicrosoftCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ assert(MPT->isMemberDataPointer());
+ unsigned AS = Base->getType()->getPointerAddressSpace();
+ llvm::Type *PType =
+ CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
+ CGBuilderTy &Builder = CGF.Builder;
+ const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+ MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+
+ // Extract the fields we need, regardless of model. We'll apply them if we
+ // have them.
+ llvm::Value *FieldOffset = MemPtr;
+ llvm::Value *VirtualBaseAdjustmentOffset = 0;
+ llvm::Value *VBPtrOffset = 0;
+ if (MemPtr->getType()->isStructTy()) {
+ // We need to extract values.
+ unsigned I = 0;
+ FieldOffset = Builder.CreateExtractValue(MemPtr, I++);
+ if (hasVBPtrOffsetField(Inheritance))
+ VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++);
+ if (hasVirtualBaseAdjustmentField(Inheritance))
+ VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
+ }
+
+ if (VirtualBaseAdjustmentOffset) {
+ Base = AdjustVirtualBase(CGF, RD, Base, VirtualBaseAdjustmentOffset,
+ VBPtrOffset);
+ }
+ llvm::Value *Addr =
+ Builder.CreateInBoundsGEP(Base, FieldOffset, "memptr.offset");
+
+ // Cast the address to the appropriate pointer type, adopting the address
+ // space of the base pointer.
+ return Builder.CreateBitCast(Addr, PType);
+}
+
+llvm::Value *
+MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ assert(MPT->isMemberFunctionPointer());
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->castAs<FunctionProtoType>();
+ const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+ llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT));
+ CGBuilderTy &Builder = CGF.Builder;
+
+ MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
+
+ // Extract the fields we need, regardless of model. We'll apply them if we
+ // have them.
+ llvm::Value *FunctionPointer = MemPtr;
+ llvm::Value *NonVirtualBaseAdjustment = NULL;
+ llvm::Value *VirtualBaseAdjustmentOffset = NULL;
+ llvm::Value *VBPtrOffset = NULL;
+ if (MemPtr->getType()->isStructTy()) {
+ // We need to extract values.
+ unsigned I = 0;
+ FunctionPointer = Builder.CreateExtractValue(MemPtr, I++);
+    if (hasNonVirtualBaseAdjustmentField(MPT->isMemberFunctionPointer(),
+                                         Inheritance))
+ NonVirtualBaseAdjustment = Builder.CreateExtractValue(MemPtr, I++);
+ if (hasVBPtrOffsetField(Inheritance))
+ VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++);
+ if (hasVirtualBaseAdjustmentField(Inheritance))
+ VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
+ }
+
+ if (VirtualBaseAdjustmentOffset) {
+ This = AdjustVirtualBase(CGF, RD, This, VirtualBaseAdjustmentOffset,
+ VBPtrOffset);
+ }
+
+ if (NonVirtualBaseAdjustment) {
+ // Apply the adjustment and cast back to the original struct type.
+ llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateInBoundsGEP(Ptr, NonVirtualBaseAdjustment);
+ This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+ }
+
+ return Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo());
+}
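For orientation, this is the kind of source construct the sequence above lowers (an illustrative example, not from the patch):

    struct A { int x; }; struct B { int y; };
    struct D : A, B { int m(int); };

    int call(D *d, int (D::*pm)(int)) {
      // Extract the function pointer from 'pm', adjust 'd' by the stored
      // non-virtual (and, with virtual bases, vbtable) offsets, then call.
      return (d->*pm)(7);
    }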
+
CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
return new MicrosoftCXXABI(CGM);
}
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index d6e5f0673f..69e5b32304 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -31,15 +31,13 @@ namespace {
OwningPtr<const llvm::DataLayout> TD;
ASTContext *Ctx;
const CodeGenOptions CodeGenOpts; // Intentionally copied in.
- const TargetOptions TargetOpts; // Intentionally copied in.
protected:
OwningPtr<llvm::Module> M;
OwningPtr<CodeGen::CodeGenModule> Builder;
public:
CodeGeneratorImpl(DiagnosticsEngine &diags, const std::string& ModuleName,
- const CodeGenOptions &CGO, const TargetOptions &TO,
- llvm::LLVMContext& C)
- : Diags(diags), CodeGenOpts(CGO), TargetOpts(TO),
+ const CodeGenOptions &CGO, llvm::LLVMContext& C)
+ : Diags(diags), CodeGenOpts(CGO),
M(new llvm::Module(ModuleName, C)) {}
virtual ~CodeGeneratorImpl() {}
@@ -58,8 +56,8 @@ namespace {
M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
M->setDataLayout(Ctx->getTargetInfo().getTargetDescription());
TD.reset(new llvm::DataLayout(Ctx->getTargetInfo().getTargetDescription()));
- Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts, TargetOpts,
- *M, *TD, Diags));
+ Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts, *M, *TD,
+ Diags));
}
virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
@@ -125,7 +123,7 @@ void CodeGenerator::anchor() { }
CodeGenerator *clang::CreateLLVMCodeGen(DiagnosticsEngine &Diags,
const std::string& ModuleName,
const CodeGenOptions &CGO,
- const TargetOptions &TO,
+ const TargetOptions &/*TO*/,
llvm::LLVMContext& C) {
- return new CodeGeneratorImpl(Diags, ModuleName, CGO, TO, C);
+ return new CodeGeneratorImpl(Diags, ModuleName, CGO, C);
}
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 612e72bdd7..f9689affa0 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -14,6 +14,7 @@
#include "TargetInfo.h"
#include "ABIInfo.h"
+#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
@@ -43,6 +44,37 @@ static bool isAggregateTypeForABI(QualType T) {
ABIInfo::~ABIInfo() {}
+static bool isRecordReturnIndirect(const RecordType *RT,
+                                   CodeGen::CodeGenTypes &CGT) {
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return false;
+ return CGT.CGM.getCXXABI().isReturnTypeIndirect(RD);
+}
+
+static bool isRecordReturnIndirect(QualType T, CodeGen::CodeGenTypes &CGT) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ return isRecordReturnIndirect(RT, CGT);
+}
+
+static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
+ CodeGen::CodeGenTypes &CGT) {
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return CGCXXABI::RAA_Default;
+ return CGT.CGM.getCXXABI().getRecordArgABI(RD);
+}
+
+static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
+ CodeGen::CodeGenTypes &CGT) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return CGCXXABI::RAA_Default;
+ return getRecordArgABI(RT, CGT);
+}
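A note on the idiom at the call sites updated below: RAA_Default is the zero enumerator of RecordArgABI, so the classification can be tested directly in a condition. A sketch of the pattern:

    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);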
+
ASTContext &ABIInfo::getContext() const {
return CGT.getContext();
}
@@ -55,6 +87,9 @@ const llvm::DataLayout &ABIInfo::getDataLayout() const {
return CGT.getDataLayout();
}
+const TargetInfo &ABIInfo::getTarget() const {
+ return CGT.getTarget();
+}
void ABIArgInfo::dump() const {
raw_ostream &OS = llvm::errs();
@@ -167,27 +202,6 @@ static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
return true;
}
-/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
-/// a non-trivial destructor or a non-trivial copy constructor.
-static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD)
- return false;
-
- return !RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor();
-}
-
-/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
-/// a record type with either a non-trivial destructor or a non-trivial copy
-/// constructor.
-static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
- return false;
-
- return hasNonTrivialDestructorOrCopyConstructor(RT);
-}
-
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
@@ -367,7 +381,7 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty)) {
// Records with non trivial destructors/constructors should not be passed
// by value.
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ if (isRecordReturnIndirect(Ty, CGT))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
return ABIArgInfo::getIndirect(0);
@@ -398,6 +412,9 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
+//
+// This is a simplified version of the x86_32 ABI. Arguments and return values
+// are always passed on the stack.
//===----------------------------------------------------------------------===//
class PNaClABIInfo : public ABIInfo {
@@ -405,7 +422,7 @@ class PNaClABIInfo : public ABIInfo {
PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
virtual void computeInfo(CGFunctionInfo &FI) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -425,11 +442,9 @@ class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0;
-
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
- it->info = classifyArgumentType(it->type, FreeRegs);
+ it->info = classifyArgumentType(it->type);
}
llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -437,42 +452,29 @@ llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return 0;
}
-ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty,
- unsigned &FreeRegs) const {
+/// \brief Classify an argument of the given type \p Ty.
+ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty)) {
- // Records with non trivial destructors/constructors should not be passed
- // by value.
- FreeRegs = 0;
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
-
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
+ return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
return ABIArgInfo::getIndirect(0);
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
+ // Treat an enum type as its underlying type.
Ty = EnumTy->getDecl()->getIntegerType();
+ } else if (Ty->isFloatingType()) {
+ // Floating-point types don't go inreg.
+ return ABIArgInfo::getDirect();
+ }
- ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ?
+ return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-
- // Regparm regs hold 32 bits.
- unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
- if (SizeInRegs == 0) return BaseInfo;
- if (SizeInRegs > FreeRegs) {
- FreeRegs = 0;
- return BaseInfo;
- }
- FreeRegs -= SizeInRegs;
- return BaseInfo.isDirect() ?
- ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) :
- ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType());
}
ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
+ // In the PNaCl ABI we always return records/structures on the stack.
if (isAggregateTypeForABI(RetTy))
return ABIArgInfo::getIndirect(0);
@@ -484,11 +486,9 @@ ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
-/// UseX86_MMXType - Return true if this is an MMX type that should use the
-/// special x86_mmx type.
-bool UseX86_MMXType(llvm::Type *IRType) {
- // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
- // special x86_mmx type.
+/// IsX86_MMXType - Return true if this is an MMX type.
+bool IsX86_MMXType(llvm::Type *IRType) {
+ // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
IRType->getScalarSizeInBits() != 64;
@@ -517,8 +517,7 @@ class X86_32ABIInfo : public ABIInfo {
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
- bool IsMMXDisabled;
- bool IsWin32FloatStructABI;
+ bool IsWin32StructABI;
unsigned DefaultNumRegisterParameters;
static bool isRegisterSize(unsigned Size) {
@@ -550,26 +549,24 @@ public:
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
unsigned r)
: ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
- IsMMXDisabled(m), IsWin32FloatStructABI(w),
- DefaultNumRegisterParameters(r) {}
+ IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- bool d, bool p, bool m, bool w, unsigned r)
- :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}
+ bool d, bool p, bool w, unsigned r)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
// Darwin uses different dwarf register numbers for EH.
- if (CGM.isTargetDarwin()) return 5;
-
+ if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
return 4;
}
@@ -681,9 +678,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (isAggregateTypeForABI(RetTy)) {
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always indirect.
- if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ if (isRecordReturnIndirect(RT, CGT))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
// Structures with flexible arrays are always indirect.
@@ -707,7 +702,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
// We apply a similar transformation for pointer types to improve the
// quality of the generated IR.
if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
- if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
+ if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
|| SeltTy->hasPointerRepresentation())
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
@@ -864,13 +859,14 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
bool IsFastCall) const {
// FIXME: Set alignment on indirect arguments.
if (isAggregateTypeForABI(Ty)) {
- // Structures with flexible arrays are always indirect.
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always indirect.
- if (hasNonTrivialDestructorOrCopyConstructor(RT))
- return getIndirectResult(Ty, false, FreeRegs);
+ if (IsWin32StructABI)
+ return getIndirectResult(Ty, true, FreeRegs);
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, CGT))
+        return getIndirectResult(Ty, RAA == CGCXXABI::RAA_DirectInMemory,
+                                 FreeRegs);
+ // Structures with flexible arrays are always indirect.
if (RT->getDecl()->hasFlexibleArrayMember())
return getIndirectResult(Ty, true, FreeRegs);
}
@@ -914,15 +910,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
Size));
}
- llvm::Type *IRType = CGT.ConvertType(Ty);
- if (UseX86_MMXType(IRType)) {
- if (IsMMXDisabled)
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
- 64));
- ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
- AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
- return AAI;
- }
+ if (IsX86_MMXType(CGT.ConvertType(Ty)))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
return ABIArgInfo::getDirect();
}
@@ -1044,7 +1033,7 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
// 8 is %eip.
AssignToArrayRange(Builder, Address, Four8, 0, 8);
- if (CGF.CGM.isTargetDarwin()) {
+ if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
// 12-16 are st(0..4). Not sure why we stop at 4.
// These have size 16, which is sizeof(long double) on
// platforms with 8-byte alignment for that type.
@@ -1169,7 +1158,7 @@ class X86_64ABIInfo : public ABIInfo {
/// required strict binary compatibility with older versions of GCC
/// may need to exempt themselves.
bool honorsRevision0_98() const {
- return !getContext().getTargetInfo().getTriple().isOSDarwin();
+ return !getTarget().getTriple().isOSDarwin();
}
bool HasAVX;
@@ -1204,7 +1193,7 @@ public:
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {
- ABIArgInfo classify(QualType Ty) const;
+ ABIArgInfo classify(QualType Ty, bool IsReturnType) const;
public:
WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
@@ -1393,8 +1382,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Current = Integer;
} else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
(k == BuiltinType::LongDouble &&
- getContext().getTargetInfo().getTriple().getOS() ==
- llvm::Triple::NaCl)) {
+ getTarget().getTriple().getOS() == llvm::Triple::NaCl)) {
Current = SSE;
} else if (k == BuiltinType::LongDouble) {
Lo = X87;
@@ -1482,8 +1470,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Current = SSE;
else if (ET == getContext().DoubleTy ||
(ET == getContext().LongDoubleTy &&
- getContext().getTargetInfo().getTriple().getOS() ==
- llvm::Triple::NaCl))
+ getTarget().getTriple().getOS() == llvm::Triple::NaCl))
Lo = Hi = SSE;
else if (ET == getContext().LongDoubleTy)
Current = ComplexX87;
@@ -1552,7 +1539,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
// copy constructor or a non-trivial destructor, it is passed by invisible
// reference.
- if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ if (getRecordArgABI(RT, CGT))
return;
const RecordDecl *RD = RT->getDecl();
@@ -1702,8 +1689,8 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
+ return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
// Compute the byval alignment. We specify the alignment of the byval in all
// cases so that the mid-level optimizer knows the alignment of the byval.
@@ -2191,7 +2178,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(
// COMPLEX_X87, it is passed in memory.
case X87:
case ComplexX87:
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ if (getRecordArgABI(Ty, CGT) == CGCXXABI::RAA_Indirect)
++neededInt;
return getIndirectResult(Ty, freeIntRegs);
@@ -2522,7 +2509,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return ResAddr;
}
-ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
+ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
if (Ty->isVoidType())
return ABIArgInfo::getIgnore();
@@ -2533,14 +2520,19 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
uint64_t Size = getContext().getTypeSize(Ty);
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- if (hasNonTrivialDestructorOrCopyConstructor(RT) ||
- RT->getDecl()->hasFlexibleArrayMember())
+ if (IsReturnType) {
+ if (isRecordReturnIndirect(RT, CGT))
+ return ABIArgInfo::getIndirect(0, false);
+ } else {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, CGT))
+ return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ }
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
// FIXME: mingw-w64-gcc emits 128-bit struct as i128
- if (Size == 128 &&
- getContext().getTargetInfo().getTriple().getOS()
- == llvm::Triple::MinGW32)
+ if (Size == 128 && getTarget().getTriple().getOS() == llvm::Triple::MinGW32)
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
@@ -2563,11 +2555,11 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
QualType RetTy = FI.getReturnType();
- FI.getReturnInfo() = classify(RetTy);
+  FI.getReturnInfo() = classify(RetTy, /*IsReturnType=*/true);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
- it->info = classify(it->type);
+    it->info = classify(it->type, /*IsReturnType=*/false);
}
llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -2794,10 +2786,8 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect();
if (isAggregateTypeForABI(Ty)) {
- // Records with non trivial destructors/constructors should not be passed
- // by value.
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
+ return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
return ABIArgInfo::getIndirect(0);
}
@@ -2970,8 +2960,7 @@ public:
}
bool isEABI() const {
- StringRef Env =
- getContext().getTargetInfo().getTriple().getEnvironmentName();
+ StringRef Env = getTarget().getTriple().getEnvironmentName();
return (Env == "gnueabi" || Env == "eabi" ||
Env == "android" || Env == "androideabi");
}
@@ -3070,7 +3059,7 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
// The default calling convention that LLVM will infer.
- if (getContext().getTargetInfo().getTriple().getEnvironmentName()=="gnueabihf")
+ if (getTarget().getTriple().getEnvironmentName()=="gnueabihf")
return llvm::CallingConv::ARM_AAPCS_VFP;
else if (isEABI())
return llvm::CallingConv::ARM_AAPCS;
@@ -3262,10 +3251,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs,
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always indirect.
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
+ return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
// Homogeneous Aggregates need to be expanded when we can fit the aggregate
@@ -3428,7 +3415,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ if (isRecordReturnIndirect(RetTy, CGT))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
// Are we following APCS?
@@ -3752,12 +3739,10 @@ ABIArgInfo AArch64ABIInfo::classifyGenericType(QualType Ty,
return tryUseRegs(Ty, FreeIntRegs, RegsNeeded, /*IsInt=*/ true);
}
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always indirect.
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
- if (FreeIntRegs > 0)
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) {
+ if (FreeIntRegs > 0 && RAA == CGCXXABI::RAA_Indirect)
--FreeIntRegs;
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
}
if (isEmptyRecord(getContext(), Ty, true)) {
@@ -4032,7 +4017,7 @@ namespace {
class NVPTXABIInfo : public ABIInfo {
public:
- NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) { setRuntimeCC(); }
+ NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
@@ -4040,8 +4025,6 @@ public:
virtual void computeInfo(CGFunctionInfo &FI) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CFG) const;
-private:
- void setRuntimeCC();
};
class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -4051,6 +4034,8 @@ public:
virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const;
+private:
+ static void addKernelMetadata(llvm::Function *F);
};
ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
@@ -4081,25 +4066,6 @@ void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.setEffectiveCallingConvention(getRuntimeCC());
}
-void NVPTXABIInfo::setRuntimeCC() {
- // Calling convention as default by an ABI.
- // We're still using the PTX_Kernel/PTX_Device calling conventions here,
- // but we should switch to NVVM metadata later on.
- const LangOptions &LangOpts = getContext().getLangOpts();
- if (LangOpts.OpenCL || LangOpts.CUDA) {
- // If we are in OpenCL or CUDA mode, then default to device functions
- RuntimeCC = llvm::CallingConv::PTX_Device;
- } else {
- // If we are in standard C/C++ mode, use the triple to decide on the default
- StringRef Env =
- getContext().getTargetInfo().getTriple().getEnvironmentName();
- if (Env == "device")
- RuntimeCC = llvm::CallingConv::PTX_Device;
- else
- RuntimeCC = llvm::CallingConv::PTX_Kernel;
- }
-}
-
llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CFG) const {
llvm_unreachable("NVPTX does not support varargs");
@@ -4115,11 +4081,11 @@ SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
// Perform special handling in OpenCL mode
if (M.getLangOpts().OpenCL) {
- // Use OpenCL function attributes to set proper calling conventions
+ // Use OpenCL function attributes to check for kernel functions
// By default, all functions are device functions
if (FD->hasAttr<OpenCLKernelAttr>()) {
- // OpenCL __kernel functions get a kernel calling convention
- F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+ // OpenCL __kernel functions get kernel metadata
+ addKernelMetadata(F);
// And kernel functions are not subject to inlining
F->addFnAttr(llvm::Attribute::NoInline);
}
@@ -4127,14 +4093,318 @@ SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
// Perform special handling in CUDA mode.
if (M.getLangOpts().CUDA) {
- // CUDA __global__ functions get a kernel calling convention. Since
+ // CUDA __global__ functions get a kernel metadata entry. Since
// __global__ functions cannot be called from the device, we do not
// need to set the noinline attribute.
if (FD->getAttr<CUDAGlobalAttr>())
- F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+ addKernelMetadata(F);
}
}
+void NVPTXTargetCodeGenInfo::addKernelMetadata(llvm::Function *F) {
+ llvm::Module *M = F->getParent();
+ llvm::LLVMContext &Ctx = M->getContext();
+
+ // Get "nvvm.annotations" metadata node
+ llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
+
+ // Create !{<func-ref>, metadata !"kernel", i32 1} node
+ llvm::SmallVector<llvm::Value *, 3> MDVals;
+ MDVals.push_back(F);
+ MDVals.push_back(llvm::MDString::get(Ctx, "kernel"));
+ MDVals.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1));
+
+ // Append metadata to nvvm.annotations
+ MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
+}
+
+}
+
+//===----------------------------------------------------------------------===//
+// SystemZ ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class SystemZABIInfo : public ABIInfo {
+public:
+ SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ bool isPromotableIntegerType(QualType Ty) const;
+ bool isCompoundType(QualType Ty) const;
+ bool isFPArgumentType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType ArgTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
+};
+
+}
+
+bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (Ty->isPromotableIntegerType())
+ return true;
+
+ // 32-bit values must also be promoted.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool SystemZABIInfo::isCompoundType(QualType Ty) const {
+ return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
+}
+
+bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ return true;
+ default:
+ return false;
+ }
+
+ if (const RecordType *RT = Ty->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl();
+ bool Found = false;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (CXXRecordDecl::base_class_const_iterator I = CXXRD->bases_begin(),
+ E = CXXRD->bases_end(); I != E; ++I) {
+ QualType Base = I->getType();
+
+ // Empty bases don't affect things either way.
+ if (isEmptyRecord(getContext(), Base, true))
+ continue;
+
+ if (Found)
+ return false;
+ Found = isFPArgumentType(Base);
+ if (!Found)
+ return false;
+ }
+
+ // Check the fields.
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I) {
+ const FieldDecl *FD = *I;
+
+ // Empty bitfields don't affect things either way.
+ // Unlike isSingleElementStruct(), empty structure and array fields
+ // do count. So do anonymous bitfields that aren't zero-sized.
+ if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
+        continue;
+
+ // Unlike isSingleElementStruct(), arrays do not count.
+ // Nested isFPArgumentType structures still do though.
+ if (Found)
+ return false;
+ Found = isFPArgumentType(FD->getType());
+ if (!Found)
+ return false;
+ }
+
+ // Unlike isSingleElementStruct(), trailing padding is allowed.
+ // An 8-byte aligned struct s { float f; } is passed as a double.
+ return Found;
+ }
+
+ return false;
+}
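Some concrete cases under these rules (a sketch):

    struct F1 { float f; };     // FP: a lone float member
    struct F2 { F1 inner; };    // FP: a single nested FP structure
    struct N1 { float f[1]; };  // not FP: arrays never count
    struct N2 { float a, b; };  // not FP: more than one non-empty member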
+
+llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i64 __gpr;
+ // i64 __fpr;
+ // i8 *__overflow_arg_area;
+ // i8 *__reg_save_area;
+ // };
+
+ // Every argument occupies 8 bytes and is passed by preference in either
+ // GPRs or FPRs.
+ Ty = CGF.getContext().getCanonicalType(Ty);
+ ABIArgInfo AI = classifyArgumentType(Ty);
+ bool InFPRs = isFPArgumentType(Ty);
+
+ llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
+ bool IsIndirect = AI.isIndirect();
+ unsigned UnpaddedBitSize;
+ if (IsIndirect) {
+ APTy = llvm::PointerType::getUnqual(APTy);
+ UnpaddedBitSize = 64;
+ } else
+ UnpaddedBitSize = getContext().getTypeSize(Ty);
+ unsigned PaddedBitSize = 64;
+ assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
+
+ unsigned PaddedSize = PaddedBitSize / 8;
+ unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
+
+ unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
+ if (InFPRs) {
+ MaxRegs = 4; // Maximum of 4 FPR arguments
+ RegCountField = 1; // __fpr
+ RegSaveIndex = 16; // save offset for f0
+ RegPadding = 0; // floats are passed in the high bits of an FPR
+ } else {
+ MaxRegs = 5; // Maximum of 5 GPR arguments
+ RegCountField = 0; // __gpr
+ RegSaveIndex = 2; // save offset for r2
+ RegPadding = Padding; // values are passed in the low bits of a GPR
+ }
+
+ llvm::Value *RegCountPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
+ llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
+ llvm::Type *IndexTy = RegCount->getType();
+ llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
+ llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
+ "fits_in_regs");
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+ CGF.EmitBlock(InRegBlock);
+
+ // Work out the address of an argument register.
+ llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
+ llvm::Value *ScaledRegCount =
+ CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
+ llvm::Value *RegBase =
+ llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
+ llvm::Value *RegOffset =
+ CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
+ llvm::Value *RegSaveAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
+ llvm::Value *RegSaveArea =
+ CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
+ llvm::Value *RawRegAddr =
+ CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
+ llvm::Value *RegAddr =
+ CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
+
+ // Update the register count
+ llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
+ llvm::Value *NewRegCount =
+ CGF.Builder.CreateAdd(RegCount, One, "reg_count");
+ CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+ CGF.EmitBlock(InMemBlock);
+
+ // Work out the address of a stack argument.
+ llvm::Value *OverflowArgAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
+ llvm::Value *OverflowArgArea =
+ CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
+ llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
+ llvm::Value *RawMemAddr =
+ CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
+ llvm::Value *MemAddr =
+ CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
+
+ // Update overflow_arg_area_ptr pointer
+ llvm::Value *NewOverflowArgArea =
+ CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
+ CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
+ CGF.EmitBranch(ContBlock);
+
+ // Return the appropriate result.
+ CGF.EmitBlock(ContBlock);
+ llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
+ ResAddr->addIncoming(RegAddr, InRegBlock);
+ ResAddr->addIncoming(MemAddr, InMemBlock);
+
+ if (IsIndirect)
+ return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
+
+ return ResAddr;
+}
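The va_list layout this walks, written as the equivalent C declaration (a sketch matching the comment at the top of the function):

    struct s390x_va_list {
      long  __gpr;                // GPR arguments consumed so far
      long  __fpr;                // FPR arguments consumed so far
      void *__overflow_arg_area;  // next stack argument slot
      void *__reg_save_area;      // spilled argument registers
    };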
+
+ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
+ return ABIArgInfo::getIndirect(0);
+ return (isPromotableIntegerType(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
+ // Handle the generic C++ ABI.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
+ return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // Integers and enums are extended to full register width.
+ if (isPromotableIntegerType(Ty))
+ return ABIArgInfo::getExtend();
+
+ // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
+ return ABIArgInfo::getIndirect(0);
+
+ // Handle small structures.
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ // Structures with flexible arrays have variable length, so they really
+ // fail the size test above.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+
+ // The structure is passed as an unextended integer, a float, or a double.
+ llvm::Type *PassTy;
+ if (isFPArgumentType(Ty)) {
+ assert(Size == 32 || Size == 64);
+ if (Size == 32)
+ PassTy = llvm::Type::getFloatTy(getVMContext());
+ else
+ PassTy = llvm::Type::getDoubleTy(getVMContext());
+ } else
+ PassTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(PassTy);
+ }
+
+ // Non-structure compounds are passed indirectly.
+ if (isCompoundType(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ return ABIArgInfo::getDirect(0);
}
//===----------------------------------------------------------------------===//
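Taken together, the argument rules above give, for example (again a hedged
reading of the code, not cases from the patch's tests):

    // short                   -> getExtend()       (promoted to full width)
    // struct { float f; }     -> getDirect(float)  (FP-like, 4 bytes)
    // struct { double d; }    -> getDirect(double) (FP-like, 8 bytes)
    // struct { int a, b; }    -> getDirect(i64)    (8-byte integer)
    // struct { char c[3]; }   -> getIndirect(0)    (fails the size test)
    // _Complex double         -> getIndirect(0)    (128 bits, indirect)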
@@ -4325,11 +4595,17 @@ public:
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const {
- //
- // can fill this in when new attribute work in llvm is done.
- // attributes mips16 and nomips16 need to be handled here.
- //
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ if (FD->hasAttr<Mips16Attr>()) {
+ Fn->addFnAttr("mips16");
+ } else if (FD->hasAttr<NoMips16Attr>()) {
+ Fn->addFnAttr("nomips16");
+ }
}
+
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const;
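The attributes forwarded here are the GCC-style MIPS16 function attributes, so
the user-visible effect can be illustrated at the source level (a hedged
example; the declarations are hypothetical):

    // Each declaration should now reach the backend with the matching
    // "mips16" / "nomips16" IR function attribute.
    void __attribute__((mips16))   enc16(void);
    void __attribute__((nomips16)) enc32(void);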
@@ -4438,11 +4714,9 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
if (TySize == 0)
return ABIArgInfo::getIgnore();
- // Records with non trivial destructors/constructors should not be passed
- // by value.
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) {
Offset = OrigOffset + MinABIStackAlignInBytes;
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
}
// If we have reached here, aggregates are passed directly by coercing to
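This hunk is one instance of a substitution repeated through the rest of the
file: the old isRecordWithNonTrivialDestructorOrCopyConstructor test is
replaced by a query against the C++ ABI object. A hedged sketch of the shared
shape, with the mapping inferred from how RAA is consumed in these hunks:

    // RAA_Default        -> zero value, fall through to the ordinary rules
    // RAA_DirectInMemory -> indirect with ByVal == true
    // RAA_Indirect       -> indirect with ByVal == false (by reference)
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);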
@@ -4512,6 +4786,9 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
+ if (isRecordReturnIndirect(RetTy, CGT))
+ return ABIArgInfo::getIndirect(0);
+
if (Size <= 128) {
if (RetTy->isAnyComplexType())
return ABIArgInfo::getDirect();
@@ -4520,7 +4797,7 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
- if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ if (!IsO32)
return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
}
@@ -4558,7 +4835,7 @@ llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped;
- unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
+ unsigned PtrWidth = getTarget().getPointerWidth(0);
llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
if (TypeAlign > MinABIStackAlignInBytes) {
@@ -4730,10 +5007,8 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always indirect.
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
+ return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
uint64_t Size = getContext().getTypeSize(Ty);
if (Size > 64)
@@ -4768,7 +5043,7 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ if (isRecordReturnIndirect(RetTy, CGT))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
if (isEmptyRecord(getContext(), RetTy, true))
@@ -4819,7 +5094,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (TheTargetCodeGenInfo)
return *TheTargetCodeGenInfo;
- const llvm::Triple &Triple = getContext().getTargetInfo().getTriple();
+ const llvm::Triple &Triple = getTarget().getTriple();
switch (Triple.getArch()) {
default:
return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
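The other mechanical substitution in these hunks, getContext().getTargetInfo()
becoming getTarget(), presumes a small convenience accessor on ABIInfo. A
plausible sketch, assumed rather than shown in this section:

    // Assumed forwarding helper; CGT is ABIInfo's CodeGenTypes reference.
    const TargetInfo &ABIInfo::getTarget() const {
      return CGT.getTarget();
    }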
@@ -4841,10 +5116,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::thumb:
{
ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
- if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
+ if (strcmp(getTarget().getABI(), "apcs-gnu") == 0)
Kind = ARMABIInfo::APCS;
else if (CodeGenOpts.FloatABI == "hard" ||
- (CodeGenOpts.FloatABI != "soft" && Triple.getEnvironment()==llvm::Triple::GNUEABIHF))
+ (CodeGenOpts.FloatABI != "soft" &&
+ Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
Kind = ARMABIInfo::AAPCS_VFP;
switch (Triple.getOS()) {
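The reflowed condition above encodes a three-way choice; restated as a
standalone helper for clarity (a hedged sketch using only inputs visible in
this hunk):

    static ARMABIInfo::ABIKind pickARMKind(llvm::StringRef ABI,
                                           llvm::StringRef FloatABI,
                                           llvm::Triple::EnvironmentType Env) {
      if (ABI == "apcs-gnu")
        return ARMABIInfo::APCS;       // explicit APCS ABI string
      if (FloatABI == "hard" ||
          (FloatABI != "soft" && Env == llvm::Triple::GNUEABIHF))
        return ARMABIInfo::AAPCS_VFP;  // hard-float AAPCS variant
      return ARMABIInfo::AAPCS;        // default
    }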
@@ -4875,15 +5151,16 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::msp430:
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
+ case llvm::Triple::systemz:
+ return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
+
case llvm::Triple::tce:
return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
case llvm::Triple::x86: {
- bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0;
-
if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false,
+ new X86_32TargetCodeGenInfo(Types, true, true, false,
CodeGenOpts.NumRegisterParameters));
switch (Triple.getOS()) {
@@ -4895,25 +5172,23 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::OpenBSD:
case llvm::Triple::Bitrig:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX,
- false,
+ new X86_32TargetCodeGenInfo(Types, false, true, false,
CodeGenOpts.NumRegisterParameters));
case llvm::Triple::Win32:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true,
+ new X86_32TargetCodeGenInfo(Types, false, true, true,
CodeGenOpts.NumRegisterParameters));
default:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX,
- false,
+ new X86_32TargetCodeGenInfo(Types, false, false, false,
CodeGenOpts.NumRegisterParameters));
}
}
case llvm::Triple::x86_64: {
- bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0;
+ bool HasAVX = strcmp(getTarget().getABI(), "avx") == 0;
switch (Triple.getOS()) {
case llvm::Triple::Win32:
@@ -4921,7 +5196,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::Cygwin:
return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
case llvm::Triple::NaCl:
- return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
+ return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
+ HasAVX));
default:
return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
HasAVX));
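For reference, the "avx" ABI string compared against above is the one supplied
at the cc1 level; a hedged invocation example (the -target-abi spelling is
assumed here, and foo.c is a placeholder):

    clang -cc1 -triple x86_64-unknown-linux-gnu -target-abi avx -emit-llvm foo.c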