-rw-r--r--  include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h |   5
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp          | 354
-rw-r--r--  test/Analysis/NSString.m                                     |   1
3 files changed, 184 insertions, 176 deletions
diff --git a/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index 6910c299cc..60c35f807e 100644
--- a/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -532,7 +532,10 @@ private:
   void examineStackFrames(const Decl *D, const LocationContext *LCtx,
                           bool &IsRecursive, unsigned &StackDepth);
 
-  bool shouldInlineDecl(const Decl *D, ExplodedNode *Pred);
+  /// Checks our policies and decides weither the given call should be inlined.
+  bool shouldInlineCall(const CallEvent &Call, const Decl *D,
+                        const ExplodedNode *Pred);
+
   bool inlineCall(const CallEvent &Call, const Decl *D, NodeBuilder &Bldr,
                   ExplodedNode *Pred, ProgramStateRef State);
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index a4d33ee82f..02317bcf2e 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -394,71 +394,6 @@ static bool IsInStdNamespace(const FunctionDecl *FD) {
   return ND->getName() == "std";
 }
 
-// Determine if we should inline the call.
-bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) {
-  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
-  const CFG *CalleeCFG = CalleeADC->getCFG();
-
-  // It is possible that the CFG cannot be constructed.
-  // Be safe, and check if the CalleeCFG is valid.
-  if (!CalleeCFG)
-    return false;
-
-  bool IsRecursive = false;
-  unsigned StackDepth = 0;
-  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
-  if ((StackDepth >= AMgr.options.InlineMaxStackDepth) &&
-      ((CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
-       || IsRecursive))
-    return false;
-
-  if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D))
-    return false;
-
-  if (CalleeCFG->getNumBlockIDs() > AMgr.options.getMaxInlinableSize())
-    return false;
-
-  // Do not inline variadic calls (for now).
-  if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
-    if (BD->isVariadic())
-      return false;
-  }
-  else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
-    if (FD->isVariadic())
-      return false;
-  }
-
-  if (getContext().getLangOpts().CPlusPlus) {
-    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
-      // Conditionally allow the inlining of template functions.
-      if (!AMgr.options.mayInlineTemplateFunctions())
-        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
-          return false;
-
-      // Conditionally allow the inlining of C++ standard library functions.
-      if (!AMgr.options.mayInlineCXXStandardLibrary())
-        if (getContext().getSourceManager().isInSystemHeader(FD->getLocation()))
-          if (IsInStdNamespace(FD))
-            return false;
-    }
-  }
-
-  // It is possible that the live variables analysis cannot be
-  // run. If so, bail out.
-  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
-    return false;
-
-  if (Engine.FunctionSummaries->getNumTimesInlined(D) >
-        AMgr.options.getMaxTimesInlineLarge() &&
-      CalleeCFG->getNumBlockIDs() > 13) {
-    NumReachedInlineCountMax++;
-    return false;
-  }
-  Engine.FunctionSummaries->bumpNumTimesInlined(D);
-
-  return true;
-}
-
 // The GDM component containing the dynamic dispatch bifurcation info. When
 // the exact type of the receiver is not known, we want to explore both paths -
 // one on which we do inline it and the other one on which we don't. This is
@@ -482,108 +417,16 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
   const LocationContext *CurLC = Pred->getLocationContext();
   const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
-  const LocationContext *ParentOfCallee = 0;
-
-  AnalyzerOptions &Opts = getAnalysisManager().options;
-
-  // FIXME: Refactor this check into a hypothetical CallEvent::canInline.
-  switch (Call.getKind()) {
-  case CE_Function:
-    break;
-  case CE_CXXMember:
-  case CE_CXXMemberOperator:
-    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
-      return false;
-    break;
-  case CE_CXXConstructor: {
-    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
-      return false;
-
-    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
-
-    // FIXME: We don't handle constructors or destructors for arrays properly.
-    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
-    if (Target && isa<ElementRegion>(Target))
-      return false;
-
-    // FIXME: This is a hack. We don't use the correct region for a new
-    // expression, so if we inline the constructor its result will just be
-    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
-    // and the longer-term possible fix is discussed in PR12014.
-    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
-    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
-      if (isa<CXXNewExpr>(Parent))
-        return false;
-
-    // Inlining constructors requires including initializers in the CFG.
-    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
-    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
-    (void)ADC;
-
-    // If the destructor is trivial, it's always safe to inline the constructor.
-    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
-      break;
-
-    // For other types, only inline constructors if destructor inlining is
-    // also enabled.
-    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
-      return false;
-
-    // FIXME: This is a hack. We don't handle temporary destructors
-    // right now, so we shouldn't inline their constructors.
-    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
-      if (!Target || !isa<DeclRegion>(Target))
-        return false;
-
-    break;
-  }
-  case CE_CXXDestructor: {
-    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
-      return false;
-
-    // Inlining destructors requires building the CFG correctly.
-    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
-    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
-    (void)ADC;
-
-    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);
-
-    // FIXME: We don't handle constructors or destructors for arrays properly.
-    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
-    if (Target && isa<ElementRegion>(Target))
-      return false;
-
-    break;
-  }
-  case CE_CXXAllocator:
-    // Do not inline allocators until we model deallocators.
-    // This is unfortunate, but basically necessary for smart pointers and such.
-    return false;
-  case CE_Block: {
+  const LocationContext *ParentOfCallee = CallerSFC;
+  if (Call.getKind() == CE_Block) {
     const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
     assert(BR && "If we have the block definition we should have its region");
     AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
     ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                          cast<BlockDecl>(D),
                                                          BR);
-    break;
-  }
-  case CE_ObjCMessage:
-    if (!Opts.mayInlineObjCMethod())
-      return false;
-    AnalyzerOptions &Options = getAnalysisManager().options;
-    if (!(Options.getIPAMode() == IPAK_DynamicDispatch ||
-          Options.getIPAMode() == IPAK_DynamicDispatchBifurcate))
-      return false;
-    break;
   }
-
-  if (!shouldInlineDecl(D, Pred))
-    return false;
-  if (!ParentOfCallee)
-    ParentOfCallee = CallerSFC;
-
   // This may be NULL, but that's fine.
   const Expr *CallE = Call.getOriginExpr();
@@ -594,6 +437,7 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                              currBldrCtx->getBlock(),
                              currStmtIdx);
+
   CallEnter Loc(CallE, CalleeSFC, CurLC);
 
   // Construct a new state which contains the mapping from actual to
@@ -733,18 +577,182 @@ void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
   Bldr.generateNode(Call.getProgramPoint(), State, Pred);
 }
 
-static bool isEssentialToInline(const CallEvent &Call) {
-  const Decl *D = Call.getDecl();
-  if (D) {
-    AnalysisDeclContext *AD =
-      Call.getLocationContext()->getAnalysisDeclContext()->
-      getManager()->getContext(D);
+static bool shouldInlineCallKind(const CallEvent &Call,
+                                 const ExplodedNode *Pred,
+                                 AnalyzerOptions &Opts) {
+  const LocationContext *CurLC = Pred->getLocationContext();
+  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
+  switch (Call.getKind()) {
+  case CE_Function:
+  case CE_Block:
+    break;
+  case CE_CXXMember:
+  case CE_CXXMemberOperator:
+    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
+      return false;
+    break;
+  case CE_CXXConstructor: {
+    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
+      return false;
+
+    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
+
+    // FIXME: We don't handle constructors or destructors for arrays properly.
+    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
+    if (Target && isa<ElementRegion>(Target))
+      return false;
+
+    // FIXME: This is a hack. We don't use the correct region for a new
+    // expression, so if we inline the constructor its result will just be
+    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
+    // and the longer-term possible fix is discussed in PR12014.
+    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
+    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
+      if (isa<CXXNewExpr>(Parent))
+        return false;
+
+    // Inlining constructors requires including initializers in the CFG.
+    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
+    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
+    (void)ADC;
+
+    // If the destructor is trivial, it's always safe to inline the constructor.
+    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
+      break;
+
+    // For other types, only inline constructors if destructor inlining is
+    // also enabled.
+    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
+      return false;
+
+    // FIXME: This is a hack. We don't handle temporary destructors
+    // right now, so we shouldn't inline their constructors.
+    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
+      if (!Target || !isa<DeclRegion>(Target))
+        return false;
+
+    break;
+  }
+  case CE_CXXDestructor: {
+    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
+      return false;
+
+    // Inlining destructors requires building the CFG correctly.
+    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
+    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
+    (void)ADC;
+
+    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);
+
+    // FIXME: We don't handle constructors or destructors for arrays properly.
+    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
+    if (Target && isa<ElementRegion>(Target))
+      return false;
+
+    break;
+  }
+  case CE_CXXAllocator:
+    // Do not inline allocators until we model deallocators.
+    // This is unfortunate, but basically necessary for smart pointers and such.
+    return false;
+  case CE_ObjCMessage:
+    if (!Opts.mayInlineObjCMethod())
+      return false;
+    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
+          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
+      return false;
+    break;
+  }
+  return true;
+}
+
+bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
+                                  const ExplodedNode *Pred) {
+  if (!D )
+    return false;
+
+  AnalyzerOptions &Opts = getAnalysisManager().options;
+  AnalysisDeclContext *CalleeADC =
+    Call.getLocationContext()->getAnalysisDeclContext()->
+    getManager()->getContext(D);
+
+  // The auto-synthesized bodies are essential to inline as they are
+  // usually small and commonly used. note, we should do this check early on to
+  // ensure we always inline these calls.
+  if (CalleeADC->isBodyAutosynthesized())
+    return true;
+
+  if (HowToInline == Inline_None)
+    return false;
+
+  // Check if we should inline a call based on it's kind.
+  if (!shouldInlineCallKind(Call, Pred, Opts))
+    return false;
+
+  // It is possible that the CFG cannot be constructed.
+  // Be safe, and check if the CalleeCFG is valid.
+  const CFG *CalleeCFG = CalleeADC->getCFG();
+  if (!CalleeCFG)
+    return false;
+
+  // Do not inline if recursive or we've reached max stack frame count.
+  bool IsRecursive = false;
+  unsigned StackDepth = 0;
+  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
+  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
+      ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
+       || IsRecursive))
+    return false;
+
+  // Do not inline if it took too long to inline previously.
+  if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D))
+    return false;
+
+  // Or if the function is too big.
+  if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
+    return false;
+
+  // Do not inline variadic calls (for now).
+  if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+    if (BD->isVariadic())
+      return false;
+  }
+  else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+    if (FD->isVariadic())
+      return false;
+  }
+
+  // Check our template policy.
+  if (getContext().getLangOpts().CPlusPlus) {
+    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+      // Conditionally allow the inlining of template functions.
+      if (!Opts.mayInlineTemplateFunctions())
+        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
+          return false;
+
+      // Conditionally allow the inlining of C++ standard library functions.
+      if (!Opts.mayInlineCXXStandardLibrary())
+        if (getContext().getSourceManager().isInSystemHeader(FD->getLocation()))
+          if (IsInStdNamespace(FD))
+            return false;
+    }
+  }
+
+  // It is possible that the live variables analysis cannot be
+  // run. If so, bail out.
+  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
+    return false;
-
-    // The auto-synthesized bodies are essential to inline as they are
-    // usually small and commonly used.
-    return AD->isBodyAutosynthesized();
+  // Do not inline large functions too many times.
+  if (Engine.FunctionSummaries->getNumTimesInlined(D) >
+        Opts.getMaxTimesInlineLarge() &&
+      CalleeCFG->getNumBlockIDs() > 13) {
+    NumReachedInlineCountMax++;
+    return false;
   }
-  return false;
+  Engine.FunctionSummaries->bumpNumTimesInlined(D);
+
+  return true;
 }
 
 void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
@@ -753,23 +761,19 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
   ProgramStateRef State = Pred->getState();
   CallEventRef<> Call = CallTemplate.cloneWithState(State);
 
-  if (HowToInline == Inline_None && !isEssentialToInline(CallTemplate)) {
-    conservativeEvalCall(*Call, Bldr, Pred, State);
-    return;
-  }
   // Try to inline the call.
   // The origin expression here is just used as a kind of checksum;
   // this should still be safe even for CallEvents that don't come from exprs.
   const Expr *E = Call->getOriginExpr();
-  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
 
+  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
   if (InlinedFailedState) {
     // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
   } else {
     RuntimeDefinition RD = Call->getRuntimeDefinition();
     const Decl *D = RD.getDecl();
-    if (D) {
+    if (shouldInlineCall(*Call, D, Pred)) {
       if (RD.mayHaveOtherDefinitions()) {
         AnalyzerOptions &Options = getAnalysisManager().options;
diff --git a/test/Analysis/NSString.m b/test/Analysis/NSString.m
index e7ac730c72..e390033483 100644
--- a/test/Analysis/NSString.m
+++ b/test/Analysis/NSString.m
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,osx.cocoa.NilArg,osx.cocoa.RetainCount,alpha.core -analyzer-store=region -analyzer-constraints=range -verify -Wno-objc-root-class %s
+// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,osx.cocoa.NilArg,osx.cocoa.RetainCount,alpha.core -analyzer-store=region -analyzer-constraints=range -analyzer-config mode=shallow -verify -Wno-objc-root-class %s
 // RUN: %clang_cc1 -DTEST_64 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.cocoa.NilArg,osx.cocoa.RetainCount,alpha.core -analyzer-store=region -analyzer-constraints=range -verify -Wno-objc-root-class %s