author    Anna Zaks <ganna@apple.com>    2012-08-09 00:21:33 +0000
committer Anna Zaks <ganna@apple.com>    2012-08-09 00:21:33 +0000
commit    e90d3f847dcce76237078b67db8895eb7a24189e (patch)
tree      884fc90cbbf939cdb1d1e736abb0accb61e00fd5 /lib/StaticAnalyzer/Core
parent    7b7af0201b0f0bac47663b4daf530afd8df85595 (diff)
[analyzer] Bifurcate the path with dynamic dispatch.
This is an initial (unoptimized) version. We split the path when inlining ObjC instance methods: on one branch we always assume that the type information for the given memory region is precise; on the other we assume that we do not have the exact type info. This check is important because the class could be subclassed and the method overridden, so always inlining can lose coverage. Had to refactor some of the call eval functions.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@161552 91177308-0d34-0410-b5e6-96231b3b80d8
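Note: the change below switches every getRuntimeDefinition() override from returning a bare const Decl * to returning a small RuntimeDefinition value. The struct itself is declared in CallEvent.h and is not part of this diff; the following is only a rough sketch, inferred from the call sites in the patch, of the two pieces of state it is assumed to carry: the definition that was found (if any), and the receiver region whose dynamic type information backs that definition when it may be imprecise.

// Hedged sketch only: the real RuntimeDefinition is declared in
// include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h, outside this
// diff. This reconstruction shows the shape the call sites below rely on
// (RD.Decl and RD.Reg), nothing more.
namespace clang {
class Decl;
namespace ento {
class MemRegion;

struct RuntimeDefinition {
  // The definition expected to run at the call; 0 if none could be found.
  const class Decl *Decl;
  // The receiver region whose dynamic type the definition was derived from;
  // non-null means the type info may be imprecise (a subclass could override
  // the method), so the caller may want to bifurcate the path.
  const MemRegion *Reg;

  RuntimeDefinition() : Decl(0), Reg(0) {}
  RuntimeDefinition(const class Decl *D, const MemRegion *R)
      : Decl(D), Reg(R) {}
};

} // end namespace ento
} // end namespace clang

In this reading, a default-constructed value plays the role of the old null return, and a null Reg plays the role of "this definition cannot be overridden at runtime".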
Diffstat (limited to 'lib/StaticAnalyzer/Core')
-rw-r--r--  lib/StaticAnalyzer/Core/CallEvent.cpp                 40
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp  123
2 files changed, 124 insertions, 39 deletions
diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp
index 006ca1043a..2d96a1ea76 100644
--- a/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -382,14 +382,14 @@ static const CXXMethodDecl *devirtualize(const CXXMethodDecl *MD, SVal ThisVal){
}
-const Decl *CXXInstanceCall::getRuntimeDefinition() const {
- const Decl *D = SimpleCall::getRuntimeDefinition();
+RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
+ const Decl *D = SimpleCall::getRuntimeDefinition().Decl;
if (!D)
- return 0;
+ return RuntimeDefinition();
const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
if (!MD->isVirtual())
- return MD;
+ return RuntimeDefinition(MD, 0);
// If the method is virtual, see if we can find the actual implementation
// based on context-sensitivity.
@@ -398,9 +398,9 @@ const Decl *CXXInstanceCall::getRuntimeDefinition() const {
// because a /partially/ constructed object can be referred to through a
// base pointer. We'll eventually want to use DynamicTypeInfo here.
if (const CXXMethodDecl *Devirtualized = devirtualize(MD, getCXXThisVal()))
- return Devirtualized;
+ return RuntimeDefinition(Devirtualized, 0);
- return 0;
+ return RuntimeDefinition();
}
void CXXInstanceCall::getInitialStackFrameContents(
@@ -512,14 +512,14 @@ void CXXDestructorCall::getExtraInvalidatedRegions(RegionList &Regions) const {
Regions.push_back(static_cast<const MemRegion *>(Data));
}
-const Decl *CXXDestructorCall::getRuntimeDefinition() const {
- const Decl *D = AnyFunctionCall::getRuntimeDefinition();
+RuntimeDefinition CXXDestructorCall::getRuntimeDefinition() const {
+ const Decl *D = AnyFunctionCall::getRuntimeDefinition().Decl;
if (!D)
- return 0;
+ return RuntimeDefinition();
const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
if (!MD->isVirtual())
- return MD;
+ return RuntimeDefinition(MD, 0);
// If the method is virtual, see if we can find the actual implementation
// based on context-sensitivity.
@@ -528,9 +528,9 @@ const Decl *CXXDestructorCall::getRuntimeDefinition() const {
// because a /partially/ constructed object can be referred to through a
// base pointer. We'll eventually want to use DynamicTypeInfo here.
if (const CXXMethodDecl *Devirtualized = devirtualize(MD, getCXXThisVal()))
- return Devirtualized;
+ return RuntimeDefinition(Devirtualized, 0);
- return 0;
+ return RuntimeDefinition();
}
void CXXDestructorCall::getInitialStackFrameContents(
@@ -659,7 +659,7 @@ ObjCMessageKind ObjCMethodCall::getMessageKind() const {
return static_cast<ObjCMessageKind>(Info.getInt());
}
-const Decl *ObjCMethodCall::getRuntimeDefinition() const {
+RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
const ObjCMessageExpr *E = getOriginExpr();
assert(E);
Selector Sel = E->getSelector();
@@ -669,12 +669,16 @@ const Decl *ObjCMethodCall::getRuntimeDefinition() const {
// Find the the receiver type.
const ObjCObjectPointerType *ReceiverT = 0;
QualType SupersType = E->getSuperType();
+ const MemRegion *Receiver = 0;
+
if (!SupersType.isNull()) {
+ // Super always means the type of immediate predecessor to the method
+ // where the call occurs.
ReceiverT = cast<ObjCObjectPointerType>(SupersType);
} else {
- const MemRegion *Receiver = getReceiverSVal().getAsRegion();
+ Receiver = getReceiverSVal().getAsRegion();
if (!Receiver)
- return 0;
+ return RuntimeDefinition();
QualType DynType = getState()->getDynamicTypeInfo(Receiver).getType();
ReceiverT = dyn_cast<ObjCObjectPointerType>(DynType);
@@ -683,7 +687,7 @@ const Decl *ObjCMethodCall::getRuntimeDefinition() const {
// Lookup the method implementation.
if (ReceiverT)
if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl())
- return IDecl->lookupPrivateMethod(Sel);
+ return RuntimeDefinition(IDecl->lookupPrivateMethod(Sel), Receiver);
} else {
// This is a class method.
@@ -691,11 +695,11 @@ const Decl *ObjCMethodCall::getRuntimeDefinition() const {
// class name.
if (ObjCInterfaceDecl *IDecl = E->getReceiverInterface()) {
// Find/Return the method implementation.
- return IDecl->lookupPrivateClassMethod(Sel);
+ return RuntimeDefinition(IDecl->lookupPrivateClassMethod(Sel), 0);
}
}
- return 0;
+ return RuntimeDefinition();
}
void ObjCMethodCall::getInitialStackFrameContents(
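The second file in the patch, shown next, is where these values are consumed: defaultEvalCall asks the CallEvent for its RuntimeDefinition and, when a receiver region is present and the IPA mode is DynamicDispatchBifurcate, splits the path via BifurcateCall. As a rough, hypothetical illustration of how a caller can interpret the three possible shapes of the return value (the helper name and the messages below are made up for this sketch; only RuntimeDefinition and getRuntimeDefinition come from the patch):

#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;
using namespace ento;

// Hypothetical helper, not part of the patch: classify the result of
// getRuntimeDefinition() the same way defaultEvalCall does below.
static void describeDefinition(const CallEvent &Call) {
  RuntimeDefinition RD = Call.getRuntimeDefinition();
  if (!RD.Decl) {
    // No definition found: evaluate the call conservatively.
    llvm::errs() << "no definition; invalidate regions and conjure a result\n";
  } else if (RD.Reg) {
    // A definition was found, but it hinges on the dynamic type of RD.Reg,
    // which could be a subclass we have not seen: candidate for bifurcation.
    llvm::errs() << "definition may be imprecise; consider bifurcating\n";
  } else {
    // The definition is known precisely (e.g. non-virtual or devirtualized).
    llvm::errs() << "precise definition; inline directly\n";
  }
}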
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index c1e010073c..9dd100ad77 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -11,12 +11,15 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "ExprEngine"
+
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/AST/DeclCXX.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"
#define CXX_INLINING_ENABLED 1
@@ -24,6 +27,9 @@
using namespace clang;
using namespace ento;
+STATISTIC(NumOfDynamicDispatchPathSplits,
+ "The # of times we split the path due to imprecise dynamic dispatch info");
+
void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
// Get the entry block in the CFG of the callee.
const StackFrameContext *calleeCtx = CE.getCalleeContext();
@@ -273,14 +279,27 @@ bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) {
return true;
}
-bool ExprEngine::inlineCall(const CallEvent &Call,
- ExplodedNode *Pred) {
- if (!getAnalysisManager().shouldInlineCall())
- return false;
-
- const Decl *D = Call.getRuntimeDefinition();
- if (!D)
- return false;
+/// The GDM component containing the dynamic dispatch bifurcation info. When
+/// the exact type of the receiver is not known, we want to explore both paths -
+/// one on which we do inline it and the other one on which we don't. This is
+/// done to ensure we do not drop coverage.
+/// This is the map from the receiver region to a bool, specifying either we
+/// consider this region's information precise or not along the given path.
+namespace clang {
+namespace ento {
+struct DynamicDispatchBifurcationMap {};
+typedef llvm::ImmutableMap<const MemRegion*,
+ int> DynamicDispatchBifur;
+template<> struct ProgramStateTrait<DynamicDispatchBifurcationMap>
+ : public ProgramStatePartialTrait<DynamicDispatchBifur> {
+ static void *GDMIndex() { static int index; return &index; }
+};
+}}
+
+bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
+ NodeBuilder &Bldr, ExplodedNode *Pred,
+ ProgramStateRef State) {
+ assert(D);
const LocationContext *CurLC = Pred->getLocationContext();
const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
@@ -359,7 +378,8 @@ bool ExprEngine::inlineCall(const CallEvent &Call,
break;
}
case CE_ObjCMessage:
- if (getAnalysisManager().IPAMode != DynamicDispatch)
+ if (!(getAnalysisManager().IPAMode == DynamicDispatch ||
+ getAnalysisManager().IPAMode == DynamicDispatchBifurcate))
return false;
break;
}
@@ -384,7 +404,7 @@ bool ExprEngine::inlineCall(const CallEvent &Call,
// Construct a new state which contains the mapping from actual to
// formal arguments.
- ProgramStateRef State = Pred->getState()->enterStackFrame(Call, CalleeSFC);
+ State = State->enterStackFrame(Call, CalleeSFC);
bool isNew;
if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
@@ -392,6 +412,11 @@ bool ExprEngine::inlineCall(const CallEvent &Call,
if (isNew)
Engine.getWorkList()->enqueue(N);
}
+
+ // If we decided to inline the call, the successor has been manually
+ // added onto the work list so remove it from the node builder.
+ Bldr.takeNodes(Pred);
+
return true;
}
@@ -491,6 +516,18 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
return State->BindExpr(E, LCtx, R);
}
+// Conservatively evaluate call by invalidating regions and binding
+// a conjured return value.
+void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
+ ExplodedNode *Pred, ProgramStateRef State) {
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ State = Call.invalidateRegions(Count, State);
+ State = bindReturnValue(Call, Pred->getLocationContext(), State);
+
+ // And make the result node.
+ Bldr.generateNode(Call.getProgramPoint(), State, Pred);
+}
+
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
const CallEvent &CallTemplate) {
// Make sure we have the most recent state attached to the call.
@@ -506,23 +543,67 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
if (InlinedFailedState) {
// If we already tried once and failed, make sure we don't retry later.
State = InlinedFailedState;
- } else if (inlineCall(*Call, Pred)) {
- // If we decided to inline the call, the successor has been manually
- // added onto the work list and we should not perform our generic
- // call-handling steps.
- Bldr.takeNodes(Pred);
- return;
+ } else if (getAnalysisManager().shouldInlineCall()) {
+ RuntimeDefinition RD = Call->getRuntimeDefinition();
+ const Decl *D = RD.Decl;
+ if (D) {
+ // Explore with and without inlining the call.
+ const MemRegion *BifurReg = RD.Reg;
+ if (BifurReg &&
+ getAnalysisManager().IPAMode == DynamicDispatchBifurcate) {
+ BifurcateCall(BifurReg, *Call, D, Bldr, Pred);
+ return;
+ } else {
+ // We are not bifurcating and we do have a Decl, so just inline.
+ if (inlineCall(*Call, D, Bldr, Pred, State))
+ return;
+ }
+ }
}
// If we can't inline it, handle the return value and invalidate the regions.
- unsigned Count = currentBuilderContext->getCurrentBlockCount();
- State = Call->invalidateRegions(Count, State);
- State = bindReturnValue(*Call, Pred->getLocationContext(), State);
+ conservativeEvalCall(*Call, Bldr, Pred, State);
+}
- // And make the result node.
- Bldr.generateNode(Call->getProgramPoint(), State, Pred);
+void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
+ const CallEvent &Call, const Decl *D,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ assert(BifurReg);
+
+ // Check if we've performed the split already - note, we only want
+ // to split the path once per memory region.
+ ProgramStateRef State = Pred->getState();
+ DynamicDispatchBifur BM = State->get<DynamicDispatchBifurcationMap>();
+ for (DynamicDispatchBifur::iterator I = BM.begin(),
+ E = BM.end(); I != E; ++I) {
+ if (I->first == BifurReg) {
+ // If we are on "inline path", keep inlining if possible.
+ if (I->second == true)
+ if (inlineCall(Call, D, Bldr, Pred, State))
+ return;
+ // If inline failed, or we are on the path where we assume we
+ // don't have enough info about the receiver to inline, conjure the
+ // return value and invalidate the regions.
+ conservativeEvalCall(Call, Bldr, Pred, State);
+ return;
+ }
+ }
+
+ // If we got here, this is the first time we process a message to this
+ // region, so split the path.
+ ProgramStateRef IState =
+ State->set<DynamicDispatchBifurcationMap>(BifurReg, true);
+ inlineCall(Call, D, Bldr, Pred, IState);
+
+ ProgramStateRef NoIState =
+ State->set<DynamicDispatchBifurcationMap>(BifurReg, false);
+ conservativeEvalCall(Call, Bldr, Pred, NoIState);
+
+ NumOfDynamicDispatchPathSplits++;
+ return;
}
+
void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {