Diffstat (limited to 'lib/StaticAnalyzer')
-rw-r--r--  lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp          |   24
-rw-r--r--  lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h            |   31
-rw-r--r--  lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp      |  208
-rw-r--r--  lib/StaticAnalyzer/Checkers/CMakeLists.txt                     |    3
-rw-r--r--  lib/StaticAnalyzer/Checkers/CStringChecker.cpp                 |   62
-rw-r--r--  lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp           |    2
-rw-r--r--  lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp          |   38
-rw-r--r--  lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp        |    6
-rw-r--r--  lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp           |   12
-rw-r--r--  lib/StaticAnalyzer/Checkers/Checkers.td                        |   23
-rw-r--r--  lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp              |   14
-rw-r--r--  lib/StaticAnalyzer/Checkers/DebugCheckers.cpp                  |    2
-rw-r--r--  lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp         |   18
-rw-r--r--  lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp     |    4
-rw-r--r--  lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp        |   47
-rw-r--r--  lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp        |    3
-rw-r--r--  lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp               |    4
-rw-r--r--  lib/StaticAnalyzer/Checkers/MallocChecker.cpp                  |  790
-rw-r--r--  lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp            |    2
-rw-r--r--  lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp             |  270
-rw-r--r--  lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp             |   13
-rw-r--r--  lib/StaticAnalyzer/Checkers/StreamChecker.cpp                  |    3
-rw-r--r--  lib/StaticAnalyzer/Checkers/TraversalChecker.cpp               |   24
-rw-r--r--  lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp   |    3
-rw-r--r--  lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp |   34
-rw-r--r--  lib/StaticAnalyzer/Core/APSIntType.cpp                         |   31
-rw-r--r--  lib/StaticAnalyzer/Core/AnalysisManager.cpp                    |    3
-rw-r--r--  lib/StaticAnalyzer/Core/AnalyzerOptions.cpp                    |   20
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporter.cpp                        | 1155
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporterVisitors.cpp                |  541
-rw-r--r--  lib/StaticAnalyzer/Core/CallEvent.cpp                          |   96
-rw-r--r--  lib/StaticAnalyzer/Core/CheckerManager.cpp                     |   23
-rw-r--r--  lib/StaticAnalyzer/Core/CoreEngine.cpp                         |   18
-rw-r--r--  lib/StaticAnalyzer/Core/Environment.cpp                        |   42
-rw-r--r--  lib/StaticAnalyzer/Core/ExplodedGraph.cpp                      |   60
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngine.cpp                         |  284
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineC.cpp                        |   98
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineCXX.cpp                      |  164
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp            |  224
-rw-r--r--  lib/StaticAnalyzer/Core/FunctionSummary.cpp                    |   14
-rw-r--r--  lib/StaticAnalyzer/Core/MemRegion.cpp                          |   62
-rw-r--r--  lib/StaticAnalyzer/Core/PathDiagnostic.cpp                     |   92
-rw-r--r--  lib/StaticAnalyzer/Core/PlistDiagnostics.cpp                   |    2
-rw-r--r--  lib/StaticAnalyzer/Core/ProgramState.cpp                       |  106
-rw-r--r--  lib/StaticAnalyzer/Core/RangeConstraintManager.cpp             |   22
-rw-r--r--  lib/StaticAnalyzer/Core/RegionStore.cpp                        |  459
-rw-r--r--  lib/StaticAnalyzer/Core/SValBuilder.cpp                        |  105
-rw-r--r--  lib/StaticAnalyzer/Core/SVals.cpp                              |   27
-rw-r--r--  lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp            |  105
-rw-r--r--  lib/StaticAnalyzer/Core/SimpleConstraintManager.h              |    9
-rw-r--r--  lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp                  |   92
-rw-r--r--  lib/StaticAnalyzer/Core/Store.cpp                              |   88
-rw-r--r--  lib/StaticAnalyzer/Core/SymbolManager.cpp                      |   57
-rw-r--r--  lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp               |   25
-rw-r--r--  lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp            |    5
55 files changed, 3951 insertions, 1718 deletions
diff --git a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp
new file mode 100644
index 0000000000..3dec8a58c9
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp
@@ -0,0 +1,24 @@
+//=- AllocationDiagnostics.cpp - Config options for allocation diags *- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the configuration functions for leaks/allocation diagnostics.
+//
+//===--------------------------
+
+#include "AllocationDiagnostics.h"
+
+namespace clang {
+namespace ento {
+
+bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts) {
+ return AOpts.getBooleanOption("leak-diagnostics-reference-allocation",
+ false);
+}
+
+}}
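For illustration, a minimal sketch (not part of the patch) of how the new option is consumed and turned on. The call site below is hypothetical; only shouldIncludeAllocationSiteInLeakDiagnostics() and the option string come from the code above, and the -analyzer-config spelling is the usual way such string options reach AnalyzerOptions::getBooleanOption().

    // Hypothetical consumer inside a checker (sketch only):
    //   AnalyzerOptions &Opts = ...;  // e.g. obtained from the AnalysisManager
    //   if (shouldIncludeAllocationSiteInLeakDiagnostics(Opts)) {
    //     // point the leak report's unique location at the allocation site
    //   }
    //
    // Assumed driver invocation enabling the behavior:
    //   clang -cc1 -analyze -analyzer-config leak-diagnostics-reference-allocation=true ...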
diff --git a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
new file mode 100644
index 0000000000..2b314a3b74
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
@@ -0,0 +1,31 @@
+//=--- AllocationDiagnostics.h - Config options for allocation diags *- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the configuration functions for leaks/allocation diagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_LIB_CHECKERS_ALLOC_DIAGS_H
+#define LLVM_CLANG_SA_LIB_CHECKERS_ALLOC_DIAGS_H
+
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+
+namespace clang { namespace ento {
+
+/// \brief Returns true if leak diagnostics should directly reference
+/// the allocation site (where possible).
+///
+/// The default is false.
+///
+bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts);
+
+}}
+
+#endif
+
diff --git a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index fdb6bbbe52..fba14a0fc4 100644
--- a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -83,10 +83,6 @@ static FoundationClass findKnownClass(const ObjCInterfaceDecl *ID) {
return result;
}
-static inline bool isNil(SVal X) {
- return X.getAs<loc::ConcreteInt>().hasValue();
-}
-
//===----------------------------------------------------------------------===//
// NilArgChecker - Check for prohibited nil arguments to ObjC method calls.
//===----------------------------------------------------------------------===//
@@ -95,29 +91,66 @@ namespace {
class NilArgChecker : public Checker<check::PreObjCMessage> {
mutable OwningPtr<APIMisuse> BT;
- void WarnNilArg(CheckerContext &C,
- const ObjCMethodCall &msg, unsigned Arg) const;
+ void WarnIfNilArg(CheckerContext &C,
+ const ObjCMethodCall &msg, unsigned Arg,
+ FoundationClass Class,
+ bool CanBeSubscript = false) const;
public:
void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
};
}
-void NilArgChecker::WarnNilArg(CheckerContext &C,
- const ObjCMethodCall &msg,
- unsigned int Arg) const
-{
+void NilArgChecker::WarnIfNilArg(CheckerContext &C,
+ const ObjCMethodCall &msg,
+ unsigned int Arg,
+ FoundationClass Class,
+ bool CanBeSubscript) const {
+ // Check if the argument is nil.
+ ProgramStateRef State = C.getState();
+ if (!State->isNull(msg.getArgSVal(Arg)).isConstrainedTrue())
+ return;
+
if (!BT)
BT.reset(new APIMisuse("nil argument"));
-
+
if (ExplodedNode *N = C.generateSink()) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
- os << "Argument to '" << GetReceiverInterfaceName(msg) << "' method '"
- << msg.getSelector().getAsString() << "' cannot be nil";
+
+ if (CanBeSubscript && msg.getMessageKind() == OCM_Subscript) {
+
+ if (Class == FC_NSArray) {
+ os << "Array element cannot be nil";
+ } else if (Class == FC_NSDictionary) {
+ if (Arg == 0) {
+ os << "Value stored into '";
+ os << GetReceiverInterfaceName(msg) << "' cannot be nil";
+ } else {
+ assert(Arg == 1);
+ os << "'"<< GetReceiverInterfaceName(msg) << "' key cannot be nil";
+ }
+ } else
+ llvm_unreachable("Missing foundation class for the subscript expr");
+
+ } else {
+ if (Class == FC_NSDictionary) {
+ if (Arg == 0)
+ os << "Value argument ";
+ else {
+ assert(Arg == 1);
+ os << "Key argument ";
+ }
+ os << "to '" << msg.getSelector().getAsString() << "' cannot be nil";
+ } else {
+ os << "Argument to '" << GetReceiverInterfaceName(msg) << "' method '"
+ << msg.getSelector().getAsString() << "' cannot be nil";
+ }
+ }
BugReport *R = new BugReport(*BT, os.str(), N);
R->addRange(msg.getArgSourceRange(Arg));
+ bugreporter::trackNullOrUndefValue(N, msg.getArgExpr(Arg), *R);
C.emitReport(R);
}
}
@@ -127,8 +160,14 @@ void NilArgChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
const ObjCInterfaceDecl *ID = msg.getReceiverInterface();
if (!ID)
return;
+
+ FoundationClass Class = findKnownClass(ID);
+
+ static const unsigned InvalidArgIndex = UINT_MAX;
+ unsigned Arg = InvalidArgIndex;
+ bool CanBeSubscript = false;
- if (findKnownClass(ID) == FC_NSString) {
+ if (Class == FC_NSString) {
Selector S = msg.getSelector();
if (S.isUnarySelector())
@@ -152,10 +191,58 @@ void NilArgChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
Name == "compare:options:range:locale:" ||
Name == "componentsSeparatedByCharactersInSet:" ||
Name == "initWithFormat:") {
- if (isNil(msg.getArgSVal(0)))
- WarnNilArg(C, msg, 0);
+ Arg = 0;
+ }
+ } else if (Class == FC_NSArray) {
+ Selector S = msg.getSelector();
+
+ if (S.isUnarySelector())
+ return;
+
+ if (S.getNameForSlot(0).equals("addObject")) {
+ Arg = 0;
+ } else if (S.getNameForSlot(0).equals("insertObject") &&
+ S.getNameForSlot(1).equals("atIndex")) {
+ Arg = 0;
+ } else if (S.getNameForSlot(0).equals("replaceObjectAtIndex") &&
+ S.getNameForSlot(1).equals("withObject")) {
+ Arg = 1;
+ } else if (S.getNameForSlot(0).equals("setObject") &&
+ S.getNameForSlot(1).equals("atIndexedSubscript")) {
+ Arg = 0;
+ CanBeSubscript = true;
+ } else if (S.getNameForSlot(0).equals("arrayByAddingObject")) {
+ Arg = 0;
+ }
+ } else if (Class == FC_NSDictionary) {
+ Selector S = msg.getSelector();
+
+ if (S.isUnarySelector())
+ return;
+
+ if (S.getNameForSlot(0).equals("dictionaryWithObject") &&
+ S.getNameForSlot(1).equals("forKey")) {
+ Arg = 0;
+ WarnIfNilArg(C, msg, /* Arg */1, Class);
+ } else if (S.getNameForSlot(0).equals("setObject") &&
+ S.getNameForSlot(1).equals("forKey")) {
+ Arg = 0;
+ WarnIfNilArg(C, msg, /* Arg */1, Class);
+ } else if (S.getNameForSlot(0).equals("setObject") &&
+ S.getNameForSlot(1).equals("forKeyedSubscript")) {
+ CanBeSubscript = true;
+ Arg = 0;
+ WarnIfNilArg(C, msg, /* Arg */1, Class, CanBeSubscript);
+ } else if (S.getNameForSlot(0).equals("removeObjectForKey")) {
+ Arg = 0;
}
}
+
+
+ // If argument is '0', report a warning.
+ if ((Arg != InvalidArgIndex))
+ WarnIfNilArg(C, msg, Arg, Class, CanBeSubscript);
+
}
//===----------------------------------------------------------------------===//
@@ -301,7 +388,7 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
// FIXME: If the pointee isn't an integer type, should we flag a warning?
// People can do weird stuff with pointers.
- if (!T->isIntegerType())
+ if (!T->isIntegralOrEnumerationType())
return;
uint64_t SourceSize = Ctx.getTypeSize(T);
@@ -672,38 +759,81 @@ static bool isKnownNonNilCollectionType(QualType T) {
}
}
-void ObjCLoopChecker::checkPostStmt(const ObjCForCollectionStmt *FCS,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
-
- // Check if this is the branch for the end of the loop.
- SVal CollectionSentinel = State->getSVal(FCS, C.getLocationContext());
- if (CollectionSentinel.isZeroConstant())
- return;
-
+/// Assumes that the collection is non-nil.
+///
+/// If the collection is known to be nil, returns NULL to indicate an infeasible
+/// path.
+static ProgramStateRef checkCollectionNonNil(CheckerContext &C,
+ ProgramStateRef State,
+ const ObjCForCollectionStmt *FCS) {
+ if (!State)
+ return NULL;
+
+ SVal CollectionVal = C.getSVal(FCS->getCollection());
+ Optional<DefinedSVal> KnownCollection = CollectionVal.getAs<DefinedSVal>();
+ if (!KnownCollection)
+ return State;
+
+ ProgramStateRef StNonNil, StNil;
+ llvm::tie(StNonNil, StNil) = State->assume(*KnownCollection);
+ if (StNil && !StNonNil) {
+ // The collection is nil. This path is infeasible.
+ return NULL;
+ }
+
+ return StNonNil;
+}
+
+/// Assumes that the collection elements are non-nil.
+///
+/// This only applies if the collection is one of those known not to contain
+/// nil values.
+static ProgramStateRef checkElementNonNil(CheckerContext &C,
+ ProgramStateRef State,
+ const ObjCForCollectionStmt *FCS) {
+ if (!State)
+ return NULL;
+
// See if the collection is one where we /know/ the elements are non-nil.
- const Expr *Collection = FCS->getCollection();
- if (!isKnownNonNilCollectionType(Collection->getType()))
- return;
-
- // FIXME: Copied from ExprEngineObjC.
+ if (!isKnownNonNilCollectionType(FCS->getCollection()->getType()))
+ return State;
+
+ const LocationContext *LCtx = C.getLocationContext();
const Stmt *Element = FCS->getElement();
- SVal ElementVar;
+
+ // FIXME: Copied from ExprEngineObjC.
+ Optional<Loc> ElementLoc;
if (const DeclStmt *DS = dyn_cast<DeclStmt>(Element)) {
const VarDecl *ElemDecl = cast<VarDecl>(DS->getSingleDecl());
assert(ElemDecl->getInit() == 0);
- ElementVar = State->getLValue(ElemDecl, C.getLocationContext());
+ ElementLoc = State->getLValue(ElemDecl, LCtx);
} else {
- ElementVar = State->getSVal(Element, C.getLocationContext());
+ ElementLoc = State->getSVal(Element, LCtx).getAs<Loc>();
}
- if (!ElementVar.getAs<Loc>())
- return;
+ if (!ElementLoc)
+ return State;
// Go ahead and assume the value is non-nil.
- SVal Val = State->getSVal(ElementVar.castAs<Loc>());
- State = State->assume(Val.castAs<DefinedOrUnknownSVal>(), true);
- C.addTransition(State);
+ SVal Val = State->getSVal(*ElementLoc);
+ return State->assume(Val.castAs<DefinedOrUnknownSVal>(), true);
+}
+
+void ObjCLoopChecker::checkPostStmt(const ObjCForCollectionStmt *FCS,
+ CheckerContext &C) const {
+ // Check if this is the branch for the end of the loop.
+ SVal CollectionSentinel = C.getSVal(FCS);
+ if (CollectionSentinel.isZeroConstant())
+ return;
+
+ ProgramStateRef State = C.getState();
+ State = checkCollectionNonNil(C, State, FCS);
+ State = checkElementNonNil(C, State, FCS);
+
+ if (!State)
+ C.generateSink();
+ else if (State != C.getState())
+ C.addTransition(State);
}
namespace {
diff --git a/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
index b7df10e7ff..7da6825106 100644
--- a/lib/StaticAnalyzer/Checkers/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -4,6 +4,7 @@ clang_tablegen(Checkers.inc -gen-clang-sa-checkers
TARGET ClangSACheckers)
add_clang_library(clangStaticAnalyzerCheckers
+ AllocationDiagnostics.cpp
AnalyzerStatsChecker.cpp
ArrayBoundChecker.cpp
ArrayBoundCheckerV2.cpp
@@ -42,8 +43,8 @@ add_clang_library(clangStaticAnalyzerCheckers
MallocSizeofChecker.cpp
NSAutoreleasePoolChecker.cpp
NSErrorChecker.cpp
- NonNullParamChecker.cpp
NoReturnFunctionChecker.cpp
+ NonNullParamChecker.cpp
ObjCAtSyncChecker.cpp
ObjCContainersASTChecker.cpp
ObjCContainersChecker.cpp
diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index cc55e9f6ec..aa1ca6f2f8 100644
--- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -114,6 +114,8 @@ public:
bool isBounded = false,
bool ignoreCase = false) const;
+ void evalStrsep(CheckerContext &C, const CallExpr *CE) const;
+
// Utility methods
std::pair<ProgramStateRef , ProgramStateRef >
static assumeZero(CheckerContext &C,
@@ -1752,6 +1754,63 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
C.addTransition(state);
}
+void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
+ //char *strsep(char **stringp, const char *delim);
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // Sanity: does the search string parameter match the return type?
+ const Expr *SearchStrPtr = CE->getArg(0);
+ QualType CharPtrTy = SearchStrPtr->getType()->getPointeeType();
+ if (CharPtrTy.isNull() ||
+ CE->getType().getUnqualifiedType() != CharPtrTy.getUnqualifiedType())
+ return;
+
+ CurrentFunctionDescription = "strsep()";
+ ProgramStateRef State = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ // Check that the search string pointer is non-null (though it may point to
+ // a null string).
+ SVal SearchStrVal = State->getSVal(SearchStrPtr, LCtx);
+ State = checkNonNull(C, State, SearchStrPtr, SearchStrVal);
+ if (!State)
+ return;
+
+ // Check that the delimiter string is non-null.
+ const Expr *DelimStr = CE->getArg(1);
+ SVal DelimStrVal = State->getSVal(DelimStr, LCtx);
+ State = checkNonNull(C, State, DelimStr, DelimStrVal);
+ if (!State)
+ return;
+
+ SValBuilder &SVB = C.getSValBuilder();
+ SVal Result;
+ if (Optional<Loc> SearchStrLoc = SearchStrVal.getAs<Loc>()) {
+ // Get the current value of the search string pointer, as a char*.
+ Result = State->getSVal(*SearchStrLoc, CharPtrTy);
+
+ // Invalidate the search string, representing the change of one delimiter
+ // character to NUL.
+ State = InvalidateBuffer(C, State, SearchStrPtr, Result);
+
+ // Overwrite the search string pointer. The new value is either an address
+ // further along in the same string, or NULL if there are no more tokens.
+ State = State->bindLoc(*SearchStrLoc,
+ SVB.conjureSymbolVal(getTag(), CE, LCtx, CharPtrTy,
+ C.blockCount()));
+ } else {
+ assert(SearchStrVal.isUnknown());
+ // Conjure a symbolic value. It's the best we can do.
+ Result = SVB.conjureSymbolVal(0, CE, LCtx, C.blockCount());
+ }
+
+ // Set the return value, and finish.
+ State = State->BindExpr(CE, LCtx, Result);
+ C.addTransition(State);
+}
+
+
//===----------------------------------------------------------------------===//
// The driver method, and other Checker callbacks.
//===----------------------------------------------------------------------===//
@@ -1762,6 +1821,7 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
if (!FDecl)
return false;
+ // FIXME: Poorly-factored string switches are slow.
FnCheck evalFunction = 0;
if (C.isCLibraryFunction(FDecl, "memcpy"))
evalFunction = &CStringChecker::evalMemcpy;
@@ -1793,6 +1853,8 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
evalFunction = &CStringChecker::evalStrcasecmp;
else if (C.isCLibraryFunction(FDecl, "strncasecmp"))
evalFunction = &CStringChecker::evalStrncasecmp;
+ else if (C.isCLibraryFunction(FDecl, "strsep"))
+ evalFunction = &CStringChecker::evalStrsep;
else if (C.isCLibraryFunction(FDecl, "bcopy"))
evalFunction = &CStringChecker::evalBcopy;
else if (C.isCLibraryFunction(FDecl, "bcmp"))
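As context for the evalStrsep() model added above, here is the call pattern it simulates: each call advances the search-string pointer (or sets it to NULL once the input is exhausted) and returns a token pointing into the original buffer. The snippet is illustrative only and assumes a platform that provides BSD/glibc strsep().

    #include <stdio.h>
    #include <string.h>

    // Tokenize a mutable, comma-separated buffer with strsep().
    void printTokens(char *buf) {
      char *rest = buf;                       // the 'stringp' argument, updated by each call
      while (char *tok = strsep(&rest, ","))  // NULL once the string is consumed
        printf("%s\n", tok);
    }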
diff --git a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
index 3a57a56aea..92c0eef3e8 100644
--- a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -101,6 +101,8 @@ public:
// - strncat(dst, src, sizeof(dst) - 1);
// - strncat(dst, src, sizeof(dst));
bool WalkAST::containsBadStrncatPattern(const CallExpr *CE) {
+ if (CE->getNumArgs() != 3)
+ return false;
const Expr *DstArg = CE->getArg(0);
const Expr *SrcArg = CE->getArg(1);
const Expr *LenArg = CE->getArg(2);
diff --git a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 37e203df8e..4965d22996 100644
--- a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -366,17 +366,23 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
if (!BT_msg_ret)
BT_msg_ret.reset(
- new BuiltinBug("Receiver in message expression is "
- "'nil' and returns a garbage value"));
+ new BuiltinBug("Receiver in message expression is 'nil'"));
const ObjCMessageExpr *ME = msg.getOriginExpr();
+ QualType ResTy = msg.getResultType();
+
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
os << "The receiver of message '" << ME->getSelector().getAsString()
- << "' is nil and returns a value of type '";
- msg.getResultType().print(os, C.getLangOpts());
- os << "' that will be garbage";
+ << "' is nil";
+ if (ResTy->isReferenceType()) {
+ os << ", which results in forming a null reference";
+ } else {
+ os << " and returns a value of type '";
+ msg.getResultType().print(os, C.getLangOpts());
+ os << "' that will be garbage";
+ }
BugReport *report = new BugReport(*BT_msg_ret, os.str(), N);
report->addRange(ME->getReceiverRange());
@@ -397,6 +403,7 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
ProgramStateRef state,
const ObjCMethodCall &Msg) const {
ASTContext &Ctx = C.getASTContext();
+ static SimpleProgramPointTag Tag("CallAndMessageChecker : NilReceiver");
// Check the return type of the message expression. A message to nil will
// return different values depending on the return type and the architecture.
@@ -407,7 +414,7 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
if (CanRetTy->isStructureOrClassType()) {
// Structure returns are safe since the compiler zeroes them out.
SVal V = C.getSValBuilder().makeZeroVal(RetTy);
- C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V));
+ C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V), &Tag);
return;
}
@@ -418,14 +425,15 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
const uint64_t returnTypeSize = Ctx.getTypeSize(CanRetTy);
- if (voidPtrSize < returnTypeSize &&
- !(supportsNilWithFloatRet(Ctx.getTargetInfo().getTriple()) &&
- (Ctx.FloatTy == CanRetTy ||
- Ctx.DoubleTy == CanRetTy ||
- Ctx.LongDoubleTy == CanRetTy ||
- Ctx.LongLongTy == CanRetTy ||
- Ctx.UnsignedLongLongTy == CanRetTy))) {
- if (ExplodedNode *N = C.generateSink(state))
+ if (CanRetTy.getTypePtr()->isReferenceType()||
+ (voidPtrSize < returnTypeSize &&
+ !(supportsNilWithFloatRet(Ctx.getTargetInfo().getTriple()) &&
+ (Ctx.FloatTy == CanRetTy ||
+ Ctx.DoubleTy == CanRetTy ||
+ Ctx.LongDoubleTy == CanRetTy ||
+ Ctx.LongLongTy == CanRetTy ||
+ Ctx.UnsignedLongLongTy == CanRetTy)))) {
+ if (ExplodedNode *N = C.generateSink(state, 0 , &Tag))
emitNilReceiverBug(C, Msg, N);
return;
}
@@ -444,7 +452,7 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
// of this case unless we have *a lot* more knowledge.
//
SVal V = C.getSValBuilder().makeZeroVal(RetTy);
- C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V));
+ C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V), &Tag);
return;
}
diff --git a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index 7ef13ab538..63080ea230 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -345,7 +345,7 @@ void WalkAST::checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD) {
return;
// Verify the first argument type is integer.
- if (!FPT->getArgType(0)->isIntegerType())
+ if (!FPT->getArgType(0)->isIntegralOrUnscopedEnumerationType())
return;
// Verify the second argument type is char*.
@@ -602,7 +602,7 @@ void WalkAST::checkCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
if (!PT)
return;
- if (! PT->getPointeeType()->isIntegerType())
+ if (! PT->getPointeeType()->isIntegralOrUnscopedEnumerationType())
return;
}
else if (FTP->getNumArgs() != 0)
@@ -725,7 +725,7 @@ void WalkAST::checkUncheckedReturnValue(CallExpr *CE) {
// The arguments must be integers.
for (unsigned i = 0; i < FTP->getNumArgs(); i++)
- if (! FTP->getArgType(i)->isIntegerType())
+ if (! FTP->getArgType(i)->isIntegralOrUnscopedEnumerationType())
return;
// Issue a warning.
diff --git a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index ed89788e90..a9dd19a395 100644
--- a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -52,6 +52,7 @@ class CheckerDocumentation : public Checker< check::PreStmt<ReturnStmt>,
check::LiveSymbols,
check::RegionChanges,
check::PointerEscape,
+ check::ConstPointerEscape,
check::Event<ImplicitNullDerefEvent>,
check::ASTDecl<FunctionDecl> > {
public:
@@ -274,6 +275,17 @@ public:
return State;
}
+ /// \brief Called when const pointers escape.
+ ///
+ /// Note: in most cases checkPointerEscape callback is sufficient.
+ /// \sa checkPointerEscape
+ ProgramStateRef checkConstPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const {
+ return State;
+ }
+
/// check::Event<ImplicitNullDerefEvent>
void checkEvent(ImplicitNullDerefEvent Event) const {}
diff --git a/lib/StaticAnalyzer/Checkers/Checkers.td b/lib/StaticAnalyzer/Checkers/Checkers.td
index bbcf9aa438..fc35b223ee 100644
--- a/lib/StaticAnalyzer/Checkers/Checkers.td
+++ b/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -166,12 +166,24 @@ def ReturnUndefChecker : Checker<"UndefReturn">,
// C++ checkers.
//===----------------------------------------------------------------------===//
+let ParentPackage = Cplusplus in {
+
+def NewDeleteChecker : Checker<"NewDelete">,
+ HelpText<"Check for double-free and use-after-free problems. Traces memory managed by new/delete.">,
+ DescFile<"MallocChecker.cpp">;
+
+} // end: "cplusplus"
+
let ParentPackage = CplusplusAlpha in {
def VirtualCallChecker : Checker<"VirtualCall">,
HelpText<"Check virtual function calls during construction or destruction">,
DescFile<"VirtualCallChecker.cpp">;
+def NewDeleteLeaksChecker : Checker<"NewDeleteLeaks">,
+ HelpText<"Check for memory leaks. Traces memory managed by new/delete.">,
+ DescFile<"MallocChecker.cpp">;
+
} // end: "alpha.cplusplus"
//===----------------------------------------------------------------------===//
@@ -276,12 +288,16 @@ def UnixAPIChecker : Checker<"API">,
DescFile<"UnixAPIChecker.cpp">;
def MallocPessimistic : Checker<"Malloc">,
- HelpText<"Check for memory leaks, double free, and use-after-free problems.">,
+ HelpText<"Check for memory leaks, double free, and use-after-free problems. Traces memory managed by malloc()/free().">,
DescFile<"MallocChecker.cpp">;
def MallocSizeofChecker : Checker<"MallocSizeof">,
HelpText<"Check for dubious malloc arguments involving sizeof">,
DescFile<"MallocSizeofChecker.cpp">;
+
+def MismatchedDeallocatorChecker : Checker<"MismatchedDeallocator">,
+ HelpText<"Check for mismatched deallocators.">,
+ DescFile<"MallocChecker.cpp">;
} // end "unix"
@@ -292,7 +308,7 @@ def ChrootChecker : Checker<"Chroot">,
DescFile<"ChrootChecker.cpp">;
def MallocOptimistic : Checker<"MallocWithAnnotations">,
- HelpText<"Check for memory leaks, double free, and use-after-free problems. Assumes that all user-defined functions which might free a pointer are annotated.">,
+ HelpText<"Check for memory leaks, double free, and use-after-free problems. Traces memory managed by malloc()/free(). Assumes that all user-defined functions which might free a pointer are annotated.">,
DescFile<"MallocChecker.cpp">;
def PthreadLockChecker : Checker<"PthreadLock">,
@@ -343,7 +359,7 @@ let ParentPackage = OSX in {
def MacOSXAPIChecker : Checker<"API">,
InPackage<OSX>,
- HelpText<"Check for proper uses of various Mac OS X APIs">,
+ HelpText<"Check for proper uses of various Apple APIs">,
DescFile<"MacOSXAPIChecker.cpp">;
def MacOSKeychainAPIChecker : Checker<"SecKeychainAPI">,
@@ -523,4 +539,3 @@ def ExprInspectionChecker : Checker<"ExprInspection">,
DescFile<"ExprInspectionChecker.cpp">;
} // end "debug"
-
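To make the new checker entries concrete, a couple of illustrative snippets of the code they are meant to flag (hypothetical test cases, not part of this patch). The registered names follow the package prefixes above: cplusplus.NewDelete, alpha.cplusplus.NewDeleteLeaks, and unix.MismatchedDeallocator.

    #include <cstdlib>

    void mismatchedDealloc() {
      int *p = static_cast<int *>(std::malloc(sizeof(int)));
      delete p;             // unix.MismatchedDeallocator: malloc()'d memory released with 'delete'
    }

    int useAfterDelete() {
      int *q = new int(1);
      delete q;
      return *q;            // cplusplus.NewDelete: use of memory after it is freed
    }

    void leak() {
      int *r = new int[8];  // alpha.cplusplus.NewDeleteLeaks: 'r' is leaked at end of scope
      r[0] = 0;
    }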
diff --git a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index f2e3e6d781..f336a6e680 100644
--- a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -101,7 +101,8 @@ void ReachableCode::computeReachableBlocks() {
}
}
-static const Expr *LookThroughTransitiveAssignments(const Expr *Ex) {
+static const Expr *
+LookThroughTransitiveAssignmentsAndCommaOperators(const Expr *Ex) {
while (Ex) {
const BinaryOperator *BO =
dyn_cast<BinaryOperator>(Ex->IgnoreParenCasts());
@@ -111,6 +112,10 @@ static const Expr *LookThroughTransitiveAssignments(const Expr *Ex) {
Ex = BO->getRHS();
continue;
}
+ if (BO->getOpcode() == BO_Comma) {
+ Ex = BO->getRHS();
+ continue;
+ }
break;
}
return Ex;
@@ -266,7 +271,9 @@ public:
if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
// Special case: check for assigning null to a pointer.
// This is a common form of defensive programming.
- const Expr *RHS = LookThroughTransitiveAssignments(B->getRHS());
+ const Expr *RHS =
+ LookThroughTransitiveAssignmentsAndCommaOperators(B->getRHS());
+ RHS = RHS->IgnoreParenCasts();
QualType T = VD->getType();
if (T->isPointerType() || T->isObjCObjectPointerType()) {
@@ -274,7 +281,6 @@ public:
return;
}
- RHS = RHS->IgnoreParenCasts();
// Special case: self-assignments. These are often used to shut up
// "unused variable" compiler warnings.
if (const DeclRefExpr *RhsDR = dyn_cast<DeclRefExpr>(RHS))
@@ -326,7 +332,7 @@ public:
// Look through transitive assignments, e.g.:
// int x = y = 0;
- E = LookThroughTransitiveAssignments(E);
+ E = LookThroughTransitiveAssignmentsAndCommaOperators(E);
// Don't warn on C++ objects (yet) until we can show that their
// constructors/destructors don't have side effects.
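For reference, the kinds of right-hand sides the renamed helper now looks through, shown as a hypothetical sketch (not from the patch): both chained assignments and comma operators resolve to their final sub-expression before the defensive null-pointer special case is applied.

    void touch();

    void example(int *p, int *q, int *r) {
      p = q = 0;         // chained assignment: the helper walks down to the trailing '0'
      r = (touch(), 0);  // comma operator: likewise resolves to the trailing '0'
    }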
diff --git a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 29b4a637cd..fe12866e51 100644
--- a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines a checkers that display debugging information.
+// This file defines checkers that display debugging information.
//
//===----------------------------------------------------------------------===//
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index 9f176a4b5b..759aa6605e 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -27,7 +27,8 @@ namespace {
class DynamicTypePropagation:
public Checker< check::PreCall,
check::PostCall,
- check::PostStmt<ImplicitCastExpr> > {
+ check::PostStmt<ImplicitCastExpr>,
+ check::PostStmt<CXXNewExpr> > {
const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
CheckerContext &C) const;
@@ -38,6 +39,7 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostStmt(const ImplicitCastExpr *CastE, CheckerContext &C) const;
+ void checkPostStmt(const CXXNewExpr *NewE, CheckerContext &C) const;
};
}
@@ -190,6 +192,20 @@ void DynamicTypePropagation::checkPostStmt(const ImplicitCastExpr *CastE,
return;
}
+void DynamicTypePropagation::checkPostStmt(const CXXNewExpr *NewE,
+ CheckerContext &C) const {
+ if (NewE->isArray())
+ return;
+
+ // We only track dynamic type info for regions.
+ const MemRegion *MR = C.getSVal(NewE).getAsRegion();
+ if (!MR)
+ return;
+
+ C.addTransition(C.getState()->setDynamicTypeInfo(MR, NewE->getType(),
+ /*CanBeSubclass=*/false));
+}
+
const ObjCObjectType *
DynamicTypePropagation::getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
CheckerContext &C) const {
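The new CXXNewExpr callback matters for cases like the following hypothetical sketch: a non-array new-expression yields an object whose dynamic type is known exactly, so later virtual calls through that pointer can be resolved precisely during analysis.

    struct Base    { virtual int value() { return 0; } };
    struct Derived : Base { virtual int value() { return 1; } };

    int known() {
      Base *b = new Derived();  // dynamic type recorded as Derived, CanBeSubclass = false
      return b->value();        // may now be devirtualized to Derived::value
    }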
diff --git a/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp b/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
index 579ba9cf80..271ba4702c 100644
--- a/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
@@ -423,12 +423,12 @@ void IdempotentOperationChecker::checkEndAnalysis(ExplodedGraph &G,
if (LHSRelevant) {
const Expr *LHS = i->first->getLHS();
report->addRange(LHS->getSourceRange());
- FindLastStoreBRVisitor::registerStatementVarDecls(*report, LHS);
+ FindLastStoreBRVisitor::registerStatementVarDecls(*report, LHS, false);
}
if (RHSRelevant) {
const Expr *RHS = i->first->getRHS();
report->addRange(i->first->getRHS()->getSourceRange());
- FindLastStoreBRVisitor::registerStatementVarDecls(*report, RHS);
+ FindLastStoreBRVisitor::registerStatementVarDecls(*report, RHS, false);
}
BR.emitReport(report);
diff --git a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index 5ed28e955d..cc940be7b1 100644
--- a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -437,6 +437,7 @@ visit(const ObjCImplementationDecl *ImplD) const {
// Remove ivars invalidated by the partial invalidation methods. They do not
// need to be invalidated in the regular invalidation methods.
+ bool AtImplementationContainsAtLeastOnePartialInvalidationMethod = false;
for (MethodSet::iterator
I = PartialInfo.InvalidationMethods.begin(),
E = PartialInfo.InvalidationMethods.end(); I != E; ++I) {
@@ -446,6 +447,8 @@ visit(const ObjCImplementationDecl *ImplD) const {
const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
InterfD->isInstanceMethod());
if (D && D->hasBody()) {
+ AtImplementationContainsAtLeastOnePartialInvalidationMethod = true;
+
bool CalledAnotherInvalidationMethod = false;
      // The MethodCrawler is going to remove the invalidated ivars.
MethodCrawler(Ivars,
@@ -471,7 +474,7 @@ visit(const ObjCImplementationDecl *ImplD) const {
containsInvalidationMethod(InterfaceD, Info, /*LookForPartial*/ false);
// Report an error in case none of the invalidation methods are declared.
- if (!Info.needsInvalidation()) {
+ if (!Info.needsInvalidation() && !PartialInfo.needsInvalidation()) {
if (Filter.check_MissingInvalidationMethod)
reportNoInvalidationMethod(FirstIvarDecl, IvarToPopertyMap, InterfaceD,
/*MissingDeclaration*/ true);
@@ -520,9 +523,19 @@ visit(const ObjCImplementationDecl *ImplD) const {
}
// Report an error in case none of the invalidation methods are implemented.
- if (!AtImplementationContainsAtLeastOneInvalidationMethod)
- reportNoInvalidationMethod(FirstIvarDecl, IvarToPopertyMap, InterfaceD,
- /*MissingDeclaration*/ false);
+ if (!AtImplementationContainsAtLeastOneInvalidationMethod) {
+ if (AtImplementationContainsAtLeastOnePartialInvalidationMethod) {
+      // Warn on the ivars that were not invalidated by the partial
+ // invalidation methods.
+ for (IvarSet::const_iterator
+ I = Ivars.begin(), E = Ivars.end(); I != E; ++I)
+ reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, 0);
+ } else {
+ // Otherwise, no invalidation methods were implemented.
+ reportNoInvalidationMethod(FirstIvarDecl, IvarToPopertyMap, InterfaceD,
+ /*MissingDeclaration*/ false);
+ }
+ }
}
void IvarInvalidationCheckerImpl::
@@ -551,19 +564,27 @@ reportNoInvalidationMethod(const ObjCIvarDecl *FirstIvarDecl,
void IvarInvalidationCheckerImpl::
reportIvarNeedsInvalidation(const ObjCIvarDecl *IvarD,
- const IvarToPropMapTy &IvarToPopertyMap,
- const ObjCMethodDecl *MethodD) const {
+ const IvarToPropMapTy &IvarToPopertyMap,
+ const ObjCMethodDecl *MethodD) const {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
printIvar(os, IvarD, IvarToPopertyMap);
os << "needs to be invalidated or set to nil";
- PathDiagnosticLocation MethodDecLocation =
- PathDiagnosticLocation::createEnd(MethodD->getBody(),
- BR.getSourceManager(),
- Mgr.getAnalysisDeclContext(MethodD));
- BR.EmitBasicReport(MethodD, "Incomplete invalidation",
- categories::CoreFoundationObjectiveC, os.str(),
- MethodDecLocation);
+ if (MethodD) {
+ PathDiagnosticLocation MethodDecLocation =
+ PathDiagnosticLocation::createEnd(MethodD->getBody(),
+ BR.getSourceManager(),
+ Mgr.getAnalysisDeclContext(MethodD));
+ BR.EmitBasicReport(MethodD, "Incomplete invalidation",
+ categories::CoreFoundationObjectiveC, os.str(),
+ MethodDecLocation);
+ } else {
+ BR.EmitBasicReport(IvarD, "Incomplete invalidation",
+ categories::CoreFoundationObjectiveC, os.str(),
+ PathDiagnosticLocation::createBegin(IvarD,
+ BR.getSourceManager()));
+
+ }
}
void IvarInvalidationCheckerImpl::MethodCrawler::markInvalidated(
diff --git a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index 2cd4afe718..f1f06c798c 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -91,7 +91,8 @@ private:
inline void initBugType() const {
if (!BT)
- BT.reset(new BugType("Improper use of SecKeychain API", "Mac OS API"));
+ BT.reset(new BugType("Improper use of SecKeychain API",
+ "API Misuse (Apple)"));
}
void generateDeallocatorMismatchReport(const AllocationPair &AP,
diff --git a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index a76b3d7a7e..32ebb51226 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This defines MacOSXAPIChecker, which is an assortment of checks on calls
-// to various, widely used Mac OS X functions.
+// to various, widely used Apple APIs.
//
// FIXME: What's currently in BasicObjCFoundationChecks.cpp should be migrated
// to here, using the new Checker interface.
@@ -68,7 +68,7 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
if (!BT_dispatchOnce)
BT_dispatchOnce.reset(new BugType("Improper use of 'dispatch_once'",
- "Mac OS X API"));
+ "API Misuse (Apple)"));
// Handle _dispatch_once. In some versions of the OS X SDK we have the case
// that dispatch_once is a macro that wraps a call to _dispatch_once.
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 28a2999f04..5d3eb65148 100644
--- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -35,6 +35,14 @@ using namespace ento;
namespace {
+// Used to check correspondence between allocators and deallocators.
+enum AllocationFamily {
+ AF_None,
+ AF_Malloc,
+ AF_CXXNew,
+ AF_CXXNewArray
+};
+
class RefState {
enum Kind { // Reference to allocated memory.
Allocated,
@@ -42,33 +50,53 @@ class RefState {
Released,
// The responsibility for freeing resources has transfered from
// this reference. A relinquished symbol should not be freed.
- Relinquished } K;
+ Relinquished,
+ // We are no longer guaranteed to have observed all manipulations
+ // of this pointer/memory. For example, it could have been
+ // passed as a parameter to an opaque function.
+ Escaped
+ };
+
const Stmt *S;
+ unsigned K : 2; // Kind enum, but stored as a bitfield.
+ unsigned Family : 30; // Rest of 32-bit word, currently just an allocation
+ // family.
+ RefState(Kind k, const Stmt *s, unsigned family)
+ : S(s), K(k), Family(family) {
+ assert(family != AF_None);
+ }
public:
- RefState(Kind k, const Stmt *s) : K(k), S(s) {}
-
bool isAllocated() const { return K == Allocated; }
bool isReleased() const { return K == Released; }
bool isRelinquished() const { return K == Relinquished; }
-
+ bool isEscaped() const { return K == Escaped; }
+ AllocationFamily getAllocationFamily() const {
+ return (AllocationFamily)Family;
+ }
const Stmt *getStmt() const { return S; }
bool operator==(const RefState &X) const {
- return K == X.K && S == X.S;
+ return K == X.K && S == X.S && Family == X.Family;
}
- static RefState getAllocated(const Stmt *s) {
- return RefState(Allocated, s);
+ static RefState getAllocated(unsigned family, const Stmt *s) {
+ return RefState(Allocated, s, family);
+ }
+ static RefState getReleased(unsigned family, const Stmt *s) {
+ return RefState(Released, s, family);
}
- static RefState getReleased(const Stmt *s) { return RefState(Released, s); }
- static RefState getRelinquished(const Stmt *s) {
- return RefState(Relinquished, s);
+ static RefState getRelinquished(unsigned family, const Stmt *s) {
+ return RefState(Relinquished, s, family);
+ }
+ static RefState getEscaped(const RefState *RS) {
+ return RefState(Escaped, RS->getStmt(), RS->getAllocationFamily());
}
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddInteger(K);
ID.AddPointer(S);
+ ID.AddInteger(Family);
}
void dump(raw_ostream &OS) const {
@@ -117,9 +145,12 @@ typedef std::pair<const ExplodedNode*, const MemRegion*> LeakInfo;
class MallocChecker : public Checker<check::DeadSymbols,
check::PointerEscape,
+ check::ConstPointerEscape,
check::PreStmt<ReturnStmt>,
- check::PreStmt<CallExpr>,
+ check::PreCall,
check::PostStmt<CallExpr>,
+ check::PostStmt<CXXNewExpr>,
+ check::PreStmt<CXXDeleteExpr>,
check::PostStmt<BlockExpr>,
check::PostObjCMessage,
check::Location,
@@ -129,6 +160,7 @@ class MallocChecker : public Checker<check::DeadSymbols,
mutable OwningPtr<BugType> BT_Leak;
mutable OwningPtr<BugType> BT_UseFree;
mutable OwningPtr<BugType> BT_BadFree;
+ mutable OwningPtr<BugType> BT_MismatchedDealloc;
mutable OwningPtr<BugType> BT_OffsetFree;
mutable IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc,
*II_valloc, *II_reallocf, *II_strndup, *II_strdup;
@@ -142,12 +174,17 @@ public:
struct ChecksFilter {
DefaultBool CMallocPessimistic;
DefaultBool CMallocOptimistic;
+ DefaultBool CNewDeleteChecker;
+ DefaultBool CNewDeleteLeaksChecker;
+ DefaultBool CMismatchedDeallocatorChecker;
};
ChecksFilter Filter;
- void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPostStmt(const CXXNewExpr *NE, CheckerContext &C) const;
+ void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const;
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
@@ -161,6 +198,10 @@ public:
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const;
+ ProgramStateRef checkConstPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const;
void printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const;
@@ -168,32 +209,52 @@ public:
private:
void initIdentifierInfo(ASTContext &C) const;
+ /// \brief Determine family of a deallocation expression.
+ AllocationFamily getAllocationFamily(CheckerContext &C, const Stmt *S) const;
+
+ /// \brief Print names of allocators and deallocators.
+ ///
+ /// \returns true on success.
+ bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
+ const Expr *E) const;
+
+ /// \brief Print expected name of an allocator based on the deallocator's
+ /// family derived from the DeallocExpr.
+ void printExpectedAllocName(raw_ostream &os, CheckerContext &C,
+ const Expr *DeallocExpr) const;
+ /// \brief Print expected name of a deallocator based on the allocator's
+ /// family.
+ void printExpectedDeallocName(raw_ostream &os, AllocationFamily Family) const;
+
///@{
/// Check if this is one of the functions which can allocate/reallocate memory
/// pointed to by one of its arguments.
bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
bool isFreeFunction(const FunctionDecl *FD, ASTContext &C) const;
bool isAllocationFunction(const FunctionDecl *FD, ASTContext &C) const;
+ bool isStandardNewDelete(const FunctionDecl *FD, ASTContext &C) const;
///@}
static ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
const CallExpr *CE,
const OwnershipAttr* Att);
static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
const Expr *SizeEx, SVal Init,
- ProgramStateRef state) {
+ ProgramStateRef State,
+ AllocationFamily Family = AF_Malloc) {
return MallocMemAux(C, CE,
- state->getSVal(SizeEx, C.getLocationContext()),
- Init, state);
+ State->getSVal(SizeEx, C.getLocationContext()),
+ Init, State, Family);
}
static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
SVal SizeEx, SVal Init,
- ProgramStateRef state);
+ ProgramStateRef State,
+ AllocationFamily Family = AF_Malloc);
/// Update the RefState to reflect the new memory allocation.
- static ProgramStateRef MallocUpdateRefState(CheckerContext &C,
- const CallExpr *CE,
- ProgramStateRef state);
+ static ProgramStateRef
+ MallocUpdateRefState(CheckerContext &C, const Expr *E, ProgramStateRef State,
+ AllocationFamily Family = AF_Malloc);
ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE,
const OwnershipAttr* Att) const;
@@ -216,8 +277,7 @@ private:
///\brief Check if the memory associated with this symbol was released.
bool isReleased(SymbolRef Sym, CheckerContext &C) const;
- bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
- const Stmt *S = 0) const;
+ bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C, const Stmt *S) const;
/// Check if the function is known not to free memory, or if it is
/// "interesting" and should be modeled explicitly.
@@ -227,10 +287,34 @@ private:
bool doesNotFreeMemOrInteresting(const CallEvent *Call,
ProgramStateRef State) const;
+  // Implementation of the checkPointerEscape callbacks.
+ ProgramStateRef checkPointerEscapeAux(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind,
+ bool(*CheckRefState)(const RefState*)) const;
+
+ ///@{
+ /// Tells if a given family/call/symbol is tracked by the current checker.
+ bool isTrackedByCurrentChecker(AllocationFamily Family) const;
+ bool isTrackedByCurrentChecker(CheckerContext &C,
+ const Stmt *AllocDeallocStmt) const;
+ bool isTrackedByCurrentChecker(CheckerContext &C, SymbolRef Sym) const;
+ ///@}
static bool SummarizeValue(raw_ostream &os, SVal V);
static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
- void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange range) const;
- void ReportOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range)const;
+ void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *DeallocExpr) const;
+ void ReportMismatchedDealloc(CheckerContext &C, SourceRange Range,
+ const Expr *DeallocExpr, const RefState *RS,
+ SymbolRef Sym) const;
+ void ReportOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *DeallocExpr,
+ const Expr *AllocExpr = 0) const;
+ void ReportUseAfterFree(CheckerContext &C, SourceRange Range,
+ SymbolRef Sym) const;
+ void ReportDoubleFree(CheckerContext &C, SourceRange Range, bool Released,
+ SymbolRef Sym, SymbolRef PrevSym) const;
/// Find the location of the allocation for Sym on the path leading to the
/// exploded node N.
@@ -275,14 +359,14 @@ private:
inline bool isAllocated(const RefState *S, const RefState *SPrev,
const Stmt *Stmt) {
// Did not track -> allocated. Other state (released) -> allocated.
- return (Stmt && isa<CallExpr>(Stmt) &&
+ return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXNewExpr>(Stmt)) &&
(S && S->isAllocated()) && (!SPrev || !SPrev->isAllocated()));
}
inline bool isReleased(const RefState *S, const RefState *SPrev,
const Stmt *Stmt) {
// Did not track -> released. Other state (allocated) -> released.
- return (Stmt && isa<CallExpr>(Stmt) &&
+ return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXDeleteExpr>(Stmt)) &&
(S && S->isReleased()) && (!SPrev || !SPrev->isReleased()));
}
@@ -392,6 +476,9 @@ bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
if (isAllocationFunction(FD, C))
return true;
+ if (isStandardNewDelete(FD, C))
+ return true;
+
return false;
}
@@ -443,6 +530,39 @@ bool MallocChecker::isFreeFunction(const FunctionDecl *FD, ASTContext &C) const
return false;
}
+// Tells if the callee is one of the following:
+// 1) A global non-placement new/delete operator function.
+// 2) A global placement operator function with the single placement argument
+// of type std::nothrow_t.
+bool MallocChecker::isStandardNewDelete(const FunctionDecl *FD,
+ ASTContext &C) const {
+ if (!FD)
+ return false;
+
+ OverloadedOperatorKind Kind = FD->getOverloadedOperator();
+ if (Kind != OO_New && Kind != OO_Array_New &&
+ Kind != OO_Delete && Kind != OO_Array_Delete)
+ return false;
+
+ // Skip all operator new/delete methods.
+ if (isa<CXXMethodDecl>(FD))
+ return false;
+
+ // Return true if tested operator is a standard placement nothrow operator.
+ if (FD->getNumParams() == 2) {
+ QualType T = FD->getParamDecl(1)->getType();
+ if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
+ return II->getName().equals("nothrow_t");
+ }
+
+ // Skip placement operators.
+ if (FD->getNumParams() != 1 || FD->isVariadic())
+ return false;
+
+ // One of the standard new/new[]/delete/delete[] non-placement operators.
+ return true;
+}
+
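To illustrate which declarations the predicate above accepts and rejects, a few hypothetical examples (not from the patch):

    #include <cstddef>
    #include <new>  // for std::nothrow_t

    // Accepted: global, non-placement operators.
    void *operator new(std::size_t);
    void operator delete(void *) throw();

    // Accepted: the nothrow placement form (single placement argument of type std::nothrow_t).
    void *operator new(std::size_t, const std::nothrow_t &) throw();

    // Rejected: member operator new/delete.
    struct S { void *operator new(std::size_t); };

    // Rejected: any other placement form.
    void *operator new(std::size_t, int tag);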
void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
if (C.wasInlined)
return;
@@ -475,9 +595,26 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
} else if (FunI == II_strndup) {
State = MallocUpdateRefState(C, CE, State);
}
+ else if (isStandardNewDelete(FD, C.getASTContext())) {
+ // Process direct calls to operator new/new[]/delete/delete[] functions
+ // as distinct from new/new[]/delete/delete[] expressions that are
+ // processed by the checkPostStmt callbacks for CXXNewExpr and
+ // CXXDeleteExpr.
+ OverloadedOperatorKind K = FD->getOverloadedOperator();
+ if (K == OO_New)
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
+ AF_CXXNew);
+ else if (K == OO_Array_New)
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
+ AF_CXXNewArray);
+ else if (K == OO_Delete || K == OO_Array_Delete)
+ State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
+ else
+ llvm_unreachable("not a new/delete operator");
+ }
}
- if (Filter.CMallocOptimistic) {
+ if (Filter.CMallocOptimistic || Filter.CMismatchedDeallocatorChecker) {
// Check all the attributes, if there are any.
// There can be multiple of these attributes.
if (FD->hasAttrs())
@@ -499,6 +636,46 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
C.addTransition(State);
}
+void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
+ CheckerContext &C) const {
+
+ if (NE->getNumPlacementArgs())
+ for (CXXNewExpr::const_arg_iterator I = NE->placement_arg_begin(),
+ E = NE->placement_arg_end(); I != E; ++I)
+ if (SymbolRef Sym = C.getSVal(*I).getAsSymbol())
+ checkUseAfterFree(Sym, C, *I);
+
+ if (!isStandardNewDelete(NE->getOperatorNew(), C.getASTContext()))
+ return;
+
+ ProgramStateRef State = C.getState();
+ // The return value from operator new is bound to a specified initialization
+ // value (if any) and we don't want to loose this value. So we call
+  // value (if any) and we don't want to lose this value. So we call
+  // MallocUpdateRefState() instead of MallocMemAux() which breaks the
+ State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
+ : AF_CXXNew);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkPreStmt(const CXXDeleteExpr *DE,
+ CheckerContext &C) const {
+
+ if (!Filter.CNewDeleteChecker)
+ if (SymbolRef Sym = C.getSVal(DE->getArgument()).getAsSymbol())
+ checkUseAfterFree(Sym, C, DE->getArgument());
+
+ if (!isStandardNewDelete(DE->getOperatorDelete(), C.getASTContext()))
+ return;
+
+ ProgramStateRef State = C.getState();
+ bool ReleasedAllocated;
+ State = FreeMemAux(C, DE->getArgument(), DE, State,
+ /*Hold*/false, ReleasedAllocated);
+
+ C.addTransition(State);
+}
+
static bool isKnownDeallocObjCMethodName(const ObjCMethodCall &Call) {
// If the first selector piece is one of the names below, assume that the
// object takes ownership of the memory, promising to eventually deallocate it
@@ -562,7 +739,8 @@ ProgramStateRef MallocChecker::MallocMemReturnsAttr(CheckerContext &C,
ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
const CallExpr *CE,
SVal Size, SVal Init,
- ProgramStateRef state) {
+ ProgramStateRef State,
+ AllocationFamily Family) {
// Bind the return value to the symbolic value from the heap region.
// TODO: We could rewrite post visit to eval call; 'malloc' does not have
@@ -572,14 +750,14 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
DefinedSVal RetVal = svalBuilder.getConjuredHeapSymbolVal(CE, LCtx, Count)
.castAs<DefinedSVal>();
- state = state->BindExpr(CE, C.getLocationContext(), RetVal);
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
// We expect the malloc functions to return a pointer.
if (!RetVal.getAs<Loc>())
return 0;
// Fill the region with the initialization value.
- state = state->bindDefault(RetVal, Init);
+ State = State->bindDefault(RetVal, Init);
// Set the region's extent equal to the Size parameter.
const SymbolicRegion *R =
@@ -591,20 +769,21 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
SValBuilder &svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
DefinedOrUnknownSVal extentMatchesSize =
- svalBuilder.evalEQ(state, Extent, *DefinedSize);
+ svalBuilder.evalEQ(State, Extent, *DefinedSize);
- state = state->assume(extentMatchesSize, true);
- assert(state);
+ State = State->assume(extentMatchesSize, true);
+ assert(State);
}
- return MallocUpdateRefState(C, CE, state);
+ return MallocUpdateRefState(C, CE, State, Family);
}
ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C,
- const CallExpr *CE,
- ProgramStateRef state) {
+ const Expr *E,
+ ProgramStateRef State,
+ AllocationFamily Family) {
// Get the return value.
- SVal retVal = state->getSVal(CE, C.getLocationContext());
+ SVal retVal = State->getSVal(E, C.getLocationContext());
// We expect the malloc functions to return a pointer.
if (!retVal.getAs<Loc>())
@@ -614,8 +793,7 @@ ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C,
assert(Sym);
// Set the symbol's state to Allocated.
- return state->set<RegionState>(Sym, RefState::getAllocated(CE));
-
+ return State->set<RegionState>(Sym, RefState::getAllocated(Family, E));
}
ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
@@ -667,6 +845,107 @@ static bool didPreviousFreeFail(ProgramStateRef State,
return false;
}
+AllocationFamily MallocChecker::getAllocationFamily(CheckerContext &C,
+ const Stmt *S) const {
+ if (!S)
+ return AF_None;
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+
+ if (!FD)
+ FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
+
+ ASTContext &Ctx = C.getASTContext();
+
+ if (isAllocationFunction(FD, Ctx) || isFreeFunction(FD, Ctx))
+ return AF_Malloc;
+
+ if (isStandardNewDelete(FD, Ctx)) {
+ OverloadedOperatorKind Kind = FD->getOverloadedOperator();
+ if (Kind == OO_New || Kind == OO_Delete)
+ return AF_CXXNew;
+ else if (Kind == OO_Array_New || Kind == OO_Array_Delete)
+ return AF_CXXNewArray;
+ }
+
+ return AF_None;
+ }
+
+ if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(S))
+ return NE->isArray() ? AF_CXXNewArray : AF_CXXNew;
+
+ if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(S))
+ return DE->isArrayForm() ? AF_CXXNewArray : AF_CXXNew;
+
+ if (isa<ObjCMessageExpr>(S))
+ return AF_Malloc;
+
+ return AF_None;
+}
+
+bool MallocChecker::printAllocDeallocName(raw_ostream &os, CheckerContext &C,
+ const Expr *E) const {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ // FIXME: This doesn't handle indirect calls.
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return false;
+
+ os << *FD;
+ if (!FD->isOverloadedOperator())
+ os << "()";
+ return true;
+ }
+
+ if (const ObjCMessageExpr *Msg = dyn_cast<ObjCMessageExpr>(E)) {
+ if (Msg->isInstanceMessage())
+ os << "-";
+ else
+ os << "+";
+ os << Msg->getSelector().getAsString();
+ return true;
+ }
+
+ if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) {
+ os << "'"
+ << getOperatorSpelling(NE->getOperatorNew()->getOverloadedOperator())
+ << "'";
+ return true;
+ }
+
+ if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(E)) {
+ os << "'"
+ << getOperatorSpelling(DE->getOperatorDelete()->getOverloadedOperator())
+ << "'";
+ return true;
+ }
+
+ return false;
+}
+
+void MallocChecker::printExpectedAllocName(raw_ostream &os, CheckerContext &C,
+ const Expr *E) const {
+ AllocationFamily Family = getAllocationFamily(C, E);
+
+ switch(Family) {
+ case AF_Malloc: os << "malloc()"; return;
+ case AF_CXXNew: os << "'new'"; return;
+ case AF_CXXNewArray: os << "'new[]'"; return;
+ case AF_None: llvm_unreachable("not a deallocation expression");
+ }
+}
+
+void MallocChecker::printExpectedDeallocName(raw_ostream &os,
+ AllocationFamily Family) const {
+ switch(Family) {
+ case AF_Malloc: os << "free()"; return;
+ case AF_CXXNew: os << "'delete'"; return;
+ case AF_CXXNewArray: os << "'delete[]'"; return;
+ case AF_None: llvm_unreachable("suspicious AF_None argument");
+ }
+}
+
ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
const Expr *ArgExpr,
const Expr *ParentExpr,
@@ -700,7 +979,7 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// Nonlocs can't be freed, of course.
// Non-region locations (labels and fixed addresses) also shouldn't be freed.
if (!R) {
- ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
return 0;
}
@@ -708,13 +987,14 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// Blocks might show up as heap data, but should not be free()d
if (isa<BlockDataRegion>(R)) {
- ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
return 0;
}
const MemSpaceRegion *MS = R->getMemorySpace();
- // Parameters, locals, statics, and globals shouldn't be freed.
+ // Parameters, locals, statics, globals, and memory returned by alloca()
+ // shouldn't be freed.
if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) {
// FIXME: at the time this code was written, malloc() regions were
// represented by conjured symbols, which are all in UnknownSpaceRegion.
@@ -724,7 +1004,7 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// function, so UnknownSpaceRegion is always a possibility.
// False negatives are better than false positives.
- ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
return 0;
}
@@ -738,38 +1018,40 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
const RefState *RsBase = State->get<RegionState>(SymBase);
SymbolRef PreviousRetStatusSymbol = 0;
- // Check double free.
- if (RsBase &&
- (RsBase->isReleased() || RsBase->isRelinquished()) &&
- !didPreviousFreeFail(State, SymBase, PreviousRetStatusSymbol)) {
-
- if (ExplodedNode *N = C.generateSink()) {
- if (!BT_DoubleFree)
- BT_DoubleFree.reset(
- new BugType("Double free", "Memory Error"));
- BugReport *R = new BugReport(*BT_DoubleFree,
- (RsBase->isReleased() ? "Attempt to free released memory"
- : "Attempt to free non-owned memory"),
- N);
- R->addRange(ArgExpr->getSourceRange());
- R->markInteresting(SymBase);
- if (PreviousRetStatusSymbol)
- R->markInteresting(PreviousRetStatusSymbol);
- R->addVisitor(new MallocBugVisitor(SymBase));
- C.emitReport(R);
- }
- return 0;
- }
+ if (RsBase) {
- // Check if the memory location being freed is the actual location
- // allocated, or an offset.
- RegionOffset Offset = R->getAsOffset();
- if (RsBase && RsBase->isAllocated() &&
- Offset.isValid() &&
- !Offset.hasSymbolicOffset() &&
- Offset.getOffset() != 0) {
- ReportOffsetFree(C, ArgVal, ArgExpr->getSourceRange());
- return 0;
+ // Check for double free first.
+ if ((RsBase->isReleased() || RsBase->isRelinquished()) &&
+ !didPreviousFreeFail(State, SymBase, PreviousRetStatusSymbol)) {
+ ReportDoubleFree(C, ParentExpr->getSourceRange(), RsBase->isReleased(),
+ SymBase, PreviousRetStatusSymbol);
+ return 0;
+
+ // If the pointer is allocated or escaped, but we are now trying to free it,
+ // check that the call to free is proper.
+ } else if (RsBase->isAllocated() || RsBase->isEscaped()) {
+
+ // Check if an expected deallocation function matches the real one.
+ bool DeallocMatchesAlloc =
+ RsBase->getAllocationFamily() == getAllocationFamily(C, ParentExpr);
+ if (!DeallocMatchesAlloc) {
+ ReportMismatchedDealloc(C, ArgExpr->getSourceRange(),
+ ParentExpr, RsBase, SymBase);
+ return 0;
+ }
+
+ // Check if the memory location being freed is the actual location
+ // allocated, or an offset.
+ RegionOffset Offset = R->getAsOffset();
+ if (Offset.isValid() &&
+ !Offset.hasSymbolicOffset() &&
+ Offset.getOffset() != 0) {
+ const Expr *AllocExpr = cast<Expr>(RsBase->getStmt());
+ ReportOffsetFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ AllocExpr);
+ return 0;
+ }
+ }
}
ReleasedAllocated = (RsBase != 0);
@@ -788,12 +1070,50 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
}
}
+ AllocationFamily Family = RsBase ? RsBase->getAllocationFamily()
+ : getAllocationFamily(C, ParentExpr);
// Normal free.
- if (Hold) {
+ if (Hold)
return State->set<RegionState>(SymBase,
- RefState::getRelinquished(ParentExpr));
+ RefState::getRelinquished(Family,
+ ParentExpr));
+
+ return State->set<RegionState>(SymBase,
+ RefState::getReleased(Family, ParentExpr));
+}
+
+bool MallocChecker::isTrackedByCurrentChecker(AllocationFamily Family) const {
+ switch (Family) {
+ case AF_Malloc: {
+ if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic)
+ return false;
+ return true;
+ }
+ case AF_CXXNew:
+ case AF_CXXNewArray: {
+ if (!Filter.CNewDeleteChecker)
+ return false;
+ return true;
+ }
+ case AF_None: {
+ llvm_unreachable("no family");
}
- return State->set<RegionState>(SymBase, RefState::getReleased(ParentExpr));
+ }
+ llvm_unreachable("unhandled family");
+}
+
+bool
+MallocChecker::isTrackedByCurrentChecker(CheckerContext &C,
+ const Stmt *AllocDeallocStmt) const {
+ return isTrackedByCurrentChecker(getAllocationFamily(C, AllocDeallocStmt));
+}
+
+bool MallocChecker::isTrackedByCurrentChecker(CheckerContext &C,
+ SymbolRef Sym) const {
+
+ const RefState *RS = C.getState()->get<RegionState>(Sym);
+ assert(RS);
+ return isTrackedByCurrentChecker(RS->getAllocationFamily());
}
bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
@@ -883,47 +1203,105 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os,
}
}
-void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
- SourceRange range) const {
+void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
+ SourceRange Range,
+ const Expr *DeallocExpr) const {
+
+ if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic &&
+ !Filter.CNewDeleteChecker)
+ return;
+
+ if (!isTrackedByCurrentChecker(C, DeallocExpr))
+ return;
+
if (ExplodedNode *N = C.generateSink()) {
if (!BT_BadFree)
BT_BadFree.reset(new BugType("Bad free", "Memory Error"));
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
-
+
const MemRegion *MR = ArgVal.getAsRegion();
- if (MR) {
- while (const ElementRegion *ER = dyn_cast<ElementRegion>(MR))
- MR = ER->getSuperRegion();
-
- // Special case for alloca()
- if (isa<AllocaRegion>(MR))
- os << "Argument to free() was allocated by alloca(), not malloc()";
- else {
- os << "Argument to free() is ";
- if (SummarizeRegion(os, MR))
- os << ", which is not memory allocated by malloc()";
- else
- os << "not memory allocated by malloc()";
- }
- } else {
- os << "Argument to free() is ";
- if (SummarizeValue(os, ArgVal))
- os << ", which is not memory allocated by malloc()";
+ while (const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(MR))
+ MR = ER->getSuperRegion();
+
+ if (MR && isa<AllocaRegion>(MR))
+ os << "Memory allocated by alloca() should not be deallocated";
+ else {
+ os << "Argument to ";
+ if (!printAllocDeallocName(os, C, DeallocExpr))
+ os << "deallocator";
+
+ os << " is ";
+ bool Summarized = MR ? SummarizeRegion(os, MR)
+ : SummarizeValue(os, ArgVal);
+ if (Summarized)
+ os << ", which is not memory allocated by ";
else
- os << "not memory allocated by malloc()";
+ os << "not memory allocated by ";
+
+ printExpectedAllocName(os, C, DeallocExpr);
}
-
+
BugReport *R = new BugReport(*BT_BadFree, os.str(), N);
R->markInteresting(MR);
- R->addRange(range);
+ R->addRange(Range);
+ C.emitReport(R);
+ }
+}
+
+void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
+ SourceRange Range,
+ const Expr *DeallocExpr,
+ const RefState *RS,
+ SymbolRef Sym) const {
+
+ if (!Filter.CMismatchedDeallocatorChecker)
+ return;
+
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT_MismatchedDealloc)
+ BT_MismatchedDealloc.reset(new BugType("Bad deallocator",
+ "Memory Error"));
+
+ SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ const Expr *AllocExpr = cast<Expr>(RS->getStmt());
+ SmallString<20> AllocBuf;
+ llvm::raw_svector_ostream AllocOs(AllocBuf);
+ SmallString<20> DeallocBuf;
+ llvm::raw_svector_ostream DeallocOs(DeallocBuf);
+
+ os << "Memory";
+ if (printAllocDeallocName(AllocOs, C, AllocExpr))
+ os << " allocated by " << AllocOs.str();
+
+ os << " should be deallocated by ";
+ printExpectedDeallocName(os, RS->getAllocationFamily());
+
+ if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
+ os << ", not " << DeallocOs.str();
+
+ BugReport *R = new BugReport(*BT_MismatchedDealloc, os.str(), N);
+ R->markInteresting(Sym);
+ R->addRange(Range);
+ R->addVisitor(new MallocBugVisitor(Sym));
C.emitReport(R);
}
}
void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
- SourceRange Range) const {
+ SourceRange Range, const Expr *DeallocExpr,
+ const Expr *AllocExpr) const {
+
+ if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic &&
+ !Filter.CNewDeleteChecker)
+ return;
+
+ if (!isTrackedByCurrentChecker(C, AllocExpr))
+ return;
+
ExplodedNode *N = C.generateSink();
if (N == NULL)
return;
@@ -933,6 +1311,8 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
+ SmallString<20> AllocNameBuf;
+ llvm::raw_svector_ostream AllocNameOs(AllocNameBuf);
const MemRegion *MR = ArgVal.getAsRegion();
assert(MR && "Only MemRegion based symbols can have offset free errors");
@@ -945,11 +1325,18 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
int offsetBytes = Offset.getOffset() / C.getASTContext().getCharWidth();
- os << "Argument to free() is offset by "
+ os << "Argument to ";
+ if (!printAllocDeallocName(os, C, DeallocExpr))
+ os << "deallocator";
+ os << " is offset by "
<< offsetBytes
<< " "
<< ((abs(offsetBytes) > 1) ? "bytes" : "byte")
- << " from the start of memory allocated by malloc()";
+ << " from the start of ";
+ if (AllocExpr && printAllocDeallocName(AllocNameOs, C, AllocExpr))
+ os << "memory allocated by " << AllocNameOs.str();
+ else
+ os << "allocated memory";
BugReport *R = new BugReport(*BT_OffsetFree, os.str(), N);
R->markInteresting(MR->getBaseRegion());
@@ -957,6 +1344,58 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
C.emitReport(R);
}
+void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
+ SymbolRef Sym) const {
+
+ if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic &&
+ !Filter.CNewDeleteChecker)
+ return;
+
+ if (!isTrackedByCurrentChecker(C, Sym))
+ return;
+
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT_UseFree)
+ BT_UseFree.reset(new BugType("Use-after-free", "Memory Error"));
+
+ BugReport *R = new BugReport(*BT_UseFree,
+ "Use of memory after it is freed", N);
+
+ R->markInteresting(Sym);
+ R->addRange(Range);
+ R->addVisitor(new MallocBugVisitor(Sym));
+ C.emitReport(R);
+ }
+}
+
+void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
+ bool Released, SymbolRef Sym,
+ SymbolRef PrevSym) const {
+
+ if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic &&
+ !Filter.CNewDeleteChecker)
+ return;
+
+ if (!isTrackedByCurrentChecker(C, Sym))
+ return;
+
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT_DoubleFree)
+ BT_DoubleFree.reset(new BugType("Double free", "Memory Error"));
+
+ BugReport *R = new BugReport(*BT_DoubleFree,
+ (Released ? "Attempt to free released memory"
+ : "Attempt to free non-owned memory"),
+ N);
+ R->addRange(Range);
+ R->markInteresting(Sym);
+ if (PrevSym)
+ R->markInteresting(PrevSym);
+ R->addVisitor(new MallocBugVisitor(Sym));
+ C.emitReport(R);
+ }
+}
+
ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
const CallExpr *CE,
bool FreesOnFail) const {
@@ -1091,13 +1530,19 @@ MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
// Find the most recent expression bound to the symbol in the current
// context.
- if (!ReferenceRegion) {
- if (const MemRegion *MR = C.getLocationRegionIfPostStore(N)) {
- SVal Val = State->getSVal(MR);
- if (Val.getAsLocSymbol() == Sym)
- ReferenceRegion = MR;
+ if (!ReferenceRegion) {
+ if (const MemRegion *MR = C.getLocationRegionIfPostStore(N)) {
+ SVal Val = State->getSVal(MR);
+ if (Val.getAsLocSymbol() == Sym) {
+ const VarRegion* VR = MR->getBaseRegion()->getAs<VarRegion>();
+ // Do not show local variables belonging to a function other than
+ // where the error is reported.
+ if (!VR ||
+ (VR->getStackFrame() == LeakContext->getCurrentStackFrame()))
+ ReferenceRegion = MR;
+ }
+ }
}
- }
// Allocation node, is the last node in the current context in which the
// symbol was tracked.
@@ -1111,6 +1556,23 @@ MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
CheckerContext &C) const {
+
+ if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic &&
+ !Filter.CNewDeleteLeaksChecker)
+ return;
+
+ const RefState *RS = C.getState()->get<RegionState>(Sym);
+ assert(RS && "cannot leak an untracked symbol");
+ AllocationFamily Family = RS->getAllocationFamily();
+ if (!isTrackedByCurrentChecker(Family))
+ return;
+
+ // Special case for new and new[]; these are controlled by a separate checker
+ // flag so that they can be selectively disabled.
+ if (Family == AF_CXXNew || Family == AF_CXXNewArray)
+ if (!Filter.CNewDeleteLeaksChecker)
+ return;
+
assert(N);
if (!BT_Leak) {
BT_Leak.reset(new BugType("Memory leak", "Memory Error"));
@@ -1143,11 +1605,11 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
- os << "Memory is never released; potential leak";
if (Region && Region->canPrintPretty()) {
- os << " of memory pointed to by '";
+ os << "Potential leak of memory pointed to by ";
Region->printPretty(os);
- os << '\'';
+ } else {
+ os << "Potential memory leak";
}
BugReport *R = new BugReport(*BT_Leak, os.str(), N,
@@ -1211,21 +1673,39 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
C.addTransition(state->set<RegionState>(RS), N);
}
-void MallocChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
+void MallocChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+
// We will check for double free in the post visit.
- if (isFreeFunction(C.getCalleeDecl(CE), C.getASTContext()))
- return;
+ if (const AnyFunctionCall *FC = dyn_cast<AnyFunctionCall>(&Call)) {
+ const FunctionDecl *FD = FC->getDecl();
+ if (!FD)
+ return;
- // Check use after free, when a freed pointer is passed to a call.
- ProgramStateRef State = C.getState();
- for (CallExpr::const_arg_iterator I = CE->arg_begin(),
- E = CE->arg_end(); I != E; ++I) {
- const Expr *A = *I;
- if (A->getType().getTypePtr()->isAnyPointerType()) {
- SymbolRef Sym = State->getSVal(A, C.getLocationContext()).getAsSymbol();
+ if ((Filter.CMallocOptimistic || Filter.CMallocPessimistic) &&
+ isFreeFunction(FD, C.getASTContext()))
+ return;
+
+ if (Filter.CNewDeleteChecker &&
+ isStandardNewDelete(FD, C.getASTContext()))
+ return;
+ }
+
+  // Check whether the object a method is called on has already been deleted.
+ if (const CXXInstanceCall *CC = dyn_cast<CXXInstanceCall>(&Call)) {
+ SymbolRef Sym = CC->getCXXThisVal().getAsSymbol();
+ if (!Sym || checkUseAfterFree(Sym, C, CC->getCXXThisExpr()))
+ return;
+ }
+
+ // Check arguments for being used after free.
+ for (unsigned I = 0, E = Call.getNumArgs(); I != E; ++I) {
+ SVal ArgSVal = Call.getArgSVal(I);
+ if (ArgSVal.getAs<Loc>()) {
+ SymbolRef Sym = ArgSVal.getAsSymbol();
if (!Sym)
continue;
- if (checkUseAfterFree(Sym, C, A))
+ if (checkUseAfterFree(Sym, C, Call.getArgExpr(I)))
return;
}
}
@@ -1303,21 +1783,12 @@ bool MallocChecker::isReleased(SymbolRef Sym, CheckerContext &C) const {
bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
const Stmt *S) const {
+
if (isReleased(Sym, C)) {
- if (ExplodedNode *N = C.generateSink()) {
- if (!BT_UseFree)
- BT_UseFree.reset(new BugType("Use-after-free", "Memory Error"));
-
- BugReport *R = new BugReport(*BT_UseFree,
- "Use of memory after it is freed",N);
- if (S)
- R->addRange(S->getSourceRange());
- R->markInteresting(Sym);
- R->addVisitor(new MallocBugVisitor(Sym));
- C.emitReport(R);
- return true;
- }
+ ReportUseAfterFree(C, S->getSourceRange(), Sym);
+ return true;
}
+
return false;
}
@@ -1358,7 +1829,7 @@ ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
if (RS->isReleased()) {
if (I.getData().Kind == RPToBeFreedAfterFailure)
state = state->set<RegionState>(ReallocSym,
- RefState::getAllocated(RS->getStmt()));
+ RefState::getAllocated(RS->getAllocationFamily(), RS->getStmt()));
else if (I.getData().Kind == RPDoNotTrackAfterFailure)
state = state->remove<RegionState>(ReallocSym);
else
@@ -1510,10 +1981,35 @@ bool MallocChecker::doesNotFreeMemOrInteresting(const CallEvent *Call,
return true;
}
+static bool retTrue(const RefState *RS) {
+ return true;
+}
+
+static bool checkIfNewOrNewArrayFamily(const RefState *RS) {
+ return (RS->getAllocationFamily() == AF_CXXNewArray ||
+ RS->getAllocationFamily() == AF_CXXNew);
+}
+
ProgramStateRef MallocChecker::checkPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const {
+ return checkPointerEscapeAux(State, Escaped, Call, Kind, &retTrue);
+}
+
+ProgramStateRef MallocChecker::checkConstPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const {
+ return checkPointerEscapeAux(State, Escaped, Call, Kind,
+ &checkIfNewOrNewArrayFamily);
+}
+
+ProgramStateRef MallocChecker::checkPointerEscapeAux(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind,
+ bool(*CheckRefState)(const RefState*)) const {
// If we know that the call does not free memory, or we want to process the
// call later, keep tracking the top level arguments.
if ((Kind == PSK_DirectEscapeOnCall ||
@@ -1523,13 +2019,15 @@ ProgramStateRef MallocChecker::checkPointerEscape(ProgramStateRef State,
}
for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end();
- I != E; ++I) {
+ E = Escaped.end();
+ I != E; ++I) {
SymbolRef sym = *I;
if (const RefState *RS = State->get<RegionState>(sym)) {
- if (RS->isAllocated())
+ if (RS->isAllocated() && CheckRefState(RS)) {
State = State->remove<RegionState>(sym);
+ State = State->set<RegionState>(sym, RefState::getEscaped(RS));
+ }
}
}
return State;
@@ -1594,7 +2092,7 @@ MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
} else if (isReleased(RS, RSPrev, S)) {
Msg = "Memory is released";
StackHint = new StackHintGeneratorForSymbol(Sym,
- "Returned released memory");
+ "Returning; memory was released");
} else if (isRelinquished(RS, RSPrev, S)) {
Msg = "Memory ownership is transfered";
StackHint = new StackHintGeneratorForSymbol(Sym, "");
@@ -1654,6 +2152,14 @@ void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State,
}
}
+void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
+ registerCStringCheckerBasic(mgr);
+ mgr.registerChecker<MallocChecker>()->Filter.CNewDeleteLeaksChecker = true;
+ // We currently treat NewDeleteLeaks checker as a subchecker of NewDelete
+ // checker.
+ mgr.registerChecker<MallocChecker>()->Filter.CNewDeleteChecker = true;
+}
+
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) {\
registerCStringCheckerBasic(mgr); \
@@ -1662,3 +2168,5 @@ void ento::register##name(CheckerManager &mgr) {\
REGISTER_CHECKER(MallocPessimistic)
REGISTER_CHECKER(MallocOptimistic)
+REGISTER_CHECKER(NewDeleteChecker)
+REGISTER_CHECKER(MismatchedDeallocatorChecker)
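
The AllocationFamily plumbing above is what lets the new MismatchedDeallocatorChecker pair every allocation with a matching release. A rough sketch, not taken from the patch or its tests, of the mismatches FreeMemAux() can now tell apart:

#include <cstdlib>

void mismatchedDeallocations() {
  int *a = static_cast<int *>(std::malloc(sizeof(int)));
  delete a;          // malloc()'d memory released with 'delete'

  int *b = new int[4];
  std::free(b);      // 'new[]' memory released with free()

  int *c = new int;
  delete[] c;        // 'new' paired with 'delete[]' -- a different family
}
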
diff --git a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index ce7d4ccf7a..d29f34fb03 100644
--- a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -188,7 +188,7 @@ public:
for (CallExpr::const_arg_iterator ai = i->AllocCall->arg_begin(),
ae = i->AllocCall->arg_end(); ai != ae; ++ai) {
- if (!(*ai)->getType()->isIntegerType())
+ if (!(*ai)->getType()->isIntegralOrUnscopedEnumerationType())
continue;
SizeofFinder SFinder;
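
For context, MallocSizeofChecker flags allocations whose sizeof operand names a type unrelated to the pointer the result is stored in; the one-line change above only widens which argument types it scans to include unscoped enumerations. A made-up example of the underlying pattern:

#include <cstdlib>

struct Node { Node *next; double payload; };

Node *allocNode() {
  // The result is used as a 'Node *' pointing at Node objects, but the size
  // is computed from 'Node *' rather than 'Node' -- the mismatch this
  // checker reports.
  return (Node *)malloc(sizeof(Node *));
}
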
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
index 856463a981..0f456ea8d7 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -37,6 +37,8 @@
#include "llvm/ADT/StringExtras.h"
#include <cstdarg>
+#include "AllocationDiagnostics.h"
+
using namespace clang;
using namespace ento;
using llvm::StrInStrNoCase;
@@ -324,7 +326,7 @@ void RefVal::print(raw_ostream &Out) const {
break;
case RefVal::ErrorOverAutorelease:
- Out << "Over autoreleased";
+ Out << "Over-autoreleased";
break;
case RefVal::ErrorReturnedNotOwned:
@@ -782,6 +784,10 @@ public:
const RetainSummary *getStandardMethodSummary(const ObjCMethodDecl *MD,
Selector S, QualType RetTy);
+ /// Determine if there is a special return effect for this function or method.
+ Optional<RetEffect> getRetEffectFromAnnotations(QualType RetTy,
+ const Decl *D);
+
void updateSummaryFromAnnotations(const RetainSummary *&Summ,
const ObjCMethodDecl *MD);
@@ -1110,12 +1116,14 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
// correctly.
ScratchArgs = AF.add(ScratchArgs, 12, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
- } else if (FName == "dispatch_set_context") {
+ } else if (FName == "dispatch_set_context" ||
+ FName == "xpc_connection_set_context") {
// <rdar://problem/11059275> - The analyzer currently doesn't have
// a good way to reason about the finalizer function for libdispatch.
// If we pass a context object that is memory managed, stop tracking it.
+ // <rdar://problem/13783514> - Same problem, but for XPC.
// FIXME: this hack should possibly go away once we can handle
- // libdispatch finalizers.
+ // libdispatch and XPC finalizers.
ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
} else if (FName.startswith("NSLog")) {
@@ -1271,6 +1279,30 @@ RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
// Summary creation for Selectors.
//===----------------------------------------------------------------------===//
+Optional<RetEffect>
+RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
+ const Decl *D) {
+ if (cocoa::isCocoaObjectRef(RetTy)) {
+ if (D->getAttr<NSReturnsRetainedAttr>())
+ return ObjCAllocRetE;
+
+ if (D->getAttr<NSReturnsNotRetainedAttr>() ||
+ D->getAttr<NSReturnsAutoreleasedAttr>())
+ return RetEffect::MakeNotOwned(RetEffect::ObjC);
+
+ } else if (!RetTy->isPointerType()) {
+ return None;
+ }
+
+ if (D->getAttr<CFReturnsRetainedAttr>())
+ return RetEffect::MakeOwned(RetEffect::CF, true);
+
+ if (D->getAttr<CFReturnsNotRetainedAttr>())
+ return RetEffect::MakeNotOwned(RetEffect::CF);
+
+ return None;
+}
+
void
RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
const FunctionDecl *FD) {
@@ -1285,40 +1317,15 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
for (FunctionDecl::param_const_iterator pi = FD->param_begin(),
pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
const ParmVarDecl *pd = *pi;
- if (pd->getAttr<NSConsumedAttr>()) {
- if (!GCEnabled) {
- Template->addArg(AF, parm_idx, DecRef);
- }
- } else if (pd->getAttr<CFConsumedAttr>()) {
+ if (pd->getAttr<NSConsumedAttr>())
+ Template->addArg(AF, parm_idx, DecRefMsg);
+ else if (pd->getAttr<CFConsumedAttr>())
Template->addArg(AF, parm_idx, DecRef);
- }
}
QualType RetTy = FD->getResultType();
-
- // Determine if there is a special return effect for this method.
- if (cocoa::isCocoaObjectRef(RetTy)) {
- if (FD->getAttr<NSReturnsRetainedAttr>()) {
- Template->setRetEffect(ObjCAllocRetE);
- }
- else if (FD->getAttr<CFReturnsRetainedAttr>()) {
- Template->setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
- }
- else if (FD->getAttr<NSReturnsNotRetainedAttr>() ||
- FD->getAttr<NSReturnsAutoreleasedAttr>()) {
- Template->setRetEffect(RetEffect::MakeNotOwned(RetEffect::ObjC));
- }
- else if (FD->getAttr<CFReturnsNotRetainedAttr>())
- Template->setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
- }
- else if (RetTy->getAs<PointerType>()) {
- if (FD->getAttr<CFReturnsRetainedAttr>()) {
- Template->setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
- }
- else if (FD->getAttr<CFReturnsNotRetainedAttr>()) {
- Template->setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
- }
- }
+ if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, FD))
+ Template->setRetEffect(*RetE);
}
void
@@ -1329,13 +1336,10 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
assert(Summ && "Must have a valid summary to add annotations to");
RetainSummaryTemplate Template(Summ, *this);
- bool isTrackedLoc = false;
// Effects on the receiver.
- if (MD->getAttr<NSConsumesSelfAttr>()) {
- if (!GCEnabled)
- Template->setReceiverEffect(DecRefMsg);
- }
+ if (MD->getAttr<NSConsumesSelfAttr>())
+ Template->setReceiverEffect(DecRefMsg);
// Effects on the parameters.
unsigned parm_idx = 0;
@@ -1343,38 +1347,16 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
pi=MD->param_begin(), pe=MD->param_end();
pi != pe; ++pi, ++parm_idx) {
const ParmVarDecl *pd = *pi;
- if (pd->getAttr<NSConsumedAttr>()) {
- if (!GCEnabled)
- Template->addArg(AF, parm_idx, DecRef);
- }
- else if(pd->getAttr<CFConsumedAttr>()) {
+ if (pd->getAttr<NSConsumedAttr>())
+ Template->addArg(AF, parm_idx, DecRefMsg);
+ else if (pd->getAttr<CFConsumedAttr>()) {
Template->addArg(AF, parm_idx, DecRef);
}
}
- // Determine if there is a special return effect for this method.
- if (cocoa::isCocoaObjectRef(MD->getResultType())) {
- if (MD->getAttr<NSReturnsRetainedAttr>()) {
- Template->setRetEffect(ObjCAllocRetE);
- return;
- }
- if (MD->getAttr<NSReturnsNotRetainedAttr>() ||
- MD->getAttr<NSReturnsAutoreleasedAttr>()) {
- Template->setRetEffect(RetEffect::MakeNotOwned(RetEffect::ObjC));
- return;
- }
-
- isTrackedLoc = true;
- } else {
- isTrackedLoc = MD->getResultType()->getAs<PointerType>() != NULL;
- }
-
- if (isTrackedLoc) {
- if (MD->getAttr<CFReturnsRetainedAttr>())
- Template->setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
- else if (MD->getAttr<CFReturnsNotRetainedAttr>())
- Template->setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
- }
+ QualType RetTy = MD->getResultType();
+ if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, MD))
+ Template->setRetEffect(*RetE);
}
const RetainSummary *
@@ -1682,10 +1664,10 @@ namespace {
class OverAutorelease : public CFRefBug {
public:
OverAutorelease()
- : CFRefBug("Object sent -autorelease too many times") {}
+ : CFRefBug("Object autoreleased too many times") {}
const char *getDescription() const {
- return "Object sent -autorelease too many times";
+ return "Object autoreleased too many times";
}
};
@@ -1795,11 +1777,11 @@ namespace {
class CFRefLeakReport : public CFRefReport {
const MemRegion* AllocBinding;
-
public:
CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
- CheckerContext &Ctx);
+ CheckerContext &Ctx,
+ bool IncludeAllocationLine);
PathDiagnosticLocation getLocation(const SourceManager &SM) const {
assert(Location.isValid());
@@ -2070,7 +2052,7 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
return 0;
assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
- os << "Object sent -autorelease message";
+ os << "Object autoreleased";
break;
}
@@ -2156,32 +2138,86 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
// Find the first node in the current function context that referred to the
// tracked symbol and the memory location that value was stored to. Note, the
// value is only reported if the allocation occurred in the same function as
-// the leak.
-static std::pair<const ExplodedNode*,const MemRegion*>
+// the leak. The function can also return a location context, which should be
+// treated as interesting.
+struct AllocationInfo {
+ const ExplodedNode* N;
+ const MemRegion *R;
+ const LocationContext *InterestingMethodContext;
+ AllocationInfo(const ExplodedNode *InN,
+ const MemRegion *InR,
+ const LocationContext *InInterestingMethodContext) :
+ N(InN), R(InR), InterestingMethodContext(InInterestingMethodContext) {}
+};
+
+static AllocationInfo
GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
SymbolRef Sym) {
- const ExplodedNode *Last = N;
+ const ExplodedNode *AllocationNode = N;
+ const ExplodedNode *AllocationNodeInCurrentContext = N;
const MemRegion* FirstBinding = 0;
const LocationContext *LeakContext = N->getLocationContext();
+ // The location context of the init method called on the leaked object, if
+ // available.
+ const LocationContext *InitMethodContext = 0;
+
while (N) {
ProgramStateRef St = N->getState();
+ const LocationContext *NContext = N->getLocationContext();
if (!getRefBinding(St, Sym))
break;
StoreManager::FindUniqueBinding FB(Sym);
StateMgr.iterBindings(St, FB);
- if (FB) FirstBinding = FB.getRegion();
-
- // Allocation node, is the last node in the current context in which the
- // symbol was tracked.
- if (N->getLocationContext() == LeakContext)
- Last = N;
+
+ if (FB) {
+ const MemRegion *R = FB.getRegion();
+ const VarRegion *VR = R->getBaseRegion()->getAs<VarRegion>();
+ // Do not show local variables belonging to a function other than
+ // where the error is reported.
+ if (!VR || VR->getStackFrame() == LeakContext->getCurrentStackFrame())
+ FirstBinding = R;
+ }
+
+ // AllocationNode is the last node in which the symbol was tracked.
+ AllocationNode = N;
+
+  // AllocationNodeInCurrentContext is the last node in the current context
+ // in which the symbol was tracked.
+ if (NContext == LeakContext)
+ AllocationNodeInCurrentContext = N;
+
+ // Find the last init that was called on the given symbol and store the
+ // init method's location context.
+ if (!InitMethodContext)
+ if (Optional<CallEnter> CEP = N->getLocation().getAs<CallEnter>()) {
+ const Stmt *CE = CEP->getCallExpr();
+ if (const ObjCMessageExpr *ME = dyn_cast_or_null<ObjCMessageExpr>(CE)) {
+ const Stmt *RecExpr = ME->getInstanceReceiver();
+ if (RecExpr) {
+ SVal RecV = St->getSVal(RecExpr, NContext);
+ if (ME->getMethodFamily() == OMF_init && RecV.getAsSymbol() == Sym)
+ InitMethodContext = CEP->getCalleeContext();
+ }
+ }
+ }
N = N->pred_empty() ? NULL : *(N->pred_begin());
}
+ // If we are reporting a leak of the object that was allocated with alloc,
+ // mark its init method as interesting.
+ const LocationContext *InterestingMethodContext = 0;
+ if (InitMethodContext) {
+ const ProgramPoint AllocPP = AllocationNode->getLocation();
+ if (Optional<StmtPoint> SP = AllocPP.getAs<StmtPoint>())
+ if (const ObjCMessageExpr *ME = SP->getStmtAs<ObjCMessageExpr>())
+ if (ME->getMethodFamily() == OMF_alloc)
+ InterestingMethodContext = InitMethodContext;
+ }
+
// If allocation happened in a function different from the leak node context,
// do not report the binding.
assert(N && "Could not find allocation node");
@@ -2189,7 +2225,9 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
FirstBinding = 0;
}
- return std::make_pair(Last, FirstBinding);
+ return AllocationInfo(AllocationNodeInCurrentContext,
+ FirstBinding,
+ InterestingMethodContext);
}
PathDiagnosticPiece*
@@ -2212,12 +2250,12 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
// We are reporting a leak. Walk up the graph to get to the first node where
// the symbol appeared, and also get the first VarDecl that tracked object
// is stored to.
- const ExplodedNode *AllocNode = 0;
- const MemRegion* FirstBinding = 0;
-
- llvm::tie(AllocNode, FirstBinding) =
+ AllocationInfo AllocI =
GetAllocationSite(BRC.getStateManager(), EndN, Sym);
+ const MemRegion* FirstBinding = AllocI.R;
+ BR.markInteresting(AllocI.InterestingMethodContext);
+
SourceManager& SM = BRC.getSourceManager();
// Compute an actual location for the leak. Sometimes a leak doesn't
@@ -2289,8 +2327,9 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
bool GCEnabled, const SummaryLogTy &Log,
ExplodedNode *n, SymbolRef sym,
- CheckerContext &Ctx)
-: CFRefReport(D, LOpts, GCEnabled, Log, n, sym, false) {
+ CheckerContext &Ctx,
+ bool IncludeAllocationLine)
+ : CFRefReport(D, LOpts, GCEnabled, Log, n, sym, false) {
// Most bug reports are cached at the location where they occurred.
// With leaks, we want to unique them by the location where they were
@@ -2304,9 +2343,13 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
const SourceManager& SMgr = Ctx.getSourceManager();
- llvm::tie(AllocNode, AllocBinding) = // Set AllocBinding.
+ AllocationInfo AllocI =
GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
+ AllocNode = AllocI.N;
+ AllocBinding = AllocI.R;
+ markInteresting(AllocI.InterestingMethodContext);
+
// Get the SourceLocation for the allocation site.
// FIXME: This will crash the analyzer if an allocation comes from an
// implicit call. (Currently there are no such allocations in Cocoa, though.)
@@ -2317,8 +2360,17 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
else
AllocStmt = P.castAs<PostStmt>().getStmt();
assert(AllocStmt && "All allocations must come from explicit calls");
- Location = PathDiagnosticLocation::createBegin(AllocStmt, SMgr,
- n->getLocationContext());
+
+ PathDiagnosticLocation AllocLocation =
+ PathDiagnosticLocation::createBegin(AllocStmt, SMgr,
+ AllocNode->getLocationContext());
+ Location = AllocLocation;
+
+  // Set the uniquing info, which will be used to unique the bug reports. The
+ // leaks should be uniqued on the allocation site.
+ UniqueingLocation = AllocLocation;
+ UniqueingDecl = AllocNode->getLocationContext()->getDecl();
+
// Fill in the description of the bug.
Description.clear();
llvm::raw_string_ostream os(Description);
@@ -2327,9 +2379,13 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
os << "(when using garbage collection) ";
os << "of an object";
- // FIXME: AllocBinding doesn't get populated for RegionStore yet.
- if (AllocBinding)
+ if (AllocBinding) {
os << " stored into '" << AllocBinding->getString() << '\'';
+ if (IncludeAllocationLine) {
+ FullSourceLoc SL(AllocStmt->getLocStart(), Ctx.getSourceManager());
+ os << " (allocated on line " << SL.getSpellingLineNumber() << ")";
+ }
+ }
addVisitor(new CFRefLeakReportVisitor(sym, GCEnabled, Log));
}
@@ -2370,8 +2426,14 @@ class RetainCountChecker
mutable SummaryLogTy SummaryLog;
mutable bool ShouldResetSummaryLog;
+ /// Optional setting to indicate if leak reports should include
+ /// the allocation line.
+ mutable bool IncludeAllocationLine;
+
public:
- RetainCountChecker() : ShouldResetSummaryLog(false) {}
+ RetainCountChecker(AnalyzerOptions &AO)
+ : ShouldResetSummaryLog(false),
+ IncludeAllocationLine(shouldIncludeAllocationSiteInLeakDiagnostics(AO)) {}
virtual ~RetainCountChecker() {
DeleteContainerSeconds(DeadSymbolTags);
@@ -3316,7 +3378,8 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
CFRefReport *report =
new CFRefLeakReport(*getLeakAtReturnBug(LOpts, GCEnabled),
LOpts, GCEnabled, SummaryLog,
- N, Sym, C);
+ N, Sym, C, IncludeAllocationLine);
+
C.emitReport(report);
}
}
@@ -3502,10 +3565,12 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
if (N) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
- os << "Object over-autoreleased: object was sent -autorelease ";
+ os << "Object was autoreleased ";
if (V.getAutoreleaseCount() > 1)
- os << V.getAutoreleaseCount() << " times ";
- os << "but the object has a +" << V.getCount() << " retain count";
+ os << V.getAutoreleaseCount() << " times but the object ";
+ else
+ os << "but ";
+ os << "has a +" << V.getCount() << " retain count";
if (!overAutorelease)
overAutorelease.reset(new OverAutorelease());
@@ -3556,7 +3621,8 @@ RetainCountChecker::processLeaks(ProgramStateRef state,
assert(BT && "BugType not initialized.");
CFRefLeakReport *report = new CFRefLeakReport(*BT, LOpts, GCEnabled,
- SummaryLog, N, *I, Ctx);
+ SummaryLog, N, *I, Ctx,
+ IncludeAllocationLine);
Ctx.emitReport(report);
}
}
@@ -3661,8 +3727,10 @@ void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
RefBindingsTy B = State->get<RefBindings>();
- if (!B.isEmpty())
- Out << Sep << NL;
+ if (B.isEmpty())
+ return;
+
+ Out << Sep << NL;
for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
Out << I->first << " : ";
@@ -3676,6 +3744,6 @@ void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
//===----------------------------------------------------------------------===//
void ento::registerRetainCountChecker(CheckerManager &Mgr) {
- Mgr.registerChecker<RetainCountChecker>();
+ Mgr.registerChecker<RetainCountChecker>(Mgr.getAnalyzerOptions());
}
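
getRetEffectFromAnnotations() above centralizes how the ownership annotations are read for both functions and Objective-C methods. The declarations below are a hedged sketch of those attributes in source form; CFStringRef and the function names are stand-ins, not part of the patch:

typedef const struct __CFString *CFStringRef;   // stand-in for the CF type

// Caller receives a +1 reference it must release.
__attribute__((cf_returns_retained))
CFStringRef copyWindowTitle(void);

// Caller receives a reference it does not own.
__attribute__((cf_returns_not_retained))
CFStringRef currentWindowTitle(void);

// The callee consumes (releases) the reference passed to it.
void adoptWindowTitle(CFStringRef __attribute__((cf_consumed)) title);
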
diff --git a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index 7a5d993601..ed96c401a7 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -55,8 +55,17 @@ void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
// void test() {
// return foo();
// }
- if (RT.isNull() || !RT->isVoidType())
- emitUndef(C, RetE);
+ if (!RT.isNull() && RT->isVoidType())
+ return;
+
+ // Not all blocks have explicitly-specified return types; if the return type
+ // is not available, but the return value expression has 'void' type, assume
+ // Sema already checked it.
+ if (RT.isNull() && isa<BlockDecl>(SFC->getDecl()) &&
+ RetE->getType()->isVoidType())
+ return;
+
+ emitUndef(C, RetE);
return;
}
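
The added guard concerns blocks whose return type Sema leaves unspecified: returning the result of a void call is already handled by Sema, so the path-sensitive check stays quiet. A minimal sketch of the shape that is exempted, assuming -fblocks and a made-up helper:

void logEvent(void);   // hypothetical helper

void scheduleWork() {
  // No explicit return type is written for the block; the return statement
  // forwards a 'void' expression, which the checker now leaves to Sema.
  void (^task)(void) = ^{
    return logEvent();
  };
  task();
}
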
diff --git a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 1c38ab0b18..ffdf2d54b4 100644
--- a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -400,9 +400,8 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
SymbolRef Sym = *I;
ProgramStateRef state = C.getState();
const StreamState *SS = state->get<StreamMap>(Sym);
- // TODO: Shouldn't we have a continue here?
if (!SS)
- return;
+ continue;
if (SS->isOpened()) {
ExplodedNode *N = C.generateSink();
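
Replacing the early return with continue lets the dead-symbol sweep keep scanning after it meets a symbol it is not tracking, so every leaked stream gets its own report. Illustrative code of the sort the sweep is meant to catch; the paths are placeholders:

#include <cstdio>

void droppedStreams() {
  FILE *log = std::fopen("/tmp/run.log", "w");    // placeholder path
  FILE *data = std::fopen("/tmp/run.dat", "w");   // placeholder path
  if (log)
    std::fputs("starting\n", log);
  if (data)
    std::fputs("0\n", data);
  // Neither handle is fclose()d before it goes out of scope; each open stream
  // is a separate leak the checkDeadSymbols() loop should report.
}
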
diff --git a/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp b/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
index 8b242404b3..57c9ed4ce2 100644
--- a/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
@@ -61,9 +61,11 @@ void ento::registerTraversalDumper(CheckerManager &mgr) {
//------------------------------------------------------------------------------
namespace {
-class CallDumper : public Checker< check::PreCall > {
+class CallDumper : public Checker< check::PreCall,
+ check::PostCall > {
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
};
}
@@ -80,6 +82,26 @@ void CallDumper::checkPreCall(const CallEvent &Call, CheckerContext &C) const {
Call.dump(llvm::outs());
}
+void CallDumper::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
+ const Expr *CallE = Call.getOriginExpr();
+ if (!CallE)
+ return;
+
+ unsigned Indentation = 0;
+ for (const LocationContext *LC = C.getLocationContext()->getParent();
+ LC != 0; LC = LC->getParent())
+ ++Indentation;
+
+ // It is mildly evil to print directly to llvm::outs() rather than emitting
+ // warnings, but this ensures things do not get filtered out by the rest of
+ // the static analyzer machinery.
+ llvm::outs().indent(Indentation);
+ if (Call.getResultType()->isVoidType())
+ llvm::outs() << "Returning void\n";
+ else
+ llvm::outs() << "Returning " << C.getSVal(CallE) << "\n";
+}
+
void ento::registerCallDumper(CheckerManager &mgr) {
mgr.registerChecker<CallDumper>();
}
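
The post-call hook mirrors the existing pre-call dump: once a call has been evaluated it prints an indented line with the returned SVal, or "Returning void" for void calls. A trivial pair of functions to run the dumper over; nothing below is specific to the patch:

int twice(int x) { return x + x; }

int answer() {
  // With the call-dumping debug checker enabled, the pre-call hook prints the
  // call itself and the new post-call hook prints a line such as "Returning 42".
  return twice(21);
}
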
diff --git a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index f0ca8a8312..93812f7148 100644
--- a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -91,7 +91,8 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
BugReport *R = new BugReport(*BT, os.str(), N);
if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
R->addRange(Ex->getSourceRange());
- R->addVisitor(new FindLastStoreBRVisitor(*V, VR));
+ R->addVisitor(new FindLastStoreBRVisitor(*V, VR,
+ /*EnableNullFPSuppression*/false));
R->disablePathPruning();
// need location of block
C.emitReport(R);
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index be3a34f3ea..176ee48082 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -34,18 +34,28 @@ public:
void
UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
CheckerContext &C) const {
- if (C.getState()->getSVal(A->getIdx(), C.getLocationContext()).isUndef()) {
- if (ExplodedNode *N = C.generateSink()) {
- if (!BT)
- BT.reset(new BuiltinBug("Array subscript is undefined"));
-
- // Generate a report for this bug.
- BugReport *R = new BugReport(*BT, BT->getName(), N);
- R->addRange(A->getIdx()->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, A->getIdx(), *R);
- C.emitReport(R);
- }
- }
+ const Expr *Index = A->getIdx();
+ if (!C.getSVal(Index).isUndef())
+ return;
+
+ // Sema generates anonymous array variables for copying array struct fields.
+ // Don't warn if we're in an implicitly-generated constructor.
+ const Decl *D = C.getLocationContext()->getDecl();
+ if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D))
+ if (Ctor->isImplicitlyDefined())
+ return;
+
+ ExplodedNode *N = C.generateSink();
+ if (!N)
+ return;
+ if (!BT)
+ BT.reset(new BuiltinBug("Array subscript is undefined"));
+
+ // Generate a report for this bug.
+ BugReport *R = new BugReport(*BT, BT->getName(), N);
+ R->addRange(A->getIdx()->getSourceRange());
+ bugreporter::trackNullOrUndefValue(N, A->getIdx(), *R);
+ C.emitReport(R);
}
void ento::registerUndefinedArraySubscriptChecker(CheckerManager &mgr) {
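
The early-outs above exist because Sema materializes element-wise copies of array members inside implicitly-defined constructors, so an undefined-subscript warning there would point at code the user never wrote. A small sketch of the shape that is now skipped:

struct Buffer {
  int slots[16];   // copied element by element by the implicit copy constructor
};

Buffer duplicate(const Buffer &src) {
  // The array subscripts for the member-wise copy are generated inside an
  // implicitly-defined constructor, which checkPreStmt() now ignores.
  return Buffer(src);
}
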
diff --git a/lib/StaticAnalyzer/Core/APSIntType.cpp b/lib/StaticAnalyzer/Core/APSIntType.cpp
index 884b0faa9e..c7e9526821 100644
--- a/lib/StaticAnalyzer/Core/APSIntType.cpp
+++ b/lib/StaticAnalyzer/Core/APSIntType.cpp
@@ -13,20 +13,31 @@ using namespace clang;
using namespace ento;
APSIntType::RangeTestResultKind
-APSIntType::testInRange(const llvm::APSInt &Value) const {
+APSIntType::testInRange(const llvm::APSInt &Value,
+ bool AllowSignConversions) const {
+
// Negative numbers cannot be losslessly converted to unsigned type.
- if (IsUnsigned && Value.isSigned() && Value.isNegative())
+ if (IsUnsigned && !AllowSignConversions &&
+ Value.isSigned() && Value.isNegative())
return RTR_Below;
- // Signed integers can be converted to signed integers of the same width
- // or (if positive) unsigned integers with one fewer bit.
- // Unsigned integers can be converted to unsigned integers of the same width
- // or signed integers with one more bit.
unsigned MinBits;
- if (Value.isSigned())
- MinBits = Value.getMinSignedBits() - IsUnsigned;
- else
- MinBits = Value.getActiveBits() + !IsUnsigned;
+ if (AllowSignConversions) {
+ if (Value.isSigned() && !IsUnsigned)
+ MinBits = Value.getMinSignedBits();
+ else
+ MinBits = Value.getActiveBits();
+
+ } else {
+ // Signed integers can be converted to signed integers of the same width
+ // or (if positive) unsigned integers with one fewer bit.
+ // Unsigned integers can be converted to unsigned integers of the same width
+ // or signed integers with one more bit.
+ if (Value.isSigned())
+ MinBits = Value.getMinSignedBits() - IsUnsigned;
+ else
+ MinBits = Value.getActiveBits() + !IsUnsigned;
+ }
if (MinBits <= BitWidth)
return RTR_Within;
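
The AllowSignConversions flag makes testInRange() accept a value whenever its bit pattern fits the target width, even if the sign changes. A hedged sketch of the difference, using the analyzer's APSIntType directly:

#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "llvm/ADT/APSInt.h"

void signConversionExample() {
  using clang::ento::APSIntType;

  llvm::APSInt MinusOne(llvm::APInt(32, -1, /*isSigned=*/true),
                        /*isUnsigned=*/false);
  APSIntType UnsignedInt(32, /*Unsigned=*/true);

  // Strict: a negative signed value cannot be losslessly made unsigned.
  APSIntType::RangeTestResultKind Strict =
      UnsignedInt.testInRange(MinusOne, /*AllowSignConversions=*/false);  // RTR_Below

  // Permissive: the 32-bit pattern of -1 is representable as 0xFFFFFFFF.
  APSIntType::RangeTestResultKind Loose =
      UnsignedInt.testInRange(MinusOne, /*AllowSignConversions=*/true);   // RTR_Within
  (void)Strict;
  (void)Loose;
}
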
diff --git a/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index 011d4c09a2..747b73c416 100644
--- a/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -25,7 +25,8 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
/*AddImplicitDtors=*/true,
/*AddInitializers=*/true,
Options.includeTemporaryDtorsInCFG(),
- Options.shouldSynthesizeBodies()),
+ Options.shouldSynthesizeBodies(),
+ Options.shouldConditionalizeStaticInitializers()),
Ctx(ctx),
Diags(diags),
LangOpts(lang),
diff --git a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index dca68f71ab..ae707395fc 100644
--- a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -74,7 +74,7 @@ AnalyzerOptions::mayInlineCXXMemberFunction(CXXInlineableMemberKind K) {
static const char *ModeKey = "c++-inlining";
StringRef ModeStr(Config.GetOrCreateValue(ModeKey,
- "constructors").getValue());
+ "destructors").getValue());
CXXInlineableMemberKind &MutableMode =
const_cast<CXXInlineableMemberKind &>(CXXMemberInliningMode);
@@ -134,6 +134,13 @@ bool AnalyzerOptions::mayInlineTemplateFunctions() {
/*Default=*/true);
}
+bool AnalyzerOptions::mayInlineCXXContainerCtorsAndDtors() {
+ return getBooleanOption(InlineCXXContainerCtorsAndDtors,
+ "c++-container-inlining",
+ /*Default=*/false);
+}
+
+
bool AnalyzerOptions::mayInlineObjCMethod() {
return getBooleanOption(ObjCInliningMode,
"objc-inlining",
@@ -158,6 +165,12 @@ bool AnalyzerOptions::shouldSuppressInlinedDefensiveChecks() {
/* Default = */ true);
}
+bool AnalyzerOptions::shouldSuppressFromCXXStandardLibrary() {
+ return getBooleanOption(SuppressFromCXXStandardLibrary,
+ "suppress-c++-stdlib",
+ /* Default = */ false);
+}
+
int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal) {
SmallString<10> StrBuf;
llvm::raw_svector_ostream OS(StrBuf);
@@ -236,3 +249,8 @@ bool AnalyzerOptions::shouldSynthesizeBodies() {
bool AnalyzerOptions::shouldPrunePaths() {
return getBooleanOption("prune-paths", true);
}
+
+bool AnalyzerOptions::shouldConditionalizeStaticInitializers() {
+ return getBooleanOption("cfg-conditional-static-initializers", true);
+}
+
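
shouldConditionalizeStaticInitializers() feeds CFG construction (see the AnalysisManager change above): when it is on, the initializer of a function-local static is modeled as running only on the path where it has not run yet, instead of on every call. A small sketch of the code shape governed by the cfg-conditional-static-initializers option; expensiveDefault() is a made-up helper:

int expensiveDefault();   // hypothetical helper, defined elsewhere

int cachedValue() {
  // With conditional static initializers enabled, the analyzer's CFG guards
  // this initializer with an implicit "already initialized?" branch rather
  // than treating it as a plain statement on every path.
  static int value = expensiveDefault();
  return value;
}
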
diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp
index 0729b5e842..a85235c3e4 100644
--- a/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -12,6 +12,8 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "BugReporter"
+
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
@@ -29,12 +31,19 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
#include <queue>
using namespace clang;
using namespace ento;
+STATISTIC(MaxBugClassSize,
+ "The maximum number of bug reports in the same equivalence class");
+STATISTIC(MaxValidBugClassSize,
+ "The maximum number of bug reports in the same equivalence class "
+ "where at least one report is valid (not suppressed)");
+
BugReporterVisitor::~BugReporterVisitor() {}
void BugReporterContext::anchor() {}
@@ -43,77 +52,22 @@ void BugReporterContext::anchor() {}
// Helper routines for walking the ExplodedGraph and fetching statements.
//===----------------------------------------------------------------------===//
-static inline const Stmt *GetStmt(const ProgramPoint &P) {
- if (Optional<StmtPoint> SP = P.getAs<StmtPoint>())
- return SP->getStmt();
- if (Optional<BlockEdge> BE = P.getAs<BlockEdge>())
- return BE->getSrc()->getTerminator();
- if (Optional<CallEnter> CE = P.getAs<CallEnter>())
- return CE->getCallExpr();
- if (Optional<CallExitEnd> CEE = P.getAs<CallExitEnd>())
- return CEE->getCalleeContext()->getCallSite();
-
- return 0;
-}
-
-static inline const ExplodedNode*
-GetPredecessorNode(const ExplodedNode *N) {
- return N->pred_empty() ? NULL : *(N->pred_begin());
-}
-
-static inline const ExplodedNode*
-GetSuccessorNode(const ExplodedNode *N) {
- return N->succ_empty() ? NULL : *(N->succ_begin());
-}
-
static const Stmt *GetPreviousStmt(const ExplodedNode *N) {
- for (N = GetPredecessorNode(N); N; N = GetPredecessorNode(N))
- if (const Stmt *S = GetStmt(N->getLocation()))
- return S;
-
- return 0;
-}
-
-static const Stmt *GetNextStmt(const ExplodedNode *N) {
- for (N = GetSuccessorNode(N); N; N = GetSuccessorNode(N))
- if (const Stmt *S = GetStmt(N->getLocation())) {
- // Check if the statement is '?' or '&&'/'||'. These are "merges",
- // not actual statement points.
- switch (S->getStmtClass()) {
- case Stmt::ChooseExprClass:
- case Stmt::BinaryConditionalOperatorClass: continue;
- case Stmt::ConditionalOperatorClass: continue;
- case Stmt::BinaryOperatorClass: {
- BinaryOperatorKind Op = cast<BinaryOperator>(S)->getOpcode();
- if (Op == BO_LAnd || Op == BO_LOr)
- continue;
- break;
- }
- default:
- break;
- }
+ for (N = N->getFirstPred(); N; N = N->getFirstPred())
+ if (const Stmt *S = PathDiagnosticLocation::getStmt(N))
return S;
- }
return 0;
}
static inline const Stmt*
GetCurrentOrPreviousStmt(const ExplodedNode *N) {
- if (const Stmt *S = GetStmt(N->getLocation()))
+ if (const Stmt *S = PathDiagnosticLocation::getStmt(N))
return S;
return GetPreviousStmt(N);
}
-static inline const Stmt*
-GetCurrentOrNextStmt(const ExplodedNode *N) {
- if (const Stmt *S = GetStmt(N->getLocation()))
- return S;
-
- return GetNextStmt(N);
-}
-
//===----------------------------------------------------------------------===//
// Diagnostic cleanup.
//===----------------------------------------------------------------------===//
@@ -189,10 +143,16 @@ static void removeRedundantMsgs(PathPieces &path) {
}
}
+/// A map from a call piece's PathPieces to the LocationContext of the inlined
+/// function call it represents.
+typedef llvm::DenseMap<const PathPieces *, const LocationContext *>
+ LocationContextMap;
+
/// Recursively scan through a path and prune out calls and macros pieces
/// that aren't needed. Return true if afterwards the path contains
/// "interesting stuff" which means it shouldn't be pruned from the parent path.
-bool BugReporter::RemoveUnneededCalls(PathPieces &pieces, BugReport *R) {
+static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
+ LocationContextMap &LCM) {
bool containsSomethingInteresting = false;
const unsigned N = pieces.size();
@@ -213,13 +173,13 @@ bool BugReporter::RemoveUnneededCalls(PathPieces &pieces, BugReport *R) {
case PathDiagnosticPiece::Call: {
PathDiagnosticCallPiece *call = cast<PathDiagnosticCallPiece>(piece);
// Check if the location context is interesting.
- assert(LocationContextMap.count(call));
- if (R->isInteresting(LocationContextMap[call])) {
+ assert(LCM.count(&call->path));
+ if (R->isInteresting(LCM[&call->path])) {
containsSomethingInteresting = true;
break;
}
- if (!RemoveUnneededCalls(call->path, R))
+ if (!removeUnneededCalls(call->path, R, LCM))
continue;
containsSomethingInteresting = true;
@@ -227,7 +187,7 @@ bool BugReporter::RemoveUnneededCalls(PathPieces &pieces, BugReport *R) {
}
case PathDiagnosticPiece::Macro: {
PathDiagnosticMacroPiece *macro = cast<PathDiagnosticMacroPiece>(piece);
- if (!RemoveUnneededCalls(macro->subPieces, R))
+ if (!removeUnneededCalls(macro->subPieces, R, LCM))
continue;
containsSomethingInteresting = true;
break;
@@ -290,19 +250,14 @@ static void adjustCallLocations(PathPieces &Pieces,
// PathDiagnosticBuilder and its associated routines and helper objects.
//===----------------------------------------------------------------------===//
-typedef llvm::DenseMap<const ExplodedNode*,
-const ExplodedNode*> NodeBackMap;
-
namespace {
class NodeMapClosure : public BugReport::NodeResolver {
- NodeBackMap& M;
+ InterExplodedGraphMap &M;
public:
- NodeMapClosure(NodeBackMap *m) : M(*m) {}
- ~NodeMapClosure() {}
+ NodeMapClosure(InterExplodedGraphMap &m) : M(m) {}
const ExplodedNode *getOriginalNode(const ExplodedNode *N) {
- NodeBackMap::iterator I = M.find(N);
- return I == M.end() ? 0 : I->second;
+ return M.lookup(N);
}
};
@@ -314,7 +269,7 @@ public:
const LocationContext *LC;
PathDiagnosticBuilder(GRBugReporter &br,
- BugReport *r, NodeBackMap *Backmap,
+ BugReport *r, InterExplodedGraphMap &Backmap,
PathDiagnosticConsumer *pdc)
: BugReporterContext(br),
R(r), PDC(pdc), NMC(Backmap), LC(r->getErrorNode()->getLocationContext())
@@ -351,7 +306,7 @@ public:
PathDiagnosticLocation
PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode *N) {
- if (const Stmt *S = GetNextStmt(N))
+ if (const Stmt *S = PathDiagnosticLocation::getNextStmt(N))
return PathDiagnosticLocation(S, getSourceManager(), LC);
return PathDiagnosticLocation::createDeclEnd(N->getLocationContext(),
@@ -562,6 +517,7 @@ static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);
static bool GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
const ExplodedNode *N,
+ LocationContextMap &LCM,
ArrayRef<BugReporterVisitor *> visitors) {
SourceManager& SMgr = PDB.getSourceManager();
@@ -574,7 +530,7 @@ static bool GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
while (NextNode) {
N = NextNode;
PDB.LC = N->getLocationContext();
- NextNode = GetPredecessorNode(N);
+ NextNode = N->getFirstPred();
ProgramPoint P = N->getLocation();
@@ -582,8 +538,8 @@ static bool GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
PathDiagnosticCallPiece *C =
PathDiagnosticCallPiece::construct(N, *CE, SMgr);
- GRBugReporter& BR = PDB.getBugReporter();
- BR.addCallPieceLocationContextPair(C, CE->getCalleeContext());
+ // Record the mapping from call piece to LocationContext.
+ LCM[&C->path] = CE->getCalleeContext();
PD.getActivePath().push_front(C);
PD.pushActivePath(&C->path);
CallStack.push_back(StackDiagPair(C, N));
@@ -606,8 +562,8 @@ static bool GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
} else {
const Decl *Caller = CE->getLocationContext()->getDecl();
C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
- GRBugReporter& BR = PDB.getBugReporter();
- BR.addCallPieceLocationContextPair(C, CE->getCalleeContext());
+ // Record the mapping from call piece to LocationContext.
+ LCM[&C->path] = CE->getCalleeContext();
}
C->setCallee(*CE, SMgr);
@@ -636,7 +592,7 @@ static bool GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
case Stmt::GotoStmtClass:
case Stmt::IndirectGotoStmtClass: {
- const Stmt *S = GetNextStmt(N);
+ const Stmt *S = PathDiagnosticLocation::getNextStmt(N);
if (!S)
break;
@@ -925,6 +881,50 @@ public:
bool isDead() const { return IsDead; }
};
+static PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
+ const LocationContext *LC,
+ bool firstCharOnly = false) {
+ if (const Stmt *S = L.asStmt()) {
+ const Stmt *Original = S;
+ while (1) {
+ // Adjust the location for some expressions that are best referenced
+ // by one of their subexpressions.
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::ParenExprClass:
+ case Stmt::GenericSelectionExprClass:
+ S = cast<Expr>(S)->IgnoreParens();
+ firstCharOnly = true;
+ continue;
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ S = cast<AbstractConditionalOperator>(S)->getCond();
+ firstCharOnly = true;
+ continue;
+ case Stmt::ChooseExprClass:
+ S = cast<ChooseExpr>(S)->getCond();
+ firstCharOnly = true;
+ continue;
+ case Stmt::BinaryOperatorClass:
+ S = cast<BinaryOperator>(S)->getLHS();
+ firstCharOnly = true;
+ continue;
+ }
+
+ break;
+ }
+
+ if (S != Original)
+ L = PathDiagnosticLocation(S, L.getManager(), LC);
+ }
+
+ if (firstCharOnly)
+ L = PathDiagnosticLocation::createSingleLocation(L);
+
+ return L;
+}
+
class EdgeBuilder {
std::vector<ContextLocation> CLocs;
typedef std::vector<ContextLocation>::iterator iterator;
@@ -939,53 +939,12 @@ class EdgeBuilder {
PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);
- PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
- bool firstCharOnly = false) {
- if (const Stmt *S = L.asStmt()) {
- const Stmt *Original = S;
- while (1) {
- // Adjust the location for some expressions that are best referenced
- // by one of their subexpressions.
- switch (S->getStmtClass()) {
- default:
- break;
- case Stmt::ParenExprClass:
- case Stmt::GenericSelectionExprClass:
- S = cast<Expr>(S)->IgnoreParens();
- firstCharOnly = true;
- continue;
- case Stmt::BinaryConditionalOperatorClass:
- case Stmt::ConditionalOperatorClass:
- S = cast<AbstractConditionalOperator>(S)->getCond();
- firstCharOnly = true;
- continue;
- case Stmt::ChooseExprClass:
- S = cast<ChooseExpr>(S)->getCond();
- firstCharOnly = true;
- continue;
- case Stmt::BinaryOperatorClass:
- S = cast<BinaryOperator>(S)->getLHS();
- firstCharOnly = true;
- continue;
- }
-
- break;
- }
-
- if (S != Original)
- L = PathDiagnosticLocation(S, L.getManager(), PDB.LC);
- }
-
- if (firstCharOnly)
- L = PathDiagnosticLocation::createSingleLocation(L);
- return L;
- }
void popLocation() {
if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
      // For contexts, we only want the first character as the range.
- rawAddEdge(cleanUpLocation(CLocs.back(), true));
+ rawAddEdge(cleanUpLocation(CLocs.back(), PDB.LC, true));
}
CLocs.pop_back();
}
@@ -1022,7 +981,8 @@ public:
PrevLoc = PathDiagnosticLocation();
}
- void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false);
+ void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false,
+ bool IsPostJump = false);
void rawAddEdge(PathDiagnosticLocation NewLoc);
@@ -1098,8 +1058,8 @@ void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
return;
}
- const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc);
- const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc);
+ const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc, PDB.LC);
+ const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc, PDB.LC);
if (PrevLocClean.asLocation().isInvalid()) {
PrevLoc = NewLoc;
@@ -1118,7 +1078,8 @@ void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
PrevLoc = NewLoc;
}
-void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
+void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd,
+ bool IsPostJump) {
if (!alwaysAdd && NewLoc.asLocation().isMacroID())
return;
@@ -1131,13 +1092,14 @@ void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
// Is the top location context the same as the one for the new location?
if (TopContextLoc == CLoc) {
if (alwaysAdd) {
- if (IsConsumedExpr(TopContextLoc) &&
- !IsControlFlowExpr(TopContextLoc.asStmt()))
- TopContextLoc.markDead();
+ if (IsConsumedExpr(TopContextLoc))
+ TopContextLoc.markDead();
rawAddEdge(NewLoc);
}
+ if (IsPostJump)
+ TopContextLoc.markDead();
return;
}
@@ -1145,13 +1107,13 @@ void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
if (alwaysAdd) {
rawAddEdge(NewLoc);
- if (IsConsumedExpr(CLoc) && !IsControlFlowExpr(CLoc.asStmt())) {
- CLocs.push_back(ContextLocation(CLoc, true));
+ if (IsConsumedExpr(CLoc)) {
+ CLocs.push_back(ContextLocation(CLoc, /*IsDead=*/true));
return;
}
}
- CLocs.push_back(CLoc);
+ CLocs.push_back(ContextLocation(CLoc, /*IsDead=*/IsPostJump));
return;
}
@@ -1305,6 +1267,7 @@ static bool isLoopJumpPastBody(const Stmt *Term, const BlockEdge *BE) {
switch (Term->getStmtClass()) {
case Stmt::ForStmtClass:
case Stmt::WhileStmtClass:
+ case Stmt::ObjCForCollectionStmtClass:
break;
default:
// Note that we intentionally do not include do..while here.
@@ -1335,7 +1298,7 @@ static const Stmt *getStmtBeforeCond(ParentMap &PM, const Stmt *Term,
if (!isContainedByStmt(PM, Term, S))
return S;
}
- N = GetPredecessorNode(N);
+ N = N->getFirstPred();
}
return 0;
}
@@ -1350,6 +1313,11 @@ static bool isInLoopBody(ParentMap &PM, const Stmt *S, const Stmt *Term) {
LoopBody = FS->getBody();
break;
}
+ case Stmt::ObjCForCollectionStmtClass: {
+ const ObjCForCollectionStmt *FC = cast<ObjCForCollectionStmt>(Term);
+ LoopBody = FC->getBody();
+ break;
+ }
case Stmt::WhileStmtClass:
LoopBody = cast<WhileStmt>(Term)->getBody();
break;
@@ -1366,6 +1334,7 @@ static bool isInLoopBody(ParentMap &PM, const Stmt *S, const Stmt *Term) {
static bool GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
const ExplodedNode *N,
+ LocationContextMap &LCM,
ArrayRef<BugReporterVisitor *> visitors) {
EdgeBuilder EB(PD, PDB);
const SourceManager& SM = PDB.getSourceManager();
@@ -1375,7 +1344,7 @@ static bool GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
const ExplodedNode *NextNode = N->pred_empty() ? NULL : *(N->pred_begin());
while (NextNode) {
N = NextNode;
- NextNode = GetPredecessorNode(N);
+ NextNode = N->getFirstPred();
ProgramPoint P = N->getLocation();
do {
@@ -1396,10 +1365,9 @@ static bool GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticCallPiece *C =
PathDiagnosticCallPiece::construct(N, *CE, SM);
- GRBugReporter& BR = PDB.getBugReporter();
- BR.addCallPieceLocationContextPair(C, CE->getCalleeContext());
+ LCM[&C->path] = CE->getCalleeContext();
- EB.addEdge(C->callReturn, true);
+ EB.addEdge(C->callReturn, /*AlwaysAdd=*/true, /*IsPostJump=*/true);
EB.flushLocations();
PD.getActivePath().push_front(C);
@@ -1434,8 +1402,7 @@ static bool GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
} else {
const Decl *Caller = CE->getLocationContext()->getDecl();
C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
- GRBugReporter& BR = PDB.getBugReporter();
- BR.addCallPieceLocationContextPair(C, CE->getCalleeContext());
+ LCM[&C->path] = CE->getCalleeContext();
}
C->setCallee(*CE, SM);
@@ -1563,6 +1530,458 @@ static bool GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
return PDB.getBugReport()->isValid();
}
+/// \brief Adds a sanitized control-flow diagnostic edge to a path.
+static void addEdgeToPath(PathPieces &path,
+ PathDiagnosticLocation &PrevLoc,
+ PathDiagnosticLocation NewLoc,
+ const LocationContext *LC) {
+ if (!NewLoc.isValid())
+ return;
+
+ SourceLocation NewLocL = NewLoc.asLocation();
+ if (NewLocL.isInvalid() || NewLocL.isMacroID())
+ return;
+
+ if (!PrevLoc.isValid()) {
+ PrevLoc = NewLoc;
+ return;
+ }
+
+ // FIXME: ignore intra-macro edges for now.
+ if (NewLoc.asLocation().getExpansionLoc() ==
+ PrevLoc.asLocation().getExpansionLoc())
+ return;
+
+ path.push_front(new PathDiagnosticControlFlowPiece(NewLoc,
+ PrevLoc));
+ PrevLoc = NewLoc;
+}
+
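A minimal sketch of how addEdgeToPath() threads PrevLoc (L_error and L_before are hypothetical locations, assumed valid, non-macro, and in different expansions):

  PathDiagnosticLocation Prev;               // starts out invalid
  addEdgeToPath(path, Prev, L_error, LC);    // first call only seeds Prev
  addEdgeToPath(path, Prev, L_before, LC);   // prepends the edge L_before -> L_error

Because the exploded graph is walked backwards from the error node, each new location becomes the start of an edge that ends at the previously recorded one, and pieces are pushed onto the front of the active path so the finished diagnostic reads in execution order.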
+static bool
+GenerateAlternateExtensivePathDiagnostic(PathDiagnostic& PD,
+ PathDiagnosticBuilder &PDB,
+ const ExplodedNode *N,
+ LocationContextMap &LCM,
+ ArrayRef<BugReporterVisitor *> visitors) {
+
+ BugReport *report = PDB.getBugReport();
+ const SourceManager& SM = PDB.getSourceManager();
+ StackDiagVector CallStack;
+ InterestingExprs IE;
+
+ // Record the last location for a given visited stack frame.
+ llvm::DenseMap<const StackFrameContext *, PathDiagnosticLocation>
+ PrevLocMap;
+
+ const ExplodedNode *NextNode = N->getFirstPred();
+ while (NextNode) {
+ N = NextNode;
+ NextNode = N->getFirstPred();
+ ProgramPoint P = N->getLocation();
+ const LocationContext *LC = N->getLocationContext();
+ assert(!LCM[&PD.getActivePath()] || LCM[&PD.getActivePath()] == LC);
+ LCM[&PD.getActivePath()] = LC;
+ PathDiagnosticLocation &PrevLoc = PrevLocMap[LC->getCurrentStackFrame()];
+
+ do {
+ if (Optional<PostStmt> PS = P.getAs<PostStmt>()) {
+ // For expressions, make sure we propagate the
+ // interesting symbols correctly.
+ if (const Expr *Ex = PS->getStmtAs<Expr>())
+ reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
+ N->getState().getPtr(), Ex,
+ N->getLocationContext());
+
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation(PS->getStmt(), SM, LC);
+ addEdgeToPath(PD.getActivePath(), PrevLoc, L, LC);
+ break;
+ }
+
+ // Have we encountered an exit from a function call?
+ if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
+ const Stmt *S = CE->getCalleeContext()->getCallSite();
+ // Propagate the interesting symbols accordingly.
+ if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) {
+ reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
+ N->getState().getPtr(), Ex,
+ N->getLocationContext());
+ }
+
+ // We are descending into a call (backwards). Construct
+ // a new call piece to contain the path pieces for that call.
+ PathDiagnosticCallPiece *C =
+ PathDiagnosticCallPiece::construct(N, *CE, SM);
+
+ // Record the location context for this call piece.
+ LCM[&C->path] = CE->getCalleeContext();
+
+ // Add the edge to the return site.
+ addEdgeToPath(PD.getActivePath(), PrevLoc, C->callReturn, LC);
+
+ // Make the contents of the call the active path for now.
+ PD.pushActivePath(&C->path);
+ CallStack.push_back(StackDiagPair(C, N));
+ break;
+ }
+
+ // Have we encountered an entrance to a call? It may be
+ // the case that we have not encountered a matching
+ // call exit before this point. This means that the path
+ // terminated within the call itself.
+ if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
+ // Add an edge to the start of the function.
+ const Decl *D = CE->getCalleeContext()->getDecl();
+ addEdgeToPath(PD.getActivePath(), PrevLoc,
+ PathDiagnosticLocation::createBegin(D, SM), LC);
+
+ // Did we visit an entire call?
+ bool VisitedEntireCall = PD.isWithinCall();
+ PD.popActivePath();
+
+ PathDiagnosticCallPiece *C;
+ if (VisitedEntireCall) {
+ C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+ } else {
+ const Decl *Caller = CE->getLocationContext()->getDecl();
+ C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+ LCM[&C->path] = CE->getCalleeContext();
+ }
+ C->setCallee(*CE, SM);
+
+ if (!CallStack.empty()) {
+ assert(CallStack.back().first == C);
+ CallStack.pop_back();
+ }
+ break;
+ }
+
+ // Block edges.
+ if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+ // Does this represent entering a call? If so, look at propagating
+ // interesting symbols across call boundaries.
+ if (NextNode) {
+ const LocationContext *CallerCtx = NextNode->getLocationContext();
+ const LocationContext *CalleeCtx = PDB.LC;
+ if (CallerCtx != CalleeCtx) {
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().getPtr(),
+ CalleeCtx, CallerCtx);
+ }
+ }
+
+ // Are we jumping to the head of a loop? Add a special diagnostic.
+ if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
+ PathDiagnosticLocation L(Loop, SM, PDB.LC);
+ const CompoundStmt *CS = NULL;
+
+ if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(FS->getBody());
+ else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(WS->getBody());
+
+ PathDiagnosticEventPiece *p =
+ new PathDiagnosticEventPiece(L, "Looping back to the head "
+ "of the loop");
+ p->setPrunable(true);
+
+ addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), LC);
+ PD.getActivePath().push_front(p);
+
+ if (CS) {
+ addEdgeToPath(PD.getActivePath(), PrevLoc,
+ PathDiagnosticLocation::createEndBrace(CS, SM), LC);
+ }
+ }
+
+ const CFGBlock *BSrc = BE->getSrc();
+ ParentMap &PM = PDB.getParentMap();
+
+ if (const Stmt *Term = BSrc->getTerminator()) {
+ // Are we jumping past the loop body without ever executing the
+ // loop (because the condition was false)?
+ if (isLoopJumpPastBody(Term, &*BE) &&
+ !isInLoopBody(PM,
+ getStmtBeforeCond(PM,
+ BSrc->getTerminatorCondition(),
+ N),
+ Term))
+ {
+ PathDiagnosticLocation L(Term, SM, PDB.LC);
+ PathDiagnosticEventPiece *PE =
+ new PathDiagnosticEventPiece(L, "Loop body executed 0 times");
+ PE->setPrunable(true);
+ addEdgeToPath(PD.getActivePath(), PrevLoc,
+ PE->getLocation(), LC);
+ PD.getActivePath().push_front(PE);
+ }
+ }
+ break;
+ }
+ } while (0);
+
+ if (!NextNode)
+ continue;
+
+ // Add pieces from custom visitors.
+ for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
+ E = visitors.end();
+ I != E; ++I) {
+ if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *report)) {
+ addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), LC);
+ PD.getActivePath().push_front(p);
+ updateStackPiecesWithMessage(p, CallStack);
+ }
+ }
+ }
+
+ return report->isValid();
+}
+
+const Stmt *getLocStmt(PathDiagnosticLocation L) {
+ if (!L.isValid())
+ return 0;
+ return L.asStmt();
+}
+
+const Stmt *getStmtParent(const Stmt *S, ParentMap &PM) {
+ if (!S)
+ return 0;
+ return PM.getParentIgnoreParens(S);
+}
+
+#if 0
+static bool isConditionForTerminator(const Stmt *S, const Stmt *Cond) {
+  // Note that we intentionally do not handle || and && here.
+ switch (S->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ return cast<ForStmt>(S)->getCond() == Cond;
+ case Stmt::WhileStmtClass:
+ return cast<WhileStmt>(S)->getCond() == Cond;
+ case Stmt::DoStmtClass:
+ return cast<DoStmt>(S)->getCond() == Cond;
+ case Stmt::ChooseExprClass:
+ return cast<ChooseExpr>(S)->getCond() == Cond;
+ case Stmt::IndirectGotoStmtClass:
+ return cast<IndirectGotoStmt>(S)->getTarget() == Cond;
+ case Stmt::SwitchStmtClass:
+ return cast<SwitchStmt>(S)->getCond() == Cond;
+ case Stmt::BinaryConditionalOperatorClass:
+ return cast<BinaryConditionalOperator>(S)->getCond() == Cond;
+ case Stmt::ConditionalOperatorClass:
+ return cast<ConditionalOperator>(S)->getCond() == Cond;
+ case Stmt::ObjCForCollectionStmtClass:
+ return cast<ObjCForCollectionStmt>(S)->getElement() == Cond;
+ default:
+ return false;
+ }
+}
+#endif
+
+typedef llvm::DenseSet<const PathDiagnosticControlFlowPiece *>
+ ControlFlowBarrierSet;
+
+typedef llvm::DenseSet<const PathDiagnosticCallPiece *>
+ OptimizedCallsSet;
+
+static bool isBarrier(ControlFlowBarrierSet &CFBS,
+ const PathDiagnosticControlFlowPiece *P) {
+ return CFBS.count(P);
+}
+
+static bool optimizeEdges(PathPieces &path, SourceManager &SM,
+ ControlFlowBarrierSet &CFBS,
+ OptimizedCallsSet &OCS,
+ LocationContextMap &LCM) {
+ bool hasChanges = false;
+ const LocationContext *LC = LCM[&path];
+ assert(LC);
+ bool isFirst = true;
+
+ for (PathPieces::iterator I = path.begin(), E = path.end(); I != E; ) {
+ bool wasFirst = isFirst;
+ isFirst = false;
+
+ // Optimize subpaths.
+ if (PathDiagnosticCallPiece *CallI = dyn_cast<PathDiagnosticCallPiece>(*I)){
+      // Record the fact that a call has been optimized so we only do the
+      // work once.
+ if (!OCS.count(CallI)) {
+ while (optimizeEdges(CallI->path, SM, CFBS, OCS, LCM)) {}
+ OCS.insert(CallI);
+ }
+ ++I;
+ continue;
+ }
+
+ // Pattern match the current piece and its successor.
+ PathDiagnosticControlFlowPiece *PieceI =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*I);
+
+ if (!PieceI) {
+ ++I;
+ continue;
+ }
+
+ ParentMap &PM = LC->getParentMap();
+ const Stmt *s1Start = getLocStmt(PieceI->getStartLocation());
+ const Stmt *s1End = getLocStmt(PieceI->getEndLocation());
+ const Stmt *level1 = getStmtParent(s1Start, PM);
+ const Stmt *level2 = getStmtParent(s1End, PM);
+
+ if (wasFirst) {
+#if 0
+ // Apply the "first edge" case for Rule V. here.
+ if (s1Start && level1 && isConditionForTerminator(level1, s1Start)) {
+ PathDiagnosticLocation NewLoc(level2, SM, LC);
+ PieceI->setStartLocation(NewLoc);
+ CFBS.insert(PieceI);
+ return true;
+ }
+#endif
+ // Apply the "first edge" case for Rule III. here.
+ if (!isBarrier(CFBS, PieceI) &&
+ level1 && level2 && level2 == PM.getParent(level1)) {
+ path.erase(I);
+        // Since we are erasing the current edge at the start of the
+        // path, just return now so that analysis restarts from the
+        // beginning of the path.
+ return true;
+ }
+ }
+
+ PathPieces::iterator NextI = I; ++NextI;
+ if (NextI == E)
+ break;
+
+ PathDiagnosticControlFlowPiece *PieceNextI =
+ dyn_cast<PathDiagnosticControlFlowPiece>(*NextI);
+
+ if (!PieceNextI) {
+ ++I;
+ continue;
+ }
+
+ const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation());
+ const Stmt *s2End = getLocStmt(PieceNextI->getEndLocation());
+ const Stmt *level3 = getStmtParent(s2Start, PM);
+ const Stmt *level4 = getStmtParent(s2End, PM);
+
+ // Rule I.
+ //
+ // If we have two consecutive control edges whose end/begin locations
+ // are at the same level (e.g. statements or top-level expressions within
+    // a compound statement, or siblings sharing a single ancestor expression),
+ // then merge them if they have no interesting intermediate event.
+ //
+ // For example:
+ //
+ // (1.1 -> 1.2) -> (1.2 -> 1.3) becomes (1.1 -> 1.3) because the common
+ // parent is '1'. Here 'x.y.z' represents the hierarchy of statements.
+ //
+ // NOTE: this will be limited later in cases where we add barriers
+ // to prevent this optimization.
+ //
+ if (level1 && level1 == level2 && level1 == level3 && level1 == level4) {
+ PieceI->setEndLocation(PieceNextI->getEndLocation());
+ path.erase(NextI);
+ hasChanges = true;
+ continue;
+ }
+
+ // Rule II.
+ //
+    // If we have two consecutive control edges where we descend to a
+    // subexpression and then pop back out, merge them.
+ //
+ // NOTE: this will be limited later in cases where we add barriers
+ // to prevent this optimization.
+ //
+ // For example:
+ //
+ // (1.1 -> 1.1.1) -> (1.1.1 -> 1.2) becomes (1.1 -> 1.2).
+ if (level1 && level2 &&
+ level1 == level4 &&
+ level2 == level3 && PM.getParentIgnoreParens(level2) == level1) {
+ PieceI->setEndLocation(PieceNextI->getEndLocation());
+ path.erase(NextI);
+ hasChanges = true;
+ continue;
+ }
+
+ // Rule III.
+ //
+ // Eliminate unnecessary edges where we descend to a subexpression from
+ // a statement at the same level as our parent.
+ //
+ // NOTE: this will be limited later in cases where we add barriers
+ // to prevent this optimization.
+ //
+ // For example:
+ //
+ // (1.1 -> 1.1.1) -> (1.1.1 -> X) becomes (1.1 -> X).
+ //
+ if (level1 && level2 && level1 == PM.getParentIgnoreParens(level2)) {
+ PieceI->setEndLocation(PieceNextI->getEndLocation());
+ path.erase(NextI);
+ hasChanges = true;
+ continue;
+ }
+
+ // Rule IV.
+ //
+ // Eliminate unnecessary edges where we ascend from a subexpression to
+ // a statement at the same level as our parent.
+ //
+ // NOTE: this will be limited later in cases where we add barriers
+ // to prevent this optimization.
+ //
+ // For example:
+ //
+ // (X -> 1.1.1) -> (1.1.1 -> 1.1) becomes (X -> 1.1).
+ // [first edge] (1.1.1 -> 1.1) -> eliminate
+ //
+ if (level2 && level4 && level2 == level3 && level4 == PM.getParent(level2)){
+ PieceI->setEndLocation(PieceNextI->getEndLocation());
+ path.erase(NextI);
+ hasChanges = true;
+ continue;
+ }
+#if 0
+ // Rule V.
+ //
+ // Replace terminator conditions with terminators when the condition
+ // itself has no control-flow.
+ //
+ // For example:
+ //
+ // (X -> condition) -> (condition -> Y) becomes (X -> term) -> (term -> Y)
+ // [first edge] (condition -> Y) becomes (term -> Y)
+ //
+ // This applies to 'if', 'for', 'while', 'do .. while', 'switch'...
+ //
+ if (!isBarrier(CFBS, PieceNextI) &&
+ s1End && s1End == s2Start && level2) {
+ if (isConditionForTerminator(level2, s1End)) {
+ PathDiagnosticLocation NewLoc(level2, SM, LC);
+ PieceI->setEndLocation(NewLoc);
+ PieceNextI->setStartLocation(NewLoc);
+ CFBS.insert(PieceI);
+ hasChanges = true;
+ continue;
+ }
+
+ }
+#endif
+
+ // No changes at this index? Move to the next one.
+ ++I;
+ }
+
+  // Report whether any edges were merged or removed.
+ return hasChanges;
+}
+
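A concrete reading of the x.y.z notation used by the rules above (the statement is hypothetical and the hierarchy is only approximate):

  y = foo(a + b);

  1        the full statement  'y = foo(a + b)'
  1.1      the call            'foo(a + b)'
  1.1.1    the argument        'a + b'

Under Rule I, two consecutive edges whose four endpoints all share the same parent statement are fused into one; under Rule III, a pair such as (1.1 -> 1.1.1) -> (1.1.1 -> X) collapses to (1.1 -> X), dropping the uninteresting hop into the argument subexpression.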
//===----------------------------------------------------------------------===//
// Methods for BugType and subclasses.
//===----------------------------------------------------------------------===//
@@ -1748,7 +2167,7 @@ const Stmt *BugReport::getStmt() const {
S = GetPreviousStmt(ErrorNode);
}
if (!S)
- S = GetStmt(ProgP);
+ S = PathDiagnosticLocation::getStmt(ErrorNode);
return S;
}
@@ -1775,22 +2194,7 @@ PathDiagnosticLocation BugReport::getLocation(const SourceManager &SM) const {
if (ErrorNode) {
assert(!Location.isValid() &&
"Either Location or ErrorNode should be specified but not both.");
-
- if (const Stmt *S = GetCurrentOrPreviousStmt(ErrorNode)) {
- const LocationContext *LC = ErrorNode->getLocationContext();
-
- // For member expressions, return the location of the '.' or '->'.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(S))
- return PathDiagnosticLocation::createMemberLoc(ME, SM);
- // For binary operators, return the location of the operator.
- if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S))
- return PathDiagnosticLocation::createOperatorLoc(B, SM);
-
- if (ErrorNode->getLocation().getAs<PostStmtPurgeDeadSymbols>())
- return PathDiagnosticLocation::createEnd(S, SM, LC);
-
- return PathDiagnosticLocation::createBegin(S, SM, LC);
- }
+ return PathDiagnosticLocation::createEndOfPath(ErrorNode, SM);
} else {
assert(Location.isValid());
return Location;
@@ -1863,141 +2267,175 @@ void BugReporter::FlushReports() {
// PathDiagnostics generation.
//===----------------------------------------------------------------------===//
-static std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
- std::pair<ExplodedNode*, unsigned> >
-MakeReportGraph(const ExplodedGraph* G,
- SmallVectorImpl<const ExplodedNode*> &nodes) {
+namespace {
+/// A wrapper around a report graph, which contains only a single path, and its
+/// node maps.
+class ReportGraph {
+public:
+ InterExplodedGraphMap BackMap;
+ OwningPtr<ExplodedGraph> Graph;
+ const ExplodedNode *ErrorNode;
+ size_t Index;
+};
+
+/// A wrapper around a trimmed graph and its node maps.
+class TrimmedGraph {
+ InterExplodedGraphMap InverseMap;
+
+ typedef llvm::DenseMap<const ExplodedNode *, unsigned> PriorityMapTy;
+ PriorityMapTy PriorityMap;
+
+ typedef std::pair<const ExplodedNode *, size_t> NodeIndexPair;
+ SmallVector<NodeIndexPair, 32> ReportNodes;
- // Create the trimmed graph. It will contain the shortest paths from the
- // error nodes to the root. In the new graph we should only have one
- // error node unless there are two or more error nodes with the same minimum
- // path length.
- ExplodedGraph* GTrim;
- InterExplodedGraphMap* NMap;
+ OwningPtr<ExplodedGraph> G;
- llvm::DenseMap<const void*, const void*> InverseMap;
- llvm::tie(GTrim, NMap) = G->Trim(nodes.data(), nodes.data() + nodes.size(),
- &InverseMap);
+ /// A helper class for sorting ExplodedNodes by priority.
+ template <bool Descending>
+ class PriorityCompare {
+ const PriorityMapTy &PriorityMap;
- // Create owning pointers for GTrim and NMap just to ensure that they are
- // released when this function exists.
- OwningPtr<ExplodedGraph> AutoReleaseGTrim(GTrim);
- OwningPtr<InterExplodedGraphMap> AutoReleaseNMap(NMap);
+ public:
+ PriorityCompare(const PriorityMapTy &M) : PriorityMap(M) {}
+
+ bool operator()(const ExplodedNode *LHS, const ExplodedNode *RHS) const {
+ PriorityMapTy::const_iterator LI = PriorityMap.find(LHS);
+ PriorityMapTy::const_iterator RI = PriorityMap.find(RHS);
+ PriorityMapTy::const_iterator E = PriorityMap.end();
+
+ if (LI == E)
+ return Descending;
+ if (RI == E)
+ return !Descending;
+
+ return Descending ? LI->second > RI->second
+ : LI->second < RI->second;
+ }
+
+ bool operator()(const NodeIndexPair &LHS, const NodeIndexPair &RHS) const {
+ return (*this)(LHS.first, RHS.first);
+ }
+ };
+
+public:
+ TrimmedGraph(const ExplodedGraph *OriginalGraph,
+ ArrayRef<const ExplodedNode *> Nodes);
+
+ bool popNextReportGraph(ReportGraph &GraphWrapper);
+};
+}
+
+TrimmedGraph::TrimmedGraph(const ExplodedGraph *OriginalGraph,
+ ArrayRef<const ExplodedNode *> Nodes) {
+ // The trimmed graph is created in the body of the constructor to ensure
+ // that the DenseMaps have been initialized already.
+ InterExplodedGraphMap ForwardMap;
+ G.reset(OriginalGraph->trim(Nodes, &ForwardMap, &InverseMap));
// Find the (first) error node in the trimmed graph. We just need to consult
- // the node map (NMap) which maps from nodes in the original graph to nodes
+ // the node map which maps from nodes in the original graph to nodes
// in the new graph.
+ llvm::SmallPtrSet<const ExplodedNode *, 32> RemainingNodes;
- std::queue<const ExplodedNode*> WS;
- typedef llvm::DenseMap<const ExplodedNode*, unsigned> IndexMapTy;
- IndexMapTy IndexMap;
-
- for (unsigned nodeIndex = 0 ; nodeIndex < nodes.size(); ++nodeIndex) {
- const ExplodedNode *originalNode = nodes[nodeIndex];
- if (const ExplodedNode *N = NMap->getMappedNode(originalNode)) {
- WS.push(N);
- IndexMap[originalNode] = nodeIndex;
+ for (unsigned i = 0, count = Nodes.size(); i < count; ++i) {
+ if (const ExplodedNode *NewNode = ForwardMap.lookup(Nodes[i])) {
+ ReportNodes.push_back(std::make_pair(NewNode, i));
+ RemainingNodes.insert(NewNode);
}
}
- assert(!WS.empty() && "No error node found in the trimmed graph.");
-
- // Create a new (third!) graph with a single path. This is the graph
- // that will be returned to the caller.
- ExplodedGraph *GNew = new ExplodedGraph();
+ assert(!RemainingNodes.empty() && "No error node found in the trimmed graph");
- // Sometimes the trimmed graph can contain a cycle. Perform a reverse BFS
- // to the root node, and then construct a new graph that contains only
- // a single path.
- llvm::DenseMap<const void*,unsigned> Visited;
+ // Perform a forward BFS to find all the shortest paths.
+ std::queue<const ExplodedNode *> WS;
- unsigned cnt = 0;
- const ExplodedNode *Root = 0;
+ assert(G->num_roots() == 1);
+ WS.push(*G->roots_begin());
+ unsigned Priority = 0;
while (!WS.empty()) {
const ExplodedNode *Node = WS.front();
WS.pop();
- if (Visited.find(Node) != Visited.end())
- continue;
-
- Visited[Node] = cnt++;
+ PriorityMapTy::iterator PriorityEntry;
+ bool IsNew;
+ llvm::tie(PriorityEntry, IsNew) =
+ PriorityMap.insert(std::make_pair(Node, Priority));
+ ++Priority;
- if (Node->pred_empty()) {
- Root = Node;
- break;
+ if (!IsNew) {
+ assert(PriorityEntry->second <= Priority);
+ continue;
}
- for (ExplodedNode::const_pred_iterator I=Node->pred_begin(),
- E=Node->pred_end(); I!=E; ++I)
+ if (RemainingNodes.erase(Node))
+ if (RemainingNodes.empty())
+ break;
+
+ for (ExplodedNode::const_pred_iterator I = Node->succ_begin(),
+ E = Node->succ_end();
+ I != E; ++I)
WS.push(*I);
}
- assert(Root);
+ // Sort the error paths from longest to shortest.
+ std::sort(ReportNodes.begin(), ReportNodes.end(),
+ PriorityCompare<true>(PriorityMap));
+}
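The BFS-priority scheme above can be restated as a small standalone sketch (plain C++ over toy integer nodes; all names are illustrative and nothing below comes from the patch): assign every node its forward-BFS visit order from the root, then walk back from the error node always taking the predecessor with the smallest priority, which recovers one shortest path.

#include <map>
#include <queue>
#include <vector>

int main() {
  // Toy graph: preds[v] lists the predecessors of node v; node 0 is the root.
  std::map<int, std::vector<int> > preds;
  preds[1].push_back(0);
  preds[2].push_back(0); preds[2].push_back(1);
  preds[3].push_back(1); preds[3].push_back(2);

  // Build successor lists so we can BFS forward from the root.
  std::map<int, std::vector<int> > succs;
  for (std::map<int, std::vector<int> >::iterator I = preds.begin(),
       E = preds.end(); I != E; ++I)
    for (size_t i = 0; i < I->second.size(); ++i)
      succs[I->second[i]].push_back(I->first);

  // Forward BFS assigns each node a priority equal to its visit order.
  std::map<int, unsigned> priority;
  std::queue<int> ws;
  ws.push(0);
  unsigned next = 0;
  while (!ws.empty()) {
    int n = ws.front(); ws.pop();
    if (!priority.insert(std::make_pair(n, next++)).second)
      continue;  // already visited, and the earlier priority is the smaller one
    for (size_t i = 0; i < succs[n].size(); ++i)
      ws.push(succs[n][i]);
  }

  // Walk back from the "error node" 3, always taking the predecessor with the
  // smallest priority; this reconstructs one shortest path to the root.
  std::vector<int> path;
  int n = 3;
  while (true) {
    path.push_back(n);
    if (preds.find(n) == preds.end())
      break;  // reached the root
    const std::vector<int> &ps = preds[n];
    int best = ps[0];
    for (size_t i = 1; i < ps.size(); ++i)
      if (priority[ps[i]] < priority[best])
        best = ps[i];
    n = best;
  }
  // path now holds 3, 1, 0: the error node back to the root.
  return 0;
}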
- // Now walk from the root down the BFS path, always taking the successor
- // with the lowest number.
- ExplodedNode *Last = 0, *First = 0;
- NodeBackMap *BM = new NodeBackMap();
- unsigned NodeIndex = 0;
+bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) {
+ if (ReportNodes.empty())
+ return false;
- for ( const ExplodedNode *N = Root ;;) {
- // Lookup the number associated with the current node.
- llvm::DenseMap<const void*,unsigned>::iterator I = Visited.find(N);
- assert(I != Visited.end());
+ const ExplodedNode *OrigN;
+ llvm::tie(OrigN, GraphWrapper.Index) = ReportNodes.pop_back_val();
+ assert(PriorityMap.find(OrigN) != PriorityMap.end() &&
+ "error node not accessible from root");
+
+ // Create a new graph with a single path. This is the graph
+ // that will be returned to the caller.
+ ExplodedGraph *GNew = new ExplodedGraph();
+ GraphWrapper.Graph.reset(GNew);
+ GraphWrapper.BackMap.clear();
+ // Now walk from the error node up the BFS path, always taking the
+  // predecessor with the lowest number.
+ ExplodedNode *Succ = 0;
+ while (true) {
// Create the equivalent node in the new graph with the same state
// and location.
- ExplodedNode *NewN = GNew->getNode(N->getLocation(), N->getState());
+ ExplodedNode *NewN = GNew->getNode(OrigN->getLocation(), OrigN->getState(),
+ OrigN->isSink());
// Store the mapping to the original node.
- llvm::DenseMap<const void*, const void*>::iterator IMitr=InverseMap.find(N);
+ InterExplodedGraphMap::const_iterator IMitr = InverseMap.find(OrigN);
assert(IMitr != InverseMap.end() && "No mapping to original node.");
- (*BM)[NewN] = (const ExplodedNode*) IMitr->second;
+ GraphWrapper.BackMap[NewN] = IMitr->second;
// Link up the new node with the previous node.
- if (Last)
- NewN->addPredecessor(Last, *GNew);
+ if (Succ)
+ Succ->addPredecessor(NewN, *GNew);
+ else
+ GraphWrapper.ErrorNode = NewN;
- Last = NewN;
+ Succ = NewN;
// Are we at the final node?
- IndexMapTy::iterator IMI =
- IndexMap.find((const ExplodedNode*)(IMitr->second));
- if (IMI != IndexMap.end()) {
- First = NewN;
- NodeIndex = IMI->second;
+ if (OrigN->pred_empty()) {
+ GNew->addRoot(NewN);
break;
}
- // Find the next successor node. We choose the node that is marked
- // with the lowest DFS number.
- ExplodedNode::const_succ_iterator SI = N->succ_begin();
- ExplodedNode::const_succ_iterator SE = N->succ_end();
- N = 0;
-
- for (unsigned MinVal = 0; SI != SE; ++SI) {
-
- I = Visited.find(*SI);
-
- if (I == Visited.end())
- continue;
-
- if (!N || I->second < MinVal) {
- N = *SI;
- MinVal = I->second;
- }
- }
-
- assert(N);
+    // Find the next predecessor node. We choose the node that is marked
+ // with the lowest BFS number.
+ OrigN = *std::min_element(OrigN->pred_begin(), OrigN->pred_end(),
+ PriorityCompare<false>(PriorityMap));
}
- assert(First);
-
- return std::make_pair(std::make_pair(GNew, BM),
- std::make_pair(First, NodeIndex));
+ return true;
}
+
/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
 /// and collapses PathDiagnosticPieces that are expanded by macros.
static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
@@ -2100,133 +2538,150 @@ bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
assert(!bugReports.empty());
bool HasValid = false;
- SmallVector<const ExplodedNode *, 10> errorNodes;
+ bool HasInvalid = false;
+ SmallVector<const ExplodedNode *, 32> errorNodes;
for (ArrayRef<BugReport*>::iterator I = bugReports.begin(),
E = bugReports.end(); I != E; ++I) {
if ((*I)->isValid()) {
HasValid = true;
errorNodes.push_back((*I)->getErrorNode());
} else {
+ // Keep the errorNodes list in sync with the bugReports list.
+ HasInvalid = true;
errorNodes.push_back(0);
}
}
- // If all the reports have been marked invalid, we're done.
+ // If all the reports have been marked invalid by a previous path generation,
+ // we're done.
if (!HasValid)
return false;
- // Construct a new graph that contains only a single path from the error
- // node to a root.
- const std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
- std::pair<ExplodedNode*, unsigned> >&
- GPair = MakeReportGraph(&getGraph(), errorNodes);
-
- // Find the BugReport with the original location.
- assert(GPair.second.second < bugReports.size());
- BugReport *R = bugReports[GPair.second.second];
- assert(R && "No original report found for sliced graph.");
- assert(R->isValid() && "Report selected from trimmed graph marked invalid.");
-
- OwningPtr<ExplodedGraph> ReportGraph(GPair.first.first);
- OwningPtr<NodeBackMap> BackMap(GPair.first.second);
- const ExplodedNode *N = GPair.second.first;
-
- // Start building the path diagnostic...
- PathDiagnosticBuilder PDB(*this, R, BackMap.get(), &PC);
-
- // Register additional node visitors.
- R->addVisitor(new NilReceiverBRVisitor());
- R->addVisitor(new ConditionBRVisitor());
- R->addVisitor(new LikelyFalsePositiveSuppressionBRVisitor());
-
- BugReport::VisitorList visitors;
- unsigned originalReportConfigToken, finalReportConfigToken;
-
- // While generating diagnostics, it's possible the visitors will decide
- // new symbols and regions are interesting, or add other visitors based on
- // the information they find. If they do, we need to regenerate the path
- // based on our new report configuration.
- do {
- // Get a clean copy of all the visitors.
- for (BugReport::visitor_iterator I = R->visitor_begin(),
- E = R->visitor_end(); I != E; ++I)
- visitors.push_back((*I)->clone());
-
- // Clear out the active path from any previous work.
- PD.resetPath();
- originalReportConfigToken = R->getConfigurationChangeToken();
-
- // Generate the very last diagnostic piece - the piece is visible before
- // the trace is expanded.
- PathDiagnosticPiece *LastPiece = 0;
- for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
- I != E; ++I) {
- if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) {
- assert (!LastPiece &&
- "There can only be one final piece in a diagnostic.");
- LastPiece = Piece;
- }
- }
+ typedef PathDiagnosticConsumer::PathGenerationScheme PathGenerationScheme;
+ PathGenerationScheme ActiveScheme = PC.getGenerationScheme();
- if (PDB.getGenerationScheme() != PathDiagnosticConsumer::None) {
- if (!LastPiece)
- LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
- if (LastPiece)
- PD.setEndOfPath(LastPiece);
- else
- return false;
+ if (ActiveScheme == PathDiagnosticConsumer::Extensive) {
+ AnalyzerOptions &options = getEngine().getAnalysisManager().options;
+ if (options.getBooleanOption("path-diagnostics-alternate", false)) {
+ ActiveScheme = PathDiagnosticConsumer::AlternateExtensive;
}
+ }
- switch (PDB.getGenerationScheme()) {
- case PathDiagnosticConsumer::Extensive:
- if (!GenerateExtensivePathDiagnostic(PD, PDB, N, visitors)) {
- assert(!R->isValid() && "Failed on valid report");
- // Try again. We'll filter out the bad report when we trim the graph.
- // FIXME: It would be more efficient to use the same intermediate
- // trimmed graph, and just repeat the shortest-path search.
- return generatePathDiagnostic(PD, PC, bugReports);
+ TrimmedGraph TrimG(&getGraph(), errorNodes);
+ ReportGraph ErrorGraph;
+
+ while (TrimG.popNextReportGraph(ErrorGraph)) {
+ // Find the BugReport with the original location.
+ assert(ErrorGraph.Index < bugReports.size());
+ BugReport *R = bugReports[ErrorGraph.Index];
+ assert(R && "No original report found for sliced graph.");
+ assert(R->isValid() && "Report selected by trimmed graph marked invalid.");
+
+ // Start building the path diagnostic...
+ PathDiagnosticBuilder PDB(*this, R, ErrorGraph.BackMap, &PC);
+ const ExplodedNode *N = ErrorGraph.ErrorNode;
+
+ // Register additional node visitors.
+ R->addVisitor(new NilReceiverBRVisitor());
+ R->addVisitor(new ConditionBRVisitor());
+ R->addVisitor(new LikelyFalsePositiveSuppressionBRVisitor());
+
+ BugReport::VisitorList visitors;
+ unsigned origReportConfigToken, finalReportConfigToken;
+ LocationContextMap LCM;
+
+ // While generating diagnostics, it's possible the visitors will decide
+ // new symbols and regions are interesting, or add other visitors based on
+ // the information they find. If they do, we need to regenerate the path
+ // based on our new report configuration.
+ do {
+ // Get a clean copy of all the visitors.
+ for (BugReport::visitor_iterator I = R->visitor_begin(),
+ E = R->visitor_end(); I != E; ++I)
+ visitors.push_back((*I)->clone());
+
+ // Clear out the active path from any previous work.
+ PD.resetPath();
+ origReportConfigToken = R->getConfigurationChangeToken();
+
+ // Generate the very last diagnostic piece - the piece is visible before
+ // the trace is expanded.
+ PathDiagnosticPiece *LastPiece = 0;
+ for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
+ I != E; ++I) {
+ if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) {
+ assert (!LastPiece &&
+ "There can only be one final piece in a diagnostic.");
+ LastPiece = Piece;
+ }
}
- break;
- case PathDiagnosticConsumer::Minimal:
- if (!GenerateMinimalPathDiagnostic(PD, PDB, N, visitors)) {
- assert(!R->isValid() && "Failed on valid report");
- // Try again. We'll filter out the bad report when we trim the graph.
- return generatePathDiagnostic(PD, PC, bugReports);
+
+ if (ActiveScheme != PathDiagnosticConsumer::None) {
+ if (!LastPiece)
+ LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
+ assert(LastPiece);
+ PD.setEndOfPath(LastPiece);
}
- break;
- case PathDiagnosticConsumer::None:
- if (!GenerateVisitorsOnlyPathDiagnostic(PD, PDB, N, visitors)) {
- assert(!R->isValid() && "Failed on valid report");
- // Try again. We'll filter out the bad report when we trim the graph.
- return generatePathDiagnostic(PD, PC, bugReports);
+
+ // Make sure we get a clean location context map so we don't
+ // hold onto old mappings.
+ LCM.clear();
+
+ switch (ActiveScheme) {
+ case PathDiagnosticConsumer::AlternateExtensive:
+ GenerateAlternateExtensivePathDiagnostic(PD, PDB, N, LCM, visitors);
+ break;
+ case PathDiagnosticConsumer::Extensive:
+ GenerateExtensivePathDiagnostic(PD, PDB, N, LCM, visitors);
+ break;
+ case PathDiagnosticConsumer::Minimal:
+ GenerateMinimalPathDiagnostic(PD, PDB, N, LCM, visitors);
+ break;
+ case PathDiagnosticConsumer::None:
+ GenerateVisitorsOnlyPathDiagnostic(PD, PDB, N, visitors);
+ break;
}
- break;
- }
- // Clean up the visitors we used.
- llvm::DeleteContainerPointers(visitors);
+ // Clean up the visitors we used.
+ llvm::DeleteContainerPointers(visitors);
- // Did anything change while generating this path?
- finalReportConfigToken = R->getConfigurationChangeToken();
- } while(finalReportConfigToken != originalReportConfigToken);
+ // Did anything change while generating this path?
+ finalReportConfigToken = R->getConfigurationChangeToken();
+ } while (finalReportConfigToken != origReportConfigToken);
- // Finally, prune the diagnostic path of uninteresting stuff.
- if (!PD.path.empty()) {
- // Remove messages that are basically the same.
- removeRedundantMsgs(PD.getMutablePieces());
+ if (!R->isValid())
+ continue;
- if (R->shouldPrunePath() &&
- getEngine().getAnalysisManager().options.shouldPrunePaths()) {
- bool hasSomethingInteresting = RemoveUnneededCalls(PD.getMutablePieces(),
- R);
- assert(hasSomethingInteresting);
- (void) hasSomethingInteresting;
+ // Finally, prune the diagnostic path of uninteresting stuff.
+ if (!PD.path.empty()) {
+ // Remove messages that are basically the same.
+ removeRedundantMsgs(PD.getMutablePieces());
+
+ if (R->shouldPrunePath() &&
+ getEngine().getAnalysisManager().options.shouldPrunePaths()) {
+ bool stillHasNotes = removeUnneededCalls(PD.getMutablePieces(), R, LCM);
+ assert(stillHasNotes);
+ (void)stillHasNotes;
+ }
+
+ adjustCallLocations(PD.getMutablePieces());
+
+ if (ActiveScheme == PathDiagnosticConsumer::AlternateExtensive) {
+ ControlFlowBarrierSet CFBS;
+ OptimizedCallsSet OCS;
+ while (optimizeEdges(PD.getMutablePieces(), getSourceManager(), CFBS,
+ OCS, LCM)) {}
+ }
}
- adjustCallLocations(PD.getMutablePieces());
+ // We found a report and didn't suppress it.
+ return true;
}
- return true;
+ // We suppressed all the reports in this equivalence class.
+ assert(!HasInvalid && "Inconsistent suppression");
+ (void)HasInvalid;
+ return false;
}
void BugReporter::Register(BugType *BT) {
@@ -2396,6 +2851,9 @@ void BugReporter::FlushReport(BugReport *exampleReport,
exampleReport->getUniqueingLocation(),
exampleReport->getUniqueingDecl()));
+ MaxBugClassSize = std::max(bugReports.size(),
+ static_cast<size_t>(MaxBugClassSize));
+
// Generate the full path diagnostic, using the generation scheme
// specified by the PathDiagnosticConsumer. Note that we have to generate
// path diagnostics even for consumers which do not support paths, because
@@ -2404,6 +2862,9 @@ void BugReporter::FlushReport(BugReport *exampleReport,
if (!generatePathDiagnostic(*D.get(), PD, bugReports))
return;
+ MaxValidBugClassSize = std::max(bugReports.size(),
+ static_cast<size_t>(MaxValidBugClassSize));
+
// If the path is empty, generate a single step path with the location
// of the issue.
if (D->path.empty()) {
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 517c1a10f8..e078745737 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -41,7 +41,7 @@ bool bugreporter::isDeclRefExprToReference(const Expr *E) {
}
const Expr *bugreporter::getDerefExpr(const Stmt *S) {
- // Pattern match for a few useful cases (do something smarter later):
+ // Pattern match for a few useful cases:
// a[0], p->f, *p
const Expr *E = dyn_cast<Expr>(S);
if (!E)
@@ -61,6 +61,10 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
if (ME->isArrow() || isDeclRefExprToReference(ME->getBase())) {
return ME->getBase()->IgnoreParenCasts();
+ } else {
+ // If we have a member expr with a dot, the base must have been
+ // dereferenced.
+ return getDerefExpr(ME->getBase());
}
}
else if (const ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
@@ -69,6 +73,9 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(E)) {
return AE->getBase();
}
+ else if (isDeclRefExprToReference(E)) {
+ return E;
+ }
break;
}
@@ -137,11 +144,12 @@ class ReturnVisitor : public BugReporterVisitorImpl<ReturnVisitor> {
MaybeUnsuppress,
Satisfied
} Mode;
- bool InitiallySuppressed;
+
+ bool EnableNullFPSuppression;
public:
ReturnVisitor(const StackFrameContext *Frame, bool Suppressed)
- : StackFrame(Frame), Mode(Initial), InitiallySuppressed(Suppressed) {}
+ : StackFrame(Frame), Mode(Initial), EnableNullFPSuppression(Suppressed) {}
static void *getTag() {
static int Tag = 0;
@@ -151,7 +159,7 @@ public:
virtual void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(ReturnVisitor::getTag());
ID.AddPointer(StackFrame);
- ID.AddBoolean(InitiallySuppressed);
+ ID.AddBoolean(EnableNullFPSuppression);
}
/// Adds a ReturnVisitor if the given statement represents a call that was
@@ -162,7 +170,8 @@ public:
/// the statement is a call that was inlined, we add the visitor to the
/// bug report, so it can print a note later.
static void addVisitorIfNecessary(const ExplodedNode *Node, const Stmt *S,
- BugReport &BR) {
+ BugReport &BR,
+ bool InEnableNullFPSuppression) {
if (!CallEvent::isCallStmt(S))
return;
@@ -207,13 +216,13 @@ public:
assert(Eng && "Cannot file a bug report without an owning engine");
AnalyzerOptions &Options = Eng->getAnalysisManager().options;
- bool InitiallySuppressed = false;
- if (Options.shouldSuppressNullReturnPaths())
+ bool EnableNullFPSuppression = false;
+ if (InEnableNullFPSuppression && Options.shouldSuppressNullReturnPaths())
if (Optional<Loc> RetLoc = RetVal.getAs<Loc>())
- InitiallySuppressed = !State->assume(*RetLoc, true);
+ EnableNullFPSuppression = State->isNull(*RetLoc).isConstrainedTrue();
BR.markInteresting(CalleeContext);
- BR.addVisitor(new ReturnVisitor(CalleeContext, InitiallySuppressed));
+ BR.addVisitor(new ReturnVisitor(CalleeContext, EnableNullFPSuppression));
}
/// Returns true if any counter-suppression heuristics are enabled for
@@ -270,14 +279,16 @@ public:
// If we can't prove the return value is 0, just mark it interesting, and
// make sure to track it into any further inner functions.
- if (State->assume(V.castAs<DefinedSVal>(), true)) {
+ if (!State->isNull(V).isConstrainedTrue()) {
BR.markInteresting(V);
- ReturnVisitor::addVisitorIfNecessary(N, RetE, BR);
+ ReturnVisitor::addVisitorIfNecessary(N, RetE, BR,
+ EnableNullFPSuppression);
return 0;
}
// If we're returning 0, we should track where that 0 came from.
- bugreporter::trackNullOrUndefValue(N, RetE, BR);
+ bugreporter::trackNullOrUndefValue(N, RetE, BR, /*IsArg*/ false,
+ EnableNullFPSuppression);
// Build an appropriate message based on the return value.
SmallString<64> Msg;
@@ -289,7 +300,7 @@ public:
// the report is resurrected as valid later on.
ExprEngine &Eng = BRC.getBugReporter().getEngine();
AnalyzerOptions &Options = Eng.getAnalysisManager().options;
- if (InitiallySuppressed && hasCounterSuppression(Options))
+ if (EnableNullFPSuppression && hasCounterSuppression(Options))
Mode = MaybeUnsuppress;
if (RetE->getType()->isObjCObjectPointerType())
@@ -303,9 +314,9 @@ public:
if (LValue) {
if (const MemRegion *MR = LValue->getAsRegion()) {
if (MR->canPrintPretty()) {
- Out << " (reference to '";
+ Out << " (reference to ";
MR->printPretty(Out);
- Out << "')";
+ Out << ")";
}
}
} else {
@@ -357,10 +368,11 @@ public:
continue;
// Is it possible for this argument to be non-null?
- if (State->assume(*ArgV, true))
+ if (!State->isNull(*ArgV).isConstrainedTrue())
continue;
- if (bugreporter::trackNullOrUndefValue(N, ArgE, BR, /*IsArg=*/true))
+ if (bugreporter::trackNullOrUndefValue(N, ArgE, BR, /*IsArg=*/true,
+ EnableNullFPSuppression))
BR.removeInvalidation(ReturnVisitor::getTag(), StackFrame);
// If we /can't/ track the null pointer, we should err on the side of
@@ -390,7 +402,7 @@ public:
PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
BugReport &BR) {
- if (InitiallySuppressed)
+ if (EnableNullFPSuppression)
BR.markInvalid(ReturnVisitor::getTag(), StackFrame);
return 0;
}
@@ -403,6 +415,36 @@ void FindLastStoreBRVisitor ::Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(&tag);
ID.AddPointer(R);
ID.Add(V);
+ ID.AddBoolean(EnableNullFPSuppression);
+}
+
+/// Returns true if \p N represents the DeclStmt declaring and initializing
+/// \p VR.
+static bool isInitializationOfVar(const ExplodedNode *N, const VarRegion *VR) {
+ Optional<PostStmt> P = N->getLocationAs<PostStmt>();
+ if (!P)
+ return false;
+
+ const DeclStmt *DS = P->getStmtAs<DeclStmt>();
+ if (!DS)
+ return false;
+
+ if (DS->getSingleDecl() != VR->getDecl())
+ return false;
+
+ const MemSpaceRegion *VarSpace = VR->getMemorySpace();
+ const StackSpaceRegion *FrameSpace = dyn_cast<StackSpaceRegion>(VarSpace);
+ if (!FrameSpace) {
+ // If we ever directly evaluate global DeclStmts, this assertion will be
+ // invalid, but this still seems preferable to silently accepting an
+ // initialization that may be for a path-sensitive variable.
+ assert(VR->getDecl()->isStaticLocal() && "non-static stackless VarRegion");
+ return true;
+ }
+
+ assert(VR->getDecl()->hasLocalStorage());
+ const LocationContext *LCtx = N->getLocationContext();
+ return FrameSpace->getStackFrame() == LCtx->getCurrentStackFrame();
}
PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
@@ -419,16 +461,22 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
// First see if we reached the declaration of the region.
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
- if (Optional<PostStmt> P = Pred->getLocationAs<PostStmt>()) {
- if (const DeclStmt *DS = P->getStmtAs<DeclStmt>()) {
- if (DS->getSingleDecl() == VR->getDecl()) {
- StoreSite = Pred;
- InitE = VR->getDecl()->getInit();
- }
- }
+ if (isInitializationOfVar(Pred, VR)) {
+ StoreSite = Pred;
+ InitE = VR->getDecl()->getInit();
}
}
+ // If this is a post initializer expression, initializing the region, we
+ // should track the initializer expression.
+ if (Optional<PostInitializer> PIP = Pred->getLocationAs<PostInitializer>()) {
+ const MemRegion *FieldReg = (const MemRegion *)PIP->getLocationValue();
+ if (FieldReg && FieldReg == R) {
+ StoreSite = Pred;
+ InitE = PIP->getInitializer()->getInit();
+ }
+ }
+
// Otherwise, see if this is the store site:
// (1) Succ has this binding and Pred does not, i.e. this is
// where the binding first occurred.
@@ -470,6 +518,11 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
IsParam = true;
}
}
+
+ // If this is a CXXTempObjectRegion, the Expr responsible for its creation
+ // is wrapped inside of it.
+ if (const CXXTempObjectRegion *TmpR = dyn_cast<CXXTempObjectRegion>(R))
+ InitE = TmpR->getExpr();
}
if (!StoreSite)
@@ -482,16 +535,14 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (V.isUndef() || V.getAs<loc::ConcreteInt>()) {
if (!IsParam)
InitE = InitE->IgnoreParenCasts();
- bugreporter::trackNullOrUndefValue(StoreSite, InitE, BR, IsParam);
+ bugreporter::trackNullOrUndefValue(StoreSite, InitE, BR, IsParam,
+ EnableNullFPSuppression);
} else {
ReturnVisitor::addVisitorIfNecessary(StoreSite, InitE->IgnoreParenCasts(),
- BR);
+ BR, EnableNullFPSuppression);
}
}
- if (!R->canPrintPretty())
- return 0;
-
// Okay, we've found the binding. Emit an appropriate message.
SmallString<256> sbuf;
llvm::raw_svector_ostream os(sbuf);
@@ -503,9 +554,11 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
const VarRegion *VR = dyn_cast<VarRegion>(R);
if (DS) {
- action = "initialized to ";
+ action = R->canPrintPretty() ? "initialized to " :
+ "Initializing to ";
} else if (isa<BlockExpr>(S)) {
- action = "captured by block as ";
+ action = R->canPrintPretty() ? "captured by block as " :
+ "Captured by block as ";
if (VR) {
// See if we can get the BlockVarRegion.
ProgramStateRef State = StoreSite->getState();
@@ -515,19 +568,18 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (const VarRegion *OriginalR = BDR->getOriginalRegion(VR)) {
if (Optional<KnownSVal> KV =
State->getSVal(OriginalR).getAs<KnownSVal>())
- BR.addVisitor(new FindLastStoreBRVisitor(*KV, OriginalR));
+ BR.addVisitor(new FindLastStoreBRVisitor(*KV, OriginalR,
+ EnableNullFPSuppression));
}
}
}
}
if (action) {
- if (!R)
- return 0;
-
- os << '\'';
- R->printPretty(os);
- os << "' ";
+ if (R->canPrintPretty()) {
+ R->printPretty(os);
+ os << " ";
+ }
if (V.getAs<loc::ConcreteInt>()) {
bool b = false;
@@ -550,14 +602,18 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (V.isUndef()) {
if (isa<VarRegion>(R)) {
const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
- if (VD->getInit())
- os << "initialized to a garbage value";
- else
- os << "declared without an initial value";
+ if (VD->getInit()) {
+ os << (R->canPrintPretty() ? "initialized" : "Initializing")
+ << " to a garbage value";
+ } else {
+ os << (R->canPrintPretty() ? "declared" : "Declaring")
+ << " without an initial value";
+ }
}
}
else {
- os << "initialized here";
+ os << (R->canPrintPretty() ? "initialized" : "Initialized")
+ << " here";
}
}
}
@@ -583,10 +639,11 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
// Printed parameter indexes are 1-based, not 0-based.
unsigned Idx = Param->getFunctionScopeIndex() + 1;
- os << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter '";
-
- R->printPretty(os);
- os << '\'';
+ os << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter";
+ if (R->canPrintPretty()) {
+ os << " ";
+ R->printPretty(os);
+ }
}
}
@@ -596,27 +653,42 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (R->isBoundable()) {
if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
if (TR->getValueType()->isObjCObjectPointerType()) {
- os << "nil object reference stored to ";
+ os << "nil object reference stored";
b = true;
}
}
}
+ if (!b) {
+ if (R->canPrintPretty())
+ os << "Null pointer value stored";
+ else
+ os << "Storing null pointer value";
+ }
+
+ } else if (V.isUndef()) {
+ if (R->canPrintPretty())
+ os << "Uninitialized value stored";
+ else
+ os << "Storing uninitialized value";
- if (!b)
- os << "Null pointer value stored to ";
- }
- else if (V.isUndef()) {
- os << "Uninitialized value stored to ";
} else if (Optional<nonloc::ConcreteInt> CV =
V.getAs<nonloc::ConcreteInt>()) {
- os << "The value " << CV->getValue() << " is assigned to ";
- }
- else
- os << "Value assigned to ";
+ if (R->canPrintPretty())
+ os << "The value " << CV->getValue() << " is assigned";
+ else
+ os << "Assigning " << CV->getValue();
- os << '\'';
- R->printPretty(os);
- os << '\'';
+ } else {
+ if (R->canPrintPretty())
+ os << "Value assigned";
+ else
+ os << "Assigning value";
+ }
+
+ if (R->canPrintPretty()) {
+ os << " to ";
+ R->printPretty(os);
+ }
}
// Construct a new PathDiagnosticPiece.
@@ -645,30 +717,43 @@ const char *TrackConstraintBRVisitor::getTag() {
return "TrackConstraintBRVisitor";
}
+bool TrackConstraintBRVisitor::isUnderconstrained(const ExplodedNode *N) const {
+ if (IsZeroCheck)
+ return N->getState()->isNull(Constraint).isUnderconstrained();
+ return N->getState()->assume(Constraint, !Assumption);
+}
+
PathDiagnosticPiece *
TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) {
- if (isSatisfied)
+ if (IsSatisfied)
return NULL;
+ // Start tracking after we see the first state in which the value is
+ // constrained.
+ if (!IsTrackingTurnedOn)
+ if (!isUnderconstrained(N))
+ IsTrackingTurnedOn = true;
+ if (!IsTrackingTurnedOn)
+ return 0;
+
// Check if in the previous state it was feasible for this constraint
// to *not* be true.
- if (PrevN->getState()->assume(Constraint, !Assumption)) {
+ if (isUnderconstrained(PrevN)) {
- isSatisfied = true;
+ IsSatisfied = true;
// As a sanity check, make sure that the negation of the constraint
// was infeasible in the current state. If it is feasible, we somehow
// missed the transition point.
- if (N->getState()->assume(Constraint, !Assumption))
- return NULL;
+ assert(!isUnderconstrained(N));
// We found the transition point for the constraint. We now need to
// pretty-print the constraint. (work-in-progress)
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
+ SmallString<64> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
if (Constraint.getAs<Loc>()) {
os << "Assuming pointer value is ";
@@ -695,16 +780,22 @@ TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
SuppressInlineDefensiveChecksVisitor::
SuppressInlineDefensiveChecksVisitor(DefinedSVal Value, const ExplodedNode *N)
- : V(Value), IsSatisfied(false), StartN(N) {
+ : V(Value), IsSatisfied(false), IsTrackingTurnedOn(false) {
- assert(N->getState()->isNull(V).isConstrainedTrue() &&
- "The visitor only tracks the cases where V is constrained to 0");
+ // Check if the visitor is disabled.
+ SubEngine *Eng = N->getState()->getStateManager().getOwningEngine();
+ assert(Eng && "Cannot file a bug report without an owning engine");
+ AnalyzerOptions &Options = Eng->getAnalysisManager().options;
+ if (!Options.shouldSuppressInlinedDefensiveChecks())
+ IsSatisfied = true;
+
+ assert(N->getState()->isNull(V).isConstrainedTrue() &&
+ "The visitor only tracks the cases where V is constrained to 0");
}
void SuppressInlineDefensiveChecksVisitor::Profile(FoldingSetNodeID &ID) const {
static int id = 0;
ID.AddPointer(&id);
- ID.AddPointer(StartN);
ID.Add(V);
}
@@ -713,15 +804,6 @@ const char *SuppressInlineDefensiveChecksVisitor::getTag() {
}
PathDiagnosticPiece *
-SuppressInlineDefensiveChecksVisitor::getEndPath(BugReporterContext &BRC,
- const ExplodedNode *N,
- BugReport &BR) {
- if (StartN == BR.getErrorNode())
- StartN = 0;
- return 0;
-}
-
-PathDiagnosticPiece *
SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
const ExplodedNode *Pred,
BugReporterContext &BRC,
@@ -729,23 +811,19 @@ SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
if (IsSatisfied)
return 0;
- // Start tracking after we see node StartN.
- if (StartN == Succ)
- StartN = 0;
- if (StartN)
- return 0;
-
- AnalyzerOptions &Options =
- BRC.getBugReporter().getEngine().getAnalysisManager().options;
- if (!Options.shouldSuppressInlinedDefensiveChecks())
+ // Start tracking after we see the first state in which the value is null.
+ if (!IsTrackingTurnedOn)
+ if (Succ->getState()->isNull(V).isConstrainedTrue())
+ IsTrackingTurnedOn = true;
+ if (!IsTrackingTurnedOn)
return 0;
// Check if in the previous state it was feasible for this value
// to *not* be null.
- if (Pred->getState()->assume(V, true)) {
+ if (!Pred->getState()->isNull(V).isConstrainedTrue()) {
IsSatisfied = true;
- assert(!Succ->getState()->assume(V, true));
+ assert(Succ->getState()->isNull(V).isConstrainedTrue());
// Check if this is inlined defensive checks.
const LocationContext *CurLC =Succ->getLocationContext();
@@ -756,16 +834,73 @@ SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
return 0;
}
-bool bugreporter::trackNullOrUndefValue(const ExplodedNode *ErrorNode,
+static const MemRegion *getLocationRegionIfReference(const Expr *E,
+ const ExplodedNode *N) {
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ if (!VD->getType()->isReferenceType())
+ return 0;
+ ProgramStateManager &StateMgr = N->getState()->getStateManager();
+ MemRegionManager &MRMgr = StateMgr.getRegionManager();
+ return MRMgr.getVarRegion(VD, N->getLocationContext());
+ }
+ }
+
+ // FIXME: This does not handle other kinds of null references,
+ // for example, references from FieldRegions:
+ // struct Wrapper { int &ref; };
+ // Wrapper w = { *(int *)0 };
+ // w.ref = 1;
+
+ return 0;
+}
+
+static const Expr *peelOffOuterExpr(const Expr *Ex,
+ const ExplodedNode *N) {
+ Ex = Ex->IgnoreParenCasts();
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Ex))
+ return peelOffOuterExpr(EWC->getSubExpr(), N);
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Ex))
+ return peelOffOuterExpr(OVE->getSourceExpr(), N);
+
+ // Peel off the ternary operator.
+ if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(Ex)) {
+    // Find a node where the branching occurred and find out which branch
+ // we took (true/false) by looking at the ExplodedGraph.
+ const ExplodedNode *NI = N;
+ do {
+ ProgramPoint ProgPoint = NI->getLocation();
+ if (Optional<BlockEdge> BE = ProgPoint.getAs<BlockEdge>()) {
+ const CFGBlock *srcBlk = BE->getSrc();
+ if (const Stmt *term = srcBlk->getTerminator()) {
+ if (term == CO) {
+ bool TookTrueBranch = (*(srcBlk->succ_begin()) == BE->getDst());
+ if (TookTrueBranch)
+ return peelOffOuterExpr(CO->getTrueExpr(), N);
+ else
+ return peelOffOuterExpr(CO->getFalseExpr(), N);
+ }
+ }
+ }
+ NI = NI->getFirstPred();
+ } while (NI);
+ }
+ return Ex;
+}
+
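A sketch (invented names) of why peelOffOuterExpr consults the ExplodedGraph for ternaries: only the arm that was actually taken on the current path should be tracked further.

int *getNull();
int *getValid();

void ternary(bool flag) {
  int *p = flag ? getNull() : getValid();
  *p = 1;   // on the path where the true branch was taken, tracking continues into getNull()
}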
+bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
const Stmt *S,
- BugReport &report, bool IsArg) {
- if (!S || !ErrorNode)
+ BugReport &report, bool IsArg,
+ bool EnableNullFPSuppression) {
+ if (!S || !N)
return false;
- if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S))
- S = OVE->getSourceExpr();
-
- const ExplodedNode *N = ErrorNode;
+ if (const Expr *Ex = dyn_cast<Expr>(S)) {
+ Ex = Ex->IgnoreParenCasts();
+ const Expr *PeeledEx = peelOffOuterExpr(Ex, N);
+ if (Ex != PeeledEx)
+ S = PeeledEx;
+ }
const Expr *Inner = 0;
if (const Expr *Ex = dyn_cast<Expr>(S)) {
@@ -774,7 +909,7 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *ErrorNode,
Inner = Ex;
}
- if (IsArg) {
+ if (IsArg && !Inner) {
assert(N->getLocation().getAs<CallEnter>() && "Tracking arg but not at call");
} else {
// Walk through nodes until we get one that matches the statement exactly.
@@ -782,7 +917,7 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *ErrorNode,
// gone too far (though we can likely track the lvalue better anyway).
do {
const ProgramPoint &pp = N->getLocation();
- if (Optional<PostStmt> ps = pp.getAs<PostStmt>()) {
+ if (Optional<StmtPoint> ps = pp.getAs<StmtPoint>()) {
if (ps->getStmt() == S || ps->getStmt() == Inner)
break;
} else if (Optional<CallExitEnd> CEE = pp.getAs<CallExitEnd>()) {
@@ -799,86 +934,79 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *ErrorNode,
ProgramStateRef state = N->getState();
- // See if the expression we're interested refers to a variable.
+ // The result of the message send could be nil due to the receiver being nil.
+ // At this point in the path, the receiver should be live since we are at the
+ // message send expr. If it is nil, start tracking it.
+ if (const Expr *Receiver = NilReceiverBRVisitor::getNilReceiver(S, N))
+ trackNullOrUndefValue(N, Receiver, report, false, EnableNullFPSuppression);
+
+
+ // See if the expression we're interested in refers to a variable.
// If so, we can track both its contents and constraints on its value.
if (Inner && ExplodedGraph::isInterestingLValueExpr(Inner)) {
const MemRegion *R = 0;
- // First check if this is a DeclRefExpr for a C++ reference type.
- // For those, we want the location of the reference.
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Inner)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- if (VD->getType()->isReferenceType()) {
- ProgramStateManager &StateMgr = state->getStateManager();
- MemRegionManager &MRMgr = StateMgr.getRegionManager();
- R = MRMgr.getVarRegion(VD, N->getLocationContext());
- }
+ // Find the ExplodedNode where the lvalue (the value of 'Ex')
+ // was computed. We need this for getting the location value.
+ const ExplodedNode *LVNode = N;
+ while (LVNode) {
+ if (Optional<PostStmt> P = LVNode->getLocation().getAs<PostStmt>()) {
+ if (P->getStmt() == Inner)
+ break;
}
+ LVNode = LVNode->getFirstPred();
}
+ assert(LVNode && "Unable to find the lvalue node.");
+ ProgramStateRef LVState = LVNode->getState();
+ SVal LVal = LVState->getSVal(Inner, LVNode->getLocationContext());
+
+ if (LVState->isNull(LVal).isConstrainedTrue()) {
+ // In the case of C++ references, we want to differentiate between a null
+ // reference and a reference to a null pointer.
+ // If the LVal is null, check whether we are dealing with a null reference.
+ // For those, we want to track the location of the reference.
+ if (const MemRegion *RR = getLocationRegionIfReference(Inner, N))
+ R = RR;
+ } else {
+ R = LVState->getSVal(Inner, LVNode->getLocationContext()).getAsRegion();
- // For all other cases, find the location by scouring the ExplodedGraph.
- if (!R) {
- // Find the ExplodedNode where the lvalue (the value of 'Ex')
- // was computed. We need this for getting the location value.
- const ExplodedNode *LVNode = N;
- while (LVNode) {
- if (Optional<PostStmt> P = LVNode->getLocation().getAs<PostStmt>()) {
- if (P->getStmt() == Inner)
- break;
- }
- LVNode = LVNode->getFirstPred();
+ // If this is a C++ reference to a null pointer, we are tracking the
+ // pointer. In addition, we should find the store at which the reference
+ // got initialized.
+ if (const MemRegion *RR = getLocationRegionIfReference(Inner, N)) {
+ if (Optional<KnownSVal> KV = LVal.getAs<KnownSVal>())
+ report.addVisitor(new FindLastStoreBRVisitor(*KV, RR,
+ EnableNullFPSuppression));
}
- assert(LVNode && "Unable to find the lvalue node.");
- ProgramStateRef LVState = LVNode->getState();
- R = LVState->getSVal(Inner, LVNode->getLocationContext()).getAsRegion();
}
if (R) {
// Mark both the variable region and its contents as interesting.
- SVal V = state->getRawSVal(loc::MemRegionVal(R));
-
- // If the value matches the default for the variable region, that
- // might mean that it's been cleared out of the state. Fall back to
- // the full argument expression (with casts and such intact).
- if (IsArg) {
- bool UseArgValue = V.isUnknownOrUndef() || V.isZeroConstant();
- if (!UseArgValue) {
- const SymbolRegionValue *SRV =
- dyn_cast_or_null<SymbolRegionValue>(V.getAsLocSymbol());
- if (SRV)
- UseArgValue = (SRV->getRegion() == R);
- }
- if (UseArgValue)
- V = state->getSValAsScalarOrLoc(S, N->getLocationContext());
- }
+ SVal V = LVState->getRawSVal(loc::MemRegionVal(R));
report.markInteresting(R);
report.markInteresting(V);
report.addVisitor(new UndefOrNullArgVisitor(R));
- if (isa<SymbolicRegion>(R)) {
- TrackConstraintBRVisitor *VI =
- new TrackConstraintBRVisitor(loc::MemRegionVal(R), false);
- report.addVisitor(VI);
- }
-
// If the contents are symbolic, find out when they became null.
- if (V.getAsLocSymbol()) {
+ if (V.getAsLocSymbol(/*IncludeBaseRegions*/ true)) {
BugReporterVisitor *ConstraintTracker =
new TrackConstraintBRVisitor(V.castAs<DefinedSVal>(), false);
report.addVisitor(ConstraintTracker);
// Add visitor, which will suppress inline defensive checks.
- if (ErrorNode->getState()->isNull(V).isConstrainedTrue()) {
+ if (LVState->isNull(V).isConstrainedTrue() &&
+ EnableNullFPSuppression) {
BugReporterVisitor *IDCSuppressor =
new SuppressInlineDefensiveChecksVisitor(V.castAs<DefinedSVal>(),
- ErrorNode);
+ LVNode);
report.addVisitor(IDCSuppressor);
}
}
if (Optional<KnownSVal> KV = V.getAs<KnownSVal>())
- report.addVisitor(new FindLastStoreBRVisitor(*KV, R));
+ report.addVisitor(new FindLastStoreBRVisitor(*KV, R,
+ EnableNullFPSuppression));
return true;
}
}
@@ -891,7 +1019,8 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *ErrorNode,
// sure that function isn't pruned in our output.
if (const Expr *E = dyn_cast<Expr>(S))
S = E->IgnoreParenCasts();
- ReturnVisitor::addVisitorIfNecessary(N, S, report);
+
+ ReturnVisitor::addVisitorIfNecessary(N, S, report, EnableNullFPSuppression);
// Uncomment this to find cases where we aren't properly getting the
// base value that was dereferenced.
@@ -900,7 +1029,13 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *ErrorNode,
if (Optional<loc::MemRegionVal> L = V.getAs<loc::MemRegionVal>()) {
// At this point we are dealing with the region's LValue.
// However, if the rvalue is a symbolic region, we should track it as well.
- SVal RVal = state->getSVal(L->getRegion());
+ // Try to use the correct type when looking up the value.
+ SVal RVal;
+ if (const Expr *E = dyn_cast<Expr>(S))
+ RVal = state->getRawSVal(L.getValue(), E->getType());
+ else
+ RVal = state->getSVal(L->getRegion());
+
const MemRegion *RegionRVal = RVal.getAsRegion();
report.addVisitor(new UndefOrNullArgVisitor(L->getRegion()));
@@ -914,14 +1049,17 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *ErrorNode,
return true;
}
-BugReporterVisitor *
-FindLastStoreBRVisitor::createVisitorObject(const ExplodedNode *N,
- const MemRegion *R) {
- assert(R && "The memory region is null.");
-
- ProgramStateRef state = N->getState();
- if (Optional<KnownSVal> KV = state->getSVal(R).getAs<KnownSVal>())
- return new FindLastStoreBRVisitor(*KV, R);
+const Expr *NilReceiverBRVisitor::getNilReceiver(const Stmt *S,
+ const ExplodedNode *N) {
+ const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S);
+ if (!ME)
+ return 0;
+ if (const Expr *Receiver = ME->getInstanceReceiver()) {
+ ProgramStateRef state = N->getState();
+ SVal V = state->getSVal(Receiver, N->getLocationContext());
+ if (state->isNull(V).isConstrainedTrue())
+ return Receiver;
+ }
return 0;
}
@@ -929,38 +1067,41 @@ PathDiagnosticPiece *NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) {
- Optional<PostStmt> P = N->getLocationAs<PostStmt>();
+ Optional<PreStmt> P = N->getLocationAs<PreStmt>();
if (!P)
return 0;
- const ObjCMessageExpr *ME = P->getStmtAs<ObjCMessageExpr>();
- if (!ME)
- return 0;
- const Expr *Receiver = ME->getInstanceReceiver();
+
+ const Stmt *S = P->getStmt();
+ const Expr *Receiver = getNilReceiver(S, N);
if (!Receiver)
return 0;
- ProgramStateRef state = N->getState();
- const SVal &V = state->getSVal(Receiver, N->getLocationContext());
- Optional<DefinedOrUnknownSVal> DV = V.getAs<DefinedOrUnknownSVal>();
- if (!DV)
- return 0;
- state = state->assume(*DV, true);
- if (state)
- return 0;
+
+ llvm::SmallString<256> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ OS << "'" << ME->getSelector().getAsString() << "' not called";
+ }
+ else {
+ OS << "No method is called";
+ }
+ OS << " because the receiver is nil";
// The receiver was nil, and hence the method was skipped.
// Register a BugReporterVisitor to issue a message telling us how
// the receiver was null.
- bugreporter::trackNullOrUndefValue(N, Receiver, BR);
+ bugreporter::trackNullOrUndefValue(N, Receiver, BR, /*IsArg*/ false,
+ /*EnableNullFPSuppression*/ false);
// Issue a message saying that the method was skipped.
PathDiagnosticLocation L(Receiver, BRC.getSourceManager(),
N->getLocationContext());
- return new PathDiagnosticEventPiece(L, "No method is called "
- "because the receiver is nil");
+ return new PathDiagnosticEventPiece(L, OS.str());
}
// Registers every VarDecl inside a Stmt with a last store visitor.
void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
- const Stmt *S) {
+ const Stmt *S,
+ bool EnableNullFPSuppression) {
const ExplodedNode *N = BR.getErrorNode();
std::deque<const Stmt *> WorkList;
WorkList.push_back(S);
@@ -982,7 +1123,8 @@ void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
if (V.getAs<loc::ConcreteInt>() || V.getAs<nonloc::ConcreteInt>()) {
// Register a new visitor with the BugReport.
- BR.addVisitor(new FindLastStoreBRVisitor(V.castAs<KnownSVal>(), R));
+ BR.addVisitor(new FindLastStoreBRVisitor(V.castAs<KnownSVal>(), R,
+ EnableNullFPSuppression));
}
}
}
@@ -1282,7 +1424,7 @@ ConditionBRVisitor::VisitConditionVariable(StringRef LhsString,
Out << (tookTrue ? "not nil" : "nil");
else if (Ty->isBooleanType())
Out << (tookTrue ? "true" : "false");
- else if (Ty->isIntegerType())
+ else if (Ty->isIntegralOrEnumerationType())
Out << (tookTrue ? "non-zero" : "zero");
else
return 0;
@@ -1353,28 +1495,53 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
return event;
}
+
+// FIXME: Copied from ExprEngineCallAndReturn.cpp.
+static bool isInStdNamespace(const Decl *D) {
+ const DeclContext *DC = D->getDeclContext()->getEnclosingNamespaceContext();
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND)
+ return false;
+
+ while (const NamespaceDecl *Parent = dyn_cast<NamespaceDecl>(ND->getParent()))
+ ND = Parent;
+
+ return ND->getName() == "std";
+}
+
+
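A hedged illustration (user code invented) of the new 'std' suppression: when the report's final location context is a declaration inside namespace std, for example deep inside an inlined container member function, the report is marked invalid rather than shown to the user.

#include <vector>

void fill(std::vector<int> &v) {
  v.push_back(1);  // a path that ends inside the inlined std::vector implementation
                   // is suppressed when shouldSuppressFromCXXStandardLibrary() is enabled
}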
PathDiagnosticPiece *
LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
BugReport &BR) {
- const Stmt *S = BR.getStmt();
- if (!S)
- return 0;
-
- // Here we suppress false positives coming from system macros. This list is
+ // Here we suppress false positives coming from system headers. This list is
// based on known issues.
+ // Skip reports within the 'std' namespace. Although these can sometimes be
+ // the user's fault, we currently don't report them very well.
+ // Note that this will not help for any other data structure libraries, like
+ // TR1, Boost, or llvm/ADT.
+ ExprEngine &Eng = BRC.getBugReporter().getEngine();
+ AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+ if (Options.shouldSuppressFromCXXStandardLibrary()) {
+ const LocationContext *LCtx = N->getLocationContext();
+ if (isInStdNamespace(LCtx->getDecl())) {
+ BR.markInvalid(getTag(), 0);
+ return 0;
+ }
+ }
+
// Skip reports within the sys/queue.h macros as we do not have the ability to
// reason about data structure shapes.
SourceManager &SM = BRC.getSourceManager();
- SourceLocation Loc = S->getLocStart();
+ FullSourceLoc Loc = BR.getLocation(SM).asLocation();
while (Loc.isMacroID()) {
if (SM.isInSystemMacro(Loc) &&
(SM.getFilename(SM.getSpellingLoc(Loc)).endswith("sys/queue.h"))) {
BR.markInvalid(getTag(), 0);
return 0;
}
- Loc = SM.getSpellingLoc(Loc);
+ Loc = Loc.getSpellingLoc();
}
return 0;
diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp
index c482978ca2..dfd20b8b33 100644
--- a/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -125,7 +125,7 @@ static bool isPointerToConst(QualType Ty) {
// Try to retrieve the function declaration and find the function parameter
// types which are pointers/references to a non-pointer const.
// We will not invalidate the corresponding argument regions.
-static void findPtrToConstParams(llvm::SmallSet<unsigned, 1> &PreserveArgs,
+static void findPtrToConstParams(llvm::SmallSet<unsigned, 4> &PreserveArgs,
const CallEvent &Call) {
unsigned Idx = 0;
for (CallEvent::param_type_iterator I = Call.param_type_begin(),
@@ -137,71 +137,35 @@ static void findPtrToConstParams(llvm::SmallSet<unsigned, 1> &PreserveArgs,
}
ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
- ProgramStateRef Orig) const {
+ ProgramStateRef Orig) const {
ProgramStateRef Result = (Orig ? Orig : getState());
- SmallVector<const MemRegion *, 8> RegionsToInvalidate;
- getExtraInvalidatedRegions(RegionsToInvalidate);
+ SmallVector<SVal, 8> ConstValues;
+ SmallVector<SVal, 8> ValuesToInvalidate;
+
+ getExtraInvalidatedValues(ValuesToInvalidate);
// Indexes of arguments whose values will be preserved by the call.
- llvm::SmallSet<unsigned, 1> PreserveArgs;
+ llvm::SmallSet<unsigned, 4> PreserveArgs;
if (!argumentsMayEscape())
findPtrToConstParams(PreserveArgs, *this);
for (unsigned Idx = 0, Count = getNumArgs(); Idx != Count; ++Idx) {
+ // Record this argument's value either as preserved (const) or as a value to
+ // invalidate. The actual invalidation is batched below for efficiency.
if (PreserveArgs.count(Idx))
- continue;
-
- SVal V = getArgSVal(Idx);
-
- // If we are passing a location wrapped as an integer, unwrap it and
- // invalidate the values referred by the location.
- if (Optional<nonloc::LocAsInteger> Wrapped =
- V.getAs<nonloc::LocAsInteger>())
- V = Wrapped->getLoc();
- else if (!V.getAs<Loc>())
- continue;
-
- if (const MemRegion *R = V.getAsRegion()) {
- // Invalidate the value of the variable passed by reference.
-
- // Are we dealing with an ElementRegion? If the element type is
- // a basic integer type (e.g., char, int) and the underlying region
- // is a variable region then strip off the ElementRegion.
- // FIXME: We really need to think about this for the general case
- // as sometimes we are reasoning about arrays and other times
- // about (char*), etc., is just a form of passing raw bytes.
- // e.g., void *p = alloca(); foo((char*)p);
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
- // Checking for 'integral type' is probably too promiscuous, but
- // we'll leave it in for now until we have a systematic way of
- // handling all of these cases. Eventually we need to come up
- // with an interface to StoreManager so that this logic can be
- // appropriately delegated to the respective StoreManagers while
- // still allowing us to do checker-specific logic (e.g.,
- // invalidating reference counts), probably via callbacks.
- if (ER->getElementType()->isIntegralOrEnumerationType()) {
- const MemRegion *superReg = ER->getSuperRegion();
- if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
- isa<ObjCIvarRegion>(superReg))
- R = cast<TypedRegion>(superReg);
- }
- // FIXME: What about layers of ElementRegions?
- }
-
- // Mark this region for invalidation. We batch invalidate regions
- // below for efficiency.
- RegionsToInvalidate.push_back(R);
- }
+ ConstValues.push_back(getArgSVal(Idx));
+ else
+ ValuesToInvalidate.push_back(getArgSVal(Idx));
}
// Invalidate designated regions using the batch invalidation API.
// NOTE: Even if RegionsToInvalidate is empty, we may still invalidate
// global variables.
- return Result->invalidateRegions(RegionsToInvalidate, getOriginExpr(),
+ return Result->invalidateRegions(ValuesToInvalidate, getOriginExpr(),
BlockCount, getLocationContext(),
/*CausedByPointerEscape*/ true,
- /*Symbols=*/0, this);
+ /*Symbols=*/0, this, ConstValues);
}
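Illustration (invented declarations) of the behavioral point behind ConstValues: arguments reachable only through a pointer-to-const parameter keep their bindings, while other pointer arguments are invalidated, and checkers are now told about both groups.

void inspect(const int *p);   // may read but not legally modify *p
void mutate(int *p);

void caller() {
  int x = 1;
  inspect(&x);   // x keeps its value across the call (const pointer escape)
  mutate(&x);    // x is invalidated: the callee may have written through p
}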
ProgramPoint CallEvent::getProgramPoint(bool IsPreVisit,
@@ -275,8 +239,20 @@ QualType CallEvent::getDeclaredResultType(const Decl *D) {
assert(D);
if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D))
return FD->getResultType();
- else if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(D))
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getResultType();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ // Blocks are difficult because the return type may not be stored in the
+ // BlockDecl itself. The AST should probably be enhanced, but for now we
+ // just do what we can.
+ QualType Ty = BD->getSignatureAsWritten()->getType();
+ if (const FunctionType *FT = Ty->getAs<FunctionType>())
+ if (!FT->getResultType()->isDependentType())
+ return FT->getResultType();
+
+ return QualType();
+ }
+
return QualType();
}
@@ -407,9 +383,8 @@ const FunctionDecl *CXXInstanceCall::getDecl() const {
return getSVal(CE->getCallee()).getAsFunctionDecl();
}
-void CXXInstanceCall::getExtraInvalidatedRegions(RegionList &Regions) const {
- if (const MemRegion *R = getCXXThisVal().getAsRegion())
- Regions.push_back(R);
+void CXXInstanceCall::getExtraInvalidatedValues(ValueList &Values) const {
+ Values.push_back(getCXXThisVal());
}
SVal CXXInstanceCall::getCXXThisVal() const {
@@ -562,10 +537,10 @@ CallEvent::param_iterator BlockCall::param_end() const {
return D->param_end();
}
-void BlockCall::getExtraInvalidatedRegions(RegionList &Regions) const {
+void BlockCall::getExtraInvalidatedValues(ValueList &Values) const {
// FIXME: This also needs to invalidate captured globals.
if (const MemRegion *R = getBlockRegion())
- Regions.push_back(R);
+ Values.push_back(loc::MemRegionVal(R));
}
void BlockCall::getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
@@ -583,9 +558,9 @@ SVal CXXConstructorCall::getCXXThisVal() const {
return UnknownVal();
}
-void CXXConstructorCall::getExtraInvalidatedRegions(RegionList &Regions) const {
+void CXXConstructorCall::getExtraInvalidatedValues(ValueList &Values) const {
if (Data)
- Regions.push_back(static_cast<const MemRegion *>(Data));
+ Values.push_back(loc::MemRegionVal(static_cast<const MemRegion *>(Data)));
}
void CXXConstructorCall::getInitialStackFrameContents(
@@ -637,9 +612,8 @@ CallEvent::param_iterator ObjCMethodCall::param_end() const {
}
void
-ObjCMethodCall::getExtraInvalidatedRegions(RegionList &Regions) const {
- if (const MemRegion *R = getReceiverSVal().getAsRegion())
- Regions.push_back(R);
+ObjCMethodCall::getExtraInvalidatedValues(ValueList &Values) const {
+ Values.push_back(getReceiverSVal());
}
SVal ObjCMethodCall::getSelfSVal() const {
diff --git a/lib/StaticAnalyzer/Core/CheckerManager.cpp b/lib/StaticAnalyzer/Core/CheckerManager.cpp
index a383799788..8adf3262b3 100644
--- a/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -489,19 +489,19 @@ ProgramStateRef
CheckerManager::runCheckersForPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
- PointerEscapeKind Kind) {
+ PointerEscapeKind Kind,
+ bool IsConst) {
assert((Call != NULL ||
(Kind != PSK_DirectEscapeOnCall &&
Kind != PSK_IndirectEscapeOnCall)) &&
"Call must not be NULL when escaping on call");
-
- for (unsigned i = 0, e = PointerEscapeCheckers.size(); i != e; ++i) {
- // If any checker declares the state infeasible (or if it starts that way),
- // bail out.
- if (!State)
- return NULL;
- State = PointerEscapeCheckers[i](State, Escaped, Call, Kind);
- }
+ for (unsigned i = 0, e = PointerEscapeCheckers.size(); i != e; ++i) {
+ // If any checker declares the state infeasible (or if it starts that
+ // way), bail out.
+ if (!State)
+ return NULL;
+ State = PointerEscapeCheckers[i](State, Escaped, Call, Kind, IsConst);
+ }
return State;
}
@@ -666,6 +666,11 @@ void CheckerManager::_registerForPointerEscape(CheckPointerEscapeFunc checkfn){
PointerEscapeCheckers.push_back(checkfn);
}
+void CheckerManager::_registerForConstPointerEscape(
+ CheckPointerEscapeFunc checkfn) {
+ PointerEscapeCheckers.push_back(checkfn);
+}
+
void CheckerManager::_registerForEvalAssume(EvalAssumeFunc checkfn) {
EvalAssumeCheckers.push_back(checkfn);
}
diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp
index c61bcf7d41..b09b2c2ddf 100644
--- a/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -346,6 +346,11 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
default:
llvm_unreachable("Analysis for this terminator not implemented.");
+ // Model static initializers.
+ case Stmt::DeclStmtClass:
+ HandleStaticInit(cast<DeclStmt>(Term), B, Pred);
+ return;
+
case Stmt::BinaryOperatorClass: // '&&' and '||'
HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
return;
@@ -456,6 +461,19 @@ void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
enqueue(Dst);
}
+
+void CoreEngine::HandleStaticInit(const DeclStmt *DS, const CFGBlock *B,
+ ExplodedNode *Pred) {
+ assert(B->succ_size() == 2);
+ NodeBuilderContext Ctx(*this, B, Pred);
+ ExplodedNodeSet Dst;
+ SubEng.processStaticInitializer(DS, Ctx, Pred, Dst,
+ *(B->succ_begin()), *(B->succ_begin()+1));
+ // Enqueue the new frontier onto the worklist.
+ enqueue(Dst);
+}
+
+
void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
ExplodedNode *Pred) {
assert(B);
diff --git a/lib/StaticAnalyzer/Core/Environment.cpp b/lib/StaticAnalyzer/Core/Environment.cpp
index fe352aa8b4..7b133f6bf6 100644
--- a/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/lib/StaticAnalyzer/Core/Environment.cpp
@@ -80,43 +80,17 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
llvm_unreachable("Should have been handled by ignoreTransparentExprs");
case Stmt::AddrLabelExprClass:
- return svalBuilder.makeLoc(cast<AddrLabelExpr>(S));
-
- case Stmt::CharacterLiteralClass: {
- const CharacterLiteral *C = cast<CharacterLiteral>(S);
- return svalBuilder.makeIntVal(C->getValue(), C->getType());
- }
-
+ case Stmt::CharacterLiteralClass:
case Stmt::CXXBoolLiteralExprClass:
- return svalBuilder.makeBoolVal(cast<CXXBoolLiteralExpr>(S));
-
case Stmt::CXXScalarValueInitExprClass:
- case Stmt::ImplicitValueInitExprClass: {
- QualType Ty = cast<Expr>(S)->getType();
- return svalBuilder.makeZeroVal(Ty);
- }
-
+ case Stmt::ImplicitValueInitExprClass:
case Stmt::IntegerLiteralClass:
- return svalBuilder.makeIntVal(cast<IntegerLiteral>(S));
-
case Stmt::ObjCBoolLiteralExprClass:
- return svalBuilder.makeBoolVal(cast<ObjCBoolLiteralExpr>(S));
-
- // For special C0xx nullptr case, make a null pointer SVal.
case Stmt::CXXNullPtrLiteralExprClass:
- return svalBuilder.makeNull();
-
- case Stmt::ObjCStringLiteralClass: {
- MemRegionManager &MRMgr = svalBuilder.getRegionManager();
- const ObjCStringLiteral *SL = cast<ObjCStringLiteral>(S);
- return svalBuilder.makeLoc(MRMgr.getObjCStringRegion(SL));
- }
-
- case Stmt::StringLiteralClass: {
- MemRegionManager &MRMgr = svalBuilder.getRegionManager();
- const StringLiteral *SL = cast<StringLiteral>(S);
- return svalBuilder.makeLoc(MRMgr.getStringRegion(SL));
- }
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::StringLiteralClass:
+ // Known constants; defer to SValBuilder.
+ return svalBuilder.getConstantVal(cast<Expr>(S)).getValue();
case Stmt::ReturnStmtClass: {
const ReturnStmt *RS = cast<ReturnStmt>(S);
@@ -127,10 +101,8 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
// Handle all other Stmt* using a lookup.
default:
- break;
+ return lookupExpr(EnvironmentEntry(S, LCtx));
}
-
- return lookupExpr(EnvironmentEntry(S, LCtx));
}
Environment EnvironmentManager::bindExpr(Environment Env,
diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index 2210b4e2d7..af9518acc7 100644
--- a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -117,8 +117,7 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
return false;
// Condition 4.
- PostStmt ps = progPoint.castAs<PostStmt>();
- if (ps.getTag())
+ if (progPoint.getTag())
return false;
// Conditions 5, 6, and 7.
@@ -128,8 +127,9 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
progPoint.getLocationContext() != pred->getLocationContext())
return false;
- // All further checks require expressions.
- const Expr *Ex = dyn_cast<Expr>(ps.getStmt());
+ // All further checks require expressions. As per #3, we know that we have
+ // a PostStmt.
+ const Expr *Ex = dyn_cast<Expr>(progPoint.castAs<PostStmt>().getStmt());
if (!Ex)
return false;
@@ -331,45 +331,31 @@ ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
return V;
}
-std::pair<ExplodedGraph*, InterExplodedGraphMap*>
-ExplodedGraph::Trim(const NodeTy* const* NBeg, const NodeTy* const* NEnd,
- llvm::DenseMap<const void*, const void*> *InverseMap) const {
+ExplodedGraph *
+ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
+ InterExplodedGraphMap *ForwardMap,
+ InterExplodedGraphMap *InverseMap) const{
- if (NBeg == NEnd)
- return std::make_pair((ExplodedGraph*) 0,
- (InterExplodedGraphMap*) 0);
-
- assert (NBeg < NEnd);
-
- OwningPtr<InterExplodedGraphMap> M(new InterExplodedGraphMap());
-
- ExplodedGraph* G = TrimInternal(NBeg, NEnd, M.get(), InverseMap);
-
- return std::make_pair(static_cast<ExplodedGraph*>(G), M.take());
-}
-
-ExplodedGraph*
-ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
- const ExplodedNode* const* EndSources,
- InterExplodedGraphMap* M,
- llvm::DenseMap<const void*, const void*> *InverseMap) const {
+ if (Nodes.empty())
+ return 0;
typedef llvm::DenseSet<const ExplodedNode*> Pass1Ty;
Pass1Ty Pass1;
- typedef llvm::DenseMap<const ExplodedNode*, ExplodedNode*> Pass2Ty;
- Pass2Ty& Pass2 = M->M;
+ typedef InterExplodedGraphMap Pass2Ty;
+ InterExplodedGraphMap Pass2Scratch;
+ Pass2Ty &Pass2 = ForwardMap ? *ForwardMap : Pass2Scratch;
SmallVector<const ExplodedNode*, 10> WL1, WL2;
// ===- Pass 1 (reverse DFS) -===
- for (const ExplodedNode* const* I = BeginSources; I != EndSources; ++I) {
+ for (ArrayRef<const NodeTy *>::iterator I = Sinks.begin(), E = Sinks.end();
+ I != E; ++I) {
if (*I)
WL1.push_back(*I);
}
- // Process the first worklist until it is empty. Because it is a std::list
- // it acts like a FIFO queue.
+ // Process the first worklist until it is empty.
while (!WL1.empty()) {
const ExplodedNode *N = WL1.back();
WL1.pop_back();
@@ -432,7 +418,7 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
if (PI == Pass2.end())
continue;
- NewN->addPredecessor(PI->second, *G);
+ NewN->addPredecessor(const_cast<ExplodedNode *>(PI->second), *G);
}
// In the case that some of the intended successors of NewN have already
@@ -443,7 +429,7 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
I != E; ++I) {
Pass2Ty::iterator PI = Pass2.find(*I);
if (PI != Pass2.end()) {
- PI->second->addPredecessor(NewN, *G);
+ const_cast<ExplodedNode *>(PI->second)->addPredecessor(NewN, *G);
continue;
}
@@ -456,13 +442,3 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
return G;
}
-void InterExplodedGraphMap::anchor() { }
-
-ExplodedNode*
-InterExplodedGraphMap::getMappedNode(const ExplodedNode *N) const {
- llvm::DenseMap<const ExplodedNode*, ExplodedNode*>::const_iterator I =
- M.find(N);
-
- return I == M.end() ? 0 : I->second;
-}
-
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 437af86ccf..bfe4e15a71 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -106,7 +106,8 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
const ParmVarDecl *PD = FD->getParamDecl(0);
QualType T = PD->getType();
- if (!T->isIntegerType())
+ const BuiltinType *BT = dyn_cast<BuiltinType>(T);
+ if (!BT || !BT->isInteger())
break;
const MemRegion *R = state->getRegion(PD, InitLoc);
@@ -171,18 +172,29 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
const Expr *Ex,
const Expr *Result) {
SVal V = State->getSVal(Ex, LC);
- if (!Result && !V.getAs<NonLoc>())
- return State;
+ if (!Result) {
+ // If we don't have an explicit result expression, we're in "if needed"
+ // mode. Only create a region if the current value is a NonLoc.
+ if (!V.getAs<NonLoc>())
+ return State;
+ Result = Ex;
+ } else {
+ // We need to create a region no matter what. For sanity, make sure we don't
+ // try to stuff a Loc into a non-pointer temporary region.
+ assert(!V.getAs<Loc>() || Loc::isLocType(Result->getType()) ||
+ Result->getType()->isMemberPointerType());
+ }
ProgramStateManager &StateMgr = State->getStateManager();
MemRegionManager &MRMgr = StateMgr.getRegionManager();
StoreManager &StoreMgr = StateMgr.getStoreManager();
// We need to be careful about treating a derived type's value as
- // bindings for a base type. Start by stripping and recording base casts.
+ // bindings for a base type. Unless we're creating a temporary pointer region,
+ // start by stripping and recording base casts.
SmallVector<const CastExpr *, 4> Casts;
const Expr *Inner = Ex->IgnoreParens();
- if (V.getAs<NonLoc>()) {
+ if (!Loc::isLocType(Result->getType())) {
while (const CastExpr *CE = dyn_cast<CastExpr>(Inner)) {
if (CE->getCastKind() == CK_DerivedToBase ||
CE->getCastKind() == CK_UncheckedDerivedToBase)
@@ -195,8 +207,13 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
}
// Create a temporary object region for the inner expression (which may have
- // a more derived type) and bind the NonLoc value into it.
- SVal Reg = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(Inner, LC));
+ // a more derived type) and bind the value into it.
+ const TypedValueRegion *TR = MRMgr.getCXXTempObjectRegion(Inner, LC);
+ SVal Reg = loc::MemRegionVal(TR);
+
+ if (V.isUnknown())
+ V = getSValBuilder().conjureSymbolVal(Result, LC, TR->getValueType(),
+ currBldrCtx->blockCount());
State = State->bindLoc(Reg, V);
// Re-apply the casts (from innermost to outermost) for type sanity.
@@ -206,7 +223,7 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
Reg = StoreMgr.evalDerivedToBase(Reg, *I);
}
- State = State->BindExpr(Result ? Result : Ex, LC, Reg);
+ State = State->BindExpr(Result, LC, Reg);
return State;
}
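A sketch (types invented) of the case createTemporaryRegionIfNeeded handles: a prvalue that needs a materialized region, including the derived-to-base view preserved by stripping and reapplying the base casts.

struct Base { int x; };
struct Derived : Base { int y; };

Derived make();

void bindTemporary() {
  const Base &b = make();  // temporary of type Derived, referred to through a Base lvalue
  int v = b.x;
  (void)v;
}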
@@ -423,8 +440,8 @@ void ExprEngine::ProcessInitializer(const CFGInitializer Init,
ProgramStateRef State = Pred->getState();
SVal thisVal = State->getSVal(svalBuilder.getCXXThis(decl, stackFrame));
- PostInitializer PP(BMI, stackFrame);
ExplodedNodeSet Tmp(Pred);
+ SVal FieldLoc;
// Evaluate the initializer, if necessary
if (BMI->isAnyMemberInitializer()) {
@@ -432,15 +449,43 @@ void ExprEngine::ProcessInitializer(const CFGInitializer Init,
// but non-objects must be copied in from the initializer.
const Expr *Init = BMI->getInit()->IgnoreImplicit();
if (!isa<CXXConstructExpr>(Init)) {
- SVal FieldLoc;
- if (BMI->isIndirectMemberInitializer())
+ const ValueDecl *Field;
+ if (BMI->isIndirectMemberInitializer()) {
+ Field = BMI->getIndirectMember();
FieldLoc = State->getLValue(BMI->getIndirectMember(), thisVal);
- else
+ } else {
+ Field = BMI->getMember();
FieldLoc = State->getLValue(BMI->getMember(), thisVal);
+ }
- SVal InitVal = State->getSVal(BMI->getInit(), stackFrame);
+ SVal InitVal;
+ if (BMI->getNumArrayIndices() > 0) {
+ // Handle arrays of trivial type. We can represent this with a
+ // primitive load/copy from the base array region.
+ const ArraySubscriptExpr *ASE;
+ while ((ASE = dyn_cast<ArraySubscriptExpr>(Init)))
+ Init = ASE->getBase()->IgnoreImplicit();
+
+ SVal LValue = State->getSVal(Init, stackFrame);
+ if (Optional<Loc> LValueLoc = LValue.getAs<Loc>())
+ InitVal = State->getSVal(*LValueLoc);
+
+ // If we fail to get the value for some reason, use a symbolic value.
+ if (InitVal.isUnknownOrUndef()) {
+ SValBuilder &SVB = getSValBuilder();
+ InitVal = SVB.conjureSymbolVal(BMI->getInit(), stackFrame,
+ Field->getType(),
+ currBldrCtx->blockCount());
+ }
+ } else {
+ InitVal = State->getSVal(BMI->getInit(), stackFrame);
+ }
+ assert(Tmp.size() == 1 && "have not generated any new nodes yet");
+ assert(*Tmp.begin() == Pred && "have not generated any new nodes yet");
Tmp.clear();
+
+ PostInitializer PP(BMI, FieldLoc.getAsRegion(), stackFrame);
evalBind(Tmp, Init, Pred, FieldLoc, InitVal, /*isInit=*/true, &PP);
}
} else {
@@ -450,6 +495,7 @@ void ExprEngine::ProcessInitializer(const CFGInitializer Init,
// Construct PostInitializer nodes whether the state changed or not,
// so that the diagnostics don't get confused.
+ PostInitializer PP(BMI, FieldLoc.getAsRegion(), stackFrame);
ExplodedNodeSet Dst;
NodeBuilder Bldr(Tmp, Dst, *currBldrCtx);
for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
@@ -488,18 +534,20 @@ void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D,
void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- ProgramStateRef state = Pred->getState();
const VarDecl *varDecl = Dtor.getVarDecl();
-
QualType varType = varDecl->getType();
- if (const ReferenceType *refType = varType->getAs<ReferenceType>())
- varType = refType->getPointeeType();
+ ProgramStateRef state = Pred->getState();
+ SVal dest = state->getLValue(varDecl, Pred->getLocationContext());
+ const MemRegion *Region = dest.castAs<loc::MemRegionVal>().getRegion();
- Loc dest = state->getLValue(varDecl, Pred->getLocationContext());
+ if (const ReferenceType *refType = varType->getAs<ReferenceType>()) {
+ varType = refType->getPointeeType();
+ Region = state->getSVal(Region).getAsRegion();
+ }
- VisitCXXDestructor(varType, dest.castAs<loc::MemRegionVal>().getRegion(),
- Dtor.getTriggerStmt(), /*IsBase=*/ false, Pred, Dst);
+ VisitCXXDestructor(varType, Region, Dtor.getTriggerStmt(), /*IsBase=*/ false,
+ Pred, Dst);
}
void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
@@ -556,11 +604,13 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
switch (S->getStmtClass()) {
// C++ and ARC stuff we don't support yet.
case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Stmt::CXXDefaultInitExprClass:
case Stmt::CXXDependentScopeMemberExprClass:
case Stmt::CXXPseudoDestructorExprClass:
case Stmt::CXXTryStmtClass:
case Stmt::CXXTypeidExprClass:
case Stmt::CXXUuidofExprClass:
+ case Stmt::MSPropertyRefExprClass:
case Stmt::CXXUnresolvedConstructExprClass:
case Stmt::DependentScopeDeclRefExprClass:
case Stmt::UnaryTypeTraitExprClass:
@@ -607,6 +657,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::SwitchStmtClass:
case Stmt::WhileStmtClass:
case Expr::MSDependentExistsStmtClass:
+ case Stmt::CapturedStmtClass:
llvm_unreachable("Stmt should not be in analyzer evaluation loop");
case Stmt::ObjCSubscriptRefExprClass:
@@ -690,21 +741,22 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
const CXXDefaultArgExpr *DefaultE = cast<CXXDefaultArgExpr>(S);
const Expr *ArgE = DefaultE->getExpr();
- // Avoid creating and destroying a lot of APSInts.
- SVal V;
- llvm::APSInt Result;
+ bool IsTemporary = false;
+ if (const MaterializeTemporaryExpr *MTE =
+ dyn_cast<MaterializeTemporaryExpr>(ArgE)) {
+ ArgE = MTE->GetTemporaryExpr();
+ IsTemporary = true;
+ }
+
+ Optional<SVal> ConstantVal = svalBuilder.getConstantVal(ArgE);
+ if (!ConstantVal)
+ ConstantVal = UnknownVal();
for (ExplodedNodeSet::iterator I = PreVisit.begin(), E = PreVisit.end();
I != E; ++I) {
ProgramStateRef State = (*I)->getState();
-
- if (ArgE->EvaluateAsInt(Result, getContext()))
- V = svalBuilder.makeIntVal(Result);
- else
- V = State->getSVal(ArgE, LCtx);
-
- State = State->BindExpr(DefaultE, LCtx, V);
- if (DefaultE->isGLValue())
+ State = State->BindExpr(DefaultE, LCtx, *ConstantVal);
+ if (IsTemporary)
State = createTemporaryRegionIfNeeded(State, LCtx, DefaultE,
DefaultE);
Bldr2.generateNode(S, *I, State);
@@ -814,9 +866,13 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef NewState =
createTemporaryRegionIfNeeded(State, LCtx, OCE->getArg(0));
- if (NewState != State)
+ if (NewState != State) {
Pred = Bldr.generateNode(OCE, Pred, NewState, /*Tag=*/0,
ProgramPoint::PreStmtKind);
+ // Did we cache out?
+ if (!Pred)
+ break;
+ }
}
}
// FALLTHROUGH
@@ -1189,7 +1245,7 @@ static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
while (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
QualType T = CE->getType();
- if (!T->isIntegerType())
+ if (!T->isIntegralOrEnumerationType())
return UnknownVal();
uint64_t newBits = Ctx.getTypeSize(T);
@@ -1204,7 +1260,8 @@ static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
// We reached a non-cast. Is it a symbolic value?
QualType T = Ex->getType();
- if (!bitsInit || !T->isIntegerType() || Ctx.getTypeSize(T) > bits)
+ if (!bitsInit || !T->isIntegralOrEnumerationType() ||
+ Ctx.getTypeSize(T) > bits)
return UnknownVal();
return state->getSVal(Ex, LCtx);
@@ -1296,7 +1353,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
if (X.isUnknownOrUndef()) {
// Give it a chance to recover from unknown.
if (const Expr *Ex = dyn_cast<Expr>(Condition)) {
- if (Ex->getType()->isIntegerType()) {
+ if (Ex->getType()->isIntegralOrEnumerationType()) {
// Try to recover some path-sensitivity. Right now casts of symbolic
// integers that promote their values are currently not tracked well.
// If 'Condition' is such an expression, try and recover the
@@ -1344,6 +1401,34 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
currBldrCtx = 0;
}
+/// The GDM component containing the set of global variables which have been
+/// previously initialized with explicit initializers.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(InitializedGlobalsSet,
+ llvm::ImmutableSet<const VarDecl *>)
+
+void ExprEngine::processStaticInitializer(const DeclStmt *DS,
+ NodeBuilderContext &BuilderCtx,
+ ExplodedNode *Pred,
+ clang::ento::ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF) {
+ currBldrCtx = &BuilderCtx;
+
+ const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ ProgramStateRef state = Pred->getState();
+ bool initHasRun = state->contains<InitializedGlobalsSet>(VD);
+ BranchNodeBuilder builder(Pred, Dst, BuilderCtx, DstT, DstF);
+
+ if (!initHasRun) {
+ state = state->add<InitializedGlobalsSet>(VD);
+ }
+
+ builder.generateNode(state, initHasRun, Pred);
+ builder.markInfeasible(!initHasRun);
+
+ currBldrCtx = 0;
+}
+
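Illustration (function names invented) of what the InitializedGlobalsSet trait and processStaticInitializer model: the initializer of a static local is an implicit branch, taken only on the first pass through the declaration.

int expensiveInit();

int cached() {
  static int value = expensiveInit();  // DeclStmt terminator: run once, skipped on later calls
  return value;
}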
/// processIndirectGoto - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a computed goto jump.
void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
@@ -1692,12 +1777,6 @@ ProgramStateRef ExprEngine::processPointerEscapedOnBind(ProgramStateRef State,
if (StoredVal != Val)
escapes = (State == (State->bindLoc(*regionLoc, Val)));
}
- if (!escapes) {
- // Case 4: We do not currently model what happens when a symbol is
- // assigned to a struct field, so be conservative here and let the symbol
- // go. TODO: This could definitely be improved upon.
- escapes = !isa<VarRegion>(regionLoc->getRegion());
- }
}
// If our store can represent the binding and we aren't storing to something
@@ -1720,11 +1799,12 @@ ProgramStateRef ExprEngine::processPointerEscapedOnBind(ProgramStateRef State,
}
ProgramStateRef
-ExprEngine::processPointerEscapedOnInvalidateRegions(ProgramStateRef State,
+ExprEngine::notifyCheckersOfPointerEscape(ProgramStateRef State,
const InvalidatedSymbols *Invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
- const CallEvent *Call) {
+ const CallEvent *Call,
+ bool IsConst) {
if (!Invalidated || Invalidated->empty())
return State;
@@ -1733,8 +1813,19 @@ ExprEngine::processPointerEscapedOnInvalidateRegions(ProgramStateRef State,
return getCheckerManager().runCheckersForPointerEscape(State,
*Invalidated,
0,
- PSK_EscapeOther);
-
+ PSK_EscapeOther,
+ IsConst);
+
+ // Note: Due to current limitations of RegionStore, we only process the top
+ // level const pointers correctly. The lower level const pointers are
+ // currently treated as non-const.
+ if (IsConst)
+ return getCheckerManager().runCheckersForPointerEscape(State,
+ *Invalidated,
+ Call,
+ PSK_DirectEscapeOnCall,
+ true);
+
// If the symbols were invalidated by a call, we want to find out which ones
// were invalidated directly due to being arguments to the call.
InvalidatedSymbols SymbolsDirectlyInvalidated;
@@ -2161,54 +2252,27 @@ struct DOTGraphTraits<ExplodedNode*> :
break;
}
- default: {
- if (Optional<StmtPoint> L = Loc.getAs<StmtPoint>()) {
- const Stmt *S = L->getStmt();
-
- Out << S->getStmtClassName() << ' ' << (const void*) S << ' ';
+ case ProgramPoint::PostInitializerKind: {
+ Out << "PostInitializer: ";
+ const CXXCtorInitializer *Init =
+ Loc.castAs<PostInitializer>().getInitializer();
+ if (const FieldDecl *FD = Init->getAnyMember())
+ Out << *FD;
+ else {
+ QualType Ty = Init->getTypeSourceInfo()->getType();
+ Ty = Ty.getLocalUnqualifiedType();
LangOptions LO; // FIXME.
- S->printPretty(Out, 0, PrintingPolicy(LO));
- printLocation(Out, S->getLocStart());
-
- if (Loc.getAs<PreStmt>())
- Out << "\\lPreStmt\\l;";
- else if (Loc.getAs<PostLoad>())
- Out << "\\lPostLoad\\l;";
- else if (Loc.getAs<PostStore>())
- Out << "\\lPostStore\\l";
- else if (Loc.getAs<PostLValue>())
- Out << "\\lPostLValue\\l";
-
-#if 0
- // FIXME: Replace with a general scheme to determine
- // the name of the check.
- if (GraphPrintCheckerState->isImplicitNullDeref(N))
- Out << "\\|Implicit-Null Dereference.\\l";
- else if (GraphPrintCheckerState->isExplicitNullDeref(N))
- Out << "\\|Explicit-Null Dereference.\\l";
- else if (GraphPrintCheckerState->isUndefDeref(N))
- Out << "\\|Dereference of undefialied value.\\l";
- else if (GraphPrintCheckerState->isUndefStore(N))
- Out << "\\|Store to Undefined Loc.";
- else if (GraphPrintCheckerState->isUndefResult(N))
- Out << "\\|Result of operation is undefined.";
- else if (GraphPrintCheckerState->isNoReturnCall(N))
- Out << "\\|Call to function marked \"noreturn\".";
- else if (GraphPrintCheckerState->isBadCall(N))
- Out << "\\|Call to NULL/Undefined.";
- else if (GraphPrintCheckerState->isUndefArg(N))
- Out << "\\|Argument in call is undefined";
-#endif
-
- break;
+ Ty.print(Out, LO);
}
+ break;
+ }
+ case ProgramPoint::BlockEdgeKind: {
const BlockEdge &E = Loc.castAs<BlockEdge>();
Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
<< E.getDst()->getBlockID() << ')';
if (const Stmt *T = E.getSrc()->getTerminator()) {
-
SourceLocation SLoc = T->getLocStart();
Out << "\\|Terminator: ";
@@ -2267,6 +2331,48 @@ struct DOTGraphTraits<ExplodedNode*> :
Out << "\\|Control-flow based on\\lUndefined value.\\l";
}
#endif
+ break;
+ }
+
+ default: {
+ const Stmt *S = Loc.castAs<StmtPoint>().getStmt();
+
+ Out << S->getStmtClassName() << ' ' << (const void*) S << ' ';
+ LangOptions LO; // FIXME.
+ S->printPretty(Out, 0, PrintingPolicy(LO));
+ printLocation(Out, S->getLocStart());
+
+ if (Loc.getAs<PreStmt>())
+ Out << "\\lPreStmt\\l;";
+ else if (Loc.getAs<PostLoad>())
+ Out << "\\lPostLoad\\l;";
+ else if (Loc.getAs<PostStore>())
+ Out << "\\lPostStore\\l";
+ else if (Loc.getAs<PostLValue>())
+ Out << "\\lPostLValue\\l";
+
+#if 0
+ // FIXME: Replace with a general scheme to determine
+ // the name of the check.
+ if (GraphPrintCheckerState->isImplicitNullDeref(N))
+ Out << "\\|Implicit-Null Dereference.\\l";
+ else if (GraphPrintCheckerState->isExplicitNullDeref(N))
+ Out << "\\|Explicit-Null Dereference.\\l";
+ else if (GraphPrintCheckerState->isUndefDeref(N))
+ Out << "\\|Dereference of undefialied value.\\l";
+ else if (GraphPrintCheckerState->isUndefStore(N))
+ Out << "\\|Store to Undefined Loc.";
+ else if (GraphPrintCheckerState->isUndefResult(N))
+ Out << "\\|Result of operation is undefined.";
+ else if (GraphPrintCheckerState->isNoReturnCall(N))
+ Out << "\\|Call to function marked \"noreturn\".";
+ else if (GraphPrintCheckerState->isBadCall(N))
+ Out << "\\|Call to NULL/Undefined.";
+ else if (GraphPrintCheckerState->isUndefArg(N))
+ Out << "\\|Argument in call is undefined";
+#endif
+
+ break;
}
}
@@ -2301,7 +2407,7 @@ GetGraphNode<llvm::DenseMap<ExplodedNode*, Expr*>::iterator>
void ExprEngine::ViewGraph(bool trim) {
#ifndef NDEBUG
if (trim) {
- std::vector<ExplodedNode*> Src;
+ std::vector<const ExplodedNode*> Src;
// Flush any outstanding reports to make sure we cover all the nodes.
// This does not cause them to get displayed.
@@ -2315,7 +2421,7 @@ void ExprEngine::ViewGraph(bool trim) {
if (N) Src.push_back(N);
}
- ViewGraph(&Src[0], &Src[0]+Src.size());
+ ViewGraph(Src);
}
else {
GraphPrintCheckerState = this;
@@ -2329,12 +2435,12 @@ void ExprEngine::ViewGraph(bool trim) {
#endif
}
-void ExprEngine::ViewGraph(ExplodedNode** Beg, ExplodedNode** End) {
+void ExprEngine::ViewGraph(ArrayRef<const ExplodedNode*> Nodes) {
#ifndef NDEBUG
GraphPrintCheckerState = this;
GraphPrintSourceManager = &getContext().getSourceManager();
- std::auto_ptr<ExplodedGraph> TrimmedG(G.Trim(Beg, End).first);
+ OwningPtr<ExplodedGraph> TrimmedG(G.trim(Nodes));
if (!TrimmedG.get())
llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
diff --git a/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 2f2eb8628a..67aeab6003 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -68,12 +68,14 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// SymSymExpr.
unsigned Count = currBldrCtx->blockCount();
if (LeftV.getAs<Loc>() &&
- RHS->getType()->isIntegerType() && RightV.isUnknown()) {
+ RHS->getType()->isIntegralOrEnumerationType() &&
+ RightV.isUnknown()) {
RightV = svalBuilder.conjureSymbolVal(RHS, LCtx, RHS->getType(),
Count);
}
if (RightV.getAs<Loc>() &&
- LHS->getType()->isIntegerType() && LeftV.isUnknown()) {
+ LHS->getType()->isIntegralOrEnumerationType() &&
+ LeftV.isUnknown()) {
LeftV = svalBuilder.conjureSymbolVal(LHS, LCtx, LHS->getType(),
Count);
}
@@ -401,32 +403,33 @@ void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
ExplodedNodeSet &Dst) {
StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
- const InitListExpr *ILE
- = cast<InitListExpr>(CL->getInitializer()->IgnoreParens());
+ ProgramStateRef State = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ const Expr *Init = CL->getInitializer();
+ SVal V = State->getSVal(CL->getInitializer(), LCtx);
- ProgramStateRef state = Pred->getState();
- SVal ILV = state->getSVal(ILE, Pred->getLocationContext());
- const LocationContext *LC = Pred->getLocationContext();
- state = state->bindCompoundLiteral(CL, LC, ILV);
-
- // Compound literal expressions are a GNU extension in C++.
- // Unlike in C, where CLs are lvalues, in C++ CLs are prvalues,
- // and like temporary objects created by the functional notation T()
- // CLs are destroyed at the end of the containing full-expression.
- // HOWEVER, an rvalue of array type is not something the analyzer can
- // reason about, since we expect all regions to be wrapped in Locs.
- // So we treat array CLs as lvalues as well, knowing that they will decay
- // to pointers as soon as they are used.
- if (CL->isGLValue() || CL->getType()->isArrayType())
- B.generateNode(CL, Pred, state->BindExpr(CL, LC, state->getLValue(CL, LC)));
- else
- B.generateNode(CL, Pred, state->BindExpr(CL, LC, ILV));
-}
+ if (isa<CXXConstructExpr>(Init)) {
+ // No work needed. Just pass the value up to this expression.
+ } else {
+ assert(isa<InitListExpr>(Init));
+ Loc CLLoc = State->getLValue(CL, LCtx);
+ State = State->bindLoc(CLLoc, V);
+
+ // Compound literal expressions are a GNU extension in C++.
+ // Unlike in C, where CLs are lvalues, in C++ CLs are prvalues,
+ // and like temporary objects created by the functional notation T()
+ // CLs are destroyed at the end of the containing full-expression.
+ // HOWEVER, an rvalue of array type is not something the analyzer can
+ // reason about, since we expect all regions to be wrapped in Locs.
+ // So we treat array CLs as lvalues as well, knowing that they will decay
+ // to pointers as soon as they are used.
+ if (CL->isGLValue() || CL->getType()->isArrayType())
+ V = CLLoc;
+ }
-/// The GDM component containing the set of global variables which have been
-/// previously initialized with explicit initializers.
-REGISTER_TRAIT_WITH_PROGRAMSTATE(InitializedGlobalsSet,
- llvm::ImmutableSet<const VarDecl *> )
+ B.generateNode(CL, Pred, State->BindExpr(CL, LCtx, V));
+}
void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
@@ -438,15 +441,6 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
Dst.insert(Pred);
return;
}
-
- // Check if a value has been previously initialized. There will be an entry in
- // the set for variables with global storage which have been previously
- // initialized.
- if (VD->hasGlobalStorage())
- if (Pred->getState()->contains<InitializedGlobalsSet>(VD)) {
- Dst.insert(Pred);
- return;
- }
// FIXME: all pre/post visits should eventually be handled by ::Visit().
ExplodedNodeSet dstPreVisit;
@@ -464,11 +458,6 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
// Note in the state that the initialization has occurred.
ExplodedNode *UpdatedN = N;
- if (VD->hasGlobalStorage()) {
- state = state->add<InitializedGlobalsSet>(VD);
- UpdatedN = B.generateNode(DS, N, state);
- }
-
SVal InitVal = state->getSVal(InitEx, LC);
if (isa<CXXConstructExpr>(InitEx->IgnoreImplicit())) {
@@ -634,11 +623,15 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
const Expr *R,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ assert(L && R);
+
StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
const CFGBlock *SrcBlock = 0;
+ // Find the predecessor block.
+ ProgramStateRef SrcState = state;
for (const ExplodedNode *N = Pred ; N ; N = *N->pred_begin()) {
ProgramPoint PP = N->getLocation();
if (PP.getAs<PreStmtPurgeDeadSymbols>() || PP.getAs<BlockEntrance>()) {
@@ -646,6 +639,7 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
continue;
}
SrcBlock = PP.castAs<BlockEdge>().getSrc();
+ SrcState = N->getState();
break;
}
@@ -661,14 +655,25 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
CFGElement CE = *I;
if (Optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
const Expr *ValEx = cast<Expr>(CS->getStmt());
- hasValue = true;
- V = state->getSVal(ValEx, LCtx);
+ ValEx = ValEx->IgnoreParens();
+
+ // For the GNU '?:' extension, the left-hand side will be an
+ // OpaqueValueExpr, so get the underlying expression.
+ if (const OpaqueValueExpr *OpaqueEx = dyn_cast<OpaqueValueExpr>(L))
+ L = OpaqueEx->getSourceExpr();
+
+ // If the last expression in the predecessor block matches the true or
+ // false subexpression, get its value.
+ if (ValEx == L->IgnoreParens() || ValEx == R->IgnoreParens()) {
+ hasValue = true;
+ V = SrcState->getSVal(ValEx, LCtx);
+ }
break;
}
}
- assert(hasValue);
- (void) hasValue;
+ if (!hasValue)
+ V = svalBuilder.conjureSymbolVal(0, Ex, LCtx, currBldrCtx->blockCount());
// Generate a new node with the binding from the appropriate path.
B.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V, true));
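A small example (invented) of the GNU binary conditional that motivates the OpaqueValueExpr handling above: the left operand is evaluated once and serves as both condition and result.

int pick(int x, int fallback) {
  return x ?: fallback;   // same as x ? x : fallback, but x is evaluated only once
}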
@@ -681,8 +686,9 @@ VisitOffsetOfExpr(const OffsetOfExpr *OOE,
APSInt IV;
if (OOE->EvaluateAsInt(IV, getContext())) {
assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType()));
- assert(OOE->getType()->isIntegerType());
- assert(IV.isSigned() == OOE->getType()->isSignedIntegerOrEnumerationType());
+ assert(OOE->getType()->isBuiltinType());
+ assert(OOE->getType()->getAs<BuiltinType>()->isInteger());
+ assert(IV.isSigned() == OOE->getType()->isSignedIntegerType());
SVal X = svalBuilder.makeIntVal(IV);
B.generateNode(OOE, Pred,
Pred->getState()->BindExpr(OOE, Pred->getLocationContext(),
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 32b522cbd5..ed90dc5891 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -48,13 +48,25 @@ void ExprEngine::CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
Bldr.generateNode(ME, Pred, state);
}
+// FIXME: This is the sort of code that should eventually live in a Core
+// checker rather than as a special case in ExprEngine.
void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
- const CXXConstructorCall &Call) {
- const CXXConstructExpr *CtorExpr = Call.getOriginExpr();
- assert(CtorExpr->getConstructor()->isCopyOrMoveConstructor());
- assert(CtorExpr->getConstructor()->isTrivial());
+ const CallEvent &Call) {
+ SVal ThisVal;
+ bool AlwaysReturnsLValue;
+ if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
+ assert(Ctor->getDecl()->isTrivial());
+ assert(Ctor->getDecl()->isCopyOrMoveConstructor());
+ ThisVal = Ctor->getCXXThisVal();
+ AlwaysReturnsLValue = false;
+ } else {
+ assert(cast<CXXMethodDecl>(Call.getDecl())->isTrivial());
+ assert(cast<CXXMethodDecl>(Call.getDecl())->getOverloadedOperator() ==
+ OO_Equal);
+ ThisVal = cast<CXXInstanceCall>(Call).getCXXThisVal();
+ AlwaysReturnsLValue = true;
+ }
- SVal ThisVal = Call.getCXXThisVal();
const LocationContext *LCtx = Pred->getLocationContext();
ExplodedNodeSet Dst;
@@ -62,21 +74,48 @@ void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
SVal V = Call.getArgSVal(0);
- // Make sure the value being copied is not unknown.
+ // If the value being copied is not unknown, load from its location to get
+ // an aggregate rvalue.
if (Optional<Loc> L = V.getAs<Loc>())
V = Pred->getState()->getSVal(*L);
+ else
+ assert(V.isUnknown());
- evalBind(Dst, CtorExpr, Pred, ThisVal, V, true);
+ const Expr *CallExpr = Call.getOriginExpr();
+ evalBind(Dst, CallExpr, Pred, ThisVal, V, true);
- PostStmt PS(CtorExpr, LCtx);
+ PostStmt PS(CallExpr, LCtx);
for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end();
I != E; ++I) {
ProgramStateRef State = (*I)->getState();
- State = bindReturnValue(Call, LCtx, State);
+ if (AlwaysReturnsLValue)
+ State = State->BindExpr(CallExpr, LCtx, ThisVal);
+ else
+ State = bindReturnValue(Call, LCtx, State);
Bldr.generateNode(PS, State, *I);
}
}
+
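Illustration (invented type) of the calls performTrivialCopy now covers: trivial copy construction and trivial copy assignment are modeled as a plain load/bind instead of inlining the compiler-generated member.

struct Point { int x, y; };   // trivially copyable

void copies(Point a) {
  Point b(a);   // trivial copy constructor: bind the value of 'a' into 'b'
  b = a;        // trivial operator=: same bind, and the expression is an lvalue of 'b'
}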
+/// Returns a region representing the first element of a (possibly
+/// multi-dimensional) array.
+///
+/// On return, \p Ty will be set to the base type of the array.
+///
+/// If the type is not an array type at all, the original value is returned.
+static SVal makeZeroElementRegion(ProgramStateRef State, SVal LValue,
+ QualType &Ty) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ ASTContext &Ctx = SVB.getContext();
+
+ while (const ArrayType *AT = Ctx.getAsArrayType(Ty)) {
+ Ty = AT->getElementType();
+ LValue = State->getLValue(Ty, SVB.makeZeroArrayIndex(), LValue);
+ }
+
+ return LValue;
+}
+
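A sketch (invented type) of what makeZeroElementRegion peels: for arrays, including multi-dimensional ones, construction and destruction are modeled on the first element, and Ty ends up as the element type.

struct S { S(); ~S(); };

void arrays() {
  S grid[2][3];   // modeled through the region of grid[0][0], with Ty == 'S'
}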
void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
ExplodedNode *Pred,
ExplodedNodeSet &destNodes) {
@@ -84,7 +123,10 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
ProgramStateRef State = Pred->getState();
const MemRegion *Target = 0;
- bool IsArray = false;
+
+ // FIXME: Handle arrays, which run the same constructor for every element.
+ // For now, we just run the first constructor (which should still invalidate
+ // the entire array).
switch (CE->getConstructionKind()) {
case CXXConstructExpr::CK_Complete: {
@@ -99,19 +141,10 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
if (const DeclStmt *DS = dyn_cast<DeclStmt>(StmtElem->getStmt())) {
if (const VarDecl *Var = dyn_cast<VarDecl>(DS->getSingleDecl())) {
if (Var->getInit()->IgnoreImplicit() == CE) {
+ SVal LValue = State->getLValue(Var, LCtx);
QualType Ty = Var->getType();
- if (const ArrayType *AT = getContext().getAsArrayType(Ty)) {
- // FIXME: Handle arrays, which run the same constructor for
- // every element. This workaround will just run the first
- // constructor (which should still invalidate the entire array).
- SVal Base = State->getLValue(Var, LCtx);
- Target = State->getLValue(AT->getElementType(),
- getSValBuilder().makeZeroArrayIndex(),
- Base).getAsRegion();
- IsArray = true;
- } else {
- Target = State->getLValue(Var, LCtx).getAsRegion();
- }
+ LValue = makeZeroElementRegion(State, LValue, Ty);
+ Target = LValue.getAsRegion();
}
}
}
@@ -127,13 +160,19 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
LCtx->getCurrentStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
+ const ValueDecl *Field;
+ SVal FieldVal;
if (Init->isIndirectMemberInitializer()) {
- SVal Field = State->getLValue(Init->getIndirectMember(), ThisVal);
- Target = Field.getAsRegion();
+ Field = Init->getIndirectMember();
+ FieldVal = State->getLValue(Init->getIndirectMember(), ThisVal);
} else {
- SVal Field = State->getLValue(Init->getMember(), ThisVal);
- Target = Field.getAsRegion();
+ Field = Init->getMember();
+ FieldVal = State->getLValue(Init->getMember(), ThisVal);
}
+
+ QualType Ty = Field->getType();
+ FieldVal = makeZeroElementRegion(State, FieldVal, Ty);
+ Target = FieldVal.getAsRegion();
}
// FIXME: This will eventually need to handle new-expressions as well.
@@ -183,6 +222,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
ExplodedNodeSet DstEvaluated;
StmtNodeBuilder Bldr(DstPreCall, DstEvaluated, *currBldrCtx);
+ bool IsArray = isa<ElementRegion>(Target);
if (CE->getConstructor()->isTrivial() &&
CE->getConstructor()->isCopyOrMoveConstructor() &&
!IsArray) {
@@ -215,12 +255,9 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
// FIXME: We need to run the same destructor on every element of the array.
// This workaround will just run the first destructor (which will still
// invalidate the entire array).
- // This is a loop because of multidimensional arrays.
- while (const ArrayType *AT = getContext().getAsArrayType(ObjectType)) {
- ObjectType = AT->getElementType();
- Dest = State->getLValue(ObjectType, getSValBuilder().makeZeroArrayIndex(),
- loc::MemRegionVal(Dest)).getAsRegion();
- }
+ SVal DestVal = loc::MemRegionVal(Dest);
+ DestVal = makeZeroElementRegion(State, DestVal, ObjectType);
+ Dest = DestVal.getAsRegion();
const CXXRecordDecl *RecordDecl = ObjectType->getAsCXXRecordDecl();
assert(RecordDecl && "Only CXXRecordDecls should have destructors");
@@ -255,15 +292,35 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// Also, we need to decide how allocators actually work -- they're not
// really part of the CXXNewExpr because they happen BEFORE the
// CXXConstructExpr subexpression. See PR12014 for some discussion.
- StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
unsigned blockCount = currBldrCtx->blockCount();
const LocationContext *LCtx = Pred->getLocationContext();
- DefinedOrUnknownSVal symVal = svalBuilder.conjureSymbolVal(0, CNE, LCtx,
- CNE->getType(),
- blockCount);
- ProgramStateRef State = Pred->getState();
+ DefinedOrUnknownSVal symVal = UnknownVal();
+ FunctionDecl *FD = CNE->getOperatorNew();
+ bool IsStandardGlobalOpNewFunction = false;
+ if (FD && !isa<CXXMethodDecl>(FD) && !FD->isVariadic()) {
+ if (FD->getNumParams() == 2) {
+ QualType T = FD->getParamDecl(1)->getType();
+ if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
+ // NoThrow placement new behaves as a standard new.
+ IsStandardGlobalOpNewFunction = II->getName().equals("nothrow_t");
+ }
+ else
+ // Placement forms are considered non-standard.
+ IsStandardGlobalOpNewFunction = (FD->getNumParams() == 1);
+ }
+
+  // We assume all standard global 'operator new' functions allocate memory on
+  // the heap. We realize this is an approximation that might not correctly
+  // model a custom global allocator.
+ if (IsStandardGlobalOpNewFunction)
+ symVal = svalBuilder.getConjuredHeapSymbolVal(CNE, LCtx, blockCount);
+ else
+ symVal = svalBuilder.conjureSymbolVal(0, CNE, LCtx, CNE->getType(),
+ blockCount);
+
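A short sketch of how the classification above plays out for user code (hypothetical example; the comments restate what the preceding check does):

#include <new>

void example() {
  int *p = new int(1);                // one-parameter global operator new:
                                      // standard, so it gets a heap symbol
  int *q = new (std::nothrow) int(2); // nothrow_t overload: also treated as standard
  char buf[sizeof(int)];
  int *r = new (buf) int(3);          // reserved placement form: not standard here;
                                      // handled by the placement-location code below
  delete p;
  delete q;
  (void)r;
}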
+ ProgramStateRef State = Pred->getState();
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXAllocatorCall> Call =
CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
@@ -272,12 +329,13 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// FIXME: Once we figure out how we want allocators to work,
// we should be using the usual pre-/(default-)eval-/post-call checks here.
State = Call->invalidateRegions(blockCount);
+ if (!State)
+ return;
// If we're compiling with exceptions enabled, and this allocation function
// is not declared as non-throwing, failures /must/ be signalled by
// exceptions, and thus the return value will never be NULL.
// C++11 [basic.stc.dynamic.allocation]p3.
- FunctionDecl *FD = CNE->getOperatorNew();
if (FD && getContext().getLangOpts().CXXExceptions) {
QualType Ty = FD->getType();
if (const FunctionProtoType *ProtoType = Ty->getAs<FunctionProtoType>())
@@ -285,6 +343,8 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
State = State->assume(symVal, true);
}
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+
if (CNE->isArray()) {
// FIXME: allocating an array requires simulating the constructors.
// For now, just return a symbolicated region.
@@ -302,30 +362,32 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// CXXNewExpr, we need to make sure that the constructed object is not
// immediately invalidated here. (The placement call should happen before
// the constructor call anyway.)
+ SVal Result = symVal;
if (FD && FD->isReservedGlobalPlacementOperator()) {
// Non-array placement new should always return the placement location.
SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
- SVal Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(),
- CNE->getPlacementArg(0)->getType());
- State = State->BindExpr(CNE, LCtx, Result);
- } else {
- State = State->BindExpr(CNE, LCtx, symVal);
+ Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(),
+ CNE->getPlacementArg(0)->getType());
}
+ // Bind the address of the object, then check to see if we cached out.
+ State = State->BindExpr(CNE, LCtx, Result);
+ ExplodedNode *NewN = Bldr.generateNode(CNE, Pred, State);
+ if (!NewN)
+ return;
+
// If the type is not a record, we won't have a CXXConstructExpr as an
// initializer. Copy the value over.
if (const Expr *Init = CNE->getInitializer()) {
if (!isa<CXXConstructExpr>(Init)) {
- QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
- (void)ObjTy;
- assert(!ObjTy->isRecordType());
- SVal Location = State->getSVal(CNE, LCtx);
- if (Optional<Loc> LV = Location.getAs<Loc>())
- State = State->bindLoc(*LV, State->getSVal(Init, LCtx));
+ assert(Bldr.getResults().size() == 1);
+ Bldr.takeNodes(NewN);
+
+ assert(!CNE->getType()->getPointeeCXXRecordDecl());
+ evalBind(Dst, CNE, NewN, Result, State->getSVal(Init, LCtx),
+ /*FirstInit=*/IsStandardGlobalOpNewFunction);
}
}
-
- Bldr.generateNode(CNE, Pred, State);
}
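A small example of the two initializer paths handled above (hypothetical user code): a non-record initializer is copied into the new object via evalBind, while a record type is initialized through its CXXConstructExpr instead.

struct Rec {
  Rec(int x) : v(x) {}
  int v;
};

void example() {
  int *i = new int(42); // no CXXConstructExpr: the value 42 is bound with evalBind
  Rec *r = new Rec(42); // record type: initialization happens in the constructor call
  delete i;
  delete r;
}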
void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 44803dc477..06570a4b4a 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -578,9 +578,15 @@ void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}
-static bool shouldInlineCallKind(const CallEvent &Call,
- const ExplodedNode *Pred,
- AnalyzerOptions &Opts) {
+enum CallInlinePolicy {
+ CIP_Allowed,
+ CIP_DisallowedOnce,
+ CIP_DisallowedAlways
+};
+
+static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
+ const ExplodedNode *Pred,
+ AnalyzerOptions &Opts) {
const LocationContext *CurLC = Pred->getLocationContext();
const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
switch (Call.getKind()) {
@@ -590,18 +596,20 @@ static bool shouldInlineCallKind(const CallEvent &Call,
case CE_CXXMember:
case CE_CXXMemberOperator:
if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
- return false;
+ return CIP_DisallowedAlways;
break;
case CE_CXXConstructor: {
if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
- return false;
+ return CIP_DisallowedAlways;
const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
// FIXME: We don't handle constructors or destructors for arrays properly.
+ // Even once we do, we still need to be careful about implicitly-generated
+ // initializers for array fields in default move/copy constructors.
const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
if (Target && isa<ElementRegion>(Target))
- return false;
+ return CIP_DisallowedOnce;
// FIXME: This is a hack. We don't use the correct region for a new
// expression, so if we inline the constructor its result will just be
@@ -610,7 +618,7 @@ static bool shouldInlineCallKind(const CallEvent &Call,
const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
if (isa<CXXNewExpr>(Parent))
- return false;
+ return CIP_DisallowedOnce;
// Inlining constructors requires including initializers in the CFG.
const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
@@ -624,19 +632,19 @@ static bool shouldInlineCallKind(const CallEvent &Call,
// For other types, only inline constructors if destructor inlining is
// also enabled.
if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
- return false;
+ return CIP_DisallowedAlways;
// FIXME: This is a hack. We don't handle temporary destructors
// right now, so we shouldn't inline their constructors.
if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
if (!Target || !isa<DeclRegion>(Target))
- return false;
+ return CIP_DisallowedOnce;
break;
}
case CE_CXXDestructor: {
if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
- return false;
+ return CIP_DisallowedAlways;
// Inlining destructors requires building the CFG correctly.
const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
@@ -648,22 +656,117 @@ static bool shouldInlineCallKind(const CallEvent &Call,
// FIXME: We don't handle constructors or destructors for arrays properly.
const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
if (Target && isa<ElementRegion>(Target))
- return false;
+ return CIP_DisallowedOnce;
break;
}
case CE_CXXAllocator:
// Do not inline allocators until we model deallocators.
// This is unfortunate, but basically necessary for smart pointers and such.
- return false;
+ return CIP_DisallowedAlways;
case CE_ObjCMessage:
if (!Opts.mayInlineObjCMethod())
- return false;
+ return CIP_DisallowedAlways;
if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
- return false;
+ return CIP_DisallowedAlways;
break;
}
+
+ return CIP_Allowed;
+}
+
+/// Returns true if the given C++ class contains a member with the given name.
+static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
+ StringRef Name) {
+ const IdentifierInfo &II = Ctx.Idents.get(Name);
+ DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
+ if (!RD->lookup(DeclName).empty())
+ return true;
+
+ CXXBasePaths Paths(false, false, false);
+ if (RD->lookupInBases(&CXXRecordDecl::FindOrdinaryMember,
+ DeclName.getAsOpaquePtr(),
+ Paths))
+ return true;
+
+ return false;
+}
+
+/// Returns true if the given C++ class is a container or iterator.
+///
+/// Our heuristic for this is whether it contains a method named 'begin()' or a
+/// nested type named 'iterator' or 'iterator_category'.
+static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
+ return hasMember(Ctx, RD, "begin") ||
+ hasMember(Ctx, RD, "iterator") ||
+ hasMember(Ctx, RD, "iterator_category");
+}
+
+/// Returns true if the given function refers to a constructor or destructor of
+/// a C++ container or iterator.
+///
+/// We generally do a poor job modeling most containers right now, and would
+/// prefer not to inline their setup and teardown.
+static bool isContainerCtorOrDtor(const ASTContext &Ctx,
+ const FunctionDecl *FD) {
+ if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD)))
+ return false;
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(FD)->getParent();
+ return isContainerClass(Ctx, RD);
+}
+
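A hypothetical class that this heuristic classifies as a container: it has a member named 'begin' and a nested 'iterator' type, so its constructor and destructor are skipped for inlining when Opts.mayInlineCXXContainerCtorsAndDtors() is off and the declaration is outside the main file (see the check further down).

class SmallList {
public:
  typedef int *iterator; // matches the "iterator" name probe in hasMember()
  SmallList();
  ~SmallList();
  iterator begin();      // matches the "begin" name probe in hasMember()
  iterator end();
};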
+/// Returns true if the function in \p CalleeADC may be inlined in general.
+///
+/// This checks static properties of the function, such as its signature and
+/// CFG, to determine whether the analyzer should ever consider inlining it,
+/// in any context.
+static bool mayInlineDecl(const CallEvent &Call, AnalysisDeclContext *CalleeADC,
+ AnalyzerOptions &Opts) {
+ // FIXME: Do not inline variadic calls.
+ if (Call.isVariadic())
+ return false;
+
+ // Check certain C++-related inlining policies.
+ ASTContext &Ctx = CalleeADC->getASTContext();
+ if (Ctx.getLangOpts().CPlusPlus) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
+ // Conditionally control the inlining of template functions.
+ if (!Opts.mayInlineTemplateFunctions())
+ if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
+ return false;
+
+ // Conditionally control the inlining of C++ standard library functions.
+ if (!Opts.mayInlineCXXStandardLibrary())
+ if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
+ if (IsInStdNamespace(FD))
+ return false;
+
+ // Conditionally control the inlining of methods on objects that look
+ // like C++ containers.
+ if (!Opts.mayInlineCXXContainerCtorsAndDtors())
+ if (!Ctx.getSourceManager().isFromMainFile(FD->getLocation()))
+ if (isContainerCtorOrDtor(Ctx, FD))
+ return false;
+ }
+ }
+
+ // It is possible that the CFG cannot be constructed.
+ // Be safe, and check if the CalleeCFG is valid.
+ const CFG *CalleeCFG = CalleeADC->getCFG();
+ if (!CalleeCFG)
+ return false;
+
+ // Do not inline large functions.
+ if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
+ return false;
+
+ // It is possible that the live variables analysis cannot be
+ // run. If so, bail out.
+ if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
+ return false;
+
return true;
}
@@ -683,18 +786,40 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
if (CalleeADC->isBodyAutosynthesized())
return true;
- if (HowToInline == Inline_None)
+ if (!AMgr.shouldInlineCall())
return false;
+ // Check if this function has been marked as non-inlinable.
+ Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
+ if (MayInline.hasValue()) {
+ if (!MayInline.getValue())
+ return false;
+
+ } else {
+ // We haven't actually checked the static properties of this function yet.
+ // Do that now, and record our decision in the function summaries.
+ if (mayInlineDecl(Call, CalleeADC, Opts)) {
+ Engine.FunctionSummaries->markMayInline(D);
+ } else {
+ Engine.FunctionSummaries->markShouldNotInline(D);
+ return false;
+ }
+ }
+
// Check if we should inline a call based on its kind.
- if (!shouldInlineCallKind(Call, Pred, Opts))
+ // FIXME: this checks both static and dynamic properties of the call, which
+ // means we're redoing a bit of work that could be cached in the function
+ // summary.
+ CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts);
+ if (CIP != CIP_Allowed) {
+ if (CIP == CIP_DisallowedAlways) {
+ assert(!MayInline.hasValue() || MayInline.getValue());
+ Engine.FunctionSummaries->markShouldNotInline(D);
+ }
return false;
+ }
- // It is possible that the CFG cannot be constructed.
- // Be safe, and check if the CalleeCFG is valid.
const CFG *CalleeCFG = CalleeADC->getCFG();
- if (!CalleeCFG)
- return false;
// Do not inline if recursive or we've reached max stack frame count.
bool IsRecursive = false;
@@ -705,39 +830,6 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
|| IsRecursive))
return false;
- // Do not inline if it took too long to inline previously.
- if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D))
- return false;
-
- // Or if the function is too big.
- if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
- return false;
-
- // Do not inline variadic calls (for now).
- if (Call.isVariadic())
- return false;
-
- // Check our template policy.
- if (getContext().getLangOpts().CPlusPlus) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- // Conditionally allow the inlining of template functions.
- if (!Opts.mayInlineTemplateFunctions())
- if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
- return false;
-
- // Conditionally allow the inlining of C++ standard library functions.
- if (!Opts.mayInlineCXXStandardLibrary())
- if (getContext().getSourceManager().isInSystemHeader(FD->getLocation()))
- if (IsInStdNamespace(FD))
- return false;
- }
- }
-
- // It is possible that the live variables analysis cannot be
- // run. If so, bail out.
- if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
- return false;
-
// Do not inline large functions too many times.
if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
Opts.getMaxTimesInlineLarge()) &&
@@ -745,17 +837,43 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
NumReachedInlineCountMax++;
return false;
}
+
+ if (HowToInline == Inline_Minimal &&
+ (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
+ || IsRecursive))
+ return false;
+
Engine.FunctionSummaries->bumpNumTimesInlined(D);
return true;
}
+static bool isTrivialObjectAssignment(const CallEvent &Call) {
+ const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
+ if (!ICall)
+ return false;
+
+ const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
+ if (!MD)
+ return false;
+ if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
+ return false;
+
+ return MD->isTrivial();
+}
+
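A sketch of what this special case covers (hypothetical code): trivial copy and move assignment operators are no longer inlined; they are modeled directly by performTrivialCopy, and the call expression evaluates to the destination lvalue.

struct POD { int a, b; }; // trivially copyable; compiler-generated operator=

void example(POD src) {
  POD dst = src; // trivial copy constructor: already modeled as a plain bind
  dst = src;     // trivial copy assignment: routed through performTrivialCopy;
                 // the expression's value is 'dst' itself (AlwaysReturnsLValue)
  (void)dst;
}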
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
const CallEvent &CallTemplate) {
// Make sure we have the most recent state attached to the call.
ProgramStateRef State = Pred->getState();
CallEventRef<> Call = CallTemplate.cloneWithState(State);
+ // Special-case trivial assignment operators.
+ if (isTrivialObjectAssignment(*Call)) {
+ performTrivialCopy(Bldr, Pred, *Call);
+ return;
+ }
+
// Try to inline the call.
// The origin expression here is just used as a kind of checksum;
// this should still be safe even for CallEvents that don't come from exprs.
diff --git a/lib/StaticAnalyzer/Core/FunctionSummary.cpp b/lib/StaticAnalyzer/Core/FunctionSummary.cpp
index c227aac2b4..c21735b8b8 100644
--- a/lib/StaticAnalyzer/Core/FunctionSummary.cpp
+++ b/lib/StaticAnalyzer/Core/FunctionSummary.cpp
@@ -1,4 +1,4 @@
-//== FunctionSummary.h - Stores summaries of functions. ------------*- C++ -*-//
+//== FunctionSummary.cpp - Stores summaries of functions. ----------*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines a summary of a function gathered/used by static analyzes.
+// This file defines a summary of a function gathered/used by static analysis.
//
//===----------------------------------------------------------------------===//
@@ -15,16 +15,10 @@
using namespace clang;
using namespace ento;
-FunctionSummariesTy::~FunctionSummariesTy() {
- for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
- delete(I->second);
- }
-}
-
unsigned FunctionSummariesTy::getTotalNumBasicBlocks() {
unsigned Total = 0;
for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
- Total += I->second->TotalBasicBlocks;
+ Total += I->second.TotalBasicBlocks;
}
return Total;
}
@@ -32,7 +26,7 @@ unsigned FunctionSummariesTy::getTotalNumBasicBlocks() {
unsigned FunctionSummariesTy::getTotalNumVisitedBasicBlocks() {
unsigned Total = 0;
for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
- Total += I->second->VisitedBasicBlocks.count();
+ Total += I->second.VisitedBasicBlocks.count();
}
return Total;
}
diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp
index b3a1e65b19..42073d4841 100644
--- a/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -555,38 +555,75 @@ void StackLocalsSpaceRegion::dumpToStream(raw_ostream &os) const {
}
bool MemRegion::canPrintPretty() const {
+ return canPrintPrettyAsExpr();
+}
+
+bool MemRegion::canPrintPrettyAsExpr() const {
return false;
}
void MemRegion::printPretty(raw_ostream &os) const {
+ assert(canPrintPretty() && "This region cannot be printed pretty.");
+ os << "'";
+ printPrettyAsExpr(os);
+ os << "'";
+ return;
+}
+
+void MemRegion::printPrettyAsExpr(raw_ostream &os) const {
+ llvm_unreachable("This region cannot be printed pretty.");
return;
}
-bool VarRegion::canPrintPretty() const {
+bool VarRegion::canPrintPrettyAsExpr() const {
return true;
}
-void VarRegion::printPretty(raw_ostream &os) const {
+void VarRegion::printPrettyAsExpr(raw_ostream &os) const {
os << getDecl()->getName();
}
-bool ObjCIvarRegion::canPrintPretty() const {
+bool ObjCIvarRegion::canPrintPrettyAsExpr() const {
return true;
}
-void ObjCIvarRegion::printPretty(raw_ostream &os) const {
+void ObjCIvarRegion::printPrettyAsExpr(raw_ostream &os) const {
os << getDecl()->getName();
}
bool FieldRegion::canPrintPretty() const {
- return superRegion->canPrintPretty();
+ return true;
}
-void FieldRegion::printPretty(raw_ostream &os) const {
- superRegion->printPretty(os);
+bool FieldRegion::canPrintPrettyAsExpr() const {
+ return superRegion->canPrintPrettyAsExpr();
+}
+
+void FieldRegion::printPrettyAsExpr(raw_ostream &os) const {
+ assert(canPrintPrettyAsExpr());
+ superRegion->printPrettyAsExpr(os);
os << "." << getDecl()->getName();
}
+void FieldRegion::printPretty(raw_ostream &os) const {
+ if (canPrintPrettyAsExpr()) {
+ os << "\'";
+ printPrettyAsExpr(os);
+ os << "'";
+ } else {
+ os << "field " << "\'" << getDecl()->getName() << "'";
+ }
+ return;
+}
+
+bool CXXBaseObjectRegion::canPrintPrettyAsExpr() const {
+ return superRegion->canPrintPrettyAsExpr();
+}
+
+void CXXBaseObjectRegion::printPrettyAsExpr(raw_ostream &os) const {
+ superRegion->printPrettyAsExpr(os);
+}
+
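Roughly, the split between printPretty and printPrettyAsExpr looks like this for analyzed code (hypothetical example; the diagnostic text shown is approximate):

struct Item { int len; };

void example(Item local, Item *unknown) {
  // VarRegion for 'local':        printPretty() -> 'local'
  // FieldRegion for local.len:    printPretty() -> 'local.len'
  // FieldRegion for unknown->len: the base is symbolic, canPrintPrettyAsExpr()
  //                               is false, so printPretty() -> field 'len'
  (void)local;
  (void)unknown;
}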
//===----------------------------------------------------------------------===//
// MemRegionManager methods.
//===----------------------------------------------------------------------===//
@@ -1043,6 +1080,17 @@ const MemRegion *MemRegion::StripCasts(bool StripBaseCasts) const {
}
}
+const SymbolicRegion *MemRegion::getSymbolicBase() const {
+ const SubRegion *SubR = dyn_cast<SubRegion>(this);
+
+ while (SubR) {
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR))
+ return SymR;
+ SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
+ }
+ return 0;
+}
+
// FIXME: Merge with the implementation of the same method in Store.cpp
static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index 686540b60e..03513106ec 100644
--- a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -297,11 +297,16 @@ static Optional<bool> comparePiece(const PathDiagnosticPiece &X,
static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y) {
if (X.size() != Y.size())
return X.size() < Y.size();
- for (unsigned i = 0, n = X.size(); i != n; ++i) {
- Optional<bool> b = comparePiece(*X[i], *Y[i]);
+
+ PathPieces::const_iterator X_I = X.begin(), X_end = X.end();
+ PathPieces::const_iterator Y_I = Y.begin(), Y_end = Y.end();
+
+ for ( ; X_I != X_end && Y_I != Y_end; ++X_I, ++Y_I) {
+ Optional<bool> b = comparePiece(**X_I, **Y_I);
if (b.hasValue())
return b.getValue();
}
+
return None;
}
@@ -588,6 +593,9 @@ PathDiagnosticLocation
S = SP->getStmt();
if (P.getAs<PostStmtPurgeDeadSymbols>())
return PathDiagnosticLocation::createEnd(S, SMng, P.getLocationContext());
+ } else if (Optional<PostInitializer> PIP = P.getAs<PostInitializer>()) {
+ return PathDiagnosticLocation(PIP->getInitializer()->getSourceLocation(),
+ SMng);
} else if (Optional<PostImplicitCall> PIE = P.getAs<PostImplicitCall>()) {
return PathDiagnosticLocation(PIE->getLocation(), SMng);
} else if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
@@ -605,31 +613,73 @@ PathDiagnosticLocation
return PathDiagnosticLocation(S, SMng, P.getLocationContext());
}
+const Stmt *PathDiagnosticLocation::getStmt(const ExplodedNode *N) {
+ ProgramPoint P = N->getLocation();
+ if (Optional<StmtPoint> SP = P.getAs<StmtPoint>())
+ return SP->getStmt();
+ if (Optional<BlockEdge> BE = P.getAs<BlockEdge>())
+ return BE->getSrc()->getTerminator();
+ if (Optional<CallEnter> CE = P.getAs<CallEnter>())
+ return CE->getCallExpr();
+ if (Optional<CallExitEnd> CEE = P.getAs<CallExitEnd>())
+ return CEE->getCalleeContext()->getCallSite();
+ if (Optional<PostInitializer> PIPP = P.getAs<PostInitializer>())
+ return PIPP->getInitializer()->getInit();
+
+ return 0;
+}
+
+const Stmt *PathDiagnosticLocation::getNextStmt(const ExplodedNode *N) {
+ for (N = N->getFirstSucc(); N; N = N->getFirstSucc()) {
+ if (const Stmt *S = getStmt(N)) {
+ // Check if the statement is '?' or '&&'/'||'. These are "merges",
+ // not actual statement points.
+ switch (S->getStmtClass()) {
+ case Stmt::ChooseExprClass:
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ continue;
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperatorKind Op = cast<BinaryOperator>(S)->getOpcode();
+ if (Op == BO_LAnd || Op == BO_LOr)
+ continue;
+ break;
+ }
+ default:
+ break;
+ }
+ // We found the statement, so return it.
+ return S;
+ }
+ }
+
+ return 0;
+}
+
PathDiagnosticLocation
- PathDiagnosticLocation::createEndOfPath(const ExplodedNode* N,
+ PathDiagnosticLocation::createEndOfPath(const ExplodedNode *N,
const SourceManager &SM) {
assert(N && "Cannot create a location with a null node.");
+ const Stmt *S = getStmt(N);
- const ExplodedNode *NI = N;
- const Stmt *S = 0;
-
- while (NI) {
- ProgramPoint P = NI->getLocation();
- if (Optional<StmtPoint> PS = P.getAs<StmtPoint>()) {
- S = PS->getStmt();
- if (P.getAs<PostStmtPurgeDeadSymbols>())
- return PathDiagnosticLocation::createEnd(S, SM,
- NI->getLocationContext());
- break;
- } else if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
- S = BE->getSrc()->getTerminator();
- break;
- }
- NI = NI->succ_empty() ? 0 : *(NI->succ_begin());
- }
+ if (!S)
+ S = getNextStmt(N);
if (S) {
- const LocationContext *LC = NI->getLocationContext();
+ ProgramPoint P = N->getLocation();
+ const LocationContext *LC = N->getLocationContext();
+
+ // For member expressions, return the location of the '.' or '->'.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(S))
+ return PathDiagnosticLocation::createMemberLoc(ME, SM);
+
+ // For binary operators, return the location of the operator.
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S))
+ return PathDiagnosticLocation::createOperatorLoc(B, SM);
+
+ if (P.getAs<PostStmtPurgeDeadSymbols>())
+ return PathDiagnosticLocation::createEnd(S, SM, LC);
+
if (S->getLocStart().isValid())
return PathDiagnosticLocation(S, SM, LC);
return PathDiagnosticLocation(getValidSourceLocation(S, LC), SM);
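Illustrative examples of where the new end-of-path logic anchors the diagnostic (hypothetical code; actual reports depend on the checker):

struct Node { int *data; };

void example(Node *n, int a, int b) {
  n->data = 0;     // MemberExpr: the location is the '->' (createMemberLoc)
  int sum = a + b; // BinaryOperator: the location is the '+' (createOperatorLoc)
  (void)sum;
}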
diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 7dcc088d18..850955561e 100644
--- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -296,6 +296,8 @@ static void ReportCall(raw_ostream &o,
for (PathPieces::const_iterator I = P.path.begin(), E = P.path.end();I!=E;++I)
ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, true);
+
+ --depth;
IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
P.getCallExitEvent();
diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp
index 64205f8d99..653b69bf48 100644
--- a/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -111,14 +111,6 @@ ProgramStateManager::removeDeadBindings(ProgramStateRef state,
return ConstraintMgr->removeDeadBindings(Result, SymReaper);
}
-ProgramStateRef ProgramState::bindCompoundLiteral(const CompoundLiteralExpr *CL,
- const LocationContext *LC,
- SVal V) const {
- const StoreRef &newStore =
- getStateManager().StoreMgr->bindCompoundLiteral(getStore(), CL, LC, V);
- return makeWithStore(newStore);
-}
-
ProgramStateRef ProgramState::bindLoc(Loc LV, SVal V, bool notifyChanges) const {
ProgramStateManager &Mgr = getStateManager();
ProgramStateRef newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
@@ -140,51 +132,103 @@ ProgramStateRef ProgramState::bindDefault(SVal loc, SVal V) const {
new_state;
}
+typedef ArrayRef<const MemRegion *> RegionList;
+typedef ArrayRef<SVal> ValueList;
+
ProgramStateRef
-ProgramState::invalidateRegions(ArrayRef<const MemRegion *> Regions,
+ProgramState::invalidateRegions(RegionList Regions,
const Expr *E, unsigned Count,
const LocationContext *LCtx,
bool CausedByPointerEscape,
InvalidatedSymbols *IS,
- const CallEvent *Call) const {
+ const CallEvent *Call,
+ RegionList ConstRegions) const {
+ SmallVector<SVal, 8> Values;
+ for (RegionList::const_iterator I = Regions.begin(),
+ End = Regions.end(); I != End; ++I)
+ Values.push_back(loc::MemRegionVal(*I));
+
+ SmallVector<SVal, 8> ConstValues;
+ for (RegionList::const_iterator I = ConstRegions.begin(),
+ End = ConstRegions.end(); I != End; ++I)
+ ConstValues.push_back(loc::MemRegionVal(*I));
+
if (!IS) {
InvalidatedSymbols invalidated;
- return invalidateRegionsImpl(Regions, E, Count, LCtx,
+ return invalidateRegionsImpl(Values, E, Count, LCtx,
CausedByPointerEscape,
- invalidated, Call);
+ invalidated, Call, ConstValues);
}
- return invalidateRegionsImpl(Regions, E, Count, LCtx, CausedByPointerEscape,
- *IS, Call);
+ return invalidateRegionsImpl(Values, E, Count, LCtx, CausedByPointerEscape,
+ *IS, Call, ConstValues);
}
-ProgramStateRef
-ProgramState::invalidateRegionsImpl(ArrayRef<const MemRegion *> Regions,
+ProgramStateRef
+ProgramState::invalidateRegions(ValueList Values,
+ const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
+ bool CausedByPointerEscape,
+ InvalidatedSymbols *IS,
+ const CallEvent *Call,
+ ValueList ConstValues) const {
+ if (!IS) {
+ InvalidatedSymbols invalidated;
+ return invalidateRegionsImpl(Values, E, Count, LCtx,
+ CausedByPointerEscape,
+ invalidated, Call, ConstValues);
+ }
+ return invalidateRegionsImpl(Values, E, Count, LCtx, CausedByPointerEscape,
+ *IS, Call, ConstValues);
+}
+
+ProgramStateRef
+ProgramState::invalidateRegionsImpl(ValueList Values,
const Expr *E, unsigned Count,
const LocationContext *LCtx,
bool CausedByPointerEscape,
InvalidatedSymbols &IS,
- const CallEvent *Call) const {
+ const CallEvent *Call,
+ ValueList ConstValues) const {
ProgramStateManager &Mgr = getStateManager();
SubEngine* Eng = Mgr.getOwningEngine();
-
+ InvalidatedSymbols ConstIS;
+
if (Eng) {
+ StoreManager::InvalidatedRegions TopLevelInvalidated;
+ StoreManager::InvalidatedRegions TopLevelConstInvalidated;
StoreManager::InvalidatedRegions Invalidated;
const StoreRef &newStore
- = Mgr.StoreMgr->invalidateRegions(getStore(), Regions, E, Count, LCtx, IS,
- Call, &Invalidated);
+ = Mgr.StoreMgr->invalidateRegions(getStore(), Values, ConstValues,
+ E, Count, LCtx, Call,
+ IS, ConstIS,
+ &TopLevelInvalidated,
+ &TopLevelConstInvalidated,
+ &Invalidated);
ProgramStateRef newState = makeWithStore(newStore);
- if (CausedByPointerEscape)
- newState = Eng->processPointerEscapedOnInvalidateRegions(newState,
- &IS, Regions, Invalidated, Call);
+ if (CausedByPointerEscape) {
+ newState = Eng->notifyCheckersOfPointerEscape(newState, &IS,
+ TopLevelInvalidated,
+ Invalidated, Call);
+ if (!ConstValues.empty()) {
+ StoreManager::InvalidatedRegions Empty;
+ newState = Eng->notifyCheckersOfPointerEscape(newState, &ConstIS,
+ TopLevelConstInvalidated,
+ Empty, Call,
+ true);
+ }
+ }
- return Eng->processRegionChanges(newState, &IS, Regions, Invalidated, Call);
+ return Eng->processRegionChanges(newState, &IS,
+ TopLevelInvalidated, Invalidated,
+ Call);
}
const StoreRef &newStore =
- Mgr.StoreMgr->invalidateRegions(getStore(), Regions, E, Count, LCtx, IS,
- Call, NULL);
+ Mgr.StoreMgr->invalidateRegions(getStore(), Values, ConstValues,
+ E, Count, LCtx, Call,
+ IS, ConstIS, NULL, NULL, NULL);
return makeWithStore(newStore);
}
@@ -218,7 +262,7 @@ SVal ProgramState::getSValAsScalarOrLoc(const MemRegion *R) const {
if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
QualType T = TR->getValueType();
- if (Loc::isLocType(T) || T->isIntegerType())
+ if (Loc::isLocType(T) || T->isIntegralOrEnumerationType())
return getSVal(R);
}
@@ -328,9 +372,13 @@ ConditionTruthVal ProgramState::isNull(SVal V) const {
if (V.isZeroConstant())
return true;
- SymbolRef Sym = V.getAsSymbol();
- if (!Sym)
+ if (V.isConstant())
return false;
+
+ SymbolRef Sym = V.getAsSymbol(/* IncludeBaseRegion */ true);
+ if (!Sym)
+ return ConditionTruthVal();
+
return getStateManager().ConstraintMgr->isNull(this, Sym);
}
diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index d397e47224..3606e099ce 100644
--- a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -153,8 +153,8 @@ private:
// The function returns false if the described range is entirely outside
// the range of values for the associated symbol.
APSIntType Type(getMinValue());
- APSIntType::RangeTestResultKind LowerTest = Type.testInRange(Lower);
- APSIntType::RangeTestResultKind UpperTest = Type.testInRange(Upper);
+ APSIntType::RangeTestResultKind LowerTest = Type.testInRange(Lower, true);
+ APSIntType::RangeTestResultKind UpperTest = Type.testInRange(Upper, true);
switch (LowerTest) {
case APSIntType::RTR_Below:
@@ -285,8 +285,8 @@ namespace {
class RangeConstraintManager : public SimpleConstraintManager{
RangeSet GetRange(ProgramStateRef state, SymbolRef sym);
public:
- RangeConstraintManager(SubEngine *subengine, BasicValueFactory &BVF)
- : SimpleConstraintManager(subengine, BVF) {}
+ RangeConstraintManager(SubEngine *subengine, SValBuilder &SVB)
+ : SimpleConstraintManager(subengine, SVB) {}
ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
@@ -328,7 +328,7 @@ private:
ConstraintManager *
ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
- return new RangeConstraintManager(Eng, StMgr.getBasicVals());
+ return new RangeConstraintManager(Eng, StMgr.getSValBuilder());
}
const llvm::APSInt* RangeConstraintManager::getSymVal(ProgramStateRef St,
@@ -419,7 +419,7 @@ RangeConstraintManager::assumeSymNE(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
- if (AdjustmentType.testInRange(Int) != APSIntType::RTR_Within)
+ if (AdjustmentType.testInRange(Int, true) != APSIntType::RTR_Within)
return St;
llvm::APSInt Lower = AdjustmentType.convert(Int) - Adjustment;
@@ -439,7 +439,7 @@ RangeConstraintManager::assumeSymEQ(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
- if (AdjustmentType.testInRange(Int) != APSIntType::RTR_Within)
+ if (AdjustmentType.testInRange(Int, true) != APSIntType::RTR_Within)
return NULL;
// [Int-Adjustment, Int-Adjustment]
@@ -454,7 +454,7 @@ RangeConstraintManager::assumeSymLT(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
- switch (AdjustmentType.testInRange(Int)) {
+ switch (AdjustmentType.testInRange(Int, true)) {
case APSIntType::RTR_Below:
return NULL;
case APSIntType::RTR_Within:
@@ -483,7 +483,7 @@ RangeConstraintManager::assumeSymGT(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
- switch (AdjustmentType.testInRange(Int)) {
+ switch (AdjustmentType.testInRange(Int, true)) {
case APSIntType::RTR_Below:
return St;
case APSIntType::RTR_Within:
@@ -512,7 +512,7 @@ RangeConstraintManager::assumeSymGE(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
- switch (AdjustmentType.testInRange(Int)) {
+ switch (AdjustmentType.testInRange(Int, true)) {
case APSIntType::RTR_Below:
return St;
case APSIntType::RTR_Within:
@@ -541,7 +541,7 @@ RangeConstraintManager::assumeSymLE(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
- switch (AdjustmentType.testInRange(Int)) {
+ switch (AdjustmentType.testInRange(Int, true)) {
case APSIntType::RTR_Below:
return NULL;
case APSIntType::RTR_Within:
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index 82db23dd6b..88c4eee4bb 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -19,10 +19,12 @@
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
@@ -318,10 +320,12 @@ public:
//===----------------------------------------------------------------------===//
namespace {
+class invalidateRegionsWorker;
class RegionStoreManager : public StoreManager {
public:
const RegionStoreFeatures Features;
+
RegionBindings::Factory RBFactory;
mutable ClusterBindings::Factory CBFactory;
@@ -331,10 +335,34 @@ private:
SValListTy> LazyBindingsMapTy;
LazyBindingsMapTy LazyBindingsMap;
+ /// The largest number of fields a struct can have and still be
+ /// considered "small".
+ ///
+ /// This is currently used to decide whether or not it is worth "forcing" a
+ /// LazyCompoundVal on bind.
+ ///
+  /// This is controlled by the 'region-store-small-struct-limit' option.
+ /// To disable all small-struct-dependent behavior, set the option to "0".
+ unsigned SmallStructLimit;
+
+ /// \brief A helper used to populate the work list with the given set of
+ /// regions.
+ void populateWorkList(invalidateRegionsWorker &W,
+ ArrayRef<SVal> Values,
+ bool IsArrayOfConstRegions,
+ InvalidatedRegions *TopLevelRegions);
+
public:
RegionStoreManager(ProgramStateManager& mgr, const RegionStoreFeatures &f)
: StoreManager(mgr), Features(f),
- RBFactory(mgr.getAllocator()), CBFactory(mgr.getAllocator()) {}
+ RBFactory(mgr.getAllocator()), CBFactory(mgr.getAllocator()),
+ SmallStructLimit(0) {
+ if (SubEngine *Eng = StateMgr.getOwningEngine()) {
+ AnalyzerOptions &Options = Eng->getAnalysisManager().options;
+ SmallStructLimit =
+ Options.getOptionAsInteger("region-store-small-struct-limit", 2);
+ }
+ }
/// setImplicitDefaultValue - Set the default binding for the provided
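A sketch of the binding behavior this limit controls (hypothetical types; the default of 2 comes from the getOptionAsInteger call above): copying a struct with no more fields than the limit is bound field-by-field via tryBindSmallStruct, while larger structs keep the usual lazy LazyCompoundVal binding.

struct Small { int a, b; };    // 2 fields: within the default limit
struct Large { int a, b, c; }; // 3 fields: over the default limit

void example(const Small &s, const Large &l) {
  Small s2 = s; // fields copied eagerly, avoiding long LazyCompoundVal chains
  Large l2 = l; // still bound lazily as a single LazyCompoundVal
  (void)s2;
  (void)l2;
}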
@@ -365,12 +393,17 @@ public:
RegionBindingsRef B,
InvalidatedRegions *Invalidated);
- StoreRef invalidateRegions(Store store, ArrayRef<const MemRegion *> Regions,
+ StoreRef invalidateRegions(Store store,
+ ArrayRef<SVal> Values,
+ ArrayRef<SVal> ConstValues,
const Expr *E, unsigned Count,
const LocationContext *LCtx,
- InvalidatedSymbols &IS,
const CallEvent *Call,
- InvalidatedRegions *Invalidated);
+ InvalidatedSymbols &IS,
+ InvalidatedSymbols &ConstIS,
+ InvalidatedRegions *Invalidated,
+ InvalidatedRegions *InvalidatedTopLevel,
+ InvalidatedRegions *InvalidatedTopLevelConst);
bool scanReachableSymbols(Store S, const MemRegion *R,
ScanReachableSymbols &Callbacks);
@@ -396,19 +429,20 @@ public: // Part of public interface to class.
.getRootWithoutRetain(), *this);
}
- /// \brief Create a new store that binds a value to a compound literal.
- ///
- /// \param ST The original store whose bindings are the basis for the new
- /// store.
+ /// Attempt to extract the fields of \p LCV and bind them to the struct region
+ /// \p R.
///
- /// \param CL The compound literal to bind (the binding key).
+ /// This path is used when it seems advantageous to "force" loading the values
+ /// within a LazyCompoundVal to bind memberwise to the struct region, rather
+ /// than using a Default binding at the base of the entire region. This is a
+ /// heuristic attempting to avoid building long chains of LazyCompoundVals.
///
- /// \param LC The LocationContext for the binding.
- ///
- /// \param V The value to bind to the compound literal.
- StoreRef bindCompoundLiteral(Store ST,
- const CompoundLiteralExpr *CL,
- const LocationContext *LC, SVal V);
+ /// \returns The updated store bindings, or \c None if binding non-lazily
+ /// would be too expensive.
+ Optional<RegionBindingsRef> tryBindSmallStruct(RegionBindingsConstRef B,
+ const TypedValueRegion *R,
+ const RecordDecl *RD,
+ nonloc::LazyCompoundVal LCV);
/// BindStruct - Bind a compound value to a structure.
RegionBindingsRef bindStruct(RegionBindingsConstRef B,
@@ -477,8 +511,7 @@ public: // Part of public interface to class.
SVal getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
const TypedValueRegion *R,
- QualType Ty,
- const MemRegion *superR);
+ QualType Ty);
SVal getLazyBinding(const SubRegion *LazyBindingRegion,
RegionBindingsRef LazyBinding);
@@ -591,11 +624,23 @@ ento::CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr) {
//===----------------------------------------------------------------------===//
namespace {
+/// Used to determine which global regions are automatically included in the
+/// initial worklist of a ClusterAnalysis.
+enum GlobalsFilterKind {
+ /// Don't include any global regions.
+ GFK_None,
+ /// Only include system globals.
+ GFK_SystemOnly,
+ /// Include all global regions.
+ GFK_All
+};
+
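An illustrative view of the filter (hypothetical code; which filter applies is decided later in invalidateRegions, based on whether the call comes from a system header):

int appGlobal;      // a non-static internal global

void libraryCall(); // imagine this is declared in a system header
void projectCall(); // declared in ordinary project code

void example() {
  libraryCall(); // GFK_SystemOnly: only system globals are invalidated;
                 // appGlobal keeps whatever value was known before the call
  projectCall(); // GFK_All: appGlobal (and system globals) are invalidated
}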
template <typename DERIVED>
class ClusterAnalysis {
protected:
typedef llvm::DenseMap<const MemRegion *, const ClusterBindings *> ClusterMap;
- typedef SmallVector<const MemRegion *, 10> WorkList;
+ typedef llvm::PointerIntPair<const MemRegion *, 1, bool> WorkListElement;
+ typedef SmallVector<WorkListElement, 10> WorkList;
llvm::SmallPtrSet<const ClusterBindings *, 16> Visited;
@@ -606,19 +651,36 @@ protected:
SValBuilder &svalBuilder;
RegionBindingsRef B;
-
- const bool includeGlobals;
+private:
+ GlobalsFilterKind GlobalsFilter;
+
+protected:
const ClusterBindings *getCluster(const MemRegion *R) {
return B.lookup(R);
}
+ /// Returns true if the memory space of the given region is one of the global
+ /// regions specially included at the start of analysis.
+ bool isInitiallyIncludedGlobalRegion(const MemRegion *R) {
+ switch (GlobalsFilter) {
+ case GFK_None:
+ return false;
+ case GFK_SystemOnly:
+ return isa<GlobalSystemSpaceRegion>(R->getMemorySpace());
+ case GFK_All:
+ return isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace());
+ }
+
+ llvm_unreachable("unknown globals filter");
+ }
+
public:
ClusterAnalysis(RegionStoreManager &rm, ProgramStateManager &StateMgr,
- RegionBindingsRef b, const bool includeGlobals)
+ RegionBindingsRef b, GlobalsFilterKind GFK)
: RM(rm), Ctx(StateMgr.getContext()),
svalBuilder(StateMgr.getSValBuilder()),
- B(b), includeGlobals(includeGlobals) {}
+ B(b), GlobalsFilter(GFK) {}
RegionBindingsRef getRegionBindings() const { return B; }
@@ -636,41 +698,41 @@ public:
assert(!Cluster.isEmpty() && "Empty clusters should be removed");
static_cast<DERIVED*>(this)->VisitAddedToCluster(Base, Cluster);
- if (includeGlobals)
- if (isa<NonStaticGlobalSpaceRegion>(Base->getMemorySpace()))
- AddToWorkList(Base, &Cluster);
+    // If this is an interesting global region, add it to the work list up front.
+ if (isInitiallyIncludedGlobalRegion(Base))
+ AddToWorkList(WorkListElement(Base), &Cluster);
}
}
- bool AddToWorkList(const MemRegion *R, const ClusterBindings *C) {
+ bool AddToWorkList(WorkListElement E, const ClusterBindings *C) {
if (C && !Visited.insert(C))
return false;
- WL.push_back(R);
+ WL.push_back(E);
return true;
}
- bool AddToWorkList(const MemRegion *R) {
- const MemRegion *baseR = R->getBaseRegion();
- return AddToWorkList(baseR, getCluster(baseR));
+ bool AddToWorkList(const MemRegion *R, bool Flag = false) {
+ const MemRegion *BaseR = R->getBaseRegion();
+ return AddToWorkList(WorkListElement(BaseR, Flag), getCluster(BaseR));
}
void RunWorkList() {
while (!WL.empty()) {
- const MemRegion *baseR = WL.pop_back_val();
+ WorkListElement E = WL.pop_back_val();
+ const MemRegion *BaseR = E.getPointer();
- // First visit the cluster.
- if (const ClusterBindings *Cluster = getCluster(baseR))
- static_cast<DERIVED*>(this)->VisitCluster(baseR, *Cluster);
-
- // Next, visit the base region.
- static_cast<DERIVED*>(this)->VisitBaseRegion(baseR);
+ static_cast<DERIVED*>(this)->VisitCluster(BaseR, getCluster(BaseR),
+ E.getInt());
}
}
-public:
void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C) {}
- void VisitCluster(const MemRegion *baseR, const ClusterBindings &C) {}
- void VisitBaseRegion(const MemRegion *baseR) {}
+ void VisitCluster(const MemRegion *baseR, const ClusterBindings *C) {}
+
+ void VisitCluster(const MemRegion *BaseR, const ClusterBindings *C,
+ bool Flag) {
+ static_cast<DERIVED*>(this)->VisitCluster(BaseR, C);
+ }
};
}
@@ -831,14 +893,22 @@ RegionStoreManager::removeSubRegionBindings(RegionBindingsConstRef B,
const SubRegion *Top) {
BindingKey TopKey = BindingKey::Make(Top, BindingKey::Default);
const MemRegion *ClusterHead = TopKey.getBaseRegion();
+
if (Top == ClusterHead) {
// We can remove an entire cluster's bindings all in one go.
return B.remove(Top);
}
const ClusterBindings *Cluster = B.lookup(ClusterHead);
- if (!Cluster)
+ if (!Cluster) {
+ // If we're invalidating a region with a symbolic offset, we need to make
+ // sure we don't treat the base region as uninitialized anymore.
+ if (TopKey.hasSymbolicOffset()) {
+ const SubRegion *Concrete = TopKey.getConcreteOffsetRegion();
+ return B.addBinding(Concrete, BindingKey::Default, UnknownVal());
+ }
return B;
+ }
SmallVector<BindingPair, 32> Bindings;
collectSubRegionBindings(Bindings, svalBuilder, *Cluster, Top, TopKey,
@@ -852,7 +922,8 @@ RegionStoreManager::removeSubRegionBindings(RegionBindingsConstRef B,
// If we're invalidating a region with a symbolic offset, we need to make sure
// we don't treat the base region as uninitialized anymore.
- // FIXME: This isn't very precise; see the example in the loop.
+ // FIXME: This isn't very precise; see the example in
+ // collectSubRegionBindings.
if (TopKey.hasSymbolicOffset()) {
const SubRegion *Concrete = TopKey.getConcreteOffsetRegion();
Result = Result.add(BindingKey::Make(Concrete, BindingKey::Default),
@@ -871,6 +942,7 @@ class invalidateRegionsWorker : public ClusterAnalysis<invalidateRegionsWorker>
unsigned Count;
const LocationContext *LCtx;
InvalidatedSymbols &IS;
+ InvalidatedSymbols &ConstIS;
StoreManager::InvalidatedRegions *Regions;
public:
invalidateRegionsWorker(RegionStoreManager &rm,
@@ -879,15 +951,16 @@ public:
const Expr *ex, unsigned count,
const LocationContext *lctx,
InvalidatedSymbols &is,
+ InvalidatedSymbols &inConstIS,
StoreManager::InvalidatedRegions *r,
- bool includeGlobals)
- : ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b, includeGlobals),
- Ex(ex), Count(count), LCtx(lctx), IS(is), Regions(r) {}
-
- void VisitCluster(const MemRegion *baseR, const ClusterBindings &C);
- void VisitBaseRegion(const MemRegion *baseR);
-
-private:
+ GlobalsFilterKind GFK)
+ : ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b, GFK),
+ Ex(ex), Count(count), LCtx(lctx), IS(is), ConstIS(inConstIS), Regions(r){}
+
+ /// \param IsConst Specifies if the region we are invalidating is constant.
+ /// If it is, we invalidate all subregions, but not the base region itself.
+ void VisitCluster(const MemRegion *baseR, const ClusterBindings *C,
+ bool IsConst);
void VisitBinding(SVal V);
};
}
@@ -917,18 +990,17 @@ void invalidateRegionsWorker::VisitBinding(SVal V) {
}
}
-void invalidateRegionsWorker::VisitCluster(const MemRegion *BaseR,
- const ClusterBindings &C) {
- for (ClusterBindings::iterator I = C.begin(), E = C.end(); I != E; ++I)
- VisitBinding(I.getData());
-
- B = B.remove(BaseR);
-}
+void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
+ const ClusterBindings *C,
+ bool IsConst) {
+ if (C) {
+ for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I)
+ VisitBinding(I.getData());
-void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
- // Symbolic region? Mark that symbol touched by the invalidation.
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR))
- IS.insert(SR->getSymbol());
+ // Invalidate the contents of a non-const base region.
+ if (!IsConst)
+ B = B.remove(baseR);
+ }
// BlockDataRegion? If so, invalidate captured variables that are passed
// by reference.
@@ -957,6 +1029,21 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
return;
}
+ // Symbolic region?
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR)) {
+ SymbolRef RegionSym = SR->getSymbol();
+
+ // Mark that symbol touched by the invalidation.
+ if (IsConst)
+ ConstIS.insert(RegionSym);
+ else
+ IS.insert(RegionSym);
+ }
+
+ // Nothing else should be done for a const region.
+ if (IsConst)
+ return;
+
// Otherwise, we have a normal data region. Record that we touched the region.
if (Regions)
Regions->push_back(baseR);
@@ -976,7 +1063,13 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
const TypedValueRegion *TR = cast<TypedValueRegion>(baseR);
QualType T = TR->getValueType();
- // Invalidate the binding.
+ if (isInitiallyIncludedGlobalRegion(baseR)) {
+ // If the region is a global and we are invalidating all globals,
+ // erasing the entry is good enough. This causes all globals to be lazily
+ // symbolicated from the same base symbol.
+ return;
+ }
+
if (T->isStructureOrClassType()) {
// Invalidate the region by setting its default value to
       // conjured symbol. The type of the symbol is irrelevant.
@@ -994,16 +1087,6 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
B = B.addBinding(baseR, BindingKey::Default, V);
return;
}
-
- if (includeGlobals &&
- isa<NonStaticGlobalSpaceRegion>(baseR->getMemorySpace())) {
- // If the region is a global and we are invalidating all globals,
- // just erase the entry. This causes all globals to be lazily
- // symbolicated from the same base symbol.
- B = B.removeBinding(baseR);
- return;
- }
-
DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
T,Count);
@@ -1036,45 +1119,92 @@ RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
return B;
}
+void RegionStoreManager::populateWorkList(invalidateRegionsWorker &W,
+ ArrayRef<SVal> Values,
+ bool IsArrayOfConstRegions,
+ InvalidatedRegions *TopLevelRegions) {
+ for (ArrayRef<SVal>::iterator I = Values.begin(),
+ E = Values.end(); I != E; ++I) {
+ SVal V = *I;
+ if (Optional<nonloc::LazyCompoundVal> LCS =
+ V.getAs<nonloc::LazyCompoundVal>()) {
+
+ const SValListTy &Vals = getInterestingValues(*LCS);
+
+ for (SValListTy::const_iterator I = Vals.begin(),
+ E = Vals.end(); I != E; ++I) {
+ // Note: the last argument is false here because these are
+ // non-top-level regions.
+ if (const MemRegion *R = (*I).getAsRegion())
+ W.AddToWorkList(R, /*IsConst=*/ false);
+ }
+ continue;
+ }
+
+ if (const MemRegion *R = V.getAsRegion()) {
+ if (TopLevelRegions)
+ TopLevelRegions->push_back(R);
+ W.AddToWorkList(R, /*IsConst=*/ IsArrayOfConstRegions);
+ continue;
+ }
+ }
+}
+
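A hedged illustration of the const-region path, assuming the call-modeling code passes regions reachable only through pointers-to-const in ConstValues (that plumbing is outside this hunk): such regions still produce a pointer-escape notification, but their bindings are not removed.

void inspect(const int *p); // body unknown to the analyzer

void example() {
  int value = 7;
  inspect(&value);
  // Under the assumption above, 'value' is a const top-level region here:
  // checkers are told about a const escape, yet the binding value == 7 survives.
}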
StoreRef
RegionStoreManager::invalidateRegions(Store store,
- ArrayRef<const MemRegion *> Regions,
+ ArrayRef<SVal> Values,
+ ArrayRef<SVal> ConstValues,
const Expr *Ex, unsigned Count,
const LocationContext *LCtx,
- InvalidatedSymbols &IS,
const CallEvent *Call,
+ InvalidatedSymbols &IS,
+ InvalidatedSymbols &ConstIS,
+ InvalidatedRegions *TopLevelRegions,
+ InvalidatedRegions *TopLevelConstRegions,
InvalidatedRegions *Invalidated) {
- invalidateRegionsWorker W(*this, StateMgr,
- RegionStoreManager::getRegionBindings(store),
- Ex, Count, LCtx, IS, Invalidated, false);
+ GlobalsFilterKind GlobalsFilter;
+ if (Call) {
+ if (Call->isInSystemHeader())
+ GlobalsFilter = GFK_SystemOnly;
+ else
+ GlobalsFilter = GFK_All;
+ } else {
+ GlobalsFilter = GFK_None;
+ }
+
+ RegionBindingsRef B = getRegionBindings(store);
+ invalidateRegionsWorker W(*this, StateMgr, B, Ex, Count, LCtx, IS, ConstIS,
+ Invalidated, GlobalsFilter);
// Scan the bindings and generate the clusters.
W.GenerateClusters();
// Add the regions to the worklist.
- for (ArrayRef<const MemRegion *>::iterator
- I = Regions.begin(), E = Regions.end(); I != E; ++I)
- W.AddToWorkList(*I);
+ populateWorkList(W, Values, /*IsArrayOfConstRegions*/ false,
+ TopLevelRegions);
+ populateWorkList(W, ConstValues, /*IsArrayOfConstRegions*/ true,
+ TopLevelConstRegions);
W.RunWorkList();
// Return the new bindings.
- RegionBindingsRef B = W.getRegionBindings();
+ B = W.getRegionBindings();
- // For all globals which are not static nor immutable: determine which global
- // regions should be invalidated and invalidate them.
+ // For calls, determine which global regions should be invalidated and
+ // invalidate them. (Note that function-static and immutable globals are never
+ // invalidated by this.)
// TODO: This could possibly be more precise with modules.
- //
- // System calls invalidate only system globals.
- if (Call && Call->isInSystemHeader()) {
- B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
+ switch (GlobalsFilter) {
+ case GFK_All:
+ B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind,
Ex, Count, LCtx, B, Invalidated);
- // Internal calls might invalidate both system and internal globals.
- } else {
+ // FALLTHROUGH
+ case GFK_SystemOnly:
B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
Ex, Count, LCtx, B, Invalidated);
- B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind,
- Ex, Count, LCtx, B, Invalidated);
+ // FALLTHROUGH
+ case GFK_None:
+ break;
}
return StoreRef(B.asStore(), *this);
@@ -1265,6 +1395,17 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
return svalBuilder.getRegionValueSymbolVal(R);
}
+static QualType getUnderlyingType(const SubRegion *R) {
+ QualType RegionTy;
+ if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R))
+ RegionTy = TVR->getValueType();
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ RegionTy = SR->getSymbol()->getType();
+
+ return RegionTy;
+}
+
/// Checks to see if store \p B has a lazy binding for region \p R.
///
/// If \p AllowSubregionBindings is \c false, a lazy binding will be rejected
@@ -1283,11 +1424,11 @@ getExistingLazyBinding(SValBuilder &SVB, RegionBindingsConstRef B,
if (!LCV)
return None;
- // If the LCV is for a subregion, the types won't match, and we shouldn't
- // reuse the binding. Unfortuately we can only check this if the destination
- // region is a TypedValueRegion.
- if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R)) {
- QualType RegionTy = TVR->getValueType();
+ // If the LCV is for a subregion, the types might not match, and we shouldn't
+ // reuse the binding.
+ QualType RegionTy = getUnderlyingType(R);
+ if (!RegionTy.isNull() &&
+ !RegionTy->isVoidPointerType()) {
QualType SourceRegionTy = LCV->getRegion()->getValueType();
if (!SVB.getContext().hasSameUnqualifiedType(RegionTy, SourceRegionTy))
return None;
@@ -1424,8 +1565,7 @@ SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
}
}
}
- return getBindingForFieldOrElementCommon(B, R, R->getElementType(),
- superR);
+ return getBindingForFieldOrElementCommon(B, R, R->getElementType());
}
SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
@@ -1436,7 +1576,7 @@ SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
return *V;
QualType Ty = R->getValueType();
- return getBindingForFieldOrElementCommon(B, R, Ty, R->getSuperRegion());
+ return getBindingForFieldOrElementCommon(B, R, Ty);
}
Optional<SVal>
@@ -1499,8 +1639,7 @@ SVal RegionStoreManager::getLazyBinding(const SubRegion *LazyBindingRegion,
SVal
RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
const TypedValueRegion *R,
- QualType Ty,
- const MemRegion *superR) {
+ QualType Ty) {
// At this point we have already checked in either getBindingForElement or
// getBindingForField if 'R' has a direct binding.
@@ -1533,8 +1672,9 @@ RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
// quickly result in a warning.
bool hasPartialLazyBinding = false;
- const SubRegion *Base = dyn_cast<SubRegion>(superR);
- while (Base) {
+ const SubRegion *SR = dyn_cast<SubRegion>(R);
+ while (SR) {
+ const MemRegion *Base = SR->getSuperRegion();
if (Optional<SVal> D = getBindingForDerivedDefaultValue(B, Base, R, Ty)) {
if (D->getAs<nonloc::LazyCompoundVal>()) {
hasPartialLazyBinding = true;
@@ -1552,7 +1692,7 @@ RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
// If our super region is a field or element itself, walk up the region
// hierarchy to see if there is a default value installed in an ancestor.
- Base = dyn_cast<SubRegion>(Base->getSuperRegion());
+ SR = dyn_cast<SubRegion>(Base);
}
if (R->hasStackNonParametersStorage()) {
@@ -1560,7 +1700,7 @@ RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
// Currently we don't reason specially about Clang-style vectors. Check
// if superR is a vector and if so return Unknown.
if (const TypedValueRegion *typedSuperR =
- dyn_cast<TypedValueRegion>(superR)) {
+ dyn_cast<TypedValueRegion>(R->getSuperRegion())) {
if (typedSuperR->getValueType()->isVectorType())
return UnknownVal();
}
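For illustration (an assumed example, not from the patch): the walk now starts at the queried region itself and climbs its super-regions, so a default value installed on an enclosing object is still found for a nested field:

    // Sketch of analyzed code.
    struct Inner { int a; };
    struct Outer { Inner in; int b; };

    void defaults() {
      Outer o = {};     // a zero default binding covers the whole of 'o'
      int x = o.in.a;   // found by walking up from 'o.in.a' through 'o.in' to 'o'
      (void)x;
    }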
@@ -1601,26 +1741,6 @@ SVal RegionStoreManager::getBindingForObjCIvar(RegionBindingsConstRef B,
return getBindingForLazySymbol(R);
}
-static Optional<SVal> getConstValue(SValBuilder &SVB, const VarDecl *VD) {
- ASTContext &Ctx = SVB.getContext();
- if (!VD->getType().isConstQualified())
- return None;
-
- const Expr *Init = VD->getInit();
- if (!Init)
- return None;
-
- llvm::APSInt Result;
- if (!Init->isGLValue() && Init->EvaluateAsInt(Result, Ctx))
- return SVB.makeIntVal(Result);
-
- if (Init->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull))
- return SVB.makeNull();
-
- // FIXME: Handle other possible constant expressions.
- return None;
-}
-
SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
const VarRegion *R) {
@@ -1637,8 +1757,10 @@ SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
return svalBuilder.getRegionValueSymbolVal(R);
// Is 'VD' declared constant? If so, retrieve the constant value.
- if (Optional<SVal> V = getConstValue(svalBuilder, VD))
- return *V;
+ if (VD->getType().isConstQualified())
+ if (const Expr *Init = VD->getInit())
+ if (Optional<SVal> V = svalBuilder.getConstantVal(Init))
+ return *V;
// This must come after the check for constants because closure-captured
// constant variables may appear in UnknownSpaceRegion.
@@ -1810,14 +1932,6 @@ RegionStoreManager::bind(RegionBindingsConstRef B, Loc L, SVal V) {
return NewB.addBinding(BindingKey::Make(R, BindingKey::Direct), V);
}
-// FIXME: this method should be merged into Bind().
-StoreRef RegionStoreManager::bindCompoundLiteral(Store ST,
- const CompoundLiteralExpr *CL,
- const LocationContext *LC,
- SVal V) {
- return Bind(ST, loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC)), V);
-}
-
RegionBindingsRef
RegionStoreManager::setImplicitDefaultValue(RegionBindingsConstRef B,
const MemRegion *R,
@@ -1826,7 +1940,7 @@ RegionStoreManager::setImplicitDefaultValue(RegionBindingsConstRef B,
if (Loc::isLocType(T))
V = svalBuilder.makeNull();
- else if (T->isIntegerType())
+ else if (T->isIntegralOrEnumerationType())
V = svalBuilder.makeZeroVal(T);
else if (T->isStructureOrClassType() || T->isArrayType()) {
// Set the default value to a zero constant when it is a structure
@@ -1896,7 +2010,7 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
else if (ElementTy->isArrayType())
NewB = bindArray(NewB, ER, *VI);
else
- NewB = bind(NewB, svalBuilder.makeLoc(ER), *VI);
+ NewB = bind(NewB, loc::MemRegionVal(ER), *VI);
}
// If the init list is shorter than the array length, set the
@@ -1937,14 +2051,56 @@ RegionBindingsRef RegionStoreManager::bindVector(RegionBindingsConstRef B,
NonLoc Idx = svalBuilder.makeArrayIndex(index);
const ElementRegion *ER = MRMgr.getElementRegion(ElemType, Idx, R, Ctx);
-
+
if (ElemType->isArrayType())
NewB = bindArray(NewB, ER, *VI);
else if (ElemType->isStructureOrClassType())
NewB = bindStruct(NewB, ER, *VI);
else
- NewB = bind(NewB, svalBuilder.makeLoc(ER), *VI);
+ NewB = bind(NewB, loc::MemRegionVal(ER), *VI);
+ }
+ return NewB;
+}
+
+Optional<RegionBindingsRef>
+RegionStoreManager::tryBindSmallStruct(RegionBindingsConstRef B,
+ const TypedValueRegion *R,
+ const RecordDecl *RD,
+ nonloc::LazyCompoundVal LCV) {
+ FieldVector Fields;
+
+ if (const CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(RD))
+ if (Class->getNumBases() != 0 || Class->getNumVBases() != 0)
+ return None;
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I) {
+ const FieldDecl *FD = *I;
+ if (FD->isUnnamedBitfield())
+ continue;
+
+ // If there are too many fields, or if any of the fields are aggregates,
+ // just use the LCV as a default binding.
+ if (Fields.size() == SmallStructLimit)
+ return None;
+
+ QualType Ty = FD->getType();
+ if (!(Ty->isScalarType() || Ty->isReferenceType()))
+ return None;
+
+ Fields.push_back(*I);
}
+
+ RegionBindingsRef NewB = B;
+
+ for (FieldVector::iterator I = Fields.begin(), E = Fields.end(); I != E; ++I){
+ const FieldRegion *SourceFR = MRMgr.getFieldRegion(*I, LCV.getRegion());
+ SVal V = getBindingForField(getRegionBindings(LCV.getStore()), SourceFR);
+
+ const FieldRegion *DestFR = MRMgr.getFieldRegion(*I, R);
+ NewB = bind(NewB, loc::MemRegionVal(DestFR), V);
+ }
+
return NewB;
}
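For illustration (assumed example): with tryBindSmallStruct, copying a small, scalar-only struct is bound field by field instead of as one opaque lazy compound value, as long as the field count stays under SmallStructLimit:

    // Sketch of analyzed code.
    struct Point { int x; int y; };   // small and scalar-only: eligible

    void copyPoint(Point p) {
      Point q = p;    // q.x and q.y get individual bindings copied from p
      int x = q.x;    // reads back the precise per-field value
      (void)x;
    }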
@@ -1958,13 +2114,19 @@ RegionBindingsRef RegionStoreManager::bindStruct(RegionBindingsConstRef B,
assert(T->isStructureOrClassType());
const RecordType* RT = T->getAs<RecordType>();
- RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getDecl();
if (!RD->isCompleteDefinition())
return B;
// Handle lazy compound values and symbolic values.
- if (V.getAs<nonloc::LazyCompoundVal>() || V.getAs<nonloc::SymbolVal>())
+ if (Optional<nonloc::LazyCompoundVal> LCV =
+ V.getAs<nonloc::LazyCompoundVal>()) {
+ if (Optional<RegionBindingsRef> NewB = tryBindSmallStruct(B, R, RD, *LCV))
+ return *NewB;
+ return bindAggregate(B, R, V);
+ }
+ if (V.getAs<nonloc::SymbolVal>())
return bindAggregate(B, R, V);
// We may get non-CompoundVal accidentally due to imprecise cast logic or
@@ -1996,7 +2158,7 @@ RegionBindingsRef RegionStoreManager::bindStruct(RegionBindingsConstRef B,
else if (FTy->isStructureOrClassType())
NewB = bindStruct(NewB, FR, *VI);
else
- NewB = bind(NewB, svalBuilder.makeLoc(FR), *VI);
+ NewB = bind(NewB, loc::MemRegionVal(FR), *VI);
++VI;
}
@@ -2034,13 +2196,13 @@ public:
ProgramStateManager &stateMgr,
RegionBindingsRef b, SymbolReaper &symReaper,
const StackFrameContext *LCtx)
- : ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b,
- /* includeGlobals = */ false),
+ : ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b, GFK_None),
SymReaper(symReaper), CurrentLCtx(LCtx) {}
// Called by ClusterAnalysis.
void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C);
- void VisitCluster(const MemRegion *baseR, const ClusterBindings &C);
+ void VisitCluster(const MemRegion *baseR, const ClusterBindings *C);
+ using ClusterAnalysis<removeDeadBindingsWorker>::VisitCluster;
bool UpdatePostponed();
void VisitBinding(SVal V);
@@ -2083,13 +2245,16 @@ void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
}
void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
- const ClusterBindings &C) {
+ const ClusterBindings *C) {
+ if (!C)
+ return;
+
// Mark the symbol for any SymbolicRegion with live bindings as live itself.
// This means we should continue to track that symbol.
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(baseR))
SymReaper.markLive(SymR->getSymbol());
- for (ClusterBindings::iterator I = C.begin(), E = C.end(); I != E; ++I)
+ for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I)
VisitBinding(I.getData());
}
diff --git a/lib/StaticAnalyzer/Core/SValBuilder.cpp b/lib/StaticAnalyzer/Core/SValBuilder.cpp
index c72e780801..9d77a3ef58 100644
--- a/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -33,7 +33,7 @@ DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
if (Loc::isLocType(type))
return makeNull();
- if (type->isIntegerType())
+ if (type->isIntegralOrEnumerationType())
return makeIntVal(0, type);
// FIXME: Handle floats.
@@ -106,12 +106,19 @@ SValBuilder::getRegionValueSymbolVal(const TypedValueRegion* region) {
return nonloc::SymbolVal(sym);
}
-DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
- const Expr *expr,
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *SymbolTag,
+ const Expr *Ex,
const LocationContext *LCtx,
- unsigned count) {
- QualType T = expr->getType();
- return conjureSymbolVal(symbolTag, expr, LCtx, T, count);
+ unsigned Count) {
+ QualType T = Ex->getType();
+
+ // Compute the type of the result. If the expression is not an R-value, the
+ // result should be a location.
+ QualType ExType = Ex->getType();
+ if (Ex->isGLValue())
+ T = LCtx->getAnalysisDeclContext()->getASTContext().getPointerType(ExType);
+
+ return conjureSymbolVal(SymbolTag, Ex, LCtx, T, Count);
}
DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
@@ -217,6 +224,68 @@ loc::MemRegionVal SValBuilder::getCXXThis(const CXXRecordDecl *D,
return loc::MemRegionVal(getRegionManager().getCXXThisRegion(PT, SFC));
}
+Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
+ E = E->IgnoreParens();
+
+ switch (E->getStmtClass()) {
+ // Handle expressions that we treat differently from the AST's constant
+ // evaluator.
+ case Stmt::AddrLabelExprClass:
+ return makeLoc(cast<AddrLabelExpr>(E));
+
+ case Stmt::CXXScalarValueInitExprClass:
+ case Stmt::ImplicitValueInitExprClass:
+ return makeZeroVal(E->getType());
+
+ case Stmt::ObjCStringLiteralClass: {
+ const ObjCStringLiteral *SL = cast<ObjCStringLiteral>(E);
+ return makeLoc(getRegionManager().getObjCStringRegion(SL));
+ }
+
+ case Stmt::StringLiteralClass: {
+ const StringLiteral *SL = cast<StringLiteral>(E);
+ return makeLoc(getRegionManager().getStringRegion(SL));
+ }
+
+ // Fast-path some expressions to avoid the overhead of going through the AST's
+ // constant evaluator.
+ case Stmt::CharacterLiteralClass: {
+ const CharacterLiteral *C = cast<CharacterLiteral>(E);
+ return makeIntVal(C->getValue(), C->getType());
+ }
+
+ case Stmt::CXXBoolLiteralExprClass:
+ return makeBoolVal(cast<CXXBoolLiteralExpr>(E));
+
+ case Stmt::IntegerLiteralClass:
+ return makeIntVal(cast<IntegerLiteral>(E));
+
+ case Stmt::ObjCBoolLiteralExprClass:
+ return makeBoolVal(cast<ObjCBoolLiteralExpr>(E));
+
+ case Stmt::CXXNullPtrLiteralExprClass:
+ return makeNull();
+
+ // If we don't have a special case, fall back to the AST's constant evaluator.
+ default: {
+ // Don't try to come up with a value for materialized temporaries.
+ if (E->isGLValue())
+ return None;
+
+ ASTContext &Ctx = getContext();
+ llvm::APSInt Result;
+ if (E->EvaluateAsInt(Result, Ctx))
+ return makeIntVal(Result);
+
+ if (Loc::isLocType(E->getType()))
+ if (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull))
+ return makeNull();
+
+ return None;
+ }
+ }
+}
+
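A hedged sketch of code under analysis (assumed example): getConstantVal folds common literal initializers directly, and covers cases the AST evaluator would not hand back as an integer (string literals, nullptr, address-of-label):

    // Sketch of analyzed code, not part of this change.
    void literals() {
      const bool  b = true;         // CXXBoolLiteralExpr fast path
      const char  c = 'a';          // CharacterLiteral fast path
      const int   n = 7;            // IntegerLiteral fast path
      int *const  p = nullptr;      // CXXNullPtrLiteralExpr -> null location
      const char *const s = "hi";   // StringLiteral -> string region
      (void)b; (void)c; (void)n; (void)p; (void)s;
    }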
//===----------------------------------------------------------------------===//
SVal SValBuilder::makeSymExprValNN(ProgramStateRef State,
@@ -320,6 +389,22 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
if (val.isUnknownOrUndef() || castTy == originalTy)
return val;
+ if (castTy->isBooleanType()) {
+ if (val.isUnknownOrUndef())
+ return val;
+ if (val.isConstant())
+ return makeTruthVal(!val.isZeroConstant(), castTy);
+ if (SymbolRef Sym = val.getAsSymbol()) {
+ BasicValueFactory &BVF = getBasicValueFactory();
+ // FIXME: If we had a state here, we could see if the symbol is known to
+ // be zero, but we don't.
+ return makeNonLoc(Sym, BO_NE, BVF.getValue(0, Sym->getType()), castTy);
+ }
+
+ assert(val.getAs<Loc>());
+ return makeTruthVal(true, castTy);
+ }
+
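Illustrative only (assumed example): with the boolean-cast case above, converting a symbolic pointer or integer to bool yields the symbolic comparison ($sym != 0) rather than an unknown value, so a later branch on the result stays constrained:

    // Sketch of analyzed code; getPtr is a hypothetical declaration.
    extern int *getPtr();

    void branchOnBool() {
      bool ok = getPtr();   // modeled as ($p != 0) for the returned symbol $p
      if (ok) {
        // on this path the analyzer knows the pointer is non-null
      }
    }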
// For const casts, casts to void, just propagate the value.
if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
if (shouldBeModeledWithNoOp(Context, Context.getPointerType(castTy),
@@ -327,11 +412,11 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
return val;
// Check for casts from pointers to integers.
- if (castTy->isIntegerType() && Loc::isLocType(originalTy))
+ if (castTy->isIntegralOrEnumerationType() && Loc::isLocType(originalTy))
return evalCastFromLoc(val.castAs<Loc>(), castTy);
// Check for casts from integers to pointers.
- if (Loc::isLocType(castTy) && originalTy->isIntegerType()) {
+ if (Loc::isLocType(castTy) && originalTy->isIntegralOrEnumerationType()) {
if (Optional<nonloc::LocAsInteger> LV = val.getAs<nonloc::LocAsInteger>()) {
if (const MemRegion *R = LV->getLoc().getAsRegion()) {
StoreManager &storeMgr = StateMgr.getStoreManager();
@@ -361,7 +446,7 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
// Are we casting from an array to an integer? If so, cast the decayed
// pointer value to an integer.
- assert(castTy->isIntegerType());
+ assert(castTy->isIntegralOrEnumerationType());
// FIXME: Keep these here for now in case we decide soon that we
// need the original decayed type.
@@ -373,7 +458,7 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
// Check for casts from a region to a specific type.
if (const MemRegion *R = val.getAsRegion()) {
// Handle other casts of locations to integers.
- if (castTy->isIntegerType())
+ if (castTy->isIntegralOrEnumerationType())
return evalCastFromLoc(loc::MemRegionVal(R), castTy);
// FIXME: We should handle the case where we strip off view layers to get
diff --git a/lib/StaticAnalyzer/Core/SVals.cpp b/lib/StaticAnalyzer/Core/SVals.cpp
index da52a90ec5..650691535f 100644
--- a/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/lib/StaticAnalyzer/Core/SVals.cpp
@@ -64,14 +64,18 @@ const FunctionDecl *SVal::getAsFunctionDecl() const {
///
/// Implicit casts (ex: void* -> char*) can turn Symbolic region into Element
/// region. If that is the case, gets the underlining region.
-SymbolRef SVal::getAsLocSymbol() const {
+/// When IncludeBaseRegions is set to true and the SubRegion is non-symbolic,
+/// the first symbolic parent region is returned.
+SymbolRef SVal::getAsLocSymbol(bool IncludeBaseRegions) const {
// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
if (Optional<nonloc::LocAsInteger> X = getAs<nonloc::LocAsInteger>())
return X->getLoc().getAsLocSymbol();
if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
- const MemRegion *R = X->stripCasts();
- if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
+ const MemRegion *R = X->getRegion();
+ if (const SymbolicRegion *SymR = IncludeBaseRegions ?
+ R->getSymbolicBase() :
+ dyn_cast<SymbolicRegion>(R->StripCasts()))
return SymR->getSymbol();
}
return 0;
@@ -99,13 +103,17 @@ SymbolRef SVal::getLocSymbolInBase() const {
// TODO: The next 3 functions have to be simplified.
/// \brief If this SVal wraps a symbol return that SymbolRef.
-/// Otherwise return 0.
-SymbolRef SVal::getAsSymbol() const {
+/// Otherwise, return 0.
+///
+/// Casts are ignored during lookup.
+/// \param IncludeBaseRegion Whether the search should continue to the base
+/// regions if the region is not symbolic.
+SymbolRef SVal::getAsSymbol(bool IncludeBaseRegion) const {
// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
if (Optional<nonloc::SymbolVal> X = getAs<nonloc::SymbolVal>())
return X->getSymbol();
- return getAsLocSymbol();
+ return getAsLocSymbol(IncludeBaseRegion);
}
/// getAsSymbolicExpression - If this Sval wraps a symbolic expression then
@@ -214,13 +222,12 @@ SVal loc::ConcreteInt::evalBinOp(BasicValueFactory& BasicVals,
BinaryOperator::Opcode Op,
const loc::ConcreteInt& R) const {
- assert (Op == BO_Add || Op == BO_Sub ||
- (Op >= BO_LT && Op <= BO_NE));
+ assert(BinaryOperator::isComparisonOp(Op) || Op == BO_Sub);
- const llvm::APSInt* X = BasicVals.evalAPSInt(Op, getValue(), R.getValue());
+ const llvm::APSInt *X = BasicVals.evalAPSInt(Op, getValue(), R.getValue());
if (X)
- return loc::ConcreteInt(*X);
+ return nonloc::ConcreteInt(*X);
else
return UndefinedVal();
}
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index de13241cac..a06268dd33 100644
--- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -49,6 +49,16 @@ bool SimpleConstraintManager::canReasonAbout(SVal X) const {
}
}
+ if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(SE)) {
+ if (BinaryOperator::isComparisonOp(SSE->getOpcode())) {
+ // We handle Loc <> Loc comparisons, but not (yet) NonLoc <> NonLoc.
+ if (Loc::isLocType(SSE->getLHS()->getType())) {
+ assert(Loc::isLocType(SSE->getRHS()->getType()));
+ return true;
+ }
+ }
+ }
+
return false;
}
@@ -80,20 +90,15 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
case loc::MemRegionKind: {
// FIXME: Should this go into the storemanager?
-
const MemRegion *R = Cond.castAs<loc::MemRegionVal>().getRegion();
- const SubRegion *SubR = dyn_cast<SubRegion>(R);
-
- while (SubR) {
- // FIXME: now we only find the first symbolic region.
- if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) {
- const llvm::APSInt &zero = getBasicVals().getZeroWithPtrWidth();
- if (Assumption)
- return assumeSymNE(state, SymR->getSymbol(), zero, zero);
- else
- return assumeSymEQ(state, SymR->getSymbol(), zero, zero);
- }
- SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
+
+ // FIXME: now we only find the first symbolic region.
+ if (const SymbolicRegion *SymR = R->getSymbolicBase()) {
+ const llvm::APSInt &zero = getBasicVals().getZeroWithPtrWidth();
+ if (Assumption)
+ return assumeSymNE(state, SymR->getSymbol(), zero, zero);
+ else
+ return assumeSymEQ(state, SymR->getSymbol(), zero, zero);
}
// FALL-THROUGH.
@@ -119,21 +124,6 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
return state;
}
-static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
- // FIXME: This should probably be part of BinaryOperator, since this isn't
- // the only place it's used. (This code was copied from SimpleSValBuilder.cpp.)
- switch (op) {
- default:
- llvm_unreachable("Invalid opcode.");
- case BO_LT: return BO_GE;
- case BO_GT: return BO_LE;
- case BO_LE: return BO_GT;
- case BO_GE: return BO_LT;
- case BO_EQ: return BO_NE;
- case BO_NE: return BO_EQ;
- }
-}
-
ProgramStateRef
SimpleConstraintManager::assumeAuxForSymbol(ProgramStateRef State,
@@ -142,7 +132,7 @@ SimpleConstraintManager::assumeAuxForSymbol(ProgramStateRef State,
QualType T = Sym->getType();
// None of the constraint solvers currently support non-integer types.
- if (!T->isIntegerType())
+ if (!T->isIntegralOrEnumerationType())
return State;
const llvm::APSInt &zero = BVF.getValue(0, T);
@@ -164,8 +154,6 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
return assumeAuxForSymbol(state, sym, Assumption);
}
- BasicValueFactory &BasicVals = getBasicVals();
-
switch (Cond.getSubKind()) {
default:
llvm_unreachable("'Assume' not implemented for this NonLoc");
@@ -180,26 +168,45 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
return assumeAuxForSymbol(state, sym, Assumption);
// Handle symbolic expression.
- } else {
+ } else if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(sym)) {
// We can only simplify expressions whose RHS is an integer.
- const SymIntExpr *SE = dyn_cast<SymIntExpr>(sym);
- if (!SE)
- return assumeAuxForSymbol(state, sym, Assumption);
BinaryOperator::Opcode op = SE->getOpcode();
- // Implicitly compare non-comparison expressions to 0.
- if (!BinaryOperator::isComparisonOp(op)) {
- QualType T = SE->getType();
- const llvm::APSInt &zero = BasicVals.getValue(0, T);
- op = (Assumption ? BO_NE : BO_EQ);
- return assumeSymRel(state, SE, op, zero);
+ if (BinaryOperator::isComparisonOp(op)) {
+ if (!Assumption)
+ op = BinaryOperator::negateComparisonOp(op);
+
+ return assumeSymRel(state, SE->getLHS(), op, SE->getRHS());
}
- // From here on out, op is the real comparison we'll be testing.
- if (!Assumption)
- op = NegateComparison(op);
- return assumeSymRel(state, SE->getLHS(), op, SE->getRHS());
+ } else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(sym)) {
+ // Translate "a != b" to "(b - a) != 0".
+ // We invert the order of the operands as a heuristic for how loop
+ // conditions are usually written ("begin != end") as compared to length
+ // calculations ("end - begin"). The more correct thing to do would be to
+ // canonicalize "a - b" and "b - a", which would allow us to treat
+ // "a != b" and "b != a" the same.
+ SymbolManager &SymMgr = getSymbolManager();
+ BinaryOperator::Opcode Op = SSE->getOpcode();
+ assert(BinaryOperator::isComparisonOp(Op));
+
+ // For now, we only support comparing pointers.
+ assert(Loc::isLocType(SSE->getLHS()->getType()));
+ assert(Loc::isLocType(SSE->getRHS()->getType()));
+ QualType DiffTy = SymMgr.getContext().getPointerDiffType();
+ SymbolRef Subtraction = SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub,
+ SSE->getLHS(), DiffTy);
+
+ const llvm::APSInt &Zero = getBasicVals().getValue(0, DiffTy);
+ Op = BinaryOperator::reverseComparisonOp(Op);
+ if (!Assumption)
+ Op = BinaryOperator::negateComparisonOp(Op);
+ return assumeSymRel(state, Subtraction, Op, Zero);
}
+
+ // If we get here, there's nothing else we can do but treat the symbol as
+ // opaque.
+ return assumeAuxForSymbol(state, sym, Assumption);
}
case nonloc::ConcreteIntKind: {
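For illustration (assumed example): the SymSymExpr case above is what makes iterator-style pointer comparisons trackable; the operand order is flipped as described in the comment, so the recorded constraint is on the difference:

    // Sketch of analyzed code.
    void walk(const char *begin, const char *end) {
      if (begin != end) {
        // assumed on this path: ($end - $begin) != 0
      } else {
        // assumed on this path: ($end - $begin) == 0
      }
    }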
@@ -257,10 +264,14 @@ ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
APSIntType ComparisonType = std::max(WraparoundType, APSIntType(Int));
llvm::APSInt ConvertedInt = ComparisonType.convert(Int);
+ // Prefer unsigned comparisons.
+ if (ComparisonType.getBitWidth() == WraparoundType.getBitWidth() &&
+ ComparisonType.isUnsigned() && !WraparoundType.isUnsigned())
+ Adjustment.setIsSigned(false);
+
switch (op) {
default:
- // No logic yet for other operators. assume the constraint is feasible.
- return state;
+ llvm_unreachable("invalid operation not caught by assertion above");
case BO_EQ:
return assumeSymEQ(state, Sym, ConvertedInt, Adjustment);
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
index 01f0b4e446..10ddef1341 100644
--- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
+++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -23,10 +23,10 @@ namespace ento {
class SimpleConstraintManager : public ConstraintManager {
SubEngine *SU;
- BasicValueFactory &BVF;
+ SValBuilder &SVB;
public:
- SimpleConstraintManager(SubEngine *subengine, BasicValueFactory &BV)
- : SU(subengine), BVF(BV) {}
+ SimpleConstraintManager(SubEngine *subengine, SValBuilder &SB)
+ : SU(subengine), SVB(SB) {}
virtual ~SimpleConstraintManager();
//===------------------------------------------------------------------===//
@@ -81,7 +81,8 @@ protected:
// Internal implementation.
//===------------------------------------------------------------------===//
- BasicValueFactory &getBasicVals() const { return BVF; }
+ BasicValueFactory &getBasicVals() const { return SVB.getBasicValueFactory(); }
+ SymbolManager &getSymbolManager() const { return SVB.getSymbolManager(); }
bool canReasonAbout(SVal X) const;
diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 3e50c33000..ee627f2baa 100644
--- a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -109,7 +109,7 @@ SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
// Only handle casts from integers to integers - if val is an integer constant
// being cast to a non integer type, produce unknown.
- if (!isLocType && !castTy->isIntegerType())
+ if (!isLocType && !castTy->isIntegralOrEnumerationType())
return UnknownVal();
llvm::APSInt i = val.castAs<nonloc::ConcreteInt>().getValue();
@@ -137,7 +137,7 @@ SVal SimpleSValBuilder::evalCastFromLoc(Loc val, QualType castTy) {
if (castTy->isUnionType())
return UnknownVal();
- if (castTy->isIntegerType()) {
+ if (castTy->isIntegralOrEnumerationType()) {
unsigned BitWidth = Context.getTypeSize(castTy);
if (!val.getAs<loc::ConcreteInt>())
@@ -180,33 +180,6 @@ SVal SimpleSValBuilder::evalComplement(NonLoc X) {
// Transfer function for binary operators.
//===----------------------------------------------------------------------===//
-static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
- switch (op) {
- default:
- llvm_unreachable("Invalid opcode.");
- case BO_LT: return BO_GE;
- case BO_GT: return BO_LE;
- case BO_LE: return BO_GT;
- case BO_GE: return BO_LT;
- case BO_EQ: return BO_NE;
- case BO_NE: return BO_EQ;
- }
-}
-
-static BinaryOperator::Opcode ReverseComparison(BinaryOperator::Opcode op) {
- switch (op) {
- default:
- llvm_unreachable("Invalid opcode.");
- case BO_LT: return BO_GT;
- case BO_GT: return BO_LT;
- case BO_LE: return BO_GE;
- case BO_GE: return BO_LE;
- case BO_EQ:
- case BO_NE:
- return op;
- }
-}
-
SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
BinaryOperator::Opcode op,
const llvm::APSInt &RHS,
@@ -398,7 +371,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
case BO_GT:
case BO_LE:
case BO_GE:
- op = ReverseComparison(op);
+ op = BinaryOperator::reverseComparisonOp(op);
// FALL-THROUGH
case BO_EQ:
case BO_NE:
@@ -465,9 +438,13 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
case BO_GE:
case BO_EQ:
case BO_NE:
+ assert(resultTy->isBooleanType() ||
+ resultTy == getConditionType());
+ assert(symIntExpr->getType()->isBooleanType() ||
+ getContext().hasSameUnqualifiedType(symIntExpr->getType(),
+ getConditionType()));
// Negate the comparison and make a value.
- opc = NegateComparison(opc);
- assert(symIntExpr->getType() == resultTy);
+ opc = BinaryOperator::negateComparisonOp(opc);
return makeNonLoc(symIntExpr->getLHS(), opc,
symIntExpr->getRHS(), resultTy);
}
@@ -508,22 +485,21 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
// Otherwise, make a SymIntExpr out of the expression.
return MakeSymIntVal(symIntExpr, op, *RHSValue, resultTy);
}
+ }
-
- } else if (isa<SymbolData>(Sym)) {
- // Does the symbol simplify to a constant? If so, "fold" the constant
- // by setting 'lhs' to a ConcreteInt and try again.
- if (const llvm::APSInt *Constant = state->getConstraintManager()
- .getSymVal(state, Sym)) {
- lhs = nonloc::ConcreteInt(*Constant);
- continue;
- }
-
- // Is the RHS a constant?
- if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs))
- return MakeSymIntVal(Sym, op, *RHSValue, resultTy);
+ // Does the symbolic expression simplify to a constant?
+ // If so, "fold" the constant by setting 'lhs' to a ConcreteInt
+ // and try again.
+ ConstraintManager &CMgr = state->getConstraintManager();
+ if (const llvm::APSInt *Constant = CMgr.getSymVal(state, Sym)) {
+ lhs = nonloc::ConcreteInt(*Constant);
+ continue;
}
+ // Is the RHS a constant?
+ if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs))
+ return MakeSymIntVal(Sym, op, *RHSValue, resultTy);
+
// Give up -- this is not a symbolic expression we can handle.
return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
}
@@ -602,17 +578,19 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
return UnknownVal();
const llvm::APSInt &lVal = lhs.castAs<loc::ConcreteInt>().getValue();
- return makeNonLoc(rSym, ReverseComparison(op), lVal, resultTy);
+ op = BinaryOperator::reverseComparisonOp(op);
+ return makeNonLoc(rSym, op, lVal, resultTy);
}
// If both operands are constants, just perform the operation.
if (Optional<loc::ConcreteInt> rInt = rhs.getAs<loc::ConcreteInt>()) {
SVal ResultVal =
lhs.castAs<loc::ConcreteInt>().evalBinOp(BasicVals, op, *rInt);
- if (Optional<Loc> Result = ResultVal.getAs<Loc>())
- return evalCastFromLoc(*Result, resultTy);
- else
- return UnknownVal();
+ if (Optional<NonLoc> Result = ResultVal.getAs<NonLoc>())
+ return evalCastFromNonLoc(*Result, resultTy);
+
+ assert(!ResultVal.getAs<Loc>() && "Loc-Loc ops should not produce Locs");
+ return UnknownVal();
}
// Special case comparisons against NULL.
@@ -682,11 +660,11 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
// regions, though.
return UnknownVal();
- const MemSpaceRegion *LeftMS = LeftMR->getMemorySpace();
- const MemSpaceRegion *RightMS = RightMR->getMemorySpace();
- const MemSpaceRegion *UnknownMS = MemMgr.getUnknownRegion();
const MemRegion *LeftBase = LeftMR->getBaseRegion();
const MemRegion *RightBase = RightMR->getBaseRegion();
+ const MemSpaceRegion *LeftMS = LeftBase->getMemorySpace();
+ const MemSpaceRegion *RightMS = RightBase->getMemorySpace();
+ const MemSpaceRegion *UnknownMS = MemMgr.getUnknownRegion();
// If the two regions are from different known memory spaces they cannot be
// equal. Also, assume that no symbolic region (whose memory space is
@@ -789,7 +767,6 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
}
// If we get here, we have no way of comparing the ElementRegions.
- return UnknownVal();
}
// See if both regions are fields of the same structure.
@@ -842,6 +819,13 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
llvm_unreachable("Fields not found in parent record's definition");
}
+ // At this point we're not going to get a good answer, but we can try
+ // conjuring an expression instead.
+ SymbolRef LHSSym = lhs.getAsLocSymbol();
+ SymbolRef RHSSym = rhs.getAsLocSymbol();
+ if (LHSSym && RHSSym)
+ return makeNonLoc(LHSSym, op, RHSSym, resultTy);
+
// If we get here, we have no way of comparing the regions.
return UnknownVal();
}
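Illustrative only (assumed example): when two unrelated symbolic regions are compared, evalBinOpLL can now hand the constraint manager a symbolic comparison instead of giving up with UnknownVal:

    // Sketch of analyzed code.
    void compare(int *p, int *q) {
      if (p == q) {
        // previously UnknownVal; now modeled as the symbolic expression ($p == $q)
      }
    }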
diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp
index a0c24fedcf..690ed08ffc 100644
--- a/lib/StaticAnalyzer/Core/Store.cpp
+++ b/lib/StaticAnalyzer/Core/Store.cpp
@@ -289,62 +289,82 @@ SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType,
return loc::MemRegionVal(BaseReg);
}
-SVal StoreManager::evalDynamicCast(SVal Base, QualType DerivedType,
+/// Returns the static type of the given region, if it represents a C++ class
+/// object.
+///
+/// This handles both fully-typed regions, where the dynamic type is known, and
+/// symbolic regions, where the dynamic type is merely bounded (and even then,
+/// only ostensibly!), but does not take advantage of any dynamic type info.
+static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
+ if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR))
+ return TVR->getValueType()->getAsCXXRecordDecl();
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ return SR->getSymbol()->getType()->getPointeeCXXRecordDecl();
+ return 0;
+}
+
+SVal StoreManager::evalDynamicCast(SVal Base, QualType TargetType,
bool &Failed) {
Failed = false;
- Optional<loc::MemRegionVal> BaseRegVal = Base.getAs<loc::MemRegionVal>();
- if (!BaseRegVal)
+ const MemRegion *MR = Base.getAsRegion();
+ if (!MR)
return UnknownVal();
- const MemRegion *BaseRegion = BaseRegVal->stripCasts(/*StripBases=*/false);
// Assume the derived class is a pointer or a reference to a CXX record.
- DerivedType = DerivedType->getPointeeType();
- assert(!DerivedType.isNull());
- const CXXRecordDecl *DerivedDecl = DerivedType->getAsCXXRecordDecl();
- if (!DerivedDecl && !DerivedType->isVoidType())
+ TargetType = TargetType->getPointeeType();
+ assert(!TargetType.isNull());
+ const CXXRecordDecl *TargetClass = TargetType->getAsCXXRecordDecl();
+ if (!TargetClass && !TargetType->isVoidType())
return UnknownVal();
// Drill down the CXXBaseObject chains, which represent upcasts (casts from
// derived to base).
- const MemRegion *SR = BaseRegion;
- while (const TypedRegion *TSR = dyn_cast_or_null<TypedRegion>(SR)) {
- QualType BaseType = TSR->getLocationType()->getPointeeType();
- assert(!BaseType.isNull());
- const CXXRecordDecl *SRDecl = BaseType->getAsCXXRecordDecl();
- if (!SRDecl)
- return UnknownVal();
-
+ while (const CXXRecordDecl *MRClass = getCXXRecordType(MR)) {
// If found the derived class, the cast succeeds.
- if (SRDecl == DerivedDecl)
- return loc::MemRegionVal(TSR);
+ if (MRClass == TargetClass)
+ return loc::MemRegionVal(MR);
- if (!DerivedType->isVoidType()) {
+ if (!TargetType->isVoidType()) {
// Static upcasts are marked as DerivedToBase casts by Sema, so this will
// only happen when multiple or virtual inheritance is involved.
CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
/*DetectVirtual=*/false);
- if (SRDecl->isDerivedFrom(DerivedDecl, Paths))
- return evalDerivedToBase(loc::MemRegionVal(TSR), Paths.front());
+ if (MRClass->isDerivedFrom(TargetClass, Paths))
+ return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front());
}
- if (const CXXBaseObjectRegion *R = dyn_cast<CXXBaseObjectRegion>(TSR))
+ if (const CXXBaseObjectRegion *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
// Drill down the chain to get the derived classes.
- SR = R->getSuperRegion();
- else {
- // We reached the bottom of the hierarchy.
-
- // If this is a cast to void*, return the region.
- if (DerivedType->isVoidType())
- return loc::MemRegionVal(TSR);
+ MR = BaseR->getSuperRegion();
+ continue;
+ }
- // We did not find the derived class. We we must be casting the base to
- // derived, so the cast should fail.
- Failed = true;
- return UnknownVal();
+ // If this is a cast to void*, return the region.
+ if (TargetType->isVoidType())
+ return loc::MemRegionVal(MR);
+
+ // Strange use of reinterpret_cast can give us paths we don't reason
+ // about well, by putting in ElementRegions where we'd expect
+ // CXXBaseObjectRegions. If it's a valid reinterpret_cast (i.e. if the
+ // derived class has a zero offset from the base class), then it's safe
+ // to strip the cast; if it's invalid, -Wreinterpret-base-class should
+ // catch it. In the interest of performance, the analyzer will silently
+ // do the wrong thing in the invalid case (because offsets for subregions
+ // will be wrong).
+ const MemRegion *Uncasted = MR->StripCasts(/*IncludeBaseCasts=*/false);
+ if (Uncasted == MR) {
+ // We reached the bottom of the hierarchy and did not find the derived
+ // class. We must be casting the base to derived, so the cast should
+ // fail.
+ break;
}
+
+ MR = Uncasted;
}
-
+
+ // We failed if the region we ended up with has perfect type info.
+ Failed = isa<TypedValueRegion>(MR);
return UnknownVal();
}
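For illustration (assumed example, not part of the patch): the reworked evalDynamicCast drills through CXXBaseObjectRegions and reports failure when the region's static type rules the cast out:

    // Sketch of analyzed code.
    struct Base { virtual ~Base() {} };
    struct Derived : Base { int extra = 0; };

    void casts() {
      Derived d;
      Base *b = &d;                          // region: base-object view of 'd'
      if (Derived *p = dynamic_cast<Derived *>(b))
        p->extra = 1;                        // modeled as succeeding: drills back down to 'd'

      Base base;
      if (Derived *q = dynamic_cast<Derived *>(&base))
        q->extra = 2;                        // modeled as failing; Failed is set
    }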
diff --git a/lib/StaticAnalyzer/Core/SymbolManager.cpp b/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 0c5098b1e7..7c75b6c3d2 100644
--- a/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -27,52 +27,33 @@ void SymExpr::dump() const {
dumpToStream(llvm::errs());
}
-static void print(raw_ostream &os, BinaryOperator::Opcode Op) {
- switch (Op) {
- default:
- llvm_unreachable("operator printing not implemented");
- case BO_Mul: os << '*' ; break;
- case BO_Div: os << '/' ; break;
- case BO_Rem: os << '%' ; break;
- case BO_Add: os << '+' ; break;
- case BO_Sub: os << '-' ; break;
- case BO_Shl: os << "<<" ; break;
- case BO_Shr: os << ">>" ; break;
- case BO_LT: os << "<" ; break;
- case BO_GT: os << '>' ; break;
- case BO_LE: os << "<=" ; break;
- case BO_GE: os << ">=" ; break;
- case BO_EQ: os << "==" ; break;
- case BO_NE: os << "!=" ; break;
- case BO_And: os << '&' ; break;
- case BO_Xor: os << '^' ; break;
- case BO_Or: os << '|' ; break;
- }
-}
-
void SymIntExpr::dumpToStream(raw_ostream &os) const {
os << '(';
getLHS()->dumpToStream(os);
- os << ") ";
- print(os, getOpcode());
- os << ' ' << getRHS().getZExtValue();
- if (getRHS().isUnsigned()) os << 'U';
+ os << ") "
+ << BinaryOperator::getOpcodeStr(getOpcode()) << ' '
+ << getRHS().getZExtValue();
+ if (getRHS().isUnsigned())
+ os << 'U';
}
void IntSymExpr::dumpToStream(raw_ostream &os) const {
- os << ' ' << getLHS().getZExtValue();
- if (getLHS().isUnsigned()) os << 'U';
- print(os, getOpcode());
- os << '(';
+ os << getLHS().getZExtValue();
+ if (getLHS().isUnsigned())
+ os << 'U';
+ os << ' '
+ << BinaryOperator::getOpcodeStr(getOpcode())
+ << " (";
getRHS()->dumpToStream(os);
- os << ") ";
+ os << ')';
}
void SymSymExpr::dumpToStream(raw_ostream &os) const {
os << '(';
getLHS()->dumpToStream(os);
- os << ") ";
- os << '(';
+ os << ") "
+ << BinaryOperator::getOpcodeStr(getOpcode())
+ << " (";
getRHS()->dumpToStream(os);
os << ')';
}
@@ -359,8 +340,8 @@ bool SymbolManager::canSymbolicate(QualType T) {
if (Loc::isLocType(T))
return true;
- if (T->isIntegerType())
- return T->isScalarType();
+ if (T->isIntegralOrEnumerationType())
+ return true;
if (T->isRecordType() && !T->isUnionType())
return true;
@@ -468,9 +449,7 @@ bool SymbolReaper::isLive(SymbolRef sym) {
switch (sym->getKind()) {
case SymExpr::RegionValueKind:
- // FIXME: We should be able to use isLiveRegion here (this behavior
- // predates isLiveRegion), but doing so causes test failures. Investigate.
- KnownLive = true;
+ KnownLive = isLiveRegion(cast<SymbolRegionValue>(sym)->getRegion());
break;
case SymExpr::ConjuredKind:
KnownLive = false;
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index f3d545a026..d71e528848 100644
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -234,11 +234,11 @@ public:
else if (Mode == AM_Path) {
llvm::errs() << " (Path, ";
switch (IMode) {
- case ExprEngine::Inline_None:
- llvm::errs() << " Inline_None";
+ case ExprEngine::Inline_Minimal:
+ llvm::errs() << " Inline_Minimal";
break;
- case ExprEngine::Inline_All:
- llvm::errs() << " Inline_All";
+ case ExprEngine::Inline_Regular:
+ llvm::errs() << " Inline_Regular";
break;
}
llvm::errs() << ")";
@@ -284,7 +284,8 @@ public:
virtual void HandleTranslationUnit(ASTContext &C);
/// \brief Determine which inlining mode should be used when this function is
- /// analyzed. For example, determines if the callees should be inlined.
+ /// analyzed. This allows the default inlining policies to be redefined
+ /// when analyzing a given function.
ExprEngine::InliningModes
getInliningModeForFunction(const Decl *D, SetOfConstDecls Visited);
@@ -299,7 +300,7 @@ public:
/// set of functions which should be considered analyzed after analyzing the
/// given root function.
void HandleCode(Decl *D, AnalysisMode Mode,
- ExprEngine::InliningModes IMode = ExprEngine::Inline_None,
+ ExprEngine::InliningModes IMode = ExprEngine::Inline_Minimal,
SetOfConstDecls *VisitedCallees = 0);
void RunPathSensitiveChecks(Decl *D,
@@ -410,22 +411,18 @@ static bool shouldSkipFunction(const Decl *D,
ExprEngine::InliningModes
AnalysisConsumer::getInliningModeForFunction(const Decl *D,
SetOfConstDecls Visited) {
- ExprEngine::InliningModes HowToInline =
- (Mgr->shouldInlineCall()) ? ExprEngine::Inline_All :
- ExprEngine::Inline_None;
-
// We want to reanalyze all ObjC methods as top level to report Retain
- // Count naming convention errors more aggressively. But we can turn off
+ // Count naming convention errors more aggressively. But we should tune down
// inlining when reanalyzing an already inlined function.
if (Visited.count(D)) {
assert(isa<ObjCMethodDecl>(D) &&
"We are only reanalyzing ObjCMethods.");
const ObjCMethodDecl *ObjCM = cast<ObjCMethodDecl>(D);
if (ObjCM->getMethodFamily() != OMF_init)
- HowToInline = ExprEngine::Inline_None;
+ return ExprEngine::Inline_Minimal;
}
- return HowToInline;
+ return ExprEngine::Inline_Regular;
}
void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
@@ -595,7 +592,7 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
checkerMgr->runCheckersOnASTBody(D, *Mgr, BR);
if ((Mode & AM_Path) && checkerMgr->hasPathSensitiveCheckers()) {
RunPathSensitiveChecks(D, IMode, VisitedCallees);
- if (IMode != ExprEngine::Inline_None)
+ if (IMode != ExprEngine::Inline_Minimal)
NumFunctionsAnalyzed++;
}
}
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index 4fad5a8a7c..e7def08819 100644
--- a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -100,11 +100,12 @@ void ClangCheckerRegistry::warnIncompatible(DiagnosticsEngine *diags,
}
-CheckerManager *ento::createCheckerManager(const AnalyzerOptions &opts,
+CheckerManager *ento::createCheckerManager(AnalyzerOptions &opts,
const LangOptions &langOpts,
ArrayRef<std::string> plugins,
DiagnosticsEngine &diags) {
- OwningPtr<CheckerManager> checkerMgr(new CheckerManager(langOpts));
+ OwningPtr<CheckerManager> checkerMgr(new CheckerManager(langOpts,
+ &opts));
SmallVector<CheckerOptInfo, 8> checkerOpts;
for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {