path: root/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
author     Anna Zaks <ganna@apple.com>    2012-12-21 17:27:01 +0000
committer  Anna Zaks <ganna@apple.com>    2012-12-21 17:27:01 +0000
commit     bd80231672a7418aa1a99d3dbbe1774205c88f74 (patch)
tree       2b0616e6d9735505ac7749b19a5a811f87c4db1d /lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
parent     95f38f614c8005b5cc398021e1f9e28ed233d533 (diff)
[analyzer] Re-apply r170826 and make the dumping of the CallGraph
deterministic.

Commit message for r170826:

[analyzer] Traverse the Call Graph in topological order.

Modify the call graph by removing the parentless nodes. Instead, all nodes
are children of root to ensure they are all reachable.

Remove the tracking of nodes that are "top level" or global. This
information is not used and can be obtained from the Decls stored inside
CallGraphNodes.

Instead of the existing ordering hacks, analyze the functions in topological
order over the Call Graph.

Together with the addition of devirtualizable ObjC message sends and blocks
to the call graph, this gives around a 6% performance improvement on several
large ObjC benchmarks.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@170906 91177308-0d34-0410-b5e6-96231b3b80d8
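For context, a reverse post-order (RPO) walk of a call graph visits callers
before their callees, which for an acyclic graph is exactly a topological
order. Below is a minimal standalone C++ sketch of computing such an order;
the Node type and the tiny root/f/g graph are hypothetical stand-ins for
clang's CallGraphNode, not the actual LLVM API:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for a call graph node.
struct Node {
  std::string Name;
  std::vector<Node *> Callees;
};

// DFS post-order: a node is emitted only after all of its callees.
static void postOrder(Node *N, std::vector<Node *> &Out,
                      std::vector<Node *> &Seen) {
  if (std::find(Seen.begin(), Seen.end(), N) != Seen.end())
    return;
  Seen.push_back(N);
  for (Node *Callee : N->Callees)
    postOrder(Callee, Out, Seen);
  Out.push_back(N);
}

int main() {
  Node G{"g", {}};
  Node F{"f", {&G}};
  Node Root{"root", {&F, &G}};
  std::vector<Node *> Order, Seen;
  postOrder(&Root, Order, Seen);
  std::reverse(Order.begin(), Order.end()); // reversed post-order = RPO
  for (Node *N : Order)
    std::cout << N->Name << "\n"; // prints: root, f, g
}

Reversing a DFS post-order yields the RPO; this is the same idea that
llvm::ReversePostOrderTraversal implements generically for any graph with a
GraphTraits specialization.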
Diffstat (limited to 'lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp')
-rw-r--r--  lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp  62
1 file changed, 18 insertions(+), 44 deletions(-)
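The key change in the main hunk below is replacing the hand-rolled BFS
worklist with llvm::ReversePostOrderTraversal plus a Visited set. Here is a
hedged, standalone sketch of the skip heuristic that ordering enables, using
the same hypothetical root/f/g graph as above (simplified: it assumes every
callee is fully inlined while its caller is analyzed, which the real
analyzer only does under its inlining policy):

#include <iostream>
#include <set>
#include <string>
#include <vector>

// Hypothetical, simplified model of the analyzer's visitation policy.
struct Fn {
  std::string Name;
  std::vector<Fn *> Callees;
};

int main() {
  Fn G{"g", {}};
  Fn F{"f", {&G}};
  Fn Root{"root", {&F, &G}};
  // Assume a reverse post-order was already computed: callers first.
  std::vector<Fn *> RPO = {&Root, &F, &G};

  std::set<Fn *> Visited; // filled as functions get analyzed or inlined
  for (Fn *N : RPO) {
    if (Visited.count(N))
      continue; // already covered by inlining into a caller: skip
    std::cout << "analyzing " << N->Name << " as top level\n";
    Visited.insert(N);
    for (Fn *Callee : N->Callees)
      Visited.insert(Callee); // pretend the callee was inlined
  }
  // Prints only "analyzing root as top level": visiting callers first
  // lets the "do not reanalyze previously inlined" heuristic fire for
  // f and g. On larger graphs RPO guarantees parents precede children,
  // which plain BFS does not.
}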
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 025891adf8..07aa5b074a 100644
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -39,6 +39,7 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Timer.h"
@@ -417,61 +418,34 @@ AnalysisConsumer::getInliningModeForFunction(const Decl *D,
}
void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
- // Otherwise, use the Callgraph to derive the order.
- // Build the Call Graph.
- CallGraph CG;
-
- // Add all the top level declarations to the graph.
+ // Build the Call Graph by adding all the top level declarations to the graph.
// Note: CallGraph can trigger deserialization of more items from a pch
// (through HandleInterestingDecl); triggering additions to LocalTUDecls.
// We rely on random access to add the initially processed Decls to CG.
+ CallGraph CG;
for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
CG.addToCallGraph(LocalTUDecls[i]);
}
- // Find the top level nodes - children of root + the unreachable (parentless)
- // nodes.
- llvm::SmallVector<CallGraphNode*, 24> TopLevelFunctions;
- for (CallGraph::nodes_iterator TI = CG.parentless_begin(),
- TE = CG.parentless_end(); TI != TE; ++TI) {
- TopLevelFunctions.push_back(*TI);
- NumFunctionTopLevel++;
- }
- CallGraphNode *Entry = CG.getRoot();
- for (CallGraphNode::iterator I = Entry->begin(),
- E = Entry->end(); I != E; ++I) {
- TopLevelFunctions.push_back(*I);
- NumFunctionTopLevel++;
- }
-
- // Make sure the nodes are sorted in order reverse of their definition in the
- // translation unit. This step is very important for performance. It ensures
- // that we analyze the root functions before the externally available
- // subroutines.
- std::deque<CallGraphNode*> BFSQueue;
- for (llvm::SmallVector<CallGraphNode*, 24>::reverse_iterator
- TI = TopLevelFunctions.rbegin(), TE = TopLevelFunctions.rend();
- TI != TE; ++TI)
- BFSQueue.push_back(*TI);
-
- // BFS over all of the functions, while skipping the ones inlined into
- // the previously processed functions. Use external Visited set, which is
- // also modified when we inline a function.
+ // Walk over all of the call graph nodes in topological order, so that we
+ // analyze parents before the children. Skip the functions inlined into
+ // the previously processed functions. Use external Visited set to identify
+ // inlined functions. The topological order allows the "do not reanalyze
+ // previously inlined function" performance heuristic to be triggered more
+ // often.
SetOfConstDecls Visited;
SetOfConstDecls VisitedAsTopLevel;
- while(!BFSQueue.empty()) {
- CallGraphNode *N = BFSQueue.front();
- BFSQueue.pop_front();
-
- // Push the children into the queue.
- for (CallGraphNode::const_iterator CI = N->begin(),
- CE = N->end(); CI != CE; ++CI) {
- if (!shouldSkipFunction((*CI)->getDecl(), Visited, VisitedAsTopLevel))
- BFSQueue.push_back(*CI);
- }
+ llvm::ReversePostOrderTraversal<clang::CallGraph*> RPOT(&CG);
+ for (llvm::ReversePostOrderTraversal<clang::CallGraph*>::rpo_iterator
+ I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
+ NumFunctionTopLevel++;
+ CallGraphNode *N = *I;
Decl *D = N->getDecl();
- assert(D);
+
+ // Skip the abstract root node.
+ if (!D)
+ continue;
// Skip the functions which have been processed already or previously
// inlined.