| author | Frits van Bommel <fvbommel@gmail.com> | 2010-12-05 19:02:47 +0000 |
|---|---|---|
| committer | Frits van Bommel <fvbommel@gmail.com> | 2010-12-05 19:02:47 +0000 |
| commit | 6f9a8307e087110d57b1e63dae154d351f6b0f6b (patch) | |
| tree | 448eaf1d91206efa360d51556d8669601ff06d7e /lib/Transforms/Scalar/JumpThreading.cpp | |
| parent | 7ac40c3ffabcdac9510e7efc4dc75a8ed2b32edb (diff) | |
Remove trailing whitespace.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@120945 91177308-0d34-0410-b5e6-96231b3b80d8
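The change itself is mechanical: every hunk strips trailing blanks from a source line. As a rough illustration of that kind of cleanup, here is a minimal standalone C++ sketch that rewrites a file with trailing spaces and tabs removed. It is not the tool used to produce this commit, and the program name and its single-file, rewrite-in-place behavior are assumptions made only for this example.

```cpp
// Illustrative sketch only: strip trailing spaces/tabs from each line of a
// file and rewrite it in place. Not the tool used to produce this commit.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char **argv) {
  if (argc != 2) {
    std::cerr << "usage: " << argv[0] << " <file>\n";
    return 1;
  }

  std::ifstream In(argv[1]);
  if (!In) {
    std::cerr << "error: cannot open " << argv[1] << "\n";
    return 1;
  }

  std::ostringstream Out;
  std::string Line;
  unsigned NumChanged = 0;
  while (std::getline(In, Line)) {
    // Keep everything up to the last non-blank character; drop the rest.
    std::string::size_type End = Line.find_last_not_of(" \t");
    std::string Stripped =
        End == std::string::npos ? std::string() : Line.substr(0, End + 1);
    if (Stripped.size() != Line.size())
      ++NumChanged;
    Out << Stripped << '\n';
  }
  In.close();

  // Rewrite the file with the cleaned contents (this simplified sketch
  // always ends the file with a newline).
  std::ofstream Rewrite(argv[1], std::ios::trunc);
  Rewrite << Out.str();
  std::cerr << NumChanged << " line(s) had trailing whitespace removed\n";
  return 0;
}
```

For example, invoking the sketch as `./strip-ws lib/Transforms/Scalar/JumpThreading.cpp` (strip-ws being an assumed binary name) would rewrite that file in place with trailing blanks removed.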
Diffstat (limited to 'lib/Transforms/Scalar/JumpThreading.cpp')
-rw-r--r-- | lib/Transforms/Scalar/JumpThreading.cpp | 416 |
1 file changed, 208 insertions, 208 deletions
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 8f12ee0b61..cb93ae84a5 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp

(The patch body is whitespace-only: every removed/added line pair strips trailing blanks from a line of JumpThreading.cpp and makes no functional change. The hunks fall in the -jump-threading-threshold option declaration, the RecursionSetRemover RAII helper, runOnFunction, getJumpThreadDuplicationCost, FindLoopHeaders, ComputeValueKnownInPredecessors, GetBestDestForJumpOnUndef, ProcessBlock, ProcessBranchOnDuplicateCond, ProcessSwitchOnDuplicateCond, SimplifyPartiallyRedundantLoad, FindMostPopularDest, and ProcessThreadableEdges.)