| author | Nadav Rotem <nadav.rotem@intel.com> | 2012-07-24 10:51:42 +0000 |
|---|---|---|
| committer | Nadav Rotem <nadav.rotem@intel.com> | 2012-07-24 10:51:42 +0000 |
| commit | a94d6e87c4c49f2e81b01d66d8bfb591277f8f96 (patch) | |
| tree | 11026d8db318f33530de45b3abbb9606d0f7ae5e | |
| parent | 8899d5c6fb3cf118c5c73eade290b6ebb2b3b850 (diff) | |
Clean whitespaces.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160668 91177308-0d34-0410-b5e6-96231b3b80d8
22 files changed, 500 insertions, 498 deletions
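Every hunk below has the same shape: each removed line differs from its replacement only in trailing whitespace, so many `-`/`+` pairs render identically once a browser strips the trailing blanks. A change like this can be spot-checked mechanically. The sketch below is illustrative and not part of the commit; it assumes the pairing convention used throughout this patch, where a run of `-` lines is immediately followed by a matching run of `+` lines:

```cpp
// ws_only_check.cpp - a minimal sketch (not part of this commit) that reads
// a unified diff on stdin and reports whether every removed line matches the
// corresponding added line once trailing whitespace is ignored.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Strip trailing spaces, tabs, and carriage returns.
static std::string rstrip(std::string S) {
  while (!S.empty() && (S.back() == ' ' || S.back() == '\t' || S.back() == '\r'))
    S.pop_back();
  return S;
}

int main() {
  std::vector<std::string> Removed, Added;
  bool WhitespaceOnly = true;
  std::string Line;

  // Compare the current run of '-' lines against the run of '+' lines.
  auto flushRun = [&] {
    if (Removed.size() != Added.size()) {
      if (!Removed.empty() || !Added.empty())
        WhitespaceOnly = false;
    } else {
      for (std::size_t I = 0; I != Removed.size(); ++I)
        if (rstrip(Removed[I]) != rstrip(Added[I]))
          WhitespaceOnly = false;
    }
    Removed.clear();
    Added.clear();
  };

  while (std::getline(std::cin, Line)) {
    // Skip file headers; "---"/"+++" would otherwise look like content lines.
    if (Line.compare(0, 4, "--- ") == 0 || Line.compare(0, 4, "+++ ") == 0)
      continue;
    if (!Line.empty() && Line[0] == '-')
      Removed.push_back(Line.substr(1));
    else if (!Line.empty() && Line[0] == '+')
      Added.push_back(Line.substr(1));
    else
      flushRun(); // context line, hunk header, or "diff --git": close the run
  }
  flushRun();

  std::cout << (WhitespaceOnly ? "whitespace-only\n" : "has content changes\n");
  return WhitespaceOnly ? 0 : 1;
}
```

Running `git diff -w` (ignore-whitespace mode) between this commit and its parent should likewise produce no output.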
diff --git a/lib/Transforms/Scalar/ADCE.cpp b/lib/Transforms/Scalar/ADCE.cpp
index ba214d1a33..b344952cc5 100644
--- a/lib/Transforms/Scalar/ADCE.cpp
+++ b/lib/Transforms/Scalar/ADCE.cpp
@@ -9,7 +9,7 @@
 //
 // This file implements the Aggressive Dead Code Elimination pass. This pass
 // optimistically assumes that all instructions are dead until proven otherwise,
-// allowing it to eliminate dead computations that other DCE passes do not
+// allowing it to eliminate dead computations that other DCE passes do not
 // catch, particularly involving loop computations.
 //
 //===----------------------------------------------------------------------===//
@@ -36,13 +36,13 @@ namespace {
     ADCE() : FunctionPass(ID) {
       initializeADCEPass(*PassRegistry::getPassRegistry());
     }
-
+
     virtual bool runOnFunction(Function& F);
-
+
     virtual void getAnalysisUsage(AnalysisUsage& AU) const {
       AU.setPreservesCFG();
     }
-
+
   };
 }
@@ -52,7 +52,7 @@ INITIALIZE_PASS(ADCE, "adce", "Aggressive Dead Code Elimination",
                 false, false)
 
 bool ADCE::runOnFunction(Function& F) {
   SmallPtrSet<Instruction*, 128> alive;
   SmallVector<Instruction*, 128> worklist;
-
+
   // Collect the set of "root" instructions that are known live.
   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
     if (isa<TerminatorInst>(I.getInstructionIterator()) ||
@@ -62,7 +62,7 @@ bool ADCE::runOnFunction(Function& F) {
       alive.insert(I.getInstructionIterator());
       worklist.push_back(I.getInstructionIterator());
     }
-
+
   // Propagate liveness backwards to operands.
   while (!worklist.empty()) {
     Instruction* curr = worklist.pop_back_val();
@@ -72,7 +72,7 @@ bool ADCE::runOnFunction(Function& F) {
       if (alive.insert(Inst))
         worklist.push_back(Inst);
   }
-
+
   // The inverse of the live set is the dead set. These are those instructions
   // which have no side effects and do not influence the control flow or return
   // value of the function, and may therefore be deleted safely.
@@ -82,7 +82,7 @@ bool ADCE::runOnFunction(Function& F) {
       worklist.push_back(I.getInstructionIterator());
       I->dropAllReferences();
     }
-
+
   for (SmallVector<Instruction*, 1024>::iterator I = worklist.begin(),
        E = worklist.end(); I != E; ++I) {
     ++NumRemoved;
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index cbc089ab78..277c4d58f9 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -83,7 +83,7 @@ namespace {
     const TargetLibraryInfo *TLInfo;
     DominatorTree *DT;
     ProfileInfo *PFI;
-
+
     /// CurInstIterator - As we scan instructions optimizing them, this is the
     /// next instruction to optimize. Xforms that can invalidate this should
     /// update it.
@@ -157,7 +157,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
   EverMadeChange |= EliminateMostlyEmptyBlocks(F);
 
   // llvm.dbg.value is far away from the value then iSel may not be able
-  // handle it properly. iSel will drop llvm.dbg.value if it can not
+  // handle it properly. iSel will drop llvm.dbg.value if it can not
   // find a node corresponding to the value.
   EverMadeChange |= PlaceDbgValues(F);
 
@@ -336,7 +336,7 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
 
   if (isEntry && BB != &BB->getParent()->getEntryBlock())
     BB->moveBefore(&BB->getParent()->getEntryBlock());
-
+
   DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
   return;
 }
@@ -547,7 +547,7 @@ protected:
 
 bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
   BasicBlock *BB = CI->getParent();
-
+
   // Lower inline assembly if we can.
   // If we found an inline asm expession, and if the target knows how to
   // lower it to normal LLVM code, do so now.
@@ -564,19 +564,19 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
     if (OptimizeInlineAsmInst(CI))
       return true;
   }
-
+
   // Lower all uses of llvm.objectsize.*
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
   if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
     bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
     Type *ReturnTy = CI->getType();
-    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
-
+    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
+
     // Substituting this can cause recursive simplifications, which can
     // invalidate our iterator. Use a WeakVH to hold onto it in case this
     // happens.
     WeakVH IterHandle(CurInstIterator);
-
+
     replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getTargetData() : 0,
                                   TLInfo, ModifiedDT ? 0 : DT);
@@ -604,7 +604,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
   // We'll need TargetData from here on out.
   const TargetData *TD = TLI ? TLI->getTargetData() : 0;
   if (!TD) return false;
-
+
   // Lower all default uses of _chk calls. This is very similar
   // to what InstCombineCalls does, but here we are only lowering calls
   // that have the default "don't know" as the objectsize. Anything else
@@ -760,13 +760,13 @@ static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
 bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                         Type *AccessTy) {
   Value *Repl = Addr;
-
-  // Try to collapse single-value PHI nodes. This is necessary to undo
+
+  // Try to collapse single-value PHI nodes. This is necessary to undo
   // unprofitable PRE transformations.
   SmallVector<Value*, 8> worklist;
   SmallPtrSet<Value*, 16> Visited;
   worklist.push_back(Addr);
-
+
   // Use a worklist to iteratively look through PHI nodes, and ensure that
   // the addressing mode obtained from the non-PHI roots of the graph
   // are equivalent.
@@ -778,20 +778,20 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
   while (!worklist.empty()) {
     Value *V = worklist.back();
     worklist.pop_back();
-
+
     // Break use-def graph loops.
     if (!Visited.insert(V)) {
       Consensus = 0;
       break;
     }
-
+
     // For a PHI node, push all of its incoming values.
     if (PHINode *P = dyn_cast<PHINode>(V)) {
       for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
         worklist.push_back(P->getIncomingValue(i));
       continue;
     }
-
+
     // For non-PHIs, determine the addressing mode being computed.
     SmallVector<Instruction*, 16> NewAddrModeInsts;
     ExtAddrMode NewAddrMode =
@@ -826,15 +826,15 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
       }
       continue;
     }
-
+
     Consensus = 0;
     break;
   }
-
+
   // If the addressing mode couldn't be determined, or if multiple different
   // ones were determined, bail out now.
   if (!Consensus) return false;
-
+
   // Check to see if any of the instructions supersumed by this addr mode are
   // non-local to I's BB.
   bool AnyNonLocal = false;
@@ -943,7 +943,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
     // Use a WeakVH to hold onto it in case this happens.
     WeakVH IterHandle(CurInstIterator);
     BasicBlock *BB = CurInstIterator->getParent();
-
+
     RecursivelyDeleteTriviallyDeadInstructions(Repl);
 
     if (IterHandle != CurInstIterator) {
@@ -955,7 +955,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
       // This address is now available for reassignment, so erase the table
       // entry; we don't want to match some completely different instruction.
       SunkAddrs[Addr] = 0;
-    }
+    }
   }
   ++NumMemoryInsts;
   return true;
@@ -967,12 +967,12 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
 
 bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
   bool MadeChange = false;
-  TargetLowering::AsmOperandInfoVector
+  TargetLowering::AsmOperandInfoVector
     TargetConstraints = TLI->ParseConstraints(CS);
   unsigned ArgNo = 0;
   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
     TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
-
+
     // Compute the constraint code and ConstraintType to use.
     TLI->ComputeConstraintToUse(OpInfo, SDValue());
@@ -1187,7 +1187,7 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
     }
     return false;
   }
-
+
   if (CastInst *CI = dyn_cast<CastInst>(I)) {
     // If the source of the cast is a constant, then this should have
     // already been constant folded. The only reason NOT to constant fold
@@ -1207,23 +1207,23 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
     }
     return false;
   }
-
+
   if (CmpInst *CI = dyn_cast<CmpInst>(I))
     return OptimizeCmpExpression(CI);
-
+
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     if (TLI)
       return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
     return false;
   }
-
+
   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     if (TLI)
       return OptimizeMemoryInst(I, SI->getOperand(1),
                                 SI->getOperand(0)->getType());
     return false;
   }
-
+
   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
     if (GEPI->hasAllZeroIndices()) {
       /// The GEP operand must be a pointer, so must its result -> BitCast
@@ -1237,7 +1237,7 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
     }
     return false;
   }
-
+
   if (CallInst *CI = dyn_cast<CallInst>(I))
     return OptimizeCallInst(CI);
 
@@ -1265,7 +1265,7 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
 }
 
 // llvm.dbg.value is far away from the value then iSel may not be able
-// handle it properly. iSel will drop llvm.dbg.value if it can not
+// handle it properly. iSel will drop llvm.dbg.value if it can not
 // find a node corresponding to the value.
 bool CodeGenPrepare::PlaceDbgValues(Function &F) {
   bool MadeChange = false;
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index c8448fa6c1..5eff0e5a36 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -248,7 +248,7 @@ static bool isShortenable(Instruction *I) {
   // Don't shorten stores for now
   if (isa<StoreInst>(I))
     return false;
-
+
   IntrinsicInst *II = cast<IntrinsicInst>(I);
   switch (II->getIntrinsicID()) {
     default: return false;
@@ -292,7 +292,7 @@ namespace {
 /// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
 /// completely overwrites a store to the 'Earlier' location.
-/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
+/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
 /// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
 static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
                                    const AliasAnalysis::Location &Earlier,
@@ -315,7 +315,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
     if (AA.getTargetData() == 0 &&
         Later.Ptr->getType() == Earlier.Ptr->getType())
       return OverwriteComplete;
-
+
     return OverwriteUnknown;
   }
@@ -381,7 +381,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
       Later.Size > Earlier.Size &&
       uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
     return OverwriteComplete;
-
+
   // The other interesting case is if the later store overwrites the end of
   // the earlier store
   //
@@ -520,11 +520,11 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
       // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
       // 'Inst' doesn't load from, then we can remove it.
-      if (isRemovable(DepWrite) &&
+      if (isRemovable(DepWrite) &&
           !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
-        int64_t InstWriteOffset, DepWriteOffset;
-        OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
-                                         DepWriteOffset, InstWriteOffset);
+        int64_t InstWriteOffset, DepWriteOffset;
+        OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
+                                         DepWriteOffset, InstWriteOffset);
         if (OR == OverwriteComplete) {
           DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
                 << *DepWrite << "\n KILLER: " << *Inst << '\n');
@@ -533,7 +533,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
           DeleteDeadInstruction(DepWrite, *MD);
           ++NumFastStores;
           MadeChange = true;
-
+
           // DeleteDeadInstruction can delete the current instruction in loop
           // cases, reset BBI.
           BBI = Inst;
@@ -551,16 +551,16 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
           unsigned DepWriteAlign = DepIntrinsic->getAlignment();
           if (llvm::isPowerOf2_64(InstWriteOffset) ||
               ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {
-
+
             DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW END: "
-                  << *DepWrite << "\n KILLER (offset "
-                  << InstWriteOffset << ", "
+                  << *DepWrite << "\n KILLER (offset "
+                  << InstWriteOffset << ", "
                   << DepLoc.Size << ")" << *Inst << '\n');
-
+
             Value* DepWriteLength = DepIntrinsic->getLength();
             Value* TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
-                                                    InstWriteOffset -
+                                                    InstWriteOffset -
                                                     DepWriteOffset);
             DepIntrinsic->setLength(TrimmedLength);
             MadeChange = true;
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index f3c92d64c2..975954953b 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -39,7 +39,7 @@ static unsigned getHash(const void *V) {
 }
 
 //===----------------------------------------------------------------------===//
-// SimpleValue
+// SimpleValue
 //===----------------------------------------------------------------------===//
 
 namespace {
@@ -47,16 +47,16 @@ namespace {
   /// scoped hash table.
   struct SimpleValue {
     Instruction *Inst;
-
+
    SimpleValue(Instruction *I) : Inst(I) {
       assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
     }
-
+
     bool isSentinel() const {
       return Inst == DenseMapInfo<Instruction*>::getEmptyKey() ||
              Inst == DenseMapInfo<Instruction*>::getTombstoneKey();
     }
-
+
     static bool canHandle(Instruction *Inst) {
       // This can only handle non-void readnone functions.
       if (CallInst *CI = dyn_cast<CallInst>(Inst))
@@ -90,7 +90,7 @@ template<> struct DenseMapInfo<SimpleValue> {
 
 unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
   Instruction *Inst = Val.Inst;
-
+
   // Hash in all of the operands as pointers.
   unsigned Res = 0;
   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
@@ -126,13 +126,13 @@ bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
 
   if (LHS.isSentinel() || RHS.isSentinel())
     return LHSI == RHSI;
-
+
   if (LHSI->getOpcode() != RHSI->getOpcode()) return false;
   return LHSI->isIdenticalTo(RHSI);
 }
 
 //===----------------------------------------------------------------------===//
-// CallValue
+// CallValue
 //===----------------------------------------------------------------------===//
 
 namespace {
@@ -140,21 +140,21 @@ namespace {
   /// the scoped hash table.
   struct CallValue {
     Instruction *Inst;
-
+
     CallValue(Instruction *I) : Inst(I) {
       assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
     }
-
+
     bool isSentinel() const {
       return Inst == DenseMapInfo<Instruction*>::getEmptyKey() ||
             Inst == DenseMapInfo<Instruction*>::getTombstoneKey();
     }
-
+
     static bool canHandle(Instruction *Inst) {
       // Don't value number anything that returns void.
       if (Inst->getType()->isVoidTy())
         return false;
-
+
       CallInst *CI = dyn_cast<CallInst>(Inst);
       if (CI == 0 || !CI->onlyReadsMemory())
         return false;
@@ -168,7 +168,7 @@ namespace llvm {
   template<> struct isPodLike<CallValue> {
     static const bool value = true;
   };
-
+
   template<> struct DenseMapInfo<CallValue> {
     static inline CallValue getEmptyKey() {
       return DenseMapInfo<Instruction*>::getEmptyKey();
@@ -189,7 +189,7 @@ unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
            "Cannot value number calls with metadata operands");
     Res ^= getHash(Inst->getOperand(i)) << (i & 0xF);
   }
-
+
   // Mix in the opcode.
   return (Res << 1) ^ Inst->getOpcode();
 }
@@ -203,11 +203,11 @@ bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
 
 //===----------------------------------------------------------------------===//
-// EarlyCSE pass.
+// EarlyCSE pass.
 //===----------------------------------------------------------------------===//
 
 namespace {
-
+
 /// EarlyCSE - This pass does a simple depth-first walk over the dominator
 /// tree, eliminating trivially redundant instructions and using instsimplify
 /// to canonicalize things as it goes. It is intended to be fast and catch
@@ -223,14 +223,14 @@ public:
                       ScopedHashTableVal<SimpleValue, Value*> > AllocatorTy;
   typedef ScopedHashTable<SimpleValue, Value*, DenseMapInfo<SimpleValue>,
                           AllocatorTy> ScopedHTType;
-
+
  /// AvailableValues - This scoped hash table contains the current values of
   /// all of our simple scalar expressions. As we walk down the domtree, we
   /// look to see if instructions are in this: if so, we replace them with what
   /// we find, otherwise we insert them so that dominated values can succeed in
   /// their lookup.
   ScopedHTType *AvailableValues;
-
+
   /// AvailableLoads - This scoped hash table contains the current values
   /// of loads. This allows us to get efficient access to dominating loads when
   /// we have a fully redundant load. In addition to the most recent load, we
@@ -243,15 +243,15 @@ public:
   typedef ScopedHashTable<Value*, std::pair<Value*, unsigned>,
                           DenseMapInfo<Value*>, LoadMapAllocator> LoadHTType;
   LoadHTType *AvailableLoads;
-
+
   /// AvailableCalls - This scoped hash table contains the current values
   /// of read-only call values. It uses the same generation count as loads.
   typedef ScopedHashTable<CallValue, std::pair<Value*, unsigned> > CallHTType;
   CallHTType *AvailableCalls;
-
+
   /// CurrentGeneration - This is the current generation of the memory value.
   unsigned CurrentGeneration;
-
+
   static char ID;
   explicit EarlyCSE() : FunctionPass(ID) {
     initializeEarlyCSEPass(*PassRegistry::getPassRegistry());
   }
@@ -326,7 +326,7 @@ private:
   };
 
   bool processNode(DomTreeNode *Node);
-
+
   // This transformation requires dominator postdominator info
   virtual void getAnalysisUsage(AnalysisUsage &AU) const {
     AU.addRequired<DominatorTree>();
@@ -350,7 +350,7 @@ INITIALIZE_PASS_END(EarlyCSE, "early-cse", "Early CSE", false, false)
 
 bool EarlyCSE::processNode(DomTreeNode *Node) {
   BasicBlock *BB = Node->getBlock();
-
+
   // If this block has a single predecessor, then the predecessor is the parent
   // of the domtree node and all of the live out memory values are still current
   // in this block. If this block has multiple predecessors, then they could
@@ -359,20 +359,20 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
   // predecessors.
   if (BB->getSinglePredecessor() == 0)
     ++CurrentGeneration;
-
+
   /// LastStore - Keep track of the last non-volatile store that we saw... for
   /// as long as there in no instruction that reads memory. If we see a store
   /// to the same location, we delete the dead store. This zaps trivial dead
   /// stores which can occur in bitfield code among other things.
   StoreInst *LastStore = 0;
-
+
   bool Changed = false;
 
   // See if any instructions in the block can be eliminated. If so, do it. If
   // not, add them to AvailableValues.
   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
     Instruction *Inst = I++;
-
+
     // Dead instructions should just be removed.
     if (isInstructionTriviallyDead(Inst)) {
       DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
@@ -381,7 +381,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
       ++NumSimplify;
       continue;
     }
-
+
     // If the instruction can be simplified (e.g. X+0 = X) then replace it with
     // its simpler value.
     if (Value *V = SimplifyInstruction(Inst, TD, TLI, DT)) {
@@ -392,7 +392,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
       ++NumSimplify;
       continue;
     }
-
+
     // If this is a simple instruction that we can value number, process it.
     if (SimpleValue::canHandle(Inst)) {
       // See if the instruction has an available value. If so, use it.
@@ -404,12 +404,12 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
         ++NumCSE;
         continue;
       }
-
+
       // Otherwise, just remember that this value is available.
       AvailableValues->insert(Inst, Inst);
       continue;
     }
-
+
     // If this is a non-volatile load, process it.
     if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
       // Ignore volatile loads.
@@ -417,7 +417,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
         LastStore = 0;
         continue;
       }
-
+
       // If we have an available version of this load, and if it is the right
       // generation, replace this instruction.
       std::pair<Value*, unsigned> InVal =
@@ -431,18 +431,18 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
         ++NumCSELoad;
         continue;
       }
-
+
       // Otherwise, remember that we have this instruction.
      AvailableLoads->insert(Inst->getOperand(0),
                          std::pair<Value*, unsigned>(Inst, CurrentGeneration));
       LastStore = 0;
       continue;
     }
-
+
     // If this instruction may read from memory, forget LastStore.
     if (Inst->mayReadFromMemory())
       LastStore = 0;
-
+
     // If this is a read-only call, process it.
     if (CallValue::canHandle(Inst)) {
       // If we have an available version of this call, and if it is the right
@@ -457,19 +457,19 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
         ++NumCSECall;
         continue;
       }
-
+
       // Otherwise, remember that we have this instruction.
       AvailableCalls->insert(Inst,
                          std::pair<Value*, unsigned>(Inst, CurrentGeneration));
       continue;
     }
-
+
     // Okay, this isn't something we can CSE at all. Check to see if it is
     // something that could modify memory. If so, our available memory values
     // cannot be used so bump the generation count.
     if (Inst->mayWriteToMemory()) {
       ++CurrentGeneration;
-
+
       if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
         // We do a trivial form of DSE if there are two stores to the same
         // location with no intervening loads. Delete the earlier store.
@@ -483,7 +483,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
           LastStore = 0;
           continue;
         }
-
+
         // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a live
         // version of the pointer. It is safe to forward from volatile stores
         // the store.
         AvailableLoads->insert(SI->getPointerOperand(),
          std::pair<Value*, unsigned>(SI->getValueOperand(), CurrentGeneration));
-
+
         // Remember that this was the last store we saw for DSE.
         if (SI->isSimple())
           LastStore = SI;
@@ -509,7 +509,7 @@ bool EarlyCSE::runOnFunction(Function &F) {
   TD = getAnalysisIfAvailable<TargetData>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   DT = &getAnalysis<DominatorTree>();
-
+
   // Tables that the pass uses when walking the domtree.
   ScopedHTType AVTable;
   AvailableValues = &AVTable;
@@ -517,7 +517,7 @@
   AvailableLoads = &LoadTable;
   CallHTType CallTable;
   AvailableCalls = &CallTable;
-
+
   CurrentGeneration = 0;
   bool Changed = false;
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 476ec383e6..140864d5f8 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -173,7 +173,7 @@ Expression ValueTable::create_expression(Instruction *I) {
     if (e.varargs[0] > e.varargs[1])
       std::swap(e.varargs[0], e.varargs[1]);
   }
-
+
   if (CmpInst *C = dyn_cast<CmpInst>(I)) {
     // Sort the operand value numbers so x<y and y>x get the same value number.
     CmpInst::Predicate Predicate = C->getPredicate();
@@ -187,7 +187,7 @@ Expression ValueTable::create_expression(Instruction *I) {
          II != IE; ++II)
       e.varargs.push_back(*II);
   }
-
+
   return e;
 }
 
@@ -391,7 +391,7 @@ uint32_t ValueTable::lookup_or_add(Value *V) {
     valueNumbering[V] = nextValueNumber;
     return nextValueNumber++;
   }
-
+
   Instruction* I = cast<Instruction>(V);
   Expression exp;
   switch (I->getOpcode()) {
@@ -507,7 +507,7 @@ namespace {
     const TargetLibraryInfo *TLI;
 
     ValueTable VN;
-
+
     /// LeaderTable - A mapping from value numbers to lists of Value*'s that
     /// have that value number. Use findLeader to query it.
     struct LeaderTableEntry {
@@ -517,7 +517,7 @@ namespace {
     };
     DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
     BumpPtrAllocator TableAllocator;
-
+
     SmallVector<Instruction*, 8> InstrsToErase;
   public:
     static char ID; // Pass identification, replacement for typeid
@@ -527,14 +527,14 @@ namespace {
     }
 
     bool runOnFunction(Function &F);
-
+
    /// markInstructionForDeletion - This removes the specified instruction from
     /// our various maps and marks it for deletion.
     void markInstructionForDeletion(Instruction *I) {
       VN.erase(I);
       InstrsToErase.push_back(I);
     }
-
+
     const TargetData *getTargetData() const { return TD; }
     DominatorTree &getDominatorTree() const { return *DT; }
     AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
@@ -549,14 +549,14 @@ namespace {
         Curr.BB = BB;
         return;
       }
-
+
       LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
       Node->Val = V;
       Node->BB = BB;
       Node->Next = Curr.Next;
       Curr.Next = Node;
     }
-
+
    /// removeFromLeaderTable - Scan the list of values corresponding to a given
     /// value number, and remove the given instruction if encountered.
     void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
@@ -567,7 +567,7 @@ namespace {
         Prev = Curr;
         Curr = Curr->Next;
       }
-
+
       if (Prev) {
         Prev->Next = Curr->Next;
       } else {
@@ -597,7 +597,7 @@ namespace {
       AU.addPreserved<DominatorTree>();
       AU.addPreserved<AliasAnalysis>();
     }
-
+
     // Helper fuctions
     // FIXME: eliminate or document these better
@@ -735,15 +735,15 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
       StoredVal->getType()->isStructTy() ||
       StoredVal->getType()->isArrayTy())
     return false;
-
+
   // The store has to be at least as big as the load.
   if (TD.getTypeSizeInBits(StoredVal->getType()) <
         TD.getTypeSizeInBits(LoadTy))
     return false;
-
+
   return true;
 }
-
+
 /// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
 /// then a load from a must-aliased pointer of a different type, try to coerce
@@ -751,80 +751,80 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
 /// InsertPt is the place to insert new instructions.
 ///
 /// If we can't do it, return null.
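(The page is truncated here; the diffstat above reports 22 changed files, but only the first five appear.)

The diff opens with ADCE.cpp, whose header comment summarizes the pass: assume every instruction is dead, seed a worklist with known-live roots (terminators and other side-effecting instructions), propagate liveness backwards through operands, then delete whatever was never marked. The following is a self-contained sketch of that scheme on a toy instruction type; the `Inst` struct and its `SideEffects` flag are illustrative stand-ins, not LLVM's API:

```cpp
// adce_sketch.cpp - illustrative only; the real pass operates on
// llvm::Instruction and llvm::Function rather than this toy IR.
#include <cstdio>
#include <set>
#include <vector>

struct Inst {
  const char *Name;
  bool SideEffects;             // terminators, stores, calls: known-live roots
  std::vector<Inst *> Operands; // values this instruction reads
};

// Optimistic DCE: every instruction is dead until reached from a root.
static std::set<Inst *> findLive(const std::vector<Inst *> &Body) {
  std::set<Inst *> Alive;
  std::vector<Inst *> Worklist;

  // Collect the set of "root" instructions that are known live.
  for (Inst *I : Body)
    if (I->SideEffects && Alive.insert(I).second)
      Worklist.push_back(I);

  // Propagate liveness backwards to operands.
  while (!Worklist.empty()) {
    Inst *Curr = Worklist.back();
    Worklist.pop_back();
    for (Inst *Op : Curr->Operands)
      if (Alive.insert(Op).second)
        Worklist.push_back(Op);
  }
  return Alive;
}

int main() {
  Inst A{"a", false, {}};      // feeds the store: live transitively
  Inst B{"b", false, {}};      // used only by dead c: dead
  Inst C{"c", false, {&B}};    // no users, no side effects: dead
  Inst S{"store", true, {&A}}; // root
  std::vector<Inst *> Body{&A, &B, &C, &S};

  std::set<Inst *> Alive = findLive(Body);
  for (Inst *I : Body)
    std::printf("%s: %s\n", I->Name, Alive.count(I) ? "live" : "dead");
}
```

This mirrors the `alive`/`worklist` pair visible in the `ADCE::runOnFunction` hunks above; starting from the assumption of deadness is what lets the pass drop dead loop computations that a pessimistic DCE, which assumes liveness until proven otherwise, would keep.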