Diffstat (limited to 'lib')
-rw-r--r--  lib/Analysis/IVUsers.cpp                      |  28
-rw-r--r--  lib/Analysis/LoopVR.cpp                       |  14
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp              | 412
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp      |  58
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp      |  24
-rw-r--r--  lib/Transforms/Scalar/LoopDeletion.cpp        |   2
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp  | 206
7 files changed, 372 insertions, 372 deletions
diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp
index caeb14bef3..317c869164 100644
--- a/lib/Analysis/IVUsers.cpp
+++ b/lib/Analysis/IVUsers.cpp
@@ -39,7 +39,7 @@ Pass *llvm::createIVUsersPass() {
 /// containsAddRecFromDifferentLoop - Determine whether expression S involves a
 /// subexpression that is an AddRec from a loop other than L. An outer loop
 /// of L is OK, but not an inner loop nor a disjoint loop.
-static bool containsAddRecFromDifferentLoop(const SCEV* S, Loop *L) {
+static bool containsAddRecFromDifferentLoop(const SCEV *S, Loop *L) {
   // This is very common, put it first.
   if (isa<SCEVConstant>(S))
     return false;
@@ -80,10 +80,10 @@ static bool containsAddRecFromDifferentLoop(const SCEV* S, Loop *L) {
 /// a mix of loop invariant and loop variant expressions. The start cannot,
 /// however, contain an AddRec from a different loop, unless that loop is an
 /// outer loop of the current loop.
-static bool getSCEVStartAndStride(const SCEV* &SH, Loop *L, Loop *UseLoop,
-                                  const SCEV* &Start, const SCEV* &Stride,
+static bool getSCEVStartAndStride(const SCEV *&SH, Loop *L, Loop *UseLoop,
+                                  const SCEV *&Start, const SCEV *&Stride,
                                   ScalarEvolution *SE, DominatorTree *DT) {
-  const SCEV* TheAddRec = Start;   // Initialize to zero.
+  const SCEV *TheAddRec = Start;   // Initialize to zero.
 
   // If the outer level is an AddExpr, the operands are all start values except
   // for a nested AddRecExpr.
@@ -109,9 +109,9 @@ static bool getSCEVStartAndStride(const SCEV* &SH, Loop *L, Loop *UseLoop,
     // Use getSCEVAtScope to attempt to simplify other loops out of
     // the picture.
-    const SCEV* AddRecStart = AddRec->getStart();
+    const SCEV *AddRecStart = AddRec->getStart();
     AddRecStart = SE->getSCEVAtScope(AddRecStart, UseLoop);
-    const SCEV* AddRecStride = AddRec->getStepRecurrence(*SE);
+    const SCEV *AddRecStride = AddRec->getStepRecurrence(*SE);
 
     // FIXME: If Start contains an SCEVAddRecExpr from a different loop, other
     // than an outer loop of the current loop, reject it. LSR has no concept of
@@ -196,13 +196,13 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
     return true;   // Instruction already handled.
 
   // Get the symbolic expression for this instruction.
-  const SCEV* ISE = SE->getSCEV(I);
+  const SCEV *ISE = SE->getSCEV(I);
   if (isa<SCEVCouldNotCompute>(ISE)) return false;
 
   // Get the start and stride for this expression.
   Loop *UseLoop = LI->getLoopFor(I->getParent());
-  const SCEV* Start = SE->getIntegerSCEV(0, ISE->getType());
-  const SCEV* Stride = Start;
+  const SCEV *Start = SE->getIntegerSCEV(0, ISE->getType());
+  const SCEV *Stride = Start;
 
   if (!getSCEVStartAndStride(ISE, L, UseLoop, Start, Stride, SE, DT))
     return false;  // Non-reducible symbolic expression, bail out.
@@ -254,7 +254,7 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
       if (IVUseShouldUsePostIncValue(User, I, L, LI, DT, this)) {
         // The value used will be incremented by the stride more than we are
         // expecting, so subtract this off.
-        const SCEV* NewStart = SE->getMinusSCEV(Start, Stride);
+        const SCEV *NewStart = SE->getMinusSCEV(Start, Stride);
         StrideUses->addUser(NewStart, User, I);
         StrideUses->Users.back().setIsUseOfPostIncrementedValue(true);
         DOUT << " USING POSTINC SCEV, START=" << *NewStart<< "\n";
@@ -295,9 +295,9 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
 
 /// getReplacementExpr - Return a SCEV expression which computes the
 /// value of the OperandValToReplace of the given IVStrideUse.
-const SCEV* IVUsers::getReplacementExpr(const IVStrideUse &U) const {
+const SCEV *IVUsers::getReplacementExpr(const IVStrideUse &U) const {
   // Start with zero.
-  const SCEV* RetVal = SE->getIntegerSCEV(0, U.getParent()->Stride->getType());
+  const SCEV *RetVal = SE->getIntegerSCEV(0, U.getParent()->Stride->getType());
   // Create the basic add recurrence.
   RetVal = SE->getAddRecExpr(RetVal, U.getParent()->Stride, L);
   // Add the offset in a separate step, because it may be loop-variant.
@@ -308,7 +308,7 @@ const SCEV* IVUsers::getReplacementExpr(const IVStrideUse &U) const {
     RetVal = SE->getAddExpr(RetVal, U.getParent()->Stride);
   // Evaluate the expression out of the loop, if possible.
   if (!L->contains(U.getUser()->getParent())) {
-    const SCEV* ExitVal = SE->getSCEVAtScope(RetVal, L->getParentLoop());
+    const SCEV *ExitVal = SE->getSCEVAtScope(RetVal, L->getParentLoop());
     if (ExitVal->isLoopInvariant(L))
       RetVal = ExitVal;
   }
@@ -325,7 +325,7 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const {
   OS << ":\n";
 
   for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e; ++Stride) {
-    std::map<const SCEV*, IVUsersOfOneStride*>::const_iterator SI =
+    std::map<const SCEV *, IVUsersOfOneStride*>::const_iterator SI =
       IVUsesByStride.find(StrideOrder[Stride]);
     assert(SI != IVUsesByStride.end() && "Stride doesn't exist!");
     OS << " Stride " << *SI->first->getType() << " " << *SI->first << ":\n";
diff --git a/lib/Analysis/LoopVR.cpp b/lib/Analysis/LoopVR.cpp
index 3800ef5358..1c78ef9a52 100644
--- a/lib/Analysis/LoopVR.cpp
+++ b/lib/Analysis/LoopVR.cpp
@@ -27,8 +27,8 @@ char LoopVR::ID = 0;
 static RegisterPass<LoopVR> X("loopvr", "Loop Value Ranges", false, true);
 
 /// getRange - determine the range for a particular SCEV within a given Loop
-ConstantRange LoopVR::getRange(const SCEV* S, Loop *L, ScalarEvolution &SE) {
-  const SCEV* T = SE.getBackedgeTakenCount(L);
+ConstantRange LoopVR::getRange(const SCEV *S, Loop *L, ScalarEvolution &SE) {
+  const SCEV *T = SE.getBackedgeTakenCount(L);
   if (isa<SCEVCouldNotCompute>(T))
     return ConstantRange(cast<IntegerType>(S->getType())->getBitWidth(), true);
@@ -37,7 +37,7 @@ ConstantRange LoopVR::getRange(const SCEV* S, Loop *L, ScalarEvolution &SE) {
 }
 
 /// getRange - determine the range for a particular SCEV with a given trip count
-ConstantRange LoopVR::getRange(const SCEV* S, const SCEV* T, ScalarEvolution &SE){
+ConstantRange LoopVR::getRange(const SCEV *S, const SCEV *T, ScalarEvolution &SE){
   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
     return ConstantRange(C->getValue()->getValue());
 
@@ -183,8 +183,8 @@ ConstantRange LoopVR::getRange(const SCEV* S, const SCEV* T, ScalarEvolution &SE
   if (!Trip) return FullSet;
 
   if (AddRec->isAffine()) {
-    const SCEV* StartHandle = AddRec->getStart();
-    const SCEV* StepHandle = AddRec->getOperand(1);
+    const SCEV *StartHandle = AddRec->getStart();
+    const SCEV *StepHandle = AddRec->getOperand(1);
 
     const SCEVConstant *Step = dyn_cast<SCEVConstant>(StepHandle);
     if (!Step) return FullSet;
@@ -195,7 +195,7 @@ ConstantRange LoopVR::getRange(const SCEV* S, const SCEV* T, ScalarEvolution &SE
     if ((TripExt * StepExt).ugt(APInt::getLowBitsSet(ExWidth, ExWidth >> 1)))
       return FullSet;
 
-    const SCEV* EndHandle = SE.getAddExpr(StartHandle,
+    const SCEV *EndHandle = SE.getAddExpr(StartHandle,
                                           SE.getMulExpr(T, StepHandle));
     const SCEVConstant *Start = dyn_cast<SCEVConstant>(StartHandle);
     const SCEVConstant *End = dyn_cast<SCEVConstant>(EndHandle);
@@ -255,7 +255,7 @@ ConstantRange LoopVR::compute(Value *V) {
 
   ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
 
-  const SCEV* S = SE.getSCEV(I);
+  const SCEV *S = SE.getSCEV(I);
   if (isa<SCEVUnknown>(S) || isa<SCEVCouldNotCompute>(S))
     return ConstantRange(cast<IntegerType>(V->getType())->getBitWidth(), false);
 
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 577315879b..a411f2dab0 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -14,7 +14,7 @@
 // There are several aspects to this library. First is the representation of
 // scalar expressions, which are represented as subclasses of the SCEV class.
 // These classes are used to represent certain types of subexpressions that we
-// can handle. These classes are reference counted, managed by the const SCEV*
+// can handle. These classes are reference counted, managed by the const SCEV *
 // class. We only create one SCEV of a particular shape, so pointer-comparisons
 // for equality are legal.
 //
@@ -180,7 +180,7 @@ bool SCEVCouldNotCompute::classof(const SCEV *S) {
   return S->getSCEVType() == scCouldNotCompute;
 }
 
-const SCEV* ScalarEvolution::getConstant(ConstantInt *V) {
+const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
   FoldingSetNodeID ID;
   ID.AddInteger(scConstant);
   ID.AddPointer(V);
@@ -192,11 +192,11 @@ const SCEV* ScalarEvolution::getConstant(ConstantInt *V) {
   return S;
 }
 
-const SCEV* ScalarEvolution::getConstant(const APInt& Val) {
+const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
   return getConstant(ConstantInt::get(Val));
 }
 
-const SCEV*
+const SCEV *
 ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
   return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
 }
@@ -213,7 +213,7 @@ void SCEVConstant::print(raw_ostream &OS) const {
 }
 
 SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy,
-                           const SCEV* op, const Type *ty)
+                           const SCEV *op, const Type *ty)
   : SCEV(SCEVTy), Op(op), Ty(ty) {}
 
 void SCEVCastExpr::Profile(FoldingSetNodeID &ID) const {
@@ -226,7 +226,7 @@ bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
   return Op->dominates(BB, DT);
 }
 
-SCEVTruncateExpr::SCEVTruncateExpr(const SCEV* op, const Type *ty)
+SCEVTruncateExpr::SCEVTruncateExpr(const SCEV *op, const Type *ty)
   : SCEVCastExpr(scTruncate, op, ty) {
   assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
          (Ty->isInteger() || isa<PointerType>(Ty)) &&
@@ -237,7 +237,7 @@ void SCEVTruncateExpr::print(raw_ostream &OS) const {
   OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
 }
 
-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV* op, const Type *ty)
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV *op, const Type *ty)
   : SCEVCastExpr(scZeroExtend, op, ty) {
   assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
          (Ty->isInteger() || isa<PointerType>(Ty)) &&
@@ -248,7 +248,7 @@ void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
   OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
 }
 
-SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV* op, const Type *ty)
+SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV *op, const Type *ty)
  : SCEVCastExpr(scSignExtend, op, ty) {
   assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
          (Ty->isInteger() || isa<PointerType>(Ty)) &&
@@ -274,10 +274,10 @@ SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete(
                                                  const SCEV *Conc,
                                                  ScalarEvolution &SE) const {
   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
-    const SCEV* H =
+    const SCEV *H =
       getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
     if (H != getOperand(i)) {
-      SmallVector<const SCEV*, 8> NewOps;
+      SmallVector<const SCEV *, 8> NewOps;
       NewOps.reserve(getNumOperands());
       for (unsigned j = 0; j != i; ++j)
         NewOps.push_back(getOperand(j));
@@ -352,10 +352,10 @@ SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                   const SCEV *Conc,
                                                   ScalarEvolution &SE) const {
   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
-    const SCEV* H =
+    const SCEV *H =
       getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
     if (H != getOperand(i)) {
-      SmallVector<const SCEV*, 8> NewOps;
+      SmallVector<const SCEV *, 8> NewOps;
       NewOps.reserve(getNumOperands());
       for (unsigned j = 0; j != i; ++j)
         NewOps.push_back(getOperand(j));
@@ -558,7 +558,7 @@ namespace {
 /// this to depend on where the addresses of various SCEV objects happened to
 /// land in memory.
 ///
-static void GroupByComplexity(SmallVectorImpl<const SCEV*> &Ops,
+static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                               LoopInfo *LI) {
   if (Ops.size() < 2) return;  // Noop
   if (Ops.size() == 2) {
@@ -601,7 +601,7 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV*> &Ops,
 
 /// BinomialCoefficient - Compute BC(It, K). The result has width W.
 /// Assume, K > 0.
-static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K,
+static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                        ScalarEvolution &SE,
                                        const Type* ResultTy) {
   // Handle the simplest case efficiently.
@@ -694,15 +694,15 @@ static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K,
 
   // Calculate the product, at width T+W
   const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
-  const SCEV* Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
+  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
   for (unsigned i = 1; i != K; ++i) {
-    const SCEV* S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
+    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
     Dividend = SE.getMulExpr(Dividend,
                              SE.getTruncateOrZeroExtend(S, CalculationTy));
   }
 
   // Divide by 2^T
-  const SCEV* DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
+  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
 
   // Truncate the result, and divide by K! / 2^T.
 
@@ -719,14 +719,14 @@ static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K,
 ///
 ///   where BC(It, k) stands for binomial coefficient.
 ///
-const SCEV* SCEVAddRecExpr::evaluateAtIteration(const SCEV* It,
+const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                 ScalarEvolution &SE) const {
-  const SCEV* Result = getStart();
+  const SCEV *Result = getStart();
   for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
     // The computation is correct in the face of overflow provided that the
     // multiplication is performed _after_ the evaluation of the binomial
     // coefficient.
-    const SCEV* Coeff = BinomialCoefficient(It, i, SE, getType());
+    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
     if (isa<SCEVCouldNotCompute>(Coeff))
       return Coeff;
 
@@ -739,7 +739,7 @@ const SCEV* SCEVAddRecExpr::evaluateAtIteration(const SCEV* It,
 // SCEV Expression folder implementations
 //===----------------------------------------------------------------------===//
 
-const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op,
+const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                              const Type *Ty) {
   assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
          "This is not a truncating conversion!");
@@ -766,7 +766,7 @@ const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op,
 
   // If the input value is a chrec scev, truncate the chrec's operands.
   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
-    SmallVector<const SCEV*, 4> Operands;
+    SmallVector<const SCEV *, 4> Operands;
     for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
       Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
     return getAddRecExpr(Operands, AddRec->getLoop());
@@ -784,7 +784,7 @@ const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op,
   return S;
 }
 
-const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,
+const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                                const Type *Ty) {
   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
          "This is not an extending conversion!");
@@ -818,28 +818,28 @@ const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,
       // in infinite recursion. In the later case, the analysis code will
      // cope with a conservative value, and it will take care to purge
       // that value once it has finished.
-      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
+      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
         // Manually compute the final value for AR, checking for
         // overflow.
-        const SCEV* Start = AR->getStart();
-        const SCEV* Step = AR->getStepRecurrence(*this);
+        const SCEV *Start = AR->getStart();
+        const SCEV *Step = AR->getStepRecurrence(*this);
 
         // Check whether the backedge-taken count can be losslessly casted to
         // the addrec's type. The count is always unsigned.
-        const SCEV* CastedMaxBECount =
+        const SCEV *CastedMaxBECount =
           getTruncateOrZeroExtend(MaxBECount, Start->getType());
-        const SCEV* RecastedMaxBECount =
+        const SCEV *RecastedMaxBECount =
           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
         if (MaxBECount == RecastedMaxBECount) {
           const Type *WideTy =
             IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
           // Check whether Start+Step*MaxBECount has no unsigned overflow.
-          const SCEV* ZMul =
+          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
-          const SCEV* Add = getAddExpr(Start, ZMul);
-          const SCEV* OperandExtendedAdd =
+          const SCEV *Add = getAddExpr(Start, ZMul);
+          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
@@ -851,7 +851,7 @@ const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,
 
           // Similar to above, only this time treat the step value as signed.
           // This covers loops that count down.
-          const SCEV* SMul =
+          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
           Add = getAddExpr(Start, SMul);
@@ -880,7 +880,7 @@ const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,
   return S;
 }
 
-const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op,
+const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                                const Type *Ty) {
   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
          "This is not an extending conversion!");
@@ -914,28 +914,28 @@ const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op,
      // in infinite recursion. In the later case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
-      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
+      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
         // Manually compute the final value for AR, checking for
         // overflow.
-        const SCEV* Start = AR->getStart();
-        const SCEV* Step = AR->getStepRecurrence(*this);
+        const SCEV *Start = AR->getStart();
+        const SCEV *Step = AR->getStepRecurrence(*this);
 
         // Check whether the backedge-taken count can be losslessly casted to
         // the addrec's type. The count is always unsigned.
-        const SCEV* CastedMaxBECount =
+        const SCEV *CastedMaxBECount =
           getTruncateOrZeroExtend(MaxBECount, Start->getType());
-        const SCEV* RecastedMaxBECount =
+        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
         if (MaxBECount == RecastedMaxBECount) {
           const Type *WideTy =
             IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
           // Check whether Start+Step*MaxBECount has no signed overflow.
-          const SCEV* SMul =
+          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
-          const SCEV* Add = getAddExpr(Start, SMul);
-          const SCEV* OperandExtendedAdd =
+          const SCEV *Add = getAddExpr(Start, SMul);
+          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
@@ -963,7 +963,7 @@ const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op,
 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
 /// unspecified bits out to the given type.
 ///
-const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op,
+const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
          "This is not an extending conversion!");
@@ -978,19 +978,19 @@ const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op,
 
   // Peel off a truncate cast.
   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
-    const SCEV* NewOp = T->getOperand();
+    const SCEV *NewOp = T->getOperand();
     if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
       return getAnyExtendExpr(NewOp, Ty);
     return getTruncateOrNoop(NewOp, Ty);
   }
 
   // Next try a zext cast. If the cast is folded, use it.
-  const SCEV* ZExt = getZeroExtendExpr(Op, Ty);
+  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
   if (!isa<SCEVZeroExtendExpr>(ZExt))
     return ZExt;
 
   // Next try a sext cast. If the cast is folded, use it.
-  const SCEV* SExt = getSignExtendExpr(Op, Ty);
+  const SCEV *SExt = getSignExtendExpr(Op, Ty);
   if (!isa<SCEVSignExtendExpr>(SExt))
     return SExt;
 
@@ -1028,10 +1028,10 @@ const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op,
 /// is also used as a check to avoid infinite recursion.
 ///
 static bool
-CollectAddOperandsWithScales(DenseMap<const SCEV*, APInt> &M,
-                             SmallVector<const SCEV*, 8> &NewOps,
+CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
+                             SmallVector<const SCEV *, 8> &NewOps,
                              APInt &AccumulatedConstant,
-                             const SmallVectorImpl<const SCEV*> &Ops,
+                             const SmallVectorImpl<const SCEV *> &Ops,
                              const APInt &Scale,
                              ScalarEvolution &SE) {
   bool Interesting = false;
@@ -1052,9 +1052,9 @@ CollectAddOperandsWithScales(DenseMap<const SCEV*, APInt> &M,
       } else {
         // A multiplication of a constant with some other value. Update
         // the map.
-        SmallVector<const SCEV*, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
-        const SCEV* Key = SE.getMulExpr(MulOps);
-        std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
+        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
+        const SCEV *Key = SE.getMulExpr(MulOps);
+        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
         if (Pair.second) {
           NewOps.push_back(Pair.first->first);
@@ -1072,7 +1072,7 @@ CollectAddOperandsWithScales(DenseMap<const SCEV*, APInt> &M,
       AccumulatedConstant += Scale * C->getValue()->getValue();
     } else {
       // An ordinary operand. Update the map.
-      std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
+      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
         M.insert(std::make_pair(Ops[i], Scale));
       if (Pair.second) {
         NewOps.push_back(Pair.first->first);
@@ -1098,7 +1098,7 @@ namespace {
 
 /// getAddExpr - Get a canonical add expression, or something simpler if
 /// possible.
-const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
+const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) {
   assert(!Ops.empty() && "Cannot get empty add!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
@@ -1142,8 +1142,8 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
      if (Ops[i] == Ops[i+1]) {      // X + Y + Y --> X + Y*2
        // Found a match, merge the two values into a multiply, and add any
        // remaining values to the result.
-        const SCEV* Two = getIntegerSCEV(2, Ty);
-        const SCEV* Mul = getMulExpr(Ops[i], Two);
+        const SCEV *Two = getIntegerSCEV(2, Ty);
+        const SCEV *Mul = getMulExpr(Ops[i], Two);
        if (Ops.size() == 2)
          return Mul;
        Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
@@ -1159,7 +1159,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
     const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
     const Type *DstType = Trunc->getType();
     const Type *SrcType = Trunc->getOperand()->getType();
-    SmallVector<const SCEV*, 8> LargeOps;
+    SmallVector<const SCEV *, 8> LargeOps;
     bool Ok = true;
     // Check all the operands to see if they can be represented in the
     // source type of the truncate.
@@ -1175,7 +1175,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
         // is much more likely to be foldable here.
         LargeOps.push_back(getSignExtendExpr(C, SrcType));
       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
-        SmallVector<const SCEV*, 8> LargeMulOps;
+        SmallVector<const SCEV *, 8> LargeMulOps;
         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
           if (const SCEVTruncateExpr *T =
                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
@@ -1203,7 +1203,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
     }
     if (Ok) {
       // Evaluate the expression in the larger type.
-      const SCEV* Fold = getAddExpr(LargeOps);
+      const SCEV *Fold = getAddExpr(LargeOps);
       // If it folds to something simple, use it. Otherwise, don't.
       if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
         return getTruncateExpr(Fold, DstType);
@@ -1240,16 +1240,16 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
   // operands multiplied by constant values.
   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
     uint64_t BitWidth = getTypeSizeInBits(Ty);
-    DenseMap<const SCEV*, APInt> M;
-    SmallVector<const SCEV*, 8> NewOps;
+    DenseMap<const SCEV *, APInt> M;
+    SmallVector<const SCEV *, 8> NewOps;
     APInt AccumulatedConstant(BitWidth, 0);
     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                      Ops, APInt(BitWidth, 1), *this)) {
       // Some interesting folding opportunity is present, so its worthwhile to
       // re-generate the operands list. Group the operands by constant scale,
       // to avoid multiplying by the same constant scale multiple times.
-      std::map<APInt, SmallVector<const SCEV*, 4>, APIntCompare> MulOpLists;
-      for (SmallVector<const SCEV*, 8>::iterator I = NewOps.begin(),
+      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
+      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
@@ -1279,17 +1279,17 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
         if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
           // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
-          const SCEV* InnerMul = Mul->getOperand(MulOp == 0);
+          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
           if (Mul->getNumOperands() != 2) {
             // If the multiply has more than two operands, we must get the
             // Y*Z term.
-            SmallVector<const SCEV*, 4> MulOps(Mul->op_begin(), Mul->op_end());
+            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
             MulOps.erase(MulOps.begin()+MulOp);
             InnerMul = getMulExpr(MulOps);
           }
-          const SCEV* One = getIntegerSCEV(1, Ty);
-          const SCEV* AddOne = getAddExpr(InnerMul, One);
-          const SCEV* OuterMul = getMulExpr(AddOne, Ops[AddOp]);
+          const SCEV *One = getIntegerSCEV(1, Ty);
+          const SCEV *AddOne = getAddExpr(InnerMul, One);
+          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
           if (Ops.size() == 2) return OuterMul;
           if (AddOp < Idx) {
             Ops.erase(Ops.begin()+AddOp);
@@ -1313,22 +1313,22 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
              OMulOp != e; ++OMulOp)
           if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
             // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
-            const SCEV* InnerMul1 = Mul->getOperand(MulOp == 0);
+            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
             if (Mul->getNumOperands() != 2) {
               SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                   Mul->op_end());
               MulOps.erase(MulOps.begin()+MulOp);
               InnerMul1 = getMulExpr(MulOps);
             }
-            const SCEV* InnerMul2 = OtherMul->getOperand(OMulOp == 0);
+            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
             if (OtherMul->getNumOperands() != 2) {
               SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                   OtherMul->op_end());
               MulOps.erase(MulOps.begin()+OMulOp);
               InnerMul2 = getMulExpr(MulOps);
             }
-            const SCEV* InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
-            const SCEV* OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
+            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
+            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
             if (Ops.size() == 2) return OuterMul;
             Ops.erase(Ops.begin()+Idx);
             Ops.erase(Ops.begin()+OtherMulIdx-1);
@@ -1349,7 +1349,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
     // Scan all of the other operands to this add and add them to the vector if
     // they are loop invariant w.r.t. the recurrence.
-    SmallVector<const SCEV*, 8> LIOps;
+    SmallVector<const SCEV *, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
       if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
@@ -1363,11 +1363,11 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
 
       // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
       LIOps.push_back(AddRec->getStart());
 
-      SmallVector<const SCEV*, 4> AddRecOps(AddRec->op_begin(),
+      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
       AddRecOps[0] = getAddExpr(LIOps);
 
-      const SCEV* NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
+      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
 
       // If all of the other operands were loop invariant, we are done.
       if (Ops.size() == 1) return NewRec;
@@ -1399,7 +1399,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
         }
         NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
       }
-      const SCEV* NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
+      const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
 
       if (Ops.size() == 2) return NewAddRec;
 
@@ -1432,7 +1432,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
 
 /// getMulExpr - Get a canonical multiply expression, or something simpler if
 /// possible.
-const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV*> &Ops) {
+const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
   assert(!Ops.empty() && "Cannot get empty mul!");
 #ifndef NDEBUG
   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
@@ -1513,7 +1513,7 @@ const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV*> &Ops) {
   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
     // Scan all of the other operands to this mul and add them to the vector if
     // they are loop invariant w.r.t. the recurrence.
-    SmallVector<const SCEV*, 8> LIOps;
+    SmallVector<const SCEV *, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
       if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
@@ -1525,7 +1525,7 @@ const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV*> &Ops) {
     // If we found some loop invariants, fold them into the recurrence.
     if (!LIOps.empty()) {
       // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
-      SmallVector<const SCEV*, 4> NewOps;
+      SmallVector<const SCEV *, 4> NewOps;
       NewOps.reserve(AddRec->getNumOperands());
       if (LIOps.size() == 1) {
         const SCEV *Scale = LIOps[0];
@@ -1533,13 +1533,13 @@ const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV*> &Ops) {
           NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
       } else {
         for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
-          SmallVector<const SCEV*, 4> MulOps(LIOps.begin(), LIOps.end());
+          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
           MulOps.push_back(AddRec->getOperand(i));
           NewOps.push_back(getMulExpr(MulOps));
         }
       }
 
-      const SCEV* NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
+      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
 
       // If all of the other operands were loop invariant, we are done.
       if (Ops.size() == 1) return NewRec;
@@ -1563,14 +1563,14 @@ const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV*> &Ops) {
       if (AddRec->getLoop() == OtherAddRec->getLoop()) {
         // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D}
         const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
-        const SCEV* NewStart = getMulExpr(F->getStart(),
+        const SCEV *NewStart = getMulExpr(F->getStart(),
                                           G->getStart());
-        const SCEV* B = F->getStepRecurrence(*this);
-        const SCEV* D = G->getStepRecurrence(*this);
-        const SCEV* NewStep = getAddExpr(getMulExpr(F, D),
+        const SCEV *B = F->getStepRecurrence(*this);
+        const SCEV *D = G->getStepRecurrence(*this);
+        const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
                                          getMulExpr(G, B),
                                          getMulExpr(B, D));
-        const SCEV* NewAddRec = getAddRecExpr(NewStart, NewStep,
+        const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
                                               F->getLoop());
         if (Ops.size() == 2) return NewAddRec;
@@ -1636,24 +1636,24 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
             getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                           getZeroExtendExpr(Step, ExtTy),
                           AR->getLoop())) {
-      SmallVector<const SCEV*, 4> Operands;
+      SmallVector<const SCEV *, 4> Operands;
       for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
         Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
       return getAddRecExpr(Operands, AR->getLoop());
     }
     // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
     if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
-      SmallVector<const SCEV*, 4> Operands;
+      SmallVector<const SCEV *, 4> Operands;
       for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
         Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));