Diffstat (limited to 'lib/Analysis/ScalarEvolution.cpp')
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp | 511
1 file changed, 255 insertions, 256 deletions
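The patch replaces the SCEVHandle type with plain const SCEV* pointers throughout ScalarEvolution's interfaces. As a rough illustration only (not part of the patch; SE and V stand for a ScalarEvolution instance and a Value* assumed to be in scope), client code changes from holding handles to holding const SCEV* pointers, while SCEV uniquing keeps pointer comparison valid:

    // Before: expressions were held through the SCEVHandle type.
    //   SCEVHandle S = SE.getSCEV(V);
    // After: expressions are held as plain const SCEV* pointers.
    const SCEV *S = SE.getSCEV(V);
    const SCEV *SPlusOne = SE.getAddExpr(S, SE.getIntegerSCEV(1, S->getType()));
    bool Same = (S == SPlusOne);  // pointer comparison remains legal for SCEVs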
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index d9394805c8..f45562363a 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -14,7 +14,7 @@
 // There are several aspects to this library. First is the representation of
 // scalar expressions, which are represented as subclasses of the SCEV class.
 // These classes are used to represent certain types of subexpressions that we
-// can handle. These classes are reference counted, managed by the SCEVHandle
+// can handle. These classes are reference counted, managed by the const SCEV*
 // class. We only create one SCEV of a particular shape, so pointer-comparisons
 // for equality are legal.
 //
@@ -152,9 +152,9 @@ bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
   return false;
 }
 
-SCEVHandle SCEVCouldNotCompute::
-replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym,
-                                  const SCEVHandle &Conc,
+const SCEV* SCEVCouldNotCompute::
+replaceSymbolicValuesWithConcrete(const SCEV* Sym,
+                                  const SCEV* Conc,
                                   ScalarEvolution &SE) const {
   return this;
 }
@@ -169,20 +169,20 @@ bool SCEVCouldNotCompute::classof(const SCEV *S) {
 
 // SCEVConstants - Only allow the creation of one SCEVConstant for any
-// particular value. Don't use a SCEVHandle here, or else the object will
+// particular value. Don't use a const SCEV* here, or else the object will
 // never be deleted!
 
-SCEVHandle ScalarEvolution::getConstant(ConstantInt *V) {
+const SCEV* ScalarEvolution::getConstant(ConstantInt *V) {
   SCEVConstant *&R = SCEVConstants[V];
   if (R == 0) R = new SCEVConstant(V, this);
   return R;
 }
 
-SCEVHandle ScalarEvolution::getConstant(const APInt& Val) {
+const SCEV* ScalarEvolution::getConstant(const APInt& Val) {
   return getConstant(ConstantInt::get(Val));
 }
 
-SCEVHandle
+const SCEV*
 ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
   return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
 }
@@ -194,7 +194,7 @@ void SCEVConstant::print(raw_ostream &OS) const {
 }
 
 SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy,
-                           const SCEVHandle &op, const Type *ty,
+                           const SCEV* op, const Type *ty,
                            const ScalarEvolution* p)
   : SCEV(SCEVTy, p), Op(op), Ty(ty) {}
 
@@ -205,10 +205,10 @@ bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
 }
 
 // SCEVTruncates - Only allow the creation of one SCEVTruncateExpr for any
-// particular input. Don't use a SCEVHandle here, or else the object will
+// particular input. Don't use a const SCEV* here, or else the object will
 // never be deleted!
 
-SCEVTruncateExpr::SCEVTruncateExpr(const SCEVHandle &op, const Type *ty,
+SCEVTruncateExpr::SCEVTruncateExpr(const SCEV* op, const Type *ty,
                                    const ScalarEvolution* p)
   : SCEVCastExpr(scTruncate, op, ty, p) {
   assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
@@ -222,10 +222,10 @@ void SCEVTruncateExpr::print(raw_ostream &OS) const {
 }
 
 // SCEVZeroExtends - Only allow the creation of one SCEVZeroExtendExpr for any
-// particular input. Don't use a SCEVHandle here, or else the object will never
+// particular input. Don't use a const SCEV* here, or else the object will never
 // be deleted!
-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEVHandle &op, const Type *ty,
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV* op, const Type *ty,
                                        const ScalarEvolution* p)
   : SCEVCastExpr(scZeroExtend, op, ty, p) {
   assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
@@ -238,10 +238,10 @@ void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
 }
 
 // SCEVSignExtends - Only allow the creation of one SCEVSignExtendExpr for any
-// particular input. Don't use a SCEVHandle here, or else the object will never
+// particular input. Don't use a const SCEV* here, or else the object will never
 // be deleted!
 
-SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEVHandle &op, const Type *ty,
+SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV* op, const Type *ty,
                                        const ScalarEvolution* p)
   : SCEVCastExpr(scSignExtend, op, ty, p) {
   assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
@@ -254,7 +254,7 @@ void SCEVSignExtendExpr::print(raw_ostream &OS) const {
 }
 
 // SCEVCommExprs - Only allow the creation of one SCEVCommutativeExpr for any
-// particular input. Don't use a SCEVHandle here, or else the object will never
+// particular input. Don't use a const SCEV* here, or else the object will never
 // be deleted!
 
 void SCEVCommutativeExpr::print(raw_ostream &OS) const {
@@ -266,15 +266,15 @@ void SCEVCommutativeExpr::print(raw_ostream &OS) const {
   OS << ")";
 }
 
-SCEVHandle SCEVCommutativeExpr::
-replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym,
-                                  const SCEVHandle &Conc,
+const SCEV* SCEVCommutativeExpr::
+replaceSymbolicValuesWithConcrete(const SCEV* Sym,
+                                  const SCEV* Conc,
                                   ScalarEvolution &SE) const {
   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
-    SCEVHandle H =
+    const SCEV* H =
       getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
     if (H != getOperand(i)) {
-      SmallVector<SCEVHandle, 8> NewOps;
+      SmallVector<const SCEV*, 8> NewOps;
       NewOps.reserve(getNumOperands());
       for (unsigned j = 0; j != i; ++j)
         NewOps.push_back(getOperand(j));
@@ -308,7 +308,7 @@ bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
 
 // SCEVUDivs - Only allow the creation of one SCEVUDivExpr for any particular
-// input. Don't use a SCEVHandle here, or else the object will never be
+// input. Don't use a const SCEV* here, or else the object will never be
 // deleted!
 
 bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
@@ -329,18 +329,18 @@ const Type *SCEVUDivExpr::getType() const {
 }
 
 // SCEVAddRecExprs - Only allow the creation of one SCEVAddRecExpr for any
-// particular input. Don't use a SCEVHandle here, or else the object will never
+// particular input. Don't use a const SCEV* here, or else the object will never
 // be deleted!
 
-SCEVHandle SCEVAddRecExpr::
-replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym,
-                                  const SCEVHandle &Conc,
+const SCEV* SCEVAddRecExpr::
+replaceSymbolicValuesWithConcrete(const SCEV* Sym,
+                                  const SCEV* Conc,
                                   ScalarEvolution &SE) const {
   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
-    SCEVHandle H =
+    const SCEV* H =
       getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
     if (H != getOperand(i)) {
-      SmallVector<SCEVHandle, 8> NewOps;
+      SmallVector<const SCEV*, 8> NewOps;
       NewOps.reserve(getNumOperands());
       for (unsigned j = 0; j != i; ++j)
         NewOps.push_back(getOperand(j));
@@ -374,7 +374,7 @@ void SCEVAddRecExpr::print(raw_ostream &OS) const {
 }
 
 // SCEVUnknowns - Only allow the creation of one SCEVUnknown for any particular
-// value. Don't use a SCEVHandle here, or else the object will never be
+// value. Don't use a const SCEV* here, or else the object will never be
 // deleted!
 
 bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
@@ -531,7 +531,7 @@ namespace {
 /// this to depend on where the addresses of various SCEV objects happened to
 /// land in memory.
 ///
-static void GroupByComplexity(SmallVectorImpl<SCEVHandle> &Ops,
+static void GroupByComplexity(SmallVectorImpl<const SCEV*> &Ops,
                               LoopInfo *LI) {
   if (Ops.size() < 2) return;  // Noop
   if (Ops.size() == 2) {
@@ -574,7 +574,7 @@ static void GroupByComplexity(SmallVectorImpl<SCEVHandle> &Ops,
 
 /// BinomialCoefficient - Compute BC(It, K). The result has width W.
 /// Assume, K > 0.
-static SCEVHandle BinomialCoefficient(SCEVHandle It, unsigned K,
+static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
   // Handle the simplest case efficiently.
@@ -667,15 +667,15 @@ static SCEVHandle BinomialCoefficient(SCEVHandle It, unsigned K,
 
   // Calculate the product, at width T+W
   const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
-  SCEVHandle Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
+  const SCEV* Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
   for (unsigned i = 1; i != K; ++i) {
-    SCEVHandle S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
+    const SCEV* S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
     Dividend = SE.getMulExpr(Dividend,
                              SE.getTruncateOrZeroExtend(S, CalculationTy));
   }
 
   // Divide by 2^T
-  SCEVHandle DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
+  const SCEV* DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
 
   // Truncate the result, and divide by K! / 2^T.
 
@@ -692,14 +692,14 @@ static SCEVHandle BinomialCoefficient(SCEVHandle It, unsigned K,
 ///
 /// where BC(It, k) stands for binomial coefficient.
 ///
-SCEVHandle SCEVAddRecExpr::evaluateAtIteration(SCEVHandle It,
+const SCEV* SCEVAddRecExpr::evaluateAtIteration(const SCEV* It,
                                                ScalarEvolution &SE) const {
-  SCEVHandle Result = getStart();
+  const SCEV* Result = getStart();
   for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
     // The computation is correct in the face of overflow provided that the
     // multiplication is performed _after_ the evaluation of the binomial
     // coefficient.
-    SCEVHandle Coeff = BinomialCoefficient(It, i, SE, getType());
+    const SCEV* Coeff = BinomialCoefficient(It, i, SE, getType());
     if (isa<SCEVCouldNotCompute>(Coeff))
       return Coeff;
 
@@ -712,7 +712,7 @@ SCEVHandle SCEVAddRecExpr::evaluateAtIteration(SCEVHandle It,
 // SCEV Expression folder implementations
 //===----------------------------------------------------------------------===//
 
-SCEVHandle ScalarEvolution::getTruncateExpr(const SCEVHandle &Op,
+const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op,
                                             const Type *Ty) {
   assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
          "This is not a truncating conversion!");
@@ -738,7 +738,7 @@ SCEVHandle ScalarEvolution::getTruncateExpr(const SCEVHandle &Op,
 
   // If the input value is a chrec scev, truncate the chrec's operands.
   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
-    SmallVector<SCEVHandle, 4> Operands;
+    SmallVector<const SCEV*, 4> Operands;
     for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
       Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
     return getAddRecExpr(Operands, AddRec->getLoop());
@@ -749,7 +749,7 @@ SCEVHandle ScalarEvolution::getTruncateExpr(const SCEVHandle &Op,
   return Result;
 }
 
-SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op,
+const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,
                                               const Type *Ty) {
   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
          "This is not an extending conversion!");
@@ -782,28 +782,28 @@ SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op,
       // in infinite recursion. In the later case, the analysis code will
       // cope with a conservative value, and it will take care to purge
       // that value once it has finished.
-      SCEVHandle MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
+      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
         // Manually compute the final value for AR, checking for
         // overflow.
-        SCEVHandle Start = AR->getStart();
-        SCEVHandle Step = AR->getStepRecurrence(*this);
+        const SCEV* Start = AR->getStart();
+        const SCEV* Step = AR->getStepRecurrence(*this);
 
         // Check whether the backedge-taken count can be losslessly casted to
         // the addrec's type. The count is always unsigned.
-        SCEVHandle CastedMaxBECount =
+        const SCEV* CastedMaxBECount =
           getTruncateOrZeroExtend(MaxBECount, Start->getType());
-        SCEVHandle RecastedMaxBECount =
+        const SCEV* RecastedMaxBECount =
           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
         if (MaxBECount == RecastedMaxBECount) {
           const Type *WideTy =
             IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
           // Check whether Start+Step*MaxBECount has no unsigned overflow.
-          SCEVHandle ZMul =
+          const SCEV* ZMul =
             getMulExpr(CastedMaxBECount,
                        getTruncateOrZeroExtend(Step, Start->getType()));
-          SCEVHandle Add = getAddExpr(Start, ZMul);
-          SCEVHandle OperandExtendedAdd =
+          const SCEV* Add = getAddExpr(Start, ZMul);
+          const SCEV* OperandExtendedAdd =
             getAddExpr(getZeroExtendExpr(Start, WideTy),
                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                   getZeroExtendExpr(Step, WideTy)));
@@ -815,7 +815,7 @@ SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op,
 
           // Similar to above, only this time treat the step value as signed.
           // This covers loops that count down.
-          SCEVHandle SMul =
+          const SCEV* SMul =
             getMulExpr(CastedMaxBECount,
                        getTruncateOrSignExtend(Step, Start->getType()));
           Add = getAddExpr(Start, SMul);
@@ -837,7 +837,7 @@ SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op,
   return Result;
 }
 
-SCEVHandle ScalarEvolution::getSignExtendExpr(const SCEVHandle &Op,
+const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op,
                                               const Type *Ty) {
   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
          "This is not an extending conversion!");
@@ -870,28 +870,28 @@ SCEVHandle ScalarEvolution::getSignExtendExpr(const SCEVHandle &Op,
       // in infinite recursion. In the later case, the analysis code will
       // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
-      SCEVHandle MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
+      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
         // Manually compute the final value for AR, checking for
         // overflow.
-        SCEVHandle Start = AR->getStart();
-        SCEVHandle Step = AR->getStepRecurrence(*this);
+        const SCEV* Start = AR->getStart();
+        const SCEV* Step = AR->getStepRecurrence(*this);
 
         // Check whether the backedge-taken count can be losslessly casted to
         // the addrec's type. The count is always unsigned.
-        SCEVHandle CastedMaxBECount =
+        const SCEV* CastedMaxBECount =
           getTruncateOrZeroExtend(MaxBECount, Start->getType());
-        SCEVHandle RecastedMaxBECount =
+        const SCEV* RecastedMaxBECount =
           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
         if (MaxBECount == RecastedMaxBECount) {
           const Type *WideTy =
             IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
           // Check whether Start+Step*MaxBECount has no signed overflow.
-          SCEVHandle SMul =
+          const SCEV* SMul =
             getMulExpr(CastedMaxBECount,
                        getTruncateOrSignExtend(Step, Start->getType()));
-          SCEVHandle Add = getAddExpr(Start, SMul);
-          SCEVHandle OperandExtendedAdd =
+          const SCEV* Add = getAddExpr(Start, SMul);
+          const SCEV* OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                   getSignExtendExpr(Step, WideTy)));
@@ -912,7 +912,7 @@ SCEVHandle ScalarEvolution::getSignExtendExpr(const SCEVHandle &Op,
 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
 /// unspecified bits out to the given type.
 ///
-SCEVHandle ScalarEvolution::getAnyExtendExpr(const SCEVHandle &Op,
+const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op,
                                              const Type *Ty) {
   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
          "This is not an extending conversion!");
@@ -927,19 +927,19 @@ SCEVHandle ScalarEvolution::getAnyExtendExpr(const SCEVHandle &Op,
 
   // Peel off a truncate cast.
   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
-    SCEVHandle NewOp = T->getOperand();
+    const SCEV* NewOp = T->getOperand();
     if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
       return getAnyExtendExpr(NewOp, Ty);
     return getTruncateOrNoop(NewOp, Ty);
   }
 
   // Next try a zext cast. If the cast is folded, use it.
-  SCEVHandle ZExt = getZeroExtendExpr(Op, Ty);
+  const SCEV* ZExt = getZeroExtendExpr(Op, Ty);
   if (!isa<SCEVZeroExtendExpr>(ZExt))
     return ZExt;
 
   // Next try a sext cast. If the cast is folded, use it.
-  SCEVHandle SExt = getSignExtendExpr(Op, Ty);
+  const SCEV* SExt = getSignExtendExpr(Op, Ty);
   if (!isa<SCEVSignExtendExpr>(SExt))
     return SExt;
 
@@ -977,10 +977,10 @@ SCEVHandle ScalarEvolution::getAnyExtendExpr(const SCEVHandle &Op,
 /// is also used as a check to avoid infinite recursion.
 ///
 static bool
-CollectAddOperandsWithScales(DenseMap<SCEVHandle, APInt> &M,
-                             SmallVector<SCEVHandle, 8> &NewOps,
+CollectAddOperandsWithScales(DenseMap<const SCEV*, APInt> &M,
+                             SmallVector<const SCEV*, 8> &NewOps,
                              APInt &AccumulatedConstant,
-                             const SmallVectorImpl<SCEVHandle> &Ops,
+                             const SmallVectorImpl<const SCEV*> &Ops,
                              const APInt &Scale,
                              ScalarEvolution &SE) {
   bool Interesting = false;
@@ -1001,9 +1001,9 @@ CollectAddOperandsWithScales(DenseMap<SCEVHandle, APInt> &M,
       } else {
         // A multiplication of a constant with some other value. Update
         // the map.
-        SmallVector<SCEVHandle, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
-        SCEVHandle Key = SE.getMulExpr(MulOps);
-        std::pair<DenseMap<SCEVHandle, APInt>::iterator, bool> Pair =
+        SmallVector<const SCEV*, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
+        const SCEV* Key = SE.getMulExpr(MulOps);
+        std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
           M.insert(std::make_pair(Key, APInt()));
         if (Pair.second) {
           Pair.first->second = NewScale;
@@ -1022,7 +1022,7 @@ CollectAddOperandsWithScales(DenseMap<SCEVHandle, APInt> &M,
       AccumulatedConstant += Scale * C->getValue()->getValue();
     } else {
       // An ordinary operand. Update the map.
-      std::pair<DenseMap<SCEVHandle, APInt>::iterator, bool> Pair =
+      std::pair<DenseMap<const SCEV*, APInt>::iterator, bool> Pair =
         M.insert(std::make_pair(Ops[i], APInt()));
       if (Pair.second) {
         Pair.first->second = Scale;
@@ -1049,7 +1049,7 @@ namespace {
 
 /// getAddExpr - Get a canonical add expression, or something simpler if
 /// possible.
-SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
+const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV*> &Ops) {
   assert(!Ops.empty() && "Cannot get empty add!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
@@ -1093,8 +1093,8 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
     if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
       // Found a match, merge the two values into a multiply, and add any
       // remaining values to the result.
-      SCEVHandle Two = getIntegerSCEV(2, Ty);
-      SCEVHandle Mul = getMulExpr(Ops[i], Two);
+      const SCEV* Two = getIntegerSCEV(2, Ty);
+      const SCEV* Mul = getMulExpr(Ops[i], Two);
       if (Ops.size() == 2)
         return Mul;
       Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
@@ -1110,7 +1110,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
     const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
     const Type *DstType = Trunc->getType();
     const Type *SrcType = Trunc->getOperand()->getType();
-    SmallVector<SCEVHandle, 8> LargeOps;
+    SmallVector<const SCEV*, 8> LargeOps;
     bool Ok = true;
     // Check all the operands to see if they can be represented in the
     // source type of the truncate.
@@ -1126,7 +1126,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
         // is much more likely to be foldable here.
         LargeOps.push_back(getSignExtendExpr(C, SrcType));
       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
-        SmallVector<SCEVHandle, 8> LargeMulOps;
+        SmallVector<const SCEV*, 8> LargeMulOps;
         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
           if (const SCEVTruncateExpr *T =
                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
@@ -1154,7 +1154,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
     }
     if (Ok) {
       // Evaluate the expression in the larger type.
-      SCEVHandle Fold = getAddExpr(LargeOps);
+      const SCEV* Fold = getAddExpr(LargeOps);
       // If it folds to something simple, use it. Otherwise, don't.
       if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
         return getTruncateExpr(Fold, DstType);
@@ -1191,23 +1191,23 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
   // operands multiplied by constant values.
   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
     uint64_t BitWidth = getTypeSizeInBits(Ty);
-    DenseMap<SCEVHandle, APInt> M;
-    SmallVector<SCEVHandle, 8> NewOps;
+    DenseMap<const SCEV*, APInt> M;
+    SmallVector<const SCEV*, 8> NewOps;
     APInt AccumulatedConstant(BitWidth, 0);
     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                      Ops, APInt(BitWidth, 1), *this)) {
       // Some interesting folding opportunity is present, so its worthwhile to
       // re-generate the operands list. Group the operands by constant scale,
       // to avoid multiplying by the same constant scale multiple times.
-      std::map<APInt, SmallVector<SCEVHandle, 4>, APIntCompare> MulOpLists;
-      for (SmallVector<SCEVHandle, 8>::iterator I = NewOps.begin(),
+      std::map<APInt, SmallVector<const SCEV*, 4>, APIntCompare> MulOpLists;
+      for (SmallVector<const SCEV*, 8>::iterator I = NewOps.begin(),
            E = NewOps.end(); I != E; ++I)
         MulOpLists[M.find(*I)->second].push_back(*I);
       // Re-generate the operands list.
       Ops.clear();
       if (AccumulatedConstant != 0)
         Ops.push_back(getConstant(AccumulatedConstant));
-      for (std::map<APInt, SmallVector<SCEVHandle, 4>, APIntCompare>::iterator I =
+      for (std::map<APInt, SmallVector<const SCEV*, 4>, APIntCompare>::iterator I =
            MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
         if (I->first != 0)
           Ops.push_back(getMulExpr(getConstant(I->first), getAddExpr(I->second)));
@@ -1229,17 +1229,17 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
         if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
-          SCEVHandle InnerMul = Mul->getOperand(MulOp == 0);
+          const SCEV* InnerMul = Mul->getOperand(MulOp == 0);
           if (Mul->getNumOperands() != 2) {
             // If the multiply has more than two operands, we must get the
             // Y*Z term.
-            SmallVector<SCEVHandle, 4> MulOps(Mul->op_begin(), Mul->op_end());
+            SmallVector<const SCEV*, 4> MulOps(Mul->op_begin(), Mul->op_end());
             MulOps.erase(MulOps.begin()+MulOp);
             InnerMul = getMulExpr(MulOps);
           }
-          SCEVHandle One = getIntegerSCEV(1, Ty);
-          SCEVHandle AddOne = getAddExpr(InnerMul, One);
-          SCEVHandle OuterMul = getMulExpr(AddOne, Ops[AddOp]);
+          const SCEV* One = getIntegerSCEV(1, Ty);
+          const SCEV* AddOne = getAddExpr(InnerMul, One);
+          const SCEV* OuterMul = getMulExpr(AddOne, Ops[AddOp]);
           if (Ops.size() == 2) return OuterMul;
           if (AddOp < Idx) {
             Ops.erase(Ops.begin()+AddOp);
@@ -1263,21 +1263,21 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
              OMulOp != e; ++OMulOp)
           if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
             // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
-            SCEVHandle InnerMul1 = Mul->getOperand(MulOp == 0);
+            const SCEV* InnerMul1 = Mul->getOperand(MulOp == 0);
             if (Mul->getNumOperands() != 2) {
-              SmallVector<SCEVHandle, 4> MulOps(Mul->op_begin(), Mul->op_end());
+              SmallVector<const SCEV*, 4> MulOps(Mul->op_begin(), Mul->op_end());
               MulOps.erase(MulOps.begin()+MulOp);
               InnerMul1 = getMulExpr(MulOps);
             }
-            SCEVHandle InnerMul2 = OtherMul->getOperand(OMulOp == 0);
+            const SCEV* InnerMul2 = OtherMul->getOperand(OMulOp == 0);
             if (OtherMul->getNumOperands() != 2) {
-              SmallVector<SCEVHandle, 4> MulOps(OtherMul->op_begin(),
+              SmallVector<const SCEV*, 4> MulOps(OtherMul->op_begin(),
                                                 OtherMul->op_end());
               MulOps.erase(MulOps.begin()+OMulOp);
               InnerMul2 = getMulExpr(MulOps);
             }
-            SCEVHandle InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
-            SCEVHandle OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
+            const SCEV* InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
+            const SCEV* OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
             if (Ops.size() == 2) return OuterMul;
             Ops.erase(Ops.begin()+Idx);
             Ops.erase(Ops.begin()+OtherMulIdx-1);
@@ -1298,7 +1298,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
     // Scan all of the other operands to this add and add them to the vector if
     // they are loop invariant w.r.t. the recurrence.
-    SmallVector<SCEVHandle, 8> LIOps;
+    SmallVector<const SCEV*, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
       if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
@@ -1312,11 +1312,11 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
       // NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
       LIOps.push_back(AddRec->getStart());
 
-      SmallVector<SCEVHandle, 4> AddRecOps(AddRec->op_begin(),
+      SmallVector<const SCEV*, 4> AddRecOps(AddRec->op_begin(),
                                            AddRec->op_end());
       AddRecOps[0] = getAddExpr(LIOps);
 
-      SCEVHandle NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
+      const SCEV* NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
       // If all of the other operands were loop invariant, we are done.
       if (Ops.size() == 1) return NewRec;
@@ -1338,7 +1338,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
         const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
         if (AddRec->getLoop() == OtherAddRec->getLoop()) {
           // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
-          SmallVector<SCEVHandle, 4> NewOps(AddRec->op_begin(), AddRec->op_end());
+          SmallVector<const SCEV*, 4> NewOps(AddRec->op_begin(), AddRec->op_end());
           for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
             if (i >= NewOps.size()) {
               NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
@@ -1347,7 +1347,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
             }
             NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
           }
-          SCEVHandle NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
+          const SCEV* NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
 
           if (Ops.size() == 2) return NewAddRec;
 
@@ -1374,7 +1374,7 @@ SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
 
 /// getMulExpr - Get a canonical multiply expression, or something simpler if
 /// possible.
-SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVHandle> &Ops) {
+const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV*> &Ops) {
   assert(!Ops.empty() && "Cannot get empty mul!");
 #ifndef NDEBUG
   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
@@ -1455,7 +1455,7 @@ SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVHandle> &Ops) {
   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
     // Scan all of the other operands to this mul and add them to the vector if
     // they are loop invariant w.r.t. the recurrence.
-    SmallVector<SCEVHandle, 8> LIOps;
+    SmallVector<const SCEV*, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
       if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
@@ -1467,7 +1467,7 @@ SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVHandle> &Ops) {
     // If we found some loop invariants, fold them into the recurrence.
     if (!LIOps.empty()) {
       //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
-      SmallVector<SCEVHandle, 4> NewOps;
+      SmallVector<const SCEV*, 4> NewOps;
       NewOps.reserve(AddRec->getNumOperands());
       if (LIOps.size() == 1) {
         const SCEV *Scale = LIOps[0];
@@ -1475,13 +1475,13 @@ SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVHandle> &Ops) {
           NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
       } else {
         for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
-          SmallVector<SCEVHandle, 4> MulOps(LIOps.begin(), LIOps.end());
+          SmallVector<const SCEV*, 4> MulOps(LIOps.begin(), LIOps.end());
           MulOps.push_back(AddRec->getOperand(i));
           NewOps.push_back(getMulExpr(MulOps));
         }
       }
 
-      SCEVHandle NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
+      const SCEV* NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
 
       // If all of the other operands were loop invariant, we are done.
       if (Ops.size() == 1) return NewRec;
@@ -1505,14 +1505,14 @@ SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVHandle> &Ops) {
         if (AddRec->getLoop() == OtherAddRec->getLoop()) {
           // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
           const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
-          SCEVHandle NewStart = getMulExpr(F->getStart(),
+          const SCEV* NewStart = getMulExpr(F->getStart(),
                                            G->getStart());
-          SCEVHandle B = F->getStepRecurrence(*this);
-          SCEVHandle D = G->getStepRecurrence(*this);
-          SCEVHandle NewStep = getAddExpr(getMulExpr(F, D),
+          const SCEV* B = F->getStepRecurrence(*this);
+          const SCEV* D = G->getStepRecurrence(*this);
+          const SCEV* NewStep = getAddExpr(getMulExpr(F, D),
                                           getMulExpr(G, B),
                                           getMulExpr(B, D));
-          SCEVHandle NewAddRec = getAddRecExpr(NewStart, NewStep,
+          const SCEV* NewAddRec = getAddRecExpr(NewStart, NewStep,
                                                F->getLoop());
           if (Ops.size() == 2) return NewAddRec;
 
@@ -1539,8 +1539,8 @@ SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVHandle> &Ops) {
 
 /// getUDivExpr - Get a canonical multiply expression, or something simpler if
 /// possible.
-SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS,
-                                        const SCEVHandle &RHS) {
+const SCEV* ScalarEvolution::getUDivExpr(const SCEV* LHS,
+                                         const SCEV* RHS) {
   assert(getEffectiveSCEVType(LHS->getType()) ==
          getEffectiveSCEVType(RHS->getType()) &&
          "SCEVUDivExpr operand types don't match!");
@@ -1573,24 +1573,24 @@ SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS,
               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                             getZeroExtendExpr(Step, ExtTy),
                             AR->getLoop())) {
-          SmallVector<SCEVHandle, 4> Operands;
+          SmallVector<const SCEV*, 4> Operands;
           for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
             Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
           return getAddRecExpr(Operands, AR->getLoop());
         }
       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
       if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
-        SmallVector<SCEVHandle, 4> Operands;
+        SmallVector<const SCEV*, 4> Operands;
         for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
           Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
         if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
           // Find an operand that's safely divisible.
           for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
-            SCEVHandle Op = M->getOperand(i);
-            SCEVHandle Div = getUDivExpr(Op, RHSC);
+            const SCEV* Op = M->getOperand(i);
+            const SCEV* Div = getUDivExpr(Op, RHSC);
             if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
-              const SmallVectorImpl<SCEVHandle> &MOperands = M->getOperands();
-              Operands = SmallVector<SCEVHandle, 4>(MOperands.begin(),
+              const SmallVectorImpl<const SCEV*> &MOperands = M->getOperands();
+              Operands = SmallVector<const SCEV*, 4>(MOperands.begin(),
                                                     MOperands.end());
               Operands[i] = Div;
               return getMulExpr(Operands);
@@ -1599,13 +1599,13 @@ SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS,
       }
       // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
-        SmallVector<SCEVHandle, 4> Operands;
+        SmallVector<const SCEV*, 4> Operands;
         for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
           Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
         if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
           Operands.clear();
           for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
-            SCEVHandle Op = getUDivExpr(A->getOperand(i), RHS);
+            const SCEV* Op = getUDivExpr(A->getOperand(i), RHS);
             if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
               break;
             Operands.push_back(Op);
@@ -1631,9 +1631,9 @@ SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS,
 
 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
 /// Simplify the expression as much as possible.
-SCEVHandle ScalarEvolution::getAddRecExpr(const SCEVHandle &Start,
-                                          const SCEVHandle &Step, const Loop *L) {
-  SmallVector<SCEVHandle, 4> Operands;
+const SCEV* ScalarEvolution::getAddRecExpr(const SCEV* Start,
+                                           const SCEV* Step, const Loop *L) {
+  SmallVector<const SCEV*, 4> Operands;
   Operands.push_back(Start);
   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
     if (StepChrec->getLoop() == L) {
@@ -1648,7 +1648,7 @@ SCEVHandle ScalarEvolution::getAddRecExpr(const SCEVHandle &Start,
 
 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
 /// Simplify the expression as much as possible.
-SCEVHandle ScalarEvolution::getAddRecExpr(SmallVectorImpl<SCEVHandle> &Operands,
+const SCEV* ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV*> &Operands,
                                           const Loop *L) {
   if (Operands.size() == 1) return Operands[0];
 #ifndef NDEBUG
@@ -1667,9 +1667,8 @@ SCEVHandle ScalarEvolution::getAddRecExpr(SmallVectorImpl<SCEVHandle> &Operands,
   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
     const Loop* NestedLoop = NestedAR->getLoop();
     if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
-      SmallVector<SCEVHandle, 4> NestedOperands(NestedAR->op_begin(),
+      SmallVector<const SCEV*, 4> NestedOperands(NestedAR->op_begin(),
                                                 NestedAR->op_end());
-      SCEVHandle NestedARHandle(NestedAR);
       Operands[0] = NestedAR->getStart();
       NestedOperands[0] = getAddRecExpr(Operands, L);
       return getAddRecExpr(NestedOperands, NestedLoop);
@@ -1682,16 +1681,16 @@ SCEVHandle ScalarEvolution::getAddRecExpr(SmallVectorImpl<SCEVHandle> &Operands,
   return Result;
 }
 
-SCEVHandle ScalarEvolution::getSMaxExpr(const SCEVHandle &LHS,
-                                        const SCEVHandle &RHS) {
-  SmallVector<SCEVHandle, 2> Ops;
+const SCEV* ScalarEvolution::getSMaxExpr(const SCEV* LHS,
+                                         const SCEV* RHS) {
+  SmallVector<const SCEV*, 2> Ops;
   Ops.push_back(LHS);
   Ops.push_back(RHS);
   return getSMaxExpr(Ops);
 }
 
-SCEVHandle
-ScalarEvolution::getSMaxExpr(SmallVectorImpl<SCEVHandle> &Ops) {
+const SCEV*
+ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV*> &Ops) {
   assert(!Ops.empty() && "Cannot get empty smax!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
@@ -1769,16 +1768,16 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<SCEVHandle> &Ops) {
   return Result;
 }
 
-SCEVHandle ScalarEvolution::getUMaxExpr(const SCEVHandle &LHS,
-                                        const SCEVHandle &RHS) {
-  SmallVector<SCEVHandle, 2> Ops;
+const SCEV* ScalarEvolution::getUMaxExpr(const SCEV* LHS,
+                                         const SCEV* RHS) {
+  SmallVector<const SCEV*, 2> Ops;
   Ops.push_back(LHS);
   Ops.push_back(RHS);
   return getUMaxExpr(Ops);
 }
 
-SCEVHandle
-ScalarEvolution::getUMaxExpr(SmallVectorImpl<SCEVHandle> &Ops) {
+const SCEV*
+ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV*> &Ops) {
   assert(!Ops.empty() && "Cannot get empty umax!");