author    Dan Gohman <gohman@apple.com>    2009-04-16 03:18:22 +0000
committer Dan Gohman <gohman@apple.com>    2009-04-16 03:18:22 +0000
commit    2d1be87ee40a4a0241d94448173879d9df2bc5b3 (patch)
tree      fa16eff022c8808a5eb6aedb159ea653af0faae9 /lib/Analysis/ScalarEvolutionExpander.cpp
parent    9efac568f08de669c8e0003b33b80998cedaf8b6 (diff)
Expand GEPs in ScalarEvolution expressions. SCEV expressions can now
have pointer types, though in contrast to C pointer types, SCEV addition is
never implicitly scaled. This not only eliminates the need for special code
like IndVars' EliminatePointerRecurrence and LSR's own GEP expansion code, it
also does a better job because it lets the normal optimizations handle pointer
expressions just like integer expressions.

Also, since LLVM IR GEPs can't directly index into multi-dimensional VLAs,
moving the GEP analysis out of client code and into the SCEV framework makes
it easier for clients to handle multi-dimensional VLAs the same way as other
arrays.

Some existing regression tests show improved optimization.
test/CodeGen/ARM/2007-03-13-InstrSched.ll in particular improved to the point
where if-conversion started kicking in; I turned it off for this test to
preserve the intent of the test.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@69258 91177308-0d34-0410-b5e6-96231b3b80d8
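A note on the "never implicitly scaled" point: with pointer-typed SCEVs, a
client that wants the address of A[i] spells the element-size multiply out
explicitly; the addition itself is a plain byte-wise add. The following is an
illustrative sketch only, not code from this patch; the variable names and the
hard-coded 4-byte element size are assumptions.

    // Sketch: building &A[i] from SCEVs, with the scaling made explicit.
    SCEVHandle Base    = SE.getSCEV(A);                          // pointer-typed SCEV
    SCEVHandle Index   = SE.getSCEV(I);                          // integer index
    SCEVHandle EltSize = SE.getIntegerSCEV(4, Index->getType()); // assumed 4-byte elements
    SCEVHandle Offset  = SE.getMulExpr(Index, EltSize);          // explicit scaling
    SCEVHandle Addr    = SE.getAddExpr(Base, Offset);            // no implicit scaling here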
Diffstat (limited to 'lib/Analysis/ScalarEvolutionExpander.cpp')
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp  59
1 file changed, 47 insertions, 12 deletions
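Throughout the hunks below, the expander applies one recurring pattern in
visitAddExpr, visitMulExpr, and visitUDivExpr: when a SCEV has pointer type,
the arithmetic is emitted in the target's pointer-sized integer type, and each
operand is cast to that type first. A condensed sketch of the pattern (a
paraphrase of the hunks that follow, not additional code from the patch):

    // Normalize pointer-typed SCEVs to the pointer-width integer type,
    // then do the arithmetic on operands cast to that type.
    const Type *Ty = S->getType();
    if (isa<PointerType>(Ty))
      Ty = TD.getIntPtrType();                // integer type as wide as a pointer
    // For each operand:
    Value *W = expand(S->getOperand(i));
    W = InsertCastOfTo(CastInst::getCastOpcode(W, false, Ty, false), W, Ty);
    // ...then combine the casted values with integer Add/Mul/UDiv.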
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 30df087cef..d91061b2b3 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -15,12 +15,17 @@
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Target/TargetData.h"
using namespace llvm;
/// InsertCastOfTo - Insert a cast of V to the specified type, doing what
/// we can to share the casts.
Value *SCEVExpander::InsertCastOfTo(Instruction::CastOps opcode, Value *V,
const Type *Ty) {
+ // Short-circuit unnecessary bitcasts.
+ if (opcode == Instruction::BitCast && V->getType() == Ty)
+ return V;
+
// FIXME: keep track of the cast instruction.
if (Constant *C = dyn_cast<Constant>(V))
return ConstantExpr::getCast(opcode, C, Ty);
@@ -100,16 +105,23 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, Value *LHS,
}
Value *SCEVExpander::visitAddExpr(SCEVAddExpr *S) {
+ const Type *Ty = S->getType();
+ if (isa<PointerType>(Ty)) Ty = TD.getIntPtrType();
Value *V = expand(S->getOperand(S->getNumOperands()-1));
+ V = InsertCastOfTo(CastInst::getCastOpcode(V, false, Ty, false), V, Ty);
// Emit a bunch of add instructions
- for (int i = S->getNumOperands()-2; i >= 0; --i)
- V = InsertBinop(Instruction::Add, V, expand(S->getOperand(i)),
- InsertPt);
+ for (int i = S->getNumOperands()-2; i >= 0; --i) {
+ Value *W = expand(S->getOperand(i));
+ W = InsertCastOfTo(CastInst::getCastOpcode(W, false, Ty, false), W, Ty);
+ V = InsertBinop(Instruction::Add, V, W, InsertPt);
+ }
return V;
}
Value *SCEVExpander::visitMulExpr(SCEVMulExpr *S) {
+ const Type *Ty = S->getType();
+ if (isa<PointerType>(Ty)) Ty = TD.getIntPtrType();
int FirstOp = 0; // Set if we should emit a subtract.
if (SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(0)))
if (SC->getValue()->isAllOnesValue())
@@ -117,29 +129,37 @@ Value *SCEVExpander::visitMulExpr(SCEVMulExpr *S) {
int i = S->getNumOperands()-2;
Value *V = expand(S->getOperand(i+1));
+ V = InsertCastOfTo(CastInst::getCastOpcode(V, false, Ty, false), V, Ty);
// Emit a bunch of multiply instructions
- for (; i >= FirstOp; --i)
- V = InsertBinop(Instruction::Mul, V, expand(S->getOperand(i)),
- InsertPt);
+ for (; i >= FirstOp; --i) {
+ Value *W = expand(S->getOperand(i));
+ W = InsertCastOfTo(CastInst::getCastOpcode(W, false, Ty, false), W, Ty);
+ V = InsertBinop(Instruction::Mul, V, W, InsertPt);
+ }
+
// -1 * ... ---> 0 - ...
if (FirstOp == 1)
- V = InsertBinop(Instruction::Sub, Constant::getNullValue(V->getType()), V,
- InsertPt);
+ V = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), V, InsertPt);
return V;
}
Value *SCEVExpander::visitUDivExpr(SCEVUDivExpr *S) {
+ const Type *Ty = S->getType();
+ if (isa<PointerType>(Ty)) Ty = TD.getIntPtrType();
+
Value *LHS = expand(S->getLHS());
+ LHS = InsertCastOfTo(CastInst::getCastOpcode(LHS, false, Ty, false), LHS, Ty);
if (SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
const APInt &RHS = SC->getValue()->getValue();
if (RHS.isPowerOf2())
return InsertBinop(Instruction::LShr, LHS,
- ConstantInt::get(S->getType(), RHS.logBase2()),
+ ConstantInt::get(Ty, RHS.logBase2()),
InsertPt);
}
Value *RHS = expand(S->getRHS());
+ RHS = InsertCastOfTo(CastInst::getCastOpcode(RHS, false, Ty, false), RHS, Ty);
return InsertBinop(Instruction::UDiv, LHS, RHS, InsertPt);
}
@@ -147,14 +167,19 @@ Value *SCEVExpander::visitAddRecExpr(SCEVAddRecExpr *S) {
const Type *Ty = S->getType();
const Loop *L = S->getLoop();
// We cannot yet do fp recurrences, e.g. the xform of {X,+,F} --> X+{0,+,F}
- assert(Ty->isInteger() && "Cannot expand fp recurrences yet!");
+ assert((Ty->isInteger() || isa<PointerType>(Ty)) &&
+ "Cannot expand fp recurrences yet!");
// {X,+,F} --> X + {0,+,F}
if (!S->getStart()->isZero()) {
Value *Start = expand(S->getStart());
+ if (isa<PointerType>(Start->getType()))
+ Start = InsertCastOfTo(Instruction::PtrToInt, Start, TD.getIntPtrType());
std::vector<SCEVHandle> NewOps(S->op_begin(), S->op_end());
NewOps[0] = SE.getIntegerSCEV(0, Ty);
Value *Rest = expand(SE.getAddRecExpr(NewOps, L));
+ if (isa<PointerType>(Rest->getType()))
+ Rest = InsertCastOfTo(Instruction::PtrToInt, Rest, TD.getIntPtrType());
// FIXME: look for an existing add to use.
return InsertBinop(Instruction::Add, Rest, Start, InsertPt);
@@ -242,16 +267,22 @@ Value *SCEVExpander::visitAddRecExpr(SCEVAddRecExpr *S) {
Value *SCEVExpander::visitTruncateExpr(SCEVTruncateExpr *S) {
Value *V = expand(S->getOperand());
+ if (isa<PointerType>(V->getType()))
+ V = InsertCastOfTo(Instruction::PtrToInt, V, TD.getIntPtrType());
return CastInst::CreateTruncOrBitCast(V, S->getType(), "tmp.", InsertPt);
}
Value *SCEVExpander::visitZeroExtendExpr(SCEVZeroExtendExpr *S) {
Value *V = expand(S->getOperand());
+ if (isa<PointerType>(V->getType()))
+ V = InsertCastOfTo(Instruction::PtrToInt, V, TD.getIntPtrType());
return CastInst::CreateZExtOrBitCast(V, S->getType(), "tmp.", InsertPt);
}
Value *SCEVExpander::visitSignExtendExpr(SCEVSignExtendExpr *S) {
Value *V = expand(S->getOperand());
+ if (isa<PointerType>(V->getType()))
+ V = InsertCastOfTo(Instruction::PtrToInt, V, TD.getIntPtrType());
return CastInst::CreateSExtOrBitCast(V, S->getType(), "tmp.", InsertPt);
}
@@ -275,10 +306,14 @@ Value *SCEVExpander::visitUMaxExpr(SCEVUMaxExpr *S) {
return LHS;
}
-Value *SCEVExpander::expandCodeFor(SCEVHandle SH, Instruction *IP) {
+Value *SCEVExpander::expandCodeFor(SCEVHandle SH, const Type *Ty,
+ Instruction *IP) {
// Expand the code for this SCEV.
+ assert(TD.getTypeSizeInBits(Ty) == TD.getTypeSizeInBits(SH->getType()) &&
+ "non-trivial casts should be done with the SCEVs directly!");
this->InsertPt = IP;
- return expand(SH);
+ Value *V = expand(SH);
+ return InsertCastOfTo(CastInst::getCastOpcode(V, false, Ty, false), V, Ty);
}
Value *SCEVExpander::expand(SCEV *S) {