Diffstat (limited to 'lib/Transforms/Scalar')
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp        |  10
-rw-r--r--  lib/Transforms/Scalar/ConstantProp.cpp          |   2
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                   |  12
-rw-r--r--  lib/Transforms/Scalar/GVNPRE.cpp                |   7
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp        |  12
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp  |   6
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp         |  20
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp                  |  10
-rw-r--r--  lib/Transforms/Scalar/LoopIndexSplit.cpp        |  12
-rw-r--r--  lib/Transforms/Scalar/LoopRotation.cpp          |   2
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp    |  28
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp          |  22
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp       |  21
-rw-r--r--  lib/Transforms/Scalar/PredicateSimplifier.cpp   |  22
-rw-r--r--  lib/Transforms/Scalar/Reassociate.cpp           |  39
-rw-r--r--  lib/Transforms/Scalar/Reg2Mem.cpp               |   2
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp                  |  12
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp  | 113
-rw-r--r--  lib/Transforms/Scalar/SimplifyCFGPass.cpp       |   6
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp      |   6
-rw-r--r--  lib/Transforms/Scalar/TailDuplication.cpp       |   4
21 files changed, 210 insertions(+), 158 deletions(-)
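
Every hunk below makes the same mechanical change: getContext() now returns an LLVMContext reference rather than a pointer, so each pass stops going through a stored Context pointer and instead binds a reference from a nearby Instruction, BasicBlock, or Function. The sketch below is editorial, not part of the commit; it mirrors the getPlusOne helper from the LoopIndexSplit hunk (the hypothetical name getPlusOneSketch is mine) using only calls that appear in the diff itself.

// Editorial sketch of the before/after shape, assuming the mid-2009 LLVM API
// shown in these hunks (LLVMContext::getConstantInt, Value::getContext).
static Value *getPlusOneSketch(Value *V, bool Sign, Instruction *InsertPt) {
  // Before: Constant *One = Context->getConstantInt(V->getType(), 1, Sign);
  // where Context was an LLVMContext* member or parameter.
  LLVMContext &Context = V->getContext();   // reference from an in-scope value
  Constant *One = Context.getConstantInt(V->getType(), 1, Sign);  // '.' not '->'
  return BinaryOperator::CreateAdd(V, One, "lsp", InsertPt);
}
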
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index e7cde0ad84..8c8d26164b 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -518,7 +518,7 @@ static bool OptimizeCmpExpression(CmpInst *CI) {
BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();
InsertedCmp =
- CmpInst::Create(*DefBB->getContext(), CI->getOpcode(),
+ CmpInst::Create(DefBB->getContext(), CI->getOpcode(),
CI->getPredicate(), CI->getOperand(0),
CI->getOperand(1), "", InsertPt);
MadeChange = true;
@@ -559,6 +559,8 @@ static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
const Type *AccessTy,
DenseMap<Value*,Value*> &SunkAddrs) {
+ LLVMContext &Context = MemoryInst->getContext();
+
// Figure out what addressing mode will be built up for this operation.
SmallVector<Instruction*, 16> AddrModeInsts;
ExtAddrMode AddrMode = AddressingModeMatcher::Match(Addr, AccessTy,MemoryInst,
@@ -615,7 +617,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt);
}
if (AddrMode.Scale != 1)
- V = BinaryOperator::CreateMul(V, Context->getConstantInt(IntPtrTy,
+ V = BinaryOperator::CreateMul(V, Context.getConstantInt(IntPtrTy,
AddrMode.Scale),
"sunkaddr", InsertPt);
Result = V;
@@ -647,7 +649,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// Add in the Base Offset if present.
if (AddrMode.BaseOffs) {
- Value *V = Context->getConstantInt(IntPtrTy, AddrMode.BaseOffs);
+ Value *V = Context.getConstantInt(IntPtrTy, AddrMode.BaseOffs);
if (Result)
Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
else
@@ -655,7 +657,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
}
if (Result == 0)
- SunkAddr = Context->getNullValue(Addr->getType());
+ SunkAddr = Context.getNullValue(Addr->getType());
else
SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",InsertPt);
}
diff --git a/lib/Transforms/Scalar/ConstantProp.cpp b/lib/Transforms/Scalar/ConstantProp.cpp
index d92ebdae66..2d9d2de153 100644
--- a/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/lib/Transforms/Scalar/ConstantProp.cpp
@@ -67,7 +67,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.erase(WorkList.begin()); // Get an element from the worklist...
if (!I->use_empty()) // Don't muck with dead instructions...
- if (Constant *C = ConstantFoldInstruction(I, Context)) {
+ if (Constant *C = ConstantFoldInstruction(I, F.getContext())) {
// Add all of the users of this instruction to the worklist, they might
// be constant propagatable now...
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index bdb571dddb..2a50103d56 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -797,7 +797,7 @@ Value *GVN::GetValueForBlock(BasicBlock *BB, Instruction* orig,
// If the block is unreachable, just return undef, since this path
// can't actually occur at runtime.
if (!DT->isReachableFromEntry(BB))
- return Phis[BB] = Context->getUndef(orig->getType());
+ return Phis[BB] = BB->getContext().getUndef(orig->getType());
if (BasicBlock *Pred = BB->getSinglePredecessor()) {
Value *ret = GetValueForBlock(Pred, orig, Phis);
@@ -985,7 +985,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
// Loading the allocation -> undef.
if (isa<AllocationInst>(DepInst)) {
ValuesPerBlock.push_back(std::make_pair(DepBB,
- Context->getUndef(LI->getType())));
+ DepBB->getContext().getUndef(LI->getType())));
continue;
}
@@ -1272,7 +1272,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example.
if (isa<AllocationInst>(DepInst)) {
- L->replaceAllUsesWith(Context->getUndef(L->getType()));
+ L->replaceAllUsesWith(DepInst->getContext().getUndef(L->getType()));
toErase.push_back(L);
NumGVNLoad++;
return true;
@@ -1384,9 +1384,9 @@ bool GVN::processInstruction(Instruction *I,
BasicBlock* falseSucc = BI->getSuccessor(1);
if (trueSucc->getSinglePredecessor())
- localAvail[trueSucc]->table[condVN] = Context->getTrue();
+ localAvail[trueSucc]->table[condVN] = trueSucc->getContext().getTrue();
if (falseSucc->getSinglePredecessor())
- localAvail[falseSucc]->table[condVN] = Context->getFalse();
+ localAvail[falseSucc]->table[condVN] = trueSucc->getContext().getFalse();
return false;
@@ -1628,7 +1628,7 @@ bool GVN::performPRE(Function& F) {
// will be available in the predecessor by the time we need them. Any
// that weren't original present will have been instantiated earlier
// in this loop.
- Instruction* PREInstr = CurInst->clone(*Context);
+ Instruction* PREInstr = CurInst->clone(CurInst->getContext());
bool success = true;
for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
Value *Op = PREInstr->getOperand(i);
diff --git a/lib/Transforms/Scalar/GVNPRE.cpp b/lib/Transforms/Scalar/GVNPRE.cpp
index 3f66131e5b..85c272d711 100644
--- a/lib/Transforms/Scalar/GVNPRE.cpp
+++ b/lib/Transforms/Scalar/GVNPRE.cpp
@@ -800,6 +800,8 @@ void GVNPRE::val_replace(ValueNumberedSet& s, Value* v) {
Value* GVNPRE::phi_translate(Value* V, BasicBlock* pred, BasicBlock* succ) {
if (V == 0)
return 0;
+
+ LLVMContext &Context = V->getContext();
// Unary Operations
if (CastInst* U = dyn_cast<CastInst>(V)) {
@@ -862,7 +864,7 @@ Value* GVNPRE::phi_translate(Value* V, BasicBlock* pred, BasicBlock* succ) {
newOp1, newOp2,
BO->getName()+".expr");
else if (CmpInst* C = dyn_cast<CmpInst>(U))
- newVal = CmpInst::Create(*Context, C->getOpcode(),
+ newVal = CmpInst::Create(Context, C->getOpcode(),
C->getPredicate(),
newOp1, newOp2,
C->getName()+".expr");
@@ -1594,6 +1596,7 @@ void GVNPRE::buildsets(Function& F) {
void GVNPRE::insertion_pre(Value* e, BasicBlock* BB,
DenseMap<BasicBlock*, Value*>& avail,
std::map<BasicBlock*, ValueNumberedSet>& new_sets) {
+ LLVMContext &Context = e->getContext();
for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
Value* e2 = avail[*PI];
if (!availableOut[*PI].test(VN.lookup(e2))) {
@@ -1680,7 +1683,7 @@ void GVNPRE::insertion_pre(Value* e, BasicBlock* BB,
BO->getName()+".gvnpre",
(*PI)->getTerminator());
else if (CmpInst* C = dyn_cast<CmpInst>(U))
- newVal = CmpInst::Create(*Context, C->getOpcode(),
+ newVal = CmpInst::Create(Context, C->getOpcode(),
C->getPredicate(), s1, s2,
C->getName()+".gvnpre",
(*PI)->getTerminator());
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 0ac58f1fea..66a786287f 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -292,7 +292,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L,
if (NumPreds != 1) {
// Clone the PHI and delete the original one. This lets IVUsers and
// any other maps purge the original user from their records.
- PHINode *NewPN = PN->clone(*Context);
+ PHINode *NewPN = PN->clone(PN->getContext());
NewPN->takeName(PN);
NewPN->insertBefore(PN);
PN->replaceAllUsesWith(NewPN);
@@ -713,21 +713,23 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
}
if (NewPred == CmpInst::BAD_ICMP_PREDICATE) return;
+ LLVMContext &Context = PH->getContext();
+
// Insert new integer induction variable.
PHINode *NewPHI = PHINode::Create(Type::Int32Ty,
PH->getName()+".int", PH);
- NewPHI->addIncoming(Context->getConstantInt(Type::Int32Ty, newInitValue),
+ NewPHI->addIncoming(Context.getConstantInt(Type::Int32Ty, newInitValue),
PH->getIncomingBlock(IncomingEdge));
Value *NewAdd = BinaryOperator::CreateAdd(NewPHI,
- Context->getConstantInt(Type::Int32Ty,
+ Context.getConstantInt(Type::Int32Ty,
newIncrValue),
Incr->getName()+".int", Incr);
NewPHI->addIncoming(NewAdd, PH->getIncomingBlock(BackEdge));
// The back edge is edge 1 of newPHI, whatever it may have been in the
// original PHI.
- ConstantInt *NewEV = Context->getConstantInt(Type::Int32Ty, intEV);
+ ConstantInt *NewEV = Context.getConstantInt(Type::Int32Ty, intEV);
Value *LHS = (EVIndex == 1 ? NewPHI->getIncomingValue(1) : NewEV);
Value *RHS = (EVIndex == 1 ? NewEV : NewPHI->getIncomingValue(1));
ICmpInst *NewEC = new ICmpInst(EC->getParent()->getTerminator(),
@@ -743,7 +745,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
RecursivelyDeleteTriviallyDeadInstructions(EC);
// Delete old, floating point, increment instruction.
- Incr->replaceAllUsesWith(Context->getUndef(Incr->getType()));
+ Incr->replaceAllUsesWith(Context.getUndef(Incr->getType()));
RecursivelyDeleteTriviallyDeadInstructions(Incr);
// Replace floating induction variable, if it isn't already deleted.
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index f52bb8626e..01fa68b2f0 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -85,7 +85,8 @@ namespace {
static char ID; // Pass identification, replacement for typeid
InstCombiner() : FunctionPass(&ID) {}
- LLVMContext *getContext() { return Context; }
+ LLVMContext *Context;
+ LLVMContext *getContext() const { return Context; }
/// AddToWorkList - Add the specified instruction to the worklist if it
/// isn't already in it.
@@ -11557,7 +11558,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
if (GV->isConstant() && GV->hasDefinitiveInitializer())
if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE,
- Context))
+ *Context))
return ReplaceInstUsesWith(LI, V);
if (CE->getOperand(0)->isNullValue()) {
// Insert a new store to null instruction before the load to indicate
@@ -13082,6 +13083,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
bool InstCombiner::runOnFunction(Function &F) {
MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
+ Context = &F.getContext();
bool EverMadeChange = false;
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 1ce823aa5e..6b665cc900 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -435,7 +435,8 @@ bool JumpThreading::ProcessBranchOnDuplicateCond(BasicBlock *PredBB,
<< "' folding condition to '" << BranchDir << "': "
<< *BB->getTerminator();
++NumFolds;
- DestBI->setCondition(Context->getConstantInt(Type::Int1Ty, BranchDir));
+ DestBI->setCondition(BB->getContext().getConstantInt(Type::Int1Ty,
+ BranchDir));
ConstantFoldTerminator(BB);
return true;
}
@@ -564,7 +565,8 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// If the returned value is the load itself, replace with an undef. This can
// only happen in dead loops.
- if (AvailableVal == LI) AvailableVal = Context->getUndef(LI->getType());
+ if (AvailableVal == LI) AvailableVal =
+ AvailableVal->getContext().getUndef(LI->getType());
LI->replaceAllUsesWith(AvailableVal);
LI->eraseFromParent();
return true;
@@ -718,7 +720,7 @@ bool JumpThreading::ProcessJumpOnPHI(PHINode *PN) {
// Next, figure out which successor we are threading to.
BasicBlock *SuccBB;
if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()))
- SuccBB = BI->getSuccessor(PredCst == Context->getFalse());
+ SuccBB = BI->getSuccessor(PredCst == PredBB->getContext().getFalse());
else {
SwitchInst *SI = cast<SwitchInst>(BB->getTerminator());
SuccBB = SI->getSuccessor(SI->findCaseValue(PredCst));
@@ -756,7 +758,7 @@ bool JumpThreading::ProcessBranchOnLogical(Value *V, BasicBlock *BB,
// We can only do the simplification for phi nodes of 'false' with AND or
// 'true' with OR. See if we have any entries in the phi for this.
unsigned PredNo = ~0U;
- ConstantInt *PredCst = Context->getConstantInt(Type::Int1Ty, !isAnd);
+ ConstantInt *PredCst = V->getContext().getConstantInt(Type::Int1Ty, !isAnd);
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
if (PN->getIncomingValue(i) == PredCst) {
PredNo = i;
@@ -795,15 +797,15 @@ bool JumpThreading::ProcessBranchOnLogical(Value *V, BasicBlock *BB,
/// result can not be determined, a null pointer is returned.
static Constant *GetResultOfComparison(CmpInst::Predicate pred,
Value *LHS, Value *RHS,
- LLVMContext *Context) {
+ LLVMContext &Context) {
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return Context->getConstantExprCompare(pred, CLHS, CRHS);
+ return Context.getConstantExprCompare(pred, CLHS, CRHS);
if (LHS == RHS)
if (isa<IntegerType>(LHS->getType()) || isa<PointerType>(LHS->getType()))
return ICmpInst::isTrueWhenEqual(pred) ?
- Context->getTrue() : Context->getFalse();
+ Context.getTrue() : Context.getFalse();
return 0;
}
@@ -829,7 +831,7 @@ bool JumpThreading::ProcessBranchOnCompare(CmpInst *Cmp, BasicBlock *BB) {
PredVal = PN->getIncomingValue(i);
Constant *Res = GetResultOfComparison(Cmp->getPredicate(), PredVal,
- RHS, Context);
+ RHS, Cmp->getContext());
if (!Res) {
PredVal = 0;
continue;
@@ -931,7 +933,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB, BasicBlock *PredBB,
// Clone the non-phi instructions of BB into NewBB, keeping track of the
// mapping and using it to remap operands in the cloned instructions.
for (; !isa<TerminatorInst>(BI); ++BI) {
- Instruction *New = BI->clone(*Context);
+ Instruction *New = BI->clone(BI->getContext());
New->setName(BI->getNameStart());
NewBB->getInstList().push_back(New);
ValueMapping[BI] = New;
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index bdf7dee662..e8b543b74a 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -475,6 +475,8 @@ void LICM::sink(Instruction &I) {
++NumSunk;
Changed = true;
+ LLVMContext &Context = I.getContext();
+
// The case where there is only a single exit node of this loop is common
// enough that we handle it as a special (more efficient) case. It is more
// efficient to handle because there are no PHI nodes that need to be placed.
@@ -483,7 +485,7 @@ void LICM::sink(Instruction &I) {
// Instruction is not used, just delete it.
CurAST->deleteValue(&I);
if (!I.use_empty()) // If I has users in unreachable blocks, eliminate.
- I.replaceAllUsesWith(Context->getUndef(I.getType()));
+ I.replaceAllUsesWith(Context.getUndef(I.getType()));
I.eraseFromParent();
} else {
// Move the instruction to the start of the exit block, after any PHI
@@ -497,7 +499,7 @@ void LICM::sink(Instruction &I) {
// The instruction is actually dead if there ARE NO exit blocks.
CurAST->deleteValue(&I);
if (!I.use_empty()) // If I has users in unreachable blocks, eliminate.
- I.replaceAllUsesWith(Context->getUndef(I.getType()));
+ I.replaceAllUsesWith(Context.getUndef(I.getType()));
I.eraseFromParent();
} else {
// Otherwise, if we have multiple exits, use the PromoteMem2Reg function to
@@ -570,7 +572,7 @@ void LICM::sink(Instruction &I) {
ExitBlock->getInstList().insert(InsertPt, &I);
New = &I;
} else {
- New = I.clone(*Context);
+ New = I.clone(Context);
CurAST->copyValue(&I, New);
if (!I.getName().empty())
New->setName(I.getName()+".le");
@@ -768,7 +770,7 @@ void LICM::PromoteValuesInLoop() {
PromotedAllocas.reserve(PromotedValues.size());
for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i)
PromotedAllocas.push_back(PromotedValues[i].first);
- PromoteMemToReg(PromotedAllocas, *DT, *DF, Context, CurAST);
+ PromoteMemToReg(PromotedAllocas, *DT, *DF, Preheader->getContext(), CurAST);
}
/// FindPromotableValuesInLoop - Check the current loop for stores to definite
diff --git a/lib/Transforms/Scalar/LoopIndexSplit.cpp b/lib/Transforms/Scalar/LoopIndexSplit.cpp
index 4c6689d6b5..f20511ea28 100644
--- a/lib/Transforms/Scalar/LoopIndexSplit.cpp
+++ b/lib/Transforms/Scalar/LoopIndexSplit.cpp
@@ -294,15 +294,15 @@ static bool isUsedOutsideLoop(Value *V, Loop *L) {
// Return V+1
static Value *getPlusOne(Value *V, bool Sign, Instruction *InsertPt,
- LLVMContext *Context) {
- Constant *One = Context->getConstantInt(V->getType(), 1, Sign);
+ LLVMContext &Context) {
+ Constant *One = Context.getConstantInt(V->getType(), 1, Sign);
return BinaryOperator::CreateAdd(V, One, "lsp", InsertPt);
}
// Return V-1
static Value *getMinusOne(Value *V, bool Sign, Instruction *InsertPt,
- LLVMContext *Context) {
- Constant *One = Context->getConstantInt(V->getType(), 1, Sign);
+ LLVMContext &Context) {
+ Constant *One = Context.getConstantInt(V->getType(), 1, Sign);
return BinaryOperator::CreateSub(V, One, "lsp", InsertPt);
}
@@ -493,6 +493,8 @@ bool LoopIndexSplit::restrictLoopBound(ICmpInst &Op) {
EBR->setSuccessor(1, T);
}
+ LLVMContext &Context = Op.getContext();
+
// New upper and lower bounds.
Value *NLB = NULL;
Value *NUB = NULL;
@@ -879,6 +881,8 @@ bool LoopIndexSplit::splitLoop() {
BasicBlock *ExitingBlock = ExitCondition->getParent();
if (!cleanBlock(ExitingBlock)) return false;
+ LLVMContext &Context = Header->getContext();
+
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I) {
BranchInst *BR = dyn_cast<BranchInst>((*I)->getTerminator());
diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp
index 6d80365182..d2b20fa611 100644
--- a/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/lib/Transforms/Scalar/LoopRotation.cpp
@@ -238,7 +238,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
// This is not a PHI instruction. Insert its clone into original pre-header.
// If this instruction is using a value from same basic block then
// update it to use value from cloned instruction.
- Instruction *C = In->clone(*Context);
+ Instruction *C = In->clone(In->getContext());
C->setName(In->getName());
OrigPreHeader->getInstList().push_back(C);
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index a1781c9858..22717c21d4 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -1576,7 +1576,9 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV *const &Stride,
BasicBlock *LatchBlock = L->getLoopLatch();
Instruction *IVIncInsertPt = LatchBlock->getTerminator();
- Value *CommonBaseV = Context->getNullValue(ReplacedTy);
+ LLVMContext &Context = Preheader->getContext();
+
+ Value *CommonBaseV = Context.getNullValue(ReplacedTy);
const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
@@ -1859,6 +1861,8 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
if (!SC) return Cond;
+ LLVMContext &Context = Cond->getContext();
+
ICmpInst::Predicate Predicate = Cond->getPredicate();
int64_t CmpSSInt = SC->getValue()->getSExtValue();
unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType());
@@ -1942,7 +1946,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
NewCmpTy = NewCmpLHS->getType();
NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
- const Type *NewCmpIntTy = Context->getIntegerType(NewTyBits);
+ const Type *NewCmpIntTy = Context.getIntegerType(NewTyBits);
if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
// Check if it is possible to rewrite it using
// an iv / stride of a smaller integer type.
@@ -1987,10 +1991,10 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
NewStride = &IU->StrideOrder[i];
if (!isa<PointerType>(NewCmpTy))
- NewCmpRHS = Context->getConstantInt(NewCmpTy, NewCmpVal);
+ NewCmpRHS = Context.getConstantInt(NewCmpTy, NewCmpVal);
else {
- Constant *CI = Context->getConstantInt(NewCmpIntTy, NewCmpVal);
- NewCmpRHS = Context->getConstantExprIntToPtr(CI, NewCmpTy);
+ Constant *CI = Context.getConstantInt(NewCmpIntTy, NewCmpVal);
+ NewCmpRHS = Context.getConstantExprIntToPtr(CI, NewCmpTy);
}
NewOffset = TyBits == NewTyBits
? SE->getMulExpr(CondUse->getOffset(),
@@ -2171,6 +2175,8 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
return;
+
+ LLVMContext &Context = L->getHeader()->getContext();
for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e;
++Stride) {
@@ -2233,7 +2239,7 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
if (!Init) continue;
- Constant *NewInit = Context->getConstantFP(DestTy, Init->getZExtValue());
+ Constant *NewInit = Context.getConstantFP(DestTy, Init->getZExtValue());
BinaryOperator *Incr =
dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
@@ -2257,7 +2263,7 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
/* create new increment. '++d' in above example. */
- Constant *CFP = Context->getConstantFP(DestTy, C->getZExtValue());
+ Constant *CFP = Context.getConstantFP(DestTy, C->getZExtValue());
BinaryOperator *NewIncr =
BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
Instruction::FAdd : Instruction::FSub,
@@ -2293,6 +2299,8 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
// one register value.
BasicBlock *LatchBlock = L->getLoopLatch();
BasicBlock *ExitingBlock = L->getExitingBlock();
+ LLVMContext &Context = LatchBlock->getContext();
+
if (!ExitingBlock)
// Multiple exits, just look at the exit in the latch block if there is one.
ExitingBlock = LatchBlock;
@@ -2382,7 +2390,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
Cond->moveBefore(TermBr);
} else {
// Otherwise, clone the terminating condition and insert into the loopend.
- Cond = cast<ICmpInst>(Cond->clone(*Context));
+ Cond = cast<ICmpInst>(Cond->clone(Context));
Cond->setName(L->getHeader()->getName() + ".termcond");
LatchBlock->getInstList().insert(TermBr, Cond);
@@ -2424,6 +2432,8 @@ void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
if (!ExitingBlock)
return; // More than one block exiting!
+ LLVMContext &Context = ExitingBlock->getContext();
+
// Okay, we've computed the exiting block. See what condition causes us to
// exit.
//
@@ -2496,7 +2506,7 @@ void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
Value *startVal = phi->getIncomingValue(inBlock);
Value *endVal = Cond->getOperand(1);
// FIXME check for case where both are constant
- Constant* Zero = Context->getConstantInt(Cond->getOperand(1)->getType(), 0);
+ Constant* Zero = Context.getConstantInt(Cond->getOperand(1)->getType(), 0);
BinaryOperator *NewStartVal =
BinaryOperator::Create(Instruction::Sub, endVal, startVal,
"tmp", PreInsertPt);
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 16e04b39c1..100b8c7cfb 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -216,6 +216,7 @@ bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) {
/// and profitable.
bool LoopUnswitch::processCurrentLoop() {
bool Changed = false;
+ LLVMContext &Context = currentLoop->getHeader()->getContext();
// Loop over all of the basic blocks in the loop. If we find an interior
// block that is branching on a loop-invariant condition, we can unswitch this
@@ -233,7 +234,7 @@ bool LoopUnswitch::processCurrentLoop() {
Value *LoopCond = FindLIVLoopCondition(BI->getCondition(),
currentLoop, Changed);
if (LoopCond && UnswitchIfProfitable(LoopCond,
- Context->getTrue())) {
+ Context.getTrue())) {
++NumBranches;
return true;
}
@@ -263,7 +264,7 @@ bool LoopUnswitch::processCurrentLoop() {
Value *LoopCond = FindLIVLoopCondition(SI->getCondition(),
currentLoop, Changed);
if (LoopCond && UnswitchIfProfitable(LoopCond,
- Context->getTrue())) {
+ Context.getTrue())) {
++NumSelects;
return true;
}
@@ -337,6 +338,7 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
BasicBlock **LoopExit) {
BasicBlock *Header = currentLoop->getHeader();
TerminatorInst *HeaderTerm = Header->getTerminator();
+ LLVMContext &Context = Header->getContext();
BasicBlock *LoopExitBB = 0;
if (BranchInst *BI = dyn_cast<BranchInst>(HeaderTerm)) {
@@ -351,10 +353,10 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
// this.
if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
BI->getSuccessor(0)))) {
- if (Val) *Val = Context->getTrue();
+ if (Val) *Val = Context.getTrue();
} else if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
BI->getSuccessor(1)))) {
- if (Val) *Val = Context->getFalse();
+ if (Val) *Val = Context.getFalse();
}
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(HeaderTerm)) {
// If this isn't a switch on Cond, we can't handle it.
@@ -510,7 +512,7 @@ void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
Value *BranchVal = LIC;
if (!isa<ConstantInt>(Val) || Val->getType() != Type::Int1Ty)
BranchVal = new ICmpInst(InsertPt, ICmpInst::ICMP_EQ, LIC, Val, "tmp");
- else if (Val != Context->getTrue())
+ else if (Val != Val->getContext().getTrue())
// We want to enter the new loop when the condition is true.
std::swap(TrueDest, FalseDest);
@@ -818,7 +820,7 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
// Anything that uses the instructions in this basic block should have their
// uses replaced with undefs.
if (!I->use_empty())
- I->replaceAllUsesWith(Context->getUndef(I->getType()));
+ I->replaceAllUsesWith(I->getContext().getUndef(I->getType()));
}
// If this is the edge to the header block for a loop, remove the loop and
@@ -899,6 +901,8 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
// selects, switches.
std::vector<User*> Users(LIC->use_begin(), LIC->use_end());
std::vector<Instruction*> Worklist;
+ LLVMContext &Context = Val->getContext();
+
// If we know that LIC == Val, or that LIC == NotVal, just replace uses of LIC
// in the loop with the appropriate one directly.
@@ -907,7 +911,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
if (IsEqual)
Replacement = Val;
else
- Replacement = Context->getConstantInt(Type::Int1Ty,
+ Replacement = Context.getConstantInt(Type::Int1Ty,
!cast<ConstantInt>(Val)->getZExtValue());
for (unsigned i = 0, e = Users.size(); i != e; ++i)
@@ -947,7 +951,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
Instruction* OldTerm = Old->getTerminator();
BranchInst::Create(Split, SISucc,
- Context->getTrue(), OldTerm);
+ Context.getTrue(), OldTerm);
LPM->deleteSimpleAnalysisValue(Old->getTerminator(), L);
Old->getTerminator()->eraseFromParent();
@@ -988,7 +992,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
Worklist.pop_back();
// Simple constant folding.
- if (Constant *C = ConstantFoldInstruction(I, Context)) {
+ if (Constant *C = ConstantFoldInstruction(I, I->getContext())) {
ReplaceUsesOfWith(I, C, Worklist, L, LPM);
continue;
}
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 3c7a5ab8f4..8937e1c863 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -36,7 +36,7 @@ STATISTIC(NumMemSetInfer, "Number of memsets inferred");
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
-static Value *isBytewiseValu