author     Owen Anderson <resistor@mac.com>    2009-07-22 00:24:57 +0000
committer  Owen Anderson <resistor@mac.com>    2009-07-22 00:24:57 +0000
commit     e922c0201916e0b980ab3cfe91e1413e68d55647 (patch)
tree       663be741b84470d97945f01da459a3627af683fd /lib
parent     7cf12c7efd37dc12c3ed536a3f4c373dddac2b85 (diff)
Get rid of the Pass+Context magic.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@76702 91177308-0d34-0410-b5e6-96231b3b80d8
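The change is mechanical: instead of each pass dereferencing an inherited Context pointer, the LLVMContext is recovered from a Value or Type and threaded through by reference. Below is a minimal C++ sketch of the before/after pattern, assuming LLVM 2.6-era headers; the helper names zeroBefore/zeroAfter are hypothetical and not part of this commit, while getContext() and getNullValue() are the calls visible in the hunks that follow.

// Sketch only: hypothetical helpers illustrating the Pass+Context removal.
#include "llvm/Constants.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include "llvm/Value.h"
using namespace llvm;

// Before: the context arrived through a pointer member supplied by the Pass.
static Constant *zeroBefore(Value *V, LLVMContext *Context) {
  return Context->getNullValue(V->getType());
}

// After: the context is fetched from the IR itself and used by reference.
static Constant *zeroAfter(Value *V) {
  LLVMContext &Context = V->getType()->getContext();
  return Context.getNullValue(V->getType());
}

The diffstat and hunks below apply that same substitution throughout lib/.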
Diffstat (limited to 'lib')
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp | 32
-rw-r--r--  lib/Analysis/ConstantFolding.cpp | 142
-rw-r--r--  lib/Analysis/IPA/Andersens.cpp | 2
-rw-r--r--  lib/Analysis/LoopVR.cpp | 6
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp | 63
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp | 20
-rw-r--r--  lib/Analysis/ValueTracking.cpp | 12
-rw-r--r--  lib/CodeGen/DwarfEHPrepare.cpp | 4
-rw-r--r--  lib/CodeGen/IntrinsicLowering.cpp | 24
-rw-r--r--  lib/CodeGen/SelectionDAG/CallingConvLower.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/FastISel.cpp | 10
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 2
-rw-r--r--  lib/CodeGen/ShadowStackGC.cpp | 40
-rw-r--r--  lib/CodeGen/UnreachableBlockElim.cpp | 2
-rw-r--r--  lib/ExecutionEngine/JIT/JIT.cpp | 18
-rw-r--r--  lib/Linker/LinkModules.cpp | 2
-rw-r--r--  lib/Target/ARM/ARMBaseRegisterInfo.cpp | 2
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp | 8
-rw-r--r--  lib/Target/ARM/Thumb1RegisterInfo.cpp | 2
-rw-r--r--  lib/Target/ARM/Thumb2RegisterInfo.cpp | 2
-rw-r--r--  lib/Target/Alpha/AlphaISelLowering.cpp | 4
-rw-r--r--  lib/Target/CBackend/CBackend.cpp | 12
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.cpp | 2
-rw-r--r--  lib/Target/MSP430/MSP430ISelLowering.cpp | 8
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp | 8
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp | 12
-rw-r--r--  lib/Target/Sparc/SparcISelLowering.cpp | 6
-rw-r--r--  lib/Target/SystemZ/SystemZISelLowering.cpp | 8
-rw-r--r--  lib/Target/X86/X86FastISel.cpp | 4
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp | 8
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp | 4
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.cpp | 8
-rw-r--r--  lib/Transforms/IPO/ArgumentPromotion.cpp | 17
-rw-r--r--  lib/Transforms/IPO/DeadArgumentElimination.cpp | 22
-rw-r--r--  lib/Transforms/IPO/DeadTypeElimination.cpp | 1
-rw-r--r--  lib/Transforms/IPO/ExtractGV.cpp | 11
-rw-r--r--  lib/Transforms/IPO/GlobalDCE.cpp | 1
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp | 172
-rw-r--r--  lib/Transforms/IPO/IPConstantPropagation.cpp | 10
-rw-r--r--  lib/Transforms/IPO/IndMemRemoval.cpp | 2
-rw-r--r--  lib/Transforms/IPO/Internalize.cpp | 2
-rw-r--r--  lib/Transforms/IPO/LowerSetJmp.cpp | 22
-rw-r--r--  lib/Transforms/IPO/MergeFunctions.cpp | 4
-rw-r--r--  lib/Transforms/IPO/PartialInlining.cpp | 2
-rw-r--r--  lib/Transforms/IPO/PartialSpecialization.cpp | 2
-rw-r--r--  lib/Transforms/IPO/PruneEH.cpp | 2
-rw-r--r--  lib/Transforms/IPO/RaiseAllocations.cpp | 24
-rw-r--r--  lib/Transforms/IPO/StripDeadPrototypes.cpp | 1
-rw-r--r--  lib/Transforms/IPO/StripSymbols.cpp | 1
-rw-r--r--  lib/Transforms/IPO/StructRetPromotion.cpp | 3
-rw-r--r--  lib/Transforms/Instrumentation/BlockProfiling.cpp | 8
-rw-r--r--  lib/Transforms/Instrumentation/EdgeProfiling.cpp | 4
-rw-r--r--  lib/Transforms/Instrumentation/ProfilingUtils.cpp | 28
-rw-r--r--  lib/Transforms/Instrumentation/RSProfiling.cpp | 33
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp | 10
-rw-r--r--  lib/Transforms/Scalar/ConstantProp.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp | 12
-rw-r--r--  lib/Transforms/Scalar/GVNPRE.cpp | 7
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp | 12
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp | 6
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp | 20
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp | 10
-rw-r--r--  lib/Transforms/Scalar/LoopIndexSplit.cpp | 12
-rw-r--r--  lib/Transforms/Scalar/LoopRotation.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp | 28
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp | 22
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp | 21
-rw-r--r--  lib/Transforms/Scalar/PredicateSimplifier.cpp | 22
-rw-r--r--  lib/Transforms/Scalar/Reassociate.cpp | 39
-rw-r--r--  lib/Transforms/Scalar/Reg2Mem.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp | 12
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp | 113
-rw-r--r--  lib/Transforms/Scalar/SimplifyCFGPass.cpp | 6
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp | 6
-rw-r--r--  lib/Transforms/Scalar/TailDuplication.cpp | 4
-rw-r--r--  lib/Transforms/Utils/AddrModeMatcher.cpp | 2
-rw-r--r--  lib/Transforms/Utils/BasicBlockUtils.cpp | 10
-rw-r--r--  lib/Transforms/Utils/CloneFunction.cpp | 14
-rw-r--r--  lib/Transforms/Utils/CloneModule.cpp | 4
-rw-r--r--  lib/Transforms/Utils/CodeExtractor.cpp | 38
-rw-r--r--  lib/Transforms/Utils/InlineFunction.cpp | 18
-rw-r--r--  lib/Transforms/Utils/LCSSA.cpp | 2
-rw-r--r--  lib/Transforms/Utils/Local.cpp | 6
-rw-r--r--  lib/Transforms/Utils/LoopSimplify.cpp | 2
-rw-r--r--  lib/Transforms/Utils/LowerAllocations.cpp | 23
-rw-r--r--  lib/Transforms/Utils/LowerInvoke.cpp | 56
-rw-r--r--  lib/Transforms/Utils/LowerSwitch.cpp | 7
-rw-r--r--  lib/Transforms/Utils/Mem2Reg.cpp | 2
-rw-r--r--  lib/Transforms/Utils/PromoteMemoryToRegister.cpp | 14
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp | 26
-rw-r--r--  lib/Transforms/Utils/ValueMapper.cpp | 10
-rw-r--r--  lib/VMCore/AutoUpgrade.cpp | 56
-rw-r--r--  lib/VMCore/BasicBlock.cpp | 6
-rw-r--r--  lib/VMCore/Function.cpp | 6
-rw-r--r--  lib/VMCore/Value.cpp | 2
-rw-r--r--  lib/VMCore/Verifier.cpp | 3
96 files changed, 793 insertions, 733 deletions
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 308c69a87e..49d771a42d 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -309,7 +309,7 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
AliasAnalysis::AliasResult
BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size) {
- Context = &V1->getType()->getContext();
+ LLVMContext &Context = V1->getType()->getContext();
// Strip off any constant expression casts if they exist
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V1))
@@ -395,13 +395,13 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
// the base pointers.
while (isGEP(GEP1->getOperand(0)) &&
GEP1->getOperand(1) ==
- Context->getNullValue(GEP1->getOperand(1)->getType()))
+ Context.getNullValue(GEP1->getOperand(1)->getType()))
GEP1 = cast<User>(GEP1->getOperand(0));
const Value *BasePtr1 = GEP1->getOperand(0);
while (isGEP(GEP2->getOperand(0)) &&
GEP2->getOperand(1) ==
- Context->getNullValue(GEP2->getOperand(1)->getType()))
+ Context.getNullValue(GEP2->getOperand(1)->getType()))
GEP2 = cast<User>(GEP2->getOperand(0));
const Value *BasePtr2 = GEP2->getOperand(0);
@@ -481,7 +481,7 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
for (unsigned i = 0; i != GEPOperands.size(); ++i)
if (!isa<ConstantInt>(GEPOperands[i]))
GEPOperands[i] =
- Context->getNullValue(GEPOperands[i]->getType());
+ Context.getNullValue(GEPOperands[i]->getType());
int64_t Offset =
getTargetData().getIndexedOffset(BasePtr->getType(),
&GEPOperands[0],
@@ -499,16 +499,16 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
// This function is used to determine if the indices of two GEP instructions are
// equal. V1 and V2 are the indices.
-static bool IndexOperandsEqual(Value *V1, Value *V2, LLVMContext *Context) {
+static bool IndexOperandsEqual(Value *V1, Value *V2, LLVMContext &Context) {
if (V1->getType() == V2->getType())
return V1 == V2;
if (Constant *C1 = dyn_cast<Constant>(V1))
if (Constant *C2 = dyn_cast<Constant>(V2)) {
// Sign extend the constants to long types, if necessary
if (C1->getType() != Type::Int64Ty)
- C1 = Context->getConstantExprSExt(C1, Type::Int64Ty);
+ C1 = Context.getConstantExprSExt(C1, Type::Int64Ty);
if (C2->getType() != Type::Int64Ty)
- C2 = Context->getConstantExprSExt(C2, Type::Int64Ty);
+ C2 = Context.getConstantExprSExt(C2, Type::Int64Ty);
return C1 == C2;
}
return false;
@@ -529,7 +529,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
const PointerType *GEPPointerTy = cast<PointerType>(BasePtr1Ty);
- Context = &GEPPointerTy->getContext();
+ LLVMContext &Context = GEPPointerTy->getContext();
// Find the (possibly empty) initial sequence of equal values... which are not
// necessarily constants.
@@ -604,9 +604,9 @@ BasicAliasAnalysis::CheckGEPInstructions(
if (G1OC->getType() != G2OC->getType()) {
// Sign extend both operands to long.
if (G1OC->getType() != Type::Int64Ty)
- G1OC = Context->getConstantExprSExt(G1OC, Type::Int64Ty);
+ G1OC = Context.getConstantExprSExt(G1OC, Type::Int64Ty);
if (G2OC->getType() != Type::Int64Ty)
- G2OC = Context->getConstantExprSExt(G2OC, Type::Int64Ty);
+ G2OC = Context.getConstantExprSExt(G2OC, Type::Int64Ty);
GEP1Ops[FirstConstantOper] = G1OC;
GEP2Ops[FirstConstantOper] = G2OC;
}
@@ -693,7 +693,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
// TargetData::getIndexedOffset.
for (i = 0; i != MaxOperands; ++i)
if (!isa<ConstantInt>(GEP1Ops[i]))
- GEP1Ops[i] = Context->getNullValue(GEP1Ops[i]->getType());
+ GEP1Ops[i] = Context.getNullValue(GEP1Ops[i]->getType());
// Okay, now get the offset. This is the relative offset for the full
// instruction.
const TargetData &TD = getTargetData();
@@ -738,7 +738,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
const Type *ZeroIdxTy = GEPPointerTy;
for (unsigned i = 0; i != FirstConstantOper; ++i) {
if (!isa<StructType>(ZeroIdxTy))
- GEP1Ops[i] = GEP2Ops[i] = Context->getNullValue(Type::Int32Ty);
+ GEP1Ops[i] = GEP2Ops[i] = Context.getNullValue(Type::Int32Ty);
if (const CompositeType *CT = dyn_cast<CompositeType>(ZeroIdxTy))
ZeroIdxTy = CT->getTypeAtIndex(GEP1Ops[i]);
@@ -753,7 +753,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
// If they are equal, use a zero index...
if (Op1 == Op2 && BasePtr1Ty == BasePtr2Ty) {
if (!isa<ConstantInt>(Op1))
- GEP1Ops[i] = GEP2Ops[i] = Context->getNullValue(Op1->getType());
+ GEP1Ops[i] = GEP2Ops[i] = Context.getNullValue(Op1->getType());
// Otherwise, just keep the constants we have.
} else {
if (Op1) {
@@ -780,10 +780,10 @@ BasicAliasAnalysis::CheckGEPInstructions(
//
if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr1Ty))
GEP1Ops[i] =
- Context->getConstantInt(Type::Int64Ty,AT->getNumElements()-1);
+ Context.getConstantInt(Type::Int64Ty,AT->getNumElements()-1);
else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr1Ty))
GEP1Ops[i] =
- Context->getConstantInt(Type::Int64Ty,VT->getNumElements()-1);
+ Context.getConstantInt(Type::Int64Ty,VT->getNumElements()-1);
}
}
@@ -798,7 +798,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
return MayAlias; // Be conservative with out-of-range accesses
}
} else { // Conservatively assume the minimum value for this index
- GEP2Ops[i] = Context->getNullValue(Op2->getType());
+ GEP2Ops[i] = Context.getNullValue(Op2->getType());
}
}
}
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 7938ca6dd8..d60b4f6800 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -95,7 +95,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
Constant *Op1, const TargetData *TD,
- LLVMContext *Context){
+ LLVMContext &Context){
// SROA
// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
@@ -113,7 +113,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
GV1 == GV2) {
// (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
- return Context->getConstantInt(Op0->getType(), Offs1-Offs2);
+ return Context.getConstantInt(Op0->getType(), Offs1-Offs2);
}
}
@@ -124,7 +124,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
const Type *ResultTy,
- LLVMContext *Context,
+ LLVMContext &Context,
const TargetData *TD) {
Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
@@ -151,14 +151,14 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
uint64_t Offset = TD->getIndexedOffset(Ptr->getType(),
(Value**)Ops+1, NumOps-1);
- Constant *C = Context->getConstantInt(TD->getIntPtrType(), Offset+BasePtr);
- return Context->getConstantExprIntToPtr(C, ResultTy);
+ Constant *C = Context.getConstantInt(TD->getIntPtrType(), Offset+BasePtr);
+ return Context.getConstantExprIntToPtr(C, ResultTy);
}
/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// targetdata. Return 0 if unfoldable.
static Constant *FoldBitCast(Constant *C, const Type *DestTy,
- const TargetData &TD, LLVMContext *Context) {
+ const TargetData &TD, LLVMContext &Context) {
// If this is a bitcast from constant vector -> vector, fold it.
if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
@@ -184,24 +184,24 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
if (DstEltTy->isFloatingPoint()) {
// Fold to an vector of integers with same size as our FP type.
unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
- const Type *DestIVTy = Context->getVectorType(
- Context->getIntegerType(FPWidth), NumDstElt);
+ const Type *DestIVTy = Context.getVectorType(
+ Context.getIntegerType(FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD, Context);
if (!C) return 0;
// Finally, VMCore can handle this now that #elts line up.
- return Context->getConstantExprBitCast(C, DestTy);
+ return Context.getConstantExprBitCast(C, DestTy);
}
// Okay, we know the destination is integer, if the input is FP, convert
// it to integer first.
if (SrcEltTy->isFloatingPoint()) {
unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
- const Type *SrcIVTy = Context->getVectorType(
- Context->getIntegerType(FPWidth), NumSrcElt);